max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
users/urls.py | dhirajshah04/moneytracker | 1 | 6624951 | <reponame>dhirajshah04/moneytracker
from django.urls import path
from users import views
app_name = 'users'
urlpatterns = [
path('login/', views.user_login, name='login'),
path('logout/', views.user_logout, name='user_logout'),
path('register/', views.user_register, name='user_register'),
] | from django.urls import path
from users import views
app_name = 'users'
urlpatterns = [
path('login/', views.user_login, name='login'),
path('logout/', views.user_logout, name='user_logout'),
path('register/', views.user_register, name='user_register'),
] | none | 1 | 1.742513 | 2 | |
src/v1/009-log1p.py | peterorum/kaggle-house-prices | 0 | 6624952 | # feature importance
# local score 0.0449
# kaggle score .14106
# minimize score
import os
import sys # noqa
from time import time
from pprint import pprint # noqa
import lightgbm as lgb
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.feature_selection import VarianceThreshold
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
pd.options.display.float_format = '{:.4f}'.format
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 2000)
np.set_printoptions(threshold=sys.maxsize)
is_kaggle = os.environ['HOME'] == '/tmp'
zipext = '' # if is_kaggle else '.zip'
train_file = 'train' # if is_kaggle else 'sample'
start_time = time()
last_time = time()
def timer():
global last_time
print(f'{((time() - last_time) / 60):.1f} mins\n') # noqa
last_time = time()
def evaluate(train, test, unique_id, target):
print('evaluate')
lgb_model = lgb.LGBMRegressor(nthread=4, n_jobs=-1, verbose=-1, metric='rmse')
x_train = train.drop([target, unique_id], axis=1)
y_train = train[target]
x_test = test[x_train.columns]
lgb_model.fit(x_train, y_train)
train_predictions = lgb_model.predict(x_train)
test_predictions = lgb_model.predict(x_test)
train_score = np.sqrt(mean_squared_error(train_predictions, y_train))
timer()
return test_predictions, train_score
# --- missing values
def get_many_missing_values(train, test, unique_id, target):
print(f'get_many_missing_values')
train_targets = train[target]
threshold = 0.75
train_missing = (train.isnull().sum() / len(train)).sort_values(ascending=False)
test_missing = (test.isnull().sum() / len(test)).sort_values(ascending=False)
# identify missing values above threshold
train_missing = train_missing.index[train_missing > threshold]
test_missing = test_missing.index[test_missing > threshold]
all_missing = list(set(set(train_missing) | set(test_missing)))
if len(all_missing) > 0:
print(f'columns with more than {threshold}% missing values')
pprint(all_missing)
train = train.drop(columns=all_missing, axis=1)
test = test.drop(columns=all_missing, axis=1)
train, test = train.align(test, join='inner', axis=1)
# restore after align
train[target] = train_targets
timer()
return train, test
# --- remove keys
def remove_keys(list, keys):
result = [x for x in list if x not in keys]
return result
# --- replace missing values
def replace_missing_values(train, test, unique_id, target):
print(f'replace_missing_values')
numeric_cols = [col for col in train.columns
if (train[col].dtype == 'int64') | (train[col].dtype == 'float64')]
numeric_cols = remove_keys(numeric_cols, [unique_id, target])
categorical_cols = [col for col in train.columns if train[col].dtype == 'object']
categorical_cols = remove_keys(categorical_cols, [unique_id, target])
# replace missing numericals with mean
for col in numeric_cols:
if train[col].isna().any() | test[col].isna().any():
mean = train[col].mean()
train[col].fillna(mean, inplace=True)
if col in test.columns:
test[col].fillna(mean, inplace=True)
# convert to lowercase
for col in categorical_cols:
train[col] = train[col].apply(lambda x: str(x).lower())
if col in test.columns:
test[col] = test[col].apply(lambda x: str(x).lower())
# replace string nan with np.nan
train.replace('nan', np.nan, inplace=True)
test.replace('nan', np.nan, inplace=True)
# replace missing categoricals with mode
for col in categorical_cols:
if train[col].isna().any() or test[col].isna().any():
mode = train[col].mode()[0]
train[col].fillna(mode, inplace=True)
if col in test.columns:
test[col].fillna(mode, inplace=True)
timer()
return train, test
# --- column differences
def get_column_differences(train, test, unique_id, target):
print(f'get_column_differences')
train_without_target = train.drop(target, axis=1)
not_in_test = train_without_target.columns.difference(test.columns)
not_in_train = test.columns.difference(train_without_target.columns)
if len(not_in_test) > 0:
print(f'In train but not test\n{not_in_test}')
if len(not_in_train) > 0:
print(f'In test but not train\n{not_in_train}')
timer()
return train, test
# --- categorical data
def get_categorical_data(train, test, unique_id, target):
print(f'get_categorical_data')
train_targets = train[target]
categorical_cols = [col for col in train.columns if train[col].dtype == 'object']
if unique_id in categorical_cols:
categorical_cols.remove(unique_id)
max_categories = train.shape[0] * 0.5
too_many_value_categorical_cols = [col for col in categorical_cols
if train[col].nunique() >= max_categories]
if len(too_many_value_categorical_cols) > 0:
print('too many categorical values', too_many_value_categorical_cols)
# drop if too many values - usually a unique id column
categorical_cols = [i for i in categorical_cols if i not in too_many_value_categorical_cols]
train = train.drop(too_many_value_categorical_cols, axis=1)
test.drop([col for col in too_many_value_categorical_cols
if col in test.columns], axis=1, inplace=True)
# one-hot encode if not too many values
max_ohe_categories = 10
ohe_categorical_cols = [col for col in categorical_cols
if train[col].nunique() <= max_ohe_categories]
categorical_cols = [i for i in categorical_cols if i not in ohe_categorical_cols]
if len(ohe_categorical_cols) > 0:
print('one-hot encode', ohe_categorical_cols)
# one-hot encode & align to have same columns
train = pd.get_dummies(train, columns=ohe_categorical_cols)
test = pd.get_dummies(test, columns=ohe_categorical_cols)
train, test = train.align(test, join='inner', axis=1)
# restore after align
train[target] = train_targets
# possibly rank encode rather than ohe. see gstore.
# label encode (convert to integer)
label_encode_categorical_cols = categorical_cols
print('label encode', label_encode_categorical_cols)
for col in label_encode_categorical_cols:
lbl = LabelEncoder()
lbl.fit(list(train[col].values.astype('str')) + list(test[col].values.astype('str')))
train[col] = lbl.transform(list(train[col].values.astype('str')))
test[col] = lbl.transform(list(test[col].values.astype('str')))
timer()
return train, test
# --- feature selection
def get_feature_selection(train, test, unique_id, target):
print(f'get_feature_selection')
all_numeric_cols = [col for col in train.columns
if (train[col].dtype == 'int64') | (train[col].dtype == 'float64')]
if unique_id in all_numeric_cols:
all_numeric_cols.remove(unique_id)
if target in all_numeric_cols:
all_numeric_cols.remove(target)
# feature selection via variance
train_numeric = train[all_numeric_cols].fillna(0)
select_features = VarianceThreshold(threshold=0.2)
select_features.fit(train_numeric)
numeric_cols = train_numeric.columns[select_features.get_support(indices=True)].tolist()
# remove cols without variance
for col in all_numeric_cols:
if col not in numeric_cols:
print(f'variance drop {col}')
train.drop(col, axis=1, inplace=True)
if col in test.columns:
test.drop(col, axis=1, inplace=True)
timer()
return train, test
# --- feature importance
def get_feature_importance(train, test, unique_id, target):
print(f'get_feature_importance')
model = lgb.LGBMRegressor(nthread=4, n_jobs=-1, verbose=-1)
x_train = train.drop([unique_id, target], axis=1)
# initialize an empty array to hold feature importances
feature_importances = np.zeros(x_train.shape[1])
# fit the model twice to avoid overfitting
for i in range(2):
# split into training and validation set
train_features, valid_features, train_y, valid_y = train_test_split(x_train, train[target],
test_size=0.25, random_state=i)
# train using early stopping
model.fit(train_features, train_y, early_stopping_rounds=100,
eval_set=[(valid_features, valid_y)],
eval_metric='rmse', verbose=False)
# record the feature importances
feature_importances += model.feature_importances_
# average feature importances!
feature_importances = feature_importances / 2
feature_importances = pd.DataFrame(
{'feature': list(x_train.columns), 'importance': feature_importances}).sort_values('importance', ascending=False)
# sort features according to importance
feature_importances = feature_importances.sort_values('importance', ascending=False).reset_index()
most_important_features = feature_importances[0:10]['feature'].tolist()
# normalize the feature importances to add up to one
feature_importances['importance_normalized'] = feature_importances['importance'] / feature_importances['importance'].sum()
feature_importances['cumulative_importance'] = np.cumsum(feature_importances['importance_normalized'])
# find the features with minimal importance
# unimportant_features = list(feature_importances[feature_importances['importance'] == 0.0]['feature'])
# Threshold for cumulative importance
threshold = 0.9996
# extract the features to drop
features_to_drop = list(feature_importances[feature_importances[
'cumulative_importance'] > threshold]['feature'])
if len(features_to_drop) > 0:
print(feature_importances)
print(f'features to drop, under {threshold} importance:')
pprint(features_to_drop)
train = train.drop(features_to_drop, axis=1)
test = test.drop(features_to_drop, axis=1)
timer()
return train, test, most_important_features
# --- remove collinear features
def get_collinear_features(train, test, unique_id, target):
print('get_collinear_features')
corrs = train.corr()
upper = corrs.where(np.triu(np.ones(corrs.shape), k=1).astype(np.bool))
threshold = 0.8
# select columns with correlations above threshold
to_drop = [column for column in upper.columns if any(upper[column] > threshold) and column not in [unique_id, target]]
if len(to_drop) > 0:
print('collinear drop')
pprint(to_drop)
train = train.drop(columns=to_drop, axis=1)
test = test.drop(columns=to_drop, axis=1)
timer()
return train, test
# arithmetic features
def get_arithmetic_features(train, test, unique_id, target, cols, source_cols):
print('get_arithmetic_features')
# just choose from original columns, not encodeds
numeric_cols = [col for col in cols
if (col in source_cols) & (train[col].dtype == 'int64') | (train[col].dtype == 'float64')]
numeric_cols = remove_keys(numeric_cols, [unique_id, target])
for i1 in range(0, len(numeric_cols)):
col1 = numeric_cols[i1]
# powers
train[f'{col1} squared'] = train[col1] ** 2
test[f'{col1} squared'] = test[col1] ** 2
train[f'{col1} cubed'] = train[col1] ** 3
test[f'{col1} cubed'] = test[col1] ** 3
train[f'{col1}^4'] = train[col1] ** 4
test[f'{col1}^4'] = test[col1] ** 4
for i2 in range(i1 + 1, len(numeric_cols)):
col2 = numeric_cols[i2]
train[f'{col1} by {col2}'] = train[col1] * train[col2]
test[f'{col1} by {col2}'] = test[col1] * test[col2]
train[f'{col1} plus {col2}'] = train[col1] + train[col2]
test[f'{col1} plus {col2}'] = test[col1] + test[col2]
train[f'{col1} minus {col2}'] = train[col1] - train[col2]
test[f'{col1} minus {col2}'] = test[col1] - test[col2]
if not (train[col2] == 0).any():
train[f'{col1} on {col2}'] = train[col1] / train[col2]
test[f'{col1} on {col2}'] = test[col1] / test[col2]
elif not (train[col1] == 0).any():
train[f'{col2} on {col1}'] = train[col2] / train[col1]
test[f'{col2} on {col1}'] = test[col2] / test[col1]
timer()
return train, test
# custom features
def get_custom_features(train, test, unique_id, target):
print(f'get_custom_features')
timer()
return train, test
# remove skew towards a few large values by using log1p
def get_logged(train, test, target):
train[target] = np.log1p(train[target])
return train, test
# --------------------- run
def run():
unique_id = 'Id'
target = 'SalePrice'
# load data
train = pd.read_csv(f'../input/{train_file}.csv{zipext}')
test = pd.read_csv(f'../input/test.csv{zipext}')
original_columns = train.columns.tolist()
train, test = get_logged(train, test, target)
train, test = get_many_missing_values(train, test, unique_id, target)
train, test = replace_missing_values(train, test, unique_id, target)
train, test = get_column_differences(train, test, unique_id, target)
train, test = get_custom_features(train, test, unique_id, target)
train, test = get_categorical_data(train, test, unique_id, target)
train, test, most_important_cols = get_feature_importance(train, test, unique_id, target)
train, test = get_arithmetic_features(train, test, unique_id, target, most_important_cols, original_columns)
train, test = get_collinear_features(train, test, unique_id, target)
train, test = get_feature_selection(train, test, unique_id, target)
train, test, _ = get_feature_importance(train, test, unique_id, target)
# ----------
test_predictions, train_score = evaluate(train, test, unique_id, target)
print('score', train_score)
test[target] = np.expm1(test_predictions)
predictions = test[[unique_id, target]]
predictions.to_csv('submission.csv', index=False)
# -------- main
run()
print(f'Finished {((time() - start_time) / 60):.1f} mins\a')
| # feature importance
# local score 0.0449
# kaggle score .14106
# minimize score
import os
import sys # noqa
from time import time
from pprint import pprint # noqa
import lightgbm as lgb
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.feature_selection import VarianceThreshold
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
pd.options.display.float_format = '{:.4f}'.format
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 2000)
np.set_printoptions(threshold=sys.maxsize)
is_kaggle = os.environ['HOME'] == '/tmp'
zipext = '' # if is_kaggle else '.zip'
train_file = 'train' # if is_kaggle else 'sample'
start_time = time()
last_time = time()
def timer():
global last_time
print(f'{((time() - last_time) / 60):.1f} mins\n') # noqa
last_time = time()
def evaluate(train, test, unique_id, target):
print('evaluate')
lgb_model = lgb.LGBMRegressor(nthread=4, n_jobs=-1, verbose=-1, metric='rmse')
x_train = train.drop([target, unique_id], axis=1)
y_train = train[target]
x_test = test[x_train.columns]
lgb_model.fit(x_train, y_train)
train_predictions = lgb_model.predict(x_train)
test_predictions = lgb_model.predict(x_test)
train_score = np.sqrt(mean_squared_error(train_predictions, y_train))
timer()
return test_predictions, train_score
# --- missing values
def get_many_missing_values(train, test, unique_id, target):
print(f'get_many_missing_values')
train_targets = train[target]
threshold = 0.75
train_missing = (train.isnull().sum() / len(train)).sort_values(ascending=False)
test_missing = (test.isnull().sum() / len(test)).sort_values(ascending=False)
# identify missing values above threshold
train_missing = train_missing.index[train_missing > threshold]
test_missing = test_missing.index[test_missing > threshold]
all_missing = list(set(set(train_missing) | set(test_missing)))
if len(all_missing) > 0:
print(f'columns with more than {threshold}% missing values')
pprint(all_missing)
train = train.drop(columns=all_missing, axis=1)
test = test.drop(columns=all_missing, axis=1)
train, test = train.align(test, join='inner', axis=1)
# restore after align
train[target] = train_targets
timer()
return train, test
# --- remove keys
def remove_keys(list, keys):
result = [x for x in list if x not in keys]
return result
# --- replace missing values
def replace_missing_values(train, test, unique_id, target):
print(f'replace_missing_values')
numeric_cols = [col for col in train.columns
if (train[col].dtype == 'int64') | (train[col].dtype == 'float64')]
numeric_cols = remove_keys(numeric_cols, [unique_id, target])
categorical_cols = [col for col in train.columns if train[col].dtype == 'object']
categorical_cols = remove_keys(categorical_cols, [unique_id, target])
# replace missing numericals with mean
for col in numeric_cols:
if train[col].isna().any() | test[col].isna().any():
mean = train[col].mean()
train[col].fillna(mean, inplace=True)
if col in test.columns:
test[col].fillna(mean, inplace=True)
# convert to lowercase
for col in categorical_cols:
train[col] = train[col].apply(lambda x: str(x).lower())
if col in test.columns:
test[col] = test[col].apply(lambda x: str(x).lower())
# replace string nan with np.nan
train.replace('nan', np.nan, inplace=True)
test.replace('nan', np.nan, inplace=True)
# replace missing categoricals with mode
for col in categorical_cols:
if train[col].isna().any() or test[col].isna().any():
mode = train[col].mode()[0]
train[col].fillna(mode, inplace=True)
if col in test.columns:
test[col].fillna(mode, inplace=True)
timer()
return train, test
# --- column differences
def get_column_differences(train, test, unique_id, target):
print(f'get_column_differences')
train_without_target = train.drop(target, axis=1)
not_in_test = train_without_target.columns.difference(test.columns)
not_in_train = test.columns.difference(train_without_target.columns)
if len(not_in_test) > 0:
print(f'In train but not test\n{not_in_test}')
if len(not_in_train) > 0:
print(f'In test but not train\n{not_in_train}')
timer()
return train, test
# --- categorical data
def get_categorical_data(train, test, unique_id, target):
print(f'get_categorical_data')
train_targets = train[target]
categorical_cols = [col for col in train.columns if train[col].dtype == 'object']
if unique_id in categorical_cols:
categorical_cols.remove(unique_id)
max_categories = train.shape[0] * 0.5
too_many_value_categorical_cols = [col for col in categorical_cols
if train[col].nunique() >= max_categories]
if len(too_many_value_categorical_cols) > 0:
print('too many categorical values', too_many_value_categorical_cols)
# drop if too many values - usually a unique id column
categorical_cols = [i for i in categorical_cols if i not in too_many_value_categorical_cols]
train = train.drop(too_many_value_categorical_cols, axis=1)
test.drop([col for col in too_many_value_categorical_cols
if col in test.columns], axis=1, inplace=True)
# one-hot encode if not too many values
max_ohe_categories = 10
ohe_categorical_cols = [col for col in categorical_cols
if train[col].nunique() <= max_ohe_categories]
categorical_cols = [i for i in categorical_cols if i not in ohe_categorical_cols]
if len(ohe_categorical_cols) > 0:
print('one-hot encode', ohe_categorical_cols)
# one-hot encode & align to have same columns
train = pd.get_dummies(train, columns=ohe_categorical_cols)
test = pd.get_dummies(test, columns=ohe_categorical_cols)
train, test = train.align(test, join='inner', axis=1)
# restore after align
train[target] = train_targets
# possibly rank encode rather than ohe. see gstore.
# label encode (convert to integer)
label_encode_categorical_cols = categorical_cols
print('label encode', label_encode_categorical_cols)
for col in label_encode_categorical_cols:
lbl = LabelEncoder()
lbl.fit(list(train[col].values.astype('str')) + list(test[col].values.astype('str')))
train[col] = lbl.transform(list(train[col].values.astype('str')))
test[col] = lbl.transform(list(test[col].values.astype('str')))
timer()
return train, test
# --- feature selection
def get_feature_selection(train, test, unique_id, target):
print(f'get_feature_selection')
all_numeric_cols = [col for col in train.columns
if (train[col].dtype == 'int64') | (train[col].dtype == 'float64')]
if unique_id in all_numeric_cols:
all_numeric_cols.remove(unique_id)
if target in all_numeric_cols:
all_numeric_cols.remove(target)
# feature selection via variance
train_numeric = train[all_numeric_cols].fillna(0)
select_features = VarianceThreshold(threshold=0.2)
select_features.fit(train_numeric)
numeric_cols = train_numeric.columns[select_features.get_support(indices=True)].tolist()
# remove cols without variance
for col in all_numeric_cols:
if col not in numeric_cols:
print(f'variance drop {col}')
train.drop(col, axis=1, inplace=True)
if col in test.columns:
test.drop(col, axis=1, inplace=True)
timer()
return train, test
# --- feature importance
def get_feature_importance(train, test, unique_id, target):
print(f'get_feature_importance')
model = lgb.LGBMRegressor(nthread=4, n_jobs=-1, verbose=-1)
x_train = train.drop([unique_id, target], axis=1)
# initialize an empty array to hold feature importances
feature_importances = np.zeros(x_train.shape[1])
# fit the model twice to avoid overfitting
for i in range(2):
# split into training and validation set
train_features, valid_features, train_y, valid_y = train_test_split(x_train, train[target],
test_size=0.25, random_state=i)
# train using early stopping
model.fit(train_features, train_y, early_stopping_rounds=100,
eval_set=[(valid_features, valid_y)],
eval_metric='rmse', verbose=False)
# record the feature importances
feature_importances += model.feature_importances_
# average feature importances!
feature_importances = feature_importances / 2
feature_importances = pd.DataFrame(
{'feature': list(x_train.columns), 'importance': feature_importances}).sort_values('importance', ascending=False)
# sort features according to importance
feature_importances = feature_importances.sort_values('importance', ascending=False).reset_index()
most_important_features = feature_importances[0:10]['feature'].tolist()
# normalize the feature importances to add up to one
feature_importances['importance_normalized'] = feature_importances['importance'] / feature_importances['importance'].sum()
feature_importances['cumulative_importance'] = np.cumsum(feature_importances['importance_normalized'])
# find the features with minimal importance
# unimportant_features = list(feature_importances[feature_importances['importance'] == 0.0]['feature'])
# Threshold for cumulative importance
threshold = 0.9996
# extract the features to drop
features_to_drop = list(feature_importances[feature_importances[
'cumulative_importance'] > threshold]['feature'])
if len(features_to_drop) > 0:
print(feature_importances)
print(f'features to drop, under {threshold} importance:')
pprint(features_to_drop)
train = train.drop(features_to_drop, axis=1)
test = test.drop(features_to_drop, axis=1)
timer()
return train, test, most_important_features
# --- remove collinear features
def get_collinear_features(train, test, unique_id, target):
print('get_collinear_features')
corrs = train.corr()
upper = corrs.where(np.triu(np.ones(corrs.shape), k=1).astype(np.bool))
threshold = 0.8
# select columns with correlations above threshold
to_drop = [column for column in upper.columns if any(upper[column] > threshold) and column not in [unique_id, target]]
if len(to_drop) > 0:
print('collinear drop')
pprint(to_drop)
train = train.drop(columns=to_drop, axis=1)
test = test.drop(columns=to_drop, axis=1)
timer()
return train, test
# arithmetic features
def get_arithmetic_features(train, test, unique_id, target, cols, source_cols):
print('get_arithmetic_features')
# just choose from original columns, not encodeds
numeric_cols = [col for col in cols
if (col in source_cols) & (train[col].dtype == 'int64') | (train[col].dtype == 'float64')]
numeric_cols = remove_keys(numeric_cols, [unique_id, target])
for i1 in range(0, len(numeric_cols)):
col1 = numeric_cols[i1]
# powers
train[f'{col1} squared'] = train[col1] ** 2
test[f'{col1} squared'] = test[col1] ** 2
train[f'{col1} cubed'] = train[col1] ** 3
test[f'{col1} cubed'] = test[col1] ** 3
train[f'{col1}^4'] = train[col1] ** 4
test[f'{col1}^4'] = test[col1] ** 4
for i2 in range(i1 + 1, len(numeric_cols)):
col2 = numeric_cols[i2]
train[f'{col1} by {col2}'] = train[col1] * train[col2]
test[f'{col1} by {col2}'] = test[col1] * test[col2]
train[f'{col1} plus {col2}'] = train[col1] + train[col2]
test[f'{col1} plus {col2}'] = test[col1] + test[col2]
train[f'{col1} minus {col2}'] = train[col1] - train[col2]
test[f'{col1} minus {col2}'] = test[col1] - test[col2]
if not (train[col2] == 0).any():
train[f'{col1} on {col2}'] = train[col1] / train[col2]
test[f'{col1} on {col2}'] = test[col1] / test[col2]
elif not (train[col1] == 0).any():
train[f'{col2} on {col1}'] = train[col2] / train[col1]
test[f'{col2} on {col1}'] = test[col2] / test[col1]
timer()
return train, test
# custom features
def get_custom_features(train, test, unique_id, target):
print(f'get_custom_features')
timer()
return train, test
# remove skew towards a few large values by using log1p
def get_logged(train, test, target):
train[target] = np.log1p(train[target])
return train, test
# --------------------- run
def run():
unique_id = 'Id'
target = 'SalePrice'
# load data
train = pd.read_csv(f'../input/{train_file}.csv{zipext}')
test = pd.read_csv(f'../input/test.csv{zipext}')
original_columns = train.columns.tolist()
train, test = get_logged(train, test, target)
train, test = get_many_missing_values(train, test, unique_id, target)
train, test = replace_missing_values(train, test, unique_id, target)
train, test = get_column_differences(train, test, unique_id, target)
train, test = get_custom_features(train, test, unique_id, target)
train, test = get_categorical_data(train, test, unique_id, target)
train, test, most_important_cols = get_feature_importance(train, test, unique_id, target)
train, test = get_arithmetic_features(train, test, unique_id, target, most_important_cols, original_columns)
train, test = get_collinear_features(train, test, unique_id, target)
train, test = get_feature_selection(train, test, unique_id, target)
train, test, _ = get_feature_importance(train, test, unique_id, target)
# ----------
test_predictions, train_score = evaluate(train, test, unique_id, target)
print('score', train_score)
test[target] = np.expm1(test_predictions)
predictions = test[[unique_id, target]]
predictions.to_csv('submission.csv', index=False)
# -------- main
run()
print(f'Finished {((time() - start_time) / 60):.1f} mins\a')
| en | 0.710445 | # feature importance # local score 0.0449 # kaggle score .14106 # minimize score # noqa # noqa # if is_kaggle else '.zip' # if is_kaggle else 'sample' # noqa # --- missing values # identify missing values above threshold # restore after align # --- remove keys # --- replace missing values # replace missing numericals with mean # convert to lowercase # replace string nan with np.nan # replace missing categoricals with mode # --- column differences # --- categorical data # drop if too many values - usually a unique id column # one-hot encode if not too many values # one-hot encode & align to have same columns # restore after align # possibly rank encode rather than ohe. see gstore. # label encode (convert to integer) # --- feature selection # feature selection via variance # remove cols without variance # --- feature importance # initialize an empty array to hold feature importances # fit the model twice to avoid overfitting # split into training and validation set # train using early stopping # record the feature importances # average feature importances! # sort features according to importance # normalize the feature importances to add up to one # find the features with minimal importance # unimportant_features = list(feature_importances[feature_importances['importance'] == 0.0]['feature']) # Threshold for cumulative importance # extract the features to drop # --- remove collinear features # select columns with correlations above threshold # arithmetic features # just choose from original columns, not encodeds # powers # custom features # remove skew towards a few large values by using log1p # --------------------- run # load data # ---------- # -------- main | 2.63204 | 3 |
QUANTAXIS/QAFetch/Fetcher.py | Gahyu96/QUANTAXIS | 1 | 6624953 | <reponame>Gahyu96/QUANTAXIS<gh_stars>1-10
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
QA fetch module
@yutiansut
QAFetch is Under [QAStandard#0.0.2@10x] Protocol
"""
from QUANTAXIS.QAFetch import QAWind as QAWind
from QUANTAXIS.QAFetch import QATushare as QATushare
from QUANTAXIS.QAFetch import QATdx as QATdx
from QUANTAXIS.QAFetch import QAThs as QAThs
from QUANTAXIS.QAFetch import QAQuery
from QUANTAXIS.QAFetch import QAQuery_Advance as QAQueryAdv
from QUANTAXIS.QAFetch import QAEastMoney as QAEM
from QUANTAXIS.QAUtil.QAParameter import FREQUENCE, MARKET_TYPE, DATASOURCE, OUTPUT_FORMAT, DATABASE_TABLE
from QUANTAXIS.QAUtil.QASql import QA_util_sql_mongo_setting
class QA_Fetcher():
def __init__(self, ip='127.0.0.1', port=27017, username='',password=''):
"""
初始化的时候 会初始化
"""
self.ip = ip
self.port = port
self.database = QA_util_sql_mongo_setting(ip, port).quantaxis
self.history = {}
self.best_ip=QATdx.select_best_ip()
def change_ip(self, ip, port):
self.database = QA_util_sql_mongo_setting(ip, port).quantaxis
return self
def get_quotation(self, code=None, start=None, end=None, frequence=None, market=None, source=None, output=None):
"""
Arguments:
code {str/list} -- 证券/股票的代码
start {str} -- 开始日期
end {str} -- 结束日期
frequence {enum} -- 频率 QA.FREQUENCE
market {enum} -- 市场 QA.MARKET_TYPE
source {enum} -- 来源 QA.DATASOURCE
output {enum} -- 输出类型 QA.OUTPUT_FORMAT
"""
pass
def get_info(self,code,frequence,market,source,output):
if source is DATASOURCE.TDX:
res=QATdx.QA_fetch_get_stock_info(code,self.best_ip)
return res
elif source is DATASOURCE.MONGO:
res=QAQuery.QA_fetch_stock_info(code,format=output,collections=self.database.stock_info)
return res
# TODO: the `output` parameter is currently ignored; the provider's native
# result (usually a QA_DataStruct / DataFrame) is returned as-is.
def QA_quotation(code, start, end, frequence, market, source, output):
    """Unified fetch entry point for quotation data.

    Arguments:
        code {str/list} -- security/stock code(s)
        start {str} -- start date
        end {str} -- end date
        frequence {enum} -- frequency, QA.FREQUENCE
        market {enum} -- market, QA.MARKET_TYPE
        source {enum} -- data source, QA.DATASOURCE
        output {enum} -- output format, QA.OUTPUT_FORMAT (unused for now)

    Returns:
        The provider-specific result, or None when the combination of
        market/frequence/source is not supported.
    """
    res = None  # explicit default: avoids UnboundLocalError for unsupported combos
    if market is MARKET_TYPE.STOCK_CN:
        if frequence is FREQUENCE.DAY:
            if source is DATASOURCE.MONGO:
                res = QAQueryAdv.QA_fetch_stock_day_adv(code, start, end)
            elif source is DATASOURCE.TDX:
                res = QATdx.QA_fetch_get_stock_day(code, start, end, '00')
            elif source is DATASOURCE.TUSHARE:
                res = QATushare.QA_fetch_get_stock_day(code, start, end, '00')
        elif frequence in [FREQUENCE.ONE_MIN, FREQUENCE.FIVE_MIN, FREQUENCE.FIFTEEN_MIN, FREQUENCE.THIRTY_MIN, FREQUENCE.SIXTY_MIN]:
            if source is DATASOURCE.MONGO:
                res = QAQueryAdv.QA_fetch_stock_min_adv(
                    code, start, end, frequence=frequence)
            elif source is DATASOURCE.TDX:
                res = QATdx.QA_fetch_get_stock_min(
                    code, start, end, frequence=frequence)
        elif frequence is FREQUENCE.TICK:
            if source is DATASOURCE.TDX:
                res = QATdx.QA_fetch_get_stock_transaction(code, start, end)
    # Index codes collide with stock codes (sh000001 the SSE index vs the
    # stock 000001), so indexes live under their own market type.
    elif market is MARKET_TYPE.INDEX_CN:
        if frequence is FREQUENCE.DAY:
            if source is DATASOURCE.MONGO:
                res = QAQueryAdv.QA_fetch_index_day_adv(code, start, end)
    elif market is MARKET_TYPE.OPTION_CN:
        # BUG FIX: previously compared `source is DATABASE_TABLE.MONGO`,
        # i.e. a DATASOURCE value against the wrong enum, so this branch
        # could never match. Compare against DATASOURCE.MONGO instead.
        if source is DATASOURCE.MONGO:
            res = QAQueryAdv.QA_fetch_option_day_adv(code, start, end)
    return res
if __name__ == '__main__':
    # Smoke test: print one month of daily bars for stock 000001, fetched live from TDX.
    print(QA_quotation('000001', '2017-01-01', '2017-01-31', frequence=FREQUENCE.DAY,
                       market=MARKET_TYPE.STOCK_CN, source=DATASOURCE.TDX, output=OUTPUT_FORMAT.DATAFRAME))
| # coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
QA fetch module
@yutiansut
QAFetch is Under [QAStandard#0.0.2@10x] Protocol
"""
from QUANTAXIS.QAFetch import QAWind as QAWind
from QUANTAXIS.QAFetch import QATushare as QATushare
from QUANTAXIS.QAFetch import QATdx as QATdx
from QUANTAXIS.QAFetch import QAThs as QAThs
from QUANTAXIS.QAFetch import QAQuery
from QUANTAXIS.QAFetch import QAQuery_Advance as QAQueryAdv
from QUANTAXIS.QAFetch import QAEastMoney as QAEM
from QUANTAXIS.QAUtil.QAParameter import FREQUENCE, MARKET_TYPE, DATASOURCE, OUTPUT_FORMAT, DATABASE_TABLE
from QUANTAXIS.QAUtil.QASql import QA_util_sql_mongo_setting
class QA_Fetcher():
    """Stateful quotation fetcher bound to a MongoDB instance and a TDX server."""
    def __init__(self, ip='127.0.0.1', port=27017, username='',password=''):
        """Connect to MongoDB and select the best TDX server up front.

        NOTE(review): `username` and `password` are accepted but never used.
        Construction performs network I/O (TDX server probing).
        """
        self.ip = ip
        self.port = port
        self.database = QA_util_sql_mongo_setting(ip, port).quantaxis
        self.history = {}
        self.best_ip=QATdx.select_best_ip()
    def change_ip(self, ip, port):
        # Rebind to another MongoDB instance; returns self so calls can chain.
        self.database = QA_util_sql_mongo_setting(ip, port).quantaxis
        return self
    def get_quotation(self, code=None, start=None, end=None, frequence=None, market=None, source=None, output=None):
        """Fetch quotation data (stub -- not implemented yet).

        Arguments:
            code {str/list} -- security/stock code(s)
            start {str} -- start date
            end {str} -- end date
            frequence {enum} -- frequency, QA.FREQUENCE
            market {enum} -- market, QA.MARKET_TYPE
            source {enum} -- data source, QA.DATASOURCE
            output {enum} -- output format, QA.OUTPUT_FORMAT
        """
        pass
    def get_info(self,code,frequence,market,source,output):
        # Static info comes either live from TDX or from the mongo
        # stock_info collection; other sources fall through -> None.
        if source is DATASOURCE.TDX:
            res=QATdx.QA_fetch_get_stock_info(code,self.best_ip)
            return res
        elif source is DATASOURCE.MONGO:
            res=QAQuery.QA_fetch_stock_info(code,format=output,collections=self.database.stock_info)
            return res
# TODO: the `output` parameter is currently ignored; the provider's native
# result (usually a QA_DataStruct / DataFrame) is returned as-is.
def QA_quotation(code, start, end, frequence, market, source, output):
    """Unified fetch entry point for quotation data.

    Arguments:
        code {str/list} -- security/stock code(s)
        start {str} -- start date
        end {str} -- end date
        frequence {enum} -- frequency, QA.FREQUENCE
        market {enum} -- market, QA.MARKET_TYPE
        source {enum} -- data source, QA.DATASOURCE
        output {enum} -- output format, QA.OUTPUT_FORMAT (unused for now)

    Returns:
        The provider-specific result, or None when the combination of
        market/frequence/source is not supported.
    """
    res = None  # explicit default: avoids UnboundLocalError for unsupported combos
    if market is MARKET_TYPE.STOCK_CN:
        if frequence is FREQUENCE.DAY:
            if source is DATASOURCE.MONGO:
                res = QAQueryAdv.QA_fetch_stock_day_adv(code, start, end)
            elif source is DATASOURCE.TDX:
                res = QATdx.QA_fetch_get_stock_day(code, start, end, '00')
            elif source is DATASOURCE.TUSHARE:
                res = QATushare.QA_fetch_get_stock_day(code, start, end, '00')
        elif frequence in [FREQUENCE.ONE_MIN, FREQUENCE.FIVE_MIN, FREQUENCE.FIFTEEN_MIN, FREQUENCE.THIRTY_MIN, FREQUENCE.SIXTY_MIN]:
            if source is DATASOURCE.MONGO:
                res = QAQueryAdv.QA_fetch_stock_min_adv(
                    code, start, end, frequence=frequence)
            elif source is DATASOURCE.TDX:
                res = QATdx.QA_fetch_get_stock_min(
                    code, start, end, frequence=frequence)
        elif frequence is FREQUENCE.TICK:
            if source is DATASOURCE.TDX:
                res = QATdx.QA_fetch_get_stock_transaction(code, start, end)
    # Index codes collide with stock codes (sh000001 the SSE index vs the
    # stock 000001), so indexes live under their own market type.
    elif market is MARKET_TYPE.INDEX_CN:
        if frequence is FREQUENCE.DAY:
            if source is DATASOURCE.MONGO:
                res = QAQueryAdv.QA_fetch_index_day_adv(code, start, end)
    elif market is MARKET_TYPE.OPTION_CN:
        # BUG FIX: previously compared `source is DATABASE_TABLE.MONGO`,
        # i.e. a DATASOURCE value against the wrong enum, so this branch
        # could never match. Compare against DATASOURCE.MONGO instead.
        if source is DATASOURCE.MONGO:
            res = QAQueryAdv.QA_fetch_option_day_adv(code, start, end)
    return res
if __name__ == '__main__':
print(QA_quotation('000001', '2017-01-01', '2017-01-31', frequence=FREQUENCE.DAY,
market=MARKET_TYPE.STOCK_CN, source=DATASOURCE.TDX, output=OUTPUT_FORMAT.DATAFRAME)) | en | 0.456563 | # coding:utf-8 # # The MIT License (MIT) # # Copyright (c) 2016-2018 yutiansut/QUANTAXIS # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. QA fetch module @yutiansut QAFetch is Under [QAStandard#0.0.2@10x] Protocol 初始化的时候 会初始化 Arguments: code {str/list} -- 证券/股票的代码 start {str} -- 开始日期 end {str} -- 结束日期 frequence {enum} -- 频率 QA.FREQUENCE market {enum} -- 市场 QA.MARKET_TYPE source {enum} -- 来源 QA.DATASOURCE output {enum} -- 输出类型 QA.OUTPUT_FORMAT # todo 🛠 output 参数没有用到, 默认返回的 是 QA_DataStruct 一个统一的fetch Arguments: code {str/list} -- 证券/股票的代码 start {str} -- 开始日期 end {str} -- 结束日期 frequence {enum} -- 频率 QA.FREQUENCE market {enum} -- 市场 QA.MARKET_TYPE source {enum} -- 来源 QA.DATASOURCE output {enum} -- 输出类型 QA.OUTPUT_FORMAT #指数代码和股票代码是冲突重复的, sh000001 上证指数 000001 是不同的 #print(type(res)) | 1.188816 | 1 |
school_backend/school_backend/migrations/0003_auto_20200920_1730.py | Robin-Bonnin/manatal-django-test | 0 | 6624954 | # Generated by Django 3.1.1 on 2020-09-20 10:30
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the `type` field on the `school` model to `school_type`."""
    dependencies = [
        ('school_backend', '0002_school_type'),
    ]
    operations = [
        migrations.RenameField(
            model_name='school',
            old_name='type',
            new_name='school_type',
        ),
    ]
| # Generated by Django 3.1.1 on 2020-09-20 10:30
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the `type` field on the `school` model to `school_type`."""
    dependencies = [
        ('school_backend', '0002_school_type'),
    ]
    operations = [
        migrations.RenameField(
            model_name='school',
            old_name='type',
            new_name='school_type',
        ),
    ]
| en | 0.739788 | # Generated by Django 3.1.1 on 2020-09-20 10:30 | 1.77314 | 2 |
tests/test_external/test_sdk/test_measure_if_conn.py | Doomsk/netqasm | 6 | 6624955 | from netqasm.logging.glob import get_netqasm_logger
from netqasm.runtime.application import default_app_instance
from netqasm.sdk import Qubit
from netqasm.sdk.external import NetQASMConnection, simulate_application
logger = get_netqasm_logger()
def run_alice():
    """Single-node program: prepare |+>, measure, then undo the randomness.

    The qubit is measured in place after an H; the outcome `m` drives a
    conditional X (via `if_eq`) that fires only when m == 1, mapping the
    post-measurement state back to |0>. The final measurement is therefore
    asserted to be 0.
    """
    num = 1  # number of prepare/correct/verify repetitions
    with NetQASMConnection("Alice") as alice:
        for _ in range(num):
            q = Qubit(alice)
            q.H()
            m = q.measure(inplace=True)
            def body(alice):
                # Executed only when the if_eq branch (m == 1) is taken.
                q.X()
            alice.if_eq(m, 1, body)
            zero = q.measure()
        alice.flush()
        assert zero == 0
def test_measure_if_conn():
    """Run `run_alice` as a one-node simulated application."""
    programs = [("Alice", run_alice)]
    instance = default_app_instance(programs)
    simulate_application(instance, use_app_config=False, enable_logging=False)
| from netqasm.logging.glob import get_netqasm_logger
from netqasm.runtime.application import default_app_instance
from netqasm.sdk import Qubit
from netqasm.sdk.external import NetQASMConnection, simulate_application
logger = get_netqasm_logger()
def run_alice():
    """Single-node program: prepare |+>, measure, then undo the randomness.

    The qubit is measured in place after an H; the outcome `m` drives a
    conditional X (via `if_eq`) that fires only when m == 1, mapping the
    post-measurement state back to |0>. The final measurement is therefore
    asserted to be 0.
    """
    num = 1  # number of prepare/correct/verify repetitions
    with NetQASMConnection("Alice") as alice:
        for _ in range(num):
            q = Qubit(alice)
            q.H()
            m = q.measure(inplace=True)
            def body(alice):
                # Executed only when the if_eq branch (m == 1) is taken.
                q.X()
            alice.if_eq(m, 1, body)
            zero = q.measure()
        alice.flush()
        assert zero == 0
def test_measure_if_conn():
    """Run `run_alice` as a one-node simulated application."""
    programs = [("Alice", run_alice)]
    instance = default_app_instance(programs)
    simulate_application(instance, use_app_config=False, enable_logging=False)
| ja | 0.215721 | # set_log_level("DEBUG") | 2.13449 | 2 |
docs/ui/examples/example983d145a22433403a41b63bde6927f49.py | okajun35/Flexx_translate_ja | 1 | 6624956 | from flexx import app, event, ui
class Example(ui.Widget):
    """Demo widget: a dropdown-hosted tree view plus an editable combobox
    whose text is mirrored into a label.
    """
    # Give the tree inside the dropdown a usable minimum height.
    CSS = '''
    .flx-DropdownContainer > .flx-TreeWidget {
        min-height: 150px;
    }
    '''
    def init(self):
        # A nice and cosy tree view
        with ui.DropdownContainer(text='Scene graph'):
            with ui.TreeWidget(max_selected=1):
                for i in range(20):
                    ui.TreeItem(text='foo %i' % i, checked=False)
        # A combobox
        self.combo = ui.ComboBox(editable=True,
                                 options=('foo', 'bar', 'spaaaaaaaaam', 'eggs'))
        self.label = ui.Label()
    class JS:
        # NOTE: by flexx convention, code in this nested class runs client-side.
        @event.connect('combo.text')
        def on_combobox_text(self, *events):
            # Mirror the combobox text (and selection index, if any) in the label.
            self.label.text = 'Combobox text: ' + self.combo.text
            if self.combo.selected_index is not None:
                self.label.text += ' (index %i)' % self.combo.selected_index
| from flexx import app, event, ui
class Example(ui.Widget):
    """Demo widget: a dropdown-hosted tree view plus an editable combobox
    whose text is mirrored into a label.
    """
    # Give the tree inside the dropdown a usable minimum height.
    CSS = '''
    .flx-DropdownContainer > .flx-TreeWidget {
        min-height: 150px;
    }
    '''
    def init(self):
        # A nice and cosy tree view
        with ui.DropdownContainer(text='Scene graph'):
            with ui.TreeWidget(max_selected=1):
                for i in range(20):
                    ui.TreeItem(text='foo %i' % i, checked=False)
        # A combobox
        self.combo = ui.ComboBox(editable=True,
                                 options=('foo', 'bar', 'spaaaaaaaaam', 'eggs'))
        self.label = ui.Label()
    class JS:
        # NOTE: by flexx convention, code in this nested class runs client-side.
        @event.connect('combo.text')
        def on_combobox_text(self, *events):
            # Mirror the combobox text (and selection index, if any) in the label.
            self.label.text = 'Combobox text: ' + self.combo.text
            if self.combo.selected_index is not None:
                self.label.text += ' (index %i)' % self.combo.selected_index
| en | 0.333867 | .flx-DropdownContainer > .flx-TreeWidget { min-height: 150px; } # A nice and cosy tree view # A combobox | 2.6998 | 3 |
Source/build/scripts/make_element_factory.py | quanganh2627/bytm-x64-L-w05-2015_external_chromium_org_third_party_WebKit | 0 | 6624957 | #!/usr/bin/env python
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
from collections import defaultdict
import in_generator
import template_expander
import name_utilities
from make_qualified_names import MakeQualifiedNamesWriter
class MakeElementFactoryWriter(MakeQualifiedNamesWriter):
    """Generates *ElementFactory.{h,cpp} and V8*ElementWrapperFactory.{h,cpp}
    for a tag namespace, extending the qualified-names generator.
    """
    # Per-tag options recognized in the .in file (all optional, default None).
    defaults = dict(MakeQualifiedNamesWriter.default_parameters, **{
        'JSInterfaceName': None,
        'Conditional': None,
        'constructorNeedsCreatedByParser': None,
        'constructorNeedsFormElement': None,
        'contextConditional': None,
        'interfaceName': None,
        'noConstructor': None,
        'noTypeHelpers': None,
        'runtimeEnabled': None,
    })
    # File-level parameters: interfaces used when a tag has no specific one.
    default_parameters = dict(MakeQualifiedNamesWriter.default_parameters, **{
        'fallbackInterfaceName': '',
        'fallbackJSInterfaceName': '',
    })
    filters = MakeQualifiedNamesWriter.filters
    def __init__(self, in_file_paths):
        """Register the generated output files and enrich the template
        context with per-tag interface information."""
        super(MakeElementFactoryWriter, self).__init__(in_file_paths)
        # FIXME: When we start using these element factories, we'll want to
        # remove the "new" prefix and also have our base class generate
        # *Names.h and *Names.cpp.
        self._outputs.update({
            (self.namespace + 'ElementFactory.h'): self.generate_factory_header,
            (self.namespace + 'ElementFactory.cpp'): self.generate_factory_implementation,
            ('V8' + self.namespace + 'ElementWrapperFactory.h'): self.generate_wrapper_factory_header,
            ('V8' + self.namespace + 'ElementWrapperFactory.cpp'): self.generate_wrapper_factory_implementation,
        })
        fallback_interface = self.tags_in_file.parameters['fallbackInterfaceName'].strip('"')
        fallback_js_interface = self.tags_in_file.parameters['fallbackJSInterfaceName'].strip('"') or fallback_interface
        interface_counts = defaultdict(int)
        tags = self._template_context['tags']
        for tag in tags:
            tag['has_js_interface'] = self._has_js_interface(tag)
            tag['js_interface'] = self._js_interface(tag)
            tag['interface'] = self._interface(tag)
            interface_counts[tag['interface']] += 1
        # An interface shared by several tags (or equal to the fallback)
        # needs an explicit tag-name check in the generated code.
        for tag in tags:
            tag['multipleTagNames'] = (interface_counts[tag['interface']] > 1 or tag['interface'] == fallback_interface)
        self._template_context.update({
            'fallback_interface': fallback_interface,
            'fallback_js_interface': fallback_js_interface,
        })
    @template_expander.use_jinja('ElementFactory.h.tmpl', filters=filters)
    def generate_factory_header(self):
        return self._template_context
    @template_expander.use_jinja('ElementFactory.cpp.tmpl', filters=filters)
    def generate_factory_implementation(self):
        return self._template_context
    @template_expander.use_jinja('ElementWrapperFactory.h.tmpl', filters=filters)
    def generate_wrapper_factory_header(self):
        return self._template_context
    @template_expander.use_jinja('ElementWrapperFactory.cpp.tmpl', filters=filters)
    def generate_wrapper_factory_implementation(self):
        return self._template_context
    def _interface(self, tag):
        # C++ interface for a tag: explicit override, else derived from name.
        if tag['interfaceName']:
            return tag['interfaceName']
        name = name_utilities.upper_first(tag['name'])
        # FIXME: We shouldn't hard-code HTML here.
        if name == 'HTML':
            name = 'Html'
        # CamelCase dashed names, e.g. 'foo-bar' -> 'FooBar'.
        dash = name.find('-')
        while dash != -1:
            name = name[:dash] + name[dash + 1].upper() + name[dash + 2:]
            dash = name.find('-')
        return '%s%sElement' % (self.namespace, name)
    def _js_interface(self, tag):
        # JS wrapper interface: explicit override, else same as the C++ one.
        if tag['JSInterfaceName']:
            return tag['JSInterfaceName']
        return self._interface(tag)
    def _has_js_interface(self, tag):
        # A tag has its own JS wrapper unless it opted out of a constructor
        # or just uses the namespace's base element interface.
        return not tag['noConstructor'] and self._js_interface(tag) != ('%sElement' % self.namespace)
if __name__ == "__main__":
    # Drive the writer through in_generator's standard CLI harness.
    in_generator.Maker(MakeElementFactoryWriter).main(sys.argv)
| #!/usr/bin/env python
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
from collections import defaultdict
import in_generator
import template_expander
import name_utilities
from make_qualified_names import MakeQualifiedNamesWriter
class MakeElementFactoryWriter(MakeQualifiedNamesWriter):
    """Generates *ElementFactory.{h,cpp} and V8*ElementWrapperFactory.{h,cpp}
    for a tag namespace, extending the qualified-names generator.
    """
    # Per-tag options recognized in the .in file (all optional, default None).
    defaults = dict(MakeQualifiedNamesWriter.default_parameters, **{
        'JSInterfaceName': None,
        'Conditional': None,
        'constructorNeedsCreatedByParser': None,
        'constructorNeedsFormElement': None,
        'contextConditional': None,
        'interfaceName': None,
        'noConstructor': None,
        'noTypeHelpers': None,
        'runtimeEnabled': None,
    })
    # File-level parameters: interfaces used when a tag has no specific one.
    default_parameters = dict(MakeQualifiedNamesWriter.default_parameters, **{
        'fallbackInterfaceName': '',
        'fallbackJSInterfaceName': '',
    })
    filters = MakeQualifiedNamesWriter.filters
    def __init__(self, in_file_paths):
        """Register the generated output files and enrich the template
        context with per-tag interface information."""
        super(MakeElementFactoryWriter, self).__init__(in_file_paths)
        # FIXME: When we start using these element factories, we'll want to
        # remove the "new" prefix and also have our base class generate
        # *Names.h and *Names.cpp.
        self._outputs.update({
            (self.namespace + 'ElementFactory.h'): self.generate_factory_header,
            (self.namespace + 'ElementFactory.cpp'): self.generate_factory_implementation,
            ('V8' + self.namespace + 'ElementWrapperFactory.h'): self.generate_wrapper_factory_header,
            ('V8' + self.namespace + 'ElementWrapperFactory.cpp'): self.generate_wrapper_factory_implementation,
        })
        fallback_interface = self.tags_in_file.parameters['fallbackInterfaceName'].strip('"')
        fallback_js_interface = self.tags_in_file.parameters['fallbackJSInterfaceName'].strip('"') or fallback_interface
        interface_counts = defaultdict(int)
        tags = self._template_context['tags']
        for tag in tags:
            tag['has_js_interface'] = self._has_js_interface(tag)
            tag['js_interface'] = self._js_interface(tag)
            tag['interface'] = self._interface(tag)
            interface_counts[tag['interface']] += 1
        # An interface shared by several tags (or equal to the fallback)
        # needs an explicit tag-name check in the generated code.
        for tag in tags:
            tag['multipleTagNames'] = (interface_counts[tag['interface']] > 1 or tag['interface'] == fallback_interface)
        self._template_context.update({
            'fallback_interface': fallback_interface,
            'fallback_js_interface': fallback_js_interface,
        })
    @template_expander.use_jinja('ElementFactory.h.tmpl', filters=filters)
    def generate_factory_header(self):
        return self._template_context
    @template_expander.use_jinja('ElementFactory.cpp.tmpl', filters=filters)
    def generate_factory_implementation(self):
        return self._template_context
    @template_expander.use_jinja('ElementWrapperFactory.h.tmpl', filters=filters)
    def generate_wrapper_factory_header(self):
        return self._template_context
    @template_expander.use_jinja('ElementWrapperFactory.cpp.tmpl', filters=filters)
    def generate_wrapper_factory_implementation(self):
        return self._template_context
    def _interface(self, tag):
        # C++ interface for a tag: explicit override, else derived from name.
        if tag['interfaceName']:
            return tag['interfaceName']
        name = name_utilities.upper_first(tag['name'])
        # FIXME: We shouldn't hard-code HTML here.
        if name == 'HTML':
            name = 'Html'
        # CamelCase dashed names, e.g. 'foo-bar' -> 'FooBar'.
        dash = name.find('-')
        while dash != -1:
            name = name[:dash] + name[dash + 1].upper() + name[dash + 2:]
            dash = name.find('-')
        return '%s%sElement' % (self.namespace, name)
    def _js_interface(self, tag):
        # JS wrapper interface: explicit override, else same as the C++ one.
        if tag['JSInterfaceName']:
            return tag['JSInterfaceName']
        return self._interface(tag)
    def _has_js_interface(self, tag):
        # A tag has its own JS wrapper unless it opted out of a constructor
        # or just uses the namespace's base element interface.
        return not tag['noConstructor'] and self._js_interface(tag) != ('%sElement' % self.namespace)
if __name__ == "__main__":
    # Drive the writer through in_generator's standard CLI harness.
    in_generator.Maker(MakeElementFactoryWriter).main(sys.argv)
| en | 0.729829 | #!/usr/bin/env python # Copyright (C) 2013 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # FIXME: When we start using these element factories, we'll want to # remove the "new" prefix and also have our base class generate # *Names.h and *Names.cpp. # FIXME: We shouldn't hard-code HTML here. | 1.365429 | 1 |
src/dss/Serializer.py | akiyoko/django-simple-serializer | 0 | 6624958 | # coding: utf-8
from __future__ import unicode_literals
import sys
# NOTE(review): this flag looks inverted -- PY2 ends up True on Python 3
# and False on Python 2. Left unchanged because other modules may depend
# on the current (mislabeled) value; confirm intent before fixing.
PY2 = True
if sys.version < '3':
    # On Python 2, pull in py3-style str/int from the `future` backport.
    from future.builtins import str, int
    PY2 = False
import datetime
import json
from decimal import Decimal
from .TimeFormatFactory import TimeFormatFactory
try:
from django.db import models
from django.db.models import manager
from django.core.paginator import Page
from django.db.models.query import QuerySet
from django.db.models.fields.files import ImageFieldFile, FileField
except ImportError:
raise RuntimeError('django is required in django simple serializer')
class Serializer(object):
    """Recursively converts Django model instances, querysets, pages and
    plain containers into primitive Python structures (or a JSON string).

    NOTE(review): the class-level defaults below are shared mutable objects;
    they are only read (never mutated in place) unless shadowed per instance
    in __init__, so the sharing is currently harmless.
    """
    include_attr = []
    exclude_attr = []
    objects = []
    origin_data = None
    output_type = 'raw'
    datetime_format = 'timestamp'
    foreign = False
    many = False
    through = True
    def __init__(self, data, datetime_format='timestamp', output_type='raw', include_attr=None, exclude_attr=None,
                 foreign=False, many=False, through=True, *args, **kwargs):
        """Capture the data and serialization options.

        foreign -- also serialize foreign-key targets
        many -- also serialize many-to-many relations
        through -- include extra fields from M2M through models
        """
        if include_attr:
            self.include_attr = include_attr
        if exclude_attr:
            self.exclude_attr = exclude_attr
        self.origin_data = data
        self.output_type = output_type
        self.foreign = foreign
        self.many = many
        self.through = through
        self.through_fields = []
        self.source_field = None
        self.datetime_format = datetime_format
        self.time_func = TimeFormatFactory.get_time_func(datetime_format)
        self._dict_check = kwargs.get('dict_check', False)
    def check_attr(self, attr):
        """Return True when `attr` passes the include/exclude filters."""
        if self.exclude_attr and attr in self.exclude_attr:
            return False
        if self.include_attr and attr not in self.include_attr:
            return False
        return True
    def data_inspect(self, data, extra=None):
        """Recursively convert `data` to primitives.

        `extra` (when set) is the default manager of an M2M through model;
        its per-pair rows contribute additional fields. Unknown types
        serialize to None.
        """
        if isinstance(data, (QuerySet, Page, list)):
            convert_data = []
            if extra:
                # Pair every target object with its through-model row.
                for i, obj in enumerate(data):
                    convert_data.append(self.data_inspect(obj, extra.get(
                        **{self.through_fields[0]: obj, self.through_fields[1]: self.source_field})))
            else:
                for obj in data:
                    convert_data.append(self.data_inspect(obj))
            return convert_data
        elif isinstance(data, models.Model):
            obj_dict = {}
            concrete_model = data._meta.concrete_model
            for field in concrete_model._meta.local_fields:
                if field.remote_field is None:
                    # Plain (non-relational) field.
                    if self.check_attr(field.name) and hasattr(data, field.name):
                        obj_dict[field.name] = self.data_inspect(getattr(data, field.name))
                else:
                    # Foreign key: only follow when explicitly enabled.
                    if self.check_attr(field.name) and self.foreign:
                        obj_dict[field.name] = self.data_inspect(getattr(data, field.name))
            for field in concrete_model._meta.many_to_many:
                if self.check_attr(field.name) and self.many:
                    obj_dict[field.name] = self.data_inspect(getattr(data, field.name))
            # Pick up remaining non-private instance attributes.
            for k, v in data.__dict__.items():
                if not str(k).startswith('_') and k not in obj_dict.keys() and self.check_attr(k):
                    obj_dict[k] = self.data_inspect(v)
            if extra:
                # Merge extra fields stored on the M2M through row.
                for field in extra._meta.concrete_model._meta.local_fields:
                    if field.name not in obj_dict.keys() and field.name not in self.through_fields:
                        if field.remote_field is None:
                            if self.check_attr(field.name) and hasattr(extra, field.name):
                                obj_dict[field.name] = self.data_inspect(getattr(extra, field.name))
                        else:
                            if self.check_attr(field.name) and self.foreign:
                                obj_dict[field.name] = self.data_inspect(getattr(extra, field.name))
            return obj_dict
        elif isinstance(data, manager.Manager):
            # Related manager (M2M): remember the link fields, then recurse
            # into the queryset. A through model with more than the 3 basic
            # columns carries extra data worth serializing.
            through_list = data.through._meta.concrete_model._meta.local_fields
            through_data = data.through._default_manager
            self.through_fields = [data.target_field.name, data.source_field.name]
            self.source_field = data.instance
            if len(through_list) > 3 and self.through:
                return self.data_inspect(data.all(), through_data)
            else:
                return self.data_inspect(data.all())
        elif isinstance(data, (datetime.datetime, datetime.date, datetime.time)):
            return self.time_func(data)
        elif isinstance(data, (ImageFieldFile, FileField)):
            return data.url if data.url else data.path
        elif isinstance(data, Decimal):
            return float(data)
        elif isinstance(data, dict):
            obj_dict = {}
            if self._dict_check:
                # dict_check=True: serialize all keys without filtering.
                for k, v in data.items():
                    obj_dict[k] = self.data_inspect(v)
            else:
                for k, v in data.items():
                    if self.check_attr(k):
                        obj_dict[k] = self.data_inspect(v)
            return obj_dict
        elif isinstance(data, (str, bool, float, int)):
            return data
        else:
            return None
    def data_format(self):
        """Run the conversion and cache the result on the instance."""
        self.objects = self.data_inspect(self.origin_data)
    def get_values(self):
        """Render the cached result per `output_type` ('json' dumps it)."""
        output_switch = {'dict': self.objects,
                         'raw': self.objects,
                         'json': json.dumps(self.objects, indent=4)}
        return output_switch.get(self.output_type, self.objects)
    def __call__(self):
        self.data_format()
        return self.get_values()
def serializer(data, datetime_format='timestamp', output_type='raw', include_attr=None, exclude_attr=None,
               foreign=False, many=False, through=True, *args, **kwargs):
    """Module-level convenience wrapper: build a Serializer and run it."""
    instance = Serializer(data, datetime_format, output_type, include_attr, exclude_attr,
                          foreign, many, through, *args, **kwargs)
    return instance()
| # coding: utf-8
from __future__ import unicode_literals
import sys
# NOTE(review): this flag looks inverted -- PY2 ends up True on Python 3
# and False on Python 2. Left unchanged because other modules may depend
# on the current (mislabeled) value; confirm intent before fixing.
PY2 = True
if sys.version < '3':
    # On Python 2, pull in py3-style str/int from the `future` backport.
    from future.builtins import str, int
    PY2 = False
import datetime
import json
from decimal import Decimal
from .TimeFormatFactory import TimeFormatFactory
try:
from django.db import models
from django.db.models import manager
from django.core.paginator import Page
from django.db.models.query import QuerySet
from django.db.models.fields.files import ImageFieldFile, FileField
except ImportError:
raise RuntimeError('django is required in django simple serializer')
class Serializer(object):
    """Recursively converts Django model instances, querysets, pages and
    plain containers into primitive Python structures (or a JSON string).

    NOTE(review): the class-level defaults below are shared mutable objects;
    they are only read (never mutated in place) unless shadowed per instance
    in __init__, so the sharing is currently harmless.
    """
    include_attr = []
    exclude_attr = []
    objects = []
    origin_data = None
    output_type = 'raw'
    datetime_format = 'timestamp'
    foreign = False
    many = False
    through = True
    def __init__(self, data, datetime_format='timestamp', output_type='raw', include_attr=None, exclude_attr=None,
                 foreign=False, many=False, through=True, *args, **kwargs):
        """Capture the data and serialization options.

        foreign -- also serialize foreign-key targets
        many -- also serialize many-to-many relations
        through -- include extra fields from M2M through models
        """
        if include_attr:
            self.include_attr = include_attr
        if exclude_attr:
            self.exclude_attr = exclude_attr
        self.origin_data = data
        self.output_type = output_type
        self.foreign = foreign
        self.many = many
        self.through = through
        self.through_fields = []
        self.source_field = None
        self.datetime_format = datetime_format
        self.time_func = TimeFormatFactory.get_time_func(datetime_format)
        self._dict_check = kwargs.get('dict_check', False)
    def check_attr(self, attr):
        """Return True when `attr` passes the include/exclude filters."""
        if self.exclude_attr and attr in self.exclude_attr:
            return False
        if self.include_attr and attr not in self.include_attr:
            return False
        return True
    def data_inspect(self, data, extra=None):
        """Recursively convert `data` to primitives.

        `extra` (when set) is the default manager of an M2M through model;
        its per-pair rows contribute additional fields. Unknown types
        serialize to None.
        """
        if isinstance(data, (QuerySet, Page, list)):
            convert_data = []
            if extra:
                # Pair every target object with its through-model row.
                for i, obj in enumerate(data):
                    convert_data.append(self.data_inspect(obj, extra.get(
                        **{self.through_fields[0]: obj, self.through_fields[1]: self.source_field})))
            else:
                for obj in data:
                    convert_data.append(self.data_inspect(obj))
            return convert_data
        elif isinstance(data, models.Model):
            obj_dict = {}
            concrete_model = data._meta.concrete_model
            for field in concrete_model._meta.local_fields:
                if field.remote_field is None:
                    # Plain (non-relational) field.
                    if self.check_attr(field.name) and hasattr(data, field.name):
                        obj_dict[field.name] = self.data_inspect(getattr(data, field.name))
                else:
                    # Foreign key: only follow when explicitly enabled.
                    if self.check_attr(field.name) and self.foreign:
                        obj_dict[field.name] = self.data_inspect(getattr(data, field.name))
            for field in concrete_model._meta.many_to_many:
                if self.check_attr(field.name) and self.many:
                    obj_dict[field.name] = self.data_inspect(getattr(data, field.name))
            # Pick up remaining non-private instance attributes.
            for k, v in data.__dict__.items():
                if not str(k).startswith('_') and k not in obj_dict.keys() and self.check_attr(k):
                    obj_dict[k] = self.data_inspect(v)
            if extra:
                # Merge extra fields stored on the M2M through row.
                for field in extra._meta.concrete_model._meta.local_fields:
                    if field.name not in obj_dict.keys() and field.name not in self.through_fields:
                        if field.remote_field is None:
                            if self.check_attr(field.name) and hasattr(extra, field.name):
                                obj_dict[field.name] = self.data_inspect(getattr(extra, field.name))
                        else:
                            if self.check_attr(field.name) and self.foreign:
                                obj_dict[field.name] = self.data_inspect(getattr(extra, field.name))
            return obj_dict
        elif isinstance(data, manager.Manager):
            # Related manager (M2M): remember the link fields, then recurse
            # into the queryset. A through model with more than the 3 basic
            # columns carries extra data worth serializing.
            through_list = data.through._meta.concrete_model._meta.local_fields
            through_data = data.through._default_manager
            self.through_fields = [data.target_field.name, data.source_field.name]
            self.source_field = data.instance
            if len(through_list) > 3 and self.through:
                return self.data_inspect(data.all(), through_data)
            else:
                return self.data_inspect(data.all())
        elif isinstance(data, (datetime.datetime, datetime.date, datetime.time)):
            return self.time_func(data)
        elif isinstance(data, (ImageFieldFile, FileField)):
            return data.url if data.url else data.path
        elif isinstance(data, Decimal):
            return float(data)
        elif isinstance(data, dict):
            obj_dict = {}
            if self._dict_check:
                # dict_check=True: serialize all keys without filtering.
                for k, v in data.items():
                    obj_dict[k] = self.data_inspect(v)
            else:
                for k, v in data.items():
                    if self.check_attr(k):
                        obj_dict[k] = self.data_inspect(v)
            return obj_dict
        elif isinstance(data, (str, bool, float, int)):
            return data
        else:
            return None
    def data_format(self):
        """Run the conversion and cache the result on the instance."""
        self.objects = self.data_inspect(self.origin_data)
    def get_values(self):
        """Render the cached result per `output_type` ('json' dumps it)."""
        output_switch = {'dict': self.objects,
                         'raw': self.objects,
                         'json': json.dumps(self.objects, indent=4)}
        return output_switch.get(self.output_type, self.objects)
    def __call__(self):
        self.data_format()
        return self.get_values()
def serializer(data, datetime_format='timestamp', output_type='raw', include_attr=None, exclude_attr=None,
               foreign=False, many=False, through=True, *args, **kwargs):
    """Module-level convenience wrapper: build a Serializer and run it."""
    instance = Serializer(data, datetime_format, output_type, include_attr, exclude_attr,
                          foreign, many, through, *args, **kwargs)
    return instance()
| en | 0.833554 | # coding: utf-8 | 2.122511 | 2 |
easy_efficientdet/_third_party/decoder.py | waldemarmeier/easy-efficientdet | 0 | 6624959 | """
extracted from keras examples
no explicit license notice is provided
Original Code:
https://keras.io/examples/vision/retinanet/#implementing-a-custom-layer-to-decode-predictions
"""
from typing import Optional, Sequence
import tensorflow as tf
from tensorflow import keras
from easy_efficientdet.anchors import generate_anchor_boxes
from easy_efficientdet.utils import convert_to_corners
class DecodePredictions(keras.layers.Layer):
    """Keras layer turning raw RetinaNet outputs into final detections.

    Decodes the box-regression offsets against a fixed anchor set, applies a
    sigmoid to the class logits and runs combined non-max suppression.

    Attributes:
        num_classes: Number of classes in the dataset.
        confidence_threshold: Minimum class probability, below which
            detections are pruned.
        nms_iou_threshold: IOU threshold for the NMS operation.
        max_detections_per_class: Maximum number of detections retained per
            class.
        max_detections: Maximum number of detections retained across all
            classes.
        box_variance: Optional scaling factors applied to the raw box
            regression values before decoding.
    """
    def __init__(
            self,
            num_classes: int = 4,
            image_shape: Sequence[int] = (512, 512),
            confidence_threshold: float = 0.05,
            nms_iou_threshold: float = 0.5,
            max_detections_per_class: int = 100,
            max_detections: int = 100,
            box_variance: Optional[
                Sequence[float]] = None,  # e.g. [0.1, 0.1, 0.2, 0.2]; None disables scaling
            **kwargs):
        super(DecodePredictions, self).__init__(**kwargs)
        self.num_classes = num_classes
        self.confidence_threshold = confidence_threshold
        self.nms_iou_threshold = nms_iou_threshold
        self.max_detections_per_class = max_detections_per_class
        self.max_detections = max_detections
        # TODO generate_anchor_boxes must be better configurable
        self.anchor_boxes = generate_anchor_boxes(image_shape)
        self.box_variance = box_variance
    def _decode_box_predictions(self, anchor_boxes, box_predictions) -> tf.Tensor:
        # Optionally rescale the raw regression output before decoding.
        scaled = box_predictions if self.box_variance is None else box_predictions * self.box_variance
        # Offsets are relative to anchor centers/sizes (center-size encoding).
        centers = scaled[:, :, :2] * anchor_boxes[:, :, 2:] + anchor_boxes[:, :, :2]
        sizes = tf.math.exp(scaled[:, :, 2:]) * anchor_boxes[:, :, 2:]
        decoded = tf.concat([centers, sizes], axis=-1)
        return convert_to_corners(decoded)
    def call(self, predictions) -> tf.Tensor:
        box_regression = predictions[:, :, :4]
        class_scores = tf.nn.sigmoid(predictions[:, :, 4:])
        # TODO pre selection of boxes (like in soft nms) based on score
        decoded_boxes = self._decode_box_predictions(self.anchor_boxes[None, ...],
                                                     box_regression)
        return tf.image.combined_non_max_suppression(
            tf.expand_dims(decoded_boxes, axis=2),
            class_scores,
            self.max_detections_per_class,
            self.max_detections,
            self.nms_iou_threshold,
            self.confidence_threshold,
            clip_boxes=False,
        )
| """
extracted from keras examples
no explicit license notice is provided
Original Code:
https://keras.io/examples/vision/retinanet/#implementing-a-custom-layer-to-decode-predictions
"""
from typing import Optional, Sequence
import tensorflow as tf
from tensorflow import keras
from easy_efficientdet.anchors import generate_anchor_boxes
from easy_efficientdet.utils import convert_to_corners
class DecodePredictions(keras.layers.Layer):
    """Keras layer turning raw RetinaNet outputs into final detections.

    Decodes the box-regression offsets against a fixed anchor set, applies a
    sigmoid to the class logits and runs combined non-max suppression.

    Attributes:
        num_classes: Number of classes in the dataset.
        confidence_threshold: Minimum class probability, below which
            detections are pruned.
        nms_iou_threshold: IOU threshold for the NMS operation.
        max_detections_per_class: Maximum number of detections retained per
            class.
        max_detections: Maximum number of detections retained across all
            classes.
        box_variance: Optional scaling factors applied to the raw box
            regression values before decoding.
    """
    def __init__(
            self,
            num_classes: int = 4,
            image_shape: Sequence[int] = (512, 512),
            confidence_threshold: float = 0.05,
            nms_iou_threshold: float = 0.5,
            max_detections_per_class: int = 100,
            max_detections: int = 100,
            box_variance: Optional[
                Sequence[float]] = None,  # e.g. [0.1, 0.1, 0.2, 0.2]; None disables scaling
            **kwargs):
        super(DecodePredictions, self).__init__(**kwargs)
        self.num_classes = num_classes
        self.confidence_threshold = confidence_threshold
        self.nms_iou_threshold = nms_iou_threshold
        self.max_detections_per_class = max_detections_per_class
        self.max_detections = max_detections
        # TODO generate_anchor_boxes must be better configurable
        self.anchor_boxes = generate_anchor_boxes(image_shape)
        self.box_variance = box_variance
    def _decode_box_predictions(self, anchor_boxes, box_predictions) -> tf.Tensor:
        # Optionally rescale the raw regression output before decoding.
        scaled = box_predictions if self.box_variance is None else box_predictions * self.box_variance
        # Offsets are relative to anchor centers/sizes (center-size encoding).
        centers = scaled[:, :, :2] * anchor_boxes[:, :, 2:] + anchor_boxes[:, :, :2]
        sizes = tf.math.exp(scaled[:, :, 2:]) * anchor_boxes[:, :, 2:]
        decoded = tf.concat([centers, sizes], axis=-1)
        return convert_to_corners(decoded)
    def call(self, predictions) -> tf.Tensor:
        box_regression = predictions[:, :, :4]
        class_scores = tf.nn.sigmoid(predictions[:, :, 4:])
        # TODO pre selection of boxes (like in soft nms) based on score
        decoded_boxes = self._decode_box_predictions(self.anchor_boxes[None, ...],
                                                     box_regression)
        return tf.image.combined_non_max_suppression(
            tf.expand_dims(decoded_boxes, axis=2),
            class_scores,
            self.max_detections_per_class,
            self.max_detections,
            self.nms_iou_threshold,
            self.confidence_threshold,
            clip_boxes=False,
        )
| en | 0.736453 | extracted from keras examples no explicit license notice is provided Original Code: https://keras.io/examples/vision/retinanet/#implementing-a-custom-layer-to-decode-predictions A Keras layer that decodes predictions of the RetinaNet model. Attributes: num_classes: Number of classes in the dataset confidence_threshold: Minimum class probability, below which detections are pruned. nms_iou_threshold: IOU threshold for the NMS operation max_detections_per_class: Maximum number of detections to retain per class. max_detections: Maximum number of detections to retain across all classes. box_variance: The scaling factors used to scale the bounding box predictions. # default should be none, [0.1, 0.1, 0.2, 0.2] # TODO generate_anchor_boxes must be better configurable # TODO pre selection of boxes (like in soft nms) based on score | 2.975527 | 3 |
nikola/winutils.py | ivanyschen/nikola | 1,901 | 6624960 | # -*- coding: utf-8 -*-
# Copyright © 2012-2021 <NAME> and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""windows utilities to workaround problems with symlinks in a git clone."""
import os
import shutil
import io
# don't add imports to nikola code, will be imported in setup.py
def is_file_into_dir(filename, dirname):
    """Return True when *filename* lies inside directory *dirname*.

    A relative path from *dirname* to *filename* that does not start with
    '.' means the file is below the directory.  ``os.path.relpath`` may
    raise ``ValueError`` (e.g. paths on different Windows drives); that
    counts as "not inside".
    """
    try:
        relative = os.path.relpath(filename, dirname)
    except ValueError:
        return False
    return not relative.startswith('.')
def fix_all_git_symlinked(topdir):
    """Convert git symlinks to real content.

    Most (all?) of git implementations in windows store a symlink pointing
    into the repo as a text file, the text being the relative path to the
    file with the real content.

    So, in a clone of nikola in windows the symlinked files will have the
    wrong content; a .zip download from Github has the same problem.

    This function will rewrite each symlinked file with the correct contents, but
    keep in mind that the working copy will be seen as dirty by git after operation.

    Expects to find a list of symlinked files at nikola/data/symlinked.txt

    The list can be generated by scripts/generate_symlinked_list.sh , which is
    basically a redirect of
        cd nikola_checkout
        git ls-files -s | awk '/120000/{print $4}'

    Weakness: if interrupted of fail amidst a directory copy, next run will not
    see the missing files.

    Returns -1 when the symlinks are already real files (nothing to do),
    otherwise the number of copy failures (0 on full success).
    """
    # Determine whether or not symlinks need fixing (they don’t if installing
    # from a .tar.gz file)
    with io.open(topdir + r'\nikola\data\symlink-test-link.txt', 'r', encoding='utf-8-sig') as f:
        text = f.read()
    if text.startswith("NIKOLA_SYMLINKS=OK"):
        # sentinel file already holds real content -> symlinks were resolved
        return -1
    with io.open(topdir + r'\nikola\data\symlinked.txt', 'r', encoding='utf-8-sig') as f:
        text = f.read()
    # expect each line a relpath from git or zip root,
    # smoke test relpaths are relative to git root
    if text.startswith('.'):
        raise Exception(r'Bad data in \nikola\data\symlinked.txt')
    # normalize to Windows separators and drop empty lines
    relnames = text.split('\n')
    relnames = [name.strip().replace('/', '\\') for name in relnames]
    relnames = [name for name in relnames if name]
    failures = 0
    for name in relnames:
        # build dst path and do some basic validation
        dst = os.path.join(topdir, name)
        # don't access files outside topdir
        if not is_file_into_dir(dst, topdir):
            continue
        if os.path.isdir(dst):
            # assume the file was de-symlinked
            continue
        # build src path and do some basic validation; the "symlink" file's
        # text content is the relative path to the real file
        with io.open(os.path.join(topdir, dst), 'r', encoding='utf-8-sig') as f:
            text = f.read()
        dst_dir = os.path.dirname(dst)
        try:
            src = os.path.normpath(os.path.join(dst_dir, text))
            if not os.path.exists(src):
                # assume the file was de-symlinked before
                continue
            # don't access files outside topdir
            if not is_file_into_dir(src, topdir):
                continue
        except Exception:
            # assume the file was de-symlinked before
            continue
        # copy src to dst; a directory symlink is replaced by a full tree copy
        try:
            if os.path.isdir(src):
                os.unlink(dst)
                shutil.copytree(src, dst)
            else:
                shutil.copy2(src, dst)
        except Exception:
            # best-effort: count and report, keep processing remaining entries
            failures += 1
            print("*** copy failed for")
            print("\t src:", src)
            print("\t dst:", dst)
    return failures
| # -*- coding: utf-8 -*-
# Copyright © 2012-2021 <NAME> and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""windows utilities to workaround problems with symlinks in a git clone."""
import os
import shutil
import io
# don't add imports to nikola code, will be imported in setup.py
def is_file_into_dir(filename, dirname):
    """Return True when *filename* lies inside directory *dirname*.

    A relative path from *dirname* to *filename* that does not start with
    '.' means the file is below the directory.  ``os.path.relpath`` may
    raise ``ValueError`` (e.g. paths on different Windows drives); that
    counts as "not inside".
    """
    try:
        relative = os.path.relpath(filename, dirname)
    except ValueError:
        return False
    return not relative.startswith('.')
def fix_all_git_symlinked(topdir):
    """Convert git symlinks to real content.

    Most (all?) of git implementations in windows store a symlink pointing
    into the repo as a text file, the text being the relative path to the
    file with the real content.

    So, in a clone of nikola in windows the symlinked files will have the
    wrong content; a .zip download from Github has the same problem.

    This function will rewrite each symlinked file with the correct contents, but
    keep in mind that the working copy will be seen as dirty by git after operation.

    Expects to find a list of symlinked files at nikola/data/symlinked.txt

    The list can be generated by scripts/generate_symlinked_list.sh , which is
    basically a redirect of
        cd nikola_checkout
        git ls-files -s | awk '/120000/{print $4}'

    Weakness: if interrupted of fail amidst a directory copy, next run will not
    see the missing files.

    Returns -1 when the symlinks are already real files (nothing to do),
    otherwise the number of copy failures (0 on full success).
    """
    # Determine whether or not symlinks need fixing (they don’t if installing
    # from a .tar.gz file)
    with io.open(topdir + r'\nikola\data\symlink-test-link.txt', 'r', encoding='utf-8-sig') as f:
        text = f.read()
    if text.startswith("NIKOLA_SYMLINKS=OK"):
        # sentinel file already holds real content -> symlinks were resolved
        return -1
    with io.open(topdir + r'\nikola\data\symlinked.txt', 'r', encoding='utf-8-sig') as f:
        text = f.read()
    # expect each line a relpath from git or zip root,
    # smoke test relpaths are relative to git root
    if text.startswith('.'):
        raise Exception(r'Bad data in \nikola\data\symlinked.txt')
    # normalize to Windows separators and drop empty lines
    relnames = text.split('\n')
    relnames = [name.strip().replace('/', '\\') for name in relnames]
    relnames = [name for name in relnames if name]
    failures = 0
    for name in relnames:
        # build dst path and do some basic validation
        dst = os.path.join(topdir, name)
        # don't access files outside topdir
        if not is_file_into_dir(dst, topdir):
            continue
        if os.path.isdir(dst):
            # assume the file was de-symlinked
            continue
        # build src path and do some basic validation; the "symlink" file's
        # text content is the relative path to the real file
        with io.open(os.path.join(topdir, dst), 'r', encoding='utf-8-sig') as f:
            text = f.read()
        dst_dir = os.path.dirname(dst)
        try:
            src = os.path.normpath(os.path.join(dst_dir, text))
            if not os.path.exists(src):
                # assume the file was de-symlinked before
                continue
            # don't access files outside topdir
            if not is_file_into_dir(src, topdir):
                continue
        except Exception:
            # assume the file was de-symlinked before
            continue
        # copy src to dst; a directory symlink is replaced by a full tree copy
        try:
            if os.path.isdir(src):
                os.unlink(dst)
                shutil.copytree(src, dst)
            else:
                shutil.copy2(src, dst)
        except Exception:
            # best-effort: count and report, keep processing remaining entries
            failures += 1
            print("*** copy failed for")
            print("\t src:", src)
            print("\t dst:", dst)
    return failures
| en | 0.841441 | # -*- coding: utf-8 -*- # Copyright © 2012-2021 <NAME> and others. # Permission is hereby granted, free of charge, to any # person obtaining a copy of this software and associated # documentation files (the "Software"), to deal in the # Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the # Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice # shall be included in all copies or substantial portions of # the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. windows utilities to workaround problems with symlinks in a git clone. # don't add imports to nikola code, will be imported in setup.py Check if a file is in directory. Convert git symlinks to real content. Most (all?) of git implementations in windows store a symlink pointing into the repo as a text file, the text being the relative path to the file with the real content. So, in a clone of nikola in windows the symlinked files will have the wrong content; a .zip download from Github has the same problem. This function will rewrite each symlinked file with the correct contents, but keep in mind that the working copy will be seen as dirty by git after operation. 
Expects to find a list of symlinked files at nikola/data/symlinked.txt The list can be generated by scripts/generate_symlinked_list.sh , which is basically a redirect of cd nikola_checkout git ls-files -s | awk '/120000/{print $4}' Weakness: if interrupted of fail amidst a directory copy, next run will not see the missing files. # Determine whether or not symlinks need fixing (they don’t if installing # from a .tar.gz file) # expect each line a relpath from git or zip root, # smoke test relpaths are relative to git root # build dst path and do some basic validation # don't access files outside topdir # assume the file was de-symlinked # build src path and do some basic validation # assume the file was de-symlinked before # don't access files outside topdir # assume the file was de-symlinked before # copy src to dst | 1.754266 | 2 |
easyreg/test_expr.py | norveclibalikci/easyreg-mirror | 0 | 6624961 | from time import time
from .net_utils import get_test_model
import os
import numpy as np
def test_model(opt, model, dataloaders):
    """Evaluate *model* on *dataloaders* for every configured checkpoint.

    ``model_load_path`` may be a single path or a list of paths; each list
    entry is evaluated separately with an ``<index>_`` prefix on its output
    record files.
    """
    model_path = opt['tsk_set']['path']['model_load_path']
    if not isinstance(model_path, list):
        __test_model(opt, model, dataloaders, model_path)
    else:
        for idx, single_path in enumerate(model_path):
            __test_model(opt, model, dataloaders, single_path, '{}_'.format(idx))
def __test_model(opt,model,dataloaders, model_path,task_name=''):
    """Run one evaluation pass of *model* for a single checkpoint.

    Loads the checkpoint (when *model_path* is non-empty), iterates the test
    dataloader, accumulates scores and timings (plus Jacobi statistics for
    registration tasks) and saves per-sample records as ``.npy`` files under
    the configured record path, each prefixed with *task_name*.

    Returns the evaluated model.
    """
    since = time()
    record_path = opt['tsk_set']['path']['record_path']
    cur_gpu_id = opt['tsk_set'][('gpu_ids', -1,"the gpu id")]
    task_type = opt['dataset'][('task_type','reg',"the task type, either 'reg' or 'seg'")]
    running_range=[-1]#opt['tsk_set']['running_range'] # todo should be [-1]
    # a non-negative first element means only a sub-range of the data is run
    running_part_data = running_range[0]>=0
    if running_part_data:
        print("running part of the test data from range {}".format(running_range))
    gpu_id = cur_gpu_id
    if model.network is not None and gpu_id>=0:
        model.network = model.network.cuda()
    save_fig_on = opt['tsk_set'][('save_fig_on', True, 'saving fig')]
    save_3d_img_on = opt['tsk_set'][('save_3d_img_on', True, 'saving fig')]
    output_taking_original_image_format = opt['tsk_set'][('output_taking_original_image_format', False, 'output follows the same sz and physical format of the original image (input by command line or txt)')]
    phases = ['test'] #['val','test'] ###################################3
    if len(model_path):
        get_test_model(model_path, model.network, model.optimizer)  ##############TODO model.optimizer
    else:
        print("Warning, the model is not manual loaded, make sure your model itself has been inited")
    model.set_cur_epoch(-1)
    for phase in phases:
        num_samples = len(dataloaders[phase])
        if running_part_data:
            num_samples = len(running_range)
        # per-sample accumulators, saved as .npy at the end of the phase
        records_score_np = np.zeros(num_samples)
        records_time_np = np.zeros(num_samples)
        if task_type == 'reg':
            records_jacobi_val_np = np.zeros(num_samples)
            records_jacobi_num_np = np.zeros(num_samples)
        loss_detail_list = []
        jacobi_val_res = 0.
        jacobi_num_res = 0.
        running_test_score = 0
        time_total= 0
        for idx, data in enumerate(dataloaders[phase]):
            i= idx
            if running_part_data:
                if i not in running_range:
                    continue
                # re-index into the compacted record arrays
                i = i - running_range[0]
            batch_size = len(data[0]['image'])
            is_train = False
            if model.network is not None:
                model.network.train(False)
            model.set_val()
            model.set_input(data, is_train)
            ex_time = time()
            model.cal_test_errors()
            batch_time = time() - ex_time
            time_total += batch_time
            print("the batch sample registration takes {} to complete".format(batch_time))
            records_time_np[i] = batch_time
            if save_fig_on:
                model.save_fig('debug_model_'+phase)
            if save_3d_img_on:
                model.save_fig_3D(phase='test')
            if task_type == 'reg':
                model.save_deformation()
            if output_taking_original_image_format:
                model.save_image_into_original_sz_with_given_reference()
            # loss here is the evaluation score for the batch (e.g. dice)
            loss,loss_detail = model.get_test_res(detail=True)
            print("the loss_detailed is {}".format(loss_detail))
            running_test_score += loss * batch_size
            records_score_np[i] = loss
            loss_detail_list += [loss_detail]
            print("id {} and current pair name is : {}".format(i,data[1]))
            print('the current running_score:{}'.format(loss))
            print('the current average running_score:{}'.format(running_test_score/(i+1)/batch_size))
            if task_type == 'reg':
                # Jacobian-determinant statistics of the deformation field
                jaocbi_res = model.get_jacobi_val()
                if jaocbi_res is not None:
                    jacobi_val_res += jaocbi_res[0] * batch_size
                    jacobi_num_res += jaocbi_res[1] * batch_size
                    records_jacobi_val_np[i] = jaocbi_res[0]
                    records_jacobi_num_np[i] = jaocbi_res[1]
                    print('the current jacobi is {}'.format(jaocbi_res))
                    print('the current averge jocobi val is {}'.format(jacobi_val_res/(i+1)/batch_size))
                    print('the current averge jocobi num is {}'.format(jacobi_num_res/(i+1)/batch_size))
        test_score = running_test_score / len(dataloaders[phase].dataset)
        time_per_img = time_total / len((dataloaders[phase].dataset))
        print('the average {}_loss: {:.4f}'.format(phase, test_score))
        print("the average time for per image is {}".format(time_per_img))
        time_elapsed = time() - since
        print('the size of {} is {}, evaluation complete in {:.0f}m {:.0f}s'.format(len(dataloaders[phase].dataset),phase,
                                                                                    time_elapsed // 60,
                                                                                    time_elapsed % 60))
        # persist the per-sample records, prefixed with task_name
        np.save(os.path.join(record_path,task_name+'records'),records_score_np)
        records_detail_np = extract_interest_loss(loss_detail_list,sample_num=len(dataloaders[phase].dataset))
        np.save(os.path.join(record_path,task_name+'records_detail'),records_detail_np)
        np.save(os.path.join(record_path,task_name+'records_time'),records_time_np)
        if task_type == 'reg':
            jacobi_val_res = jacobi_val_res / len(dataloaders[phase].dataset)
            jacobi_num_res = jacobi_num_res / len(dataloaders[phase].dataset)
            print("the average {}_ jacobi val: {} :".format(phase, jacobi_val_res))
            print("the average {}_ jacobi num: {} :".format(phase, jacobi_num_res))
            np.save(os.path.join(record_path, task_name + 'records_jacobi'), records_jacobi_val_np)
            np.save(os.path.join(record_path, task_name + 'records_jacobi_num'), records_jacobi_num_np)
    return model
def extract_interest_loss(loss_detail_list, sample_num):
    """Collect per-sample Dice scores from a list of per-batch metric results.

    (Fixes the original malformed docstring, which opened with four quotes
    and leaked a stray ``"`` into the documentation.)

    Args:
        loss_detail_list: list of per-batch results; each entry is either a
            dict such as ``{'iou': Bx#label, 'dice': Bx#label, ...}`` of
            numpy arrays, or a non-dict placeholder when no detailed
            metrics were recorded.
        sample_num: total number of samples; used to pre-allocate the output
            so it keeps a fixed shape even if batches do not fill it.

    Returns:
        A ``(sample_num, #label)`` array of Dice scores when dict results
        are supplied, otherwise the sentinel array ``[-1]``.
    """
    assert len(loss_detail_list) > 0
    if not isinstance(loss_detail_list[0], dict):
        # no detailed metrics were recorded for this run
        return np.array([-1])
    label_num = loss_detail_list[0]['dice'].shape[1]
    records_detail_np = np.zeros([sample_num, label_num])
    sample_count = 0
    for multi_metric_res in loss_detail_list:
        batch_len = multi_metric_res['dice'].shape[0]
        records_detail_np[sample_count:sample_count + batch_len, :] = multi_metric_res['dice']
        sample_count += batch_len
    return records_detail_np
| from time import time
from .net_utils import get_test_model
import os
import numpy as np
def test_model(opt, model, dataloaders):
    """Evaluate *model* on *dataloaders* for every configured checkpoint.

    ``model_load_path`` may be a single path or a list of paths; each list
    entry is evaluated separately with an ``<index>_`` prefix on its output
    record files.
    """
    model_path = opt['tsk_set']['path']['model_load_path']
    if not isinstance(model_path, list):
        __test_model(opt, model, dataloaders, model_path)
    else:
        for idx, single_path in enumerate(model_path):
            __test_model(opt, model, dataloaders, single_path, '{}_'.format(idx))
def __test_model(opt,model,dataloaders, model_path,task_name=''):
    """Run one evaluation pass of *model* for a single checkpoint.

    Loads the checkpoint (when *model_path* is non-empty), iterates the test
    dataloader, accumulates scores and timings (plus Jacobi statistics for
    registration tasks) and saves per-sample records as ``.npy`` files under
    the configured record path, each prefixed with *task_name*.

    Returns the evaluated model.
    """
    since = time()
    record_path = opt['tsk_set']['path']['record_path']
    cur_gpu_id = opt['tsk_set'][('gpu_ids', -1,"the gpu id")]
    task_type = opt['dataset'][('task_type','reg',"the task type, either 'reg' or 'seg'")]
    running_range=[-1]#opt['tsk_set']['running_range'] # todo should be [-1]
    # a non-negative first element means only a sub-range of the data is run
    running_part_data = running_range[0]>=0
    if running_part_data:
        print("running part of the test data from range {}".format(running_range))
    gpu_id = cur_gpu_id
    if model.network is not None and gpu_id>=0:
        model.network = model.network.cuda()
    save_fig_on = opt['tsk_set'][('save_fig_on', True, 'saving fig')]
    save_3d_img_on = opt['tsk_set'][('save_3d_img_on', True, 'saving fig')]
    output_taking_original_image_format = opt['tsk_set'][('output_taking_original_image_format', False, 'output follows the same sz and physical format of the original image (input by command line or txt)')]
    phases = ['test'] #['val','test'] ###################################3
    if len(model_path):
        get_test_model(model_path, model.network, model.optimizer)  ##############TODO model.optimizer
    else:
        print("Warning, the model is not manual loaded, make sure your model itself has been inited")
    model.set_cur_epoch(-1)
    for phase in phases:
        num_samples = len(dataloaders[phase])
        if running_part_data:
            num_samples = len(running_range)
        # per-sample accumulators, saved as .npy at the end of the phase
        records_score_np = np.zeros(num_samples)
        records_time_np = np.zeros(num_samples)
        if task_type == 'reg':
            records_jacobi_val_np = np.zeros(num_samples)
            records_jacobi_num_np = np.zeros(num_samples)
        loss_detail_list = []
        jacobi_val_res = 0.
        jacobi_num_res = 0.
        running_test_score = 0
        time_total= 0
        for idx, data in enumerate(dataloaders[phase]):
            i= idx
            if running_part_data:
                if i not in running_range:
                    continue
                # re-index into the compacted record arrays
                i = i - running_range[0]
            batch_size = len(data[0]['image'])
            is_train = False
            if model.network is not None:
                model.network.train(False)
            model.set_val()
            model.set_input(data, is_train)
            ex_time = time()
            model.cal_test_errors()
            batch_time = time() - ex_time
            time_total += batch_time
            print("the batch sample registration takes {} to complete".format(batch_time))
            records_time_np[i] = batch_time
            if save_fig_on:
                model.save_fig('debug_model_'+phase)
            if save_3d_img_on:
                model.save_fig_3D(phase='test')
            if task_type == 'reg':
                model.save_deformation()
            if output_taking_original_image_format:
                model.save_image_into_original_sz_with_given_reference()
            # loss here is the evaluation score for the batch (e.g. dice)
            loss,loss_detail = model.get_test_res(detail=True)
            print("the loss_detailed is {}".format(loss_detail))
            running_test_score += loss * batch_size
            records_score_np[i] = loss
            loss_detail_list += [loss_detail]
            print("id {} and current pair name is : {}".format(i,data[1]))
            print('the current running_score:{}'.format(loss))
            print('the current average running_score:{}'.format(running_test_score/(i+1)/batch_size))
            if task_type == 'reg':
                # Jacobian-determinant statistics of the deformation field
                jaocbi_res = model.get_jacobi_val()
                if jaocbi_res is not None:
                    jacobi_val_res += jaocbi_res[0] * batch_size
                    jacobi_num_res += jaocbi_res[1] * batch_size
                    records_jacobi_val_np[i] = jaocbi_res[0]
                    records_jacobi_num_np[i] = jaocbi_res[1]
                    print('the current jacobi is {}'.format(jaocbi_res))
                    print('the current averge jocobi val is {}'.format(jacobi_val_res/(i+1)/batch_size))
                    print('the current averge jocobi num is {}'.format(jacobi_num_res/(i+1)/batch_size))
        test_score = running_test_score / len(dataloaders[phase].dataset)
        time_per_img = time_total / len((dataloaders[phase].dataset))
        print('the average {}_loss: {:.4f}'.format(phase, test_score))
        print("the average time for per image is {}".format(time_per_img))
        time_elapsed = time() - since
        print('the size of {} is {}, evaluation complete in {:.0f}m {:.0f}s'.format(len(dataloaders[phase].dataset),phase,
                                                                                    time_elapsed // 60,
                                                                                    time_elapsed % 60))
        # persist the per-sample records, prefixed with task_name
        np.save(os.path.join(record_path,task_name+'records'),records_score_np)
        records_detail_np = extract_interest_loss(loss_detail_list,sample_num=len(dataloaders[phase].dataset))
        np.save(os.path.join(record_path,task_name+'records_detail'),records_detail_np)
        np.save(os.path.join(record_path,task_name+'records_time'),records_time_np)
        if task_type == 'reg':
            jacobi_val_res = jacobi_val_res / len(dataloaders[phase].dataset)
            jacobi_num_res = jacobi_num_res / len(dataloaders[phase].dataset)
            print("the average {}_ jacobi val: {} :".format(phase, jacobi_val_res))
            print("the average {}_ jacobi num: {} :".format(phase, jacobi_num_res))
            np.save(os.path.join(record_path, task_name + 'records_jacobi'), records_jacobi_val_np)
            np.save(os.path.join(record_path, task_name + 'records_jacobi_num'), records_jacobi_num_np)
    return model
def extract_interest_loss(loss_detail_list, sample_num):
    """Collect per-sample Dice scores from a list of per-batch metric results.

    (Fixes the original malformed docstring, which opened with four quotes
    and leaked a stray ``"`` into the documentation.)

    Args:
        loss_detail_list: list of per-batch results; each entry is either a
            dict such as ``{'iou': Bx#label, 'dice': Bx#label, ...}`` of
            numpy arrays, or a non-dict placeholder when no detailed
            metrics were recorded.
        sample_num: total number of samples; used to pre-allocate the output
            so it keeps a fixed shape even if batches do not fill it.

    Returns:
        A ``(sample_num, #label)`` array of Dice scores when dict results
        are supplied, otherwise the sentinel array ``[-1]``.
    """
    assert len(loss_detail_list) > 0
    if not isinstance(loss_detail_list[0], dict):
        # no detailed metrics were recorded for this run
        return np.array([-1])
    label_num = loss_detail_list[0]['dice'].shape[1]
    records_detail_np = np.zeros([sample_num, label_num])
    sample_count = 0
    for multi_metric_res in loss_detail_list:
        batch_len = multi_metric_res['dice'].shape[0]
        records_detail_np[sample_count:sample_count + batch_len, :] = multi_metric_res['dice']
        sample_count += batch_len
    return records_detail_np
| en | 0.175731 | #opt['tsk_set']['running_range'] # todo should be [-1] #['val','test'] ###################################3 ##############TODO model.optimizer " multi_metric_res:{iou: Bx #label , dice: Bx#label...} , | 2.06216 | 2 |
evennia/utils/ansi.py | TwinklePie/evennia-nvn | 0 | 6624962 | <gh_stars>0
"""
ANSI - Gives colour to text.
Use the codes defined in ANSIPARSER in your text
to apply colour to text according to the ANSI standard.
Examples:
This is %crRed text%cn and this is normal again.
This is {rRed text{n and this is normal again.
Mostly you should not need to call parse_ansi() explicitly;
it is run by Evennia just before returning data to/from the
user.
"""
import re
from evennia.utils import utils
from evennia.utils.utils import to_str, to_unicode
# ANSI definitions -- raw terminal escape sequences used by the parser below
ANSI_BEEP = "\07"
ANSI_ESCAPE = "\033"
ANSI_NORMAL = "\033[0m"
ANSI_UNDERLINE = "\033[4m"
ANSI_HILITE = "\033[1m"
ANSI_UNHILITE = "\033[22m"
ANSI_BLINK = "\033[5m"
ANSI_INVERSE = "\033[7m"
ANSI_INV_HILITE = "\033[1;7m"
ANSI_INV_BLINK = "\033[7;5m"
ANSI_BLINK_HILITE = "\033[1;5m"
ANSI_INV_BLINK_HILITE = "\033[1;5;7m"
# Foreground colors
ANSI_BLACK = "\033[30m"
ANSI_RED = "\033[31m"
ANSI_GREEN = "\033[32m"
ANSI_YELLOW = "\033[33m"
ANSI_BLUE = "\033[34m"
ANSI_MAGENTA = "\033[35m"
ANSI_CYAN = "\033[36m"
ANSI_WHITE = "\033[37m"
# Background colors
ANSI_BACK_BLACK = "\033[40m"
ANSI_BACK_RED = "\033[41m"
ANSI_BACK_GREEN = "\033[42m"
ANSI_BACK_YELLOW = "\033[43m"
ANSI_BACK_BLUE = "\033[44m"
ANSI_BACK_MAGENTA = "\033[45m"
ANSI_BACK_CYAN = "\033[46m"
ANSI_BACK_WHITE = "\033[47m"
# Formatting Characters
ANSI_RETURN = "\r\n"
ANSI_TAB = "\t"
ANSI_SPACE = " "
# Escapes -- user-typed sequences that produce a literal '{' or '\'
ANSI_ESCAPES = ("{{", "\\\\")
from collections import OrderedDict
# insertion-ordered cache of parsed strings; oldest entries are evicted
# once _PARSE_CACHE_SIZE is exceeded (see ANSIParser.parse_ansi)
_PARSE_CACHE = OrderedDict()
_PARSE_CACHE_SIZE = 10000
class ANSIParser(object):
"""
A class that parses ANSI markup
to ANSI command sequences
We also allow to escape colour codes
by prepending with a \ for xterm256,
an extra { for Merc-style codes
"""
def sub_ansi(self, ansimatch):
"""
Replacer used by `re.sub` to replace ANSI
markers with correct ANSI sequences
"""
return self.ansi_map.get(ansimatch.group(), "")
def sub_brightbg(self, ansimatch):
"""
Replacer used by `re.sub` to replace ANSI
bright background markers with Xterm256 replacement
"""
return self.ansi_bright_bgs.get(ansimatch.group(), "")
    def sub_xterm256(self, rgbmatch, convert=False):
        """
        `re.sub` replacer for xterm256 color tags ({RGB foreground or
        {[RGB background, digits 0-5 per channel).

        When `convert` is True a true xterm256 escape sequence is emitted;
        otherwise the RGB value is approximated with one of the 16 basic
        ANSI colors.
        """
        if not rgbmatch:
            return ""
        # get tag, stripping the initial marker
        rgbtag = rgbmatch.group()[1:]
        # a leading '[' marks a background tag ({[RGB); otherwise foreground
        background = rgbtag[0] == '['
        if background:
            red, green, blue = int(rgbtag[1]), int(rgbtag[2]), int(rgbtag[3])
        else:
            red, green, blue = int(rgbtag[0]), int(rgbtag[1]), int(rgbtag[2])
        if convert:
            # xterm256 color-cube index: 16 + 36r + 6g + b (16..231)
            colval = 16 + (red * 36) + (green * 6) + blue
            #print "RGB colours:", red, green, blue
            # NOTE(review): the digit-by-digit formatting below relies on
            # Python 2 integer division for '/'; under Python 3 it would
            # emit floats -- confirm the intended interpreter version.
            return "\033[%s8;5;%s%s%sm" % (3 + int(background), colval/100, (colval % 100)/10, colval%10)
        else:
            #print "ANSI convert:", red, green, blue
            # xterm256 not supported, convert the rgb value to ansi instead
            # (nearest of the 16 basic colors, picked by channel dominance)
            if red == green and red == blue and red < 2:
                # dark greyscale -> black
                if background:
                    return ANSI_BACK_BLACK
                elif red >= 1:
                    return ANSI_HILITE + ANSI_BLACK
                else:
                    return ANSI_NORMAL + ANSI_BLACK
            elif red == green and red == blue:
                # light greyscale -> white
                if background:
                    return ANSI_BACK_WHITE
                elif red >= 4:
                    return ANSI_HILITE + ANSI_WHITE
                else:
                    return ANSI_NORMAL + ANSI_WHITE
            elif red > green and red > blue:
                if background:
                    return ANSI_BACK_RED
                elif red >= 3:
                    return ANSI_HILITE + ANSI_RED
                else:
                    return ANSI_NORMAL + ANSI_RED
            elif red == green and red > blue:
                if background:
                    return ANSI_BACK_YELLOW
                elif red >= 3:
                    return ANSI_HILITE + ANSI_YELLOW
                else:
                    return ANSI_NORMAL + ANSI_YELLOW
            elif red == blue and red > green:
                if background:
                    return ANSI_BACK_MAGENTA
                elif red >= 3:
                    return ANSI_HILITE + ANSI_MAGENTA
                else:
                    return ANSI_NORMAL + ANSI_MAGENTA
            elif green > blue:
                if background:
                    return ANSI_BACK_GREEN
                elif green >= 3:
                    return ANSI_HILITE + ANSI_GREEN
                else:
                    return ANSI_NORMAL + ANSI_GREEN
            elif green == blue:
                if background:
                    return ANSI_BACK_CYAN
                elif green >= 3:
                    return ANSI_HILITE + ANSI_CYAN
                else:
                    return ANSI_NORMAL + ANSI_CYAN
            else: # mostly blue
                if background:
                    return ANSI_BACK_BLUE
                elif blue >= 3:
                    return ANSI_HILITE + ANSI_BLUE
                else:
                    return ANSI_NORMAL + ANSI_BLUE
def strip_raw_codes(self, string):
    """Remove every raw ANSI escape sequence from `string` and return it."""
    cleaned = self.ansi_regex.sub("", string)
    return cleaned
def strip_mxp(self, string):
    """Drop all MXP link markup, keeping only the visible link text."""
    # The MXP pattern captures (command, text); keep only the text part.
    return self.mxp_sub.sub(r'\2', string)
def parse_ansi(self, string, strip_ansi=False, xterm256=False, mxp=False):
    """
    Parses a string, subbing color codes according to
    the stored mapping.

    strip_ansi flag instead removes all ANSI markup.
    """
    # ANSIStrings were already parsed at construction; just return the
    # requested view of them without re-parsing.
    if hasattr(string, '_raw_string'):
        if strip_ansi:
            return string.clean()
        else:
            return string.raw()
    if not string:
        return ''
    # check cached parsings
    global _PARSE_CACHE
    cachekey = "%s-%s-%s" % (string, strip_ansi, xterm256)
    if cachekey in _PARSE_CACHE:
        return _PARSE_CACHE[cachekey]
    # pre-convert bright colors to xterm256 color tags
    string = self.brightbg_sub.sub(self.sub_brightbg, string)

    def do_xterm256(part):
        # Closure binding the xterm256 flag for the regex replacer.
        return self.sub_xterm256(part, xterm256)

    in_string = utils.to_str(string)
    # do string replacement
    parsed_string = ""
    # ansi_escapes has a capture group, so split() alternates
    # text/separator chunks; the trailing " " is a sentinel so zip()
    # always pairs the final text chunk with a separator.
    parts = self.ansi_escapes.split(in_string) + [" "]
    for part, sep in zip(parts[::2], parts[1::2]):
        pstring = self.xterm256_sub.sub(do_xterm256, part)
        pstring = self.ansi_sub.sub(self.sub_ansi, pstring)
        # sep[0] collapses a doubled escape ("{{" -> "{", "\\\\" -> "\\");
        # .strip() erases the sentinel " " separator at the end.
        parsed_string += "%s%s" % (pstring, sep[0].strip())
    if not mxp:
        parsed_string = self.strip_mxp(parsed_string)
    if strip_ansi:
        # remove all ansi codes (including those manually
        # inserted in string)
        return self.strip_raw_codes(parsed_string)
    # cache and crop old cache (OrderedDict used FIFO-style)
    _PARSE_CACHE[cachekey] = parsed_string
    if len(_PARSE_CACHE) > _PARSE_CACHE_SIZE:
        _PARSE_CACHE.popitem(last=False)
    return parsed_string
# Mapping using {r {n etc
# Class-level lookup tables and precompiled regexes, shared by all
# ANSIParser instances.
hilite = ANSI_HILITE
unhilite = ANSI_UNHILITE
# Merc-style markup tag -> raw ANSI sequence (source for ansi_map below).
ext_ansi_map = [
    (r'{n', ANSI_NORMAL),  # reset
    (r'{/', ANSI_RETURN),  # line break
    (r'{-', ANSI_TAB),  # tab
    (r'{_', ANSI_SPACE),  # space
    (r'{*', ANSI_INVERSE),  # invert
    (r'{^', ANSI_BLINK),  # blinking text (very annoying and not supported by all clients)
    (r'{r', hilite + ANSI_RED),
    (r'{g', hilite + ANSI_GREEN),
    (r'{y', hilite + ANSI_YELLOW),
    (r'{b', hilite + ANSI_BLUE),
    (r'{m', hilite + ANSI_MAGENTA),
    (r'{c', hilite + ANSI_CYAN),
    (r'{w', hilite + ANSI_WHITE),  # pure white
    (r'{x', hilite + ANSI_BLACK),  # dark grey
    (r'{R', unhilite + ANSI_RED),
    (r'{G', unhilite + ANSI_GREEN),
    (r'{Y', unhilite + ANSI_YELLOW),
    (r'{B', unhilite + ANSI_BLUE),
    (r'{M', unhilite + ANSI_MAGENTA),
    (r'{C', unhilite + ANSI_CYAN),
    (r'{W', unhilite + ANSI_WHITE),  # light grey
    (r'{X', unhilite + ANSI_BLACK),  # pure black
    # hilight-able colors
    (r'{h', hilite),
    (r'{H', unhilite),
    (r'{!R', ANSI_RED),
    (r'{!G', ANSI_GREEN),
    (r'{!Y', ANSI_YELLOW),
    (r'{!B', ANSI_BLUE),
    (r'{!M', ANSI_MAGENTA),
    (r'{!C', ANSI_CYAN),
    (r'{!W', ANSI_WHITE),  # light grey
    (r'{!X', ANSI_BLACK),  # pure black
    # normal ANSI backgrounds
    (r'{[R', ANSI_BACK_RED),
    (r'{[G', ANSI_BACK_GREEN),
    (r'{[Y', ANSI_BACK_YELLOW),
    (r'{[B', ANSI_BACK_BLUE),
    (r'{[M', ANSI_BACK_MAGENTA),
    (r'{[C', ANSI_BACK_CYAN),
    (r'{[W', ANSI_BACK_WHITE),  # light grey background
    (r'{[X', ANSI_BACK_BLACK)  # pure black background
]

ansi_bright_bgs = [
    # "bright" ANSI backgrounds using xterm256 since ANSI
    # standard does not support it (will
    # fallback to dark ANSI background colors if xterm256
    # is not supported by client)
    (r'{[r', r'{[500'),
    (r'{[g', r'{[050'),
    (r'{[y', r'{[550'),
    (r'{[b', r'{[005'),
    (r'{[m', r'{[505'),
    (r'{[c', r'{[055'),
    (r'{[w', r'{[555'),  # white background
    (r'{[x', r'{[222')]  # dark grey background

# xterm256 {123, %c134. These are replaced directly by
# the sub_xterm256 method
xterm256_map = [
    (r'%[0-5]{3}', ""),  # %123 - foreground colour
    (r'%\[[0-5]{3}', ""),  # %[123 - background colour
    (r'\{[0-5]{3}', ""),  # {123 - foreground colour
    (r'\{\[[0-5]{3}', "")  # {[123 - background colour
]

# MXP link markup: {lc<command>{lt<text>{le
mxp_re = r'\{lc(.*?)\{lt(.*?)\{le'

# prepare regex matching
brightbg_sub = re.compile(r"|".join([re.escape(tup[0]) for tup in ansi_bright_bgs]), re.DOTALL)
xterm256_sub = re.compile(r"|".join([tup[0] for tup in xterm256_map]), re.DOTALL)
ansi_sub = re.compile(r"|".join([re.escape(tup[0]) for tup in ext_ansi_map]), re.DOTALL)
mxp_sub = re.compile(mxp_re, re.DOTALL)

# used by regex replacer to correctly map ansi sequences
# (note: ansi_bright_bgs is rebound from list-of-pairs to dict here)
ansi_map = dict(ext_ansi_map)
ansi_bright_bgs = dict(ansi_bright_bgs)

# prepare matching ansi codes overall
ansi_re = r"\033\[[0-9;]+m"
ansi_regex = re.compile(ansi_re)

# escapes - these double-chars will be replaced with a single
# instance of each
ansi_escapes = re.compile(r"(%s)" % "|".join(ANSI_ESCAPES), re.DOTALL)
# Module-level singleton parser used as the default by the access
# functions below.
ANSI_PARSER = ANSIParser()


#
# Access function
#
def parse_ansi(string, strip_ansi=False, parser=ANSI_PARSER, xterm256=False, mxp=False):
    """
    Parses a string, subbing color codes as needed.
    """
    # Thin module-level wrapper around the parser singleton.
    result = parser.parse_ansi(
        string, strip_ansi=strip_ansi, xterm256=xterm256, mxp=mxp)
    return result
def strip_raw_ansi(string, parser=ANSI_PARSER):
    """
    Remove raw ansi codes from string
    """
    stripped = parser.strip_raw_codes(string)
    return stripped
def raw(string):
    """
    Escapes a string into a form which won't be colorized by the ansi parser.
    """
    # Doubling '{' is the parser's escape convention.
    return "{{".join(string.split('{'))
def group(lst, n):
    """Yield successive n-tuples from lst, dropping any incomplete tail."""
    for start in range(0, len(lst), n):
        chunk = tuple(lst[start:start + n])
        if len(chunk) == n:
            yield chunk
def _spacing_preflight(func):
    """
    This wrapper function is used to do some preflight checks on functions used
    for padding ANSIStrings.
    """
    def checked(self, width, fillchar=None):
        fillchar = " " if fillchar is None else fillchar
        # len() runs first so non-sized fillchars raise exactly as before.
        if len(fillchar) != 1 or not isinstance(fillchar, basestring):
            raise TypeError("must be char, not %s" % type(fillchar))
        if not isinstance(width, int):
            raise TypeError("integer argument expected, got %s" % type(width))
        padding = width - len(self)
        if padding <= 0:
            # Already wide enough: nothing to pad.
            return self
        return func(self, width, fillchar, padding)
    return checked
def _query_super(func_name):
    """
    Have the string class handle this with the cleaned string instead of
    ANSIString.
    """
    def proxy(self, *args, **kwargs):
        # Dispatch to the named method on the ANSI-free string.
        method = getattr(self.clean(), func_name)
        return method(*args, **kwargs)
    return proxy
def _on_raw(func_name):
    """
    Like query_super, but makes the operation run on the raw string.
    """
    def proxy(self, *args, **kwargs):
        args = list(args)
        if args:
            # If the first argument is itself an ANSIString, operate on
            # its raw form so escapes line up with ours.
            head = args[0]
            if hasattr(head, '_raw_string'):
                args[0] = head.raw()
        result = getattr(self._raw_string, func_name)(*args, **kwargs)
        if isinstance(result, basestring):
            # Re-wrap string results; they are already decoded.
            result = ANSIString(result, decoded=True)
        return result
    return proxy
def _transform(func_name):
    """
    Some string functions, like those manipulating capital letters,
    return a string the same length as the original. This function
    allows us to do the same, replacing all the non-coded characters
    with the resulting string.
    """
    def wrapped(self, *args, **kwargs):
        # Run the transform on the clean (ANSI-free) string first...
        replacement_string = _query_super(func_name)(self, *args, **kwargs)
        to_string = []
        char_counter = 0
        # ...then re-interleave untouched ANSI code bytes with the
        # transformed readable characters, position by position.
        for index in range(0, len(self._raw_string)):
            if index in self._code_indexes:
                to_string.append(self._raw_string[index])
            elif index in self._char_indexes:
                to_string.append(replacement_string[char_counter])
                char_counter += 1
        # Index tables are unchanged since only readable chars were
        # replaced one-for-one.
        return ANSIString(
            ''.join(to_string), decoded=True,
            code_indexes=self._code_indexes, char_indexes=self._char_indexes,
            clean_string=replacement_string)
    return wrapped
class ANSIMeta(type):
    """
    Many functions on ANSIString are just light wrappers around the unicode
    base class. We apply them here, as part of the classes construction.
    """
    def __init__(cls, *args, **kwargs):
        # Each wrapper factory is paired with the method names it proxies.
        delegations = (
            (_query_super,
             ('count', 'startswith', 'endswith', 'find', 'index', 'isalnum',
              'isalpha', 'isdigit', 'islower', 'isspace', 'istitle',
              'isupper', 'rfind', 'rindex', '__len__')),
            (_on_raw,
             ('__mod__', 'expandtabs', 'decode', 'replace', 'format',
              'encode')),
            (_transform,
             ('capitalize', 'translate', 'lower', 'upper', 'swapcase')),
        )
        for factory, names in delegations:
            for name in names:
                setattr(cls, name, factory(name))
        super(ANSIMeta, cls).__init__(*args, **kwargs)
class ANSIString(unicode):
    """
    String-like object that is aware of ANSI codes.

    This isn't especially efficient, as it doesn't really have an
    understanding of what the codes mean in order to eliminate
    redundant characters. This could be made as an enhancement to ANSI_PARSER.

    If one is going to use ANSIString, one should generally avoid converting
    away from it until one is about to send information on the wire. This is
    because escape sequences in the string may otherwise already be decoded,
    and taken literally the second time around.

    Please refer to the Metaclass, ANSIMeta, which is used to apply wrappers
    for several of the methods that need not be defined directly here.
    """
    # Python 2 only: subclasses `unicode` and uses basestring/xrange below.
    __metaclass__ = ANSIMeta

    def __new__(cls, *args, **kwargs):
        """
        When creating a new ANSIString, you may use a custom parser that has
        the same attributes as the standard one, and you may declare the
        string to be handled as already decoded. It is important not to double
        decode strings, as escapes can only be respected once.

        Internally, ANSIString can also passes itself precached code/character
        indexes and clean strings to avoid doing extra work when combining
        ANSIStrings.
        """
        string = args[0]
        if not isinstance(string, basestring):
            string = to_str(string, force_string=True)
        parser = kwargs.get('parser', ANSI_PARSER)
        decoded = kwargs.get('decoded', False) or hasattr(string, '_raw_string')
        code_indexes = kwargs.pop('code_indexes', None)
        char_indexes = kwargs.pop('char_indexes', None)
        clean_string = kwargs.pop('clean_string', None)
        # All True, or All False, not just one.
        # (Python 2: map() returns a list here.)
        checks = map(lambda x: x is None, [code_indexes, char_indexes, clean_string])
        if not len(set(checks)) == 1:
            raise ValueError("You must specify code_indexes, char_indexes, "
                             "and clean_string together, or not at all.")
        if not all(checks):
            # Precached data implies the string is already decoded.
            decoded = True
        if not decoded:
            # Completely new ANSI String
            clean_string = to_unicode(parser.parse_ansi(string, strip_ansi=True, mxp=True))
            string = parser.parse_ansi(string, xterm256=True, mxp=True)
        elif clean_string is not None:
            # We have an explicit clean string.
            pass
        elif hasattr(string, '_clean_string'):
            # It's already an ANSIString
            clean_string = string._clean_string
            code_indexes = string._code_indexes
            char_indexes = string._char_indexes
            string = string._raw_string
        else:
            # It's a string that has been pre-ansi decoded.
            clean_string = parser.strip_raw_codes(string)
        if not isinstance(string, unicode):
            string = string.decode('utf-8')
        # The unicode value of the object is the *clean* string; the raw
        # (escape-bearing) form is kept on _raw_string.
        ansi_string = super(ANSIString, cls).__new__(ANSIString, to_str(clean_string), "utf-8")
        ansi_string._raw_string = string
        ansi_string._clean_string = clean_string
        ansi_string._code_indexes = code_indexes
        ansi_string._char_indexes = char_indexes
        return ansi_string

    def __str__(self):
        # str() yields the raw, escape-bearing bytes.
        return self._raw_string.encode('utf-8')

    def __unicode__(self):
        """
        Unfortunately, this is not called during print() statements due to a
        bug in the Python interpreter. You can always do unicode() or str()
        around the resulting ANSIString and print that.
        """
        return self._raw_string

    def __repr__(self):
        """
        Let's make the repr the command that would actually be used to
        construct this object, for convenience and reference.
        """
        return "ANSIString(%s, decoded=True)" % repr(self._raw_string)

    def __init__(self, *_, **kwargs):
        """
        When the ANSIString is first initialized, a few internal variables
        have to be set.

        The first is the parser. It is possible to replace Evennia's standard
        ANSI parser with one of your own syntax if you wish, so long as it
        implements the same interface.

        The second is the _raw_string. It should be noted that the ANSIStrings
        are unicode based. This seemed more reasonable than basing it off of
        the string class, because if someone were to use a unicode character,
        the benefits of knowing the indexes of the ANSI characters would be
        negated by the fact that a character within the string might require
        more than one byte to be represented. The raw string is, then, a
        unicode object rather than a true encoded string. If you need the
        encoded string for sending over the wire, try using the .encode()
        method.

        The third thing to set is the _clean_string. This is a unicode object
        that is devoid of all ANSI Escapes.

        Finally, _code_indexes and _char_indexes are defined. These are lookup
        tables for which characters in the raw string are related to ANSI
        escapes, and which are for the readable text.
        """
        self.parser = kwargs.pop('parser', ANSI_PARSER)
        super(ANSIString, self).__init__()
        if self._code_indexes is None:
            # Not precached by __new__; compute the lookup tables now.
            self._code_indexes, self._char_indexes = self._get_indexes()

    @staticmethod
    def _shifter(iterable, offset):
        """
        Takes a list of integers, and produces a new one incrementing all
        by a number.
        """
        return [i + offset for i in iterable]

    @classmethod
    def _adder(cls, first, second):
        """
        Joins two ANSIStrings, preserving calculated info.
        """
        raw_string = first._raw_string + second._raw_string
        clean_string = first._clean_string + second._clean_string
        code_indexes = first._code_indexes[:]
        char_indexes = first._char_indexes[:]
        # Second string's indexes shift right by the first's raw length.
        code_indexes.extend(
            cls._shifter(second._code_indexes, len(first._raw_string)))
        char_indexes.extend(
            cls._shifter(second._char_indexes, len(first._raw_string)))
        return ANSIString(raw_string, code_indexes=code_indexes,
                          char_indexes=char_indexes,
                          clean_string=clean_string)

    def __add__(self, other):
        """
        We have to be careful when adding two strings not to reprocess things
        that don't need to be reprocessed, lest we end up with escapes being
        interpreted literally.
        """
        if not isinstance(other, basestring):
            return NotImplemented
        if not isinstance(other, ANSIString):
            other = ANSIString(other)
        return self._adder(self, other)

    def __radd__(self, other):
        """
        Likewise, if we're on the other end.
        """
        if not isinstance(other, basestring):
            return NotImplemented
        if not isinstance(other, ANSIString):
            other = ANSIString(other)
        return self._adder(other, self)

    def __getslice__(self, i, j):
        """
        This function is deprecated, so we just make it call the proper
        function.
        """
        # Python 2 calls this for plain s[i:j] slices on str subclasses.
        return self.__getitem__(slice(i, j))

    def _slice(self, slc):
        """
        This function takes a slice() object.

        Slices have to be handled specially. Not only are they able to specify
        a start and end with [x:y], but many forget that they can also specify
        an interval with [x:y:z]. As a result, not only do we have to track
        the ANSI Escapes that have played before the start of the slice, we
        must also replay any in these intervals, should they exist.

        Thankfully, slicing the _char_indexes table gives us the actual
        indexes that need slicing in the raw string. We can check between
        those indexes to figure out what escape characters need to be
        replayed.
        """
        slice_indexes = self._char_indexes[slc]
        # If it's the end of the string, we need to append final color codes.
        if not slice_indexes:
            return ANSIString('')
        try:
            # NOTE(review): slc.start may be None (e.g. s[::2] goes through
            # __getitem__, not __getslice__); self[None] would raise
            # TypeError, which this except does not catch -- confirm.
            string = self[slc.start]._raw_string
        except IndexError:
            return ANSIString('')
        last_mark = slice_indexes[0]
        # Check between the slice intervals for escape sequences.
        i = None
        for i in slice_indexes[1:]:
            for index in xrange(last_mark, i):
                if index in self._code_indexes:
                    string += self._raw_string[index]
            last_mark = i
            try:
                string += self._raw_string[i]
            except IndexError:
                pass
        if i is not None:
            append_tail = self._get_interleving(self._char_indexes.index(i) + 1)
        else:
            append_tail = ''
        return ANSIString(string + append_tail, decoded=True)

    def __getitem__(self, item):
        """
        Gateway for slices and getting specific indexes in the ANSIString. If
        this is a regexable ANSIString, it will get the data from the raw
        string instead, bypassing ANSIString's intelligent escape skipping,
        for reasons explained in the __new__ method's docstring.
        """
        if isinstance(item, slice):
            # Slices must be handled specially.
            return self._slice(item)
        try:
            self._char_indexes[item]
        except IndexError:
            raise IndexError("ANSIString Index out of range")
        # Get character codes after the index as well.
        if self._char_indexes[-1] == self._char_indexes[item]:
            append_tail = self._get_interleving(item + 1)
        else:
            append_tail = ''
        item = self._char_indexes[item]

        clean = self._raw_string[item]
        result = ''
        # Get the character they're after, and replay all escape sequences
        # previous to it.
        for index in xrange(0, item + 1):
            if index in self._code_indexes:
                result += self._raw_string[index]
        return ANSIString(result + clean + append_tail, decoded=True)

    def clean(self):
        """
        Return a unicode object without the ANSI escapes.
        """
        return self._clean_string

    def raw(self):
        """
        Return a unicode object with the ANSI escapes.
        """
        return self._raw_string

    def partition(self, sep, reverse=False):
        """
        Similar to split, but always creates a tuple with three items:
        1. The part before the separator
        2. The separator itself.
        3. The part after.

        We use the same techniques we used in split() to make sure each are
        colored.
        """
        if hasattr(sep, '_clean_string'):
            sep = sep.clean()
        if reverse:
            parent_result = self._clean_string.rpartition(sep)
        else:
            parent_result = self._clean_string.partition(sep)
        current_index = 0
        result = tuple()
        # Re-slice self so each piece keeps its color information.
        for section in parent_result:
            result += (self[current_index:current_index + len(section)],)
            current_index += len(section)
        return result

    def _get_indexes(self):
        """
        Two tables need to be made, one which contains the indexes of all
        readable characters, and one which contains the indexes of all ANSI
        escapes. It's important to remember that ANSI escapes require more
        that one character at a time, though no readable character needs more
        than one character, since the unicode base class abstracts that away
        from us. However, several readable characters can be placed in a row.

        We must use regexes here to figure out where all the escape sequences
        are hiding in the string. Then we use the ranges of their starts and
        ends to create a final, comprehensive list of all indexes which are
        dedicated to code, and all dedicated to text.

        It's possible that only one of these tables is actually needed, the
        other assumed to be what isn't in the first.
        """
        code_indexes = []
        for match in self.parser.ansi_regex.finditer(self._raw_string):
            code_indexes.extend(range(match.start(), match.end()))
        if not code_indexes:
            # Plain string, no ANSI codes.
            return code_indexes, range(0, len(self._raw_string))
        # all indexes not occupied by ansi codes are normal characters
        char_indexes = [i for i in range(len(self._raw_string)) if i not in code_indexes]
        return code_indexes, char_indexes

    def _get_interleving(self, index):
        """
        Get the code characters from the given slice end to the next
        character.
        """
        try:
            index = self._char_indexes[index - 1]
        except IndexError:
            return ''
        s = ''
        while True:
            index += 1
            if index in self._char_indexes:
                break
            elif index in self._code_indexes:
                s += self._raw_string[index]
            else:
                break
        return s

    def split(self, by, maxsplit=-1):
        """
        Stolen from PyPy's pure Python string implementation, tweaked for
        ANSIString.

        PyPy is distributed under the MIT licence.
        http://opensource.org/licenses/MIT
        """
        bylen = len(by)
        if bylen == 0:
            raise ValueError("empty separator")

        res = []
        start = 0
        # Search the clean string; slice self so pieces keep their colors.
        while maxsplit != 0:
            next = self._clean_string.find(by, start)
            if next < 0:
                break
            # Get character codes after the index as well.
            res.append(self[start:next])
            start = next + bylen
            maxsplit -= 1   # NB. if it's already < 0, it stays < 0

        res.append(self[start:len(self)])
        return res

    def __mul__(self, other):
        """
        Multiplication method. Implemented for performance reasons.
        """
        if not isinstance(other, int):
            return NotImplemented
        raw_string = self._raw_string * other
        clean_string = self._clean_string * other
        code_indexes = self._code_indexes[:]
        char_indexes = self._char_indexes[:]
        # NOTE(review): range(1, other + 1) appears to extend the index
        # tables one copy too many (raw_string holds only `other` copies);
        # likely should be range(1, other) -- confirm.
        for i in range(1, other + 1):
            code_indexes.extend(
                self._shifter(self._code_indexes, i * len(self._raw_string)))
            char_indexes.extend(
                self._shifter(self._char_indexes, i * len(self._raw_string)))
        return ANSIString(
            raw_string, code_indexes=code_indexes, char_indexes=char_indexes,
            clean_string=clean_string)

    def __rmul__(self, other):
        return self.__mul__(other)

    def rsplit(self, by, maxsplit=-1):
        """
        Stolen from PyPy's pure Python string implementation, tweaked for
        ANSIString.

        PyPy is distributed under the MIT licence.
        http://opensource.org/licenses/MIT
        """
        res = []
        end = len(self)
        bylen = len(by)
        if bylen == 0:
            raise ValueError("empty separator")

        # Scan right-to-left over the clean string, slicing self.
        while maxsplit != 0:
            next = self._clean_string.rfind(by, 0, end)
            if next < 0:
                break
            # Get character codes after the index as well.
            res.append(self[next+bylen:end])
            end = next
            maxsplit -= 1   # NB. if it's already < 0, it stays < 0

        res.append(self[:end])
        res.reverse()
        return res

    def join(self, iterable):
        """
        Joins together strings in an iterable.
        """
        result = ANSIString('')
        last_item = None
        for item in iterable:
            if last_item is not None:
                # Insert the (raw) separator between items.
                result += self._raw_string
            if not isinstance(item, ANSIString):
                item = ANSIString(item)
            result += item
            last_item = item
        return result

    def _filler(self, char, amount):
        """
        Generate a line of characters in a more efficient way than just adding
        ANSIStrings.
        """
        if not isinstance(char, ANSIString):
            line = char * amount
            # NOTE(review): clean_string is the single `char`, not `line`;
            # len() of the result would be 1 regardless of `amount`. Should
            # probably be clean_string=line -- confirm.
            return ANSIString(
                char * amount, code_indexes=[], char_indexes=range(0, len(line)),
                clean_string=char)
        try:
            start = char._code_indexes[0]
        except IndexError:
            start = None
        end = char._char_indexes[0]
        prefix = char._raw_string[start:end]
        postfix = char._raw_string[end + 1:]
        line = char._clean_string * amount
        code_indexes = [i for i in range(0, len(prefix))]
        length = len(prefix) + len(line)
        code_indexes.extend([i for i in range(length, length + len(postfix))])
        char_indexes = self._shifter(xrange(0, len(line)), len(prefix))
        raw_string = prefix + line + postfix
        return ANSIString(
            raw_string, clean_string=line, char_indexes=char_indexes,
            code_indexes=code_indexes)

    @_spacing_preflight
    def center(self, width, fillchar, difference):
        """
        Center some text with some spaces padding both sides.
        """
        remainder = difference % 2
        # Python 2 integer division: halves the padding, remainder goes right.
        difference /= 2
        spacing = self._filler(fillchar, difference)
        result = spacing + self + spacing + self._filler(fillchar, remainder)
        return result

    @_spacing_preflight
    def ljust(self, width, fillchar, difference):
        """
        Left justify some text.
        """
        return self + self._filler(fillchar, difference)

    @_spacing_preflight
    def rjust(self, width, fillchar, difference):
        """
        Right justify some text.
        """
        return self._filler(fillchar, difference) + self
"""
ANSI - Gives colour to text.
Use the codes defined in ANSIPARSER in your text
to apply colour to text according to the ANSI standard.
Examples:
This is %crRed text%cn and this is normal again.
This is {rRed text{n and this is normal again.
Mostly you should not need to call parse_ansi() explicitly;
it is run by Evennia just before returning data to/from the
user.
"""
import re
from evennia.utils import utils
from evennia.utils.utils import to_str, to_unicode
# ANSI definitions
ANSI_BEEP = "\07"
ANSI_ESCAPE = "\033"
ANSI_NORMAL = "\033[0m"

ANSI_UNDERLINE = "\033[4m"
ANSI_HILITE = "\033[1m"
ANSI_UNHILITE = "\033[22m"
ANSI_BLINK = "\033[5m"
ANSI_INVERSE = "\033[7m"
ANSI_INV_HILITE = "\033[1;7m"
ANSI_INV_BLINK = "\033[7;5m"
ANSI_BLINK_HILITE = "\033[1;5m"
ANSI_INV_BLINK_HILITE = "\033[1;5;7m"

# Foreground colors
ANSI_BLACK = "\033[30m"
ANSI_RED = "\033[31m"
ANSI_GREEN = "\033[32m"
ANSI_YELLOW = "\033[33m"
ANSI_BLUE = "\033[34m"
ANSI_MAGENTA = "\033[35m"
ANSI_CYAN = "\033[36m"
ANSI_WHITE = "\033[37m"

# Background colors
ANSI_BACK_BLACK = "\033[40m"
ANSI_BACK_RED = "\033[41m"
ANSI_BACK_GREEN = "\033[42m"
ANSI_BACK_YELLOW = "\033[43m"
ANSI_BACK_BLUE = "\033[44m"
ANSI_BACK_MAGENTA = "\033[45m"
ANSI_BACK_CYAN = "\033[46m"
ANSI_BACK_WHITE = "\033[47m"

# Formatting Characters
ANSI_RETURN = "\r\n"
ANSI_TAB = "\t"
ANSI_SPACE = " "

# Escapes
ANSI_ESCAPES = ("{{", "\\\\")

from collections import OrderedDict
# FIFO cache of parsed strings: oldest entries are evicted
# (popitem(last=False)) once the cache exceeds _PARSE_CACHE_SIZE.
_PARSE_CACHE = OrderedDict()
_PARSE_CACHE_SIZE = 10000
class ANSIParser(object):
    """
    A class that parses ANSI markup
    to ANSI command sequences

    We also allow to escape colour codes
    by prepending with a \ for xterm256,
    an extra { for Merc-style codes
    """

    def sub_ansi(self, ansimatch):
        """
        Replacer used by `re.sub` to replace ANSI
        markers with correct ANSI sequences
        """
        return self.ansi_map.get(ansimatch.group(), "")

    def sub_brightbg(self, ansimatch):
        """
        Replacer used by `re.sub` to replace ANSI
        bright background markers with Xterm256 replacement
        """
        return self.ansi_bright_bgs.get(ansimatch.group(), "")

    def sub_xterm256(self, rgbmatch, convert=False):
        """
        This is a replacer method called by `re.sub` with the matched
        tag. It must return the correct ansi sequence.

        It checks `self.do_xterm256` to determine if conversion
        to standard ANSI should be done or not.
        """
        if not rgbmatch:
            return ""
        # get tag, stripping the initial marker
        rgbtag = rgbmatch.group()[1:]

        # '[' right after the marker means a background tag ({[123 / %[123).
        background = rgbtag[0] == '['
        if background:
            red, green, blue = int(rgbtag[1]), int(rgbtag[2]), int(rgbtag[3])
        else:
            red, green, blue = int(rgbtag[0]), int(rgbtag[1]), int(rgbtag[2])

        if convert:
            # Index into the xterm256 6x6x6 color cube (values 16-231).
            colval = 16 + (red * 36) + (green * 6) + blue
            #print "RGB colours:", red, green, blue
            # Builds "\033[38;5;Nm" (48 for background) digit by digit.
            # NOTE(review): relies on Python 2 integer division
            # (colval/100); under Python 3 this would emit floats.
            return "\033[%s8;5;%s%s%sm" % (3 + int(background), colval/100, (colval % 100)/10, colval%10)
        else:
            #print "ANSI convert:", red, green, blue
            # xterm256 not supported, convert the rgb value to ansi instead
            if red == green and red == blue and red < 2:
                if background:
                    return ANSI_BACK_BLACK
                elif red >= 1:
                    return ANSI_HILITE + ANSI_BLACK
                else:
                    return ANSI_NORMAL + ANSI_BLACK
            elif red == green and red == blue:
                if background:
                    return ANSI_BACK_WHITE
                elif red >= 4:
                    return ANSI_HILITE + ANSI_WHITE
                else:
                    return ANSI_NORMAL + ANSI_WHITE
            elif red > green and red > blue:
                if background:
                    return ANSI_BACK_RED
                elif red >= 3:
                    return ANSI_HILITE + ANSI_RED
                else:
                    return ANSI_NORMAL + ANSI_RED
            elif red == green and red > blue:
                if background:
                    return ANSI_BACK_YELLOW
                elif red >= 3:
                    return ANSI_HILITE + ANSI_YELLOW
                else:
                    return ANSI_NORMAL + ANSI_YELLOW
            elif red == blue and red > green:
                if background:
                    return ANSI_BACK_MAGENTA
                elif red >= 3:
                    return ANSI_HILITE + ANSI_MAGENTA
                else:
                    return ANSI_NORMAL + ANSI_MAGENTA
            elif green > blue:
                if background:
                    return ANSI_BACK_GREEN
                elif green >= 3:
                    return ANSI_HILITE + ANSI_GREEN
                else:
                    return ANSI_NORMAL + ANSI_GREEN
            elif green == blue:
                if background:
                    return ANSI_BACK_CYAN
                elif green >= 3:
                    return ANSI_HILITE + ANSI_CYAN
                else:
                    return ANSI_NORMAL + ANSI_CYAN
            else:    # mostly blue
                if background:
                    return ANSI_BACK_BLUE
                elif blue >= 3:
                    return ANSI_HILITE + ANSI_BLUE
                else:
                    return ANSI_NORMAL + ANSI_BLUE

    def strip_raw_codes(self, string):
        """
        Strips raw ANSI codes from a string.
        """
        return self.ansi_regex.sub("", string)

    def strip_mxp(self, string):
        """
        Strips all MXP codes from a string.
        """
        # Keep only the captured link text (group 2).
        return self.mxp_sub.sub(r'\2', string)

    def parse_ansi(self, string, strip_ansi=False, xterm256=False, mxp=False):
        """
        Parses a string, subbing color codes according to
        the stored mapping.

        strip_ansi flag instead removes all ANSI markup.
        """
        # ANSIStrings were already parsed at construction; return the
        # requested view without re-parsing.
        if hasattr(string, '_raw_string'):
            if strip_ansi:
                return string.clean()
            else:
                return string.raw()
        if not string:
            return ''
        # check cached parsings
        global _PARSE_CACHE
        cachekey = "%s-%s-%s" % (string, strip_ansi, xterm256)
        if cachekey in _PARSE_CACHE:
            return _PARSE_CACHE[cachekey]
        # pre-convert bright colors to xterm256 color tags
        string = self.brightbg_sub.sub(self.sub_brightbg, string)

        def do_xterm256(part):
            # Closure binding the xterm256 flag for the regex replacer.
            return self.sub_xterm256(part, xterm256)

        in_string = utils.to_str(string)
        # do string replacement
        parsed_string = ""
        # ansi_escapes has a capture group, so split() alternates
        # text/separator; the trailing " " sentinel keeps zip() aligned.
        parts = self.ansi_escapes.split(in_string) + [" "]
        for part, sep in zip(parts[::2], parts[1::2]):
            pstring = self.xterm256_sub.sub(do_xterm256, part)
            pstring = self.ansi_sub.sub(self.sub_ansi, pstring)
            # sep[0] collapses a doubled escape; .strip() erases the sentinel.
            parsed_string += "%s%s" % (pstring, sep[0].strip())
        if not mxp:
            parsed_string = self.strip_mxp(parsed_string)
        if strip_ansi:
            # remove all ansi codes (including those manually
            # inserted in string)
            return self.strip_raw_codes(parsed_string)
        # cache and crop old cache
        _PARSE_CACHE[cachekey] = parsed_string
        if len(_PARSE_CACHE) > _PARSE_CACHE_SIZE:
            _PARSE_CACHE.popitem(last=False)
        return parsed_string

    # Mapping using {r {n etc
    # Class-level lookup tables and precompiled regexes, shared by all
    # ANSIParser instances.
    hilite = ANSI_HILITE
    unhilite = ANSI_UNHILITE
    ext_ansi_map = [
        (r'{n', ANSI_NORMAL),  # reset
        (r'{/', ANSI_RETURN),  # line break
        (r'{-', ANSI_TAB),  # tab
        (r'{_', ANSI_SPACE),  # space
        (r'{*', ANSI_INVERSE),  # invert
        (r'{^', ANSI_BLINK),  # blinking text (very annoying and not supported by all clients)
        (r'{r', hilite + ANSI_RED),
        (r'{g', hilite + ANSI_GREEN),
        (r'{y', hilite + ANSI_YELLOW),
        (r'{b', hilite + ANSI_BLUE),
        (r'{m', hilite + ANSI_MAGENTA),
        (r'{c', hilite + ANSI_CYAN),
        (r'{w', hilite + ANSI_WHITE),  # pure white
        (r'{x', hilite + ANSI_BLACK),  # dark grey
        (r'{R', unhilite + ANSI_RED),
        (r'{G', unhilite + ANSI_GREEN),
        (r'{Y', unhilite + ANSI_YELLOW),
        (r'{B', unhilite + ANSI_BLUE),
        (r'{M', unhilite + ANSI_MAGENTA),
        (r'{C', unhilite + ANSI_CYAN),
        (r'{W', unhilite + ANSI_WHITE),  # light grey
        (r'{X', unhilite + ANSI_BLACK),  # pure black
        # hilight-able colors
        (r'{h', hilite),
        (r'{H', unhilite),
        (r'{!R', ANSI_RED),
        (r'{!G', ANSI_GREEN),
        (r'{!Y', ANSI_YELLOW),
        (r'{!B', ANSI_BLUE),
        (r'{!M', ANSI_MAGENTA),
        (r'{!C', ANSI_CYAN),
        (r'{!W', ANSI_WHITE),  # light grey
        (r'{!X', ANSI_BLACK),  # pure black
        # normal ANSI backgrounds
        (r'{[R', ANSI_BACK_RED),
        (r'{[G', ANSI_BACK_GREEN),
        (r'{[Y', ANSI_BACK_YELLOW),
        (r'{[B', ANSI_BACK_BLUE),
        (r'{[M', ANSI_BACK_MAGENTA),
        (r'{[C', ANSI_BACK_CYAN),
        (r'{[W', ANSI_BACK_WHITE),  # light grey background
        (r'{[X', ANSI_BACK_BLACK)  # pure black background
    ]

    ansi_bright_bgs = [
        # "bright" ANSI backgrounds using xterm256 since ANSI
        # standard does not support it (will
        # fallback to dark ANSI background colors if xterm256
        # is not supported by client)
        (r'{[r', r'{[500'),
        (r'{[g', r'{[050'),
        (r'{[y', r'{[550'),
        (r'{[b', r'{[005'),
        (r'{[m', r'{[505'),
        (r'{[c', r'{[055'),
        (r'{[w', r'{[555'),  # white background
        (r'{[x', r'{[222')]  # dark grey background

    # xterm256 {123, %c134. These are replaced directly by
    # the sub_xterm256 method
    xterm256_map = [
        (r'%[0-5]{3}', ""),  # %123 - foreground colour
        (r'%\[[0-5]{3}', ""),  # %[123 - background colour
        (r'\{[0-5]{3}', ""),  # {123 - foreground colour
        (r'\{\[[0-5]{3}', "")  # {[123 - background colour
    ]

    # MXP link markup: {lc<command>{lt<text>{le
    mxp_re = r'\{lc(.*?)\{lt(.*?)\{le'

    # prepare regex matching
    brightbg_sub = re.compile(r"|".join([re.escape(tup[0]) for tup in ansi_bright_bgs]), re.DOTALL)
    xterm256_sub = re.compile(r"|".join([tup[0] for tup in xterm256_map]), re.DOTALL)
    ansi_sub = re.compile(r"|".join([re.escape(tup[0]) for tup in ext_ansi_map]), re.DOTALL)
    mxp_sub = re.compile(mxp_re, re.DOTALL)

    # used by regex replacer to correctly map ansi sequences
    # (note: ansi_bright_bgs is rebound from list-of-pairs to dict here)
    ansi_map = dict(ext_ansi_map)
    ansi_bright_bgs = dict(ansi_bright_bgs)

    # prepare matching ansi codes overall
    ansi_re = r"\033\[[0-9;]+m"
    ansi_regex = re.compile(ansi_re)

    # escapes - these double-chars will be replaced with a single
    # instance of each
    ansi_escapes = re.compile(r"(%s)" % "|".join(ANSI_ESCAPES), re.DOTALL)
# Module-level singleton parser used as the default by the access
# functions below.
ANSI_PARSER = ANSIParser()


#
# Access function
#
def parse_ansi(string, strip_ansi=False, parser=ANSI_PARSER, xterm256=False, mxp=False):
    """
    Parses a string, subbing color codes as needed.
    """
    # Thin module-level wrapper around the parser singleton.
    result = parser.parse_ansi(
        string, strip_ansi=strip_ansi, xterm256=xterm256, mxp=mxp)
    return result
def strip_raw_ansi(string, parser=ANSI_PARSER):
    """
    Remove raw ansi codes from string
    """
    stripped = parser.strip_raw_codes(string)
    return stripped
def raw(string):
    """
    Escapes a string into a form which won't be colorized by the ansi parser.
    """
    # Doubling '{' is the parser's escape convention.
    return "{{".join(string.split('{'))
def group(lst, n):
    """Yield successive n-tuples from lst, dropping any incomplete tail."""
    for start in range(0, len(lst), n):
        chunk = tuple(lst[start:start + n])
        if len(chunk) == n:
            yield chunk
def _spacing_preflight(func):
    """
    This wrapper function is used to do some preflight checks on functions used
    for padding ANSIStrings.
    """
    def checked(self, width, fillchar=None):
        fillchar = " " if fillchar is None else fillchar
        # len() runs first so non-sized fillchars raise exactly as before.
        if len(fillchar) != 1 or not isinstance(fillchar, basestring):
            raise TypeError("must be char, not %s" % type(fillchar))
        if not isinstance(width, int):
            raise TypeError("integer argument expected, got %s" % type(width))
        padding = width - len(self)
        if padding <= 0:
            # Already wide enough: nothing to pad.
            return self
        return func(self, width, fillchar, padding)
    return checked
def _query_super(func_name):
    """
    Have the string class handle this with the cleaned string instead of
    ANSIString.
    """
    def proxy(self, *args, **kwargs):
        # Dispatch to the named method on the ANSI-free string.
        method = getattr(self.clean(), func_name)
        return method(*args, **kwargs)
    return proxy
def _on_raw(func_name):
    """
    Like query_super, but makes the operation run on the raw string.
    """
    def proxy(self, *args, **kwargs):
        args = list(args)
        if args:
            # If the first argument is itself an ANSIString, operate on
            # its raw form so escapes line up with ours.
            head = args[0]
            if hasattr(head, '_raw_string'):
                args[0] = head.raw()
        result = getattr(self._raw_string, func_name)(*args, **kwargs)
        if isinstance(result, basestring):
            # Re-wrap string results; they are already decoded.
            result = ANSIString(result, decoded=True)
        return result
    return proxy
def _transform(func_name):
"""
Some string functions, like those manipulating capital letters,
return a string the same length as the original. This function
allows us to do the same, replacing all the non-coded characters
with the resulting string.
"""
def wrapped(self, *args, **kwargs):
replacement_string = _query_super(func_name)(self, *args, **kwargs)
to_string = []
char_counter = 0
for index in range(0, len(self._raw_string)):
if index in self._code_indexes:
to_string.append(self._raw_string[index])
elif index in self._char_indexes:
to_string.append(replacement_string[char_counter])
char_counter += 1
return ANSIString(
''.join(to_string), decoded=True,
code_indexes=self._code_indexes, char_indexes=self._char_indexes,
clean_string=replacement_string)
return wrapped
class ANSIMeta(type):
"""
Many functions on ANSIString are just light wrappers around the unicode
base class. We apply them here, as part of the classes construction.
"""
def __init__(cls, *args, **kwargs):
for func_name in [
'count', 'startswith', 'endswith', 'find', 'index', 'isalnum',
'isalpha', 'isdigit', 'islower', 'isspace', 'istitle', 'isupper',
'rfind', 'rindex', '__len__']:
setattr(cls, func_name, _query_super(func_name))
for func_name in [
'__mod__', 'expandtabs', 'decode', 'replace', 'format',
'encode']:
setattr(cls, func_name, _on_raw(func_name))
for func_name in [
'capitalize', 'translate', 'lower', 'upper', 'swapcase']:
setattr(cls, func_name, _transform(func_name))
super(ANSIMeta, cls).__init__(*args, **kwargs)
class ANSIString(unicode):
"""
String-like object that is aware of ANSI codes.
This isn't especially efficient, as it doesn't really have an
understanding of what the codes mean in order to eliminate
redundant characters. This could be made as an enhancement to ANSI_PARSER.
If one is going to use ANSIString, one should generally avoid converting
away from it until one is about to send information on the wire. This is
because escape sequences in the string may otherwise already be decoded,
and taken literally the second time around.
Please refer to the Metaclass, ANSIMeta, which is used to apply wrappers
for several of the methods that need not be defined directly here.
"""
__metaclass__ = ANSIMeta
def __new__(cls, *args, **kwargs):
"""
When creating a new ANSIString, you may use a custom parser that has
the same attributes as the standard one, and you may declare the
string to be handled as already decoded. It is important not to double
decode strings, as escapes can only be respected once.
Internally, ANSIString can also passes itself precached code/character
indexes and clean strings to avoid doing extra work when combining
ANSIStrings.
"""
string = args[0]
if not isinstance(string, basestring):
string = to_str(string, force_string=True)
parser = kwargs.get('parser', ANSI_PARSER)
decoded = kwargs.get('decoded', False) or hasattr(string, '_raw_string')
code_indexes = kwargs.pop('code_indexes', None)
char_indexes = kwargs.pop('char_indexes', None)
clean_string = kwargs.pop('clean_string', None)
# All True, or All False, not just one.
checks = map(lambda x: x is None, [code_indexes, char_indexes, clean_string])
if not len(set(checks)) == 1:
raise ValueError("You must specify code_indexes, char_indexes, "
"and clean_string together, or not at all.")
if not all(checks):
decoded = True
if not decoded:
# Completely new ANSI String
clean_string = to_unicode(parser.parse_ansi(string, strip_ansi=True, mxp=True))
string = parser.parse_ansi(string, xterm256=True, mxp=True)
elif clean_string is not None:
# We have an explicit clean string.
pass
elif hasattr(string, '_clean_string'):
# It's already an ANSIString
clean_string = string._clean_string
code_indexes = string._code_indexes
char_indexes = string._char_indexes
string = string._raw_string
else:
# It's a string that has been pre-ansi decoded.
clean_string = parser.strip_raw_codes(string)
if not isinstance(string, unicode):
string = string.decode('utf-8')
ansi_string = super(ANSIString, cls).__new__(ANSIString, to_str(clean_string), "utf-8")
ansi_string._raw_string = string
ansi_string._clean_string = clean_string
ansi_string._code_indexes = code_indexes
ansi_string._char_indexes = char_indexes
return ansi_string
def __str__(self):
return self._raw_string.encode('utf-8')
def __unicode__(self):
"""
Unfortunately, this is not called during print() statements due to a
bug in the Python interpreter. You can always do unicode() or str()
around the resulting ANSIString and print that.
"""
return self._raw_string
def __repr__(self):
"""
Let's make the repr the command that would actually be used to
construct this object, for convenience and reference.
"""
return "ANSIString(%s, decoded=True)" % repr(self._raw_string)
def __init__(self, *_, **kwargs):
"""
When the ANSIString is first initialized, a few internal variables
have to be set.
The first is the parser. It is possible to replace Evennia's standard
ANSI parser with one of your own syntax if you wish, so long as it
implements the same interface.
The second is the _raw_string. It should be noted that the ANSIStrings
are unicode based. This seemed more reasonable than basing it off of
the string class, because if someone were to use a unicode character,
the benefits of knowing the indexes of the ANSI characters would be
negated by the fact that a character within the string might require
more than one byte to be represented. The raw string is, then, a
unicode object rather than a true encoded string. If you need the
encoded string for sending over the wire, try using the .encode()
method.
The third thing to set is the _clean_string. This is a unicode object
that is devoid of all ANSI Escapes.
Finally, _code_indexes and _char_indexes are defined. These are lookup
tables for which characters in the raw string are related to ANSI
escapes, and which are for the readable text.
"""
self.parser = kwargs.pop('parser', ANSI_PARSER)
super(ANSIString, self).__init__()
if self._code_indexes is None:
self._code_indexes, self._char_indexes = self._get_indexes()
@staticmethod
def _shifter(iterable, offset):
"""
Takes a list of integers, and produces a new one incrementing all
by a number.
"""
return [i + offset for i in iterable]
@classmethod
def _adder(cls, first, second):
"""
Joins two ANSIStrings, preserving calculated info.
"""
raw_string = first._raw_string + second._raw_string
clean_string = first._clean_string + second._clean_string
code_indexes = first._code_indexes[:]
char_indexes = first._char_indexes[:]
code_indexes.extend(
cls._shifter(second._code_indexes, len(first._raw_string)))
char_indexes.extend(
cls._shifter(second._char_indexes, len(first._raw_string)))
return ANSIString(raw_string, code_indexes=code_indexes,
char_indexes=char_indexes,
clean_string=clean_string)
def __add__(self, other):
"""
We have to be careful when adding two strings not to reprocess things
that don't need to be reprocessed, lest we end up with escapes being
interpreted literally.
"""
if not isinstance(other, basestring):
return NotImplemented
if not isinstance(other, ANSIString):
other = ANSIString(other)
return self._adder(self, other)
def __radd__(self, other):
"""
Likewise, if we're on the other end.
"""
if not isinstance(other, basestring):
return NotImplemented
if not isinstance(other, ANSIString):
other = ANSIString(other)
return self._adder(other, self)
def __getslice__(self, i, j):
"""
This function is deprecated, so we just make it call the proper
function.
"""
return self.__getitem__(slice(i, j))
def _slice(self, slc):
"""
This function takes a slice() object.
Slices have to be handled specially. Not only are they able to specify
a start and end with [x:y], but many forget that they can also specify
an interval with [x:y:z]. As a result, not only do we have to track
the ANSI Escapes that have played before the start of the slice, we
must also replay any in these intervals, should they exist.
Thankfully, slicing the _char_indexes table gives us the actual
indexes that need slicing in the raw string. We can check between
those indexes to figure out what escape characters need to be
replayed.
"""
slice_indexes = self._char_indexes[slc]
# If it's the end of the string, we need to append final color codes.
if not slice_indexes:
return ANSIString('')
try:
string = self[slc.start]._raw_string
except IndexError:
return ANSIString('')
last_mark = slice_indexes[0]
# Check between the slice intervals for escape sequences.
i = None
for i in slice_indexes[1:]:
for index in xrange(last_mark, i):
if index in self._code_indexes:
string += self._raw_string[index]
last_mark = i
try:
string += self._raw_string[i]
except IndexError:
pass
if i is not None:
append_tail = self._get_interleving(self._char_indexes.index(i) + 1)
else:
append_tail = ''
return ANSIString(string + append_tail, decoded=True)
def __getitem__(self, item):
"""
Gateway for slices and getting specific indexes in the ANSIString. If
this is a regexable ANSIString, it will get the data from the raw
string instead, bypassing ANSIString's intelligent escape skipping,
for reasons explained in the __new__ method's docstring.
"""
if isinstance(item, slice):
# Slices must be handled specially.
return self._slice(item)
try:
self._char_indexes[item]
except IndexError:
raise IndexError("ANSIString Index out of range")
# Get character codes after the index as well.
if self._char_indexes[-1] == self._char_indexes[item]:
append_tail = self._get_interleving(item + 1)
else:
append_tail = ''
item = self._char_indexes[item]
clean = self._raw_string[item]
result = ''
# Get the character they're after, and replay all escape sequences
# previous to it.
for index in xrange(0, item + 1):
if index in self._code_indexes:
result += self._raw_string[index]
return ANSIString(result + clean + append_tail, decoded=True)
def clean(self):
"""
Return a unicode object without the ANSI escapes.
"""
return self._clean_string
def raw(self):
"""
Return a unicode object with the ANSI escapes.
"""
return self._raw_string
def partition(self, sep, reverse=False):
"""
Similar to split, but always creates a tuple with three items:
1. The part before the separator
2. The separator itself.
3. The part after.
We use the same techniques we used in split() to make sure each are
colored.
"""
if hasattr(sep, '_clean_string'):
sep = sep.clean()
if reverse:
parent_result = self._clean_string.rpartition(sep)
else:
parent_result = self._clean_string.partition(sep)
current_index = 0
result = tuple()
for section in parent_result:
result += (self[current_index:current_index + len(section)],)
current_index += len(section)
return result
def _get_indexes(self):
"""
Two tables need to be made, one which contains the indexes of all
readable characters, and one which contains the indexes of all ANSI
escapes. It's important to remember that ANSI escapes require more
that one character at a time, though no readable character needs more
than one character, since the unicode base class abstracts that away
from us. However, several readable characters can be placed in a row.
We must use regexes here to figure out where all the escape sequences
are hiding in the string. Then we use the ranges of their starts and
ends to create a final, comprehensive list of all indexes which are
dedicated to code, and all dedicated to text.
It's possible that only one of these tables is actually needed, the
other assumed to be what isn't in the first.
"""
code_indexes = []
for match in self.parser.ansi_regex.finditer(self._raw_string):
code_indexes.extend(range(match.start(), match.end()))
if not code_indexes:
# Plain string, no ANSI codes.
return code_indexes, range(0, len(self._raw_string))
# all indexes not occupied by ansi codes are normal characters
char_indexes = [i for i in range(len(self._raw_string)) if i not in code_indexes]
return code_indexes, char_indexes
def _get_interleving(self, index):
"""
Get the code characters from the given slice end to the next
character.
"""
try:
index = self._char_indexes[index - 1]
except IndexError:
return ''
s = ''
while True:
index += 1
if index in self._char_indexes:
break
elif index in self._code_indexes:
s += self._raw_string[index]
else:
break
return s
def split(self, by, maxsplit=-1):
"""
Stolen from PyPy's pure Python string implementation, tweaked for
ANSIString.
PyPy is distributed under the MIT licence.
http://opensource.org/licenses/MIT
"""
bylen = len(by)
if bylen == 0:
raise ValueError("empty separator")
res = []
start = 0
while maxsplit != 0:
next = self._clean_string.find(by, start)
if next < 0:
break
# Get character codes after the index as well.
res.append(self[start:next])
start = next + bylen
maxsplit -= 1 # NB. if it's already < 0, it stays < 0
res.append(self[start:len(self)])
return res
def __mul__(self, other):
"""
Multiplication method. Implemented for performance reasons.
"""
if not isinstance(other, int):
return NotImplemented
raw_string = self._raw_string * other
clean_string = self._clean_string * other
code_indexes = self._code_indexes[:]
char_indexes = self._char_indexes[:]
for i in range(1, other + 1):
code_indexes.extend(
self._shifter(self._code_indexes, i * len(self._raw_string)))
char_indexes.extend(
self._shifter(self._char_indexes, i * len(self._raw_string)))
return ANSIString(
raw_string, code_indexes=code_indexes, char_indexes=char_indexes,
clean_string=clean_string)
def __rmul__(self, other):
return self.__mul__(other)
def rsplit(self, by, maxsplit=-1):
"""
Stolen from PyPy's pure Python string implementation, tweaked for
ANSIString.
PyPy is distributed under the MIT licence.
http://opensource.org/licenses/MIT
"""
res = []
end = len(self)
bylen = len(by)
if bylen == 0:
raise ValueError("empty separator")
while maxsplit != 0:
next = self._clean_string.rfind(by, 0, end)
if next < 0:
break
# Get character codes after the index as well.
res.append(self[next+bylen:end])
end = next
maxsplit -= 1 # NB. if it's already < 0, it stays < 0
res.append(self[:end])
res.reverse()
return res
def join(self, iterable):
"""
Joins together strings in an iterable.
"""
result = ANSIString('')
last_item = None
for item in iterable:
if last_item is not None:
result += self._raw_string
if not isinstance(item, ANSIString):
item = ANSIString(item)
result += item
last_item = item
return result
def _filler(self, char, amount):
"""
Generate a line of characters in a more efficient way than just adding
ANSIStrings.
"""
if not isinstance(char, ANSIString):
line = char * amount
return ANSIString(
char * amount, code_indexes=[], char_indexes=range(0, len(line)),
clean_string=char)
try:
start = char._code_indexes[0]
except IndexError:
start = None
end = char._char_indexes[0]
prefix = char._raw_string[start:end]
postfix = char._raw_string[end + 1:]
line = char._clean_string * amount
code_indexes = [i for i in range(0, len(prefix))]
length = len(prefix) + len(line)
code_indexes.extend([i for i in range(length, length + len(postfix))])
char_indexes = self._shifter(xrange(0, len(line)), len(prefix))
raw_string = prefix + line + postfix
return ANSIString(
raw_string, clean_string=line, char_indexes=char_indexes,
code_indexes=code_indexes)
@_spacing_preflight
def center(self, width, fillchar, difference):
"""
Center some text with some spaces padding both sides.
"""
remainder = difference % 2
difference /= 2
spacing = self._filler(fillchar, difference)
result = spacing + self + spacing + self._filler(fillchar, remainder)
return result
@_spacing_preflight
def ljust(self, width, fillchar, difference):
"""
Left justify some text.
"""
return self + self._filler(fillchar, difference)
@_spacing_preflight
def rjust(self, width, fillchar, difference):
"""
Right justify some text.
"""
return self._filler(fillchar, difference) + self | en | 0.873319 | ANSI - Gives colour to text. Use the codes defined in ANSIPARSER in your text to apply colour to text according to the ANSI standard. Examples: This is %crRed text%cn and this is normal again. This is {rRed text{n and this is normal again. Mostly you should not need to call parse_ansi() explicitly; it is run by Evennia just before returning data to/from the user. # ANSI definitions # Foreground colors # Background colors # Formatting Characters # Escapes A class that parses ANSI markup to ANSI command sequences We also allow to escape colour codes by prepending with a \ for xterm256, an extra { for Merc-style codes Replacer used by `re.sub` to replace ANSI markers with correct ANSI sequences Replacer used by `re.sub` to replace ANSI bright background markers with Xterm256 replacement This is a replacer method called by `re.sub` with the matched tag. It must return the correct ansi sequence. It checks `self.do_xterm256` to determine if conversion to standard ANSI should be done or not. # get tag, stripping the initial marker #print "RGB colours:", red, green, blue #print "ANSI convert:", red, green, blue # xterm256 not supported, convert the rgb value to ansi instead # mostly blue Strips raw ANSI codes from a string. Strips all MXP codes from a string. Parses a string, subbing color codes according to the stored mapping. strip_ansi flag instead removes all ANSI markup. 
# check cached parsings # pre-convert bright colors to xterm256 color tags # do string replacement # remove all ansi codes (including those manually # inserted in string) # cache and crop old cache # Mapping using {r {n etc # reset # line break # tab # space # invert # blinking text (very annoying and not supported by all clients) # pure white # dark grey # light grey # pure black # hilight-able colors # light grey # pure black # normal ANSI backgrounds # light grey background # pure black background # "bright" ANSI backgrounds using xterm256 since ANSI # standard does not support it (will # fallback to dark ANSI background colors if xterm256 # is not supported by client) # white background # dark grey background # xterm256 {123, %c134. These are replaced directly by # the sub_xterm256 method # %123 - foreground colour # %[123 - background colour # {123 - foreground colour # {[123 - background colour # prepare regex matching # used by regex replacer to correctly map ansi sequences # prepare matching ansi codes overall # escapes - these double-chars will be replaced with a single # instance of each # # Access function # Parses a string, subbing color codes as needed. Remove raw ansi codes from string Escapes a string into a form which won't be colorized by the ansi parser. This wrapper function is used to do some preflight checks on functions used for padding ANSIStrings. Have the string class handle this with the cleaned string instead of ANSIString. Like query_super, but makes the operation run on the raw string. Some string functions, like those manipulating capital letters, return a string the same length as the original. This function allows us to do the same, replacing all the non-coded characters with the resulting string. Many functions on ANSIString are just light wrappers around the unicode base class. We apply them here, as part of the classes construction. String-like object that is aware of ANSI codes. 
This isn't especially efficient, as it doesn't really have an understanding of what the codes mean in order to eliminate redundant characters. This could be made as an enhancement to ANSI_PARSER. If one is going to use ANSIString, one should generally avoid converting away from it until one is about to send information on the wire. This is because escape sequences in the string may otherwise already be decoded, and taken literally the second time around. Please refer to the Metaclass, ANSIMeta, which is used to apply wrappers for several of the methods that need not be defined directly here. When creating a new ANSIString, you may use a custom parser that has the same attributes as the standard one, and you may declare the string to be handled as already decoded. It is important not to double decode strings, as escapes can only be respected once. Internally, ANSIString can also passes itself precached code/character indexes and clean strings to avoid doing extra work when combining ANSIStrings. # All True, or All False, not just one. # Completely new ANSI String # We have an explicit clean string. # It's already an ANSIString # It's a string that has been pre-ansi decoded. Unfortunately, this is not called during print() statements due to a bug in the Python interpreter. You can always do unicode() or str() around the resulting ANSIString and print that. Let's make the repr the command that would actually be used to construct this object, for convenience and reference. When the ANSIString is first initialized, a few internal variables have to be set. The first is the parser. It is possible to replace Evennia's standard ANSI parser with one of your own syntax if you wish, so long as it implements the same interface. The second is the _raw_string. It should be noted that the ANSIStrings are unicode based. 
This seemed more reasonable than basing it off of the string class, because if someone were to use a unicode character, the benefits of knowing the indexes of the ANSI characters would be negated by the fact that a character within the string might require more than one byte to be represented. The raw string is, then, a unicode object rather than a true encoded string. If you need the encoded string for sending over the wire, try using the .encode() method. The third thing to set is the _clean_string. This is a unicode object that is devoid of all ANSI Escapes. Finally, _code_indexes and _char_indexes are defined. These are lookup tables for which characters in the raw string are related to ANSI escapes, and which are for the readable text. Takes a list of integers, and produces a new one incrementing all by a number. Joins two ANSIStrings, preserving calculated info. We have to be careful when adding two strings not to reprocess things that don't need to be reprocessed, lest we end up with escapes being interpreted literally. Likewise, if we're on the other end. This function is deprecated, so we just make it call the proper function. This function takes a slice() object. Slices have to be handled specially. Not only are they able to specify a start and end with [x:y], but many forget that they can also specify an interval with [x:y:z]. As a result, not only do we have to track the ANSI Escapes that have played before the start of the slice, we must also replay any in these intervals, should they exist. Thankfully, slicing the _char_indexes table gives us the actual indexes that need slicing in the raw string. We can check between those indexes to figure out what escape characters need to be replayed. # If it's the end of the string, we need to append final color codes. # Check between the slice intervals for escape sequences. Gateway for slices and getting specific indexes in the ANSIString. 
If this is a regexable ANSIString, it will get the data from the raw string instead, bypassing ANSIString's intelligent escape skipping, for reasons explained in the __new__ method's docstring. # Slices must be handled specially. # Get character codes after the index as well. # Get the character they're after, and replay all escape sequences # previous to it. Return a unicode object without the ANSI escapes. Return a unicode object with the ANSI escapes. Similar to split, but always creates a tuple with three items: 1. The part before the separator 2. The separator itself. 3. The part after. We use the same techniques we used in split() to make sure each are colored. Two tables need to be made, one which contains the indexes of all readable characters, and one which contains the indexes of all ANSI escapes. It's important to remember that ANSI escapes require more that one character at a time, though no readable character needs more than one character, since the unicode base class abstracts that away from us. However, several readable characters can be placed in a row. We must use regexes here to figure out where all the escape sequences are hiding in the string. Then we use the ranges of their starts and ends to create a final, comprehensive list of all indexes which are dedicated to code, and all dedicated to text. It's possible that only one of these tables is actually needed, the other assumed to be what isn't in the first. # Plain string, no ANSI codes. # all indexes not occupied by ansi codes are normal characters Get the code characters from the given slice end to the next character. Stolen from PyPy's pure Python string implementation, tweaked for ANSIString. PyPy is distributed under the MIT licence. http://opensource.org/licenses/MIT # Get character codes after the index as well. # NB. if it's already < 0, it stays < 0 Multiplication method. Implemented for performance reasons. Stolen from PyPy's pure Python string implementation, tweaked for ANSIString. 
PyPy is distributed under the MIT licence. http://opensource.org/licenses/MIT # Get character codes after the index as well. # NB. if it's already < 0, it stays < 0 Joins together strings in an iterable. Generate a line of characters in a more efficient way than just adding ANSIStrings. Center some text with some spaces padding both sides. Left justify some text. Right justify some text. | 3.673561 | 4 |
shaka/tools/make_license_file.py | jgongo/shaka-player-embedded | 185 | 6624963 | <reponame>jgongo/shaka-player-embedded
#!/usr/bin/python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Prepares a licenses.txt file for the package.
The license paths file is expected to contain a series of titles and paths to
output. The paths should be relative to the file itself. For example,
Title: path/to/LICENSE
The extra licenses file is expected to contain input similar to the output;
a series of licenses, each with a title on a separate line prefixed by '@'.
This file is used in the demo to display the various licenses for the
dependencies of the project.
"""
import argparse
import os
import sys
import json
def _GenLicensesFile(out, paths, extras, base_path):
"""Reads the input files, and writes a licenses.txt file to the given output.
Args:
out: A file object for the output.
paths: A file object for the paths file.
extras: A file object for the extra licenses file.
base_path: The URL base used to resolve the relative URLs in the paths file.
"""
licenses = []
for line in paths:
name, path = line.split(': ', 1)
path = os.path.join(base_path, path.rstrip('\n'))
with open(path, 'r') as file:
licenses.append({'name': name, 'text': file.read()})
while True:
name = extras.readline()
if not name: break
text = extras.readline().replace('\\n', '\n')
licenses.append({'name': name, 'text': text})
out.write(json.dumps(licenses))
def main(argv):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--paths-file', required=True,
help='A file that contains paths to licenses.')
parser.add_argument('--extras-file', required=True,
help='A file that contains extra license text, ' +
'copied verbatim.')
parser.add_argument('--output', required=True,
help='The path to the file to generate.')
parsed_args = parser.parse_args(argv)
output_dir = os.path.dirname(parsed_args.output)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
with open(parsed_args.output, 'w') as out:
with open(parsed_args.paths_file, 'r') as paths:
with open(parsed_args.extras_file, 'r') as extras:
base_path = os.path.dirname(parsed_args.paths_file)
_GenLicensesFile(out, paths, extras, base_path);
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| #!/usr/bin/python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Prepares a licenses.txt file for the package.
The license paths file is expected to contain a series of titles and paths to
output. The paths should be relative to the file itself. For example,
Title: path/to/LICENSE
The extra licenses file is expected to contain input similar to the output;
a series of licenses, each with a title on a separate line prefixed by '@'.
This file is used in the demo to display the various licenses for the
dependencies of the project.
"""
import argparse
import os
import sys
import json
def _GenLicensesFile(out, paths, extras, base_path):
"""Reads the input files, and writes a licenses.txt file to the given output.
Args:
out: A file object for the output.
paths: A file object for the paths file.
extras: A file object for the extra licenses file.
base_path: The URL base used to resolve the relative URLs in the paths file.
"""
licenses = []
for line in paths:
name, path = line.split(': ', 1)
path = os.path.join(base_path, path.rstrip('\n'))
with open(path, 'r') as file:
licenses.append({'name': name, 'text': file.read()})
while True:
name = extras.readline()
if not name: break
text = extras.readline().replace('\\n', '\n')
licenses.append({'name': name, 'text': text})
out.write(json.dumps(licenses))
def main(argv):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--paths-file', required=True,
help='A file that contains paths to licenses.')
parser.add_argument('--extras-file', required=True,
help='A file that contains extra license text, ' +
'copied verbatim.')
parser.add_argument('--output', required=True,
help='The path to the file to generate.')
parsed_args = parser.parse_args(argv)
output_dir = os.path.dirname(parsed_args.output)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
with open(parsed_args.output, 'w') as out:
with open(parsed_args.paths_file, 'r') as paths:
with open(parsed_args.extras_file, 'r') as extras:
base_path = os.path.dirname(parsed_args.paths_file)
_GenLicensesFile(out, paths, extras, base_path);
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:])) | en | 0.860253 | #!/usr/bin/python # Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Prepares a licenses.txt file for the package. The license paths file is expected to contain a series of titles and paths to output. The paths should be relative to the file itself. For example, Title: path/to/LICENSE The extra licenses file is expected to contain input similar to the output; a series of licenses, each with a title on a separate line prefixed by '@'. This file is used in the demo to display the various licenses for the dependencies of the project. Reads the input files, and writes a licenses.txt file to the given output. Args: out: A file object for the output. paths: A file object for the paths file. extras: A file object for the extra licenses file. base_path: The URL base used to resolve the relative URLs in the paths file. | 2.716442 | 3 |
buylling.py | Tanushree28/Cyberbullying-Detection | 1 | 6624964 | <filename>buylling.py
# linear algebra
import pandas as pd
import numpy as np
import nltk
import re
from nltk.stem import PorterStemmer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score , classification_report , confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
import matplotlib.pyplot as plt
import pickle
import os
# --- Exploratory data analysis -----------------------------------------
# NOTE(review): the bare expressions below (df.head(), df.shape, ...) only
# display output in a notebook/REPL; as a plain script their results are
# discarded. They are harmless but have no effect here.
df = pd.read_csv('suspicious tweets.csv')
#top n (5 by default) rows of a data frame or series.
df.head(10)
#Return a tuple representing the dimensionality of the DataFrame
df.shape
#Detect missing values.
df.isnull().sum()
# Generate descriptive statistics.
# Descriptive statistics include those that summarize the central tendency, dispersion and shape of a dataset’s distribution, excluding NaN values
df.describe()
# Print a concise summary of a DataFrame
df.info()
# Hash table-based unique. Uniques are returned in order of appearance.
df['label'].unique()
# Return a Series containing counts of unique rows in the DataFrame.
df['label'].value_counts()
df.groupby('label').describe()
# Generate the length of each message using len.
df['length'] = df['message'].apply(len)
df.head()
# Plot pairwise relationships in a dataset.
# Create the default pairplot
sns.pairplot(df)
# A correlation matrix is a matrix that shows the correlation values of the variables in the dataset.
# Correlation matrix
def plotCorrelationMatrix(df, graphWidth):
    """Draw a correlation-matrix heatmap for the numeric columns of ``df``.

    Columns containing NaN values or with fewer than two distinct values are
    dropped first; if fewer than two usable columns remain, a message is
    printed and nothing is plotted.

    :param df: input DataFrame (a filtered copy is used for plotting).
    :param graphWidth: width/height of the square figure, in inches.
    """
    df.dataframeName = 'suspicious tweets.csv'
    filename = df.dataframeName
    # 'axis' must be passed by keyword: the positional axis argument to
    # DataFrame.dropna was deprecated in pandas 1.5 and removed in 2.0.
    df = df.dropna(axis='columns')  # drop columns with NaN
    df = df[[col for col in df if df[col].nunique() > 1]]  # keep columns with more than 1 unique value
    if df.shape[1] < 2:
        print(f'No correlation plots shown: The number of non-NaN or constant columns ({df.shape[1]}) is less than 2')
        return
    # numeric_only=True: DataFrame.corr raises on non-numeric columns since
    # pandas 2.0, and the text 'message' column survives the filters above.
    corr = df.corr(numeric_only=True)
    plt.figure(num=None, figsize=(graphWidth, graphWidth), dpi=80, facecolor='w', edgecolor='k')
    corrMat = plt.matshow(corr, fignum=1)
    plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
    plt.yticks(range(len(corr.columns)), corr.columns)
    plt.gca().xaxis.tick_bottom()
    plt.colorbar(corrMat)
    # Use the recorded dataset name rather than the '(unknown)' placeholder
    # the original title contained (the 'filename' variable was dead code).
    plt.title(f'Correlation Matrix for {filename}', fontsize=15)
    plt.show()
# Correlation matrix:
plotCorrelationMatrix(df, 8)
# counts of observations in each categorical bin using bars
# ax = sns.countplot(x="class", data=suspicious tweets)
sns.countplot(df['label'])
# pd.value_counts(...) was removed in pandas 2.0; calling value_counts on
# the Series gives the identical result.
count_Class = df['label'].value_counts(sort=True)
# Data to plot
labels = '0', '1'
sizes = [count_Class[0], count_Class[1]]
colors = ['red', 'blue']
explode = (0.1, 0.1)
# Plot
plt.pie(sizes, explode=explode, labels=labels, colors=colors, autopct='%1.1f%%', shadow=True, startangle=90)
plt.title('Percentage of 0s and 1s in column label')
plt.axis('equal')
plt.show()
# This transformer should be used to encode target values, i.e. y, and not the input X.
# Transform labels to normalized encoding.
label = LabelEncoder()
df['label'] = label.fit_transform(df['label'])
X = df['message']
y = df['label']
ps = PorterStemmer()
corpus = []
# Build the stopword set ONCE: the original rebuilt
# set(stopwords.words("english")) for every single word of every tweet.
stop_words = set(stopwords.words("english"))
for i in range(len(X)):
    print(i)
    # Keep letters only, lowercase, stem, and drop English stopwords.
    review = re.sub("[^a-zA-Z]", " ", X[i])
    review = review.lower().split()
    review = [ps.stem(word) for word in review if word not in stop_words]
    corpus.append(" ".join(review))
# NOTE(review): mid-file import kept for minimal diff; conventionally this
# belongs at the top of the module.
from sklearn.feature_extraction.text import TfidfVectorizer
cv = TfidfVectorizer(max_features=5000)
X = cv.fit_transform(corpus).toarray()
X.shape
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=101)
X_train.shape, X_test.shape, y_train.shape, y_test.shape
mnb = MultinomialNB()
mnb.fit(X_train, y_train)
pred = mnb.predict(X_test)
print(np.mean(pred == y_test))
print(accuracy_score(y_test, pred))
print(confusion_matrix(y_test, pred))
print(classification_report(y_test, pred))
pd.DataFrame(np.c_[y_test, pred], columns=["Actual", "Predicted"])
# Use context managers so the pickle file handles are always closed
# (the original leaked the open() handles).
with open("count-Vectorizer.pkl", "wb") as f:
    pickle.dump(cv, f)
with open("Cyberbullying_Detection_One.pkl", "wb") as f:
    pickle.dump(mnb, f)  # 1: pos , 0:Neg
with open('count-Vectorizer.pkl', 'rb') as f:
    save_cv = pickle.load(f)
with open('Cyberbullying_Detection_One.pkl', 'rb') as f:
    model = pickle.load(f)
# linear algebra
import pandas as pd
import numpy as np
import nltk
import re
from nltk.stem import PorterStemmer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score , classification_report , confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
import matplotlib.pyplot as plt
import pickle
import os
# --- Exploratory data analysis -----------------------------------------
# NOTE(review): the bare expressions below (df.head(), df.shape, ...) only
# display output in a notebook/REPL; as a plain script their results are
# discarded. They are harmless but have no effect here.
df = pd.read_csv('suspicious tweets.csv')
#top n (5 by default) rows of a data frame or series.
df.head(10)
#Return a tuple representing the dimensionality of the DataFrame
df.shape
#Detect missing values.
df.isnull().sum()
# Generate descriptive statistics.
# Descriptive statistics include those that summarize the central tendency, dispersion and shape of a dataset’s distribution, excluding NaN values
df.describe()
# Print a concise summary of a DataFrame
df.info()
# Hash table-based unique. Uniques are returned in order of appearance.
df['label'].unique()
# Return a Series containing counts of unique rows in the DataFrame.
df['label'].value_counts()
df.groupby('label').describe()
# Generate the length of each message using len.
df['length'] = df['message'].apply(len)
df.head()
# Plot pairwise relationships in a dataset.
# Create the default pairplot
sns.pairplot(df)
# A correlation matrix is a matrix that shows the correlation values of the variables in the dataset.
# Correlation matrix
def plotCorrelationMatrix(df, graphWidth):
    """Draw a correlation-matrix heatmap for the numeric columns of ``df``.

    Columns containing NaN values or with fewer than two distinct values are
    dropped first; if fewer than two usable columns remain, a message is
    printed and nothing is plotted.

    :param df: input DataFrame (a filtered copy is used for plotting).
    :param graphWidth: width/height of the square figure, in inches.
    """
    df.dataframeName = 'suspicious tweets.csv'
    filename = df.dataframeName
    # 'axis' must be passed by keyword: the positional axis argument to
    # DataFrame.dropna was deprecated in pandas 1.5 and removed in 2.0.
    df = df.dropna(axis='columns')  # drop columns with NaN
    df = df[[col for col in df if df[col].nunique() > 1]]  # keep columns with more than 1 unique value
    if df.shape[1] < 2:
        print(f'No correlation plots shown: The number of non-NaN or constant columns ({df.shape[1]}) is less than 2')
        return
    # numeric_only=True: DataFrame.corr raises on non-numeric columns since
    # pandas 2.0, and the text 'message' column survives the filters above.
    corr = df.corr(numeric_only=True)
    plt.figure(num=None, figsize=(graphWidth, graphWidth), dpi=80, facecolor='w', edgecolor='k')
    corrMat = plt.matshow(corr, fignum=1)
    plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
    plt.yticks(range(len(corr.columns)), corr.columns)
    plt.gca().xaxis.tick_bottom()
    plt.colorbar(corrMat)
    # Use the recorded dataset name rather than the '(unknown)' placeholder
    # the original title contained (the 'filename' variable was dead code).
    plt.title(f'Correlation Matrix for {filename}', fontsize=15)
    plt.show()
# Correlation matrix:
plotCorrelationMatrix(df, 8)
# counts of observations in each categorical bin using bars
# ax = sns.countplot(x="class", data=suspicious tweets)
sns.countplot(df['label'])
# pd.value_counts(...) was removed in pandas 2.0; calling value_counts on
# the Series gives the identical result.
count_Class = df['label'].value_counts(sort=True)
# Data to plot
labels = '0', '1'
sizes = [count_Class[0], count_Class[1]]
colors = ['red', 'blue']
explode = (0.1, 0.1)
# Plot
plt.pie(sizes, explode=explode, labels=labels, colors=colors, autopct='%1.1f%%', shadow=True, startangle=90)
plt.title('Percentage of 0s and 1s in column label')
plt.axis('equal')
plt.show()
# This transformer should be used to encode target values, i.e. y, and not the input X.
# Transform labels to normalized encoding.
label = LabelEncoder()
df['label'] = label.fit_transform(df['label'])
X = df['message']
y = df['label']
ps = PorterStemmer()
corpus = []
# Build the stopword set ONCE: the original rebuilt
# set(stopwords.words("english")) for every single word of every tweet.
stop_words = set(stopwords.words("english"))
for i in range(len(X)):
    print(i)
    # Keep letters only, lowercase, stem, and drop English stopwords.
    review = re.sub("[^a-zA-Z]", " ", X[i])
    review = review.lower().split()
    review = [ps.stem(word) for word in review if word not in stop_words]
    corpus.append(" ".join(review))
# NOTE(review): mid-file import kept for minimal diff; conventionally this
# belongs at the top of the module.
from sklearn.feature_extraction.text import TfidfVectorizer
cv = TfidfVectorizer(max_features=5000)
X = cv.fit_transform(corpus).toarray()
X.shape
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=101)
X_train.shape, X_test.shape, y_train.shape, y_test.shape
mnb = MultinomialNB()
mnb.fit(X_train, y_train)
pred = mnb.predict(X_test)
print(np.mean(pred == y_test))
print(accuracy_score(y_test, pred))
print(confusion_matrix(y_test, pred))
print(classification_report(y_test, pred))
pd.DataFrame(np.c_[y_test, pred], columns=["Actual", "Predicted"])
# Use context managers so the pickle file handles are always closed
# (the original leaked the open() handles).
with open("count-Vectorizer.pkl", "wb") as f:
    pickle.dump(cv, f)
with open("Cyberbullying_Detection_One.pkl", "wb") as f:
    pickle.dump(mnb, f)  # 1: pos , 0:Neg
with open('count-Vectorizer.pkl', 'rb') as f:
    save_cv = pickle.load(f)
with open('Cyberbullying_Detection_One.pkl', 'rb') as f:
    model = pickle.load(f)
lecture/migrations/0001_initial.py | bluesky0960/MiniProject1-DjangoWebApp_n | 0 | 6624965 | # Generated by Django 3.1.14 on 2022-01-27 02:00
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the ``lecture`` app (memos, chat, polls, videos).

    Auto-generated by Django's ``makemigrations``; avoid hand-editing the
    operations once this migration has been applied anywhere.
    """

    # First migration of this app; lets Django fast-forward fresh installs.
    initial = True

    dependencies = [
    ]

    operations = [
        # Stand-alone memo: subject, body and creation timestamp.
        migrations.CreateModel(
            name='Memo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('subject', models.CharField(max_length=200)),
                ('content', models.TextField()),
                ('create_date', models.DateTimeField()),
            ],
        ),
        # Free-text memo attached to a lecture page.
        migrations.CreateModel(
            name='MemoLecture',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField()),
            ],
        ),
        # Chat message. NOTE(review): max_length=1000000 on CharFields is
        # unusually large — TextField would be conventional; confirm intent.
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('value', models.CharField(max_length=1000000)),
                ('date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
                ('user', models.CharField(max_length=1000000)),
                ('room', models.CharField(max_length=1000000)),
            ],
        ),
        # Poll question with a creation date.
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('date', models.DateTimeField()),
            ],
        ),
        # Chat room, identified by name only.
        migrations.CreateModel(
            name='Room',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=1000)),
            ],
        ),
        # Lecture video; default ordering shows the newest first.
        migrations.CreateModel(
            name='Video',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('added', models.DateTimeField(auto_now_add=True)),
                ('url', models.TextField()),
            ],
            options={
                'ordering': ['-added'],
            },
        ),
        # Poll choice; deleting a Question cascades to its Choices.
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('votes', models.IntegerField(default=0)),
                ('q', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='lecture.question')),
            ],
        ),
    ]
| # Generated by Django 3.1.14 on 2022-01-27 02:00
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the ``lecture`` app (memos, chat, polls, videos).

    Auto-generated by Django's ``makemigrations``; avoid hand-editing the
    operations once this migration has been applied anywhere.
    """

    # First migration of this app; lets Django fast-forward fresh installs.
    initial = True

    dependencies = [
    ]

    operations = [
        # Stand-alone memo: subject, body and creation timestamp.
        migrations.CreateModel(
            name='Memo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('subject', models.CharField(max_length=200)),
                ('content', models.TextField()),
                ('create_date', models.DateTimeField()),
            ],
        ),
        # Free-text memo attached to a lecture page.
        migrations.CreateModel(
            name='MemoLecture',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField()),
            ],
        ),
        # Chat message. NOTE(review): max_length=1000000 on CharFields is
        # unusually large — TextField would be conventional; confirm intent.
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('value', models.CharField(max_length=1000000)),
                ('date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
                ('user', models.CharField(max_length=1000000)),
                ('room', models.CharField(max_length=1000000)),
            ],
        ),
        # Poll question with a creation date.
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('date', models.DateTimeField()),
            ],
        ),
        # Chat room, identified by name only.
        migrations.CreateModel(
            name='Room',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=1000)),
            ],
        ),
        # Lecture video; default ordering shows the newest first.
        migrations.CreateModel(
            name='Video',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('added', models.DateTimeField(auto_now_add=True)),
                ('url', models.TextField()),
            ],
            options={
                'ordering': ['-added'],
            },
        ),
        # Poll choice; deleting a Question cascades to its Choices.
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('votes', models.IntegerField(default=0)),
                ('q', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='lecture.question')),
            ],
        ),
    ]
| en | 0.843149 | # Generated by Django 3.1.14 on 2022-01-27 02:00 | 1.831323 | 2 |
miriam/schedule_build.py | troydai/Miriam | 1 | 6624966 | <reponame>troydai/Miriam<filename>miriam/schedule_build.py<gh_stars>1-10
import argparse
from azure.batch import BatchServiceClient
from azure.storage.blob import BlockBlobService
def generate_build_id():
    """Return a unique job id of the form ``build-YYYYMMDD-HHMMSS`` (UTC)."""
    from datetime import datetime

    now = datetime.utcnow()
    return f"build-{now:%Y%m%d-%H%M%S}"
def get_build_blob_container_url(storage_client: BlockBlobService):
    """Return a SAS URL granting list/write access to the ``builds`` container.

    The container is created if it does not already exist, and the returned
    shared-access signature expires one day from now.

    :param storage_client: authenticated Azure block-blob storage client.
    :return: HTTPS URL of the container including the SAS query string.
    """
    from datetime import datetime, timedelta
    from azure.storage.blob import ContainerPermissions
    # Idempotent: fail_on_exist=False makes re-creation a no-op.
    storage_client.create_container('builds', fail_on_exist=False)
    # blob_name='' yields a URL for the container itself rather than a blob.
    return storage_client.make_blob_url(
        container_name='builds',
        blob_name='',
        protocol='https',
        sas_token=storage_client.generate_container_shared_access_signature(
            container_name='builds',
            permission=ContainerPermissions(list=True, write=True),
            expiry=(datetime.utcnow() + timedelta(days=1))))
def _create_build_job(batch_client: BatchServiceClient, storage_client: BlockBlobService, settings: dict):
    """
    Schedule a build job in the given pool. returns the container for build output and job reference.
    Building and running tests are two separate jobs so that the testing job can relies on job preparation tasks to
    prepare test environment. The product and test build is an essential part of the preparation. The jobs can't be
    combined because the preparation task has to be defined by the time the job is created. However neither the product
    or the test package is ready then.
    """
    import sys
    from azure.batch.models import (TaskAddParameter, JobAddParameter, PoolInformation, OutputFile,
                                    OutputFileDestination, OutputFileUploadOptions, OutputFileUploadCondition,
                                    OutputFileBlobContainerDestination, OnAllTasksComplete)
    from miriam._utility import get_command_string, get_logger

    # Directory the git clone command below checks the sources out into.
    remote_gitsrc_dir = 'gitsrc'
    logger = get_logger('build')
    build_id = generate_build_id()

    # NOTE(review): next(...) without a default raises StopIteration when no
    # pool entry has usage == 'build', so the guard below only fires when
    # pool.get itself returns a falsy value — confirm this is intended.
    pool = batch_client.pool.get(next(p['id'] for p in settings['pools'] if p['usage'] == 'build'))
    if not pool:
        logger.error('Cannot find a build pool. Please check the pools list in config file.')
        sys.exit(1)

    logger.info('Creating build job %s in pool %s', build_id, pool.id)
    # terminate_job: the job finishes automatically once its only task ends.
    batch_client.job.add(JobAddParameter(id=build_id,
                                         pool_info=PoolInformation(pool.id),
                                         on_all_tasks_complete=OnAllTasksComplete.terminate_job))
    logger.info('Job %s is created.', build_id)

    build_commands = [
        'git clone -b {} -- {} gitsrc'.format(settings['gitsource']['branch'], settings['gitsource']['url']),
        f'pushd {remote_gitsrc_dir}',
        './scripts/batch/build_all.sh'
    ]

    build_container_url = get_build_blob_container_url(storage_client)

    # Upload everything under artifacts/ to the builds container, but only
    # when the build task succeeds.
    output_file = OutputFile(f'{remote_gitsrc_dir}/artifacts/**/*.*',
                             OutputFileDestination(OutputFileBlobContainerDestination(build_container_url, build_id)),
                             OutputFileUploadOptions(OutputFileUploadCondition.task_success))

    build_task = TaskAddParameter(id='build',
                                  command_line=get_command_string(*build_commands),
                                  display_name='Build all product and test code.',
                                  output_files=[output_file])

    batch_client.task.add(build_id, build_task)
    logger.info('Build task is added to job %s', build_id)

    return build_id
def build_entry(arg: argparse.Namespace) -> None:
    """CLI handler for the ``build`` subcommand.

    Reads the YAML config from ``arg.config`` (an open file object),
    schedules a build job on Azure Batch and prints the resulting job id.
    """
    import yaml
    from miriam._utility import create_storage_client, create_batch_client, get_logger

    # safe_load: yaml.load without an explicit Loader is deprecated since
    # PyYAML 5.1 and can execute arbitrary Python from the config file.
    settings = yaml.safe_load(arg.config)
    logger = get_logger('build')
    build_job_id = _create_build_job(create_batch_client(settings),
                                     create_storage_client(settings),
                                     settings)
    logger.info('Build job {} is scheduled. The results will be saved to container builds.'.format(build_job_id))
    print(build_job_id)
def setup(subparsers) -> None:
    """Register the ``build`` subcommand on the given argparse subparsers."""
    build_parser = subparsers.add_parser('build', help='Start a build job')
    build_parser.set_defaults(func=build_entry)
| import argparse
from azure.batch import BatchServiceClient
from azure.storage.blob import BlockBlobService
def generate_build_id():
    """Return a unique job id of the form ``build-YYYYMMDD-HHMMSS`` (UTC)."""
    from datetime import datetime

    now = datetime.utcnow()
    return f"build-{now:%Y%m%d-%H%M%S}"
def get_build_blob_container_url(storage_client: BlockBlobService):
    """Return a SAS URL granting list/write access to the ``builds`` container.

    The container is created if it does not already exist, and the returned
    shared-access signature expires one day from now.

    :param storage_client: authenticated Azure block-blob storage client.
    :return: HTTPS URL of the container including the SAS query string.
    """
    from datetime import datetime, timedelta
    from azure.storage.blob import ContainerPermissions
    # Idempotent: fail_on_exist=False makes re-creation a no-op.
    storage_client.create_container('builds', fail_on_exist=False)
    # blob_name='' yields a URL for the container itself rather than a blob.
    return storage_client.make_blob_url(
        container_name='builds',
        blob_name='',
        protocol='https',
        sas_token=storage_client.generate_container_shared_access_signature(
            container_name='builds',
            permission=ContainerPermissions(list=True, write=True),
            expiry=(datetime.utcnow() + timedelta(days=1))))
def _create_build_job(batch_client: BatchServiceClient, storage_client: BlockBlobService, settings: dict):
    """
    Schedule a build job in the given pool. returns the container for build output and job reference.
    Building and running tests are two separate jobs so that the testing job can relies on job preparation tasks to
    prepare test environment. The product and test build is an essential part of the preparation. The jobs can't be
    combined because the preparation task has to be defined by the time the job is created. However neither the product
    or the test package is ready then.
    """
    import sys
    from azure.batch.models import (TaskAddParameter, JobAddParameter, PoolInformation, OutputFile,
                                    OutputFileDestination, OutputFileUploadOptions, OutputFileUploadCondition,
                                    OutputFileBlobContainerDestination, OnAllTasksComplete)
    from miriam._utility import get_command_string, get_logger

    # Directory the git clone command below checks the sources out into.
    remote_gitsrc_dir = 'gitsrc'
    logger = get_logger('build')
    build_id = generate_build_id()

    # NOTE(review): next(...) without a default raises StopIteration when no
    # pool entry has usage == 'build', so the guard below only fires when
    # pool.get itself returns a falsy value — confirm this is intended.
    pool = batch_client.pool.get(next(p['id'] for p in settings['pools'] if p['usage'] == 'build'))
    if not pool:
        logger.error('Cannot find a build pool. Please check the pools list in config file.')
        sys.exit(1)

    logger.info('Creating build job %s in pool %s', build_id, pool.id)
    # terminate_job: the job finishes automatically once its only task ends.
    batch_client.job.add(JobAddParameter(id=build_id,
                                         pool_info=PoolInformation(pool.id),
                                         on_all_tasks_complete=OnAllTasksComplete.terminate_job))
    logger.info('Job %s is created.', build_id)

    build_commands = [
        'git clone -b {} -- {} gitsrc'.format(settings['gitsource']['branch'], settings['gitsource']['url']),
        f'pushd {remote_gitsrc_dir}',
        './scripts/batch/build_all.sh'
    ]

    build_container_url = get_build_blob_container_url(storage_client)

    # Upload everything under artifacts/ to the builds container, but only
    # when the build task succeeds.
    output_file = OutputFile(f'{remote_gitsrc_dir}/artifacts/**/*.*',
                             OutputFileDestination(OutputFileBlobContainerDestination(build_container_url, build_id)),
                             OutputFileUploadOptions(OutputFileUploadCondition.task_success))

    build_task = TaskAddParameter(id='build',
                                  command_line=get_command_string(*build_commands),
                                  display_name='Build all product and test code.',
                                  output_files=[output_file])

    batch_client.task.add(build_id, build_task)
    logger.info('Build task is added to job %s', build_id)

    return build_id
def build_entry(arg: argparse.Namespace) -> None:
    """CLI handler for the ``build`` subcommand.

    Reads the YAML config from ``arg.config`` (an open file object),
    schedules a build job on Azure Batch and prints the resulting job id.
    """
    import yaml
    from miriam._utility import create_storage_client, create_batch_client, get_logger

    # safe_load: yaml.load without an explicit Loader is deprecated since
    # PyYAML 5.1 and can execute arbitrary Python from the config file.
    settings = yaml.safe_load(arg.config)
    logger = get_logger('build')
    build_job_id = _create_build_job(create_batch_client(settings),
                                     create_storage_client(settings),
                                     settings)
    logger.info('Build job {} is scheduled. The results will be saved to container builds.'.format(build_job_id))
    print(build_job_id)
def setup(subparsers) -> None:
    """Register the ``build`` subcommand on the given argparse subparsers."""
    build_parser = subparsers.add_parser('build', help='Start a build job')
    build_parser.set_defaults(func=build_entry)
python/http/tcp-chat/tcp_chat_client.py | zhubl/planless | 4 | 6624967 | <gh_stars>1-10
# -*- coding:utf8 -*-
# python3
from socket import *
import threading
import time

HOST = '127.0.0.1'
PORT = 5000
ADDR = (HOST, PORT)
BUFSIZ = 1024

# NOTE(review): the connection is opened at import time; importing this
# module without a server listening on ADDR raises ConnectionRefusedError.
tcp_client = socket(AF_INET, SOCK_STREAM)
tcp_client.connect(ADDR)


def send_message():
    """Read lines from stdin and send them to the server; an empty line quits."""
    while True:
        input_data = input(">>> ")
        if not input_data:
            break
        tcp_client.send(bytes(input_data, 'utf8'))
    tcp_client.close()


def recv_message():
    """Print incoming messages until the server closes the connection."""
    while True:
        get_data = tcp_client.recv(BUFSIZ)
        if not get_data:
            # recv() returns b'' on orderly peer shutdown; the original
            # kept looping forever here instead of exiting.
            break
        # Decode before printing so users see text, not a bytes repr.
        print(get_data.decode('utf8'))
        time.sleep(1)


if __name__ == '__main__':
    recv_threading = threading.Thread(target=recv_message, args=())
    recv_threading.start()
    send_threading = threading.Thread(target=send_message, args=())
    send_threading.start()
| # -*- coding:utf8 -*-
# python3
from socket import *
import threading
import time

HOST = '127.0.0.1'
PORT = 5000
ADDR = (HOST, PORT)
BUFSIZ = 1024

# NOTE(review): the connection is opened at import time; importing this
# module without a server listening on ADDR raises ConnectionRefusedError.
tcp_client = socket(AF_INET, SOCK_STREAM)
tcp_client.connect(ADDR)


def send_message():
    """Read lines from stdin and send them to the server; an empty line quits."""
    while True:
        input_data = input(">>> ")
        if not input_data:
            break
        tcp_client.send(bytes(input_data, 'utf8'))
    tcp_client.close()


def recv_message():
    """Print incoming messages until the server closes the connection."""
    while True:
        get_data = tcp_client.recv(BUFSIZ)
        if not get_data:
            # recv() returns b'' on orderly peer shutdown; the original
            # kept looping forever here instead of exiting.
            break
        # Decode before printing so users see text, not a bytes repr.
        print(get_data.decode('utf8'))
        time.sleep(1)


if __name__ == '__main__':
    recv_threading = threading.Thread(target=recv_message, args=())
    recv_threading.start()
    send_threading = threading.Thread(target=send_message, args=())
    send_threading.start()
CustomScript/test/create_test_blob.py | shridpant/azure-linux-extensions | 266 | 6624968 | <reponame>shridpant/azure-linux-extensions<filename>CustomScript/test/create_test_blob.py
import blob
import blob_mooncake
import customscript as cs
from azure.storage import BlobService
def create_blob(blob, txt):
    """Upload ``txt`` as a block blob described by a blob settings module.

    :param blob: settings module exposing ``uri``, ``name`` (storage account
        name) and ``key``; container and blob names are parsed from ``uri``.
    :param txt: text content to upload.
    """
    uri = blob.uri
    host_base = cs.get_host_base_from_uri(uri)
    service = BlobService(blob.name,
                          blob.key,
                          host_base=host_base)
    container_name = cs.get_container_name_from_uri(uri)
    blob_name = cs.get_blob_name_from_uri(uri)
    service.put_block_blob_from_text(container_name,
                                     blob_name,
                                     txt)


if __name__ == "__main__":
    # Create one test blob in public Azure and one in Mooncake (Azure China).
    create_blob(blob, "public azure\n")
    create_blob(blob_mooncake, "mooncake\n")
| import blob
import blob_mooncake
import customscript as cs
from azure.storage import BlobService
def create_blob(blob, txt):
    """Upload ``txt`` as a block blob described by a blob settings module.

    :param blob: settings module exposing ``uri``, ``name`` (storage account
        name) and ``key``; container and blob names are parsed from ``uri``.
    :param txt: text content to upload.
    """
    uri = blob.uri
    host_base = cs.get_host_base_from_uri(uri)
    service = BlobService(blob.name,
                          blob.key,
                          host_base=host_base)
    container_name = cs.get_container_name_from_uri(uri)
    blob_name = cs.get_blob_name_from_uri(uri)
    service.put_block_blob_from_text(container_name,
                                     blob_name,
                                     txt)


if __name__ == "__main__":
    # Create one test blob in public Azure and one in Mooncake (Azure China).
    create_blob(blob, "public azure\n")
    create_blob(blob_mooncake, "mooncake\n")
ckan/tests/logic/test_converters.py | ziveo/ckan | 58 | 6624969 | <reponame>ziveo/ckan<filename>ckan/tests/logic/test_converters.py<gh_stars>10-100
# encoding: utf-8
"""Unit tests for ckan/logic/converters.py.
"""
import ckan.logic.converters as converters
def test_leading_space():
    """remove_whitespace strips a single leading space."""
    string = " http://example.com"
    expected = "http://example.com"
    converted = converters.remove_whitespace(string, {})
    assert expected == converted


def test_trailing_space():
    """remove_whitespace strips a single trailing space."""
    string = "http://example.com "
    expected = "http://example.com"
    converted = converters.remove_whitespace(string, {})
    assert expected == converted


def test_space_between():
    """Interior spaces are preserved; only the ends are trimmed."""
    string = "http://example.com/space between url "
    expected = "http://example.com/space between url"
    converted = converters.remove_whitespace(string, {})
    assert expected == converted


def test_not_a_string():
    """Non-string input passes through unchanged."""
    string = 12345
    converted = converters.remove_whitespace(string, {})
    assert string == converted


def test_convert_to_extras_output_unflattened():
    """A free-form field is moved into the flattened extras structure."""
    key = ("test_field",)
    data = {("test_field",): "test_value"}
    errors = {}
    context = {}
    converters.convert_to_extras(key, data, errors, context)
    # The field becomes extras entry 0; no nested ("extras",) key is created.
    assert data[("extras", 0, "key")] == "test_field"
    assert data[("extras", 0, "value")] == "test_value"
    assert ("extras",) not in data
    assert errors == {}


def test_convert_to_extras_output_unflattened_with_correct_index():
    """New extras are appended after existing flattened extras entries."""
    key = ("test_field",)
    data = {
        ("test_field",): "test_value",
        ("extras", 0, "deleted"): "",
        ("extras", 0, "id"): "",
        ("extras", 0, "key"): "proper_extra",
        ("extras", 0, "revision_timestamp"): "",
        ("extras", 0, "state"): "",
        ("extras", 0, "value"): "proper_extra_value",
    }
    errors = {}
    context = {}
    converters.convert_to_extras(key, data, errors, context)
    # The pre-existing extra keeps index 0; the new field is appended at 1.
    assert data[("extras", 0, "key")] == "proper_extra"
    assert data[("extras", 0, "value")] == "proper_extra_value"
    assert data[("extras", 1, "key")] == "test_field"
    assert data[("extras", 1, "value")] == "test_value"
    assert ("extras",) not in data
    assert errors == {}
| # encoding: utf-8
"""Unit tests for ckan/logic/converters.py.
"""
import ckan.logic.converters as converters
def test_leading_space():
    """remove_whitespace strips a single leading space."""
    string = " http://example.com"
    expected = "http://example.com"
    converted = converters.remove_whitespace(string, {})
    assert expected == converted


def test_trailing_space():
    """remove_whitespace strips a single trailing space."""
    string = "http://example.com "
    expected = "http://example.com"
    converted = converters.remove_whitespace(string, {})
    assert expected == converted


def test_space_between():
    """Interior spaces are preserved; only the ends are trimmed."""
    string = "http://example.com/space between url "
    expected = "http://example.com/space between url"
    converted = converters.remove_whitespace(string, {})
    assert expected == converted


def test_not_a_string():
    """Non-string input passes through unchanged."""
    string = 12345
    converted = converters.remove_whitespace(string, {})
    assert string == converted


def test_convert_to_extras_output_unflattened():
    """A free-form field is moved into the flattened extras structure."""
    key = ("test_field",)
    data = {("test_field",): "test_value"}
    errors = {}
    context = {}
    converters.convert_to_extras(key, data, errors, context)
    # The field becomes extras entry 0; no nested ("extras",) key is created.
    assert data[("extras", 0, "key")] == "test_field"
    assert data[("extras", 0, "value")] == "test_value"
    assert ("extras",) not in data
    assert errors == {}


def test_convert_to_extras_output_unflattened_with_correct_index():
    """New extras are appended after existing flattened extras entries."""
    key = ("test_field",)
    data = {
        ("test_field",): "test_value",
        ("extras", 0, "deleted"): "",
        ("extras", 0, "id"): "",
        ("extras", 0, "key"): "proper_extra",
        ("extras", 0, "revision_timestamp"): "",
        ("extras", 0, "state"): "",
        ("extras", 0, "value"): "proper_extra_value",
    }
    errors = {}
    context = {}
    converters.convert_to_extras(key, data, errors, context)
    # The pre-existing extra keeps index 0; the new field is appended at 1.
    assert data[("extras", 0, "key")] == "proper_extra"
    assert data[("extras", 0, "value")] == "proper_extra_value"
    assert data[("extras", 1, "key")] == "test_field"
    assert data[("extras", 1, "value")] == "test_value"
    assert ("extras",) not in data
    assert errors == {}
critiquebrainz/ws/testing.py | akshaaatt/critiquebrainz | 70 | 6624970 | import os
from flask_testing import TestCase
import critiquebrainz.db.oauth_client as db_oauth_client
import critiquebrainz.db.users as db_users
from critiquebrainz.data.utils import create_all, drop_tables, drop_types
from critiquebrainz.ws import create_app
from critiquebrainz.ws.oauth import oauth
class WebServiceTestCase(TestCase):
    """Base class for CritiqueBrainz web-service tests.

    Builds the Flask app from the package's test configuration, resets the
    database schema before each test, and provides helpers for creating a
    dummy OAuth client and access token for a user.
    """

    def create_app(self):
        # flask_testing hook: build the web-service app using the test
        # config file that sits one directory above this package.
        app = create_app(config_path=os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            '..', 'test_config.py'
        ))
        oauth.init_app(app)
        return app

    def setUp(self):
        # Each test starts from a completely clean database.
        self.reset_db()
        # TODO(roman): Add stuff from fixtures.

    def tearDown(self):
        pass

    @staticmethod
    def reset_db():
        # Drop tables first, then custom DB types, then recreate everything.
        drop_tables()
        drop_types()
        create_all()

    @staticmethod
    def create_dummy_client(user):
        """Create and return an OAuth client record owned by ``user``."""
        db_oauth_client.create(
            user_id=user.id,
            name="<NAME>",
            desc="Created for testing the webservice",
            website="http://example.com/",
            redirect_uri="http://example.com/redirect/",
        )
        # The freshly created client is the user's first (only) client.
        client = db_users.clients(user.id)[0]
        return client

    def create_dummy_token(self, user, client=None):
        """Return an access token for ``user``, creating a client if needed."""
        if client is None:
            client = self.create_dummy_client(user)
        token = oauth.generate_token(client_id=client["client_id"], refresh_token="",
                                     user_id=user.id, scope="review vote user")
        # generate_token returns a tuple; the access token is the first item.
        return token[0]
| import os
from flask_testing import TestCase
import critiquebrainz.db.oauth_client as db_oauth_client
import critiquebrainz.db.users as db_users
from critiquebrainz.data.utils import create_all, drop_tables, drop_types
from critiquebrainz.ws import create_app
from critiquebrainz.ws.oauth import oauth
class WebServiceTestCase(TestCase):
    """Base class for CritiqueBrainz web-service tests.

    Builds the Flask app from the package's test configuration, resets the
    database schema before each test, and provides helpers for creating a
    dummy OAuth client and access token for a user.
    """

    def create_app(self):
        # flask_testing hook: build the web-service app using the test
        # config file that sits one directory above this package.
        app = create_app(config_path=os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            '..', 'test_config.py'
        ))
        oauth.init_app(app)
        return app

    def setUp(self):
        # Each test starts from a completely clean database.
        self.reset_db()
        # TODO(roman): Add stuff from fixtures.

    def tearDown(self):
        pass

    @staticmethod
    def reset_db():
        # Drop tables first, then custom DB types, then recreate everything.
        drop_tables()
        drop_types()
        create_all()

    @staticmethod
    def create_dummy_client(user):
        """Create and return an OAuth client record owned by ``user``."""
        db_oauth_client.create(
            user_id=user.id,
            name="<NAME>",
            desc="Created for testing the webservice",
            website="http://example.com/",
            redirect_uri="http://example.com/redirect/",
        )
        # The freshly created client is the user's first (only) client.
        client = db_users.clients(user.id)[0]
        return client

    def create_dummy_token(self, user, client=None):
        """Return an access token for ``user``, creating a client if needed."""
        if client is None:
            client = self.create_dummy_client(user)
        token = oauth.generate_token(client_id=client["client_id"], refresh_token="",
                                     user_id=user.id, scope="review vote user")
        # generate_token returns a tuple; the access token is the first item.
        return token[0]
| en | 0.27904 | # TODO(roman): Add stuff form fixtures. | 2.179742 | 2 |
sdk/keyvault/azure-keyvault-certificates/azure/keyvault/certificates/aio/client.py | GabrielHobold/azure-sdk-for-python | 0 | 6624971 | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
# pylint:disable=too-many-lines,too-many-public-methods
import base64
from typing import Any, AsyncIterable, Optional, Iterable, List, Dict, Union
from functools import partial
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.polling import async_poller
from azure.keyvault.certificates.models import (
KeyVaultCertificate,
CertificateOperation,
CertificatePolicy,
DeletedCertificate,
CertificateProperties,
CertificateContact,
CertificateIssuer,
IssuerProperties,
)
from ._polling_async import CreateCertificatePollerAsync
from .._shared import AsyncKeyVaultClientBase
from .._shared.exceptions import error_map as _error_map
class CertificateClient(AsyncKeyVaultClientBase):
"""A high-level asynchronous interface for managing a vault's certificates.
:param str vault_endpoint: URL of the vault the client will access
:param credential: An object which can provide an access token for the vault, such as a credential from
:mod:`azure.identity.aio`
Keyword arguments
- **api_version**: version of the Key Vault API to use. Defaults to the most recent.
- **transport**: :class:`~azure.core.pipeline.transport.AsyncHttpTransport` to use. Defaults to
:class:`~azure.core.pipeline.transport.AioHttpTransport`.
Example:
.. literalinclude:: ../tests/test_examples_certificates_async.py
:start-after: [START create_certificate_client]
:end-before: [END create_certificate_client]
:language: python
:dedent: 4
:caption: Creates a new instance of the Certificate client
"""
# pylint:disable=protected-access
    @distributed_trace_async
    async def create_certificate(
        self, name: str, policy: CertificatePolicy, **kwargs: "**Any"
    ) -> Union[KeyVaultCertificate, CertificateOperation]:
        """Creates a new certificate.

        If this is the first version, the certificate resource is created. This
        operation requires the certificates/create permission.

        :param str name: The name of the certificate.
        :param policy: The management policy for the certificate.
        :type policy: ~azure.keyvault.certificates.models.CertificatePolicy
        :returns: A coroutine for the creation of the certificate. Awaiting the coroutine
            returns the created KeyVaultCertificate if creation is successful, the
            CertificateOperation if not.
        :rtype: ~azure.keyvault.certificates.models.KeyVaultCertificate or
            ~azure.keyvault.certificates.models.CertificateOperation
        :raises: :class:`~azure.core.exceptions.HttpResponseError`

        Keyword arguments
            - *enabled (bool)* - Determines whether the object is enabled.
            - *tags (dict[str, str])* - Application specific metadata in the form of key-value pairs.

        Example:
            .. literalinclude:: ../tests/test_examples_certificates_async.py
                :start-after: [START create_certificate]
                :end-before: [END create_certificate]
                :language: python
                :caption: Create a certificate
                :dedent: 8
        """
        enabled = kwargs.pop("enabled", None)
        tags = kwargs.pop("tags", None)
        if enabled is not None:
            attributes = self._client.models.CertificateAttributes(enabled=enabled)
        else:
            attributes = None
        cert_bundle = await self._client.create_certificate(
            vault_base_url=self.vault_endpoint,
            certificate_name=name,
            certificate_policy=policy._to_certificate_policy_bundle(),
            certificate_attributes=attributes,
            tags=tags,
            **kwargs
        )
        create_certificate_operation = CertificateOperation._from_certificate_operation_bundle(cert_bundle)
        # Poll the pending operation; once it completes, fetch the finished
        # certificate via get_certificate.
        command = partial(self.get_certificate_operation, name=name, **kwargs)
        get_certificate_command = partial(self.get_certificate, name=name, **kwargs)
        create_certificate_polling = CreateCertificatePollerAsync(get_certificate_command=get_certificate_command)
        return await async_poller(command, create_certificate_operation, None, create_certificate_polling)
    @distributed_trace_async
    async def get_certificate(self, name: str, **kwargs: "**Any") -> KeyVaultCertificate:
        """Gets a certificate with its management policy attached.

        This operation requires the certificates/get permission. Does not accept the
        version of the certificate as a parameter. If you wish to specify version, use
        the get_certificate_version function and specify the desired version.

        :param str name: The name of the certificate in the given vault.
        :returns: An instance of KeyVaultCertificate
        :rtype: ~azure.keyvault.certificates.models.KeyVaultCertificate
        :raises:
            :class:`~azure.core.exceptions.ResourceNotFoundError` if the certificate doesn't exist,
            :class:`~azure.core.exceptions.HttpResponseError` for other errors

        Example:
            .. literalinclude:: ../tests/test_examples_certificates_async.py
                :start-after: [START get_certificate]
                :end-before: [END get_certificate]
                :language: python
                :caption: Get a certificate
                :dedent: 8
        """
        # An empty certificate_version asks the service for the latest version.
        bundle = await self._client.get_certificate(
            vault_base_url=self.vault_endpoint,
            certificate_name=name,
            certificate_version="",
            error_map=_error_map,
            **kwargs
        )
        return KeyVaultCertificate._from_certificate_bundle(certificate_bundle=bundle)
@distributed_trace_async
async def get_certificate_version(self, name: str, version: str, **kwargs: "**Any") -> KeyVaultCertificate:
"""Gets a specific version of a certificate without returning its management policy.
If you wish to get the latest version of your certificate, or to get the certificate's policy as well,
use the get_certificate function.
:param str name: The name of the certificate in the given vault.
:param str version: The version of the certificate.
:returns: An instance of KeyVaultCertificate
:rtype: ~azure.keyvault.certificates.models.KeyVaultCertificate
:raises:
:class:`~azure.core.exceptions.ResourceNotFoundError` if the certificate doesn't exist,
:class:`~azure.core.exceptions.HttpResponseError` for other errors
Example:
.. literalinclude:: ../tests/test_examples_certificates_async.py
:start-after: [START get_certificate]
:end-before: [END get_certificate]
:language: python
:caption: Get a certificate
:dedent: 8
"""
bundle = await self._client.get_certificate(
vault_base_url=self.vault_endpoint,
certificate_name=name,
certificate_version=version,
error_map=_error_map,
**kwargs
)
return KeyVaultCertificate._from_certificate_bundle(certificate_bundle=bundle)
    @distributed_trace_async
    async def delete_certificate(self, name: str, **kwargs: "**Any") -> DeletedCertificate:
        """Deletes a certificate from the key vault.

        Deletes all versions of a certificate object along with its associated
        policy. Delete certificate cannot be used to remove individual versions
        of a certificate object. This operation requires the
        certificates/delete permission.

        :param str name: The name of the certificate.
        :returns: The deleted certificate
        :rtype: ~azure.keyvault.certificates.models.DeletedCertificate
        :raises:
            :class:`~azure.core.exceptions.ResourceNotFoundError` if the certificate doesn't exist,
            :class:`~azure.core.exceptions.HttpResponseError` for other errors

        Example:
            .. literalinclude:: ../tests/test_examples_certificates_async.py
                :start-after: [START delete_certificate]
                :end-before: [END delete_certificate]
                :language: python
                :caption: Delete a certificate
                :dedent: 8
        """
        bundle = await self._client.delete_certificate(
            vault_base_url=self.vault_endpoint, certificate_name=name, error_map=_error_map, **kwargs
        )
        return DeletedCertificate._from_deleted_certificate_bundle(deleted_certificate_bundle=bundle)
    @distributed_trace_async
    async def get_deleted_certificate(self, name: str, **kwargs: "**Any") -> DeletedCertificate:
        """Retrieves information about the specified deleted certificate.

        Retrieves the deleted certificate information plus its attributes,
        such as retention interval, scheduled permanent deletion, and the
        current deletion recovery level. This operation requires the certificates/
        get permission.

        :param str name: The name of the certificate.
        :return: The deleted certificate
        :rtype: ~azure.keyvault.certificates.models.DeletedCertificate
        :raises:
            :class:`~azure.core.exceptions.ResourceNotFoundError` if the certificate doesn't exist,
            :class:`~azure.core.exceptions.HttpResponseError` for other errors

        Example:
            .. literalinclude:: ../tests/test_examples_certificates_async.py
                :start-after: [START get_deleted_certificate]
                :end-before: [END get_deleted_certificate]
                :language: python
                :caption: Get a deleted certificate
                :dedent: 8
        """
        bundle = await self._client.get_deleted_certificate(
            vault_base_url=self.vault_endpoint, certificate_name=name, error_map=_error_map, **kwargs
        )
        return DeletedCertificate._from_deleted_certificate_bundle(deleted_certificate_bundle=bundle)
    @distributed_trace_async
    async def purge_deleted_certificate(self, name: str, **kwargs: "**Any") -> None:
        """Permanently deletes the specified deleted certificate.

        Performs an irreversible deletion of the specified certificate, without
        possibility for recovery. The operation is not available if the recovery
        level does not specify 'Purgeable'. This operation requires the
        certificate/purge permission.

        :param str name: The name of the certificate
        :return: None
        :rtype: None
        :raises: :class:`~azure.core.exceptions.HttpResponseError`
        """
        await self._client.purge_deleted_certificate(
            vault_base_url=self.vault_endpoint, certificate_name=name, **kwargs
        )
    @distributed_trace_async
    async def recover_deleted_certificate(self, name: str, **kwargs: "**Any") -> KeyVaultCertificate:
        """Recovers the deleted certificate back to its current version under
        /certificates.

        Performs the reversal of the Delete operation. The operation is applicable
        in vaults enabled for soft-delete, and must be issued during the retention
        interval (available in the deleted certificate's attributes). This operation
        requires the certificates/recover permission.

        :param str name: The name of the deleted certificate
        :return: The recovered certificate
        :rtype: ~azure.keyvault.certificates.models.KeyVaultCertificate
        :raises: :class:`~azure.core.exceptions.HttpResponseError`

        Example:
            .. literalinclude:: ../tests/test_examples_certificates_async.py
                :start-after: [START recover_deleted_certificate]
                :end-before: [END recover_deleted_certificate]
                :language: python
                :caption: Recover a deleted certificate
                :dedent: 8
        """
        bundle = await self._client.recover_deleted_certificate(
            vault_base_url=self.vault_endpoint, certificate_name=name, **kwargs
        )
        return KeyVaultCertificate._from_certificate_bundle(certificate_bundle=bundle)
@distributed_trace_async
async def import_certificate(
self, name: str, certificate_bytes: bytes, **kwargs: "**Any"
) -> KeyVaultCertificate:
"""Imports a certificate into a specified key vault.
Imports an existing valid certificate, containing a private key, into
Azure Key Vault. The certificate to be imported can be in either PFX or
PEM format. If the certificate is in PEM format the PEM file must
contain the key as well as x509 certificates. This operation requires
the certificates/import permission.
:param str name: The name of the certificate.
:param bytes certificate_bytes: Bytes of the certificate object to import.
This certificate needs to contain the private key.
:returns: The imported KeyVaultCertificate
:rtype: ~azure.keyvault.certificates.models.KeyVaultCertificate
:raises: :class:`~azure.core.exceptions.HttpResponseError`
Keyword arguments
- *enabled (bool)* - Determines whether the object is enabled.
- *tags (dict[str, str])* - Application specific metadata in the form of key-value pairs.
- *password (str)* - If the private key in the passed in certificate is encrypted,
it is the password used for encryption.
- *policy (~azure.keyvault.certificates.models.CertificatePolicy)* - The management policy
for the certificate.
"""
enabled = kwargs.pop("enabled", None)
password = kwargs.pop("password", None)
policy = kwargs.pop("policy", None)
if enabled is not None:
attributes = self._client.models.CertificateAttributes(enabled=enabled)
else:
attributes = None
base64_encoded_certificate = base64.b64encode(certificate_bytes).decode("utf-8")
bundle = await self._client.import_certificate(
vault_base_url=self.vault_endpoint,
certificate_name=name,
base64_encoded_certificate=base64_encoded_certificate,
password=password,
certificate_policy=CertificatePolicy._to_certificate_policy_bundle(policy),
certificate_attributes=attributes,
**kwargs
)
return KeyVaultCertificate._from_certificate_bundle(certificate_bundle=bundle)
@distributed_trace_async
async def get_policy(self, certificate_name: str, **kwargs: "**Any") -> CertificatePolicy:
"""Gets the policy for a certificate.
Returns the specified certificate policy resources in the key
vault. This operation requires the certificates/get permission.
:param str certificate_name: The name of the certificate in a given key vault.
:return: The certificate policy
:rtype: ~azure.keyvault.certificates.models.CertificatePolicy
:raises: :class:`~azure.core.exceptions.HttpResponseError`
"""
bundle = await self._client.get_certificate_policy(
vault_base_url=self.vault_endpoint, certificate_name=certificate_name, **kwargs
)
return CertificatePolicy._from_certificate_policy_bundle(certificate_policy_bundle=bundle)
    @distributed_trace_async
    async def update_policy(
        self, certificate_name: str, policy: CertificatePolicy, **kwargs: "**Any"
    ) -> CertificatePolicy:
        """Updates the policy for a certificate.

        Set specified members in the certificate policy. Leaves others as null.
        This operation requires the certificates/update permission.

        :param str certificate_name: The name of the certificate in the given vault.
        :param policy: The policy for the certificate.
        :type policy: ~azure.keyvault.certificates.models.CertificatePolicy
        :return: The certificate policy
        :rtype: ~azure.keyvault.certificates.models.CertificatePolicy
        :raises: :class:`~azure.core.exceptions.HttpResponseError`
        """
        bundle = await self._client.update_certificate_policy(
            vault_base_url=self.vault_endpoint,
            certificate_name=certificate_name,
            certificate_policy=policy._to_certificate_policy_bundle(),
            **kwargs
        )
        return CertificatePolicy._from_certificate_policy_bundle(certificate_policy_bundle=bundle)
    @distributed_trace_async
    async def update_certificate_properties(
        self, name: str, version: Optional[str] = None, **kwargs: "**Any"
    ) -> KeyVaultCertificate:
        """Updates the specified attributes associated with the given certificate.

        The UpdateCertificate operation applies the specified update on the
        given certificate; the only elements updated are the certificate's
        attributes. This operation requires the certificates/update permission.

        :param str name: The name of the certificate in the given key vault.
        :param str version: The version of the certificate.
        :returns: The updated KeyVaultCertificate
        :rtype: ~azure.keyvault.certificates.models.KeyVaultCertificate
        :raises: :class:`~azure.core.exceptions.HttpResponseError`

        Keyword arguments
            - *enabled (bool)* - Determines whether the object is enabled.
            - *tags (dict[str, str])* - Application specific metadata in the form of key-value pairs.

        Example:
            .. literalinclude:: ../tests/test_examples_certificates_async.py
                :start-after: [START update_certificate]
                :end-before: [END update_certificate]
                :language: python
                :caption: Update a certificate's attributes
                :dedent: 8
        """
        enabled = kwargs.pop("enabled", None)
        if enabled is not None:
            attributes = self._client.models.CertificateAttributes(enabled=enabled)
        else:
            attributes = None
        # An empty certificate_version targets the latest version.
        bundle = await self._client.update_certificate(
            vault_base_url=self.vault_endpoint,
            certificate_name=name,
            certificate_version=version or "",
            certificate_attributes=attributes,
            **kwargs
        )
        return KeyVaultCertificate._from_certificate_bundle(certificate_bundle=bundle)
    @distributed_trace_async
    async def backup_certificate(self, name: str, **kwargs: "**Any") -> bytes:
        """Backs up the specified certificate.

        Requests that a backup of the specified certificate be downloaded
        to the client. All versions of the certificate will be downloaded.
        This operation requires the certificates/backup permission.

        :param str name: The name of the certificate.
        :return: the backup blob containing the backed up certificate.
        :rtype: bytes
        :raises:
            :class:`~azure.core.exceptions.ResourceNotFoundError` if the certificate doesn't exist,
            :class:`~azure.core.exceptions.HttpResponseError` for other errors

        Example:
            .. literalinclude:: ../tests/test_examples_certificates_async.py
                :start-after: [START backup_certificate]
                :end-before: [END backup_certificate]
                :language: python
                :caption: Get a certificate backup
                :dedent: 8
        """
        backup_result = await self._client.backup_certificate(
            vault_base_url=self.vault_endpoint, certificate_name=name, error_map=_error_map, **kwargs
        )
        return backup_result.value
    @distributed_trace_async
    async def restore_certificate_backup(self, backup: bytes, **kwargs: "**Any") -> KeyVaultCertificate:
        """Restores a backed up certificate to a vault.

        Restores a backed up certificate, and all its versions, to a vault.
        This operation requires the certificates/restore permission.

        :param bytes backup: The backup blob associated with a certificate bundle.
        :return: The restored KeyVaultCertificate
        :rtype: ~azure.keyvault.certificates.models.KeyVaultCertificate
        :raises: :class:`~azure.core.exceptions.HttpResponseError`

        Example:
            .. literalinclude:: ../tests/test_examples_certificates_async.py
                :start-after: [START restore_certificate]
                :end-before: [END restore_certificate]
                :language: python
                :caption: Restore a certificate backup
                :dedent: 8
        """
        bundle = await self._client.restore_certificate(
            vault_base_url=self.vault_endpoint, certificate_bundle_backup=backup, **kwargs
        )
        return KeyVaultCertificate._from_certificate_bundle(certificate_bundle=bundle)
    @distributed_trace
    def list_deleted_certificates(self, **kwargs: "**Any") -> AsyncIterable[DeletedCertificate]:
        """Lists the deleted certificates in the specified vault currently
        available for recovery.

        Retrieves the certificates in the current vault which are in a deleted
        state and ready for recovery or purging. This operation includes
        deletion-specific information. This operation requires the certificates/get/list
        permission. This operation can only be enabled on soft-delete enabled vaults.

        :return: An iterator like instance of DeletedCertificate
        :rtype:
            ~azure.core.paging.ItemPaged[~azure.keyvault.certificates.models.DeletedCertificate]
        :raises: :class:`~azure.core.exceptions.HttpResponseError`

        Keyword arguments
            - *include_pending (bool)* - Specifies whether to include certificates which are
              not completely deleted.

        Example:
            .. literalinclude:: ../tests/test_examples_certificates_async.py
                :start-after: [START list_deleted_certificates]
                :end-before: [END list_deleted_certificates]
                :language: python
                :caption: List all the deleted certificates
                :dedent: 8
        """
        max_page_size = kwargs.pop("max_page_size", None)
        return self._client.get_deleted_certificates(
            vault_base_url=self._vault_endpoint,
            maxresults=max_page_size,
            cls=lambda objs: [DeletedCertificate._from_deleted_certificate_item(x) for x in objs],
            **kwargs
        )
    @distributed_trace
    def list_certificates(self, **kwargs: "**Any") -> AsyncIterable[CertificateProperties]:
        """List certificates in the key vault.

        The GetCertificates operation returns the set of certificates resources
        in the key vault. This operation requires the
        certificates/list permission.

        :returns: An iterator like instance of CertificateProperties
        :rtype:
            ~azure.core.paging.ItemPaged[~azure.keyvault.certificates.models.CertificateProperties]
        :raises: :class:`~azure.core.exceptions.HttpResponseError`

        Keyword arguments
            - *include_pending (bool)* - Specifies whether to include certificates which are
              not completely provisioned.

        Example:
            .. literalinclude:: ../tests/test_examples_certificates_async.py
                :start-after: [START list_certificates]
                :end-before: [END list_certificates]
                :language: python
                :caption: List all certificates
                :dedent: 8
        """
        max_page_size = kwargs.pop("max_page_size", None)
        return self._client.get_certificates(
            vault_base_url=self._vault_endpoint,
            maxresults=max_page_size,
            cls=lambda objs: [CertificateProperties._from_certificate_item(x) for x in objs],
            **kwargs
        )
    @distributed_trace
    def list_certificate_versions(self, name: str, **kwargs: "**Any") -> AsyncIterable[CertificateProperties]:
        """List the versions of a certificate.

        The GetCertificateVersions operation returns the versions of a
        certificate in the key vault. This operation requires the
        certificates/list permission.

        :param str name: The name of the certificate.
        :returns: An iterator like instance of CertificateProperties
        :rtype:
            ~azure.core.paging.ItemPaged[~azure.keyvault.certificates.models.CertificateProperties]
        :raises: :class:`~azure.core.exceptions.HttpResponseError`

        Example:
            .. literalinclude:: ../tests/test_examples_certificates_async.py
                :start-after: [START list_certificate_versions]
                :end-before: [END list_certificate_versions]
                :language: python
                :caption: List all versions of a certificate
                :dedent: 8
        """
        max_page_size = kwargs.pop("max_page_size", None)
        return self._client.get_certificate_versions(
            vault_base_url=self._vault_endpoint,
            certificate_name=name,
            maxresults=max_page_size,
            cls=lambda objs: [CertificateProperties._from_certificate_item(x) for x in objs],
            **kwargs
        )
@distributed_trace_async
async def create_contacts(
self, contacts: Iterable[CertificateContact], **kwargs: "**Any"
) -> List[CertificateContact]:
# pylint:disable=unsubscriptable-object
# disabled unsubscriptable-object because of pylint bug referenced here:
# https://github.com/PyCQA/pylint/issues/2377
"""Sets the certificate contacts for the key vault.
Sets the certificate contacts for the key vault. This
operation requires the certificates/managecontacts permission.
:param contacts: The contact list for the vault certificates.
:type contacts: list[~azure.keyvault.certificates.models.CertificateContact]
:returns: The created list of contacts
:rtype: list[~azure.keyvault.certificates.models.CertificateContact]
:raises: :class:`~azure.core.exceptions.HttpResponseError`
Example:
.. literalinclude:: ../tests/test_examples_certificates_async.py
:start-after: [START create_contacts]
:end-before: [END create_contacts]
:language: python
:caption: Create contacts
:dedent: 8
"""
contacts = await self._client.set_certificate_contacts(
vault_base_url=self.vault_endpoint,
contact_list=[c._to_certificate_contacts_item() for c in contacts],
**kwargs
)
return [CertificateContact._from_certificate_contacts_item(contact_item=item) for item in contacts.contact_list]
    @distributed_trace_async
    async def get_contacts(self, **kwargs: "**Any") -> List[CertificateContact]:
        # pylint:disable=unsubscriptable-object
        # disabled unsubscriptable-object because of pylint bug referenced here:
        # https://github.com/PyCQA/pylint/issues/2377
        """Gets the certificate contacts for the key vault.

        Returns the set of certificate contact resources in the specified
        key vault. This operation requires the certificates/managecontacts
        permission.

        :return: The certificate contacts for the key vault.
        :rtype: list[azure.keyvault.certificates.models.CertificateContact]
        :raises: :class:`~azure.core.exceptions.HttpResponseError`

        Example:
            .. literalinclude:: ../tests/test_examples_certificates_async.py
                :start-after: [START get_contacts]
                :end-before: [END get_contacts]
                :language: python
                :caption: Get contacts
                :dedent: 8
        """
        contacts = await self._client.get_certificate_contacts(vault_base_url=self._vault_endpoint, **kwargs)
        return [CertificateContact._from_certificate_contacts_item(contact_item=item) for item in contacts.contact_list]
    @distributed_trace_async
    async def delete_contacts(self, **kwargs: "**Any") -> List[CertificateContact]:
        # pylint:disable=unsubscriptable-object
        # disabled unsubscriptable-object because of pylint bug referenced here:
        # https://github.com/PyCQA/pylint/issues/2377
        """Deletes the certificate contacts for the key vault.

        Deletes the certificate contacts for the key vault certificate.
        This operation requires the certificates/managecontacts permission.

        :return: The deleted contacts
        :rtype: list[~azure.keyvault.certificates.models.CertificateContact]
        :raises: :class:`~azure.core.exceptions.HttpResponseError`

        Example:
            .. literalinclude:: ../tests/test_examples_certificates_async.py
                :start-after: [START delete_contacts]
                :end-before: [END delete_contacts]
                :language: python
                :caption: Delete contacts
                :dedent: 8
        """
        contacts = await self._client.delete_certificate_contacts(vault_base_url=self.vault_endpoint, **kwargs)
        return [CertificateContact._from_certificate_contacts_item(contact_item=item) for item in contacts.contact_list]
    @distributed_trace_async
    async def get_certificate_operation(self, name: str, **kwargs: "**Any") -> CertificateOperation:
        """Gets the creation operation of a certificate.

        Gets the creation operation associated with a specified certificate.
        This operation requires the certificates/get permission.

        :param str name: The name of the certificate.
        :returns: The created CertificateOperation
        :rtype: ~azure.keyvault.certificates.models.CertificateOperation
        :raises:
            :class:`~azure.core.exceptions.ResourceNotFoundError` if the certificate doesn't exist,
            :class:`~azure.core.exceptions.HttpResponseError` for other errors
        """
        bundle = await self._client.get_certificate_operation(
            vault_base_url=self.vault_endpoint, certificate_name=name, error_map=_error_map, **kwargs
        )
        return CertificateOperation._from_certificate_operation_bundle(certificate_operation_bundle=bundle)
    @distributed_trace_async
    async def delete_certificate_operation(self, name: str, **kwargs: "**Any") -> CertificateOperation:
        """Deletes the creation operation for a specific certificate.

        Deletes the creation operation for a specified certificate that is in
        the process of being created. The certificate is no longer created.
        This operation requires the certificates/update permission.

        :param str name: The name of the certificate.
        :return: The deleted CertificateOperation
        :rtype: ~azure.keyvault.certificates.models.CertificateOperation
        :raises:
            :class:`~azure.core.exceptions.ResourceNotFoundError` if the operation doesn't exist,
            :class:`~azure.core.exceptions.HttpResponseError` for other errors
        """
        bundle = await self._client.delete_certificate_operation(
            vault_base_url=self.vault_endpoint, certificate_name=name, error_map=_error_map, **kwargs
        )
        return CertificateOperation._from_certificate_operation_bundle(certificate_operation_bundle=bundle)
    @distributed_trace_async
    async def cancel_certificate_operation(self, name: str, **kwargs: "**Any") -> CertificateOperation:
        """Cancels a certificate operation.

        Cancels a certificate creation operation that is already in progress.
        This operation requires the certificates/update permission.

        :param str name: The name of the certificate.
        :returns: The cancelled certificate operation
        :rtype: ~azure.keyvault.certificates.models.CertificateOperation
        :raises: :class:`~azure.core.exceptions.HttpResponseError`
        """
        # Cancellation is expressed as an update with cancellation_requested=True.
        bundle = await self._client.update_certificate_operation(
            vault_base_url=self.vault_endpoint, certificate_name=name, cancellation_requested=True, **kwargs
        )
        return CertificateOperation._from_certificate_operation_bundle(certificate_operation_bundle=bundle)
    @distributed_trace_async
    async def merge_certificate(
        self, name: str, x509_certificates: List[bytearray], **kwargs: "**Any"
    ) -> KeyVaultCertificate:
        """Merges a certificate or a certificate chain with a key pair existing on the server.

        Performs the merging of a certificate or certificate chain with a key pair currently
        available in the service. This operation requires the certificates/create permission.
        Make sure when creating the certificate to merge using create_certificate that you set
        its issuer to 'Unknown'. This way Key Vault knows that the certificate will not be signed
        by an issuer known to it.

        :param str name: The name of the certificate
        :param x509_certificates: The certificate or the certificate chain to merge.
        :type x509_certificates: list[bytearray]
        :return: The merged certificate.
        :rtype: ~azure.keyvault.certificates.models.KeyVaultCertificate
        :raises: :class:`~azure.core.exceptions.HttpResponseError`

        Keyword arguments
            - *enabled (bool)* - Determines whether the object is enabled.
            - *tags (dict[str, str])* - Application specific metadata in the form of key-value pairs.
        """
        enabled = kwargs.pop("enabled", None)
        if enabled is not None:
            attributes = self._client.models.CertificateAttributes(enabled=enabled)
        else:
            attributes = None
        bundle = await self._client.merge_certificate(
            vault_base_url=self.vault_endpoint,
            certificate_name=name,
            x509_certificates=x509_certificates,
            certificate_attributes=attributes,
            **kwargs
        )
        return KeyVaultCertificate._from_certificate_bundle(certificate_bundle=bundle)
    @distributed_trace_async
    async def get_issuer(self, name: str, **kwargs: "**Any") -> CertificateIssuer:
        """Gets the specified certificate issuer.

        Returns the specified certificate issuer resources in the key vault.
        This operation requires the certificates/manageissuers/getissuers permission.

        :param str name: The name of the issuer.
        :return: The specified certificate issuer.
        :rtype: ~azure.keyvault.certificates.models.CertificateIssuer
        :raises:
            :class:`~azure.core.exceptions.ResourceNotFoundError` if the issuer doesn't exist,
            :class:`~azure.core.exceptions.HttpResponseError` for other errors

        Example:
            .. literalinclude:: ../tests/test_examples_certificates_async.py
                :start-after: [START get_issuer]
                :end-before: [END get_issuer]
                :language: python
                :caption: Get an issuer
                :dedent: 8
        """
        issuer_bundle = await self._client.get_certificate_issuer(
            vault_base_url=self.vault_endpoint, issuer_name=name, error_map=_error_map, **kwargs
        )
        return CertificateIssuer._from_issuer_bundle(issuer_bundle=issuer_bundle)
    @distributed_trace_async
    async def create_issuer(
        self, name: str, provider: str, **kwargs: "**Any"
    ) -> CertificateIssuer:
        """Sets the specified certificate issuer.

        The SetCertificateIssuer operation adds or updates the specified
        certificate issuer. This operation requires the certificates/setissuers
        permission.

        :param str name: The name of the issuer.
        :param str provider: The issuer provider.
        :returns: The created CertificateIssuer
        :rtype: ~azure.keyvault.certificates.models.CertificateIssuer
        :raises: :class:`~azure.core.exceptions.HttpResponseError`

        Keyword arguments
            - *enabled (bool)* - Determines whether the object is enabled.
            - *account_id (str)* - The user name/account name/account id.
            - *password (str)* - The password/secret/account key.
            - *organization_id (str)* - Id of the organization.
            - *admin_details (list[~azure.keyvault.certificates.models.AdministratorDetails])*
              - Details of the organization administrators of the certificate issuer.

        Example:
            .. literalinclude:: ../tests/test_examples_certificates_async.py
                :start-after: [START create_issuer]
                :end-before: [END create_issuer]
                :language: python
                :caption: Create an issuer
                :dedent: 8
        """
        enabled = kwargs.pop("enabled", None)
        account_id = kwargs.pop("account_id", None)
        password = kwargs.pop("password", None)
        organization_id = kwargs.pop("organization_id", None)
        admin_details = kwargs.pop("admin_details", None)
        # Only build the nested generated-model objects when the caller
        # actually supplied the corresponding values.
        if account_id or password:
            issuer_credentials = self._client.models.IssuerCredentials(account_id=account_id, password=password)
        else:
            issuer_credentials = None
        if admin_details and admin_details[0]:
            # Translate the public AdministratorDetails models into the
            # generated-client equivalents.
            admin_details_to_pass = list(
                self._client.models.AdministratorDetails(
                    first_name=admin_detail.first_name,
                    last_name=admin_detail.last_name,
                    email_address=admin_detail.email,
                    phone=admin_detail.phone,
                )
                for admin_detail in admin_details
            )
        else:
            admin_details_to_pass = admin_details
        if organization_id or admin_details:
            organization_details = self._client.models.OrganizationDetails(
                id=organization_id, admin_details=admin_details_to_pass
            )
        else:
            organization_details = None
        if enabled is not None:
            issuer_attributes = self._client.models.IssuerAttributes(enabled=enabled)
        else:
            issuer_attributes = None
        issuer_bundle = await self._client.set_certificate_issuer(
            vault_base_url=self.vault_endpoint,
            issuer_name=name,
            provider=provider,
            credentials=issuer_credentials,
            organization_details=organization_details,
            attributes=issuer_attributes,
            **kwargs
        )
        return CertificateIssuer._from_issuer_bundle(issuer_bundle=issuer_bundle)
@distributed_trace_async
async def update_issuer(self, name: str, **kwargs: "**Any") -> CertificateIssuer:
    """Updates the specified certificate issuer.
    Performs an update on the specified certificate issuer entity.
    This operation requires the certificates/setissuers permission.
    :param str name: The name of the issuer.
    :return: The updated issuer
    :rtype: ~azure.keyvault.certificates.models.CertificateIssuer
    :raises: :class:`~azure.core.exceptions.HttpResponseError`
    Keyword arguments
        - *enabled (bool)* - Determines whether the object is enabled.
        - *provider (str)* - The issuer provider.
        - *account_id (str)* - The user name/account name/account id.
        - *password (str)* - The password/secret/account key.
        - *organization_id (str)* - Id of the organization.
        - *admin_details (list[~azure.keyvault.certificates.models.AdministratorDetails])*
         - Details of the organization administrators of the certificate issuer.
    """
    enabled = kwargs.pop("enabled", None)
    provider = kwargs.pop("provider", None)
    account_id = kwargs.pop("account_id", None)
    password = kwargs.pop("password", None)
    organization_id = kwargs.pop("organization_id", None)
    admin_details = kwargs.pop("admin_details", None)
    # Only build a credentials model when at least one credential field was supplied.
    if account_id or password:
        issuer_credentials = self._client.models.IssuerCredentials(account_id=account_id, password=password)
    else:
        issuer_credentials = None
    # Convert the public AdministratorDetails models to generated-layer models.
    if admin_details and admin_details[0]:
        admin_details_to_pass = list(
            self._client.models.AdministratorDetails(
                first_name=admin_detail.first_name,
                last_name=admin_detail.last_name,
                email_address=admin_detail.email,
                phone=admin_detail.phone,
            )
            for admin_detail in admin_details
        )
    else:
        admin_details_to_pass = admin_details
    if organization_id or admin_details:
        organization_details = self._client.models.OrganizationDetails(
            id=organization_id, admin_details=admin_details_to_pass
        )
    else:
        organization_details = None
    if enabled is not None:
        issuer_attributes = self._client.models.IssuerAttributes(enabled=enabled)
    else:
        issuer_attributes = None
    issuer_bundle = await self._client.update_certificate_issuer(
        vault_base_url=self.vault_endpoint,
        issuer_name=name,
        provider=provider,
        credentials=issuer_credentials,
        organization_details=organization_details,
        attributes=issuer_attributes,
        **kwargs
    )
    return CertificateIssuer._from_issuer_bundle(issuer_bundle=issuer_bundle)
@distributed_trace_async
async def delete_issuer(self, name: str, **kwargs: "**Any") -> CertificateIssuer:
    """Deletes the specified certificate issuer.
    Permanently removes the named certificate issuer from the vault. Requires
    the certificates/manageissuers/deleteissuers permission.
    :param str name: The name of the issuer.
    :return: CertificateIssuer
    :rtype: ~azure.keyvault.certificates.models.CertificateIssuer
    :raises: :class:`~azure.core.exceptions.HttpResponseError`
    Example:
        .. literalinclude:: ../tests/test_examples_certificates_async.py
            :start-after: [START delete_issuer]
            :end-before: [END delete_issuer]
            :language: python
            :caption: Delete an issuer
            :dedent: 8
    """
    deleted_issuer_bundle = await self._client.delete_certificate_issuer(
        vault_base_url=self.vault_endpoint, issuer_name=name, **kwargs
    )
    return CertificateIssuer._from_issuer_bundle(issuer_bundle=deleted_issuer_bundle)
@distributed_trace
def list_issuers(self, **kwargs: "**Any") -> AsyncIterable[IssuerProperties]:
    """List certificate issuers for the key vault.
    Returns the set of certificate issuer resources in the key
    vault. This operation requires the certificates/manageissuers/getissuers
    permission.
    :return: An iterator-like instance of issuer properties
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.certificates.models.IssuerProperties]
    :raises: :class:`~azure.core.exceptions.HttpResponseError`
    Example:
        .. literalinclude:: ../tests/test_examples_certificates_async.py
            :start-after: [START list_issuers]
            :end-before: [END list_issuers]
            :language: python
            :caption: List issuers of a vault
            :dedent: 8
    """
    max_page_size = kwargs.pop("max_page_size", None)
    return self._client.get_certificate_issuers(
        vault_base_url=self.vault_endpoint,
        maxresults=max_page_size,
        cls=lambda objs: [IssuerProperties._from_issuer_item(x) for x in objs],
        **kwargs
    )
| # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
# pylint:disable=too-many-lines,too-many-public-methods
import base64
from typing import Any, AsyncIterable, Optional, Iterable, List, Dict, Union
from functools import partial
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.polling import async_poller
from azure.keyvault.certificates.models import (
KeyVaultCertificate,
CertificateOperation,
CertificatePolicy,
DeletedCertificate,
CertificateProperties,
CertificateContact,
CertificateIssuer,
IssuerProperties,
)
from ._polling_async import CreateCertificatePollerAsync
from .._shared import AsyncKeyVaultClientBase
from .._shared.exceptions import error_map as _error_map
class CertificateClient(AsyncKeyVaultClientBase):
"""A high-level asynchronous interface for managing a vault's certificates.
:param str vault_endpoint: URL of the vault the client will access
:param credential: An object which can provide an access token for the vault, such as a credential from
:mod:`azure.identity.aio`
Keyword arguments
- **api_version**: version of the Key Vault API to use. Defaults to the most recent.
- **transport**: :class:`~azure.core.pipeline.transport.AsyncHttpTransport` to use. Defaults to
:class:`~azure.core.pipeline.transport.AioHttpTransport`.
Example:
.. literalinclude:: ../tests/test_examples_certificates_async.py
:start-after: [START create_certificate_client]
:end-before: [END create_certificate_client]
:language: python
:dedent: 4
:caption: Creates a new instance of the Certificate client
"""
# pylint:disable=protected-access
@distributed_trace_async
async def create_certificate(
    self, name: str, policy: CertificatePolicy, **kwargs: "**Any"
) -> Union[KeyVaultCertificate, CertificateOperation]:
    """Creates a new certificate.
    If this is the first version, the certificate resource is created. Requires
    the certificates/create permission.
    :param str name: The name of the certificate.
    :param policy: The management policy for the certificate.
    :type policy:
     ~azure.keyvault.certificates.models.CertificatePolicy
    :returns: A coroutine for the creation of the certificate. Awaiting the coroutine
     returns the created KeyVaultCertificate if creation is successful, the CertificateOperation if not.
    :rtype: ~azure.keyvault.certificates.models.KeyVaultCertificate or
     ~azure.keyvault.certificates.models.CertificateOperation
    :raises: :class:`~azure.core.exceptions.HttpResponseError`
    Keyword arguments
        - *enabled (bool)* - Determines whether the object is enabled.
        - *tags (dict[str, str])* - Application specific metadata in the form of key-value pairs.
    Example:
        .. literalinclude:: ../tests/test_examples_certificates_async.py
            :start-after: [START create_certificate]
            :end-before: [END create_certificate]
            :language: python
            :caption: Create a certificate
            :dedent: 8
    """
    enabled = kwargs.pop("enabled", None)
    tags = kwargs.pop("tags", None)
    attributes = None if enabled is None else self._client.models.CertificateAttributes(enabled=enabled)
    cert_bundle = await self._client.create_certificate(
        vault_base_url=self.vault_endpoint,
        certificate_name=name,
        certificate_policy=policy._to_certificate_policy_bundle(),
        certificate_attributes=attributes,
        tags=tags,
        **kwargs
    )
    pending_operation = CertificateOperation._from_certificate_operation_bundle(cert_bundle)
    # Poll the pending operation; the poller resolves to the certificate on success.
    status_command = partial(self.get_certificate_operation, name=name, **kwargs)
    fetch_certificate = partial(self.get_certificate, name=name, **kwargs)
    polling_method = CreateCertificatePollerAsync(get_certificate_command=fetch_certificate)
    return await async_poller(status_command, pending_operation, None, polling_method)
@distributed_trace_async
async def get_certificate(self, name: str, **kwargs: "**Any") -> KeyVaultCertificate:
    """Gets a certificate with its management policy attached.
    Requires the certificates/get permission. Always fetches the latest
    version; to retrieve a specific version (without the policy), use
    get_certificate_version instead.
    :param str name: The name of the certificate in the given vault.
    :returns: An instance of KeyVaultCertificate
    :rtype: ~azure.keyvault.certificates.models.KeyVaultCertificate
    :raises:
        :class:`~azure.core.exceptions.ResourceNotFoundError` if the certificate doesn't exist,
        :class:`~azure.core.exceptions.HttpResponseError` for other errors
    Example:
        .. literalinclude:: ../tests/test_examples_certificates_async.py
            :start-after: [START get_certificate]
            :end-before: [END get_certificate]
            :language: python
            :caption: Get a certificate
            :dedent: 8
    """
    # Empty version string means "latest version" to the service.
    certificate_bundle = await self._client.get_certificate(
        vault_base_url=self.vault_endpoint,
        certificate_name=name,
        certificate_version="",
        error_map=_error_map,
        **kwargs
    )
    return KeyVaultCertificate._from_certificate_bundle(certificate_bundle=certificate_bundle)
@distributed_trace_async
async def get_certificate_version(self, name: str, version: str, **kwargs: "**Any") -> KeyVaultCertificate:
    """Gets a specific version of a certificate without returning its management policy.
    To fetch the latest version, or to get the policy as well, use
    get_certificate instead.
    :param str name: The name of the certificate in the given vault.
    :param str version: The version of the certificate.
    :returns: An instance of KeyVaultCertificate
    :rtype: ~azure.keyvault.certificates.models.KeyVaultCertificate
    :raises:
        :class:`~azure.core.exceptions.ResourceNotFoundError` if the certificate doesn't exist,
        :class:`~azure.core.exceptions.HttpResponseError` for other errors
    Example:
        .. literalinclude:: ../tests/test_examples_certificates_async.py
            :start-after: [START get_certificate]
            :end-before: [END get_certificate]
            :language: python
            :caption: Get a certificate
            :dedent: 8
    """
    versioned_bundle = await self._client.get_certificate(
        vault_base_url=self.vault_endpoint,
        certificate_name=name,
        certificate_version=version,
        error_map=_error_map,
        **kwargs
    )
    return KeyVaultCertificate._from_certificate_bundle(certificate_bundle=versioned_bundle)
@distributed_trace_async
async def delete_certificate(self, name: str, **kwargs: "**Any") -> DeletedCertificate:
    """Deletes a certificate from the key vault.
    Removes all versions of a certificate object along with its associated
    policy; individual versions cannot be deleted. Requires the
    certificates/delete permission.
    :param str name: The name of the certificate.
    :returns: The deleted certificate
    :rtype: ~azure.keyvault.certificates.models.DeletedCertificate
    :raises:
        :class:`~azure.core.exceptions.ResourceNotFoundError` if the certificate doesn't exist,
        :class:`~azure.core.exceptions.HttpResponseError` for other errors
    Example:
        .. literalinclude:: ../tests/test_examples_certificates_async.py
            :start-after: [START delete_certificate]
            :end-before: [END delete_certificate]
            :language: python
            :caption: Delete a certificate
            :dedent: 8
    """
    deleted_bundle = await self._client.delete_certificate(
        vault_base_url=self.vault_endpoint, certificate_name=name, error_map=_error_map, **kwargs
    )
    return DeletedCertificate._from_deleted_certificate_bundle(deleted_certificate_bundle=deleted_bundle)
@distributed_trace_async
async def get_deleted_certificate(self, name: str, **kwargs: "**Any") -> DeletedCertificate:
    """Retrieves information about the specified deleted certificate.
    Returns the deleted certificate together with deletion-specific attributes
    such as retention interval, scheduled permanent deletion, and the current
    deletion recovery level. Requires the certificates/get permission.
    :param str name: The name of the certificate.
    :return: The deleted certificate
    :rtype: ~azure.keyvault.certificates.models.DeletedCertificate
    :raises:
        :class:`~azure.core.exceptions.ResourceNotFoundError` if the certificate doesn't exist,
        :class:`~azure.core.exceptions.HttpResponseError` for other errors
    Example:
        .. literalinclude:: ../tests/test_examples_certificates_async.py
            :start-after: [START get_deleted_certificate]
            :end-before: [END get_deleted_certificate]
            :language: python
            :caption: Get a deleted certificate
            :dedent: 8
    """
    deleted_bundle = await self._client.get_deleted_certificate(
        vault_base_url=self.vault_endpoint, certificate_name=name, error_map=_error_map, **kwargs
    )
    return DeletedCertificate._from_deleted_certificate_bundle(deleted_certificate_bundle=deleted_bundle)
@distributed_trace_async
async def purge_deleted_certificate(self, name: str, **kwargs: "**Any") -> None:
    """Permanently deletes the specified deleted certificate.
    Performs an irreversible deletion of the specified certificate, without
    possibility for recovery. The operation is not available if the recovery
    level does not specify 'Purgeable'. This operation requires the
    certificates/purge permission.
    :param str name: The name of the certificate
    :return: None
    :rtype: None
    :raises: :class:`~azure.core.exceptions.HttpResponseError`
    """
    await self._client.purge_deleted_certificate(
        vault_base_url=self.vault_endpoint, certificate_name=name, **kwargs
    )
@distributed_trace_async
async def recover_deleted_certificate(self, name: str, **kwargs: "**Any") -> KeyVaultCertificate:
    """Recovers the deleted certificate back to its current version under
    /certificates.
    Performs the reversal of the Delete operation. The operation is applicable
    in vaults enabled for soft-delete, and must be issued during the retention
    interval (available in the deleted certificate's attributes). This operation
    requires the certificates/recover permission.
    :param str name: The name of the deleted certificate
    :return: The recovered certificate
    :rtype: ~azure.keyvault.certificates.models.KeyVaultCertificate
    :raises: :class:`~azure.core.exceptions.HttpResponseError`
    Example:
        .. literalinclude:: ../tests/test_examples_certificates_async.py
            :start-after: [START recover_deleted_certificate]
            :end-before: [END recover_deleted_certificate]
            :language: python
            :caption: Recover a deleted certificate
            :dedent: 8
    """
    bundle = await self._client.recover_deleted_certificate(
        vault_base_url=self.vault_endpoint, certificate_name=name, **kwargs
    )
    return KeyVaultCertificate._from_certificate_bundle(certificate_bundle=bundle)
@distributed_trace_async
async def import_certificate(
    self, name: str, certificate_bytes: bytes, **kwargs: "**Any"
) -> KeyVaultCertificate:
    """Imports a certificate into a specified key vault.
    Imports an existing valid certificate, containing a private key, into
    Azure Key Vault. The certificate to be imported can be in either PFX or
    PEM format. If the certificate is in PEM format the PEM file must
    contain the key as well as x509 certificates. This operation requires
    the certificates/import permission.
    :param str name: The name of the certificate.
    :param bytes certificate_bytes: Bytes of the certificate object to import.
     This certificate needs to contain the private key.
    :returns: The imported KeyVaultCertificate
    :rtype: ~azure.keyvault.certificates.models.KeyVaultCertificate
    :raises: :class:`~azure.core.exceptions.HttpResponseError`
    Keyword arguments
        - *enabled (bool)* - Determines whether the object is enabled.
        - *tags (dict[str, str])* - Application specific metadata in the form of key-value pairs.
        - *password (str)* - If the private key in the passed in certificate is encrypted,
         it is the password used for encryption.
        - *policy (~azure.keyvault.certificates.models.CertificatePolicy)* - The management policy
         for the certificate.
    """
    enabled = kwargs.pop("enabled", None)
    password = kwargs.pop("password", None)
    policy = kwargs.pop("policy", None)
    if enabled is not None:
        attributes = self._client.models.CertificateAttributes(enabled=enabled)
    else:
        attributes = None
    # The service expects the certificate content base64-encoded.
    base64_encoded_certificate = base64.b64encode(certificate_bytes).decode("utf-8")
    bundle = await self._client.import_certificate(
        vault_base_url=self.vault_endpoint,
        certificate_name=name,
        base64_encoded_certificate=base64_encoded_certificate,
        password=password,
        # Convert only when a policy was supplied; the original unbound call
        # CertificatePolicy._to_certificate_policy_bundle(policy) crashed when
        # the optional 'policy' kwarg was omitted (self=None).
        certificate_policy=policy._to_certificate_policy_bundle() if policy else None,
        certificate_attributes=attributes,
        **kwargs
    )
    return KeyVaultCertificate._from_certificate_bundle(certificate_bundle=bundle)
@distributed_trace_async
async def get_policy(self, certificate_name: str, **kwargs: "**Any") -> CertificatePolicy:
    """Gets the policy for a certificate.
    Returns the certificate policy resource for the named certificate.
    Requires the certificates/get permission.
    :param str certificate_name: The name of the certificate in a given key vault.
    :return: The certificate policy
    :rtype: ~azure.keyvault.certificates.models.CertificatePolicy
    :raises: :class:`~azure.core.exceptions.HttpResponseError`
    """
    policy_bundle = await self._client.get_certificate_policy(
        vault_base_url=self.vault_endpoint, certificate_name=certificate_name, **kwargs
    )
    return CertificatePolicy._from_certificate_policy_bundle(certificate_policy_bundle=policy_bundle)
@distributed_trace_async
async def update_policy(
    self, certificate_name: str, policy: CertificatePolicy, **kwargs: "**Any"
) -> CertificatePolicy:
    """Updates the policy for a certificate.
    Set specified members in the certificate policy. Leaves others as null.
    This operation requires the certificates/update permission.
    :param str certificate_name: The name of the certificate in the given vault.
    :param policy: The policy for the certificate.
    :type policy: ~azure.keyvault.certificates.models.CertificatePolicy
    :return: The certificate policy
    :rtype: ~azure.keyvault.certificates.models.CertificatePolicy
    :raises: :class:`~azure.core.exceptions.HttpResponseError`
    """
    bundle = await self._client.update_certificate_policy(
        vault_base_url=self.vault_endpoint,
        certificate_name=certificate_name,
        certificate_policy=policy._to_certificate_policy_bundle(),
        **kwargs
    )
    return CertificatePolicy._from_certificate_policy_bundle(certificate_policy_bundle=bundle)
@distributed_trace_async
async def update_certificate_properties(
    self, name: str, version: Optional[str] = None, **kwargs: "**Any"
) -> KeyVaultCertificate:
    """Updates the specified attributes associated with the given certificate.
    Applies the requested update to the given certificate; only the
    certificate's attributes are updated. Requires the certificates/update
    permission.
    :param str name: The name of the certificate in the given key vault.
    :param str version: The version of the certificate.
    :returns: The updated KeyVaultCertificate
    :rtype: ~azure.keyvault.certificates.models.KeyVaultCertificate
    :raises: :class:`~azure.core.exceptions.HttpResponseError`
    Keyword arguments
        - *enabled (bool)* - Determines whether the object is enabled.
        - *tags (dict[str, str])* - Application specific metadata in the form of key-value pairs.
    Example:
        .. literalinclude:: ../tests/test_examples_certificates_async.py
            :start-after: [START update_certificate]
            :end-before: [END update_certificate]
            :language: python
            :caption: Update a certificate's attributes
            :dedent: 8
    """
    enabled = kwargs.pop("enabled", None)
    attributes = None if enabled is None else self._client.models.CertificateAttributes(enabled=enabled)
    # An empty version string targets the latest version.
    updated_bundle = await self._client.update_certificate(
        vault_base_url=self.vault_endpoint,
        certificate_name=name,
        certificate_version=version or "",
        certificate_attributes=attributes,
        **kwargs
    )
    return KeyVaultCertificate._from_certificate_bundle(certificate_bundle=updated_bundle)
@distributed_trace_async
async def backup_certificate(self, name: str, **kwargs: "**Any") -> bytes:
    """Backs up the specified certificate.
    Requests a backup of the specified certificate for download by the
    client; all versions of the certificate are included. Requires the
    certificates/backup permission.
    :param str name: The name of the certificate.
    :return: the backup blob containing the backed up certificate.
    :rtype: bytes
    :raises:
        :class:`~azure.core.exceptions.ResourceNotFoundError` if the certificate doesn't exist,
        :class:`~azure.core.exceptions.HttpResponseError` for other errors
    Example:
        .. literalinclude:: ../tests/test_examples_certificates_async.py
            :start-after: [START backup_certificate]
            :end-before: [END backup_certificate]
            :language: python
            :caption: Get a certificate backup
            :dedent: 8
    """
    backup_operation_result = await self._client.backup_certificate(
        vault_base_url=self.vault_endpoint, certificate_name=name, error_map=_error_map, **kwargs
    )
    # The service wraps the blob in a result object; return only the blob.
    return backup_operation_result.value
@distributed_trace_async
async def restore_certificate_backup(self, backup: bytes, **kwargs: "**Any") -> KeyVaultCertificate:
    """Restores a backed up certificate to a vault.
    Restores a backed up certificate, and all its versions, to a vault.
    Requires the certificates/restore permission.
    :param bytes backup: The backup blob associated with a certificate bundle.
    :return: The restored KeyVaultCertificate
    :rtype: ~azure.keyvault.certificates.models.KeyVaultCertificate
    :raises: :class:`~azure.core.exceptions.HttpResponseError`
    Example:
        .. literalinclude:: ../tests/test_examples_certificates_async.py
            :start-after: [START restore_certificate]
            :end-before: [END restore_certificate]
            :language: python
            :caption: Restore a certificate backup
            :dedent: 8
    """
    restored_bundle = await self._client.restore_certificate(
        vault_base_url=self.vault_endpoint, certificate_bundle_backup=backup, **kwargs
    )
    return KeyVaultCertificate._from_certificate_bundle(certificate_bundle=restored_bundle)
@distributed_trace
def list_deleted_certificates(self, **kwargs: "**Any") -> AsyncIterable[DeletedCertificate]:
    """Lists the deleted certificates in the specified vault currently
    available for recovery.
    Retrieves the certificates in the current vault which are in a deleted
    state and ready for recovery or purging. This operation includes
    deletion-specific information. This operation requires the certificates/get/list
    permission. This operation can only be enabled on soft-delete enabled vaults.
    :return: An iterator like instance of DeletedCertificate
    :rtype:
     ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.certificates.models.DeletedCertificate]
    :raises: :class:`~azure.core.exceptions.HttpResponseError`
    Keyword arguments
        - *include_pending (bool)* - Specifies whether to include certificates which are
         not completely deleted.
    Example:
        .. literalinclude:: ../tests/test_examples_certificates_async.py
            :start-after: [START list_deleted_certificates]
            :end-before: [END list_deleted_certificates]
            :language: python
            :caption: List all the deleted certificates
            :dedent: 8
    """
    max_page_size = kwargs.pop("max_page_size", None)
    # Use the public vault_endpoint property for consistency with the other methods.
    return self._client.get_deleted_certificates(
        vault_base_url=self.vault_endpoint,
        maxresults=max_page_size,
        cls=lambda objs: [DeletedCertificate._from_deleted_certificate_item(x) for x in objs],
        **kwargs
    )
@distributed_trace
def list_certificates(self, **kwargs: "**Any") -> AsyncIterable[CertificateProperties]:
    """List certificates in the key vault.
    The GetCertificates operation returns the set of certificates resources
    in the key vault. This operation requires the
    certificates/list permission.
    :returns: An iterator like instance of CertificateProperties
    :rtype:
     ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.certificates.models.CertificateProperties]
    :raises: :class:`~azure.core.exceptions.HttpResponseError`
    Keyword arguments
        - *include_pending (bool)* - Specifies whether to include certificates which are
         not completely provisioned.
    Example:
        .. literalinclude:: ../tests/test_examples_certificates_async.py
            :start-after: [START list_certificates]
            :end-before: [END list_certificates]
            :language: python
            :caption: List all certificates
            :dedent: 8
    """
    max_page_size = kwargs.pop("max_page_size", None)
    # Use the public vault_endpoint property for consistency with the other methods.
    return self._client.get_certificates(
        vault_base_url=self.vault_endpoint,
        maxresults=max_page_size,
        cls=lambda objs: [CertificateProperties._from_certificate_item(x) for x in objs],
        **kwargs
    )
@distributed_trace
def list_certificate_versions(self, name: str, **kwargs: "**Any") -> AsyncIterable[CertificateProperties]:
    """List the versions of a certificate.
    The GetCertificateVersions operation returns the versions of a
    certificate in the key vault. This operation requires the
    certificates/list permission.
    :param str name: The name of the certificate.
    :returns: An iterator like instance of CertificateProperties
    :rtype:
     ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.certificates.models.CertificateProperties]
    :raises: :class:`~azure.core.exceptions.HttpResponseError`
    Example:
        .. literalinclude:: ../tests/test_examples_certificates_async.py
            :start-after: [START list_certificate_versions]
            :end-before: [END list_certificate_versions]
            :language: python
            :caption: List all versions of a certificate
            :dedent: 8
    """
    max_page_size = kwargs.pop("max_page_size", None)
    # Use the public vault_endpoint property for consistency with the other methods.
    return self._client.get_certificate_versions(
        vault_base_url=self.vault_endpoint,
        certificate_name=name,
        maxresults=max_page_size,
        cls=lambda objs: [CertificateProperties._from_certificate_item(x) for x in objs],
        **kwargs
    )
@distributed_trace_async
async def create_contacts(
    self, contacts: Iterable[CertificateContact], **kwargs: "**Any"
) -> List[CertificateContact]:
    # pylint:disable=unsubscriptable-object
    # disabled unsubscriptable-object because of pylint bug referenced here:
    # https://github.com/PyCQA/pylint/issues/2377
    """Sets the certificate contacts for the key vault.
    Sets the certificate contacts for the key vault. This
    operation requires the certificates/managecontacts permission.
    :param contacts: The contact list for the vault certificates.
    :type contacts: list[~azure.keyvault.certificates.models.CertificateContact]
    :returns: The created list of contacts
    :rtype: list[~azure.keyvault.certificates.models.CertificateContact]
    :raises: :class:`~azure.core.exceptions.HttpResponseError`
    Example:
        .. literalinclude:: ../tests/test_examples_certificates_async.py
            :start-after: [START create_contacts]
            :end-before: [END create_contacts]
            :language: python
            :caption: Create contacts
            :dedent: 8
    """
    # Bind the response to its own name instead of shadowing the 'contacts' parameter.
    response = await self._client.set_certificate_contacts(
        vault_base_url=self.vault_endpoint,
        contact_list=[c._to_certificate_contacts_item() for c in contacts],
        **kwargs
    )
    return [CertificateContact._from_certificate_contacts_item(contact_item=item) for item in response.contact_list]
@distributed_trace_async
async def get_contacts(self, **kwargs: "**Any") -> List[CertificateContact]:
    # pylint:disable=unsubscriptable-object
    # disabled unsubscriptable-object because of pylint bug referenced here:
    # https://github.com/PyCQA/pylint/issues/2377
    """Gets the certificate contacts for the key vault.
    Returns the set of certificate contact resources in the specified
    key vault. This operation requires the certificates/managecontacts
    permission.
    :return: The certificate contacts for the key vault.
    :rtype: list[~azure.keyvault.certificates.models.CertificateContact]
    :raises: :class:`~azure.core.exceptions.HttpResponseError`
    Example:
        .. literalinclude:: ../tests/test_examples_certificates_async.py
            :start-after: [START get_contacts]
            :end-before: [END get_contacts]
            :language: python
            :caption: Get contacts
            :dedent: 8
    """
    # Use the public vault_endpoint property for consistency with the other methods.
    contacts = await self._client.get_certificate_contacts(vault_base_url=self.vault_endpoint, **kwargs)
    return [CertificateContact._from_certificate_contacts_item(contact_item=item) for item in contacts.contact_list]
@distributed_trace_async
async def delete_contacts(self, **kwargs: "**Any") -> List[CertificateContact]:
    # pylint:disable=unsubscriptable-object
    # disabled unsubscriptable-object because of pylint bug referenced here:
    # https://github.com/PyCQA/pylint/issues/2377
    """Deletes the certificate contacts for the key vault.
    Removes the certificate contacts for the key vault certificate.
    Requires the certificates/managecontacts permission.
    :return: Contacts
    :rtype: list[~azure.keyvault.certificates.models.CertificateContact]
    :raises: :class:`~azure.core.exceptions.HttpResponseError`
    Example:
        .. literalinclude:: ../tests/test_examples_certificates_async.py
            :start-after: [START delete_contacts]
            :end-before: [END delete_contacts]
            :language: python
            :caption: Delete contacts
            :dedent: 8
    """
    deleted_contacts = await self._client.delete_certificate_contacts(vault_base_url=self.vault_endpoint, **kwargs)
    return [
        CertificateContact._from_certificate_contacts_item(contact_item=item)
        for item in deleted_contacts.contact_list
    ]
@distributed_trace_async
async def get_certificate_operation(self, name: str, **kwargs: "**Any") -> CertificateOperation:
    """Gets the creation operation of a certificate.
    Returns the creation operation associated with the named certificate.
    Requires the certificates/get permission.
    :param str name: The name of the certificate.
    :returns: The created CertificateOperation
    :rtype: ~azure.keyvault.certificates.models.CertificateOperation
    :raises:
        :class:`~azure.core.exceptions.ResourceNotFoundError` if the certificate doesn't exist,
        :class:`~azure.core.exceptions.HttpResponseError` for other errors
    """
    operation_bundle = await self._client.get_certificate_operation(
        vault_base_url=self.vault_endpoint, certificate_name=name, error_map=_error_map, **kwargs
    )
    return CertificateOperation._from_certificate_operation_bundle(certificate_operation_bundle=operation_bundle)
@distributed_trace_async
async def delete_certificate_operation(self, name: str, **kwargs: "**Any") -> CertificateOperation:
    """Deletes the creation operation for a specific certificate.
    Removes the creation operation for a certificate that is in the process
    of being created; the certificate is no longer created. Requires the
    certificates/update permission.
    :param str name: The name of the certificate.
    :return: The deleted CertificateOperation
    :rtype: ~azure.keyvault.certificates.models.CertificateOperation
    :raises:
        :class:`~azure.core.exceptions.ResourceNotFoundError` if the operation doesn't exist,
        :class:`~azure.core.exceptions.HttpResponseError` for other errors
    """
    operation_bundle = await self._client.delete_certificate_operation(
        vault_base_url=self.vault_endpoint, certificate_name=name, error_map=_error_map, **kwargs
    )
    return CertificateOperation._from_certificate_operation_bundle(certificate_operation_bundle=operation_bundle)
@distributed_trace_async
async def cancel_certificate_operation(self, name: str, **kwargs: "**Any") -> CertificateOperation:
    """Cancels a certificate operation.
    Cancels an in-progress certificate creation operation. Requires the
    certificates/update permission.
    :param str name: The name of the certificate.
    :returns: The cancelled certificate operation
    :rtype: ~azure.keyvault.certificates.models.CertificateOperation
    :raises: :class:`~azure.core.exceptions.HttpResponseError`
    """
    # Cancellation is expressed as an update with cancellation_requested=True.
    operation_bundle = await self._client.update_certificate_operation(
        vault_base_url=self.vault_endpoint, certificate_name=name, cancellation_requested=True, **kwargs
    )
    return CertificateOperation._from_certificate_operation_bundle(certificate_operation_bundle=operation_bundle)
@distributed_trace_async
async def merge_certificate(
    self, name: str, x509_certificates: List[bytearray], **kwargs: "**Any"
) -> KeyVaultCertificate:
    """Merges a certificate or a certificate chain with a key pair existing on the server.
    Performs the merging of a certificate or certificate chain with a key pair currently
    available in the service. This operation requires the certificates/create permission.
    Make sure when creating the certificate to merge using create_certificate that you set
    its issuer to 'Unknown'. This way Key Vault knows that the certificate will not be signed
    by an issuer known to it.
    :param str name: The name of the certificate
    :param x509_certificates: The certificate or the certificate chain to merge.
    :type x509_certificates: list[bytearray]
    :return: The merged certificate
    :rtype: ~azure.keyvault.certificates.models.KeyVaultCertificate
    :raises: :class:`~azure.core.exceptions.HttpResponseError`
    Keyword arguments
        - *enabled (bool)* - Determines whether the object is enabled.
        - *tags (dict[str, str])* - Application specific metadata in the form of key-value pairs.
    """
    enabled = kwargs.pop("enabled", None)
    if enabled is not None:
        attributes = self._client.models.CertificateAttributes(enabled=enabled)
    else:
        attributes = None
    bundle = await self._client.merge_certificate(
        vault_base_url=self.vault_endpoint,
        certificate_name=name,
        x509_certificates=x509_certificates,
        certificate_attributes=attributes,
        **kwargs
    )
    return KeyVaultCertificate._from_certificate_bundle(certificate_bundle=bundle)
@distributed_trace_async
async def get_issuer(self, name: str, **kwargs: "**Any") -> CertificateIssuer:
    """Gets the specified certificate issuer.

    Fetches the named certificate issuer resource from the key vault.
    This operation requires the certificates/manageissuers/getissuers
    permission.

    :param str name: The name of the issuer.
    :return: The specified certificate issuer.
    :rtype: ~azure.keyvault.certificates.models.CertificateIssuer
    :raises:
        :class:`~azure.core.exceptions.ResourceNotFoundError` if the issuer doesn't exist,
        :class:`~azure.core.exceptions.HttpResponseError` for other errors
    Example:
        .. literalinclude:: ../tests/test_examples_certificates_async.py
            :start-after: [START get_issuer]
            :end-before: [END get_issuer]
            :language: python
            :caption: Get an issuer
            :dedent: 8
    """
    fetched_bundle = await self._client.get_certificate_issuer(
        vault_base_url=self.vault_endpoint,
        issuer_name=name,
        error_map=_error_map,
        **kwargs
    )
    return CertificateIssuer._from_issuer_bundle(issuer_bundle=fetched_bundle)
@distributed_trace_async
async def create_issuer(
    self, name: str, provider: str, **kwargs: "**Any"
) -> CertificateIssuer:
    """Sets the specified certificate issuer.

    Adds or updates the named certificate issuer. This operation requires
    the certificates/setissuers permission.

    :param str name: The name of the issuer.
    :param str provider: The issuer provider.
    :returns: The created CertificateIssuer
    :rtype: ~azure.keyvault.certificates.models.CertificateIssuer
    :raises: :class:`~azure.core.exceptions.HttpResponseError`
    Keyword arguments
        - *enabled (bool)* - Determines whether the object is enabled.
        - *account_id (str)* - The user name/account name/account id.
        - *password (str)* - The password/secret/account key.
        - *organization_id (str)* - Id of the organization.
        - *admin_details (list[~azure.keyvault.certificates.models.AdministratorDetails])*
          - Details of the organization administrators of the certificate issuer.
    Example:
        .. literalinclude:: ../tests/test_examples_certificates_async.py
            :start-after: [START create_issuer]
            :end-before: [END create_issuer]
            :language: python
            :caption: Create an issuer
            :dedent: 8
    """
    enabled = kwargs.pop("enabled", None)
    account_id = kwargs.pop("account_id", None)
    password = kwargs.pop("password", None)
    organization_id = kwargs.pop("organization_id", None)
    admin_details = kwargs.pop("admin_details", None)

    # Credentials are only sent when at least one of the two fields is set.
    credentials = None
    if account_id or password:
        credentials = self._client.models.IssuerCredentials(account_id=account_id, password=password)

    # Translate caller-facing AdministratorDetails into generated models.
    if admin_details and admin_details[0]:
        admins_to_pass = [
            self._client.models.AdministratorDetails(
                first_name=admin.first_name,
                last_name=admin.last_name,
                email_address=admin.email,
                phone=admin.phone,
            )
            for admin in admin_details
        ]
    else:
        admins_to_pass = admin_details

    organization = None
    if organization_id or admin_details:
        organization = self._client.models.OrganizationDetails(
            id=organization_id, admin_details=admins_to_pass
        )

    attributes = None
    if enabled is not None:
        attributes = self._client.models.IssuerAttributes(enabled=enabled)

    issuer_bundle = await self._client.set_certificate_issuer(
        vault_base_url=self.vault_endpoint,
        issuer_name=name,
        provider=provider,
        credentials=credentials,
        organization_details=organization,
        attributes=attributes,
        **kwargs
    )
    return CertificateIssuer._from_issuer_bundle(issuer_bundle=issuer_bundle)
@distributed_trace_async
async def update_issuer(self, name: str, **kwargs: "**Any") -> CertificateIssuer:
    """Updates the specified certificate issuer.
    Performs an update on the specified certificate issuer entity.
    This operation requires the certificates/setissuers permission.
    :param str name: The name of the issuer.
    :return: The updated issuer
    :rtype: ~azure.keyvault.certificates.models.CertificateIssuer
    :raises: :class:`~azure.core.exceptions.HttpResponseError`
    Keyword arguments
        - *enabled (bool)* - Determines whether the object is enabled.
        - *provider (str)* - The issuer provider.
        - *account_id (str)* - The user name/account name/account id.
        - *password (str)* - The password/secret/account key.
        - *organization_id (str)* - Id of the organization.
        - *admin_details (list[~azure.keyvault.certificates.models.AdministratorDetails])*
          - Details of the organization administrators of the certificate issuer.
    """
    enabled = kwargs.pop("enabled", None)
    provider = kwargs.pop("provider", None)
    account_id = kwargs.pop("account_id", None)
    password = kwargs.pop("password", None)
    organization_id = kwargs.pop("organization_id", None)
    admin_details = kwargs.pop("admin_details", None)
    # Credentials object is only built if either field was supplied.
    if account_id or password:
        issuer_credentials = self._client.models.IssuerCredentials(account_id=account_id, password=password)
    else:
        issuer_credentials = None
    # Convert caller-facing AdministratorDetails into the generated models.
    if admin_details and admin_details[0]:
        admin_details_to_pass = list(
            self._client.models.AdministratorDetails(
                first_name=admin_detail.first_name,
                last_name=admin_detail.last_name,
                email_address=admin_detail.email,
                phone=admin_detail.phone,
            )
            for admin_detail in admin_details
        )
    else:
        admin_details_to_pass = admin_details
    if organization_id or admin_details:
        organization_details = self._client.models.OrganizationDetails(
            id=organization_id, admin_details=admin_details_to_pass
        )
    else:
        organization_details = None
    if enabled is not None:
        issuer_attributes = self._client.models.IssuerAttributes(enabled=enabled)
    else:
        issuer_attributes = None
    issuer_bundle = await self._client.update_certificate_issuer(
        vault_base_url=self.vault_endpoint,
        issuer_name=name,
        provider=provider,
        credentials=issuer_credentials,
        organization_details=organization_details,
        attributes=issuer_attributes,
        **kwargs
    )
    return CertificateIssuer._from_issuer_bundle(issuer_bundle=issuer_bundle)
@distributed_trace_async
async def delete_issuer(self, name: str, **kwargs: "**Any") -> CertificateIssuer:
    """Deletes the specified certificate issuer.

    Permanently removes the named certificate issuer from the vault.
    This operation requires the certificates/manageissuers/deleteissuers
    permission.

    :param str name: The name of the issuer.
    :return: CertificateIssuer
    :rtype: ~azure.keyvault.certificates.models.CertificateIssuer
    :raises: :class:`~azure.core.exceptions.HttpResponseError`
    Example:
        .. literalinclude:: ../tests/test_examples_certificates_async.py
            :start-after: [START delete_issuer]
            :end-before: [END delete_issuer]
            :language: python
            :caption: Delete an issuer
            :dedent: 8
    """
    deleted_bundle = await self._client.delete_certificate_issuer(
        vault_base_url=self.vault_endpoint,
        issuer_name=name,
        **kwargs
    )
    return CertificateIssuer._from_issuer_bundle(issuer_bundle=deleted_bundle)
@distributed_trace
def list_issuers(self, **kwargs: "**Any") -> AsyncIterable[IssuerProperties]:
    """List certificate issuers for the key vault.
    Returns the set of certificate issuer resources in the key
    vault. This operation requires the certificates/manageissuers/getissuers
    permission.
    :return: An iterator-like instance of IssuerProperties
    :rtype: AsyncIterable[~azure.keyvault.certificates.models.IssuerProperties]
    :raises: :class:`~azure.core.exceptions.HttpResponseError`
    Example:
        .. literalinclude:: ../tests/test_examples_certificates_async.py
            :start-after: [START list_issuers]
            :end-before: [END list_issuers]
            :language: python
            :caption: List issuers of a vault
            :dedent: 8
    """
    # cls converts each generated issuer item into the public IssuerProperties type.
    max_page_size = kwargs.pop("max_page_size", None)
    return self._client.get_certificate_issuers(
        vault_base_url=self.vault_endpoint,
        maxresults=max_page_size,
        cls=lambda objs: [IssuerProperties._from_issuer_item(x) for x in objs],
        **kwargs
    )
| en | 0.655296 | # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ # pylint:disable=too-many-lines,too-many-public-methods A high-level asynchronous interface for managing a vault's certificates. :param str vault_endpoint: URL of the vault the client will access :param credential: An object which can provide an access token for the vault, such as a credential from :mod:`azure.identity.aio` Keyword arguments - **api_version**: version of the Key Vault API to use. Defaults to the most recent. - **transport**: :class:`~azure.core.pipeline.transport.AsyncHttpTransport` to use. Defaults to :class:`~azure.core.pipeline.transport.AioHttpTransport`. Example: .. literalinclude:: ../tests/test_examples_certificates_async.py :start-after: [START create_certificate_client] :end-before: [END create_certificate_client] :language: python :dedent: 4 :caption: Creates a new instance of the Certificate client # pylint:disable=protected-access Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires the certificates/create permission. :param str name: The name of the certificate. :param policy: The management policy for the certificate. :type policy: ~azure.keyvault.certificates.models.CertificatePolicy :returns: A coroutine for the creation of the certificate. Awaiting the coroutine returns the created KeyVaultCertificate if creation is successful, the CertificateOperation if not. :rtype: ~azure.keyvault.certificates.models.KeyVaultCertificate or ~azure.keyvault.certificates.models.CertificateOperation :raises: :class:`~azure.core.exceptions.HttpResponseError` Keyword arguments - *enabled (bool)* - Determines whether the object is enabled. - *tags (dict[str, str])* - Application specific metadata in the form of key-value pairs. Example: .. 
literalinclude:: ../tests/test_examples_certificates_async.py :start-after: [START create_certificate] :end-before: [END create_certificate] :language: python :caption: Create a certificate :dedent: 8 Gets a certificate with its management policy attached. This operation requires the certificates/get permission. Does not accept the version of the certificate as a parameter. If you wish to specify version, use the get_certificate_version function and specify the desired version. :param str name: The name of the certificate in the given vault. :returns: An instance of KeyVaultCertificate :rtype: ~azure.keyvault.certificates.models.KeyVaultCertificate :raises: :class:`~azure.core.exceptions.ResourceNotFoundError` if the certificate doesn't exist, :class:`~azure.core.exceptions.HttpResponseError` for other errors Example: .. literalinclude:: ../tests/test_examples_certificates_async.py :start-after: [START get_certificate] :end-before: [END get_certificate] :language: python :caption: Get a certificate :dedent: 8 Gets a specific version of a certificate without returning its management policy. If you wish to get the latest version of your certificate, or to get the certificate's policy as well, use the get_certificate function. :param str name: The name of the certificate in the given vault. :param str version: The version of the certificate. :returns: An instance of KeyVaultCertificate :rtype: ~azure.keyvault.certificates.models.KeyVaultCertificate :raises: :class:`~azure.core.exceptions.ResourceNotFoundError` if the certificate doesn't exist, :class:`~azure.core.exceptions.HttpResponseError` for other errors Example: .. literalinclude:: ../tests/test_examples_certificates_async.py :start-after: [START get_certificate] :end-before: [END get_certificate] :language: python :caption: Get a certificate :dedent: 8 Deletes a certificate from the key vault. Deletes all versions of a certificate object along with its associated policy. 
Delete certificate cannot be used to remove individual versions of a certificate object. This operation requires the certificates/delete permission. :param str name: The name of the certificate. :returns: The deleted certificate :rtype: ~azure.keyvault.certificates.models.DeletedCertificate :raises: :class:`~azure.core.exceptions.ResourceNotFoundError` if the certificate doesn't exist, :class:`~azure.core.exceptions.HttpResponseError` for other errors Example: .. literalinclude:: ../tests/test_examples_certificates_async.py :start-after: [START delete_certificate] :end-before: [END delete_certificate] :language: python :caption: Delete a certificate :dedent: 8 Retrieves information about the specified deleted certificate. Retrieves the deleted certificate information plus its attributes, such as retention interval, scheduled permanent deletion, and the current deletion recovery level. This operation requires the certificates/ get permission. :param str name: The name of the certificate. :return: The deleted certificate :rtype: ~azure.keyvault.certificates.models.DeletedCertificate :raises: :class:`~azure.core.exceptions.ResourceNotFoundError` if the certificate doesn't exist, :class:`~azure.core.exceptions.HttpResponseError` for other errors Example: .. literalinclude:: ../tests/test_examples_certificates_async.py :start-after: [START get_deleted_certificate] :end-before: [END get_deleted_certificate] :language: python :caption: Get a deleted certificate :dedent: 8 Permanently deletes the specified deleted certificate. Performs an irreversible deletion of the specified certificate, without possibility for recovery. The operation is not available if the recovery level does not specified 'Purgeable'. This operation requires the certificate/purge permission. :param str name: The name of the certificate :return: None :rtype: None :raises: :class:`~azure.core.exceptions.HttpResponseError` Recovers the deleted certificate back to its current version under /certificates. 
Performs the reversal of the Delete operation. THe operation is applicable in vaults enabled for soft-delete, and must be issued during the retention interval (available in the deleted certificate's attributes). This operation requires the certificates/recover permission. :param str name: The name of the deleted certificate :return: The recovered certificate :rtype: ~azure.keyvault.certificates.models.KeyVaultCertificate :raises: :class:`~azure.core.exceptions.HttpResponseError` Example: .. literalinclude:: ../tests/test_examples_certificates_async.py :start-after: [START recover_deleted_certificate] :end-before: [END recover_deleted_certificate] :language: python :caption: Recover a deleted certificate :dedent: 8 Imports a certificate into a specified key vault. Imports an existing valid certificate, containing a private key, into Azure Key Vault. The certificate to be imported can be in either PFX or PEM format. If the certificate is in PEM format the PEM file must contain the key as well as x509 certificates. This operation requires the certificates/import permission. :param str name: The name of the certificate. :param bytes certificate_bytes: Bytes of the certificate object to import. This certificate needs to contain the private key. :returns: The imported KeyVaultCertificate :rtype: ~azure.keyvault.certificates.models.KeyVaultCertificate :raises: :class:`~azure.core.exceptions.HttpResponseError` Keyword arguments - *enabled (bool)* - Determines whether the object is enabled. - *tags (dict[str, str])* - Application specific metadata in the form of key-value pairs. - *password (str)* - If the private key in the passed in certificate is encrypted, it is the password used for encryption. - *policy (~azure.keyvault.certificates.models.CertificatePolicy)* - The management policy for the certificate. Gets the policy for a certificate. Returns the specified certificate policy resources in the key vault. This operation requires the certificates/get permission. 
:param str certificate_name: The name of the certificate in a given key vault. :return: The certificate policy :rtype: ~azure.keyvault.certificates.models.CertificatePolicy :raises: :class:`~azure.core.exceptions.HttpResponseError` Updates the policy for a certificate. Set specified members in the certificate policy. Leaves others as null. This operation requries the certificates/update permission. :param str certificate_name: The name of the certificate in the given vault. :param policy: The policy for the certificate. :type policy: ~azure.keyvault.certificates.models.CertificatePolicy :return: The certificate policy :rtype: ~azure.keyvault.certificates.models.CertificatePolicy :raises: :class:`~azure.core.exceptions.HttpResponseError` Updates the specified attributes associated with the given certificate. The UpdateCertificate operation applies the specified update on the given certificate; the only elements updated are the certificate's attributes. This operation requires the certificates/update permission. :param str name: The name of the certificate in the given key vault. :param str version: The version of the certificate. :returns: The updated KeyVaultCertificate :rtype: ~azure.keyvault.certificates.models.KeyVaultCertificate :raises: :class:`~azure.core.exceptions.HttpResponseError` Keyword arguments - *enabled (bool)* - Determines whether the object is enabled. - *tags (dict[str, str])* - Application specific metadata in the form of key-value pairs. Example: .. literalinclude:: ../tests/test_examples_certificates_async.py :start-after: [START update_certificate] :end-before: [END update_certificate] :language: python :caption: Update a certificate's attributes :dedent: 8 Backs up the specified certificate. Requests that a backup of the specified certificate be downloaded to the client. All versions of the certificate will be downloaded. This operation requires the certificates/backup permission. :param str name: The name of the certificate. 
:return: the backup blob containing the backed up certificate. :rtype: bytes :raises: :class:`~azure.core.exceptions.ResourceNotFoundError` if the certificate doesn't exist, :class:`~azure.core.exceptions.HttpResponseError` for other errors Example: .. literalinclude:: ../tests/test_examples_certificates_async.py :start-after: [START backup_certificate] :end-before: [END backup_certificate] :language: python :caption: Get a certificate backup :dedent: 8 Restores a backed up certificate to a vault. Restores a backed up certificate, and all its versions, to a vault. this operation requires the certificates/restore permission. :param bytes backup: The backup blob associated with a certificate bundle. :return: The restored KeyVaultCertificate :rtype: ~azure.keyvault.certificates.models.KeyVaultCertificate :raises: :class:`~azure.core.exceptions.HttpResponseError` Example: .. literalinclude:: ../tests/test_examples_certificates_async.py :start-after: [START restore_certificate] :end-before: [END restore_certificate] :language: python :caption: Restore a certificate backup :dedent: 8 Lists the deleted certificates in the specified vault currently available for recovery. Retrieves the certificates in the current vault which are in a deleted state and ready for recovery or purging. This operation includes deletion-specific information. This operation requires the certificates/get/list permission. This operation can only be enabled on soft-delete enabled vaults. :return: An iterator like instance of DeletedCertificate :rtype: ~azure.core.paging.ItemPaged[~azure.keyvault.certificates.models.DeletedCertificate] :raises: :class:`~azure.core.exceptions.HttpResponseError` Keyword arguments - *include_pending (bool)* - Specifies whether to include certificates which are not completely deleted. Example: .. 
literalinclude:: ../tests/test_examples_certificates_async.py :start-after: [START list_deleted_certificates] :end-before: [END list_deleted_certificates] :language: python :caption: List all the deleted certificates :dedent: 8 List certificates in the key vault. The GetCertificates operation returns the set of certificates resources in the key vault. This operation requires the certificates/list permission. :returns: An iterator like instance of CertificateProperties :rtype: ~azure.core.paging.ItemPaged[~azure.keyvault.certificates.models.CertificateProperties] :raises: :class:`~azure.core.exceptions.HttpResponseError` Keyword arguments - *include_pending (bool)* - Specifies whether to include certificates which are not completely provisioned. Example: .. literalinclude:: ../tests/test_examples_certificates_async.py :start-after: [START list_certificates] :end-before: [END list_certificates] :language: python :caption: List all certificates :dedent: 8 List the versions of a certificate. The GetCertificateVersions operation returns the versions of a certificate in the key vault. This operation requires the certificates/list permission. :param str name: The name of the certificate. :returns: An iterator like instance of CertificateProperties :rtype: ~azure.core.paging.ItemPaged[~azure.keyvault.certificates.models.CertificateProperties] :raises: :class:`~azure.core.exceptions.HttpResponseError` Example: .. literalinclude:: ../tests/test_examples_certificates_async.py :start-after: [START list_certificate_versions] :end-before: [END list_certificate_versions] :language: python :caption: List all versions of a certificate :dedent: 8 # pylint:disable=unsubscriptable-object # disabled unsubscriptable-object because of pylint bug referenced here: # https://github.com/PyCQA/pylint/issues/2377 Sets the certificate contacts for the key vault. Sets the certificate contacts for the key vault. This operation requires the certificates/managecontacts permission. 
:param contacts: The contact list for the vault certificates. :type contacts: list[~azure.keyvault.certificates.models.CertificateContact] :returns: The created list of contacts :rtype: list[~azure.keyvault.certificates.models.CertificateContact] :raises: :class:`~azure.core.exceptions.HttpResponseError` Example: .. literalinclude:: ../tests/test_examples_certificates_async.py :start-after: [START create_contacts] :end-before: [END create_contacts] :language: python :caption: Create contacts :dedent: 8 # pylint:disable=unsubscriptable-object # disabled unsubscruptable-object because of pylint bug referenced here: # https://github.com/PyCQA/pylint/issues/2377 Gets the certificate contacts for the key vault. Returns the set of certificate contact resources in the specified key vault. This operation requires the certificates/managecontacts permission. :return: The certificate contacts for the key vault. :rtype: list[azure.keyvault.certificates.models.CertificateContact] :raises: :class:`~azure.core.exceptions.HttpResponseError` Example: .. literalinclude:: ../tests/test_examples_certificates_async.py :start-after: [START get_contacts] :end-before: [END get_contacts] :language: python :caption: Get contacts :dedent: 8 # pylint:disable=unsubscriptable-object # disabled unsubscruptable-object because of pylint bug referenced here: # https://github.com/PyCQA/pylint/issues/2377 Deletes the certificate contacts for the key vault. Deletes the certificate contacts for the key vault certificate. This operation requires the certificates/managecontacts permission. :return: Contacts :rtype: list[~azure.keyvault.certificates.models.CertificateContact] :raises: :class:`~azure.core.exceptions.HttpResponseError` Example: .. literalinclude:: ../tests/test_examples_certificates_async.py :start-after: [START delete_contacts] :end-before: [END delete_contacts] :language: python :caption: Delete contacts :dedent: 8 Gets the creation operation of a certificate. 
Gets the creation operation associated with a specified certificate. This operation requires the certificates/get permission. :param str name: The name of the certificate. :returns: The created CertificateOperation :rtype: ~azure.keyvault.certificates.models.CertificateOperation :raises: :class:`~azure.core.exceptions.ResourceNotFoundError` if the certificate doesn't exist, :class:`~azure.core.exceptions.HttpResponseError` for other errors Deletes the creation operation for a specific certificate. Deletes the creation operation for a specified certificate that is in the process of being created. The certificate is no longer created. This operation requires the certificates/update permission. :param str name: The name of the certificate. :return: The deleted CertificateOperation :rtype: ~azure.keyvault.certificates.models.CertificateOperation :raises: :class:`~azure.core.exceptions.ResourceNotFoundError` if the operation doesn't exist, :class:`~azure.core.exceptions.HttpResponseError` for other errors Cancels a certificate operation. Cancels a certificate creation operation that is already in progress. This operation requires the certificates/update permission. :param str name: The name of the certificate. :returns: The cancelled certificate operation :rtype: ~azure.keyvault.certificates.models.CertificateOperation :raises: :class:`~azure.core.exceptions.HttpResponseError` Merges a certificate or a certificate chain with a key pair existing on the server. Performs the merging of a certificate or certificate chain with a key pair currently available in the service. This operation requires the certificates/create permission. Make sure when creating the certificate to merge using create_certificate that you set its issuer to 'Unknown'. This way Key Vault knows that the certificate will not be signed by an issuer known to it. :param str name: The name of the certificate :param x509_certificates: The certificate or the certificate chain to merge. 
:type x509_certificates: list[bytearray] :return: The merged certificate operation :rtype: ~azure.keyvault.certificates.models.CertificateOperation :raises: :class:`~azure.core.exceptions.HttpResponseError` Keyword arguments - *enabled (bool)* - Determines whether the object is enabled. - *tags (dict[str, str])* - Application specific metadata in the form of key-value pairs. Gets the specified certificate issuer. Returns the specified certificate issuer resources in the key vault. This operation requires the certificates/manageissuers/getissuers permission. :param str name: The name of the issuer. :return: The specified certificate issuer. :rtype: ~azure.keyvault.certificates.models.CertificateIssuer :raises: :class:`~azure.core.exceptions.ResourceNotFoundError` if the issuer doesn't exist, :class:`~azure.core.exceptions.HttpResponseError` for other errors Example: .. literalinclude:: ../tests/test_examples_certificates_async.py :start-after: [START get_issuer] :end-before: [END get_issuer] :language: python :caption: Get an issuer :dedent: 8 Sets the specified certificate issuer. The SetCertificateIssuer operation adds or updates the specified certificate issuer. This operation requires the certificates/setissuers permission. :param str name: The name of the issuer. :param str provider: The issuer provider. :returns: The created CertificateIssuer :rtype: ~azure.keyvault.certificates.models.CertificateIssuer :raises: :class:`~azure.core.exceptions.HttpResponseError` Keyword arguments - *enabled (bool)* - Determines whether the object is enabled. - *account_id (str)* - The user name/account name/account id. - *password (str)* - The password/secret/account key. - *organization_id (str)* - Id of the organization. - *admin_details (list[~azure.keyvault.certificates.models.AdministratorDetails])* - Details of the organization administrators of the certificate issuer. Example: .. 
literalinclude:: ../tests/test_examples_certificates_async.py :start-after: [START create_issuer] :end-before: [END create_issuer] :language: python :caption: Create an issuer :dedent: 8 Updates the specified certificate issuer. Performs an update on the specified certificate issuer entity. This operation requires the certificates/setissuers permission. :param str name: The name of the issuer. :param str provider: The issuer provider. :return: The updated issuer :rtype: ~azure.keyvault.certificates.models.CertificateIssuer :raises: :class:`~azure.core.exceptions.HttpResponseError` Keyword arguments - *enabled (bool)* - Determines whether the object is enabled. - *provider (str)* - The issuer provider. - *account_id (str)* - The user name/account name/account id. - *password (str)* - The password/secret/account key. - *organization_id (str)* - Id of the organization. - *admin_details (list[~azure.keyvault.certificates.models.AdministratorDetails])* - Details of the organization administrators of the certificate issuer. Deletes the specified certificate issuer. Permanently removes the specified certificate issuer from the vault. This operation requires the certificates/manageissuers/deleteissuers permission. :param str name: The name of the issuer. :return: CertificateIssuer :rtype: ~azure.keyvault.certificates.models.CertificateIssuer :raises: :class:`~azure.core.exceptions.HttpResponseError` Example: .. literalinclude:: ../tests/test_examples_certificates_async.py :start-after: [START delete_issuer] :end-before: [END delete_issuer] :language: python :caption: Delete an issuer :dedent: 8 List certificate issuers for the key vault. Returns the set of certificate issuer resources in the key vault. This operation requires the certificates/manageissuers/getissuers permission. 
:return: An iterator like instance of Issuers :rtype: ~azure.core.paging.ItemPaged[~azure.keyvault.certificates.models.CertificateIssuer] :raises: :class:`~azure.core.exceptions.HttpResponseError` Example: .. literalinclude:: ../tests/test_examples_certificates_async.py :start-after: [START list_issuers] :end-before: [END list_issuers] :language: python :caption: List issuers of a vault :dedent: 8 | 1.906714 | 2 |
lib/pymodules/socials/socials.py | LimpingNinja/ngenmud | 4 | 6624972 | <gh_stars>1-10
"""
package: socials
socials are commonly used emotes (e.g. smiling, grinning, laughing). Instead
making people have to write out an entire emote every time they would like
to express such an emote, they can simply use one of these simple commands
to perform a pre-made emote. This is essentially a copy of the c module written
in python. There are no changes good or bad.
Description of module concepts:
cmds are the list of commands that trigger the social. More than one cmd
can be specified (comma-separated). Assumes cmds != NULL. All other
arguments can be NULL.
to_char_notgt is the message sent to the character if no target for
the social is provided.
to_room_notgt is the message sent to the room if no target for the
social is provided.
to_char_self is the message sent to ourself when the target provided
was ourself. If to_char_self is not provided, the message will default
to the same one used for to_char_notgt.
to_room_self is the message sent to the room when the target provided
was ourself. If to_room_self is not provided, the message will default
to the same one used for to_char_notgt.
to_char_tgt is the message sent to the character when a target is
provided.
to_vict_tgt is the message sent to the target when a target is provided
to_room_tgt is the message sent to the room when a target is provided
adverbs and adjectives are default modifiers that can be suggested or
applied to the emote through $M and $m (adjective: evil, adverb: evilly).
If a player types an override it will override both $M and $m unless they
clearly specify $M= and/or $m= for advanced usage.
require_tgt is a boolean describing whether this emote forces the caller
to have a target.
min_pos and max_pos are the minimum and maximum positions the socials can
be performed from, respectively.
"""
from mudsys import add_cmd, remove_cmd
from cmd_checks import chk_conscious, chk_can_move, chk_grounded, chk_supine
import mud, storage, char, auxiliary, time, string, hooks, socedit, mudsys
# This stores all the socials themselves, before unlinking
__social_table__ = { }
# This stores all the socials after unlinking
__socials__ = { }
__socials_file__ = "../lib/misc/socials"
class Social:
def __init__(self, cmds = "", to_char_notgt = "", to_room_notgt = "", to_char_self = "",
to_room_self = "", to_char_tgt = "", to_vict_tgt = "", to_room_tgt = "",
adverb = "", adjective = "", require_tgt = "", min_pos = "", max_pos = "",
storeSet = None):
if not storeSet == None:
self.__cmds__ = storeSet.readString("cmds")
self.__to_char_notgt__ = storeSet.readString("to_char_notgt")
self.__to_room_notgt__ = storeSet.readString("to_room_notgt")
self.__to_char_self__ = storeSet.readString("to_char_self")
self.__to_room_self__ = storeSet.readString("to_room_self")
self.__to_char_tgt__ = storeSet.readString("to_char_tgt")
self.__to_vict_tgt__ = storeSet.readString("to_vict_tgt")
self.__to_room_tgt__ = storeSet.readString("to_room_tgt")
self.__adverb__ = storeSet.readString("adverb")
self.__adjective__ = storeSet.readString("adjective")
self.__require_tgt__ = storeSet.readString("require_tgt")
self.__min_pos__ = storeSet.readString("min_pos")
self.__max_pos__ = storeSet.readString("max_pos")
else:
self.__cmds__ = cmds
self.__to_char_notgt__ = to_char_notgt
self.__to_room_notgt__ = to_room_notgt
self.__to_char_self__ = to_char_self
self.__to_room_self__ = to_room_self
self.__to_char_tgt__ = to_char_tgt
self.__to_vict_tgt__ = to_vict_tgt
self.__to_room_tgt__ = to_room_tgt
self.__adverb__ = adverb
self.__adjective__ = adjective
self.__require_tgt__ = require_tgt
self.__min_pos__ = min_pos
self.__max_pos__ = max_pos
def store(self):
set = storage.StorageSet()
set.storeString("cmds", self.__cmds__)
set.storeString("to_char_notgt", self.__to_char_notgt__)
set.storeString("to_room_notgt", self.__to_room_notgt__)
set.storeString("to_char_self", self.__to_char_self__)
set.storeString("to_room_self", self.__to_room_self__)
set.storeString("to_char_tgt", self.__to_char_tgt__)
set.storeString("to_vict_tgt", self.__to_vict_tgt__)
set.storeString("to_room_tgt", self.__to_room_tgt__)
set.storeString("adjective", self.__adjective__)
set.storeString("adverb", self.__adverb__)
set.storeString("require_tgt", self.__require_tgt__)
set.storeString("min_pos", self.__min_pos__)
set.storeString("max_pos", self.__max_pos__)
return set
def get_cmds(self): return self.__cmds__
def get_to_char_notgt(self): return self.__to_char_notgt__
def get_to_char_self(self): return self.__to_char_self__
def get_to_char_tgt(self): return self.__to_char_tgt__
def get_to_room_notgt(self): return self.__to_room_notgt__
def get_to_room_self(self): return self.__to_room_self__
def get_to_room_tgt(self): return self.__to_room_tgt__
def get_to_vict_tgt(self): return self.__to_vict_tgt__
def get_adverb(self): return self.__adverb__
def get_adjective(self): return self.__adjective__
def get_require_tgt(self): return self.__require_tgt__
def get_min_pos(self): return self.__min_pos__
def get_max_pos(self): return self.__max_pos__
def set_cmds(self, val):
self.__cmds__ = val
return self.__cmds__
def set_to_char_notgt(self, val):
self.__to_char_notgt__ = val
return self.__to_char_notgt__
def set_to_char_self(self, val):
self.__to_char_self__ = val
return self.__to_char_self__
def set_to_char_tgt(self, val):
self.__to_char_tgt__ = val
return self.__to_char_tgt__
def set_to_room_notgt(self, val):
self.__to_room_notgt__ = val
return self.__to_room_notgt__
def set_to_room_self(self, val):
self.__to_room_self__ = val
return self.__to_room_self__
def set_to_room_tgt(self, val):
self.__to_room_tgt__ = val
return self.__to_room_tgt__
def set_to_vict_tgt(self, val):
self.__to_vict_tgt__ = val
return self.__to_vict_tgt__
def set_adverb(self, val):
self.__adverb__ = val
return self.__adverb__
def set_adjective(self, val):
self.__adjective__ = val
return self.__adjective__
def set_require_tgt(self, val):
self.__require_tgt__ = val
return self.__require_tgt__
def set_min_pos(self, val):
if val in socedit.Position.items():
self.__min_pos__ = val
return self.__min_pos__
def set_max_pos(self, val):
if val in socedit.Position.items():
self.__max_pos__ = val
return self.__max_pos__
def link_social(new_cmd, old_cmd, save=True):
if old_cmd in __socials__.keys():
unlink_social(new_cmd, save)
social_data = get_social(old_cmd)
cmds = social_data.get_cmds()
keywords = [x.strip() for x in cmds.split(',')]
keywords.append(new_cmd)
# relink all the individual mappings
new_cmds = ','.join(keywords)
for k in keywords:
__socials__[k] = new_cmds
# set the new hash, delete the old one and add the new
social_data.set_cmds(new_cmds)
del __social_table__[cmds]
__social_table__[new_cmds] = social_data
# add the command to the system
add_cmd(new_cmd, None, cmd_social, "player", False)
# this needs to be rewritten
if social_data.get_min_pos == "sitting":
mudsys.add_cmd_check(new_cmd, chk_conscious)
elif social_data.get_min_pos == "standing":
mudsys.add_cmd_check(new_cmd, chk_can_move)
elif social_data.get_max_pos == "standing":
mudsys.add_cmd_check(new_cmd, chk_grounded)
elif social_data.get_max_pos == "sitting":
mudsys.add_cmd_check(new_cmd, chk_supine)
if save is True:
save_socials()
def unlink_social(social_cmd, save=True):
if social_cmd not in __socials__.keys():
return
social_link = __socials__[social_cmd]
if social_link in __social_table__.keys():
social_data = __social_table__.pop(social_link)
if social_data is not None:
cmds = social_data.get_cmds()
result = [x.strip() for x in cmds.split(',')]
# remove the original cmd from the command list
result.remove(social_cmd)
remove_cmd(social_cmd)
# if there are still commands left re-add
if len(result) > 0:
social_data.set_cmds(','.join(result))
__social_table__[social_link] = social_data
else:
__socials__[social_cmd]
if save is True:
save_socials()
def add_social(social_data, save=True):
cmds = social_data.get_cmds()
result = [x.strip() for x in cmds.split(',')]
for res in result:
unlink_social(res)
add_cmd(res, None, cmd_social, "player", False)
if social_data.get_min_pos == "sitting":
mudsys.add_cmd_check(res, chk_conscious)
elif social_data.get_min_pos == "standing":
mudsys.add_cmd_check(res, chk_can_move)
elif social_data.get_max_pos == "standing":
mudsys.add_cmd_check(res, chk_grounded)
elif social_data.get_max_pos == "sitting":
mudsys.add_cmd_check(res, chk_supine)
__socials__[res] = cmds
__social_table__[cmds] = social_data
if save:
save_socials()
def get_social(social):
if social in __socials__:
return __social_table__[__socials__[social]]
return None
def save_socials():
set = storage.StorageSet()
socials = storage.StorageList()
set.storeList("socials", socials)
for cmd, data in __social_table__.iteritems():
one_set = data.store()
socials.add(one_set)
set.write(__socials_file__)
set.close()
return
def save_social(social):
save_socials()
return
def load_socials():
storeSet = storage.StorageSet(__socials_file__)
for social_set in storeSet.readList("socials").sets():
social_data = Social(storeSet=social_set)
cmds = social_data.get_cmds()
result = [x.strip() for x in cmds.split(',')]
__social_table__[cmds] = social_data
for res in result:
add_cmd(res, None, cmd_social, "player", False)
if social_data.get_min_pos() == "sitting":
mudsys.add_cmd_check(res, chk_conscious)
elif social_data.get_min_pos() == "standing":
mudsys.add_cmd_check(res, chk_can_move)
elif social_data.get_max_pos() == "standing":
mudsys.add_cmd_check(res, chk_grounded)
elif social_data.get_max_pos() == "sitting":
mudsys.add_cmd_check(res, chk_supine)
__socials__[res] = cmds
storeSet.close()
return
def cmd_socials(ch, cmd, arg):
'''
Syntax: socials, socials <social name>
Socials are a form of emote, they are prepared emotes that are commands you can use and they
allow for single use, targeting other people, etc. An example of a social would be the grin social.
If you type:
> grin
You will see the following, while others around you will also see a variation as if you had performed
the action:
You grin mischievously.
If you want to grin at Kevin though, you can do so by typing:
> grin kevin
You will see:
You grin mischievously at Kevin.
Since these are targeted, Kevin will see it directed at them and the room will see you directing
this mischievous grin at Kevin. Additionally you can change that mischievous nature of the grin
by typing your own adverb (or even phrase):
> grin stupidly
> grin stupidly at kevin
There are quite a few defined socials that you can do. The command 'socials' will list all of
the socials currently available to you. Additionally you can specify a social and see how a
specific social will look if used, the adverbs, and any synonyms.
'''
buf = [ ]
socs = sorted(__socials__.keys())
count = 0
for soc in socs:
count = count + 1
buf.append("%-20s" % soc)
if count % 4 == 0:
ch.send("".join(buf))
buf = [ ]
if count % 4 != 0:
ch.send("".join(buf))
def cmd_soclink(ch, cmd, arg):
if arg is None or arg is "":
ch.send("Link which social to which?")
return
arg = arg.lower()
arg, new_soc = mud.parse_args(ch, True, cmd, arg, "| word(subcommand) word(arguments)")
if new_soc is None:
ch.send("You must provide a new command and an old social to link it to.")
return
social_data = get_social(arg)
if social_data is None:
ch.send("No social exists for %s" % arg)
link_social(new_soc, arg);
ch.send("%s is now linked to %s" % (new_soc, arg))
def cmd_socunlink(ch, cmd, arg):
if arg is None or arg is "":
ch.send("Unlink which social?")
return
social_data = get_social(arg)
if social_data is None:
ch.send("No social exists for %s." % arg)
return
unlink_social(arg)
ch.send("The %s social was unlinked." % arg)
mud.log_string("%s unlinked the social %s." % (ch.name, arg))
# One generic command for handling socials. Does table lookup on all of
# the existing socials and executes the proper one.
def cmd_social(ch, cmd, arg):
data = get_social(cmd)
# If they used a phrasal or adjective/adverb then this will be 2 length
# otherwise it will be one length.
args = arg.split(" at ", 1)
has_modifier = True if len(args) == 2 else False
# does the social exist? Do we have a problem? DO WE?
if data:
if has_modifier is True:
tgt, type = mud.generic_find(ch, arg, "all", "immediate", False)
if tgt is None or type != "char":
ch.send("That individual does not seem to be here.")
return
else:
tgt, type = mud.generic_find(ch, arg, "all", "immediate", False)
# No target was supplied, the emote is to ourselves.
if tgt is None:
if data.get_to_char_notgt():
mud.message(ch, None, None, None, True, "to_char", "%s" % (
data.get_to_char_notgt() if has_modifier == False
else string.replace(data.get_to_char_notgt(), "$M", arg)))
if data.get_to_room_notgt():
mud.message(ch, None, None, None, True, "to_room", "%s" % (
data.get_to_room_notgt() if has_modifier == False
else string.replace(data.get_to_room_notgt(), "$M", arg)))
return
# a target was supplied and it is us
elif ch == tgt:
if data.get_to_char_self():
mud.message(ch, None, None, None, True, "to_char",
data.get_to_char_self() if has_modifier else
string.replace(data.get_to_char_self(), "$M", args[0]))
elif data.get_to_char_notgt():
mud.message(ch, None, None, None, True, "to_char",
data.get_to_char_notgt() if has_modifier == False
else string.replace(data.get_to_char_notgt(), "$M", args[0]))
if data.get_to_room_self():
mud.message(ch, None, None, None, True, "to_room",
data.get_to_room_self() if has_modifier == False
else string.replace(data.get_to_room_self(), "$M", args[0]))
elif data.get_to_room_notgt():
mud.message(ch, None, None, None, True, "to_room",
data.get_to_room_notgt() if has_modifier == False
else string.replace(data.get_to_room_notgt(), "$M", args[0]))
return
# a target was supplied and it is not us
else:
if data.get_to_char_tgt():
mud.message(ch, tgt, None, None, True, "to_char",
data.get_to_char_tgt() if has_modifier == False
else string.replace(data.get_to_char_tgt(), "$M", args[0]))
if data.get_to_vict_tgt():
mud.message(ch, tgt, None, None, True, "to_char",
data.get_to_vict_tgt() if has_modifier == False
else string.replace(data.get_to_vict_tgt(), "$M", args[0]))
if data.get_to_room_tgt():
mud.message(ch, tgt, None, None, True, "to_room",
data.get_to_room_tgt() if has_modifier == False
else string.replace(data.get_to_room_tgt(), "$M", args[0]))
else:
mud.log_string("ERROR: %s tried social, %s, but no such social exists!" % (ch.name, cmd))
return
load_socials()
add_cmd("socials", None, cmd_socials, "player", False)
add_cmd("socunlink", None, cmd_socunlink, "builder", False)
add_cmd("soclink", None, cmd_soclink, "builder", False) | """
package: socials
socials are commonly used emotes (e.g. smiling, grinning, laughing). Instead
making people have to write out an entire emote every time they would like
to express such an emote, they can simply use one of these simple commands
to perform a pre-made emote. This is essentially a copy of the c module written
in python. There are no changes good or bad.
Description of module concepts:
cmds are the list of commands that trigger the social. More than one cmd
can be specified (comma-separated). Assumes cmds != NULL. All other
arguments can be NULL.
to_char_notgt is the message sent to the character if no target for
the social is provided.
to_room_notgt is the message sent to the room if no target for the
social is provided.
to_char_self is the message sent to ourself when the target provided
was ourself. If to_char_self is not provided, the message will default
to the same one used for to_char_notgt.
to_room_self is the message sent to the room when the target provided
was ourself. If to_room_self is not provided, the message will default
to the same one used for to_char_notgt.
to_char_tgt is the message sent to the character when a target is
provided.
to_vict_tgt is the message sent to the target when a target is provided
to_room_tgt is the message sent to the room when a target is provided
adverbs and adjectives are default modifiers that can be suggested or
applied to the emote through $M and $m (adjective: evil, adverb: evilly).
If a player types an override it will override both $M and $m unless they
clearly specify $M= and/or $m= for advanced usage.
require_tgt is a boolean describing whether this emote forces the caller
to have a target.
min_pos and max_pos are the minimum and maximum positions the socials can
per performed from, respectively.
"""
from mudsys import add_cmd, remove_cmd
from cmd_checks import chk_conscious, chk_can_move, chk_grounded, chk_supine
import mud, storage, char, auxiliary, time, string, hooks, socedit, mudsys
# This stores all the socials themselves, before unlinking
__social_table__ = { }
# This stores all the socials after unlinking
__socials__ = { }
__socials_file__ = "../lib/misc/socials"
class Social:
def __init__(self, cmds = "", to_char_notgt = "", to_room_notgt = "", to_char_self = "",
to_room_self = "", to_char_tgt = "", to_vict_tgt = "", to_room_tgt = "",
adverb = "", adjective = "", require_tgt = "", min_pos = "", max_pos = "",
storeSet = None):
if not storeSet == None:
self.__cmds__ = storeSet.readString("cmds")
self.__to_char_notgt__ = storeSet.readString("to_char_notgt")
self.__to_room_notgt__ = storeSet.readString("to_room_notgt")
self.__to_char_self__ = storeSet.readString("to_char_self")
self.__to_room_self__ = storeSet.readString("to_room_self")
self.__to_char_tgt__ = storeSet.readString("to_char_tgt")
self.__to_vict_tgt__ = storeSet.readString("to_vict_tgt")
self.__to_room_tgt__ = storeSet.readString("to_room_tgt")
self.__adverb__ = storeSet.readString("adverb")
self.__adjective__ = storeSet.readString("adjective")
self.__require_tgt__ = storeSet.readString("require_tgt")
self.__min_pos__ = storeSet.readString("min_pos")
self.__max_pos__ = storeSet.readString("max_pos")
else:
self.__cmds__ = cmds
self.__to_char_notgt__ = to_char_notgt
self.__to_room_notgt__ = to_room_notgt
self.__to_char_self__ = to_char_self
self.__to_room_self__ = to_room_self
self.__to_char_tgt__ = to_char_tgt
self.__to_vict_tgt__ = to_vict_tgt
self.__to_room_tgt__ = to_room_tgt
self.__adverb__ = adverb
self.__adjective__ = adjective
self.__require_tgt__ = require_tgt
self.__min_pos__ = min_pos
self.__max_pos__ = max_pos
def store(self):
set = storage.StorageSet()
set.storeString("cmds", self.__cmds__)
set.storeString("to_char_notgt", self.__to_char_notgt__)
set.storeString("to_room_notgt", self.__to_room_notgt__)
set.storeString("to_char_self", self.__to_char_self__)
set.storeString("to_room_self", self.__to_room_self__)
set.storeString("to_char_tgt", self.__to_char_tgt__)
set.storeString("to_vict_tgt", self.__to_vict_tgt__)
set.storeString("to_room_tgt", self.__to_room_tgt__)
set.storeString("adjective", self.__adjective__)
set.storeString("adverb", self.__adverb__)
set.storeString("require_tgt", self.__require_tgt__)
set.storeString("min_pos", self.__min_pos__)
set.storeString("max_pos", self.__max_pos__)
return set
def get_cmds(self): return self.__cmds__
def get_to_char_notgt(self): return self.__to_char_notgt__
def get_to_char_self(self): return self.__to_char_self__
def get_to_char_tgt(self): return self.__to_char_tgt__
def get_to_room_notgt(self): return self.__to_room_notgt__
def get_to_room_self(self): return self.__to_room_self__
def get_to_room_tgt(self): return self.__to_room_tgt__
def get_to_vict_tgt(self): return self.__to_vict_tgt__
def get_adverb(self): return self.__adverb__
def get_adjective(self): return self.__adjective__
def get_require_tgt(self): return self.__require_tgt__
def get_min_pos(self): return self.__min_pos__
def get_max_pos(self): return self.__max_pos__
def set_cmds(self, val):
self.__cmds__ = val
return self.__cmds__
def set_to_char_notgt(self, val):
self.__to_char_notgt__ = val
return self.__to_char_notgt__
def set_to_char_self(self, val):
self.__to_char_self__ = val
return self.__to_char_self__
def set_to_char_tgt(self, val):
self.__to_char_tgt__ = val
return self.__to_char_tgt__
def set_to_room_notgt(self, val):
self.__to_room_notgt__ = val
return self.__to_room_notgt__
def set_to_room_self(self, val):
self.__to_room_self__ = val
return self.__to_room_self__
def set_to_room_tgt(self, val):
self.__to_room_tgt__ = val
return self.__to_room_tgt__
def set_to_vict_tgt(self, val):
self.__to_vict_tgt__ = val
return self.__to_vict_tgt__
def set_adverb(self, val):
self.__adverb__ = val
return self.__adverb__
def set_adjective(self, val):
self.__adjective__ = val
return self.__adjective__
def set_require_tgt(self, val):
self.__require_tgt__ = val
return self.__require_tgt__
def set_min_pos(self, val):
if val in socedit.Position.items():
self.__min_pos__ = val
return self.__min_pos__
def set_max_pos(self, val):
if val in socedit.Position.items():
self.__max_pos__ = val
return self.__max_pos__
def link_social(new_cmd, old_cmd, save=True):
if old_cmd in __socials__.keys():
unlink_social(new_cmd, save)
social_data = get_social(old_cmd)
cmds = social_data.get_cmds()
keywords = [x.strip() for x in cmds.split(',')]
keywords.append(new_cmd)
# relink all the individual mappings
new_cmds = ','.join(keywords)
for k in keywords:
__socials__[k] = new_cmds
# set the new hash, delete the old one and add the new
social_data.set_cmds(new_cmds)
del __social_table__[cmds]
__social_table__[new_cmds] = social_data
# add the command to the system
add_cmd(new_cmd, None, cmd_social, "player", False)
# this needs to be rewritten
if social_data.get_min_pos == "sitting":
mudsys.add_cmd_check(new_cmd, chk_conscious)
elif social_data.get_min_pos == "standing":
mudsys.add_cmd_check(new_cmd, chk_can_move)
elif social_data.get_max_pos == "standing":
mudsys.add_cmd_check(new_cmd, chk_grounded)
elif social_data.get_max_pos == "sitting":
mudsys.add_cmd_check(new_cmd, chk_supine)
if save is True:
save_socials()
def unlink_social(social_cmd, save=True):
if social_cmd not in __socials__.keys():
return
social_link = __socials__[social_cmd]
if social_link in __social_table__.keys():
social_data = __social_table__.pop(social_link)
if social_data is not None:
cmds = social_data.get_cmds()
result = [x.strip() for x in cmds.split(',')]
# remove the original cmd from the command list
result.remove(social_cmd)
remove_cmd(social_cmd)
# if there are still commands left re-add
if len(result) > 0:
social_data.set_cmds(','.join(result))
__social_table__[social_link] = social_data
else:
__socials__[social_cmd]
if save is True:
save_socials()
def add_social(social_data, save=True):
cmds = social_data.get_cmds()
result = [x.strip() for x in cmds.split(',')]
for res in result:
unlink_social(res)
add_cmd(res, None, cmd_social, "player", False)
if social_data.get_min_pos == "sitting":
mudsys.add_cmd_check(res, chk_conscious)
elif social_data.get_min_pos == "standing":
mudsys.add_cmd_check(res, chk_can_move)
elif social_data.get_max_pos == "standing":
mudsys.add_cmd_check(res, chk_grounded)
elif social_data.get_max_pos == "sitting":
mudsys.add_cmd_check(res, chk_supine)
__socials__[res] = cmds
__social_table__[cmds] = social_data
if save:
save_socials()
def get_social(social):
if social in __socials__:
return __social_table__[__socials__[social]]
return None
def save_socials():
set = storage.StorageSet()
socials = storage.StorageList()
set.storeList("socials", socials)
for cmd, data in __social_table__.iteritems():
one_set = data.store()
socials.add(one_set)
set.write(__socials_file__)
set.close()
return
def save_social(social):
save_socials()
return
def load_socials():
storeSet = storage.StorageSet(__socials_file__)
for social_set in storeSet.readList("socials").sets():
social_data = Social(storeSet=social_set)
cmds = social_data.get_cmds()
result = [x.strip() for x in cmds.split(',')]
__social_table__[cmds] = social_data
for res in result:
add_cmd(res, None, cmd_social, "player", False)
if social_data.get_min_pos() == "sitting":
mudsys.add_cmd_check(res, chk_conscious)
elif social_data.get_min_pos() == "standing":
mudsys.add_cmd_check(res, chk_can_move)
elif social_data.get_max_pos() == "standing":
mudsys.add_cmd_check(res, chk_grounded)
elif social_data.get_max_pos() == "sitting":
mudsys.add_cmd_check(res, chk_supine)
__socials__[res] = cmds
storeSet.close()
return
def cmd_socials(ch, cmd, arg):
'''
Syntax: socials, socials <social name>
Socials are a form of emote, they are prepared emotes that are commands you can use and they
allow for single use, targeting other people, etc. An example of a social would be the grin social.
If you type:
> grin
You will see the following, while others around you will also see a variation as if you had performed
the action:
You grin mischievously.
If you want to grin at Kevin though, you can do so by typing:
> grin kevin
You will see:
You grin mischievously at Kevin.
Since these are targeted, Kevin will see it directed at them and the room will see you directing
this mischievous grin at Kevin. Additionally you can change that mischievous nature of the grin
by typing your own adverb (or even phrase):
> grin stupidly
> grin stupidly at kevin
There are quite a few defined socials that you can do. The command 'socials' will list all of
the socials currently available to you. Additionally you can specify a social and see how a
specific social will look if used, the adverbs, and any synonyms.
'''
buf = [ ]
socs = sorted(__socials__.keys())
count = 0
for soc in socs:
count = count + 1
buf.append("%-20s" % soc)
if count % 4 == 0:
ch.send("".join(buf))
buf = [ ]
if count % 4 != 0:
ch.send("".join(buf))
def cmd_soclink(ch, cmd, arg):
if arg is None or arg is "":
ch.send("Link which social to which?")
return
arg = arg.lower()
arg, new_soc = mud.parse_args(ch, True, cmd, arg, "| word(subcommand) word(arguments)")
if new_soc is None:
ch.send("You must provide a new command and an old social to link it to.")
return
social_data = get_social(arg)
if social_data is None:
ch.send("No social exists for %s" % arg)
link_social(new_soc, arg);
ch.send("%s is now linked to %s" % (new_soc, arg))
def cmd_socunlink(ch, cmd, arg):
if arg is None or arg is "":
ch.send("Unlink which social?")
return
social_data = get_social(arg)
if social_data is None:
ch.send("No social exists for %s." % arg)
return
unlink_social(arg)
ch.send("The %s social was unlinked." % arg)
mud.log_string("%s unlinked the social %s." % (ch.name, arg))
# One generic command for handling socials. Does table lookup on all of
# the existing socials and executes the proper one.
def cmd_social(ch, cmd, arg):
data = get_social(cmd)
# If they used a phrasal or adjective/adverb then this will be 2 length
# otherwise it will be one length.
args = arg.split(" at ", 1)
has_modifier = True if len(args) == 2 else False
# does the social exist? Do we have a problem? DO WE?
if data:
if has_modifier is True:
tgt, type = mud.generic_find(ch, arg, "all", "immediate", False)
if tgt is None or type != "char":
ch.send("That individual does not seem to be here.")
return
else:
tgt, type = mud.generic_find(ch, arg, "all", "immediate", False)
# No target was supplied, the emote is to ourselves.
if tgt is None:
if data.get_to_char_notgt():
mud.message(ch, None, None, None, True, "to_char", "%s" % (
data.get_to_char_notgt() if has_modifier == False
else string.replace(data.get_to_char_notgt(), "$M", arg)))
if data.get_to_room_notgt():
mud.message(ch, None, None, None, True, "to_room", "%s" % (
data.get_to_room_notgt() if has_modifier == False
else string.replace(data.get_to_room_notgt(), "$M", arg)))
return
# a target was supplied and it is us
elif ch == tgt:
if data.get_to_char_self():
mud.message(ch, None, None, None, True, "to_char",
data.get_to_char_self() if has_modifier else
string.replace(data.get_to_char_self(), "$M", args[0]))
elif data.get_to_char_notgt():
mud.message(ch, None, None, None, True, "to_char",
data.get_to_char_notgt() if has_modifier == False
else string.replace(data.get_to_char_notgt(), "$M", args[0]))
if data.get_to_room_self():
mud.message(ch, None, None, None, True, "to_room",
data.get_to_room_self() if has_modifier == False
else string.replace(data.get_to_room_self(), "$M", args[0]))
elif data.get_to_room_notgt():
mud.message(ch, None, None, None, True, "to_room",
data.get_to_room_notgt() if has_modifier == False
else string.replace(data.get_to_room_notgt(), "$M", args[0]))
return
# a target was supplied and it is not us
else:
if data.get_to_char_tgt():
mud.message(ch, tgt, None, None, True, "to_char",
data.get_to_char_tgt() if has_modifier == False
else string.replace(data.get_to_char_tgt(), "$M", args[0]))
if data.get_to_vict_tgt():
mud.message(ch, tgt, None, None, True, "to_char",
data.get_to_vict_tgt() if has_modifier == False
else string.replace(data.get_to_vict_tgt(), "$M", args[0]))
if data.get_to_room_tgt():
mud.message(ch, tgt, None, None, True, "to_room",
data.get_to_room_tgt() if has_modifier == False
else string.replace(data.get_to_room_tgt(), "$M", args[0]))
else:
mud.log_string("ERROR: %s tried social, %s, but no such social exists!" % (ch.name, cmd))
return
load_socials()
add_cmd("socials", None, cmd_socials, "player", False)
add_cmd("socunlink", None, cmd_socunlink, "builder", False)
add_cmd("soclink", None, cmd_soclink, "builder", False) | en | 0.906452 | package: socials socials are commonly used emotes (e.g. smiling, grinning, laughing). Instead making people have to write out an entire emote every time they would like to express such an emote, they can simply use one of these simple commands to perform a pre-made emote. This is essentially a copy of the c module written in python. There are no changes good or bad. Description of module concepts: cmds are the list of commands that trigger the social. More than one cmd can be specified (comma-separated). Assumes cmds != NULL. All other arguments can be NULL. to_char_notgt is the message sent to the character if no target for the social is provided. to_room_notgt is the message sent to the room if no target for the social is provided. to_char_self is the message sent to ourself when the target provided was ourself. If to_char_self is not provided, the message will default to the same one used for to_char_notgt. to_room_self is the message sent to the room when the target provided was ourself. If to_room_self is not provided, the message will default to the same one used for to_char_notgt. to_char_tgt is the message sent to the character when a target is provided. to_vict_tgt is the message sent to the target when a target is provided to_room_tgt is the message sent to the room when a target is provided adverbs and adjectives are default modifiers that can be suggested or applied to the emote through $M and $m (adjective: evil, adverb: evilly). If a player types an override it will override both $M and $m unless they clearly specify $M= and/or $m= for advanced usage. require_tgt is a boolean describing whether this emote forces the caller to have a target. min_pos and max_pos are the minimum and maximum positions the socials can per performed from, respectively. 
# This stores all the socials themselves, before unlinking # This stores all the socials after unlinking # relink all the individual mappings # set the new hash, delete the old one and add the new # add the command to the system # this needs to be rewritten # remove the original cmd from the command list # if there are still commands left re-add Syntax: socials, socials <social name> Socials are a form of emote, they are prepared emotes that are commands you can use and they allow for single use, targeting other people, etc. An example of a social would be the grin social. If you type: > grin You will see the following, while others around you will also see a variation as if you had performed the action: You grin mischievously. If you want to grin at Kevin though, you can do so by typing: > grin kevin You will see: You grin mischievously at Kevin. Since these are targeted, Kevin will see it directed at them and the room will see you directing this mischievous grin at Kevin. Additionally you can change that mischievous nature of the grin by typing your own adverb (or even phrase): > grin stupidly > grin stupidly at kevin There are quite a few defined socials that you can do. The command 'socials' will list all of the socials currently available to you. Additionally you can specify a social and see how a specific social will look if used, the adverbs, and any synonyms. # One generic command for handling socials. Does table lookup on all of # the existing socials and executes the proper one. # If they used a phrasal or adjective/adverb then this will be 2 length # otherwise it will be one length. # does the social exist? Do we have a problem? DO WE? # No target was supplied, the emote is to ourselves. # a target was supplied and it is us # a target was supplied and it is not us | 2.692797 | 3 |
tensorflow/python/framework/sparse_tensor.py | connectthefuture/tensorflow | 1 | 6624973 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions used to construct graphs."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
# pylint: disable=protected-access
_TensorLike = ops._TensorLike
_eval_using_default_session = ops._eval_using_default_session
_override_helper = ops._override_helper
# pylint: enable=protected-access
class SparseTensor(_TensorLike):
"""Represents a sparse tensor.
TensorFlow represents a sparse tensor as three separate dense tensors:
`indices`, `values`, and `dense_shape`. In Python, the three tensors are
collected into a `SparseTensor` class for ease of use. If you have separate
`indices`, `values`, and `dense_shape` tensors, wrap them in a `SparseTensor`
object before passing to the ops below.
Concretely, the sparse tensor `SparseTensor(indices, values, dense_shape)`
comprises the following components, where `N` and `ndims` are the number
of values and number of dimensions in the `SparseTensor`, respectively:
* `indices`: A 2-D int64 tensor of dense_shape `[N, ndims]`, which specifies
the indices of the elements in the sparse tensor that contain nonzero
values (elements are zero-indexed). For example, `indices=[[1,3], [2,4]]`
specifies that the elements with indexes of [1,3] and [2,4] have
nonzero values.
* `values`: A 1-D tensor of any type and dense_shape `[N]`, which supplies the
values for each element in `indices`. For example, given
`indices=[[1,3], [2,4]]`, the parameter `values=[18, 3.6]` specifies
that element [1,3] of the sparse tensor has a value of 18, and element
[2,4] of the tensor has a value of 3.6.
* `dense_shape`: A 1-D int64 tensor of dense_shape `[ndims]`, which specifies
the
dense_shape of the sparse tensor. Takes a list indicating the number of
elements
in each dimension. For example, `dense_shape=[3,6]` specifies a
two-dimensional
3x6 tensor, `dense_shape=[2,3,4]` specifies a three-dimensional 2x3x4
tensor, and
`dense_shape=[9]` specifies a one-dimensional tensor with 9 elements.
The corresponding dense tensor satisfies:
```python
dense.shape = dense_shape
dense[tuple(indices[i])] = values[i]
```
By convention, `indices` should be sorted in row-major order (or equivalently
lexicographic order on the tuples `indices[i]`). This is not enforced when
`SparseTensor` objects are constructed, but most ops assume correct ordering.
If the ordering of sparse tensor `st` is wrong, a fixed version can be
obtained by calling `tf.sparse_reorder(st)`.
Example: The sparse tensor
```python
SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
```
represents the dense tensor
```python
[[1, 0, 0, 0]
[0, 0, 2, 0]
[0, 0, 0, 0]]
```
@@__init__
@@get_shape
@@indices
@@values
@@dense_shape
@@dtype
@@op
@@graph
"""
@classmethod
def from_value(cls, sparse_tensor_value):
if not (isinstance(sparse_tensor_value, SparseTensor) or
isinstance(sparse_tensor_value, SparseTensorValue)):
raise TypeError("Neither a SparseTensor nor SparseTensorValue: %s." %
sparse_tensor_value)
return SparseTensor(
indices=sparse_tensor_value.indices,
values=sparse_tensor_value.values,
dense_shape=sparse_tensor_value.dense_shape)
def __init__(self, indices, values, dense_shape=None, shape=None):
"""Creates a `SparseTensor`.
Args:
indices: A 2-D int64 tensor of dense_shape `[N, ndims]`.
values: A 1-D tensor of any type and dense_shape `[N]`.
dense_shape: A 1-D int64 tensor of dense_shape `[ndims]`.
shape: Temporary. Legacy naming of dense_shape. Only one of `shape` or
`dense_shape` must be provided.
Returns:
A `SparseTensor`.
Raises:
ValueError: if both `shape` and `dense_shape` are provided.
"""
with ops.name_scope(None, "SparseTensor",
[indices, values, shape, dense_shape]):
indices = ops.convert_to_tensor(
indices, name="indices", dtype=dtypes.int64)
# Always pass as_ref=True because we want to be able to update
# values later if it is a VariableOp.
# TODO(touts): Consider adding mutable_values() when 'values'
# is a VariableOp and updating users of SparseTensor.
values = ops.internal_convert_to_tensor(
values, name="values", as_ref=True)
if shape is not None and dense_shape is not None:
raise ValueError("Only one of shape or dense_shape must be provided, "
"but saw %s and %s" % (shape, dense_shape))
dense_shape = shape if shape is not None else dense_shape
dense_shape = ops.convert_to_tensor(
dense_shape, name="dense_shape", dtype=dtypes.int64)
self._indices = indices
self._values = values
self._dense_shape = dense_shape
indices_shape = indices.get_shape().with_rank(2)
values_shape = values.get_shape().with_rank(1)
dense_shape_shape = dense_shape.get_shape().with_rank(1)
# Assert number of rows in indices match the number of elements in values.
indices_shape[0].merge_with(values_shape[0])
# Assert number of columns in indices matches the number of elements in
# dense_shape.
indices_shape[1].merge_with(dense_shape_shape[0])
def get_shape(self):
"""Get the `TensorShape` representing the shape of the dense tensor.
Returns:
A `TensorShape` object.
"""
return tensor_util.constant_value_as_shape(self._dense_shape)
@property
def indices(self):
"""The indices of non-zero values in the represented dense tensor.
Returns:
A 2-D Tensor of int64 with dense_shape `[N, ndims]`, where `N` is the
number of non-zero values in the tensor, and `ndims` is the rank.
"""
return self._indices
@property
def values(self):
"""The non-zero values in the represented dense tensor.
Returns:
A 1-D Tensor of any data type.
"""
return self._values
@property
def op(self):
"""The `Operation` that produces `values` as an output."""
return self.values.op
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self._values.dtype
@property
def dense_shape(self):
"""A 1-D Tensor of int64 representing the shape of the dense tensor."""
return self._dense_shape
@property
def shape(self):
"""Legacy property returning `dense_shape`."""
return self._dense_shape
@property
def graph(self):
"""The `Graph` that contains the index, value, and dense_shape tensors."""
return self._indices.graph
def __str__(self):
return "SparseTensor(indices=%s, values=%s, dense_shape=%s)" % (
self._indices, self._values, self._dense_shape)
def eval(self, feed_dict=None, session=None):
"""Evaluates this sparse tensor in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for the operation that produces this
tensor.
*N.B.* Before invoking `SparseTensor.eval()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values.
See [`Session.run()`](../../api_docs/python/client.md#Session.run) for a
description of the valid feed values.
session: (Optional.) The `Session` to be used to evaluate this sparse
tensor. If none, the default session will be used.
Returns:
A `SparseTensorValue` object.
"""
indices, values, dense_shape = _eval_using_default_session(
[self.indices, self.values, self.dense_shape], feed_dict, self.graph,
session)
return SparseTensorValue(indices, values, dense_shape)
@staticmethod
def _override_operator(operator, func):
_override_helper(SparseTensor, operator, func)
class _STVIter(six.Iterator):
"""Iterator for the SparseTensorValue."""
def __init__(self, st):
self._st = st
self._ix = -1
def __iter__(self): # pylint: disable=non-iterator-returned
return self
def __next__(self):
self._ix += 1
if self._ix == 0:
return self._st.indices
elif self._ix == 1:
return self._st.values
elif self._ix == 2:
return self._st.dense_shape
else:
raise StopIteration
class SparseTensorValue(object):
"""Stores the calculated numpy arrays representing a `SparseTensor`.
Returned as the output of a session.run on a `SparseTensor` object.
"""
def __init__(self, indices, values, dense_shape=None, shape=None):
self._indices = indices
self._values = values
self._dense_shape = shape or dense_shape
@property
def indices(self):
return self._indices
@property
def values(self):
return self._values
@property
def dense_shape(self):
return self._dense_shape
@property
def shape(self):
return self._dense_shape
def __repr__(self):
return "SparseTensorValue(indices=%s, values=%s, dense_shape=%s)" % (
self._indices, self._values, self._dense_shape)
def __iter__(self): # pylint: disable=non-iterator-returned
return _STVIter(self)
def __getitem__(self, i):
return [self.indices, self.values, self.dense_shape][i]
def convert_to_tensor_or_sparse_tensor(value, dtype=None, name=None):
"""Converts value to a `SparseTensor` or `Tensor`.
Args:
value: A `SparseTensor`, `SparseTensorValue`, or an object whose type has a
registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the
type is inferred from the type of `value`.
name: Optional name to use if a new `Tensor` is created.
Returns:
A `SparseTensor` or `Tensor` based on `value`.
Raises:
RuntimeError: If result type is incompatible with `dtype`.
"""
if dtype is not None:
dtype = dtypes.as_dtype(dtype)
if isinstance(value, SparseTensorValue):
value = SparseTensor.from_value(value)
if isinstance(value, SparseTensor):
if dtype and not dtype.is_compatible_with(value.dtype):
raise RuntimeError(
"Sparse dtype: requested = %s, actual = %s" % (
dtype.name, value.dtype.name))
return value
return ops.internal_convert_to_tensor(
value, dtype=dtype, name=name)
| # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions used to construct graphs."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
# pylint: disable=protected-access
_TensorLike = ops._TensorLike
_eval_using_default_session = ops._eval_using_default_session
_override_helper = ops._override_helper
# pylint: enable=protected-access
class SparseTensor(_TensorLike):
"""Represents a sparse tensor.
TensorFlow represents a sparse tensor as three separate dense tensors:
`indices`, `values`, and `dense_shape`. In Python, the three tensors are
collected into a `SparseTensor` class for ease of use. If you have separate
`indices`, `values`, and `dense_shape` tensors, wrap them in a `SparseTensor`
object before passing to the ops below.
Concretely, the sparse tensor `SparseTensor(indices, values, dense_shape)`
comprises the following components, where `N` and `ndims` are the number
of values and number of dimensions in the `SparseTensor`, respectively:
* `indices`: A 2-D int64 tensor of dense_shape `[N, ndims]`, which specifies
the indices of the elements in the sparse tensor that contain nonzero
values (elements are zero-indexed). For example, `indices=[[1,3], [2,4]]`
specifies that the elements with indexes of [1,3] and [2,4] have
nonzero values.
* `values`: A 1-D tensor of any type and dense_shape `[N]`, which supplies the
values for each element in `indices`. For example, given
`indices=[[1,3], [2,4]]`, the parameter `values=[18, 3.6]` specifies
that element [1,3] of the sparse tensor has a value of 18, and element
[2,4] of the tensor has a value of 3.6.
* `dense_shape`: A 1-D int64 tensor of dense_shape `[ndims]`, which specifies
the
dense_shape of the sparse tensor. Takes a list indicating the number of
elements
in each dimension. For example, `dense_shape=[3,6]` specifies a
two-dimensional
3x6 tensor, `dense_shape=[2,3,4]` specifies a three-dimensional 2x3x4
tensor, and
`dense_shape=[9]` specifies a one-dimensional tensor with 9 elements.
The corresponding dense tensor satisfies:
```python
dense.shape = dense_shape
dense[tuple(indices[i])] = values[i]
```
By convention, `indices` should be sorted in row-major order (or equivalently
lexicographic order on the tuples `indices[i]`). This is not enforced when
`SparseTensor` objects are constructed, but most ops assume correct ordering.
If the ordering of sparse tensor `st` is wrong, a fixed version can be
obtained by calling `tf.sparse_reorder(st)`.
Example: The sparse tensor
```python
SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
```
represents the dense tensor
```python
[[1, 0, 0, 0]
[0, 0, 2, 0]
[0, 0, 0, 0]]
```
@@__init__
@@get_shape
@@indices
@@values
@@dense_shape
@@dtype
@@op
@@graph
"""
@classmethod
def from_value(cls, sparse_tensor_value):
if not (isinstance(sparse_tensor_value, SparseTensor) or
isinstance(sparse_tensor_value, SparseTensorValue)):
raise TypeError("Neither a SparseTensor nor SparseTensorValue: %s." %
sparse_tensor_value)
return SparseTensor(
indices=sparse_tensor_value.indices,
values=sparse_tensor_value.values,
dense_shape=sparse_tensor_value.dense_shape)
def __init__(self, indices, values, dense_shape=None, shape=None):
"""Creates a `SparseTensor`.
Args:
indices: A 2-D int64 tensor of dense_shape `[N, ndims]`.
values: A 1-D tensor of any type and dense_shape `[N]`.
dense_shape: A 1-D int64 tensor of dense_shape `[ndims]`.
shape: Temporary. Legacy naming of dense_shape. Only one of `shape` or
`dense_shape` must be provided.
Returns:
A `SparseTensor`.
Raises:
ValueError: if both `shape` and `dense_shape` are provided.
"""
with ops.name_scope(None, "SparseTensor",
[indices, values, shape, dense_shape]):
indices = ops.convert_to_tensor(
indices, name="indices", dtype=dtypes.int64)
# Always pass as_ref=True because we want to be able to update
# values later if it is a VariableOp.
# TODO(touts): Consider adding mutable_values() when 'values'
# is a VariableOp and updating users of SparseTensor.
values = ops.internal_convert_to_tensor(
values, name="values", as_ref=True)
if shape is not None and dense_shape is not None:
raise ValueError("Only one of shape or dense_shape must be provided, "
"but saw %s and %s" % (shape, dense_shape))
dense_shape = shape if shape is not None else dense_shape
dense_shape = ops.convert_to_tensor(
dense_shape, name="dense_shape", dtype=dtypes.int64)
self._indices = indices
self._values = values
self._dense_shape = dense_shape
indices_shape = indices.get_shape().with_rank(2)
values_shape = values.get_shape().with_rank(1)
dense_shape_shape = dense_shape.get_shape().with_rank(1)
# Assert number of rows in indices match the number of elements in values.
indices_shape[0].merge_with(values_shape[0])
# Assert number of columns in indices matches the number of elements in
# dense_shape.
indices_shape[1].merge_with(dense_shape_shape[0])
def get_shape(self):
"""Get the `TensorShape` representing the shape of the dense tensor.
Returns:
A `TensorShape` object.
"""
return tensor_util.constant_value_as_shape(self._dense_shape)
@property
def indices(self):
"""The indices of non-zero values in the represented dense tensor.
Returns:
A 2-D Tensor of int64 with dense_shape `[N, ndims]`, where `N` is the
number of non-zero values in the tensor, and `ndims` is the rank.
"""
return self._indices
@property
def values(self):
"""The non-zero values in the represented dense tensor.
Returns:
A 1-D Tensor of any data type.
"""
return self._values
@property
def op(self):
"""The `Operation` that produces `values` as an output."""
return self.values.op
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self._values.dtype
@property
def dense_shape(self):
"""A 1-D Tensor of int64 representing the shape of the dense tensor."""
return self._dense_shape
@property
def shape(self):
"""Legacy property returning `dense_shape`."""
return self._dense_shape
@property
def graph(self):
"""The `Graph` that contains the index, value, and dense_shape tensors."""
return self._indices.graph
def __str__(self):
return "SparseTensor(indices=%s, values=%s, dense_shape=%s)" % (
self._indices, self._values, self._dense_shape)
def eval(self, feed_dict=None, session=None):
"""Evaluates this sparse tensor in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for the operation that produces this
tensor.
*N.B.* Before invoking `SparseTensor.eval()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values.
See [`Session.run()`](../../api_docs/python/client.md#Session.run) for a
description of the valid feed values.
session: (Optional.) The `Session` to be used to evaluate this sparse
tensor. If none, the default session will be used.
Returns:
A `SparseTensorValue` object.
"""
indices, values, dense_shape = _eval_using_default_session(
[self.indices, self.values, self.dense_shape], feed_dict, self.graph,
session)
return SparseTensorValue(indices, values, dense_shape)
@staticmethod
def _override_operator(operator, func):
_override_helper(SparseTensor, operator, func)
class _STVIter(six.Iterator):
"""Iterator for the SparseTensorValue."""
def __init__(self, st):
self._st = st
self._ix = -1
def __iter__(self): # pylint: disable=non-iterator-returned
return self
def __next__(self):
self._ix += 1
if self._ix == 0:
return self._st.indices
elif self._ix == 1:
return self._st.values
elif self._ix == 2:
return self._st.dense_shape
else:
raise StopIteration
class SparseTensorValue(object):
"""Stores the calculated numpy arrays representing a `SparseTensor`.
Returned as the output of a session.run on a `SparseTensor` object.
"""
def __init__(self, indices, values, dense_shape=None, shape=None):
self._indices = indices
self._values = values
self._dense_shape = shape or dense_shape
@property
def indices(self):
return self._indices
@property
def values(self):
return self._values
@property
def dense_shape(self):
return self._dense_shape
@property
def shape(self):
return self._dense_shape
def __repr__(self):
return "SparseTensorValue(indices=%s, values=%s, dense_shape=%s)" % (
self._indices, self._values, self._dense_shape)
def __iter__(self): # pylint: disable=non-iterator-returned
return _STVIter(self)
def __getitem__(self, i):
return [self.indices, self.values, self.dense_shape][i]
def convert_to_tensor_or_sparse_tensor(value, dtype=None, name=None):
"""Converts value to a `SparseTensor` or `Tensor`.
Args:
value: A `SparseTensor`, `SparseTensorValue`, or an object whose type has a
registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the
type is inferred from the type of `value`.
name: Optional name to use if a new `Tensor` is created.
Returns:
A `SparseTensor` or `Tensor` based on `value`.
Raises:
RuntimeError: If result type is incompatible with `dtype`.
"""
if dtype is not None:
dtype = dtypes.as_dtype(dtype)
if isinstance(value, SparseTensorValue):
value = SparseTensor.from_value(value)
if isinstance(value, SparseTensor):
if dtype and not dtype.is_compatible_with(value.dtype):
raise RuntimeError(
"Sparse dtype: requested = %s, actual = %s" % (
dtype.name, value.dtype.name))
return value
return ops.internal_convert_to_tensor(
value, dtype=dtype, name=name)
| en | 0.697047 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Classes and functions used to construct graphs. # pylint: disable=g-bad-name # pylint: disable=protected-access # pylint: enable=protected-access Represents a sparse tensor. TensorFlow represents a sparse tensor as three separate dense tensors: `indices`, `values`, and `dense_shape`. In Python, the three tensors are collected into a `SparseTensor` class for ease of use. If you have separate `indices`, `values`, and `dense_shape` tensors, wrap them in a `SparseTensor` object before passing to the ops below. Concretely, the sparse tensor `SparseTensor(indices, values, dense_shape)` comprises the following components, where `N` and `ndims` are the number of values and number of dimensions in the `SparseTensor`, respectively: * `indices`: A 2-D int64 tensor of dense_shape `[N, ndims]`, which specifies the indices of the elements in the sparse tensor that contain nonzero values (elements are zero-indexed). For example, `indices=[[1,3], [2,4]]` specifies that the elements with indexes of [1,3] and [2,4] have nonzero values. * `values`: A 1-D tensor of any type and dense_shape `[N]`, which supplies the values for each element in `indices`. 
For example, given `indices=[[1,3], [2,4]]`, the parameter `values=[18, 3.6]` specifies that element [1,3] of the sparse tensor has a value of 18, and element [2,4] of the tensor has a value of 3.6. * `dense_shape`: A 1-D int64 tensor of dense_shape `[ndims]`, which specifies the dense_shape of the sparse tensor. Takes a list indicating the number of elements in each dimension. For example, `dense_shape=[3,6]` specifies a two-dimensional 3x6 tensor, `dense_shape=[2,3,4]` specifies a three-dimensional 2x3x4 tensor, and `dense_shape=[9]` specifies a one-dimensional tensor with 9 elements. The corresponding dense tensor satisfies: ```python dense.shape = dense_shape dense[tuple(indices[i])] = values[i] ``` By convention, `indices` should be sorted in row-major order (or equivalently lexicographic order on the tuples `indices[i]`). This is not enforced when `SparseTensor` objects are constructed, but most ops assume correct ordering. If the ordering of sparse tensor `st` is wrong, a fixed version can be obtained by calling `tf.sparse_reorder(st)`. Example: The sparse tensor ```python SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4]) ``` represents the dense tensor ```python [[1, 0, 0, 0] [0, 0, 2, 0] [0, 0, 0, 0]] ``` @@__init__ @@get_shape @@indices @@values @@dense_shape @@dtype @@op @@graph Creates a `SparseTensor`. Args: indices: A 2-D int64 tensor of dense_shape `[N, ndims]`. values: A 1-D tensor of any type and dense_shape `[N]`. dense_shape: A 1-D int64 tensor of dense_shape `[ndims]`. shape: Temporary. Legacy naming of dense_shape. Only one of `shape` or `dense_shape` must be provided. Returns: A `SparseTensor`. Raises: ValueError: if both `shape` and `dense_shape` are provided. # Always pass as_ref=True because we want to be able to update # values later if it is a VariableOp. # TODO(touts): Consider adding mutable_values() when 'values' # is a VariableOp and updating users of SparseTensor. 
# Assert number of rows in indices match the number of elements in values. # Assert number of columns in indices matches the number of elements in # dense_shape. Get the `TensorShape` representing the shape of the dense tensor. Returns: A `TensorShape` object. The indices of non-zero values in the represented dense tensor. Returns: A 2-D Tensor of int64 with dense_shape `[N, ndims]`, where `N` is the number of non-zero values in the tensor, and `ndims` is the rank. The non-zero values in the represented dense tensor. Returns: A 1-D Tensor of any data type. The `Operation` that produces `values` as an output. The `DType` of elements in this tensor. A 1-D Tensor of int64 representing the shape of the dense tensor. Legacy property returning `dense_shape`. The `Graph` that contains the index, value, and dense_shape tensors. Evaluates this sparse tensor in a `Session`. Calling this method will execute all preceding operations that produce the inputs needed for the operation that produces this tensor. *N.B.* Before invoking `SparseTensor.eval()`, its graph must have been launched in a session, and either a default session must be available, or `session` must be specified explicitly. Args: feed_dict: A dictionary that maps `Tensor` objects to feed values. See [`Session.run()`](../../api_docs/python/client.md#Session.run) for a description of the valid feed values. session: (Optional.) The `Session` to be used to evaluate this sparse tensor. If none, the default session will be used. Returns: A `SparseTensorValue` object. Iterator for the SparseTensorValue. # pylint: disable=non-iterator-returned Stores the calculated numpy arrays representing a `SparseTensor`. Returned as the output of a session.run on a `SparseTensor` object. # pylint: disable=non-iterator-returned Converts value to a `SparseTensor` or `Tensor`. Args: value: A `SparseTensor`, `SparseTensorValue`, or an object whose type has a registered `Tensor` conversion function. 
dtype: Optional element type for the returned tensor. If missing, the type is inferred from the type of `value`. name: Optional name to use if a new `Tensor` is created. Returns: A `SparseTensor` or `Tensor` based on `value`. Raises: RuntimeError: If result type is incompatible with `dtype`. | 2.090289 | 2 |
exaslct_src/lib/test_runner/upload_exported_container.py | mace84/script-languages | 0 | 6624974 | import pathlib
import luigi
from exaslct_src.lib.data.dependency_collector.dependency_release_info_collector import DependencyExportInfoCollector
from exaslct_src.lib.data.release_info import ExportInfo
from exaslct_src.lib.test_runner.upload_file_to_db import UploadFileToBucketFS
class UploadExportedContainer(UploadFileToBucketFS):
release_name = luigi.Parameter()
release_type = luigi.Parameter()
release_info_dict = luigi.DictParameter(significant=False)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.release_info = ExportInfo.from_dict(self.release_info_dict)
def get_log_file(self):
return "/exa/logs/cored/bucketfsd*"
def get_pattern_to_wait_for(self):
return self.release_info.name+".*extracted"
def get_file_to_upload(self):
return "/exports/" + pathlib.Path(self.release_info.cache_file).name # TODO directory /exports is as data dependency to SpawnTestContainer
def get_upload_target(self):
return "myudfs/"+self.release_info.name+".tar.gz"
| import pathlib
import luigi
from exaslct_src.lib.data.dependency_collector.dependency_release_info_collector import DependencyExportInfoCollector
from exaslct_src.lib.data.release_info import ExportInfo
from exaslct_src.lib.test_runner.upload_file_to_db import UploadFileToBucketFS
class UploadExportedContainer(UploadFileToBucketFS):
release_name = luigi.Parameter()
release_type = luigi.Parameter()
release_info_dict = luigi.DictParameter(significant=False)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.release_info = ExportInfo.from_dict(self.release_info_dict)
def get_log_file(self):
return "/exa/logs/cored/bucketfsd*"
def get_pattern_to_wait_for(self):
return self.release_info.name+".*extracted"
def get_file_to_upload(self):
return "/exports/" + pathlib.Path(self.release_info.cache_file).name # TODO directory /exports is as data dependency to SpawnTestContainer
def get_upload_target(self):
return "myudfs/"+self.release_info.name+".tar.gz"
| en | 0.906729 | # TODO directory /exports is as data dependency to SpawnTestContainer | 1.994827 | 2 |
src/sagemaker/local/data.py | anirudh2290/sagemaker-python-sdk | 1 | 6624975 | <filename>src/sagemaker/local/data.py<gh_stars>1-10
# Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Placeholder docstring"""
from __future__ import absolute_import
import os
import platform
import sys
import tempfile
from abc import ABCMeta
from abc import abstractmethod
from six import with_metaclass
from six.moves.urllib.parse import urlparse
import sagemaker.amazon.common
import sagemaker.local.utils
import sagemaker.utils
def get_data_source_instance(data_source, sagemaker_session):
    """Build the :class:`sagemaker.local.data.DataSource` matching a URI.

    Supported schemes are ``file://`` (local filesystem) and ``s3://``.

    Args:
        data_source (str): a valid URI that points to a data source.
        sagemaker_session (:class:`sagemaker.session.Session`): a SageMaker Session to
            interact with S3 if required.

    Returns:
        sagemaker.local.data.DataSource: an Instance of a Data Source

    Raises:
        ValueError: If parsed_uri scheme is neither `file` nor `s3`.
    """
    parsed_uri = urlparse(data_source)
    scheme = parsed_uri.scheme
    if scheme == "file":
        # netloc + path together reconstruct the full local path from the URI.
        return LocalFileDataSource(parsed_uri.netloc + parsed_uri.path)
    if scheme == "s3":
        return S3DataSource(parsed_uri.netloc, parsed_uri.path, sagemaker_session)
    raise ValueError(
        "data_source must be either file or s3. parsed_uri.scheme: {}".format(scheme)
    )
def get_splitter_instance(split_type):
"""Return an Instance of :class:`sagemaker.local.data.Splitter` according to
the specified `split_type` .
Args:
split_type (str): either 'Line' or 'RecordIO'. Can be left as None to
signal no data split will happen.
Returns
:class:`sagemaker.local.data.Splitter`: an Instance of a Splitter
"""
if split_type is None:
return NoneSplitter()
if split_type == "Line":
return LineSplitter()
if split_type == "RecordIO":
return RecordIOSplitter()
raise ValueError("Invalid Split Type: %s" % split_type)
def get_batch_strategy_instance(strategy, splitter):
    """Return an Instance of :class:`sagemaker.local.data.BatchStrategy` according to `strategy`

    Args:
        strategy (str): Either 'SingleRecord' or 'MultiRecord'
        splitter (:class:`sagemaker.local.data.Splitter`): splitter to get the data from.

    Returns:
        :class:`sagemaker.local.data.BatchStrategy`: an Instance of a BatchStrategy

    Raises:
        ValueError: if ``strategy`` is neither 'SingleRecord' nor 'MultiRecord'.
    """
    if strategy == "SingleRecord":
        return SingleRecordStrategy(splitter)
    if strategy == "MultiRecord":
        return MultiRecordStrategy(splitter)
    # Bug fix: the original message never interpolated the invalid strategy
    # name, so the raised error contained a literal "%s".
    raise ValueError(
        'Invalid Batch Strategy: %s - Valid Strategies: "SingleRecord", "MultiRecord"' % strategy
    )
class DataSource(with_metaclass(ABCMeta, object)):
    """Abstract base class for the data sources used by Local Mode.

    Concrete implementations expose a set of files on the local filesystem,
    fetching them first if they live somewhere else (e.g. S3).
    """

    @abstractmethod
    def get_file_list(self):
        """Retrieve the list of absolute paths to all the files in this data
        source.

        Returns:
            List[str]: List of absolute paths.
        """

    @abstractmethod
    def get_root_dir(self):
        """Retrieve the absolute path to the root directory of this data source.

        Returns:
            str: absolute path to the root directory of this data source.
        """
class LocalFileDataSource(DataSource):
    """Represents a data source within the local filesystem."""

    def __init__(self, root_path):
        """Create a LocalFileDataSource rooted at ``root_path``.

        Args:
            root_path (str): path to an existing file or directory.

        Raises:
            RuntimeError: if ``root_path`` does not exist.
        """
        super(LocalFileDataSource, self).__init__()
        self.root_path = os.path.abspath(root_path)
        if not os.path.exists(self.root_path):
            raise RuntimeError("Invalid data source: %s does not exist." % self.root_path)

    def get_file_list(self):
        """Retrieve the list of absolute paths to all the files in this data
        source.

        Returns:
            List[str]: List of absolute paths.
        """
        if not os.path.isdir(self.root_path):
            return [self.root_path]
        # Only direct children that are regular files are reported;
        # sub-directories are not traversed.
        candidates = (os.path.join(self.root_path, name) for name in os.listdir(self.root_path))
        return [path for path in candidates if os.path.isfile(path)]

    def get_root_dir(self):
        """Retrieve the absolute path to the root directory of this data source.

        Returns:
            str: absolute path to the root directory of this data source.
        """
        return self.root_path if os.path.isdir(self.root_path) else os.path.dirname(self.root_path)
class S3DataSource(DataSource):
"""Defines a data source given by a bucket and S3 prefix. The contents will
be downloaded and then processed as local data.
"""
def __init__(self, bucket, prefix, sagemaker_session):
"""Create an S3DataSource instance
Args:
bucket (str): S3 bucket name
prefix (str): S3 prefix path to the data
sagemaker_session (:class:`sagemaker.session.Session`): a sagemaker_session with the
desired settings
to talk to S3
"""
super(S3DataSource, self).__init__()
# Create a temporary dir to store the S3 contents
root_dir = sagemaker.utils.get_config_value(
"local.container_root", sagemaker_session.config
)
if root_dir:
root_dir = os.path.abspath(root_dir)
working_dir = tempfile.mkdtemp(dir=root_dir)
# Docker cannot mount Mac OS /var folder properly see
# https://forums.docker.com/t/var-folders-isnt-mounted-properly/9600
# Only apply this workaround if the user didn't provide an alternate storage root dir.
if root_dir is None and platform.system() == "Darwin":
working_dir = "/private{}".format(working_dir)
sagemaker.utils.download_folder(bucket, prefix, working_dir, sagemaker_session)
self.files = LocalFileDataSource(working_dir)
def get_file_list(self):
"""Retrieve the list of absolute paths to all the files in this data
source.
Returns:
List[str]: List of absolute paths.
"""
return self.files.get_file_list()
def get_root_dir(self):
"""Retrieve the absolute path to the root directory of this data source.
Returns:
str: absolute path to the root directory of this data source.
"""
return self.files.get_root_dir()
class Splitter(with_metaclass(ABCMeta, object)):
"""Placeholder docstring"""
@abstractmethod
def split(self, file):
"""Split a file into records using a specific strategy
Args:
file (str): path to the file to split
Returns:
generator for the individual records that were split from the file
"""
class NoneSplitter(Splitter):
"""Does not split records, essentially reads the whole file."""
# non-utf8 characters.
_textchars = bytearray({7, 8, 9, 10, 12, 13, 27} | set(range(0x20, 0x100)) - {0x7F})
def split(self, filename):
"""Split a file into records using a specific strategy.
For this NoneSplitter there is no actual split happening and the file
is returned as a whole.
Args:
filename (str): path to the file to split
Returns: generator for the individual records that were split from
the file
"""
with open(filename, "rb") as f:
buf = f.read()
if not self._is_binary(buf):
buf = buf.decode()
yield buf
def _is_binary(self, buf):
"""Check whether `buf` contains binary data.
Returns True if `buf` contains any non-utf-8 characters.
Args:
buf (bytes): data to inspect
Returns:
True if data is binary, otherwise False
"""
return bool(buf.translate(None, self._textchars))
class LineSplitter(Splitter):
"""Split records by new line."""
def split(self, file):
"""Split a file into records using a specific strategy
This LineSplitter splits the file on each line break.
Args:
file (str): path to the file to split
Returns: generator for the individual records that were split from
the file
"""
with open(file, "r") as f:
for line in f:
yield line
class RecordIOSplitter(Splitter):
"""Split using Amazon Recordio.
Not useful for string content.
"""
def split(self, file):
"""Split a file into records using a specific strategy
This RecordIOSplitter splits the data into individual RecordIO
records.
Args:
file (str): path to the file to split
Returns: generator for the individual records that were split from
the file
"""
with open(file, "rb") as f:
for record in sagemaker.amazon.common.read_recordio(f):
yield record
class BatchStrategy(with_metaclass(ABCMeta, object)):
"""Placeholder docstring"""
def __init__(self, splitter):
"""Create a Batch Strategy Instance
Args:
splitter (sagemaker.local.data.Splitter): A Splitter to pre-process
the data before batching.
"""
self.splitter = splitter
@abstractmethod
def pad(self, file, size):
"""Group together as many records as possible to fit in the specified
size
Args:
file (str): file path to read the records from.
size (int): maximum size in MB that each group of records will be
fitted to. passing 0 means unlimited size.
Returns:
generator of records
"""
class MultiRecordStrategy(BatchStrategy):
"""Feed multiple records at a time for batch inference.
Will group up as many records as possible within the payload specified.
"""
def pad(self, file, size=6):
"""Group together as many records as possible to fit in the specified
size
Args:
file (str): file path to read the records from.
size (int): maximum size in MB that each group of records will be
fitted to. passing 0 means unlimited size.
Returns:
generator of records
"""
buffer = ""
for element in self.splitter.split(file):
if _payload_size_within_limit(buffer + element, size):
buffer += element
else:
tmp = buffer
buffer = element
yield tmp
if _validate_payload_size(buffer, size):
yield buffer
class SingleRecordStrategy(BatchStrategy):
"""Feed a single record at a time for batch inference.
If a single record does not fit within the payload specified it will
throw a RuntimeError.
"""
def pad(self, file, size=6):
"""Group together as many records as possible to fit in the specified
size
This SingleRecordStrategy will not group any record and will return
them one by one as long as they are within the maximum size.
Args:
file (str): file path to read the records from.
size (int): maximum size in MB that each group of records will be
fitted to. passing 0 means unlimited size.
Returns:
generator of records
"""
for element in self.splitter.split(file):
if _validate_payload_size(element, size):
yield element
def _payload_size_within_limit(payload, size):
"""
Args:
payload:
size:
"""
size_in_bytes = size * 1024 * 1024
if size == 0:
return True
return sys.getsizeof(payload) < size_in_bytes
def _validate_payload_size(payload, size):
"""Check if a payload is within the size in MB threshold.
Raise an exception if the payload is beyond the size in MB threshold.
Args:
payload: data that will be checked
size (int): max size in MB
Returns:
bool: True if within bounds. if size=0 it will always return True
Raises:
RuntimeError: If the payload is larger a runtime error is thrown.
"""
if _payload_size_within_limit(payload, size):
return True
raise RuntimeError("Record is larger than %sMB. Please increase your max_payload" % size)
| <filename>src/sagemaker/local/data.py<gh_stars>1-10
# Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Placeholder docstring"""
from __future__ import absolute_import
import os
import platform
import sys
import tempfile
from abc import ABCMeta
from abc import abstractmethod
from six import with_metaclass
from six.moves.urllib.parse import urlparse
import sagemaker.amazon.common
import sagemaker.local.utils
import sagemaker.utils
def get_data_source_instance(data_source, sagemaker_session):
"""Return an Instance of :class:`sagemaker.local.data.DataSource` that can
handle the provided data_source URI.
data_source can be either file:// or s3://
Args:
data_source (str): a valid URI that points to a data source.
sagemaker_session (:class:`sagemaker.session.Session`): a SageMaker Session to
interact with S3 if required.
Returns:
sagemaker.local.data.DataSource: an Instance of a Data Source
Raises:
ValueError: If parsed_uri scheme is neither `file` nor `s3` , raise an
error.
"""
parsed_uri = urlparse(data_source)
if parsed_uri.scheme == "file":
return LocalFileDataSource(parsed_uri.netloc + parsed_uri.path)
if parsed_uri.scheme == "s3":
return S3DataSource(parsed_uri.netloc, parsed_uri.path, sagemaker_session)
raise ValueError(
"data_source must be either file or s3. parsed_uri.scheme: {}".format(parsed_uri.scheme)
)
def get_splitter_instance(split_type):
"""Return an Instance of :class:`sagemaker.local.data.Splitter` according to
the specified `split_type` .
Args:
split_type (str): either 'Line' or 'RecordIO'. Can be left as None to
signal no data split will happen.
Returns
:class:`sagemaker.local.data.Splitter`: an Instance of a Splitter
"""
if split_type is None:
return NoneSplitter()
if split_type == "Line":
return LineSplitter()
if split_type == "RecordIO":
return RecordIOSplitter()
raise ValueError("Invalid Split Type: %s" % split_type)
def get_batch_strategy_instance(strategy, splitter):
"""Return an Instance of :class:`sagemaker.local.data.BatchStrategy` according to `strategy`
Args:
strategy (str): Either 'SingleRecord' or 'MultiRecord'
splitter (:class:`sagemaker.local.data.Splitter): splitter to get the data from.
Returns
:class:`sagemaker.local.data.BatchStrategy`: an Instance of a BatchStrategy
"""
if strategy == "SingleRecord":
return SingleRecordStrategy(splitter)
if strategy == "MultiRecord":
return MultiRecordStrategy(splitter)
raise ValueError('Invalid Batch Strategy: %s - Valid Strategies: "SingleRecord", "MultiRecord"')
class DataSource(with_metaclass(ABCMeta, object)):
"""Placeholder docstring"""
@abstractmethod
def get_file_list(self):
"""Retrieve the list of absolute paths to all the files in this data
source.
Returns:
List[str]: List of absolute paths.
"""
@abstractmethod
def get_root_dir(self):
"""Retrieve the absolute path to the root directory of this data source.
Returns:
str: absolute path to the root directory of this data source.
"""
class LocalFileDataSource(DataSource):
"""Represents a data source within the local filesystem."""
def __init__(self, root_path):
"""
Args:
root_path:
"""
super(LocalFileDataSource, self).__init__()
self.root_path = os.path.abspath(root_path)
if not os.path.exists(self.root_path):
raise RuntimeError("Invalid data source: %s does not exist." % self.root_path)
def get_file_list(self):
"""Retrieve the list of absolute paths to all the files in this data
source.
Returns:
List[str] List of absolute paths.
"""
if os.path.isdir(self.root_path):
return [
os.path.join(self.root_path, f)
for f in os.listdir(self.root_path)
if os.path.isfile(os.path.join(self.root_path, f))
]
return [self.root_path]
def get_root_dir(self):
"""Retrieve the absolute path to the root directory of this data source.
Returns:
str: absolute path to the root directory of this data source.
"""
if os.path.isdir(self.root_path):
return self.root_path
return os.path.dirname(self.root_path)
class S3DataSource(DataSource):
"""Defines a data source given by a bucket and S3 prefix. The contents will
be downloaded and then processed as local data.
"""
def __init__(self, bucket, prefix, sagemaker_session):
"""Create an S3DataSource instance
Args:
bucket (str): S3 bucket name
prefix (str): S3 prefix path to the data
sagemaker_session (:class:`sagemaker.session.Session`): a sagemaker_session with the
desired settings
to talk to S3
"""
super(S3DataSource, self).__init__()
# Create a temporary dir to store the S3 contents
root_dir = sagemaker.utils.get_config_value(
"local.container_root", sagemaker_session.config
)
if root_dir:
root_dir = os.path.abspath(root_dir)
working_dir = tempfile.mkdtemp(dir=root_dir)
# Docker cannot mount Mac OS /var folder properly see
# https://forums.docker.com/t/var-folders-isnt-mounted-properly/9600
# Only apply this workaround if the user didn't provide an alternate storage root dir.
if root_dir is None and platform.system() == "Darwin":
working_dir = "/private{}".format(working_dir)
sagemaker.utils.download_folder(bucket, prefix, working_dir, sagemaker_session)
self.files = LocalFileDataSource(working_dir)
def get_file_list(self):
"""Retrieve the list of absolute paths to all the files in this data
source.
Returns:
List[str]: List of absolute paths.
"""
return self.files.get_file_list()
def get_root_dir(self):
"""Retrieve the absolute path to the root directory of this data source.
Returns:
str: absolute path to the root directory of this data source.
"""
return self.files.get_root_dir()
class Splitter(with_metaclass(ABCMeta, object)):
"""Placeholder docstring"""
@abstractmethod
def split(self, file):
"""Split a file into records using a specific strategy
Args:
file (str): path to the file to split
Returns:
generator for the individual records that were split from the file
"""
class NoneSplitter(Splitter):
"""Does not split records, essentially reads the whole file."""
# non-utf8 characters.
_textchars = bytearray({7, 8, 9, 10, 12, 13, 27} | set(range(0x20, 0x100)) - {0x7F})
def split(self, filename):
"""Split a file into records using a specific strategy.
For this NoneSplitter there is no actual split happening and the file
is returned as a whole.
Args:
filename (str): path to the file to split
Returns: generator for the individual records that were split from
the file
"""
with open(filename, "rb") as f:
buf = f.read()
if not self._is_binary(buf):
buf = buf.decode()
yield buf
def _is_binary(self, buf):
"""Check whether `buf` contains binary data.
Returns True if `buf` contains any non-utf-8 characters.
Args:
buf (bytes): data to inspect
Returns:
True if data is binary, otherwise False
"""
return bool(buf.translate(None, self._textchars))
class LineSplitter(Splitter):
"""Split records by new line."""
def split(self, file):
"""Split a file into records using a specific strategy
This LineSplitter splits the file on each line break.
Args:
file (str): path to the file to split
Returns: generator for the individual records that were split from
the file
"""
with open(file, "r") as f:
for line in f:
yield line
class RecordIOSplitter(Splitter):
"""Split using Amazon Recordio.
Not useful for string content.
"""
def split(self, file):
"""Split a file into records using a specific strategy
This RecordIOSplitter splits the data into individual RecordIO
records.
Args:
file (str): path to the file to split
Returns: generator for the individual records that were split from
the file
"""
with open(file, "rb") as f:
for record in sagemaker.amazon.common.read_recordio(f):
yield record
class BatchStrategy(with_metaclass(ABCMeta, object)):
"""Placeholder docstring"""
def __init__(self, splitter):
"""Create a Batch Strategy Instance
Args:
splitter (sagemaker.local.data.Splitter): A Splitter to pre-process
the data before batching.
"""
self.splitter = splitter
@abstractmethod
def pad(self, file, size):
"""Group together as many records as possible to fit in the specified
size
Args:
file (str): file path to read the records from.
size (int): maximum size in MB that each group of records will be
fitted to. passing 0 means unlimited size.
Returns:
generator of records
"""
class MultiRecordStrategy(BatchStrategy):
"""Feed multiple records at a time for batch inference.
Will group up as many records as possible within the payload specified.
"""
def pad(self, file, size=6):
"""Group together as many records as possible to fit in the specified
size
Args:
file (str): file path to read the records from.
size (int): maximum size in MB that each group of records will be
fitted to. passing 0 means unlimited size.
Returns:
generator of records
"""
buffer = ""
for element in self.splitter.split(file):
if _payload_size_within_limit(buffer + element, size):
buffer += element
else:
tmp = buffer
buffer = element
yield tmp
if _validate_payload_size(buffer, size):
yield buffer
class SingleRecordStrategy(BatchStrategy):
"""Feed a single record at a time for batch inference.
If a single record does not fit within the payload specified it will
throw a RuntimeError.
"""
def pad(self, file, size=6):
"""Group together as many records as possible to fit in the specified
size
This SingleRecordStrategy will not group any record and will return
them one by one as long as they are within the maximum size.
Args:
file (str): file path to read the records from.
size (int): maximum size in MB that each group of records will be
fitted to. passing 0 means unlimited size.
Returns:
generator of records
"""
for element in self.splitter.split(file):
if _validate_payload_size(element, size):
yield element
def _payload_size_within_limit(payload, size):
"""
Args:
payload:
size:
"""
size_in_bytes = size * 1024 * 1024
if size == 0:
return True
return sys.getsizeof(payload) < size_in_bytes
def _validate_payload_size(payload, size):
"""Check if a payload is within the size in MB threshold.
Raise an exception if the payload is beyond the size in MB threshold.
Args:
payload: data that will be checked
size (int): max size in MB
Returns:
bool: True if within bounds. if size=0 it will always return True
Raises:
RuntimeError: If the payload is larger a runtime error is thrown.
"""
if _payload_size_within_limit(payload, size):
return True
raise RuntimeError("Record is larger than %sMB. Please increase your max_payload" % size)
| en | 0.795558 | # Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. Placeholder docstring Return an Instance of :class:`sagemaker.local.data.DataSource` that can handle the provided data_source URI. data_source can be either file:// or s3:// Args: data_source (str): a valid URI that points to a data source. sagemaker_session (:class:`sagemaker.session.Session`): a SageMaker Session to interact with S3 if required. Returns: sagemaker.local.data.DataSource: an Instance of a Data Source Raises: ValueError: If parsed_uri scheme is neither `file` nor `s3` , raise an error. Return an Instance of :class:`sagemaker.local.data.Splitter` according to the specified `split_type` . Args: split_type (str): either 'Line' or 'RecordIO'. Can be left as None to signal no data split will happen. Returns :class:`sagemaker.local.data.Splitter`: an Instance of a Splitter Return an Instance of :class:`sagemaker.local.data.BatchStrategy` according to `strategy` Args: strategy (str): Either 'SingleRecord' or 'MultiRecord' splitter (:class:`sagemaker.local.data.Splitter): splitter to get the data from. Returns :class:`sagemaker.local.data.BatchStrategy`: an Instance of a BatchStrategy Placeholder docstring Retrieve the list of absolute paths to all the files in this data source. Returns: List[str]: List of absolute paths. Retrieve the absolute path to the root directory of this data source. Returns: str: absolute path to the root directory of this data source. 
Represents a data source within the local filesystem. Args: root_path: Retrieve the list of absolute paths to all the files in this data source. Returns: List[str] List of absolute paths. Retrieve the absolute path to the root directory of this data source. Returns: str: absolute path to the root directory of this data source. Defines a data source given by a bucket and S3 prefix. The contents will be downloaded and then processed as local data. Create an S3DataSource instance Args: bucket (str): S3 bucket name prefix (str): S3 prefix path to the data sagemaker_session (:class:`sagemaker.session.Session`): a sagemaker_session with the desired settings to talk to S3 # Create a temporary dir to store the S3 contents # Docker cannot mount Mac OS /var folder properly see # https://forums.docker.com/t/var-folders-isnt-mounted-properly/9600 # Only apply this workaround if the user didn't provide an alternate storage root dir. Retrieve the list of absolute paths to all the files in this data source. Returns: List[str]: List of absolute paths. Retrieve the absolute path to the root directory of this data source. Returns: str: absolute path to the root directory of this data source. Placeholder docstring Split a file into records using a specific strategy Args: file (str): path to the file to split Returns: generator for the individual records that were split from the file Does not split records, essentially reads the whole file. # non-utf8 characters. Split a file into records using a specific strategy. For this NoneSplitter there is no actual split happening and the file is returned as a whole. Args: filename (str): path to the file to split Returns: generator for the individual records that were split from the file Check whether `buf` contains binary data. Returns True if `buf` contains any non-utf-8 characters. Args: buf (bytes): data to inspect Returns: True if data is binary, otherwise False Split records by new line. 
Split a file into records using a specific strategy This LineSplitter splits the file on each line break. Args: file (str): path to the file to split Returns: generator for the individual records that were split from the file Split using Amazon Recordio. Not useful for string content. Split a file into records using a specific strategy This RecordIOSplitter splits the data into individual RecordIO records. Args: file (str): path to the file to split Returns: generator for the individual records that were split from the file Placeholder docstring Create a Batch Strategy Instance Args: splitter (sagemaker.local.data.Splitter): A Splitter to pre-process the data before batching. Group together as many records as possible to fit in the specified size Args: file (str): file path to read the records from. size (int): maximum size in MB that each group of records will be fitted to. passing 0 means unlimited size. Returns: generator of records Feed multiple records at a time for batch inference. Will group up as many records as possible within the payload specified. Group together as many records as possible to fit in the specified size Args: file (str): file path to read the records from. size (int): maximum size in MB that each group of records will be fitted to. passing 0 means unlimited size. Returns: generator of records Feed a single record at a time for batch inference. If a single record does not fit within the payload specified it will throw a RuntimeError. Group together as many records as possible to fit in the specified size This SingleRecordStrategy will not group any record and will return them one by one as long as they are within the maximum size. Args: file (str): file path to read the records from. size (int): maximum size in MB that each group of records will be fitted to. passing 0 means unlimited size. Returns: generator of records Args: payload: size: Check if a payload is within the size in MB threshold. 
Raise an exception if the payload is beyond the size in MB threshold. Args: payload: data that will be checked size (int): max size in MB Returns: bool: True if within bounds. if size=0 it will always return True Raises: RuntimeError: If the payload is larger a runtime error is thrown. | 1.962068 | 2 |
IntegerExample.py | ZnoKunG/PythonProject | 0 | 6624976 | print(18/2)
print(type(18/2))
print(18+2)
print(18-2)
print(18*2) | print(18/2)
print(type(18/2))
print(18+2)
print(18-2)
print(18*2) | none | 1 | 2.340098 | 2 | |
day_tripper_project/day_tripper_application/apps.py | JasonSpeerbrecker/Day-Tripper | 0 | 6624977 | <gh_stars>0
from django.apps import AppConfig
class DayTripperApplicationConfig(AppConfig):
name = 'day_tripper_application'
| from django.apps import AppConfig
class DayTripperApplicationConfig(AppConfig):
name = 'day_tripper_application' | none | 1 | 1.087902 | 1 | |
etc/scripts/docenizer.py | DarkArc/compiler-explorer | 2 | 6624978 | <filename>etc/scripts/docenizer.py
#! /usr/bin/env python2
# -*- coding: utf-8 -*-
import argparse
import json
import os
import re
import sys
import tarfile
import urllib
try:
from bs4 import BeautifulSoup
except ImportError:
raise ImportError("Please install BeautifulSoup (apt-get install python-bs4 should do it)")
parser = argparse.ArgumentParser(description='Docenizes HTML version of the official Intel Asm PDFs')
parser.add_argument('-i', '--inputfolder', type=str,
help='Folder where the input files reside as .html. Default is ./asm-docs/',
default='asm-docs')
parser.add_argument('-o', '--outputpath', type=str, help='Final path of the .js file. Default is ./asm-docs.js',
default='./asm-docs.js')
parser.add_argument('-d', '--downloadfolder', type=str,
help='Folder where the archive will be downloaded and extracted', default='asm-docs')
# The maximum number of paragraphs from the description to copy.
MAX_DESC_PARAS = 5
STRIP_PREFIX = re.compile(r'^(([0-9a-fA-F]{2}|m64|NP|(REX|E?VEX\.)[.0-9A-Z]*|/[0-9a-z]+|[a-z]+)\b\s*)*')
INSTRUCTION_RE = re.compile(r'^([A-Z][A-Z0-9]+)\*?(\s+|$)')
# Some instructions are so broken we just take their names from the filename
UNPARSEABLE_INSTR_NAMES = ['PSRLW:PSRLD:PSRLQ', 'PSLLW:PSLLD:PSLLQ']
# Some files contain instructions which cannot be parsed and which compilers are unlikely to emit
IGNORED_FILE_NAMES = [
# SGX pseudo-instructions
"EADD",
"EACCEPT",
"EAUG",
"EACCEPTCOPY",
"EDECVIRTCHILD",
"EINCVIRTCHILD",
"EINIT",
"ELDB:ELDU:ELDBC:ELBUC",
"EMODPE",
"EMODPR",
"EMODT",
"ERDINFO",
"ESETCONTEXT",
"ETRACKC",
"EBLOCK",
"ECREATE",
"EDBGRD",
"EDBGWR",
"EENTER",
"EEXIT",
"EEXTEND",
"EGETKEY",
"ELDB",
"ELDU",
"ENCLS",
"ENCLU",
"EPA",
"EREMOVE",
"EREPORT",
"ERESUME",
"ETRACK",
"EWB",
# VMX instructions
"INVEPT",
"INVVPID",
"VMCALL",
"VMCLEAR",
"VMFUNC",
"VMLAUNCH",
"VMLAUNCH:VMRESUME",
"VMPTRLD",
"VMPTRST",
"VMREAD",
"VMRESUME",
"VMWRITE",
"VMXOFF",
"VMXON",
# Other instructions
"INVLPG",
"LAHF",
"RDMSR",
"SGDT",
# Unparsable instructions
# These instructions should be supported in the future
"MONITOR",
"MOVDQ2Q",
"MOVBE",
"MFENCE",
]
# Some instructions are defined in multiple files. We ignore a specific set of the
# duplicates here.
IGNORED_DUPLICATES = [
'MOV-1', # move to control reg
'MOV-2', # move to debug reg
'CMPSD', # compare doubleword (defined in CMPS:CMPSB:CMPSW:CMPSD:CMPSQ)
'MOVQ', # defined in MOVD:MOVQ
'MOVSD', # defined in MOVS:MOVSB:MOVSW:MOVSD:MOVSQ
'VPBROADCASTB:VPBROADCASTW:VPBROADCASTD:VPBROADCASTQ', # defined in VPBROADCAST
"VGATHERDPS:VGATHERDPD",
"VGATHERQPS:VGATHERQPD",
"VPGATHERDD:VPGATHERQD",
"VPGATHERDQ:VPGATHERQQ",
]
# Where to extract the asmdoc archive.
ASMDOC_DIR = "asm-docs"
ARCHIVE_URL = "http://www.felixcloutier.com/x86/x86.tbz2"
ARCHIVE_NAME = "x86.tbz2"
class Instruction(object):
def __init__(self, name, names, tooltip, body):
self.name = name
self.names = names
self.tooltip = tooltip.rstrip(': ,')
self.body = body
def __str__(self):
return "{} = {}\n{}".format(self.names, self.tooltip, self.body)
def get_url_for_instruction(instr):
return "http://www.felixcloutier.com/x86/{}.html".format(urllib.quote(instr.name))
def download_asm_doc_archive(downloadfolder):
if not os.path.exists(downloadfolder):
print "Creating {} as download folder".format(downloadfolder)
os.makedirs(downloadfolder)
elif not os.path.isdir(downloadfolder):
print "Error: download folder {} is not a directory".format(downloadfolder)
sys.exit(1)
archive_name = os.path.join(downloadfolder, ARCHIVE_NAME)
print("Downloading archive...")
urllib.urlretrieve(ARCHIVE_URL, archive_name)
def extract_asm_doc_archive(downloadfolder, inputfolder):
print "Extracting file..."
if os.path.isdir(os.path.join(inputfolder, "html")):
for root, dirs, files in os.walk(os.path.join(inputfolder, "html")):
for file in files:
if os.path.splitext(file)[1] == ".html":
os.remove(os.path.join(root, file))
tar = tarfile.open(os.path.join(downloadfolder, ARCHIVE_NAME))
tar.extractall(path=inputfolder)
def strip_non_instr(i):
# removes junk from encodings where the opcode is in the middle
# of prefix stuff. e.g.
# 66 0f 38 30 /r PMOVZXBW xmm1, xmm2/m64
return STRIP_PREFIX.sub('', i)
def instr_name(i):
match = INSTRUCTION_RE.match(strip_non_instr(i))
if match:
return match.group(1)
def get_description_paragraphs(document_soup):
description_header_node = document_soup.find(id="Description")
i = 0
description_paragraph_node = description_header_node.next_sibling.next_sibling
description_paragraphs = []
while i < MAX_DESC_PARAS and len(description_paragraph_node.text) > 20:
if description_paragraph_node.name == "p":
description_paragraphs.append(description_paragraph_node)
i = i + 1
# Move two siblings forward. Next sibling is the line feed.
description_paragraph_node = description_paragraph_node.next_sibling.next_sibling
return description_paragraphs
def parse(filename, f):
doc = BeautifulSoup(f, 'html.parser')
if doc.table is None:
print filename + ": Failed to find table"
return None
table = read_table(doc.table)
names = set()
def add_all(instrs):
for i in instrs:
instruction_name = instr_name(i)
if instruction_name:
names.add(instruction_name)
for inst in table:
if 'Opcode/Instruction' in inst:
add_all(inst['Opcode/Instruction'].split("\n"))
elif 'OpcodeInstruction' in inst:
add_all(inst['OpcodeInstruction'].split("\n"))
elif 'Opcode Instruction' in inst:
add_all(inst['Opcode Instruction'].split("\n"))
elif 'Opcode*/Instruction' in inst:
add_all(inst['Opcode*/Instruction'].split("\n"))
elif 'Opcode / Instruction' in inst:
add_all(inst['Opcode / Instruction'].split("\n"))
elif 'Instruction' in inst:
instruction_name = instr_name(inst['Instruction'])
if not instruction_name:
print "Unable to get instruction from:", inst['Instruction']
else:
names.add(instruction_name)
# else, skip the line
if not names:
if filename in UNPARSEABLE_INSTR_NAMES:
for inst in filename.split(":"):
names.add(inst)
else:
print filename + ": Failed to read instruction table"
return None
sections = {}
for section_header in doc.find_all("h2"):
children = []
first = section_header.next_sibling
while first and first.name != 'h2':
if str(first).strip():
children.append(first)
first = first.next_sibling
sections[section_header.text] = children
description_paragraphs = get_description_paragraphs(doc)
return Instruction(
filename,
names,
description_paragraphs[0].text.strip(),
''.join(map(lambda x: str(x), description_paragraphs)).strip())
def read_table(table):
# Finding all 'th' is not enough, since some headers are 'td'.
# Instead, walk through all children of the first 'tr', filter out those
# that are only whitespace, keep `get_text()` on the others.
headers = list(
map(lambda th: th.get_text(),
filter(lambda th: unicode(th).strip(), table.tr.children)))
result = []
if headers:
# common case
for row in table.find_all('tr'):
obj = {}
for column, name in zip(row.find_all('td'), headers):
# Remove '\n's in names that contain it.
obj[name.replace('\n', '')] = column.get_text()
if obj:
result.append(obj)
else:
# Cases like BEXTR and BZHI
rows = table.find_all('tr')
if len(rows) != 1:
return []
obj = {}
for td in rows[0].find_all('td'):
header = td.p.strong.get_text()
td.p.strong.decompose()
obj[header] = td.get_text()
result.append(obj)
return result
def parse_html(directory):
print "Parsing instructions..."
instructions = []
for root, dirs, files in os.walk(directory):
for file in files:
if file.endswith(".html") and file != 'index.html':
with open(os.path.join(root, file)) as f2:
name = os.path.splitext(file)[0]
if name in IGNORED_DUPLICATES or name in IGNORED_FILE_NAMES:
continue
instruction = parse(name, f2)
if not instruction:
continue
instructions.append(instruction)
return instructions
def self_test(instructions, directory):
    """Check each instruction maps back to a documentation file.

    Returns True iff every instruction has a matching
    <directory>/html/<name>.html file; a warning is printed per miss.
    """
    html_dir = os.path.join(directory, "html")
    missing = [inst.name for inst in instructions
               if not os.path.isfile(os.path.join(html_dir, inst.name + ".html"))]
    for name in missing:
        print("Warning: {} has not file associated".format(name))
    return not missing
def docenizer():
args = parser.parse_args()
print "Called with: {}".format(args)
# If we don't have the html folder already...
if not os.path.isdir(os.path.join(args.inputfolder, 'html')):
# We don't, try with the compressed file
if not os.path.isfile(os.path.join(args.downloadfolder, "x86.tbz2")):
# We can't find that either. Download it
try:
download_asm_doc_archive(args.downloadfolder)
extract_asm_doc_archive(args.downloadfolder, args.inputfolder)
except IOError as e:
print("Error when downloading archive:")
print(e)
sys.exit(1)
else:
# We have a file already downloaded
extract_asm_doc_archive(args.downloadfolder, args.inputfolder)
instructions = parse_html(args.inputfolder)
instructions.sort(lambda x, y: cmp(x.name, y.name))
self_test(instructions, args.inputfolder)
all_inst = set()
for inst in instructions:
if not all_inst.isdisjoint(inst.names):
print "Overlap in instruction names: {} for {}".format(
inst.names.intersection(all_inst), inst.name)
all_inst = all_inst.union(inst.names)
if not self_test(instructions, args.inputfolder):
print("Tests do not pass. Not writing output file. Aborting.")
sys.exit(3)
print "Writing {} instructions".format(len(instructions))
with open(args.outputpath, 'w') as f:
f.write("""
function getAsmOpcode(opcode) {
if (!opcode) return;
switch (opcode.toUpperCase()) {
""")
for inst in instructions:
for name in inst.names:
f.write(' case "{}":\n'.format(name))
f.write(' return {}'.format(json.dumps({
"tooltip": inst.tooltip,
"html": inst.body,
"url": get_url_for_instruction(inst)
}, indent=16, separators=(',', ': ')))[:-1] + ' };\n\n')
f.write("""
}
}
module.exports = {
getAsmOpcode: getAsmOpcode
};
""")
if __name__ == '__main__':
docenizer()
| <filename>etc/scripts/docenizer.py
#! /usr/bin/env python2
# -*- coding: utf-8 -*-
import argparse
import json
import os
import re
import sys
import tarfile
import urllib
try:
from bs4 import BeautifulSoup
except ImportError:
raise ImportError("Please install BeautifulSoup (apt-get install python-bs4 should do it)")
# Command-line interface; parsed lazily inside docenizer().
parser = argparse.ArgumentParser(description='Docenizes HTML version of the official Intel Asm PDFs')
parser.add_argument('-i', '--inputfolder', type=str,
                    help='Folder where the input files reside as .html. Default is ./asm-docs/',
                    default='asm-docs')
parser.add_argument('-o', '--outputpath', type=str, help='Final path of the .js file. Default is ./asm-docs.js',
                    default='./asm-docs.js')
parser.add_argument('-d', '--downloadfolder', type=str,
                    help='Folder where the archive will be downloaded and extracted', default='asm-docs')
# The maximum number of paragraphs from the description to copy.
MAX_DESC_PARAS = 5
# Leading encoding junk to strip before reading a mnemonic: hex opcode
# bytes, operand-size tags (m64, NP), REX/VEX/EVEX prefixes, /r-style
# ModRM digits and lowercase operand words.
STRIP_PREFIX = re.compile(r'^(([0-9a-fA-F]{2}|m64|NP|(REX|E?VEX\.)[.0-9A-Z]*|/[0-9a-z]+|[a-z]+)\b\s*)*')
# A mnemonic: uppercase alphanumeric word (optionally starred) at line start.
INSTRUCTION_RE = re.compile(r'^([A-Z][A-Z0-9]+)\*?(\s+|$)')
# Some instructions are so broken we just take their names from the filename
UNPARSEABLE_INSTR_NAMES = ['PSRLW:PSRLD:PSRLQ', 'PSLLW:PSLLD:PSLLQ']
# Some files contain instructions which cannot be parsed and which compilers are unlikely to emit
IGNORED_FILE_NAMES = [
# SGX pseudo-instructions
"EADD",
"EACCEPT",
"EAUG",
"EACCEPTCOPY",
"EDECVIRTCHILD",
"EINCVIRTCHILD",
"EINIT",
"ELDB:ELDU:ELDBC:ELBUC",
"EMODPE",
"EMODPR",
"EMODT",
"ERDINFO",
"ESETCONTEXT",
"ETRACKC",
"EBLOCK",
"ECREATE",
"EDBGRD",
"EDBGWR",
"EENTER",
"EEXIT",
"EEXTEND",
"EGETKEY",
"ELDB",
"ELDU",
"ENCLS",
"ENCLU",
"EPA",
"EREMOVE",
"EREPORT",
"ERESUME",
"ETRACK",
"EWB",
# VMX instructions
"INVEPT",
"INVVPID",
"VMCALL",
"VMCLEAR",
"VMFUNC",
"VMLAUNCH",
"VMLAUNCH:VMRESUME",
"VMPTRLD",
"VMPTRST",
"VMREAD",
"VMRESUME",
"VMWRITE",
"VMXOFF",
"VMXON",
# Other instructions
"INVLPG",
"LAHF",
"RDMSR",
"SGDT",
# Unparsable instructions
# These instructions should be supported in the future
"MONITOR",
"MOVDQ2Q",
"MOVBE",
"MFENCE",
]
# Some instructions are defined in multiple files. We ignore a specific set of the
# duplicates here.
IGNORED_DUPLICATES = [
'MOV-1', # move to control reg
'MOV-2', # move to debug reg
'CMPSD', # compare doubleword (defined in CMPS:CMPSB:CMPSW:CMPSD:CMPSQ)
'MOVQ', # defined in MOVD:MOVQ
'MOVSD', # defined in MOVS:MOVSB:MOVSW:MOVSD:MOVSQ
'VPBROADCASTB:VPBROADCASTW:VPBROADCASTD:VPBROADCASTQ', # defined in VPBROADCAST
"VGATHERDPS:VGATHERDPD",
"VGATHERQPS:VGATHERQPD",
"VPGATHERDD:VPGATHERQD",
"VPGATHERDQ:VPGATHERQQ",
]
# Where to extract the asmdoc archive.
ASMDOC_DIR = "asm-docs"
# Upstream tarball containing the rendered per-instruction html pages.
ARCHIVE_URL = "http://www.felixcloutier.com/x86/x86.tbz2"
ARCHIVE_NAME = "x86.tbz2"
class Instruction(object):
    """One documented instruction: file stem, mnemonic set, tooltip, html body."""

    def __init__(self, name, names, tooltip, body):
        self.name = name
        self.names = names
        # The heading text tends to end in separator junk ("ADD: ..."); strip it.
        self.tooltip = tooltip.rstrip(': ,')
        self.body = body

    def __str__(self):
        return "%s = %s\n%s" % (self.names, self.tooltip, self.body)
def get_url_for_instruction(instr):
    """Return the felixcloutier.com URL for *instr* (name url-quoted; Python 2 urllib)."""
    return "http://www.felixcloutier.com/x86/{}.html".format(urllib.quote(instr.name))
def download_asm_doc_archive(downloadfolder):
    """Download the x86 documentation tarball into *downloadfolder*.

    Creates the folder when missing; exits the process when the path
    exists but is not a directory.
    """
    if not os.path.exists(downloadfolder):
        print "Creating {} as download folder".format(downloadfolder)
        os.makedirs(downloadfolder)
    elif not os.path.isdir(downloadfolder):
        print "Error: download folder {} is not a directory".format(downloadfolder)
        sys.exit(1)
    archive_name = os.path.join(downloadfolder, ARCHIVE_NAME)
    print("Downloading archive...")
    urllib.urlretrieve(ARCHIVE_URL, archive_name)
def extract_asm_doc_archive(downloadfolder, inputfolder):
print "Extracting file..."
if os.path.isdir(os.path.join(inputfolder, "html")):
for root, dirs, files in os.walk(os.path.join(inputfolder, "html")):
for file in files:
if os.path.splitext(file)[1] == ".html":
os.remove(os.path.join(root, file))
tar = tarfile.open(os.path.join(downloadfolder, ARCHIVE_NAME))
tar.extractall(path=inputfolder)
def strip_non_instr(i):
    # removes junk from encodings where the opcode is in the middle
    # of prefix stuff. e.g.
    # 66 0f 38 30 /r PMOVZXBW xmm1, xmm2/m64
    # -> "PMOVZXBW xmm1, xmm2/m64" (see STRIP_PREFIX for what is dropped).
    return STRIP_PREFIX.sub('', i)
def instr_name(i):
    """Extract the mnemonic from an encoding line, or None when absent."""
    match = INSTRUCTION_RE.match(strip_non_instr(i))
    if match:
        return match.group(1)
def get_description_paragraphs(document_soup):
    """Collect up to MAX_DESC_PARAS <p> paragraphs after the Description header.

    Stops at the first sibling whose text is 20 characters or shorter —
    presumably the end of the prose section (TODO confirm the heuristic).
    NOTE(review): assumes the page always contains an element with
    id="Description"; a page without one raises AttributeError here.
    """
    description_header_node = document_soup.find(id="Description")
    i = 0
    # Skip two siblings: the immediate sibling is the line-feed text node.
    description_paragraph_node = description_header_node.next_sibling.next_sibling
    description_paragraphs = []
    while i < MAX_DESC_PARAS and len(description_paragraph_node.text) > 20:
        if description_paragraph_node.name == "p":
            description_paragraphs.append(description_paragraph_node)
            i = i + 1
        # Move two siblings forward. Next sibling is the line feed.
        description_paragraph_node = description_paragraph_node.next_sibling.next_sibling
    return description_paragraphs
def parse(filename, f):
    """Parse one instruction page into an Instruction, or None on failure.

    @param filename file stem; used as the canonical name and to
                    special-case UNPARSEABLE_INSTR_NAMES.
    @param f        open file object with the page's html.
    """
    doc = BeautifulSoup(f, 'html.parser')
    if doc.table is None:
        print filename + ": Failed to find table"
        return None
    table = read_table(doc.table)
    names = set()

    def add_all(instrs):
        # Register the mnemonic of every encoding line that yields one.
        for i in instrs:
            instruction_name = instr_name(i)
            if instruction_name:
                names.add(instruction_name)

    # The header of the opcode column varies between pages; try each known
    # spelling in turn.
    for inst in table:
        if 'Opcode/Instruction' in inst:
            add_all(inst['Opcode/Instruction'].split("\n"))
        elif 'OpcodeInstruction' in inst:
            add_all(inst['OpcodeInstruction'].split("\n"))
        elif 'Opcode Instruction' in inst:
            add_all(inst['Opcode Instruction'].split("\n"))
        elif 'Opcode*/Instruction' in inst:
            add_all(inst['Opcode*/Instruction'].split("\n"))
        elif 'Opcode / Instruction' in inst:
            add_all(inst['Opcode / Instruction'].split("\n"))
        elif 'Instruction' in inst:
            instruction_name = instr_name(inst['Instruction'])
            if not instruction_name:
                print "Unable to get instruction from:", inst['Instruction']
            else:
                names.add(instruction_name)
        # else, skip the line
    if not names:
        # Fall back to deriving names from the filename for the few
        # known-broken pages; give up otherwise.
        if filename in UNPARSEABLE_INSTR_NAMES:
            for inst in filename.split(":"):
                names.add(inst)
        else:
            print filename + ": Failed to read instruction table"
            return None
    # Group the document's elements under their <h2> section header.
    # NOTE(review): `sections` is built but never used afterwards.
    sections = {}
    for section_header in doc.find_all("h2"):
        children = []
        first = section_header.next_sibling
        while first and first.name != 'h2':
            if str(first).strip():
                children.append(first)
            first = first.next_sibling
        sections[section_header.text] = children
    description_paragraphs = get_description_paragraphs(doc)
    return Instruction(
        filename,
        names,
        description_paragraphs[0].text.strip(),
        ''.join(map(lambda x: str(x), description_paragraphs)).strip())
def read_table(table):
    """Convert an html instruction <table> into a list of row dicts.

    @param table BeautifulSoup Tag for the page's first <table>.
    @return list of {header-text: cell-text} dicts, one per data row;
            [] when the table layout is not understood.
    """
    # Finding all 'th' is not enough, since some headers are 'td'.
    # Instead, walk through all children of the first 'tr', filter out those
    # that are only whitespace, keep `get_text()` on the others.
    # NOTE: `unicode` makes this Python 2 only.
    headers = list(
        map(lambda th: th.get_text(),
            filter(lambda th: unicode(th).strip(), table.tr.children)))
    result = []
    if headers:
        # common case: first row holds the headers, following rows the data.
        for row in table.find_all('tr'):
            obj = {}
            for column, name in zip(row.find_all('td'), headers):
                # Remove '\n's in names that contain it.
                obj[name.replace('\n', '')] = column.get_text()
            if obj:
                # The header row has no <td> cells and yields an empty dict.
                result.append(obj)
    else:
        # Cases like BEXTR and BZHI: one single row where each cell embeds
        # its own header as <p><strong>header</strong>value</p>.
        rows = table.find_all('tr')
        if len(rows) != 1:
            return []
        obj = {}
        for td in rows[0].find_all('td'):
            header = td.p.strong.get_text()
            # Drop the <strong> header node so get_text() returns the value.
            td.p.strong.decompose()
            obj[header] = td.get_text()
        result.append(obj)
    return result
def parse_html(directory):
    """Parse every per-instruction .html file under *directory*.

    Skips index.html plus the names listed in IGNORED_DUPLICATES and
    IGNORED_FILE_NAMES, and drops pages that parse() rejects.

    @return list of Instruction objects (unsorted).
    """
    print "Parsing instructions..."
    instructions = []
    for root, dirs, files in os.walk(directory):
        for file in files:
            if file.endswith(".html") and file != 'index.html':
                with open(os.path.join(root, file)) as f2:
                    # The file stem (e.g. "ADD" from "ADD.html") doubles as
                    # the instruction's canonical name.
                    name = os.path.splitext(file)[0]
                    if name in IGNORED_DUPLICATES or name in IGNORED_FILE_NAMES:
                        continue
                    instruction = parse(name, f2)
                    if not instruction:
                        # parse() returned None: malformed page, already logged.
                        continue
                    instructions.append(instruction)
    return instructions
def self_test(instructions, directory):
    """Check each instruction maps back to a documentation file.

    Returns True iff every instruction has a matching
    <directory>/html/<name>.html file; a warning is printed per miss.
    """
    html_dir = os.path.join(directory, "html")
    missing = [inst.name for inst in instructions
               if not os.path.isfile(os.path.join(html_dir, inst.name + ".html"))]
    for name in missing:
        print("Warning: {} has not file associated".format(name))
    return not missing
def docenizer():
args = parser.parse_args()
print "Called with: {}".format(args)
# If we don't have the html folder already...
if not os.path.isdir(os.path.join(args.inputfolder, 'html')):
# We don't, try with the compressed file
if not os.path.isfile(os.path.join(args.downloadfolder, "x86.tbz2")):
# We can't find that either. Download it
try:
download_asm_doc_archive(args.downloadfolder)
extract_asm_doc_archive(args.downloadfolder, args.inputfolder)
except IOError as e:
print("Error when downloading archive:")
print(e)
sys.exit(1)
else:
# We have a file already downloaded
extract_asm_doc_archive(args.downloadfolder, args.inputfolder)
instructions = parse_html(args.inputfolder)
instructions.sort(lambda x, y: cmp(x.name, y.name))
self_test(instructions, args.inputfolder)
all_inst = set()
for inst in instructions:
if not all_inst.isdisjoint(inst.names):
print "Overlap in instruction names: {} for {}".format(
inst.names.intersection(all_inst), inst.name)
all_inst = all_inst.union(inst.names)
if not self_test(instructions, args.inputfolder):
print("Tests do not pass. Not writing output file. Aborting.")
sys.exit(3)
print "Writing {} instructions".format(len(instructions))
with open(args.outputpath, 'w') as f:
f.write("""
function getAsmOpcode(opcode) {
if (!opcode) return;
switch (opcode.toUpperCase()) {
""")
for inst in instructions:
for name in inst.names:
f.write(' case "{}":\n'.format(name))
f.write(' return {}'.format(json.dumps({
"tooltip": inst.tooltip,
"html": inst.body,
"url": get_url_for_instruction(inst)
}, indent=16, separators=(',', ': ')))[:-1] + ' };\n\n')
f.write("""
}
}
module.exports = {
getAsmOpcode: getAsmOpcode
};
""")
if __name__ == '__main__':
docenizer()
| en | 0.863841 | #! /usr/bin/env python2 # -*- coding: utf-8 -*- # The maximum number of paragraphs from the description to copy. # Some instructions are so broken we just take their names from the filename # Some files contain instructions which cannot be parsed and which compilers are unlikely to emit # SGX pseudo-instructions # VMX instructions # Other instructions # Unparsable instructions # These instructions should be supported in the future # Some instructions are defined in multiple files. We ignore a specific set of the # duplicates here. # move to control reg # move to debug reg # compare doubleword (defined in CMPS:CMPSB:CMPSW:CMPSD:CMPSQ) # defined in MOVD:MOVQ # defined in MOVS:MOVSB:MOVSW:MOVSD:MOVSQ # defined in VPBROADCAST # Where to extract the asmdoc archive. # removes junk from encodings where the opcode is in the middle # of prefix stuff. e.g. # 66 0f 38 30 /r PMOVZXBW xmm1, xmm2/m64 # Move two siblings forward. Next sibling is the line feed. # else, skip the line # Finding all 'th' is not enough, since some headers are 'td'. # Instead, walk through all children of the first 'tr', filter out those # that are only whitespace, keep `get_text()` on the others. # common case # Remove '\n's in names that contain it. # Cases like BEXTR and BZHI # For each generated instruction, check that there is a path to a file in # the documentation. # If we don't have the html folder already... # We don't, try with the compressed file # We can't find that either. Download it # We have a file already downloaded function getAsmOpcode(opcode) { if (!opcode) return; switch (opcode.toUpperCase()) { } } module.exports = { getAsmOpcode: getAsmOpcode }; | 2.696662 | 3 |
server/data_analysis/los_alamos_processing/parse_auth.py | xhuang98/Dtect | 1 | 6624979 | <reponame>xhuang98/Dtect
import json
from .util import parse_line
if __name__ == '__main__':
    # Split the Los Alamos auth log into one output file per source user,
    # processing at most `limit` parseable lines.
    path = "D:\\los alamos\\auth.txt"
    # path = "auth_short.txt"
    limit = 5000
    count = 0
    # BUGFIX: the input file was opened without ever being closed, and a
    # bare `except:` around the whole body also swallowed write errors.
    with open(path) as f:
        for line in f:
            if count > limit:
                break
            try:
                values = parse_line(line)
            except Exception:
                # Skip lines that util.parse_line cannot handle.
                continue
            # Append the raw line to the per-user output file.
            with open(f"D:\\los alamos\\auths\\auth_{values['src_user']}.txt", 'a') as f_write:
                f_write.write(line)
            count += 1
| import json
from .util import parse_line
if __name__ == '__main__':
path = "D:\\los alamos\\auth.txt"
# path = "auth_short.txt"
f = open(path)
limit = 5000
count = 0
for line in f:
if count > limit:
break
try:
values = parse_line(line)
f_write = open(f"D:\\los alamos\\auths\\auth_{values['src_user']}.txt", 'a')
f_write.write(line)
f_write.close()
count += 1
except:
continue | en | 0.780775 | # path = "auth_short.txt" | 2.480278 | 2 |
net.py | j-friedrich/neuronalGPR | 2 | 6624980 | # Copyright 2016, <NAME>, All rights reserved.
# This code is based on the code by <NAME> used for his
# paper "Probabilistic Backpropagation for Scalable Learning of Bayesian Neural Networks".
import warnings
warnings.filterwarnings("ignore")
import math
from scipy.special import logsumexp
import numpy as np
from keras.regularizers import l2
from keras import Input
from keras.layers import Dropout
from keras.layers import Dense
from keras import Model
import time
class net:
    # MC-dropout Bayesian neural network regressor: dropout is kept active
    # at prediction time (training=True) so repeated forward passes sample
    # from the approximate posterior.

    def __init__(self, X_train, y_train, n_hidden, n_epochs = 40,
                 normalize = False, tau = 1.0, dropout = 0.05):

        """
            Constructor for the class implementing a Bayesian neural network
            trained with the probabilistic back propagation method.

            @param X_train      Matrix with the features for the training data.
            @param y_train      Vector with the target variables for the
                                training data.
            @param n_hidden     Vector with the number of neurons for each
                                hidden layer.
            @param n_epochs     Numer of epochs for which to train the
                                network. The recommended value 40 should be
                                enough.
            @param normalize    Whether to normalize the input features. This
                                is recommended unles the input vector is for
                                example formed by binary features (a
                                fingerprint). In that case we do not recommend
                                to normalize the features.
            @param tau          Tau value used for regularization
            @param dropout      Dropout rate for all the dropout layers in the
                                network.
        """

        # We normalize the training data to have zero mean and unit standard
        # deviation in the training set if necessary
        if normalize:
            self.std_X_train = np.std(X_train, 0)
            self.std_X_train[ self.std_X_train == 0 ] = 1
            self.mean_X_train = np.mean(X_train, 0)
        else:
            # Identity transform: keep raw features.
            self.std_X_train = np.ones(X_train.shape[ 1 ])
            self.mean_X_train = np.zeros(X_train.shape[ 1 ])

        X_train = (X_train - np.full(X_train.shape, self.mean_X_train)) / \
            np.full(X_train.shape, self.std_X_train)

        # Targets are standardised too; statistics are kept so predictions
        # can be mapped back to the original scale.
        self.mean_y_train = np.mean(y_train)
        self.std_y_train = np.std(y_train)

        y_train_normalized = (y_train - self.mean_y_train) / self.std_y_train
        y_train_normalized = np.array(y_train_normalized, ndmin = 2).T

        # We construct the network
        N = X_train.shape[0]
        batch_size = 128
        lengthscale = 1e-2
        # L2 weight decay corresponding to the dropout posterior with the
        # given prior lengthscale and precision tau.
        reg = lengthscale**2 * (1 - dropout) / (2. * N * tau)

        # `training=True` keeps dropout stochastic at prediction time,
        # which is what makes the MC sampling in predict() work.
        inputs = Input(shape=(X_train.shape[1],))
        inter = Dropout(dropout)(inputs, training=True)
        inter = Dense(n_hidden[0], activation='relu', W_regularizer=l2(reg))(inter)
        for i in range(len(n_hidden) - 1):
            inter = Dropout(dropout)(inter, training=True)
            inter = Dense(n_hidden[i+1], activation='relu', W_regularizer=l2(reg))(inter)
        inter = Dropout(dropout)(inter, training=True)
        outputs = Dense(y_train_normalized.shape[1], W_regularizer=l2(reg))(inter)
        model = Model(inputs, outputs)
        model.compile(loss='mean_squared_error', optimizer='adam')

        # We iterate the learning process
        start_time = time.time()
        model.fit(X_train, y_train_normalized, batch_size=batch_size, nb_epoch=n_epochs, verbose=0)
        self.model = model
        self.tau = tau
        self.running_time = time.time() - start_time

        # We are done!

    def predict(self, X_test, y_test, T=10000):

        """
            Function for making predictions with the Bayesian neural network.

            @param X_test               The matrix of features for the test data
            @param y_test               Vector of target values for the test data.
            @param T                    Number of stochastic forward passes.

            @return rmse_standard_pred  RMSE of a single (still stochastic)
                                        forward pass.
            @return rmse                RMSE of the MC-dropout predictive mean
                                        over T samples.
            @return test_ll             Average test log-likelihood under the
                                        MC-dropout predictive distribution.
        """

        X_test = np.array(X_test, ndmin = 2)
        y_test = np.array(y_test, ndmin = 2).T

        # We normalize the test set
        X_test = (X_test - np.full(X_test.shape, self.mean_X_train)) / \
            np.full(X_test.shape, self.std_X_train)

        # We compute the predictive mean and variance for the target variables
        # of the test data
        model = self.model
        standard_pred = model.predict(X_test, batch_size=500, verbose=1)
        standard_pred = standard_pred * self.std_y_train + self.mean_y_train
        rmse_standard_pred = np.mean((y_test.squeeze() - standard_pred.squeeze())**2.)**0.5

        # T stochastic passes; dropout stays active (training=True above).
        Yt_hat = np.array([model.predict(X_test, batch_size=500, verbose=0) for _ in range(T)])
        Yt_hat = Yt_hat * self.std_y_train + self.mean_y_train
        MC_pred = np.mean(Yt_hat, 0)
        rmse = np.mean((y_test.squeeze() - MC_pred.squeeze())**2.)**0.5

        # We compute the test log-likelihood
        if Yt_hat.size > 5e8:
            # Chunked evaluation (1000 samples at a time) to bound memory.
            # NOTE(review): unlike the logsumexp branch below this
            # exponentiates directly and can underflow for large tau -- confirm.
            ll = (np.log(np.sum([np.sum(np.exp(-0.5 * self.tau *
                                               (y_test[None] - Yt_hat[i*1000:(i+1)*1000])**2.), 0)
                                 for i in range((T-1)//1000+1)], 0)) - np.log(T)
                  - 0.5*np.log(2*np.pi) + 0.5*np.log(self.tau))
        else:
            ll = (logsumexp(-0.5 * self.tau * (y_test[None] - Yt_hat)**2., 0) - np.log(T)
                  - 0.5*np.log(2*np.pi) + 0.5*np.log(self.tau))
        test_ll = np.mean(ll)

        # We are done!
        return rmse_standard_pred, rmse, test_ll
| # Copyright 2016, <NAME>, All rights reserved.
# This code is based on the code by <NAME> used for his
# paper "Probabilistic Backpropagation for Scalable Learning of Bayesian Neural Networks".
import warnings
warnings.filterwarnings("ignore")
import math
from scipy.special import logsumexp
import numpy as np
from keras.regularizers import l2
from keras import Input
from keras.layers import Dropout
from keras.layers import Dense
from keras import Model
import time
class net:
    # MC-dropout Bayesian neural network regressor: dropout is kept active
    # at prediction time (training=True) so repeated forward passes sample
    # from the approximate posterior.

    def __init__(self, X_train, y_train, n_hidden, n_epochs = 40,
                 normalize = False, tau = 1.0, dropout = 0.05):

        """
            Constructor for the class implementing a Bayesian neural network
            trained with the probabilistic back propagation method.

            @param X_train      Matrix with the features for the training data.
            @param y_train      Vector with the target variables for the
                                training data.
            @param n_hidden     Vector with the number of neurons for each
                                hidden layer.
            @param n_epochs     Numer of epochs for which to train the
                                network. The recommended value 40 should be
                                enough.
            @param normalize    Whether to normalize the input features. This
                                is recommended unles the input vector is for
                                example formed by binary features (a
                                fingerprint). In that case we do not recommend
                                to normalize the features.
            @param tau          Tau value used for regularization
            @param dropout      Dropout rate for all the dropout layers in the
                                network.
        """

        # We normalize the training data to have zero mean and unit standard
        # deviation in the training set if necessary
        if normalize:
            self.std_X_train = np.std(X_train, 0)
            self.std_X_train[ self.std_X_train == 0 ] = 1
            self.mean_X_train = np.mean(X_train, 0)
        else:
            # Identity transform: keep raw features.
            self.std_X_train = np.ones(X_train.shape[ 1 ])
            self.mean_X_train = np.zeros(X_train.shape[ 1 ])

        X_train = (X_train - np.full(X_train.shape, self.mean_X_train)) / \
            np.full(X_train.shape, self.std_X_train)

        # Targets are standardised too; statistics are kept so predictions
        # can be mapped back to the original scale.
        self.mean_y_train = np.mean(y_train)
        self.std_y_train = np.std(y_train)

        y_train_normalized = (y_train - self.mean_y_train) / self.std_y_train
        y_train_normalized = np.array(y_train_normalized, ndmin = 2).T

        # We construct the network
        N = X_train.shape[0]
        batch_size = 128
        lengthscale = 1e-2
        # L2 weight decay corresponding to the dropout posterior with the
        # given prior lengthscale and precision tau.
        reg = lengthscale**2 * (1 - dropout) / (2. * N * tau)

        # `training=True` keeps dropout stochastic at prediction time,
        # which is what makes the MC sampling in predict() work.
        inputs = Input(shape=(X_train.shape[1],))
        inter = Dropout(dropout)(inputs, training=True)
        inter = Dense(n_hidden[0], activation='relu', W_regularizer=l2(reg))(inter)
        for i in range(len(n_hidden) - 1):
            inter = Dropout(dropout)(inter, training=True)
            inter = Dense(n_hidden[i+1], activation='relu', W_regularizer=l2(reg))(inter)
        inter = Dropout(dropout)(inter, training=True)
        outputs = Dense(y_train_normalized.shape[1], W_regularizer=l2(reg))(inter)
        model = Model(inputs, outputs)
        model.compile(loss='mean_squared_error', optimizer='adam')

        # We iterate the learning process
        start_time = time.time()
        model.fit(X_train, y_train_normalized, batch_size=batch_size, nb_epoch=n_epochs, verbose=0)
        self.model = model
        self.tau = tau
        self.running_time = time.time() - start_time

        # We are done!

    def predict(self, X_test, y_test, T=10000):

        """
            Function for making predictions with the Bayesian neural network.

            @param X_test               The matrix of features for the test data
            @param y_test               Vector of target values for the test data.
            @param T                    Number of stochastic forward passes.

            @return rmse_standard_pred  RMSE of a single (still stochastic)
                                        forward pass.
            @return rmse                RMSE of the MC-dropout predictive mean
                                        over T samples.
            @return test_ll             Average test log-likelihood under the
                                        MC-dropout predictive distribution.
        """

        X_test = np.array(X_test, ndmin = 2)
        y_test = np.array(y_test, ndmin = 2).T

        # We normalize the test set
        X_test = (X_test - np.full(X_test.shape, self.mean_X_train)) / \
            np.full(X_test.shape, self.std_X_train)

        # We compute the predictive mean and variance for the target variables
        # of the test data
        model = self.model
        standard_pred = model.predict(X_test, batch_size=500, verbose=1)
        standard_pred = standard_pred * self.std_y_train + self.mean_y_train
        rmse_standard_pred = np.mean((y_test.squeeze() - standard_pred.squeeze())**2.)**0.5

        # T stochastic passes; dropout stays active (training=True above).
        Yt_hat = np.array([model.predict(X_test, batch_size=500, verbose=0) for _ in range(T)])
        Yt_hat = Yt_hat * self.std_y_train + self.mean_y_train
        MC_pred = np.mean(Yt_hat, 0)
        rmse = np.mean((y_test.squeeze() - MC_pred.squeeze())**2.)**0.5

        # We compute the test log-likelihood
        if Yt_hat.size > 5e8:
            # Chunked evaluation (1000 samples at a time) to bound memory.
            # NOTE(review): unlike the logsumexp branch below this
            # exponentiates directly and can underflow for large tau -- confirm.
            ll = (np.log(np.sum([np.sum(np.exp(-0.5 * self.tau *
                                               (y_test[None] - Yt_hat[i*1000:(i+1)*1000])**2.), 0)
                                 for i in range((T-1)//1000+1)], 0)) - np.log(T)
                  - 0.5*np.log(2*np.pi) + 0.5*np.log(self.tau))
        else:
            ll = (logsumexp(-0.5 * self.tau * (y_test[None] - Yt_hat)**2., 0) - np.log(T)
                  - 0.5*np.log(2*np.pi) + 0.5*np.log(self.tau))
        test_ll = np.mean(ll)

        # We are done!
        return rmse_standard_pred, rmse, test_ll
| en | 0.831558 | # Copyright 2016, <NAME>, All rights reserved. # This code is based on the code by <NAME> used for his # paper "Probabilistic Backpropagation for Scalable Learning of Bayesian Neural Networks". Constructor for the class implementing a Bayesian neural network trained with the probabilistic back propagation method. @param X_train Matrix with the features for the training data. @param y_train Vector with the target variables for the training data. @param n_hidden Vector with the number of neurons for each hidden layer. @param n_epochs Numer of epochs for which to train the network. The recommended value 40 should be enough. @param normalize Whether to normalize the input features. This is recommended unles the input vector is for example formed by binary features (a fingerprint). In that case we do not recommend to normalize the features. @param tau Tau value used for regularization @param dropout Dropout rate for all the dropout layers in the network. # We normalize the training data to have zero mean and unit standard # deviation in the training set if necessary # We construct the network # We iterate the learning process # We are done! Function for making predictions with the Bayesian neural network. @param X_test The matrix of features for the test data @return m The predictive mean for the test target variables. @return v The predictive variance for the test target variables. @return v_noise The estimated variance for the additive noise. # We normalize the test set # We compute the predictive mean and variance for the target variables # of the test data # We compute the test log-likelihood # We are done! | 3.325239 | 3 |
conans/model/conan_generator.py | sigmunjr/conan | 1 | 6624981 | <filename>conans/model/conan_generator.py
from abc import ABCMeta, abstractproperty
import six
@six.add_metaclass(ABCMeta)
class Generator(object):
    """Abstract base for Conan generators.

    A generator turns a conanfile's aggregated dependency/environment
    information into a build-system specific file. Subclasses must
    implement `content` and `filename`.
    """

    def __init__(self, conanfile):
        self.conanfile = conanfile
        # Flag consumed by the caller that writes `content` to disk;
        # not used inside this class.
        self.normalize = True
        # Snapshot the conanfile's aggregated dependency/env information.
        self._deps_build_info = conanfile.deps_cpp_info
        self._deps_env_info = conanfile.deps_env_info
        self._env_info = conanfile.env_info
        self._deps_user_info = conanfile.deps_user_info
        # Only present for conanfiles that expose a separate build context.
        self._user_info_build = getattr(conanfile, 'user_info_build', None)

    @property
    def deps_build_info(self):
        # cpp_info (includes, libs, flags, ...) of the dependencies.
        return self._deps_build_info

    @property
    def deps_env_info(self):
        return self._deps_env_info

    @property
    def deps_user_info(self):
        return self._deps_user_info

    @property
    def env_info(self):
        return self._env_info

    @property
    def settings(self):
        return self.conanfile.settings

    @abstractproperty
    def content(self):
        # Text to write into the generated file; must be implemented.
        raise NotImplementedError()

    @abstractproperty
    def filename(self):
        # Name of the file to generate; must be implemented.
        raise NotImplementedError()

    def sorted_components(self, cpp_info):
        # Components of a dependency in dependency order (private API).
        return cpp_info._get_sorted_components()
| <filename>conans/model/conan_generator.py
from abc import ABCMeta, abstractproperty
import six
@six.add_metaclass(ABCMeta)
class Generator(object):
    """Abstract base for Conan generators.

    A generator turns a conanfile's aggregated dependency/environment
    information into a build-system specific file. Subclasses must
    implement `content` and `filename`.
    """

    def __init__(self, conanfile):
        self.conanfile = conanfile
        # Flag consumed by the caller that writes `content` to disk;
        # not used inside this class.
        self.normalize = True
        # Snapshot the conanfile's aggregated dependency/env information.
        self._deps_build_info = conanfile.deps_cpp_info
        self._deps_env_info = conanfile.deps_env_info
        self._env_info = conanfile.env_info
        self._deps_user_info = conanfile.deps_user_info
        # Only present for conanfiles that expose a separate build context.
        self._user_info_build = getattr(conanfile, 'user_info_build', None)

    @property
    def deps_build_info(self):
        # cpp_info (includes, libs, flags, ...) of the dependencies.
        return self._deps_build_info

    @property
    def deps_env_info(self):
        return self._deps_env_info

    @property
    def deps_user_info(self):
        return self._deps_user_info

    @property
    def env_info(self):
        return self._env_info

    @property
    def settings(self):
        return self.conanfile.settings

    @abstractproperty
    def content(self):
        # Text to write into the generated file; must be implemented.
        raise NotImplementedError()

    @abstractproperty
    def filename(self):
        # Name of the file to generate; must be implemented.
        raise NotImplementedError()

    def sorted_components(self, cpp_info):
        # Components of a dependency in dependency order (private API).
        return cpp_info._get_sorted_components()
| none | 1 | 2.732033 | 3 | |
vi.py | magnetar-iiith/PRIL | 1 | 6624982 | import numpy as np
import gym
import random
import tools
from cvxopt import matrix, solvers
import matplotlib.pyplot as plt
import numpy.random as rn
import copy
import sys
from contextlib import closing
from io import StringIO
from gym import utils
from gym.envs.toy_text import discrete
import os
import argparse
from irl import irl
from mdp import FrozenLakeEnv
from helper import get_state_rewards, get_transition_prob_matrix, to_s, pretty
parser = argparse.ArgumentParser()
parser.add_argument('--with_privacy', type=str, default='true')
parser.add_argument('--env_size', type=str, default='5x5')
parser.add_argument('--with_testing', type=str, default='true')
config = parser.parse_args()

# Slip probability of the FrozenLake-style environment.
wind = 0.001
# Each entry is the argument list handed to FrozenLakeEnv below.
# TODO(review): confirm the meaning of the trailing constants against
# mdp.FrozenLakeEnv's constructor signature.
arg_list = []
if config.env_size == '10x10':
    grid_sizes = [10] * 12
    for i in range(97, 109):
        # Maps are named "10x10_a" ... "10x10_l" (chr(97..108)).
        arg_list.append([None, "10x10_" + chr(i), True, wind, -1., -1., 10., None, 100.])
else:
    grid_sizes = [5] * 12
    for i in range(97, 100):
        # NOTE(review): maps "5x5_a".."5x5_c" are appended twice each --
        # looks intentional (12 runs total) but worth confirming.
        arg_list.append([None, "5x5_" + chr(i), True, wind, -1., -1., 10., None, 100.])
        arg_list.append([None, "5x5_" + chr(i), True, wind, -1., -1., 10., None, 100.])
    for i in range(100, 106):
        arg_list.append([None, "5x5_" + chr(i), True, wind, -1., -1., 10., None, 100.])

# Command-line flags arrive as strings; convert them to booleans.
if config.with_testing == 'true':
    testing = True
else:
    testing = False
if config.with_privacy == 'true':
    is_priv = True
else:
    is_priv = False
def compute_sigma(eps, sens):
    """Gaussian-mechanism noise parameter for privacy level *eps* and
    sensitivity *sens*.

    NOTE(review): this evaluates 2 * sens^2 * ln(1.25e4) / eps^2, i.e. the
    *variance* sigma^2 of the standard (eps, delta=1e-4) Gaussian
    mechanism, yet callers pass the result as np.random.normal's scale
    (a standard deviation) -- confirm whether a sqrt is intended.
    """
    return (2. * sens * sens * np.log(1.25e4) / (eps * eps))
# Privacy budgets to sweep; sigmas[i] is the Gaussian noise parameter used
# for eps[i] with sensitivity 1.05.
eps = [0.01, 0.05, 0.1, 0.2, 0.5, 1.0, 2, 5, 10]
sigmas = [compute_sigma(e, 1.05) for e in eps]
def compute_value_iteration(env, gamma=.99, theta=.0000000001, verbose=True, sigma=1):
    """Value iteration over *env*, optionally noised for differential privacy.

    When the module-level flag `is_priv` is truthy, N(0, sigma) noise is
    added to every per-transition backup term; otherwise the update is the
    plain Bellman backup.

    @param env    gym-style discrete env exposing P[s][a] transition lists.
    @param gamma  discount factor.
    @param theta  convergence threshold on the max value change per sweep.
    @param sigma  scale passed to np.random.normal.
    @return (V, P): state values and the greedy policy (action per state).
    """
    env.reset()
    nb_actions = env.action_space.n
    nb_states = env.observation_space.n
    V = np.zeros([nb_states])
    newV = V.copy()
    P = np.zeros([nb_states], dtype=int)
    iteration = 0
    while True:
        delta = 0
        for s in range (0, nb_states):
            action_vals = []
            for action in range(nb_actions):
                temp_val = 0
                for i in range(len(env.P[s][action])):
                    prob, next_state, reward, done = env.P[s][action][i]
                    # is_priv is 0/1, so the noise term vanishes in the
                    # non-private run.
                    sa_value = prob * (reward + gamma * V[next_state]) + is_priv * np.random.normal(0,sigma,1)
                    temp_val += sa_value
                action_vals.append(temp_val) #the value of each action
            bestA = np.argmax(np.asarray(action_vals)) # choose the action which gives the maximum value
            P[s] = bestA
            newV[s] = action_vals[bestA]
            delta = max(delta, np.abs(newV[s] - V[s]))
        # Jacobi-style sweep: all updates used the previous V.
        V = newV.copy()
        iteration += 1
        # Stop when converged or after a hard cap of 10000 sweeps.
        if delta < theta or iteration > 10000:
            if verbose:
                print (iteration,' iterations done')
            break
    return V, P
# Hyper-parameters for value iteration / IRL and the evaluation rollouts.
discount = 0.999
theta = 0.15
lambd = 0.99

# All results are written under a fresh <env_size>/<eps>/ directory tree.
os.mkdir(config.env_size)
os.chdir(config.env_size)
for sigma_iter in range(len(sigmas)):
    path = str(eps[sigma_iter])
    os.mkdir(path)
    for i in range(len(arg_list)):
        grid_size = grid_sizes[i]
        li = arg_list[i]
        env = FrozenLakeEnv(li[0], li[1], li[2], li[3], li[4], li[5], li[6], li[7])
        env.reset()
        # Ground-truth reward per state, saved once per environment.
        rews = get_state_rewards(env)
        ground_r = np.array(rews)
        with open(path + "/" + str(i + 1) + '_ground_r.txt', 'w') as f:
            for j in ground_r.flatten():
                f.write('%f ' % j)
            f.write('\n')
        # Average over 10 runs of (noisy) value iteration + IRL.
        for avg_iter in range(10):
            env.reset()
            V, P = compute_value_iteration(env, gamma=discount, theta=theta, verbose=True, sigma=sigmas[sigma_iter])
            tns_prob = get_transition_prob_matrix(env)
            # Recover a reward function from the (noisy) greedy policy.
            r = irl(env.nS, env.nA, tns_prob, P, discount, 1, 5)
            with open(path + "/" + str(i + 1) + "_" + str(avg_iter + 1) + '_r.txt', 'a') as f:
                for j in r.flatten():
                    f.write('%.15f ' % j)
                f.write('\n')
            if testing:
                # Roll out the learned policy and record discounted returns.
                # NOTE(review): the inner loops reuse the names i, r and j,
                # shadowing the environment index and the IRL reward (both
                # already consumed above, so harmless -- but fragile).
                all_episode_reward = []
                num_test_episodes = 5
                max_steps = 200
                for i in range(num_test_episodes):
                    s = env.reset()
                    rAll = 0
                    for j in range(max_steps):
                        a = P[s]
                        s1, r, d, _ = env.step(a)
                        rAll += r * pow(lambd, j)
                        s = s1
                        if d: break
                    all_episode_reward.append(rAll)
                # NOTE(review): this filename omits the environment index, so
                # all environments append to the same per-avg_iter file --
                # confirm that is intended.
                with open(path + '/' + str(avg_iter + 1) + '_r_test.txt', 'a') as f:
                    for j in all_episode_reward:
                        f.write('%.15f ' % j)
                    f.write('\n')
import gym
import random
import tools
from cvxopt import matrix, solvers
import matplotlib.pyplot as plt
import numpy.random as rn
import copy
import sys
from contextlib import closing
from io import StringIO
from gym import utils
from gym.envs.toy_text import discrete
import os
import argparse
from irl import irl
from mdp import FrozenLakeEnv
from helper import get_state_rewards, get_transition_prob_matrix, to_s, pretty
parser = argparse.ArgumentParser()
parser.add_argument('--with_privacy', type=str, default='true')
parser.add_argument('--env_size', type=str, default='5x5')
parser.add_argument('--with_testing', type=str, default='true')
config = parser.parse_args()
wind = 0.001
arg_list = []
if config.env_size == '10x10':
grid_sizes = [10] * 12
for i in range(97, 109):
arg_list.append([None, "10x10_" + chr(i), True, wind, -1., -1., 10., None, 100.])
else:
grid_sizes = [5] * 12
for i in range(97, 100):
arg_list.append([None, "5x5_" + chr(i), True, wind, -1., -1., 10., None, 100.])
arg_list.append([None, "5x5_" + chr(i), True, wind, -1., -1., 10., None, 100.])
for i in range(100, 106):
arg_list.append([None, "5x5_" + chr(i), True, wind, -1., -1., 10., None, 100.])
if config.with_testing == 'true':
testing = True
else:
testing = False
if config.with_privacy == 'true':
is_priv = True
else:
is_priv = False
def compute_sigma(eps, sens):
    # Gaussian-mechanism noise scale for (eps, delta)-differential privacy with
    # delta = 1e-4:  2 * sens^2 * ln(1.25 / delta) / eps^2.
    # NOTE(review): this expression is sigma^2 (a variance), yet the result is
    # passed to np.random.normal(0, sigma, 1), which expects the *standard
    # deviation* -- confirm whether a sqrt is missing or this is intentional.
    return (2.*sens*sens*np.log(1.25e4)/(eps*eps))
eps = [0.01, 0.05, 0.1, 0.2, 0.5, 1.0, 2, 5, 10]
sigmas = [compute_sigma(e, 1.05) for e in eps]
def compute_value_iteration(env, gamma=.99, theta=.0000000001, verbose=True, sigma=1):
    """Tabular value iteration over a gym-style env exposing env.P.

    When the module-level ``is_priv`` flag is truthy, Gaussian noise with
    scale ``sigma`` is added to every backed-up state-action value (the
    noise multiplier is 0 otherwise, though a sample is still drawn).

    Returns (V, P): converged state values and the greedy policy
    (one argmax action index per state).
    """
    env.reset()
    nb_actions = env.action_space.n
    nb_states = env.observation_space.n
    V = np.zeros([nb_states])
    newV = V.copy()
    # P holds the greedy policy: best action index per state.
    P = np.zeros([nb_states], dtype=int)
    iteration = 0
    while True:
        # delta tracks the largest value change in this sweep (convergence test).
        delta = 0
        for s in range (0, nb_states):
            action_vals = []
            for action in range(nb_actions):
                temp_val = 0
                # env.P[s][action] is a list of (prob, next_state, reward, done).
                for i in range(len(env.P[s][action])):
                    prob, next_state, reward, done = env.P[s][action][i]
                    # Bellman backup, optionally perturbed for privacy.
                    sa_value = prob * (reward + gamma * V[next_state]) + is_priv * np.random.normal(0,sigma,1)
                    temp_val += sa_value
                action_vals.append(temp_val) #the value of each action
            bestA = np.argmax(np.asarray(action_vals)) # choose the action which gives the maximum value
            P[s] = bestA
            newV[s] = action_vals[bestA]
            delta = max(delta, np.abs(newV[s] - V[s]))
        V = newV.copy()
        iteration += 1
        # Stop when converged, or after a hard cap of 10000 sweeps.
        if delta < theta or iteration > 10000:
            if verbose:
                print (iteration,' iterations done')
            break
    return V, P
discount = 0.999
theta = 0.15
lambd = 0.99
os.mkdir(config.env_size)
os.chdir(config.env_size)
for sigma_iter in range(len(sigmas)):
path = str(eps[sigma_iter])
os.mkdir(path)
for i in range(len(arg_list)):
grid_size = grid_sizes[i]
li = arg_list[i]
env = FrozenLakeEnv(li[0], li[1], li[2], li[3], li[4], li[5], li[6], li[7])
env.reset()
rews = get_state_rewards(env)
ground_r = np.array(rews)
with open(path + "/" + str(i + 1) + '_ground_r.txt', 'w') as f:
for j in ground_r.flatten():
f.write('%f ' % j)
f.write('\n')
for avg_iter in range(10):
env.reset()
V, P = compute_value_iteration(env, gamma=discount, theta=theta, verbose=True, sigma=sigmas[sigma_iter])
tns_prob = get_transition_prob_matrix(env)
r = irl(env.nS, env.nA, tns_prob, P, discount, 1, 5)
with open(path + "/" + str(i + 1) + "_" + str(avg_iter + 1) + '_r.txt', 'a') as f:
for j in r.flatten():
f.write('%.15f ' % j)
f.write('\n')
if testing:
all_episode_reward = []
num_test_episodes = 5
max_steps = 200
for i in range(num_test_episodes):
s = env.reset()
rAll = 0
for j in range(max_steps):
a = P[s]
s1, r, d, _ = env.step(a)
rAll += r * pow(lambd, j)
s = s1
if d: break
all_episode_reward.append(rAll)
with open(path + '/' + str(avg_iter + 1) + '_r_test.txt', 'a') as f:
for j in all_episode_reward:
f.write('%.15f ' % j)
f.write('\n')
| en | 0.767497 | #the value of each action # choose the action which gives the maximum value | 1.827804 | 2 |
insomniac/hardban_indicator.py | chikko80/Insomniac | 0 | 6624983 | <filename>insomniac/hardban_indicator.py
from insomniac.utils import *
class HardBanError(Exception):
    """Raised when a hard-ban indicator (the in-app webview) is detected."""
    pass
class HardBanIndicator:
    # Fully-qualified activity Instagram shows when it forces the user through
    # an in-app web page (CAPTCHA / "confirm your account").
    WEBVIEW_ACTIVITY_NAME = "com.instagram.simplewebview.SimpleWebViewActivity"

    def detect_webview(self, device):
        """
        While "hard banned" Instagram shows you a webview with CAPTCHA and request to confirm your account. So what we
        need is to simply detect that topmost activity is a webview.

        Polls the device up to three times, one second apart, and calls
        indicate_ban() (which raises HardBanError) if the resumed activity
        is the in-app webview.

        :param device: device wrapper exposing ``device_id`` and ``app_id``
        :raises HardBanError: when the webview activity is on top
        """
        device_id = device.device_id
        app_id = device.app_id
        full_webview_activity_name = f"{app_id}/{self.WEBVIEW_ACTIVITY_NAME}"
        max_attempts = 3
        attempt = 1
        while attempt <= max_attempts:
            sleep(1)
            # Bug fix: query the topmost activity on EVERY attempt. The
            # original code ran the adb command once before the loop, so all
            # three "attempts" re-checked the same stale output and the retry
            # loop could never observe a change.
            resumed_activity_output = execute_command(
                "adb"
                + ("" if device_id is None else " -s " + device_id)
                + f" shell dumpsys activity | grep 'mResumedActivity'"
            )
            if (
                resumed_activity_output is not None
                and full_webview_activity_name in resumed_activity_output
            ):
                print(
                    COLOR_FAIL
                    + "WebView is shown. Counting that as a hard-ban indicator!"
                    + COLOR_ENDC
                )
                self.indicate_ban()
                return
            attempt += 1

    def indicate_ban(self):
        # Raising (rather than returning a flag) aborts the calling session
        # immediately wherever the check is performed.
        raise HardBanError("Hard ban indicated!")
hardban_indicator = HardBanIndicator()
| <filename>insomniac/hardban_indicator.py
from insomniac.utils import *
class HardBanError(Exception):
pass
class HardBanIndicator:
WEBVIEW_ACTIVITY_NAME = "com.instagram.simplewebview.SimpleWebViewActivity"
def detect_webview(self, device):
"""
While "hard banned" Instagram shows you a webview with CAPTCHA and request to confirm your account. So what we
need is to simply detect that topmost activity is a webview.
"""
device_id = device.device_id
app_id = device.app_id
resumed_activity_output = execute_command(
"adb"
+ ("" if device_id is None else " -s " + device_id)
+ f" shell dumpsys activity | grep 'mResumedActivity'"
)
max_attempts = 3
attempt = 1
while attempt <= max_attempts:
sleep(1)
full_webview_activity_name = f"{app_id}/{self.WEBVIEW_ACTIVITY_NAME}"
if (
resumed_activity_output is not None
and full_webview_activity_name in resumed_activity_output
):
print(
COLOR_FAIL
+ "WebView is shown. Counting that as a hard-ban indicator!"
+ COLOR_ENDC
)
self.indicate_ban()
return
attempt += 1
def indicate_ban(self):
raise HardBanError("Hard ban indicated!")
hardban_indicator = HardBanIndicator()
| en | 0.927065 | While "hard banned" Instagram shows you a webview with CAPTCHA and request to confirm your account. So what we need is to simply detect that topmost activity is a webview. | 2.873102 | 3 |
plugins/link.py | shadabk96/mm_hack | 0 | 6624984 | <filename>plugins/link.py<gh_stars>0
# -*- coding: utf-8 -*-
import logging
import re
import time
import link_constants
from linkbot import session, settings
from linkbot.bot import listen_to, respond_to
from linkbot.scheduler import schedule, catch_exceptions
from linkbot.utils import allow_only_direct_message
from linkbot.bot_constants import SCHEDULED_UPDATE_TIME_INTERVAL
from linkbot.plugins.link_models import Link, Tag, BotSubscriber
from linkbot.plugins.link_utils import populate_params, message_response, populate_link_data, pretty_print, pretty_print_table
logger = logging.getLogger(__name__)
@respond_to('^test$', re.IGNORECASE)
def test_listen(message):
    # Liveness check: replying to "test" lets users confirm the bot is up.
    message.reply("Hello %s! Test successful" % message.get_username())
@respond_to('^testdb$$', re.IGNORECASE)
@allow_only_direct_message()
def test_db(message):
    # Debug command (direct message only): dump the full contents of the
    # three plugin tables back to the requester.
    # NOTE(review): the pattern above has a doubled '$'; harmless to re, but
    # likely a typo for '^testdb$'.
    link_table = session.query(Link).all()
    message.reply('**Printing Link Table - **\n%s' % pretty_print_table(link_table))
    tag_table = session.query(Tag).all()
    message.reply('**Printing Tag Table - **\n%s' % pretty_print_table(tag_table))
    bot_subscriber_table = session.query(BotSubscriber).all()
    message.reply('**Printing BotSubscriber Table - **\n%s' % pretty_print_table(bot_subscriber_table))
@listen_to('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
def link_listen(message):
    """Capture any message containing a URL and persist it (plus its #tags)."""
    # Pull the relevant fields off the incoming message.
    author = message._get_sender_name()
    channel = message.get_channel_name()
    message_text = message.get_message()
    logger.info('Params: Author = %s, channel = %s, Message = %s' % (author, channel, message_text))
    # The first URL found in the text is the one we record.
    url = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', message_text)[0]
    ts = str(time.time())
    # Words starting with '#' become tags (leading '#' stripped).
    tags = [word[1:] for word in message_text.split() if word.startswith("#")]
    # Strip the URL so only the human-written text is stored.
    message_text = message_text.replace(url, "")
    link = Link(author=author, message=message_text, link=url, channel=channel, timestamp=ts)
    session.add(link)
    # Flush first so link.id is populated before the Tag rows reference it.
    session.flush()
    if tags:
        session.add_all([Tag(message_id=link.id, tag=tag) for tag in tags])
    session.commit()
@respond_to('^links .*$')
@catch_exceptions(cancel_on_failure=True)
def get_aggregated_links(message, userId=None, teamId=None, channelId=None):
    """Reply with stored links filtered by days/tags/channels.

    Invoked both directly ('links ...') and by the scheduler for
    subscribers (userId/teamId/channelId supplied in that case).
    """
    days, tags, channels = populate_params(message, userId, teamId)
    result = populate_link_data(days, tags, channels, message)
    if result == []:
        if message.get_message() == 'subscribe':
            message.reply('There have been no posts for the past 7 days :| Please wait for the next update!')
        else:
            message.reply("Unable to find Links matching your criteria :| Please try changing your search criteria!")
    else:
        # An empty body means this call came from the scheduler, not a user.
        scheduled_update = True
        if message._body:
            scheduled_update = False
        message_response(message, pretty_print(result, scheduled_update), channelId)
    logger.info("Bot Log : Function=get_aggregated_links() - aggregated link updates based on filter criteria")
@respond_to('^subscribe$', re.IGNORECASE)
@allow_only_direct_message()
def subscribe_links_summary(message):
    '''
    Subscribes sender of message of Link aggregation scheduled update by the bot
    Currently Link aggregation update is scheduled for every 30 seconds for testing purposes
    - schedule.every(SCHEDULED_UPDATE_TIME_INTERVAL).seconds.do(get_aggregated_links, message).tag(userId)
    For production, Link aggregation update can be scheduled every day at 10:00 AM by setting
    - schedule.every().day().at("10:00").do(get_aggregated_links, message).tag(userId)
    This functionality can be extended to support periodic scheduling for daily/weekly/monthly time period.
    Above changes also need to be reflected in 'run_scheduled_update_jobs()' function in 'bot.bot' module
    '''
    userId = message.get_user_id()
    # Guard against double subscription.
    alreadyBotSubscribed = session.query(BotSubscriber).filter(BotSubscriber.user_id == userId).all()
    if alreadyBotSubscribed != []:
        message.reply("You are already a subscriber of the my updates! :) Wait for the next update!")
        return
    # scheduled link aggregation; the job is tagged with the user id so it can
    # be cancelled later by unsubscribe_links_summary via schedule.clear(userId).
    schedule.every(SCHEDULED_UPDATE_TIME_INTERVAL).seconds.do(get_aggregated_links, message).tag(userId)
    botSubcriber = BotSubscriber(user_id=userId, team_id=message.get_teams_of_user(userId)[0][u'id'], \
                                 channel_id=message.channel)
    session.add(botSubcriber)
    session.flush()
    session.commit()
    message.reply("Successfully subscribed for my updates! Wait for the next update! :)")
    logger.info("Bot Log : Function=subscribe_links_summary() - user subscribed from scheduled link updates")
@respond_to('^unsubscribe$', re.IGNORECASE)
@allow_only_direct_message()
def unsubscribe_links_summary(message):
    """Cancel the sender's scheduled link-summary job and delete their record."""
    # The scheduler job was tagged with the user id at subscription time.
    jobTag = message.get_user_id()
    alreadyBotSubscribed = session.query(BotSubscriber).filter(BotSubscriber.user_id == jobTag).all()
    if alreadyBotSubscribed == []:
        message.reply("You haven't Subscribed for the my updates! Cannot Unsubscribe you :P")
        return
    # clear job
    schedule.clear(jobTag)
    session.query(BotSubscriber).filter(BotSubscriber.user_id == jobTag).delete()
    session.commit()
    message.reply("You have been successfully unsubscribed! :)")
    logger.info("Bot Log : Function=unsubscribe_links_summary() - user unsubscribed from scheduled link updates")
test_listen.__doc__ = link_constants.TEST_LISTEN_DOC
test_db.__doc__ = link_constants.TEST_DB_DOC
link_listen.__doc__ = link_constants.LINK_LISTEN_DOC
get_aggregated_links.__doc__ = link_constants.LINK_AGGREGATION_DOC
subscribe_links_summary.__doc__ = link_constants.LINK_SUBSCRIBE_DOC
unsubscribe_links_summary.__doc__ = link_constants.LINK_UNSUBSCRIBE_DOC
| <filename>plugins/link.py<gh_stars>0
# -*- coding: utf-8 -*-
import logging
import re
import time
import link_constants
from linkbot import session, settings
from linkbot.bot import listen_to, respond_to
from linkbot.scheduler import schedule, catch_exceptions
from linkbot.utils import allow_only_direct_message
from linkbot.bot_constants import SCHEDULED_UPDATE_TIME_INTERVAL
from linkbot.plugins.link_models import Link, Tag, BotSubscriber
from linkbot.plugins.link_utils import populate_params, message_response, populate_link_data, pretty_print, pretty_print_table
logger = logging.getLogger(__name__)
@respond_to('^test$', re.IGNORECASE)
def test_listen(message):
message.reply("Hello %s! Test successful" % message.get_username())
@respond_to('^testdb$$', re.IGNORECASE)
@allow_only_direct_message()
def test_db(message):
link_table = session.query(Link).all()
message.reply('**Printing Link Table - **\n%s' % pretty_print_table(link_table))
tag_table = session.query(Tag).all()
message.reply('**Printing Tag Table - **\n%s' % pretty_print_table(tag_table))
bot_subscriber_table = session.query(BotSubscriber).all()
message.reply('**Printing BotSubscriber Table - **\n%s' % pretty_print_table(bot_subscriber_table))
@listen_to('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
def link_listen(message):
# get relevant info from message
author = message._get_sender_name()
channel = message.get_channel_name()
message_text = message.get_message()
logger.info('Params: Author = %s, channel = %s, Message = %s' % (author, channel, message_text))
# extract link from message
url = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', message_text)[0]
ts = str(time.time())
tags = [i[1:] for i in message_text.split() if i.startswith("#") ]
# clean message
message_text = message_text.replace(url, "")
# store in db
link = Link(author = author, message = message_text, link = url, channel = channel, timestamp = ts)
session.add(link)
session.flush()
if len(tags) != 0:
tags_arr = []
for tag in tags:
tags_arr.append(Tag(message_id = link.id, tag = tag))
session.add_all(tags_arr)
session.commit()
@respond_to('^links .*$')
@catch_exceptions(cancel_on_failure=True)
def get_aggregated_links(message, userId=None, teamId=None, channelId=None):
days, tags, channels = populate_params(message, userId, teamId)
result = populate_link_data(days, tags, channels, message)
if result == []:
if message.get_message() == 'subscribe':
message.reply('There have been no posts for the past 7 days :| Please wait for the next update!')
else:
message.reply("Unable to find Links matching your criteria :| Please try changing your search criteria!")
else:
scheduled_update = True
if message._body:
scheduled_update = False
message_response(message, pretty_print(result, scheduled_update), channelId)
logger.info("Bot Log : Function=get_aggregated_links() - aggregated link updates based on filter criteria")
@respond_to('^subscribe$', re.IGNORECASE)
@allow_only_direct_message()
def subscribe_links_summary(message):
'''
Subscribes sender of message of Link aggregation scheduled update by the bot
Currently Link aggregation update is scheduled for every 30 seconds for testing purposes
- schedule.every(SCHEDULED_UPDATE_TIME_INTERVAL).seconds.do(get_aggregated_links, message).tag(userId)
For production, Link aggregation update can be scheduled every day at 10:00 AM by setting
- schedule.every().day().at("10:00").do(get_aggregated_links, message).tag(userId)
This functionality can be extended to support periodic scheduling for daily/weekly/monthly time period.
Above changes also need to be reflected in 'run_scheduled_update_jobs()' function in 'bot.bot' module
'''
userId = message.get_user_id()
alreadyBotSubscribed = session.query(BotSubscriber).filter(BotSubscriber.user_id == userId).all()
if alreadyBotSubscribed != []:
message.reply("You are already a subscriber of the my updates! :) Wait for the next update!")
return
# scheduled link aggregation
schedule.every(SCHEDULED_UPDATE_TIME_INTERVAL).seconds.do(get_aggregated_links, message).tag(userId)
botSubcriber = BotSubscriber(user_id=userId, team_id=message.get_teams_of_user(userId)[0][u'id'], \
channel_id=message.channel)
session.add(botSubcriber)
session.flush()
session.commit()
message.reply("Successfully subscribed for my updates! Wait for the next update! :)")
logger.info("Bot Log : Function=subscribe_links_summary() - user subscribed from scheduled link updates")
@respond_to('^unsubscribe$', re.IGNORECASE)
@allow_only_direct_message()
def unsubscribe_links_summary(message):
jobTag = message.get_user_id()
alreadyBotSubscribed = session.query(BotSubscriber).filter(BotSubscriber.user_id == jobTag).all()
if alreadyBotSubscribed == []:
message.reply("You haven't Subscribed for the my updates! Cannot Unsubscribe you :P")
return
# clear job
schedule.clear(jobTag)
session.query(BotSubscriber).filter(BotSubscriber.user_id == jobTag).delete()
session.commit()
message.reply("You have been successfully unsubscribed! :)")
logger.info("Bot Log : Function=unsubscribe_links_summary() - user unsubscribed from scheduled link updates")
test_listen.__doc__ = link_constants.TEST_LISTEN_DOC
test_db.__doc__ = link_constants.TEST_DB_DOC
link_listen.__doc__ = link_constants.LINK_LISTEN_DOC
get_aggregated_links.__doc__ = link_constants.LINK_AGGREGATION_DOC
subscribe_links_summary.__doc__ = link_constants.LINK_SUBSCRIBE_DOC
unsubscribe_links_summary.__doc__ = link_constants.LINK_UNSUBSCRIBE_DOC
| en | 0.646979 | # -*- coding: utf-8 -*- # get relevant info from message # extract link from message # clean message # store in db Subscribes sender of message of Link aggregation scheduled update by the bot Currently Link aggregation update is scheduled for every 30 seconds for testing purposes - schedule.every(SCHEDULED_UPDATE_TIME_INTERVAL).seconds.do(get_aggregated_links, message).tag(userId) For production, Link aggregation update can be scheduled every day at 10:00 AM by setting - schedule.every().day().at("10:00").do(get_aggregated_links, message).tag(userId) This functionality can be extended to support periodic scheduling for daily/weekly/monthly time period. Above changes also need to be reflected in 'run_scheduled_update_jobs()' function in 'bot.bot' module # scheduled link aggregation # clear job | 2.026373 | 2 |
tools/mo/openvino/tools/mo/front/onnx/constant_of_shape_ext.py | pazamelin/openvino | 1 | 6624985 | <filename>tools/mo/openvino/tools/mo/front/onnx/constant_of_shape_ext.py
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from onnx import numpy_helper
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr
from openvino.tools.mo.ops.constant_of_shape import ConstantOfShape
class ConstantOfShapeExtractor(FrontExtractorOp):
    # Front extractor for the ONNX ConstantOfShape operator: reads the optional
    # 'value' tensor attribute and records it as the node's fill value.
    op = 'ConstantOfShape'
    enabled = True
    @classmethod
    def extract(cls, node):
        # 'value' is a tensor attribute; when absent, fall back to a single 0.0
        # (matching the ONNX default for ConstantOfShape).
        fill_value = onnx_attr(node, 'value', 't', default=mo_array([0.0]), dst_type=lambda x: numpy_helper.to_array(x))
        ConstantOfShape.update_node_stat(node, {'fill_value': fill_value})
        return cls.enabled
| <filename>tools/mo/openvino/tools/mo/front/onnx/constant_of_shape_ext.py
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from onnx import numpy_helper
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr
from openvino.tools.mo.ops.constant_of_shape import ConstantOfShape
class ConstantOfShapeExtractor(FrontExtractorOp):
op = 'ConstantOfShape'
enabled = True
@classmethod
def extract(cls, node):
fill_value = onnx_attr(node, 'value', 't', default=mo_array([0.0]), dst_type=lambda x: numpy_helper.to_array(x))
ConstantOfShape.update_node_stat(node, {'fill_value': fill_value})
return cls.enabled
| de | 0.248959 | # Copyright (C) 2018-2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 | 1.952985 | 2 |
Day 17/quiz_brain.py | Jackson-Miller/python-boot-camp | 0 | 6624986 | <gh_stars>0
class QuizBrain:
    """Drives a true/false quiz over a list of question objects.

    Each question object is expected to expose ``text`` and ``answer``
    attributes (produced elsewhere in the project).
    """

    def __init__(self, q_list):
        # Running tally of correct answers and position within the quiz.
        self.score = 0
        self.question_number = 0
        self.question_list = q_list

    def still_has_questions(self):
        """Return True while there are unasked questions left."""
        remaining = len(self.question_list) - self.question_number
        return remaining > 0

    def next_question(self):
        """Prompt the user with the next question and score the reply."""
        question = self.question_list[self.question_number]
        self.question_number += 1
        user_answer = input(f"Q.{self.question_number}: {question.text} (True/False): ")
        self.check_answer(user_answer, question.answer)

    def check_answer(self, u_answer, q_answer):
        """Compare the user's reply against the answer, case-insensitively."""
        correct = u_answer.lower() == q_answer.lower()
        if correct:
            self.score += 1
            print("You got it right!")
        else:
            print("You got it wrong!")
            print(f"The correct answer was: {q_answer}")
        print(f"Your current score is: {self.score}/{self.question_number} \n")
| class QuizBrain:
def __init__(self, q_list):
self.score = 0
self.question_number = 0
self.question_list = q_list
def still_has_questions(self):
return self.question_number < len(self.question_list)
def next_question(self):
question = self.question_list[self.question_number]
self.question_number += 1
user_answer = input(f"Q.{self.question_number}: {question.text} (True/False): ")
self.check_answer(user_answer, question.answer)
def check_answer(self, u_answer, q_answer):
if u_answer.lower() == q_answer.lower():
self.score += 1
print("You got it right!")
else:
print("You got it wrong!")
print(f"The correct answer was: {q_answer}")
print(f"Your current score is: {self.score}/{self.question_number} \n") | none | 1 | 3.589128 | 4 | |
test/test_acl_plugin_l2l3.py | lfntac/vpp | 1 | 6624987 | <filename>test/test_acl_plugin_l2l3.py<gh_stars>1-10
#!/usr/bin/env python
"""ACL IRB Test Case HLD:
**config**
- L2 MAC learning enabled in l2bd
- 2 routed interfaces untagged, bvi (Bridge Virtual Interface)
- 2 bridged interfaces in l2bd with bvi
**test**
- sending ip4 eth pkts between routed interfaces
- 2 routed interfaces
- 2 bridged interfaces
- 64B, 512B, 1518B, 9200B (ether_size)
- burst of pkts per interface
- 257pkts per burst
- routed pkts hitting different FIB entries
- bridged pkts hitting different MAC entries
**verify**
- all packets received correctly
"""
import unittest
from socket import inet_pton, AF_INET, AF_INET6
from random import choice, shuffle
from pprint import pprint
import scapy.compat
from scapy.packet import Raw
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP, ICMP, TCP
from scapy.layers.inet6 import IPv6, ICMPv6Unknown, ICMPv6EchoRequest
from scapy.layers.inet6 import ICMPv6EchoReply, IPv6ExtHdrRouting
from scapy.layers.inet6 import IPv6ExtHdrFragment
from framework import VppTestCase, VppTestRunner
from vpp_papi_provider import L2_PORT_TYPE
import time
class TestACLpluginL2L3(VppTestCase):
"""TestACLpluginL2L3 Test Case"""
    @classmethod
    def setUpClass(cls):
        """
        #. Create BD with MAC learning enabled and put interfaces to this BD.
        #. Configure IPv4 addresses on loopback interface and routed interface.
        #. Configure MAC address binding to IPv4 neighbors on loop0.
        #. Configure MAC address on pg2.
        #. Loopback BVI interface has remote hosts, one half of hosts are
        behind pg0 second behind pg1.
        """
        super(TestACLpluginL2L3, cls).setUpClass()
        cls.pg_if_packet_sizes = [64, 512, 1518, 9018]  # packet sizes
        cls.bd_id = 10
        cls.remote_hosts_count = 250
        # create 3 pg interfaces, 1 loopback interface
        cls.create_pg_interfaces(range(3))
        cls.create_loopback_interfaces(1)
        cls.interfaces = list(cls.pg_interfaces)
        cls.interfaces.extend(cls.lo_interfaces)
        for i in cls.interfaces:
            i.admin_up()
        # Create BD with MAC learning enabled and put interfaces to this BD
        # (loop0 joins as the BVI, pg0/pg1 as plain bridged ports).
        cls.vapi.sw_interface_set_l2_bridge(
            rx_sw_if_index=cls.loop0.sw_if_index, bd_id=cls.bd_id,
            port_type=L2_PORT_TYPE.BVI)
        cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=cls.pg0.sw_if_index,
                                            bd_id=cls.bd_id)
        cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=cls.pg1.sw_if_index,
                                            bd_id=cls.bd_id)
        # Configure IPv4 addresses on loopback interface and routed interface
        cls.loop0.config_ip4()
        cls.loop0.config_ip6()
        cls.pg2.config_ip4()
        cls.pg2.config_ip6()
        # Configure MAC address binding to IPv4 neighbors on loop0
        cls.loop0.generate_remote_hosts(cls.remote_hosts_count)
        cls.loop0.configure_ipv4_neighbors()
        cls.loop0.configure_ipv6_neighbors()
        # configure MAC address on pg2
        cls.pg2.resolve_arp()
        cls.pg2.resolve_ndp()
        # Readable aliases used by the test methods when calling create_stream.
        cls.WITHOUT_EH = False
        cls.WITH_EH = True
        cls.STATELESS_ICMP = False
        cls.STATEFUL_ICMP = True
        # Loopback BVI interface has remote hosts, one half of hosts are behind
        # pg0 second behind pg1
        half = cls.remote_hosts_count // 2
        cls.pg0.remote_hosts = cls.loop0.remote_hosts[:half]
        cls.pg1.remote_hosts = cls.loop0.remote_hosts[half:]
    def tearDown(self):
        """Run standard test teardown and log ``show l2patch``,
        ``show l2fib verbose``,``show bridge-domain <bd_id> detail``,
        ``show ip arp``.
        """
        super(TestACLpluginL2L3, self).tearDown()
        # Only query VPP state if the instance survived the test; the CLI
        # dumps below are purely diagnostic.
        if not self.vpp_dead:
            self.logger.info(self.vapi.cli("show l2patch"))
            self.logger.info(self.vapi.cli("show classify tables"))
            self.logger.info(self.vapi.cli("show l2fib verbose"))
            self.logger.info(self.vapi.cli("show bridge-domain %s detail" %
                                           self.bd_id))
            self.logger.info(self.vapi.cli("show ip arp"))
            self.logger.info(self.vapi.cli("show ip6 neighbors"))
            cmd = "show acl-plugin sessions verbose 1"
            self.logger.info(self.vapi.cli(cmd))
            self.logger.info(self.vapi.cli("show acl-plugin acl"))
            self.logger.info(self.vapi.cli("show acl-plugin interface"))
            self.logger.info(self.vapi.cli("show acl-plugin tables"))
    def create_stream(self, src_ip_if, dst_ip_if, reverse, packet_sizes,
                      is_ip6, expect_blocked, expect_established,
                      add_extension_header, icmp_stateful=False):
        """Build a test packet stream plus three matching ACL rule sets.

        Emits up to 8 packets, alternating UDP and ICMP(v4/v6), with
        odd-indexed packets expected to be permitted (they get tracked
        payloads for later verification). Returns a dict with keys
        'stream', 'rules', 'permit_rules' and 'permit_and_reflect_rules'.
        """
        pkts = []
        rules = []
        permit_rules = []
        permit_and_reflect_rules = []
        total_packet_count = 8
        for i in range(0, total_packet_count):
            # modulo selects UDP (0) vs ICMP (1); icmp_type_delta flips between
            # echo-request-like and echo-reply-like types.
            modulo = (i//2) % 2
            icmp_type_delta = i % 2
            icmp_code = i
            is_udp_packet = (modulo == 0)
            if is_udp_packet and icmp_stateful:
                continue
            is_reflectable_icmp = (icmp_stateful and icmp_type_delta == 0 and
                                   not is_udp_packet)
            is_reflected_icmp = is_reflectable_icmp and expect_established
            can_reflect_this_packet = is_udp_packet or is_reflectable_icmp
            is_permit = i % 2
            remote_dst_index = i % len(dst_ip_if.remote_hosts)
            remote_dst_host = dst_ip_if.remote_hosts[remote_dst_index]
            if is_permit == 1:
                # Permitted packets carry a tracked payload so verify_capture
                # can match them back to this stream.
                info = self.create_packet_info(src_ip_if, dst_ip_if)
                payload = self.info_to_payload(info)
            else:
                to_be_blocked = False
                if (expect_blocked and not expect_established):
                    to_be_blocked = True
                if (not can_reflect_this_packet):
                    to_be_blocked = True
                if to_be_blocked:
                    payload = "to be blocked"
                else:
                    info = self.create_packet_info(src_ip_if, dst_ip_if)
                    payload = self.info_to_payload(info)
            # Address/port selection flips with 'reverse' (return direction).
            if reverse:
                dst_mac = 'de:ad:00:00:00:00'
                src_mac = remote_dst_host._mac
                dst_ip6 = src_ip_if.remote_ip6
                src_ip6 = remote_dst_host.ip6
                dst_ip4 = src_ip_if.remote_ip4
                src_ip4 = remote_dst_host.ip4
                dst_l4 = 1234 + i
                src_l4 = 4321 + i
            else:
                dst_mac = src_ip_if.local_mac
                src_mac = src_ip_if.remote_mac
                src_ip6 = src_ip_if.remote_ip6
                dst_ip6 = remote_dst_host.ip6
                src_ip4 = src_ip_if.remote_ip4
                dst_ip4 = remote_dst_host.ip4
                src_l4 = 1234 + i
                dst_l4 = 4321 + i
            if is_reflected_icmp:
                icmp_type_delta = 1
            # default ULP should be something we do not use in tests
            ulp_l4 = TCP(sport=src_l4, dport=dst_l4)
            # potentially a chain of protocols leading to ULP
            ulp = ulp_l4
            if is_udp_packet:
                if is_ip6:
                    ulp_l4 = UDP(sport=src_l4, dport=dst_l4)
                    if add_extension_header:
                        # prepend some extension headers
                        ulp = (IPv6ExtHdrRouting() / IPv6ExtHdrRouting() /
                               IPv6ExtHdrFragment(offset=0, m=1) / ulp_l4)
                        # uncomment below to test invalid ones
                        # ulp = IPv6ExtHdrRouting(len = 200) / ulp_l4
                    else:
                        ulp = ulp_l4
                    p = (Ether(dst=dst_mac, src=src_mac) /
                         IPv6(src=src_ip6, dst=dst_ip6) /
                         ulp /
                         Raw(payload))
                else:
                    ulp_l4 = UDP(sport=src_l4, dport=dst_l4)
                    # IPv4 does not allow extension headers,
                    # but we rather make it a first fragment
                    flags = 1 if add_extension_header else 0
                    ulp = ulp_l4
                    p = (Ether(dst=dst_mac, src=src_mac) /
                         IP(src=src_ip4, dst=dst_ip4, frag=0, flags=flags) /
                         ulp /
                         Raw(payload))
            elif modulo == 1:
                if is_ip6:
                    ulp_l4 = ICMPv6Unknown(type=128 + icmp_type_delta,
                                           code=icmp_code)
                    ulp = ulp_l4
                    p = (Ether(dst=dst_mac, src=src_mac) /
                         IPv6(src=src_ip6, dst=dst_ip6) /
                         ulp /
                         Raw(payload))
                else:
                    ulp_l4 = ICMP(type=8 - 8*icmp_type_delta, code=icmp_code)
                    ulp = ulp_l4
                    p = (Ether(dst=dst_mac, src=src_mac) /
                         IP(src=src_ip4, dst=dst_ip4) /
                         ulp /
                         Raw(payload))
            if i % 2 == 1:
                # Remember the exact sent packet for comparison on receive.
                info.data = p.copy()
            size = packet_sizes[(i // 2) % len(packet_sizes)]
            self.extend_packet(p, size)
            pkts.append(p)
            # Derive an ACL rule matching exactly this packet (addresses,
            # ports or ICMP type/code, and L4 protocol).
            rule_family = AF_INET6 if p.haslayer(IPv6) else AF_INET
            rule_prefix_len = 128 if p.haslayer(IPv6) else 32
            rule_l3_layer = IPv6 if p.haslayer(IPv6) else IP
            if p.haslayer(UDP):
                rule_l4_sport = p[UDP].sport
                rule_l4_dport = p[UDP].dport
            else:
                if p.haslayer(ICMP):
                    rule_l4_sport = p[ICMP].type
                    rule_l4_dport = p[ICMP].code
                else:
                    rule_l4_sport = p[ICMPv6Unknown].type
                    rule_l4_dport = p[ICMPv6Unknown].code
            if p.haslayer(IPv6):
                rule_l4_proto = ulp_l4.overload_fields[IPv6]['nh']
            else:
                rule_l4_proto = p[IP].proto
            new_rule = {
                'is_permit': is_permit,
                'is_ipv6': p.haslayer(IPv6),
                'src_ip_addr': inet_pton(rule_family,
                                         p[rule_l3_layer].src),
                'src_ip_prefix_len': rule_prefix_len,
                'dst_ip_addr': inet_pton(rule_family,
                                         p[rule_l3_layer].dst),
                'dst_ip_prefix_len': rule_prefix_len,
                'srcport_or_icmptype_first': rule_l4_sport,
                'srcport_or_icmptype_last': rule_l4_sport,
                'dstport_or_icmpcode_first': rule_l4_dport,
                'dstport_or_icmpcode_last': rule_l4_dport,
                'proto': rule_l4_proto,
            }
            rules.append(new_rule)
            # Variant rule sets: permit-everything, and permit+reflect where
            # the protocol supports stateful reflection.
            new_rule_permit = new_rule.copy()
            new_rule_permit['is_permit'] = 1
            permit_rules.append(new_rule_permit)
            new_rule_permit_and_reflect = new_rule.copy()
            if can_reflect_this_packet:
                new_rule_permit_and_reflect['is_permit'] = 2
            else:
                new_rule_permit_and_reflect['is_permit'] = is_permit
            permit_and_reflect_rules.append(new_rule_permit_and_reflect)
            self.logger.info("create_stream pkt#%d: %s" % (i, payload))
        return {'stream': pkts,
                'rules': rules,
                'permit_rules': permit_rules,
                'permit_and_reflect_rules': permit_and_reflect_rules}
    def verify_capture(self, dst_ip_if, src_ip_if, capture, reverse):
        """Check captured packets against the tracked stream sent earlier.

        Matches each received packet's payload back to its packet_info
        record and compares L3 addresses and (for UDP) L4 ports with the
        saved copy of the sent packet.
        """
        last_info = dict()
        for i in self.interfaces:
            last_info[i.sw_if_index] = None
        dst_ip_sw_if_index = dst_ip_if.sw_if_index
        for packet in capture:
            l3 = IP if packet.haslayer(IP) else IPv6
            ip = packet[l3]
            if packet.haslayer(UDP):
                l4 = UDP
            else:
                if packet.haslayer(ICMP):
                    l4 = ICMP
                else:
                    l4 = ICMPv6Unknown
            # Scapy IPv6 stuff is too smart for its own good.
            # So we do this and coerce the ICMP into unknown type
            if packet.haslayer(UDP):
                data = scapy.compat.raw(packet[UDP][Raw])
            else:
                if l3 == IP:
                    data = scapy.compat.raw(ICMP(
                        scapy.compat.raw(packet[l3].payload))[Raw])
                else:
                    data = scapy.compat.raw(ICMPv6Unknown(
                        scapy.compat.raw(packet[l3].payload)).msgbody)
            udp_or_icmp = packet[l3].payload
            data_obj = Raw(data)
            # FIXME: make framework believe we are on object
            payload_info = self.payload_to_info(data_obj)
            packet_index = payload_info.index
            self.assertEqual(payload_info.dst, dst_ip_sw_if_index)
            # Walk the per-source packet_info chain in order.
            next_info = self.get_next_packet_info_for_interface2(
                payload_info.src, dst_ip_sw_if_index,
                last_info[payload_info.src])
            last_info[payload_info.src] = next_info
            self.assertTrue(next_info is not None)
            self.assertEqual(packet_index, next_info.index)
            saved_packet = next_info.data
            self.assertTrue(next_info is not None)
            # MAC: src, dst
            if not reverse:
                self.assertEqual(packet.src, dst_ip_if.local_mac)
                host = dst_ip_if.host_by_mac(packet.dst)
            # IP: src, dst
            # self.assertEqual(ip.src, src_ip_if.remote_ip4)
            if saved_packet is not None:
                self.assertEqual(ip.src, saved_packet[l3].src)
                self.assertEqual(ip.dst, saved_packet[l3].dst)
                if l4 == UDP:
                    self.assertEqual(udp_or_icmp.sport, saved_packet[l4].sport)
                    self.assertEqual(udp_or_icmp.dport, saved_packet[l4].dport)
            # self.assertEqual(ip.dst, host.ip4)
            # UDP:
def applied_acl_shuffle(self, sw_if_index):
# first collect what ACLs are applied and what they look like
r = self.vapi.acl_interface_list_dump(sw_if_index=sw_if_index)
orig_applied_acls = r[0]
# we will collect these just to save and generate additional rulesets
orig_acls = []
for acl_num in orig_applied_acls.acls:
rr = self.vapi.acl_dump(acl_num)
orig_acls.append(rr[0])
# now create a list of all the rules in all ACLs
all_rules = []
for old_acl in orig_acls:
for rule in old_acl.r:
all_rules.append(dict(rule._asdict()))
# Add a few ACLs made from shuffled rules
shuffle(all_rules)
reply = self.vapi.acl_add_replace(acl_index=4294967295,
r=all_rules[::2],
tag=b"shuffle 1. acl")
shuffle_acl_1 = reply.acl_index
shuffle(all_rules)
reply = self.vapi.acl_add_replace(acl_index=4294967295,
r=all_rules[::3],
tag=b"shuffle 2. acl")
shuffle_acl_2 = reply.acl_index
shuffle(all_rules)
reply = self.vapi.acl_add_replace(acl_index=4294967295,
r=all_rules[::2],
tag=b"shuffle 3. acl")
shuffle_acl_3 = reply.acl_index
# apply the shuffle ACLs in front
input_acls = [shuffle_acl_1, shuffle_acl_2]
output_acls = [shuffle_acl_1, shuffle_acl_2]
# add the currently applied ACLs
n_input = orig_applied_acls.n_input
input_acls.extend(orig_applied_acls.acls[:n_input])
output_acls.extend(orig_applied_acls.acls[n_input:])
# and the trailing shuffle ACL(s)
input_acls.extend([shuffle_acl_3])
output_acls.extend([shuffle_acl_3])
# set the interface ACL list to the result
self.vapi.acl_interface_set_acl_list(sw_if_index=sw_if_index,
n_input=len(input_acls),
acls=input_acls + output_acls)
# change the ACLs a few times
for i in range(1, 10):
shuffle(all_rules)
reply = self.vapi.acl_add_replace(acl_index=shuffle_acl_1,
r=all_rules[::1+(i % 2)],
tag=b"shuffle 1. acl")
shuffle(all_rules)
reply = self.vapi.acl_add_replace(acl_index=shuffle_acl_2,
r=all_rules[::1+(i % 3)],
tag=b"shuffle 2. acl")
shuffle(all_rules)
reply = self.vapi.acl_add_replace(acl_index=shuffle_acl_2,
r=all_rules[::1+(i % 5)],
tag=b"shuffle 3. acl")
# restore to how it was before and clean up
self.vapi.acl_interface_set_acl_list(sw_if_index=sw_if_index,
n_input=orig_applied_acls.n_input,
acls=orig_applied_acls.acls)
reply = self.vapi.acl_del(acl_index=shuffle_acl_1)
reply = self.vapi.acl_del(acl_index=shuffle_acl_2)
reply = self.vapi.acl_del(acl_index=shuffle_acl_3)
def create_acls_for_a_stream(self, stream_dict,
test_l2_action, is_reflect):
r = stream_dict['rules']
r_permit = stream_dict['permit_rules']
r_permit_reflect = stream_dict['permit_and_reflect_rules']
r_action = r_permit_reflect if is_reflect else r
reply = self.vapi.acl_add_replace(acl_index=4294967295, r=r_action,
tag=b"act. acl")
action_acl_index = reply.acl_index
reply = self.vapi.acl_add_replace(acl_index=4294967295, r=r_permit,
tag=b"perm. acl")
permit_acl_index = reply.acl_index
return {'L2': action_acl_index if test_l2_action else permit_acl_index,
'L3': permit_acl_index if test_l2_action else action_acl_index,
'permit': permit_acl_index, 'action': action_acl_index}
def apply_acl_ip46_x_to_y(self, bridged_to_routed, test_l2_deny,
is_ip6, is_reflect, add_eh):
""" Apply the ACLs
"""
self.reset_packet_infos()
stream_dict = self.create_stream(
self.pg2, self.loop0,
bridged_to_routed,
self.pg_if_packet_sizes, is_ip6,
not is_reflect, False, add_eh)
stream = stream_dict['stream']
acl_idx = self.create_acls_for_a_stream(stream_dict, test_l2_deny,
is_reflect)
n_input_l3 = 0 if bridged_to_routed else 1
n_input_l2 = 1 if bridged_to_routed else 0
self.vapi.acl_interface_set_acl_list(sw_if_index=self.pg2.sw_if_index,
n_input=n_input_l3,
acls=[acl_idx['L3']])
self.vapi.acl_interface_set_acl_list(sw_if_index=self.pg0.sw_if_index,
n_input=n_input_l2,
acls=[acl_idx['L2']])
self.vapi.acl_interface_set_acl_list(sw_if_index=self.pg1.sw_if_index,
n_input=n_input_l2,
acls=[acl_idx['L2']])
self.applied_acl_shuffle(self.pg0.sw_if_index)
self.applied_acl_shuffle(self.pg2.sw_if_index)
def apply_acl_ip46_both_directions_reflect(self,
primary_is_bridged_to_routed,
reflect_on_l2, is_ip6, add_eh,
stateful_icmp):
primary_is_routed_to_bridged = not primary_is_bridged_to_routed
self.reset_packet_infos()
stream_dict_fwd = self.create_stream(self.pg2, self.loop0,
primary_is_bridged_to_routed,
self.pg_if_packet_sizes, is_ip6,
False, False, add_eh,
stateful_icmp)
acl_idx_fwd = self.create_acls_for_a_stream(stream_dict_fwd,
reflect_on_l2, True)
stream_dict_rev = self.create_stream(self.pg2, self.loop0,
not primary_is_bridged_to_routed,
self.pg_if_packet_sizes, is_ip6,
True, True, add_eh, stateful_icmp)
# We want the primary action to be "deny" rather than reflect
acl_idx_rev = self.create_acls_for_a_stream(stream_dict_rev,
reflect_on_l2, False)
if primary_is_bridged_to_routed:
inbound_l2_acl = acl_idx_fwd['L2']
else:
inbound_l2_acl = acl_idx_rev['L2']
if primary_is_routed_to_bridged:
outbound_l2_acl = acl_idx_fwd['L2']
else:
outbound_l2_acl = acl_idx_rev['L2']
if primary_is_routed_to_bridged:
inbound_l3_acl = acl_idx_fwd['L3']
else:
inbound_l3_acl = acl_idx_rev['L3']
if primary_is_bridged_to_routed:
outbound_l3_acl = acl_idx_fwd['L3']
else:
outbound_l3_acl = acl_idx_rev['L3']
self.vapi.acl_interface_set_acl_list(sw_if_index=self.pg2.sw_if_index,
n_input=1,
acls=[inbound_l3_acl,
outbound_l3_acl])
self.vapi.acl_interface_set_acl_list(sw_if_index=self.pg0.sw_if_index,
n_input=1,
acls=[inbound_l2_acl,
outbound_l2_acl])
self.vapi.acl_interface_set_acl_list(sw_if_index=self.pg1.sw_if_index,
n_input=1,
acls=[inbound_l2_acl,
outbound_l2_acl])
self.applied_acl_shuffle(self.pg0.sw_if_index)
self.applied_acl_shuffle(self.pg2.sw_if_index)
    def apply_acl_ip46_routed_to_bridged(self, test_l2_deny, is_ip6,
                                         is_reflect, add_eh):
        """Convenience wrapper: apply ACLs, routed -> bridged direction."""
        self.apply_acl_ip46_x_to_y(False, test_l2_deny, is_ip6,
                                   is_reflect, add_eh)
    def apply_acl_ip46_bridged_to_routed(self, test_l2_deny, is_ip6,
                                         is_reflect, add_eh):
        """Convenience wrapper: apply ACLs, bridged -> routed direction."""
        self.apply_acl_ip46_x_to_y(True, test_l2_deny, is_ip6,
                                   is_reflect, add_eh)
    def run_traffic_ip46_x_to_y(self, bridged_to_routed,
                                test_l2_deny, is_ip6,
                                is_reflect, is_established, add_eh,
                                stateful_icmp=False):
        """Send a stream in one direction and verify the expected subset
        arrives (packets marked "to be blocked" carry no expected-info
        payload and are not counted).
        """
        self.reset_packet_infos()
        stream_dict = self.create_stream(self.pg2, self.loop0,
                                         bridged_to_routed,
                                         self.pg_if_packet_sizes, is_ip6,
                                         not is_reflect, is_established,
                                         add_eh, stateful_icmp)
        stream = stream_dict['stream']
        # bridged->routed transmits on pg0 and receives on pg2; vice versa
        tx_if = self.pg0 if bridged_to_routed else self.pg2
        rx_if = self.pg2 if bridged_to_routed else self.pg0
        tx_if.add_stream(stream)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        # expected count = packets whose payload was registered for loop0
        packet_count = self.get_packet_count_for_if_idx(self.loop0.sw_if_index)
        rcvd1 = rx_if.get_capture(packet_count)
        self.verify_capture(self.loop0, self.pg2, rcvd1, bridged_to_routed)
    def run_traffic_ip46_routed_to_bridged(self, test_l2_deny, is_ip6,
                                           is_reflect, is_established, add_eh,
                                           stateful_icmp=False):
        """Convenience wrapper: send traffic, routed -> bridged."""
        self.run_traffic_ip46_x_to_y(False, test_l2_deny, is_ip6,
                                     is_reflect, is_established, add_eh,
                                     stateful_icmp)
    def run_traffic_ip46_bridged_to_routed(self, test_l2_deny, is_ip6,
                                           is_reflect, is_established, add_eh,
                                           stateful_icmp=False):
        """Convenience wrapper: send traffic, bridged -> routed."""
        self.run_traffic_ip46_x_to_y(True, test_l2_deny, is_ip6,
                                     is_reflect, is_established, add_eh,
                                     stateful_icmp)
    def run_test_ip46_routed_to_bridged(self, test_l2_deny,
                                        is_ip6, is_reflect, add_eh):
        """Apply ACLs, then run one-way traffic: routed -> bridged."""
        self.apply_acl_ip46_routed_to_bridged(test_l2_deny,
                                              is_ip6, is_reflect, add_eh)
        self.run_traffic_ip46_routed_to_bridged(test_l2_deny, is_ip6,
                                                is_reflect, False, add_eh)
    def run_test_ip46_bridged_to_routed(self, test_l2_deny,
                                        is_ip6, is_reflect, add_eh):
        """Apply ACLs, then run one-way traffic: bridged -> routed."""
        self.apply_acl_ip46_bridged_to_routed(test_l2_deny,
                                              is_ip6, is_reflect, add_eh)
        self.run_traffic_ip46_bridged_to_routed(test_l2_deny, is_ip6,
                                                is_reflect, False, add_eh)
    def run_test_ip46_routed_to_bridged_and_back(self, test_l2_action,
                                                 is_ip6, add_eh,
                                                 stateful_icmp=False):
        """Stateful test: reflect ACLs applied, primary direction
        routed -> bridged, then the return direction relies on the
        reflected (established) session.
        """
        self.apply_acl_ip46_both_directions_reflect(False, test_l2_action,
                                                    is_ip6, add_eh,
                                                    stateful_icmp)
        self.run_traffic_ip46_routed_to_bridged(test_l2_action, is_ip6,
                                                True, False, add_eh,
                                                stateful_icmp)
        self.run_traffic_ip46_bridged_to_routed(test_l2_action, is_ip6,
                                                False, True, add_eh,
                                                stateful_icmp)
    def run_test_ip46_bridged_to_routed_and_back(self, test_l2_action,
                                                 is_ip6, add_eh,
                                                 stateful_icmp=False):
        """Stateful test: reflect ACLs applied, primary direction
        bridged -> routed, then the return direction relies on the
        reflected (established) session.
        """
        self.apply_acl_ip46_both_directions_reflect(True, test_l2_action,
                                                    is_ip6, add_eh,
                                                    stateful_icmp)
        self.run_traffic_ip46_bridged_to_routed(test_l2_action, is_ip6,
                                                True, False, add_eh,
                                                stateful_icmp)
        self.run_traffic_ip46_routed_to_bridged(test_l2_action, is_ip6,
                                                False, True, add_eh,
                                                stateful_icmp)
    def test_0000_ip6_irb_1(self):
        """ ACL plugin prepare"""
        # Not a real test: one-time plugin configuration run first
        # (tests execute in name order) — sets a long UDP session idle
        # timeout in the acl-plugin session table.
        if not self.vpp_dead:
            cmd = "set acl-plugin session timeout udp idle 2000"
            self.logger.info(self.vapi.ppcli(cmd))
            # uncomment to not skip past the routing header
            # and watch the EH tests fail
            # self.logger.info(self.vapi.ppcli(
            #    "set acl-plugin skip-ipv6-extension-header 43 0"))
            # uncomment to test the session limit (stateful tests will fail)
            # self.logger.info(self.vapi.ppcli(
            #    "set acl-plugin session table max-entries 1"))
            # new datapath is the default, but just in case
            # self.logger.info(self.vapi.ppcli(
            #    "set acl-plugin l2-datapath new"))
            # If you want to see some tests fail, uncomment the next line
            # self.logger.info(self.vapi.ppcli(
            #    "set acl-plugin l2-datapath old"))
    # Stateless deny tests.
    # run_test_ip46_*_to_* args: (test_l2_deny, is_ip6, is_reflect, add_eh)
    def test_0001_ip6_irb_1(self):
        """ ACL IPv6 routed -> bridged, L2 ACL deny"""
        self.run_test_ip46_routed_to_bridged(True, True, False,
                                             self.WITHOUT_EH)
    def test_0002_ip6_irb_1(self):
        """ ACL IPv6 routed -> bridged, L3 ACL deny"""
        self.run_test_ip46_routed_to_bridged(False, True, False,
                                             self.WITHOUT_EH)
    def test_0003_ip4_irb_1(self):
        """ ACL IPv4 routed -> bridged, L2 ACL deny"""
        self.run_test_ip46_routed_to_bridged(True, False, False,
                                             self.WITHOUT_EH)
    def test_0004_ip4_irb_1(self):
        """ ACL IPv4 routed -> bridged, L3 ACL deny"""
        self.run_test_ip46_routed_to_bridged(False, False, False,
                                             self.WITHOUT_EH)
    def test_0005_ip6_irb_1(self):
        """ ACL IPv6 bridged -> routed, L2 ACL deny """
        self.run_test_ip46_bridged_to_routed(True, True, False,
                                             self.WITHOUT_EH)
    def test_0006_ip6_irb_1(self):
        """ ACL IPv6 bridged -> routed, L3 ACL deny """
        self.run_test_ip46_bridged_to_routed(False, True, False,
                                             self.WITHOUT_EH)
    def test_0007_ip6_irb_1(self):
        """ ACL IPv4 bridged -> routed, L2 ACL deny """
        self.run_test_ip46_bridged_to_routed(True, False, False,
                                             self.WITHOUT_EH)
    def test_0008_ip6_irb_1(self):
        """ ACL IPv4 bridged -> routed, L3 ACL deny """
        self.run_test_ip46_bridged_to_routed(False, False, False,
                                             self.WITHOUT_EH)
    # Stateful ACL tests
    # run_test_ip46_*_and_back args: (test_l2_action, is_ip6, add_eh)
    def test_0101_ip6_irb_1(self):
        """ ACL IPv6 routed -> bridged, L2 ACL permit+reflect"""
        self.run_test_ip46_routed_to_bridged_and_back(True, True,
                                                      self.WITHOUT_EH)
    def test_0102_ip6_irb_1(self):
        """ ACL IPv6 bridged -> routed, L2 ACL permit+reflect"""
        self.run_test_ip46_bridged_to_routed_and_back(True, True,
                                                      self.WITHOUT_EH)
    def test_0103_ip6_irb_1(self):
        """ ACL IPv4 routed -> bridged, L2 ACL permit+reflect"""
        self.run_test_ip46_routed_to_bridged_and_back(True, False,
                                                      self.WITHOUT_EH)
    def test_0104_ip6_irb_1(self):
        """ ACL IPv4 bridged -> routed, L2 ACL permit+reflect"""
        self.run_test_ip46_bridged_to_routed_and_back(True, False,
                                                      self.WITHOUT_EH)
    def test_0111_ip6_irb_1(self):
        """ ACL IPv6 routed -> bridged, L3 ACL permit+reflect"""
        self.run_test_ip46_routed_to_bridged_and_back(False, True,
                                                      self.WITHOUT_EH)
    def test_0112_ip6_irb_1(self):
        """ ACL IPv6 bridged -> routed, L3 ACL permit+reflect"""
        self.run_test_ip46_bridged_to_routed_and_back(False, True,
                                                      self.WITHOUT_EH)
    def test_0113_ip6_irb_1(self):
        """ ACL IPv4 routed -> bridged, L3 ACL permit+reflect"""
        self.run_test_ip46_routed_to_bridged_and_back(False, False,
                                                      self.WITHOUT_EH)
    def test_0114_ip6_irb_1(self):
        """ ACL IPv4 bridged -> routed, L3 ACL permit+reflect"""
        self.run_test_ip46_bridged_to_routed_and_back(False, False,
                                                      self.WITHOUT_EH)
    # A block of tests with extension headers
    # (IPv6 streams get routing + fragment extension headers prepended)
    def test_1001_ip6_irb_1(self):
        """ ACL IPv6+EH routed -> bridged, L2 ACL deny"""
        self.run_test_ip46_routed_to_bridged(True, True, False,
                                             self.WITH_EH)
    def test_1002_ip6_irb_1(self):
        """ ACL IPv6+EH routed -> bridged, L3 ACL deny"""
        self.run_test_ip46_routed_to_bridged(False, True, False,
                                             self.WITH_EH)
    def test_1005_ip6_irb_1(self):
        """ ACL IPv6+EH bridged -> routed, L2 ACL deny """
        self.run_test_ip46_bridged_to_routed(True, True, False,
                                             self.WITH_EH)
    def test_1006_ip6_irb_1(self):
        """ ACL IPv6+EH bridged -> routed, L3 ACL deny """
        self.run_test_ip46_bridged_to_routed(False, True, False,
                                             self.WITH_EH)
    def test_1101_ip6_irb_1(self):
        """ ACL IPv6+EH routed -> bridged, L2 ACL permit+reflect"""
        self.run_test_ip46_routed_to_bridged_and_back(True, True,
                                                      self.WITH_EH)
    def test_1102_ip6_irb_1(self):
        """ ACL IPv6+EH bridged -> routed, L2 ACL permit+reflect"""
        self.run_test_ip46_bridged_to_routed_and_back(True, True,
                                                      self.WITH_EH)
    def test_1111_ip6_irb_1(self):
        """ ACL IPv6+EH routed -> bridged, L3 ACL permit+reflect"""
        self.run_test_ip46_routed_to_bridged_and_back(False, True,
                                                      self.WITH_EH)
    def test_1112_ip6_irb_1(self):
        """ ACL IPv6+EH bridged -> routed, L3 ACL permit+reflect"""
        self.run_test_ip46_bridged_to_routed_and_back(False, True,
                                                      self.WITH_EH)
    # IPv4 with "MF" bit set
    # (for IPv4, add_eh makes create_stream set the more-fragments flag)
    def test_1201_ip6_irb_1(self):
        """ ACL IPv4+MF routed -> bridged, L2 ACL deny"""
        self.run_test_ip46_routed_to_bridged(True, False, False,
                                             self.WITH_EH)
    def test_1202_ip6_irb_1(self):
        """ ACL IPv4+MF routed -> bridged, L3 ACL deny"""
        self.run_test_ip46_routed_to_bridged(False, False, False,
                                             self.WITH_EH)
    def test_1205_ip6_irb_1(self):
        """ ACL IPv4+MF bridged -> routed, L2 ACL deny """
        self.run_test_ip46_bridged_to_routed(True, False, False,
                                             self.WITH_EH)
    def test_1206_ip6_irb_1(self):
        """ ACL IPv4+MF bridged -> routed, L3 ACL deny """
        self.run_test_ip46_bridged_to_routed(False, False, False,
                                             self.WITH_EH)
    def test_1301_ip6_irb_1(self):
        """ ACL IPv4+MF routed -> bridged, L2 ACL permit+reflect"""
        self.run_test_ip46_routed_to_bridged_and_back(True, False,
                                                      self.WITH_EH)
    def test_1302_ip6_irb_1(self):
        """ ACL IPv4+MF bridged -> routed, L2 ACL permit+reflect"""
        self.run_test_ip46_bridged_to_routed_and_back(True, False,
                                                      self.WITH_EH)
    def test_1311_ip6_irb_1(self):
        """ ACL IPv4+MF routed -> bridged, L3 ACL permit+reflect"""
        self.run_test_ip46_routed_to_bridged_and_back(False, False,
                                                      self.WITH_EH)
    def test_1312_ip6_irb_1(self):
        """ ACL IPv4+MF bridged -> routed, L3 ACL permit+reflect"""
        self.run_test_ip46_bridged_to_routed_and_back(False, False,
                                                      self.WITH_EH)
    # Stateful ACL tests with stateful ICMP
    # (_and_back args: test_l2_action, is_ip6, add_eh, stateful_icmp)
    def test_1401_ip6_irb_1(self):
        """ IPv6 routed -> bridged, L2 ACL permit+reflect, ICMP reflect"""
        self.run_test_ip46_routed_to_bridged_and_back(True, True,
                                                      self.WITHOUT_EH,
                                                      self.STATEFUL_ICMP)
    def test_1402_ip6_irb_1(self):
        """ IPv6 bridged -> routed, L2 ACL permit+reflect, ICMP reflect"""
        self.run_test_ip46_bridged_to_routed_and_back(True, True,
                                                      self.WITHOUT_EH,
                                                      self.STATEFUL_ICMP)
    def test_1403_ip4_irb_1(self):
        """ IPv4 routed -> bridged, L2 ACL permit+reflect, ICMP reflect"""
        self.run_test_ip46_routed_to_bridged_and_back(True, False,
                                                      self.WITHOUT_EH,
                                                      self.STATEFUL_ICMP)
    def test_1404_ip4_irb_1(self):
        """ IPv4 bridged -> routed, L2 ACL permit+reflect, ICMP reflect"""
        self.run_test_ip46_bridged_to_routed_and_back(True, False,
                                                      self.WITHOUT_EH,
                                                      self.STATEFUL_ICMP)
    def test_1411_ip6_irb_1(self):
        """ IPv6 routed -> bridged, L3 ACL permit+reflect, ICMP reflect"""
        self.run_test_ip46_routed_to_bridged_and_back(False, True,
                                                      self.WITHOUT_EH,
                                                      self.STATEFUL_ICMP)
    def test_1412_ip6_irb_1(self):
        """ IPv6 bridged -> routed, L3 ACL permit+reflect, ICMP reflect"""
        self.run_test_ip46_bridged_to_routed_and_back(False, True,
                                                      self.WITHOUT_EH,
                                                      self.STATEFUL_ICMP)
    def test_1413_ip4_irb_1(self):
        """ IPv4 routed -> bridged, L3 ACL permit+reflect, ICMP reflect"""
        self.run_test_ip46_routed_to_bridged_and_back(False, False,
                                                      self.WITHOUT_EH,
                                                      self.STATEFUL_ICMP)
    def test_1414_ip4_irb_1(self):
        """ IPv4 bridged -> routed, L3 ACL permit+reflect, ICMP reflect"""
        self.run_test_ip46_bridged_to_routed_and_back(False, False,
                                                      self.WITHOUT_EH,
                                                      self.STATEFUL_ICMP)
# Allow running this module standalone under the VPP test runner.
if __name__ == '__main__':
    unittest.main(testRunner=VppTestRunner)
# test/test_acl_plugin_l2l3.py (gh_stars: 1-10)
#!/usr/bin/env python
"""ACL IRB Test Case HLD:
**config**
- L2 MAC learning enabled in l2bd
- 2 routed interfaces untagged, bvi (Bridge Virtual Interface)
- 2 bridged interfaces in l2bd with bvi
**test**
- sending ip4 eth pkts between routed interfaces
- 2 routed interfaces
- 2 bridged interfaces
- 64B, 512B, 1518B, 9200B (ether_size)
- burst of pkts per interface
- 257pkts per burst
- routed pkts hitting different FIB entries
- bridged pkts hitting different MAC entries
**verify**
- all packets received correctly
"""
import unittest
from socket import inet_pton, AF_INET, AF_INET6
from random import choice, shuffle
from pprint import pprint
import scapy.compat
from scapy.packet import Raw
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP, ICMP, TCP
from scapy.layers.inet6 import IPv6, ICMPv6Unknown, ICMPv6EchoRequest
from scapy.layers.inet6 import ICMPv6EchoReply, IPv6ExtHdrRouting
from scapy.layers.inet6 import IPv6ExtHdrFragment
from framework import VppTestCase, VppTestRunner
from vpp_papi_provider import L2_PORT_TYPE
import time
class TestACLpluginL2L3(VppTestCase):
"""TestACLpluginL2L3 Test Case"""
    @classmethod
    def setUpClass(cls):
        """
        #. Create BD with MAC learning enabled and put interfaces to this BD.
        #. Configure IPv4 addresses on loopback interface and routed interface.
        #. Configure MAC address binding to IPv4 neighbors on loop0.
        #. Configure MAC address on pg2.
        #. Loopback BVI interface has remote hosts, one half of hosts are
           behind pg0 second behind pg1.
        """
        super(TestACLpluginL2L3, cls).setUpClass()
        cls.pg_if_packet_sizes = [64, 512, 1518, 9018]  # packet sizes
        cls.bd_id = 10
        cls.remote_hosts_count = 250
        # create 3 pg interfaces, 1 loopback interface
        cls.create_pg_interfaces(range(3))
        cls.create_loopback_interfaces(1)
        cls.interfaces = list(cls.pg_interfaces)
        cls.interfaces.extend(cls.lo_interfaces)
        for i in cls.interfaces:
            i.admin_up()
        # Create BD with MAC learning enabled and put interfaces to this BD
        cls.vapi.sw_interface_set_l2_bridge(
            rx_sw_if_index=cls.loop0.sw_if_index, bd_id=cls.bd_id,
            port_type=L2_PORT_TYPE.BVI)
        cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=cls.pg0.sw_if_index,
                                            bd_id=cls.bd_id)
        cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=cls.pg1.sw_if_index,
                                            bd_id=cls.bd_id)
        # Configure IPv4 addresses on loopback interface and routed interface
        cls.loop0.config_ip4()
        cls.loop0.config_ip6()
        cls.pg2.config_ip4()
        cls.pg2.config_ip6()
        # Configure MAC address binding to IPv4 neighbors on loop0
        cls.loop0.generate_remote_hosts(cls.remote_hosts_count)
        cls.loop0.configure_ipv4_neighbors()
        cls.loop0.configure_ipv6_neighbors()
        # configure MAC address on pg2
        cls.pg2.resolve_arp()
        cls.pg2.resolve_ndp()
        # readability aliases for the boolean test parameters
        cls.WITHOUT_EH = False
        cls.WITH_EH = True
        cls.STATELESS_ICMP = False
        cls.STATEFUL_ICMP = True
        # Loopback BVI interface has remote hosts, one half of hosts are behind
        # pg0 second behind pg1
        half = cls.remote_hosts_count // 2
        cls.pg0.remote_hosts = cls.loop0.remote_hosts[:half]
        cls.pg1.remote_hosts = cls.loop0.remote_hosts[half:]
def tearDown(self):
"""Run standard test teardown and log ``show l2patch``,
``show l2fib verbose``,``show bridge-domain <bd_id> detail``,
``show ip arp``.
"""
super(TestACLpluginL2L3, self).tearDown()
if not self.vpp_dead:
self.logger.info(self.vapi.cli("show l2patch"))
self.logger.info(self.vapi.cli("show classify tables"))
self.logger.info(self.vapi.cli("show l2fib verbose"))
self.logger.info(self.vapi.cli("show bridge-domain %s detail" %
self.bd_id))
self.logger.info(self.vapi.cli("show ip arp"))
self.logger.info(self.vapi.cli("show ip6 neighbors"))
cmd = "show acl-plugin sessions verbose 1"
self.logger.info(self.vapi.cli(cmd))
self.logger.info(self.vapi.cli("show acl-plugin acl"))
self.logger.info(self.vapi.cli("show acl-plugin interface"))
self.logger.info(self.vapi.cli("show acl-plugin tables"))
    def create_stream(self, src_ip_if, dst_ip_if, reverse, packet_sizes,
                      is_ip6, expect_blocked, expect_established,
                      add_extension_header, icmp_stateful=False):
        """Build a burst of test packets and the matching ACL rule sets.

        Eight packets are generated, alternating deny/permit (odd index =
        permit) and alternating UDP/ICMP flavours.  Permitted packets get
        a framework payload (info index) so verify_capture() can match
        them; packets expected to be dropped carry "to be blocked".

        :param reverse: build the opposite direction (swap MAC/IP/port
            roles; used for the return traffic of stateful tests).
        :param expect_blocked: deny-rule packets are expected dropped.
        :param expect_established: the stream models return traffic of an
            established (reflected) session.
        :param add_extension_header: IPv6 gets routing+fragment extension
            headers; IPv4 is marked as a first fragment instead.
        :param icmp_stateful: generate only ICMP flows with reflectable
            types (UDP packets are skipped).
        :returns: dict with keys 'stream' (scapy packets), 'rules'
            (mixed permit/deny), 'permit_rules' (all is_permit=1) and
            'permit_and_reflect_rules' (reflectable rules is_permit=2).
        """
        pkts = []
        rules = []
        permit_rules = []
        permit_and_reflect_rules = []
        total_packet_count = 8
        for i in range(0, total_packet_count):
            # i decides the flavour: pairs alternate UDP/ICMP, odd = permit
            modulo = (i//2) % 2
            icmp_type_delta = i % 2
            icmp_code = i
            is_udp_packet = (modulo == 0)
            if is_udp_packet and icmp_stateful:
                continue
            is_reflectable_icmp = (icmp_stateful and icmp_type_delta == 0 and
                                   not is_udp_packet)
            is_reflected_icmp = is_reflectable_icmp and expect_established
            can_reflect_this_packet = is_udp_packet or is_reflectable_icmp
            is_permit = i % 2
            remote_dst_index = i % len(dst_ip_if.remote_hosts)
            remote_dst_host = dst_ip_if.remote_hosts[remote_dst_index]
            if is_permit == 1:
                info = self.create_packet_info(src_ip_if, dst_ip_if)
                payload = self.info_to_payload(info)
            else:
                # deny-rule packet: only tag it as expected when it can
                # legitimately pass (reflected session traffic)
                to_be_blocked = False
                if (expect_blocked and not expect_established):
                    to_be_blocked = True
                if (not can_reflect_this_packet):
                    to_be_blocked = True
                if to_be_blocked:
                    payload = "to be blocked"
                else:
                    info = self.create_packet_info(src_ip_if, dst_ip_if)
                    payload = self.info_to_payload(info)
            if reverse:
                dst_mac = 'de:ad:00:00:00:00'
                src_mac = remote_dst_host._mac
                dst_ip6 = src_ip_if.remote_ip6
                src_ip6 = remote_dst_host.ip6
                dst_ip4 = src_ip_if.remote_ip4
                src_ip4 = remote_dst_host.ip4
                dst_l4 = 1234 + i
                src_l4 = 4321 + i
            else:
                dst_mac = src_ip_if.local_mac
                src_mac = src_ip_if.remote_mac
                src_ip6 = src_ip_if.remote_ip6
                dst_ip6 = remote_dst_host.ip6
                src_ip4 = src_ip_if.remote_ip4
                dst_ip4 = remote_dst_host.ip4
                src_l4 = 1234 + i
                dst_l4 = 4321 + i
            if is_reflected_icmp:
                # reply direction uses the "response" ICMP type
                icmp_type_delta = 1
            # default ULP should be something we do not use in tests
            ulp_l4 = TCP(sport=src_l4, dport=dst_l4)
            # potentially a chain of protocols leading to ULP
            ulp = ulp_l4
            if is_udp_packet:
                if is_ip6:
                    ulp_l4 = UDP(sport=src_l4, dport=dst_l4)
                    if add_extension_header:
                        # prepend some extension headers
                        ulp = (IPv6ExtHdrRouting() / IPv6ExtHdrRouting() /
                               IPv6ExtHdrFragment(offset=0, m=1) / ulp_l4)
                        # uncomment below to test invalid ones
                        # ulp = IPv6ExtHdrRouting(len = 200) / ulp_l4
                    else:
                        ulp = ulp_l4
                    p = (Ether(dst=dst_mac, src=src_mac) /
                         IPv6(src=src_ip6, dst=dst_ip6) /
                         ulp /
                         Raw(payload))
                else:
                    ulp_l4 = UDP(sport=src_l4, dport=dst_l4)
                    # IPv4 does not allow extension headers,
                    # but we rather make it a first fragment
                    flags = 1 if add_extension_header else 0
                    ulp = ulp_l4
                    p = (Ether(dst=dst_mac, src=src_mac) /
                         IP(src=src_ip4, dst=dst_ip4, frag=0, flags=flags) /
                         ulp /
                         Raw(payload))
            elif modulo == 1:
                if is_ip6:
                    ulp_l4 = ICMPv6Unknown(type=128 + icmp_type_delta,
                                           code=icmp_code)
                    ulp = ulp_l4
                    p = (Ether(dst=dst_mac, src=src_mac) /
                         IPv6(src=src_ip6, dst=dst_ip6) /
                         ulp /
                         Raw(payload))
                else:
                    ulp_l4 = ICMP(type=8 - 8*icmp_type_delta, code=icmp_code)
                    ulp = ulp_l4
                    p = (Ether(dst=dst_mac, src=src_mac) /
                         IP(src=src_ip4, dst=dst_ip4) /
                         ulp /
                         Raw(payload))
            if i % 2 == 1:
                # keep a copy of the permitted packet for verify_capture()
                info.data = p.copy()
            size = packet_sizes[(i // 2) % len(packet_sizes)]
            self.extend_packet(p, size)
            pkts.append(p)
            # derive an exact-match 5-tuple ACL rule from the packet
            rule_family = AF_INET6 if p.haslayer(IPv6) else AF_INET
            rule_prefix_len = 128 if p.haslayer(IPv6) else 32
            rule_l3_layer = IPv6 if p.haslayer(IPv6) else IP
            if p.haslayer(UDP):
                rule_l4_sport = p[UDP].sport
                rule_l4_dport = p[UDP].dport
            else:
                if p.haslayer(ICMP):
                    rule_l4_sport = p[ICMP].type
                    rule_l4_dport = p[ICMP].code
                else:
                    rule_l4_sport = p[ICMPv6Unknown].type
                    rule_l4_dport = p[ICMPv6Unknown].code
            if p.haslayer(IPv6):
                rule_l4_proto = ulp_l4.overload_fields[IPv6]['nh']
            else:
                rule_l4_proto = p[IP].proto
            new_rule = {
                'is_permit': is_permit,
                'is_ipv6': p.haslayer(IPv6),
                'src_ip_addr': inet_pton(rule_family,
                                         p[rule_l3_layer].src),
                'src_ip_prefix_len': rule_prefix_len,
                'dst_ip_addr': inet_pton(rule_family,
                                         p[rule_l3_layer].dst),
                'dst_ip_prefix_len': rule_prefix_len,
                'srcport_or_icmptype_first': rule_l4_sport,
                'srcport_or_icmptype_last': rule_l4_sport,
                'dstport_or_icmpcode_first': rule_l4_dport,
                'dstport_or_icmpcode_last': rule_l4_dport,
                'proto': rule_l4_proto,
            }
            rules.append(new_rule)
            new_rule_permit = new_rule.copy()
            new_rule_permit['is_permit'] = 1
            permit_rules.append(new_rule_permit)
            new_rule_permit_and_reflect = new_rule.copy()
            if can_reflect_this_packet:
                new_rule_permit_and_reflect['is_permit'] = 2
            else:
                new_rule_permit_and_reflect['is_permit'] = is_permit
            permit_and_reflect_rules.append(new_rule_permit_and_reflect)
            self.logger.info("create_stream pkt#%d: %s" % (i, payload))
        return {'stream': pkts,
                'rules': rules,
                'permit_rules': permit_rules,
                'permit_and_reflect_rules': permit_and_reflect_rules}
    def verify_capture(self, dst_ip_if, src_ip_if, capture, reverse):
        """Verify captured packets against the expected-packet database.

        Each payload carries an info index stamped by create_stream()
        (via info_to_payload); packets are matched per source interface
        against the saved copies: IP src/dst always, L4 ports for UDP.

        :param dst_ip_if: interface whose sw_if_index the payloads carry
            as destination (loop0 in these tests).
        :param src_ip_if: kept for signature symmetry; unused below.
        :param capture: list of captured scapy packets.
        :param reverse: True for the bridged -> routed direction; the
            source-MAC check is skipped in that case.
        """
        # per-source-interface cursor into the expected-packet sequence
        last_info = dict()
        for i in self.interfaces:
            last_info[i.sw_if_index] = None
        dst_ip_sw_if_index = dst_ip_if.sw_if_index
        for packet in capture:
            # classify the packet: L3 layer, then L4 layer
            l3 = IP if packet.haslayer(IP) else IPv6
            ip = packet[l3]
            if packet.haslayer(UDP):
                l4 = UDP
            else:
                if packet.haslayer(ICMP):
                    l4 = ICMP
                else:
                    l4 = ICMPv6Unknown
            # Scapy IPv6 stuff is too smart for its own good.
            # So we do this and coerce the ICMP into unknown type
            if packet.haslayer(UDP):
                data = scapy.compat.raw(packet[UDP][Raw])
            else:
                if l3 == IP:
                    data = scapy.compat.raw(ICMP(
                        scapy.compat.raw(packet[l3].payload))[Raw])
                else:
                    data = scapy.compat.raw(ICMPv6Unknown(
                        scapy.compat.raw(packet[l3].payload)).msgbody)
            udp_or_icmp = packet[l3].payload
            data_obj = Raw(data)
            # FIXME: make framework believe we are on object
            payload_info = self.payload_to_info(data_obj)
            packet_index = payload_info.index
            self.assertEqual(payload_info.dst, dst_ip_sw_if_index)
            # packets must arrive in the order they were created per source
            next_info = self.get_next_packet_info_for_interface2(
                payload_info.src, dst_ip_sw_if_index,
                last_info[payload_info.src])
            last_info[payload_info.src] = next_info
            self.assertTrue(next_info is not None)
            self.assertEqual(packet_index, next_info.index)
            saved_packet = next_info.data
            self.assertTrue(next_info is not None)
            # MAC: src, dst
            if not reverse:
                self.assertEqual(packet.src, dst_ip_if.local_mac)
                host = dst_ip_if.host_by_mac(packet.dst)
            # IP: src, dst
            # self.assertEqual(ip.src, src_ip_if.remote_ip4)
            if saved_packet is not None:
                self.assertEqual(ip.src, saved_packet[l3].src)
                self.assertEqual(ip.dst, saved_packet[l3].dst)
                if l4 == UDP:
                    self.assertEqual(udp_or_icmp.sport, saved_packet[l4].sport)
                    self.assertEqual(udp_or_icmp.dport, saved_packet[l4].dport)
            # self.assertEqual(ip.dst, host.ip4)
            # UDP:
def applied_acl_shuffle(self, sw_if_index):
# first collect what ACLs are applied and what they look like
r = self.vapi.acl_interface_list_dump(sw_if_index=sw_if_index)
orig_applied_acls = r[0]
# we will collect these just to save and generate additional rulesets
orig_acls = []
for acl_num in orig_applied_acls.acls:
rr = self.vapi.acl_dump(acl_num)
orig_acls.append(rr[0])
# now create a list of all the rules in all ACLs
all_rules = []
for old_acl in orig_acls:
for rule in old_acl.r:
all_rules.append(dict(rule._asdict()))
# Add a few ACLs made from shuffled rules
shuffle(all_rules)
reply = self.vapi.acl_add_replace(acl_index=4294967295,
r=all_rules[::2],
tag=b"shuffle 1. acl")
shuffle_acl_1 = reply.acl_index
shuffle(all_rules)
reply = self.vapi.acl_add_replace(acl_index=4294967295,
r=all_rules[::3],
tag=b"shuffle 2. acl")
shuffle_acl_2 = reply.acl_index
shuffle(all_rules)
reply = self.vapi.acl_add_replace(acl_index=4294967295,
r=all_rules[::2],
tag=b"shuffle 3. acl")
shuffle_acl_3 = reply.acl_index
# apply the shuffle ACLs in front
input_acls = [shuffle_acl_1, shuffle_acl_2]
output_acls = [shuffle_acl_1, shuffle_acl_2]
# add the currently applied ACLs
n_input = orig_applied_acls.n_input
input_acls.extend(orig_applied_acls.acls[:n_input])
output_acls.extend(orig_applied_acls.acls[n_input:])
# and the trailing shuffle ACL(s)
input_acls.extend([shuffle_acl_3])
output_acls.extend([shuffle_acl_3])
# set the interface ACL list to the result
self.vapi.acl_interface_set_acl_list(sw_if_index=sw_if_index,
n_input=len(input_acls),
acls=input_acls + output_acls)
# change the ACLs a few times
for i in range(1, 10):
shuffle(all_rules)
reply = self.vapi.acl_add_replace(acl_index=shuffle_acl_1,
r=all_rules[::1+(i % 2)],
tag=b"shuffle 1. acl")
shuffle(all_rules)
reply = self.vapi.acl_add_replace(acl_index=shuffle_acl_2,
r=all_rules[::1+(i % 3)],
tag=b"shuffle 2. acl")
shuffle(all_rules)
reply = self.vapi.acl_add_replace(acl_index=shuffle_acl_2,
r=all_rules[::1+(i % 5)],
tag=b"shuffle 3. acl")
# restore to how it was before and clean up
self.vapi.acl_interface_set_acl_list(sw_if_index=sw_if_index,
n_input=orig_applied_acls.n_input,
acls=orig_applied_acls.acls)
reply = self.vapi.acl_del(acl_index=shuffle_acl_1)
reply = self.vapi.acl_del(acl_index=shuffle_acl_2)
reply = self.vapi.acl_del(acl_index=shuffle_acl_3)
def create_acls_for_a_stream(self, stream_dict,
test_l2_action, is_reflect):
r = stream_dict['rules']
r_permit = stream_dict['permit_rules']
r_permit_reflect = stream_dict['permit_and_reflect_rules']
r_action = r_permit_reflect if is_reflect else r
reply = self.vapi.acl_add_replace(acl_index=4294967295, r=r_action,
tag=b"act. acl")
action_acl_index = reply.acl_index
reply = self.vapi.acl_add_replace(acl_index=4294967295, r=r_permit,
tag=b"perm. acl")
permit_acl_index = reply.acl_index
return {'L2': action_acl_index if test_l2_action else permit_acl_index,
'L3': permit_acl_index if test_l2_action else action_acl_index,
'permit': permit_acl_index, 'action': action_acl_index}
def apply_acl_ip46_x_to_y(self, bridged_to_routed, test_l2_deny,
is_ip6, is_reflect, add_eh):
""" Apply the ACLs
"""
self.reset_packet_infos()
stream_dict = self.create_stream(
self.pg2, self.loop0,
bridged_to_routed,
self.pg_if_packet_sizes, is_ip6,
not is_reflect, False, add_eh)
stream = stream_dict['stream']
acl_idx = self.create_acls_for_a_stream(stream_dict, test_l2_deny,
is_reflect)
n_input_l3 = 0 if bridged_to_routed else 1
n_input_l2 = 1 if bridged_to_routed else 0
self.vapi.acl_interface_set_acl_list(sw_if_index=self.pg2.sw_if_index,
n_input=n_input_l3,
acls=[acl_idx['L3']])
self.vapi.acl_interface_set_acl_list(sw_if_index=self.pg0.sw_if_index,
n_input=n_input_l2,
acls=[acl_idx['L2']])
self.vapi.acl_interface_set_acl_list(sw_if_index=self.pg1.sw_if_index,
n_input=n_input_l2,
acls=[acl_idx['L2']])
self.applied_acl_shuffle(self.pg0.sw_if_index)
self.applied_acl_shuffle(self.pg2.sw_if_index)
def apply_acl_ip46_both_directions_reflect(self,
primary_is_bridged_to_routed,
reflect_on_l2, is_ip6, add_eh,
stateful_icmp):
primary_is_routed_to_bridged = not primary_is_bridged_to_routed
self.reset_packet_infos()
stream_dict_fwd = self.create_stream(self.pg2, self.loop0,
primary_is_bridged_to_routed,
self.pg_if_packet_sizes, is_ip6,
False, False, add_eh,
stateful_icmp)
acl_idx_fwd = self.create_acls_for_a_stream(stream_dict_fwd,
reflect_on_l2, True)
stream_dict_rev = self.create_stream(self.pg2, self.loop0,
not primary_is_bridged_to_routed,
self.pg_if_packet_sizes, is_ip6,
True, True, add_eh, stateful_icmp)
# We want the primary action to be "deny" rather than reflect
acl_idx_rev = self.create_acls_for_a_stream(stream_dict_rev,
reflect_on_l2, False)
if primary_is_bridged_to_routed:
inbound_l2_acl = acl_idx_fwd['L2']
else:
inbound_l2_acl = acl_idx_rev['L2']
if primary_is_routed_to_bridged:
outbound_l2_acl = acl_idx_fwd['L2']
else:
outbound_l2_acl = acl_idx_rev['L2']
if primary_is_routed_to_bridged:
inbound_l3_acl = acl_idx_fwd['L3']
else:
inbound_l3_acl = acl_idx_rev['L3']
if primary_is_bridged_to_routed:
outbound_l3_acl = acl_idx_fwd['L3']
else:
outbound_l3_acl = acl_idx_rev['L3']
self.vapi.acl_interface_set_acl_list(sw_if_index=self.pg2.sw_if_index,
n_input=1,
acls=[inbound_l3_acl,
outbound_l3_acl])
self.vapi.acl_interface_set_acl_list(sw_if_index=self.pg0.sw_if_index,
n_input=1,
acls=[inbound_l2_acl,
outbound_l2_acl])
self.vapi.acl_interface_set_acl_list(sw_if_index=self.pg1.sw_if_index,
n_input=1,
acls=[inbound_l2_acl,
outbound_l2_acl])
self.applied_acl_shuffle(self.pg0.sw_if_index)
self.applied_acl_shuffle(self.pg2.sw_if_index)
    def apply_acl_ip46_routed_to_bridged(self, test_l2_deny, is_ip6,
                                         is_reflect, add_eh):
        """Convenience wrapper: apply ACLs, routed -> bridged direction."""
        self.apply_acl_ip46_x_to_y(False, test_l2_deny, is_ip6,
                                   is_reflect, add_eh)
    def apply_acl_ip46_bridged_to_routed(self, test_l2_deny, is_ip6,
                                         is_reflect, add_eh):
        """Convenience wrapper: apply ACLs, bridged -> routed direction."""
        self.apply_acl_ip46_x_to_y(True, test_l2_deny, is_ip6,
                                   is_reflect, add_eh)
def run_traffic_ip46_x_to_y(self, bridged_to_routed,
test_l2_deny, is_ip6,
is_reflect, is_established, add_eh,
stateful_icmp=False):
self.reset_packet_infos()
stream_dict = self.create_stream(self.pg2, self.loop0,
bridged_to_routed,
self.pg_if_packet_sizes, is_ip6,
not is_reflect, is_established,
add_eh, stateful_icmp)
stream = stream_dict['stream']
tx_if = self.pg0 if bridged_to_routed else self.pg2
rx_if = self.pg2 if bridged_to_routed else self.pg0
tx_if.add_stream(stream)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
packet_count = self.get_packet_count_for_if_idx(self.loop0.sw_if_index)
rcvd1 = rx_if.get_capture(packet_count)
self.verify_capture(self.loop0, self.pg2, rcvd1, bridged_to_routed)
def run_traffic_ip46_routed_to_bridged(self, test_l2_deny, is_ip6,
is_reflect, is_established, add_eh,
stateful_icmp=False):
self.run_traffic_ip46_x_to_y(False, test_l2_deny, is_ip6,
is_reflect, is_established, add_eh,
stateful_icmp)
def run_traffic_ip46_bridged_to_routed(self, test_l2_deny, is_ip6,
is_reflect, is_established, add_eh,
stateful_icmp=False):
self.run_traffic_ip46_x_to_y(True, test_l2_deny, is_ip6,
is_reflect, is_established, add_eh,
stateful_icmp)
def run_test_ip46_routed_to_bridged(self, test_l2_deny,
is_ip6, is_reflect, add_eh):
self.apply_acl_ip46_routed_to_bridged(test_l2_deny,
is_ip6, is_reflect, add_eh)
self.run_traffic_ip46_routed_to_bridged(test_l2_deny, is_ip6,
is_reflect, False, add_eh)
def run_test_ip46_bridged_to_routed(self, test_l2_deny,
is_ip6, is_reflect, add_eh):
self.apply_acl_ip46_bridged_to_routed(test_l2_deny,
is_ip6, is_reflect, add_eh)
self.run_traffic_ip46_bridged_to_routed(test_l2_deny, is_ip6,
is_reflect, False, add_eh)
def run_test_ip46_routed_to_bridged_and_back(self, test_l2_action,
is_ip6, add_eh,
stateful_icmp=False):
self.apply_acl_ip46_both_directions_reflect(False, test_l2_action,
is_ip6, add_eh,
stateful_icmp)
self.run_traffic_ip46_routed_to_bridged(test_l2_action, is_ip6,
True, False, add_eh,
stateful_icmp)
self.run_traffic_ip46_bridged_to_routed(test_l2_action, is_ip6,
False, True, add_eh,
stateful_icmp)
def run_test_ip46_bridged_to_routed_and_back(self, test_l2_action,
is_ip6, add_eh,
stateful_icmp=False):
self.apply_acl_ip46_both_directions_reflect(True, test_l2_action,
is_ip6, add_eh,
stateful_icmp)
self.run_traffic_ip46_bridged_to_routed(test_l2_action, is_ip6,
True, False, add_eh,
stateful_icmp)
self.run_traffic_ip46_routed_to_bridged(test_l2_action, is_ip6,
False, True, add_eh,
stateful_icmp)
def test_0000_ip6_irb_1(self):
""" ACL plugin prepare"""
if not self.vpp_dead:
cmd = "set acl-plugin session timeout udp idle 2000"
self.logger.info(self.vapi.ppcli(cmd))
# uncomment to not skip past the routing header
# and watch the EH tests fail
# self.logger.info(self.vapi.ppcli(
# "set acl-plugin skip-ipv6-extension-header 43 0"))
# uncomment to test the session limit (stateful tests will fail)
# self.logger.info(self.vapi.ppcli(
# "set acl-plugin session table max-entries 1"))
# new datapath is the default, but just in case
# self.logger.info(self.vapi.ppcli(
# "set acl-plugin l2-datapath new"))
# If you want to see some tests fail, uncomment the next line
# self.logger.info(self.vapi.ppcli(
# "set acl-plugin l2-datapath old"))
def test_0001_ip6_irb_1(self):
""" ACL IPv6 routed -> bridged, L2 ACL deny"""
self.run_test_ip46_routed_to_bridged(True, True, False,
self.WITHOUT_EH)
def test_0002_ip6_irb_1(self):
""" ACL IPv6 routed -> bridged, L3 ACL deny"""
self.run_test_ip46_routed_to_bridged(False, True, False,
self.WITHOUT_EH)
def test_0003_ip4_irb_1(self):
""" ACL IPv4 routed -> bridged, L2 ACL deny"""
self.run_test_ip46_routed_to_bridged(True, False, False,
self.WITHOUT_EH)
def test_0004_ip4_irb_1(self):
""" ACL IPv4 routed -> bridged, L3 ACL deny"""
self.run_test_ip46_routed_to_bridged(False, False, False,
self.WITHOUT_EH)
def test_0005_ip6_irb_1(self):
""" ACL IPv6 bridged -> routed, L2 ACL deny """
self.run_test_ip46_bridged_to_routed(True, True, False,
self.WITHOUT_EH)
def test_0006_ip6_irb_1(self):
""" ACL IPv6 bridged -> routed, L3 ACL deny """
self.run_test_ip46_bridged_to_routed(False, True, False,
self.WITHOUT_EH)
def test_0007_ip6_irb_1(self):
""" ACL IPv4 bridged -> routed, L2 ACL deny """
self.run_test_ip46_bridged_to_routed(True, False, False,
self.WITHOUT_EH)
def test_0008_ip6_irb_1(self):
""" ACL IPv4 bridged -> routed, L3 ACL deny """
self.run_test_ip46_bridged_to_routed(False, False, False,
self.WITHOUT_EH)
# Stateful ACL tests
def test_0101_ip6_irb_1(self):
""" ACL IPv6 routed -> bridged, L2 ACL permit+reflect"""
self.run_test_ip46_routed_to_bridged_and_back(True, True,
self.WITHOUT_EH)
def test_0102_ip6_irb_1(self):
""" ACL IPv6 bridged -> routed, L2 ACL permit+reflect"""
self.run_test_ip46_bridged_to_routed_and_back(True, True,
self.WITHOUT_EH)
def test_0103_ip6_irb_1(self):
""" ACL IPv4 routed -> bridged, L2 ACL permit+reflect"""
self.run_test_ip46_routed_to_bridged_and_back(True, False,
self.WITHOUT_EH)
def test_0104_ip6_irb_1(self):
""" ACL IPv4 bridged -> routed, L2 ACL permit+reflect"""
self.run_test_ip46_bridged_to_routed_and_back(True, False,
self.WITHOUT_EH)
def test_0111_ip6_irb_1(self):
""" ACL IPv6 routed -> bridged, L3 ACL permit+reflect"""
self.run_test_ip46_routed_to_bridged_and_back(False, True,
self.WITHOUT_EH)
def test_0112_ip6_irb_1(self):
""" ACL IPv6 bridged -> routed, L3 ACL permit+reflect"""
self.run_test_ip46_bridged_to_routed_and_back(False, True,
self.WITHOUT_EH)
def test_0113_ip6_irb_1(self):
""" ACL IPv4 routed -> bridged, L3 ACL permit+reflect"""
self.run_test_ip46_routed_to_bridged_and_back(False, False,
self.WITHOUT_EH)
def test_0114_ip6_irb_1(self):
""" ACL IPv4 bridged -> routed, L3 ACL permit+reflect"""
self.run_test_ip46_bridged_to_routed_and_back(False, False,
self.WITHOUT_EH)
# A block of tests with extension headers
def test_1001_ip6_irb_1(self):
""" ACL IPv6+EH routed -> bridged, L2 ACL deny"""
self.run_test_ip46_routed_to_bridged(True, True, False,
self.WITH_EH)
def test_1002_ip6_irb_1(self):
""" ACL IPv6+EH routed -> bridged, L3 ACL deny"""
self.run_test_ip46_routed_to_bridged(False, True, False,
self.WITH_EH)
def test_1005_ip6_irb_1(self):
""" ACL IPv6+EH bridged -> routed, L2 ACL deny """
self.run_test_ip46_bridged_to_routed(True, True, False,
self.WITH_EH)
def test_1006_ip6_irb_1(self):
""" ACL IPv6+EH bridged -> routed, L3 ACL deny """
self.run_test_ip46_bridged_to_routed(False, True, False,
self.WITH_EH)
def test_1101_ip6_irb_1(self):
""" ACL IPv6+EH routed -> bridged, L2 ACL permit+reflect"""
self.run_test_ip46_routed_to_bridged_and_back(True, True,
self.WITH_EH)
def test_1102_ip6_irb_1(self):
""" ACL IPv6+EH bridged -> routed, L2 ACL permit+reflect"""
self.run_test_ip46_bridged_to_routed_and_back(True, True,
self.WITH_EH)
def test_1111_ip6_irb_1(self):
""" ACL IPv6+EH routed -> bridged, L3 ACL permit+reflect"""
self.run_test_ip46_routed_to_bridged_and_back(False, True,
self.WITH_EH)
def test_1112_ip6_irb_1(self):
""" ACL IPv6+EH bridged -> routed, L3 ACL permit+reflect"""
self.run_test_ip46_bridged_to_routed_and_back(False, True,
self.WITH_EH)
# IPv4 with "MF" bit set
def test_1201_ip6_irb_1(self):
""" ACL IPv4+MF routed -> bridged, L2 ACL deny"""
self.run_test_ip46_routed_to_bridged(True, False, False,
self.WITH_EH)
def test_1202_ip6_irb_1(self):
""" ACL IPv4+MF routed -> bridged, L3 ACL deny"""
self.run_test_ip46_routed_to_bridged(False, False, False,
self.WITH_EH)
def test_1205_ip6_irb_1(self):
""" ACL IPv4+MF bridged -> routed, L2 ACL deny """
self.run_test_ip46_bridged_to_routed(True, False, False,
self.WITH_EH)
def test_1206_ip6_irb_1(self):
""" ACL IPv4+MF bridged -> routed, L3 ACL deny """
self.run_test_ip46_bridged_to_routed(False, False, False,
self.WITH_EH)
def test_1301_ip6_irb_1(self):
""" ACL IPv4+MF routed -> bridged, L2 ACL permit+reflect"""
self.run_test_ip46_routed_to_bridged_and_back(True, False,
self.WITH_EH)
def test_1302_ip6_irb_1(self):
""" ACL IPv4+MF bridged -> routed, L2 ACL permit+reflect"""
self.run_test_ip46_bridged_to_routed_and_back(True, False,
self.WITH_EH)
def test_1311_ip6_irb_1(self):
""" ACL IPv4+MF routed -> bridged, L3 ACL permit+reflect"""
self.run_test_ip46_routed_to_bridged_and_back(False, False,
self.WITH_EH)
def test_1312_ip6_irb_1(self):
""" ACL IPv4+MF bridged -> routed, L3 ACL permit+reflect"""
self.run_test_ip46_bridged_to_routed_and_back(False, False,
self.WITH_EH)
# Stateful ACL tests with stateful ICMP
def test_1401_ip6_irb_1(self):
""" IPv6 routed -> bridged, L2 ACL permit+reflect, ICMP reflect"""
self.run_test_ip46_routed_to_bridged_and_back(True, True,
self.WITHOUT_EH,
self.STATEFUL_ICMP)
def test_1402_ip6_irb_1(self):
""" IPv6 bridged -> routed, L2 ACL permit+reflect, ICMP reflect"""
self.run_test_ip46_bridged_to_routed_and_back(True, True,
self.WITHOUT_EH,
self.STATEFUL_ICMP)
def test_1403_ip4_irb_1(self):
""" IPv4 routed -> bridged, L2 ACL permit+reflect, ICMP reflect"""
self.run_test_ip46_routed_to_bridged_and_back(True, False,
self.WITHOUT_EH,
self.STATEFUL_ICMP)
def test_1404_ip4_irb_1(self):
""" IPv4 bridged -> routed, L2 ACL permit+reflect, ICMP reflect"""
self.run_test_ip46_bridged_to_routed_and_back(True, False,
self.WITHOUT_EH,
self.STATEFUL_ICMP)
def test_1411_ip6_irb_1(self):
""" IPv6 routed -> bridged, L3 ACL permit+reflect, ICMP reflect"""
self.run_test_ip46_routed_to_bridged_and_back(False, True,
self.WITHOUT_EH,
self.STATEFUL_ICMP)
def test_1412_ip6_irb_1(self):
""" IPv6 bridged -> routed, L3 ACL permit+reflect, ICMP reflect"""
self.run_test_ip46_bridged_to_routed_and_back(False, True,
self.WITHOUT_EH,
self.STATEFUL_ICMP)
def test_1413_ip4_irb_1(self):
""" IPv4 routed -> bridged, L3 ACL permit+reflect, ICMP reflect"""
self.run_test_ip46_routed_to_bridged_and_back(False, False,
self.WITHOUT_EH,
self.STATEFUL_ICMP)
def test_1414_ip4_irb_1(self):
""" IPv4 bridged -> routed, L3 ACL permit+reflect, ICMP reflect"""
self.run_test_ip46_bridged_to_routed_and_back(False, False,
self.WITHOUT_EH,
self.STATEFUL_ICMP)
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
| en | 0.727608 | #!/usr/bin/env python ACL IRB Test Case HLD: **config** - L2 MAC learning enabled in l2bd - 2 routed interfaces untagged, bvi (Bridge Virtual Interface) - 2 bridged interfaces in l2bd with bvi **test** - sending ip4 eth pkts between routed interfaces - 2 routed interfaces - 2 bridged interfaces - 64B, 512B, 1518B, 9200B (ether_size) - burst of pkts per interface - 257pkts per burst - routed pkts hitting different FIB entries - bridged pkts hitting different MAC entries **verify** - all packets received correctly TestACLpluginL2L3 Test Case #. Create BD with MAC learning enabled and put interfaces to this BD. #. Configure IPv4 addresses on loopback interface and routed interface. #. Configure MAC address binding to IPv4 neighbors on loop0. #. Configure MAC address on pg2. #. Loopback BVI interface has remote hosts, one half of hosts are behind pg0 second behind pg1. # packet sizes # create 3 pg interfaces, 1 loopback interface # Create BD with MAC learning enabled and put interfaces to this BD # Configure IPv4 addresses on loopback interface and routed interface # Configure MAC address binding to IPv4 neighbors on loop0 # configure MAC address on pg2 # Loopback BVI interface has remote hosts, one half of hosts are behind # pg0 second behind pg1 Run standard test teardown and log ``show l2patch``, ``show l2fib verbose``,``show bridge-domain <bd_id> detail``, ``show ip arp``. # default ULP should be something we do not use in tests # potentially a chain of protocols leading to ULP # prepend some extension headers # uncomment below to test invalid ones # ulp = IPv6ExtHdrRouting(len = 200) / ulp_l4 # IPv4 does not allow extension headers, # but we rather make it a first fragment #%d: %s" % (i, payload)) # Scapy IPv6 stuff is too smart for its own good. 
# So we do this and coerce the ICMP into unknown type # FIXME: make framework believe we are on object # MAC: src, dst # IP: src, dst # self.assertEqual(ip.src, src_ip_if.remote_ip4) # self.assertEqual(ip.dst, host.ip4) # UDP: # first collect what ACLs are applied and what they look like # we will collect these just to save and generate additional rulesets # now create a list of all the rules in all ACLs # Add a few ACLs made from shuffled rules # apply the shuffle ACLs in front # add the currently applied ACLs # and the trailing shuffle ACL(s) # set the interface ACL list to the result # change the ACLs a few times # restore to how it was before and clean up Apply the ACLs # We want the primary action to be "deny" rather than reflect ACL plugin prepare # uncomment to not skip past the routing header # and watch the EH tests fail # self.logger.info(self.vapi.ppcli( # "set acl-plugin skip-ipv6-extension-header 43 0")) # uncomment to test the session limit (stateful tests will fail) # self.logger.info(self.vapi.ppcli( # "set acl-plugin session table max-entries 1")) # new datapath is the default, but just in case # self.logger.info(self.vapi.ppcli( # "set acl-plugin l2-datapath new")) # If you want to see some tests fail, uncomment the next line # self.logger.info(self.vapi.ppcli( # "set acl-plugin l2-datapath old")) ACL IPv6 routed -> bridged, L2 ACL deny ACL IPv6 routed -> bridged, L3 ACL deny ACL IPv4 routed -> bridged, L2 ACL deny ACL IPv4 routed -> bridged, L3 ACL deny ACL IPv6 bridged -> routed, L2 ACL deny ACL IPv6 bridged -> routed, L3 ACL deny ACL IPv4 bridged -> routed, L2 ACL deny ACL IPv4 bridged -> routed, L3 ACL deny # Stateful ACL tests ACL IPv6 routed -> bridged, L2 ACL permit+reflect ACL IPv6 bridged -> routed, L2 ACL permit+reflect ACL IPv4 routed -> bridged, L2 ACL permit+reflect ACL IPv4 bridged -> routed, L2 ACL permit+reflect ACL IPv6 routed -> bridged, L3 ACL permit+reflect ACL IPv6 bridged -> routed, L3 ACL permit+reflect ACL IPv4 routed -> 
bridged, L3 ACL permit+reflect ACL IPv4 bridged -> routed, L3 ACL permit+reflect # A block of tests with extension headers ACL IPv6+EH routed -> bridged, L2 ACL deny ACL IPv6+EH routed -> bridged, L3 ACL deny ACL IPv6+EH bridged -> routed, L2 ACL deny ACL IPv6+EH bridged -> routed, L3 ACL deny ACL IPv6+EH routed -> bridged, L2 ACL permit+reflect ACL IPv6+EH bridged -> routed, L2 ACL permit+reflect ACL IPv6+EH routed -> bridged, L3 ACL permit+reflect ACL IPv6+EH bridged -> routed, L3 ACL permit+reflect # IPv4 with "MF" bit set ACL IPv4+MF routed -> bridged, L2 ACL deny ACL IPv4+MF routed -> bridged, L3 ACL deny ACL IPv4+MF bridged -> routed, L2 ACL deny ACL IPv4+MF bridged -> routed, L3 ACL deny ACL IPv4+MF routed -> bridged, L2 ACL permit+reflect ACL IPv4+MF bridged -> routed, L2 ACL permit+reflect ACL IPv4+MF routed -> bridged, L3 ACL permit+reflect ACL IPv4+MF bridged -> routed, L3 ACL permit+reflect # Stateful ACL tests with stateful ICMP IPv6 routed -> bridged, L2 ACL permit+reflect, ICMP reflect IPv6 bridged -> routed, L2 ACL permit+reflect, ICMP reflect IPv4 routed -> bridged, L2 ACL permit+reflect, ICMP reflect IPv4 bridged -> routed, L2 ACL permit+reflect, ICMP reflect IPv6 routed -> bridged, L3 ACL permit+reflect, ICMP reflect IPv6 bridged -> routed, L3 ACL permit+reflect, ICMP reflect IPv4 routed -> bridged, L3 ACL permit+reflect, ICMP reflect IPv4 bridged -> routed, L3 ACL permit+reflect, ICMP reflect | 2.118922 | 2 |
py_env/lib/python3.4/site-packages/py_library/__init__.py | saurabhranem/saurabh_library | 0 | 6624988 | name = "py_advanced_search_list" | name = "py_advanced_search_list" | none | 1 | 1.107247 | 1 | |
wwara_gd77.py | ajorg/WWARA | 0 | 6624989 | <reponame>ajorg/WWARA
#!/usr/bin/env python
"""Converts a WWARA database dump to GB3GF CSV format for GD-77."""
import codecs
import logging
from csv import DictReader, DictWriter
from decimal import Decimal
from io import BytesIO, StringIO
from sys import stdout, stderr
from zipfile import ZipFile
LOG = logging.getLogger(__name__)
FIELDNAMES = (
"Channel Number",
"Channel Name",
"Channel Type",
"Rx Frequency",
"Tx Frequency",
"Color Code",
"Timeslot",
"Contact",
"Rx Group",
"Scanlist",
"RX CTCSS",
"TX CTCSS",
"Power",
"Bandwidth",
"Rx Only",
"Squelch",
"Skip",
"Tx Admit",
"TOT",
"TOT Rekey",
"Tx Signaling",
"Rx Signaling",
"Privacy Group",
"Emergency System",
"Flags1",
"Flags2",
"Flags3",
"Flags4",
"Unknown25",
"Unknown26",
"Unknown30",
"Unknown36",
"Unknown38",
"Unknown40",
"Unknown52",
"Unknown53",
"Unknown54",
"Unknown55",
)
def _drop_decimals(decimal):
"""Decimal.normalize gives 2E+1 for 20..."""
decimal = str(decimal)
if "." in decimal:
decimal = decimal.rstrip("0").rstrip(".")
return decimal
def _supported(row):
"""Checks if the mode is supported."""
if "Y" not in (row["DMR"], row["FM_WIDE"], row["FM_NARROW"]):
return False
ifreq = Decimal(row["INPUT_FREQ"])
if ifreq > 144 and ifreq < 148:
# 2M
return True
if ifreq > 420 and ifreq < 450:
# 70CM
return True
return False
def _channel_name(row, prefix="", suffix=""):
"""Formats a usable name for the repeater."""
length = 16 - len(prefix)
name = prefix + " ".join((row["CALL"], row["CITY"]))[:length]
if suffix:
length = 16 - len(suffix)
name = ("{:%d.%d}" % (length, length)).format(name) + suffix
return name
def _channel_type(row):
"""Converts the mode per WWARA to the Channel Type per GD-77"""
mode = "Analogue"
if row["DMR"] == "Y":
mode = "Digital"
elif row["FM_WIDE"] == "Y":
mode = "Analogue"
elif row["FM_NARROW"] == "Y":
mode = "Analogue"
return mode
def _color_code(row):
"""Returns the DMR Color Code from the WWARA record"""
color_code = row.get("DMR_COLOR_CODE").lstrip("CC")
return color_code or "0"
def _rx_ctcss(row):
rx_ctcss = row.get("CTCSS_OUT")
if not rx_ctcss:
rx_ctcss = "None"
return rx_ctcss
def _tx_ctcss(row):
tx_ctcss = row.get("CTCSS_IN")
if not tx_ctcss:
tx_ctcss = "None"
return tx_ctcss
def _bandwidth(row):
if row.get("FM_WIDE", "N") == "Y":
return "25KHz"
elif row.get("FM_NARROW", "N") == "Y":
return "12.5KHz"
def _entry(row, channel_number, prefix="", suffix="", timeslot=None):
channel_name = _channel_name(row, prefix, suffix)
channel_type = _channel_type(row)
rx_frequency = _drop_decimals(row["OUTPUT_FREQ"])
tx_frequency = _drop_decimals(row["INPUT_FREQ"])
color_code = _color_code(row)
rx_ctcss = _rx_ctcss(row)
tx_ctcss = _tx_ctcss(row)
bandwidth = _bandwidth(row)
return {
"Channel Number": channel_number,
"Channel Name": channel_name,
"Channel Type": channel_type,
"Rx Frequency": rx_frequency,
"Tx Frequency": tx_frequency,
"Color Code": color_code,
"Timeslot": timeslot,
"Contact": "None",
"Rx Group": "None",
"Scanlist": "None",
"RX CTCSS": rx_ctcss,
"TX CTCSS": tx_ctcss,
"Power": "High",
"Bandwidth": bandwidth,
"Rx Only": "No",
"Squelch": "Normal",
"Skip": "No",
}
def _order(name):
if "-pending-" in name:
return -3
elif "-rptrlist-" in name:
return -4
elif "-About2Expire-" in name:
return -2
elif "-Expired-" in name:
return -1
return 0
def convert(zipfile):
"""Converts a WWARA zipfile."""
wlist = []
channel_number = 0
for name in sorted(zipfile.namelist(), key=_order):
if name.endswith(".csv"):
print(name, file=stderr)
prefix = ""
if "-pending-" in name:
prefix = "+"
elif "-About2Expire-" in name:
prefix = "-"
elif "-Expired-" in name:
prefix = "!"
with zipfile.open(name, "r") as csv:
# Remove the DATA_SPEC_VERSION header line from the .csv
csv.readline()
for row in DictReader(codecs.getreader("us-ascii")(csv)):
if not _supported(row):
continue
channel_number += 1
if row.get("DMR") == "Y":
timeslot = 1
wlist.append(
_entry(
row,
channel_number,
prefix,
" " + str(timeslot),
timeslot,
)
)
channel_number += 1
timeslot = 2
wlist.append(
_entry(
row,
channel_number,
prefix,
" " + str(timeslot),
timeslot,
)
)
elif "Y" in (row.get("FM_WIDE"), row.get("FM_NARROW")):
wlist.append(_entry(row, channel_number, prefix))
return wlist
return sorted(wlist, key=lambda x: (x["Channel Type"], Decimal(x["Rx Frequency"])))
if __name__ == "__main__":
from urllib.request import urlopen
with urlopen("https://www.wwara.org/DataBaseExtract.zip") as RESPONSE:
# ZipFile requires a file-like object that supports seek
FILE_OBJ = BytesIO(RESPONSE.read())
ZIPFILE = ZipFile(FILE_OBJ)
WRITER = DictWriter(stdout, FIELDNAMES)
WRITER.writeheader()
WRITER.writerows(convert(ZIPFILE))
FILE_OBJ.close()
| #!/usr/bin/env python
"""Converts a WWARA database dump to GB3GF CSV format for GD-77."""
import codecs
import logging
from csv import DictReader, DictWriter
from decimal import Decimal
from io import BytesIO, StringIO
from sys import stdout, stderr
from zipfile import ZipFile
LOG = logging.getLogger(__name__)
FIELDNAMES = (
"Channel Number",
"Channel Name",
"Channel Type",
"Rx Frequency",
"Tx Frequency",
"Color Code",
"Timeslot",
"Contact",
"Rx Group",
"Scanlist",
"RX CTCSS",
"TX CTCSS",
"Power",
"Bandwidth",
"Rx Only",
"Squelch",
"Skip",
"Tx Admit",
"TOT",
"TOT Rekey",
"Tx Signaling",
"Rx Signaling",
"Privacy Group",
"Emergency System",
"Flags1",
"Flags2",
"Flags3",
"Flags4",
"Unknown25",
"Unknown26",
"Unknown30",
"Unknown36",
"Unknown38",
"Unknown40",
"Unknown52",
"Unknown53",
"Unknown54",
"Unknown55",
)
def _drop_decimals(decimal):
"""Decimal.normalize gives 2E+1 for 20..."""
decimal = str(decimal)
if "." in decimal:
decimal = decimal.rstrip("0").rstrip(".")
return decimal
def _supported(row):
"""Checks if the mode is supported."""
if "Y" not in (row["DMR"], row["FM_WIDE"], row["FM_NARROW"]):
return False
ifreq = Decimal(row["INPUT_FREQ"])
if ifreq > 144 and ifreq < 148:
# 2M
return True
if ifreq > 420 and ifreq < 450:
# 70CM
return True
return False
def _channel_name(row, prefix="", suffix=""):
"""Formats a usable name for the repeater."""
length = 16 - len(prefix)
name = prefix + " ".join((row["CALL"], row["CITY"]))[:length]
if suffix:
length = 16 - len(suffix)
name = ("{:%d.%d}" % (length, length)).format(name) + suffix
return name
def _channel_type(row):
"""Converts the mode per WWARA to the Channel Type per GD-77"""
mode = "Analogue"
if row["DMR"] == "Y":
mode = "Digital"
elif row["FM_WIDE"] == "Y":
mode = "Analogue"
elif row["FM_NARROW"] == "Y":
mode = "Analogue"
return mode
def _color_code(row):
"""Returns the DMR Color Code from the WWARA record"""
color_code = row.get("DMR_COLOR_CODE").lstrip("CC")
return color_code or "0"
def _rx_ctcss(row):
rx_ctcss = row.get("CTCSS_OUT")
if not rx_ctcss:
rx_ctcss = "None"
return rx_ctcss
def _tx_ctcss(row):
tx_ctcss = row.get("CTCSS_IN")
if not tx_ctcss:
tx_ctcss = "None"
return tx_ctcss
def _bandwidth(row):
if row.get("FM_WIDE", "N") == "Y":
return "25KHz"
elif row.get("FM_NARROW", "N") == "Y":
return "12.5KHz"
def _entry(row, channel_number, prefix="", suffix="", timeslot=None):
channel_name = _channel_name(row, prefix, suffix)
channel_type = _channel_type(row)
rx_frequency = _drop_decimals(row["OUTPUT_FREQ"])
tx_frequency = _drop_decimals(row["INPUT_FREQ"])
color_code = _color_code(row)
rx_ctcss = _rx_ctcss(row)
tx_ctcss = _tx_ctcss(row)
bandwidth = _bandwidth(row)
return {
"Channel Number": channel_number,
"Channel Name": channel_name,
"Channel Type": channel_type,
"Rx Frequency": rx_frequency,
"Tx Frequency": tx_frequency,
"Color Code": color_code,
"Timeslot": timeslot,
"Contact": "None",
"Rx Group": "None",
"Scanlist": "None",
"RX CTCSS": rx_ctcss,
"TX CTCSS": tx_ctcss,
"Power": "High",
"Bandwidth": bandwidth,
"Rx Only": "No",
"Squelch": "Normal",
"Skip": "No",
}
def _order(name):
if "-pending-" in name:
return -3
elif "-rptrlist-" in name:
return -4
elif "-About2Expire-" in name:
return -2
elif "-Expired-" in name:
return -1
return 0
def convert(zipfile):
"""Converts a WWARA zipfile."""
wlist = []
channel_number = 0
for name in sorted(zipfile.namelist(), key=_order):
if name.endswith(".csv"):
print(name, file=stderr)
prefix = ""
if "-pending-" in name:
prefix = "+"
elif "-About2Expire-" in name:
prefix = "-"
elif "-Expired-" in name:
prefix = "!"
with zipfile.open(name, "r") as csv:
# Remove the DATA_SPEC_VERSION header line from the .csv
csv.readline()
for row in DictReader(codecs.getreader("us-ascii")(csv)):
if not _supported(row):
continue
channel_number += 1
if row.get("DMR") == "Y":
timeslot = 1
wlist.append(
_entry(
row,
channel_number,
prefix,
" " + str(timeslot),
timeslot,
)
)
channel_number += 1
timeslot = 2
wlist.append(
_entry(
row,
channel_number,
prefix,
" " + str(timeslot),
timeslot,
)
)
elif "Y" in (row.get("FM_WIDE"), row.get("FM_NARROW")):
wlist.append(_entry(row, channel_number, prefix))
return wlist
return sorted(wlist, key=lambda x: (x["Channel Type"], Decimal(x["Rx Frequency"])))
if __name__ == "__main__":
from urllib.request import urlopen
with urlopen("https://www.wwara.org/DataBaseExtract.zip") as RESPONSE:
# ZipFile requires a file-like object that supports seek
FILE_OBJ = BytesIO(RESPONSE.read())
ZIPFILE = ZipFile(FILE_OBJ)
WRITER = DictWriter(stdout, FIELDNAMES)
WRITER.writeheader()
WRITER.writerows(convert(ZIPFILE))
FILE_OBJ.close() | en | 0.606234 | #!/usr/bin/env python Converts a WWARA database dump to GB3GF CSV format for GD-77. Decimal.normalize gives 2E+1 for 20... Checks if the mode is supported. # 2M # 70CM Formats a usable name for the repeater. Converts the mode per WWARA to the Channel Type per GD-77 Returns the DMR Color Code from the WWARA record Converts a WWARA zipfile. # Remove the DATA_SPEC_VERSION header line from the .csv # ZipFile requires a file-like object that supports seek | 2.477736 | 2 |
PyATS/inventories/test/job.py | dmmar/netascode | 36 | 6624990 | # Example: job.py
# -------------------
from pyats.easypy import run
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--ntp-server', dest='ntp_server', type=str, required=True)
parser.add_argument('--devices', dest = 'devices', nargs='+', required=True)
args = parser.parse_args()
ntp_server = args.ntp_server
devices = args.devices
def main():
# run api launches a testscript as an individual task.
# run('ntp_check_v2_no_rollback.py', devices=devices, ntp_server=ntp_server)
run('ntp_check_v3.py', devices = devices, ntp_server = ntp_server) | # Example: job.py
# -------------------
from pyats.easypy import run
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--ntp-server', dest='ntp_server', type=str, required=True)
parser.add_argument('--devices', dest = 'devices', nargs='+', required=True)
args = parser.parse_args()
ntp_server = args.ntp_server
devices = args.devices
def main():
# run api launches a testscript as an individual task.
# run('ntp_check_v2_no_rollback.py', devices=devices, ntp_server=ntp_server)
run('ntp_check_v3.py', devices = devices, ntp_server = ntp_server) | en | 0.424554 | # Example: job.py # ------------------- # run api launches a testscript as an individual task. # run('ntp_check_v2_no_rollback.py', devices=devices, ntp_server=ntp_server) | 2.473384 | 2 |
# Source: Test/output_test.py (repo: robinsonkwame/pycm)
# -*- coding: utf-8 -*-
"""
>>> from pycm import *
>>> import os
>>> import json
>>> import numpy as np
>>> y_test = np.array([600, 200, 200, 200, 200, 200, 200, 200, 500, 500, 500, 200, 200, 200, 200, 200, 200, 200, 200, 200])
>>> y_pred = np.array([100, 200, 200, 100, 100, 200, 200, 200, 100, 200, 500, 100, 100, 100, 100, 100, 100, 100, 500, 200])
>>> cm=ConfusionMatrix(y_test, y_pred)
>>> save_stat=cm.save_stat("test",address=False)
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_stat("test_filtered",address=False,overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_stat("test_summary",address=False,summary=True)
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_stat("test_filtered2",address=False,overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"],class_name=["L1","L2"])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_stat("test_filtered3",address=False,overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"],class_name=[])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> large_cm = ConfusionMatrix(list(range(20)),list(range(20)))
>>> save_stat = large_cm.save_stat("test_large",address=False)
>>> save_stat == {'Status': True, 'Message': None}
True
>>> save_stat=cm.save_stat("/asdasd,qweqwe.eo/",address=True)
>>> save_stat=={'Status': False, 'Message': "[Errno 2] No such file or directory: '/asdasd,qweqwe.eo/.pycm'"}
True
>>> save_stat=cm.save_html("test",address=False)
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_normalized",address=False,normalize=True)
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_alt",address=False,normalize=True,alt_link=True)
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_summary",address=False,summary=True)
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_filtered",address=False,overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_filtered2",address=False,overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"],class_name=[100])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_filtered3",address=False,overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"],class_name=[],color=(-2,-2,-2))
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_filtered4",address=False,overall_param=["Kappa","Scott PI"],class_param=[],class_name=[100],color={})
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_filtered5",address=False,overall_param=[],class_param=["TPR","TNR","ACC","AUC"],class_name=[100])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_colored",address=False,color=(130,100,200))
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_colored2",address=False,color="Beige")
>>> save_stat=={'Status': True, 'Message': None}
True
>>> long_name_cm = ConfusionMatrix(matrix={'SVM-Classifier':{'SVM-Classifier':25,'NN-Classifier':2},'NN-Classifier':{'SVM-Classifier':3,'NN-Classifier':50}})
>>> save_stat=long_name_cm.save_html("test_long_name",address=False,color="Pink")
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("/asdasd,qweqwe.eo/",address=True)
>>> save_stat=={'Status': False, 'Message': "[Errno 2] No such file or directory: '/asdasd,qweqwe.eo/.html'"}
True
>>> save_stat=cm.save_csv("test",address=False)
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_csv("test_normalized",address=False,normalize=True)
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_csv("test_summary",address=False,summary=True,matrix_save=False)
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_csv("test_filtered",address=False,class_param=["TPR","TNR","ACC","AUC"])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_csv("test_filtered2",address=False,class_param=["TPR","TNR","ACC","AUC"],class_name=[100],matrix_save=False)
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_csv("test_filtered3",address=False,class_param=["TPR","TNR","ACC","AUC"],class_name=[],matrix_save=False)
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_csv("test_filtered4",address=False,class_param=[],class_name=[100],matrix_save=False)
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_csv("/asdasd,qweqwe.eo/",address=True)
>>> save_stat=={'Status': False, 'Message': "[Errno 2] No such file or directory: '/asdasd,qweqwe.eo/.csv'"}
True
>>> save_obj=cm.save_obj("test",address=False)
>>> save_obj=={'Status': True, 'Message': None}
True
>>> save_obj=cm.save_obj("test_stat",address=False,save_stat=True)
>>> save_obj=={'Status': True, 'Message': None}
True
>>> save_obj=cm.save_obj("test_no_vectors",address=False,save_vector=False)
>>> save_obj=={'Status': True, 'Message': None}
True
>>> cm_file=ConfusionMatrix(file=open("test.obj","r"))
>>> print(cm_file)
Predict 100 200 500 600
Actual
100 0 0 0 0
<BLANKLINE>
200 9 6 1 0
<BLANKLINE>
500 1 1 1 0
<BLANKLINE>
600 1 0 0 0
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
Overall Statistics :
<BLANKLINE>
95% CI (0.14096,0.55904)
ACC Macro 0.675
AUNP None
AUNU None
Bennett S 0.13333
CBA 0.17708
Chi-Squared None
Chi-Squared DF 9
Conditional Entropy 1.23579
Cramer V None
Cross Entropy 1.70995
F1 Macro 0.23043
F1 Micro 0.35
Gwet AC1 0.19505
Hamming Loss 0.65
Joint Entropy 2.11997
KL Divergence None
Kappa 0.07801
Kappa 95% CI (-0.2185,0.37453)
Kappa No Prevalence -0.3
Kappa Standard Error 0.15128
Kappa Unbiased -0.12554
Lambda A 0.0
Lambda B 0.0
Mutual Information 0.10088
NIR 0.8
Overall ACC 0.35
Overall CEN 0.3648
Overall J (0.60294,0.15074)
Overall MCC 0.12642
Overall MCEN 0.37463
Overall RACC 0.295
Overall RACCU 0.4225
P-Value 1.0
PPV Macro None
PPV Micro 0.35
Pearson C None
Phi-Squared None
RCI 0.11409
RR 5.0
Reference Entropy 0.88418
Response Entropy 1.33667
SOA1(Landis & Koch) Slight
SOA2(Fleiss) Poor
SOA3(Altman) Poor
SOA4(Cicchetti) Poor
SOA5(Cramer) None
SOA6(Matthews) Negligible
Scott PI -0.12554
Standard Error 0.10665
TPR Macro None
TPR Micro 0.35
Zero-one Loss 13
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes 100 200 500 600
ACC(Accuracy) 0.45 0.45 0.85 0.95
AGF(Adjusted F-score) 0.0 0.33642 0.56659 0.0
AGM(Adjusted geometric mean) None 0.56694 0.7352 0
AM(Difference between automatic and manual classification) 11 -9 -1 -1
AUC(Area under the ROC curve) None 0.5625 0.63725 0.5
AUCI(AUC value interpretation) None Poor Fair Poor
AUPR(Area under the PR curve) None 0.61607 0.41667 None
BCD(Bray-Curtis dissimilarity) 0.275 0.225 0.025 0.025
BM(Informedness or bookmaker informedness) None 0.125 0.27451 0.0
CEN(Confusion entropy) 0.33496 0.35708 0.53895 0.0
DOR(Diagnostic odds ratio) None 1.8 8.0 None
DP(Discriminant power) None 0.14074 0.4979 None
DPI(Discriminant power interpretation) None Poor Poor None
ERR(Error rate) 0.55 0.55 0.15 0.05
F0.5(F0.5 score) 0.0 0.68182 0.45455 0.0
F1(F1 score - harmonic mean of precision and sensitivity) 0.0 0.52174 0.4 0.0
F2(F2 score) 0.0 0.42254 0.35714 0.0
FDR(False discovery rate) 1.0 0.14286 0.5 None
FN(False negative/miss/type 2 error) 0 10 2 1
FNR(Miss rate or false negative rate) None 0.625 0.66667 1.0
FOR(False omission rate) 0.0 0.76923 0.11111 0.05
FP(False positive/type 1 error/false alarm) 11 1 1 0
FPR(Fall-out or false positive rate) 0.55 0.25 0.05882 0.0
G(G-measure geometric mean of precision and sensitivity) None 0.56695 0.40825 None
GI(Gini index) None 0.125 0.27451 0.0
GM(G-mean geometric mean of specificity and sensitivity) None 0.53033 0.56011 0.0
IBA(Index of balanced accuracy) None 0.17578 0.12303 0.0
IS(Information score) None 0.09954 1.73697 None
J(Jaccard index) 0.0 0.35294 0.25 0.0
LS(Lift score) None 1.07143 3.33333 None
MCC(Matthews correlation coefficient) None 0.10483 0.32673 None
MCCI(Matthews correlation coefficient interpretation) None Negligible Weak None
MCEN(Modified confusion entropy) 0.33496 0.37394 0.58028 0.0
MK(Markedness) 0.0 0.08791 0.38889 None
N(Condition negative) 20 4 17 19
NLR(Negative likelihood ratio) None 0.83333 0.70833 1.0
NLRI(Negative likelihood ratio interpretation) None Negligible Negligible Negligible
NPV(Negative predictive value) 1.0 0.23077 0.88889 0.95
OC(Overlap coefficient) None 0.85714 0.5 None
OOC(Otsuka-Ochiai coefficient) None 0.56695 0.40825 None
OP(Optimized precision) None 0.11667 0.37308 -0.05
P(Condition positive or support) 0 16 3 1
PLR(Positive likelihood ratio) None 1.5 5.66667 None
PLRI(Positive likelihood ratio interpretation) None Poor Fair None
POP(Population) 20 20 20 20
PPV(Precision or positive predictive value) 0.0 0.85714 0.5 None
PRE(Prevalence) 0.0 0.8 0.15 0.05
Q(Yule Q - coefficient of colligation) None 0.28571 0.77778 None
RACC(Random accuracy) 0.0 0.28 0.015 0.0
RACCU(Random accuracy unbiased) 0.07563 0.33062 0.01562 0.00063
TN(True negative/correct rejection) 9 3 16 19
TNR(Specificity or true negative rate) 0.45 0.75 0.94118 1.0
TON(Test outcome negative) 9 13 18 20
TOP(Test outcome positive) 11 7 2 0
TP(True positive/hit) 0 6 1 0
TPR(Sensitivity, recall, hit rate, or true positive rate) None 0.375 0.33333 0.0
Y(Youden index) None 0.125 0.27451 0.0
dInd(Distance index) None 0.67315 0.66926 1.0
sInd(Similarity index) None 0.52401 0.52676 0.29289
<BLANKLINE>
>>> cm_stat_file=ConfusionMatrix(file=open("test_stat.obj","r"))
>>> cm_no_vectors_file=ConfusionMatrix(file=open("test_no_vectors.obj","r"))
>>> cm_stat_file==cm_file
True
>>> cm_no_vectors_file==cm_file
True
>>> cm_no_vectors_dict = json.load(open("test_no_vectors.obj","r"))
>>> cm_no_vectors_dict["Actual-Vector"] == None
True
>>> cm_no_vectors_dict["Predict-Vector"] == None
True
>>> cm_stat_dict = json.load(open("test_stat.obj","r"))
>>> cm_stat_dict["Class-Stat"]["MCC"] != None
True
>>> cm_stat_dict["Overall-Stat"]["Overall MCC"] != None
True
>>> def activation(i):
... if i<0.7:
... return 1
... else:
... return 0
>>> cm_6 = ConfusionMatrix([0,0,1,0],[0.87,0.34,0.9,0.12],threshold=activation)
>>> save_obj=cm_6.save_obj("test2",address=False)
>>> save_obj=={'Status': True, 'Message': None}
True
>>> cm_file_2=ConfusionMatrix(file=open("test2.obj","r"))
>>> cm_file_2.print_matrix()
Predict 0 1
Actual
0 1 2
1 1 0
>>> y_actu = [2, 0, 2, 2, 0, 1, 1, 2, 2, 0, 1, 2]
>>> y_pred = [0, 0, 2, 1, 0, 2, 1, 0, 2, 0, 2, 2]
>>> cm = ConfusionMatrix(y_actu, y_pred, sample_weight=[2, 2, 2, 2, 3, 1, 1, 2, 2, 1, 1, 2])
>>> save_obj=cm.save_obj("test3",address=False)
>>> save_obj=={'Status': True, 'Message': None}
True
>>> cm_file_3=ConfusionMatrix(file=open("test3.obj","r"))
>>> cm = ConfusionMatrix(y_actu, y_pred, sample_weight=np.array([2, 2, 2, 2, 3, 1, 1, 2, 2, 1, 1, 2]))
>>> save_obj=cm.save_obj("test3_np",address=False)
>>> save_obj=={'Status': True, 'Message': None}
True
>>> cm_file_3_np=ConfusionMatrix(file=open("test3_np.obj","r"))
>>> cm_file_3_np == cm_file_3
True
>>> cm_file_3.print_matrix()
Predict 0 1 2
Actual
0 6 0 0
1 0 1 2
2 4 2 6
<BLANKLINE>
>>> cm_file_3.stat()
Overall Statistics :
<BLANKLINE>
95% CI (0.41134,0.82675)
ACC Macro 0.74603
AUNP 0.7
AUNU 0.70556
Bennett S 0.42857
CBA 0.47778
Chi-Squared 10.44167
Chi-Squared DF 4
Conditional Entropy 0.96498
Cramer V 0.49861
Cross Entropy 1.50249
F1 Macro 0.56111
F1 Micro 0.61905
Gwet AC1 0.45277
Hamming Loss 0.38095
Joint Entropy 2.34377
KL Divergence 0.1237
Kappa 0.3913
Kappa 95% CI (0.05943,0.72318)
Kappa No Prevalence 0.2381
Kappa Standard Error 0.16932
Kappa Unbiased 0.37313
Lambda A 0.22222
Lambda B 0.36364
Mutual Information 0.47618
NIR 0.57143
Overall ACC 0.61905
Overall CEN 0.43947
Overall J (1.22857,0.40952)
Overall MCC 0.41558
Overall MCEN 0.50059
Overall RACC 0.37415
Overall RACCU 0.39229
P-Value 0.41709
PPV Macro 0.56111
PPV Micro 0.61905
Pearson C 0.57628
Phi-Squared 0.49722
RCI 0.34536
RR 7.0
Reference Entropy 1.37878
Response Entropy 1.44117
SOA1(Landis & Koch) Fair
SOA2(Fleiss) Poor
SOA3(Altman) Fair
SOA4(Cicchetti) Poor
SOA5(Cramer) Relatively Strong
SOA6(Matthews) Weak
Scott PI 0.37313
Standard Error 0.10597
TPR Macro 0.61111
TPR Micro 0.61905
Zero-one Loss 8
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes 0 1 2
ACC(Accuracy) 0.80952 0.80952 0.61905
AGF(Adjusted F-score) 0.90694 0.54433 0.55442
AGM(Adjusted geometric mean) 0.80509 0.70336 0.66986
AM(Difference between automatic and manual classification) 4 0 -4
AUC(Area under the ROC curve) 0.86667 0.61111 0.63889
AUCI(AUC value interpretation) Very Good Fair Fair
AUPR(Area under the PR curve) 0.8 0.33333 0.625
BCD(Bray-Curtis dissimilarity) 0.09524 0.0 0.09524
BM(Informedness or bookmaker informedness) 0.73333 0.22222 0.27778
CEN(Confusion entropy) 0.25 0.52832 0.56439
DOR(Diagnostic odds ratio) None 4.0 3.5
DP(Discriminant power) None 0.33193 0.29996
DPI(Discriminant power interpretation) None Poor Poor
ERR(Error rate) 0.19048 0.19048 0.38095
F0.5(F0.5 score) 0.65217 0.33333 0.68182
F1(F1 score - harmonic mean of precision and sensitivity) 0.75 0.33333 0.6
F2(F2 score) 0.88235 0.33333 0.53571
FDR(False discovery rate) 0.4 0.66667 0.25
FN(False negative/miss/type 2 error) 0 2 6
FNR(Miss rate or false negative rate) 0.0 0.66667 0.5
FOR(False omission rate) 0.0 0.11111 0.46154
FP(False positive/type 1 error/false alarm) 4 2 2
FPR(Fall-out or false positive rate) 0.26667 0.11111 0.22222
G(G-measure geometric mean of precision and sensitivity) 0.7746 0.33333 0.61237
GI(Gini index) 0.73333 0.22222 0.27778
GM(G-mean geometric mean of specificity and sensitivity) 0.85635 0.54433 0.62361
IBA(Index of balanced accuracy) 0.92889 0.13169 0.28086
IS(Information score) 1.07039 1.22239 0.39232
J(Jaccard index) 0.6 0.2 0.42857
LS(Lift score) 2.1 2.33333 1.3125
MCC(Matthews correlation coefficient) 0.66332 0.22222 0.28307
MCCI(Matthews correlation coefficient interpretation) Moderate Negligible Negligible
MCEN(Modified confusion entropy) 0.26439 0.52877 0.65924
MK(Markedness) 0.6 0.22222 0.28846
N(Condition negative) 15 18 9
NLR(Negative likelihood ratio) 0.0 0.75 0.64286
NLRI(Negative likelihood ratio interpretation) Good Negligible Negligible
NPV(Negative predictive value) 1.0 0.88889 0.53846
OC(Overlap coefficient) 1.0 0.33333 0.75
OOC(Otsuka-Ochiai coefficient) 0.7746 0.33333 0.61237
OP(Optimized precision) 0.65568 0.35498 0.40166
P(Condition positive or support) 6 3 12
PLR(Positive likelihood ratio) 3.75 3.0 2.25
PLRI(Positive likelihood ratio interpretation) Poor Poor Poor
POP(Population) 21 21 21
PPV(Precision or positive predictive value) 0.6 0.33333 0.75
PRE(Prevalence) 0.28571 0.14286 0.57143
Q(Yule Q - coefficient of colligation) None 0.6 0.55556
RACC(Random accuracy) 0.13605 0.02041 0.21769
RACCU(Random accuracy unbiased) 0.14512 0.02041 0.22676
TN(True negative/correct rejection) 11 16 7
TNR(Specificity or true negative rate) 0.73333 0.88889 0.77778
TON(Test outcome negative) 11 18 13
TOP(Test outcome positive) 10 3 8
TP(True positive/hit) 6 1 6
TPR(Sensitivity, recall, hit rate, or true positive rate) 1.0 0.33333 0.5
Y(Youden index) 0.73333 0.22222 0.27778
dInd(Distance index) 0.26667 0.67586 0.54716
sInd(Similarity index) 0.81144 0.52209 0.6131
>>> cm = ConfusionMatrix(matrix={1:{1:13182,2:30516},2:{1:5108,2:295593}},transpose=True) # Verified Case
>>> save_obj = cm.save_obj("test4",address=False)
>>> save_obj=={'Status': True, 'Message': None}
True
>>> save_obj = cm.save_obj("/asdasd,qweqwe.eo/",address=False)
>>> save_obj=={'Status': False, 'Message': "[Errno 2] No such file or directory: '/asdasd,qweqwe.eo/.obj'"}
True
>>> cm_file=ConfusionMatrix(file=open("test4.obj","r"))
>>> cm_file.DP[1]
0.770700985610517
>>> cm_file.Y[1]
0.627145631592811
>>> cm_file.BM[1]
0.627145631592811
>>> cm_file.transpose
True
>>> cm.matrix == cm_file.matrix
True
>>> cm.normalized_matrix == cm_file.normalized_matrix
True
>>> json.dump({"Actual-Vector": None, "Digit": 5, "Predict-Vector": None, "Matrix": {"0": {"0": 3, "1": 0, "2": 2}, "1": {"0": 0, "1": 1, "2": 1}, "2": {"0": 0, "1": 2, "2": 3}}, "Transpose": True,"Sample-Weight": None},open("test5.obj","w"))
>>> cm_file=ConfusionMatrix(file=open("test5.obj","r"))
>>> cm_file.transpose
True
>>> cm_file.matrix == {"0": {"0": 3, "1": 0, "2": 2}, "1": {"0": 0, "1": 1, "2": 1}, "2": {"0": 0, "1": 2, "2": 3}}
True
>>> json.dump({"Actual-Vector": None, "Digit": 5, "Predict-Vector": None, "Matrix": {"0": {"0": 3, "1": 0, "2": 2}, "1": {"0": 0, "1": 1, "2": 1}, "2": {"0": 0, "1": 2, "2": 3}}},open("test6.obj","w"))
>>> cm_file=ConfusionMatrix(file=open("test6.obj","r"))
>>> cm_file.weights
>>> cm_file.transpose
False
>>> cm_file.matrix == {'1': {'1': 1, '2': 1, '0': 0}, '2': {'1': 2, '2': 3, '0': 0}, '0': {'1': 0, '2': 2, '0': 3}}
True
>>> json.dump({"Actual-Vector": ['1', '1', '2', '2', '2', '2', '2', '0', '0', '0', '0', '0'], "Digit": 5, "Predict-Vector": ['1', '2', '1', '1', '2', '2', '2', '2', '2', '0', '0', '0'], "Matrix": {"0": {"0": 3, "1": 0, "2": 2}, "1": {"0": 0, "1": 1, "2": 1}, "2": {"0": 0, "1": 2, "2": 3}}},open("test7.obj","w"))
>>> cm_file=ConfusionMatrix(file=open("test7.obj","r"))
>>> cm_file.weights
>>> cm_file.transpose
False
>>> cm_file.matrix == {'1': {'1': 1, '2': 1, '0': 0}, '2': {'1': 2, '2': 3, '0': 0}, '0': {'1': 0, '2': 2, '0': 3}}
True
>>> cm_file.actual_vector == ['1', '1', '2', '2', '2', '2', '2', '0', '0', '0', '0', '0']
True
>>> cm_file.predict_vector == ['1', '2', '1', '1', '2', '2', '2', '2', '2', '0', '0', '0']
True
>>> cm_comp1 = ConfusionMatrix(matrix={0:{0:2,1:50,2:6},1:{0:5,1:50,2:3},2:{0:1,1:7,2:50}})
>>> cm_comp2 = ConfusionMatrix(matrix={0:{0:50,1:2,2:6},1:{0:50,1:5,2:3},2:{0:1,1:55,2:2}})
>>> cp = Compare({"model1":cm_comp1,"model2":cm_comp2})
>>> save_report = cp.save_report("test",address=False)
>>> save_report == {'Status': True, 'Message': None}
True
>>> save_report = cp.save_report("/asdasd,qweqwe.eo/",address=False)
>>> save_report == {'Status': False, 'Message': "[Errno 2] No such file or directory: '/asdasd,qweqwe.eo/.comp'"}
True
>>> os.remove("test.csv")
>>> os.remove("test_matrix.csv")
>>> os.remove("test_normalized.csv")
>>> os.remove("test_normalized_matrix.csv")
>>> os.remove("test.obj")
>>> os.remove("test_stat.obj")
>>> os.remove("test_no_vectors.obj")
>>> os.remove("test.html")
>>> os.remove("test_normalized.html")
>>> os.remove("test_filtered.html")
>>> os.remove("test_filtered.csv")
>>> os.remove("test_filtered_matrix.csv")
>>> os.remove("test_filtered.pycm")
>>> os.remove("test_large.pycm")
>>> os.remove("test_summary.pycm")
>>> os.remove("test_filtered2.html")
>>> os.remove("test_filtered3.html")
>>> os.remove("test_filtered4.html")
>>> os.remove("test_filtered5.html")
>>> os.remove("test_long_name.html")
>>> os.remove("test_alt.html")
>>> os.remove("test_summary.html")
>>> os.remove("test_colored.html")
>>> os.remove("test_colored2.html")
>>> os.remove("test_filtered2.csv")
>>> os.remove("test_filtered3.csv")
>>> os.remove("test_filtered4.csv")
>>> os.remove("test_summary.csv")
>>> os.remove("test_filtered2.pycm")
>>> os.remove("test_filtered3.pycm")
>>> os.remove("test2.obj")
>>> os.remove("test3.obj")
>>> os.remove("test3_np.obj")
>>> os.remove("test4.obj")
>>> os.remove("test5.obj")
>>> os.remove("test6.obj")
>>> os.remove("test7.obj")
>>> os.remove("test.pycm")
>>> os.remove("test.comp")
"""
| # -*- coding: utf-8 -*-
"""
>>> from pycm import *
>>> import os
>>> import json
>>> import numpy as np
>>> y_test = np.array([600, 200, 200, 200, 200, 200, 200, 200, 500, 500, 500, 200, 200, 200, 200, 200, 200, 200, 200, 200])
>>> y_pred = np.array([100, 200, 200, 100, 100, 200, 200, 200, 100, 200, 500, 100, 100, 100, 100, 100, 100, 100, 500, 200])
>>> cm=ConfusionMatrix(y_test, y_pred)
>>> save_stat=cm.save_stat("test",address=False)
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_stat("test_filtered",address=False,overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_stat("test_summary",address=False,summary=True)
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_stat("test_filtered2",address=False,overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"],class_name=["L1","L2"])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_stat("test_filtered3",address=False,overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"],class_name=[])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> large_cm = ConfusionMatrix(list(range(20)),list(range(20)))
>>> save_stat = large_cm.save_stat("test_large",address=False)
>>> save_stat == {'Status': True, 'Message': None}
True
>>> save_stat=cm.save_stat("/asdasd,qweqwe.eo/",address=True)
>>> save_stat=={'Status': False, 'Message': "[Errno 2] No such file or directory: '/asdasd,qweqwe.eo/.pycm'"}
True
>>> save_stat=cm.save_html("test",address=False)
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_normalized",address=False,normalize=True)
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_alt",address=False,normalize=True,alt_link=True)
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_summary",address=False,summary=True)
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_filtered",address=False,overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_filtered2",address=False,overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"],class_name=[100])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_filtered3",address=False,overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"],class_name=[],color=(-2,-2,-2))
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_filtered4",address=False,overall_param=["Kappa","Scott PI"],class_param=[],class_name=[100],color={})
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_filtered5",address=False,overall_param=[],class_param=["TPR","TNR","ACC","AUC"],class_name=[100])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_colored",address=False,color=(130,100,200))
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_colored2",address=False,color="Beige")
>>> save_stat=={'Status': True, 'Message': None}
True
>>> long_name_cm = ConfusionMatrix(matrix={'SVM-Classifier':{'SVM-Classifier':25,'NN-Classifier':2},'NN-Classifier':{'SVM-Classifier':3,'NN-Classifier':50}})
>>> save_stat=long_name_cm.save_html("test_long_name",address=False,color="Pink")
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("/asdasd,qweqwe.eo/",address=True)
>>> save_stat=={'Status': False, 'Message': "[Errno 2] No such file or directory: '/asdasd,qweqwe.eo/.html'"}
True
>>> save_stat=cm.save_csv("test",address=False)
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_csv("test_normalized",address=False,normalize=True)
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_csv("test_summary",address=False,summary=True,matrix_save=False)
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_csv("test_filtered",address=False,class_param=["TPR","TNR","ACC","AUC"])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_csv("test_filtered2",address=False,class_param=["TPR","TNR","ACC","AUC"],class_name=[100],matrix_save=False)
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_csv("test_filtered3",address=False,class_param=["TPR","TNR","ACC","AUC"],class_name=[],matrix_save=False)
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_csv("test_filtered4",address=False,class_param=[],class_name=[100],matrix_save=False)
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_csv("/asdasd,qweqwe.eo/",address=True)
>>> save_stat=={'Status': False, 'Message': "[Errno 2] No such file or directory: '/asdasd,qweqwe.eo/.csv'"}
True
>>> save_obj=cm.save_obj("test",address=False)
>>> save_obj=={'Status': True, 'Message': None}
True
>>> save_obj=cm.save_obj("test_stat",address=False,save_stat=True)
>>> save_obj=={'Status': True, 'Message': None}
True
>>> save_obj=cm.save_obj("test_no_vectors",address=False,save_vector=False)
>>> save_obj=={'Status': True, 'Message': None}
True
>>> cm_file=ConfusionMatrix(file=open("test.obj","r"))
>>> print(cm_file)
Predict 100 200 500 600
Actual
100 0 0 0 0
<BLANKLINE>
200 9 6 1 0
<BLANKLINE>
500 1 1 1 0
<BLANKLINE>
600 1 0 0 0
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
Overall Statistics :
<BLANKLINE>
95% CI (0.14096,0.55904)
ACC Macro 0.675
AUNP None
AUNU None
Bennett S 0.13333
CBA 0.17708
Chi-Squared None
Chi-Squared DF 9
Conditional Entropy 1.23579
Cramer V None
Cross Entropy 1.70995
F1 Macro 0.23043
F1 Micro 0.35
Gwet AC1 0.19505
Hamming Loss 0.65
Joint Entropy 2.11997
KL Divergence None
Kappa 0.07801
Kappa 95% CI (-0.2185,0.37453)
Kappa No Prevalence -0.3
Kappa Standard Error 0.15128
Kappa Unbiased -0.12554
Lambda A 0.0
Lambda B 0.0
Mutual Information 0.10088
NIR 0.8
Overall ACC 0.35
Overall CEN 0.3648
Overall J (0.60294,0.15074)
Overall MCC 0.12642
Overall MCEN 0.37463
Overall RACC 0.295
Overall RACCU 0.4225
P-Value 1.0
PPV Macro None
PPV Micro 0.35
Pearson C None
Phi-Squared None
RCI 0.11409
RR 5.0
Reference Entropy 0.88418
Response Entropy 1.33667
SOA1(Landis & Koch) Slight
SOA2(Fleiss) Poor
SOA3(Altman) Poor
SOA4(Cicchetti) Poor
SOA5(Cramer) None
SOA6(Matthews) Negligible
Scott PI -0.12554
Standard Error 0.10665
TPR Macro None
TPR Micro 0.35
Zero-one Loss 13
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes 100 200 500 600
ACC(Accuracy) 0.45 0.45 0.85 0.95
AGF(Adjusted F-score) 0.0 0.33642 0.56659 0.0
AGM(Adjusted geometric mean) None 0.56694 0.7352 0
AM(Difference between automatic and manual classification) 11 -9 -1 -1
AUC(Area under the ROC curve) None 0.5625 0.63725 0.5
AUCI(AUC value interpretation) None Poor Fair Poor
AUPR(Area under the PR curve) None 0.61607 0.41667 None
BCD(Bray-Curtis dissimilarity) 0.275 0.225 0.025 0.025
BM(Informedness or bookmaker informedness) None 0.125 0.27451 0.0
CEN(Confusion entropy) 0.33496 0.35708 0.53895 0.0
DOR(Diagnostic odds ratio) None 1.8 8.0 None
DP(Discriminant power) None 0.14074 0.4979 None
DPI(Discriminant power interpretation) None Poor Poor None
ERR(Error rate) 0.55 0.55 0.15 0.05
F0.5(F0.5 score) 0.0 0.68182 0.45455 0.0
F1(F1 score - harmonic mean of precision and sensitivity) 0.0 0.52174 0.4 0.0
F2(F2 score) 0.0 0.42254 0.35714 0.0
FDR(False discovery rate) 1.0 0.14286 0.5 None
FN(False negative/miss/type 2 error) 0 10 2 1
FNR(Miss rate or false negative rate) None 0.625 0.66667 1.0
FOR(False omission rate) 0.0 0.76923 0.11111 0.05
FP(False positive/type 1 error/false alarm) 11 1 1 0
FPR(Fall-out or false positive rate) 0.55 0.25 0.05882 0.0
G(G-measure geometric mean of precision and sensitivity) None 0.56695 0.40825 None
GI(Gini index) None 0.125 0.27451 0.0
GM(G-mean geometric mean of specificity and sensitivity) None 0.53033 0.56011 0.0
IBA(Index of balanced accuracy) None 0.17578 0.12303 0.0
IS(Information score) None 0.09954 1.73697 None
J(Jaccard index) 0.0 0.35294 0.25 0.0
LS(Lift score) None 1.07143 3.33333 None
MCC(Matthews correlation coefficient) None 0.10483 0.32673 None
MCCI(Matthews correlation coefficient interpretation) None Negligible Weak None
MCEN(Modified confusion entropy) 0.33496 0.37394 0.58028 0.0
MK(Markedness) 0.0 0.08791 0.38889 None
N(Condition negative) 20 4 17 19
NLR(Negative likelihood ratio) None 0.83333 0.70833 1.0
NLRI(Negative likelihood ratio interpretation) None Negligible Negligible Negligible
NPV(Negative predictive value) 1.0 0.23077 0.88889 0.95
OC(Overlap coefficient) None 0.85714 0.5 None
OOC(Otsuka-Ochiai coefficient) None 0.56695 0.40825 None
OP(Optimized precision) None 0.11667 0.37308 -0.05
P(Condition positive or support) 0 16 3 1
PLR(Positive likelihood ratio) None 1.5 5.66667 None
PLRI(Positive likelihood ratio interpretation) None Poor Fair None
POP(Population) 20 20 20 20
PPV(Precision or positive predictive value) 0.0 0.85714 0.5 None
PRE(Prevalence) 0.0 0.8 0.15 0.05
Q(Yule Q - coefficient of colligation) None 0.28571 0.77778 None
RACC(Random accuracy) 0.0 0.28 0.015 0.0
RACCU(Random accuracy unbiased) 0.07563 0.33062 0.01562 0.00063
TN(True negative/correct rejection) 9 3 16 19
TNR(Specificity or true negative rate) 0.45 0.75 0.94118 1.0
TON(Test outcome negative) 9 13 18 20
TOP(Test outcome positive) 11 7 2 0
TP(True positive/hit) 0 6 1 0
TPR(Sensitivity, recall, hit rate, or true positive rate) None 0.375 0.33333 0.0
Y(Youden index) None 0.125 0.27451 0.0
dInd(Distance index) None 0.67315 0.66926 1.0
sInd(Similarity index) None 0.52401 0.52676 0.29289
<BLANKLINE>
>>> cm_stat_file=ConfusionMatrix(file=open("test_stat.obj","r"))
>>> cm_no_vectors_file=ConfusionMatrix(file=open("test_no_vectors.obj","r"))
>>> cm_stat_file==cm_file
True
>>> cm_no_vectors_file==cm_file
True
>>> cm_no_vectors_dict = json.load(open("test_no_vectors.obj","r"))
>>> cm_no_vectors_dict["Actual-Vector"] == None
True
>>> cm_no_vectors_dict["Predict-Vector"] == None
True
>>> cm_stat_dict = json.load(open("test_stat.obj","r"))
>>> cm_stat_dict["Class-Stat"]["MCC"] != None
True
>>> cm_stat_dict["Overall-Stat"]["Overall MCC"] != None
True
>>> def activation(i):
... if i<0.7:
... return 1
... else:
... return 0
>>> cm_6 = ConfusionMatrix([0,0,1,0],[0.87,0.34,0.9,0.12],threshold=activation)
>>> save_obj=cm_6.save_obj("test2",address=False)
>>> save_obj=={'Status': True, 'Message': None}
True
>>> cm_file_2=ConfusionMatrix(file=open("test2.obj","r"))
>>> cm_file_2.print_matrix()
Predict 0 1
Actual
0 1 2
1 1 0
>>> y_actu = [2, 0, 2, 2, 0, 1, 1, 2, 2, 0, 1, 2]
>>> y_pred = [0, 0, 2, 1, 0, 2, 1, 0, 2, 0, 2, 2]
>>> cm = ConfusionMatrix(y_actu, y_pred, sample_weight=[2, 2, 2, 2, 3, 1, 1, 2, 2, 1, 1, 2])
>>> save_obj=cm.save_obj("test3",address=False)
>>> save_obj=={'Status': True, 'Message': None}
True
>>> cm_file_3=ConfusionMatrix(file=open("test3.obj","r"))
>>> cm = ConfusionMatrix(y_actu, y_pred, sample_weight=np.array([2, 2, 2, 2, 3, 1, 1, 2, 2, 1, 1, 2]))
>>> save_obj=cm.save_obj("test3_np",address=False)
>>> save_obj=={'Status': True, 'Message': None}
True
>>> cm_file_3_np=ConfusionMatrix(file=open("test3_np.obj","r"))
>>> cm_file_3_np == cm_file_3
True
>>> cm_file_3.print_matrix()
Predict 0 1 2
Actual
0 6 0 0
1 0 1 2
2 4 2 6
<BLANKLINE>
>>> cm_file_3.stat()
Overall Statistics :
<BLANKLINE>
95% CI (0.41134,0.82675)
ACC Macro 0.74603
AUNP 0.7
AUNU 0.70556
Bennett S 0.42857
CBA 0.47778
Chi-Squared 10.44167
Chi-Squared DF 4
Conditional Entropy 0.96498
Cramer V 0.49861
Cross Entropy 1.50249
F1 Macro 0.56111
F1 Micro 0.61905
Gwet AC1 0.45277
Hamming Loss 0.38095
Joint Entropy 2.34377
KL Divergence 0.1237
Kappa 0.3913
Kappa 95% CI (0.05943,0.72318)
Kappa No Prevalence 0.2381
Kappa Standard Error 0.16932
Kappa Unbiased 0.37313
Lambda A 0.22222
Lambda B 0.36364
Mutual Information 0.47618
NIR 0.57143
Overall ACC 0.61905
Overall CEN 0.43947
Overall J (1.22857,0.40952)
Overall MCC 0.41558
Overall MCEN 0.50059
Overall RACC 0.37415
Overall RACCU 0.39229
P-Value 0.41709
PPV Macro 0.56111
PPV Micro 0.61905
Pearson C 0.57628
Phi-Squared 0.49722
RCI 0.34536
RR 7.0
Reference Entropy 1.37878
Response Entropy 1.44117
SOA1(Landis & Koch) Fair
SOA2(Fleiss) Poor
SOA3(Altman) Fair
SOA4(Cicchetti) Poor
SOA5(Cramer) Relatively Strong
SOA6(Matthews) Weak
Scott PI 0.37313
Standard Error 0.10597
TPR Macro 0.61111
TPR Micro 0.61905
Zero-one Loss 8
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes 0 1 2
ACC(Accuracy) 0.80952 0.80952 0.61905
AGF(Adjusted F-score) 0.90694 0.54433 0.55442
AGM(Adjusted geometric mean) 0.80509 0.70336 0.66986
AM(Difference between automatic and manual classification) 4 0 -4
AUC(Area under the ROC curve) 0.86667 0.61111 0.63889
AUCI(AUC value interpretation) Very Good Fair Fair
AUPR(Area under the PR curve) 0.8 0.33333 0.625
BCD(Bray-Curtis dissimilarity) 0.09524 0.0 0.09524
BM(Informedness or bookmaker informedness) 0.73333 0.22222 0.27778
CEN(Confusion entropy) 0.25 0.52832 0.56439
DOR(Diagnostic odds ratio) None 4.0 3.5
DP(Discriminant power) None 0.33193 0.29996
DPI(Discriminant power interpretation) None Poor Poor
ERR(Error rate) 0.19048 0.19048 0.38095
F0.5(F0.5 score) 0.65217 0.33333 0.68182
F1(F1 score - harmonic mean of precision and sensitivity) 0.75 0.33333 0.6
F2(F2 score) 0.88235 0.33333 0.53571
FDR(False discovery rate) 0.4 0.66667 0.25
FN(False negative/miss/type 2 error) 0 2 6
FNR(Miss rate or false negative rate) 0.0 0.66667 0.5
FOR(False omission rate) 0.0 0.11111 0.46154
FP(False positive/type 1 error/false alarm) 4 2 2
FPR(Fall-out or false positive rate) 0.26667 0.11111 0.22222
G(G-measure geometric mean of precision and sensitivity) 0.7746 0.33333 0.61237
GI(Gini index) 0.73333 0.22222 0.27778
GM(G-mean geometric mean of specificity and sensitivity) 0.85635 0.54433 0.62361
IBA(Index of balanced accuracy) 0.92889 0.13169 0.28086
IS(Information score) 1.07039 1.22239 0.39232
J(Jaccard index) 0.6 0.2 0.42857
LS(Lift score) 2.1 2.33333 1.3125
MCC(Matthews correlation coefficient) 0.66332 0.22222 0.28307
MCCI(Matthews correlation coefficient interpretation) Moderate Negligible Negligible
MCEN(Modified confusion entropy) 0.26439 0.52877 0.65924
MK(Markedness) 0.6 0.22222 0.28846
N(Condition negative) 15 18 9
NLR(Negative likelihood ratio) 0.0 0.75 0.64286
NLRI(Negative likelihood ratio interpretation) Good Negligible Negligible
NPV(Negative predictive value) 1.0 0.88889 0.53846
OC(Overlap coefficient) 1.0 0.33333 0.75
OOC(Otsuka-Ochiai coefficient) 0.7746 0.33333 0.61237
OP(Optimized precision) 0.65568 0.35498 0.40166
P(Condition positive or support) 6 3 12
PLR(Positive likelihood ratio) 3.75 3.0 2.25
PLRI(Positive likelihood ratio interpretation) Poor Poor Poor
POP(Population) 21 21 21
PPV(Precision or positive predictive value) 0.6 0.33333 0.75
PRE(Prevalence) 0.28571 0.14286 0.57143
Q(Yule Q - coefficient of colligation) None 0.6 0.55556
RACC(Random accuracy) 0.13605 0.02041 0.21769
RACCU(Random accuracy unbiased) 0.14512 0.02041 0.22676
TN(True negative/correct rejection) 11 16 7
TNR(Specificity or true negative rate) 0.73333 0.88889 0.77778
TON(Test outcome negative) 11 18 13
TOP(Test outcome positive) 10 3 8
TP(True positive/hit) 6 1 6
TPR(Sensitivity, recall, hit rate, or true positive rate) 1.0 0.33333 0.5
Y(Youden index) 0.73333 0.22222 0.27778
dInd(Distance index) 0.26667 0.67586 0.54716
sInd(Similarity index) 0.81144 0.52209 0.6131
>>> cm = ConfusionMatrix(matrix={1:{1:13182,2:30516},2:{1:5108,2:295593}},transpose=True) # Verified Case
>>> save_obj = cm.save_obj("test4",address=False)
>>> save_obj=={'Status': True, 'Message': None}
True
>>> save_obj = cm.save_obj("/asdasd,qweqwe.eo/",address=False)
>>> save_obj=={'Status': False, 'Message': "[Errno 2] No such file or directory: '/asdasd,qweqwe.eo/.obj'"}
True
>>> cm_file=ConfusionMatrix(file=open("test4.obj","r"))
>>> cm_file.DP[1]
0.770700985610517
>>> cm_file.Y[1]
0.627145631592811
>>> cm_file.BM[1]
0.627145631592811
>>> cm_file.transpose
True
>>> cm.matrix == cm_file.matrix
True
>>> cm.normalized_matrix == cm_file.normalized_matrix
True
>>> json.dump({"Actual-Vector": None, "Digit": 5, "Predict-Vector": None, "Matrix": {"0": {"0": 3, "1": 0, "2": 2}, "1": {"0": 0, "1": 1, "2": 1}, "2": {"0": 0, "1": 2, "2": 3}}, "Transpose": True,"Sample-Weight": None},open("test5.obj","w"))
>>> cm_file=ConfusionMatrix(file=open("test5.obj","r"))
>>> cm_file.transpose
True
>>> cm_file.matrix == {"0": {"0": 3, "1": 0, "2": 2}, "1": {"0": 0, "1": 1, "2": 1}, "2": {"0": 0, "1": 2, "2": 3}}
True
>>> json.dump({"Actual-Vector": None, "Digit": 5, "Predict-Vector": None, "Matrix": {"0": {"0": 3, "1": 0, "2": 2}, "1": {"0": 0, "1": 1, "2": 1}, "2": {"0": 0, "1": 2, "2": 3}}},open("test6.obj","w"))
>>> cm_file=ConfusionMatrix(file=open("test6.obj","r"))
>>> cm_file.weights
>>> cm_file.transpose
False
>>> cm_file.matrix == {'1': {'1': 1, '2': 1, '0': 0}, '2': {'1': 2, '2': 3, '0': 0}, '0': {'1': 0, '2': 2, '0': 3}}
True
>>> json.dump({"Actual-Vector": ['1', '1', '2', '2', '2', '2', '2', '0', '0', '0', '0', '0'], "Digit": 5, "Predict-Vector": ['1', '2', '1', '1', '2', '2', '2', '2', '2', '0', '0', '0'], "Matrix": {"0": {"0": 3, "1": 0, "2": 2}, "1": {"0": 0, "1": 1, "2": 1}, "2": {"0": 0, "1": 2, "2": 3}}},open("test7.obj","w"))
>>> cm_file=ConfusionMatrix(file=open("test7.obj","r"))
>>> cm_file.weights
>>> cm_file.transpose
False
>>> cm_file.matrix == {'1': {'1': 1, '2': 1, '0': 0}, '2': {'1': 2, '2': 3, '0': 0}, '0': {'1': 0, '2': 2, '0': 3}}
True
>>> cm_file.actual_vector == ['1', '1', '2', '2', '2', '2', '2', '0', '0', '0', '0', '0']
True
>>> cm_file.predict_vector == ['1', '2', '1', '1', '2', '2', '2', '2', '2', '0', '0', '0']
True
>>> cm_comp1 = ConfusionMatrix(matrix={0:{0:2,1:50,2:6},1:{0:5,1:50,2:3},2:{0:1,1:7,2:50}})
>>> cm_comp2 = ConfusionMatrix(matrix={0:{0:50,1:2,2:6},1:{0:50,1:5,2:3},2:{0:1,1:55,2:2}})
>>> cp = Compare({"model1":cm_comp1,"model2":cm_comp2})
>>> save_report = cp.save_report("test",address=False)
>>> save_report == {'Status': True, 'Message': None}
True
>>> save_report = cp.save_report("/asdasd,qweqwe.eo/",address=False)
>>> save_report == {'Status': False, 'Message': "[Errno 2] No such file or directory: '/asdasd,qweqwe.eo/.comp'"}
True
>>> os.remove("test.csv")
>>> os.remove("test_matrix.csv")
>>> os.remove("test_normalized.csv")
>>> os.remove("test_normalized_matrix.csv")
>>> os.remove("test.obj")
>>> os.remove("test_stat.obj")
>>> os.remove("test_no_vectors.obj")
>>> os.remove("test.html")
>>> os.remove("test_normalized.html")
>>> os.remove("test_filtered.html")
>>> os.remove("test_filtered.csv")
>>> os.remove("test_filtered_matrix.csv")
>>> os.remove("test_filtered.pycm")
>>> os.remove("test_large.pycm")
>>> os.remove("test_summary.pycm")
>>> os.remove("test_filtered2.html")
>>> os.remove("test_filtered3.html")
>>> os.remove("test_filtered4.html")
>>> os.remove("test_filtered5.html")
>>> os.remove("test_long_name.html")
>>> os.remove("test_alt.html")
>>> os.remove("test_summary.html")
>>> os.remove("test_colored.html")
>>> os.remove("test_colored2.html")
>>> os.remove("test_filtered2.csv")
>>> os.remove("test_filtered3.csv")
>>> os.remove("test_filtered4.csv")
>>> os.remove("test_summary.csv")
>>> os.remove("test_filtered2.pycm")
>>> os.remove("test_filtered3.pycm")
>>> os.remove("test2.obj")
>>> os.remove("test3.obj")
>>> os.remove("test3_np.obj")
>>> os.remove("test4.obj")
>>> os.remove("test5.obj")
>>> os.remove("test6.obj")
>>> os.remove("test7.obj")
>>> os.remove("test.pycm")
>>> os.remove("test.comp")
""" | en | 0.416554 | # -*- coding: utf-8 -*- >>> from pycm import * >>> import os >>> import json >>> import numpy as np >>> y_test = np.array([600, 200, 200, 200, 200, 200, 200, 200, 500, 500, 500, 200, 200, 200, 200, 200, 200, 200, 200, 200]) >>> y_pred = np.array([100, 200, 200, 100, 100, 200, 200, 200, 100, 200, 500, 100, 100, 100, 100, 100, 100, 100, 500, 200]) >>> cm=ConfusionMatrix(y_test, y_pred) >>> save_stat=cm.save_stat("test",address=False) >>> save_stat=={'Status': True, 'Message': None} True >>> save_stat=cm.save_stat("test_filtered",address=False,overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"]) >>> save_stat=={'Status': True, 'Message': None} True >>> save_stat=cm.save_stat("test_summary",address=False,summary=True) >>> save_stat=={'Status': True, 'Message': None} True >>> save_stat=cm.save_stat("test_filtered2",address=False,overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"],class_name=["L1","L2"]) >>> save_stat=={'Status': True, 'Message': None} True >>> save_stat=cm.save_stat("test_filtered3",address=False,overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"],class_name=[]) >>> save_stat=={'Status': True, 'Message': None} True >>> large_cm = ConfusionMatrix(list(range(20)),list(range(20))) >>> save_stat = large_cm.save_stat("test_large",address=False) >>> save_stat == {'Status': True, 'Message': None} True >>> save_stat=cm.save_stat("/asdasd,qweqwe.eo/",address=True) >>> save_stat=={'Status': False, 'Message': "[Errno 2] No such file or directory: '/asdasd,qweqwe.eo/.pycm'"} True >>> save_stat=cm.save_html("test",address=False) >>> save_stat=={'Status': True, 'Message': None} True >>> save_stat=cm.save_html("test_normalized",address=False,normalize=True) >>> save_stat=={'Status': True, 'Message': None} True >>> save_stat=cm.save_html("test_alt",address=False,normalize=True,alt_link=True) >>> save_stat=={'Status': True, 'Message': None} True >>> 
save_stat=cm.save_html("test_summary",address=False,summary=True) >>> save_stat=={'Status': True, 'Message': None} True >>> save_stat=cm.save_html("test_filtered",address=False,overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"]) >>> save_stat=={'Status': True, 'Message': None} True >>> save_stat=cm.save_html("test_filtered2",address=False,overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"],class_name=[100]) >>> save_stat=={'Status': True, 'Message': None} True >>> save_stat=cm.save_html("test_filtered3",address=False,overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"],class_name=[],color=(-2,-2,-2)) >>> save_stat=={'Status': True, 'Message': None} True >>> save_stat=cm.save_html("test_filtered4",address=False,overall_param=["Kappa","Scott PI"],class_param=[],class_name=[100],color={}) >>> save_stat=={'Status': True, 'Message': None} True >>> save_stat=cm.save_html("test_filtered5",address=False,overall_param=[],class_param=["TPR","TNR","ACC","AUC"],class_name=[100]) >>> save_stat=={'Status': True, 'Message': None} True >>> save_stat=cm.save_html("test_colored",address=False,color=(130,100,200)) >>> save_stat=={'Status': True, 'Message': None} True >>> save_stat=cm.save_html("test_colored2",address=False,color="Beige") >>> save_stat=={'Status': True, 'Message': None} True >>> long_name_cm = ConfusionMatrix(matrix={'SVM-Classifier':{'SVM-Classifier':25,'NN-Classifier':2},'NN-Classifier':{'SVM-Classifier':3,'NN-Classifier':50}}) >>> save_stat=long_name_cm.save_html("test_long_name",address=False,color="Pink") >>> save_stat=={'Status': True, 'Message': None} True >>> save_stat=cm.save_html("/asdasd,qweqwe.eo/",address=True) >>> save_stat=={'Status': False, 'Message': "[Errno 2] No such file or directory: '/asdasd,qweqwe.eo/.html'"} True >>> save_stat=cm.save_csv("test",address=False) >>> save_stat=={'Status': True, 'Message': None} True >>> 
save_stat=cm.save_csv("test_normalized",address=False,normalize=True) >>> save_stat=={'Status': True, 'Message': None} True >>> save_stat=cm.save_csv("test_summary",address=False,summary=True,matrix_save=False) >>> save_stat=={'Status': True, 'Message': None} True >>> save_stat=cm.save_csv("test_filtered",address=False,class_param=["TPR","TNR","ACC","AUC"]) >>> save_stat=={'Status': True, 'Message': None} True >>> save_stat=cm.save_csv("test_filtered2",address=False,class_param=["TPR","TNR","ACC","AUC"],class_name=[100],matrix_save=False) >>> save_stat=={'Status': True, 'Message': None} True >>> save_stat=cm.save_csv("test_filtered3",address=False,class_param=["TPR","TNR","ACC","AUC"],class_name=[],matrix_save=False) >>> save_stat=={'Status': True, 'Message': None} True >>> save_stat=cm.save_csv("test_filtered4",address=False,class_param=[],class_name=[100],matrix_save=False) >>> save_stat=={'Status': True, 'Message': None} True >>> save_stat=cm.save_csv("/asdasd,qweqwe.eo/",address=True) >>> save_stat=={'Status': False, 'Message': "[Errno 2] No such file or directory: '/asdasd,qweqwe.eo/.csv'"} True >>> save_obj=cm.save_obj("test",address=False) >>> save_obj=={'Status': True, 'Message': None} True >>> save_obj=cm.save_obj("test_stat",address=False,save_stat=True) >>> save_obj=={'Status': True, 'Message': None} True >>> save_obj=cm.save_obj("test_no_vectors",address=False,save_vector=False) >>> save_obj=={'Status': True, 'Message': None} True >>> cm_file=ConfusionMatrix(file=open("test.obj","r")) >>> print(cm_file) Predict 100 200 500 600 Actual 100 0 0 0 0 <BLANKLINE> 200 9 6 1 0 <BLANKLINE> 500 1 1 1 0 <BLANKLINE> 600 1 0 0 0 <BLANKLINE> <BLANKLINE> <BLANKLINE> <BLANKLINE> <BLANKLINE> Overall Statistics : <BLANKLINE> 95% CI (0.14096,0.55904) ACC Macro 0.675 AUNP None AUNU None Bennett S 0.13333 CBA 0.17708 Chi-Squared None Chi-Squared DF 9 Conditional Entropy 1.23579 Cramer V None Cross Entropy 1.70995 F1 Macro 0.23043 F1 Micro 0.35 Gwet AC1 0.19505 Hamming Loss 
0.65 Joint Entropy 2.11997 KL Divergence None Kappa 0.07801 Kappa 95% CI (-0.2185,0.37453) Kappa No Prevalence -0.3 Kappa Standard Error 0.15128 Kappa Unbiased -0.12554 Lambda A 0.0 Lambda B 0.0 Mutual Information 0.10088 NIR 0.8 Overall ACC 0.35 Overall CEN 0.3648 Overall J (0.60294,0.15074) Overall MCC 0.12642 Overall MCEN 0.37463 Overall RACC 0.295 Overall RACCU 0.4225 P-Value 1.0 PPV Macro None PPV Micro 0.35 Pearson C None Phi-Squared None RCI 0.11409 RR 5.0 Reference Entropy 0.88418 Response Entropy 1.33667 SOA1(Landis & Koch) Slight SOA2(Fleiss) Poor SOA3(Altman) Poor SOA4(Cicchetti) Poor SOA5(Cramer) None SOA6(Matthews) Negligible Scott PI -0.12554 Standard Error 0.10665 TPR Macro None TPR Micro 0.35 Zero-one Loss 13 <BLANKLINE> Class Statistics : <BLANKLINE> Classes 100 200 500 600 ACC(Accuracy) 0.45 0.45 0.85 0.95 AGF(Adjusted F-score) 0.0 0.33642 0.56659 0.0 AGM(Adjusted geometric mean) None 0.56694 0.7352 0 AM(Difference between automatic and manual classification) 11 -9 -1 -1 AUC(Area under the ROC curve) None 0.5625 0.63725 0.5 AUCI(AUC value interpretation) None Poor Fair Poor AUPR(Area under the PR curve) None 0.61607 0.41667 None BCD(Bray-Curtis dissimilarity) 0.275 0.225 0.025 0.025 BM(Informedness or bookmaker informedness) None 0.125 0.27451 0.0 CEN(Confusion entropy) 0.33496 0.35708 0.53895 0.0 DOR(Diagnostic odds ratio) None 1.8 8.0 None DP(Discriminant power) None 0.14074 0.4979 None DPI(Discriminant power interpretation) None Poor Poor None ERR(Error rate) 0.55 0.55 0.15 0.05 F0.5(F0.5 score) 0.0 0.68182 0.45455 0.0 F1(F1 score - harmonic mean of precision and sensitivity) 0.0 0.52174 0.4 0.0 F2(F2 score) 0.0 0.42254 0.35714 0.0 FDR(False discovery rate) 1.0 0.14286 0.5 None FN(False negative/miss/type 2 error) 0 10 2 1 FNR(Miss rate or false negative rate) None 0.625 0.66667 1.0 FOR(False omission rate) 0.0 0.76923 0.11111 0.05 FP(False positive/type 1 error/false alarm) 11 1 1 0 FPR(Fall-out or false positive rate) 0.55 0.25 0.05882 0.0 
G(G-measure geometric mean of precision and sensitivity) None 0.56695 0.40825 None GI(Gini index) None 0.125 0.27451 0.0 GM(G-mean geometric mean of specificity and sensitivity) None 0.53033 0.56011 0.0 IBA(Index of balanced accuracy) None 0.17578 0.12303 0.0 IS(Information score) None 0.09954 1.73697 None J(Jaccard index) 0.0 0.35294 0.25 0.0 LS(Lift score) None 1.07143 3.33333 None MCC(Matthews correlation coefficient) None 0.10483 0.32673 None MCCI(Matthews correlation coefficient interpretation) None Negligible Weak None MCEN(Modified confusion entropy) 0.33496 0.37394 0.58028 0.0 MK(Markedness) 0.0 0.08791 0.38889 None N(Condition negative) 20 4 17 19 NLR(Negative likelihood ratio) None 0.83333 0.70833 1.0 NLRI(Negative likelihood ratio interpretation) None Negligible Negligible Negligible NPV(Negative predictive value) 1.0 0.23077 0.88889 0.95 OC(Overlap coefficient) None 0.85714 0.5 None OOC(Otsuka-Ochiai coefficient) None 0.56695 0.40825 None OP(Optimized precision) None 0.11667 0.37308 -0.05 P(Condition positive or support) 0 16 3 1 PLR(Positive likelihood ratio) None 1.5 5.66667 None PLRI(Positive likelihood ratio interpretation) None Poor Fair None POP(Population) 20 20 20 20 PPV(Precision or positive predictive value) 0.0 0.85714 0.5 None PRE(Prevalence) 0.0 0.8 0.15 0.05 Q(Yule Q - coefficient of colligation) None 0.28571 0.77778 None RACC(Random accuracy) 0.0 0.28 0.015 0.0 RACCU(Random accuracy unbiased) 0.07563 0.33062 0.01562 0.00063 TN(True negative/correct rejection) 9 3 16 19 TNR(Specificity or true negative rate) 0.45 0.75 0.94118 1.0 TON(Test outcome negative) 9 13 18 20 TOP(Test outcome positive) 11 7 2 0 TP(True positive/hit) 0 6 1 0 TPR(Sensitivity, recall, hit rate, or true positive rate) None 0.375 0.33333 0.0 Y(Youden index) None 0.125 0.27451 0.0 dInd(Distance index) None 0.67315 0.66926 1.0 sInd(Similarity index) None 0.52401 0.52676 0.29289 <BLANKLINE> >>> cm_stat_file=ConfusionMatrix(file=open("test_stat.obj","r")) >>> 
cm_no_vectors_file=ConfusionMatrix(file=open("test_no_vectors.obj","r")) >>> cm_stat_file==cm_file True >>> cm_no_vectors_file==cm_file True >>> cm_no_vectors_dict = json.load(open("test_no_vectors.obj","r")) >>> cm_no_vectors_dict["Actual-Vector"] == None True >>> cm_no_vectors_dict["Predict-Vector"] == None True >>> cm_stat_dict = json.load(open("test_stat.obj","r")) >>> cm_stat_dict["Class-Stat"]["MCC"] != None True >>> cm_stat_dict["Overall-Stat"]["Overall MCC"] != None True >>> def activation(i): ... if i<0.7: ... return 1 ... else: ... return 0 >>> cm_6 = ConfusionMatrix([0,0,1,0],[0.87,0.34,0.9,0.12],threshold=activation) >>> save_obj=cm_6.save_obj("test2",address=False) >>> save_obj=={'Status': True, 'Message': None} True >>> cm_file_2=ConfusionMatrix(file=open("test2.obj","r")) >>> cm_file_2.print_matrix() Predict 0 1 Actual 0 1 2 1 1 0 >>> y_actu = [2, 0, 2, 2, 0, 1, 1, 2, 2, 0, 1, 2] >>> y_pred = [0, 0, 2, 1, 0, 2, 1, 0, 2, 0, 2, 2] >>> cm = ConfusionMatrix(y_actu, y_pred, sample_weight=[2, 2, 2, 2, 3, 1, 1, 2, 2, 1, 1, 2]) >>> save_obj=cm.save_obj("test3",address=False) >>> save_obj=={'Status': True, 'Message': None} True >>> cm_file_3=ConfusionMatrix(file=open("test3.obj","r")) >>> cm = ConfusionMatrix(y_actu, y_pred, sample_weight=np.array([2, 2, 2, 2, 3, 1, 1, 2, 2, 1, 1, 2])) >>> save_obj=cm.save_obj("test3_np",address=False) >>> save_obj=={'Status': True, 'Message': None} True >>> cm_file_3_np=ConfusionMatrix(file=open("test3_np.obj","r")) >>> cm_file_3_np == cm_file_3 True >>> cm_file_3.print_matrix() Predict 0 1 2 Actual 0 6 0 0 1 0 1 2 2 4 2 6 <BLANKLINE> >>> cm_file_3.stat() Overall Statistics : <BLANKLINE> 95% CI (0.41134,0.82675) ACC Macro 0.74603 AUNP 0.7 AUNU 0.70556 Bennett S 0.42857 CBA 0.47778 Chi-Squared 10.44167 Chi-Squared DF 4 Conditional Entropy 0.96498 Cramer V 0.49861 Cross Entropy 1.50249 F1 Macro 0.56111 F1 Micro 0.61905 Gwet AC1 0.45277 Hamming Loss 0.38095 Joint Entropy 2.34377 KL Divergence 0.1237 Kappa 0.3913 Kappa 95% CI 
(0.05943,0.72318) Kappa No Prevalence 0.2381 Kappa Standard Error 0.16932 Kappa Unbiased 0.37313 Lambda A 0.22222 Lambda B 0.36364 Mutual Information 0.47618 NIR 0.57143 Overall ACC 0.61905 Overall CEN 0.43947 Overall J (1.22857,0.40952) Overall MCC 0.41558 Overall MCEN 0.50059 Overall RACC 0.37415 Overall RACCU 0.39229 P-Value 0.41709 PPV Macro 0.56111 PPV Micro 0.61905 Pearson C 0.57628 Phi-Squared 0.49722 RCI 0.34536 RR 7.0 Reference Entropy 1.37878 Response Entropy 1.44117 SOA1(Landis & Koch) Fair SOA2(Fleiss) Poor SOA3(Altman) Fair SOA4(Cicchetti) Poor SOA5(Cramer) Relatively Strong SOA6(Matthews) Weak Scott PI 0.37313 Standard Error 0.10597 TPR Macro 0.61111 TPR Micro 0.61905 Zero-one Loss 8 <BLANKLINE> Class Statistics : <BLANKLINE> Classes 0 1 2 ACC(Accuracy) 0.80952 0.80952 0.61905 AGF(Adjusted F-score) 0.90694 0.54433 0.55442 AGM(Adjusted geometric mean) 0.80509 0.70336 0.66986 AM(Difference between automatic and manual classification) 4 0 -4 AUC(Area under the ROC curve) 0.86667 0.61111 0.63889 AUCI(AUC value interpretation) Very Good Fair Fair AUPR(Area under the PR curve) 0.8 0.33333 0.625 BCD(Bray-Curtis dissimilarity) 0.09524 0.0 0.09524 BM(Informedness or bookmaker informedness) 0.73333 0.22222 0.27778 CEN(Confusion entropy) 0.25 0.52832 0.56439 DOR(Diagnostic odds ratio) None 4.0 3.5 DP(Discriminant power) None 0.33193 0.29996 DPI(Discriminant power interpretation) None Poor Poor ERR(Error rate) 0.19048 0.19048 0.38095 F0.5(F0.5 score) 0.65217 0.33333 0.68182 F1(F1 score - harmonic mean of precision and sensitivity) 0.75 0.33333 0.6 F2(F2 score) 0.88235 0.33333 0.53571 FDR(False discovery rate) 0.4 0.66667 0.25 FN(False negative/miss/type 2 error) 0 2 6 FNR(Miss rate or false negative rate) 0.0 0.66667 0.5 FOR(False omission rate) 0.0 0.11111 0.46154 FP(False positive/type 1 error/false alarm) 4 2 2 FPR(Fall-out or false positive rate) 0.26667 0.11111 0.22222 G(G-measure geometric mean of precision and sensitivity) 0.7746 0.33333 0.61237 GI(Gini 
index) 0.73333 0.22222 0.27778 GM(G-mean geometric mean of specificity and sensitivity) 0.85635 0.54433 0.62361 IBA(Index of balanced accuracy) 0.92889 0.13169 0.28086 IS(Information score) 1.07039 1.22239 0.39232 J(Jaccard index) 0.6 0.2 0.42857 LS(Lift score) 2.1 2.33333 1.3125 MCC(Matthews correlation coefficient) 0.66332 0.22222 0.28307 MCCI(Matthews correlation coefficient interpretation) Moderate Negligible Negligible MCEN(Modified confusion entropy) 0.26439 0.52877 0.65924 MK(Markedness) 0.6 0.22222 0.28846 N(Condition negative) 15 18 9 NLR(Negative likelihood ratio) 0.0 0.75 0.64286 NLRI(Negative likelihood ratio interpretation) Good Negligible Negligible NPV(Negative predictive value) 1.0 0.88889 0.53846 OC(Overlap coefficient) 1.0 0.33333 0.75 OOC(Otsuka-Ochiai coefficient) 0.7746 0.33333 0.61237 OP(Optimized precision) 0.65568 0.35498 0.40166 P(Condition positive or support) 6 3 12 PLR(Positive likelihood ratio) 3.75 3.0 2.25 PLRI(Positive likelihood ratio interpretation) Poor Poor Poor POP(Population) 21 21 21 PPV(Precision or positive predictive value) 0.6 0.33333 0.75 PRE(Prevalence) 0.28571 0.14286 0.57143 Q(Yule Q - coefficient of colligation) None 0.6 0.55556 RACC(Random accuracy) 0.13605 0.02041 0.21769 RACCU(Random accuracy unbiased) 0.14512 0.02041 0.22676 TN(True negative/correct rejection) 11 16 7 TNR(Specificity or true negative rate) 0.73333 0.88889 0.77778 TON(Test outcome negative) 11 18 13 TOP(Test outcome positive) 10 3 8 TP(True positive/hit) 6 1 6 TPR(Sensitivity, recall, hit rate, or true positive rate) 1.0 0.33333 0.5 Y(Youden index) 0.73333 0.22222 0.27778 dInd(Distance index) 0.26667 0.67586 0.54716 sInd(Similarity index) 0.81144 0.52209 0.6131 >>> cm = ConfusionMatrix(matrix={1:{1:13182,2:30516},2:{1:5108,2:295593}},transpose=True) # Verified Case >>> save_obj = cm.save_obj("test4",address=False) >>> save_obj=={'Status': True, 'Message': None} True >>> save_obj = cm.save_obj("/asdasd,qweqwe.eo/",address=False) >>> 
save_obj=={'Status': False, 'Message': "[Errno 2] No such file or directory: '/asdasd,qweqwe.eo/.obj'"} True >>> cm_file=ConfusionMatrix(file=open("test4.obj","r")) >>> cm_file.DP[1] 0.770700985610517 >>> cm_file.Y[1] 0.627145631592811 >>> cm_file.BM[1] 0.627145631592811 >>> cm_file.transpose True >>> cm.matrix == cm_file.matrix True >>> cm.normalized_matrix == cm_file.normalized_matrix True >>> json.dump({"Actual-Vector": None, "Digit": 5, "Predict-Vector": None, "Matrix": {"0": {"0": 3, "1": 0, "2": 2}, "1": {"0": 0, "1": 1, "2": 1}, "2": {"0": 0, "1": 2, "2": 3}}, "Transpose": True,"Sample-Weight": None},open("test5.obj","w")) >>> cm_file=ConfusionMatrix(file=open("test5.obj","r")) >>> cm_file.transpose True >>> cm_file.matrix == {"0": {"0": 3, "1": 0, "2": 2}, "1": {"0": 0, "1": 1, "2": 1}, "2": {"0": 0, "1": 2, "2": 3}} True >>> json.dump({"Actual-Vector": None, "Digit": 5, "Predict-Vector": None, "Matrix": {"0": {"0": 3, "1": 0, "2": 2}, "1": {"0": 0, "1": 1, "2": 1}, "2": {"0": 0, "1": 2, "2": 3}}},open("test6.obj","w")) >>> cm_file=ConfusionMatrix(file=open("test6.obj","r")) >>> cm_file.weights >>> cm_file.transpose False >>> cm_file.matrix == {'1': {'1': 1, '2': 1, '0': 0}, '2': {'1': 2, '2': 3, '0': 0}, '0': {'1': 0, '2': 2, '0': 3}} True >>> json.dump({"Actual-Vector": ['1', '1', '2', '2', '2', '2', '2', '0', '0', '0', '0', '0'], "Digit": 5, "Predict-Vector": ['1', '2', '1', '1', '2', '2', '2', '2', '2', '0', '0', '0'], "Matrix": {"0": {"0": 3, "1": 0, "2": 2}, "1": {"0": 0, "1": 1, "2": 1}, "2": {"0": 0, "1": 2, "2": 3}}},open("test7.obj","w")) >>> cm_file=ConfusionMatrix(file=open("test7.obj","r")) >>> cm_file.weights >>> cm_file.transpose False >>> cm_file.matrix == {'1': {'1': 1, '2': 1, '0': 0}, '2': {'1': 2, '2': 3, '0': 0}, '0': {'1': 0, '2': 2, '0': 3}} True >>> cm_file.actual_vector == ['1', '1', '2', '2', '2', '2', '2', '0', '0', '0', '0', '0'] True >>> cm_file.predict_vector == ['1', '2', '1', '1', '2', '2', '2', '2', '2', '0', '0', '0'] True 
>>> cm_comp1 = ConfusionMatrix(matrix={0:{0:2,1:50,2:6},1:{0:5,1:50,2:3},2:{0:1,1:7,2:50}}) >>> cm_comp2 = ConfusionMatrix(matrix={0:{0:50,1:2,2:6},1:{0:50,1:5,2:3},2:{0:1,1:55,2:2}}) >>> cp = Compare({"model1":cm_comp1,"model2":cm_comp2}) >>> save_report = cp.save_report("test",address=False) >>> save_report == {'Status': True, 'Message': None} True >>> save_report = cp.save_report("/asdasd,qweqwe.eo/",address=False) >>> save_report == {'Status': False, 'Message': "[Errno 2] No such file or directory: '/asdasd,qweqwe.eo/.comp'"} True >>> os.remove("test.csv") >>> os.remove("test_matrix.csv") >>> os.remove("test_normalized.csv") >>> os.remove("test_normalized_matrix.csv") >>> os.remove("test.obj") >>> os.remove("test_stat.obj") >>> os.remove("test_no_vectors.obj") >>> os.remove("test.html") >>> os.remove("test_normalized.html") >>> os.remove("test_filtered.html") >>> os.remove("test_filtered.csv") >>> os.remove("test_filtered_matrix.csv") >>> os.remove("test_filtered.pycm") >>> os.remove("test_large.pycm") >>> os.remove("test_summary.pycm") >>> os.remove("test_filtered2.html") >>> os.remove("test_filtered3.html") >>> os.remove("test_filtered4.html") >>> os.remove("test_filtered5.html") >>> os.remove("test_long_name.html") >>> os.remove("test_alt.html") >>> os.remove("test_summary.html") >>> os.remove("test_colored.html") >>> os.remove("test_colored2.html") >>> os.remove("test_filtered2.csv") >>> os.remove("test_filtered3.csv") >>> os.remove("test_filtered4.csv") >>> os.remove("test_summary.csv") >>> os.remove("test_filtered2.pycm") >>> os.remove("test_filtered3.pycm") >>> os.remove("test2.obj") >>> os.remove("test3.obj") >>> os.remove("test3_np.obj") >>> os.remove("test4.obj") >>> os.remove("test5.obj") >>> os.remove("test6.obj") >>> os.remove("test7.obj") >>> os.remove("test.pycm") >>> os.remove("test.comp") | 2.410646 | 2 |
ContentGrabbing/ContentChemanager.py | JTischbein/KGL-ChemicalCompounds-CC | 0 | 6624992 | <reponame>JTischbein/KGL-ChemicalCompounds-CC
# Crawling for content of articles from chemanager
from configparser import ConfigParser
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.service import Service
from tqdm import tqdm
import sys
sys.path.append('../')
from Database import Database
db = Database('../config.ini').connect()
# get all links from chemamager which are actually working
data = db.execute("SELECT link FROM articles WHERE link LIKE 'https://www.chemanager-online.com/%' AND release_date IS NOT NULL")
config = ConfigParser()
config.read("../config.ini")
CHROMEDRIVER_PATH = config["SELENIUM"]["DRIVERTHEYS"]
s = Service(CHROMEDRIVER_PATH)
driver = webdriver.Chrome(service = s)
i = 0
for link in tqdm(data):
i += 1
driver.get(link[0])
# Article library does not work here properly. We have to extract the text from the div manuall
div = driver.find_element(By.XPATH, "//div[@class='paragraph paragraph--type--text paragraph--view-mode--default']")
text = div.text
db.execute("UPDATE articles SET content = %s WHERE link = %s", (text, link))
driver.quit()
| # Crawling for content of articles from chemanager
from configparser import ConfigParser
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.service import Service
from tqdm import tqdm
import sys
sys.path.append('../')
from Database import Database
db = Database('../config.ini').connect()
# get all links from chemamager which are actually working
data = db.execute("SELECT link FROM articles WHERE link LIKE 'https://www.chemanager-online.com/%' AND release_date IS NOT NULL")
config = ConfigParser()
config.read("../config.ini")
CHROMEDRIVER_PATH = config["SELENIUM"]["DRIVERTHEYS"]
s = Service(CHROMEDRIVER_PATH)
driver = webdriver.Chrome(service = s)
i = 0
for link in tqdm(data):
i += 1
driver.get(link[0])
# Article library does not work here properly. We have to extract the text from the div manuall
div = driver.find_element(By.XPATH, "//div[@class='paragraph paragraph--type--text paragraph--view-mode--default']")
text = div.text
db.execute("UPDATE articles SET content = %s WHERE link = %s", (text, link))
driver.quit() | en | 0.957699 | # Crawling for content of articles from chemanager # get all links from chemamager which are actually working # Article library does not work here properly. We have to extract the text from the div manuall | 2.772903 | 3 |
# cmptree.py
#!/usr/bin/env python
from __future__ import print_function
import argparse
import filecmp
import os
import sys
class TreePath(object):
    """A file path found while comparing two directory trees.

    Each instance records which side of the comparison the path came
    from: only the left tree, only the right tree, or both trees.
    """

    # constants identifying which tree(s) the path was found in
    LEFT = 1
    RIGHT = 2
    BOTH = 3

    def __init__(self, pathsrc, dirpath, filename):
        """Create a path entry.

        `pathsrc` should be one of LEFT, RIGHT, or BOTH.
        `dirpath` is the directory portion of the path and
        `filename` is the file name within that directory.
        """
        self.__pathsrc = pathsrc
        self.__dirpath = dirpath
        self.__filename = filename

    def __str__(self):
        "Return the joined directory/filename path"
        return os.path.join(self.__dirpath, self.__filename)

    def __repr__(self):
        if self.__pathsrc == self.LEFT:
            clsname = "LeftPath"
        elif self.__pathsrc == self.RIGHT:
            clsname = "RightPath"
        elif self.__pathsrc == self.BOTH:
            clsname = "BothPath"
        else:
            # fall back to the base class name instead of raising
            # UnboundLocalError for an unrecognized source value
            clsname = "TreePath"
        return "%s(%s, %s)" % (clsname, self.__dirpath, self.__filename)

    @property
    def directory(self):
        "Directory portion of this path"
        return self.__dirpath

    @property
    def filename(self):
        "File name portion of this path"
        return self.__filename

    @property
    def is_both(self):
        "True if this path was found in both trees"
        return self.__pathsrc == self.BOTH

    @property
    def is_left(self):
        "True if this path was found only in the left tree"
        return self.__pathsrc == self.LEFT

    @property
    def is_right(self):
        "True if this path was found only in the right tree"
        return self.__pathsrc == self.RIGHT
class BothPath(TreePath):
    """A path which appears in both directory trees."""

    def __init__(self, dirpath, filename):
        super(BothPath, self).__init__(self.BOTH, dirpath, filename)
class LeftPath(TreePath):
    """A path which appears only in the left directory tree."""

    def __init__(self, dirpath, filename):
        super(LeftPath, self).__init__(self.LEFT, dirpath, filename)
class RightPath(TreePath):
    """A path which appears only in the right directory tree."""

    def __init__(self, dirpath, filename):
        super(RightPath, self).__init__(self.RIGHT, dirpath, filename)
class CompareTrees(object):
    """Compare two directory trees, tracking added/changed/deleted files."""

    # directory/file names ignored entirely during the comparison
    IGNORE = [".git", ".gitignore", ".gitmodules", ".hg", ".hgignore",
              ".svn", "target"]
    # file extensions whose files are skipped during the comparison
    IGNORE_EXT = [".pyc", ".class"]
def __init__(self, left_dir, right_dir, ignore_empty_directories=False):
self.__left_dir = left_dir
self.__right_dir = right_dir
self.__ignore_empty_dirs = ignore_empty_directories
self.__compared = False
self.__changed = None
self.__added = None
self.__deleted = None
self.__modified_dict = None
def __add_to_list(self, filelist, value):
if filelist is None:
filelist = []
filelist.append(value)
return filelist
def __compare_trees(self, dcmp=None, depth=99, debug=False):
self.__compared = True
# clear previously cached values
self.__modified_dict = None
# if we haven't compared anything, build the comparison object
if dcmp is None:
dcmp = filecmp.dircmp(self.__left_dir, self.__right_dir,
ignore=self.IGNORE)
else:
if debug:
print("~~~ (%d: L%d/R%d/D%d/F%d) %s" %
(depth, len(dcmp.left_only), len(dcmp.right_only),
len(dcmp.diff_files), len(dcmp.funny_files), dcmp.left))
sys.stdout.flush()
left_subdir = self.__extract_subdir(dcmp.left, self.__left_dir)
right_subdir = self.__extract_subdir(dcmp.right, self.__right_dir)
if left_subdir != right_subdir:
print("Expected left path \"%s\" to match right path \"%s\"" %
(left_subdir, right_subdir), file=sys.stderr)
# build lists of added/changed/deleted files
for name in dcmp.right_only:
_, ext = os.path.splitext(name)
if ext in self.IGNORE_EXT:
continue
if self.__ignore_empty_dirs and \
self.__is_empty_dir(os.path.join(dcmp.right, name)):
continue
if debug:
print(" ++ %s" % name)
self.__added = self.__add_to_list(self.__added,
RightPath(right_subdir, name))
for name in dcmp.diff_files:
_, ext = os.path.splitext(name)
if ext in self.IGNORE_EXT:
continue
if debug:
print(" ** %s" % name)
self.__changed = \
self.__add_to_list(self.__changed, BothPath(left_subdir, name))
for name in dcmp.left_only:
_, ext = os.path.splitext(name)
if ext in self.IGNORE_EXT:
continue
if self.__ignore_empty_dirs and \
self.__is_empty_dir(os.path.join(dcmp.left, name)):
continue
if debug:
print(" -- %s" % name)
self.__deleted = self.__add_to_list(self.__deleted,
LeftPath(left_subdir, name))
if depth > 0:
# check subdirectories
for subcmp in dcmp.subdirs.values():
self.__compare_trees(subcmp, depth=depth-1, debug=debug)
return dcmp
def __extract_subdir(self, full_path, base_path):
plen = len(base_path)
if not full_path.startswith(base_path) or \
(len(full_path) > plen and full_path[plen] != os.path.sep):
raise Exception("Expected directory \"%s\" to start with \"%s\"" %
(full_path, base_path))
return full_path[plen+1:]
def __is_empty_dir(self, path):
if os.path.isdir(path):
for _ in os.listdir(path):
return True
return False
def __len(self, filelist):
return 0 if filelist is None else len(filelist)
@property
def added(self):
if not self.__compared:
self.__compare_trees()
for value in self.__added:
yield value
@property
def changed(self):
if not self.__compared:
self.__compare_trees()
for value in self.__changed:
yield value
def compare(self, debug=False):
if not self.__compared:
self.__compare_trees(debug=debug)
@property
def deleted(self):
if not self.__compared:
self.__compare_trees()
for value in self.__deleted:
yield value
@property
def is_modified(self):
if not self.__compared:
self.__compare_trees()
return self.__added is not None or \
self.__changed is not None or \
self.__deleted is not None
@property
def modified_trees(self):
if self.__modified_dict is None:
if not self.__compared:
self.__compare_trees()
topdict = {}
for treelist in self.__added, self.__deleted, self.__changed:
if treelist is None:
continue
for tree_obj in treelist:
try:
topdir, _ = tree_obj.directory.split(os.path.sep, 1)
except:
topdir = tree_obj.directory
if topdir not in topdict:
topdict[topdir] = 1
else:
topdict[topdir] += 1
self.__modified_dict = topdict
return self.__modified_dict
def add_arguments(parser):
    "Add command-line arguments"
    parser.add_argument("-l", "--left-path", dest="left_path", default=None,
                        help="Left path to compare")
    parser.add_argument("-r", "--right-path", dest="right_path", default=None,
                        help="Right path to compare")
    # boolean flags share identical store_true/default=False plumbing
    for short_opt, long_opt, dest, help_text in (
            ("-v", "--verbose", "verbose", "Print details"),
            ("-x", "--debug", "debug", "Print debugging messages"),
            ("-X", "--extra-verbose", "command_verbose",
             "Print command output"),
    ):
        parser.add_argument(short_opt, long_opt, dest=dest,
                            action="store_true", default=False,
                            help=help_text)
def _resolve_path(arg_path, fallback):
    """Return `fallback` when `arg_path` is None, otherwise `arg_path`
    with a single trailing path separator removed."""
    if arg_path is None:
        return fallback
    if arg_path.endswith(os.path.sep):
        return arg_path[:-1]
    return arg_path


def main():
    """Compare two trees and report the modified top-level subdirectories."""
    parser = argparse.ArgumentParser()
    add_arguments(parser)
    args = parser.parse_args()

    # NOTE(review): these defaults look like developer-specific scratch
    # paths; consider requiring -l/-r instead
    left_path = _resolve_path(args.left_path,
                              "/home/dglo/prj/pdaq-git/svn_tools/xxx/pdaq")
    right_path = _resolve_path(args.right_path,
                               "/home/dglo/prj/pDAQ_Urban_Harvest9")

    treecmp = CompareTrees(left_path, right_path)
    treecmp.compare(debug=args.debug)
    if treecmp.is_modified:
        print("Found differences between \"%s\" and \"%s\"" %
              (left_path, right_path))
        # least-changed trees are reported first
        for name, count in sorted(treecmp.modified_trees.items(),
                                  key=lambda x: x[1]):
            print("%s*%d" % (name, count))
if __name__ == "__main__":
main()
| #!/usr/bin/env python
from __future__ import print_function
import argparse
import filecmp
import os
import sys
class TreePath(object):
    """A relative file path tagged with the tree(s) it was found in."""

    # source markers: found only in the left tree, only in the right tree,
    # or present in both trees
    LEFT = 1
    RIGHT = 2
    BOTH = 3

    def __init__(self, pathsrc, dirpath, filename):
        """Remember the source marker, directory and filename."""
        self.__pathsrc = pathsrc
        self.__dirpath = dirpath
        self.__filename = filename

    def __str__(self):
        return os.path.join(self.__dirpath, self.__filename)

    def __repr__(self):
        if self.__pathsrc == self.LEFT:
            clsname = "LeftPath"
        elif self.__pathsrc == self.RIGHT:
            clsname = "RightPath"
        elif self.__pathsrc == self.BOTH:
            clsname = "BothPath"
        else:
            # Bugfix: an unrecognized source previously left `clsname`
            # unbound and raised UnboundLocalError here
            clsname = "UnknownPath<%s>" % str(self.__pathsrc)
        return "%s(%s, %s)" % (clsname, self.__dirpath, self.__filename)

    @property
    def directory(self):
        """Directory portion of this path."""
        return self.__dirpath

    @property
    def filename(self):
        """Filename portion of this path."""
        return self.__filename

    @property
    def is_both(self):
        """True if this path appears in both trees."""
        return self.__pathsrc == self.BOTH

    @property
    def is_left(self):
        """True if this path appears only in the left tree."""
        return self.__pathsrc == self.LEFT

    @property
    def is_right(self):
        """True if this path appears only in the right tree."""
        return self.__pathsrc == self.RIGHT
class BothPath(TreePath):
    """A path present in both trees (whose contents differ)."""

    def __init__(self, dirpath, filename):
        TreePath.__init__(self, TreePath.BOTH, dirpath, filename)
class LeftPath(TreePath):
    """A path found only in the left tree."""

    def __init__(self, dirpath, filename):
        TreePath.__init__(self, TreePath.LEFT, dirpath, filename)
class RightPath(TreePath):
    """A path found only in the right tree."""

    def __init__(self, dirpath, filename):
        TreePath.__init__(self, TreePath.RIGHT, dirpath, filename)
class CompareTrees(object):
    """Recursively compare two directory trees and categorize all differences.

    Entries found only in the right tree are "added", entries found only in
    the left tree are "deleted", and entries present in both trees with
    differing content are "changed".
    """

    # directory/file names which are never compared (VCS metadata, build output)
    IGNORE = [".git", ".gitignore", ".gitmodules", ".hg", ".hgignore",
              ".svn", "target"]
    # file extensions which are never compared (compiled artifacts)
    IGNORE_EXT = [".pyc", ".class"]

    def __init__(self, left_dir, right_dir, ignore_empty_directories=False):
        """Remember the two top-level directories; comparison happens lazily."""
        self.__left_dir = left_dir
        self.__right_dir = right_dir
        self.__ignore_empty_dirs = ignore_empty_directories

        self.__compared = False

        # lazily-built lists of TreePath objects (None until populated)
        self.__changed = None
        self.__added = None
        self.__deleted = None

        # cached result of the `modified_trees` property
        self.__modified_dict = None

    def __add_to_list(self, filelist, value):
        """Append `value` to `filelist`, creating the list if necessary."""
        if filelist is None:
            filelist = []
        filelist.append(value)
        return filelist

    def __compare_trees(self, dcmp=None, depth=99, debug=False):
        """Walk a filecmp.dircmp tree, accumulating added/changed/deleted."""
        self.__compared = True

        # clear previously cached values
        self.__modified_dict = None

        # if we haven't compared anything, build the comparison object
        if dcmp is None:
            dcmp = filecmp.dircmp(self.__left_dir, self.__right_dir,
                                  ignore=self.IGNORE)
        elif debug:
            print("~~~ (%d: L%d/R%d/D%d/F%d) %s" %
                  (depth, len(dcmp.left_only), len(dcmp.right_only),
                   len(dcmp.diff_files), len(dcmp.funny_files), dcmp.left))
            sys.stdout.flush()

        left_subdir = self.__extract_subdir(dcmp.left, self.__left_dir)
        right_subdir = self.__extract_subdir(dcmp.right, self.__right_dir)
        if left_subdir != right_subdir:
            print("Expected left path \"%s\" to match right path \"%s\"" %
                  (left_subdir, right_subdir), file=sys.stderr)

        # build lists of added/changed/deleted files
        for name in dcmp.right_only:
            if self.__ignore_name(name):
                continue
            if self.__ignore_empty_dirs and \
              self.__is_empty_dir(os.path.join(dcmp.right, name)):
                continue
            if debug:
                print("  ++ %s" % name)
            self.__added = self.__add_to_list(self.__added,
                                              RightPath(right_subdir, name))

        for name in dcmp.diff_files:
            if self.__ignore_name(name):
                continue
            if debug:
                print("  ** %s" % name)
            self.__changed = \
                self.__add_to_list(self.__changed, BothPath(left_subdir, name))

        for name in dcmp.left_only:
            if self.__ignore_name(name):
                continue
            if self.__ignore_empty_dirs and \
              self.__is_empty_dir(os.path.join(dcmp.left, name)):
                continue
            if debug:
                print("  -- %s" % name)
            self.__deleted = self.__add_to_list(self.__deleted,
                                                LeftPath(left_subdir, name))

        if depth > 0:
            # check subdirectories
            for subcmp in dcmp.subdirs.values():
                self.__compare_trees(subcmp, depth=depth-1, debug=debug)

        return dcmp

    def __extract_subdir(self, full_path, base_path):
        """Return `full_path` relative to `base_path` (no leading separator)."""
        plen = len(base_path)
        if not full_path.startswith(base_path) or \
          (len(full_path) > plen and full_path[plen] != os.path.sep):
            raise Exception("Expected directory \"%s\" to start with \"%s\"" %
                            (full_path, base_path))
        return full_path[plen+1:]

    def __ignore_name(self, name):
        """Return True if `name` has an extension which is never compared."""
        _, ext = os.path.splitext(name)
        return ext in self.IGNORE_EXT

    def __is_empty_dir(self, path):
        """Return True if `path` is a directory containing no entries.

        Bugfix: this previously returned True for a NON-empty directory
        (and False for an empty one), inverting the effect of the
        `ignore_empty_directories` option.
        """
        return os.path.isdir(path) and not os.listdir(path)

    @property
    def added(self):
        """Generate paths found only in the right tree."""
        if not self.__compared:
            self.__compare_trees()
        # Bugfix: guard against None so "no additions" yields nothing
        # instead of raising TypeError
        for value in (self.__added or []):
            yield value

    @property
    def changed(self):
        """Generate paths present in both trees whose contents differ."""
        if not self.__compared:
            self.__compare_trees()
        for value in (self.__changed or []):
            yield value

    def compare(self, debug=False):
        """Run the comparison now (it is normally triggered lazily)."""
        if not self.__compared:
            self.__compare_trees(debug=debug)

    @property
    def deleted(self):
        """Generate paths found only in the left tree."""
        if not self.__compared:
            self.__compare_trees()
        for value in (self.__deleted or []):
            yield value

    @property
    def is_modified(self):
        """True if the trees differ in any way."""
        if not self.__compared:
            self.__compare_trees()
        return self.__added is not None or \
          self.__changed is not None or \
          self.__deleted is not None

    @property
    def modified_trees(self):
        """Map each modified top-level subdirectory to its change count."""
        if self.__modified_dict is None:
            if not self.__compared:
                self.__compare_trees()

            topdict = {}
            for treelist in self.__added, self.__deleted, self.__changed:
                if treelist is None:
                    continue
                for tree_obj in treelist:
                    try:
                        topdir, _ = tree_obj.directory.split(os.path.sep, 1)
                    except ValueError:
                        # no separator: the entry lives at the top level
                        topdir = tree_obj.directory
                    topdict[topdir] = topdict.get(topdir, 0) + 1
            self.__modified_dict = topdict

        return self.__modified_dict
def add_arguments(parser):
    "Add command-line arguments"
    parser.add_argument("-l", "--left-path", dest="left_path", default=None,
                        help="Left path to compare")
    parser.add_argument("-r", "--right-path", dest="right_path", default=None,
                        help="Right path to compare")
    # boolean flags share identical store_true/default=False plumbing
    for short_opt, long_opt, dest, help_text in (
            ("-v", "--verbose", "verbose", "Print details"),
            ("-x", "--debug", "debug", "Print debugging messages"),
            ("-X", "--extra-verbose", "command_verbose",
             "Print command output"),
    ):
        parser.add_argument(short_opt, long_opt, dest=dest,
                            action="store_true", default=False,
                            help=help_text)
def _resolve_path(arg_path, fallback):
    """Return `fallback` when `arg_path` is None, otherwise `arg_path`
    with a single trailing path separator removed."""
    if arg_path is None:
        return fallback
    if arg_path.endswith(os.path.sep):
        return arg_path[:-1]
    return arg_path


def main():
    """Compare two trees and report the modified top-level subdirectories."""
    parser = argparse.ArgumentParser()
    add_arguments(parser)
    args = parser.parse_args()

    # NOTE(review): these defaults look like developer-specific scratch
    # paths; consider requiring -l/-r instead
    left_path = _resolve_path(args.left_path,
                              "/home/dglo/prj/pdaq-git/svn_tools/xxx/pdaq")
    right_path = _resolve_path(args.right_path,
                               "/home/dglo/prj/pDAQ_Urban_Harvest9")

    treecmp = CompareTrees(left_path, right_path)
    treecmp.compare(debug=args.debug)
    if treecmp.is_modified:
        print("Found differences between \"%s\" and \"%s\"" %
              (left_path, right_path))
        # least-changed trees are reported first
        for name, count in sorted(treecmp.modified_trees.items(),
                                  key=lambda x: x[1]):
            print("%s*%d" % (name, count))
if __name__ == "__main__":
main() | en | 0.768977 | #!/usr/bin/env python # clear previously cached values # if we haven't compared anything, build the comparison object # build lists of added/changed/deleted files # check subdirectories | 2.996156 | 3 |
var/spack/repos/builtin/packages/minife/package.py | kkauder/spack | 2 | 6624994 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Minife(MakefilePackage):
    """Proxy Application. MiniFE is a proxy application
    for unstructured implicit finite element codes.
    """

    homepage = "https://mantevo.org/"
    url = "https://github.com/Mantevo/miniFE/archive/v2.1.0.tar.gz"
    tags = ['proxy-app', 'ecp-proxy-app']

    version('2.1.0', sha256='59f4c56d73d2a758cba86939db2d36e12705282cb4174ce78223d984527f5d15')

    variant('build', default='ref', description='Type of Parallelism',
            values=('ref', 'openmp_ref', 'qthreads', 'kokkos'))

    depends_on('mpi')
    depends_on('qthreads', when='build=qthreads')

    @property
    def build_targets(self):
        """Build inside the selected variant's src directory with MPI wrappers."""
        build_dir = self.spec.variants['build'].value
        mpi = self.spec['mpi']
        return [
            '--directory={0}/src'.format(build_dir),
            'CXX={0}'.format(mpi.mpicxx),
            'CC={0}'.format(mpi.mpicc),
        ]

    def edit(self, spec, prefix):
        """Swap the hard-coded -fopenmp flag for the active compiler's OpenMP flag."""
        build_dir = self.spec.variants['build'].value
        makefile = FileFilter('{0}/src/Makefile'.format(build_dir))
        makefile.filter('-fopenmp', self.compiler.openmp_flag, string=True)

    def install(self, spec, prefix):
        """Copy the built miniFE.x binary into the installation prefix."""
        mkdirp(prefix.bin)
        build_dir = self.spec.variants['build'].value
        install('{0}/src/miniFE.x'.format(build_dir), prefix.bin)
| # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Minife(MakefilePackage):
    """Proxy Application. MiniFE is a proxy application
    for unstructured implicit finite element codes.
    """

    homepage = "https://mantevo.org/"
    url = "https://github.com/Mantevo/miniFE/archive/v2.1.0.tar.gz"
    tags = ['proxy-app', 'ecp-proxy-app']

    version('2.1.0', sha256='59f4c56d73d2a758cba86939db2d36e12705282cb4174ce78223d984527f5d15')

    variant('build', default='ref', description='Type of Parallelism',
            values=('ref', 'openmp_ref', 'qthreads', 'kokkos'))

    depends_on('mpi')
    depends_on('qthreads', when='build=qthreads')

    @property
    def build_targets(self):
        """Build inside the selected variant's src directory with MPI wrappers."""
        build_dir = self.spec.variants['build'].value
        mpi = self.spec['mpi']
        return [
            '--directory={0}/src'.format(build_dir),
            'CXX={0}'.format(mpi.mpicxx),
            'CC={0}'.format(mpi.mpicc),
        ]

    def edit(self, spec, prefix):
        """Swap the hard-coded -fopenmp flag for the active compiler's OpenMP flag."""
        build_dir = self.spec.variants['build'].value
        makefile = FileFilter('{0}/src/Makefile'.format(build_dir))
        makefile.filter('-fopenmp', self.compiler.openmp_flag, string=True)

    def install(self, spec, prefix):
        """Copy the built miniFE.x binary into the installation prefix."""
        mkdirp(prefix.bin)
        build_dir = self.spec.variants['build'].value
        install('{0}/src/miniFE.x'.format(build_dir), prefix.bin)
| en | 0.697509 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) Proxy Application. MiniFE is an proxy application for unstructured implicit finite element codes. | 1.919336 | 2 |
personal_site/email_util.py | gorel/personal-site | 0 | 6624995 | <gh_stars>0
import flask_mail
from personal_site import mail
def send_email(subject, sender, recipients, text_body, html_body, attachments=None):
    """Build and send a Flask-Mail message, optionally attaching files."""
    message = flask_mail.Message(subject, sender=sender, recipients=recipients)
    message.body = text_body
    message.html = html_body
    for attachment in (attachments or []):
        # each attachment tuple is forwarded verbatim to Message.attach()
        message.attach(*attachment)
    mail.send(message)
| import flask_mail
from personal_site import mail
def send_email(subject, sender, recipients, text_body, html_body, attachments=None):
    """Compose and send an email via Flask-Mail.

    attachments: optional iterable of tuples, each unpacked into Message.attach().
    """
    msg = flask_mail.Message(subject, sender=sender, recipients=recipients)
    msg.body = text_body
    msg.html = html_body
    if attachments:
        for attachment in attachments:
            # each attachment tuple is forwarded verbatim to Message.attach()
            msg.attach(*attachment)
mail.send(msg) | none | 1 | 2.703679 | 3 | |
billing/tests/test_lifecycle_webhook.py | hkhanna/django-stripe-billing | 1 | 6624996 | """Stripe lifecycle webhook functionality. Webhooks where the user has taken
some action in Checkout or Portal are found elsewhere."""
from datetime import timedelta
from unittest.mock import Mock
from freezegun import freeze_time
import pytest
from django.utils import timezone
from django.urls import reverse
from .. import models, factories
@pytest.fixture
def customer():
    """Customer that is coming up for renewal"""
    paying_user = factories.UserFactory(
        paying=True,
        customer__customer_id="cus",
    )
    # Sanity check: the factory must have produced an active paid customer.
    assert paying_user.customer.state == "paid.paying"
    return paying_user.customer
@pytest.fixture
def subscription_event(customer, paid_plan):
    """Return a function that generates a Stripe Event payload with defaults of an active paid subscription."""

    def inner(**kwargs):
        # Every popped name below is deliberately a local variable: the
        # `return locals()` at the bottom exposes them all to callers, and
        # tests index the result by these exact names (e.g. event_json["id"]).
        # NOTE(review): `type` and `id` intentionally shadow builtins so they
        # appear in locals() under those keys -- do not rename.
        _customer = kwargs.pop("customer", customer)
        type = kwargs.pop("type", "customer.subscription.updated")
        id = kwargs.pop("id", _customer.subscription.id)
        customer_id = kwargs.pop("customer_id", _customer.customer_id)
        price_id = kwargs.pop("price_id", paid_plan.price_id)
        cancel_at_period_end = kwargs.pop("cancel_at_period_end", False)
        created = kwargs.pop("created", timezone.now().timestamp())
        status = kwargs.pop("status", "active")
        # Default the period end to the fixture customer's current period end.
        current_period_end = kwargs.pop("current_period_end", None)
        if not current_period_end:
            current_period_end = _customer.current_period_end.timestamp()
        # Shape mirrors a real Stripe `customer.subscription.*` event payload.
        payload = {
            "id": "evt_test",
            "object": "event",
            "type": type,
            "data": {
                "object": {
                    "id": id,
                    "customer": customer_id,
                    "current_period_end": current_period_end,
                    "items": {"data": [{"price": {"id": price_id}}]},
                    "cancel_at_period_end": cancel_at_period_end,
                    "created": created,
                    "status": status,
                }
            },
        }
        # Any keys left over were typos or unsupported options -- fail loudly.
        assert len(kwargs.keys()) == 0, "Unrecognized keys passed to payload fixture."
        return (
            locals()
        )  # For assertion convenience, we pass back all the variables at the top level as well.

    return inner
def test_create_event(client):
    """Create event"""
    webhook_url = reverse("billing:stripe_webhook")
    body = {"id": "evt_test", "object": "event", "type": "test"}
    resp = client.post(webhook_url, body, content_type="application/json")
    assert resp.status_code == 201
    assert models.StripeEvent.objects.count() == 1
def test_bad_json(client):
    """Malformed JSON"""
    webhook_url = reverse("billing:stripe_webhook")
    resp = client.post(webhook_url, "bad json", content_type="application/json")
    # Malformed payloads are rejected and never persisted.
    assert resp.status_code == 400
    assert models.StripeEvent.objects.count() == 0
def test_unrecognized_type(client):
    """Unrecognized event type"""
    webhook_url = reverse("billing:stripe_webhook")
    body = {
        "id": "evt_test",
        "object": "event",
        "type": "bad.type",
        "data": {"object": None},
    }
    resp = client.post(webhook_url, body, content_type="application/json")
    # Unknown event types are accepted but recorded as IGNORED.
    assert resp.status_code == 201
    stored_event = models.StripeEvent.objects.first()
    assert stored_event.status == models.StripeEvent.Status.IGNORED
def test_subscription_event_new_stripe_subscription(
    customer, client, subscription_event
):
    """A Stripe Subscription event payload should correctly create a StripeSubscription."""
    webhook_url = reverse("billing:stripe_webhook")
    event_json = subscription_event()

    # Start from a clean slate: the fixture customer already has a subscription.
    customer.stripesubscription_set.all().delete()
    assert models.StripeSubscription.objects.count() == 0

    resp = client.post(webhook_url, event_json["payload"],
                       content_type="application/json")

    assert resp.status_code == 201
    assert models.StripeEvent.objects.count() == 1
    stored_event = models.StripeEvent.objects.first()
    assert stored_event.status == models.StripeEvent.Status.PROCESSED

    # Exactly one subscription was created, mirroring the payload's fields.
    assert models.StripeSubscription.objects.count() == 1
    subscription = models.StripeSubscription.objects.first()
    for attr in ("id", "price_id", "cancel_at_period_end", "status"):
        assert getattr(subscription, attr) == event_json[attr]
    assert subscription.customer.customer_id == event_json["customer_id"]
    assert subscription.current_period_end.timestamp() == event_json["current_period_end"]
    assert subscription.created.timestamp() == event_json["created"]
def test_subscription_event_update_stripe_subscription(client, subscription_event):
    """A Stripe Subscription event payload should correctly update a StripeSubscription."""
    webhook_url = reverse("billing:stripe_webhook")
    event_attributes = {
        "current_period_end": (timezone.now() + timedelta(days=45)).timestamp(),
        # "price_id": "new_price" -- not available until we can upgrade plans
        "cancel_at_period_end": True,
        "created": timezone.now().timestamp(),
        "status": "past_due",
    }
    event_json = subscription_event(**event_attributes)

    # Ensure event_json is correct
    for key, expected in event_attributes.items():
        assert event_json[key] == expected

    resp = client.post(webhook_url, event_json["payload"],
                       content_type="application/json")

    assert resp.status_code == 201
    assert models.StripeEvent.objects.count() == 1
    stored_event = models.StripeEvent.objects.first()
    assert stored_event.status == models.StripeEvent.Status.PROCESSED

    # The existing subscription was updated in place, not duplicated.
    assert models.StripeSubscription.objects.count() == 1
    subscription = models.StripeSubscription.objects.first()
    for attr in ("id", "price_id", "cancel_at_period_end", "status"):
        assert getattr(subscription, attr) == event_json[attr]
    assert subscription.customer.customer_id == event_json["customer_id"]
    assert subscription.current_period_end.timestamp() == event_json["current_period_end"]
    assert subscription.created.timestamp() == event_json["created"]
def test_link_event_to_user(client, customer, subscription_event):
    """A Stripe Event should be connected to a User."""
    webhook_url = reverse("billing:stripe_webhook")
    resp = client.post(webhook_url, subscription_event()["payload"],
                       content_type="application/json")
    assert resp.status_code == 201
    assert models.StripeEvent.objects.first().user == customer.user
def test_user_not_found(client, mock_stripe_customer, subscription_event):
    """If a user can't be found, error."""
    url = reverse("billing:stripe_webhook")
    # NOTE(review): "<EMAIL>" looks like an anonymization placeholder for a
    # real address (e.g. "nobody@example.com") -- confirm against repo history.
    mock_stripe_customer.retrieve.return_value.email = "<EMAIL>"
    payload = subscription_event(id="sub_new", customer_id="cus_new")["payload"]
    response = client.post(url, payload, content_type="application/json")
    assert response.status_code == 201
    event = models.StripeEvent.objects.first()
    assert event.status == models.StripeEvent.Status.ERROR
    # Fixed: compare to None with `is`, not `==` (PEP 8 / flake8 E711).
    assert event.user is None
    assert "Customer.DoesNotExist" in event.note
def test_persist_customer_id(user, client, mock_stripe_customer, subscription_event):
    """A Customer without a Stripe customer_id gets it set on the first subscription event."""
    mock_stripe_customer.retrieve.return_value.email = user.email
    webhook_url = reverse("billing:stripe_webhook")
    event_json = subscription_event(id="sub_new", customer_id="cus_new")
    assert user.customer.customer_id is None

    resp = client.post(webhook_url, event_json["payload"],
                       content_type="application/json")
    assert resp.status_code == 201

    # The webhook handler stamped the Stripe customer id onto our Customer.
    user.customer.refresh_from_db()
    assert user.customer.customer_id == "cus_new"
def test_subscription_customer_mismatch(
    user, client, subscription_event, mock_stripe_customer
):
    """If a subscription already belongs to a different customer in the database than
    the customer_id reported on the event, something is wrong.

    This could happen if someone changes who the StripeSubscription instance is connected to in the admin."""
    mock_stripe_customer.retrieve.return_value.email = user.email
    webhook_url = reverse("billing:stripe_webhook")
    body = subscription_event(customer_id="cus_different")["payload"]
    resp = client.post(webhook_url, body, content_type="application/json")
    assert resp.status_code == 201
    stored_event = models.StripeEvent.objects.first()
    assert stored_event.status == models.StripeEvent.Status.ERROR
    assert "Integrity error" in stored_event.note
def test_multiple_subscriptions_sync(client, subscription_event, monkeypatch):
    """If a customer has multiple subscriptions, the sync function is only called for the correct one."""
    sync_mock = Mock()
    monkeypatch.setattr(models.StripeSubscription, "sync_to_customer", sync_mock)
    webhook_url = reverse("billing:stripe_webhook")

    # An event for some other subscription must not trigger a sync.
    resp = client.post(webhook_url,
                       subscription_event(id="sub_different", status="past_due")["payload"],
                       content_type="application/json")
    assert resp.status_code == 201
    assert sync_mock.call_count == 0

    # An event for the customer's own subscription triggers exactly one sync.
    resp = client.post(webhook_url,
                       subscription_event(status="past_due")["payload"],
                       content_type="application/json")
    assert resp.status_code == 201
    assert sync_mock.call_count == 1
def test_payment_update_active(
    client, customer, subscription_event, mock_stripe_invoice
):
    """An update to a Subscription's payment method does not do anything if the Subscription is
    active."""
    webhook_url = reverse("billing:stripe_webhook")
    body = subscription_event()["payload"]
    body["data"]["previous_attributes"] = {"default_payment_method": "pm_new"}
    resp = client.post(webhook_url, body, content_type="application/json")
    assert resp.status_code == 201
    stored_event = models.StripeEvent.objects.first()
    assert stored_event.status == models.StripeEvent.Status.PROCESSED
    assert stored_event.user.customer == customer
    # No invoice lookup or retry should happen for an active subscription.
    assert mock_stripe_invoice.list.call_count == 0
    assert mock_stripe_invoice.pay.call_count == 0
@pytest.mark.parametrize("status", ["incomplete", "past_due"])
def test_payment_update_and_retry(
    client, subscription_event, customer, status, mock_stripe_invoice
):
    """An update to a Subscription's payment method when not active automatically retries the last open invoice."""
    mock_stripe_invoice.list.return_value = {
        "data": [{"status": "open", "id": "inv_123"}]
    }
    webhook_url = reverse("billing:stripe_webhook")
    body = subscription_event(status=status)["payload"]
    body["data"]["previous_attributes"] = {"default_payment_method": "pm_new"}
    resp = client.post(webhook_url, body, content_type="application/json")
    assert resp.status_code == 201
    stored_event = models.StripeEvent.objects.first()
    assert stored_event.status == models.StripeEvent.Status.PROCESSED
    assert stored_event.user.customer == customer
    # The open invoice was looked up and retried exactly once.
    assert mock_stripe_invoice.list.call_count == 1
    assert mock_stripe_invoice.pay.call_count == 1
def test_incomplete_expired_cycle(client, user, subscription_event):
    """A StripeSubscription that transitions from incomplete to incomplete_expired should not error."""
    # This is a bugfix that results from ignoring deleted subscriptions when the customer is not on a
    # paid plan. Customers are, of course, not normally on paid plans when their status is incomplete.
    webhook_url = reverse("billing:stripe_webhook")
    incomplete_customer = user.customer
    factories.StripeSubscriptionFactory(
        customer=incomplete_customer, status=models.StripeSubscription.Status.INCOMPLETE
    )
    body = subscription_event(
        customer=incomplete_customer,
        status=models.StripeSubscription.Status.INCOMPLETE_EXPIRED,
        current_period_end=(timezone.now() + timedelta(days=30)).timestamp(),
    )["payload"]
    resp = client.post(webhook_url, body, content_type="application/json")
    assert resp.status_code == 201
    stored_event = models.StripeEvent.objects.first()
    assert stored_event.status == models.StripeEvent.Status.PROCESSED
| """Stripe lifecycle webhook functionality. Webhooks where the user has taken
some action in Checkout or Portal are found elsewhere."""
from datetime import timedelta
from unittest.mock import Mock
from freezegun import freeze_time
import pytest
from django.utils import timezone
from django.urls import reverse
from .. import models, factories
@pytest.fixture
def customer():
    """Customer that is coming up for renewal"""
    paying_user = factories.UserFactory(
        paying=True,
        customer__customer_id="cus",
    )
    # Sanity check: the factory must have produced an active paid customer.
    assert paying_user.customer.state == "paid.paying"
    return paying_user.customer
@pytest.fixture
def subscription_event(customer, paid_plan):
    """Return a function that generates a Stripe Event payload with defaults of an active paid subscription."""

    def inner(**kwargs):
        # Every popped name below is deliberately a local variable: the
        # `return locals()` at the bottom exposes them all to callers, and
        # tests index the result by these exact names (e.g. event_json["id"]).
        # NOTE(review): `type` and `id` intentionally shadow builtins so they
        # appear in locals() under those keys -- do not rename.
        _customer = kwargs.pop("customer", customer)
        type = kwargs.pop("type", "customer.subscription.updated")
        id = kwargs.pop("id", _customer.subscription.id)
        customer_id = kwargs.pop("customer_id", _customer.customer_id)
        price_id = kwargs.pop("price_id", paid_plan.price_id)
        cancel_at_period_end = kwargs.pop("cancel_at_period_end", False)
        created = kwargs.pop("created", timezone.now().timestamp())
        status = kwargs.pop("status", "active")
        # Default the period end to the fixture customer's current period end.
        current_period_end = kwargs.pop("current_period_end", None)
        if not current_period_end:
            current_period_end = _customer.current_period_end.timestamp()
        # Shape mirrors a real Stripe `customer.subscription.*` event payload.
        payload = {
            "id": "evt_test",
            "object": "event",
            "type": type,
            "data": {
                "object": {
                    "id": id,
                    "customer": customer_id,
                    "current_period_end": current_period_end,
                    "items": {"data": [{"price": {"id": price_id}}]},
                    "cancel_at_period_end": cancel_at_period_end,
                    "created": created,
                    "status": status,
                }
            },
        }
        # Any keys left over were typos or unsupported options -- fail loudly.
        assert len(kwargs.keys()) == 0, "Unrecognized keys passed to payload fixture."
        return (
            locals()
        )  # For assertion convenience, we pass back all the variables at the top level as well.

    return inner
def test_create_event(client):
    """Create event"""
    webhook_url = reverse("billing:stripe_webhook")
    body = {"id": "evt_test", "object": "event", "type": "test"}
    resp = client.post(webhook_url, body, content_type="application/json")
    assert resp.status_code == 201
    assert models.StripeEvent.objects.count() == 1
def test_bad_json(client):
    """Malformed JSON"""
    webhook_url = reverse("billing:stripe_webhook")
    resp = client.post(webhook_url, "bad json", content_type="application/json")
    # Malformed payloads are rejected and never persisted.
    assert resp.status_code == 400
    assert models.StripeEvent.objects.count() == 0
def test_unrecognized_type(client):
    """Unrecognized event type"""
    webhook_url = reverse("billing:stripe_webhook")
    body = {
        "id": "evt_test",
        "object": "event",
        "type": "bad.type",
        "data": {"object": None},
    }
    resp = client.post(webhook_url, body, content_type="application/json")
    # Unknown event types are accepted but recorded as IGNORED.
    assert resp.status_code == 201
    stored_event = models.StripeEvent.objects.first()
    assert stored_event.status == models.StripeEvent.Status.IGNORED
def test_subscription_event_new_stripe_subscription(
    customer, client, subscription_event
):
    """A Stripe Subscription event payload should correctly create a StripeSubscription."""
    webhook_url = reverse("billing:stripe_webhook")
    event_json = subscription_event()

    # Start from a clean slate: the fixture customer already has a subscription.
    customer.stripesubscription_set.all().delete()
    assert models.StripeSubscription.objects.count() == 0

    resp = client.post(webhook_url, event_json["payload"],
                       content_type="application/json")

    assert resp.status_code == 201
    assert models.StripeEvent.objects.count() == 1
    stored_event = models.StripeEvent.objects.first()
    assert stored_event.status == models.StripeEvent.Status.PROCESSED

    # Exactly one subscription was created, mirroring the payload's fields.
    assert models.StripeSubscription.objects.count() == 1
    subscription = models.StripeSubscription.objects.first()
    for attr in ("id", "price_id", "cancel_at_period_end", "status"):
        assert getattr(subscription, attr) == event_json[attr]
    assert subscription.customer.customer_id == event_json["customer_id"]
    assert subscription.current_period_end.timestamp() == event_json["current_period_end"]
    assert subscription.created.timestamp() == event_json["created"]
def test_subscription_event_update_stripe_subscription(client, subscription_event):
    """A Stripe Subscription event payload should correctly update a StripeSubscription."""
    webhook_url = reverse("billing:stripe_webhook")
    event_attributes = {
        "current_period_end": (timezone.now() + timedelta(days=45)).timestamp(),
        # "price_id": "new_price" -- not available until we can upgrade plans
        "cancel_at_period_end": True,
        "created": timezone.now().timestamp(),
        "status": "past_due",
    }
    event_json = subscription_event(**event_attributes)

    # Ensure event_json is correct
    for key, expected in event_attributes.items():
        assert event_json[key] == expected

    resp = client.post(webhook_url, event_json["payload"],
                       content_type="application/json")

    assert resp.status_code == 201
    assert models.StripeEvent.objects.count() == 1
    stored_event = models.StripeEvent.objects.first()
    assert stored_event.status == models.StripeEvent.Status.PROCESSED

    # The existing subscription was updated in place, not duplicated.
    assert models.StripeSubscription.objects.count() == 1
    subscription = models.StripeSubscription.objects.first()
    for attr in ("id", "price_id", "cancel_at_period_end", "status"):
        assert getattr(subscription, attr) == event_json[attr]
    assert subscription.customer.customer_id == event_json["customer_id"]
    assert subscription.current_period_end.timestamp() == event_json["current_period_end"]
    assert subscription.created.timestamp() == event_json["created"]
def test_link_event_to_user(client, customer, subscription_event):
    """A Stripe Event should be connected to a User."""
    webhook_url = reverse("billing:stripe_webhook")
    event_payload = subscription_event()["payload"]
    resp = client.post(webhook_url, event_payload, content_type="application/json")
    assert 201 == resp.status_code
    stored_event = models.StripeEvent.objects.first()
    # The processed event must be linked to the customer's user.
    assert stored_event.user == customer.user
def test_user_not_found(client, mock_stripe_customer, subscription_event):
    """If a user can't be found, record the event as an error with no linked user.

    The Stripe-side customer email is pointed at an address that matches no
    local Customer, so processing must fail gracefully: the event is stored
    with ERROR status, no user, and a diagnostic note.
    """
    url = reverse("billing:stripe_webhook")
    mock_stripe_customer.retrieve.return_value.email = "<EMAIL>"
    payload = subscription_event(id="sub_new", customer_id="cus_new")["payload"]
    response = client.post(url, payload, content_type="application/json")
    # The webhook still acknowledges receipt; the failure lives on the event.
    assert response.status_code == 201
    event = models.StripeEvent.objects.first()
    assert event.status == models.StripeEvent.Status.ERROR
    # Identity comparison: `event.user == None` was un-idiomatic (PEP 8 / E711).
    assert event.user is None
    assert "Customer.DoesNotExist" in event.note
def test_persist_customer_id(user, client, mock_stripe_customer, subscription_event):
    """A Customer without a Stripe customer_id gets it set on the first subscription event."""
    mock_stripe_customer.retrieve.return_value.email = user.email
    webhook_url = reverse("billing:stripe_webhook")
    event_json = subscription_event(id="sub_new", customer_id="cus_new")
    # Precondition: the local Customer has no Stripe id yet.
    assert user.customer.customer_id is None
    resp = client.post(
        webhook_url, event_json["payload"], content_type="application/json"
    )
    assert resp.status_code == 201
    user.customer.refresh_from_db()
    # The id reported by the event has been stamped onto the Customer.
    assert user.customer.customer_id == "cus_new"
def test_subscription_customer_mismatch(
    user, client, subscription_event, mock_stripe_customer
):
    """If a subscription already belongs to a different customer in the database than
    the customer_id reported on the event, something is wrong.
    This could happen if someone changes who the StripeSubscription instance is connected to in the admin."""
    mock_stripe_customer.retrieve.return_value.email = user.email
    url = reverse("billing:stripe_webhook")
    # Same subscription as the fixture default, but a conflicting customer id.
    payload = subscription_event(customer_id="cus_different")["payload"]
    response = client.post(url, payload, content_type="application/json")
    # Receipt is still acknowledged; the conflict is recorded on the event.
    assert 201 == response.status_code
    event = models.StripeEvent.objects.first()
    assert event.status == models.StripeEvent.Status.ERROR
    assert "Integrity error" in event.note
def test_multiple_subscriptions_sync(client, subscription_event, monkeypatch):
    """If a customer has multiple subscriptions, the sync function is only called for the correct one."""
    mock = Mock()
    # Spy on the sync hook so invocations can be counted.
    monkeypatch.setattr(models.StripeSubscription, "sync_to_customer", mock)
    url = reverse("billing:stripe_webhook")
    # An event for a subscription id other than the customer's current one
    # must not trigger a sync.
    payload = subscription_event(id="sub_different", status="past_due")["payload"]
    response = client.post(url, payload, content_type="application/json")
    assert 201 == response.status_code
    assert mock.call_count == 0
    # An event for the fixture's default (matching) subscription id does.
    payload = subscription_event(status="past_due")["payload"]
    response = client.post(url, payload, content_type="application/json")
    assert 201 == response.status_code
    assert mock.call_count == 1
def test_payment_update_active(
    client, customer, subscription_event, mock_stripe_invoice
):
    """An update to a Subscription's payment method does not do anything if the Subscription is
    active."""
    url = reverse("billing:stripe_webhook")
    payload = subscription_event()["payload"]
    # Mark the event as a payment-method change (previous_attributes carries
    # the field that changed).
    payload["data"]["previous_attributes"] = {"default_payment_method": "pm_new"}
    response = client.post(url, payload, content_type="application/json")
    assert response.status_code == 201
    event = models.StripeEvent.objects.first()
    assert event.status == models.StripeEvent.Status.PROCESSED
    assert event.user.customer == customer
    # No invoice lookup or retry should happen for an active subscription.
    assert mock_stripe_invoice.list.call_count == 0
    assert mock_stripe_invoice.pay.call_count == 0
@pytest.mark.parametrize("status", ["incomplete", "past_due"])
def test_payment_update_and_retry(
    client, subscription_event, customer, status, mock_stripe_invoice
):
    """An update to a Subscription's payment method when not active automatically retries the last open invoice."""
    # One open invoice is available to be retried.
    mock_stripe_invoice.list.return_value = {
        "data": [{"status": "open", "id": "inv_123"}]
    }
    url = reverse("billing:stripe_webhook")
    payload = subscription_event(status=status)["payload"]
    # Mark the event as a payment-method change.
    payload["data"]["previous_attributes"] = {"default_payment_method": "pm_new"}
    response = client.post(url, payload, content_type="application/json")
    assert response.status_code == 201
    event = models.StripeEvent.objects.first()
    assert event.status == models.StripeEvent.Status.PROCESSED
    assert event.user.customer == customer
    # The open invoice was looked up and paid exactly once.
    assert mock_stripe_invoice.list.call_count == 1
    assert mock_stripe_invoice.pay.call_count == 1
def test_incomplete_expired_cycle(client, user, subscription_event):
    """A StripeSubscription that transitions from incomplete to incomplete_expired should not error."""
    # This is a bugfix that results from ignoring deleted subscriptions when the customer is not on a
    # paid plan. Customers are, of course, not normally on paid plans when their status is incomplete.
    url = reverse("billing:stripe_webhook")
    customer = user.customer
    # Start from an INCOMPLETE subscription for this customer ...
    factories.StripeSubscriptionFactory(
        customer=customer, status=models.StripeSubscription.Status.INCOMPLETE
    )
    # ... then deliver the transition to INCOMPLETE_EXPIRED.
    payload = subscription_event(
        customer=customer,
        status=models.StripeSubscription.Status.INCOMPLETE_EXPIRED,
        current_period_end=(timezone.now() + timedelta(days=30)).timestamp(),
    )["payload"]
    response = client.post(url, payload, content_type="application/json")
    assert response.status_code == 201
    event = models.StripeEvent.objects.first()
    # The event must be processed cleanly rather than recorded as an error.
    assert event.status == models.StripeEvent.Status.PROCESSED
| en | 0.897083 | Stripe lifecycle webhook functionality. Webhooks where the user has taken some action in Checkout or Portal are found elsewhere. Customer that is coming up for renewal # Gut check Return a function that generates a Stripe Event payload with defaults of an active paid subscription. # For assertion convenience, we pass back all the variables at the top level as well. Create event Malformed JSON Unrecognized event type A Stripe Subscription event payload should correctly create a StripeSubscription. A Stripe Subscription event payload should correctly update a StripeSubscription. # "price_id": "new_price" -- not available until we can upgrade plans # Ensure event_json is correct A Stripe Event should be connected to a User. If a user can't be found, error. A Customer without a Stripe customer_id gets it set on the first subscription event. If a subscription already belongs to a different customer in the database than the customer_id reported on the event, something is wrong. This could happen if someone changes who the StripeSubscription instance is connected to in the admin. If a customer has multiple subscriptions, the sync function is only called for the correct one. An update to a Subscription's payment method does not do anything if the Subscription is active. An update to a Subscription's payment method when not active automatically retries the last open invoice. A StripeSubscription that transitions from incomplete to incomplete_expired should not error. # This is a bugfix that results from ignoring deleted subscriptions when the customer is not on a # paid plan. Customers are, of course, not normally on paid plans when their status is incomplete. | 2.353793 | 2 |
microdrop/gui/channel_sweep.py | cfobel/microdrop | 17 | 6624997 | from flatland import Form, Float
from flatland.validation import ValueAtLeast
from pygtkhelpers.ui.form_view_dialog import create_form_view
from pygtkhelpers.ui.views.select import ListSelect
import gtk
import pandas as pd
import pygtkhelpers.ui.extra_widgets # Include widget for `Float` form fields
def get_channel_sweep_parameters(voltage=100, frequency=10e3, channels=None,
                                 parent=None):
    '''
    Show dialog to select parameters for a sweep across a selected set of
    channels.

    Parameters
    ----------
    voltage : float
        Default actuation voltage (must be at least 0).
    frequency : float
        Default actuation frequency (must be at least 1).
    channels : pandas.Series
        Default channels selection, encoded as boolean array indexed by channel
        number, where `True` values indicate selected channel(s).
    parent : gtk.Window
        If not ``None``, parent window for dialog.  For example, display dialog
        at position relative to the parent window.

    Returns
    -------
    dict
        Values collected from widgets with the following keys:
        `'frequency'`, `'voltage'`, and (optionally) `'channels'`.

    Raises
    ------
    RuntimeError
        If the dialog is cancelled (i.e., OK is not pressed).
    '''
    # Create a form view containing widgets to set the waveform attributes
    # (i.e., voltage and frequency).
    form = Form.of(Float.named('voltage')
                   .using(default=voltage,
                          validators=[ValueAtLeast(minimum=0)]),
                   Float.named('frequency')
                   .using(default=frequency,
                          validators=[ValueAtLeast(minimum=1)]))
    form_view = create_form_view(form)
    # If default channel selection was provided, create a treeview with one row
    # per channel, and a checkbox in each row to mark the selection status of
    # the corresponding channel.
    if channels is not None:
        df_channel_select = pd.DataFrame(channels.index, columns=['channel'])
        df_channel_select.insert(1, 'select', channels.values)
        view_channels = ListSelect(df_channel_select)
    # Create dialog window.
    dialog = gtk.Dialog(title='Channel sweep parameters',
                        buttons=(gtk.STOCK_OK, gtk.RESPONSE_OK,
                                 gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL))
    # Add waveform widgets to dialog window.
    frame_waveform = gtk.Frame('Waveform properties')
    frame_waveform.add(form_view.widget)
    dialog.vbox.pack_start(child=frame_waveform, expand=False, fill=False,
                           padding=5)
    # Add channel selection widgets to dialog window.
    if channels is not None:
        frame_channels = gtk.Frame('Select channels to sweep')
        frame_channels.add(view_channels.widget)
        dialog.vbox.pack_start(child=frame_channels, expand=True, fill=True,
                               padding=5)
    # Mark all widgets as visible.
    dialog.vbox.show_all()
    if parent is not None:
        # NOTE(review): sets transient-for on the underlying gdk window;
        # gtk.Dialog also exposes set_transient_for directly -- confirm intended.
        dialog.window.set_transient_for(parent)
    response = dialog.run()
    dialog.destroy()
    if response != gtk.RESPONSE_OK:
        raise RuntimeError('Dialog cancelled.')
    # Collect waveform and channel selection values from dialog.
    form_values = {name: f.element.value
                   for name, f in form_view.form.fields.items()}
    if channels is not None:
        # Reduce the boolean selection column to the list of selected channels.
        form_values['channels'] = (df_channel_select
                                   .loc[df_channel_select['select'],
                                        'channel'].values)
    return form_values
| from flatland import Form, Float
from flatland.validation import ValueAtLeast
from pygtkhelpers.ui.form_view_dialog import create_form_view
from pygtkhelpers.ui.views.select import ListSelect
import gtk
import pandas as pd
import pygtkhelpers.ui.extra_widgets # Include widget for `Float` form fields
def get_channel_sweep_parameters(voltage=100, frequency=10e3, channels=None,
                                 parent=None):
    '''
    Show dialog to select parameters for a sweep across a selected set of
    channels.

    Parameters
    ----------
    voltage : float
        Default actuation voltage (must be at least 0).
    frequency : float
        Default actuation frequency (must be at least 1).
    channels : pandas.Series
        Default channels selection, encoded as boolean array indexed by channel
        number, where `True` values indicate selected channel(s).
    parent : gtk.Window
        If not ``None``, parent window for dialog.  For example, display dialog
        at position relative to the parent window.

    Returns
    -------
    dict
        Values collected from widgets with the following keys:
        `'frequency'`, `'voltage'`, and (optionally) `'channels'`.

    Raises
    ------
    RuntimeError
        If the dialog is cancelled (i.e., OK is not pressed).
    '''
    # Create a form view containing widgets to set the waveform attributes
    # (i.e., voltage and frequency).
    form = Form.of(Float.named('voltage')
                   .using(default=voltage,
                          validators=[ValueAtLeast(minimum=0)]),
                   Float.named('frequency')
                   .using(default=frequency,
                          validators=[ValueAtLeast(minimum=1)]))
    form_view = create_form_view(form)
    # If default channel selection was provided, create a treeview with one row
    # per channel, and a checkbox in each row to mark the selection status of
    # the corresponding channel.
    if channels is not None:
        df_channel_select = pd.DataFrame(channels.index, columns=['channel'])
        df_channel_select.insert(1, 'select', channels.values)
        view_channels = ListSelect(df_channel_select)
    # Create dialog window.
    dialog = gtk.Dialog(title='Channel sweep parameters',
                        buttons=(gtk.STOCK_OK, gtk.RESPONSE_OK,
                                 gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL))
    # Add waveform widgets to dialog window.
    frame_waveform = gtk.Frame('Waveform properties')
    frame_waveform.add(form_view.widget)
    dialog.vbox.pack_start(child=frame_waveform, expand=False, fill=False,
                           padding=5)
    # Add channel selection widgets to dialog window.
    if channels is not None:
        frame_channels = gtk.Frame('Select channels to sweep')
        frame_channels.add(view_channels.widget)
        dialog.vbox.pack_start(child=frame_channels, expand=True, fill=True,
                               padding=5)
    # Mark all widgets as visible.
    dialog.vbox.show_all()
    if parent is not None:
        # NOTE(review): sets transient-for on the underlying gdk window;
        # gtk.Dialog also exposes set_transient_for directly -- confirm intended.
        dialog.window.set_transient_for(parent)
    response = dialog.run()
    dialog.destroy()
    if response != gtk.RESPONSE_OK:
        raise RuntimeError('Dialog cancelled.')
    # Collect waveform and channel selection values from dialog.
    form_values = {name: f.element.value
                   for name, f in form_view.form.fields.items()}
    if channels is not None:
        # Reduce the boolean selection column to the list of selected channels.
        form_values['channels'] = (df_channel_select
                                   .loc[df_channel_select['select'],
                                        'channel'].values)
    return form_values
| en | 0.741343 | # Include widget for `Float` form fields Show dialog to select parameters for a sweep across a selected set of channels. Parameters ---------- voltage : int Default actuation voltage. frequency : int Default actuation frequency. channels : pandas.Series Default channels selection, encoded as boolean array indexed by channel number, where `True` values indicate selected channel(s). parent : gtk.Window If not ``None``, parent window for dialog. For example, display dialog at position relative to the parent window. Returns ------- dict Values collected from widgets with the following keys: `'frequency'`, `voltage'`, and (optionally) `'channels'`. # Create a form view containing widgets to set the waveform attributes # (i.e., voltage and frequency). # If default channel selection was provided, create a treeview with one row # per channel, and a checkbox in each row to mark the selection status of # the corresponding channel. # Create dialog window. # Add waveform widgets to dialog window. # Add channel selection widgets to dialog window. # Mark all widgets as visible. # Collection waveform and channel selection values from dialog. | 2.375365 | 2 |
egs/csj/asr1/local/csj_rm_tag.py | texpomru13/espnet | 5,053 | 6624998 | <reponame>texpomru13/espnet
#!/usr/bin/env python3
# Copyright 2017 Johns Hopkins University (<NAME>)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import codecs
from io import open
import sys
# Force UTF-8 text I/O on both Python 2 and 3: wrap the raw byte streams so
# the script handles non-ASCII (CSJ Japanese) text regardless of the locale.
PY2 = sys.version_info[0] == 2
sys.stdin = codecs.getreader("utf-8")(sys.stdin if PY2 else sys.stdin.buffer)
sys.stdout = codecs.getwriter("utf-8")(sys.stdout if PY2 else sys.stdout.buffer)
def strip_tags(tokens, skip_ncols=0):
    """Join *tokens* with spaces, dropping everything after the first ``+``
    in each token past the first ``skip_ncols`` columns.

    The leading ``skip_ncols`` columns (typically utterance ids) are kept
    verbatim.  Joining once avoids the stray leading/trailing space the old
    two-``print`` version emitted whenever either half of the line was empty.
    """
    kept = tokens[:skip_ncols]
    stripped = [token.split("+")[0] for token in tokens[skip_ncols:]]
    return " ".join(kept + stripped)


def main():
    """Read the input text line by line and print it with CSJ tags removed."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--skip-ncols", "-s", default=0, type=int, help="skip first n columns"
    )
    parser.add_argument("text", type=str, help="input text")
    args = parser.parse_args()
    if args.text:
        f = open(args.text, encoding="utf-8")
    else:
        f = sys.stdin
    try:
        for line in f:
            print(strip_tags(line.split(), args.skip_ncols))
    finally:
        # Close only handles we opened ourselves; never close sys.stdin.
        # (The original leaked the file handle.)
        if f is not sys.stdin:
            f.close()


if __name__ == "__main__":
    main()
| #!/usr/bin/env python3
# Copyright 2017 Johns Hopkins University (<NAME>)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import codecs
from io import open
import sys
# Force UTF-8 text I/O on both Python 2 and 3: wrap the raw byte streams so
# the script handles non-ASCII (CSJ Japanese) text regardless of the locale.
PY2 = sys.version_info[0] == 2
sys.stdin = codecs.getreader("utf-8")(sys.stdin if PY2 else sys.stdin.buffer)
sys.stdout = codecs.getwriter("utf-8")(sys.stdout if PY2 else sys.stdout.buffer)
def strip_tags(tokens, skip_ncols=0):
    """Join *tokens* with spaces, dropping everything after the first ``+``
    in each token past the first ``skip_ncols`` columns.

    The leading ``skip_ncols`` columns (typically utterance ids) are kept
    verbatim.  Joining once avoids the stray leading/trailing space the old
    two-``print`` version emitted whenever either half of the line was empty.
    """
    kept = tokens[:skip_ncols]
    stripped = [token.split("+")[0] for token in tokens[skip_ncols:]]
    return " ".join(kept + stripped)


def main():
    """Read the input text line by line and print it with CSJ tags removed."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--skip-ncols", "-s", default=0, type=int, help="skip first n columns"
    )
    parser.add_argument("text", type=str, help="input text")
    args = parser.parse_args()
    if args.text:
        f = open(args.text, encoding="utf-8")
    else:
        f = sys.stdin
    try:
        for line in f:
            print(strip_tags(line.split(), args.skip_ncols))
    finally:
        # Close only handles we opened ourselves; never close sys.stdin.
        # (The original leaked the file handle.)
        if f is not sys.stdin:
            f.close()


if __name__ == "__main__":
    main()
WebNavigation.py | Josicamats/rpachallenge | 0 | 6624999 | <filename>WebNavigation.py
import logging
from RPA.Browser.Selenium import Selenium
from RPA.Tables import Tables
from RPA.FileSystem import FileSystem
# Module-level RPA library singletons shared by all WebNavigation methods.
browser = Selenium()  # drives the web browser
tb = Tables()  # tabular data helper (table construction)
fs = FileSystem()  # local file checks (download-completion polling)
class WebNavigation:
    """Browser-automation helper for scraping the agencies dashboard.

    Wraps the module-level RPA ``browser`` (Selenium) singleton.  Every public
    method logs failures and re-raises them as ``SystemError`` so the calling
    robot stops with a readable message.
    """

    def __init__(self) -> None:
        self.logger = logging.getLogger(__name__)

    # set download directory
    def set_download_directory(self, directory):
        """Route all browser downloads into *directory*."""
        try:
            browser.set_download_directory(directory, True)
        except Exception as err:
            self.logger.error("Set download directory fails: " + str(err))
            raise SystemError("Set download directory fails: " + str(err))

    # Open specified website
    def open_website(self, url: str):
        """Open *url* in a maximized browser window with a 30s implicit wait."""
        try:
            browser.open_available_browser(url)
            browser.maximize_browser_window()
            browser.set_browser_implicit_wait(30)
        except Exception as err:
            self.logger.error("Login website fails bc: " + str(err))
            raise SystemError("Login website fails bc: " + str(err))

    # Click specified button
    def click_button(self, button: str):
        """Wait for the *button* locator to become visible, then click it."""
        try:
            browser.wait_until_element_is_visible(button)
            browser.click_element_when_visible(button)
        except Exception as err:
            self.logger.error("Click button failes bc: " + str(err))
            raise SystemError("Click button failes bc: " + str(err))

    # Close browsers:
    def close_browser(self):
        """Close every browser window opened by this robot."""
        try:
            browser.close_all_browsers()
        except Exception as err:
            self.logger.error("Close all browsers fails bc: " + str(err))
            raise SystemError("Close all browsers fails bc: " + str(err))

    # Get Agencies Data:
    def get_agencies(self):
        """Return the agency names shown on the agency tiles widget.

        The page lays the tiles out as up to 9 rows of 3; ``count_agencies``
        bounds the walk so a partially filled final row is handled.
        """
        try:
            total = 0
            count_agencies = len(browser.find_elements("(//*[contains(@class, 'col-sm-4 text-center noUnderline')])"))
            agencies = []
            for i in range(1, 10):
                for j in range(1, 4):
                    total = total + 1
                    if total <= count_agencies:
                        agency = browser.find_element("xpath://*[@id='agency-tiles-widget']/div/div["+str(i)+"]/div["+str(j)+"]/div/div/div/div[1]/a/span[1]").text
                        agencies.append(agency)
            return agencies
        except Exception as err:
            self.logger.error("Unable to get Agencies names bc: " + str(err))
            raise SystemError("Unable to get Agencies names bc: " + str(err))

    # Get Amounts Data:
    def get_amounts(self):
        """Return the amount displayed on each agency tile.

        Entries are produced in the same tile order as ``get_agencies``.
        """
        try:
            total = 0
            count_agencies = len(browser.find_elements("(//*[contains(@class, 'col-sm-4 text-center noUnderline')])"))
            amounts = []
            for i in range(1, 10):
                for j in range(1, 4):
                    total = total + 1
                    if total <= count_agencies:
                        amount = browser.find_element("xpath://*[@id='agency-tiles-widget']/div/div["+str(i)+"]/div["+str(j)+"]/div/div/div/div[1]/a/span[2]").text
                        amounts.append(amount)
            return amounts
        except Exception as err:
            self.logger.error("Unable to get amounts of each agency bc: " + str(err))
            raise SystemError("Unable to get amounts of each agency bc: " + str(err))

    # Scraping data from table
    def get_table_data(self):
        """Scrape the investments table into an RPA Table.

        The first cell of each row (column 'A') may link to a business case;
        when it does, the corresponding PDF is downloaded as a side effect.
        """
        try:
            browser.select_from_list_by_value("//*[@id='investments-table-object_length']/label/select", "-1")
            browser.set_browser_implicit_wait(5)
            row_count = len(browser.find_elements("//*[@id='investments-table-object']/tbody/tr"))
            col_count = len(browser.find_elements("//*[@id='investments-table-object']/tbody/tr[1]/td"))
            data = tb.create_table()
            columns = ["A", "B", "C", "D", "E", "F", "G"]
            for col in columns:
                tb.add_table_column(data, col)
            # Loop-invariant download target, hoisted out of the row loop.
            # NOTE(review): hard-coded path -- consider making it configurable.
            directory = "C:\\ROBOTS\\RPAChallenge\\Challenge\\output"
            for n in range(1, row_count + 1):
                browser.select_from_list_by_value("//*[@id='investments-table-object_length']/label/select", "-1")
                rows = []
                row = 0
                for m in range(1, col_count + 1):
                    browser.select_from_list_by_value("//*[@id='investments-table-object_length']/label/select", "-1")
                    path = "//*[@id='investments-table-object']/tbody/tr["+str(n)+"]/td["+str(m)+"]"
                    table_data = browser.find_element(path).text
                    rows.append(table_data)
                    if columns[row] == 'A':
                        # Bugfix: the original called the bare name ``download_pdf``,
                        # which raises NameError at runtime because the function is
                        # defined on the class, not at module scope.
                        self.download_pdf(table_data, directory)
                    row = row + 1
                tb.add_table_row(data, rows)
            return data
        except Exception as err:
            self.logger.error("Scraping data from table fails: " + str(err))
            raise SystemError("Scraping data from table fails: " + str(err))

    # Download Specified Business Case if Exists Link
    @staticmethod
    def download_pdf(file: str, directory):
        """Download the business-case PDF for UII *file* into *directory*,
        if the current page links to one.

        Best effort: failures are swallowed so one missing PDF does not abort
        the table scrape.  ``@staticmethod`` fixes the original definition,
        which lacked ``self`` and was therefore uncallable as a method.
        """
        try:
            tableURL = "/drupal/summary/393/" + file
            exist = browser.does_page_contain_link(tableURL)
            if exist:
                link = browser.find_element('//a[@href="'+tableURL+'"]')
                browser.click_link(link)
                browser.set_browser_implicit_wait(30)
                pdfPath = browser.find_element("//*[@id='business-case-pdf']/a")
                browser.click_link(pdfPath)
                # Poll until the download lands on disk.
                while fs.does_file_not_exist(directory+"\\"+file+".pdf"):
                    browser.set_browser_implicit_wait(10)
                browser.go_back()
                browser.go_back()
        except Exception:
            # Deliberately best-effort, but no longer a bare ``except:`` so
            # KeyboardInterrupt/SystemExit can still propagate.
            pass
| <filename>WebNavigation.py
import logging
from RPA.Browser.Selenium import Selenium
from RPA.Tables import Tables
from RPA.FileSystem import FileSystem
# Module-level RPA library singletons shared by all WebNavigation methods.
browser = Selenium()  # drives the web browser
tb = Tables()  # tabular data helper (table construction)
fs = FileSystem()  # local file checks (download-completion polling)
class WebNavigation:
    """Browser-automation helper for scraping the agencies dashboard.

    Wraps the module-level RPA ``browser`` (Selenium) singleton.  Every public
    method logs failures and re-raises them as ``SystemError`` so the calling
    robot stops with a readable message.
    """

    def __init__(self) -> None:
        self.logger = logging.getLogger(__name__)

    # set download directory
    def set_download_directory(self, directory):
        """Route all browser downloads into *directory*."""
        try:
            browser.set_download_directory(directory, True)
        except Exception as err:
            self.logger.error("Set download directory fails: " + str(err))
            raise SystemError("Set download directory fails: " + str(err))

    # Open specified website
    def open_website(self, url: str):
        """Open *url* in a maximized browser window with a 30s implicit wait."""
        try:
            browser.open_available_browser(url)
            browser.maximize_browser_window()
            browser.set_browser_implicit_wait(30)
        except Exception as err:
            self.logger.error("Login website fails bc: " + str(err))
            raise SystemError("Login website fails bc: " + str(err))

    # Click specified button
    def click_button(self, button: str):
        """Wait for the *button* locator to become visible, then click it."""
        try:
            browser.wait_until_element_is_visible(button)
            browser.click_element_when_visible(button)
        except Exception as err:
            self.logger.error("Click button failes bc: " + str(err))
            raise SystemError("Click button failes bc: " + str(err))

    # Close browsers:
    def close_browser(self):
        """Close every browser window opened by this robot."""
        try:
            browser.close_all_browsers()
        except Exception as err:
            self.logger.error("Close all browsers fails bc: " + str(err))
            raise SystemError("Close all browsers fails bc: " + str(err))

    # Get Agencies Data:
    def get_agencies(self):
        """Return the agency names shown on the agency tiles widget.

        The page lays the tiles out as up to 9 rows of 3; ``count_agencies``
        bounds the walk so a partially filled final row is handled.
        """
        try:
            total = 0
            count_agencies = len(browser.find_elements("(//*[contains(@class, 'col-sm-4 text-center noUnderline')])"))
            agencies = []
            for i in range(1, 10):
                for j in range(1, 4):
                    total = total + 1
                    if total <= count_agencies:
                        agency = browser.find_element("xpath://*[@id='agency-tiles-widget']/div/div["+str(i)+"]/div["+str(j)+"]/div/div/div/div[1]/a/span[1]").text
                        agencies.append(agency)
            return agencies
        except Exception as err:
            self.logger.error("Unable to get Agencies names bc: " + str(err))
            raise SystemError("Unable to get Agencies names bc: " + str(err))

    # Get Amounts Data:
    def get_amounts(self):
        """Return the amount displayed on each agency tile.

        Entries are produced in the same tile order as ``get_agencies``.
        """
        try:
            total = 0
            count_agencies = len(browser.find_elements("(//*[contains(@class, 'col-sm-4 text-center noUnderline')])"))
            amounts = []
            for i in range(1, 10):
                for j in range(1, 4):
                    total = total + 1
                    if total <= count_agencies:
                        amount = browser.find_element("xpath://*[@id='agency-tiles-widget']/div/div["+str(i)+"]/div["+str(j)+"]/div/div/div/div[1]/a/span[2]").text
                        amounts.append(amount)
            return amounts
        except Exception as err:
            self.logger.error("Unable to get amounts of each agency bc: " + str(err))
            raise SystemError("Unable to get amounts of each agency bc: " + str(err))

    # Scraping data from table
    def get_table_data(self):
        """Scrape the investments table into an RPA Table.

        The first cell of each row (column 'A') may link to a business case;
        when it does, the corresponding PDF is downloaded as a side effect.
        """
        try:
            browser.select_from_list_by_value("//*[@id='investments-table-object_length']/label/select", "-1")
            browser.set_browser_implicit_wait(5)
            row_count = len(browser.find_elements("//*[@id='investments-table-object']/tbody/tr"))
            col_count = len(browser.find_elements("//*[@id='investments-table-object']/tbody/tr[1]/td"))
            data = tb.create_table()
            columns = ["A", "B", "C", "D", "E", "F", "G"]
            for col in columns:
                tb.add_table_column(data, col)
            # Loop-invariant download target, hoisted out of the row loop.
            # NOTE(review): hard-coded path -- consider making it configurable.
            directory = "C:\\ROBOTS\\RPAChallenge\\Challenge\\output"
            for n in range(1, row_count + 1):
                browser.select_from_list_by_value("//*[@id='investments-table-object_length']/label/select", "-1")
                rows = []
                row = 0
                for m in range(1, col_count + 1):
                    browser.select_from_list_by_value("//*[@id='investments-table-object_length']/label/select", "-1")
                    path = "//*[@id='investments-table-object']/tbody/tr["+str(n)+"]/td["+str(m)+"]"
                    table_data = browser.find_element(path).text
                    rows.append(table_data)
                    if columns[row] == 'A':
                        # Bugfix: the original called the bare name ``download_pdf``,
                        # which raises NameError at runtime because the function is
                        # defined on the class, not at module scope.
                        self.download_pdf(table_data, directory)
                    row = row + 1
                tb.add_table_row(data, rows)
            return data
        except Exception as err:
            self.logger.error("Scraping data from table fails: " + str(err))
            raise SystemError("Scraping data from table fails: " + str(err))

    # Download Specified Business Case if Exists Link
    @staticmethod
    def download_pdf(file: str, directory):
        """Download the business-case PDF for UII *file* into *directory*,
        if the current page links to one.

        Best effort: failures are swallowed so one missing PDF does not abort
        the table scrape.  ``@staticmethod`` fixes the original definition,
        which lacked ``self`` and was therefore uncallable as a method.
        """
        try:
            tableURL = "/drupal/summary/393/" + file
            exist = browser.does_page_contain_link(tableURL)
            if exist:
                link = browser.find_element('//a[@href="'+tableURL+'"]')
                browser.click_link(link)
                browser.set_browser_implicit_wait(30)
                pdfPath = browser.find_element("//*[@id='business-case-pdf']/a")
                browser.click_link(pdfPath)
                # Poll until the download lands on disk.
                while fs.does_file_not_exist(directory+"\\"+file+".pdf"):
                    browser.set_browser_implicit_wait(10)
                browser.go_back()
                browser.go_back()
        except Exception:
            # Deliberately best-effort, but no longer a bare ``except:`` so
            # KeyboardInterrupt/SystemExit can still propagate.
            pass
| en | 0.305832 | #set download directory # Open specified website #self.browser.wait_until_page_contains_element() # Click specified button #browser.set_browser_implicit_wait(30) # Close browsers: # Get Agencies Data: #dt_agencies = tb.create_table(agencies) # Get Amounts Data: #dt_amounts = tb.create_table(amounts) #Scraping data from table # Download Specified Business Case if Exists Link | 2.765227 | 3 |
protein_lm/generate_data.py | shaun95/google-research | 1 | 6625000 | <gh_stars>1-10
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Process csv data to tfrecords."""
import os
from absl import app
from absl import flags
import tensorflow.compat.v1 as tf
from protein_lm import data
# Command-line flags; both directories are required at runtime (validated in main()).
FLAGS = flags.FLAGS
flags.DEFINE_string(
    'input_dir', default='', help=('Directory to load CSVs from.'))
flags.DEFINE_string(
    'output_dir', default='', help=('Directory to output tfrecords to.'))
def main(argv):
  """Convert every CSV in --input_dir into tfrecords under --output_dir."""
  if not FLAGS.input_dir:
    raise ValueError('Must provide input directory.')
  if not FLAGS.output_dir:
    raise ValueError('Must provide output directory.')
  # tf.gfile.Glob returns the full matched paths (the pattern already
  # includes input_dir), so the results must NOT be joined with input_dir
  # again -- the original re-join produced doubled paths like
  # "input_dir/input_dir/x.csv".
  files = tf.gfile.Glob(os.path.join(FLAGS.input_dir, '*.csv'))
  tf.gfile.MakeDirs(FLAGS.output_dir)
  for i, file in enumerate(files):
    print(file)
    data.csv_to_tfrecord(file, FLAGS.output_dir, idx=i, total=len(files))


if __name__ == '__main__':
  app.run(main)
| # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Process csv data to tfrecords."""
import os
from absl import app
from absl import flags
import tensorflow.compat.v1 as tf
from protein_lm import data
# Command-line flags; both directories are required at runtime (validated in main()).
FLAGS = flags.FLAGS
flags.DEFINE_string(
    'input_dir', default='', help=('Directory to load CSVs from.'))
flags.DEFINE_string(
    'output_dir', default='', help=('Directory to output tfrecords to.'))
def main(argv):
  """Convert every CSV in --input_dir into tfrecords under --output_dir."""
  if not FLAGS.input_dir:
    raise ValueError('Must provide input directory.')
  if not FLAGS.output_dir:
    raise ValueError('Must provide output directory.')
  # tf.gfile.Glob returns the full matched paths (the pattern already
  # includes input_dir), so the results must NOT be joined with input_dir
  # again -- the original re-join produced doubled paths like
  # "input_dir/input_dir/x.csv".
  files = tf.gfile.Glob(os.path.join(FLAGS.input_dir, '*.csv'))
  tf.gfile.MakeDirs(FLAGS.output_dir)
  for i, file in enumerate(files):
    print(file)
    data.csv_to_tfrecord(file, FLAGS.output_dir, idx=i, total=len(files))


if __name__ == '__main__':
  app.run(main)
pipelines/pipelines.py | philippschmalen/ESG-topics-Google-count | 0 | 6625001 | # general
import yaml
from datetime import timedelta, datetime
import os
import pandas as pd
# prefect
from prefect import task, Flow, Parameter
from prefect.schedules import IntervalSchedule
# custom
from google_results_count import extract, load
from analysis import load_raw_data, transform, plot
# -- for logging
import logging
# Root-logger setup: DEBUG and above, '{'-style formatting, log file
# overwritten on every run.
# NOTE(review): assumes the log/ directory already exists -- basicConfig does
# not create parent directories; confirm the deployment creates it.
logging.basicConfig(
    level=logging.DEBUG,
    format='{asctime} {levelname:<8} {message}',
    style='{',
    filename='log/google_results_count.log',
    filemode='w'
)
# -------------------------------------------------
# Extract raw data
# -------------------------------------------------
@task
def create_search_url(keyword_list, url):
    """Prefect task: build one Google search URL per keyword (delegates to extract)."""
    return extract.get_search_urls(keyword_list, url)
@task(max_retries=2, retry_delay=timedelta(seconds=10))
def extract_result_count(search_urls, user_agent, url):
    """Prefect task: fetch the Google results count for each search URL.

    Retries twice with a 10-second delay on failure.

    Note: the ``url`` parameter (the Google base URL) is kept for flow-wiring
    compatibility but is not used here.  Previously the comprehension variable
    was also named ``url``, shadowing the parameter and hiding that fact.
    """
    result_count = [extract.get_results_count(search_url, user_agent)
                    for search_url in search_urls]
    return result_count
@task
def df_build_results_count(keyword_list, result_count, search_urls):
    """Prefect task: assemble scraped counts into a DataFrame, one row per keyword."""
    frame_data = {
        'keyword': keyword_list,
        'results_count': result_count,
        'search_url': search_urls,
        'query_timestamp': datetime.now(),
    }
    return pd.DataFrame(frame_data)
@task
def assert_df(df, keyword_list, url):
    """Prefect task: sanity-check the scraped DataFrame (raises via extract.assert_df)."""
    extract.assert_df(df, keyword_list, url)
@task
def export_raw_data(df, path):
    """Prefect task: persist the raw results-count DataFrame to *path* as CSV."""
    load.write_to_csv(df, path)
# -------------------------------------------------
# Tranform data for analysis
# -------------------------------------------------
@task
def transform_raw_data(raw_data_dir='../data/0_raw', filename='google_results_count'):
    """Prefect task: load raw counts and prepare them for analysis.

    The data location was previously hard-coded; it is now parameterized with
    defaults equal to the original values, so existing flow wiring behaves
    identically.
    """
    df = (load_raw_data.load(raw_data_dir=raw_data_dir, filename=filename)
          .pipe(transform.impute_results_count)
          .pipe(transform.feature_engineering))
    return df
@task
def export_analysis_data(df, path):
    """Prefect task: persist the analysis-ready DataFrame to *path* as CSV."""
    load.write_to_csv(df, path)
@task
def deploy_plots(df, last_x_days=30):
    """Prefect task: render and deploy the timeline and change figures.

    ``last_x_days`` controls the window of the change plot; its default (30)
    matches the previously hard-coded value, so existing callers are
    unaffected.
    """
    # Subset df to the most recent window for the change plot.
    df_lxdays = transform.subset_last_x_days(df, last_x_days=last_x_days)
    plot.set_layout_template()
    fig_timeline = plot.plot_timeline(df)
    fig_change = plot.plot_change(df_lxdays)
    plot.deploy_figure(fig_timeline, filename="google_results_count_timeline")
    plot.deploy_figure(fig_change, filename="google_results_count_change")
def main():
    """Configure and run the daily Google-results-count ETL flow.

    Reads ../settings.yml, wires the Prefect tasks into a flow on a daily
    schedule, and runs it with the resolved parameter values.
    """
    # ~----------------- SETTINGS -----------------~
    with open(r'../settings.yml') as file:
        settings = yaml.full_load(file)
    PROJECT_DIR = '../'
    RAW_DATA_DIR = settings['project']['raw_data_dir']
    FINAL_DATA_DIR = settings['project']['final_data_dir']
    # Filename carries a run timestamp, so each run exports to a fresh file.
    FILENAME = f"{settings['project']['export_filename']}_{datetime.now().strftime('%Y%m%d_%H%M')}.csv"
    FILEPATH_RAW = os.path.join(PROJECT_DIR, RAW_DATA_DIR, FILENAME)
    FILEPATH_ANALYSIS = os.path.join(PROJECT_DIR, FINAL_DATA_DIR, FILENAME)
    KEYWORDS = settings['query']['keywords']
    USER_AGENT = settings['query']['user_agent']
    GOOGLE_URL = settings['query']['google_url']

    # ~----------------- FLOW -----------------~
    # ~-- daily schedule
    schedule = IntervalSchedule(
        start_date=datetime.strptime("20210424-030500UTC", "%Y%m%d-%H%M%S%Z"),
        interval=timedelta(days=1),
    )

    with Flow("etl", schedule=schedule) as flow:
        # parameter
        filepath_raw = Parameter(name="filepath_raw")
        filepath_analysis = Parameter(name="filepath_analysis")
        keywords = Parameter(name="keywords")
        user_agent = Parameter(name="user_agent")
        google_url = Parameter(name="google_url")

        # task flow
        # -- raw data
        search_urls = create_search_url(keywords, google_url)
        results_count = extract_result_count(
            search_urls, user_agent, google_url)
        df = df_build_results_count(keywords, results_count, search_urls)
        assert_df(df, keywords, google_url)
        export_raw_data(df, filepath_raw)

        # -- analysis data and plot
        # NOTE(review): transform_raw_data() takes no task inputs, so the
        # flow graph records no dependency on export_raw_data -- it may run
        # before the current run's raw CSV is written. Confirm intended.
        df = transform_raw_data()
        export_analysis_data(df, filepath_analysis)
        deploy_plots(df)

    # ~----------------- RUN -----------------~
    flow.run(filepath_raw=FILEPATH_RAW,
             filepath_analysis=FILEPATH_ANALYSIS,
             keywords=KEYWORDS,
             user_agent=USER_AGENT,
             google_url=GOOGLE_URL)


if __name__ == "__main__":
    main()
| # general
import yaml
from datetime import timedelta, datetime
import os
import pandas as pd
# prefect
from prefect import task, Flow, Parameter
from prefect.schedules import IntervalSchedule
# custom
from google_results_count import extract, load
from analysis import load_raw_data, transform, plot
# -- for logging
import logging
logging.basicConfig(
level=logging.DEBUG,
format='{asctime} {levelname:<8} {message}',
style='{',
filename='log/google_results_count.log',
filemode='w'
)
# -------------------------------------------------
# Extract raw data
# -------------------------------------------------
@task
def create_search_url(keyword_list, url):
return extract.get_search_urls(keyword_list, url)
@task(max_retries=2, retry_delay=timedelta(seconds=10))
def extract_result_count(search_urls, user_agent, url):
result_count = [extract.get_results_count(
url, user_agent) for url in search_urls]
return result_count
@task
def df_build_results_count(keyword_list, result_count, search_urls):
df = pd.DataFrame({'keyword': keyword_list,
'results_count': result_count,
'search_url': search_urls,
'query_timestamp': datetime.now()})
return df
@task
def assert_df(df, keyword_list, url):
extract.assert_df(df, keyword_list, url)
@task
def export_raw_data(df, path):
load.write_to_csv(df, path)
# -------------------------------------------------
# Tranform data for analysis
# -------------------------------------------------
@task
def transform_raw_data():
df = (load_raw_data.load(raw_data_dir='../data/0_raw', filename='google_results_count')
.pipe(transform.impute_results_count)
.pipe(transform.feature_engineering))
return df
@task
def export_analysis_data(df, path):
load.write_to_csv(df, path)
@task
def deploy_plots(df):
# subset df for last x days
df_lxdays = transform.subset_last_x_days(df, last_x_days=30)
plot.set_layout_template()
fig_timeline = plot.plot_timeline(df)
fig_change = plot.plot_change(df_lxdays)
plot.deploy_figure(fig_timeline, filename="google_results_count_timeline")
plot.deploy_figure(fig_change, filename="google_results_count_change")
def main():
# ~----------------- SETTINGS -----------------~
with open(r'../settings.yml') as file:
settings = yaml.full_load(file)
PROJECT_DIR = '../'
RAW_DATA_DIR = settings['project']['raw_data_dir']
FINAL_DATA_DIR = settings['project']['final_data_dir']
FILENAME = f"{settings['project']['export_filename']}_{datetime.now().strftime('%Y%m%d_%H%M')}.csv"
FILEPATH_RAW = os.path.join(PROJECT_DIR, RAW_DATA_DIR, FILENAME)
FILEPATH_ANALYSIS = os.path.join(PROJECT_DIR, FINAL_DATA_DIR, FILENAME)
KEYWORDS = settings['query']['keywords']
USER_AGENT = settings['query']['user_agent']
GOOGLE_URL = settings['query']['google_url']
# ~----------------- FLOW -----------------~
# ~-- daily schedule
schedule = IntervalSchedule(
start_date=datetime.strptime("20210424-030500UTC", "%Y%m%d-%H%M%S%Z"),
interval=timedelta(days=1),
)
with Flow("etl", schedule=schedule) as flow:
# parameter
filepath_raw = Parameter(name="filepath_raw")
filepath_analysis = Parameter(name="filepath_analysis")
keywords = Parameter(name="keywords")
user_agent = Parameter(name="user_agent")
google_url = Parameter(name="google_url")
# task flow
# -- raw data
search_urls = create_search_url(keywords, google_url)
results_count = extract_result_count(
search_urls, user_agent, google_url)
df = df_build_results_count(keywords, results_count, search_urls)
assert_df(df, keywords, google_url)
export_raw_data(df, filepath_raw)
# -- analysis data and plot
df = transform_raw_data()
export_analysis_data(df, filepath_analysis)
deploy_plots(df)
# ~----------------- RUN -----------------~
flow.run(filepath_raw=FILEPATH_RAW,
filepath_analysis=FILEPATH_ANALYSIS,
keywords=KEYWORDS,
user_agent=USER_AGENT,
google_url=GOOGLE_URL)
if __name__ == "__main__":
main()
| en | 0.228405 | # general # prefect # custom # -- for logging # ------------------------------------------------- # Extract raw data # ------------------------------------------------- # ------------------------------------------------- # Tranform data for analysis # ------------------------------------------------- # subset df for last x days # ~----------------- SETTINGS -----------------~ # ~----------------- FLOW -----------------~ # ~-- daily schedule # parameter # task flow # -- raw data # -- analysis data and plot # ~----------------- RUN -----------------~ | 2.357933 | 2 |
pyscf/nao/test/test_0064_gw_h_chain.py | robert-anderson/pyscf | 2 | 6625002 | from __future__ import print_function, division
import unittest, numpy as np
from pyscf import gto, scf
from pyscf.nao import gw
class KnowValues(unittest.TestCase):

    def test_gw_h2_ae_spin_rf0_speed(self):
        """Compare rf0_den against the reference rf0_cmplx_ref for an H8 chain.

        Builds a closed-shell RHF solution for a linear chain of 8 hydrogen
        atoms, then checks the non-interacting response ``rf0_den`` against
        the reference implementation ``rf0_cmplx_ref`` on a grid of complex
        frequencies.
        """
        mol = gto.M( verbose = 1,
            atom = '''H 0 0 0; H 0 0 0.5; H 0 0 1.0; H 0 0 1.5; H 0 0 2.0; H 0 0 2.5; H 0 0 3.0; H 0 0 3.5;''',
            basis = 'cc-pvdz', spin=0)
        #mol = gto.M( verbose = 0, atom = '''H 0.0 0.0 -0.3707; H 0.0 0.0 0.3707''', basis = 'cc-pvdz',)
        gto_mf = scf.RHF(mol)
        etot = gto_mf.kernel()
        #print(__name__, 'etot', etot)
        #print('gto_mf.mo_energy:', gto_mf.mo_energy)
        b = gw(mf=gto_mf, gto=mol, verbosity=0, nvrt=4)
        # Real frequency grid 0.0..0.9 shifted by a constant imaginary part.
        ww = np.arange(0.0, 1.0, 0.1)+1j*0.2
        rf0 = b.rf0_den(ww)
        rf0_ref = b.rf0_cmplx_ref(ww)
        print(__name__, '|diff|', abs(rf0_ref-rf0).sum()/rf0.size)
        # Mean absolute deviation per element must be at machine precision.
        self.assertTrue(abs(rf0_ref-rf0).sum()/rf0.size<1e-12)


if __name__ == "__main__": unittest.main()
| from __future__ import print_function, division
import unittest, numpy as np
from pyscf import gto, scf
from pyscf.nao import gw
class KnowValues(unittest.TestCase):
def test_gw_h2_ae_spin_rf0_speed(self):
""" This is GW """
mol = gto.M( verbose = 1,
atom = '''H 0 0 0; H 0 0 0.5; H 0 0 1.0; H 0 0 1.5; H 0 0 2.0; H 0 0 2.5; H 0 0 3.0; H 0 0 3.5;''',
basis = 'cc-pvdz', spin=0)
#mol = gto.M( verbose = 0, atom = '''H 0.0 0.0 -0.3707; H 0.0 0.0 0.3707''', basis = 'cc-pvdz',)
gto_mf = scf.RHF(mol)
etot = gto_mf.kernel()
#print(__name__, 'etot', etot)
#print('gto_mf.mo_energy:', gto_mf.mo_energy)
b = gw(mf=gto_mf, gto=mol, verbosity=0, nvrt=4)
ww = np.arange(0.0, 1.0, 0.1)+1j*0.2
rf0 = b.rf0_den(ww)
rf0_ref = b.rf0_cmplx_ref(ww)
print(__name__, '|diff|', abs(rf0_ref-rf0).sum()/rf0.size)
self.assertTrue(abs(rf0_ref-rf0).sum()/rf0.size<1e-12)
if __name__ == "__main__": unittest.main()
| en | 0.404121 | This is GW H 0 0 0; H 0 0 0.5; H 0 0 1.0; H 0 0 1.5; H 0 0 2.0; H 0 0 2.5; H 0 0 3.0; H 0 0 3.5; #mol = gto.M( verbose = 0, atom = '''H 0.0 0.0 -0.3707; H 0.0 0.0 0.3707''', basis = 'cc-pvdz',) #print(__name__, 'etot', etot) #print('gto_mf.mo_energy:', gto_mf.mo_energy) | 2.096999 | 2 |
api/tacticalrmm/scripts/tests.py | HighTech-Grace-Solutions/tacticalrmm | 0 | 6625003 | from email.policy import default
import json
import os
from pathlib import Path
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from model_bakery import baker
from tacticalrmm.test import TacticalTestCase
from .models import Script
from .serializers import ScriptSerializer, ScriptTableSerializer
class TestScriptViews(TacticalTestCase):
    """API tests for the script endpoints (list/add/modify/get/delete/download)
    plus validation of the bundled community-script catalog.

    Changes from the original: deprecated ``assertEquals`` replaced by
    ``assertEqual``, and a placeholder-free f-string turned into a plain
    string literal. Test behavior is unchanged.
    """

    def setUp(self):
        self.authenticate()

    def test_get_scripts(self):
        """Listing scripts returns the table serialization and requires auth."""
        url = "/scripts/scripts/"
        scripts = baker.make("scripts.Script", _quantity=3)

        serializer = ScriptTableSerializer(scripts, many=True)
        resp = self.client.get(url, format="json")
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(serializer.data, resp.data)  # type: ignore

        self.check_not_authenticated("get", url)

    def test_add_script(self):
        """Scripts can be created from JSON or from a multipart file upload."""
        url = "/scripts/scripts/"

        data = {
            "name": "Name",
            "description": "Description",
            "shell": "powershell",
            "category": "New",
            "code": "Some Test Code\nnew Line",
            "default_timeout": 99,
            "args": ["hello", "world", r"{{agent.public_ip}}"],
            "favorite": False,
        }

        # test without file upload
        resp = self.client.post(url, data, format="json")
        self.assertEqual(resp.status_code, 200)
        self.assertTrue(Script.objects.filter(name="Name").exists())
        self.assertEqual(Script.objects.get(name="Name").code, data["code"])

        # test with file upload
        # file with 'Test' as content
        file = SimpleUploadedFile(
            "test_script.bat", b"\x54\x65\x73\x74", content_type="text/plain"
        )
        data = {
            "name": "<NAME>",
            "description": "Description",
            "shell": "cmd",
            "category": "New",
            "filename": file,
            "default_timeout": 4455,
            "args": json.dumps(
                ["hello", "world", r"{{agent.public_ip}}"]
            ),  # simulate javascript's JSON.stringify() for formData
        }

        # test with file upload
        resp = self.client.post(url, data, format="multipart")
        self.assertEqual(resp.status_code, 200)
        script = Script.objects.filter(name="New Name").first()
        self.assertEqual(script.code, "Test")

        self.check_not_authenticated("post", url)

    def test_modify_script(self):
        """Editing works for user scripts; builtins only accept the favorite flag."""
        # test a call where script doesn't exist
        resp = self.client.put("/scripts/500/script/", format="json")
        self.assertEqual(resp.status_code, 404)

        # make a userdefined script
        script = baker.make_recipe("scripts.script")
        url = f"/scripts/{script.pk}/script/"

        data = {
            "name": script.name,
            "description": "Description Change",
            "shell": script.shell,
            "code": "Test Code\nAnother Line",
            "default_timeout": 13344556,
        }

        # test edit a userdefined script
        resp = self.client.put(url, data, format="json")
        self.assertEqual(resp.status_code, 200)
        script = Script.objects.get(pk=script.pk)
        self.assertEqual(script.description, "Description Change")
        self.assertEqual(script.code, "Test Code\nAnother Line")

        # test edit a builtin script
        data = {"name": "New Name", "description": "New Desc", "code": "Some New Code"}
        builtin_script = baker.make_recipe("scripts.script", script_type="builtin")
        resp = self.client.put(
            f"/scripts/{builtin_script.pk}/script/", data, format="json"
        )
        self.assertEqual(resp.status_code, 400)

        data = {
            "name": script.name,
            "description": "Description Change",
            "shell": script.shell,
            "favorite": True,
            "code": "Test Code\nAnother Line",
            "default_timeout": 54345,
        }

        # test marking a builtin script as favorite
        resp = self.client.put(
            f"/scripts/{builtin_script.pk}/script/", data, format="json"
        )
        self.assertEqual(resp.status_code, 200)
        self.assertTrue(Script.objects.get(pk=builtin_script.pk).favorite)

        self.check_not_authenticated("put", url)

    def test_get_script(self):
        """Fetching a single script returns its full serialization."""
        # test a call where script doesn't exist
        resp = self.client.get("/scripts/500/script/", format="json")
        self.assertEqual(resp.status_code, 404)

        script = baker.make("scripts.Script")
        url = f"/scripts/{script.pk}/script/"  # type: ignore
        serializer = ScriptSerializer(script)
        resp = self.client.get(url, format="json")
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(serializer.data, resp.data)  # type: ignore

        self.check_not_authenticated("get", url)

    def test_delete_script(self):
        """User scripts can be deleted; community (builtin) scripts cannot."""
        # test a call where script doesn't exist
        resp = self.client.delete("/scripts/500/script/", format="json")
        self.assertEqual(resp.status_code, 404)

        # test delete script
        script = baker.make_recipe("scripts.script")
        url = f"/scripts/{script.pk}/script/"
        resp = self.client.delete(url, format="json")
        self.assertEqual(resp.status_code, 200)
        self.assertFalse(Script.objects.filter(pk=script.pk).exists())

        # test delete community script
        script = baker.make_recipe("scripts.script", script_type="builtin")
        url = f"/scripts/{script.pk}/script/"
        resp = self.client.delete(url, format="json")
        self.assertEqual(resp.status_code, 400)

        self.check_not_authenticated("delete", url)

    def test_download_script(self):
        """Download returns the decoded code with a shell-specific extension."""
        # test a call where script doesn't exist
        resp = self.client.get("/scripts/500/download/", format="json")
        self.assertEqual(resp.status_code, 404)

        # return script code property should be "Test"
        # test powershell file
        script = baker.make(
            "scripts.Script", code_base64="VGVzdA==", shell="powershell"
        )
        url = f"/scripts/{script.pk}/download/"  # type: ignore
        resp = self.client.get(url, format="json")
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.data, {"filename": f"{script.name}.ps1", "code": "Test"})  # type: ignore

        # test batch file
        script = baker.make("scripts.Script", code_base64="VGVzdA==", shell="cmd")
        url = f"/scripts/{script.pk}/download/"  # type: ignore
        resp = self.client.get(url, format="json")
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.data, {"filename": f"{script.name}.bat", "code": "Test"})  # type: ignore

        # test python file
        script = baker.make("scripts.Script", code_base64="VGVzdA==", shell="python")
        url = f"/scripts/{script.pk}/download/"  # type: ignore
        resp = self.client.get(url, format="json")
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.data, {"filename": f"{script.name}.py", "code": "Test"})  # type: ignore

        self.check_not_authenticated("get", url)

    def test_community_script_json_file(self):
        """Every catalog entry is well-formed and points at an existing file."""
        valid_shells = ["powershell", "python", "cmd"]

        if not settings.DOCKER_BUILD:
            scripts_dir = os.path.join(Path(settings.BASE_DIR).parents[1], "scripts")
        else:
            scripts_dir = settings.SCRIPTS_DIR

        with open(
            os.path.join(settings.BASE_DIR, "scripts/community_scripts.json")
        ) as f:
            info = json.load(f)

        guids = []
        for script in info:
            fn: str = script["filename"]
            self.assertTrue(os.path.exists(os.path.join(scripts_dir, fn)))
            self.assertTrue(script["filename"])
            self.assertTrue(script["name"])
            self.assertTrue(script["description"])
            self.assertTrue(script["shell"])
            self.assertIn(script["shell"], valid_shells)

            if fn.endswith(".ps1"):
                self.assertEqual(script["shell"], "powershell")
            elif fn.endswith(".bat"):
                self.assertEqual(script["shell"], "cmd")
            elif fn.endswith(".py"):
                self.assertEqual(script["shell"], "python")

            if "args" in script.keys():
                self.assertIsInstance(script["args"], list)

            # allows strings as long as they can be type casted to int
            if "default_timeout" in script.keys():
                self.assertIsInstance(int(script["default_timeout"]), int)

            self.assertIn("guid", script.keys())
            guids.append(script["guid"])

        # check guids are unique
        self.assertEqual(len(guids), len(set(guids)))

    def test_load_community_scripts(self):
        """load_community_scripts loads the full catalog and is idempotent."""
        with open(
            os.path.join(settings.BASE_DIR, "scripts/community_scripts.json")
        ) as f:
            info = json.load(f)

        Script.load_community_scripts()
        community_scripts = Script.objects.filter(script_type="builtin").count()
        self.assertEqual(len(info), community_scripts)

        # test updating already added community scripts
        Script.load_community_scripts()
        self.assertEqual(len(info), community_scripts)

    def test_script_filenames_do_not_contain_spaces(self):
        """Community script filenames must not contain spaces."""
        with open(
            os.path.join(settings.BASE_DIR, "scripts/community_scripts.json")
        ) as f:
            info = json.load(f)
        for script in info:
            fn: str = script["filename"]
            self.assertTrue(" " not in fn)
| from email.policy import default
import json
import os
from pathlib import Path
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from model_bakery import baker
from tacticalrmm.test import TacticalTestCase
from .models import Script
from .serializers import ScriptSerializer, ScriptTableSerializer
class TestScriptViews(TacticalTestCase):
def setUp(self):
self.authenticate()
def test_get_scripts(self):
url = "/scripts/scripts/"
scripts = baker.make("scripts.Script", _quantity=3)
serializer = ScriptTableSerializer(scripts, many=True)
resp = self.client.get(url, format="json")
self.assertEqual(resp.status_code, 200)
self.assertEqual(serializer.data, resp.data) # type: ignore
self.check_not_authenticated("get", url)
def test_add_script(self):
url = f"/scripts/scripts/"
data = {
"name": "Name",
"description": "Description",
"shell": "powershell",
"category": "New",
"code": "Some Test Code\nnew Line",
"default_timeout": 99,
"args": ["hello", "world", r"{{agent.public_ip}}"],
"favorite": False,
}
# test without file upload
resp = self.client.post(url, data, format="json")
self.assertEqual(resp.status_code, 200)
self.assertTrue(Script.objects.filter(name="Name").exists())
self.assertEqual(Script.objects.get(name="Name").code, data["code"])
# test with file upload
# file with 'Test' as content
file = SimpleUploadedFile(
"test_script.bat", b"\x54\x65\x73\x74", content_type="text/plain"
)
data = {
"name": "<NAME>",
"description": "Description",
"shell": "cmd",
"category": "New",
"filename": file,
"default_timeout": 4455,
"args": json.dumps(
["hello", "world", r"{{agent.public_ip}}"]
), # simulate javascript's JSON.stringify() for formData
}
# test with file upload
resp = self.client.post(url, data, format="multipart")
self.assertEqual(resp.status_code, 200)
script = Script.objects.filter(name="New Name").first()
self.assertEquals(script.code, "Test")
self.check_not_authenticated("post", url)
def test_modify_script(self):
# test a call where script doesn't exist
resp = self.client.put("/scripts/500/script/", format="json")
self.assertEqual(resp.status_code, 404)
# make a userdefined script
script = baker.make_recipe("scripts.script")
url = f"/scripts/{script.pk}/script/"
data = {
"name": script.name,
"description": "Description Change",
"shell": script.shell,
"code": "Test Code\nAnother Line",
"default_timeout": 13344556,
}
# test edit a userdefined script
resp = self.client.put(url, data, format="json")
self.assertEqual(resp.status_code, 200)
script = Script.objects.get(pk=script.pk)
self.assertEquals(script.description, "Description Change")
self.assertEquals(script.code, "Test Code\nAnother Line")
# test edit a builtin script
data = {"name": "New Name", "description": "New Desc", "code": "Some New Code"}
builtin_script = baker.make_recipe("scripts.script", script_type="builtin")
resp = self.client.put(
f"/scripts/{builtin_script.pk}/script/", data, format="json"
)
self.assertEqual(resp.status_code, 400)
data = {
"name": script.name,
"description": "Description Change",
"shell": script.shell,
"favorite": True,
"code": "Test Code\nAnother Line",
"default_timeout": 54345,
}
# test marking a builtin script as favorite
resp = self.client.put(
f"/scripts/{builtin_script.pk}/script/", data, format="json"
)
self.assertEqual(resp.status_code, 200)
self.assertTrue(Script.objects.get(pk=builtin_script.pk).favorite)
self.check_not_authenticated("put", url)
def test_get_script(self):
# test a call where script doesn't exist
resp = self.client.get("/scripts/500/script/", format="json")
self.assertEqual(resp.status_code, 404)
script = baker.make("scripts.Script")
url = f"/scripts/{script.pk}/script/" # type: ignore
serializer = ScriptSerializer(script)
resp = self.client.get(url, format="json")
self.assertEqual(resp.status_code, 200)
self.assertEqual(serializer.data, resp.data) # type: ignore
self.check_not_authenticated("get", url)
def test_delete_script(self):
# test a call where script doesn't exist
resp = self.client.delete("/scripts/500/script/", format="json")
self.assertEqual(resp.status_code, 404)
# test delete script
script = baker.make_recipe("scripts.script")
url = f"/scripts/{script.pk}/script/"
resp = self.client.delete(url, format="json")
self.assertEqual(resp.status_code, 200)
self.assertFalse(Script.objects.filter(pk=script.pk).exists())
# test delete community script
script = baker.make_recipe("scripts.script", script_type="builtin")
url = f"/scripts/{script.pk}/script/"
resp = self.client.delete(url, format="json")
self.assertEqual(resp.status_code, 400)
self.check_not_authenticated("delete", url)
def test_download_script(self):
# test a call where script doesn't exist
resp = self.client.get("/scripts/500/download/", format="json")
self.assertEqual(resp.status_code, 404)
# return script code property should be "Test"
# test powershell file
script = baker.make(
"scripts.Script", code_base64="VGVzdA==", shell="powershell"
)
url = f"/scripts/{script.pk}/download/" # type: ignore
resp = self.client.get(url, format="json")
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data, {"filename": f"{script.name}.ps1", "code": "Test"}) # type: ignore
# test batch file
script = baker.make("scripts.Script", code_base64="VGVzdA==", shell="cmd")
url = f"/scripts/{script.pk}/download/" # type: ignore
resp = self.client.get(url, format="json")
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data, {"filename": f"{script.name}.bat", "code": "Test"}) # type: ignore
# test python file
script = baker.make("scripts.Script", code_base64="VGVzdA==", shell="python")
url = f"/scripts/{script.pk}/download/" # type: ignore
resp = self.client.get(url, format="json")
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data, {"filename": f"{script.name}.py", "code": "Test"}) # type: ignore
self.check_not_authenticated("get", url)
def test_community_script_json_file(self):
valid_shells = ["powershell", "python", "cmd"]
if not settings.DOCKER_BUILD:
scripts_dir = os.path.join(Path(settings.BASE_DIR).parents[1], "scripts")
else:
scripts_dir = settings.SCRIPTS_DIR
with open(
os.path.join(settings.BASE_DIR, "scripts/community_scripts.json")
) as f:
info = json.load(f)
guids = []
for script in info:
fn: str = script["filename"]
self.assertTrue(os.path.exists(os.path.join(scripts_dir, fn)))
self.assertTrue(script["filename"])
self.assertTrue(script["name"])
self.assertTrue(script["description"])
self.assertTrue(script["shell"])
self.assertIn(script["shell"], valid_shells)
if fn.endswith(".ps1"):
self.assertEqual(script["shell"], "powershell")
elif fn.endswith(".bat"):
self.assertEqual(script["shell"], "cmd")
elif fn.endswith(".py"):
self.assertEqual(script["shell"], "python")
if "args" in script.keys():
self.assertIsInstance(script["args"], list)
# allows strings as long as they can be type casted to int
if "default_timeout" in script.keys():
self.assertIsInstance(int(script["default_timeout"]), int)
self.assertIn("guid", script.keys())
guids.append(script["guid"])
# check guids are unique
self.assertEqual(len(guids), len(set(guids)))
def test_load_community_scripts(self):
with open(
os.path.join(settings.BASE_DIR, "scripts/community_scripts.json")
) as f:
info = json.load(f)
Script.load_community_scripts()
community_scripts = Script.objects.filter(script_type="builtin").count()
self.assertEqual(len(info), community_scripts)
# test updating already added community scripts
Script.load_community_scripts()
self.assertEqual(len(info), community_scripts)
def test_script_filenames_do_not_contain_spaces(self):
with open(
os.path.join(settings.BASE_DIR, "scripts/community_scripts.json")
) as f:
info = json.load(f)
for script in info:
fn: str = script["filename"]
self.assertTrue(" " not in fn)
| en | 0.849785 | # type: ignore # test without file upload # test with file upload # file with 'Test' as content # simulate javascript's JSON.stringify() for formData # test with file upload # test a call where script doesn't exist # make a userdefined script # test edit a userdefined script # test edit a builtin script # test marking a builtin script as favorite # test a call where script doesn't exist # type: ignore # type: ignore # test a call where script doesn't exist # test delete script # test delete community script # test a call where script doesn't exist # return script code property should be "Test" # test powershell file # type: ignore # type: ignore # test batch file # type: ignore # type: ignore # test python file # type: ignore # type: ignore # allows strings as long as they can be type casted to int # check guids are unique # test updating already added community scripts | 2.085523 | 2 |
pypesto/sample/auto_correlation.py | m-philipps/pyPESTO | 0 | 6625004 | <gh_stars>0
import numpy as np
def autocorrelation_sokal(chain: np.ndarray) -> np.ndarray:
    """
    Estimate the integrated autocorrelation time of a MCMC chain.

    Uses Sokal's adaptive truncated periodogram estimator: the sample
    autocorrelation is computed via two FFT passes, and the running sum is
    truncated adaptively at the first lag where it drops below zero.

    References: Haario et al., Stat Comput 16, 339-354 (2006),
    https://doi.org/10.1007/s11222-006-9438-0; Sokal (1997), Monte Carlo
    Methods in Statistical Mechanics, NATO ASI Series B vol 361.

    Parameters
    ----------
    chain: The MCMC chain, shape (n_samples, n_parameters).

    Returns
    -------
    tau: Per-parameter estimates of the integrated autocorrelation time.
    """
    n_samples, n_par = chain.shape
    tau = np.zeros(n_par)

    # Periodogram: squared modulus of the FFT, with the zero-frequency
    # component removed before the second transform.
    spectrum = np.fft.fft(chain, axis=0)
    power = np.real(spectrum) ** 2 + np.imag(spectrum) ** 2
    power[0, :] = 0.0
    power = np.real(np.fft.fft(power, axis=0))

    # Per-parameter variance; a zero variance marks a constant column.
    variance = power[0] / n_samples / (n_samples - 1)

    for par in range(n_par):
        if variance[par] == 0.0:
            # Constant parameter: leave tau at 0.
            continue
        # Normalized autocorrelation (rho[0] == 1).
        rho = power[:, par] / power[0, par]
        # Adaptive truncation of the running sum (Sokal's criterion).
        running = -1.0 / 3.0
        for lag in range(n_samples):
            running = running + rho[lag] - 1.0 / 6.0
            if running < 0.0:
                tau[par] = 2.0 * (running + lag / 6.0)
                break
    return tau
| import numpy as np
def autocorrelation_sokal(chain: np.ndarray) -> np.ndarray:
"""
Estimate the integrated autocorrelation time of a MCMC chain.
Uses Sokal's adaptive truncated periodogram estimator.
- <NAME>., <NAME>., <NAME>. et al. DRAM: Efficient
adaptive MCMC. Stat Comput 16, 339–354 (2006).
https://doi.org/10.1007/s11222-006-9438-0
- <NAME>. (1997) Monte Carlo Methods in Statistical Mechanics:
Foundations and New Algorithms. In: <NAME>.,
<NAME>., <NAME>. (eds) Functional Integration.
NATO ASI Series (Series B: Physics), vol 361. Springer, Boston, MA
Parameters
----------
chain: The MCMC chain.
Returns
-------
tau_est: An estimate of the integrated autocorrelation time of
the MCMC chain.
"""
nsamples, npar = chain.shape
tau_est = np.zeros((npar))
# Calculate the fast Fourier transform
x = np.fft.fft(chain, axis=0)
# Get the real part
xr = np.real(x)
# Get the imaginary part
xi = np.imag(x)
xr = xr**2 + xi**2
# First value to zero
xr[0, :] = 0.0
# Calculate the fast Fourier transform
# of the transformation
xr = np.real(np.fft.fft(xr, axis=0))
# Calculate the variance
var = xr[0] / nsamples / (nsamples - 1)
# Loop over parameters
for j in range(npar):
if var[j] == 0.0:
continue
# Normalize by the first value
xr[:, j] = xr[:, j] / xr[0, j]
# Initiate variable
_sum = -1 / 3
# Loop over samples
for i in range(nsamples):
_sum = _sum + xr[i, j] - 1 / 6
if _sum < 0.0:
tau_est[j] = 2 * (_sum + i / 6)
break
return tau_est | en | 0.54947 | Estimate the integrated autocorrelation time of a MCMC chain. Uses Sokal's adaptive truncated periodogram estimator. - <NAME>., <NAME>., <NAME>. et al. DRAM: Efficient adaptive MCMC. Stat Comput 16, 339–354 (2006). https://doi.org/10.1007/s11222-006-9438-0 - <NAME>. (1997) Monte Carlo Methods in Statistical Mechanics: Foundations and New Algorithms. In: <NAME>., <NAME>., <NAME>. (eds) Functional Integration. NATO ASI Series (Series B: Physics), vol 361. Springer, Boston, MA Parameters ---------- chain: The MCMC chain. Returns ------- tau_est: An estimate of the integrated autocorrelation time of the MCMC chain. # Calculate the fast Fourier transform # Get the real part # Get the imaginary part # First value to zero # Calculate the fast Fourier transform # of the transformation # Calculate the variance # Loop over parameters # Normalize by the first value # Initiate variable # Loop over samples | 2.871833 | 3 |
advanced_functionality/fairseq_translation/fairseq/predictor.py | jpmarques19/tensorflwo-test | 5 | 6625005 | <filename>advanced_functionality/fairseq_translation/fairseq/predictor.py
from sagemaker_translate import model_fn, input_fn, output_fn, predict_fn
import flask
import os
prefix = '/opt/ml/'
model_path = os.path.join(prefix, 'model')
print("in predictor.py")
# A singleton for holding the model. This simply loads the model and holds it.
# It has a predict function that does a prediction based on the model and the input data.
class ScoringService(object):
    model = None # Where we keep the model when it's loaded

    @classmethod
    def get_model(cls):
        """Get the model object for this instance, loading it if it's not already loaded."""
        if cls.model is None:
            cls.model = model_fn(model_path)
        return cls.model

    @classmethod
    def predict(cls, serialized_input_data):
        """Run one inference on a serialized request body.

        Args:
            serialized_input_data (str): the raw request payload (the Flask
                handler passes the UTF-8-decoded body); it is converted into
                model input by ``input_fn``.

        Returns:
            Whatever ``output_fn`` produces -- the caller unpacks it as a
            ``(result, accept)`` pair.
        """
        clf = cls.get_model()
        input_data = input_fn(serialized_input_data)
        output = predict_fn(input_data, clf)
        return output_fn(output)
# The flask app for serving predictions
app = flask.Flask(__name__)
@app.route('/ping', methods=['GET'])
def ping():
    """Determine if the container is working and healthy. In this sample container, we declare
    it healthy if we can load the model successfully."""
    # The first ping triggers the model load (get_model caches it); later
    # pings are cheap.
    health = ScoringService.get_model() is not None  # You can insert a health check here

    status = 200 if health else 404
    return flask.Response(response='\n', status=status, mimetype='application/json')
@app.route('/invocations', methods=['POST'])
def transformation():
"""Do an inference on a single batch of data.
"""
data = None
data = flask.request.data.decode('utf-8')
# Do the prediction
result, accept = ScoringService.predict(data)
return flask.Response(response=result, status=200, mimetype='text/json') | <filename>advanced_functionality/fairseq_translation/fairseq/predictor.py
from sagemaker_translate import model_fn, input_fn, output_fn, predict_fn
import flask
import os
prefix = '/opt/ml/'
model_path = os.path.join(prefix, 'model')
print("in predictor.py")
# A singleton for holding the model. This simply loads the model and holds it.
# It has a predict function that does a prediction based on the model and the input data.
class ScoringService(object):
model = None # Where we keep the model when it's loaded
@classmethod
def get_model(cls):
"""Get the model object for this instance, loading it if it's not already loaded."""
if cls.model is None:
cls.model = model_fn(model_path)
return cls.model
@classmethod
def predict(cls, serialized_input_data):
"""For the input, do the predictions and return them.
Args:
input (a pandas dataframe): The data on which to do the predictions. There will be
one prediction per row in the dataframe"""
clf = cls.get_model()
input_data = input_fn(serialized_input_data)
output = predict_fn(input_data, clf)
return output_fn(output)
# The flask app for serving predictions
app = flask.Flask(__name__)
@app.route('/ping', methods=['GET'])
def ping():
"""Determine if the container is working and healthy. In this sample container, we declare
it healthy if we can load the model successfully."""
health = ScoringService.get_model() is not None # You can insert a health check here
status = 200 if health else 404
return flask.Response(response='\n', status=status, mimetype='application/json')
@app.route('/invocations', methods=['POST'])
def transformation():
"""Do an inference on a single batch of data.
"""
data = None
data = flask.request.data.decode('utf-8')
# Do the prediction
result, accept = ScoringService.predict(data)
return flask.Response(response=result, status=200, mimetype='text/json') | en | 0.890845 | # A singleton for holding the model. This simply loads the model and holds it. # It has a predict function that does a prediction based on the model and the input data. # Where we keep the model when it's loaded Get the model object for this instance, loading it if it's not already loaded. For the input, do the predictions and return them. Args: input (a pandas dataframe): The data on which to do the predictions. There will be one prediction per row in the dataframe # The flask app for serving predictions Determine if the container is working and healthy. In this sample container, we declare it healthy if we can load the model successfully. # You can insert a health check here Do an inference on a single batch of data. # Do the prediction | 2.56404 | 3 |
src/practice/20_framework/rl/policies/random_policy.py | djjh/reinforcement-learning-labs | 1 | 6625006 | <gh_stars>1-10
from rl.core import Policy
class RandomPolicy(Policy):
def __init__(self, environment):
self.environment = environment
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def get_action(self, observation):
return self.environment.action_space.sample()
def update(self, epoch, episodes):
pass
| from rl.core import Policy
class RandomPolicy(Policy):
def __init__(self, environment):
self.environment = environment
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def get_action(self, observation):
return self.environment.action_space.sample()
def update(self, epoch, episodes):
pass | none | 1 | 2.248434 | 2 | |
pyodec/files/campsci_cs135_ep.py | pyodec/pyodec | 0 | 6625007 | from pyodec.core import FileDecoder, VariableList
from pyodec.messages.campscics135 import decoder as msgdecode
import numpy as np
import os
import time
import gzip
import traceback
"""
Campbell Scienfitic CS-135 with message 006 and epoch timestamps
"""
class Epcs135D(FileDecoder):
init_vars = VariableList()
init_vars.addvar('DATTIM','Observation time',int,1,'seconds since 1970-01-01 00:00 UTC')
init_vars += msgdecode.vars
init_fixed_vars = msgdecode.fixed_vars
def on_chunk(self, message):
"""
Read a chunk of data from the file, the chunk being spliced from read_chunks.
"""
# this is an end-spliced message, so we will get the timestamp
ob = message.split(unichr(001))
try:
tmstr = ob[0].strip().split()[-1]
# don't add extra newlines after the time string if you want to be happy
otime = float(tmstr)
except:
return False
try:
data = msgdecode.decode(ob[1])
except:
print "decode failure"
traceback.print_exc()
# there was something ugly in this data... serial hiccups.
data=False
if data is False:
return None
output = [otime]+data
return output
def decode_proc(self, filepath, yieldcount=1000, **kwargs):
# open the file
#return self.read_chunks(yieldcount, self.open_ascii(filepath), end=unichr(004))
# problem with above: who closes the file handle??
with self.open_ascii(filepath) as filehandle:
for d in self.read_chunks(yieldcount, filehandle, end=unichr(004)):
yield d
decoder = Epcs135D()
if __name__ == '__main__':
import sys
fil = sys.argv[1]
print decoder.decode(fil)
| from pyodec.core import FileDecoder, VariableList
from pyodec.messages.campscics135 import decoder as msgdecode
import numpy as np
import os
import time
import gzip
import traceback
"""
Campbell Scienfitic CS-135 with message 006 and epoch timestamps
"""
class Epcs135D(FileDecoder):
init_vars = VariableList()
init_vars.addvar('DATTIM','Observation time',int,1,'seconds since 1970-01-01 00:00 UTC')
init_vars += msgdecode.vars
init_fixed_vars = msgdecode.fixed_vars
def on_chunk(self, message):
"""
Read a chunk of data from the file, the chunk being spliced from read_chunks.
"""
# this is an end-spliced message, so we will get the timestamp
ob = message.split(unichr(001))
try:
tmstr = ob[0].strip().split()[-1]
# don't add extra newlines after the time string if you want to be happy
otime = float(tmstr)
except:
return False
try:
data = msgdecode.decode(ob[1])
except:
print "decode failure"
traceback.print_exc()
# there was something ugly in this data... serial hiccups.
data=False
if data is False:
return None
output = [otime]+data
return output
def decode_proc(self, filepath, yieldcount=1000, **kwargs):
# open the file
#return self.read_chunks(yieldcount, self.open_ascii(filepath), end=unichr(004))
# problem with above: who closes the file handle??
with self.open_ascii(filepath) as filehandle:
for d in self.read_chunks(yieldcount, filehandle, end=unichr(004)):
yield d
decoder = Epcs135D()
if __name__ == '__main__':
import sys
fil = sys.argv[1]
print decoder.decode(fil)
| en | 0.84361 | Campbell Scienfitic CS-135 with message 006 and epoch timestamps Read a chunk of data from the file, the chunk being spliced from read_chunks. # this is an end-spliced message, so we will get the timestamp # don't add extra newlines after the time string if you want to be happy # there was something ugly in this data... serial hiccups. # open the file #return self.read_chunks(yieldcount, self.open_ascii(filepath), end=unichr(004)) # problem with above: who closes the file handle?? | 2.164462 | 2 |
TkiWrapper/logger.py | Jakub21/Tki-Wrapper | 0 | 6625008 | <filename>TkiWrapper/logger.py
from TkiWrapper.Settings import Settings
from Namespace.Namespace import Namespace
from datetime import datetime
class LogIssuer:
def setIssuerData(self):
self.__logIssuerData__ = Namespace(scope = 'tki',
name = self.__class__.__name__, id = hex(id(self))[2:].upper())
return self
def printLog(level, issuer, *message):
if not Settings.enableLogs: return
time = datetime.now()
time = time.strftime('%I:%M:%S')
levels = ['Debug', 'Info', 'Note', 'Warn', 'Error']
levelNo = levels.index(level)
if levelNo < levels.index(Settings.logLevel): return
lvlPrefix = '+'*levelNo + ' '*(4-levelNo)
try: issuer = issuer.__logIssuerData__
except:
print('LOG ISSUER NOT SPECIFIED')
raise
print(f'@{time} [{lvlPrefix}] <{issuer.id} {issuer.scope}:{issuer.name}>', *message)
def Debug(issuer, *message):
printLog('Debug', issuer, *message)
def Info(issuer, *message):
printLog('Info', issuer, *message)
def Note(issuer, *message):
printLog('Note', issuer, *message)
def Warn(issuer, *message):
printLog('Warn', issuer, *message)
def Error(issuer, *message):
printLog('Error', issuer, *message)
| <filename>TkiWrapper/logger.py
from TkiWrapper.Settings import Settings
from Namespace.Namespace import Namespace
from datetime import datetime
class LogIssuer:
def setIssuerData(self):
self.__logIssuerData__ = Namespace(scope = 'tki',
name = self.__class__.__name__, id = hex(id(self))[2:].upper())
return self
def printLog(level, issuer, *message):
if not Settings.enableLogs: return
time = datetime.now()
time = time.strftime('%I:%M:%S')
levels = ['Debug', 'Info', 'Note', 'Warn', 'Error']
levelNo = levels.index(level)
if levelNo < levels.index(Settings.logLevel): return
lvlPrefix = '+'*levelNo + ' '*(4-levelNo)
try: issuer = issuer.__logIssuerData__
except:
print('LOG ISSUER NOT SPECIFIED')
raise
print(f'@{time} [{lvlPrefix}] <{issuer.id} {issuer.scope}:{issuer.name}>', *message)
def Debug(issuer, *message):
printLog('Debug', issuer, *message)
def Info(issuer, *message):
printLog('Info', issuer, *message)
def Note(issuer, *message):
printLog('Note', issuer, *message)
def Warn(issuer, *message):
printLog('Warn', issuer, *message)
def Error(issuer, *message):
printLog('Error', issuer, *message)
| none | 1 | 2.420353 | 2 | |
egp/math.py | egpbos/egp | 0 | 6625009 | def skew(a):
m1 = a.mean()
m2 = a.std()
m3 = 0.0
for i in range(len(a)):
m3 += (a[i]-m1)**3.0
return m3/m2**3/len(a)
def kurtosis(a):
m1 = a.mean()
m2 = a.std()
m4 = 0.0
for i in range(len(a)):
m4 += (a[i]-m1)**4.0
return m4/m2**4/len(a) - 3.0
| def skew(a):
m1 = a.mean()
m2 = a.std()
m3 = 0.0
for i in range(len(a)):
m3 += (a[i]-m1)**3.0
return m3/m2**3/len(a)
def kurtosis(a):
m1 = a.mean()
m2 = a.std()
m4 = 0.0
for i in range(len(a)):
m4 += (a[i]-m1)**4.0
return m4/m2**4/len(a) - 3.0
| none | 1 | 3.440157 | 3 | |
mgeconvert/frontend/tm_to_ir/qat_pattern.py | Zhiy-Zhang/mgeconvert | 42 | 6625010 | <filename>mgeconvert/frontend/tm_to_ir/qat_pattern.py
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: disable=import-error,no-name-in-module
import megengine.functional as F
from megengine.core.ops import builtin
from megengine.module.qat import QATModule
from megengine.traced_module.expr import Apply, CallFunction, GetAttr
from ...converter_ir.ir_op import ReluOpr
from .op_generators import GenConv2dOpr, GenDeconv2dOpr
from .pattern_utils import InputNode, MatchAnyNode, is_match, register_fusion_pattern
from .tm_tensor_resolver import TensorNodeResolver
def gen_qat_conv_opr(module, conv_function_expr, qat_expr, irgraph, is_deconv=False):
weight_fakequant = conv_function_expr.inputs[1].expr
bias = None
if len(conv_function_expr.inputs) == 3:
bias = conv_function_expr.inputs[2]
assert (isinstance(bias.expr, GetAttr) and bias.expr.name == "bias") or (
isinstance(bias.expr, Apply)
and isinstance(
bias.expr.opdef, builtin.FakeQuant # pylint:disable=no-member
)
)
assert (
isinstance(weight_fakequant.inputs[1].expr, GetAttr)
and weight_fakequant.inputs[1].expr.name == "weight"
)
assert len(module.graph.inputs) == 2
act_qparams = module.act_fake_quant.get_qparams()
weight_qparams = module.weight_fake_quant.get_qparams()
module.stride = conv_function_expr.args[3]
module.padding = conv_function_expr.args[4]
module.dilation = conv_function_expr.args[5]
module.groups = conv_function_expr.args[6]
assert conv_function_expr.args[7] == "cross_correlation"
assert conv_function_expr.args[8] == "default"
if bias is None:
module.bias = None
op = (
GenConv2dOpr(qat_expr, irgraph).get_opr()
if not is_deconv
else GenDeconv2dOpr(qat_expr, irgraph).get_opr()
)
op.inp_tensors[1].scale = float(weight_qparams.scale)
op.inp_tensors[1].zero_point = int(weight_qparams.zero_point)
op.inp_tensors[1].q_dtype = weight_qparams.dtype_meta.np_dtype_str
if len(op.inp_tensors) == 3:
op.inp_tensors[2].scale = op.inp_tensors[0].scale * op.inp_tensors[1].scale
op.inp_tensors[2].q_dtype = "int32"
op.inp_tensors[2].zero_point = 0
op.out_tensors[0].scale = act_qparams.scale.numpy()[0]
op.out_tensors[0].zero_point = act_qparams.zero_point.numpy()[0]
op.out_tensors[0].q_dtype = act_qparams.dtype_meta.np_dtype_str
return op
MATCH_RULE = {}
pat_conv_bias_relu = (
QATModule._apply_fakequant_with_observer,
MatchAnyNode,
(
F.relu,
(F.conv2d, InputNode, QATModule._apply_fakequant_with_observer, MatchAnyNode),
),
MatchAnyNode,
)
pat_conv_bias = (
QATModule._apply_fakequant_with_observer,
MatchAnyNode,
(F.conv2d, InputNode, QATModule._apply_fakequant_with_observer, MatchAnyNode),
MatchAnyNode,
)
pat_conv_relu = (
QATModule._apply_fakequant_with_observer,
MatchAnyNode,
(F.relu, (F.conv2d, InputNode, QATModule._apply_fakequant_with_observer),),
MatchAnyNode,
)
pat_conv = (
QATModule._apply_fakequant_with_observer,
MatchAnyNode,
(F.conv2d, InputNode, QATModule._apply_fakequant_with_observer),
MatchAnyNode,
)
pat_deconv_relu = (
QATModule._apply_fakequant_with_observer,
MatchAnyNode,
(F.relu, (F.conv_transpose2d, InputNode, QATModule._apply_fakequant_with_observer)),
MatchAnyNode,
)
pat_deconv_bias = (
QATModule._apply_fakequant_with_observer,
MatchAnyNode,
(
F.conv_transpose2d,
InputNode,
QATModule._apply_fakequant_with_observer,
MatchAnyNode,
),
MatchAnyNode,
)
@register_fusion_pattern(pat_conv_bias_relu)
def qat_conv_bias_relu(module, expr, call_expr, irgraph, _):
relu = expr.inputs[1].expr
op = gen_qat_conv_opr(module, relu.inputs[0].expr, call_expr, irgraph)
op.activation = "RELU"
return op
@register_fusion_pattern(pat_conv_bias)
def qat_conv_bias(module, expr, call_expr, irgraph, _):
conv = expr.inputs[1].expr
op = gen_qat_conv_opr(module, conv, call_expr, irgraph)
return op
@register_fusion_pattern(pat_conv_relu)
def qat_conv_relu(module, expr, call_expr, net, _):
relu = expr.inputs[1].expr
op = gen_qat_conv_opr(module, relu.inputs[0].expr, call_expr, net)
op.activation = "RELU"
return op
@register_fusion_pattern(pat_conv)
def qat_conv(module, expr, call_expr, net, _):
conv = expr.inputs[1].expr
op = gen_qat_conv_opr(module, conv, call_expr, net)
return op
@register_fusion_pattern(pat_deconv_bias)
def qat_deconv_bias(module, expr, call_expr, irgraph, _):
conv = expr.inputs[1].expr
op = gen_qat_conv_opr(module, conv, call_expr, irgraph, is_deconv=True)
return op
@register_fusion_pattern(pat_deconv_relu)
def qat_deconv_relu_bias(
module, expr, call_expr, irgraph, resolver: TensorNodeResolver
):
relu = expr.inputs[1].expr
deconv = relu.inputs[0].expr
op = gen_qat_conv_opr(module, deconv, call_expr, irgraph, is_deconv=True)
op.activation = "RELU"
relu_op = ReluOpr()
relu_op.inp_tensors = []
relu_op.out_tensors = []
relu_op.inp_tensors.append(op.out_tensors[0])
relu_op.out_tensors.append(resolver.resolve(call_expr.outputs[0], relu_op)[0])
relu_op.out_tensors[0].name += "_relu"
relu_op.out_tensors[0].q_dtype = relu_op.inp_tensors[0].q_dtype
relu_op.out_tensors[0].scale = relu_op.inp_tensors[0].scale
relu_op.out_tensors[0].zero_point = relu_op.inp_tensors[0].zero_point
irgraph.all_tensors[
irgraph._tensor_ids.index(call_expr.outputs[0]._id)
] = relu_op.out_tensors[0]
return op, relu_op
MATCH_RULE[QATModule._apply_fakequant_with_observer] = [
pat_conv_bias_relu,
pat_conv_bias,
pat_deconv_relu,
pat_conv_relu,
pat_conv,
pat_deconv_bias,
]
def find_match_pattern(graph):
rst = []
for expr in graph._exprs:
if isinstance(expr, CallFunction):
if expr.func in MATCH_RULE:
pat = MATCH_RULE[expr.func]
for p in pat:
if is_match(expr, p):
rst.append((p, expr))
return rst
| <filename>mgeconvert/frontend/tm_to_ir/qat_pattern.py
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: disable=import-error,no-name-in-module
import megengine.functional as F
from megengine.core.ops import builtin
from megengine.module.qat import QATModule
from megengine.traced_module.expr import Apply, CallFunction, GetAttr
from ...converter_ir.ir_op import ReluOpr
from .op_generators import GenConv2dOpr, GenDeconv2dOpr
from .pattern_utils import InputNode, MatchAnyNode, is_match, register_fusion_pattern
from .tm_tensor_resolver import TensorNodeResolver
def gen_qat_conv_opr(module, conv_function_expr, qat_expr, irgraph, is_deconv=False):
weight_fakequant = conv_function_expr.inputs[1].expr
bias = None
if len(conv_function_expr.inputs) == 3:
bias = conv_function_expr.inputs[2]
assert (isinstance(bias.expr, GetAttr) and bias.expr.name == "bias") or (
isinstance(bias.expr, Apply)
and isinstance(
bias.expr.opdef, builtin.FakeQuant # pylint:disable=no-member
)
)
assert (
isinstance(weight_fakequant.inputs[1].expr, GetAttr)
and weight_fakequant.inputs[1].expr.name == "weight"
)
assert len(module.graph.inputs) == 2
act_qparams = module.act_fake_quant.get_qparams()
weight_qparams = module.weight_fake_quant.get_qparams()
module.stride = conv_function_expr.args[3]
module.padding = conv_function_expr.args[4]
module.dilation = conv_function_expr.args[5]
module.groups = conv_function_expr.args[6]
assert conv_function_expr.args[7] == "cross_correlation"
assert conv_function_expr.args[8] == "default"
if bias is None:
module.bias = None
op = (
GenConv2dOpr(qat_expr, irgraph).get_opr()
if not is_deconv
else GenDeconv2dOpr(qat_expr, irgraph).get_opr()
)
op.inp_tensors[1].scale = float(weight_qparams.scale)
op.inp_tensors[1].zero_point = int(weight_qparams.zero_point)
op.inp_tensors[1].q_dtype = weight_qparams.dtype_meta.np_dtype_str
if len(op.inp_tensors) == 3:
op.inp_tensors[2].scale = op.inp_tensors[0].scale * op.inp_tensors[1].scale
op.inp_tensors[2].q_dtype = "int32"
op.inp_tensors[2].zero_point = 0
op.out_tensors[0].scale = act_qparams.scale.numpy()[0]
op.out_tensors[0].zero_point = act_qparams.zero_point.numpy()[0]
op.out_tensors[0].q_dtype = act_qparams.dtype_meta.np_dtype_str
return op
MATCH_RULE = {}
pat_conv_bias_relu = (
QATModule._apply_fakequant_with_observer,
MatchAnyNode,
(
F.relu,
(F.conv2d, InputNode, QATModule._apply_fakequant_with_observer, MatchAnyNode),
),
MatchAnyNode,
)
pat_conv_bias = (
QATModule._apply_fakequant_with_observer,
MatchAnyNode,
(F.conv2d, InputNode, QATModule._apply_fakequant_with_observer, MatchAnyNode),
MatchAnyNode,
)
pat_conv_relu = (
QATModule._apply_fakequant_with_observer,
MatchAnyNode,
(F.relu, (F.conv2d, InputNode, QATModule._apply_fakequant_with_observer),),
MatchAnyNode,
)
pat_conv = (
QATModule._apply_fakequant_with_observer,
MatchAnyNode,
(F.conv2d, InputNode, QATModule._apply_fakequant_with_observer),
MatchAnyNode,
)
pat_deconv_relu = (
QATModule._apply_fakequant_with_observer,
MatchAnyNode,
(F.relu, (F.conv_transpose2d, InputNode, QATModule._apply_fakequant_with_observer)),
MatchAnyNode,
)
pat_deconv_bias = (
QATModule._apply_fakequant_with_observer,
MatchAnyNode,
(
F.conv_transpose2d,
InputNode,
QATModule._apply_fakequant_with_observer,
MatchAnyNode,
),
MatchAnyNode,
)
@register_fusion_pattern(pat_conv_bias_relu)
def qat_conv_bias_relu(module, expr, call_expr, irgraph, _):
relu = expr.inputs[1].expr
op = gen_qat_conv_opr(module, relu.inputs[0].expr, call_expr, irgraph)
op.activation = "RELU"
return op
@register_fusion_pattern(pat_conv_bias)
def qat_conv_bias(module, expr, call_expr, irgraph, _):
conv = expr.inputs[1].expr
op = gen_qat_conv_opr(module, conv, call_expr, irgraph)
return op
@register_fusion_pattern(pat_conv_relu)
def qat_conv_relu(module, expr, call_expr, net, _):
relu = expr.inputs[1].expr
op = gen_qat_conv_opr(module, relu.inputs[0].expr, call_expr, net)
op.activation = "RELU"
return op
@register_fusion_pattern(pat_conv)
def qat_conv(module, expr, call_expr, net, _):
conv = expr.inputs[1].expr
op = gen_qat_conv_opr(module, conv, call_expr, net)
return op
@register_fusion_pattern(pat_deconv_bias)
def qat_deconv_bias(module, expr, call_expr, irgraph, _):
conv = expr.inputs[1].expr
op = gen_qat_conv_opr(module, conv, call_expr, irgraph, is_deconv=True)
return op
@register_fusion_pattern(pat_deconv_relu)
def qat_deconv_relu_bias(
module, expr, call_expr, irgraph, resolver: TensorNodeResolver
):
relu = expr.inputs[1].expr
deconv = relu.inputs[0].expr
op = gen_qat_conv_opr(module, deconv, call_expr, irgraph, is_deconv=True)
op.activation = "RELU"
relu_op = ReluOpr()
relu_op.inp_tensors = []
relu_op.out_tensors = []
relu_op.inp_tensors.append(op.out_tensors[0])
relu_op.out_tensors.append(resolver.resolve(call_expr.outputs[0], relu_op)[0])
relu_op.out_tensors[0].name += "_relu"
relu_op.out_tensors[0].q_dtype = relu_op.inp_tensors[0].q_dtype
relu_op.out_tensors[0].scale = relu_op.inp_tensors[0].scale
relu_op.out_tensors[0].zero_point = relu_op.inp_tensors[0].zero_point
irgraph.all_tensors[
irgraph._tensor_ids.index(call_expr.outputs[0]._id)
] = relu_op.out_tensors[0]
return op, relu_op
MATCH_RULE[QATModule._apply_fakequant_with_observer] = [
pat_conv_bias_relu,
pat_conv_bias,
pat_deconv_relu,
pat_conv_relu,
pat_conv,
pat_deconv_bias,
]
def find_match_pattern(graph):
rst = []
for expr in graph._exprs:
if isinstance(expr, CallFunction):
if expr.func in MATCH_RULE:
pat = MATCH_RULE[expr.func]
for p in pat:
if is_match(expr, p):
rst.append((p, expr))
return rst
| en | 0.827977 | # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # pylint: disable=import-error,no-name-in-module # pylint:disable=no-member | 1.831291 | 2 |
optvaeutils/__init__.py | rahulk90/vae_sparse | 11 | 6625011 | __all__=['viz','parse_args_vae','optimizer']
| __all__=['viz','parse_args_vae','optimizer']
| none | 1 | 1.151507 | 1 | |
model_trainer.py | VanillaBrooks/landing_coordination | 0 | 6625012 | <gh_stars>0
from flight_path_det import PlanePath
import torch
import pprint, time, pymysql, os
class NeuralNetwork(torch.nn.Module):
def __init__(self, D_in, Hidden_1, Hidden_2, Hidden_3, D_out):
super(NeuralNetwork, self).__init__()
self.LI = torch.nn.Linear(D_in, Hidden_1)
self.L1 = torch.nn.Linear(Hidden_1, Hidden_2)
self.L2 = torch.nn.Linear(Hidden_2, Hidden_2)
self.L3 = torch.nn.Linear(Hidden_2, Hidden_2)
self.L4 = torch.nn.Linear(Hidden_2, Hidden_2)
self.L5 = torch.nn.Linear(Hidden_2, Hidden_2)
self.L6 = torch.nn.Linear(Hidden_2, Hidden_2)
self.L7 = torch.nn.Linear(Hidden_2, Hidden_3)
self.LF = torch.nn.Linear(Hidden_3, D_out)
def forward(self, x):
initial = self.LI(x)
L1 = self.L1(initial)
L2 = self.L2(L1)
L3 = self.L3(L2)
L4 = self.L4(L3)
L5 = self.L5(L4)
L6 = self.L6(L5)
L7 = self.L7(L6)
pred = self.LF(L7)
return pred
class DatabaseConnection():
def __init__(self):
self.conn = pymysql.connect(host='localhost', port=3306, user='brooks', password='<PASSWORD>', database='pubg4')
self.cursor = self.conn.cursor()
def op(self, query, arg_tuple=()):
self.cursor.execute(query, arg_tuple)
raw_data = self.cursor.fetchall()
data = [row[:-1] for row in raw_data]
print('the length of the sql data is ', len(data))
return data
def percentdec(prev, new):
x = 100* ((prev- new) / prev)
x = float(str(x)[:5])
return x
def probabilities(train, expect):
ph = []
index = 0
data = list(zip(train,expect))
d2 = data[:]
removed = 0
for pair in d2:
row = pair[1]
sum = 0
nested_ph = []
for item in row:
sum += item
# print('sum is %s for %s index is %s' % (sum, row, index))
if sum <= 30 or sum > 100:
# print('sum is %s for %s index is %s' % (sum, row, index))
data.pop(index)
removed += 1
continue
for item in row:
nested_ph.append(item/sum)
ph.append(nested_ph)
index += 1
if removed:
print('there were %s values removed'%(removed))
for pair in data:
row = pair[1]
sum = 0
for item in row:
sum += item
if sum <= 30 or sum > 100:
print('fuck dude you didnt get rid of the index for the sum', sum)
nt, ne = list(zip(*data))
return nt,ph
D_in = 25
Hidden_1 = 50
Hidden_2 = 40
Hidden_3 = 30
D_out = 25
epochs = 5000000
path = r'D:\Python\pytorch\model_2_3'
if __name__ == '__main__':
torch.set_printoptions(threshold=10000)
db = DatabaseConnection()
training_data, expected_results = probabilities(db.op('SELECT * from distances'), db.op('SELECT * from dropcount'))
print('len training data %s len expected results %s' % (len(training_data), len(expected_results)))
# training_data = training_data[:400]
# expected_results = expected_results[:400]
N = len(training_data)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = NeuralNetwork(D_in, Hidden_1, Hidden_2, Hidden_3, D_out)
model.to(device)
if os.path.exists(path):
print('loaded previous model')
model.load_state_dict(torch.load(path))
training_data = torch.Tensor(training_data).to(device)
expected_results = torch.Tensor(expected_results).to(device)
criterion = torch.nn.SmoothL1Loss(size_average=False, reduce=True)
optimizer = torch.optim.RMSprop(model.parameters(), lr=1e-5)
prev = 1
# time.sleep(363632)
for t in range(epochs):
predicted = model(training_data).view(N,D_out)
percent = str((100* t) / epochs)[:6]
loss = criterion(predicted, expected_results)
if t % 5000 == 0:
print('iter: %s percent done: %s loss: %s loss decrease: %s' % (t,percent, loss.item(), percentdec(prev, float(loss.item()))))
prev = float(loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (t%10000) == 0:
torch.save(model.state_dict(), path)
torch.save(model.state_dict(), path)
| from flight_path_det import PlanePath
import torch
import pprint, time, pymysql, os
class NeuralNetwork(torch.nn.Module):
def __init__(self, D_in, Hidden_1, Hidden_2, Hidden_3, D_out):
super(NeuralNetwork, self).__init__()
self.LI = torch.nn.Linear(D_in, Hidden_1)
self.L1 = torch.nn.Linear(Hidden_1, Hidden_2)
self.L2 = torch.nn.Linear(Hidden_2, Hidden_2)
self.L3 = torch.nn.Linear(Hidden_2, Hidden_2)
self.L4 = torch.nn.Linear(Hidden_2, Hidden_2)
self.L5 = torch.nn.Linear(Hidden_2, Hidden_2)
self.L6 = torch.nn.Linear(Hidden_2, Hidden_2)
self.L7 = torch.nn.Linear(Hidden_2, Hidden_3)
self.LF = torch.nn.Linear(Hidden_3, D_out)
def forward(self, x):
initial = self.LI(x)
L1 = self.L1(initial)
L2 = self.L2(L1)
L3 = self.L3(L2)
L4 = self.L4(L3)
L5 = self.L5(L4)
L6 = self.L6(L5)
L7 = self.L7(L6)
pred = self.LF(L7)
return pred
class DatabaseConnection():
def __init__(self):
self.conn = pymysql.connect(host='localhost', port=3306, user='brooks', password='<PASSWORD>', database='pubg4')
self.cursor = self.conn.cursor()
def op(self, query, arg_tuple=()):
self.cursor.execute(query, arg_tuple)
raw_data = self.cursor.fetchall()
data = [row[:-1] for row in raw_data]
print('the length of the sql data is ', len(data))
return data
def percentdec(prev, new):
x = 100* ((prev- new) / prev)
x = float(str(x)[:5])
return x
def probabilities(train, expect):
ph = []
index = 0
data = list(zip(train,expect))
d2 = data[:]
removed = 0
for pair in d2:
row = pair[1]
sum = 0
nested_ph = []
for item in row:
sum += item
# print('sum is %s for %s index is %s' % (sum, row, index))
if sum <= 30 or sum > 100:
# print('sum is %s for %s index is %s' % (sum, row, index))
data.pop(index)
removed += 1
continue
for item in row:
nested_ph.append(item/sum)
ph.append(nested_ph)
index += 1
if removed:
print('there were %s values removed'%(removed))
for pair in data:
row = pair[1]
sum = 0
for item in row:
sum += item
if sum <= 30 or sum > 100:
print('fuck dude you didnt get rid of the index for the sum', sum)
nt, ne = list(zip(*data))
return nt,ph
D_in = 25
Hidden_1 = 50
Hidden_2 = 40
Hidden_3 = 30
D_out = 25
epochs = 5000000
path = r'D:\Python\pytorch\model_2_3'
if __name__ == '__main__':
torch.set_printoptions(threshold=10000)
db = DatabaseConnection()
training_data, expected_results = probabilities(db.op('SELECT * from distances'), db.op('SELECT * from dropcount'))
print('len training data %s len expected results %s' % (len(training_data), len(expected_results)))
# training_data = training_data[:400]
# expected_results = expected_results[:400]
N = len(training_data)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = NeuralNetwork(D_in, Hidden_1, Hidden_2, Hidden_3, D_out)
model.to(device)
if os.path.exists(path):
print('loaded previous model')
model.load_state_dict(torch.load(path))
training_data = torch.Tensor(training_data).to(device)
expected_results = torch.Tensor(expected_results).to(device)
criterion = torch.nn.SmoothL1Loss(size_average=False, reduce=True)
optimizer = torch.optim.RMSprop(model.parameters(), lr=1e-5)
prev = 1
# time.sleep(363632)
for t in range(epochs):
predicted = model(training_data).view(N,D_out)
percent = str((100* t) / epochs)[:6]
loss = criterion(predicted, expected_results)
if t % 5000 == 0:
print('iter: %s percent done: %s loss: %s loss decrease: %s' % (t,percent, loss.item(), percentdec(prev, float(loss.item()))))
prev = float(loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (t%10000) == 0:
torch.save(model.state_dict(), path)
torch.save(model.state_dict(), path) | en | 0.637213 | # print('sum is %s for %s index is %s' % (sum, row, index)) # print('sum is %s for %s index is %s' % (sum, row, index)) # training_data = training_data[:400] # expected_results = expected_results[:400] # time.sleep(363632) | 2.402696 | 2 |
day8/bst/two_nodes_sum_to.py | vlad-ed-git/leetcode_journey | 0 | 6625013 | <reponame>vlad-ed-git/leetcode_journey
#Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def findTarget(self, root: TreeNode, k: int) -> bool:
if not root:
return False
stack = []
visitedNodes = {}
stack.append(root)
while len(stack) > 0:
node = stack.pop()
if (k - node.val) in visitedNodes:
return True
visitedNodes[node.val] = True # visited
if node.right:
stack.append(node.right)
if node.left:
stack.append(node.left)
return False
| #Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def findTarget(self, root: TreeNode, k: int) -> bool:
if not root:
return False
stack = []
visitedNodes = {}
stack.append(root)
while len(stack) > 0:
node = stack.pop()
if (k - node.val) in visitedNodes:
return True
visitedNodes[node.val] = True # visited
if node.right:
stack.append(node.right)
if node.left:
stack.append(node.left)
return False | en | 0.809354 | #Definition for a binary tree node. # visited | 3.78582 | 4 |
survey/tests/views/test_question_set.py | ericazhou7/uSurvey | 5 | 6625014 | <filename>survey/tests/views/test_question_set.py
from django.contrib.auth.models import User
from django.test import Client
from survey.forms.question_module_form import QuestionModuleForm
from model_mommy import mommy
from survey.forms import *
from survey.models import *
from survey.models.batch import *
from survey.tests.base_test import BaseTest
from survey.forms.question_set import get_question_set_form, BatchForm
from survey.views.question_set import QuestionSetView
import json
class QuestionSetViewTest(BaseTest):
    def setUp(self):
        """Create a logged-in user holding the batch-view permissions, plus a
        Country > District > City > village location hierarchy and one
        enumeration area used by the batch tests below."""
        self.client = Client()
        # A user with no permissions; exists to exercise access control.
        User.objects.create_user(
            username='useless', email='<EMAIL>', password='<PASSWORD>')
        # 'demo8' receives the permissions checked by the views under test,
        # then is logged in for all requests in this test case.
        raj = self.assign_permission_to(User.objects.create_user('demo8', '<EMAIL>', 'demo8'),
                                        'can_view_batches')
        self.assign_permission_to(raj, 'can_view_investigators')
        self.assign_permission_to(raj, 'can_view_aggregates')
        self.client.login(username='demo8', password='<PASSWORD>')
        # Location-type hierarchy: Country > District > City > village.
        self.country = LocationType.objects.create(
            name='Country', slug='country')
        self.district = LocationType.objects.create(
            name='District', parent=self.country, slug='district')
        self.city = LocationType.objects.create(
            name='City', parent=self.district, slug='city')
        self.village = LocationType.objects.create(
            name='village', parent=self.city, slug='village')
        # Concrete locations following the hierarchy above.
        self.uganda = Location.objects.create(name='Uganda', type=self.country)
        self.kampala = Location.objects.create(
            name='Kampala', parent=self.uganda, type=self.district)
        self.kampala_city = Location.objects.create(
            name='Kampala City', parent=self.kampala, type=self.city)
        # Enumeration area used when creating interviews tied to a batch.
        self.ea = EnumerationArea.objects.create(name="BUBEMBE", code="11-BUBEMBE")
def test_index(self):
survey_obj = mommy.make(Survey)
batch = Batch.objects.create(order=1, name="Batch A1", survey=survey_obj)
response = self.client.get(reverse('batch_index_page', kwargs={"survey_id" : survey_obj.id}))
self.assertIn(response.status_code, [200, 302])
templates = [template.name for template in response.templates]
self.assertIn('question_set/index.html', templates)
self.assertIn('name, description', response.context['placeholder'])
self.assertEqual(Batch, response.context['model'])
self.assertEqual(Batch.__name__, response.context['model_name'])
self.assertIn(batch, response.context['question_sets'])
self.assertEqual(survey_obj, response.context['survey'])
def test_delete_should_delete_the_question(self):
survey_obj = mommy.make(Survey)
batch = Batch.objects.create(order=1, name="Batch ABC", survey=survey_obj)
interview = mommy.make(Interview, question_set=batch, survey=survey_obj, ea=self.ea)
qset = QuestionSet.get(id=batch.id)
response = self.client.get(reverse('delete_qset', kwargs={"question_id":qset.id, "batch_id":survey_obj.id}))
self.assertTrue(QuestionSet.objects.filter(id=batch.id).exists())
interview.delete() # now try after remobing the interview
response = self.client.get(reverse('delete_qset', kwargs={"question_id": qset.id, "batch_id": survey_obj.id}))
self.assertFalse(QuestionSet.objects.filter(id=batch.id).exists())
self.assertIn('Successfully deleted', response.cookies['messages'].__str__())
self.assertRedirects(response, expected_url= reverse('batch_index_page', kwargs={"survey_id" : survey_obj.id}),
msg_prefix='')
def test_search_questionset(self):
survey_obj = mommy.make(Survey)
batch = Batch.objects.create(order=1, name="bsearch", survey=survey_obj)
url = reverse('batch_index_page', kwargs={"survey_id" : survey_obj.id})
url = url+"?q=bsearch"
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
templates = [template.name for template in response.templates]
self.assertIn('question_set/index.html', templates)
self.assertIn('name, description', response.context['placeholder'])
self.assertEqual(Batch, response.context['model'])
self.assertEqual(Batch.__name__, response.context['model_name'])
self.assertIn(batch, response.context['question_sets'])
self.assertEqual(survey_obj, response.context['survey'])
def test_question_options_by_question(self):
listing_form = mommy.make(ListingTemplate)
qset = QuestionSet.get(pk=listing_form.id)
question1 = mommy.make(Question, qset=qset, answer_type=NumericalAnswer.choice_name())
QuestionOption.objects.create(
question=question1,
order=1,
text="q1"
)
url = reverse('question_options')
url = url + "?ques_id=%s"%question1.id
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
self.assertEqual(response.content, '{"1": "q1"}')
def test_question_options_by_qset(self):
survey_obj = mommy.make(Survey)
batch = Batch.objects.create(order=1, name="bsearch", survey=survey_obj)
qset = QuestionSet.get(pk=batch.id)
question1 = mommy.make(Question, qset=qset, answer_type=TextAnswer.choice_name())
QuestionOption.objects.create(
question=question1,
order=3,
text="q3"
)
question2 = mommy.make(Question, qset=qset, answer_type=TextAnswer.choice_name())
QuestionOption.objects.create(
question=question1,
order=4,
text="q4"
)
QuestionFlow.objects.create(
name = 'a1',
desc = 'descq',
question = question2,
question_type = TextAnswer.choice_name(),
next_question = question1,
next_question_type = TextAnswer.choice_name()
)
QuestionLoop.objects.create(
loop_starter = question2,
loop_ender = question1
)
url = reverse('question_options')
url = url + "?id=%s"%qset.id
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
self.assertEqual(response.content,'{}')
def test_question_validators(self):
listing_form = mommy.make(ListingTemplate)
qset = QuestionSet.get(pk=listing_form.id)
question1 = mommy.make(Question, qset=qset, answer_type=NumericalAnswer.choice_name())
QuestionOption.objects.create(
question=question1,
order=1,
text="q5"
)
url = reverse('question_validators')
url = url + "?ques_id=%s"%question1.id
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
self.assertEqual(response.content, '["equals", "between", "less_than", "greater_than"]')
url = reverse('question_validators')
url = url + "?id=%s"%qset.id
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
self.assertEqual(response.content, '{}')
def test_list_questions(self):
listing_form = mommy.make(ListingTemplate)
qset = QuestionSet.get(pk=listing_form.id)
question1 = mommy.make(Question, qset=qset, answer_type=NumericalAnswer.choice_name())
QuestionOption.objects.create(
question=question1,
order=1,
text="q6"
)
url = reverse('list_questions')
url = url + "?id=%s"%qset.id
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
self.assertEqual(response.content, '[]')
url = reverse('list_questions')
response = self.client.get(url)
response_data = json.loads(response.content)
is_exist = False
for each in response_data:
if each['id'] == question1.id:
is_exist = True
break
self.assertTrue(is_exist)
def test_list_qsets(self):
survey_obj = mommy.make(Survey)
batch = Batch.objects.create(order=1, name="b2", survey=survey_obj)
qset = QuestionSet.get(pk=batch.id)
url = reverse('view_qsets')
url = url + "?survey_id=%s"%survey_obj.id
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
response_data = json.loads(response.content)
is_exist = False
for each in response_data:
if each['id'] == batch.id:
is_exist = True
break
self.assertTrue(is_exist)
url = reverse('view_qsets')
response = self.client.get(url)
is_exist = False
response_data = json.loads(response.content)
for each in response_data:
if each['id'] == batch.id:
is_exist = True
break
self.assertTrue(is_exist)
def test_download_qset_data(self):
survey_obj = mommy.make(Survey)
batch = Batch.objects.create(order=1, name="b21", survey=survey_obj)
qset = QuestionSet.get(pk=batch.id)
url = reverse('download_qset_data', kwargs={"qset_id":qset.id})
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
self.assertEqual(response.content, ',District,City,village,EA,interviewer__name,Uploaded,Completion Date\n')
def test_download_qset_attachment(self):
survey_obj = mommy.make(Survey)
batch = Batch.objects.create(order=1, name="b21", survey=survey_obj)
qset = QuestionSet.get(pk=batch.id)
q_data = {'identifier':"dummy",
'text' : "dummss",
'answer_type' : "Text Answer",
'qset_id' : qset.id
}
question1 = Question.objects.create(**q_data)
investigator = Interviewer.objects.create(name="Investigator1",
ea=self.ea,
gender='1', level_of_education='Primary',
language='Eglish', weights=0,date_of_birth='1987-01-01')
interview_obj = Interview.objects.create(
interviewer = investigator,
ea = self.ea,
survey = survey_obj,
question_set = qset,
)
ans_data = {
"question_type" : "qt1",
"interview" : interview_obj,
"question": question1,
"identifier" : "identifier",
"as_text" : "as_text",
"as_value" : 1
}
ans_obj = Answer.objects.create(**ans_data)
ans_data['value'] = 1
url = reverse('download_qset_attachment', kwargs={"interview_id": interview_obj.id, "question_id":question1.id})
response = self.client.get(url)
self.assertEqual(response.content, 'TextAnswer matching query does not exist.')
text_obj = TextAnswer.objects.create(**ans_data)
url = reverse('download_qset_attachment', kwargs={"interview_id": interview_obj.id, "question_id":question1.id})
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
def test_clone_qset_page(self):
listing_form = ListingTemplate.objects.create(
name="list1",
description="list1")
qset = QuestionSet.get(pk=listing_form.id)
question1 = mommy.make(Question, qset=qset, answer_type=NumericalAnswer.choice_name())
url = reverse('clone_qset_page', kwargs={"qset_id" : qset.id})
response = self.client.get(url)
self.assertIn('Successfully cloned %s' % qset.name, response.cookies['messages'].__str__())
self.assertRedirects(response, expected_url=reverse('listing_template_home'), msg_prefix='')
self.assertIn(response.status_code, [200, 302])
def test_qset_identifiers(self):
listing_form = mommy.make(ListingTemplate)
qset = QuestionSet.get(pk=listing_form.id)
question1 = mommy.make(Question, qset=qset, answer_type=NumericalAnswer.choice_name())
QuestionOption.objects.create(
question=question1,
order=1,
text="q7"
)
url = reverse('qset_identifiers')
url = url + "?id=%s"%question1.id
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
is_exist = False
response_data = json.loads(response.content)
for each in response_data:
if each == question1.identifier:
is_exist = True
break
self.assertTrue(is_exist)
url = reverse('qset_identifiers')
response = self.client.get(url)
self.assertEqual(response.content, '[]')
question2 = mommy.make(Question, qset=qset, answer_type=NumericalAnswer.choice_name())
QuestionOption.objects.create(
question=question1,
order=4,
text="q4"
)
QuestionFlow.objects.create(
name = 'a1',
desc = 'descq',
question = question2,
question_type = TextAnswer.choice_name(),
next_question = question1,
next_question_type = TextAnswer.choice_name()
)
QuestionLoop.objects.create(
loop_starter = question2,
loop_ender = question1
)
url = reverse('qset_identifiers')
url = url + "?id=%s&q_id=%s"%(question1.id, question2.id)
response = self.client.get(url)
def test_listing_entries(self):
listing_form = ListingTemplate.objects.create(name='l1', description='desc1')
kwargs = {'name': 'survey9', 'description': 'survey description demo12',
'has_sampling': True, 'sample_size': 10,'listing_form_id':listing_form.id}
survey_obj = Survey.objects.create(**kwargs)
investigator = Interviewer.objects.create(name="InvestigatorViewdata",
ea=self.ea,
gender='1', level_of_education='Primary',
language='Eglish', weights=0,date_of_birth='1987-01-01')
surveyAllocation_obj = SurveyAllocation.objects.create(
interviewer = investigator,
survey = survey_obj,
allocation_ea = self.ea,
status = 1
)
qset = QuestionSet.get(pk=listing_form.id)
url = reverse('listing_entries',kwargs={"qset_id":qset.id})
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
templates = [template.name for template in response.templates]
self.assertIn('question_set/listing_entries.html', templates)
self.assertIn('name', response.context['placeholder'])
self.assertIn(survey_obj, response.context['surveys'])
self.assertEqual(qset, response.context['question_set'])
url = url + "?q=survey9"
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
templates = [template.name for template in response.templates]
self.assertIn('question_set/listing_entries.html', templates)
self.assertIn('name', response.context['placeholder'])
self.assertIn(survey_obj, response.context['surveys'])
self.assertEqual(qset, response.context['question_set'])
def test_listing_entries_404(self):
url = reverse('listing_entries',kwargs={"qset_id":999})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_qset_view_survey_data(self):
listing_form = ListingTemplate.objects.create(name='l1', description='desc1')
kwargs = {'name': 'survey10', 'description': 'survey description demo12',
'has_sampling': True, 'sample_size': 10,'listing_form_id':listing_form.id}
survey_obj = Survey.objects.create(**kwargs)
batch_obj = Batch.objects.create(name='b1',description='d1', survey=survey_obj)
qset = QuestionSet.get(id=batch_obj.id)
investigator = Interviewer.objects.create(name="InvestigatorViewdata",
ea=self.ea,
gender='1', level_of_education='Primary',
language='Eglish', weights=0,date_of_birth='1987-01-01')
interview_obj = Interview.objects.create(
interviewer = investigator,
ea = self.ea,
survey = survey_obj,
question_set = qset,
)
surveyAllocation_obj = SurveyAllocation.objects.create(
interviewer = investigator,
survey = survey_obj,
allocation_ea = self.ea,
status = 1
)
url = reverse('view_survey_data')
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
url = reverse('view_survey_data')
url = url + "?survey=%s"%survey_obj.id
response = self.client.get(url)
url = reverse('view_survey_data')
url = url + "?survey=%s&question_set=%s&page=1"%(survey_obj.id, qset.id)
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
url = reverse('view_survey_data')
url = url + "?survey=%s&question_set=%s"%(survey_obj.id, qset.id)
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
def test_qset_view_view_listing_data(self):
listing_form = ListingTemplate.objects.create(name='l12', description='desc1')
kwargs = {'name': 'survey11', 'description': 'survey description demo12',
'has_sampling': True, 'sample_size': 10,'listing_form_id':listing_form.id}
survey_obj = Survey.objects.create(**kwargs)
batch_obj = Batch.objects.create(name='b1',description='d1', survey=survey_obj)
qset = QuestionSet.get(id=batch_obj.id)
investigator = Interviewer.objects.create(name="InvestigatorViewdata",
ea=self.ea,
gender='1', level_of_education='Primary',
language='Eglish', weights=0,date_of_birth='1987-01-01')
interview_obj = Interview.objects.create(
interviewer = investigator,
ea = self.ea,
survey = survey_obj,
question_set = qset,
)
surveyAllocation_obj = SurveyAllocation.objects.create(
interviewer = investigator,
survey = survey_obj,
allocation_ea = self.ea,
status = 1
)
url = reverse('view_listing_data')
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
url = url + "?survey=%s"%survey_obj.id
response = self.client.get(url)
url = reverse('view_listing_data')
url = url + "?survey=%s&question_set=%s&page=1"%(survey_obj.id, qset.id)
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
url = reverse('view_listing_data')
url = url + "?survey=%s&question_set=%s"%(survey_obj.id, qset.id)
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
def test_qset_view_data(self):
listing_form = ListingTemplate.objects.create(name='l12', description='desc1')
kwargs = {'name': 'survey12', 'description': 'survey description demo12',
'has_sampling': True, 'sample_size': 10,'listing_form_id':listing_form.id}
survey_obj = Survey.objects.create(**kwargs)
batch_obj = Batch.objects.create(name='b1',description='d1', survey=survey_obj)
qset = QuestionSet.get(id=batch_obj.id)
investigator = Interviewer.objects.create(name="InvestigatorViewdata",
ea=self.ea,
gender='1', level_of_education='Primary',
language='Eglish', weights=0,date_of_birth='1987-01-01')
interview_obj = Interview.objects.create(
interviewer = investigator,
ea = self.ea,
survey = survey_obj,
question_set = qset,
)
surveyAllocation_obj = SurveyAllocation.objects.create(
interviewer = investigator,
survey = survey_obj,
allocation_ea = self.ea,
status = 1
)
url = reverse('view_data_home', kwargs={"qset_id" : qset.id})
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
url = url + "?survey=%s"%survey_obj.id
response = self.client.get(url)
url = reverse('view_data_home', kwargs={"qset_id" : qset.id})
url = url + "?survey=%s&question_set=%s&page=1"%(survey_obj.id, qset.id)
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
url = reverse('view_data_home', kwargs={"qset_id" : qset.id})
url = url + "?survey=%s&question_set=%s"%(survey_obj.id, qset.id)
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
def test_qset_delete_qset_listingform(self):
listing_form = ListingTemplate.objects.create(name='l121', description='desc1')
kwargs = {'name': 'survey121', 'description': 'survey description demo12',
'has_sampling': True, 'sample_size': 10,'listing_form_id':listing_form.id}
survey_obj = Survey.objects.create(**kwargs)
batch_obj = Batch.objects.create(name='b1',description='d1', survey=survey_obj)
qset = QuestionSet.get(id=batch_obj.id)
investigator = Interviewer.objects.create(name="InvestigatorViewdata",
ea=self.ea,
gender='1', level_of_education='Primary',
language='Eglish', weights=0,date_of_birth='1987-01-01')
interview_obj = Interview.objects.create(
interviewer = investigator,
ea = self.ea,
survey = survey_obj,
question_set = qset,
)
surveyAllocation_obj = SurveyAllocation.objects.create(
interviewer = investigator,
survey = survey_obj,
allocation_ea = self.ea,
status = 1
)
question1 = mommy.make(Question, qset=qset, answer_type=NumericalAnswer.choice_name())
QuestionOption.objects.create(
question=question1,
order=1,
text="q7"
)
question2 = mommy.make(Question, qset=qset, answer_type=NumericalAnswer.choice_name())
QuestionOption.objects.create(
question=question1,
order=4,
text="q4"
)
QuestionFlow.objects.create(
name = 'a1',
desc = 'descq',
question = question2,
question_type = TextAnswer.choice_name(),
next_question = question1,
next_question_type = TextAnswer.choice_name()
)
QuestionLoop.objects.create(
loop_starter = question2,
loop_ender = question1
)
url = reverse('delete_qset_listingform', kwargs={"question_id" : qset.id})
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
self.assertIn("Question Set cannot be deleted because it already has interviews.",
str(response.cookies['messages']))
def test_edit_question_set(self):
survey = mommy.make(Survey)
qset = mommy.make(Batch, survey=survey, name='sample tst')
data = {'name': 'editied name', 'description': 'test description',
'survey': survey.id, 'access_channels': ODKAccess.choice_name()}
url = reverse('edit_%s_page' % Batch.resolve_tag(), args=(qset.id, ))
response = self.client.post(url, data=data)
self.assertFalse(Batch.objects.filter(name=qset.name).exists())
self.assertTrue(Batch.objects.filter(name=data['name']).exists())
def test_delete_qset(self):
qset = mommy.make(QuestionSet)
question = mommy.make(Question, qset=qset)
url = reverse('delete_qset', args=(question.id, qset.id))
response = self.client.get(url)
self.assertFalse(QuestionSet.objects.filter(id=qset.id).exists())
self.assertFalse(Question.objects.filter(id=question.id).exists())
def test_delete_qset_listingform(self):
qset = mommy.make(QuestionSet)
question = mommy.make(Question, qset=qset)
qset.start_question = question
qset.save()
url = reverse('delete_qset_listingform', args=(question.id, ))
response = self.client.get(url)
self.assertFalse(QuestionSet.objects.filter(id=qset.id).exists())
url = reverse('delete_qset_listingform', kwargs={"question_id": 999})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_question_identifiers(self):
qset = mommy.make(QuestionSet)
question = mommy.make(Question, qset=qset)
qset.start_question = question
qset.save()
question2 = mommy.make(Question, qset=qset)
mommy.make(QuestionFlow, question=question, next_question=question2)
url = reverse('qset_identifiers')
response = self.client.get(url, data={'id': qset.id, 'q_id': question2.id})
data = json.loads(response.content)
self.assertIn(question.identifier, data)
def test_not_allowed_to_delete_qset_with_interview(self):
qset = mommy.make(QuestionSet)
question = mommy.make(Question, qset=qset)
qset.start_question = question
qset.save()
interview = mommy.make(Interview, question_set=qset)
self.assertTrue(QuestionSet.objects.filter(id=qset.id).exists())
url = reverse('delete_qset_listingform', args=(question.id,))
response = self.client.get(url)
self.assertTrue(QuestionSet.objects.filter(id=qset.id).exists())
self.assertIn(response.status_code, [302, 200])
# survey/tests/views/test_question_set.py
from django.contrib.auth.models import User
from django.test import Client
from survey.forms.question_module_form import QuestionModuleForm
from model_mommy import mommy
from survey.forms import *
from survey.models import *
from survey.models.batch import *
from survey.tests.base_test import BaseTest
from survey.forms.question_set import get_question_set_form, BatchForm
from survey.views.question_set import QuestionSetView
import json
class QuestionSetViewTest(BaseTest):
def setUp(self):
self.client = Client()
User.objects.create_user(
username='useless', email='<EMAIL>', password='<PASSWORD>')
raj = self.assign_permission_to(User.objects.create_user('demo8', '<EMAIL>', 'demo8'),
'can_view_batches')
self.assign_permission_to(raj, 'can_view_investigators')
self.assign_permission_to(raj, 'can_view_aggregates')
self.client.login(username='demo8', password='<PASSWORD>')
self.country = LocationType.objects.create(
name='Country', slug='country')
self.district = LocationType.objects.create(
name='District', parent=self.country, slug='district')
self.city = LocationType.objects.create(
name='City', parent=self.district, slug='city')
self.village = LocationType.objects.create(
name='village', parent=self.city, slug='village')
self.uganda = Location.objects.create(name='Uganda', type=self.country)
self.kampala = Location.objects.create(
name='Kampala', parent=self.uganda, type=self.district)
self.kampala_city = Location.objects.create(
name='Kampala City', parent=self.kampala, type=self.city)
self.ea = EnumerationArea.objects.create(name="BUBEMBE", code="11-BUBEMBE")
def test_index(self):
survey_obj = mommy.make(Survey)
batch = Batch.objects.create(order=1, name="Batch A1", survey=survey_obj)
response = self.client.get(reverse('batch_index_page', kwargs={"survey_id" : survey_obj.id}))
self.assertIn(response.status_code, [200, 302])
templates = [template.name for template in response.templates]
self.assertIn('question_set/index.html', templates)
self.assertIn('name, description', response.context['placeholder'])
self.assertEqual(Batch, response.context['model'])
self.assertEqual(Batch.__name__, response.context['model_name'])
self.assertIn(batch, response.context['question_sets'])
self.assertEqual(survey_obj, response.context['survey'])
def test_delete_should_delete_the_question(self):
survey_obj = mommy.make(Survey)
batch = Batch.objects.create(order=1, name="Batch ABC", survey=survey_obj)
interview = mommy.make(Interview, question_set=batch, survey=survey_obj, ea=self.ea)
qset = QuestionSet.get(id=batch.id)
response = self.client.get(reverse('delete_qset', kwargs={"question_id":qset.id, "batch_id":survey_obj.id}))
self.assertTrue(QuestionSet.objects.filter(id=batch.id).exists())
interview.delete() # now try after remobing the interview
response = self.client.get(reverse('delete_qset', kwargs={"question_id": qset.id, "batch_id": survey_obj.id}))
self.assertFalse(QuestionSet.objects.filter(id=batch.id).exists())
self.assertIn('Successfully deleted', response.cookies['messages'].__str__())
self.assertRedirects(response, expected_url= reverse('batch_index_page', kwargs={"survey_id" : survey_obj.id}),
msg_prefix='')
def test_search_questionset(self):
survey_obj = mommy.make(Survey)
batch = Batch.objects.create(order=1, name="bsearch", survey=survey_obj)
url = reverse('batch_index_page', kwargs={"survey_id" : survey_obj.id})
url = url+"?q=bsearch"
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
templates = [template.name for template in response.templates]
self.assertIn('question_set/index.html', templates)
self.assertIn('name, description', response.context['placeholder'])
self.assertEqual(Batch, response.context['model'])
self.assertEqual(Batch.__name__, response.context['model_name'])
self.assertIn(batch, response.context['question_sets'])
self.assertEqual(survey_obj, response.context['survey'])
def test_question_options_by_question(self):
listing_form = mommy.make(ListingTemplate)
qset = QuestionSet.get(pk=listing_form.id)
question1 = mommy.make(Question, qset=qset, answer_type=NumericalAnswer.choice_name())
QuestionOption.objects.create(
question=question1,
order=1,
text="q1"
)
url = reverse('question_options')
url = url + "?ques_id=%s"%question1.id
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
self.assertEqual(response.content, '{"1": "q1"}')
def test_question_options_by_qset(self):
survey_obj = mommy.make(Survey)
batch = Batch.objects.create(order=1, name="bsearch", survey=survey_obj)
qset = QuestionSet.get(pk=batch.id)
question1 = mommy.make(Question, qset=qset, answer_type=TextAnswer.choice_name())
QuestionOption.objects.create(
question=question1,
order=3,
text="q3"
)
question2 = mommy.make(Question, qset=qset, answer_type=TextAnswer.choice_name())
QuestionOption.objects.create(
question=question1,
order=4,
text="q4"
)
QuestionFlow.objects.create(
name = 'a1',
desc = 'descq',
question = question2,
question_type = TextAnswer.choice_name(),
next_question = question1,
next_question_type = TextAnswer.choice_name()
)
QuestionLoop.objects.create(
loop_starter = question2,
loop_ender = question1
)
url = reverse('question_options')
url = url + "?id=%s"%qset.id
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
self.assertEqual(response.content,'{}')
def test_question_validators(self):
listing_form = mommy.make(ListingTemplate)
qset = QuestionSet.get(pk=listing_form.id)
question1 = mommy.make(Question, qset=qset, answer_type=NumericalAnswer.choice_name())
QuestionOption.objects.create(
question=question1,
order=1,
text="q5"
)
url = reverse('question_validators')
url = url + "?ques_id=%s"%question1.id
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
self.assertEqual(response.content, '["equals", "between", "less_than", "greater_than"]')
url = reverse('question_validators')
url = url + "?id=%s"%qset.id
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
self.assertEqual(response.content, '{}')
def test_list_questions(self):
listing_form = mommy.make(ListingTemplate)
qset = QuestionSet.get(pk=listing_form.id)
question1 = mommy.make(Question, qset=qset, answer_type=NumericalAnswer.choice_name())
QuestionOption.objects.create(
question=question1,
order=1,
text="q6"
)
url = reverse('list_questions')
url = url + "?id=%s"%qset.id
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
self.assertEqual(response.content, '[]')
url = reverse('list_questions')
response = self.client.get(url)
response_data = json.loads(response.content)
is_exist = False
for each in response_data:
if each['id'] == question1.id:
is_exist = True
break
self.assertTrue(is_exist)
def test_list_qsets(self):
survey_obj = mommy.make(Survey)
batch = Batch.objects.create(order=1, name="b2", survey=survey_obj)
qset = QuestionSet.get(pk=batch.id)
url = reverse('view_qsets')
url = url + "?survey_id=%s"%survey_obj.id
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
response_data = json.loads(response.content)
is_exist = False
for each in response_data:
if each['id'] == batch.id:
is_exist = True
break
self.assertTrue(is_exist)
url = reverse('view_qsets')
response = self.client.get(url)
is_exist = False
response_data = json.loads(response.content)
for each in response_data:
if each['id'] == batch.id:
is_exist = True
break
self.assertTrue(is_exist)
def test_download_qset_data(self):
survey_obj = mommy.make(Survey)
batch = Batch.objects.create(order=1, name="b21", survey=survey_obj)
qset = QuestionSet.get(pk=batch.id)
url = reverse('download_qset_data', kwargs={"qset_id":qset.id})
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
self.assertEqual(response.content, ',District,City,village,EA,interviewer__name,Uploaded,Completion Date\n')
def test_download_qset_attachment(self):
survey_obj = mommy.make(Survey)
batch = Batch.objects.create(order=1, name="b21", survey=survey_obj)
qset = QuestionSet.get(pk=batch.id)
q_data = {'identifier':"dummy",
'text' : "dummss",
'answer_type' : "Text Answer",
'qset_id' : qset.id
}
question1 = Question.objects.create(**q_data)
investigator = Interviewer.objects.create(name="Investigator1",
ea=self.ea,
gender='1', level_of_education='Primary',
language='Eglish', weights=0,date_of_birth='1987-01-01')
interview_obj = Interview.objects.create(
interviewer = investigator,
ea = self.ea,
survey = survey_obj,
question_set = qset,
)
ans_data = {
"question_type" : "qt1",
"interview" : interview_obj,
"question": question1,
"identifier" : "identifier",
"as_text" : "as_text",
"as_value" : 1
}
ans_obj = Answer.objects.create(**ans_data)
ans_data['value'] = 1
url = reverse('download_qset_attachment', kwargs={"interview_id": interview_obj.id, "question_id":question1.id})
response = self.client.get(url)
self.assertEqual(response.content, 'TextAnswer matching query does not exist.')
text_obj = TextAnswer.objects.create(**ans_data)
url = reverse('download_qset_attachment', kwargs={"interview_id": interview_obj.id, "question_id":question1.id})
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
def test_clone_qset_page(self):
listing_form = ListingTemplate.objects.create(
name="list1",
description="list1")
qset = QuestionSet.get(pk=listing_form.id)
question1 = mommy.make(Question, qset=qset, answer_type=NumericalAnswer.choice_name())
url = reverse('clone_qset_page', kwargs={"qset_id" : qset.id})
response = self.client.get(url)
self.assertIn('Successfully cloned %s' % qset.name, response.cookies['messages'].__str__())
self.assertRedirects(response, expected_url=reverse('listing_template_home'), msg_prefix='')
self.assertIn(response.status_code, [200, 302])
def test_qset_identifiers(self):
listing_form = mommy.make(ListingTemplate)
qset = QuestionSet.get(pk=listing_form.id)
question1 = mommy.make(Question, qset=qset, answer_type=NumericalAnswer.choice_name())
QuestionOption.objects.create(
question=question1,
order=1,
text="q7"
)
url = reverse('qset_identifiers')
url = url + "?id=%s"%question1.id
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
is_exist = False
response_data = json.loads(response.content)
for each in response_data:
if each == question1.identifier:
is_exist = True
break
self.assertTrue(is_exist)
url = reverse('qset_identifiers')
response = self.client.get(url)
self.assertEqual(response.content, '[]')
question2 = mommy.make(Question, qset=qset, answer_type=NumericalAnswer.choice_name())
QuestionOption.objects.create(
question=question1,
order=4,
text="q4"
)
QuestionFlow.objects.create(
name = 'a1',
desc = 'descq',
question = question2,
question_type = TextAnswer.choice_name(),
next_question = question1,
next_question_type = TextAnswer.choice_name()
)
QuestionLoop.objects.create(
loop_starter = question2,
loop_ender = question1
)
url = reverse('qset_identifiers')
url = url + "?id=%s&q_id=%s"%(question1.id, question2.id)
response = self.client.get(url)
def test_listing_entries(self):
listing_form = ListingTemplate.objects.create(name='l1', description='desc1')
kwargs = {'name': 'survey9', 'description': 'survey description demo12',
'has_sampling': True, 'sample_size': 10,'listing_form_id':listing_form.id}
survey_obj = Survey.objects.create(**kwargs)
investigator = Interviewer.objects.create(name="InvestigatorViewdata",
ea=self.ea,
gender='1', level_of_education='Primary',
language='Eglish', weights=0,date_of_birth='1987-01-01')
surveyAllocation_obj = SurveyAllocation.objects.create(
interviewer = investigator,
survey = survey_obj,
allocation_ea = self.ea,
status = 1
)
qset = QuestionSet.get(pk=listing_form.id)
url = reverse('listing_entries',kwargs={"qset_id":qset.id})
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
templates = [template.name for template in response.templates]
self.assertIn('question_set/listing_entries.html', templates)
self.assertIn('name', response.context['placeholder'])
self.assertIn(survey_obj, response.context['surveys'])
self.assertEqual(qset, response.context['question_set'])
url = url + "?q=survey9"
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
templates = [template.name for template in response.templates]
self.assertIn('question_set/listing_entries.html', templates)
self.assertIn('name', response.context['placeholder'])
self.assertIn(survey_obj, response.context['surveys'])
self.assertEqual(qset, response.context['question_set'])
def test_listing_entries_404(self):
url = reverse('listing_entries',kwargs={"qset_id":999})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_qset_view_survey_data(self):
listing_form = ListingTemplate.objects.create(name='l1', description='desc1')
kwargs = {'name': 'survey10', 'description': 'survey description demo12',
'has_sampling': True, 'sample_size': 10,'listing_form_id':listing_form.id}
survey_obj = Survey.objects.create(**kwargs)
batch_obj = Batch.objects.create(name='b1',description='d1', survey=survey_obj)
qset = QuestionSet.get(id=batch_obj.id)
investigator = Interviewer.objects.create(name="InvestigatorViewdata",
ea=self.ea,
gender='1', level_of_education='Primary',
language='Eglish', weights=0,date_of_birth='1987-01-01')
interview_obj = Interview.objects.create(
interviewer = investigator,
ea = self.ea,
survey = survey_obj,
question_set = qset,
)
surveyAllocation_obj = SurveyAllocation.objects.create(
interviewer = investigator,
survey = survey_obj,
allocation_ea = self.ea,
status = 1
)
url = reverse('view_survey_data')
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
url = reverse('view_survey_data')
url = url + "?survey=%s"%survey_obj.id
response = self.client.get(url)
url = reverse('view_survey_data')
url = url + "?survey=%s&question_set=%s&page=1"%(survey_obj.id, qset.id)
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
url = reverse('view_survey_data')
url = url + "?survey=%s&question_set=%s"%(survey_obj.id, qset.id)
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
def test_qset_view_view_listing_data(self):
listing_form = ListingTemplate.objects.create(name='l12', description='desc1')
kwargs = {'name': 'survey11', 'description': 'survey description demo12',
'has_sampling': True, 'sample_size': 10,'listing_form_id':listing_form.id}
survey_obj = Survey.objects.create(**kwargs)
batch_obj = Batch.objects.create(name='b1',description='d1', survey=survey_obj)
qset = QuestionSet.get(id=batch_obj.id)
investigator = Interviewer.objects.create(name="InvestigatorViewdata",
ea=self.ea,
gender='1', level_of_education='Primary',
language='Eglish', weights=0,date_of_birth='1987-01-01')
interview_obj = Interview.objects.create(
interviewer = investigator,
ea = self.ea,
survey = survey_obj,
question_set = qset,
)
surveyAllocation_obj = SurveyAllocation.objects.create(
interviewer = investigator,
survey = survey_obj,
allocation_ea = self.ea,
status = 1
)
url = reverse('view_listing_data')
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
url = url + "?survey=%s"%survey_obj.id
response = self.client.get(url)
url = reverse('view_listing_data')
url = url + "?survey=%s&question_set=%s&page=1"%(survey_obj.id, qset.id)
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
url = reverse('view_listing_data')
url = url + "?survey=%s&question_set=%s"%(survey_obj.id, qset.id)
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
def test_qset_view_data(self):
listing_form = ListingTemplate.objects.create(name='l12', description='desc1')
kwargs = {'name': 'survey12', 'description': 'survey description demo12',
'has_sampling': True, 'sample_size': 10,'listing_form_id':listing_form.id}
survey_obj = Survey.objects.create(**kwargs)
batch_obj = Batch.objects.create(name='b1',description='d1', survey=survey_obj)
qset = QuestionSet.get(id=batch_obj.id)
investigator = Interviewer.objects.create(name="InvestigatorViewdata",
ea=self.ea,
gender='1', level_of_education='Primary',
language='Eglish', weights=0,date_of_birth='1987-01-01')
interview_obj = Interview.objects.create(
interviewer = investigator,
ea = self.ea,
survey = survey_obj,
question_set = qset,
)
surveyAllocation_obj = SurveyAllocation.objects.create(
interviewer = investigator,
survey = survey_obj,
allocation_ea = self.ea,
status = 1
)
url = reverse('view_data_home', kwargs={"qset_id" : qset.id})
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
url = url + "?survey=%s"%survey_obj.id
response = self.client.get(url)
url = reverse('view_data_home', kwargs={"qset_id" : qset.id})
url = url + "?survey=%s&question_set=%s&page=1"%(survey_obj.id, qset.id)
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
url = reverse('view_data_home', kwargs={"qset_id" : qset.id})
url = url + "?survey=%s&question_set=%s"%(survey_obj.id, qset.id)
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
def test_qset_delete_qset_listingform(self):
listing_form = ListingTemplate.objects.create(name='l121', description='desc1')
kwargs = {'name': 'survey121', 'description': 'survey description demo12',
'has_sampling': True, 'sample_size': 10,'listing_form_id':listing_form.id}
survey_obj = Survey.objects.create(**kwargs)
batch_obj = Batch.objects.create(name='b1',description='d1', survey=survey_obj)
qset = QuestionSet.get(id=batch_obj.id)
investigator = Interviewer.objects.create(name="InvestigatorViewdata",
ea=self.ea,
gender='1', level_of_education='Primary',
language='Eglish', weights=0,date_of_birth='1987-01-01')
interview_obj = Interview.objects.create(
interviewer = investigator,
ea = self.ea,
survey = survey_obj,
question_set = qset,
)
surveyAllocation_obj = SurveyAllocation.objects.create(
interviewer = investigator,
survey = survey_obj,
allocation_ea = self.ea,
status = 1
)
question1 = mommy.make(Question, qset=qset, answer_type=NumericalAnswer.choice_name())
QuestionOption.objects.create(
question=question1,
order=1,
text="q7"
)
question2 = mommy.make(Question, qset=qset, answer_type=NumericalAnswer.choice_name())
QuestionOption.objects.create(
question=question1,
order=4,
text="q4"
)
QuestionFlow.objects.create(
name = 'a1',
desc = 'descq',
question = question2,
question_type = TextAnswer.choice_name(),
next_question = question1,
next_question_type = TextAnswer.choice_name()
)
QuestionLoop.objects.create(
loop_starter = question2,
loop_ender = question1
)
url = reverse('delete_qset_listingform', kwargs={"question_id" : qset.id})
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
self.assertIn("Question Set cannot be deleted because it already has interviews.",
str(response.cookies['messages']))
def test_edit_question_set(self):
survey = mommy.make(Survey)
qset = mommy.make(Batch, survey=survey, name='sample tst')
data = {'name': 'editied name', 'description': 'test description',
'survey': survey.id, 'access_channels': ODKAccess.choice_name()}
url = reverse('edit_%s_page' % Batch.resolve_tag(), args=(qset.id, ))
response = self.client.post(url, data=data)
self.assertFalse(Batch.objects.filter(name=qset.name).exists())
self.assertTrue(Batch.objects.filter(name=data['name']).exists())
def test_delete_qset(self):
qset = mommy.make(QuestionSet)
question = mommy.make(Question, qset=qset)
url = reverse('delete_qset', args=(question.id, qset.id))
response = self.client.get(url)
self.assertFalse(QuestionSet.objects.filter(id=qset.id).exists())
self.assertFalse(Question.objects.filter(id=question.id).exists())
def test_delete_qset_listingform(self):
qset = mommy.make(QuestionSet)
question = mommy.make(Question, qset=qset)
qset.start_question = question
qset.save()
url = reverse('delete_qset_listingform', args=(question.id, ))
response = self.client.get(url)
self.assertFalse(QuestionSet.objects.filter(id=qset.id).exists())
url = reverse('delete_qset_listingform', kwargs={"question_id": 999})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_question_identifiers(self):
qset = mommy.make(QuestionSet)
question = mommy.make(Question, qset=qset)
qset.start_question = question
qset.save()
question2 = mommy.make(Question, qset=qset)
mommy.make(QuestionFlow, question=question, next_question=question2)
url = reverse('qset_identifiers')
response = self.client.get(url, data={'id': qset.id, 'q_id': question2.id})
data = json.loads(response.content)
self.assertIn(question.identifier, data)
def test_not_allowed_to_delete_qset_with_interview(self):
qset = mommy.make(QuestionSet)
question = mommy.make(Question, qset=qset)
qset.start_question = question
qset.save()
interview = mommy.make(Interview, question_set=qset)
self.assertTrue(QuestionSet.objects.filter(id=qset.id).exists())
url = reverse('delete_qset_listingform', args=(question.id,))
response = self.client.get(url)
self.assertTrue(QuestionSet.objects.filter(id=qset.id).exists())
self.assertIn(response.status_code, [302, 200])
| en | 0.858227 | # now try after remobing the interview | 2.020803 | 2 |
tencentcloud/tag/v20180813/errorcodes.py | PlasticMem/tencentcloud-sdk-python | 0 | 6625015 | # -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# 未通过CAM鉴权。
AUTHFAILURE_UNAUTHORIZEDOPERATION = 'AuthFailure.UnauthorizedOperation'
# 操作失败。
FAILEDOPERATION = 'FailedOperation'
# 单次请求的资源appId必须相同。
FAILEDOPERATION_RESOURCEAPPIDNOTSAME = 'FailedOperation.ResourceAppIdNotSame'
# 已关联资源的标签无法删除。
FAILEDOPERATION_TAGATTACHEDRESOURCE = 'FailedOperation.TagAttachedResource'
# 参数错误。
INVALIDPARAMETER = 'InvalidParameter'
# 系统预留标签键 qcloud、tencent和project 禁止创建。
INVALIDPARAMETER_RESERVEDTAGKEY = 'InvalidParameter.ReservedTagKey'
# Tag参数错误。
INVALIDPARAMETER_TAG = 'InvalidParameter.Tag'
# DeleteTags中不能包含ReplaceTags或AddTags中元素。
INVALIDPARAMETERVALUE_DELETETAGSPARAMERROR = 'InvalidParameterValue.DeleteTagsParamError'
# offset error。
INVALIDPARAMETERVALUE_OFFSETINVALID = 'InvalidParameterValue.OffsetInvalid'
# 地域错误。
INVALIDPARAMETERVALUE_REGIONINVALID = 'InvalidParameterValue.RegionInvalid'
# 系统预留标签键 qcloud、tencent和project 禁止创建。
INVALIDPARAMETERVALUE_RESERVEDTAGKEY = 'InvalidParameterValue.ReservedTagKey'
# 资源描述错误。
INVALIDPARAMETERVALUE_RESOURCEDESCRIPTIONERROR = 'InvalidParameterValue.ResourceDescriptionError'
# 资源Id错误。
INVALIDPARAMETERVALUE_RESOURCEIDINVALID = 'InvalidParameterValue.ResourceIdInvalid'
# 资源前缀错误。
INVALIDPARAMETERVALUE_RESOURCEPREFIXINVALID = 'InvalidParameterValue.ResourcePrefixInvalid'
# 业务类型错误。
INVALIDPARAMETERVALUE_SERVICETYPEINVALID = 'InvalidParameterValue.ServiceTypeInvalid'
# TagFilters参数错误。
INVALIDPARAMETERVALUE_TAGFILTERS = 'InvalidParameterValue.TagFilters'
# 过滤标签键对应标签值达到上限数 6。
INVALIDPARAMETERVALUE_TAGFILTERSLENGTHEXCEEDED = 'InvalidParameterValue.TagFiltersLengthExceeded'
# 标签键包含非法字符。
INVALIDPARAMETERVALUE_TAGKEYCHARACTERILLEGAL = 'InvalidParameterValue.TagKeyCharacterIllegal'
# 标签键不能为空值。
INVALIDPARAMETERVALUE_TAGKEYEMPTY = 'InvalidParameterValue.TagKeyEmpty'
# 标签键长度超过限制。
INVALIDPARAMETERVALUE_TAGKEYLENGTHEXCEEDED = 'InvalidParameterValue.TagKeyLengthExceeded'
# 标签值包含非法字符。
INVALIDPARAMETERVALUE_TAGVALUECHARACTERILLEGAL = 'InvalidParameterValue.TagValueCharacterIllegal'
# 标签值长度超过限制。
INVALIDPARAMETERVALUE_TAGVALUELENGTHEXCEEDED = 'InvalidParameterValue.TagValueLengthExceeded'
# Uin参数不合法。
INVALIDPARAMETERVALUE_UININVALID = 'InvalidParameterValue.UinInvalid'
# 资源关联的标签数超过限制。
LIMITEXCEEDED_RESOURCEATTACHEDTAGS = 'LimitExceeded.ResourceAttachedTags'
# 单次请求的资源数达到上限。
LIMITEXCEEDED_RESOURCENUMPERREQUEST = 'LimitExceeded.ResourceNumPerRequest'
# 用户创建标签键达到上限数 1000。
LIMITEXCEEDED_TAGKEY = 'LimitExceeded.TagKey'
# 单个标签键对应标签值达到上限数 1000。
LIMITEXCEEDED_TAGVALUE = 'LimitExceeded.TagValue'
# 操作被拒绝。
OPERATIONDENIED = 'OperationDenied'
# 标签已存在。
RESOURCEINUSE_TAGDUPLICATE = 'ResourceInUse.TagDuplicate'
# 对应的标签键和资源已关联。
RESOURCEINUSE_TAGKEYATTACHED = 'ResourceInUse.TagKeyAttached'
# 资源关联的标签键不存在。
RESOURCENOTFOUND_ATTACHEDTAGKEYNOTFOUND = 'ResourceNotFound.AttachedTagKeyNotFound'
# 标签不存在。
RESOURCENOTFOUND_TAGNONEXIST = 'ResourceNotFound.TagNonExist'
| # -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# 未通过CAM鉴权。
AUTHFAILURE_UNAUTHORIZEDOPERATION = 'AuthFailure.UnauthorizedOperation'
# 操作失败。
FAILEDOPERATION = 'FailedOperation'
# 单次请求的资源appId必须相同。
FAILEDOPERATION_RESOURCEAPPIDNOTSAME = 'FailedOperation.ResourceAppIdNotSame'
# 已关联资源的标签无法删除。
FAILEDOPERATION_TAGATTACHEDRESOURCE = 'FailedOperation.TagAttachedResource'
# 参数错误。
INVALIDPARAMETER = 'InvalidParameter'
# 系统预留标签键 qcloud、tencent和project 禁止创建。
INVALIDPARAMETER_RESERVEDTAGKEY = 'InvalidParameter.ReservedTagKey'
# Tag参数错误。
INVALIDPARAMETER_TAG = 'InvalidParameter.Tag'
# DeleteTags中不能包含ReplaceTags或AddTags中元素。
INVALIDPARAMETERVALUE_DELETETAGSPARAMERROR = 'InvalidParameterValue.DeleteTagsParamError'
# offset error。
INVALIDPARAMETERVALUE_OFFSETINVALID = 'InvalidParameterValue.OffsetInvalid'
# 地域错误。
INVALIDPARAMETERVALUE_REGIONINVALID = 'InvalidParameterValue.RegionInvalid'
# 系统预留标签键 qcloud、tencent和project 禁止创建。
INVALIDPARAMETERVALUE_RESERVEDTAGKEY = 'InvalidParameterValue.ReservedTagKey'
# 资源描述错误。
INVALIDPARAMETERVALUE_RESOURCEDESCRIPTIONERROR = 'InvalidParameterValue.ResourceDescriptionError'
# 资源Id错误。
INVALIDPARAMETERVALUE_RESOURCEIDINVALID = 'InvalidParameterValue.ResourceIdInvalid'
# 资源前缀错误。
INVALIDPARAMETERVALUE_RESOURCEPREFIXINVALID = 'InvalidParameterValue.ResourcePrefixInvalid'
# 业务类型错误。
INVALIDPARAMETERVALUE_SERVICETYPEINVALID = 'InvalidParameterValue.ServiceTypeInvalid'
# TagFilters参数错误。
INVALIDPARAMETERVALUE_TAGFILTERS = 'InvalidParameterValue.TagFilters'
# 过滤标签键对应标签值达到上限数 6。
INVALIDPARAMETERVALUE_TAGFILTERSLENGTHEXCEEDED = 'InvalidParameterValue.TagFiltersLengthExceeded'
# 标签键包含非法字符。
INVALIDPARAMETERVALUE_TAGKEYCHARACTERILLEGAL = 'InvalidParameterValue.TagKeyCharacterIllegal'
# 标签键不能为空值。
INVALIDPARAMETERVALUE_TAGKEYEMPTY = 'InvalidParameterValue.TagKeyEmpty'
# 标签键长度超过限制。
INVALIDPARAMETERVALUE_TAGKEYLENGTHEXCEEDED = 'InvalidParameterValue.TagKeyLengthExceeded'
# 标签值包含非法字符。
INVALIDPARAMETERVALUE_TAGVALUECHARACTERILLEGAL = 'InvalidParameterValue.TagValueCharacterIllegal'
# 标签值长度超过限制。
INVALIDPARAMETERVALUE_TAGVALUELENGTHEXCEEDED = 'InvalidParameterValue.TagValueLengthExceeded'
# Uin参数不合法。
INVALIDPARAMETERVALUE_UININVALID = 'InvalidParameterValue.UinInvalid'
# 资源关联的标签数超过限制。
LIMITEXCEEDED_RESOURCEATTACHEDTAGS = 'LimitExceeded.ResourceAttachedTags'
# 单次请求的资源数达到上限。
LIMITEXCEEDED_RESOURCENUMPERREQUEST = 'LimitExceeded.ResourceNumPerRequest'
# 用户创建标签键达到上限数 1000。
LIMITEXCEEDED_TAGKEY = 'LimitExceeded.TagKey'
# 单个标签键对应标签值达到上限数 1000。
LIMITEXCEEDED_TAGVALUE = 'LimitExceeded.TagValue'
# 操作被拒绝。
OPERATIONDENIED = 'OperationDenied'
# 标签已存在。
RESOURCEINUSE_TAGDUPLICATE = 'ResourceInUse.TagDuplicate'
# 对应的标签键和资源已关联。
RESOURCEINUSE_TAGKEYATTACHED = 'ResourceInUse.TagKeyAttached'
# 资源关联的标签键不存在。
RESOURCENOTFOUND_ATTACHEDTAGKEYNOTFOUND = 'ResourceNotFound.AttachedTagKeyNotFound'
# 标签不存在。
RESOURCENOTFOUND_TAGNONEXIST = 'ResourceNotFound.TagNonExist'
| zh | 0.469348 | # -*- coding: utf8 -*- # Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # 未通过CAM鉴权。 # 操作失败。 # 单次请求的资源appId必须相同。 # 已关联资源的标签无法删除。 # 参数错误。 # 系统预留标签键 qcloud、tencent和project 禁止创建。 # Tag参数错误。 # DeleteTags中不能包含ReplaceTags或AddTags中元素。 # offset error。 # 地域错误。 # 系统预留标签键 qcloud、tencent和project 禁止创建。 # 资源描述错误。 # 资源Id错误。 # 资源前缀错误。 # 业务类型错误。 # TagFilters参数错误。 # 过滤标签键对应标签值达到上限数 6。 # 标签键包含非法字符。 # 标签键不能为空值。 # 标签键长度超过限制。 # 标签值包含非法字符。 # 标签值长度超过限制。 # Uin参数不合法。 # 资源关联的标签数超过限制。 # 单次请求的资源数达到上限。 # 用户创建标签键达到上限数 1000。 # 单个标签键对应标签值达到上限数 1000。 # 操作被拒绝。 # 标签已存在。 # 对应的标签键和资源已关联。 # 资源关联的标签键不存在。 # 标签不存在。 | 1.297988 | 1 |
gwpopulation/conversions.py | chase-kimball/gwpopulation | 0 | 6625016 | from __future__ import division
def convert_to_beta_parameters(parameters, remove=True):
"""
Convert to parameters for standard beta distribution
"""
added_keys = list()
converted = parameters.copy()
def _convert(suffix):
alpha = 'alpha_chi{}'.format(suffix)
beta = 'beta_chi{}'.format(suffix)
mu = 'mu_chi{}'.format(suffix)
sigma = 'sigma_chi{}'.format(suffix)
amax = 'amax{}'.format(suffix)
if alpha not in parameters.keys() or beta not in parameters.keys():
needed = True
elif converted[alpha] is None or converted[beta] is None:
needed = True
else:
needed = False
done = True
if needed:
if mu in converted.keys() and sigma in converted.keys():
done = True
converted[alpha], converted[beta], _ =\
mu_chi_var_chi_max_to_alpha_beta_max(
parameters[mu], parameters[sigma],
parameters[amax])
if remove:
added_keys.append(alpha)
added_keys.append(beta)
else:
done = False
return done
done = False
for suffix in ['_1', '_2']:
_done = _convert(suffix)
done = done or _done
if not done:
_ = _convert('')
return converted, added_keys
def alpha_beta_max_to_mu_chi_var_chi_max(alpha, beta, amax):
"""
Convert between parameters for beta distribution
"""
mu_chi = alpha / (alpha + beta) * amax
var_chi = alpha * beta / ((alpha + beta)**2 * (alpha + beta + 1)) * amax**2
return mu_chi, var_chi, amax
def mu_chi_var_chi_max_to_alpha_beta_max(mu_chi, var_chi, amax):
"""
Convert between parameters for beta distribution
"""
mu_chi /= amax
var_chi /= amax**2
alpha = (mu_chi**2 * (1 - mu_chi) - mu_chi * var_chi) / var_chi
beta = (mu_chi * (1 - mu_chi)**2 - (1 - mu_chi) * var_chi) / var_chi
return alpha, beta, amax
| from __future__ import division
def convert_to_beta_parameters(parameters, remove=True):
"""
Convert to parameters for standard beta distribution
"""
added_keys = list()
converted = parameters.copy()
def _convert(suffix):
alpha = 'alpha_chi{}'.format(suffix)
beta = 'beta_chi{}'.format(suffix)
mu = 'mu_chi{}'.format(suffix)
sigma = 'sigma_chi{}'.format(suffix)
amax = 'amax{}'.format(suffix)
if alpha not in parameters.keys() or beta not in parameters.keys():
needed = True
elif converted[alpha] is None or converted[beta] is None:
needed = True
else:
needed = False
done = True
if needed:
if mu in converted.keys() and sigma in converted.keys():
done = True
converted[alpha], converted[beta], _ =\
mu_chi_var_chi_max_to_alpha_beta_max(
parameters[mu], parameters[sigma],
parameters[amax])
if remove:
added_keys.append(alpha)
added_keys.append(beta)
else:
done = False
return done
done = False
for suffix in ['_1', '_2']:
_done = _convert(suffix)
done = done or _done
if not done:
_ = _convert('')
return converted, added_keys
def alpha_beta_max_to_mu_chi_var_chi_max(alpha, beta, amax):
"""
Convert between parameters for beta distribution
"""
mu_chi = alpha / (alpha + beta) * amax
var_chi = alpha * beta / ((alpha + beta)**2 * (alpha + beta + 1)) * amax**2
return mu_chi, var_chi, amax
def mu_chi_var_chi_max_to_alpha_beta_max(mu_chi, var_chi, amax):
"""
Convert between parameters for beta distribution
"""
mu_chi /= amax
var_chi /= amax**2
alpha = (mu_chi**2 * (1 - mu_chi) - mu_chi * var_chi) / var_chi
beta = (mu_chi * (1 - mu_chi)**2 - (1 - mu_chi) * var_chi) / var_chi
return alpha, beta, amax
| en | 0.508283 | Convert to parameters for standard beta distribution Convert between parameters for beta distribution Convert between parameters for beta distribution | 2.899153 | 3 |
libpermian/plugins/kickstart_test/__init__.py | rvykydal/permian | 0 | 6625017 | <reponame>rvykydal/permian<gh_stars>0
import logging
import tempfile
import subprocess
import os
import shutil
import requests
import itertools
import stat
from libpermian.plugins import api
from libpermian.workflows.isolated import GroupedWorkflow
from libpermian.events.base import Event
from libpermian.events.structures.builtin import OtherStructure, BaseStructure
from libpermian.result import Result
from libpermian.exceptions import UnsupportedConfiguration
# Module-level logger for this plugin.
LOGGER = logging.getLogger(__name__)

# Path (relative to the kickstart-tests repository checkout) where the
# launcher expects the installer boot.iso to be placed.
BOOT_ISO_RELATIVE_PATH = 'data/images/boot.iso'
# Path of boot.iso relative to the root of an installation tree URL.
BOOT_ISO_PATH_IN_INSTALLATION_TREE = 'images/boot.iso'
# Architectures this workflow is able to run kickstart tests on.
SUPPORTED_ARCHITECTURES = {'x86_64'}
class MissingBootIso(Exception):
    """
    Raised when boot.iso for an architecture is not configured.

    :param architecture: architecture for which no boot.iso location is known
    :type architecture: str
    """
    def __init__(self, architecture):
        # Fixed: the original message had an unbalanced quote around the
        # architecture name ("...for 'x86_64 is not configured").
        msg = f"Boot.iso for '{architecture}' is not configured"
        self.architecture = architecture
        super().__init__(msg)
class MissingInformation(Exception):
    """Raised when the workflow lacks information required to run.

    For example when the event does not carry the platform configuration.
    """
class KicstartTestBatchCurrentResults():
"""Container for storing individual results of kickstart tests run in a batch.
The results are parsed from output of kickstart tests launcher running
the batch of kickstart tests.
:param tests: list of kickstart tests run in the batch
:type tests: list of str
:param retry: are the tests retried after the first failure ?
:type retry: bool
"""
def __init__(self, tests, retry=True):
self.results = {test: [] for test in tests}
self.retry = retry
def process_output_line(self, line):
"""Update the results from a line of tests launcher output.
:param line: a line of output from kickstart tests launcher
:type line: str
:return: tuple containing name of the finished test and its result if
the line indicates such or (None, None)
:rtype: (str, libpermian.result.Result)
"""
finished_test, permian_result = None, None
if "INFO: RESULT:" in line:
fields = line.split(":", 7)
if len(fields) != 8:
LOGGER.warning("Workflow is not able to parse result from output")
return (None, None)
_, _, _, _, test, _, result, detail = fields
if test in self.results:
self.results[test].append((result, detail))
finished_test = test
permian_result = self._get_permian_result_of_test(finished_test)
else:
LOGGER.warning("Found result of unexpected test %s", test)
return (finished_test, permian_result)
def _get_permian_result_of_test(self, test):
"""Get Permian Result of results stored for the kickstart test.
:param test: name of the tests to get the result of
:type test: str
:return: result corresponding to the results stored for the kickstart test
:rtype: libpermian.result.Result
"""
state, result, final = None, None, False
test_results = self.results.get(test, None)
if not test_results:
return Result(state, result, final)
state, final = "complete", True
test_result, _result_detail = test_results[-1]
if test_result == "SUCCESS":
result = "PASS"
elif test_result == "FAILED":
result = "FAIL"
# retry on flake
if self._is_flake(test_results):
state, result, final = "running", None, False
return Result(state, result, final, partial=self)
def _is_flake(self, test_results):
"""Are the results qualified as a flake?
A flake is a failed test which will be re-run (based on launcher option --retry).
"""
return self.retry and len(test_results) == 1 and test_results[0][0] == "FAILED"
def summary_message(self):
"""Create a message summarizing current results of the batch test.
:return: message with test results summary
:rtype: str
"""
success = failed = timed_out = flakes = 0
for test, results in self.results.items():
if not results:
continue
# If the current result of the test is a flake
if self._is_flake(results):
flakes += 1
else:
final_result = results[-1]
result, detail = final_result
if result == "SUCCESS":
success += 1
flakes += len(results) - 1
elif result == "FAILED":
failed += 1
all_results = list(itertools.chain.from_iterable(self.results.values()))
timed_out = sum([1 for result, detail in all_results
if result == "FAILED" and detail == "Test timed out"])
n_a = len(self.results) - success - failed
return f"SUCCESS: {success} FAILED: {failed} N/A: {n_a} (runs: {len(all_results)} flakes: {flakes} timed out: {timed_out})"
def get_test_results(self, test):
try:
return self.results[test]
except KeyError:
LOGGER.warning("Found result of unexpected test %s", test)
return None
@api.events.register_structure('bootIso')
class BootIsoStructure(OtherStructure):
    """Event structure mapping architecture names to installer boot.iso URLs.

    Registered as the 'bootIso' event structure; all behavior is inherited
    from OtherStructure (keyword arguments are stored as a mapping).
    """
    pass
@api.events.register_structure('kstestParams')
class KstestParamsStructure(BaseStructure):
    """Event structure with kickstart-test run parameters.

    Holds the launcher platform name and optional per-architecture
    installation repository url configuration.
    """
    def __init__(self, settings, platform, urls=None):
        super().__init__(settings)
        self.platform = platform
        # Mapping of architecture name to url configuration dict.
        self.urls = urls or {}

    def to_bootIso(self):
        """Derive a BootIsoStructure from the installation tree urls.

        Returns NotImplemented when no architecture provides an
        installation tree url.
        """
        iso_urls = {
            arch: os.path.join(arch_urls['installation_tree'],
                               BOOT_ISO_PATH_IN_INSTALLATION_TREE)
            for arch, arch_urls in self.urls.items()
            if 'installation_tree' in arch_urls
        }
        if not iso_urls:
            return NotImplemented
        return BootIsoStructure(self.settings, **iso_urls)
@api.workflows.register("kickstart-test")
class KickstartTestWorkflow(GroupedWorkflow):
    """Workflow executing batches of kickstart tests via the kickstart-tests launcher.

    One workflow instance is created per architecture (see :meth:`factory`).
    The workflow prepares a kickstart-tests repository checkout and the
    installer boot.iso, runs the launcher as a subprocess and reports
    per-test results parsed from its output.
    """
    # These exceptions cancel the workflow without being treated as errors.
    silent_exceptions = (UnsupportedConfiguration, MissingBootIso)

    @classmethod
    def factory(cls, testRuns, crcList):
        """Create one workflow instance for each architecture in crcList."""
        for (arch, ), crcList in crcList.by_configuration('architecture').items():
            cls(testRuns, crcList, arch=arch)

    def __init__(self, testRuns, crcList, arch):
        super().__init__(testRuns, crcList)
        self.arch = arch
        # Directory containing the kickstart-tests repository.
        self.ksrepo_dir = None
        # Optional pre-existing local repository; when set, no cloning is done
        # and the repository is not removed in teardown().
        self.ksrepo_local_dir = self.settings.get('kickstart_test', 'kstest_local_repo')
        # URL of the installer boot.iso, resolved from the event in setup().
        self.boot_iso_url = None
        # The path of boot.iso expected by runner
        self.boot_iso_dest = None
        # Platform passed to the launcher, resolved from the event in setup().
        self.platform = None
        # Path to configuration file overriding default (per platform) repository urls
        self.url_overrides_path = None
        self.runner_command = self.settings.get('kickstart_test', 'runner_command').split()
        self.ksrepo = self.settings.get('kickstart_test', 'kstest_repo')
        self.ksrepo_branch = self.settings.get('kickstart_test', 'kstest_repo_branch')
        self.retry = self.settings.getboolean('kickstart_test', 'retry_on_failure')
        self.timeout = self.settings.get('kickstart_test', 'timeout')

    def _create_overrides_file(self, content):
        """Write *content* into a temporary defaults override file.

        The file is made world-readable so the runner environment can read it.

        :param content: shell variable exports to store
        :type content: str
        :return: path of the created file; removed by teardown()
        :rtype: str
        """
        with tempfile.NamedTemporaryFile("w", delete=False, prefix="defaults-") as f:
            f.write(content)
            fpath = f.name
        os.chmod(fpath, os.stat(fpath).st_mode | stat.S_IROTH)
        return fpath

    def _get_url_overrides(self, urls):
        """Build shell export statements overriding installation repo urls.

        :param urls: mapping of architecture to url configuration with keys
            like 'installation_tree', 'metalink', ...
        :type urls: dict
        :return: newline-separated export statements for this workflow's
            architecture; empty string when the architecture has no urls
        :rtype: str
        """
        try:
            arch_urls = urls[self.arch]
        except KeyError:
            return ""
        # Launcher environment variable -> url configuration key.
        kstest_var_map = {
            'KSTEST_URL': 'installation_tree',
            'KSTEST_METALINK': 'metalink',
            'KSTEST_MIRRORLIST': 'mirrorlist',
            'KSTEST_FTP_URL': 'ftp_url',
            'KSTEST_MODULAR_URL': 'modular_url',
        }
        overrides = []
        for variable, event_key in kstest_var_map.items():
            value = arch_urls.get(event_key)
            if value:
                overrides.append(f"export {variable}={value}")
        return "\n".join(overrides)

    def process_installation_urls(self, urls):
        """Get test run parameters from installation urls configuration.

        :param urls: per-architecture url configuration (the ``urls``
            attribute of the kstestParams event structure)
        :type urls: dict
        :returns: path of override defaults file with urls to be used by launcher
            or None if there are no relevant overrides
        :rtype: str or None
        """
        url_overrides_path = None
        # Configure installation repositories
        variable_overrides = self._get_url_overrides(urls)
        if variable_overrides:
            url_overrides_path = self._create_overrides_file(variable_overrides)
            LOGGER.info("Created url overrides file "
                        f"{url_overrides_path} with content:"
                        f"\n{variable_overrides}")
        return url_overrides_path

    def setup(self):
        """Prepare the kickstart-tests repository and the installer boot.iso.

        :raises UnsupportedConfiguration: for unsupported architectures
        :raises MissingInformation: when the event lacks kstestParams
        :raises MissingBootIso: when no boot.iso is configured for the arch
        """
        if self.arch not in SUPPORTED_ARCHITECTURES:
            LOGGER.info(f"Architecture {self.arch} is not supported.")
            raise UnsupportedConfiguration('architecture', self.arch)

        if not self.event.kstestParams:
            LOGGER.error(f"Platform configuration by kstestParams is missing")
            raise MissingInformation("platform configuration")
        self.platform = self.event.kstestParams.platform

        urls = self.event.kstestParams.urls
        if urls:
            self.url_overrides_path = self.process_installation_urls(urls)

        try:
            self.boot_iso_url = self.event.bootIso[self.arch]
        except (TypeError, KeyError):
            # BootIsoStructure is missing or has no entry for this architecture
            LOGGER.info(f"Installer boot.iso location configuration for {self.arch} is missing")
            raise MissingBootIso(self.arch)

        self.groupReportResult(self.crcList, Result('queued'))

        if self.ksrepo_local_dir:
            self.ksrepo_dir = self.ksrepo_local_dir
            LOGGER.info("Using existing kickstart-tests repository %s.", self.ksrepo_local_dir)
        else:
            self.ksrepo_dir = os.path.join(tempfile.mkdtemp(), "kickstart-tests")
            LOGGER.info("Created kickstart-tests repository directory %s", self.ksrepo_dir)
            LOGGER.info("Cloning kickstart-tests repository %s branch %s.",
                        self.ksrepo, self.ksrepo_branch)
            subprocess.run(
                ['git',
                 'clone',
                 self.ksrepo,
                 self.ksrepo_dir,
                 '--branch',
                 self.ksrepo_branch,
                 ],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                check=True,
            )

        self.boot_iso_dest = os.path.join(self.ksrepo_dir, BOOT_ISO_RELATIVE_PATH)
        LOGGER.info("Fetching installer boot.iso %s", self.boot_iso_url)
        self.fetch_boot_iso(self.boot_iso_url, self.boot_iso_dest)

    @staticmethod
    def fetch_boot_iso(iso_url, dest):
        """Fetch boot.iso from *iso_url* to local path *dest*.

        Supports http:// and https:// URLs (streamed download) and file://
        URLs (local copy). Fixed: the original only matched "http://", so an
        https URL silently did nothing and the iso was missing later on.

        :raises ValueError: if the URL scheme is not supported
        :raises requests.HTTPError: when the http(s) download fails
        """
        iso_dir = os.path.dirname(dest)
        if not os.path.isdir(iso_dir):
            os.makedirs(iso_dir, 0o755, exist_ok=True)
            LOGGER.debug("Created %s", iso_dir)
        if iso_url.startswith(("http://", "https://")):
            with requests.get(iso_url, stream=True, allow_redirects=True) as r:
                # Fail fast instead of writing an error page as boot.iso.
                r.raise_for_status()
                with open(dest, 'wb') as f:
                    shutil.copyfileobj(r.raw, f)
        elif iso_url.startswith("file://"):
            shutil.copyfile(iso_url[7:], dest)
        else:
            raise ValueError(f"Unsupported boot.iso URL scheme: {iso_url}")

    @staticmethod
    def _get_test_from_crc(crc):
        """Return the kickstart test name associated with the crc."""
        return crc.testcase.execution.automation_data['test']

    @classmethod
    def _map_tests_to_crcs(cls, crclist):
        """Map kickstart test names to the lists of crcs using them.

        :rtype: dict
        """
        result = {}
        for crc in crclist:
            test = cls._get_test_from_crc(crc)
            result.setdefault(test, []).append(crc)
        return result

    def execute(self):
        """Run the launcher for all tests of the batch and report results.

        The launcher output is processed line by line; whenever a test
        finishes, its result is reported for the corresponding crcs.
        """
        self.groupReportResult(self.crcList, Result('started'))

        test_to_crcs = self._map_tests_to_crcs(self.crcList)
        tests = list(test_to_crcs.keys())
        current_results = KicstartTestBatchCurrentResults(tests, retry=self.retry)
        self.groupReportResult(self.crcList, Result('running', current_results=current_results))

        command = self.runner_command
        command = command + ['--scenario', self.event.type]
        command = command + ['--platform', self.platform]
        if self.url_overrides_path:
            command = command + ["--defaults", self.url_overrides_path]
        if self.retry:
            command = command + ["--retry"]
        command = command + ["--timeout", self.timeout]
        command = command + tests

        LOGGER.info(f"Runner is starting. {current_results.summary_message()}")
        LOGGER.info("Running %s", command)
        with subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            bufsize=1,
            universal_newlines=True,
            cwd=self.ksrepo_dir,
        ) as p:
            for line in p.stdout:
                line = line.strip()
                # TODO: make this configurable in settings
                LOGGER.info("[runner] %s", line)
                finished_test, result = current_results.process_output_line(line)
                if finished_test:
                    self.groupReportResult(test_to_crcs[finished_test], result)
                    LOGGER.info(f"Test {finished_test} finished. {current_results.summary_message()}")

        LOGGER.info(f"Runner return code: {p.returncode}")
        LOGGER.info(f"Runner finished. {current_results.summary_message()}")

    def dry_execute(self):
        """Dry-run variant: prefix the runner command with 'echo'."""
        self.runner_command = ['echo'] + self.runner_command
        self.execute()

    def teardown(self):
        """Remove boot.iso, the overrides file and any cloned repository."""
        if self.boot_iso_url:
            LOGGER.info("Removing installer boot.iso.")
            try:
                os.remove(self.boot_iso_dest)
            except FileNotFoundError:
                LOGGER.debug("Installer boot.iso %s not found", self.boot_iso_dest)
        if self.url_overrides_path:
            os.unlink(self.url_overrides_path)
        if not self.ksrepo_local_dir and self.ksrepo_dir:
            # The repository was cloned into a fresh temporary directory.
            tempdir = os.path.normpath(os.path.join(self.ksrepo_dir, '..'))
            LOGGER.info("Removing %s with kickstart-tests repo.", tempdir)
            shutil.rmtree(tempdir)

    def groupTerminate(self, crcIds):
        """Terminating a running launcher is not supported; returns False."""
        LOGGER.info('Something attempted to stop me!')
        return False

    def groupDisplayStatus(self, crcId):
        """Return a string with the current partial results of the crc's test."""
        status = ""
        current_results = self.crcList[crcId].result.extra_fields.get('current_results')
        if current_results:
            test = self._get_test_from_crc(self.crcList[crcId])
            test_results = current_results.get_test_results(test)
            if test_results:
                status = f"{test_results}"
        return status
| import logging
import tempfile
import subprocess
import os
import shutil
import requests
import itertools
import stat
from libpermian.plugins import api
from libpermian.workflows.isolated import GroupedWorkflow
from libpermian.events.base import Event
from libpermian.events.structures.builtin import OtherStructure, BaseStructure
from libpermian.result import Result
from libpermian.exceptions import UnsupportedConfiguration
LOGGER = logging.getLogger(__name__)
BOOT_ISO_RELATIVE_PATH = 'data/images/boot.iso'
BOOT_ISO_PATH_IN_INSTALLATION_TREE = 'images/boot.iso'
SUPPORTED_ARCHITECTURES = {'x86_64'}
class MissingBootIso(Exception):
"""
Raised when boot.iso for an architecture is not configured.
"""
def __init__(self, architecture):
msg = f"Boot.iso for '{architecture} is not configured"
self.architecture = architecture
super().__init__(msg)
class MissingInformation(Exception):
"""
Raised when workflow is missing required information.
"""
pass
class KicstartTestBatchCurrentResults():
"""Container for storing individual results of kickstart tests run in a batch.
The results are parsed from output of kickstart tests launcher running
the batch of kickstart tests.
:param tests: list of kickstart tests run in the batch
:type tests: list of str
:param retry: are the tests retried after the first failure ?
:type retry: bool
"""
def __init__(self, tests, retry=True):
self.results = {test: [] for test in tests}
self.retry = retry
def process_output_line(self, line):
"""Update the results from a line of tests launcher output.
:param line: a line of output from kickstart tests launcher
:type line: str
:return: tuple containing name of the finished test and its result if
the line indicates such or (None, None)
:rtype: (str, libpermian.result.Result)
"""
finished_test, permian_result = None, None
if "INFO: RESULT:" in line:
fields = line.split(":", 7)
if len(fields) != 8:
LOGGER.warning("Workflow is not able to parse result from output")
return (None, None)
_, _, _, _, test, _, result, detail = fields
if test in self.results:
self.results[test].append((result, detail))
finished_test = test
permian_result = self._get_permian_result_of_test(finished_test)
else:
LOGGER.warning("Found result of unexpected test %s", test)
return (finished_test, permian_result)
def _get_permian_result_of_test(self, test):
"""Get Permian Result of results stored for the kickstart test.
:param test: name of the tests to get the result of
:type test: str
:return: result corresponding to the results stored for the kickstart test
:rtype: libpermian.result.Result
"""
state, result, final = None, None, False
test_results = self.results.get(test, None)
if not test_results:
return Result(state, result, final)
state, final = "complete", True
test_result, _result_detail = test_results[-1]
if test_result == "SUCCESS":
result = "PASS"
elif test_result == "FAILED":
result = "FAIL"
# retry on flake
if self._is_flake(test_results):
state, result, final = "running", None, False
return Result(state, result, final, partial=self)
def _is_flake(self, test_results):
"""Are the results qualified as a flake?
A flake is a failed test which will be re-run (based on launcher option --retry).
"""
return self.retry and len(test_results) == 1 and test_results[0][0] == "FAILED"
def summary_message(self):
"""Create a message summarizing current results of the batch test.
:return: message with test results summary
:rtype: str
"""
success = failed = timed_out = flakes = 0
for test, results in self.results.items():
if not results:
continue
# If the current result of the test is a flake
if self._is_flake(results):
flakes += 1
else:
final_result = results[-1]
result, detail = final_result
if result == "SUCCESS":
success += 1
flakes += len(results) - 1
elif result == "FAILED":
failed += 1
all_results = list(itertools.chain.from_iterable(self.results.values()))
timed_out = sum([1 for result, detail in all_results
if result == "FAILED" and detail == "Test timed out"])
n_a = len(self.results) - success - failed
return f"SUCCESS: {success} FAILED: {failed} N/A: {n_a} (runs: {len(all_results)} flakes: {flakes} timed out: {timed_out})"
def get_test_results(self, test):
try:
return self.results[test]
except KeyError:
LOGGER.warning("Found result of unexpected test %s", test)
return None
@api.events.register_structure('bootIso')
class BootIsoStructure(OtherStructure):
pass
@api.events.register_structure('kstestParams')
class KstestParamsStructure(BaseStructure):
def __init__(self, settings, platform, urls=None):
super().__init__(settings)
self.platform = platform
self.urls = urls or dict()
def to_bootIso(self):
boot_isos = {}
for arch, urls in self.urls.items():
if 'installation_tree' in urls.keys():
boot_isos[arch] = os.path.join(urls['installation_tree'],
BOOT_ISO_PATH_IN_INSTALLATION_TREE)
if not boot_isos:
return NotImplemented
return BootIsoStructure(self.settings, **boot_isos)
@api.workflows.register("kickstart-test")
class KickstartTestWorkflow(GroupedWorkflow):
silent_exceptions = (UnsupportedConfiguration, MissingBootIso)
@classmethod
def factory(cls, testRuns, crcList):
for (arch, ), crcList in crcList.by_configuration('architecture').items():
cls(testRuns, crcList, arch=arch)
def __init__(self, testRuns, crcList, arch):
super().__init__(testRuns, crcList)
self.arch = arch
self.ksrepo_dir = None
self.ksrepo_local_dir = self.settings.get('kickstart_test', 'kstest_local_repo')
self.boot_iso_url = None
# The path of boot.iso expected by runner
self.boot_iso_dest = None
self.platform = None
# Path to configuration file overriding default (per platform) repository urls
self.url_overrides_path = None
self.runner_command = self.settings.get('kickstart_test', 'runner_command').split()
self.ksrepo = self.settings.get('kickstart_test', 'kstest_repo')
self.ksrepo_branch = self.settings.get('kickstart_test', 'kstest_repo_branch')
self.retry = self.settings.getboolean('kickstart_test', 'retry_on_failure')
self.timeout = self.settings.get('kickstart_test', 'timeout')
def _create_overrides_file(self, content):
with tempfile.NamedTemporaryFile("w", delete=False, prefix="defaults-") as f:
f.write(content)
fpath = f.name
os.chmod(fpath, os.stat(fpath).st_mode | stat.S_IROTH)
return fpath
def _get_url_overrides(self, urls):
try:
arch_urls = urls[self.arch]
except KeyError:
return ""
kstest_var_map = {
'KSTEST_URL': 'installation_tree',
'KSTEST_METALINK': 'metalink',
'KSTEST_MIRRORLIST': 'mirrorlist',
'KSTEST_FTP_URL': 'ftp_url',
'KSTEST_MODULAR_URL': 'modular_url',
}
overrides = []
for variable, event_key in kstest_var_map.items():
value = arch_urls.get(event_key)
if value:
overrides.append(f"export {variable}={value}")
return "\n".join(overrides)
def process_installation_urls(self, urls):
"""Get test run parameters from installationUrls event structure
:param urls: structure holding scenario data
:type urls: InstallationUrlsStructure
:returns: path of override defaults file with urls to be used by launcher
or None if there are no relevant overrides
:rtype: str
"""
url_overrides_path = None
# Configure installation repositories
variable_overrides = self._get_url_overrides(urls)
if variable_overrides:
url_overrides_path = self._create_overrides_file(variable_overrides)
LOGGER.info("Created url overrides file "
f"{url_overrides_path} with content:"
f"\n{variable_overrides}")
return url_overrides_path
def setup(self):
if self.arch not in SUPPORTED_ARCHITECTURES:
LOGGER.info(f"Architecture {self.arch} is not supported.")
raise UnsupportedConfiguration('architecture', self.arch)
if not self.event.kstestParams:
LOGGER.error(f"Platform configuration by kstestParams is missing")
raise MissingInformation("platform configuration")
self.platform = self.event.kstestParams.platform
urls = self.event.kstestParams.urls
if urls:
self.url_overrides_path = self.process_installation_urls(urls)
try:
self.boot_iso_url = self.event.bootIso[self.arch]
except (TypeError, KeyError): # BootIsoStructure or requred architecture is not available
LOGGER.info(f"Installer boot.iso location configuration for {self.arch} is missing")
raise MissingBootIso(self.arch)
self.groupReportResult(self.crcList, Result('queued'))
if self.ksrepo_local_dir:
self.ksrepo_dir = self.ksrepo_local_dir
LOGGER.info("Using existing kickstart-tests repository %s.", self.ksrepo_local_dir)
else:
self.ksrepo_dir = os.path.join(tempfile.mkdtemp(), "kickstart-tests")
LOGGER.info("Created kickstart-tests repository directory %s", self.ksrepo_dir)
LOGGER.info("Cloning kickstart-tests repository %s branch %s.",
self.ksrepo, self.ksrepo_branch)
subprocess.run(
['git',
'clone',
self.ksrepo,
self.ksrepo_dir,
'--branch',
self.ksrepo_branch,
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True,
)
self.boot_iso_dest = os.path.join(self.ksrepo_dir, BOOT_ISO_RELATIVE_PATH)
LOGGER.info("Fetchig installer boot.iso %s", self.boot_iso_url)
self.fetch_boot_iso(self.boot_iso_url, self.boot_iso_dest)
@staticmethod
def fetch_boot_iso(iso_url, dest):
"""Fetch boot.iso."""
iso_dir = os.path.dirname(dest)
if not os.path.isdir(iso_dir):
os.makedirs(iso_dir, 0o755, exist_ok=True)
LOGGER.debug("Created %s", iso_dir)
if iso_url.startswith("http://"):
with requests.get(iso_url, stream=True, allow_redirects=True) as r:
with open(dest, 'wb') as f:
shutil.copyfileobj(r.raw, f)
elif iso_url.startswith("file://"):
shutil.copyfile(iso_url[7:], dest)
@staticmethod
def _get_test_from_crc(crc):
return crc.testcase.execution.automation_data['test']
@classmethod
def _map_tests_to_crcs(cls, crclist):
result = {}
for crc in crclist:
test = cls._get_test_from_crc(crc)
try:
result[test].append(crc)
except KeyError:
result[test] = [crc]
return result
def execute(self):
self.groupReportResult(self.crcList, Result('started'))
test_to_crcs = self._map_tests_to_crcs(self.crcList)
tests = list(test_to_crcs.keys())
current_results = KicstartTestBatchCurrentResults(tests, retry=self.retry)
self.groupReportResult(self.crcList, Result('running', current_results=current_results))
command = self.runner_command
command = command + ['--scenario', self.event.type]
command = command + ['--platform', self.platform]
if self.url_overrides_path:
command = command + ["--defaults", self.url_overrides_path]
if self.retry:
command = command + ["--retry"]
command = command + ["--timeout", self.timeout]
command = command + tests
LOGGER.info(f"Runner is starting. {current_results.summary_message()}")
LOGGER.info("Running %s", command)
with subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=1,
universal_newlines=True,
cwd=self.ksrepo_dir,
) as p:
for line in p.stdout:
line = line.strip()
# TODO: make this configurable in settings
LOGGER.info("[runner] %s", line.strip())
finished_test, result = current_results.process_output_line(line)
if finished_test:
self.groupReportResult(test_to_crcs[finished_test], result)
LOGGER.info(f"Test {finished_test} finished. {current_results.summary_message()}")
LOGGER.info(f"Runner return code: {p.returncode}")
LOGGER.info(f"Runner finished. {current_results.summary_message()}")
def dry_execute(self):
self.runner_command = ['echo'] + self.runner_command
self.execute()
def teardown(self):
if self.boot_iso_url:
LOGGER.info("Removing installer boot.iso.")
try:
os.remove(self.boot_iso_dest)
except FileNotFoundError:
LOGGER.debug("Installer boot.iso %s not found", self.boot_iso_dest)
if self.url_overrides_path:
os.unlink(self.url_overrides_path)
if not self.ksrepo_local_dir and self.ksrepo_dir:
tempdir = os.path.normpath(os.path.join(self.ksrepo_dir, '..'))
LOGGER.info("Removing %s with kickstart-tests repo.", tempdir)
shutil.rmtree(tempdir)
def groupTerminate(self, crcIds):
LOGGER.info('Something attempted to stop me!')
return False
def groupDisplayStatus(self, crcId):
status = ""
current_results = self.crcList[crcId].result.extra_fields.get('current_results')
if current_results:
test = self._get_test_from_crc(self.crcList[crcId])
test_results = current_results.get_test_results(test)
if test_results:
status = f"{test_results}"
return status | en | 0.817041 | Raised when boot.iso for an architecture is not configured. Raised when workflow is missing required information. Container for storing individual results of kickstart tests run in a batch. The results are parsed from output of kickstart tests launcher running the batch of kickstart tests. :param tests: list of kickstart tests run in the batch :type tests: list of str :param retry: are the tests retried after the first failure ? :type retry: bool Update the results from a line of tests launcher output. :param line: a line of output from kickstart tests launcher :type line: str :return: tuple containing name of the finished test and its result if the line indicates such or (None, None) :rtype: (str, libpermian.result.Result) Get Permian Result of results stored for the kickstart test. :param test: name of the tests to get the result of :type test: str :return: result corresponding to the results stored for the kickstart test :rtype: libpermian.result.Result # retry on flake Are the results qualified as a flake? A flake is a failed test which will be re-run (based on launcher option --retry). Create a message summarizing current results of the batch test. :return: message with test results summary :rtype: str # If the current result of the test is a flake # The path of boot.iso expected by runner # Path to configuration file overriding default (per platform) repository urls Get test run parameters from installationUrls event structure :param urls: structure holding scenario data :type urls: InstallationUrlsStructure :returns: path of override defaults file with urls to be used by launcher or None if there are no relevant overrides :rtype: str # Configure installation repositories # BootIsoStructure or requred architecture is not available Fetch boot.iso. # TODO: make this configurable in settings | 2.222308 | 2 |
desktop/libs/notebook/src/notebook/connectors/hiveserver2_tests.py | 10088/hue | 0 | 6625018 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import next, object
import json
import logging
import re
import sys
import time
from nose.plugins.skip import SkipTest
from nose.tools import assert_equal, assert_true, assert_raises
from django.urls import reverse
from TCLIService.ttypes import TStatusCode, TProtocolVersion, TOperationType
from desktop.auth.backend import rewrite_user
from desktop.conf import has_connectors
from desktop.lib.i18n import smart_str
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import add_to_group, grant_access
from beeswax.server import dbms
from beeswax.server.dbms import QueryServerException
from beeswax.test_base import BeeswaxSampleProvider, get_query_server_config, is_hive_on_spark
from hadoop.pseudo_hdfs4 import is_live_cluster
from useradmin.models import User
from notebook.api import _save_notebook
from notebook.connectors.base import QueryError, QueryExpired
from notebook.connectors.hiveserver2 import HS2Api
from notebook.models import make_notebook, Notebook
if sys.version_info[0] > 2:
from unittest.mock import patch, Mock
else:
from mock import patch, Mock
LOG = logging.getLogger(__name__)
class TestApiWithConnectors(object):
  """HS2 notebook API tests routed through the connectors framework.

  Skipped unless connectors are enabled. The Thrift client and the dbms layer
  are mocked, so no live Impala/Hive service is required.
  """

  # Notebook document payload posted to the notebook API endpoints.
  NOTEBOOK_JSON = """
    {
      "selectedSnippet": "impala",
      "showHistory": false,
      "description": "Test Impala Query",
      "name": "Test Impala Query",
      "sessions": [
          {
              "type": "impala",
              "properties": [],
              "id": null
          }
      ],
      "type": "query-impala",
      "id": null,
      "snippets": [{
        "id":"2b7d1f46-17a0-30af-efeb-33d4c29b1055","type":"impala-xx","status":"running",
        "statement_raw":"select * from web_logs",
        "statement":"select * from web_logs",
        "variables":[],
        "properties":{"settings":[],"variables":[],"files":[],"functions":[]},
        "result":{
            "id":"b424befa-f4f5-8799-a0b4-79753f2552b1","type":"table",
            "handle":{"log_context":null,"statements_count":1,"end":{"column":21,"row":0},"statement_id":0,"has_more_statements":false,
            "start":{"column":0,"row":0},"secret":"rVRWw7YPRGqPT7LZ/TeFaA==an","has_result_set":true,
            "statement":"select * from web_logs","operation_type":0,"modified_row_count":null,"guid":"7xm6+epkRx6dyvYvGNYePA==an"}
        },
        "lastExecuted": 1462554843817,"database":"default"
      }],
      "uuid": "d9efdee1-ef25-4d43-b8f9-1a170f69a05a",
      "isSaved":false
  }
  """

  # Single Impala connector instance patched into CONNECTOR_INSTANCES by the tests.
  CONNECTOR = [{
      'nice_name': 'Impala', 'name': 'impala-xx', 'dialect': 'impala', 'interface': 'hiveserver2',
      'settings': [
        {'name': 'server_host', 'value': 'gethue.com'},
        {'name': 'server_port', 'value': '21050'},
      ],
      'id': 1, 'category': 'editor', 'description': '', 'is_sql': True
    },
  ]

  def setUp(self):
    # These tests are only meaningful when the connectors feature is enabled.
    if not has_connectors():
      raise SkipTest

    self.client = make_logged_in_client(username="test", groupname="default", recreate=True, is_superuser=False)
    self.user = rewrite_user(User.objects.get(username="test"))

    grant_access("test", "default", "notebook")

  def test_execute_impala(self):
    """Posting an execute call through a connector should return status 0 with a fully mocked Thrift client."""
    # NOTE: these class attributes previously were looked up on TestApi, which
    # does not define them (stale references after a class rename) -> AttributeError.
    with patch('desktop.lib.connectors.api.CONNECTOR_INSTANCES', self.CONNECTOR):
      with patch('desktop.lib.thrift_util.get_client') as get_client:
        tclient = Mock()
        # One generic successful Thrift response reused for every HS2 RPC below.
        successfullCall = Mock(
          return_value=Mock(
            status=Mock(
              statusCode=TStatusCode.SUCCESS_STATUS
            ),
            sessionHandle=Mock(
              sessionId=Mock(
                secret='\x7f\x98\x97s\xe1\xa8G\xf4\x8a\x8a\\r\x0e6\xc2\xee\xf0',
                guid='\xfa\xb0/\x04 \xfeDX\x99\xfcq\xff2\x07\x02\xfe',
              )
            ),
            configuration={},
            serverProtocolVersion=TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V7,
            # TFetchResultsResp
            results=Mock(
              startRowOffset=0,
              rows=[],
              columns=[]
            ),
            # ExecuteStatement
            operationHandle=Mock(
              operationId=Mock(
                secret='\x7f\x98\x97s\xe1\xa8G\xf4\x8a\x8a\\r\x0e6\xc2\xee\xf0',
                guid='\xfa\xb0/\x04 \xfeDX\x99\xfcq\xff2\x07\x02\xfe',
              ),
              hasResultSet=True,
              operationType=TOperationType.EXECUTE_STATEMENT,
              modifiedRowCount=0
            ),
          )
        )
        tclient.OpenSession = successfullCall
        tclient.ExecuteStatement = successfullCall
        tclient.FetchResults = successfullCall
        tclient.GetResultSetMetadata = successfullCall
        tclient.CloseOperation = successfullCall

        get_client.return_value = tclient
        tclient.get_coordinator_host = Mock(return_value={})

        response = self.client.post(reverse('notebook:execute'), {
          'notebook': self.NOTEBOOK_JSON,
          'snippet': json.dumps(json.loads(self.NOTEBOOK_JSON)['snippets'][0]),
        })

        get_client.assert_called()

        assert_equal(response.status_code, 200)
        data = json.loads(response.content)
        assert_equal(data['status'], 0)

  def test_autocomplete_database_impala(self):
    """Database autocomplete should surface the databases returned by the dbms layer."""
    with patch('desktop.lib.connectors.api.CONNECTOR_INSTANCES', self.CONNECTOR):
      with patch('beeswax.server.dbms.get') as get:
        get.return_value = Mock(
          get_databases=Mock(
            return_value=[{'comment': '', 'hdfs_link': 'hdfs://table'}]
          )
        )

        response = self.client.post(reverse('notebook:api_autocomplete_databases'), {
          'notebook': self.NOTEBOOK_JSON,
          'snippet': json.dumps(json.loads(self.NOTEBOOK_JSON)['snippets'][0]),
        })

        get.assert_called()

        assert_equal(response.status_code, 200)
        data = json.loads(response.content)
        assert_equal(data['status'], 0)
        assert_equal(data['databases'], [{u'comment': u'', u'hdfs_link': u'hdfs://table'}])

  def test_sample_data_table_sync_impala(self):
    """Synchronous table sample should return headers and rows from get_sample()."""
    with patch('desktop.lib.connectors.api.CONNECTOR_INSTANCES', self.CONNECTOR):
      with patch('beeswax.server.dbms.get') as get:
        get.return_value = Mock(
          get_table=Mock(
            return_value=Mock(is_impala_only=False)
          ),
          get_sample=Mock(
            return_value=Mock(
              rows=Mock(return_value=[[1], [2]]),
              cols=Mock(return_value=['name']),
              full_cols=Mock(return_value=[{'name': 'name'}])
            )
          )
        )

        response = self.client.post(
          reverse('notebook:api_sample_data', kwargs={'database': 'sfdc', 'table': 'customers'}), {
            'notebook': self.NOTEBOOK_JSON,
            'snippet': json.dumps(json.loads(self.NOTEBOOK_JSON)['snippets'][0]),
          }
        )

        get.assert_called()

        assert_equal(response.status_code, 200)
        data = json.loads(response.content)
        assert_equal(data['status'], 0)
        assert_equal(data['headers'], ['name'])
        assert_equal(data['full_headers'], [{'name': 'name'}])
        assert_equal(data['rows'], [[1], [2]])

  def test_sample_data_table_async_impala(self):
    """Async table sample should return an operation handle wrapping the sample SELECT."""
    with patch('desktop.lib.connectors.api.CONNECTOR_INSTANCES', self.CONNECTOR):
      with patch('beeswax.server.dbms.get') as get:
        get.return_value = Mock(
          get_table=Mock(
            return_value=Mock(is_impala_only=False)
          ),
          server_name='impala-xx',
          # In the async path get_sample() returns the statement, not a result set.
          get_sample=Mock(
            return_value='SELECT * from customers'
          ),
          client=Mock(
            user=self.user,
            query=Mock(
              return_value=Mock(
                get=Mock(
                  return_value=('server_id', 'server_guid')
                ),
                log_context='log_context',
                has_result_set=True,
                session_guid='session_guid',
                modified_row_count=0,
                operation_type=1
              ),
            )
          )
        )

        response = self.client.post(
          reverse('notebook:api_sample_data', kwargs={'database': 'sfdc', 'table': 'customers'}), {
            'notebook': self.NOTEBOOK_JSON,
            'snippet': json.dumps(json.loads(self.NOTEBOOK_JSON)['snippets'][0]),
            'async': '"true"'
          }
        )

        get.assert_called()

        assert_equal(response.status_code, 200)
        data = json.loads(response.content)
        assert_equal(data['status'], 0)
        assert_equal(data['result']['handle']['secret'], 'server_id')
        assert_equal(data['result']['handle']['statement'], 'SELECT * from customers')
class TestApi():
  """Unit tests for HS2Api with the database and HS2 session layers mocked out."""

  def setUp(self):
    self.client = make_logged_in_client(username="test", groupname="default", recreate=True, is_superuser=False)
    self.user = rewrite_user(User.objects.get(username="test"))

  @patch('notebook.connectors.hiveserver2.has_jobbrowser', True)
  def test_get_jobs_with_jobbrowser(self):
    # When Job Browser is installed, each parsed job id should link to it.
    notebook = Mock()
    snippet = {'type': 'hive', 'properties': {}}
    logs = ''

    with patch('notebook.connectors.hiveserver2.HS2Api._get_hive_execution_engine') as _get_hive_execution_engine:
      with patch('notebook.connectors.hiveserver2.parse_out_jobs') as parse_out_jobs:
        _get_hive_execution_engine.return_value = 'tez'
        parse_out_jobs.return_value = [{'job_id': 'job_id_00001'}]

        jobs = HS2Api(self.user).get_jobs(notebook, snippet, logs)

        assert_true(jobs, jobs)
        assert_equal(jobs[0]['name'], 'job_id_00001')
        assert_equal(jobs[0]['url'], '/jobbrowser/jobs/job_id_00001')

  @patch('notebook.connectors.hiveserver2.has_jobbrowser', False)
  def test_get_jobs_without_jobbrowser(self):
    # Without Job Browser the job is still listed but has no URL.
    notebook = Mock()
    snippet = {'type': 'hive', 'properties': {}}
    logs = ''

    with patch('notebook.connectors.hiveserver2.HS2Api._get_hive_execution_engine') as _get_hive_execution_engine:
      with patch('notebook.connectors.hiveserver2.parse_out_jobs') as parse_out_jobs:
        _get_hive_execution_engine.return_value = 'tez'
        parse_out_jobs.return_value = [{'job_id': 'job_id_00001'}]

        jobs = HS2Api(self.user).get_jobs(notebook, snippet, logs)

        assert_true(jobs, jobs)
        assert_equal(jobs[0]['name'], 'job_id_00001')
        assert_equal(jobs[0]['url'], '')  # Is empty when Job Browser is not available

  def test_close_statement(self):
    """close_statement() succeeds with a full handle and returns -1 when the handle is incomplete."""
    with patch('notebook.connectors.hiveserver2.HS2Api._get_db') as _get_db:
      _get_db.return_value = Mock(
        use=Mock(
        ),
        client=Mock(
          query=Mock(
            side_effect=QueryServerException(
              Exception('Execution error!'),
              message='Error while compiling statement: FAILED: HiveAccessControlException Permission denied'
            )
          ),
        ),
      )
      notebook = {}
      # Snippet with a complete operation handle (guid + secret present).
      snippet = {
        'id': '7ccdd296-20a3-da33-16ec-db58149aba0b', 'type': 'impala', 'status': 'running', 'statementType': 'text',
        'statement': 'SELECT *\nFROM `default`.sample_07\nLIMIT 100\n;', 'aceCursorPosition': None, 'statementPath': '',
        'associatedDocumentUuid': None,
        'properties': {'settings': []},
        'result': {
          'id': 'd9a8dc1b-7f6d-169f-7dd7-660723cba3f4', 'type': 'table',
          'handle': {
            'secret': 'obUXjEDWTh+ke73YLlOlMw==', 'guid': '2iv5rEXrRE4AAAAABtXdxA==', 'operation_type': 0, 'has_result_set': True,
            'modified_row_count': None, 'log_context': None, 'session_guid': '2440c57bc3806c6e:598514f42764cc91', 'session_id': 2094,
            'session_type': 'impala', 'statement_id': 0, 'has_more_statements': False, 'statements_count': 1,
            'previous_statement_hash': '39b8e5b3c37fda5ebd438da23f3e198c914750a64aa147f819a6a1e0', 'start': {'row': 0, 'column': 0},
            'end': {'row': 0, 'column': 43}, 'statement': 'SELECT *\nFROM `default`.sample_07\nLIMIT 100'
          }
        }, 'database': 'default',
        'compute': {
          'id': 'default', 'name': 'default', 'namespace': 'default', 'interface': 'impala', 'type': 'direct', 'options': {}
        }, 'wasBatchExecuted': False, 'dialect': 'impala'
      }

      api = HS2Api(self.user)

      response = api.close_statement(notebook, snippet)
      assert_equal(response['status'], 0)

      # Same snippet but the handle is missing 'guid' and 'secret': close must fail gracefully.
      snippet = {
        'id': '7ccdd296-20a3-da33-16ec-db58149aba0b', 'type': 'impala', 'status': 'running',
        'statementType': 'text', 'statement': 'SELECT *\nFROM `default`.sample_07\nLIMIT 100\n;',
        'aceCursorPosition': None, 'statementPath': '', 'associatedDocumentUuid': None,
        'properties': {'settings': []},
        'result': {
          'id': 'd9a8dc1b-7f6d-169f-7dd7-660723cba3f4', 'type': 'table',
          'handle': {
            'has_more_statements': False, 'statement_id': 0, 'statements_count': 1,
            'previous_statement_hash': '39b8e5b3c37fda5ebd438da23f3e198c914750a64aa147f819a6a1e0'
          }
        }, 'database': 'default', 'compute': {'id': 'default', 'name': 'default', 'namespace': 'default',
        'interface': 'impala', 'type': 'direct', 'options': {}}, 'wasBatchExecuted': False, 'dialect': 'impala'
      }

      response = api.close_statement(notebook, snippet)
      assert_equal(response['status'], -1)  # snippet['result']['handle'] ['guid'] and ['secret'] are missing

  def test_get_error_message_from_query(self):
    """A QueryServerException raised by the client should surface as a QueryError with its message."""
    with patch('notebook.connectors.hiveserver2.HS2Api._get_db') as _get_db:
      with patch('notebook.connectors.hiveserver2.HS2Api._get_current_statement') as _get_current_statement:
        with patch('notebook.connectors.hiveserver2.HS2Api._get_session') as _get_session:
          with patch('notebook.connectors.hiveserver2.HS2Api._prepare_hql_query') as _prepare_hql_query:
            with patch('notebook.connectors.hiveserver2.HS2Api._get_session_by_id') as _get_session_by_id:
              _get_db.return_value = Mock(
                use=Mock(
                ),
                client=Mock(
                  query=Mock(
                    side_effect=QueryServerException(
                      Exception('Execution error!'),
                      message='Error while compiling statement: FAILED: HiveAccessControlException Permission denied'
                    )
                  ),
                ),
              )
              notebook, snippet = {}, {'type': 'hive'}

              api = HS2Api(self.user)

              assert_raises(QueryError, api.execute, notebook, snippet)
              try:
                api = api.execute(notebook, snippet)
              except QueryError as e:
                assert_equal(
                  e.message,
                  'Error while compiling statement: FAILED: HiveAccessControlException Permission denied',
                )

  def test_autocomplete_time_out(self):
    """A read-timeout error from _autocomplete should be re-raised as QueryExpired."""
    snippet = {'type': 'hive', 'properties': {}}

    with patch('notebook.connectors.hiveserver2._autocomplete') as _autocomplete:
      _autocomplete.return_value = {
        'code': 500,
        'error': "HTTPSConnectionPool(host='gethue.com', port=10001): Read timed out. (read timeout=120)"
      }
      api = HS2Api(self.user)

      try:
        api.autocomplete(snippet, database='database')
        # Fix: 'assert_false' was used here but is not imported (NameError on
        # this failure path); assert_true(False, ...) is equivalent and imported.
        assert_true(False, 'autocomplete() should have raised QueryExpired')
      except QueryExpired as e:
        assert_equal(e.message, "HTTPSConnectionPool(host='gethue.com', port=10001): Read timed out. (read timeout=120)")

  def test_autocomplete_functions_hive(self):
    """Function autocomplete should return the UDF list produced by beeswax."""
    snippet = {'type': 'hive', 'properties': {}}

    with patch('notebook.connectors.hiveserver2.HS2Api._get_db') as _get_db:
      with patch('beeswax.api._get_functions') as _get_functions:
        _get_functions.return_value = [
          {'name': 'f1'}, {'name': 'f2'}, {'name': 'f3'}
        ]

        api = HS2Api(self.user)
        data = api.autocomplete(snippet, operation='functions')

        assert_equal(
          data['functions'],
          [{'name': 'f1'}, {'name': 'f2'}, {'name': 'f3'}]
        )
class TestHiveserver2ApiNonMock(object):
def setUp(self):
self.client = make_logged_in_client(username="test", groupname="test", recreate=False, is_superuser=False)
self.user = User.objects.get(username='test')
add_to_group('test')
grant_access("test", "test", "notebook")
grant_access("test", "test", "hive")
self.db = dbms.get(self.user, get_query_server_config())
self.api = HS2Api(self.user)
  def test_prepare_hql_query(self):
    """_prepare_hql_query() must carry session resources (files, UDFs, settings)
    into the generated query object and its configuration statements
    (ADD JAR / CREATE TEMPORARY FUNCTION)."""
    statement = "SELECT myUpper(description) FROM sample_07 LIMIT 10"

    # Snippet whose properties register a custom UDF used by the statement.
    # NOTE(review): the "secret" value below looks truncated by anonymization
    # ("<KEY" with no closing quote) — if so, json.loads() will fail; verify
    # against the upstream file.
    snippet_json = """
        {
            "status": "running",
            "database": "default",
            "properties": {
                "files": [],
                "functions": [{
                    "class_name": "org.hue.udf.MyUpper",
                    "name": "myUpper"
                }],
                "settings": []
            },
            "result": {
                "handle": {
                    "log_context": null,
                    "statements_count": 1,
                    "statement_id": 0,
                    "has_more_statements": false,
                    "secret": "<KEY>
                    "has_result_set": true,
                    "operation_type": 0,
                    "modified_row_count": null,
                    "guid": "ZxOd4IjqTeK1PUTq+MdcDA=="
                },
                "type": "table",
                "id": "ae81b805-dcf1-9692-0452-797681e997ed"
            },
            "statement": "%(statement)s",
            "type": "hive",
            "id": "9b50e364-f7b2-303d-e924-db8b0bd9866d"
        }
    """ % {'statement': statement}

    # Session carrying a jar file resource, the UDF registration and a Hive setting.
    session_json = """
            {
                "type": "hive",
                "properties": [
                    {
                        "multiple": true,
                        "value": [
                            {
                                "path": "/user/test/myudfs.jar",
                                "type": "jar"
                            }
                        ],
                        "nice_name": "Files",
                        "key": "files",
                        "help_text": "Add one or more files, jars, or archives to the list of resources.",
                        "type": "hdfs-files"
                    },
                    {
                        "multiple": true,
                        "value": [
                            {
                                "class_name": "org.hue.udf.MyUpper",
                                "name": "myUpper"
                            }
                        ],
                        "nice_name": "Functions",
                        "key": "functions",
                        "help_text": "Add one or more registered UDFs (requires function name and fully-qualified class name).",
                        "type": "functions"
                    },
                    {
                        "multiple": true,
                        "value": [
                            {
                                "value": "spark",
                                "key": "hive.execution.engine"
                            }
                        ],
                        "nice_name": "Settings",
                        "key": "settings",
                        "help_text": "Hive and Hadoop configuration properties.",
                        "type": "settings",
                        "options": [
                            "hive.map.aggr",
                            "hive.exec.compress.output",
                            "hive.exec.parallel",
                            "hive.execution.engine",
                            "mapreduce.job.queuename"
                        ]
                    }
                ],
                "id": 30
            }
    """

    snippet = json.loads(snippet_json)
    session = json.loads(session_json)

    hql_query = self.api._prepare_hql_query(snippet, statement, session)

    # Session resources must be reflected on the query object itself...
    assert_equal([{'key': 'hive.execution.engine', 'value': 'spark'}], hql_query.settings)
    assert_equal([{'type': 'jar', 'path': '/user/test/myudfs.jar'}], hql_query.file_resources)
    assert_equal([{'name': 'myUpper', 'class_name': 'org.hue.udf.MyUpper'}], hql_query.functions)

    # ...and in the generated configuration statements (ADD JAR uses the full hdfs:// URI).
    config_statements = ', '.join(hql_query.get_configuration_statements())

    pattern = re.compile("ADD JAR hdfs://[A-Za-z0-9.:_-]+/user/test/myudfs.jar")
    assert_true(pattern.search(config_statements), config_statements)
    assert_true("CREATE TEMPORARY FUNCTION myUpper AS 'org.hue.udf.MyUpper'" in config_statements, config_statements)
  def test_upgrade_properties(self):
    """upgrade_properties() must migrate legacy flat key/value properties into
    the structured files/functions/settings format, defaulting when the input
    is unrecognized and leaving already-migrated properties untouched."""
    properties = None

    # Verify that upgrade will return defaults if current properties not formatted as settings
    upgraded_props = self.api.upgrade_properties(lang='hive', properties=properties)
    assert_equal(upgraded_props, self.api.get_properties(lang='hive'))

    # Verify that upgrade will save old properties and new settings
    properties = [
      {
        'key': 'hive.execution.engine',
        'value': 'mr'
      },
      {
        'key': 'hive.exec.compress.output',
        'value': False
      }
    ]
    upgraded_props = self.api.upgrade_properties(lang='hive', properties=properties)
    # The legacy key/value pairs should end up under the 'settings' entry.
    settings = next((prop for prop in upgraded_props if prop['key'] == 'settings'), None)
    assert_equal(settings['value'], properties)

    # Verify that already upgraded properties will be unchanged
    properties = [
      {
        "multiple": True,
        "value": [],
        "nice_name": "Files",
        "key": "files",
        "help_text": "Add one or more files, jars, or archives to the list of resources.",
        "type": "hdfs-files"
      },
      {
        "multiple": True,
        "value": [],
        "nice_name": "Functions",
        "key": "functions",
        "help_text": "Add one or more registered UDFs (requires function name and fully-qualified class name).",
        "type": "functions"
      },
      {
        "multiple": True,
        "value": [
          {
            "key": "hive.execution.engine",
            "value": "spark"
          }
        ],
        "nice_name": "Settings",
        "key": "settings",
        "help_text": "Hive and Hadoop configuration properties.",
        "type": "settings",
        "options": [
          "hive.map.aggr",
          "hive.exec.compress.output",
          "hive.exec.parallel",
          "hive.execution.engine",
          "mapreduce.job.queuename"
        ]
      }
    ]
    upgraded_props = self.api.upgrade_properties(lang='hive', properties=properties)
    # Structured input must round-trip unchanged.
    assert_equal(upgraded_props, properties)
def test_progress(self):
    """progress() must estimate query completion from engine logs.

    For Hive: 5% once the job is submitted, 50% after the first MapReduce
    job of a multi-job query has ended.  For Impala: the literal percentage
    parsed from the latest '% Complete' log line.
    """
    snippet = json.loads("""
{
"status": "running",
"database": "default",
"id": "d70d31ee-a62a-4854-b2b1-b852f6a390f5",
"result": {
"type": "table",
"handle": {
"statement_id": 0,
"statements_count": 2,
"has_more_statements": true
},
"id": "ca11fcb1-11a5-f534-8200-050c8e1e57e3"
},
"statement": "%(statement)s",
"type": "hive",
"properties": {
"files": [],
"functions": [],
"settings": []
}
}
""" % {'statement': "SELECT * FROM sample_07;"}
    )
    # Hive logs up to job submission: progress should report 5%.
    logs = """INFO : Compiling command(queryId=hive_20160620133030_7e69739c-a00b-4170-8717-9eee331130eb): SELECT app,
AVG(bytes) AS avg_bytes
FROM web_logs
GROUP BY app
HAVING app IS NOT NULL
ORDER BY avg_bytes DESC
INFO : Semantic Analysis Completed
INFO : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:app, type:string, comment:null), FieldSchema(name:avg_bytes, type:double, comment:null)], properties:null)
INFO : Completed compiling command(queryId=hive_20160620133030_7e69739c-a00b-4170-8717-9eee331130eb); Time taken: 0.116 seconds
INFO : Executing command(queryId=hive_20160620133030_7e69739c-a00b-4170-8717-9eee331130eb): SELECT app,
AVG(bytes) AS avg_bytes
FROM web_logs
GROUP BY app
HAVING app IS NOT NULL
ORDER BY avg_bytes DESC
INFO : Query ID = hive_20160620133030_7e69739c-a00b-4170-8717-9eee331130eb
INFO : Total jobs = 2
INFO : Launching Job 1 out of 2
INFO : Starting task [Stage-1:MAPRED] in serial mode
INFO : Number of reduce tasks not specified. Estimated from input data size: 1
INFO : In order to change the average load for a reducer (in bytes):
INFO : set hive.exec.reducers.bytes.per.reducer=<number>
INFO : In order to limit the maximum number of reducers:
INFO : set hive.exec.reducers.max=<number>
INFO : In order to set a constant number of reducers:
INFO : set mapreduce.job.reduces=<number>
INFO : number of splits:1
INFO : Submitting tokens for job: job_1466104358744_0003
INFO : The url to track the job: http://jennykim-1.vpc.cloudera.com:8088/proxy/application_1466104358744_0003/
"""
    assert_equal(self.api.progress({}, snippet, logs=logs), 5)
    # After the first of the two MR jobs completes, progress should be 50%.
    logs += """INFO : Starting Job = job_1466104358744_0003, Tracking URL = """\
"""http://jennykim-1.vpc.cloudera.com:8088/proxy/application_1466104358744_0003/
INFO : Kill Command = /usr/lib/hadoop/bin/hadoop job -kill job_1466104358744_0003
INFO : Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 1
INFO : 2016-06-20 13:30:34,494 Stage-1 map = 0%, reduce = 0%
INFO : 2016-06-20 13:30:47,081 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.13 sec
INFO : 2016-06-20 13:30:58,606 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 5.59 sec
INFO : MapReduce Total cumulative CPU time: 5 seconds 590 msec
INFO : Ended Job = job_1466104358744_0003
"""
    assert_equal(self.api.progress({}, snippet, logs=logs), 50)
    # Same check for an Impala snippet: progress is read straight from the
    # latest '% Complete' line in the logs.
    snippet = json.loads("""
{
"status": "running",
"database": "default",
"id": "d70d31ee-a62a-4854-b2b1-b852f6a390f5",
"result": {
"type": "table",
"handle": {
"statement_id": 0,
"statements_count": 2,
"has_more_statements": true
},
"id": "ca11fcb1-11a5-f534-8200-050c8e1e57e3"
},
"statement": "%(statement)s",
"type": "impala",
"properties": {
"files": [],
"functions": [],
"settings": []
}
}
""" % {'statement': "SELECT * FROM sample_07;"}
    )
    logs = "Query 734a81444c85be66:d05f3bb1a6c2d0a5: 0% Complete (1 out of 4693)"
    assert_equal(self.api.progress({}, snippet, logs=logs), 0)
    logs += """Query 734a81444c85be66:d05f3bb1a6c2d0a5: 20% Complete (4 out of 4693)
Query 734a81444c85be66:d05f3bb1a6c2d0a5: 30% Complete (7 out of 4693)
Query 734a81444c85be66:d05f3bb1a6c2d0a5: 40% Complete (7 out of 4693)
Query 734a81444c85be66:d05f3bb1a6c2d0a5: 50% Complete (234 out of 4693)
"""
    assert_equal(self.api.progress({}, snippet, logs=logs), 50)
def test_get_jobs(self):
    """get_jobs() must extract Hadoop job ids from Hive logs and track their
    started/finished state as more log lines arrive.

    Fix: the original used ``assert_true(len(jobs), 1)``, where the second
    argument of ``assert_true`` is the failure *message*, not an expected
    value — the check passed for any non-empty list.  Replaced with
    ``assert_equal(1, len(jobs))`` so the job count is actually asserted.
    """
    notebook = json.loads("""
{
"uuid": "f5d6394d-364f-56e8-6dd3-b1c5a4738c52",
"id": 1234,
"sessions": [{"type": "hive", "properties": [], "id": "1234"}],
"type": "query-hive",
"name": "Test Hiveserver2 Editor",
"isSaved": false,
"parentUuid": null
}
""")
    snippet = json.loads("""
{
"status": "running",
"database": "default",
"id": "d70d31ee-a62a-4854-b2b1-b852f6a390f5",
"result": {
"type": "table",
"handle": {
"statement_id": 0,
"statements_count": 2,
"has_more_statements": true
},
"id": "ca11fcb1-11a5-f534-8200-050c8e1e57e3"
},
"statement": "%(statement)s",
"type": "hive",
"properties": {
"files": [],
"functions": [],
"settings": []
}
}
""" % {'statement': "SELECT * FROM sample_07;"}
    )
    # Logs up to (and including) job submission: the job is started but not
    # yet finished.
    logs = """INFO : Compiling command(queryId=hive_20160624155555_c81f8b95-af22-45fd-8e2c-fb012f530f13): SELECT app,
AVG(bytes) AS avg_bytes
FROM web_logs
GROUP BY app
HAVING app IS NOT NULL
ORDER BY avg_bytes DESC
INFO : Semantic Analysis Completed
INFO : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:app, type:string, comment:null), FieldSchema(name:avg_bytes, type:double, comment:null)], properties:null)
INFO : Completed compiling command(queryId=hive_20160624155555_c81f8b95-af22-45fd-8e2c-fb012f530f13); Time taken: 0.073 seconds
INFO : Executing command(queryId=hive_20160624155555_c81f8b95-af22-45fd-8e2c-fb012f530f13): SELECT app,
AVG(bytes) AS avg_bytes
FROM web_logs
GROUP BY app
HAVING app IS NOT NULL
ORDER BY avg_bytes DESC
INFO : Query ID = hive_20160624155555_c81f8b95-af22-45fd-8e2c-fb012f530f13
INFO : Total jobs = 2
INFO : Launching Job 1 out of 2
INFO : Starting task [Stage-1:MAPRED] in serial mode
INFO : Number of reduce tasks not specified. Estimated from input data size: 1
INFO : In order to change the average load for a reducer (in bytes):
INFO : set hive.exec.reducers.bytes.per.reducer=<number>
INFO : In order to limit the maximum number of reducers:
INFO : set hive.exec.reducers.max=<number>
INFO : In order to set a constant number of reducers:
INFO : set mapreduce.job.reduces=<number>
INFO : number of splits:1
INFO : Submitting tokens for job: job_1466630204796_0059
INFO : The url to track the job: http://jennykim-1.vpc.cloudera.com:8088/proxy/application_1466630204796_0059/
INFO : Starting Job = job_1466630204796_0059, Tracking URL = http://jennykim-1.vpc.cloudera.com:8088/proxy/application_1466630204796_0059/
INFO : Kill Command = /usr/lib/hadoop/bin/hadoop job -kill job_1466630204796_0059
"""
    jobs = self.api.get_jobs(notebook, snippet, logs)
    assert_true(isinstance(jobs, list))
    # assert_equal, not assert_true: assert_true(len(jobs), 1) would treat 1
    # as the message and pass for any non-empty list.
    assert_equal(1, len(jobs))
    assert_equal(jobs[0]['name'], 'job_1466630204796_0059')
    assert_equal(jobs[0]['started'], True)
    assert_equal(jobs[0]['finished'], False)
    assert_true('url' in jobs[0])
    # Once the 'Ended Job' line appears, the same job must be reported as
    # finished.
    logs += """INFO : Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 1
INFO : 2016-06-24 15:55:51,125 Stage-1 map = 0%, reduce = 0%
INFO : 2016-06-24 15:56:00,410 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 2.12 sec
INFO : 2016-06-24 15:56:09,709 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 4.04 sec
INFO : MapReduce Total cumulative CPU time: 4 seconds 40 msec
INFO : Ended Job = job_1466630204796_0059
INFO : Launching Job 2 out of 2
"""
    jobs = self.api.get_jobs(notebook, snippet, logs)
    assert_equal(1, len(jobs))
    assert_equal(jobs[0]['name'], 'job_1466630204796_0059')
    assert_equal(jobs[0]['started'], True)
    assert_equal(jobs[0]['finished'], True)
def test_get_current_statement(self):
    """_get_current_statement() must compute a stable hash of the previous
    statement even when it contains non-ASCII text (the statement has to be
    encoded before hashing)."""
    snippet = json.loads("""
{
"status": "running",
"database": "default",
"id": "d70d31ee-a62a-4854-b2b1-b852f6a390f5",
"result": {
"type": "table",
"handle": {
"statement_id": 0,
"statements_count": 1,
"has_more_statements": false
},
"id": "ca11fcb1-11a5-f534-8200-050c8e1e57e3"
},
"statement": "%(statement)s",
"type": "hive",
"properties": {
"files": [],
"functions": [],
"settings": []
}
}
""" % {'statement': u"SELECT 'Привет', '你好';"}
    )
    # MockDb stands in for the real DBMS client; only close_operation() may
    # be called on it.
    statement = self.api._get_current_statement(MockDb(), snippet)
    # Expected value is the fixed digest of the unicode statement above.
    assert_equal('086ecec9a8b89b1b47cce358bdbb343be23b1f8b54ca76bc81927e27', statement['previous_statement_hash'])
def test_plan_extraction_from_profile(self):
    """_get_impala_profile_plan() must extract exactly the header/plan
    section of a raw Impala query profile (the module-level sample fixtures
    define both the full profile and its expected plan prefix)."""
    query_plan = self.api._get_impala_profile_plan(
        query_id='e147228183f1f0b3:6f086cc600000000', profile=IMPALA_CUSTOMER_QUERY_SAMPLE_PROFILE
    )
    assert_true(query_plan)
    assert_equal(IMPALA_CUSTOMER_QUERY_SAMPLE_PROFILE_PLAN, query_plan)
class MockDb(object):
    """Minimal stand-in for a DBMS client used by test_get_current_statement.

    Fix: this was previously a *function* that merely defined a local
    ``close_operation`` and implicitly returned ``None``, so the caller
    (``self.api._get_current_statement(MockDb(), snippet)``) received ``None``
    instead of an object exposing ``close_operation``.  As a class, the
    ``MockDb()`` call site is unchanged but now yields a usable stub.
    """

    def close_operation(self, handle):
        # No-op: the tests only need the call to succeed, not to reach a server.
        pass
class TestHiveserver2ApiWithHadoop(BeeswaxSampleProvider):
    """Integration tests for the HiveServer2 notebook API.

    These run real Hive/Impala queries against a live cluster and are
    skipped entirely when no live cluster is available.
    """

    integration = True  # flags the suite as requiring external services

    @classmethod
    def setup_class(cls):
        # Guard: the whole class is meaningless without a real cluster.
        if not is_live_cluster():
            raise SkipTest('These tests can only run on a live cluster')
        super(TestHiveserver2ApiWithHadoop, cls).setup_class(load_data=False)

    def setUp(self):
        """Install the sample tables, grant the test user access to the
        notebook/impala apps, and build an HS2Api bound to that user."""
        self.client.post('/beeswax/install_examples')
        self.user = User.objects.get(username='test')
        add_to_group('test')
        grant_access("test", "test", "notebook")
        grant_access("test", "test", "impala")
        self.db = dbms.get(self.user, get_query_server_config())
        self.cluster.fs.do_as_user('test', self.cluster.fs.create_home_dir, '/user/test')
        self.api = HS2Api(self.user)
        # Default statement reused by several tests below.
        self.statement = 'SELECT description, salary FROM sample_07 WHERE (sample_07.salary > 100000) ORDER BY salary DESC LIMIT 1000'

    def create_query_document(self, owner, query_type='hive', database='default',
                              name='Test Query', description='Test Query', statement='',
                              files=None, functions=None, settings=None):
        """
        Creates and returns a query Document2 object
        :param owner: owner of doc
        :param query_type: hive, impala or spark
        :param database: database name
        :param name: name of document
        :param description: description of document
        :param statement: SQL statement (can be multi-query statement)
        :param files: list of dicts representing files
        :param functions: list of dicts representing functions
        :param settings: list of dicts representing settings
        :return: Document2 object representing query
        """
        if query_type not in ('hive', 'impala', 'spark'):
            raise ValueError("Invalid query_type: %s" % query_type)
        notebook = make_notebook(name=name, description=description, editor_type=query_type, statement=statement,
                                 status='ready', database=database, files=files, functions=functions, settings=settings)
        notebook_doc, save_as = _save_notebook(notebook.get_data(), owner)
        return notebook_doc

    def get_snippet(self, notebook, snippet_idx=0):
        """Return snippet `snippet_idx` from `notebook`, guaranteeing the
        nested result/handle dicts exist so callers can assign into them."""
        data = notebook.get_data()
        snippet = data['snippets'][snippet_idx]
        if 'result' not in snippet:
            snippet['result'] = {}
        if 'handle' not in snippet['result']:
            snippet['result']['handle'] = {}
        return snippet

    def execute_and_wait(self, query_doc, snippet_idx=0, timeout=30.0, wait=1.0):
        """Execute the snippet via the notebook API and poll check_status
        every `wait` seconds until it reaches 'available' or `timeout`
        seconds elapse; returns the snippet with its result handle set."""
        notebook = Notebook(document=query_doc)
        snippet = self.get_snippet(notebook, snippet_idx=snippet_idx)
        curr = time.time()
        end = curr + timeout
        status = 'ready'
        response = self.client.post(reverse('notebook:execute'),
                                    {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
        # Re-fetch the snippet from a fresh Notebook before attaching the
        # handle returned by execute.
        notebook = Notebook(document=query_doc)
        snippet = self.get_snippet(notebook, snippet_idx=snippet_idx)
        data = json.loads(response.content)
        snippet['result']['handle'] = data['handle']
        while status != 'available' and curr <= end:
            response = self.client.post(reverse('notebook:check_status'),
                                        {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
            data = json.loads(response.content)
            status = data['query_status']['status']
            snippet['status'] = status
            time.sleep(wait)
            curr = time.time()
        if status != 'available':
            raise Exception('Query failed to complete or return results.')
        return snippet

    def test_query_with_unicode(self):
        """Non-ASCII literals must survive the execute round-trip and appear
        intact in the server logs."""
        statement = "SELECT * FROM sample_07 WHERE code='한';"
        doc = self.create_query_document(owner=self.user, statement=statement)
        notebook = Notebook(document=doc)
        snippet = self.get_snippet(notebook, snippet_idx=0)
        response = self.client.post(reverse('notebook:execute'),
                                    {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
        data = json.loads(response.content)
        assert_equal(0, data['status'], data)
        snippet['result']['handle'] = data['handle']
        response = self.client.post(reverse('notebook:get_logs'),
                                    {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
        data = json.loads(response.content)
        assert_equal(0, data['status'], data)
        assert_true("SELECT * FROM sample_07 WHERE code='한'" in smart_str(data['logs']))

    def test_get_current_statement(self):
        """Executing a multi-statement snippet twice must advance the handle
        through both statements with correct positions/counters."""
        multi_statement = "SELECT description, salary FROM sample_07 LIMIT 20;\r\nSELECT AVG(salary) FROM sample_07;"
        doc = self.create_query_document(owner=self.user, statement=multi_statement)
        notebook = Notebook(document=doc)
        snippet = self.get_snippet(notebook, snippet_idx=0)
        response = self.client.post(reverse('notebook:execute'),
                                    {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
        data = json.loads(response.content)
        # First execute runs statement 0 of 2 and reports more to come.
        assert_equal(0, data['status'], data)
        assert_equal(0, data['handle']['statement_id'], data)
        assert_equal(2, data['handle']['statements_count'], data)
        assert_equal(True, data['handle']['has_more_statements'], data)
        assert_equal({'row': 0, 'column': 0}, data['handle']['start'], data)
        assert_equal({'row': 0, 'column': 51}, data['handle']['end'], data)
        snippet['result']['handle'] = data['handle']
        response = self.client.post(reverse('notebook:execute'),
                                    {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
        data = json.loads(response.content)
        # Second execute runs statement 1 of 2 and reports completion.
        assert_equal(0, data['status'], data)
        assert_equal(1, data['handle']['statement_id'], data)
        assert_equal(2, data['handle']['statements_count'], data)
        assert_equal(False, data['handle']['has_more_statements'], data)
        assert_equal({'row': 1, 'column': 0}, data['handle']['start'], data)
        assert_equal({'row': 1, 'column': 33}, data['handle']['end'], data)

    def test_explain(self):
        """EXPLAIN through the notebook API should return the classic plan
        text when hive.explain.user is disabled."""
        # Hive 2 with Tez set hive.explain.user to true by default, but this test is expecting output when this setting
        # is set to false.
        doc = self.create_query_document(owner=self.user, statement=self.statement)
        notebook = Notebook(document=doc)
        snippet = self.get_snippet(notebook, snippet_idx=0)
        snippet['properties']['settings'].append({"key": "hive.explain.user", "value": "false"})
        response = self.client.post(reverse('notebook:explain'),
                                    {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
        data = json.loads(response.content)
        assert_equal(0, data['status'], data)
        assert_true('STAGE DEPENDENCIES' in data['explanation'], data)
        assert_equal(self.statement, data['statement'], data)

    def test_download(self):
        """Result download should stream a CSV attachment named after the
        query document."""
        statement = "SELECT 'hello world';"
        doc = self.create_query_document(owner=self.user, statement=statement)
        notebook = Notebook(document=doc)
        snippet = self.execute_and_wait(doc, snippet_idx=0)
        response = self.client.post(reverse('notebook:download'),
                                    {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet), 'format': 'csv'})
        assert_equal(200, response.status_code)
        assert_equal(('Content-Disposition', 'attachment; filename="Test Query.csv"'), response._headers['content-disposition'])

    def test_get_sample(self):
        """Sampling endpoints should return headers plus non-empty rows for
        both a whole table and a single column."""
        doc = self.create_query_document(owner=self.user, statement=self.statement)
        notebook = Notebook(document=doc)
        snippet = self.get_snippet(notebook, snippet_idx=0)
        response = self.client.post(reverse('notebook:api_sample_data',
                                            kwargs={'database': 'default', 'table': 'sample_07'}),
                                    {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
        data = json.loads(response.content)
        assert_equal(0, data['status'], data)
        assert_true('headers' in data)
        assert_true('rows' in data)
        assert_true(len(data['rows']) > 0)
        response = self.client.post(reverse('notebook:api_sample_data_column',
                                            kwargs={'database': 'default', 'table': 'sample_07', 'column': 'code'}),
                                    {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
        data = json.loads(response.content)
        assert_equal(0, data['status'], data)
        assert_true('headers' in data)
        assert_equal(['code'], data['headers'])
        assert_true('rows' in data)
        assert_true(len(data['rows']) > 0)

    def test_fetch_result_size_mr(self):
        """fetch_result_size on MapReduce must report rows/size from the job
        counters: None for job-less queries, real counts otherwise."""
        if not is_live_cluster():  # Mini-cluster does not have JHS
            raise SkipTest
        # Assert that a query with no job will return no rows or size
        statement = "SELECT 'hello world';"
        settings = [
            {
                'key': 'hive.execution.engine',
                'value': 'mr'
            }
        ]
        doc = self.create_query_document(owner=self.user, statement=statement, settings=settings)
        notebook = Notebook(document=doc)
        snippet = self.execute_and_wait(doc, snippet_idx=0)
        response = self.client.post(reverse('notebook:fetch_result_size'),
                                    {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
        data = json.loads(response.content)
        assert_equal(0, data['status'], data)
        assert_true('result' in data)
        assert_true('rows' in data['result'])
        assert_true('size' in data['result'])
        assert_equal(None, data['result']['rows'])
        assert_equal(None, data['result']['size'])
        # Assert that a query with map & reduce task returns rows
        statement = "SELECT DISTINCT code FROM sample_07;"
        doc = self.create_query_document(owner=self.user, statement=statement, settings=settings)
        notebook = Notebook(document=doc)
        snippet = self.execute_and_wait(doc, snippet_idx=0, timeout=60.0, wait=2.0)
        response = self.client.post(reverse('notebook:fetch_result_size'),
                                    {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
        data = json.loads(response.content)
        assert_equal(0, data['status'], data)
        assert_true('result' in data)
        assert_true('rows' in data['result'])
        assert_true('size' in data['result'])
        assert_equal(823, data['result']['rows'])
        assert_true(data['result']['size'] > 0, data['result'])
        # Assert that a query with multiple jobs returns rows
        statement = "SELECT app, COUNT(1) AS count FROM web_logs GROUP BY app ORDER BY count DESC;"
        doc = self.create_query_document(owner=self.user, statement=statement, settings=settings)
        notebook = Notebook(document=doc)
        snippet = self.execute_and_wait(doc, snippet_idx=0, timeout=120.0, wait=2.0)
        response = self.client.post(reverse('notebook:fetch_result_size'),
                                    {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
        data = json.loads(response.content)
        assert_equal(0, data['status'], data)
        assert_true('result' in data)
        assert_true('rows' in data['result'])
        assert_equal(23, data['result']['rows'])
        assert_true(data['result']['size'] > 0, data['result'])

    def test_fetch_result_size_spark(self):
        """Same rows/size contract as the MR variant, but with Hive-on-Spark
        as the execution engine."""
        if not is_live_cluster() or not is_hive_on_spark():
            raise SkipTest
        # TODO: Add session cleanup here so we don't have orphan spark sessions
        # Assert that a query with no job will return no rows or size
        statement = "SELECT 'hello world';"
        settings = [
            {
                'key': 'hive.execution.engine',
                'value': 'spark'
            }
        ]
        doc = self.create_query_document(owner=self.user, statement=statement, settings=settings)
        notebook = Notebook(document=doc)
        snippet = self.execute_and_wait(doc, snippet_idx=0)
        response = self.client.post(reverse('notebook:fetch_result_size'),
                                    {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
        data = json.loads(response.content)
        assert_equal(0, data['status'], data)
        assert_true('result' in data)
        assert_true('rows' in data['result'])
        assert_true('size' in data['result'])
        assert_equal(None, data['result']['rows'])
        assert_equal(None, data['result']['size'])
        # Assert that a query that runs a job will return rows and size
        statement = "SELECT app, COUNT(1) AS count FROM web_logs GROUP BY app ORDER BY count DESC;"
        doc = self.create_query_document(owner=self.user, statement=statement, settings=settings)
        notebook = Notebook(document=doc)
        snippet = self.execute_and_wait(doc, snippet_idx=0, timeout=60.0, wait=2.0)
        response = self.client.post(reverse('notebook:fetch_result_size'),
                                    {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
        data = json.loads(response.content)
        assert_equal(0, data['status'], data)
        assert_true('result' in data)
        assert_true('rows' in data['result'])
        assert_true('size' in data['result'])
        assert_equal(23, data['result']['rows'])
        assert_true(data['result']['size'] > 0)

    def test_fetch_result_size_impala(self):
        """Impala reports row counts (but no byte size) after results are
        fetched; verify for a grouped query and a partitioned full scan."""
        if not is_live_cluster():
            raise SkipTest
        # Create session so that session object is saved to DB for server URL lookup
        session = self.api.create_session(lang='impala')
        try:
            # Assert that a query that runs a job will return rows
            statement = "SELECT app, COUNT(1) AS count FROM web_logs GROUP BY app ORDER BY count DESC;"
            doc = self.create_query_document(owner=self.user, query_type='impala', statement=statement)
            notebook = Notebook(document=doc)
            snippet = self.execute_and_wait(doc, snippet_idx=0, timeout=60.0, wait=2.0)
            self.client.post(reverse('notebook:fetch_result_data'),
                             {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet), 'rows': 100, 'startOver': 'false'})
            response = self.client.post(reverse('notebook:fetch_result_size'),
                                        {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
            data = json.loads(response.content)
            assert_equal(0, data['status'], data)
            assert_true('result' in data)
            assert_true('rows' in data['result'])
            assert_true('size' in data['result'])
            assert_equal(23, data['result']['rows'])
            assert_equal(None, data['result']['size'])
            # Assert that selecting all from partitioned table works
            statement = "SELECT * FROM web_logs;"
            doc = self.create_query_document(owner=self.user, query_type='impala', statement=statement)
            notebook = Notebook(document=doc)
            snippet = self.execute_and_wait(doc, snippet_idx=0, timeout=60.0, wait=5.0)
            self.client.post(reverse('notebook:fetch_result_data'),
                             {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet), 'rows': 100, 'startOver': 'false'})
            response = self.client.post(reverse('notebook:fetch_result_size'),
                                        {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
            data = json.loads(response.content)
            assert_equal(0, data['status'], data)
            assert_true('result' in data)
            assert_true('rows' in data['result'])
            assert_equal(1000, data['result']['rows'])
        finally:
            # Always tear the Impala session down, even on assertion failure.
            self.api.close_session(session)

    def test_fetch_result_abbreviated(self):
        """Abbreviated Impala row counts (e.g. '1.00K') must still resolve to
        the exact numeric row count."""
        if not is_live_cluster():
            raise SkipTest
        # Create session so that session object is saved to DB for server URL lookup
        session = self.api.create_session(lang='impala')
        try:
            # Assert that abbreviated rows returned (e.g. - 1.00K) still returns actual rows
            statement = "SELECT * FROM web_logs;"
            doc = self.create_query_document(owner=self.user, query_type='impala', statement=statement)
            notebook = Notebook(document=doc)
            snippet = self.execute_and_wait(doc, snippet_idx=0, timeout=60.0, wait=5.0)
            self.client.post(reverse('notebook:fetch_result_data'),
                             {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet), 'rows': 100, 'startOver': 'false'})
            response = self.client.post(reverse('notebook:fetch_result_size'),
                                        {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
            data = json.loads(response.content)
            assert_equal(0, data['status'], data)
            assert_true('result' in data)
            assert_true('rows' in data['result'])
            assert_equal(1000, data['result']['rows'])
        finally:
            self.api.close_session(session)
# Fixture: the header + plan section of a raw Impala runtime profile for a
# nested-types customers/orders query.  test_plan_extraction_from_profile
# asserts that HS2Api._get_impala_profile_plan() returns exactly this text,
# so the string content must not be edited.
IMPALA_CUSTOMER_QUERY_SAMPLE_PROFILE_PLAN = """Query (id=e147228183f1f0b3:6f086cc600000000):
Summary:
Session ID: 4043f7580371e0e6:f1068bf772ce4cb3
Session Type: HIVESERVER2
HiveServer2 Protocol Version: V6
Start Time: 2017-10-13 10:47:09.373244000
End Time: 2017-10-13 10:50:08.731647000
Query Type: QUERY
Query State: FINISHED
Query Status: OK
Impala Version: impalad version 2.11.0-SNAPSHOT RELEASE (build e9a30f67655a8da5b8526507fbe853adbd184932)
User: romain
Connected User: romain
Delegated User:
Network Address: 172.21.3.229:60523
Default Db: default
Sql Statement:
-- Compute total amount per order for all customers
SELECT
c.id AS customer_id,
c.name AS customer_name,
o.order_id,
v.total
FROM
customers c,
c.orders o,
(SELECT SUM(price * qty) total FROM o.items) v
Coordinator: self-service-analytics-2.gce.cloudera.com:22000
Query Options (set by configuration): QUERY_TIMEOUT_S=600
Query Options (set by configuration and planner): QUERY_TIMEOUT_S=600,MT_DOP=0
Plan:
----------------
Max Per-Host Resource Reservation: Memory=0B
Per-Host Resource Estimates: Memory=42.00MB
WARNING: The following tables have potentially corrupt table statistics.
Drop and re-compute statistics to resolve this problem.
default.customers
WARNING: The following tables are missing relevant table and/or column statistics.
default.customers
F01:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
| Per-Host Resources: mem-estimate=0B mem-reservation=0B
PLAN-ROOT SINK
| mem-estimate=0B mem-reservation=0B
|
10:EXCHANGE [UNPARTITIONED]
| mem-estimate=0B mem-reservation=0B
| tuple-ids=3,1,0 row-size=75B cardinality=0
|
F00:PLAN FRAGMENT [RANDOM] hosts=1 instances=1
Per-Host Resources: mem-estimate=42.00MB mem-reservation=0B
01:SUBPLAN
| mem-estimate=0B mem-reservation=0B
| tuple-ids=3,1,0 row-size=75B cardinality=0
|
|--09:NESTED LOOP JOIN [CROSS JOIN]
| | mem-estimate=35B mem-reservation=0B
| | tuple-ids=3,1,0 row-size=75B cardinality=10
| |
| |--02:SINGULAR ROW SRC
| | parent-subplan=01
| | mem-estimate=0B mem-reservation=0B
| | tuple-ids=0 row-size=35B cardinality=1
| |
| 04:SUBPLAN
| | mem-estimate=0B mem-reservation=0B
| | tuple-ids=3,1 row-size=40B cardinality=10
| |
| |--08:NESTED LOOP JOIN [CROSS JOIN]
| | | mem-estimate=32B mem-reservation=0B
| | | tuple-ids=3,1 row-size=40B cardinality=1
| | |
| | |--05:SINGULAR ROW SRC
| | | parent-subplan=04
| | | mem-estimate=0B mem-reservation=0B
| | | tuple-ids=1 row-size=32B cardinality=1
| | |
| | 07:AGGREGATE [FINALIZE]
| | | output: sum(price * qty)
| | | mem-estimate=10.00MB mem-reservation=0B spill-buffer=2.00MB
| | | tuple-ids=3 row-size=8B cardinality=1
| | |
| | 06:UNNEST [o.items]
| | parent-subplan=04
| | mem-estimate=0B mem-reservation=0B
| | tuple-ids=2 row-size=0B cardinality=10
| |
| 03:UNNEST [c.orders o]
| parent-subplan=01
| mem-estimate=0B mem-reservation=0B
| tuple-ids=1 row-size=0B cardinality=10
|
00:SCAN HDFS [default.customers c, RANDOM]
partitions=1/1 files=1 size=15.44KB
predicates: !empty(c.orders)
stats-rows=0 extrapolated-rows=disabled
table stats: rows=0 size=15.44KB
column stats: unavailable
mem-estimate=32.00MB mem-reservation=0B
tuple-ids=0 row-size=35B cardinality=0
----------------
Estimated Per-Host Mem: 44040259
Tables Missing Stats: default.customers
Tables With Corrupt Table Stats: default.customers
Per Host Min Reservation: self-service-analytics-2.gce.cloudera.com:22000(0)
Request Pool: root.romain
Admission result: Admitted immediately
ExecSummary:
Operator #Hosts Avg Time Max Time #Rows Est. #Rows Peak Mem Est. Peak Mem Detail
----------------------------------------------------------------------------------------------------------------------------
10:EXCHANGE 1 62.005ms 62.005ms 106 0 0 0 UNPARTITIONED
01:SUBPLAN 1 0.000ns 0.000ns 0 0 140.00 KB 0
|--09:NESTED LOOP JOIN 1 0.000ns 0.000ns 5.67K 10 32.00 KB 35.00 B CROSS JOIN
| |--02:SINGULAR ROW SRC 1 0.000ns 0.000ns 0 1 0 0
| 04:SUBPLAN 1 0.000ns 0.000ns 0 10 8.00 KB 0
| |--08:NESTED LOOP JOIN 1 0.000ns 0.000ns 160 1 24.00 KB 32.00 B CROSS JOIN
| | |--05:SINGULAR ROW SRC 1 0.000ns 0.000ns 0 1 0 0
| | 07:AGGREGATE 1 0.000ns 0.000ns 1 1 16.00 KB 10.00 MB FINALIZE
| | 06:UNNEST 1 0.000ns 0.000ns 2 10 0 0 o.items
| 03:UNNEST 1 0.000ns 0.000ns 2 10 0 0 c.orders o
00:SCAN HDFS 1 39.003ms 39.003ms 53 0 417.04 KB 32.00 MB default.customers c
Errors:
Planner Timeline: 36.379ms
- Analysis finished: 13.156ms (13.156ms)
- Equivalence classes computed: 13.775ms (619.949us)
- Single node plan created: 20.763ms (6.987ms)
- Runtime filters computed: 21.325ms (562.117us)
- Distributed plan created: 21.460ms (135.254us)
- Lineage info computed: 21.684ms (223.594us)
- Planning finished: 36.379ms (14.694ms)
Query Timeline: 2m59s
- Query submitted: 0.000ns (0.000ns)
- Planning finished: 42.003ms (42.003ms)
- Submit for admission: 43.003ms (1.000ms)
- Completed admission: 43.003ms (0.000ns)
- Ready to start on 1 backends: 43.003ms (0.000ns)
- All 1 execution backends (2 fragment instances) started: 44.003ms (1.000ms)
- Rows available: 121.009ms (77.006ms)
- First row fetched: 1s152ms (1s031ms)
- Unregister query: 2m59s (2m58s)
- ComputeScanRangeAssignmentTimer: 0.000ns
ImpalaServer:
- ClientFetchWaitTimer: 2m59s
- RowMaterializationTimer: 1.000ms
Execution Profile e147228183f1f0b3:6f086cc600000000"""
IMPALA_CUSTOMER_QUERY_SAMPLE_PROFILE = IMPALA_CUSTOMER_QUERY_SAMPLE_PROFILE_PLAN + \
""":(Total: 79.006ms, non-child: 0.000ns, % non-child: 0.00%)
Number of filters: 0
Filter routing table:
ID Src. Node Tgt. Node(s) Target type Partition filter Pending (Expected) First arrived Completed Enabled
-------------------------------------------------------------------------------------------------------------------
Backend startup latencies: Count: 1, min / max: 1ms / 1ms, 25th %-ile: 1ms, 50th %-ile: 1ms, """\
"""75th %-ile: 1ms, 90th %-ile: 1ms, 95th %-ile: 1ms, 99.9th %-ile: 1ms
Per Node Peak Memory Usage: self-service-analytics-2.gce.cloudera.com:22000(530.52 KB)
- FiltersReceived: 0 (0)
- FinalizationTimer: 0.000ns
Averaged Fragment F01:(Total: 76.006ms, non-child: 1.000ms, % non-child: 1.32%)
split sizes: min: 0, max: 0, avg: 0, stddev: 0
completion times: min:2m59s max:2m59s mean: 2m59s stddev:0.000ns
execution rates: min:0.00 /sec max:0.00 /sec mean:0.00 /sec stddev:0.00 /sec
num instances: 1
- AverageThreadTokens: 0.00
- BloomFilterBytes: 0
- PeakMemoryUsage: 34.12 KB (34939)
- PeakReservation: 0
- PeakUsedReservation: 0
- PerHostPeakMemUsage: 530.52 KB (543253)
- RowsProduced: 106 (106)
- TotalNetworkReceiveTime: 62.005ms
- TotalNetworkSendTime: 0.000ns
- TotalStorageWaitTime: 0.000ns
- TotalThreadsInvoluntaryContextSwitches: 0 (0)
- TotalThreadsTotalWallClockTime: 62.005ms
- TotalThreadsSysTime: 3.000us
- TotalThreadsUserTime: 12.000us
- TotalThreadsVoluntaryContextSwitches: 1 (1)
Fragment Instance Lifecycle Timings:
- ExecTime: 0.000ns
- ExecTreeExecTime: 0.000ns
- OpenTime: 62.005ms
- ExecTreeOpenTime: 62.005ms
- PrepareTime: 14.001ms
- ExecTreePrepareTime: 0.000ns
PLAN_ROOT_SINK:
- PeakMemoryUsage: 0
CodeGen:(Total: 13.001ms, non-child: 13.001ms, % non-child: 100.00%)
- CodegenTime: 0.000ns
- CompileTime: 0.000ns
- LoadTime: 0.000ns
- ModuleBitcodeSize: 1.84 MB (1929624)
- NumFunctions: 0 (0)
- NumInstructions: 0 (0)
- OptimizationTime: 0.000ns
- PeakMemoryUsage: 0
- PrepareTime: 13.001ms
EXCHANGE_NODE (id=10):(Total: 62.005ms, non-child: 62.005ms, % non-child: 100.00%)
- ConvertRowBatchTime: 0.000ns
- PeakMemoryUsage: 0
- RowsReturned: 106 (106)
- RowsReturnedRate: 1.71 K/sec
DataStreamReceiver:
- BytesReceived: 5.50 KB (5632)
- DeserializeRowBatchTimer: 0.000ns
- FirstBatchArrivalWaitTime: 62.005ms
- PeakMemoryUsage: 10.12 KB (10363)
- SendersBlockedTimer: 0.000ns
- SendersBlockedTotalTimer(*): 0.000ns
Coordinator Fragment F01:
Instance e147228183f1f0b3:6f086cc600000000 (host=self-service-analytics-2.gce.cloudera.com:22000):"""\
"""(Total: 76.006ms, non-child: 1.000ms, % non-child: 1.32%)
MemoryUsage(4s000ms): 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, """\
"""31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, """\
"""31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, """\
"""31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB
- AverageThreadTokens: 0.00
- BloomFilterBytes: 0
- PeakMemoryUsage: 34.12 KB (34939)
- PeakReservation: 0
- PeakUsedReservation: 0
- PerHostPeakMemUsage: 530.52 KB (543253)
- RowsProduced: 106 (106)
- TotalNetworkReceiveTime: 62.005ms
- TotalNetworkSendTime: 0.000ns
- TotalStorageWaitTime: 0.000ns
- TotalThreadsInvoluntaryContextSwitches: 0 (0)
- TotalThreadsTotalWallClockTime: 62.005ms
- TotalThreadsSysTime: 3.000us
- TotalThreadsUserTime: 12.000us
- TotalThreadsVoluntaryContextSwitches: 1 (1)
Fragment Instance Lifecycle Timings:
- ExecTime: 0.000ns
- ExecTreeExecTime: 0.000ns
- OpenTime: 62.005ms
- ExecTreeOpenTime: 62.005ms
- PrepareTime: 14.001ms
- ExecTreePrepareTime: 0.000ns
PLAN_ROOT_SINK:
- PeakMemoryUsage: 0
CodeGen:(Total: 13.001ms, non-child: 13.001ms, % non-child: 100.00%)
- CodegenTime: 0.000ns
- CompileTime: 0.000ns
- LoadTime: 0.000ns
- ModuleBitcodeSize: 1.84 MB (1929624)
- NumFunctions: 0 (0)
- NumInstructions: 0 (0)
- OptimizationTime: 0.000ns
- PeakMemoryUsage: 0
- PrepareTime: 13.001ms
EXCHANGE_NODE (id=10):(Total: 62.005ms, non-child: 62.005ms, % non-child: 100.00%)
- ConvertRowBatchTime: 0.000ns
- PeakMemoryUsage: 0
- RowsReturned: 106 (106)
- RowsReturnedRate: 1.71 K/sec
DataStreamReceiver:
BytesReceived(4s000ms): 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, """\
"""5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, """\
"""5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, """\
"""5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB
- BytesReceived: 5.50 KB (5632)
- DeserializeRowBatchTimer: 0.000ns
- FirstBatchArrivalWaitTime: 62.005ms
- PeakMemoryUsage: 10.12 KB (10363)
- SendersBlockedTimer: 0.000ns
- SendersBlockedTotalTimer(*): 0.000ns
Averaged Fragment F00:(Total: 76.006ms, non-child: 1.000ms, % non-child: 1.32%)
split sizes: min: 15.44 KB, max: 15.44 KB, avg: 15.44 KB, stddev: 0
completion times: min:78.006ms max:78.006ms mean: 78.006ms stddev:0.000ns
execution rates: min:197.95 KB/sec max:197.95 KB/sec mean:197.95 KB/sec stddev:0.00 /sec
num instances: 1
- AverageThreadTokens: 0.00
- BloomFilterBytes: 0
- PeakMemoryUsage: 506.52 KB (518677)
- PeakReservation: 0
- PeakUsedReservation: 0
- PerHostPeakMemUsage: 530.52 KB (543253)
- RowsProduced: 106 (106)
- TotalNetworkReceiveTime: 0.000ns
- TotalNetworkSendTime: 0.000ns
- TotalStorageWaitTime: 38.003ms
- TotalThreadsInvoluntaryContextSwitches: 1 (1)
- TotalThreadsTotalWallClockTime: 100.008ms
- TotalThreadsSysTime: 1.520ms
- TotalThreadsUserTime: 22.153ms
- TotalThreadsVoluntaryContextSwitches: 8 (8)
Fragment Instance Lifecycle Timings:
- ExecTime: 39.003ms
- ExecTreeExecTime: 39.003ms
- OpenTime: 22.001ms
- ExecTreeOpenTime: 0.000ns
- PrepareTime: 15.001ms
- ExecTreePrepareTime: 0.000ns
DataStreamSender (dst_id=10):
- BytesSent: 5.50 KB (5632)
- NetworkThroughput(*): 0.00 /sec
- OverallThroughput: 0.00 /sec
- PeakMemoryUsage: 4.85 KB (4968)
- RowsReturned: 106 (106)
- SerializeBatchTime: 0.000ns
- TransmitDataRPCTime: 0.000ns
- UncompressedRowBatchSize: 8.89 KB (9103)
CodeGen:(Total: 36.002ms, non-child: 36.002ms, % non-child: 100.00%)
- CodegenTime: 2.000ms
- CompileTime: 7.000ms
- LoadTime: 0.000ns
- ModuleBitcodeSize: 1.84 MB (1929624)
- NumFunctions: 23 (23)
- NumInstructions: 365 (365)
- OptimizationTime: 15.001ms
- PeakMemoryUsage: 182.50 KB (186880)
- PrepareTime: 14.001ms
SUBPLAN_NODE (id=1):(Total: 39.003ms, non-child: 0.000ns, % non-child: 0.00%)
- PeakMemoryUsage: 140.00 KB (143360)
- RowsReturned: 0 (0)
- RowsReturnedRate: 0
NESTED_LOOP_JOIN_NODE (id=9):
- BuildRows: 0 (0)
- BuildTime: 0.000ns
- PeakMemoryUsage: 32.00 KB (32768)
- ProbeRows: 106 (106)
- ProbeTime: 0.000ns
- RowsReturned: 5.67K (5671)
- RowsReturnedRate: 0
Nested Loop Join Builder:
- PeakMemoryUsage: 8.00 KB (8192)
SINGULAR_ROW_SRC_NODE (id=2):
- PeakMemoryUsage: 0
- RowsReturned: 0 (0)
- RowsReturnedRate: 0
SUBPLAN_NODE (id=4):
- PeakMemoryUsage: 8.00 KB (8192)
- RowsReturned: 0 (0)
- RowsReturnedRate: 0
NESTED_LOOP_JOIN_NODE (id=8):
- BuildRows: 0 (0)
- BuildTime: 0.000ns
- PeakMemoryUsage: 24.00 KB (24576)
- ProbeRows: 106 (106)
- ProbeTime: 0.000ns
- RowsReturned: 160 (160)
- RowsReturnedRate: 0
Nested Loop Join Builder:
- PeakMemoryUsage: 8.00 KB (8192)
SINGULAR_ROW_SRC_NODE (id=5):
- PeakMemoryUsage: 0
- RowsReturned: 0 (0)
- RowsReturnedRate: 0
AGGREGATION_NODE (id=7):
- BuildTime: 0.000ns
- GetResultsTime: 0.000ns
- HTResizeTime: 0.000ns
- HashBuckets: 0 (0)
- LargestPartitionPercent: 0 (0)
- MaxPartitionLevel: 0 (0)
- NumRepartitions: 0 (0)
- PartitionsCreated: 0 (0)
- PeakMemoryUsage: 16.00 KB (16384)
- RowsRepartitioned: 0 (0)
- RowsReturned: 1 (1)
- RowsReturnedRate: 0
- SpilledPartitions: 0 (0)
UNNEST_NODE (id=6):
- AvgCollectionSize: 1.50
- MaxCollectionSize: 3 (3)
- MinCollectionSize: 1 (1)
- NumCollections: 106 (106)
- PeakMemoryUsage: 0
- RowsReturned: 2 (2)
- RowsReturnedRate: 0
UNNEST_NODE (id=3):
- AvgCollectionSize: 2.00
- MaxCollectionSize: 3 (3)
- MinCollectionSize: 1 (1)
- NumCollections: 53 (53)
- PeakMemoryUsage: 0
- RowsReturned: 2 (2)
- RowsReturnedRate: 0
HDFS_SCAN_NODE (id=0):(Total: 39.003ms, non-child: 39.003ms, % non-child: 100.00%)
- AverageHdfsReadThreadConcurrency: 0.00
- AverageScannerThreadConcurrency: 0.00
- BytesRead: 19.30 KB (19766)
- BytesReadDataNodeCache: 0
- BytesReadLocal: 19.30 KB (19766)
- BytesReadRemoteUnexpected: 0
- BytesReadShortCircuit: 19.30 KB (19766)
- CachedFileHandlesHitCount: 5 (5)
- CachedFileHandlesMissCount: 1 (1)
- CollectionItemsRead: 265 (265)
- DecompressionTime: 0.000ns
- MaxCompressedTextFileLength: 0
- NumColumns: 5 (5)
- NumDictFilteredRowGroups: 0 (0)
- NumDisksAccessed: 1 (1)
- NumRowGroups: 1 (1)
- NumScannerThreadsStarted: 1 (1)
- NumScannersWithNoReads: 0 (0)
- NumStatsFilteredRowGroups: 0 (0)
- PeakMemoryUsage: 417.04 KB (427045)
- PerReadThreadRawHdfsThroughput: 507.92 KB/sec
- RemoteScanRanges: 0 (0)
- RowBatchQueueGetWaitTime: 39.003ms
- RowBatchQueuePutWaitTime: 0.000ns
- RowsRead: 53 (53)
- RowsReturned: 53 (53)
- RowsReturnedRate: 1.36 K/sec
- ScanRangesComplete: 1 (1)
- ScannerThreadsInvoluntaryContextSwitches: 0 (0)
- ScannerThreadsTotalWallClockTime: 39.003ms
- MaterializeTupleTime(*): 1.000ms
- ScannerThreadsSysTime: 346.000us
- ScannerThreadsUserTime: 346.000us
- ScannerThreadsVoluntaryContextSwitches: 4 (4)
- TotalRawHdfsReadTime(*): 38.003ms
- TotalReadThroughput: 0.00 /sec
Fragment F00:
Instance e147228183f1f0b3:6f086cc600000001 (host=self-service-analytics-2.gce.cloudera.com:22000):"""\
"""(Total: 76.006ms, non-child: 1.000ms, % non-child: 1.32%)
Hdfs split stats (<volume id>:<# splits>/<split lengths>): 0:1/15.44 KB
- AverageThreadTokens: 0.00
- BloomFilterBytes: 0
- PeakMemoryUsage: 506.52 KB (518677)
- PeakReservation: 0
- PeakUsedReservation: 0
- PerHostPeakMemUsage: 530.52 KB (543253)
- RowsProduced: 106 (106)
- TotalNetworkReceiveTime: 0.000ns
- TotalNetworkSendTime: 0.000ns
- TotalStorageWaitTime: 38.003ms
- TotalThreadsInvoluntaryContextSwitches: 1 (1)
- TotalThreadsTotalWallClockTime: 100.008ms
- TotalThreadsSysTime: 1.520ms
- TotalThreadsUserTime: 22.153ms
- TotalThreadsVoluntaryContextSwitches: 8 (8)
Fragment Instance Lifecycle Timings:
- ExecTime: 39.003ms
- ExecTreeExecTime: 39.003ms
- OpenTime: 22.001ms
- ExecTreeOpenTime: 0.000ns
- PrepareTime: 15.001ms
- ExecTreePrepareTime: 0.000ns
DataStreamSender (dst_id=10):
- BytesSent: 5.50 KB (5632)
- NetworkThroughput(*): 0.00 /sec
- OverallThroughput: 0.00 /sec
- PeakMemoryUsage: 4.85 KB (4968)
- RowsReturned: 106 (106)
- SerializeBatchTime: 0.000ns
- TransmitDataRPCTime: 0.000ns
- UncompressedRowBatchSize: 8.89 KB (9103)
CodeGen:(Total: 36.002ms, non-child: 36.002ms, % non-child: 100.00%)
- CodegenTime: 2.000ms
- CompileTime: 7.000ms
- LoadTime: 0.000ns
- ModuleBitcodeSize: 1.84 MB (1929624)
- NumFunctions: 23 (23)
- NumInstructions: 365 (365)
- OptimizationTime: 15.001ms
- PeakMemoryUsage: 182.50 KB (186880)
- PrepareTime: 14.001ms
SUBPLAN_NODE (id=1):(Total: 39.003ms, non-child: 0.000ns, % non-child: 0.00%)
- PeakMemoryUsage: 140.00 KB (143360)
- RowsReturned: 0 (0)
- RowsReturnedRate: 0
NESTED_LOOP_JOIN_NODE (id=9):
- BuildRows: 0 (0)
- BuildTime: 0.000ns
- PeakMemoryUsage: 32.00 KB (32768)
- ProbeRows: 106 (106)
- ProbeTime: 0.000ns
- RowsReturned: 5.67K (5671)
- RowsReturnedRate: 0
Nested Loop Join Builder:
- PeakMemoryUsage: 8.00 KB (8192)
SINGULAR_ROW_SRC_NODE (id=2):
- PeakMemoryUsage: 0
- RowsReturned: 0 (0)
- RowsReturnedRate: 0
SUBPLAN_NODE (id=4):
- PeakMemoryUsage: 8.00 KB (8192)
- RowsReturned: 0 (0)
- RowsReturnedRate: 0
NESTED_LOOP_JOIN_NODE (id=8):
- BuildRows: 0 (0)
- BuildTime: 0.000ns
- PeakMemoryUsage: 24.00 KB (24576)
- ProbeRows: 106 (106)
- ProbeTime: 0.000ns
- RowsReturned: 160 (160)
- RowsReturnedRate: 0
Nested Loop Join Builder:
- PeakMemoryUsage: 8.00 KB (8192)
SINGULAR_ROW_SRC_NODE (id=5):
- PeakMemoryUsage: 0
- RowsReturned: 0 (0)
- RowsReturnedRate: 0
AGGREGATION_NODE (id=7):
ExecOption: Codegen Enabled
- BuildTime: 0.000ns
- GetResultsTime: 0.000ns
- HTResizeTime: 0.000ns
- HashBuckets: 0 (0)
- LargestPartitionPercent: 0 (0)
- MaxPartitionLevel: 0 (0)
- NumRepartitions: 0 (0)
- PartitionsCreated: 0 (0)
- PeakMemoryUsage: 16.00 KB (16384)
- RowsRepartitioned: 0 (0)
- RowsReturned: 1 (1)
- RowsReturnedRate: 0
- SpilledPartitions: 0 (0)
UNNEST_NODE (id=6):
- AvgCollectionSize: 1.50
- MaxCollectionSize: 3 (3)
- MinCollectionSize: 1 (1)
- NumCollections: 106 (106)
- PeakMemoryUsage: 0
- RowsReturned: 2 (2)
- RowsReturnedRate: 0
UNNEST_NODE (id=3):
- AvgCollectionSize: 2.00
- MaxCollectionSize: 3 (3)
- MinCollectionSize: 1 (1)
- NumCollections: 53 (53)
- PeakMemoryUsage: 0
- RowsReturned: 2 (2)
- RowsReturnedRate: 0
HDFS_SCAN_NODE (id=0):(Total: 39.003ms, non-child: 39.003ms, % non-child: 100.00%)
Hdfs split stats (<volume id>:<# splits>/<split lengths>): 0:1/15.44 KB
ExecOption: PARQUET Codegen Enabled, Codegen enabled: 1 out of 1
Hdfs Read Thread Concurrency Bucket: 0:0% 1:0% 2:0% 3:0% 4:0%
File Formats: PARQUET/NONE:5
- FooterProcessingTime: (Avg: 38.003ms ; Min: 38.003ms ; Max: 38.003ms ; Number of samples: 1)
- AverageHdfsReadThreadConcurrency: 0.00
- AverageScannerThreadConcurrency: 0.00
- BytesRead: 19.30 KB (19766)
- BytesReadDataNodeCache: 0
- BytesReadLocal: 19.30 KB (19766)
- BytesReadRemoteUnexpected: 0
- BytesReadShortCircuit: 19.30 KB (19766)
- CachedFileHandlesHitCount: 5 (5)
- CachedFileHandlesMissCount: 1 (1)
- CollectionItemsRead: 265 (265)
- DecompressionTime: 0.000ns
- MaxCompressedTextFileLength: 0
- NumColumns: 5 (5)
- NumDictFilteredRowGroups: 0 (0)
- NumDisksAccessed: 1 (1)
- NumRowGroups: 1 (1)
- NumScannerThreadsStarted: 1 (1)
- NumScannersWithNoReads: 0 (0)
- NumStatsFilteredRowGroups: 0 (0)
- PeakMemoryUsage: 417.04 KB (427045)
- PerReadThreadRawHdfsThroughput: 507.92 KB/sec
- RemoteScanRanges: 0 (0)
- RowBatchQueueGetWaitTime: 39.003ms
- RowBatchQueuePutWaitTime: 0.000ns
- RowsRead: 53 (53)
- RowsReturned: 53 (53)
- RowsReturnedRate: 1.36 K/sec
- ScanRangesComplete: 1 (1)
- ScannerThreadsInvoluntaryContextSwitches: 0 (0)
- ScannerThreadsTotalWallClockTime: 39.003ms
- MaterializeTupleTime(*): 1.000ms
- ScannerThreadsSysTime: 346.000us
- ScannerThreadsUserTime: 346.000us
- ScannerThreadsVoluntaryContextSwitches: 4 (4)
- TotalRawHdfsReadTime(*): 38.003ms
- TotalReadThroughput: 0.00 /sec
"""
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import next, object
import json
import logging
import re
import sys
import time
from nose.plugins.skip import SkipTest
from nose.tools import assert_equal, assert_true, assert_raises
from django.urls import reverse
from TCLIService.ttypes import TStatusCode, TProtocolVersion, TOperationType
from desktop.auth.backend import rewrite_user
from desktop.conf import has_connectors
from desktop.lib.i18n import smart_str
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import add_to_group, grant_access
from beeswax.server import dbms
from beeswax.server.dbms import QueryServerException
from beeswax.test_base import BeeswaxSampleProvider, get_query_server_config, is_hive_on_spark
from hadoop.pseudo_hdfs4 import is_live_cluster
from useradmin.models import User
from notebook.api import _save_notebook
from notebook.connectors.base import QueryError, QueryExpired
from notebook.connectors.hiveserver2 import HS2Api
from notebook.models import make_notebook, Notebook
if sys.version_info[0] > 2:
from unittest.mock import patch, Mock
else:
from mock import patch, Mock
LOG = logging.getLogger(__name__)
class TestApiWithConnectors(object):
  """Notebook API tests for the connector-based (CONNECTOR_INSTANCES) Impala setup.

  Each test patches the Thrift/dbms layer with mocks, so no live Impala daemon is
  required. The whole class is skipped when connectors are disabled.
  """

  # Saved notebook payload POSTed to the notebook API endpoints in every test.
  NOTEBOOK_JSON = """
    {
      "selectedSnippet": "impala",
      "showHistory": false,
      "description": "Test Impala Query",
      "name": "Test Impala Query",
      "sessions": [
          {
              "type": "impala",
              "properties": [],
              "id": null
          }
      ],
      "type": "query-impala",
      "id": null,
      "snippets": [{
        "id":"2b7d1f46-17a0-30af-efeb-33d4c29b1055","type":"impala-xx","status":"running",
        "statement_raw":"select * from web_logs",
        "statement":"select * from web_logs",
        "variables":[],
        "properties":{"settings":[],"variables":[],"files":[],"functions":[]},
        "result":{
            "id":"b424befa-f4f5-8799-a0b4-79753f2552b1","type":"table",
            "handle":{"log_context":null,"statements_count":1,"end":{"column":21,"row":0},"statement_id":0,"has_more_statements":false,
            "start":{"column":0,"row":0},"secret":"rVRWw7YPRGqPT7LZ/TeFaA==an","has_result_set":true,
            "statement":"select * from web_logs","operation_type":0,"modified_row_count":null,"guid":"7xm6+epkRx6dyvYvGNYePA==an"}
        },
        "lastExecuted": 1462554843817,"database":"default"
      }],
      "uuid": "d9efdee1-ef25-4d43-b8f9-1a170f69a05a",
      "isSaved":false
  }
  """

  # Minimal Impala connector definition injected into CONNECTOR_INSTANCES.
  CONNECTOR = [{
      'nice_name': 'Impala', 'name': 'impala-xx', 'dialect': 'impala', 'interface': 'hiveserver2',
      'settings': [
        {'name': 'server_host', 'value': 'gethue.com'},
        {'name': 'server_port', 'value': '21050'},
      ],
      'id': 1, 'category': 'editor', 'description': '', 'is_sql': True
    },
  ]

  def setUp(self):
    if not has_connectors():
      raise SkipTest

    self.client = make_logged_in_client(username="test", groupname="default", recreate=True, is_superuser=False)
    self.user = rewrite_user(User.objects.get(username="test"))

    grant_access("test", "default", "notebook")

  def test_execute_impala(self):
    """Executing a statement through a mocked Thrift client should return status 0."""
    # NOTE: these constants live on this class, not on TestApi, so reference them
    # via self / the class itself (referencing TestApi here raised AttributeError).
    with patch('desktop.lib.connectors.api.CONNECTOR_INSTANCES', TestApiWithConnectors.CONNECTOR):
      with patch('desktop.lib.thrift_util.get_client') as get_client:
        tclient = Mock()
        # One shared mock response that satisfies every HS2 Thrift call made
        # during execute: OpenSession, ExecuteStatement, FetchResults, ...
        successfullCall = Mock(
          return_value=Mock(
            status=Mock(
              statusCode=TStatusCode.SUCCESS_STATUS
            ),
            sessionHandle=Mock(
              sessionId=Mock(
                secret='\x7f\x98\x97s\xe1\xa8G\xf4\x8a\x8a\\r\x0e6\xc2\xee\xf0',
                guid='\xfa\xb0/\x04 \xfeDX\x99\xfcq\xff2\x07\x02\xfe',
              )
            ),
            configuration={},
            serverProtocolVersion=TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V7,
            # TFetchResultsResp
            results=Mock(
              startRowOffset=0,
              rows=[],
              columns=[]
            ),
            # ExecuteStatement
            operationHandle=Mock(
              operationId=Mock(
                secret='\x7f\x98\x97s\xe1\xa8G\xf4\x8a\x8a\\r\x0e6\xc2\xee\xf0',
                guid='\xfa\xb0/\x04 \xfeDX\x99\xfcq\xff2\x07\x02\xfe',
              ),
              hasResultSet=True,
              operationType=TOperationType.EXECUTE_STATEMENT,
              modifiedRowCount=0
            ),
          )
        )
        tclient.OpenSession = successfullCall
        tclient.ExecuteStatement = successfullCall
        tclient.FetchResults = successfullCall
        tclient.GetResultSetMetadata = successfullCall
        tclient.CloseOperation = successfullCall
        get_client.return_value = tclient
        tclient.get_coordinator_host = Mock(return_value={})

        response = self.client.post(reverse('notebook:execute'), {
          'notebook': self.NOTEBOOK_JSON,
          'snippet': json.dumps(json.loads(self.NOTEBOOK_JSON)['snippets'][0]),
        })

    get_client.assert_called()

    assert_equal(response.status_code, 200)
    data = json.loads(response.content)
    assert_equal(data['status'], 0)

  def test_autocomplete_database_impala(self):
    """Database autocomplete should surface the databases returned by dbms.get()."""
    with patch('desktop.lib.connectors.api.CONNECTOR_INSTANCES', TestApiWithConnectors.CONNECTOR):
      with patch('beeswax.server.dbms.get') as get:
        get.return_value = Mock(
          get_databases=Mock(
            return_value=[{'comment': '', 'hdfs_link': 'hdfs://table'}]
          )
        )

        response = self.client.post(reverse('notebook:api_autocomplete_databases'), {
          'notebook': self.NOTEBOOK_JSON,
          'snippet': json.dumps(json.loads(self.NOTEBOOK_JSON)['snippets'][0]),
        })

    get.assert_called()

    assert_equal(response.status_code, 200)
    data = json.loads(response.content)
    assert_equal(data['status'], 0)
    assert_equal(data['databases'], [{u'comment': u'', u'hdfs_link': u'hdfs://table'}])

  def test_sample_data_table_sync_impala(self):
    """Synchronous table sample should return headers, full headers and rows."""
    with patch('desktop.lib.connectors.api.CONNECTOR_INSTANCES', TestApiWithConnectors.CONNECTOR):
      with patch('beeswax.server.dbms.get') as get:
        get.return_value = Mock(
          get_table=Mock(
            return_value=Mock(is_impala_only=False)
          ),
          get_sample=Mock(
            return_value=Mock(
              rows=Mock(return_value=[[1], [2]]),
              cols=Mock(return_value=['name']),
              full_cols=Mock(return_value=[{'name': 'name'}])
            )
          )
        )

        response = self.client.post(
          reverse('notebook:api_sample_data', kwargs={'database': 'sfdc', 'table': 'customers'}), {
            'notebook': self.NOTEBOOK_JSON,
            'snippet': json.dumps(json.loads(self.NOTEBOOK_JSON)['snippets'][0]),
          }
        )

    get.assert_called()

    assert_equal(response.status_code, 200)
    data = json.loads(response.content)
    assert_equal(data['status'], 0)
    assert_equal(data['headers'], ['name'])
    assert_equal(data['full_headers'], [{'name': 'name'}])
    assert_equal(data['rows'], [[1], [2]])

  def test_sample_data_table_async_impala(self):
    """Asynchronous table sample should hand back the query handle and statement."""
    with patch('desktop.lib.connectors.api.CONNECTOR_INSTANCES', TestApiWithConnectors.CONNECTOR):
      with patch('beeswax.server.dbms.get') as get:
        get.return_value = Mock(
          get_table=Mock(
            return_value=Mock(is_impala_only=False)
          ),
          server_name='impala-xx',
          # In async mode get_sample returns the statement to execute, not rows.
          get_sample=Mock(
            return_value='SELECT * from customers'
          ),
          client=Mock(
            user=self.user,
            query=Mock(
              return_value=Mock(
                get=Mock(
                  return_value=('server_id', 'server_guid')
                ),
                log_context='log_context',
                has_result_set=True,
                session_guid='session_guid',
                modified_row_count=0,
                operation_type=1
              ),
            )
          )
        )

        response = self.client.post(
          reverse('notebook:api_sample_data', kwargs={'database': 'sfdc', 'table': 'customers'}), {
            'notebook': self.NOTEBOOK_JSON,
            'snippet': json.dumps(json.loads(self.NOTEBOOK_JSON)['snippets'][0]),
            'async': '"true"'
          }
        )

    get.assert_called()

    assert_equal(response.status_code, 200)
    data = json.loads(response.content)
    assert_equal(data['status'], 0)
    assert_equal(data['result']['handle']['secret'], 'server_id')
    assert_equal(data['result']['handle']['statement'], 'SELECT * from customers')
class TestApi(object):
  """Unit tests for HS2Api behavior (job parsing, close, errors, autocomplete)
  with the database layer fully mocked out.
  """

  def setUp(self):
    self.client = make_logged_in_client(username="test", groupname="default", recreate=True, is_superuser=False)
    self.user = rewrite_user(User.objects.get(username="test"))

  @patch('notebook.connectors.hiveserver2.has_jobbrowser', True)
  def test_get_jobs_with_jobbrowser(self):
    """When Job Browser is available, parsed jobs carry a /jobbrowser URL."""
    notebook = Mock()
    snippet = {'type': 'hive', 'properties': {}}
    logs = ''

    with patch('notebook.connectors.hiveserver2.HS2Api._get_hive_execution_engine') as _get_hive_execution_engine:
      with patch('notebook.connectors.hiveserver2.parse_out_jobs') as parse_out_jobs:
        _get_hive_execution_engine.return_value = 'tez'
        parse_out_jobs.return_value = [{'job_id': 'job_id_00001'}]

        jobs = HS2Api(self.user).get_jobs(notebook, snippet, logs)

        assert_true(jobs, jobs)
        assert_equal(jobs[0]['name'], 'job_id_00001')
        assert_equal(jobs[0]['url'], '/jobbrowser/jobs/job_id_00001')

  @patch('notebook.connectors.hiveserver2.has_jobbrowser', False)
  def test_get_jobs_without_jobbrowser(self):
    """Without Job Browser, jobs are still listed but with an empty URL."""
    notebook = Mock()
    snippet = {'type': 'hive', 'properties': {}}
    logs = ''

    with patch('notebook.connectors.hiveserver2.HS2Api._get_hive_execution_engine') as _get_hive_execution_engine:
      with patch('notebook.connectors.hiveserver2.parse_out_jobs') as parse_out_jobs:
        _get_hive_execution_engine.return_value = 'tez'
        parse_out_jobs.return_value = [{'job_id': 'job_id_00001'}]

        jobs = HS2Api(self.user).get_jobs(notebook, snippet, logs)

        assert_true(jobs, jobs)
        assert_equal(jobs[0]['name'], 'job_id_00001')
        assert_equal(jobs[0]['url'], '')  # Is empty

  def test_close_statement(self):
    """close_statement() succeeds with a full handle and returns -1 when the
    handle is missing its guid/secret."""
    with patch('notebook.connectors.hiveserver2.HS2Api._get_db') as _get_db:
      _get_db.return_value = Mock(
        use=Mock(
        ),
        client=Mock(
          query=Mock(
            side_effect=QueryServerException(
              Exception('Execution error!'),
              message='Error while compiling statement: FAILED: HiveAccessControlException Permission denied'
            )
          ),
        ),
      )
      notebook = {}
      # Complete snippet: the handle contains guid + secret, so close succeeds.
      snippet = {
        'id': '7ccdd296-20a3-da33-16ec-db58149aba0b', 'type': 'impala', 'status': 'running', 'statementType': 'text',
        'statement': 'SELECT *\nFROM `default`.sample_07\nLIMIT 100\n;', 'aceCursorPosition': None, 'statementPath': '',
        'associatedDocumentUuid': None,
        'properties': {'settings': []},
        'result': {
          'id': 'd9a8dc1b-7f6d-169f-7dd7-660723cba3f4', 'type': 'table',
          'handle': {
            'secret': 'obUXjEDWTh+ke73YLlOlMw==', 'guid': '2iv5rEXrRE4AAAAABtXdxA==', 'operation_type': 0, 'has_result_set': True,
            'modified_row_count': None, 'log_context': None, 'session_guid': '2440c57bc3806c6e:598514f42764cc91', 'session_id': 2094,
            'session_type': 'impala', 'statement_id': 0, 'has_more_statements': False, 'statements_count': 1,
            'previous_statement_hash': '39b8e5b3c37fda5ebd438da23f3e198c914750a64aa147f819a6a1e0', 'start': {'row': 0, 'column': 0},
            'end': {'row': 0, 'column': 43}, 'statement': 'SELECT *\nFROM `default`.sample_07\nLIMIT 100'
          }
        }, 'database': 'default',
        'compute': {
          'id': 'default', 'name': 'default', 'namespace': 'default', 'interface': 'impala', 'type': 'direct', 'options': {}
        }, 'wasBatchExecuted': False, 'dialect': 'impala'
      }

      api = HS2Api(self.user)

      response = api.close_statement(notebook, snippet)
      assert_equal(response['status'], 0)

      # Incomplete snippet: no guid/secret in the handle.
      snippet = {
        'id': '7ccdd296-20a3-da33-16ec-db58149aba0b', 'type': 'impala', 'status': 'running',
        'statementType': 'text', 'statement': 'SELECT *\nFROM `default`.sample_07\nLIMIT 100\n;',
        'aceCursorPosition': None, 'statementPath': '', 'associatedDocumentUuid': None,
        'properties': {'settings': []},
        'result': {
          'id': 'd9a8dc1b-7f6d-169f-7dd7-660723cba3f4', 'type': 'table',
          'handle': {
            'has_more_statements': False, 'statement_id': 0, 'statements_count': 1,
            'previous_statement_hash': '39b8e5b3c37fda5ebd438da23f3e198c914750a64aa147f819a6a1e0'
          }
        }, 'database': 'default', 'compute': {'id': 'default', 'name': 'default', 'namespace': 'default',
        'interface': 'impala', 'type': 'direct', 'options': {}}, 'wasBatchExecuted': False, 'dialect': 'impala'
      }

      api = HS2Api(self.user)

      response = api.close_statement(notebook, snippet)
      assert_equal(response['status'], -1)  # snippet['result']['handle'] ['guid'] and ['secret'] are missing

  def test_get_error_message_from_query(self):
    """A QueryServerException from the client must surface as QueryError with
    the server's compile-error message intact."""
    with patch('notebook.connectors.hiveserver2.HS2Api._get_db') as _get_db:
      with patch('notebook.connectors.hiveserver2.HS2Api._get_current_statement') as _get_current_statement:
        with patch('notebook.connectors.hiveserver2.HS2Api._get_session') as _get_session:
          with patch('notebook.connectors.hiveserver2.HS2Api._prepare_hql_query') as _prepare_hql_query:
            with patch('notebook.connectors.hiveserver2.HS2Api._get_session_by_id') as _get_session_by_id:
              _get_db.return_value = Mock(
                use=Mock(
                ),
                client=Mock(
                  query=Mock(
                    side_effect=QueryServerException(
                      Exception('Execution error!'),
                      message='Error while compiling statement: FAILED: HiveAccessControlException Permission denied'
                    )
                  ),
                ),
              )
              notebook, snippet = {}, {'type': 'hive'}

              api = HS2Api(self.user)

              assert_raises(QueryError, api.execute, notebook, snippet)
              try:
                api = api.execute(notebook, snippet)
              except QueryError as e:
                assert_equal(
                  e.message,
                  'Error while compiling statement: FAILED: HiveAccessControlException Permission denied',
                )

  def test_autocomplete_time_out(self):
    """An autocomplete read timeout is converted into QueryExpired with the
    original connection-pool message."""
    snippet = {'type': 'hive', 'properties': {}}

    with patch('notebook.connectors.hiveserver2._autocomplete') as _autocomplete:
      _autocomplete.return_value = {
        'code': 500,
        'error': "HTTPSConnectionPool(host='gethue.com', port=10001): Read timed out. (read timeout=120)"
      }
      api = HS2Api(self.user)

      try:
        api.autocomplete(snippet, database='database')
        # assert_false is not imported from nose.tools; use assert_true so a
        # missing exception fails the test instead of raising NameError.
        assert_true(False, 'autocomplete() should have raised QueryExpired')
      except QueryExpired as e:
        assert_equal(e.message, "HTTPSConnectionPool(host='gethue.com', port=10001): Read timed out. (read timeout=120)")

  def test_autocomplete_functions_hive(self):
    """Function autocomplete should return the list produced by _get_functions."""
    snippet = {'type': 'hive', 'properties': {}}

    with patch('notebook.connectors.hiveserver2.HS2Api._get_db') as _get_db:
      with patch('beeswax.api._get_functions') as _get_functions:
        _get_functions.return_value = [
          {'name': 'f1'}, {'name': 'f2'}, {'name': 'f3'}
        ]

        api = HS2Api(self.user)
        data = api.autocomplete(snippet, operation='functions')

        assert_equal(
          data['functions'],
          [{'name': 'f1'}, {'name': 'f2'}, {'name': 'f3'}]
        )
class TestHiveserver2ApiNonMock(object):
  def setUp(self):
    # Log in a non-superuser test client and grant it access to the notebook
    # and hive apps, then hold on to a dbms handle and an HS2Api for the tests.
    self.client = make_logged_in_client(username="test", groupname="test", recreate=False, is_superuser=False)
    self.user = User.objects.get(username='test')
    add_to_group('test')
    grant_access("test", "test", "notebook")
    grant_access("test", "test", "hive")

    # NOTE(review): dbms.get() presumably needs a reachable query server here —
    # these non-mock tests depend on a live cluster; confirm before running standalone.
    self.db = dbms.get(self.user, get_query_server_config())
    self.api = HS2Api(self.user)
def test_prepare_hql_query(self):
statement = "SELECT myUpper(description) FROM sample_07 LIMIT 10"
snippet_json = """
{
"status": "running",
"database": "default",
"properties": {
"files": [],
"functions": [{
"class_name": "org.hue.udf.MyUpper",
"name": "myUpper"
}],
"settings": []
},
"result": {
"handle": {
"log_context": null,
"statements_count": 1,
"statement_id": 0,
"has_more_statements": false,
"secret": "<KEY>
"has_result_set": true,
"operation_type": 0,
"modified_row_count": null,
"guid": "ZxOd4IjqTeK1PUTq+MdcDA=="
},
"type": "table",
"id": "ae81b805-dcf1-9692-0452-797681e997ed"
},
"statement": "%(statement)s",
"type": "hive",
"id": "9b50e364-f7b2-303d-e924-db8b0bd9866d"
}
""" % {'statement': statement}
session_json = """
{
"type": "hive",
"properties": [
{
"multiple": true,
"value": [
{
"path": "/user/test/myudfs.jar",
"type": "jar"
}
],
"nice_name": "Files",
"key": "files",
"help_text": "Add one or more files, jars, or archives to the list of resources.",
"type": "hdfs-files"
},
{
"multiple": true,
"value": [
{
"class_name": "org.hue.udf.MyUpper",
"name": "myUpper"
}
],
"nice_name": "Functions",
"key": "functions",
"help_text": "Add one or more registered UDFs (requires function name and fully-qualified class name).",
"type": "functions"
},
{
"multiple": true,
"value": [
{
"value": "spark",
"key": "hive.execution.engine"
}
],
"nice_name": "Settings",
"key": "settings",
"help_text": "Hive and Hadoop configuration properties.",
"type": "settings",
"options": [
"hive.map.aggr",
"hive.exec.compress.output",
"hive.exec.parallel",
"hive.execution.engine",
"mapreduce.job.queuename"
]
}
],
"id": 30
}
"""
snippet = json.loads(snippet_json)
session = json.loads(session_json)
hql_query = self.api._prepare_hql_query(snippet, statement, session)
assert_equal([{'key': 'hive.execution.engine', 'value': 'spark'}], hql_query.settings)
assert_equal([{'type': 'jar', 'path': '/user/test/myudfs.jar'}], hql_query.file_resources)
assert_equal([{'name': 'myUpper', 'class_name': 'org.hue.udf.MyUpper'}], hql_query.functions)
config_statements = ', '.join(hql_query.get_configuration_statements())
pattern = re.compile("ADD JAR hdfs://[A-Za-z0-9.:_-]+/user/test/myudfs.jar")
assert_true(pattern.search(config_statements), config_statements)
assert_true("CREATE TEMPORARY FUNCTION myUpper AS 'org.hue.udf.MyUpper'" in config_statements, config_statements)
def test_upgrade_properties(self):
properties = None
# Verify that upgrade will return defaults if current properties not formatted as settings
upgraded_props = self.api.upgrade_properties(lang='hive', properties=properties)
assert_equal(upgraded_props, self.api.get_properties(lang='hive'))
# Verify that upgrade will save old properties and new settings
properties = [
{
'key': 'hive.execution.engine',
'value': 'mr'
},
{
'key': 'hive.exec.compress.output',
'value': False
}
]
upgraded_props = self.api.upgrade_properties(lang='hive', properties=properties)
settings = next((prop for prop in upgraded_props if prop['key'] == 'settings'), None)
assert_equal(settings['value'], properties)
# Verify that already upgraded properties will be unchanged
properties = [
{
"multiple": True,
"value": [],
"nice_name": "Files",
"key": "files",
"help_text": "Add one or more files, jars, or archives to the list of resources.",
"type": "hdfs-files"
},
{
"multiple": True,
"value": [],
"nice_name": "Functions",
"key": "functions",
"help_text": "Add one or more registered UDFs (requires function name and fully-qualified class name).",
"type": "functions"
},
{
"multiple": True,
"value": [
{
"key": "hive.execution.engine",
"value": "spark"
}
],
"nice_name": "Settings",
"key": "settings",
"help_text": "Hive and Hadoop configuration properties.",
"type": "settings",
"options": [
"hive.map.aggr",
"hive.exec.compress.output",
"hive.exec.parallel",
"hive.execution.engine",
"mapreduce.job.queuename"
]
}
]
upgraded_props = self.api.upgrade_properties(lang='hive', properties=properties)
assert_equal(upgraded_props, properties)
  def test_progress(self):
    # Hive case: progress is derived from the HiveServer2 operation log.
    snippet = json.loads("""
        {
            "status": "running",
            "database": "default",
            "id": "d70d31ee-a62a-4854-b2b1-b852f6a390f5",
            "result": {
                "type": "table",
                "handle": {
                    "statement_id": 0,
                    "statements_count": 2,
                    "has_more_statements": true
                },
                "id": "ca11fcb1-11a5-f534-8200-050c8e1e57e3"
            },
            "statement": "%(statement)s",
            "type": "hive",
            "properties": {
                "files": [],
                "functions": [],
                "settings": []
            }
        }
      """ % {'statement': "SELECT * FROM sample_07;"}
    )
    # Log covering only compilation/submission: progress stays at 5%.
    logs = """INFO : Compiling command(queryId=hive_20160620133030_7e69739c-a00b-4170-8717-9eee331130eb): SELECT app,
       AVG(bytes) AS avg_bytes
FROM web_logs
GROUP BY  app
HAVING app IS NOT NULL
ORDER BY avg_bytes DESC
INFO  : Semantic Analysis Completed
INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:app, type:string, comment:null), FieldSchema(name:avg_bytes, type:double, comment:null)], properties:null)
INFO  : Completed compiling command(queryId=hive_20160620133030_7e69739c-a00b-4170-8717-9eee331130eb); Time taken: 0.116 seconds
INFO  : Executing command(queryId=hive_20160620133030_7e69739c-a00b-4170-8717-9eee331130eb): SELECT app,
       AVG(bytes) AS avg_bytes
FROM web_logs
GROUP BY  app
HAVING app IS NOT NULL
ORDER BY avg_bytes DESC
INFO  : Query ID = hive_20160620133030_7e69739c-a00b-4170-8717-9eee331130eb
INFO  : Total jobs = 2
INFO  : Launching Job 1 out of 2
INFO  : Starting task [Stage-1:MAPRED] in serial mode
INFO  : Number of reduce tasks not specified. Estimated from input data size: 1
INFO  : In order to change the average load for a reducer (in bytes):
INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
INFO  : In order to limit the maximum number of reducers:
INFO  :   set hive.exec.reducers.max=<number>
INFO  : In order to set a constant number of reducers:
INFO  :   set mapreduce.job.reduces=<number>
INFO  : number of splits:1
INFO  : Submitting tokens for job: job_1466104358744_0003
INFO  : The url to track the job: http://jennykim-1.vpc.cloudera.com:8088/proxy/application_1466104358744_0003/
"""

    assert_equal(self.api.progress({}, snippet, logs=logs), 5)

    # After the first of two MR jobs completes, progress reaches 50%.
    logs += """INFO : Starting Job = job_1466104358744_0003, Tracking URL = """\
      """http://jennykim-1.vpc.cloudera.com:8088/proxy/application_1466104358744_0003/
INFO  : Kill Command = /usr/lib/hadoop/bin/hadoop job  -kill job_1466104358744_0003
INFO  : Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 1
INFO  : 2016-06-20 13:30:34,494 Stage-1 map = 0%,  reduce = 0%
INFO  : 2016-06-20 13:30:47,081 Stage-1 map = 100%,  reduce = 0%, Cumulative CPU 3.13 sec
INFO  : 2016-06-20 13:30:58,606 Stage-1 map = 100%,  reduce = 100%, Cumulative CPU 5.59 sec
INFO  : MapReduce Total cumulative CPU time: 5 seconds 590 msec
INFO  : Ended Job = job_1466104358744_0003
"""

    assert_equal(self.api.progress({}, snippet, logs=logs), 50)

    # Impala case: progress is parsed from the "N% Complete" log lines instead.
    snippet = json.loads("""
        {
            "status": "running",
            "database": "default",
            "id": "d70d31ee-a62a-4854-b2b1-b852f6a390f5",
            "result": {
                "type": "table",
                "handle": {
                    "statement_id": 0,
                    "statements_count": 2,
                    "has_more_statements": true
                },
                "id": "ca11fcb1-11a5-f534-8200-050c8e1e57e3"
            },
            "statement": "%(statement)s",
            "type": "impala",
            "properties": {
                "files": [],
                "functions": [],
                "settings": []
            }
        }
      """ % {'statement': "SELECT * FROM sample_07;"}
    )

    logs = "Query 734a81444c85be66:d05f3bb1a6c2d0a5: 0% Complete (1 out of 4693)"

    assert_equal(self.api.progress({}, snippet, logs=logs), 0)

    # The most recent percentage in the log wins.
    logs += """Query 734a81444c85be66:d05f3bb1a6c2d0a5: 20% Complete (4 out of 4693)
Query 734a81444c85be66:d05f3bb1a6c2d0a5: 30% Complete (7 out of 4693)
Query 734a81444c85be66:d05f3bb1a6c2d0a5: 40% Complete (7 out of 4693)
Query 734a81444c85be66:d05f3bb1a6c2d0a5: 50% Complete (234 out of 4693)
"""

    assert_equal(self.api.progress({}, snippet, logs=logs), 50)
def test_get_jobs(self):
    """get_jobs() must parse Hadoop job ids out of HS2 logs and flag each job as started/finished.

    Fix: the original used `assert_true(len(jobs), 1)`, where `1` is nose's
    *message* argument — the check only asserted that at least one job existed.
    The intent (exactly one job parsed) is now asserted with `assert_equal`.
    """
    notebook = json.loads("""
{
"uuid": "f5d6394d-364f-56e8-6dd3-b1c5a4738c52",
"id": 1234,
"sessions": [{"type": "hive", "properties": [], "id": "1234"}],
"type": "query-hive",
"name": "Test Hiveserver2 Editor",
"isSaved": false,
"parentUuid": null
}
""")
    snippet = json.loads("""
{
"status": "running",
"database": "default",
"id": "d70d31ee-a62a-4854-b2b1-b852f6a390f5",
"result": {
"type": "table",
"handle": {
"statement_id": 0,
"statements_count": 2,
"has_more_statements": true
},
"id": "ca11fcb1-11a5-f534-8200-050c8e1e57e3"
},
"statement": "%(statement)s",
"type": "hive",
"properties": {
"files": [],
"functions": [],
"settings": []
}
}
""" % {'statement': "SELECT * FROM sample_07;"}
)

    # Logs up to (and including) job submission: job is known and started, not finished.
    logs = """INFO : Compiling command(queryId=hive_20160624155555_c81f8b95-af22-45fd-8e2c-fb012f530f13): SELECT app,
AVG(bytes) AS avg_bytes
FROM web_logs
GROUP BY app
HAVING app IS NOT NULL
ORDER BY avg_bytes DESC
INFO : Semantic Analysis Completed
INFO : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:app, type:string, comment:null), FieldSchema(name:avg_bytes, type:double, comment:null)], properties:null)
INFO : Completed compiling command(queryId=hive_20160624155555_c81f8b95-af22-45fd-8e2c-fb012f530f13); Time taken: 0.073 seconds
INFO : Executing command(queryId=hive_20160624155555_c81f8b95-af22-45fd-8e2c-fb012f530f13): SELECT app,
AVG(bytes) AS avg_bytes
FROM web_logs
GROUP BY app
HAVING app IS NOT NULL
ORDER BY avg_bytes DESC
INFO : Query ID = hive_20160624155555_c81f8b95-af22-45fd-8e2c-fb012f530f13
INFO : Total jobs = 2
INFO : Launching Job 1 out of 2
INFO : Starting task [Stage-1:MAPRED] in serial mode
INFO : Number of reduce tasks not specified. Estimated from input data size: 1
INFO : In order to change the average load for a reducer (in bytes):
INFO : set hive.exec.reducers.bytes.per.reducer=<number>
INFO : In order to limit the maximum number of reducers:
INFO : set hive.exec.reducers.max=<number>
INFO : In order to set a constant number of reducers:
INFO : set mapreduce.job.reduces=<number>
INFO : number of splits:1
INFO : Submitting tokens for job: job_1466630204796_0059
INFO : The url to track the job: http://jennykim-1.vpc.cloudera.com:8088/proxy/application_1466630204796_0059/
INFO : Starting Job = job_1466630204796_0059, Tracking URL = http://jennykim-1.vpc.cloudera.com:8088/proxy/application_1466630204796_0059/
INFO : Kill Command = /usr/lib/hadoop/bin/hadoop job -kill job_1466630204796_0059
"""

    jobs = self.api.get_jobs(notebook, snippet, logs)
    assert_true(isinstance(jobs, list))
    assert_equal(1, len(jobs))  # was assert_true(len(jobs), 1): 1 was the message arg, not a check
    assert_equal(jobs[0]['name'], 'job_1466630204796_0059')
    assert_equal(jobs[0]['started'], True)
    assert_equal(jobs[0]['finished'], False)
    assert_true('url' in jobs[0])

    # Once "Ended Job" appears in the logs, the same job must be reported as finished.
    logs += """INFO : Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 1
INFO : 2016-06-24 15:55:51,125 Stage-1 map = 0%, reduce = 0%
INFO : 2016-06-24 15:56:00,410 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 2.12 sec
INFO : 2016-06-24 15:56:09,709 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 4.04 sec
INFO : MapReduce Total cumulative CPU time: 4 seconds 40 msec
INFO : Ended Job = job_1466630204796_0059
INFO : Launching Job 2 out of 2
"""

    jobs = self.api.get_jobs(notebook, snippet, logs)
    assert_equal(1, len(jobs))  # was assert_true(len(jobs), 1): same silent no-op assertion
    assert_equal(jobs[0]['name'], 'job_1466630204796_0059')
    assert_equal(jobs[0]['started'], True)
    assert_equal(jobs[0]['finished'], True)
def test_get_current_statement(self):
    """A single-statement snippet with non-ASCII text must hash to a stable previous-statement digest."""
    template = """
{
"status": "running",
"database": "default",
"id": "d70d31ee-a62a-4854-b2b1-b852f6a390f5",
"result": {
"type": "table",
"handle": {
"statement_id": 0,
"statements_count": 1,
"has_more_statements": false
},
"id": "ca11fcb1-11a5-f534-8200-050c8e1e57e3"
},
"statement": "%(statement)s",
"type": "hive",
"properties": {
"files": [],
"functions": [],
"settings": []
}
}
"""
    snippet = json.loads(template % {'statement': u"SELECT 'Привет', '你好';"})

    current = self.api._get_current_statement(MockDb(), snippet)

    # Digest is derived from the exact unicode statement above; any byte change breaks it.
    assert_equal('086ecec9a8b89b1b47cce358bdbb343be23b1f8b54ca76bc81927e27', current['previous_statement_hash'])
def test_plan_extraction_from_profile(self):
    """The plan section extracted from a full Impala runtime profile must equal the known plan fixture."""
    plan = self.api._get_impala_profile_plan(
        query_id='e147228183f1f0b3:6f086cc600000000',
        profile=IMPALA_CUSTOMER_QUERY_SAMPLE_PROFILE,
    )

    assert_true(plan)
    assert_equal(IMPALA_CUSTOMER_QUERY_SAMPLE_PROFILE_PLAN, plan)
class MockDb(object):
    """
    Minimal stand-in for a db client handed to HS2Api._get_current_statement().

    The previous implementation was a function that merely *defined* a nested
    close_operation() and implicitly returned None, so MockDb() handed callers
    an object with no close_operation attribute at all. Making it a class keeps
    the existing MockDb() call sites working while actually exposing the method.
    """

    def close_operation(self, handle):
        # No-op: tests only need the call to succeed, nothing to close.
        pass
class TestHiveserver2ApiWithHadoop(BeeswaxSampleProvider):
    """End-to-end HS2Api tests driven through the notebook HTTP endpoints; require a live cluster."""

    integration = True  # flags these tests as integration tests (skipped below unless a live cluster exists)

    @classmethod
    def setup_class(cls):
        # These tests hit real HiveServer2/Impala services; bail out on anything else.
        if not is_live_cluster():
            raise SkipTest('These tests can only run on a live cluster')

        super(TestHiveserver2ApiWithHadoop, cls).setup_class(load_data=False)

    def setUp(self):
        """Install sample tables and set up a 'test' user with notebook and impala access."""
        self.client.post('/beeswax/install_examples')

        self.user = User.objects.get(username='test')
        add_to_group('test')
        grant_access("test", "test", "notebook")
        grant_access("test", "test", "impala")

        self.db = dbms.get(self.user, get_query_server_config())
        self.cluster.fs.do_as_user('test', self.cluster.fs.create_home_dir, '/user/test')
        self.api = HS2Api(self.user)

        # Default statement reused by several tests below (test_explain, test_get_sample, ...).
        self.statement = 'SELECT description, salary FROM sample_07 WHERE (sample_07.salary > 100000) ORDER BY salary DESC LIMIT 1000'

    def create_query_document(self, owner, query_type='hive', database='default',
                              name='Test Query', description='Test Query', statement='',
                              files=None, functions=None, settings=None):
        """
        Creates and returns a query Document2 object
        :param owner: owner of doc
        :param query_type: hive, impala or spark
        :param database: database name
        :param name: name of document
        :param description: description of document
        :param statement: SQL statement (can be multi-query statement)
        :param files: list of dicts representing files
        :param functions: list of dicts representing functions
        :param settings: list of dicts representing settings
        :return: Document2 object representing query
        """
        if query_type not in ('hive', 'impala', 'spark'):
            raise ValueError("Invalid query_type: %s" % query_type)

        notebook = make_notebook(name=name, description=description, editor_type=query_type, statement=statement,
                                 status='ready', database=database, files=files, functions=functions, settings=settings)
        notebook_doc, save_as = _save_notebook(notebook.get_data(), owner)
        return notebook_doc

    def get_snippet(self, notebook, snippet_idx=0):
        """Return the snippet at snippet_idx, ensuring 'result' and 'result.handle' keys exist."""
        data = notebook.get_data()
        snippet = data['snippets'][snippet_idx]

        if 'result' not in snippet:
            snippet['result'] = {}
        if 'handle' not in snippet['result']:
            snippet['result']['handle'] = {}

        return snippet

    def execute_and_wait(self, query_doc, snippet_idx=0, timeout=30.0, wait=1.0):
        """Execute the snippet through the notebook endpoint and poll check_status until
        it is 'available' or `timeout` seconds elapse; raises on timeout.

        :param query_doc: Document2 object for the query
        :param snippet_idx: index of the snippet to run
        :param timeout: max seconds to wait for results
        :param wait: seconds to sleep between status polls
        :return: the snippet dict with its result handle populated
        """
        notebook = Notebook(document=query_doc)
        snippet = self.get_snippet(notebook, snippet_idx=snippet_idx)
        curr = time.time()
        end = curr + timeout
        status = 'ready'

        response = self.client.post(reverse('notebook:execute'),
                                    {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
        # Re-load the notebook/snippet so the handle from the execute response is attached fresh.
        notebook = Notebook(document=query_doc)
        snippet = self.get_snippet(notebook, snippet_idx=snippet_idx)
        data = json.loads(response.content)
        snippet['result']['handle'] = data['handle']

        while status != 'available' and curr <= end:
            response = self.client.post(reverse('notebook:check_status'),
                                        {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
            data = json.loads(response.content)
            status = data['query_status']['status']
            snippet['status'] = status
            time.sleep(wait)
            curr = time.time()

        if status != 'available':
            raise Exception('Query failed to complete or return results.')

        return snippet

    def test_query_with_unicode(self):
        """A non-ASCII literal in a statement must round-trip through execute and appear in the logs."""
        statement = "SELECT * FROM sample_07 WHERE code='한';"

        doc = self.create_query_document(owner=self.user, statement=statement)
        notebook = Notebook(document=doc)
        snippet = self.get_snippet(notebook, snippet_idx=0)

        response = self.client.post(reverse('notebook:execute'),
                                    {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
        data = json.loads(response.content)
        assert_equal(0, data['status'], data)

        snippet['result']['handle'] = data['handle']

        response = self.client.post(reverse('notebook:get_logs'),
                                    {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
        data = json.loads(response.content)
        assert_equal(0, data['status'], data)
        assert_true("SELECT * FROM sample_07 WHERE code='한'" in smart_str(data['logs']))

    def test_get_current_statement(self):
        """Executing a two-statement snippet twice must advance statement_id and the start/end offsets."""
        multi_statement = "SELECT description, salary FROM sample_07 LIMIT 20;\r\nSELECT AVG(salary) FROM sample_07;"

        doc = self.create_query_document(owner=self.user, statement=multi_statement)
        notebook = Notebook(document=doc)
        snippet = self.get_snippet(notebook, snippet_idx=0)

        response = self.client.post(reverse('notebook:execute'),
                                    {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
        data = json.loads(response.content)

        # First execute runs statement 0 of 2; more statements remain.
        assert_equal(0, data['status'], data)
        assert_equal(0, data['handle']['statement_id'], data)
        assert_equal(2, data['handle']['statements_count'], data)
        assert_equal(True, data['handle']['has_more_statements'], data)
        assert_equal({'row': 0, 'column': 0}, data['handle']['start'], data)
        assert_equal({'row': 0, 'column': 51}, data['handle']['end'], data)

        snippet['result']['handle'] = data['handle']

        response = self.client.post(reverse('notebook:execute'),
                                    {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
        data = json.loads(response.content)

        # Second execute runs statement 1 of 2; nothing left afterwards.
        assert_equal(0, data['status'], data)
        assert_equal(1, data['handle']['statement_id'], data)
        assert_equal(2, data['handle']['statements_count'], data)
        assert_equal(False, data['handle']['has_more_statements'], data)
        assert_equal({'row': 1, 'column': 0}, data['handle']['start'], data)
        assert_equal({'row': 1, 'column': 33}, data['handle']['end'], data)

    def test_explain(self):
        """EXPLAIN output must contain stage dependencies and echo the statement back."""
        # Hive 2 with Tez set hive.explain.user to true by default, but this test is expecting output when this setting
        # is set to false.
        doc = self.create_query_document(owner=self.user, statement=self.statement)
        notebook = Notebook(document=doc)
        snippet = self.get_snippet(notebook, snippet_idx=0)
        snippet['properties']['settings'].append({"key": "hive.explain.user", "value": "false"})

        response = self.client.post(reverse('notebook:explain'),
                                    {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})

        data = json.loads(response.content)

        assert_equal(0, data['status'], data)
        assert_true('STAGE DEPENDENCIES' in data['explanation'], data)
        assert_equal(self.statement, data['statement'], data)

    def test_download(self):
        """Downloading a completed query result as CSV must set the attachment headers."""
        statement = "SELECT 'hello world';"

        doc = self.create_query_document(owner=self.user, statement=statement)
        notebook = Notebook(document=doc)
        snippet = self.execute_and_wait(doc, snippet_idx=0)

        response = self.client.post(reverse('notebook:download'),
                                    {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet), 'format': 'csv'})

        assert_equal(200, response.status_code)
        assert_equal(('Content-Disposition', 'attachment; filename="Test Query.csv"'), response._headers['content-disposition'])

    def test_get_sample(self):
        """Sample-data endpoints must return headers and at least one row, for a table and a single column."""
        doc = self.create_query_document(owner=self.user, statement=self.statement)
        notebook = Notebook(document=doc)
        snippet = self.get_snippet(notebook, snippet_idx=0)

        response = self.client.post(reverse('notebook:api_sample_data',
                                            kwargs={'database': 'default', 'table': 'sample_07'}),
                                    {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
        data = json.loads(response.content)

        assert_equal(0, data['status'], data)
        assert_true('headers' in data)
        assert_true('rows' in data)
        assert_true(len(data['rows']) > 0)

        response = self.client.post(reverse('notebook:api_sample_data_column',
                                            kwargs={'database': 'default', 'table': 'sample_07', 'column': 'code'}),
                                    {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
        data = json.loads(response.content)

        assert_equal(0, data['status'], data)
        assert_true('headers' in data)
        assert_equal(['code'], data['headers'])
        assert_true('rows' in data)
        assert_true(len(data['rows']) > 0)

    def test_fetch_result_size_mr(self):
        """With the MR engine, fetch_result_size reports rows/size for queries that launch jobs, None otherwise."""
        if not is_live_cluster():  # Mini-cluster does not have JHS
            raise SkipTest

        # Assert that a query with no job will return no rows or size
        statement = "SELECT 'hello world';"

        settings = [
            {
                'key': 'hive.execution.engine',
                'value': 'mr'
            }
        ]
        doc = self.create_query_document(owner=self.user, statement=statement, settings=settings)
        notebook = Notebook(document=doc)
        snippet = self.execute_and_wait(doc, snippet_idx=0)

        response = self.client.post(reverse('notebook:fetch_result_size'),
                                    {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})

        data = json.loads(response.content)
        assert_equal(0, data['status'], data)
        assert_true('result' in data)
        assert_true('rows' in data['result'])
        assert_true('size' in data['result'])
        assert_equal(None, data['result']['rows'])
        assert_equal(None, data['result']['size'])

        # Assert that a query with map & reduce task returns rows
        statement = "SELECT DISTINCT code FROM sample_07;"
        doc = self.create_query_document(owner=self.user, statement=statement, settings=settings)
        notebook = Notebook(document=doc)
        snippet = self.execute_and_wait(doc, snippet_idx=0, timeout=60.0, wait=2.0)

        response = self.client.post(reverse('notebook:fetch_result_size'),
                                    {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})

        data = json.loads(response.content)
        assert_equal(0, data['status'], data)
        assert_true('result' in data)
        assert_true('rows' in data['result'])
        assert_true('size' in data['result'])
        assert_equal(823, data['result']['rows'])  # row count of the sample_07 fixture table
        assert_true(data['result']['size'] > 0, data['result'])

        # Assert that a query with multiple jobs returns rows
        statement = "SELECT app, COUNT(1) AS count FROM web_logs GROUP BY app ORDER BY count DESC;"
        doc = self.create_query_document(owner=self.user, statement=statement, settings=settings)
        notebook = Notebook(document=doc)
        snippet = self.execute_and_wait(doc, snippet_idx=0, timeout=120.0, wait=2.0)

        response = self.client.post(reverse('notebook:fetch_result_size'),
                                    {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
        data = json.loads(response.content)

        assert_equal(0, data['status'], data)
        assert_true('result' in data)
        assert_true('rows' in data['result'])
        assert_equal(23, data['result']['rows'])
        assert_true(data['result']['size'] > 0, data['result'])

    def test_fetch_result_size_spark(self):
        """Same contract as the MR test, but with Hive-on-Spark as the execution engine."""
        if not is_live_cluster() or not is_hive_on_spark():
            raise SkipTest

        # TODO: Add session cleanup here so we don't have orphan spark sessions

        # Assert that a query with no job will return no rows or size
        statement = "SELECT 'hello world';"

        settings = [
            {
                'key': 'hive.execution.engine',
                'value': 'spark'
            }
        ]
        doc = self.create_query_document(owner=self.user, statement=statement, settings=settings)
        notebook = Notebook(document=doc)
        snippet = self.execute_and_wait(doc, snippet_idx=0)

        response = self.client.post(reverse('notebook:fetch_result_size'),
                                    {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})

        data = json.loads(response.content)
        assert_equal(0, data['status'], data)
        assert_true('result' in data)
        assert_true('rows' in data['result'])
        assert_true('size' in data['result'])
        assert_equal(None, data['result']['rows'])
        assert_equal(None, data['result']['size'])

        # Assert that a query that runs a job will return rows and size
        statement = "SELECT app, COUNT(1) AS count FROM web_logs GROUP BY app ORDER BY count DESC;"
        doc = self.create_query_document(owner=self.user, statement=statement, settings=settings)
        notebook = Notebook(document=doc)
        snippet = self.execute_and_wait(doc, snippet_idx=0, timeout=60.0, wait=2.0)

        response = self.client.post(reverse('notebook:fetch_result_size'),
                                    {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})

        data = json.loads(response.content)
        assert_equal(0, data['status'], data)
        assert_true('result' in data)
        assert_true('rows' in data['result'])
        assert_true('size' in data['result'])
        assert_equal(23, data['result']['rows'])
        assert_true(data['result']['size'] > 0)

    def test_fetch_result_size_impala(self):
        """Impala reports a row count once results have been fetched; byte size is not available (None)."""
        if not is_live_cluster():
            raise SkipTest

        # Create session so that session object is saved to DB for server URL lookup
        session = self.api.create_session(lang='impala')

        try:
            # Assert that a query that runs a job will return rows
            statement = "SELECT app, COUNT(1) AS count FROM web_logs GROUP BY app ORDER BY count DESC;"
            doc = self.create_query_document(owner=self.user, query_type='impala', statement=statement)
            notebook = Notebook(document=doc)
            snippet = self.execute_and_wait(doc, snippet_idx=0, timeout=60.0, wait=2.0)

            # Fetch some result data first, then query the size endpoint.
            self.client.post(reverse('notebook:fetch_result_data'),
                             {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet), 'rows': 100, 'startOver': 'false'})

            response = self.client.post(reverse('notebook:fetch_result_size'),
                                        {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})

            data = json.loads(response.content)
            assert_equal(0, data['status'], data)
            assert_true('result' in data)
            assert_true('rows' in data['result'])
            assert_true('size' in data['result'])
            assert_equal(23, data['result']['rows'])
            assert_equal(None, data['result']['size'])

            # Assert that selecting all from partitioned table works
            statement = "SELECT * FROM web_logs;"
            doc = self.create_query_document(owner=self.user, query_type='impala', statement=statement)
            notebook = Notebook(document=doc)
            snippet = self.execute_and_wait(doc, snippet_idx=0, timeout=60.0, wait=5.0)

            self.client.post(reverse('notebook:fetch_result_data'),
                             {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet), 'rows': 100, 'startOver': 'false'})

            response = self.client.post(reverse('notebook:fetch_result_size'),
                                        {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})

            data = json.loads(response.content)
            assert_equal(0, data['status'], data)
            assert_true('result' in data)
            assert_true('rows' in data['result'])
            assert_equal(1000, data['result']['rows'])
        finally:
            # Always release the Impala session created above.
            self.api.close_session(session)

    def test_fetch_result_abbreviated(self):
        """Abbreviated row counts in Impala output (e.g. 1.00K) must still resolve to exact row numbers."""
        if not is_live_cluster():
            raise SkipTest

        # Create session so that session object is saved to DB for server URL lookup
        session = self.api.create_session(lang='impala')

        try:
            # Assert that abbreviated rows returned (e.g. - 1.00K) still returns actual rows
            statement = "SELECT * FROM web_logs;"
            doc = self.create_query_document(owner=self.user, query_type='impala', statement=statement)
            notebook = Notebook(document=doc)
            snippet = self.execute_and_wait(doc, snippet_idx=0, timeout=60.0, wait=5.0)

            self.client.post(reverse('notebook:fetch_result_data'),
                             {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet), 'rows': 100, 'startOver': 'false'})

            response = self.client.post(reverse('notebook:fetch_result_size'),
                                        {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})

            data = json.loads(response.content)
            assert_equal(0, data['status'], data)
            assert_true('result' in data)
            assert_true('rows' in data['result'])
            assert_equal(1000, data['result']['rows'])
        finally:
            # Always release the Impala session created above.
            self.api.close_session(session)
# Fixture: the "plan" portion of a real Impala runtime profile. Used as the expected
# value in test_plan_extraction_from_profile, and as the prefix of the full-profile
# fixture defined right below it. Must be kept byte-identical to the captured output.
IMPALA_CUSTOMER_QUERY_SAMPLE_PROFILE_PLAN = """Query (id=e147228183f1f0b3:6f086cc600000000):
Summary:
Session ID: 4043f7580371e0e6:f1068bf772ce4cb3
Session Type: HIVESERVER2
HiveServer2 Protocol Version: V6
Start Time: 2017-10-13 10:47:09.373244000
End Time: 2017-10-13 10:50:08.731647000
Query Type: QUERY
Query State: FINISHED
Query Status: OK
Impala Version: impalad version 2.11.0-SNAPSHOT RELEASE (build e9a30f67655a8da5b8526507fbe853adbd184932)
User: romain
Connected User: romain
Delegated User:
Network Address: 172.21.3.229:60523
Default Db: default
Sql Statement:
-- Compute total amount per order for all customers
SELECT
c.id AS customer_id,
c.name AS customer_name,
o.order_id,
v.total
FROM
customers c,
c.orders o,
(SELECT SUM(price * qty) total FROM o.items) v
Coordinator: self-service-analytics-2.gce.cloudera.com:22000
Query Options (set by configuration): QUERY_TIMEOUT_S=600
Query Options (set by configuration and planner): QUERY_TIMEOUT_S=600,MT_DOP=0
Plan:
----------------
Max Per-Host Resource Reservation: Memory=0B
Per-Host Resource Estimates: Memory=42.00MB
WARNING: The following tables have potentially corrupt table statistics.
Drop and re-compute statistics to resolve this problem.
default.customers
WARNING: The following tables are missing relevant table and/or column statistics.
default.customers
F01:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
| Per-Host Resources: mem-estimate=0B mem-reservation=0B
PLAN-ROOT SINK
| mem-estimate=0B mem-reservation=0B
|
10:EXCHANGE [UNPARTITIONED]
| mem-estimate=0B mem-reservation=0B
| tuple-ids=3,1,0 row-size=75B cardinality=0
|
F00:PLAN FRAGMENT [RANDOM] hosts=1 instances=1
Per-Host Resources: mem-estimate=42.00MB mem-reservation=0B
01:SUBPLAN
| mem-estimate=0B mem-reservation=0B
| tuple-ids=3,1,0 row-size=75B cardinality=0
|
|--09:NESTED LOOP JOIN [CROSS JOIN]
| | mem-estimate=35B mem-reservation=0B
| | tuple-ids=3,1,0 row-size=75B cardinality=10
| |
| |--02:SINGULAR ROW SRC
| | parent-subplan=01
| | mem-estimate=0B mem-reservation=0B
| | tuple-ids=0 row-size=35B cardinality=1
| |
| 04:SUBPLAN
| | mem-estimate=0B mem-reservation=0B
| | tuple-ids=3,1 row-size=40B cardinality=10
| |
| |--08:NESTED LOOP JOIN [CROSS JOIN]
| | | mem-estimate=32B mem-reservation=0B
| | | tuple-ids=3,1 row-size=40B cardinality=1
| | |
| | |--05:SINGULAR ROW SRC
| | | parent-subplan=04
| | | mem-estimate=0B mem-reservation=0B
| | | tuple-ids=1 row-size=32B cardinality=1
| | |
| | 07:AGGREGATE [FINALIZE]
| | | output: sum(price * qty)
| | | mem-estimate=10.00MB mem-reservation=0B spill-buffer=2.00MB
| | | tuple-ids=3 row-size=8B cardinality=1
| | |
| | 06:UNNEST [o.items]
| | parent-subplan=04
| | mem-estimate=0B mem-reservation=0B
| | tuple-ids=2 row-size=0B cardinality=10
| |
| 03:UNNEST [c.orders o]
| parent-subplan=01
| mem-estimate=0B mem-reservation=0B
| tuple-ids=1 row-size=0B cardinality=10
|
00:SCAN HDFS [default.customers c, RANDOM]
partitions=1/1 files=1 size=15.44KB
predicates: !empty(c.orders)
stats-rows=0 extrapolated-rows=disabled
table stats: rows=0 size=15.44KB
column stats: unavailable
mem-estimate=32.00MB mem-reservation=0B
tuple-ids=0 row-size=35B cardinality=0
----------------
Estimated Per-Host Mem: 44040259
Tables Missing Stats: default.customers
Tables With Corrupt Table Stats: default.customers
Per Host Min Reservation: self-service-analytics-2.gce.cloudera.com:22000(0)
Request Pool: root.romain
Admission result: Admitted immediately
ExecSummary:
Operator #Hosts Avg Time Max Time #Rows Est. #Rows Peak Mem Est. Peak Mem Detail
----------------------------------------------------------------------------------------------------------------------------
10:EXCHANGE 1 62.005ms 62.005ms 106 0 0 0 UNPARTITIONED
01:SUBPLAN 1 0.000ns 0.000ns 0 0 140.00 KB 0
|--09:NESTED LOOP JOIN 1 0.000ns 0.000ns 5.67K 10 32.00 KB 35.00 B CROSS JOIN
| |--02:SINGULAR ROW SRC 1 0.000ns 0.000ns 0 1 0 0
| 04:SUBPLAN 1 0.000ns 0.000ns 0 10 8.00 KB 0
| |--08:NESTED LOOP JOIN 1 0.000ns 0.000ns 160 1 24.00 KB 32.00 B CROSS JOIN
| | |--05:SINGULAR ROW SRC 1 0.000ns 0.000ns 0 1 0 0
| | 07:AGGREGATE 1 0.000ns 0.000ns 1 1 16.00 KB 10.00 MB FINALIZE
| | 06:UNNEST 1 0.000ns 0.000ns 2 10 0 0 o.items
| 03:UNNEST 1 0.000ns 0.000ns 2 10 0 0 c.orders o
00:SCAN HDFS 1 39.003ms 39.003ms 53 0 417.04 KB 32.00 MB default.customers c
Errors:
Planner Timeline: 36.379ms
- Analysis finished: 13.156ms (13.156ms)
- Equivalence classes computed: 13.775ms (619.949us)
- Single node plan created: 20.763ms (6.987ms)
- Runtime filters computed: 21.325ms (562.117us)
- Distributed plan created: 21.460ms (135.254us)
- Lineage info computed: 21.684ms (223.594us)
- Planning finished: 36.379ms (14.694ms)
Query Timeline: 2m59s
- Query submitted: 0.000ns (0.000ns)
- Planning finished: 42.003ms (42.003ms)
- Submit for admission: 43.003ms (1.000ms)
- Completed admission: 43.003ms (0.000ns)
- Ready to start on 1 backends: 43.003ms (0.000ns)
- All 1 execution backends (2 fragment instances) started: 44.003ms (1.000ms)
- Rows available: 121.009ms (77.006ms)
- First row fetched: 1s152ms (1s031ms)
- Unregister query: 2m59s (2m58s)
- ComputeScanRangeAssignmentTimer: 0.000ns
ImpalaServer:
- ClientFetchWaitTimer: 2m59s
- RowMaterializationTimer: 1.000ms
Execution Profile e147228183f1f0b3:6f086cc600000000"""
IMPALA_CUSTOMER_QUERY_SAMPLE_PROFILE = IMPALA_CUSTOMER_QUERY_SAMPLE_PROFILE_PLAN + \
""":(Total: 79.006ms, non-child: 0.000ns, % non-child: 0.00%)
Number of filters: 0
Filter routing table:
ID Src. Node Tgt. Node(s) Target type Partition filter Pending (Expected) First arrived Completed Enabled
-------------------------------------------------------------------------------------------------------------------
Backend startup latencies: Count: 1, min / max: 1ms / 1ms, 25th %-ile: 1ms, 50th %-ile: 1ms, """\
"""75th %-ile: 1ms, 90th %-ile: 1ms, 95th %-ile: 1ms, 99.9th %-ile: 1ms
Per Node Peak Memory Usage: self-service-analytics-2.gce.cloudera.com:22000(530.52 KB)
- FiltersReceived: 0 (0)
- FinalizationTimer: 0.000ns
Averaged Fragment F01:(Total: 76.006ms, non-child: 1.000ms, % non-child: 1.32%)
split sizes: min: 0, max: 0, avg: 0, stddev: 0
completion times: min:2m59s max:2m59s mean: 2m59s stddev:0.000ns
execution rates: min:0.00 /sec max:0.00 /sec mean:0.00 /sec stddev:0.00 /sec
num instances: 1
- AverageThreadTokens: 0.00
- BloomFilterBytes: 0
- PeakMemoryUsage: 34.12 KB (34939)
- PeakReservation: 0
- PeakUsedReservation: 0
- PerHostPeakMemUsage: 530.52 KB (543253)
- RowsProduced: 106 (106)
- TotalNetworkReceiveTime: 62.005ms
- TotalNetworkSendTime: 0.000ns
- TotalStorageWaitTime: 0.000ns
- TotalThreadsInvoluntaryContextSwitches: 0 (0)
- TotalThreadsTotalWallClockTime: 62.005ms
- TotalThreadsSysTime: 3.000us
- TotalThreadsUserTime: 12.000us
- TotalThreadsVoluntaryContextSwitches: 1 (1)
Fragment Instance Lifecycle Timings:
- ExecTime: 0.000ns
- ExecTreeExecTime: 0.000ns
- OpenTime: 62.005ms
- ExecTreeOpenTime: 62.005ms
- PrepareTime: 14.001ms
- ExecTreePrepareTime: 0.000ns
PLAN_ROOT_SINK:
- PeakMemoryUsage: 0
CodeGen:(Total: 13.001ms, non-child: 13.001ms, % non-child: 100.00%)
- CodegenTime: 0.000ns
- CompileTime: 0.000ns
- LoadTime: 0.000ns
- ModuleBitcodeSize: 1.84 MB (1929624)
- NumFunctions: 0 (0)
- NumInstructions: 0 (0)
- OptimizationTime: 0.000ns
- PeakMemoryUsage: 0
- PrepareTime: 13.001ms
EXCHANGE_NODE (id=10):(Total: 62.005ms, non-child: 62.005ms, % non-child: 100.00%)
- ConvertRowBatchTime: 0.000ns
- PeakMemoryUsage: 0
- RowsReturned: 106 (106)
- RowsReturnedRate: 1.71 K/sec
DataStreamReceiver:
- BytesReceived: 5.50 KB (5632)
- DeserializeRowBatchTimer: 0.000ns
- FirstBatchArrivalWaitTime: 62.005ms
- PeakMemoryUsage: 10.12 KB (10363)
- SendersBlockedTimer: 0.000ns
- SendersBlockedTotalTimer(*): 0.000ns
Coordinator Fragment F01:
Instance e147228183f1f0b3:6f086cc600000000 (host=self-service-analytics-2.gce.cloudera.com:22000):"""\
"""(Total: 76.006ms, non-child: 1.000ms, % non-child: 1.32%)
MemoryUsage(4s000ms): 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, """\
"""31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, """\
"""31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, """\
"""31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB
- AverageThreadTokens: 0.00
- BloomFilterBytes: 0
- PeakMemoryUsage: 34.12 KB (34939)
- PeakReservation: 0
- PeakUsedReservation: 0
- PerHostPeakMemUsage: 530.52 KB (543253)
- RowsProduced: 106 (106)
- TotalNetworkReceiveTime: 62.005ms
- TotalNetworkSendTime: 0.000ns
- TotalStorageWaitTime: 0.000ns
- TotalThreadsInvoluntaryContextSwitches: 0 (0)
- TotalThreadsTotalWallClockTime: 62.005ms
- TotalThreadsSysTime: 3.000us
- TotalThreadsUserTime: 12.000us
- TotalThreadsVoluntaryContextSwitches: 1 (1)
Fragment Instance Lifecycle Timings:
- ExecTime: 0.000ns
- ExecTreeExecTime: 0.000ns
- OpenTime: 62.005ms
- ExecTreeOpenTime: 62.005ms
- PrepareTime: 14.001ms
- ExecTreePrepareTime: 0.000ns
PLAN_ROOT_SINK:
- PeakMemoryUsage: 0
CodeGen:(Total: 13.001ms, non-child: 13.001ms, % non-child: 100.00%)
- CodegenTime: 0.000ns
- CompileTime: 0.000ns
- LoadTime: 0.000ns
- ModuleBitcodeSize: 1.84 MB (1929624)
- NumFunctions: 0 (0)
- NumInstructions: 0 (0)
- OptimizationTime: 0.000ns
- PeakMemoryUsage: 0
- PrepareTime: 13.001ms
EXCHANGE_NODE (id=10):(Total: 62.005ms, non-child: 62.005ms, % non-child: 100.00%)
- ConvertRowBatchTime: 0.000ns
- PeakMemoryUsage: 0
- RowsReturned: 106 (106)
- RowsReturnedRate: 1.71 K/sec
DataStreamReceiver:
BytesReceived(4s000ms): 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, """\
"""5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, """\
"""5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, """\
"""5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB
- BytesReceived: 5.50 KB (5632)
- DeserializeRowBatchTimer: 0.000ns
- FirstBatchArrivalWaitTime: 62.005ms
- PeakMemoryUsage: 10.12 KB (10363)
- SendersBlockedTimer: 0.000ns
- SendersBlockedTotalTimer(*): 0.000ns
Averaged Fragment F00:(Total: 76.006ms, non-child: 1.000ms, % non-child: 1.32%)
split sizes: min: 15.44 KB, max: 15.44 KB, avg: 15.44 KB, stddev: 0
completion times: min:78.006ms max:78.006ms mean: 78.006ms stddev:0.000ns
execution rates: min:197.95 KB/sec max:197.95 KB/sec mean:197.95 KB/sec stddev:0.00 /sec
num instances: 1
- AverageThreadTokens: 0.00
- BloomFilterBytes: 0
- PeakMemoryUsage: 506.52 KB (518677)
- PeakReservation: 0
- PeakUsedReservation: 0
- PerHostPeakMemUsage: 530.52 KB (543253)
- RowsProduced: 106 (106)
- TotalNetworkReceiveTime: 0.000ns
- TotalNetworkSendTime: 0.000ns
- TotalStorageWaitTime: 38.003ms
- TotalThreadsInvoluntaryContextSwitches: 1 (1)
- TotalThreadsTotalWallClockTime: 100.008ms
- TotalThreadsSysTime: 1.520ms
- TotalThreadsUserTime: 22.153ms
- TotalThreadsVoluntaryContextSwitches: 8 (8)
Fragment Instance Lifecycle Timings:
- ExecTime: 39.003ms
- ExecTreeExecTime: 39.003ms
- OpenTime: 22.001ms
- ExecTreeOpenTime: 0.000ns
- PrepareTime: 15.001ms
- ExecTreePrepareTime: 0.000ns
DataStreamSender (dst_id=10):
- BytesSent: 5.50 KB (5632)
- NetworkThroughput(*): 0.00 /sec
- OverallThroughput: 0.00 /sec
- PeakMemoryUsage: 4.85 KB (4968)
- RowsReturned: 106 (106)
- SerializeBatchTime: 0.000ns
- TransmitDataRPCTime: 0.000ns
- UncompressedRowBatchSize: 8.89 KB (9103)
CodeGen:(Total: 36.002ms, non-child: 36.002ms, % non-child: 100.00%)
- CodegenTime: 2.000ms
- CompileTime: 7.000ms
- LoadTime: 0.000ns
- ModuleBitcodeSize: 1.84 MB (1929624)
- NumFunctions: 23 (23)
- NumInstructions: 365 (365)
- OptimizationTime: 15.001ms
- PeakMemoryUsage: 182.50 KB (186880)
- PrepareTime: 14.001ms
SUBPLAN_NODE (id=1):(Total: 39.003ms, non-child: 0.000ns, % non-child: 0.00%)
- PeakMemoryUsage: 140.00 KB (143360)
- RowsReturned: 0 (0)
- RowsReturnedRate: 0
NESTED_LOOP_JOIN_NODE (id=9):
- BuildRows: 0 (0)
- BuildTime: 0.000ns
- PeakMemoryUsage: 32.00 KB (32768)
- ProbeRows: 106 (106)
- ProbeTime: 0.000ns
- RowsReturned: 5.67K (5671)
- RowsReturnedRate: 0
Nested Loop Join Builder:
- PeakMemoryUsage: 8.00 KB (8192)
SINGULAR_ROW_SRC_NODE (id=2):
- PeakMemoryUsage: 0
- RowsReturned: 0 (0)
- RowsReturnedRate: 0
SUBPLAN_NODE (id=4):
- PeakMemoryUsage: 8.00 KB (8192)
- RowsReturned: 0 (0)
- RowsReturnedRate: 0
NESTED_LOOP_JOIN_NODE (id=8):
- BuildRows: 0 (0)
- BuildTime: 0.000ns
- PeakMemoryUsage: 24.00 KB (24576)
- ProbeRows: 106 (106)
- ProbeTime: 0.000ns
- RowsReturned: 160 (160)
- RowsReturnedRate: 0
Nested Loop Join Builder:
- PeakMemoryUsage: 8.00 KB (8192)
SINGULAR_ROW_SRC_NODE (id=5):
- PeakMemoryUsage: 0
- RowsReturned: 0 (0)
- RowsReturnedRate: 0
AGGREGATION_NODE (id=7):
- BuildTime: 0.000ns
- GetResultsTime: 0.000ns
- HTResizeTime: 0.000ns
- HashBuckets: 0 (0)
- LargestPartitionPercent: 0 (0)
- MaxPartitionLevel: 0 (0)
- NumRepartitions: 0 (0)
- PartitionsCreated: 0 (0)
- PeakMemoryUsage: 16.00 KB (16384)
- RowsRepartitioned: 0 (0)
- RowsReturned: 1 (1)
- RowsReturnedRate: 0
- SpilledPartitions: 0 (0)
UNNEST_NODE (id=6):
- AvgCollectionSize: 1.50
- MaxCollectionSize: 3 (3)
- MinCollectionSize: 1 (1)
- NumCollections: 106 (106)
- PeakMemoryUsage: 0
- RowsReturned: 2 (2)
- RowsReturnedRate: 0
UNNEST_NODE (id=3):
- AvgCollectionSize: 2.00
- MaxCollectionSize: 3 (3)
- MinCollectionSize: 1 (1)
- NumCollections: 53 (53)
- PeakMemoryUsage: 0
- RowsReturned: 2 (2)
- RowsReturnedRate: 0
HDFS_SCAN_NODE (id=0):(Total: 39.003ms, non-child: 39.003ms, % non-child: 100.00%)
- AverageHdfsReadThreadConcurrency: 0.00
- AverageScannerThreadConcurrency: 0.00
- BytesRead: 19.30 KB (19766)
- BytesReadDataNodeCache: 0
- BytesReadLocal: 19.30 KB (19766)
- BytesReadRemoteUnexpected: 0
- BytesReadShortCircuit: 19.30 KB (19766)
- CachedFileHandlesHitCount: 5 (5)
- CachedFileHandlesMissCount: 1 (1)
- CollectionItemsRead: 265 (265)
- DecompressionTime: 0.000ns
- MaxCompressedTextFileLength: 0
- NumColumns: 5 (5)
- NumDictFilteredRowGroups: 0 (0)
- NumDisksAccessed: 1 (1)
- NumRowGroups: 1 (1)
- NumScannerThreadsStarted: 1 (1)
- NumScannersWithNoReads: 0 (0)
- NumStatsFilteredRowGroups: 0 (0)
- PeakMemoryUsage: 417.04 KB (427045)
- PerReadThreadRawHdfsThroughput: 507.92 KB/sec
- RemoteScanRanges: 0 (0)
- RowBatchQueueGetWaitTime: 39.003ms
- RowBatchQueuePutWaitTime: 0.000ns
- RowsRead: 53 (53)
- RowsReturned: 53 (53)
- RowsReturnedRate: 1.36 K/sec
- ScanRangesComplete: 1 (1)
- ScannerThreadsInvoluntaryContextSwitches: 0 (0)
- ScannerThreadsTotalWallClockTime: 39.003ms
- MaterializeTupleTime(*): 1.000ms
- ScannerThreadsSysTime: 346.000us
- ScannerThreadsUserTime: 346.000us
- ScannerThreadsVoluntaryContextSwitches: 4 (4)
- TotalRawHdfsReadTime(*): 38.003ms
- TotalReadThroughput: 0.00 /sec
Fragment F00:
Instance e147228183f1f0b3:6f086cc600000001 (host=self-service-analytics-2.gce.cloudera.com:22000):"""\
"""(Total: 76.006ms, non-child: 1.000ms, % non-child: 1.32%)
Hdfs split stats (<volume id>:<# splits>/<split lengths>): 0:1/15.44 KB
- AverageThreadTokens: 0.00
- BloomFilterBytes: 0
- PeakMemoryUsage: 506.52 KB (518677)
- PeakReservation: 0
- PeakUsedReservation: 0
- PerHostPeakMemUsage: 530.52 KB (543253)
- RowsProduced: 106 (106)
- TotalNetworkReceiveTime: 0.000ns
- TotalNetworkSendTime: 0.000ns
- TotalStorageWaitTime: 38.003ms
- TotalThreadsInvoluntaryContextSwitches: 1 (1)
- TotalThreadsTotalWallClockTime: 100.008ms
- TotalThreadsSysTime: 1.520ms
- TotalThreadsUserTime: 22.153ms
- TotalThreadsVoluntaryContextSwitches: 8 (8)
Fragment Instance Lifecycle Timings:
- ExecTime: 39.003ms
- ExecTreeExecTime: 39.003ms
- OpenTime: 22.001ms
- ExecTreeOpenTime: 0.000ns
- PrepareTime: 15.001ms
- ExecTreePrepareTime: 0.000ns
DataStreamSender (dst_id=10):
- BytesSent: 5.50 KB (5632)
- NetworkThroughput(*): 0.00 /sec
- OverallThroughput: 0.00 /sec
- PeakMemoryUsage: 4.85 KB (4968)
- RowsReturned: 106 (106)
- SerializeBatchTime: 0.000ns
- TransmitDataRPCTime: 0.000ns
- UncompressedRowBatchSize: 8.89 KB (9103)
CodeGen:(Total: 36.002ms, non-child: 36.002ms, % non-child: 100.00%)
- CodegenTime: 2.000ms
- CompileTime: 7.000ms
- LoadTime: 0.000ns
- ModuleBitcodeSize: 1.84 MB (1929624)
- NumFunctions: 23 (23)
- NumInstructions: 365 (365)
- OptimizationTime: 15.001ms
- PeakMemoryUsage: 182.50 KB (186880)
- PrepareTime: 14.001ms
SUBPLAN_NODE (id=1):(Total: 39.003ms, non-child: 0.000ns, % non-child: 0.00%)
- PeakMemoryUsage: 140.00 KB (143360)
- RowsReturned: 0 (0)
- RowsReturnedRate: 0
NESTED_LOOP_JOIN_NODE (id=9):
- BuildRows: 0 (0)
- BuildTime: 0.000ns
- PeakMemoryUsage: 32.00 KB (32768)
- ProbeRows: 106 (106)
- ProbeTime: 0.000ns
- RowsReturned: 5.67K (5671)
- RowsReturnedRate: 0
Nested Loop Join Builder:
- PeakMemoryUsage: 8.00 KB (8192)
SINGULAR_ROW_SRC_NODE (id=2):
- PeakMemoryUsage: 0
- RowsReturned: 0 (0)
- RowsReturnedRate: 0
SUBPLAN_NODE (id=4):
- PeakMemoryUsage: 8.00 KB (8192)
- RowsReturned: 0 (0)
- RowsReturnedRate: 0
NESTED_LOOP_JOIN_NODE (id=8):
- BuildRows: 0 (0)
- BuildTime: 0.000ns
- PeakMemoryUsage: 24.00 KB (24576)
- ProbeRows: 106 (106)
- ProbeTime: 0.000ns
- RowsReturned: 160 (160)
- RowsReturnedRate: 0
Nested Loop Join Builder:
- PeakMemoryUsage: 8.00 KB (8192)
SINGULAR_ROW_SRC_NODE (id=5):
- PeakMemoryUsage: 0
- RowsReturned: 0 (0)
- RowsReturnedRate: 0
AGGREGATION_NODE (id=7):
ExecOption: Codegen Enabled
- BuildTime: 0.000ns
- GetResultsTime: 0.000ns
- HTResizeTime: 0.000ns
- HashBuckets: 0 (0)
- LargestPartitionPercent: 0 (0)
- MaxPartitionLevel: 0 (0)
- NumRepartitions: 0 (0)
- PartitionsCreated: 0 (0)
- PeakMemoryUsage: 16.00 KB (16384)
- RowsRepartitioned: 0 (0)
- RowsReturned: 1 (1)
- RowsReturnedRate: 0
- SpilledPartitions: 0 (0)
UNNEST_NODE (id=6):
- AvgCollectionSize: 1.50
- MaxCollectionSize: 3 (3)
- MinCollectionSize: 1 (1)
- NumCollections: 106 (106)
- PeakMemoryUsage: 0
- RowsReturned: 2 (2)
- RowsReturnedRate: 0
UNNEST_NODE (id=3):
- AvgCollectionSize: 2.00
- MaxCollectionSize: 3 (3)
- MinCollectionSize: 1 (1)
- NumCollections: 53 (53)
- PeakMemoryUsage: 0
- RowsReturned: 2 (2)
- RowsReturnedRate: 0
HDFS_SCAN_NODE (id=0):(Total: 39.003ms, non-child: 39.003ms, % non-child: 100.00%)
Hdfs split stats (<volume id>:<# splits>/<split lengths>): 0:1/15.44 KB
ExecOption: PARQUET Codegen Enabled, Codegen enabled: 1 out of 1
Hdfs Read Thread Concurrency Bucket: 0:0% 1:0% 2:0% 3:0% 4:0%
File Formats: PARQUET/NONE:5
- FooterProcessingTime: (Avg: 38.003ms ; Min: 38.003ms ; Max: 38.003ms ; Number of samples: 1)
- AverageHdfsReadThreadConcurrency: 0.00
- AverageScannerThreadConcurrency: 0.00
- BytesRead: 19.30 KB (19766)
- BytesReadDataNodeCache: 0
- BytesReadLocal: 19.30 KB (19766)
- BytesReadRemoteUnexpected: 0
- BytesReadShortCircuit: 19.30 KB (19766)
- CachedFileHandlesHitCount: 5 (5)
- CachedFileHandlesMissCount: 1 (1)
- CollectionItemsRead: 265 (265)
- DecompressionTime: 0.000ns
- MaxCompressedTextFileLength: 0
- NumColumns: 5 (5)
- NumDictFilteredRowGroups: 0 (0)
- NumDisksAccessed: 1 (1)
- NumRowGroups: 1 (1)
- NumScannerThreadsStarted: 1 (1)
- NumScannersWithNoReads: 0 (0)
- NumStatsFilteredRowGroups: 0 (0)
- PeakMemoryUsage: 417.04 KB (427045)
- PerReadThreadRawHdfsThroughput: 507.92 KB/sec
- RemoteScanRanges: 0 (0)
- RowBatchQueueGetWaitTime: 39.003ms
- RowBatchQueuePutWaitTime: 0.000ns
- RowsRead: 53 (53)
- RowsReturned: 53 (53)
- RowsReturnedRate: 1.36 K/sec
- ScanRangesComplete: 1 (1)
- ScannerThreadsInvoluntaryContextSwitches: 0 (0)
- ScannerThreadsTotalWallClockTime: 39.003ms
- MaterializeTupleTime(*): 1.000ms
- ScannerThreadsSysTime: 346.000us
- ScannerThreadsUserTime: 346.000us
- ScannerThreadsVoluntaryContextSwitches: 4 (4)
- TotalRawHdfsReadTime(*): 38.003ms
- TotalReadThroughput: 0.00 /sec
"""
| en | 0.483736 | #!/usr/bin/env python # -*- coding: utf-8 -*- # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. { "selectedSnippet": "impala", "showHistory": false, "description": "Test Impala Query", "name": "Test Impala Query", "sessions": [ { "type": "impala", "properties": [], "id": null } ], "type": "query-impala", "id": null, "snippets": [{ "id":"2b7d1f46-17a0-30af-efeb-33d4c29b1055","type":"impala-xx","status":"running", "statement_raw":"select * from web_logs", "statement":"select * from web_logs", "variables":[], "properties":{"settings":[],"variables":[],"files":[],"functions":[]}, "result":{ "id":"b424befa-f4f5-8799-a0b4-79753f2552b1","type":"table", "handle":{"log_context":null,"statements_count":1,"end":{"column":21,"row":0},"statement_id":0,"has_more_statements":false, "start":{"column":0,"row":0},"secret":"rVRWw7YPRGqPT7LZ/TeFaA==an","has_result_set":true, "statement":"select * from web_logs","operation_type":0,"modified_row_count":null,"guid":"7xm6+epkRx6dyvYvGNYePA==an"} }, "lastExecuted": 1462554843817,"database":"default" }], "uuid": "d9efdee1-ef25-4d43-b8f9-1a170f69a05a", "isSaved":false } # TFetchResultsResp # ExecuteStatement # Is empty # snippet['result']['handel'] ['guid'] and ['secret'] are missing { "status": "running", "database": 
"default", "properties": { "files": [], "functions": [{ "class_name": "org.hue.udf.MyUpper", "name": "myUpper" }], "settings": [] }, "result": { "handle": { "log_context": null, "statements_count": 1, "statement_id": 0, "has_more_statements": false, "secret": "<KEY> "has_result_set": true, "operation_type": 0, "modified_row_count": null, "guid": "ZxOd4IjqTeK1PUTq+MdcDA==" }, "type": "table", "id": "ae81b805-dcf1-9692-0452-797681e997ed" }, "statement": "%(statement)s", "type": "hive", "id": "9b50e364-f7b2-303d-e924-db8b0bd9866d" } { "type": "hive", "properties": [ { "multiple": true, "value": [ { "path": "/user/test/myudfs.jar", "type": "jar" } ], "nice_name": "Files", "key": "files", "help_text": "Add one or more files, jars, or archives to the list of resources.", "type": "hdfs-files" }, { "multiple": true, "value": [ { "class_name": "org.hue.udf.MyUpper", "name": "myUpper" } ], "nice_name": "Functions", "key": "functions", "help_text": "Add one or more registered UDFs (requires function name and fully-qualified class name).", "type": "functions" }, { "multiple": true, "value": [ { "value": "spark", "key": "hive.execution.engine" } ], "nice_name": "Settings", "key": "settings", "help_text": "Hive and Hadoop configuration properties.", "type": "settings", "options": [ "hive.map.aggr", "hive.exec.compress.output", "hive.exec.parallel", "hive.execution.engine", "mapreduce.job.queuename" ] } ], "id": 30 } # Verify that upgrade will return defaults if current properties not formatted as settings # Verify that upgrade will save old properties and new settings # Verify that already upgraded properties will be unchanged { "status": "running", "database": "default", "id": "d70d31ee-a62a-4854-b2b1-b852f6a390f5", "result": { "type": "table", "handle": { "statement_id": 0, "statements_count": 2, "has_more_statements": true }, "id": "ca11fcb1-11a5-f534-8200-050c8e1e57e3" }, "statement": "%(statement)s", "type": "hive", "properties": { "files": [], "functions": [], "settings": 
[] } } INFO : Compiling command(queryId=hive_20160620133030_7e69739c-a00b-4170-8717-9eee331130eb): SELECT app, AVG(bytes) AS avg_bytes FROM web_logs GROUP BY app HAVING app IS NOT NULL ORDER BY avg_bytes DESC INFO : Semantic Analysis Completed INFO : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:app, type:string, comment:null), FieldSchema(name:avg_bytes, type:double, comment:null)], properties:null) INFO : Completed compiling command(queryId=hive_20160620133030_7e69739c-a00b-4170-8717-9eee331130eb); Time taken: 0.116 seconds INFO : Executing command(queryId=hive_20160620133030_7e69739c-a00b-4170-8717-9eee331130eb): SELECT app, AVG(bytes) AS avg_bytes FROM web_logs GROUP BY app HAVING app IS NOT NULL ORDER BY avg_bytes DESC INFO : Query ID = hive_20160620133030_7e69739c-a00b-4170-8717-9eee331130eb INFO : Total jobs = 2 INFO : Launching Job 1 out of 2 INFO : Starting task [Stage-1:MAPRED] in serial mode INFO : Number of reduce tasks not specified. Estimated from input data size: 1 INFO : In order to change the average load for a reducer (in bytes): INFO : set hive.exec.reducers.bytes.per.reducer=<number> INFO : In order to limit the maximum number of reducers: INFO : set hive.exec.reducers.max=<number> INFO : In order to set a constant number of reducers: INFO : set mapreduce.job.reduces=<number> INFO : number of splits:1 INFO : Submitting tokens for job: job_1466104358744_0003 INFO : The url to track the job: http://jennykim-1.vpc.cloudera.com:8088/proxy/application_1466104358744_0003/ INFO : Starting Job = job_1466104358744_0003, Tracking URL = http://jennykim-1.vpc.cloudera.com:8088/proxy/application_1466104358744_0003/ INFO : Kill Command = /usr/lib/hadoop/bin/hadoop job -kill job_1466104358744_0003 INFO : Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 1 INFO : 2016-06-20 13:30:34,494 Stage-1 map = 0%, reduce = 0% INFO : 2016-06-20 13:30:47,081 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.13 sec INFO : 
2016-06-20 13:30:58,606 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 5.59 sec INFO : MapReduce Total cumulative CPU time: 5 seconds 590 msec INFO : Ended Job = job_1466104358744_0003 { "status": "running", "database": "default", "id": "d70d31ee-a62a-4854-b2b1-b852f6a390f5", "result": { "type": "table", "handle": { "statement_id": 0, "statements_count": 2, "has_more_statements": true }, "id": "ca11fcb1-11a5-f534-8200-050c8e1e57e3" }, "statement": "%(statement)s", "type": "impala", "properties": { "files": [], "functions": [], "settings": [] } } Query 734a81444c85be66:d05f3bb1a6c2d0a5: 20% Complete (4 out of 4693) Query 734a81444c85be66:d05f3bb1a6c2d0a5: 30% Complete (7 out of 4693) Query 734a81444c85be66:d05f3bb1a6c2d0a5: 40% Complete (7 out of 4693) Query 734a81444c85be66:d05f3bb1a6c2d0a5: 50% Complete (234 out of 4693) { "uuid": "f5d6394d-364f-56e8-6dd3-b1c5a4738c52", "id": 1234, "sessions": [{"type": "hive", "properties": [], "id": "1234"}], "type": "query-hive", "name": "Test Hiveserver2 Editor", "isSaved": false, "parentUuid": null } { "status": "running", "database": "default", "id": "d70d31ee-a62a-4854-b2b1-b852f6a390f5", "result": { "type": "table", "handle": { "statement_id": 0, "statements_count": 2, "has_more_statements": true }, "id": "ca11fcb1-11a5-f534-8200-050c8e1e57e3" }, "statement": "%(statement)s", "type": "hive", "properties": { "files": [], "functions": [], "settings": [] } } INFO : Compiling command(queryId=hive_20160624155555_c81f8b95-af22-45fd-8e2c-fb012f530f13): SELECT app, AVG(bytes) AS avg_bytes FROM web_logs GROUP BY app HAVING app IS NOT NULL ORDER BY avg_bytes DESC INFO : Semantic Analysis Completed INFO : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:app, type:string, comment:null), FieldSchema(name:avg_bytes, type:double, comment:null)], properties:null) INFO : Completed compiling command(queryId=hive_20160624155555_c81f8b95-af22-45fd-8e2c-fb012f530f13); Time taken: 0.073 seconds INFO : Executing 
command(queryId=hive_20160624155555_c81f8b95-af22-45fd-8e2c-fb012f530f13): SELECT app, AVG(bytes) AS avg_bytes FROM web_logs GROUP BY app HAVING app IS NOT NULL ORDER BY avg_bytes DESC INFO : Query ID = hive_20160624155555_c81f8b95-af22-45fd-8e2c-fb012f530f13 INFO : Total jobs = 2 INFO : Launching Job 1 out of 2 INFO : Starting task [Stage-1:MAPRED] in serial mode INFO : Number of reduce tasks not specified. Estimated from input data size: 1 INFO : In order to change the average load for a reducer (in bytes): INFO : set hive.exec.reducers.bytes.per.reducer=<number> INFO : In order to limit the maximum number of reducers: INFO : set hive.exec.reducers.max=<number> INFO : In order to set a constant number of reducers: INFO : set mapreduce.job.reduces=<number> INFO : number of splits:1 INFO : Submitting tokens for job: job_1466630204796_0059 INFO : The url to track the job: http://jennykim-1.vpc.cloudera.com:8088/proxy/application_1466630204796_0059/ INFO : Starting Job = job_1466630204796_0059, Tracking URL = http://jennykim-1.vpc.cloudera.com:8088/proxy/application_1466630204796_0059/ INFO : Kill Command = /usr/lib/hadoop/bin/hadoop job -kill job_1466630204796_0059 INFO : Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 1 INFO : 2016-06-24 15:55:51,125 Stage-1 map = 0%, reduce = 0% INFO : 2016-06-24 15:56:00,410 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 2.12 sec INFO : 2016-06-24 15:56:09,709 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 4.04 sec INFO : MapReduce Total cumulative CPU time: 4 seconds 40 msec INFO : Ended Job = job_1466630204796_0059 INFO : Launching Job 2 out of 2 { "status": "running", "database": "default", "id": "d70d31ee-a62a-4854-b2b1-b852f6a390f5", "result": { "type": "table", "handle": { "statement_id": 0, "statements_count": 1, "has_more_statements": false }, "id": "ca11fcb1-11a5-f534-8200-050c8e1e57e3" }, "statement": "%(statement)s", "type": "hive", "properties": { "files": [], "functions": [], 
"settings": [] } } Creates and returns a query Document2 object :param owner: owner of doc :param query_type: hive, impala or spark :param database: database name :param name: name of document :param description: description of document :param statement: SQL statement (can be multi-query statement) :param files: list of dicts representing files :param functions: list of dicts representing functions :param settings: list of dicts representing settings :return: Document2 object representing query # Hive 2 with Tez set hive.explain.user to true by default, but this test is expecting output when this setting # is set to false. # Mini-cluster does not have JHS # Assert that a query with no job will return no rows or size # Assert that a query with map & reduce task returns rows # Assert that a query with multiple jobs returns rows # TODO: Add session cleanup here so we don't have orphan spark sessions # Assert that a query with no job will return no rows or size # Assert that a query that runs a job will return rows and size # Create session so that session object is saved to DB for server URL lookup # Assert that a query that runs a job will return rows # Assert that selecting all from partitioned table works # Create session so that session object is saved to DB for server URL lookup # Assert that abbreviated rows returned (e.g. 
- 1.00K) still returns actual rows Query (id=e147228183f1f0b3:6f086cc600000000): Summary: Session ID: 4043f7580371e0e6:f1068bf772ce4cb3 Session Type: HIVESERVER2 HiveServer2 Protocol Version: V6 Start Time: 2017-10-13 10:47:09.373244000 End Time: 2017-10-13 10:50:08.731647000 Query Type: QUERY Query State: FINISHED Query Status: OK Impala Version: impalad version 2.11.0-SNAPSHOT RELEASE (build e9a30f67655a8da5b8526507fbe853adbd184932) User: romain Connected User: romain Delegated User: Network Address: 172.21.3.229:60523 Default Db: default Sql Statement: -- Compute total amount per order for all customers SELECT c.id AS customer_id, c.name AS customer_name, o.order_id, v.total FROM customers c, c.orders o, (SELECT SUM(price * qty) total FROM o.items) v Coordinator: self-service-analytics-2.gce.cloudera.com:22000 Query Options (set by configuration): QUERY_TIMEOUT_S=600 Query Options (set by configuration and planner): QUERY_TIMEOUT_S=600,MT_DOP=0 Plan: ---------------- Max Per-Host Resource Reservation: Memory=0B Per-Host Resource Estimates: Memory=42.00MB WARNING: The following tables have potentially corrupt table statistics. Drop and re-compute statistics to resolve this problem. default.customers WARNING: The following tables are missing relevant table and/or column statistics. 
default.customers F01:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1 | Per-Host Resources: mem-estimate=0B mem-reservation=0B PLAN-ROOT SINK | mem-estimate=0B mem-reservation=0B | 10:EXCHANGE [UNPARTITIONED] | mem-estimate=0B mem-reservation=0B | tuple-ids=3,1,0 row-size=75B cardinality=0 | F00:PLAN FRAGMENT [RANDOM] hosts=1 instances=1 Per-Host Resources: mem-estimate=42.00MB mem-reservation=0B 01:SUBPLAN | mem-estimate=0B mem-reservation=0B | tuple-ids=3,1,0 row-size=75B cardinality=0 | |--09:NESTED LOOP JOIN [CROSS JOIN] | | mem-estimate=35B mem-reservation=0B | | tuple-ids=3,1,0 row-size=75B cardinality=10 | | | |--02:SINGULAR ROW SRC | | parent-subplan=01 | | mem-estimate=0B mem-reservation=0B | | tuple-ids=0 row-size=35B cardinality=1 | | | 04:SUBPLAN | | mem-estimate=0B mem-reservation=0B | | tuple-ids=3,1 row-size=40B cardinality=10 | | | |--08:NESTED LOOP JOIN [CROSS JOIN] | | | mem-estimate=32B mem-reservation=0B | | | tuple-ids=3,1 row-size=40B cardinality=1 | | | | | |--05:SINGULAR ROW SRC | | | parent-subplan=04 | | | mem-estimate=0B mem-reservation=0B | | | tuple-ids=1 row-size=32B cardinality=1 | | | | | 07:AGGREGATE [FINALIZE] | | | output: sum(price * qty) | | | mem-estimate=10.00MB mem-reservation=0B spill-buffer=2.00MB | | | tuple-ids=3 row-size=8B cardinality=1 | | | | | 06:UNNEST [o.items] | | parent-subplan=04 | | mem-estimate=0B mem-reservation=0B | | tuple-ids=2 row-size=0B cardinality=10 | | | 03:UNNEST [c.orders o] | parent-subplan=01 | mem-estimate=0B mem-reservation=0B | tuple-ids=1 row-size=0B cardinality=10 | 00:SCAN HDFS [default.customers c, RANDOM] partitions=1/1 files=1 size=15.44KB predicates: !empty(c.orders) stats-rows=0 extrapolated-rows=disabled table stats: rows=0 size=15.44KB column stats: unavailable mem-estimate=32.00MB mem-reservation=0B tuple-ids=0 row-size=35B cardinality=0 ---------------- Estimated Per-Host Mem: 44040259 Tables Missing Stats: default.customers Tables With Corrupt Table Stats: default.customers Per 
Host Min Reservation: self-service-analytics-2.gce.cloudera.com:22000(0) Request Pool: root.romain Admission result: Admitted immediately ExecSummary: Operator #Hosts Avg Time Max Time #Rows Est. #Rows Peak Mem Est. Peak Mem Detail ---------------------------------------------------------------------------------------------------------------------------- 10:EXCHANGE 1 62.005ms 62.005ms 106 0 0 0 UNPARTITIONED 01:SUBPLAN 1 0.000ns 0.000ns 0 0 140.00 KB 0 |--09:NESTED LOOP JOIN 1 0.000ns 0.000ns 5.67K 10 32.00 KB 35.00 B CROSS JOIN | |--02:SINGULAR ROW SRC 1 0.000ns 0.000ns 0 1 0 0 | 04:SUBPLAN 1 0.000ns 0.000ns 0 10 8.00 KB 0 | |--08:NESTED LOOP JOIN 1 0.000ns 0.000ns 160 1 24.00 KB 32.00 B CROSS JOIN | | |--05:SINGULAR ROW SRC 1 0.000ns 0.000ns 0 1 0 0 | | 07:AGGREGATE 1 0.000ns 0.000ns 1 1 16.00 KB 10.00 MB FINALIZE | | 06:UNNEST 1 0.000ns 0.000ns 2 10 0 0 o.items | 03:UNNEST 1 0.000ns 0.000ns 2 10 0 0 c.orders o 00:SCAN HDFS 1 39.003ms 39.003ms 53 0 417.04 KB 32.00 MB default.customers c Errors: Planner Timeline: 36.379ms - Analysis finished: 13.156ms (13.156ms) - Equivalence classes computed: 13.775ms (619.949us) - Single node plan created: 20.763ms (6.987ms) - Runtime filters computed: 21.325ms (562.117us) - Distributed plan created: 21.460ms (135.254us) - Lineage info computed: 21.684ms (223.594us) - Planning finished: 36.379ms (14.694ms) Query Timeline: 2m59s - Query submitted: 0.000ns (0.000ns) - Planning finished: 42.003ms (42.003ms) - Submit for admission: 43.003ms (1.000ms) - Completed admission: 43.003ms (0.000ns) - Ready to start on 1 backends: 43.003ms (0.000ns) - All 1 execution backends (2 fragment instances) started: 44.003ms (1.000ms) - Rows available: 121.009ms (77.006ms) - First row fetched: 1s152ms (1s031ms) - Unregister query: 2m59s (2m58s) - ComputeScanRangeAssignmentTimer: 0.000ns ImpalaServer: - ClientFetchWaitTimer: 2m59s - RowMaterializationTimer: 1.000ms Execution Profile e147228183f1f0b3:6f086cc600000000 :(Total: 79.006ms, non-child: 
0.000ns, % non-child: 0.00%) Number of filters: 0 Filter routing table: ID Src. Node Tgt. Node(s) Target type Partition filter Pending (Expected) First arrived Completed Enabled ------------------------------------------------------------------------------------------------------------------- Backend startup latencies: Count: 1, min / max: 1ms / 1ms, 25th %-ile: 1ms, 50th %-ile: 1ms, 75th %-ile: 1ms, 90th %-ile: 1ms, 95th %-ile: 1ms, 99.9th %-ile: 1ms Per Node Peak Memory Usage: self-service-analytics-2.gce.cloudera.com:22000(530.52 KB) - FiltersReceived: 0 (0) - FinalizationTimer: 0.000ns Averaged Fragment F01:(Total: 76.006ms, non-child: 1.000ms, % non-child: 1.32%) split sizes: min: 0, max: 0, avg: 0, stddev: 0 completion times: min:2m59s max:2m59s mean: 2m59s stddev:0.000ns execution rates: min:0.00 /sec max:0.00 /sec mean:0.00 /sec stddev:0.00 /sec num instances: 1 - AverageThreadTokens: 0.00 - BloomFilterBytes: 0 - PeakMemoryUsage: 34.12 KB (34939) - PeakReservation: 0 - PeakUsedReservation: 0 - PerHostPeakMemUsage: 530.52 KB (543253) - RowsProduced: 106 (106) - TotalNetworkReceiveTime: 62.005ms - TotalNetworkSendTime: 0.000ns - TotalStorageWaitTime: 0.000ns - TotalThreadsInvoluntaryContextSwitches: 0 (0) - TotalThreadsTotalWallClockTime: 62.005ms - TotalThreadsSysTime: 3.000us - TotalThreadsUserTime: 12.000us - TotalThreadsVoluntaryContextSwitches: 1 (1) Fragment Instance Lifecycle Timings: - ExecTime: 0.000ns - ExecTreeExecTime: 0.000ns - OpenTime: 62.005ms - ExecTreeOpenTime: 62.005ms - PrepareTime: 14.001ms - ExecTreePrepareTime: 0.000ns PLAN_ROOT_SINK: - PeakMemoryUsage: 0 CodeGen:(Total: 13.001ms, non-child: 13.001ms, % non-child: 100.00%) - CodegenTime: 0.000ns - CompileTime: 0.000ns - LoadTime: 0.000ns - ModuleBitcodeSize: 1.84 MB (1929624) - NumFunctions: 0 (0) - NumInstructions: 0 (0) - OptimizationTime: 0.000ns - PeakMemoryUsage: 0 - PrepareTime: 13.001ms EXCHANGE_NODE (id=10):(Total: 62.005ms, non-child: 62.005ms, % non-child: 100.00%) - 
ConvertRowBatchTime: 0.000ns - PeakMemoryUsage: 0 - RowsReturned: 106 (106) - RowsReturnedRate: 1.71 K/sec DataStreamReceiver: - BytesReceived: 5.50 KB (5632) - DeserializeRowBatchTimer: 0.000ns - FirstBatchArrivalWaitTime: 62.005ms - PeakMemoryUsage: 10.12 KB (10363) - SendersBlockedTimer: 0.000ns - SendersBlockedTotalTimer(*): 0.000ns Coordinator Fragment F01: Instance e147228183f1f0b3:6f086cc600000000 (host=self-service-analytics-2.gce.cloudera.com:22000): (Total: 76.006ms, non-child: 1.000ms, % non-child: 1.32%) MemoryUsage(4s000ms): 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB, 31.64 KB - AverageThreadTokens: 0.00 - BloomFilterBytes: 0 - PeakMemoryUsage: 34.12 KB (34939) - PeakReservation: 0 - PeakUsedReservation: 0 - PerHostPeakMemUsage: 530.52 KB (543253) - RowsProduced: 106 (106) - TotalNetworkReceiveTime: 62.005ms - TotalNetworkSendTime: 0.000ns - TotalStorageWaitTime: 0.000ns - TotalThreadsInvoluntaryContextSwitches: 0 (0) - TotalThreadsTotalWallClockTime: 62.005ms - TotalThreadsSysTime: 3.000us - TotalThreadsUserTime: 12.000us - TotalThreadsVoluntaryContextSwitches: 1 (1) Fragment Instance Lifecycle Timings: - ExecTime: 0.000ns - ExecTreeExecTime: 0.000ns - OpenTime: 62.005ms - ExecTreeOpenTime: 62.005ms - PrepareTime: 14.001ms - ExecTreePrepareTime: 0.000ns PLAN_ROOT_SINK: - PeakMemoryUsage: 0 CodeGen:(Total: 13.001ms, non-child: 13.001ms, % non-child: 100.00%) - CodegenTime: 0.000ns - CompileTime: 0.000ns - LoadTime: 0.000ns - ModuleBitcodeSize: 1.84 MB (1929624) - NumFunctions: 0 (0) - NumInstructions: 0 (0) - OptimizationTime: 0.000ns - PeakMemoryUsage: 0 - PrepareTime: 
13.001ms EXCHANGE_NODE (id=10):(Total: 62.005ms, non-child: 62.005ms, % non-child: 100.00%) - ConvertRowBatchTime: 0.000ns - PeakMemoryUsage: 0 - RowsReturned: 106 (106) - RowsReturnedRate: 1.71 K/sec DataStreamReceiver: BytesReceived(4s000ms): 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB, 5.50 KB - BytesReceived: 5.50 KB (5632) - DeserializeRowBatchTimer: 0.000ns - FirstBatchArrivalWaitTime: 62.005ms - PeakMemoryUsage: 10.12 KB (10363) - SendersBlockedTimer: 0.000ns - SendersBlockedTotalTimer(*): 0.000ns Averaged Fragment F00:(Total: 76.006ms, non-child: 1.000ms, % non-child: 1.32%) split sizes: min: 15.44 KB, max: 15.44 KB, avg: 15.44 KB, stddev: 0 completion times: min:78.006ms max:78.006ms mean: 78.006ms stddev:0.000ns execution rates: min:197.95 KB/sec max:197.95 KB/sec mean:197.95 KB/sec stddev:0.00 /sec num instances: 1 - AverageThreadTokens: 0.00 - BloomFilterBytes: 0 - PeakMemoryUsage: 506.52 KB (518677) - PeakReservation: 0 - PeakUsedReservation: 0 - PerHostPeakMemUsage: 530.52 KB (543253) - RowsProduced: 106 (106) - TotalNetworkReceiveTime: 0.000ns - TotalNetworkSendTime: 0.000ns - TotalStorageWaitTime: 38.003ms - TotalThreadsInvoluntaryContextSwitches: 1 (1) - TotalThreadsTotalWallClockTime: 100.008ms - TotalThreadsSysTime: 1.520ms - TotalThreadsUserTime: 22.153ms - TotalThreadsVoluntaryContextSwitches: 8 (8) Fragment Instance Lifecycle Timings: - ExecTime: 39.003ms - ExecTreeExecTime: 39.003ms - OpenTime: 22.001ms - ExecTreeOpenTime: 0.000ns - PrepareTime: 15.001ms - ExecTreePrepareTime: 0.000ns DataStreamSender (dst_id=10): - BytesSent: 5.50 KB (5632) - NetworkThroughput(*): 0.00 /sec - OverallThroughput: 0.00 /sec - 
PeakMemoryUsage: 4.85 KB (4968) - RowsReturned: 106 (106) - SerializeBatchTime: 0.000ns - TransmitDataRPCTime: 0.000ns - UncompressedRowBatchSize: 8.89 KB (9103) CodeGen:(Total: 36.002ms, non-child: 36.002ms, % non-child: 100.00%) - CodegenTime: 2.000ms - CompileTime: 7.000ms - LoadTime: 0.000ns - ModuleBitcodeSize: 1.84 MB (1929624) - NumFunctions: 23 (23) - NumInstructions: 365 (365) - OptimizationTime: 15.001ms - PeakMemoryUsage: 182.50 KB (186880) - PrepareTime: 14.001ms SUBPLAN_NODE (id=1):(Total: 39.003ms, non-child: 0.000ns, % non-child: 0.00%) - PeakMemoryUsage: 140.00 KB (143360) - RowsReturned: 0 (0) - RowsReturnedRate: 0 NESTED_LOOP_JOIN_NODE (id=9): - BuildRows: 0 (0) - BuildTime: 0.000ns - PeakMemoryUsage: 32.00 KB (32768) - ProbeRows: 106 (106) - ProbeTime: 0.000ns - RowsReturned: 5.67K (5671) - RowsReturnedRate: 0 Nested Loop Join Builder: - PeakMemoryUsage: 8.00 KB (8192) SINGULAR_ROW_SRC_NODE (id=2): - PeakMemoryUsage: 0 - RowsReturned: 0 (0) - RowsReturnedRate: 0 SUBPLAN_NODE (id=4): - PeakMemoryUsage: 8.00 KB (8192) - RowsReturned: 0 (0) - RowsReturnedRate: 0 NESTED_LOOP_JOIN_NODE (id=8): - BuildRows: 0 (0) - BuildTime: 0.000ns - PeakMemoryUsage: 24.00 KB (24576) - ProbeRows: 106 (106) - ProbeTime: 0.000ns - RowsReturned: 160 (160) - RowsReturnedRate: 0 Nested Loop Join Builder: - PeakMemoryUsage: 8.00 KB (8192) SINGULAR_ROW_SRC_NODE (id=5): - PeakMemoryUsage: 0 - RowsReturned: 0 (0) - RowsReturnedRate: 0 AGGREGATION_NODE (id=7): - BuildTime: 0.000ns - GetResultsTime: 0.000ns - HTResizeTime: 0.000ns - HashBuckets: 0 (0) - LargestPartitionPercent: 0 (0) - MaxPartitionLevel: 0 (0) - NumRepartitions: 0 (0) - PartitionsCreated: 0 (0) - PeakMemoryUsage: 16.00 KB (16384) - RowsRepartitioned: 0 (0) - RowsReturned: 1 (1) - RowsReturnedRate: 0 - SpilledPartitions: 0 (0) UNNEST_NODE (id=6): - AvgCollectionSize: 1.50 - MaxCollectionSize: 3 (3) - MinCollectionSize: 1 (1) - NumCollections: 106 (106) - PeakMemoryUsage: 0 - RowsReturned: 2 (2) - 
RowsReturnedRate: 0 UNNEST_NODE (id=3): - AvgCollectionSize: 2.00 - MaxCollectionSize: 3 (3) - MinCollectionSize: 1 (1) - NumCollections: 53 (53) - PeakMemoryUsage: 0 - RowsReturned: 2 (2) - RowsReturnedRate: 0 HDFS_SCAN_NODE (id=0):(Total: 39.003ms, non-child: 39.003ms, % non-child: 100.00%) - AverageHdfsReadThreadConcurrency: 0.00 - AverageScannerThreadConcurrency: 0.00 - BytesRead: 19.30 KB (19766) - BytesReadDataNodeCache: 0 - BytesReadLocal: 19.30 KB (19766) - BytesReadRemoteUnexpected: 0 - BytesReadShortCircuit: 19.30 KB (19766) - CachedFileHandlesHitCount: 5 (5) - CachedFileHandlesMissCount: 1 (1) - CollectionItemsRead: 265 (265) - DecompressionTime: 0.000ns - MaxCompressedTextFileLength: 0 - NumColumns: 5 (5) - NumDictFilteredRowGroups: 0 (0) - NumDisksAccessed: 1 (1) - NumRowGroups: 1 (1) - NumScannerThreadsStarted: 1 (1) - NumScannersWithNoReads: 0 (0) - NumStatsFilteredRowGroups: 0 (0) - PeakMemoryUsage: 417.04 KB (427045) - PerReadThreadRawHdfsThroughput: 507.92 KB/sec - RemoteScanRanges: 0 (0) - RowBatchQueueGetWaitTime: 39.003ms - RowBatchQueuePutWaitTime: 0.000ns - RowsRead: 53 (53) - RowsReturned: 53 (53) - RowsReturnedRate: 1.36 K/sec - ScanRangesComplete: 1 (1) - ScannerThreadsInvoluntaryContextSwitches: 0 (0) - ScannerThreadsTotalWallClockTime: 39.003ms - MaterializeTupleTime(*): 1.000ms - ScannerThreadsSysTime: 346.000us - ScannerThreadsUserTime: 346.000us - ScannerThreadsVoluntaryContextSwitches: 4 (4) - TotalRawHdfsReadTime(*): 38.003ms - TotalReadThroughput: 0.00 /sec Fragment F00: Instance e147228183f1f0b3:6f086cc600000001 (host=self-service-analytics-2.gce.cloudera.com:22000): (Total: 76.006ms, non-child: 1.000ms, % non-child: 1.32%) Hdfs split stats (<volume id>:<# splits>/<split lengths>): 0:1/15.44 KB - AverageThreadTokens: 0.00 - BloomFilterBytes: 0 - PeakMemoryUsage: 506.52 KB (518677) - PeakReservation: 0 - PeakUsedReservation: 0 - PerHostPeakMemUsage: 530.52 KB (543253) - RowsProduced: 106 (106) - TotalNetworkReceiveTime: 0.000ns - 
TotalNetworkSendTime: 0.000ns - TotalStorageWaitTime: 38.003ms - TotalThreadsInvoluntaryContextSwitches: 1 (1) - TotalThreadsTotalWallClockTime: 100.008ms - TotalThreadsSysTime: 1.520ms - TotalThreadsUserTime: 22.153ms - TotalThreadsVoluntaryContextSwitches: 8 (8) Fragment Instance Lifecycle Timings: - ExecTime: 39.003ms - ExecTreeExecTime: 39.003ms - OpenTime: 22.001ms - ExecTreeOpenTime: 0.000ns - PrepareTime: 15.001ms - ExecTreePrepareTime: 0.000ns DataStreamSender (dst_id=10): - BytesSent: 5.50 KB (5632) - NetworkThroughput(*): 0.00 /sec - OverallThroughput: 0.00 /sec - PeakMemoryUsage: 4.85 KB (4968) - RowsReturned: 106 (106) - SerializeBatchTime: 0.000ns - TransmitDataRPCTime: 0.000ns - UncompressedRowBatchSize: 8.89 KB (9103) CodeGen:(Total: 36.002ms, non-child: 36.002ms, % non-child: 100.00%) - CodegenTime: 2.000ms - CompileTime: 7.000ms - LoadTime: 0.000ns - ModuleBitcodeSize: 1.84 MB (1929624) - NumFunctions: 23 (23) - NumInstructions: 365 (365) - OptimizationTime: 15.001ms - PeakMemoryUsage: 182.50 KB (186880) - PrepareTime: 14.001ms SUBPLAN_NODE (id=1):(Total: 39.003ms, non-child: 0.000ns, % non-child: 0.00%) - PeakMemoryUsage: 140.00 KB (143360) - RowsReturned: 0 (0) - RowsReturnedRate: 0 NESTED_LOOP_JOIN_NODE (id=9): - BuildRows: 0 (0) - BuildTime: 0.000ns - PeakMemoryUsage: 32.00 KB (32768) - ProbeRows: 106 (106) - ProbeTime: 0.000ns - RowsReturned: 5.67K (5671) - RowsReturnedRate: 0 Nested Loop Join Builder: - PeakMemoryUsage: 8.00 KB (8192) SINGULAR_ROW_SRC_NODE (id=2): - PeakMemoryUsage: 0 - RowsReturned: 0 (0) - RowsReturnedRate: 0 SUBPLAN_NODE (id=4): - PeakMemoryUsage: 8.00 KB (8192) - RowsReturned: 0 (0) - RowsReturnedRate: 0 NESTED_LOOP_JOIN_NODE (id=8): - BuildRows: 0 (0) - BuildTime: 0.000ns - PeakMemoryUsage: 24.00 KB (24576) - ProbeRows: 106 (106) - ProbeTime: 0.000ns - RowsReturned: 160 (160) - RowsReturnedRate: 0 Nested Loop Join Builder: - PeakMemoryUsage: 8.00 KB (8192) SINGULAR_ROW_SRC_NODE (id=5): - PeakMemoryUsage: 0 - 
RowsReturned: 0 (0) - RowsReturnedRate: 0 AGGREGATION_NODE (id=7): ExecOption: Codegen Enabled - BuildTime: 0.000ns - GetResultsTime: 0.000ns - HTResizeTime: 0.000ns - HashBuckets: 0 (0) - LargestPartitionPercent: 0 (0) - MaxPartitionLevel: 0 (0) - NumRepartitions: 0 (0) - PartitionsCreated: 0 (0) - PeakMemoryUsage: 16.00 KB (16384) - RowsRepartitioned: 0 (0) - RowsReturned: 1 (1) - RowsReturnedRate: 0 - SpilledPartitions: 0 (0) UNNEST_NODE (id=6): - AvgCollectionSize: 1.50 - MaxCollectionSize: 3 (3) - MinCollectionSize: 1 (1) - NumCollections: 106 (106) - PeakMemoryUsage: 0 - RowsReturned: 2 (2) - RowsReturnedRate: 0 UNNEST_NODE (id=3): - AvgCollectionSize: 2.00 - MaxCollectionSize: 3 (3) - MinCollectionSize: 1 (1) - NumCollections: 53 (53) - PeakMemoryUsage: 0 - RowsReturned: 2 (2) - RowsReturnedRate: 0 HDFS_SCAN_NODE (id=0):(Total: 39.003ms, non-child: 39.003ms, % non-child: 100.00%) Hdfs split stats (<volume id>:<# splits>/<split lengths>): 0:1/15.44 KB ExecOption: PARQUET Codegen Enabled, Codegen enabled: 1 out of 1 Hdfs Read Thread Concurrency Bucket: 0:0% 1:0% 2:0% 3:0% 4:0% File Formats: PARQUET/NONE:5 - FooterProcessingTime: (Avg: 38.003ms ; Min: 38.003ms ; Max: 38.003ms ; Number of samples: 1) - AverageHdfsReadThreadConcurrency: 0.00 - AverageScannerThreadConcurrency: 0.00 - BytesRead: 19.30 KB (19766) - BytesReadDataNodeCache: 0 - BytesReadLocal: 19.30 KB (19766) - BytesReadRemoteUnexpected: 0 - BytesReadShortCircuit: 19.30 KB (19766) - CachedFileHandlesHitCount: 5 (5) - CachedFileHandlesMissCount: 1 (1) - CollectionItemsRead: 265 (265) - DecompressionTime: 0.000ns - MaxCompressedTextFileLength: 0 - NumColumns: 5 (5) - NumDictFilteredRowGroups: 0 (0) - NumDisksAccessed: 1 (1) - NumRowGroups: 1 (1) - NumScannerThreadsStarted: 1 (1) - NumScannersWithNoReads: 0 (0) - NumStatsFilteredRowGroups: 0 (0) - PeakMemoryUsage: 417.04 KB (427045) - PerReadThreadRawHdfsThroughput: 507.92 KB/sec - RemoteScanRanges: 0 (0) - RowBatchQueueGetWaitTime: 39.003ms - 
RowBatchQueuePutWaitTime: 0.000ns - RowsRead: 53 (53) - RowsReturned: 53 (53) - RowsReturnedRate: 1.36 K/sec - ScanRangesComplete: 1 (1) - ScannerThreadsInvoluntaryContextSwitches: 0 (0) - ScannerThreadsTotalWallClockTime: 39.003ms - MaterializeTupleTime(*): 1.000ms - ScannerThreadsSysTime: 346.000us - ScannerThreadsUserTime: 346.000us - ScannerThreadsVoluntaryContextSwitches: 4 (4) - TotalRawHdfsReadTime(*): 38.003ms - TotalReadThroughput: 0.00 /sec | 1.41777 | 1 |
delfin/drivers/netapp/dataontap/constants.py | guankc/delfin | 0 | 6625019 | <reponame>guankc/delfin
# Copyright 2021 The SODA Authors.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from delfin.common import constants
SOCKET_TIMEOUT = 15
AUTH_KEY = 'Authorization'
RETURN_SUCCESS_CODE = 200
CREATED_SUCCESS_CODE = 201
ACCEPTED_RETURN_CODE = 202
BAD_REQUEST_RETURN_CODE = 400
UNAUTHORIZED_RETURN_CODE = 401
FORBIDDEN_RETURN_CODE = 403
NOT_FOUND_RETURN_CODE = 404
METHOD_NOT_ALLOWED_CODE = 405
CONFLICT_RETURN_CODE = 409
INTERNAL_ERROR_CODE = 500
HOUR_STAMP = '1h'
DAY_STAMP = '1d'
MONTH_STAMP = '1m'
WEEK_STAMP = '1w'
YEAR_STAMP = '1y'
CLUSTER_PERF_URL = '/api/cluster/metrics?interval=1h&fields=iops,' \
'throughput,latency'
POOL_PERF_URL = '/api/storage/aggregates/%s/metrics?interval=1h&'\
'fields=iops,throughput,latency'
VOLUME_PERF_URL = '/api/storage/luns/%s/metrics?interval=1h&fields=iops,'\
'throughput,latency'
FS_PERF_URL = '/api/storage/volumes/%s/metrics?interval=1h&fields=iops,'\
'throughput,latency'
FC_PERF_URL = '/api/network/fc/ports/%s/metrics?interval=1h&fields=iops,'\
'throughput,latency'
ETH_PERF_URL = '/api/network/ethernet/ports/%s/metrics?interval=1h&'\
'fields=throughput'
FS_INFO_URL = '/api/storage/volumes?fields=svm'
FC_INFO_URL = '/api/network/fc/ports'
ETH_INFO_URL = '/api/network/ethernet/ports?fields=node'
PERF_MAP = {
'iops': ['iops', 'total'],
'readIops': ['iops', 'read'],
'writeIops': ['iops', 'write'],
'throughput': ['throughput', 'total'],
'readThroughput': ['throughput', 'read'],
'writeThroughput': ['throughput', 'write'],
'responseTime': ['latency', 'total']
}
PATTERN = re.compile('^[-]{3,}')
FLOAT_PATTERN = r"\d\.\d"
IP_PATTERN = re.compile(r'(([01]{0,1}\d{0,1}\d|2[0-4]\d|25[0-5])\.){3}'
r'([01]{0,1}\d{0,1}\d|2[0-4]\d|25[0-5])$')
CLUSTER_SHOW_COMMAND = "cluster identity show"
VERSION_SHOW_COMMAND = "version"
STORAGE_STATUS_COMMAND = "system health status show"
POOLS_SHOW_DETAIL_COMMAND = "storage pool show -instance"
AGGREGATE_SHOW_DETAIL_COMMAND = "storage aggregate show -instance"
FS_SHOW_DETAIL_COMMAND = "vol show -instance"
THIN_FS_SHOW_COMMAND = "vol show -space-guarantee none"
ALTER_SHOW_DETAIL_COMMAND = "system health alert show -instance"
EVENT_SHOW_DETAIL_COMMAND = "event show -instance -severity EMERGENCY"
EVENT_TIME_TYPE = '%m/%d/%Y %H:%M:%S'
ALTER_TIME_TYPE = '%a %b %d %H:%M:%S %Y'
CLEAR_ALERT_COMMAND = \
"system health alert delete -alerting-resource * -alert-id"
DISK_SHOW_DETAIL_COMMAND = "disk show -instance"
DISK_SHOW_PHYSICAL_COMMAND = "disk show -physical"
DISK_ERROR_COMMAND = "disk error show"
LUN_SHOW_DETAIL_COMMAND = "lun show -instance"
CONTROLLER_SHOW_DETAIL_COMMAND = "node show -instance"
PORT_SHOW_DETAIL_COMMAND = "network port show -instance"
INTERFACE_SHOW_DETAIL_COMMAND = "network interface show -instance"
FC_PORT_SHOW_DETAIL_COMMAND = "fcp adapter show -instance"
QTREE_SHOW_DETAIL_COMMAND = "qtree show -instance"
CIFS_SHARE_SHOW_DETAIL_COMMAND = "vserver cifs share show -instance" \
" -vserver %(vserver_name)s"
SHARE_AGREEMENT_SHOW_COMMAND = "vserver show -fields Allowed-protocols"
VSERVER_SHOW_COMMAND = "vserver show -type data"
NFS_SHARE_SHOW_COMMAND = "volume show -junction-active true -instance"
STORAGE_VENDOR = "NetApp"
STORAGE_MODEL = "cmodel"
QUOTA_SHOW_DETAIL_COMMAND = "volume quota policy rule show -instance"
MGT_IP_COMMAND = "network interface show -fields address -role cluster-mgmt"
NODE_IP_COMMAND = "network interface show -fields address -role node-mgmt"
CONTROLLER_IP_COMMAND = "network interface show -fields " \
"curr-node,address -role node-mgmt"
SECURITY_STYLE = {
'mixed': constants.NASSecurityMode.MIXED,
'ntfs': constants.NASSecurityMode.NTFS,
'unix': constants.NASSecurityMode.UNIX
}
STORAGE_STATUS = {
'ok': constants.StorageStatus.NORMAL,
'ok-with-suppressed': constants.StorageStatus.NORMAL,
'degraded': constants.StorageStatus.DEGRADED,
'unreachable': constants.StorageStatus.ABNORMAL,
'unknown': constants.StorageStatus.ABNORMAL
}
AGGREGATE_STATUS = {
'online': constants.StoragePoolStatus.NORMAL,
'creating': constants.StoragePoolStatus.NORMAL,
'mounting': constants.StoragePoolStatus.NORMAL,
'relocating': constants.StoragePoolStatus.NORMAL,
'quiesced': constants.StoragePoolStatus.NORMAL,
'quiescing': constants.StoragePoolStatus.NORMAL,
'unmounted': constants.StoragePoolStatus.OFFLINE,
'unmounting': constants.StoragePoolStatus.OFFLINE,
'destroying': constants.StoragePoolStatus.ABNORMAL,
'partial': constants.StoragePoolStatus.ABNORMAL,
'frozen': constants.StoragePoolStatus.ABNORMAL,
'reverted': constants.StoragePoolStatus.NORMAL,
'restricted': constants.StoragePoolStatus.NORMAL,
'inconsistent': constants.StoragePoolStatus.ABNORMAL,
'iron_restricted': constants.StoragePoolStatus.ABNORMAL,
'unknown': constants.StoragePoolStatus.ABNORMAL,
'offline': constants.StoragePoolStatus.OFFLINE,
'failed': constants.StoragePoolStatus.ABNORMAL,
'remote_cluster': constants.StoragePoolStatus.NORMAL,
}
VOLUME_STATUS = {
'online': constants.VolumeStatus.AVAILABLE,
'offline': constants.VolumeStatus.ERROR,
'nvfail': constants.VolumeStatus.ERROR,
'space-error': constants.VolumeStatus.ERROR,
'foreign-lun-error': constants.VolumeStatus.ERROR,
}
ALERT_SEVERITY = {
'Unknown': constants.Severity.NOT_SPECIFIED,
'Other': constants.Severity.NOT_SPECIFIED,
'Information': constants.Severity.INFORMATIONAL,
'Degraded': constants.Severity.WARNING,
'Minor': constants.Severity.MINOR,
'Major': constants.Severity.MAJOR,
'Critical': constants.Severity.CRITICAL,
'Fatal': constants.Severity.FATAL,
}
DISK_TYPE = {
'ATA': constants.DiskPhysicalType.ATA,
'BSAS': constants.DiskPhysicalType.SATA,
'FCAL': constants.DiskPhysicalType.FC,
'FSAS': constants.DiskPhysicalType.NL_SAS,
'LUN': constants.DiskPhysicalType.LUN,
'SAS': constants.DiskPhysicalType.SAS,
'MSATA': constants.DiskPhysicalType.SATA,
'SSD': constants.DiskPhysicalType.SSD,
'VMDISK': constants.DiskPhysicalType.VMDISK,
'unknown': constants.DiskPhysicalType.UNKNOWN,
}
DISK_LOGICAL = {
'aggregate': constants.DiskLogicalType.AGGREGATE,
'spare': constants.DiskLogicalType.SPARE,
'unknown': constants.DiskLogicalType.UNKNOWN,
'free': constants.DiskLogicalType.FREE,
'broken': constants.DiskLogicalType.BROKEN,
'foreign': constants.DiskLogicalType.FOREIGN,
'labelmaint': constants.DiskLogicalType.LABELMAINT,
'maintenance': constants.DiskLogicalType.MAINTENANCE,
'shared': constants.DiskLogicalType.SHARED,
'unassigned': constants.DiskLogicalType.UNASSIGNED,
'unsupported': constants.DiskLogicalType.UNSUPPORTED,
'remote': constants.DiskLogicalType.REMOTE,
'mediator': constants.DiskLogicalType.MEDIATOR,
}
FS_STATUS = {
'online': constants.FilesystemStatus.NORMAL,
'restricted': constants.FilesystemStatus.FAULTY,
'offline': constants.FilesystemStatus.NORMAL,
'force-online': constants.FilesystemStatus.FAULTY,
'force-offline': constants.FilesystemStatus.FAULTY,
}
NETWORK_LOGICAL_TYPE = {
'data': constants.PortLogicalType.DATA,
'cluster': constants.PortLogicalType.CLUSTER,
'node-mgmt': constants.PortLogicalType.NODE_MGMT,
'cluster-mgmt': constants.PortLogicalType.CLUSTER_MGMT,
'intercluster': constants.PortLogicalType.INTERCLUSTER,
}
ETH_LOGICAL_TYPE = {
'physical': constants.PortLogicalType.PHYSICAL,
'if-group': constants.PortLogicalType.IF_GROUP,
'vlan': constants.PortLogicalType.VLAN,
'undef': constants.PortLogicalType.OTHER
}
FC_TYPE = {
'fibre-channel': constants.PortType.FC,
'ethernet': constants.PortType.FCOE
}
WORM_TYPE = {
'non-snaplock': constants.WORMType.NON_WORM,
'compliance': constants.WORMType.COMPLIANCE,
'enterprise': constants.WORMType.ENTERPRISE,
'-': constants.WORMType.NON_WORM
}
QUOTA_TYPE = {
'user': constants.QuotaType.USER,
'tree': constants.QuotaType.TREE,
'group': constants.QuotaType.GROUP
}
NETWORK_PORT_TYPE = {
'nfs': constants.PortType.NFS,
'cifs': constants.PortType.CIFS,
'iscsi': constants.PortType.ISCSI,
'fcp': constants.PortType.FC,
'fcache': constants.PortType.FCACHE,
'none': constants.PortType.OTHER,
}
SEVERITY_MAP = {
'AccessCache.ReachedLimits': 'EMERGENCY',
'LUN.inconsistent.filesystem': 'EMERGENCY',
'LUN.nvfail.vol.proc.failed': 'EMERGENCY',
'Nblade.DidNotInitialize': 'EMERGENCY',
'Nblade.cifsNoPrivShare': 'EMERGENCY',
'Nblade.nfsV4PoolExhaust': 'EMERGENCY',
'Nblade.vscanNoScannerConn': 'EMERGENCY',
'adt.dest.directory.full': 'EMERGENCY',
'adt.dest.directory.unavail': 'EMERGENCY',
'adt.dest.volume.offline': 'EMERGENCY',
'adt.service.block': 'EMERGENCY',
'adt.service.ro.filesystem': 'EMERGENCY',
'adt.stgvol.nospace': 'EMERGENCY',
'adt.stgvol.offline': 'EMERGENCY',
'api.engine.killed': 'EMERGENCY',
'app.log.emerg': 'EMERGENCY',
'arl.aggrOnlineFailed': 'EMERGENCY',
'bge.EepromCrc': 'EMERGENCY',
'boot.bootmenu.issue': 'EMERGENCY',
'boot.varfs.backup.issue': 'EMERGENCY',
'bootfs.env.issue': 'EMERGENCY',
'callhome.battery.failure': 'EMERGENCY',
'callhome.ch.ps.fan.bad.xmin': 'EMERGENCY',
'callhome.chassis.overtemp': 'EMERGENCY',
'callhome.chassis.undertemp': 'EMERGENCY',
'callhome.clam.node.ooq': 'EMERGENCY',
'callhome.client.app.emerg': 'EMERGENCY',
'callhome.fans.failed': 'EMERGENCY',
'callhome.hba.failed': 'EMERGENCY',
'callhome.ibretimerprog.fail': 'EMERGENCY',
'callhome.mcc.auso.trig.fail': 'EMERGENCY',
'callhome.mcc.switchback.failed': 'EMERGENCY',
'callhome.mcc.switchover.failed': 'EMERGENCY',
'callhome.mdb.recovery.unsuccessful': 'EMERGENCY',
'callhome.netinet.dup.clustIP': 'EMERGENCY',
'callhome.nvram.failure': 'EMERGENCY',
'callhome.partner.down': 'EMERGENCY',
'callhome.ps.removed': 'EMERGENCY',
'callhome.raid.no.recover': 'EMERGENCY',
'callhome.raidtree.assim': 'EMERGENCY',
'callhome.rlm.replace': 'EMERGENCY',
'callhome.rlm.replace.lan': 'EMERGENCY',
'callhome.root.vol.recovery.reqd': 'EMERGENCY',
'callhome.sblade.lu.resync.to': 'EMERGENCY',
'callhome.sblade.lu.rst.hung': 'EMERGENCY',
'callhome.sblade.prop.fail': 'EMERGENCY',
'callhome.sfo.takeover.panic': 'EMERGENCY',
'callhome.shlf.fan': 'EMERGENCY',
'callhome.vol.space.crit': 'EMERGENCY',
'cf.fm.panicInToMode': 'EMERGENCY',
'cf.fm.reserveDisksOff': 'EMERGENCY',
'cf.fsm.autoGivebackAttemptsExceeded': 'EMERGENCY',
'cf.takeover.missing.ptnrDiskInventory': 'EMERGENCY',
'cf.takeover.missing.ptnrDisks': 'EMERGENCY',
'cft.trans.commit.failed': 'EMERGENCY',
'clam.node.ooq': 'EMERGENCY',
'config.localswitch': 'EMERGENCY',
'config.noBconnect': 'EMERGENCY',
'config.noPartnerLUNs': 'EMERGENCY',
'coredump.dump.failed': 'EMERGENCY',
'ctran.group.reset.failed': 'EMERGENCY',
'ctran.jpc.multiple.nodes': 'EMERGENCY',
'ctran.jpc.split.brain': 'EMERGENCY',
'ctran.jpc.valid.failed': 'EMERGENCY',
'disk.dynamicqual.failure.shutdown': 'EMERGENCY',
'ds.sas.xfer.unknown.error': 'EMERGENCY',
'ems.eut.prilo0_log_emerg': 'EMERGENCY',
'ems.eut.privar0_log_emerg_var': 'EMERGENCY',
'fci.adapter.firmware.update.failed': 'EMERGENCY',
'ha.takeoverImpHotShelf': 'EMERGENCY',
'haosc.invalid.config': 'EMERGENCY',
'license.capac.eval.shutdown': 'EMERGENCY',
'license.capac.shutdown': 'EMERGENCY',
'license.capac.unl.shutdown': 'EMERGENCY',
'license.subscription.enforcement': 'EMERGENCY',
'lmgr.aggr.CA.locks.dropped': 'EMERGENCY',
'lun.metafile.OOVC.corrupt': 'EMERGENCY',
'lun.metafile.VTOC.corrupt': 'EMERGENCY',
'mcc.auso.trigFailed': 'EMERGENCY',
'mcc.auso.triggerFailed': 'EMERGENCY',
'mgmtgwd.rootvol.recovery.changed': 'EMERGENCY',
'mgmtgwd.rootvol.recovery.different': 'EMERGENCY',
'mgmtgwd.rootvol.recovery.low.space': 'EMERGENCY',
'mgmtgwd.rootvol.recovery.new': 'EMERGENCY',
'mgmtgwd.rootvol.recovery.takeover.changed': 'EMERGENCY',
'mgr.boot.floppy_media': 'EMERGENCY',
'mgr.boot.reason_abnormal': 'EMERGENCY',
'mlm.array.portMixedAddress': 'EMERGENCY',
'monitor.chassisFanFail.xMinShutdown': 'EMERGENCY',
'monitor.fan.critical': 'EMERGENCY',
'monitor.globalStatus.critical': 'EMERGENCY',
'monitor.globalStatus.nonRecoverable': 'EMERGENCY',
'monitor.ioexpansionTemperature.cool': 'EMERGENCY',
'monitor.mismatch.shutdown': 'EMERGENCY',
'monitor.nvramLowBatteries': 'EMERGENCY',
'monitor.power.degraded': 'EMERGENCY',
'monitor.shelf.accessError': 'EMERGENCY',
'monitor.shutdown.brokenDisk': 'EMERGENCY',
'monitor.shutdown.chassisOverTemp': 'EMERGENCY',
'monitor.shutdown.emergency': 'EMERGENCY',
'monitor.shutdown.ioexpansionOverTemp': 'EMERGENCY',
'monitor.shutdown.ioexpansionUnderTemp': 'EMERGENCY',
'monitor.shutdown.nvramLowBatteries': 'EMERGENCY',
'monitor.shutdown.nvramLowBattery': 'EMERGENCY',
'netif.badEeprom': 'EMERGENCY',
'netif.overTempError': 'EMERGENCY',
'netif.uncorEccError': 'EMERGENCY',
'netinet.ethr.dup.clustIP': 'EMERGENCY',
'nodewatchdog.node.failure': 'EMERGENCY',
'nodewatchdog.node.longreboot': 'EMERGENCY',
'nodewatchdog.node.panic': 'EMERGENCY',
'nonha.resvConflictHalt': 'EMERGENCY',
'nv.fio.write.err': 'EMERGENCY',
'nv.none': 'EMERGENCY',
'nv2flash.copy2NVMEM.failure': 'EMERGENCY',
'nv2flash.copy2flash.failure': 'EMERGENCY',
'nv2flash.hw.failure': 'EMERGENCY',
'nv2flash.initfail': 'EMERGENCY',
'nvmem.battery.capLowCrit': 'EMERGENCY',
'nvmem.battery.capacity.low': 'EMERGENCY',
'nvmem.battery.current.high': 'EMERGENCY',
'nvmem.battery.currentHigh': 'EMERGENCY',
'nvmem.battery.currentLow': 'EMERGENCY',
'nvmem.battery.discFET.off': 'EMERGENCY',
'nvmem.battery.fccLowCrit': 'EMERGENCY',
'nvmem.battery.packInvalid': 'EMERGENCY',
'nvmem.battery.powerFault': 'EMERGENCY',
'nvmem.battery.temp.high': 'EMERGENCY',
'nvmem.battery.tempHigh': 'EMERGENCY',
'nvmem.battery.unread': 'EMERGENCY',
'nvmem.battery.voltage.high': 'EMERGENCY',
'nvmem.battery.voltageHigh': 'EMERGENCY',
'nvmem.battery.voltageLow': 'EMERGENCY',
'nvmem.voltage.high': 'EMERGENCY',
'nvram.battery.capacity.low.critical': 'EMERGENCY',
'nvram.battery.charging.nocharge': 'EMERGENCY',
'nvram.battery.current.high': 'EMERGENCY',
'nvram.battery.current.low': 'EMERGENCY',
'nvram.battery.dischargeFET.off': 'EMERGENCY',
'nvram.battery.fault': 'EMERGENCY',
'nvram.battery.fcc.low.critical': 'EMERGENCY',
'nvram.battery.not.present': 'EMERGENCY',
'nvram.battery.power.fault': 'EMERGENCY',
'nvram.battery.sensor.unreadable': 'EMERGENCY',
'nvram.battery.temp.high': 'EMERGENCY',
'nvram.battery.voltage.high': 'EMERGENCY',
'nvram.battery.voltage.low': 'EMERGENCY',
'nvram.decryptionKey.unavail': 'EMERGENCY',
'nvram.encryptionKey.initfail': 'EMERGENCY',
'nvram.hw.initFail': 'EMERGENCY',
'platform.insufficientMemory': 'EMERGENCY',
'pvif.allLinksDown': 'EMERGENCY',
'pvif.initMemFail': 'EMERGENCY',
'pvif.initMesgFail': 'EMERGENCY',
'raid.assim.disk.nolabels': 'EMERGENCY',
'raid.assim.fatal': 'EMERGENCY',
'raid.assim.fatal.upgrade': 'EMERGENCY',
'raid.assim.rg.missingChild': 'EMERGENCY',
'raid.assim.tree.degradedDirty': 'EMERGENCY',
'raid.assim.tree.multipleRootVols': 'EMERGENCY',
'raid.assim.upgrade.aggr.fail': 'EMERGENCY',
'raid.config.online.req.unsup': 'EMERGENCY',
'raid.disk.owner.change.fail': 'EMERGENCY',
'raid.mirror.bigio.restrict.failed': 'EMERGENCY',
'raid.mirror.bigio.wafliron.nostart': 'EMERGENCY',
'raid.multierr.unverified.block': 'EMERGENCY',
'raid.mv.defVol.online.fail': 'EMERGENCY',
'raid.rg.readerr.bad.file.block': 'EMERGENCY',
'raid.rg.readerr.wc.blkErr': 'EMERGENCY',
'raid.vol.volinfo.mismatch': 'EMERGENCY',
'rdb.recovery.failed': 'EMERGENCY',
'repl.checker.block.missing': 'EMERGENCY',
'repl.physdiff.invalid.hole': 'EMERGENCY',
'sas.adapter.firmware.update.failed': 'EMERGENCY',
'sas.cable.unqualified': 'EMERGENCY',
'sas.cpr.failed': 'EMERGENCY',
'sas.cpr.recoveryThreshold': 'EMERGENCY',
'scsiblade.kernel.volume.limbo.group': 'EMERGENCY',
'scsiblade.kernel.vserver.limbo.group': 'EMERGENCY',
'scsiblade.mgmt.wedged': 'EMERGENCY',
'scsiblade.prop.done.error': 'EMERGENCY',
'scsiblade.unavailable': 'EMERGENCY',
'scsiblade.vol.init.failed': 'EMERGENCY',
'scsiblade.volume.event.lost': 'EMERGENCY',
'scsiblade.vs.purge.fail': 'EMERGENCY',
'scsiblade.vserver.op.timeout': 'EMERGENCY',
'scsitarget.fct.postFailed': 'EMERGENCY',
'scsitarget.slifct.rebootRequired': 'EMERGENCY',
'secd.ldap.noServers': 'EMERGENCY',
'secd.lsa.noServers': 'EMERGENCY',
'secd.netlogon.noServers': 'EMERGENCY',
'secd.nis.noServers': 'EMERGENCY',
'ses.badShareStorageConfigErr': 'EMERGENCY',
'ses.config.IllegalEsh270': 'EMERGENCY',
'ses.config.shelfMixError': 'EMERGENCY',
'ses.psu.powerReqError': 'EMERGENCY',
'ses.shelf.em.ctrlFailErr': 'EMERGENCY',
'ses.status.enclError': 'EMERGENCY',
'ses.status.fanError': 'EMERGENCY',
'ses.status.volError': 'EMERGENCY',
'ses.system.em.mmErr': 'EMERGENCY',
'ses.unsupported.shelf.psu': 'EMERGENCY',
'ses.unsupported.shelves.psus': 'EMERGENCY',
'sfo.reassignFailed': 'EMERGENCY',
'snapmirror.replay.failed': 'EMERGENCY',
'sp.ipmi.lost.shutdown': 'EMERGENCY',
'spm.mgwd.process.exit': 'EMERGENCY',
'spm.secd.process.exit': 'EMERGENCY',
'spm.vifmgr.process.exit': 'EMERGENCY',
'spm.vldb.process.exit': 'EMERGENCY',
'ups.battery.critical.goodlinepower': 'EMERGENCY',
'ups.battery.warning': 'EMERGENCY',
'ups.battery.warning.goodlinepower': 'EMERGENCY',
'ups.inputpower.failed': 'EMERGENCY',
'ups.systemshutdown': 'EMERGENCY',
'vifmgr.clus.linkdown': 'EMERGENCY',
'vifmgr.cluscheck.l2ping': 'EMERGENCY',
'vifmgr.ipspace.tooMany': 'EMERGENCY',
'vldb.update.duringsofail': 'EMERGENCY',
'vol.phys.overalloc': 'EMERGENCY',
'vsa.inadequateVM': 'EMERGENCY',
'vsa.unlicensed': 'EMERGENCY',
'wafl.aggr.rsv.low.nomount': 'EMERGENCY',
'wafl.aggrtrans.outofspace.offline': 'EMERGENCY',
'wafl.bad.aggr.buftree.type': 'EMERGENCY',
'wafl.bad.vol.buftree.type': 'EMERGENCY',
'wafl.buf.badHeader': 'EMERGENCY',
'wafl.buf.freeingFreeBlock': 'EMERGENCY',
'wafl.failed.mount': 'EMERGENCY',
'wafl.failed.mount.bad.fsid': 'EMERGENCY',
'wafl.inconsistent.dirent': 'EMERGENCY',
'wafl.inconsistent.threshold.reached': 'EMERGENCY',
'wafl.iron.abort.offlineFail': 'EMERGENCY',
'wafl.iron.badfsid': 'EMERGENCY',
'wafl.iron.oc.abort.bad_blk': 'EMERGENCY',
'wafl.iron.oc.abort.clog_full': 'EMERGENCY',
'wafl.iron.oc.deletedChangeLog': 'EMERGENCY',
'wafl.iron.oc.errorCommitLog': 'EMERGENCY',
'wafl.iron.oc.root.lowMemory': 'EMERGENCY',
'wafl.mcc.so.nvram.warn': 'EMERGENCY',
'wafl.nvlog.checkFail': 'EMERGENCY',
'wafl.nvsave.replaying.fail': 'EMERGENCY',
'wafl.nvsave.saving.fail': 'EMERGENCY',
'wafl.offline.versionMismatch': 'EMERGENCY',
'wafl.online.fail.vmalign': 'EMERGENCY',
'wafl.online.notCompatibleVer': 'EMERGENCY',
'wafl.online.vbnMismatch': 'EMERGENCY',
'wafl.raid.incons.xidata': 'EMERGENCY',
'wafl.scan.typebits.diffFail': 'EMERGENCY',
'wafl.takeover.root.fail': 'EMERGENCY',
'wafl.takeover.vol.fail': 'EMERGENCY',
'wafl.vol.nvfail.offline': 'EMERGENCY',
'wafl.vol.walloc.rsv.failmount': 'EMERGENCY'}
IOPS_DESCRIPTION = {
"unit": "IOPS",
"description": "Input/output operations per second"
}
READ_IOPS_DESCRIPTION = {
"unit": "IOPS",
"description": "Read input/output operations per second"
}
WRITE_IOPS_DESCRIPTION = {
"unit": "IOPS",
"description": "Write input/output operations per second"
}
THROUGHPUT_DESCRIPTION = {
"unit": "MB/s",
"description": "Represents how much data is "
"successfully transferred in MB/s"
}
READ_THROUGHPUT_DESCRIPTION = {
"unit": "MB/s",
"description": "Represents how much data read is "
"successfully transferred in MB/s"
}
WRITE_THROUGHPUT_DESCRIPTION = {
"unit": "MB/s",
"description": "Represents how much data write is "
"successfully transferred in MB/s"
}
RESPONSE_TIME_DESCRIPTION = {
"unit": "ms",
"description": "Average time taken for an IO "
"operation in ms"
}
CACHE_HIT_RATIO_DESCRIPTION = {
"unit": "%",
"description": "Percentage of io that are cache hits"
}
READ_CACHE_HIT_RATIO_DESCRIPTION = {
"unit": "%",
"description": "Percentage of read ops that are cache hits"
}
WRITE_CACHE_HIT_RATIO_DESCRIPTION = {
"unit": "%",
"description": "Percentage of write ops that are cache hits"
}
IO_SIZE_DESCRIPTION = {
"unit": "KB",
"description": "The average size of IO requests in KB"
}
READ_IO_SIZE_DESCRIPTION = {
"unit": "KB",
"description": "The average size of read IO requests in KB"
}
WRITE_IO_SIZE_DESCRIPTION = {
"unit": "KB",
"description": "The average size of write IO requests in KB"
}
CPU_USAGE_DESCRIPTION = {
"unit": "%",
"description": "Percentage of CPU usage"
}
MEMORY_USAGE_DESCRIPTION = {
"unit": "%",
"description": "Percentage of DISK memory usage in percentage"
}
SERVICE_TIME = {
"unit": 'ms',
"description": "Service time of the resource in ms"
}
CAP_MAP = {
"iops": IOPS_DESCRIPTION,
"readIops": READ_IOPS_DESCRIPTION,
"writeIops": WRITE_IOPS_DESCRIPTION,
"throughput": THROUGHPUT_DESCRIPTION,
"readThroughput": READ_THROUGHPUT_DESCRIPTION,
"writeThroughput": WRITE_THROUGHPUT_DESCRIPTION,
"responseTime": RESPONSE_TIME_DESCRIPTION,
"cacheHitRatio": CACHE_HIT_RATIO_DESCRIPTION,
"readCacheHitRatio": READ_CACHE_HIT_RATIO_DESCRIPTION,
"writeCacheHitRatio": WRITE_CACHE_HIT_RATIO_DESCRIPTION,
"ioSize": IO_SIZE_DESCRIPTION,
"readIoSize": READ_IO_SIZE_DESCRIPTION,
"writeIoSize": WRITE_IO_SIZE_DESCRIPTION,
}
STORAGE_CAPABILITIES = {
"throughput": THROUGHPUT_DESCRIPTION,
"responseTime": RESPONSE_TIME_DESCRIPTION,
"iops": IOPS_DESCRIPTION,
"readThroughput": READ_THROUGHPUT_DESCRIPTION,
"writeThroughput": WRITE_THROUGHPUT_DESCRIPTION,
"readIops": READ_IOPS_DESCRIPTION,
"writeIops": WRITE_IOPS_DESCRIPTION,
}
POOL_CAPABILITIES = {
"throughput": THROUGHPUT_DESCRIPTION,
"responseTime": RESPONSE_TIME_DESCRIPTION,
"iops": IOPS_DESCRIPTION,
"readThroughput": READ_THROUGHPUT_DESCRIPTION,
"writeThroughput": WRITE_THROUGHPUT_DESCRIPTION,
"readIops": READ_IOPS_DESCRIPTION,
"writeIops": WRITE_IOPS_DESCRIPTION,
}
VOLUME_CAPABILITIES = {
"throughput": THROUGHPUT_DESCRIPTION,
"responseTime": RESPONSE_TIME_DESCRIPTION,
"iops": IOPS_DESCRIPTION,
"readThroughput": READ_THROUGHPUT_DESCRIPTION,
"writeThroughput": WRITE_THROUGHPUT_DESCRIPTION,
"readIops": READ_IOPS_DESCRIPTION,
"writeIops": WRITE_IOPS_DESCRIPTION,
}
PORT_CAPABILITIES = {
"throughput": THROUGHPUT_DESCRIPTION,
"responseTime": RESPONSE_TIME_DESCRIPTION,
"iops": IOPS_DESCRIPTION,
"readThroughput": READ_THROUGHPUT_DESCRIPTION,
"writeThroughput": WRITE_THROUGHPUT_DESCRIPTION,
"readIops": READ_IOPS_DESCRIPTION,
"writeIops": WRITE_IOPS_DESCRIPTION,
}
FS_CAPABILITIES = {
"throughput": THROUGHPUT_DESCRIPTION,
"iops": IOPS_DESCRIPTION,
"readThroughput": READ_THROUGHPUT_DESCRIPTION,
"writeThroughput": WRITE_THROUGHPUT_DESCRIPTION,
"readIops": READ_IOPS_DESCRIPTION,
"writeIops": WRITE_IOPS_DESCRIPTION,
}
| # Copyright 2021 The SODA Authors.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from delfin.common import constants
# HTTP client behaviour.
SOCKET_TIMEOUT = 15
AUTH_KEY = 'Authorization'

# HTTP status codes returned by the ONTAP REST API.
RETURN_SUCCESS_CODE = 200
CREATED_SUCCESS_CODE = 201
ACCEPTED_RETURN_CODE = 202
BAD_REQUEST_RETURN_CODE = 400
UNAUTHORIZED_RETURN_CODE = 401
FORBIDDEN_RETURN_CODE = 403
NOT_FOUND_RETURN_CODE = 404
METHOD_NOT_ALLOWED_CODE = 405
CONFLICT_RETURN_CODE = 409
INTERNAL_ERROR_CODE = 500

# Sampling-interval labels used when requesting performance history.
HOUR_STAMP = '1h'
DAY_STAMP = '1d'
MONTH_STAMP = '1m'
WEEK_STAMP = '1w'
YEAR_STAMP = '1y'

# REST endpoints for performance metrics; '%s' (where present) is filled
# with the resource key of the aggregate/LUN/volume/port being queried.
CLUSTER_PERF_URL = '/api/cluster/metrics?interval=1h&fields=iops,' \
                   'throughput,latency'
POOL_PERF_URL = '/api/storage/aggregates/%s/metrics?interval=1h&'\
                'fields=iops,throughput,latency'
VOLUME_PERF_URL = '/api/storage/luns/%s/metrics?interval=1h&fields=iops,'\
                  'throughput,latency'
FS_PERF_URL = '/api/storage/volumes/%s/metrics?interval=1h&fields=iops,'\
              'throughput,latency'
FC_PERF_URL = '/api/network/fc/ports/%s/metrics?interval=1h&fields=iops,'\
              'throughput,latency'
ETH_PERF_URL = '/api/network/ethernet/ports/%s/metrics?interval=1h&'\
               'fields=throughput'

# REST endpoints used to enumerate resources.
FS_INFO_URL = '/api/storage/volumes?fields=svm'
FC_INFO_URL = '/api/network/fc/ports'
ETH_INFO_URL = '/api/network/ethernet/ports?fields=node'

# Maps a delfin metric name to the [field, sub-field] path inside the
# REST metrics payload (e.g. 'readIops' -> payload['iops']['read']).
PERF_MAP = {
    'iops': ['iops', 'total'],
    'readIops': ['iops', 'read'],
    'writeIops': ['iops', 'write'],
    'throughput': ['throughput', 'total'],
    'readThroughput': ['throughput', 'read'],
    'writeThroughput': ['throughput', 'write'],
    'responseTime': ['latency', 'total']
}

# Matches the dashed separator rows in CLI table output.
PATTERN = re.compile('^[-]{3,}')
# Matches a decimal number of the form '<digit>.<digit>'.
FLOAT_PATTERN = r"\d\.\d"
# Matches a dotted-quad IPv4 address anchored at end of string.
IP_PATTERN = re.compile(r'(([01]{0,1}\d{0,1}\d|2[0-4]\d|25[0-5])\.){3}'
                        r'([01]{0,1}\d{0,1}\d|2[0-4]\d|25[0-5])$')
CLUSTER_SHOW_COMMAND = "cluster identity show"
VERSION_SHOW_COMMAND = "version"
STORAGE_STATUS_COMMAND = "system health status show"
POOLS_SHOW_DETAIL_COMMAND = "storage pool show -instance"
AGGREGATE_SHOW_DETAIL_COMMAND = "storage aggregate show -instance"
FS_SHOW_DETAIL_COMMAND = "vol show -instance"
THIN_FS_SHOW_COMMAND = "vol show -space-guarantee none"
ALTER_SHOW_DETAIL_COMMAND = "system health alert show -instance"
EVENT_SHOW_DETAIL_COMMAND = "event show -instance -severity EMERGENCY"
EVENT_TIME_TYPE = '%m/%d/%Y %H:%M:%S'
ALTER_TIME_TYPE = '%a %b %d %H:%M:%S %Y'
CLEAR_ALERT_COMMAND = \
"system health alert delete -alerting-resource * -alert-id"
DISK_SHOW_DETAIL_COMMAND = "disk show -instance"
DISK_SHOW_PHYSICAL_COMMAND = "disk show -physical"
DISK_ERROR_COMMAND = "disk error show"
LUN_SHOW_DETAIL_COMMAND = "lun show -instance"
CONTROLLER_SHOW_DETAIL_COMMAND = "node show -instance"
PORT_SHOW_DETAIL_COMMAND = "network port show -instance"
INTERFACE_SHOW_DETAIL_COMMAND = "network interface show -instance"
FC_PORT_SHOW_DETAIL_COMMAND = "fcp adapter show -instance"
QTREE_SHOW_DETAIL_COMMAND = "qtree show -instance"
CIFS_SHARE_SHOW_DETAIL_COMMAND = "vserver cifs share show -instance" \
" -vserver %(vserver_name)s"
SHARE_AGREEMENT_SHOW_COMMAND = "vserver show -fields Allowed-protocols"
VSERVER_SHOW_COMMAND = "vserver show -type data"
NFS_SHARE_SHOW_COMMAND = "volume show -junction-active true -instance"
STORAGE_VENDOR = "NetApp"
STORAGE_MODEL = "cmodel"
QUOTA_SHOW_DETAIL_COMMAND = "volume quota policy rule show -instance"
MGT_IP_COMMAND = "network interface show -fields address -role cluster-mgmt"
NODE_IP_COMMAND = "network interface show -fields address -role node-mgmt"
CONTROLLER_IP_COMMAND = "network interface show -fields " \
"curr-node,address -role node-mgmt"
SECURITY_STYLE = {
'mixed': constants.NASSecurityMode.MIXED,
'ntfs': constants.NASSecurityMode.NTFS,
'unix': constants.NASSecurityMode.UNIX
}
STORAGE_STATUS = {
'ok': constants.StorageStatus.NORMAL,
'ok-with-suppressed': constants.StorageStatus.NORMAL,
'degraded': constants.StorageStatus.DEGRADED,
'unreachable': constants.StorageStatus.ABNORMAL,
'unknown': constants.StorageStatus.ABNORMAL
}
AGGREGATE_STATUS = {
'online': constants.StoragePoolStatus.NORMAL,
'creating': constants.StoragePoolStatus.NORMAL,
'mounting': constants.StoragePoolStatus.NORMAL,
'relocating': constants.StoragePoolStatus.NORMAL,
'quiesced': constants.StoragePoolStatus.NORMAL,
'quiescing': constants.StoragePoolStatus.NORMAL,
'unmounted': constants.StoragePoolStatus.OFFLINE,
'unmounting': constants.StoragePoolStatus.OFFLINE,
'destroying': constants.StoragePoolStatus.ABNORMAL,
'partial': constants.StoragePoolStatus.ABNORMAL,
'frozen': constants.StoragePoolStatus.ABNORMAL,
'reverted': constants.StoragePoolStatus.NORMAL,
'restricted': constants.StoragePoolStatus.NORMAL,
'inconsistent': constants.StoragePoolStatus.ABNORMAL,
'iron_restricted': constants.StoragePoolStatus.ABNORMAL,
'unknown': constants.StoragePoolStatus.ABNORMAL,
'offline': constants.StoragePoolStatus.OFFLINE,
'failed': constants.StoragePoolStatus.ABNORMAL,
'remote_cluster': constants.StoragePoolStatus.NORMAL,
}
VOLUME_STATUS = {
'online': constants.VolumeStatus.AVAILABLE,
'offline': constants.VolumeStatus.ERROR,
'nvfail': constants.VolumeStatus.ERROR,
'space-error': constants.VolumeStatus.ERROR,
'foreign-lun-error': constants.VolumeStatus.ERROR,
}
ALERT_SEVERITY = {
'Unknown': constants.Severity.NOT_SPECIFIED,
'Other': constants.Severity.NOT_SPECIFIED,
'Information': constants.Severity.INFORMATIONAL,
'Degraded': constants.Severity.WARNING,
'Minor': constants.Severity.MINOR,
'Major': constants.Severity.MAJOR,
'Critical': constants.Severity.CRITICAL,
'Fatal': constants.Severity.FATAL,
}
DISK_TYPE = {
'ATA': constants.DiskPhysicalType.ATA,
'BSAS': constants.DiskPhysicalType.SATA,
'FCAL': constants.DiskPhysicalType.FC,
'FSAS': constants.DiskPhysicalType.NL_SAS,
'LUN': constants.DiskPhysicalType.LUN,
'SAS': constants.DiskPhysicalType.SAS,
'MSATA': constants.DiskPhysicalType.SATA,
'SSD': constants.DiskPhysicalType.SSD,
'VMDISK': constants.DiskPhysicalType.VMDISK,
'unknown': constants.DiskPhysicalType.UNKNOWN,
}
DISK_LOGICAL = {
'aggregate': constants.DiskLogicalType.AGGREGATE,
'spare': constants.DiskLogicalType.SPARE,
'unknown': constants.DiskLogicalType.UNKNOWN,
'free': constants.DiskLogicalType.FREE,
'broken': constants.DiskLogicalType.BROKEN,
'foreign': constants.DiskLogicalType.FOREIGN,
'labelmaint': constants.DiskLogicalType.LABELMAINT,
'maintenance': constants.DiskLogicalType.MAINTENANCE,
'shared': constants.DiskLogicalType.SHARED,
'unassigned': constants.DiskLogicalType.UNASSIGNED,
'unsupported': constants.DiskLogicalType.UNSUPPORTED,
'remote': constants.DiskLogicalType.REMOTE,
'mediator': constants.DiskLogicalType.MEDIATOR,
}
FS_STATUS = {
'online': constants.FilesystemStatus.NORMAL,
'restricted': constants.FilesystemStatus.FAULTY,
'offline': constants.FilesystemStatus.NORMAL,
'force-online': constants.FilesystemStatus.FAULTY,
'force-offline': constants.FilesystemStatus.FAULTY,
}
NETWORK_LOGICAL_TYPE = {
'data': constants.PortLogicalType.DATA,
'cluster': constants.PortLogicalType.CLUSTER,
'node-mgmt': constants.PortLogicalType.NODE_MGMT,
'cluster-mgmt': constants.PortLogicalType.CLUSTER_MGMT,
'intercluster': constants.PortLogicalType.INTERCLUSTER,
}
ETH_LOGICAL_TYPE = {
'physical': constants.PortLogicalType.PHYSICAL,
'if-group': constants.PortLogicalType.IF_GROUP,
'vlan': constants.PortLogicalType.VLAN,
'undef': constants.PortLogicalType.OTHER
}
FC_TYPE = {
'fibre-channel': constants.PortType.FC,
'ethernet': constants.PortType.FCOE
}
WORM_TYPE = {
'non-snaplock': constants.WORMType.NON_WORM,
'compliance': constants.WORMType.COMPLIANCE,
'enterprise': constants.WORMType.ENTERPRISE,
'-': constants.WORMType.NON_WORM
}
QUOTA_TYPE = {
'user': constants.QuotaType.USER,
'tree': constants.QuotaType.TREE,
'group': constants.QuotaType.GROUP
}
NETWORK_PORT_TYPE = {
'nfs': constants.PortType.NFS,
'cifs': constants.PortType.CIFS,
'iscsi': constants.PortType.ISCSI,
'fcp': constants.PortType.FC,
'fcache': constants.PortType.FCACHE,
'none': constants.PortType.OTHER,
}
SEVERITY_MAP = {
'AccessCache.ReachedLimits': 'EMERGENCY',
'LUN.inconsistent.filesystem': 'EMERGENCY',
'LUN.nvfail.vol.proc.failed': 'EMERGENCY',
'Nblade.DidNotInitialize': 'EMERGENCY',
'Nblade.cifsNoPrivShare': 'EMERGENCY',
'Nblade.nfsV4PoolExhaust': 'EMERGENCY',
'Nblade.vscanNoScannerConn': 'EMERGENCY',
'adt.dest.directory.full': 'EMERGENCY',
'adt.dest.directory.unavail': 'EMERGENCY',
'adt.dest.volume.offline': 'EMERGENCY',
'adt.service.block': 'EMERGENCY',
'adt.service.ro.filesystem': 'EMERGENCY',
'adt.stgvol.nospace': 'EMERGENCY',
'adt.stgvol.offline': 'EMERGENCY',
'api.engine.killed': 'EMERGENCY',
'app.log.emerg': 'EMERGENCY',
'arl.aggrOnlineFailed': 'EMERGENCY',
'bge.EepromCrc': 'EMERGENCY',
'boot.bootmenu.issue': 'EMERGENCY',
'boot.varfs.backup.issue': 'EMERGENCY',
'bootfs.env.issue': 'EMERGENCY',
'callhome.battery.failure': 'EMERGENCY',
'callhome.ch.ps.fan.bad.xmin': 'EMERGENCY',
'callhome.chassis.overtemp': 'EMERGENCY',
'callhome.chassis.undertemp': 'EMERGENCY',
'callhome.clam.node.ooq': 'EMERGENCY',
'callhome.client.app.emerg': 'EMERGENCY',
'callhome.fans.failed': 'EMERGENCY',
'callhome.hba.failed': 'EMERGENCY',
'callhome.ibretimerprog.fail': 'EMERGENCY',
'callhome.mcc.auso.trig.fail': 'EMERGENCY',
'callhome.mcc.switchback.failed': 'EMERGENCY',
'callhome.mcc.switchover.failed': 'EMERGENCY',
'callhome.mdb.recovery.unsuccessful': 'EMERGENCY',
'callhome.netinet.dup.clustIP': 'EMERGENCY',
'callhome.nvram.failure': 'EMERGENCY',
'callhome.partner.down': 'EMERGENCY',
'callhome.ps.removed': 'EMERGENCY',
'callhome.raid.no.recover': 'EMERGENCY',
'callhome.raidtree.assim': 'EMERGENCY',
'callhome.rlm.replace': 'EMERGENCY',
'callhome.rlm.replace.lan': 'EMERGENCY',
'callhome.root.vol.recovery.reqd': 'EMERGENCY',
'callhome.sblade.lu.resync.to': 'EMERGENCY',
'callhome.sblade.lu.rst.hung': 'EMERGENCY',
'callhome.sblade.prop.fail': 'EMERGENCY',
'callhome.sfo.takeover.panic': 'EMERGENCY',
'callhome.shlf.fan': 'EMERGENCY',
'callhome.vol.space.crit': 'EMERGENCY',
'cf.fm.panicInToMode': 'EMERGENCY',
'cf.fm.reserveDisksOff': 'EMERGENCY',
'cf.fsm.autoGivebackAttemptsExceeded': 'EMERGENCY',
'cf.takeover.missing.ptnrDiskInventory': 'EMERGENCY',
'cf.takeover.missing.ptnrDisks': 'EMERGENCY',
'cft.trans.commit.failed': 'EMERGENCY',
'clam.node.ooq': 'EMERGENCY',
'config.localswitch': 'EMERGENCY',
'config.noBconnect': 'EMERGENCY',
'config.noPartnerLUNs': 'EMERGENCY',
'coredump.dump.failed': 'EMERGENCY',
'ctran.group.reset.failed': 'EMERGENCY',
'ctran.jpc.multiple.nodes': 'EMERGENCY',
'ctran.jpc.split.brain': 'EMERGENCY',
'ctran.jpc.valid.failed': 'EMERGENCY',
'disk.dynamicqual.failure.shutdown': 'EMERGENCY',
'ds.sas.xfer.unknown.error': 'EMERGENCY',
'ems.eut.prilo0_log_emerg': 'EMERGENCY',
'ems.eut.privar0_log_emerg_var': 'EMERGENCY',
'fci.adapter.firmware.update.failed': 'EMERGENCY',
'ha.takeoverImpHotShelf': 'EMERGENCY',
'haosc.invalid.config': 'EMERGENCY',
'license.capac.eval.shutdown': 'EMERGENCY',
'license.capac.shutdown': 'EMERGENCY',
'license.capac.unl.shutdown': 'EMERGENCY',
'license.subscription.enforcement': 'EMERGENCY',
'lmgr.aggr.CA.locks.dropped': 'EMERGENCY',
'lun.metafile.OOVC.corrupt': 'EMERGENCY',
'lun.metafile.VTOC.corrupt': 'EMERGENCY',
'mcc.auso.trigFailed': 'EMERGENCY',
'mcc.auso.triggerFailed': 'EMERGENCY',
'mgmtgwd.rootvol.recovery.changed': 'EMERGENCY',
'mgmtgwd.rootvol.recovery.different': 'EMERGENCY',
'mgmtgwd.rootvol.recovery.low.space': 'EMERGENCY',
'mgmtgwd.rootvol.recovery.new': 'EMERGENCY',
'mgmtgwd.rootvol.recovery.takeover.changed': 'EMERGENCY',
'mgr.boot.floppy_media': 'EMERGENCY',
'mgr.boot.reason_abnormal': 'EMERGENCY',
'mlm.array.portMixedAddress': 'EMERGENCY',
'monitor.chassisFanFail.xMinShutdown': 'EMERGENCY',
'monitor.fan.critical': 'EMERGENCY',
'monitor.globalStatus.critical': 'EMERGENCY',
'monitor.globalStatus.nonRecoverable': 'EMERGENCY',
'monitor.ioexpansionTemperature.cool': 'EMERGENCY',
'monitor.mismatch.shutdown': 'EMERGENCY',
'monitor.nvramLowBatteries': 'EMERGENCY',
'monitor.power.degraded': 'EMERGENCY',
'monitor.shelf.accessError': 'EMERGENCY',
'monitor.shutdown.brokenDisk': 'EMERGENCY',
'monitor.shutdown.chassisOverTemp': 'EMERGENCY',
'monitor.shutdown.emergency': 'EMERGENCY',
'monitor.shutdown.ioexpansionOverTemp': 'EMERGENCY',
'monitor.shutdown.ioexpansionUnderTemp': 'EMERGENCY',
'monitor.shutdown.nvramLowBatteries': 'EMERGENCY',
'monitor.shutdown.nvramLowBattery': 'EMERGENCY',
'netif.badEeprom': 'EMERGENCY',
'netif.overTempError': 'EMERGENCY',
'netif.uncorEccError': 'EMERGENCY',
'netinet.ethr.dup.clustIP': 'EMERGENCY',
'nodewatchdog.node.failure': 'EMERGENCY',
'nodewatchdog.node.longreboot': 'EMERGENCY',
'nodewatchdog.node.panic': 'EMERGENCY',
'nonha.resvConflictHalt': 'EMERGENCY',
'nv.fio.write.err': 'EMERGENCY',
'nv.none': 'EMERGENCY',
'nv2flash.copy2NVMEM.failure': 'EMERGENCY',
'nv2flash.copy2flash.failure': 'EMERGENCY',
'nv2flash.hw.failure': 'EMERGENCY',
'nv2flash.initfail': 'EMERGENCY',
'nvmem.battery.capLowCrit': 'EMERGENCY',
'nvmem.battery.capacity.low': 'EMERGENCY',
'nvmem.battery.current.high': 'EMERGENCY',
'nvmem.battery.currentHigh': 'EMERGENCY',
'nvmem.battery.currentLow': 'EMERGENCY',
'nvmem.battery.discFET.off': 'EMERGENCY',
'nvmem.battery.fccLowCrit': 'EMERGENCY',
'nvmem.battery.packInvalid': 'EMERGENCY',
'nvmem.battery.powerFault': 'EMERGENCY',
'nvmem.battery.temp.high': 'EMERGENCY',
'nvmem.battery.tempHigh': 'EMERGENCY',
'nvmem.battery.unread': 'EMERGENCY',
'nvmem.battery.voltage.high': 'EMERGENCY',
'nvmem.battery.voltageHigh': 'EMERGENCY',
'nvmem.battery.voltageLow': 'EMERGENCY',
'nvmem.voltage.high': 'EMERGENCY',
'nvram.battery.capacity.low.critical': 'EMERGENCY',
'nvram.battery.charging.nocharge': 'EMERGENCY',
'nvram.battery.current.high': 'EMERGENCY',
'nvram.battery.current.low': 'EMERGENCY',
'nvram.battery.dischargeFET.off': 'EMERGENCY',
'nvram.battery.fault': 'EMERGENCY',
'nvram.battery.fcc.low.critical': 'EMERGENCY',
'nvram.battery.not.present': 'EMERGENCY',
'nvram.battery.power.fault': 'EMERGENCY',
'nvram.battery.sensor.unreadable': 'EMERGENCY',
'nvram.battery.temp.high': 'EMERGENCY',
'nvram.battery.voltage.high': 'EMERGENCY',
'nvram.battery.voltage.low': 'EMERGENCY',
'nvram.decryptionKey.unavail': 'EMERGENCY',
'nvram.encryptionKey.initfail': 'EMERGENCY',
'nvram.hw.initFail': 'EMERGENCY',
'platform.insufficientMemory': 'EMERGENCY',
'pvif.allLinksDown': 'EMERGENCY',
'pvif.initMemFail': 'EMERGENCY',
'pvif.initMesgFail': 'EMERGENCY',
'raid.assim.disk.nolabels': 'EMERGENCY',
'raid.assim.fatal': 'EMERGENCY',
'raid.assim.fatal.upgrade': 'EMERGENCY',
'raid.assim.rg.missingChild': 'EMERGENCY',
'raid.assim.tree.degradedDirty': 'EMERGENCY',
'raid.assim.tree.multipleRootVols': 'EMERGENCY',
'raid.assim.upgrade.aggr.fail': 'EMERGENCY',
'raid.config.online.req.unsup': 'EMERGENCY',
'raid.disk.owner.change.fail': 'EMERGENCY',
'raid.mirror.bigio.restrict.failed': 'EMERGENCY',
'raid.mirror.bigio.wafliron.nostart': 'EMERGENCY',
'raid.multierr.unverified.block': 'EMERGENCY',
'raid.mv.defVol.online.fail': 'EMERGENCY',
'raid.rg.readerr.bad.file.block': 'EMERGENCY',
'raid.rg.readerr.wc.blkErr': 'EMERGENCY',
'raid.vol.volinfo.mismatch': 'EMERGENCY',
'rdb.recovery.failed': 'EMERGENCY',
'repl.checker.block.missing': 'EMERGENCY',
'repl.physdiff.invalid.hole': 'EMERGENCY',
'sas.adapter.firmware.update.failed': 'EMERGENCY',
'sas.cable.unqualified': 'EMERGENCY',
'sas.cpr.failed': 'EMERGENCY',
'sas.cpr.recoveryThreshold': 'EMERGENCY',
'scsiblade.kernel.volume.limbo.group': 'EMERGENCY',
'scsiblade.kernel.vserver.limbo.group': 'EMERGENCY',
'scsiblade.mgmt.wedged': 'EMERGENCY',
'scsiblade.prop.done.error': 'EMERGENCY',
'scsiblade.unavailable': 'EMERGENCY',
'scsiblade.vol.init.failed': 'EMERGENCY',
'scsiblade.volume.event.lost': 'EMERGENCY',
'scsiblade.vs.purge.fail': 'EMERGENCY',
'scsiblade.vserver.op.timeout': 'EMERGENCY',
'scsitarget.fct.postFailed': 'EMERGENCY',
'scsitarget.slifct.rebootRequired': 'EMERGENCY',
'secd.ldap.noServers': 'EMERGENCY',
'secd.lsa.noServers': 'EMERGENCY',
'secd.netlogon.noServers': 'EMERGENCY',
'secd.nis.noServers': 'EMERGENCY',
'ses.badShareStorageConfigErr': 'EMERGENCY',
'ses.config.IllegalEsh270': 'EMERGENCY',
'ses.config.shelfMixError': 'EMERGENCY',
'ses.psu.powerReqError': 'EMERGENCY',
'ses.shelf.em.ctrlFailErr': 'EMERGENCY',
'ses.status.enclError': 'EMERGENCY',
'ses.status.fanError': 'EMERGENCY',
'ses.status.volError': 'EMERGENCY',
'ses.system.em.mmErr': 'EMERGENCY',
'ses.unsupported.shelf.psu': 'EMERGENCY',
'ses.unsupported.shelves.psus': 'EMERGENCY',
'sfo.reassignFailed': 'EMERGENCY',
'snapmirror.replay.failed': 'EMERGENCY',
'sp.ipmi.lost.shutdown': 'EMERGENCY',
'spm.mgwd.process.exit': 'EMERGENCY',
'spm.secd.process.exit': 'EMERGENCY',
'spm.vifmgr.process.exit': 'EMERGENCY',
'spm.vldb.process.exit': 'EMERGENCY',
'ups.battery.critical.goodlinepower': 'EMERGENCY',
'ups.battery.warning': 'EMERGENCY',
'ups.battery.warning.goodlinepower': 'EMERGENCY',
'ups.inputpower.failed': 'EMERGENCY',
'ups.systemshutdown': 'EMERGENCY',
'vifmgr.clus.linkdown': 'EMERGENCY',
'vifmgr.cluscheck.l2ping': 'EMERGENCY',
'vifmgr.ipspace.tooMany': 'EMERGENCY',
'vldb.update.duringsofail': 'EMERGENCY',
'vol.phys.overalloc': 'EMERGENCY',
'vsa.inadequateVM': 'EMERGENCY',
'vsa.unlicensed': 'EMERGENCY',
'wafl.aggr.rsv.low.nomount': 'EMERGENCY',
'wafl.aggrtrans.outofspace.offline': 'EMERGENCY',
'wafl.bad.aggr.buftree.type': 'EMERGENCY',
'wafl.bad.vol.buftree.type': 'EMERGENCY',
'wafl.buf.badHeader': 'EMERGENCY',
'wafl.buf.freeingFreeBlock': 'EMERGENCY',
'wafl.failed.mount': 'EMERGENCY',
'wafl.failed.mount.bad.fsid': 'EMERGENCY',
'wafl.inconsistent.dirent': 'EMERGENCY',
'wafl.inconsistent.threshold.reached': 'EMERGENCY',
'wafl.iron.abort.offlineFail': 'EMERGENCY',
'wafl.iron.badfsid': 'EMERGENCY',
'wafl.iron.oc.abort.bad_blk': 'EMERGENCY',
'wafl.iron.oc.abort.clog_full': 'EMERGENCY',
'wafl.iron.oc.deletedChangeLog': 'EMERGENCY',
'wafl.iron.oc.errorCommitLog': 'EMERGENCY',
'wafl.iron.oc.root.lowMemory': 'EMERGENCY',
'wafl.mcc.so.nvram.warn': 'EMERGENCY',
'wafl.nvlog.checkFail': 'EMERGENCY',
'wafl.nvsave.replaying.fail': 'EMERGENCY',
'wafl.nvsave.saving.fail': 'EMERGENCY',
'wafl.offline.versionMismatch': 'EMERGENCY',
'wafl.online.fail.vmalign': 'EMERGENCY',
'wafl.online.notCompatibleVer': 'EMERGENCY',
'wafl.online.vbnMismatch': 'EMERGENCY',
'wafl.raid.incons.xidata': 'EMERGENCY',
'wafl.scan.typebits.diffFail': 'EMERGENCY',
'wafl.takeover.root.fail': 'EMERGENCY',
'wafl.takeover.vol.fail': 'EMERGENCY',
'wafl.vol.nvfail.offline': 'EMERGENCY',
'wafl.vol.walloc.rsv.failmount': 'EMERGENCY'}
# --- Per-metric descriptions (unit + human-readable meaning) ---------------

IOPS_DESCRIPTION = {
    "unit": "IOPS",
    "description": "Input/output operations per second"
}
READ_IOPS_DESCRIPTION = {
    "unit": "IOPS",
    "description": "Read input/output operations per second"
}
WRITE_IOPS_DESCRIPTION = {
    "unit": "IOPS",
    "description": "Write input/output operations per second"
}
THROUGHPUT_DESCRIPTION = {
    "unit": "MB/s",
    "description": "Represents how much data is "
                   "successfully transferred in MB/s"
}
READ_THROUGHPUT_DESCRIPTION = {
    "unit": "MB/s",
    "description": "Represents how much data read is "
                   "successfully transferred in MB/s"
}
WRITE_THROUGHPUT_DESCRIPTION = {
    "unit": "MB/s",
    "description": "Represents how much data write is "
                   "successfully transferred in MB/s"
}
RESPONSE_TIME_DESCRIPTION = {
    "unit": "ms",
    "description": "Average time taken for an IO "
                   "operation in ms"
}
CACHE_HIT_RATIO_DESCRIPTION = {
    "unit": "%",
    "description": "Percentage of io that are cache hits"
}
READ_CACHE_HIT_RATIO_DESCRIPTION = {
    "unit": "%",
    "description": "Percentage of read ops that are cache hits"
}
WRITE_CACHE_HIT_RATIO_DESCRIPTION = {
    "unit": "%",
    "description": "Percentage of write ops that are cache hits"
}
IO_SIZE_DESCRIPTION = {
    "unit": "KB",
    "description": "The average size of IO requests in KB"
}
READ_IO_SIZE_DESCRIPTION = {
    "unit": "KB",
    "description": "The average size of read IO requests in KB"
}
WRITE_IO_SIZE_DESCRIPTION = {
    "unit": "KB",
    "description": "The average size of write IO requests in KB"
}
CPU_USAGE_DESCRIPTION = {
    "unit": "%",
    "description": "Percentage of CPU usage"
}
MEMORY_USAGE_DESCRIPTION = {
    "unit": "%",
    "description": "Percentage of DISK memory usage in percentage"
}
SERVICE_TIME = {
    "unit": 'ms',
    "description": "Service time of the resource in ms"
}

# Metric name -> description, for every metric this driver can report.
CAP_MAP = {
    "iops": IOPS_DESCRIPTION,
    "readIops": READ_IOPS_DESCRIPTION,
    "writeIops": WRITE_IOPS_DESCRIPTION,
    "throughput": THROUGHPUT_DESCRIPTION,
    "readThroughput": READ_THROUGHPUT_DESCRIPTION,
    "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION,
    "responseTime": RESPONSE_TIME_DESCRIPTION,
    "cacheHitRatio": CACHE_HIT_RATIO_DESCRIPTION,
    "readCacheHitRatio": READ_CACHE_HIT_RATIO_DESCRIPTION,
    "writeCacheHitRatio": WRITE_CACHE_HIT_RATIO_DESCRIPTION,
    "ioSize": IO_SIZE_DESCRIPTION,
    "readIoSize": READ_IO_SIZE_DESCRIPTION,
    "writeIoSize": WRITE_IO_SIZE_DESCRIPTION,
}

# Storage, pool, volume and port resources all advertise the same seven
# performance metrics, so build their capability maps from one shared
# template instead of repeating the literal five times.  Key order matches
# the original literals.
_PERF_CAPABILITIES = {
    "throughput": THROUGHPUT_DESCRIPTION,
    "responseTime": RESPONSE_TIME_DESCRIPTION,
    "iops": IOPS_DESCRIPTION,
    "readThroughput": READ_THROUGHPUT_DESCRIPTION,
    "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION,
    "readIops": READ_IOPS_DESCRIPTION,
    "writeIops": WRITE_IOPS_DESCRIPTION,
}
STORAGE_CAPABILITIES = dict(_PERF_CAPABILITIES)
POOL_CAPABILITIES = dict(_PERF_CAPABILITIES)
VOLUME_CAPABILITIES = dict(_PERF_CAPABILITIES)
PORT_CAPABILITIES = dict(_PERF_CAPABILITIES)
# Filesystems do not report latency, so they omit "responseTime".
FS_CAPABILITIES = {k: v for k, v in _PERF_CAPABILITIES.items()
                   if k != "responseTime"}
jrpc/server/__init__.py | cympfh/jrpc | 0 | 6625020 | import asyncio
from typing import Any, Dict, List, Optional, Union
from aiohttp import web
class Application:
    """Minimal JSON-RPC 2.0 server built on an aiohttp web application."""

    def __init__(self):
        """Create a server with an empty RPC method registry."""
        self.app = web.Application()
        # Maps method name -> async callable.
        self.methods = {}

    def method(self, func):
        """Register *func* as an RPC method under its ``__name__``.

        Returns *func* so this works as a decorator: the previous version
        returned None, which rebound the decorated module-level name to
        None at the call site.
        """
        self.methods[func.__name__] = func
        return func

    async def call(self, method: str, params: Union[List[Any], Dict[str, Any]]) -> Any:
        """Invoke the registered *method* with positional or keyword params.

        Raises KeyError if *method* was never registered.  Returns None
        when *params* is neither a list nor a dict.
        """
        if isinstance(params, list):
            return await self.methods[method](*params)
        elif isinstance(params, dict):
            return await self.methods[method](**params)
        else:
            return None

    async def _process_single(self, data: dict) -> Optional[dict]:
        """Handle one request/notification object.

        Parameters
        ----------
        data
            A single JSON-RPC 2.0 Request/Notification object.

        Returns
        -------
        A Response object for a request, or None for a notification
        (an object that carries no "id" member).
        """
        assert data.get("jsonrpc") == "2.0", "Supporting Only JSON-RPC 2.0"
        method = data.get("method")
        params = data.get("params", [])
        request_id = data.get("id", None)
        result = await self.call(method, params)
        if request_id is None:  # notification: no response is produced
            return None
        return {"jsonrpc": "2.0", "result": result, "id": request_id}

    async def _process_batch(self, data: List[dict]) -> List[dict]:
        """Handle a batch concurrently, dropping notification results."""
        return [
            result
            for result in await asyncio.gather(*(self._process_single(o) for o in data))
            if result is not None
        ]

    async def handle(self, request):
        """aiohttp POST handler: dispatch a single object or a batch array."""
        data = await request.json()
        if isinstance(data, dict):
            # single mode
            return web.json_response(await self._process_single(data))
        elif isinstance(data, list):
            # batch mode
            return web.json_response(await self._process_batch(data))
        # NOTE(review): any other JSON payload falls through and returns
        # None (HTTP 500) -- consider an explicit JSON-RPC error response.

    def run(
        self,
        port: int = 80,
        endpoint: str = "/",
        host: str = "127.0.0.1",
        debug: bool = False,
    ):
        """Mount the RPC handler at *endpoint* and serve forever.

        ``debug`` is accepted for backward compatibility but is
        currently unused.
        """
        self.app.add_routes([web.post(endpoint, self.handle)])
        web.run_app(self.app, host=host, port=port)
| import asyncio
from typing import Any, Dict, List, Optional, Union
from aiohttp import web
class Application:
    """Thin JSON-RPC 2.0 layer on top of an aiohttp web application."""

    def __init__(self):
        """Start with a fresh aiohttp app and an empty method registry."""
        self.app = web.Application()
        self.methods = {}

    def method(self, func):
        """Register *func* as an RPC method keyed by its ``__name__``."""
        self.methods[func.__name__] = func

    async def call(self, method: str, params: Union[List[Any], Dict[str, Any]]) -> Any:
        """Dispatch to the registered handler with *params* unpacked."""
        handler = self.methods[method]
        if isinstance(params, dict):
            return await handler(**params)
        if isinstance(params, list):
            return await handler(*params)
        return None

    async def _process_single(self, data: dict) -> Optional[dict]:
        """Process one request or notification object.

        Returns the JSON-RPC response dict for a request, or None when
        *data* is a notification (it carries no "id" member).
        """
        assert data.get("jsonrpc") == "2.0", "Suppoting Only JSON-RPC 2.0"
        request_id = data.get("id", None)
        result = await self.call(data.get("method"), data.get("params", []))
        if request_id is None:
            return None
        return {"jsonrpc": "2.0", "result": result, "id": request_id}

    async def _process_batch(self, data: List[dict]) -> List[dict]:
        """Process a batch concurrently; notification results are dropped."""
        done = await asyncio.gather(*[self._process_single(item) for item in data])
        return [response for response in done if response is not None]

    async def handle(self, request):
        """aiohttp POST handler accepting a single object or a batch."""
        payload = await request.json()
        if isinstance(payload, dict):
            # single mode
            return web.json_response(await self._process_single(payload))
        if isinstance(payload, list):
            # batch mode
            return web.json_response(await self._process_batch(payload))

    def run(
        self,
        port: int = 80,
        endpoint: str = "/",
        host: str = "127.0.0.1",
        debug: bool = False,
    ):
        """Register the endpoint and run the aiohttp server."""
        route = web.post(endpoint, self.handle)
        self.app.add_routes([route])
        web.run_app(self.app, host=host, port=port)
| en | 0.487022 | Web Application Wrapper for JSON-RPC Init Empty Server Add Method for RPC Call a method with params Single request/notify Parameters ---------- data Single JSON Object for Request/Notify Returns ------- Result JSON Object for Request, or None for Notify # notify # request Batch of request/notify Web Handler # single mode # batch mode Launch a Web Server | 2.761188 | 3 |
day04.py | tommyip/aoc2021 | 0 | 6625021 | import sys
import re
from typing import List
lines = sys.stdin.read().split('\n\n')
numbers = [int(x) for x in lines[0].split(',')]
grids = [[[int(x) for x in re.split('\s+', line.strip())]
for line in grid.strip().split('\n')] for grid in lines[1:]]
def isGridWinning(grid: List[List[int]]):
    """Return True when the board has a fully-marked row or column.

    Marked cells hold -1 (see ``mark``) and unmarked bingo numbers are
    non-negative, so a line is complete exactly when every cell is -1.
    Generalized: works for any square board, not just the hard-coded 5x5
    (the original tested ``sum(line) == -5``, which relied on both the
    board size and the non-negativity of unmarked values).
    """
    # Any fully-marked row?
    if any(all(cell == -1 for cell in row) for row in grid):
        return True
    # Any fully-marked column?  zip(*grid) transposes the board.
    return any(all(cell == -1 for cell in col) for col in zip(*grid))
def mark(grid: List[List[int]], x: int):
    """Mark every cell equal to *x* as drawn by setting it to -1, in place.

    Generalized to iterate the board's actual shape; the original
    hard-coded 5x5 indices.
    """
    for row in grid:
        for i, value in enumerate(row):
            if value == x:
                row[i] = -1
def score(num, grid):
    """Final score: the winning draw times the sum of unmarked cells."""
    unmarked = sum(cell for row in grid for cell in row if cell != -1)
    return num * unmarked
def main():
    """Play the draws against every board; print the part 1 and 2 answers.

    Reads the module-level ``numbers`` (draw order) and ``grids`` (boards
    parsed from stdin); boards are mutated in place via ``mark``.
    """
    firstFound = False
    # Indices of boards that have not yet won.
    notWon = set(range(len(grids)))
    for num in numbers:
        for i, grid in enumerate(grids):
            if i in notWon:
                mark(grid, num)
                if isGridWinning(grid):
                    if not firstFound:
                        # First board to win -> part 1 answer.
                        print('Part 1:', score(num, grid))
                        firstFound = True
                    if len(notWon) == 1:
                        # The last remaining board just won -> part 2 answer.
                        print('Part 2:', score(num, grid))
                        return
                    # This board has won; stop updating/checking it.
                    notWon.remove(i)


main()
| import sys
import re
from typing import List
lines = sys.stdin.read().split('\n\n')
numbers = [int(x) for x in lines[0].split(',')]
grids = [[[int(x) for x in re.split('\s+', line.strip())]
for line in grid.strip().split('\n')] for grid in lines[1:]]
def isGridWinning(grid: List[List[int]]):
    """True when some row or column of the 5x5 board is fully marked."""
    # Marked cells are -1, so a complete line of five sums to -5.
    if any(sum(row) == -5 for row in grid):
        return True
    return any(sum(grid[r][c] for r in range(5)) == -5 for c in range(5))
def mark(grid: List[List[int]], x: int):
    """Replace *x* with -1 wherever it occurs on the 5x5 board, in place."""
    for row_idx in range(5):
        row = grid[row_idx]
        for col_idx in range(5):
            if row[col_idx] == x:
                row[col_idx] = -1
def score(num, grid):
    """Score = winning draw * sum of all unmarked (non -1) cells."""
    unmarked_total = 0
    for row in grid:
        for cell in row:
            if cell != -1:
                unmarked_total += cell
    return num * unmarked_total
def main():
    """Play the draws against every board; print the part 1 and 2 answers.

    Reads the module-level ``numbers`` (draw order) and ``grids`` (boards
    parsed from stdin); boards are mutated in place via ``mark``.
    """
    firstFound = False
    # Indices of boards that have not yet won.
    notWon = set(range(len(grids)))
    for num in numbers:
        for i, grid in enumerate(grids):
            if i in notWon:
                mark(grid, num)
                if isGridWinning(grid):
                    if not firstFound:
                        # First board to win -> part 1 answer.
                        print('Part 1:', score(num, grid))
                        firstFound = True
                    if len(notWon) == 1:
                        # The last remaining board just won -> part 2 answer.
                        print('Part 2:', score(num, grid))
                        return
                    # This board has won; stop updating/checking it.
                    notWon.remove(i)


main()
| en | 0.274653 | # First check rows # Then check cols | 3.477301 | 3 |
from_tfrecords.py | Luonic/tf-cnn-lstm-ocr-captcha | 10 | 6625022 | <reponame>Luonic/tf-cnn-lstm-ocr-captcha
import tensorflow as tf
import ImageAugmenter
import cv2
from tqdm import tqdm
import glob
import os
import numpy as np
from random import shuffle
# Images decoded from the TFRecord shard, as height x width x channels
# uint8 arrays.
reconstructed_images = []

# Iterate over the serialized tf.train.Example records in the first
# training shard (TF1-era API).
record_iterator = tf.python_io.tf_record_iterator(path=os.path.abspath(os.path.join("data", "train", "tfrecords", "0.tfrecords")))

for string_record in record_iterator:
    example = tf.train.Example()
    example.ParseFromString(string_record)

    # Image dimensions stored alongside the raw pixel bytes.
    height = int(example.features.feature['height']
                 .int64_list
                 .value[0])

    width = int(example.features.feature['width']
                .int64_list
                .value[0])

    # Raw image bytes, flattened row-major.
    img_string = (example.features.feature['image']
                  .bytes_list
                  .value[0])

    # NOTE(review): 'label' is read but never used below -- confirm intent.
    label = (example.features.feature['label']
             .int64_list
             .value[0])

    # Rebuild the image; -1 lets reshape infer the channel count from
    # the buffer size.
    img_1d = np.fromstring(img_string, dtype=np.uint8)
    reconstructed_img = img_1d.reshape((height, width, -1))
    reconstructed_images.append(reconstructed_img)
for rec_image in tqdm(reconstructed_images):
cv2.imshow("image", rec_image)
cv2.waitKey(0) | import tensorflow as tf
import ImageAugmenter
import cv2
from tqdm import tqdm
import glob
import os
import numpy as np
from random import shuffle
# Images decoded from the TFRecord shard, as height x width x channels
# uint8 arrays.
reconstructed_images = []

# Iterate over the serialized tf.train.Example records in the first
# training shard (TF1-era API).
record_iterator = tf.python_io.tf_record_iterator(path=os.path.abspath(os.path.join("data", "train", "tfrecords", "0.tfrecords")))

for string_record in record_iterator:
    example = tf.train.Example()
    example.ParseFromString(string_record)

    # Image dimensions stored alongside the raw pixel bytes.
    height = int(example.features.feature['height']
                 .int64_list
                 .value[0])

    width = int(example.features.feature['width']
                .int64_list
                .value[0])

    # Raw image bytes, flattened row-major.
    img_string = (example.features.feature['image']
                  .bytes_list
                  .value[0])

    # NOTE(review): 'label' is read but never used below -- confirm intent.
    label = (example.features.feature['label']
             .int64_list
             .value[0])

    # Rebuild the image; -1 lets reshape infer the channel count from
    # the buffer size.
    img_1d = np.fromstring(img_string, dtype=np.uint8)
    reconstructed_img = img_1d.reshape((height, width, -1))
    reconstructed_images.append(reconstructed_img)
for rec_image in tqdm(reconstructed_images):
cv2.imshow("image", rec_image)
cv2.waitKey(0) | none | 1 | 2.482672 | 2 | |
app/views/history.py | chick0/dashboard | 0 | 6625023 | <reponame>chick0/dashboard
from flask import Blueprint
from flask import session
from flask import redirect
from flask import url_for
from flask import render_template
from app import db
from app.models import History
from app.check import is_login
bp = Blueprint(
name="history",
import_name="history",
url_prefix="/history"
)
@bp.get("")
def show_all():
# 로그인 상태가 아니라면 로그인 화면으로 이동하기
if not is_login():
return redirect(url_for("dashboard.login.form"))
# 로그인중인 유저와 동일한 이메일로 시도한 기록을 데이터베이스에서 검색하기
history_list = History.query.filter_by(
email=session['user']['email']
).all()
return render_template(
"dashboard/history/show_all.html",
history_list=history_list
)
@bp.get("/<int:idx>")
def detail(idx: int):
# 로그인 상태가 아니라면 로그인 화면으로 이동하기
if not is_login():
return redirect(url_for("dashboard.login.form"))
# 기록 아이디와 로그인중인 유저와 동일한 이메일로 시도한 기록을 데이터베이스에서 검색하기
history = History.query.filter_by(
idx=idx,
email=session['user']['email'],
).first()
# 검색결과가 없다면 목록으로 이동하기
if history is None:
return redirect(url_for("dashboard.history.show_all"))
return render_template(
"dashboard/history/detail.html",
history=history
)
@bp.get("/<int:idx>/delete")
def delete(idx: int):
# 로그인 상태가 아니라면 로그인 화면으로 이동하기
if not is_login():
return redirect(url_for("dashboard.login.form"))
# 로그인중인 유저와 동일한 이메일로 시도한 기록을 데이터베이스에서 검색하고 삭제하기
History.query.filter_by(
idx=idx,
email=session['user']['email'],
).delete()
# 변경사항 데이터베이스에 저장하기
db.session.commit()
return redirect(url_for("dashboard.history.show_all"))
| from flask import Blueprint
from flask import session
from flask import redirect
from flask import url_for
from flask import render_template
from app import db
from app.models import History
from app.check import is_login
bp = Blueprint(
name="history",
import_name="history",
url_prefix="/history"
)
@bp.get("")
def show_all():
# 로그인 상태가 아니라면 로그인 화면으로 이동하기
if not is_login():
return redirect(url_for("dashboard.login.form"))
# 로그인중인 유저와 동일한 이메일로 시도한 기록을 데이터베이스에서 검색하기
history_list = History.query.filter_by(
email=session['user']['email']
).all()
return render_template(
"dashboard/history/show_all.html",
history_list=history_list
)
@bp.get("/<int:idx>")
def detail(idx: int):
# 로그인 상태가 아니라면 로그인 화면으로 이동하기
if not is_login():
return redirect(url_for("dashboard.login.form"))
# 기록 아이디와 로그인중인 유저와 동일한 이메일로 시도한 기록을 데이터베이스에서 검색하기
history = History.query.filter_by(
idx=idx,
email=session['user']['email'],
).first()
# 검색결과가 없다면 목록으로 이동하기
if history is None:
return redirect(url_for("dashboard.history.show_all"))
return render_template(
"dashboard/history/detail.html",
history=history
)
@bp.get("/<int:idx>/delete")
def delete(idx: int):
# 로그인 상태가 아니라면 로그인 화면으로 이동하기
if not is_login():
return redirect(url_for("dashboard.login.form"))
# 로그인중인 유저와 동일한 이메일로 시도한 기록을 데이터베이스에서 검색하고 삭제하기
History.query.filter_by(
idx=idx,
email=session['user']['email'],
).delete()
# 변경사항 데이터베이스에 저장하기
db.session.commit()
return redirect(url_for("dashboard.history.show_all")) | ko | 1.00007 | # 로그인 상태가 아니라면 로그인 화면으로 이동하기 # 로그인중인 유저와 동일한 이메일로 시도한 기록을 데이터베이스에서 검색하기 # 로그인 상태가 아니라면 로그인 화면으로 이동하기 # 기록 아이디와 로그인중인 유저와 동일한 이메일로 시도한 기록을 데이터베이스에서 검색하기 # 검색결과가 없다면 목록으로 이동하기 # 로그인 상태가 아니라면 로그인 화면으로 이동하기 # 로그인중인 유저와 동일한 이메일로 시도한 기록을 데이터베이스에서 검색하고 삭제하기 # 변경사항 데이터베이스에 저장하기 | 2.345021 | 2 |
tests/server_sdk_consistency_test.py | peter-bertuglia/python-sdk | 0 | 6625024 | <reponame>peter-bertuglia/python-sdk
import unittest
import requests
from statsig import StatsigUser, StatsigOptions, StatsigServer
import os
import io
import sys
import time
if "test_api_key" in os.environ:
SDK_KEY = os.environ["test_api_key"]
else:
try:
f = io.open("../../ops/secrets/prod_keys/statsig-rulesets-eval-consistency-test-secret.key",
mode="r", encoding="utf-8")
except OSError:
print("THIS TEST IS EXPECTED TO FAIL FOR NON-STATSIG EMPLOYEES! If this is the only test failing, please proceed to submit a pull request. If you are a Statsig employee, chat with jkw.")
sys.exit()
SDK_KEY = f.read()
f.close()
TEST_URLS = [
"https://api.statsig.com/v1",
"https://latest.api.statsig.com/v1",
]
class ServerSDKConsistencyTest(unittest.TestCase):
    """End-to-end check that local SDK evaluation matches the Statsig servers.

    For every configured endpoint, fetch the server's expected evaluation
    results for a set of test users and verify the locally-initialized SDK
    produces identical gate and dynamic-config results.
    """

    def test_all_regions(self):
        # Run the consistency suite against every configured API endpoint.
        for api in TEST_URLS:
            headers = {
                'STATSIG-API-KEY': SDK_KEY,
                'STATSIG-CLIENT-TIME': str(round(time.time() * 1000)),
            }
            # The endpoint returns the expected evaluation results for a
            # server-defined set of test users.
            response = requests.post(
                api + "/rulesets_e2e_test", headers=headers)
            self.data = response.json()

            options = StatsigOptions(api=api)
            self.sdk = StatsigServer()
            print(api)
            self.sdk.initialize(SDK_KEY, options)
            self._test_consistency()
            self.sdk.shutdown()

    def _test_consistency(self):
        # Compare local evaluation (gates and dynamic configs) for every
        # test user against the server-provided expected results.
        for entry in self.data:
            for val in self.data[entry]:
                user = val["user"]
                statsig_user = StatsigUser(user["userID"])
                statsig_user.app_version = user["appVersion"]
                statsig_user.user_agent = user["userAgent"]
                statsig_user.ip = user["ip"]
                # Optional user fields: copy only when present in the payload.
                if "email" in user:
                    statsig_user.email = user["email"]
                if "statsigEnvironment" in user:
                    statsig_user._statsig_environment = user["statsigEnvironment"]
                if "custom" in user:
                    statsig_user.custom = user["custom"]
                if "privateAttributes" in user:
                    statsig_user.private_attributes = user["privateAttributes"]
                if "customIDs" in user:
                    statsig_user.custom_ids = user["customIDs"]
                gates = val["feature_gates_v2"]
                for name in gates:
                    # Evaluate through both the internal evaluator and the
                    # public API; they must agree with each other and the server.
                    eval_result = self.sdk._evaluator.check_gate(
                        statsig_user, name)
                    sdk_result = self.sdk.check_gate(statsig_user, name)
                    server_result = gates[name]
                    # Print context before each assert so a failure is debuggable.
                    if eval_result.boolean_value != server_result["value"]:
                        print(
                            f'\nDifferent values for gate {name} user: {statsig_user.to_dict(True)}')
                        print(
                            f'\nExpected: {server_result["value"]}, Actual: {eval_result.boolean_value}')
                    self.assertEqual(eval_result.boolean_value,
                                     server_result["value"])
                    self.assertEqual(eval_result.boolean_value, sdk_result)
                    if eval_result.rule_id != server_result["rule_id"]:
                        print(
                            f'\nDifferent rule_id for gate {name} user: {statsig_user.to_dict(True)}')
                        print(
                            f'\nExpected: {server_result["rule_id"]}, Actual: {eval_result.rule_id}')
                    self.assertEqual(eval_result.rule_id,
                                     server_result["rule_id"])
                    if eval_result.secondary_exposures != server_result["secondary_exposures"]:
                        print(
                            f'\nDifferent secondary_exposures for gate {name} user: {statsig_user.to_dict(True)}')
                        print(
                            f'\nExpected: {server_result["secondary_exposures"]}, Actual: {eval_result.secondary_exposures}')
                    self.assertEqual(eval_result.secondary_exposures,
                                     server_result.get("secondary_exposures"))
                    print(".", end="")

                configs = val["dynamic_configs"]
                for name in configs:
                    # Same comparison as the gate loop, for dynamic configs.
                    eval_result = self.sdk._evaluator.get_config(
                        statsig_user, name)
                    sdk_result = self.sdk.get_config(statsig_user, name)
                    server_result = configs[name]
                    if eval_result.json_value != server_result["value"]:
                        print(
                            f'\nDifferent values for config {name} user: {statsig_user.to_dict(True)}')
                        print(
                            f'\nExpected: {server_result["value"]}, Actual: {eval_result.json_value}')
                    self.assertEqual(eval_result.json_value,
                                     server_result["value"])
                    self.assertEqual(eval_result.json_value,
                                     sdk_result.get_value())
                    if eval_result.rule_id != server_result["rule_id"]:
                        print(
                            f'\nDifferent rule_id for config {name} user: {statsig_user.to_dict(True)}')
                        print(
                            f'\nExpected: {server_result["rule_id"]}, Actual: {eval_result.rule_id}')
                    self.assertEqual(eval_result.rule_id,
                                     server_result["rule_id"])
                    if eval_result.secondary_exposures != server_result["secondary_exposures"]:
                        print(
                            f'\nDifferent secondary_exposures for config {name} user: {statsig_user.to_dict(True)}')
                        print(
                            f'\nExpected: {server_result["secondary_exposures"]}, Actual: {eval_result.secondary_exposures}')
                    self.assertEqual(eval_result.secondary_exposures,
                                     server_result.get("secondary_exposures"))
                    print(".", end="")
        print("[end]")


# Allow running this module directly as a script.
if __name__ == '__main__':
    unittest.main()
| import unittest
import requests
from statsig import StatsigUser, StatsigOptions, StatsigServer
import os
import io
import sys
import time
if "test_api_key" in os.environ:
SDK_KEY = os.environ["test_api_key"]
else:
try:
f = io.open("../../ops/secrets/prod_keys/statsig-rulesets-eval-consistency-test-secret.key",
mode="r", encoding="utf-8")
except OSError:
print("THIS TEST IS EXPECTED TO FAIL FOR NON-STATSIG EMPLOYEES! If this is the only test failing, please proceed to submit a pull request. If you are a Statsig employee, chat with jkw.")
sys.exit()
SDK_KEY = f.read()
f.close()
TEST_URLS = [
"https://api.statsig.com/v1",
"https://latest.api.statsig.com/v1",
]
class ServerSDKConsistencyTest(unittest.TestCase):
def test_all_regions(self):
for api in TEST_URLS:
headers = {
'STATSIG-API-KEY': SDK_KEY,
'STATSIG-CLIENT-TIME': str(round(time.time() * 1000)),
}
response = requests.post(
api + "/rulesets_e2e_test", headers=headers)
self.data = response.json()
options = StatsigOptions(api=api)
self.sdk = StatsigServer()
print(api)
self.sdk.initialize(SDK_KEY, options)
self._test_consistency()
self.sdk.shutdown()
def _test_consistency(self):
for entry in self.data:
for val in self.data[entry]:
user = val["user"]
statsig_user = StatsigUser(user["userID"])
statsig_user.app_version = user["appVersion"]
statsig_user.user_agent = user["userAgent"]
statsig_user.ip = user["ip"]
if "email" in user:
statsig_user.email = user["email"]
if "statsigEnvironment" in user:
statsig_user._statsig_environment = user["statsigEnvironment"]
if "custom" in user:
statsig_user.custom = user["custom"]
if "privateAttributes" in user:
statsig_user.private_attributes = user["privateAttributes"]
if "customIDs" in user:
statsig_user.custom_ids = user["customIDs"]
gates = val["feature_gates_v2"]
for name in gates:
eval_result = self.sdk._evaluator.check_gate(
statsig_user, name)
sdk_result = self.sdk.check_gate(statsig_user, name)
server_result = gates[name]
if eval_result.boolean_value != server_result["value"]:
print(
f'\nDifferent values for gate {name} user: {statsig_user.to_dict(True)}')
print(
f'\nExpected: {server_result["value"]}, Actual: {eval_result.boolean_value}')
self.assertEqual(eval_result.boolean_value,
server_result["value"])
self.assertEqual(eval_result.boolean_value, sdk_result)
if eval_result.rule_id != server_result["rule_id"]:
print(
f'\nDifferent rule_id for gate {name} user: {statsig_user.to_dict(True)}')
print(
f'\nExpected: {server_result["rule_id"]}, Actual: {eval_result.rule_id}')
self.assertEqual(eval_result.rule_id,
server_result["rule_id"])
if eval_result.secondary_exposures != server_result["secondary_exposures"]:
print(
f'\nDifferent secondary_exposures for gate {name} user: {statsig_user.to_dict(True)}')
print(
f'\nExpected: {server_result["secondary_exposures"]}, Actual: {eval_result.secondary_exposures}')
self.assertEqual(eval_result.secondary_exposures,
server_result.get("secondary_exposures"))
print(".", end="")
configs = val["dynamic_configs"]
for name in configs:
eval_result = self.sdk._evaluator.get_config(
statsig_user, name)
sdk_result = self.sdk.get_config(statsig_user, name)
server_result = configs[name]
if eval_result.json_value != server_result["value"]:
print(
f'\nDifferent values for config {name} user: {statsig_user.to_dict(True)}')
print(
f'\nExpected: {server_result["value"]}, Actual: {eval_result.json_value}')
self.assertEqual(eval_result.json_value,
server_result["value"])
self.assertEqual(eval_result.json_value,
sdk_result.get_value())
if eval_result.rule_id != server_result["rule_id"]:
print(
f'\nDifferent rule_id for config {name} user: {statsig_user.to_dict(True)}')
print(
f'\nExpected: {server_result["rule_id"]}, Actual: {eval_result.rule_id}')
self.assertEqual(eval_result.rule_id,
server_result["rule_id"])
if eval_result.secondary_exposures != server_result["secondary_exposures"]:
print(
f'\nDifferent secondary_exposures for config {name} user: {statsig_user.to_dict(True)}')
print(
f'\nExpected: {server_result["secondary_exposures"]}, Actual: {eval_result.secondary_exposures}')
self.assertEqual(eval_result.secondary_exposures,
server_result.get("secondary_exposures"))
print(".", end="")
print("[end]")
if __name__ == '__main__':
unittest.main() | none | 1 | 2.315009 | 2 | |
townsquare/players.py | ryanvolz/botc_extensions | 0 | 6625025 | # ----------------------------------------------------------------------------
# Copyright (c) 2020 <NAME>
# All rights reserved.
#
# Distributed under the terms of the BSD 3-clause license.
#
# The full license is in the LICENSE file, distributed with this software.
#
# SPDX-License-Identifier: BSD-3-Clause
# ----------------------------------------------------------------------------
"""Components for Blood on the Clocktower voice/text players cog."""
import functools
import math
import typing
import discord
from discord.ext import commands
from . import common
from ...utils.commands import delete_command_message
# Emoji keycap digits used to render seat numbers / vote counts in chat.
EMOJI_DIGITS = {
    str(num): "{}\N{VARIATION SELECTOR-16}\N{COMBINING ENCLOSING KEYCAP}".format(num)
    for num in range(10)
}
EMOJI_DIGITS[" "] = "\N{BLACK LARGE SQUARE}"  # blank placeholder digit
EMOJI_DIGITS["10"] = "\N{KEYCAP TEN}"  # Discord has a single keycap for ten
EMOJI_DIGITS["*"] = "*\N{VARIATION SELECTOR-16}\N{COMBINING ENCLOSING KEYCAP}"

# Character-type distribution keyed by non-traveler player count:
# townsfolk / outsiders / minions / demons.
BOTC_COUNT = {
    5: dict(town=3, out=0, minion=1, demon=1),
    6: dict(town=3, out=1, minion=1, demon=1),
    7: dict(town=5, out=0, minion=1, demon=1),
    8: dict(town=5, out=1, minion=1, demon=1),
    9: dict(town=5, out=2, minion=1, demon=1),
    10: dict(town=7, out=0, minion=2, demon=1),
    11: dict(town=7, out=1, minion=2, demon=1),
    12: dict(town=7, out=2, minion=2, demon=1),
    13: dict(town=9, out=0, minion=3, demon=1),
    14: dict(town=9, out=1, minion=3, demon=1),
    15: dict(town=9, out=2, minion=3, demon=1),
}
def require_locked_town():
    """Build a command decorator that rejects commands unless the town is locked."""

    def decorator(command):
        @functools.wraps(command)
        async def wrapper(self, ctx, *args, **kwargs):
            category = ctx.message.channel.category
            town = self.bot.botc_townsquare.get_town(category)
            if not town["locked"]:
                # Player commands only make sense once setup is done and the
                # storyteller has locked the town.
                raise common.BOTCTownSquareErrors.TownUnlocked(
                    "Command requires a locked town."
                )
            return await command(self, ctx, *args, **kwargs)

        return wrapper

    return decorator
class BOTCTownSquarePlayers(
    common.BOTCTownSquareErrorMixin, commands.Cog, name="Players"
):
    """Commands for Blood on the Clocktower voice/text players.

    During play, you can get a live sense of the state of the game by looking at the
    voice chat user list. Each player's state, including if they are dead, ghost votes
    they have, and whether they are traveling, is represented by emojis in their
    nickname.

    When you learn that your state has changed (dead / alive / used ghost vote), use
    the appropriate command (`dead` / `alive` / `voted`) in the text chat, and the
    bot will give you the appropriate emojis.

    Anyone can use `townsquare` or `ts` and the bot will respond with a summary of the
    state of the town. If you just want to know the default character-type count for
    the game, use `count`.

    To make a nomination yourself, use the `nominate` command (`nom` or `n` for short)
    followed by the seat number of the player you'd like to nominate, e.g.
    `.nominate 1`. When the vote is counted, the storyteller or a helper will record
    the number of votes as a reaction to the nomination message by using the
    `nominate votes` sub-command followed by a number.

    The `public` command is a general tool for making statements that you want to be
    more noticeable (e.g. Juggler or Gossip abilities). Whatever text you include in
    the command, as in `.public <text>`, will be repeated and attributed to you using
    the bot's megaphone.
    """

    def __init__(self, bot):
        """Initialize cog for town square player commands."""
        self.bot = bot

    async def cog_check(self, ctx):
        """Check that setup commands are called from a guild and a town category."""
        result = await commands.guild_only().predicate(
            ctx
        ) and await common.is_called_from_botc_category().predicate(ctx)
        return result

    @commands.command(brief="Set player to 'dead'", usage="[<seat>|<name>]")
    @delete_command_message()
    async def dead(self, ctx, *, member: typing.Union[int, discord.Member] = None):
        """Set the caller or user as dead, changing their name appropriately.

        Indicate another player if necessary using either their seat number or their
        *exact* name/tag.
        """
        ts = self.bot.botc_townsquare
        member = await ts.resolve_player_arg(ctx, member)
        # A freshly dead player still holds their one ghost vote.
        await ts.set_player_info(ctx, member, dead=True, num_votes=1)

    @commands.command(brief="Set player to 'voted'", usage="[<seat>|<name>]")
    @delete_command_message()
    async def voted(self, ctx, *, member: typing.Union[int, discord.Member] = None):
        """Set the caller or user as dead with a used ghost vote.

        Indicate another player if necessary using either their seat number or their
        *exact* name/tag.
        """
        ts = self.bot.botc_townsquare
        member = await ts.resolve_player_arg(ctx, member)
        # Dead with zero votes: the ghost vote has been spent.
        await ts.set_player_info(ctx, member, dead=True, num_votes=0)

    @commands.command(brief="Set player to 'alive'", usage="[<seat>|<name>]")
    @delete_command_message()
    async def alive(self, ctx, *, member: typing.Union[int, discord.Member] = None):
        """Set the caller or user as alive, changing their name appropriately.

        Indicate another player if necessary using either their seat number or their
        *exact* name/tag.
        """
        ts = self.bot.botc_townsquare
        member = await ts.resolve_player_arg(ctx, member)
        # Living players have no vote counter in their nickname.
        await ts.set_player_info(ctx, member, dead=False, num_votes=None)

    @commands.command(name="townsquare", aliases=["ts"], brief="Show the town square")
    @require_locked_town()
    @delete_command_message()
    async def townsquare(self, ctx):
        """Show the current town square."""
        town = self.bot.botc_townsquare.get_town(ctx.message.channel.category)
        lines = []
        alive_count = 0
        for idx, player in enumerate(town["player_order"]):
            num = idx + 1
            digits = "".join(EMOJI_DIGITS[d] for d in f"{num}")
            fill = self.bot.botc_townsquare.player_nickname_components(ctx, player)
            s = "{digits}{dead}{votes}{traveling} {nick}".format(digits=digits, **fill)
            lines.append(s)
            if not town["player_info"][player]["dead"]:
                alive_count += 1
        # Executing requires a majority of living players.
        min_ex = int(math.ceil(alive_count / 2))
        non_traveler_count = len(town["players"]) - len(town["travelers"])
        try:
            count_dict = BOTC_COUNT[non_traveler_count]
        except KeyError:
            # Player count outside the supported 5-15 range: omit the breakdown.
            pass
        else:
            lines.append("{town}/{out}/{minion}/{demon}".format(**count_dict))
        lines.append(f"**{alive_count}** players alive.")
        lines.append(f"**{min_ex}** votes to execute.")
        embed = discord.Embed(
            description="\n".join(lines), color=discord.Color.dark_magenta()
        )
        await ctx.send(content=None, embed=embed)

    @commands.command(brief="Print the count of character types")
    @require_locked_town()
    @delete_command_message()
    async def count(self, ctx):
        """Print the count of each character type in this game."""
        town = self.bot.botc_townsquare.get_town(ctx.message.channel.category)
        # Travelers do not affect the base character distribution.
        non_traveler_count = len(town["players"]) - len(town["travelers"])
        try:
            count_dict = BOTC_COUNT[non_traveler_count]
        except KeyError:
            await ctx.send(
                "You don't have the players for a proper game.",
                delete_after=common.BOTC_MESSAGE_DELETE_DELAY,
            )
        else:
            countstr = (
                "{town} townsfolk, {out} outsider(s), {minion} minion(s),"
                " and {demon} demon"
            ).format(**count_dict)
            await ctx.send(countstr)

    @commands.group(
        invoke_without_command=True,
        aliases=["nom", "n"],
        brief="Nominate a player for execution",
        usage="( <target-player> | <nominator> <target-player> )",
    )
    @require_locked_town()
    @delete_command_message()
    async def nominate(
        self, ctx, members: commands.Greedy[typing.Union[int, discord.Member]]
    ):
        """Nominate a player for execution, or set both nominator and target.

        Indicate a player using either their seat number or their *exact* name/tag.
        With one argument, the user of the command will be taken as the nominator.
        """
        ts = self.bot.botc_townsquare
        category = ctx.message.channel.category
        town = ts.get_town(category)
        if not members:
            raise commands.UserInputError("Could not parse any members to nominate")
        if town["nomination"] is not None:
            # Only one nomination may be open at a time; it must be resolved
            # with `nominate votes` (or cancelled) before the next one.
            msg = (
                f"A nomination is already in progress."
                f" [`{ctx.prefix}nominate votes <#>`]"
            )
            return await ctx.send(msg, delete_after=common.BOTC_MESSAGE_DELETE_DELAY)
        if len(members) > 2:
            raise commands.TooManyArguments(
                "Nominate only accepts 1 or 2 player arguments."
            )
        if len(members) == 1:
            nominator = ctx.message.author
            target = await ts.resolve_player_arg(ctx, members[0])
        else:
            nominator = await ts.resolve_player_arg(ctx, members[0])
            target = await ts.resolve_player_arg(ctx, members[1])
        # Resident players are executed; travelers are exiled.
        if target not in town["travelers"]:
            nom_type = "execution"
            nom_color = discord.Color.green()
        else:
            nom_type = "exile"
            nom_color = discord.Color.gold()
        nominator_nick = discord.utils.escape_markdown(
            ts.match_name_re(category, nominator)["nick"]
        )
        target_nick = discord.utils.escape_markdown(
            ts.match_name_re(category, target)["nick"]
        )
        nom_str = f"**{nominator_nick}** nominates **{target_nick}** for {nom_type}."
        nom_content = nom_str + "\n||\n||"
        embed = discord.Embed(color=nom_color, description=nom_str)
        embed.set_author(name=nominator_nick, icon_url=nominator.avatar_url)
        embed.set_thumbnail(url=target.avatar_url)
        nomination = await ctx.send(content=nom_content, embed=embed)
        town["nomination"] = nomination

    @nominate.command(
        name="votes",
        aliases=["vote"],
        brief="React to nomination with # of votes",
        usage="<num-votes>",
    )
    @require_locked_town()
    @delete_command_message()
    async def nominate_votes(self, ctx, num_votes: int):
        """React to the current/previous nomination with the given number of votes."""
        if num_votes < 0 or num_votes > 20:
            raise commands.BadArgument("Number of votes must be in [0, 20].")
        town = self.bot.botc_townsquare.get_town(ctx.message.channel.category)
        if town["nomination"] is not None:
            nom = town["nomination"]
        elif town["prev_nomination"] is not None:
            nom = town["prev_nomination"]
        else:
            return await ctx.send(
                "There has not been a nomination to vote on.",
                delete_after=common.BOTC_MESSAGE_DELETE_DELAY,
            )
        await nom.clear_reactions()
        # Build the reaction emojis: single keycaps exist for 0-10, so 11-19
        # use keycap-ten plus a ones digit, and 20 uses the '*' keycap.
        digits = []
        tens = num_votes // 10
        ones = num_votes % 10
        if tens == 1:
            digits.append(EMOJI_DIGITS["10"])
        elif tens == 2:
            digits.append(EMOJI_DIGITS["*"])
        if not (ones == 0 and tens > 0):
            digits.append(EMOJI_DIGITS[f"{ones}"])
        for d in digits:
            await nom.add_reaction(d)
        # now that the nomination has a number of votes set, it should be moved to prev
        town["prev_nomination"] = nom
        town["nomination"] = None

    @nominate.command(
        name="cancel", aliases=["delete", "del"], brief="Cancel the nomination"
    )
    @require_locked_town()
    @delete_command_message()
    async def nominate_cancel(self, ctx):
        """Cancel/delete the current or previous nomination."""
        town = self.bot.botc_townsquare.get_town(ctx.message.channel.category)
        if town["nomination"] is not None:
            await town["nomination"].delete()
            town["nomination"] = None
        elif town["prev_nomination"] is not None:
            await town["prev_nomination"].delete()
            town["prev_nomination"] = None
        else:
            await ctx.send(
                "There is no nomination to cancel.",
                delete_after=common.BOTC_MESSAGE_DELETE_DELAY,
            )

    @commands.command(
        name="public", aliases=["pub", "say"], brief="Make a public statement"
    )
    @require_locked_town()
    @delete_command_message()
    async def public(self, ctx, *, statement: str):
        """Make a public statement, highlighted for visibility."""
        if not statement:
            raise commands.UserInputError("Statement is empty")
        author = ctx.message.author
        author_nick = discord.utils.escape_markdown(
            self.bot.botc_townsquare.match_name_re(
                ctx.message.channel.category, author
            )["nick"]
        )
        embed = discord.Embed(description=statement, color=discord.Color.blue())
        embed.set_author(name=author_nick, icon_url=author.avatar_url)
        await ctx.send(content=None, embed=embed)

    @commands.command(brief="Go to a voice channel", usage="[sidebar-num|name]")
    @delete_command_message(delay=0)
    async def go(self, ctx, *, vchan: typing.Union[int, discord.VoiceChannel] = None):
        """Go to a specified voice channel/sidebar in the current town category.

        Specify a number to go to the voice channel at that position (starting from 0)
        in the category list, or pass the tag/name for the voice channel. If no
        argument is specified, move to the top voice channel (e.g. Town Square).

        If the Town Square is the top voice channel and the sidebar voice channels are
        numbered after that, the sidebar number can be used as the argument.
        """
        voice_channels = ctx.message.channel.category.voice_channels
        if vchan is None:
            try:
                vchan = voice_channels[0]
            except IndexError:
                raise common.BOTCTownSquareErrors.BadSidebarArgument(
                    "No voice channels exist in the category"
                )
        elif isinstance(vchan, discord.VoiceChannel):
            # otherwise vchan is either a discord.VoiceChannel...
            pass
        else:
            # or an int, representing a voice channel (sidebar) number in the category
            try:
                # assume town square is the 0th voice channel, so sidebar numbers can
                # be indexed directly without modification
                vchan = voice_channels[vchan]
            except IndexError:
                raise common.BOTCTownSquareErrors.BadSidebarArgument(
                    "Voice channel number is invalid"
                )
        # move author to the requested voice channel
        try:
            await ctx.message.author.move_to(vchan)
        except discord.HTTPException:
            # Discord refuses the move when the member is not connected to voice.
            await ctx.send(
                "Bring yourself back online first. [connect to voice]",
                delete_after=common.BOTC_MESSAGE_DELETE_DELAY,
            )
| # ----------------------------------------------------------------------------
# Copyright (c) 2020 <NAME>
# All rights reserved.
#
# Distributed under the terms of the BSD 3-clause license.
#
# The full license is in the LICENSE file, distributed with this software.
#
# SPDX-License-Identifier: BSD-3-Clause
# ----------------------------------------------------------------------------
"""Components for Blood on the Clocktower voice/text players cog."""
import functools
import math
import typing
import discord
from discord.ext import commands
from . import common
from ...utils.commands import delete_command_message
EMOJI_DIGITS = {
str(num): "{}\N{VARIATION SELECTOR-16}\N{COMBINING ENCLOSING KEYCAP}".format(num)
for num in range(10)
}
EMOJI_DIGITS[" "] = "\N{BLACK LARGE SQUARE}"
EMOJI_DIGITS["10"] = "\N{KEYCAP TEN}"
EMOJI_DIGITS["*"] = "*\N{VARIATION SELECTOR-16}\N{COMBINING ENCLOSING KEYCAP}"
BOTC_COUNT = {
5: dict(town=3, out=0, minion=1, demon=1),
6: dict(town=3, out=1, minion=1, demon=1),
7: dict(town=5, out=0, minion=1, demon=1),
8: dict(town=5, out=1, minion=1, demon=1),
9: dict(town=5, out=2, minion=1, demon=1),
10: dict(town=7, out=0, minion=2, demon=1),
11: dict(town=7, out=1, minion=2, demon=1),
12: dict(town=7, out=2, minion=2, demon=1),
13: dict(town=9, out=0, minion=3, demon=1),
14: dict(town=9, out=1, minion=3, demon=1),
15: dict(town=9, out=2, minion=3, demon=1),
}
def require_locked_town():
"""Return command decorator that raises an error if the town is lunocked."""
def decorator(command):
@functools.wraps(command)
async def wrapper(self, ctx, *args, **kwargs):
town = self.bot.botc_townsquare.get_town(ctx.message.channel.category)
if not town["locked"]:
raise common.BOTCTownSquareErrors.TownUnlocked(
"Command requires a locked town."
)
return await command(self, ctx, *args, **kwargs)
return wrapper
return decorator
class BOTCTownSquarePlayers(
common.BOTCTownSquareErrorMixin, commands.Cog, name="Players"
):
"""Commands for Blood on the Clocktower voice/text players.
During play, you can get a live sense of the state of the game by looking at the
voice chat user list. Each player's state, including if they are dead, ghost votes
they have, and whether they are traveling, is represented by emojis in their
nickname.
When you learn that your state has changed (dead / alive / used ghost vote), use
the appropriate command (`dead` / `alive` / `voted`) in the text chat, and the
bot will give you the appropriate emojis.
Anyone can use `townsquare` or `ts` and the bot will respond with a summary of the
state of the town. If you just want to know the default character-type count for
the game, use `count`.
To make a nomination yourself, use the `nominate` command (`nom` or `n` for short)
followed by the seat number of the player you'd like to nominate, e.g.
`.nominate 1`. When the vote is counted, the storyteller or a helper will record
the number of votes as a reaction to the nomination message by using the
`nominate votes` sub-command followed by a number.
The `public` command is a general tool for making statements that you want to be
more noticeable (e.g. Juggler or Gossip abilities). Whatever text you include in
the command, as in `.public <text>`, will be repeated and attributed to you using
the bot's megaphone.
"""
def __init__(self, bot):
"""Initialize cog for town square player commands."""
self.bot = bot
async def cog_check(self, ctx):
"""Check that setup commands are called from a guild and a town category."""
result = await commands.guild_only().predicate(
ctx
) and await common.is_called_from_botc_category().predicate(ctx)
return result
@commands.command(brief="Set player to 'dead'", usage="[<seat>|<name>]")
@delete_command_message()
async def dead(self, ctx, *, member: typing.Union[int, discord.Member] = None):
"""Set the caller or user as dead, changing their name appropriately.
Indicate another player if necessary using either their seat number or their
*exact* name/tag.
"""
ts = self.bot.botc_townsquare
member = await ts.resolve_player_arg(ctx, member)
await ts.set_player_info(ctx, member, dead=True, num_votes=1)
@commands.command(brief="Set player to 'voted'", usage="[<seat>|<name>]")
@delete_command_message()
async def voted(self, ctx, *, member: typing.Union[int, discord.Member] = None):
"""Set the caller or user as dead with a used ghost vote.
Indicate another player if necessary using either their seat number or their
*exact* name/tag.
"""
ts = self.bot.botc_townsquare
member = await ts.resolve_player_arg(ctx, member)
await ts.set_player_info(ctx, member, dead=True, num_votes=0)
@commands.command(brief="Set player to 'alive'", usage="[<seat>|<name>]")
@delete_command_message()
async def alive(self, ctx, *, member: typing.Union[int, discord.Member] = None):
"""Set the caller or user as alive, changing their name appropriately.
Indicate another player if necessary using either their seat number or their
*exact* name/tag.
"""
ts = self.bot.botc_townsquare
member = await ts.resolve_player_arg(ctx, member)
await ts.set_player_info(ctx, member, dead=False, num_votes=None)
@commands.command(name="townsquare", aliases=["ts"], brief="Show the town square")
@require_locked_town()
@delete_command_message()
async def townsquare(self, ctx):
"""Show the current town square."""
town = self.bot.botc_townsquare.get_town(ctx.message.channel.category)
lines = []
alive_count = 0
for idx, player in enumerate(town["player_order"]):
num = idx + 1
digits = "".join(EMOJI_DIGITS[d] for d in f"{num}")
fill = self.bot.botc_townsquare.player_nickname_components(ctx, player)
s = "{digits}{dead}{votes}{traveling} {nick}".format(digits=digits, **fill)
lines.append(s)
if not town["player_info"][player]["dead"]:
alive_count += 1
min_ex = int(math.ceil(alive_count / 2))
non_traveler_count = len(town["players"]) - len(town["travelers"])
try:
count_dict = BOTC_COUNT[non_traveler_count]
except KeyError:
pass
else:
lines.append("{town}/{out}/{minion}/{demon}".format(**count_dict))
lines.append(f"**{alive_count}** players alive.")
lines.append(f"**{min_ex}** votes to execute.")
embed = discord.Embed(
description="\n".join(lines), color=discord.Color.dark_magenta()
)
await ctx.send(content=None, embed=embed)
@commands.command(brief="Print the count of character types")
@require_locked_town()
@delete_command_message()
async def count(self, ctx):
"""Print the count of each character type in this game."""
town = self.bot.botc_townsquare.get_town(ctx.message.channel.category)
non_traveler_count = len(town["players"]) - len(town["travelers"])
try:
count_dict = BOTC_COUNT[non_traveler_count]
except KeyError:
await ctx.send(
"You don't have the players for a proper game.",
delete_after=common.BOTC_MESSAGE_DELETE_DELAY,
)
else:
countstr = (
"{town} townsfolk, {out} outsider(s), {minion} minion(s),"
" and {demon} demon"
).format(**count_dict)
await ctx.send(countstr)
@commands.group(
invoke_without_command=True,
aliases=["nom", "n"],
brief="Nominate a player for execution",
usage="( <target-player> | <nominator> <target-player> )",
)
@require_locked_town()
@delete_command_message()
async def nominate(
self, ctx, members: commands.Greedy[typing.Union[int, discord.Member]]
):
"""Nominate a player for execution, or set both nominator and target.
Indicate a player using either their seat number or their *exact* name/tag.
With one argument, the user of the command will be taken as the nominator.
"""
ts = self.bot.botc_townsquare
category = ctx.message.channel.category
town = ts.get_town(category)
if len(members) == 0:
raise commands.UserInputError("Could not parse any members to nominate")
if town["nomination"] is not None:
msg = (
f"A nomination is already in progress."
f" [`{ctx.prefix}nominate votes <#>`]"
)
return await ctx.send(msg, delete_after=common.BOTC_MESSAGE_DELETE_DELAY)
if len(members) > 2:
raise commands.TooManyArguments(
"Nominate only accepts 1 or 2 player arguments."
)
if len(members) == 1:
nominator = ctx.message.author
target = await ts.resolve_player_arg(ctx, members[0])
else:
nominator = await ts.resolve_player_arg(ctx, members[0])
target = await ts.resolve_player_arg(ctx, members[1])
if target not in town["travelers"]:
nom_type = "execution"
nom_color = discord.Color.green()
else:
nom_type = "exile"
nom_color = discord.Color.gold()
nominator_nick = discord.utils.escape_markdown(
ts.match_name_re(category, nominator)["nick"]
)
target_nick = discord.utils.escape_markdown(
ts.match_name_re(category, target)["nick"]
)
nom_type = "execution" if target not in town["travelers"] else "exile"
nom_str = f"**{nominator_nick}** nominates **{target_nick}** for {nom_type}."
nom_content = nom_str + "\n||\n||"
embed = discord.Embed(color=nom_color, description=nom_str)
embed.set_author(name=nominator_nick, icon_url=nominator.avatar_url)
embed.set_thumbnail(url=target.avatar_url)
nomination = await ctx.send(content=nom_content, embed=embed)
town["nomination"] = nomination
@nominate.command(
    name="votes",
    aliases=["vote"],
    brief="React to nomination with # of votes",
    usage="<num-votes>",
)
@require_locked_town()
@delete_command_message()
async def nominate_votes(self, ctx, num_votes: int):
    """React to the current/previous nomination with the given number of votes."""
    if num_votes < 0 or num_votes > 20:
        raise commands.BadArgument("Number of votes must be in [0, 20].")
    town = self.bot.botc_townsquare.get_town(ctx.message.channel.category)
    # prefer the open nomination; otherwise fall back to the last closed one
    nom = town["nomination"]
    if nom is None:
        nom = town["prev_nomination"]
    if nom is None:
        return await ctx.send(
            "There has not been a nomination to vote on.",
            delete_after=common.BOTC_MESSAGE_DELETE_DELAY,
        )
    await nom.clear_reactions()
    # Build the emoji sequence: an optional tens marker ("10" or "*" for 20),
    # then a ones digit unless it would be a redundant trailing zero.
    tens, ones = divmod(num_votes, 10)
    emojis = []
    if tens == 1:
        emojis.append(EMOJI_DIGITS["10"])
    elif tens == 2:
        emojis.append(EMOJI_DIGITS["*"])
    if ones != 0 or tens == 0:
        emojis.append(EMOJI_DIGITS[str(ones)])
    for emoji in emojis:
        await nom.add_reaction(emoji)
    # with its vote count recorded, the nomination is no longer the open one
    town["prev_nomination"] = nom
    town["nomination"] = None
@nominate.command(
    name="cancel", aliases=["delete", "del"], brief="Cancel the nomination"
)
@require_locked_town()
@delete_command_message()
async def nominate_cancel(self, ctx):
    """Cancel/delete the current or previous nomination."""
    town = self.bot.botc_townsquare.get_town(ctx.message.channel.category)
    # delete the open nomination if there is one, else the most recent one
    for slot in ("nomination", "prev_nomination"):
        if town[slot] is not None:
            await town[slot].delete()
            town[slot] = None
            return
    await ctx.send(
        "There is no nomination to cancel.",
        delete_after=common.BOTC_MESSAGE_DELETE_DELAY,
    )
@commands.command(
    name="public", aliases=["pub", "say"], brief="Make a public statement"
)
@require_locked_town()
@delete_command_message()
async def public(self, ctx, *, statement: str):
    """Make a public statement, highlighted for visibility."""
    if not statement:
        raise commands.UserInputError("Statement is empty")
    author = ctx.message.author
    # look up the author's in-town nickname and escape it for embed markdown
    match = self.bot.botc_townsquare.match_name_re(
        ctx.message.channel.category, author
    )
    author_nick = discord.utils.escape_markdown(match["nick"])
    # repeat the statement inside an embed attributed to its author
    embed = discord.Embed(description=statement, color=discord.Color.blue())
    embed.set_author(name=author_nick, icon_url=author.avatar_url)
    await ctx.send(content=None, embed=embed)
@commands.command(brief="Go to a voice channel", usage="[sidebar-num|name]")
@delete_command_message(delay=0)
async def go(self, ctx, *, vchan: typing.Union[int, discord.VoiceChannel] = None):
    """Go to a specified voice channel/sidebar in the current town category.

    Specify a number to go to the voice channel at that position (starting from 0)
    in the category list, or pass the tag/name for the voice channel. If no
    argument is specified, move to the top voice channel (e.g. Town Square).

    If the Town Square is the top voice channel and the sidebar voice channels are
    numbered after that, the sidebar number can be used as the argument.
    """
    voice_channels = ctx.message.channel.category.voice_channels
    if vchan is None:
        # default: the category's top voice channel (conventionally Town Square)
        try:
            vchan = voice_channels[0]
        except IndexError:
            raise common.BOTCTownSquareErrors.BadSidebarArgument(
                "No voice channels exist in the category"
            )
    elif not isinstance(vchan, discord.VoiceChannel):
        # an int sidebar number; with Town Square as the 0th voice channel,
        # sidebar numbers index the category list directly
        try:
            vchan = voice_channels[vchan]
        except IndexError:
            raise common.BOTCTownSquareErrors.BadSidebarArgument(
                "Voice channel number is invalid"
            )
    # move the author; this fails if they are not connected to voice
    try:
        await ctx.message.author.move_to(vchan)
    except discord.HTTPException:
        await ctx.send(
            "Bring yourself back online first. [connect to voice]",
            delete_after=common.BOTC_MESSAGE_DELETE_DELAY,
        )
| en | 0.893316 | # ---------------------------------------------------------------------------- # Copyright (c) 2020 <NAME> # All rights reserved. # # Distributed under the terms of the BSD 3-clause license. # # The full license is in the LICENSE file, distributed with this software. # # SPDX-License-Identifier: BSD-3-Clause # ---------------------------------------------------------------------------- Components for Blood on the Clocktower voice/text players cog. Return command decorator that raises an error if the town is lunocked. Commands for Blood on the Clocktower voice/text players. During play, you can get a live sense of the state of the game by looking at the voice chat user list. Each player's state, including if they are dead, ghost votes they have, and whether they are traveling, is represented by emojis in their nickname. When you learn that your state has changed (dead / alive / used ghost vote), use the appropriate command (`dead` / `alive` / `voted`) in the text chat, and the bot will give you the appropriate emojis. Anyone can use `townsquare` or `ts` and the bot will respond with a summary of the state of the town. If you just want to know the default character-type count for the game, use `count`. To make a nomination yourself, use the `nominate` command (`nom` or `n` for short) followed by the seat number of the player you'd like to nominate, e.g. `.nominate 1`. When the vote is counted, the storyteller or a helper will record the number of votes as a reaction to the nomination message by using the `nominate votes` sub-command followed by a number. The `public` command is a general tool for making statements that you want to be more noticeable (e.g. Juggler or Gossip abilities). Whatever text you include in the command, as in `.public <text>`, will be repeated and attributed to you using the bot's megaphone. Initialize cog for town square player commands. Check that setup commands are called from a guild and a town category. 
Set the caller or user as dead, changing their name appropriately. Indicate another player if necessary using either their seat number or their *exact* name/tag. Set the caller or user as dead with a used ghost vote. Indicate another player if necessary using either their seat number or their *exact* name/tag. Set the caller or user as alive, changing their name appropriately. Indicate another player if necessary using either their seat number or their *exact* name/tag. Show the current town square. Print the count of each character type in this game. Nominate a player for execution, or set both nominator and target. Indicate a player using either their seat number or their *exact* name/tag. With one argument, the user of the command will be taken as the nominator. #>`]" # of votes", React to the current/previous nomination with the given number of votes. # now that the nomination has a number of votes set, it should be moved to prev Cancel/delete the current or previous nomination. Make a public statement, highlighted for visibility. Go to a specified voice channel/sidebar in the current town category. Specify a number to go to the voice channel at that position (starting from 0) in the category list, or pass the tag/name for the voice channel. If no argument is specified, move to the top voice channel (e.g. Town Square). If the Town Square is the top voice channel and the sidebar voice channels are numbered after that, the sidebar number can be used as the argument. # otherwise vchan is either a discord.VoiceChannel... # or an int, representing a voice channel (sidebar) number in the category # assume town square is the 0th voice channel, so sidebar numbers can # be indexed directly without modification # move author to the requested voice channel | 2.617 | 3 |
experiments/training/v6/minimal_defense/dqn/run.py | FredericoNesti/gym-idsgame | 15 | 6625026 | <filename>experiments/training/v6/minimal_defense/dqn/run.py
import os
import time
import sys
from gym_idsgame.config.runner_mode import RunnerMode
from gym_idsgame.agents.training_agents.q_learning.q_agent_config import QAgentConfig
from gym_idsgame.agents.dao.agent_type import AgentType
from gym_idsgame.config.client_config import ClientConfig
from gym_idsgame.runnner import Runner
from experiments.util import plotting_util, util
from gym_idsgame.agents.training_agents.q_learning.dqn.dqn_config import DQNConfig
def get_script_path():
"""
:return: the script path
"""
return os.path.dirname(os.path.realpath(sys.argv[0]))
def default_output_dir() -> str:
"""
:return: the default output dir
"""
script_dir = get_script_path()
return script_dir
def default_config_path() -> str:
"""
:return: the default path to configuration file
"""
config_path = os.path.join(default_output_dir(), './config.json')
return config_path
def default_config() -> ClientConfig:
"""
:return: Default configuration for the experiment
"""
dqn_config = DQNConfig(input_dim=242, attacker_output_dim=220, hidden_dim=64, replay_memory_size=10000,
num_hidden_layers=1,
replay_start_size=1000, batch_size=32, target_network_update_freq=5000,
gpu=True, tensorboard=True, tensorboard_dir=default_output_dir() + "/tensorboard",
loss_fn="Huber", optimizer="Adam", lr_exp_decay=True, lr_decay_rate=0.99995)
q_agent_config = QAgentConfig(gamma=1, alpha=0.00001, epsilon=0.75, render=False, eval_sleep=0.9,
min_epsilon=0.05, eval_episodes=100, train_log_frequency=100,
epsilon_decay=0.99995, video=True, eval_log_frequency=1,
video_fps=5, video_dir=default_output_dir() + "/videos", num_episodes=500000,
eval_render=False, gifs=True, gif_dir=default_output_dir() + "/gifs",
eval_frequency=5000, attacker=True, defender=False, video_frequency=101,
save_dir=default_output_dir() + "/data", dqn_config=dqn_config,
checkpoint_freq=10000)
env_name = "idsgame-minimal_defense-v6"
client_config = ClientConfig(env_name=env_name, attacker_type=AgentType.DQN_AGENT.value,
mode=RunnerMode.TRAIN_ATTACKER.value,
q_agent_config=q_agent_config, output_dir=default_output_dir(),
title="TrainingDQNAgent vs DefendMinimalDefender")
return client_config
def write_default_config(path:str = None) -> None:
"""
Writes the default configuration to a json file
:param path: the path to write the configuration to
:return: None
"""
if path is None:
path = default_config_path()
config = default_config()
util.write_config_file(config, path)
def plot_csv(config: ClientConfig, eval_csv_path:str, train_csv_path: str) -> None:
"""
Plot results from csv files
:param config: client config
:param eval_csv_path: path to the csv file with evaluation results
:param train_csv_path: path to the csv file with training results
:return: None
"""
plotting_util.read_and_plot_results(train_csv_path, eval_csv_path, config.q_agent_config.train_log_frequency,
config.q_agent_config.eval_frequency, config.q_agent_config.eval_log_frequency,
config.q_agent_config.eval_episodes, config.output_dir, sim=False)
# Program entrypoint
if __name__ == '__main__':
args = util.parse_args(default_config_path())
if args.configpath is not None:
if not os.path.exists(args.configpath):
write_default_config()
config = util.read_config(args.configpath)
else:
config = default_config()
time_str = str(time.time())
util.create_artefact_dirs(config.output_dir)
logger = util.setup_logger("dqn_vs_random_defense-v5", config.output_dir + "/logs/",
time_str=time_str)
config.logger = logger
config.q_agent_config.logger = logger
config.q_agent_config.to_csv(config.output_dir + "/hyperparameters/" + time_str + ".csv")
train_result, eval_result = Runner.run(config)
if len(train_result.avg_episode_steps) > 0 and len(eval_result.avg_episode_steps) > 0:
train_csv_path = config.output_dir + "/data/" + time_str + "_train" + ".csv"
train_result.to_csv(train_csv_path)
eval_csv_path = config.output_dir + "/data/" + time_str + "_eval" + ".csv"
eval_result.to_csv(eval_csv_path)
plot_csv(config, eval_csv_path, train_csv_path)
| <filename>experiments/training/v6/minimal_defense/dqn/run.py
import os
import time
import sys
from gym_idsgame.config.runner_mode import RunnerMode
from gym_idsgame.agents.training_agents.q_learning.q_agent_config import QAgentConfig
from gym_idsgame.agents.dao.agent_type import AgentType
from gym_idsgame.config.client_config import ClientConfig
from gym_idsgame.runnner import Runner
from experiments.util import plotting_util, util
from gym_idsgame.agents.training_agents.q_learning.dqn.dqn_config import DQNConfig
def get_script_path():
"""
:return: the script path
"""
return os.path.dirname(os.path.realpath(sys.argv[0]))
def default_output_dir() -> str:
"""
:return: the default output dir
"""
script_dir = get_script_path()
return script_dir
def default_config_path() -> str:
"""
:return: the default path to configuration file
"""
config_path = os.path.join(default_output_dir(), './config.json')
return config_path
def default_config() -> ClientConfig:
"""
:return: Default configuration for the experiment
"""
dqn_config = DQNConfig(input_dim=242, attacker_output_dim=220, hidden_dim=64, replay_memory_size=10000,
num_hidden_layers=1,
replay_start_size=1000, batch_size=32, target_network_update_freq=5000,
gpu=True, tensorboard=True, tensorboard_dir=default_output_dir() + "/tensorboard",
loss_fn="Huber", optimizer="Adam", lr_exp_decay=True, lr_decay_rate=0.99995)
q_agent_config = QAgentConfig(gamma=1, alpha=0.00001, epsilon=0.75, render=False, eval_sleep=0.9,
min_epsilon=0.05, eval_episodes=100, train_log_frequency=100,
epsilon_decay=0.99995, video=True, eval_log_frequency=1,
video_fps=5, video_dir=default_output_dir() + "/videos", num_episodes=500000,
eval_render=False, gifs=True, gif_dir=default_output_dir() + "/gifs",
eval_frequency=5000, attacker=True, defender=False, video_frequency=101,
save_dir=default_output_dir() + "/data", dqn_config=dqn_config,
checkpoint_freq=10000)
env_name = "idsgame-minimal_defense-v6"
client_config = ClientConfig(env_name=env_name, attacker_type=AgentType.DQN_AGENT.value,
mode=RunnerMode.TRAIN_ATTACKER.value,
q_agent_config=q_agent_config, output_dir=default_output_dir(),
title="TrainingDQNAgent vs DefendMinimalDefender")
return client_config
def write_default_config(path:str = None) -> None:
"""
Writes the default configuration to a json file
:param path: the path to write the configuration to
:return: None
"""
if path is None:
path = default_config_path()
config = default_config()
util.write_config_file(config, path)
def plot_csv(config: ClientConfig, eval_csv_path:str, train_csv_path: str) -> None:
"""
Plot results from csv files
:param config: client config
:param eval_csv_path: path to the csv file with evaluation results
:param train_csv_path: path to the csv file with training results
:return: None
"""
plotting_util.read_and_plot_results(train_csv_path, eval_csv_path, config.q_agent_config.train_log_frequency,
config.q_agent_config.eval_frequency, config.q_agent_config.eval_log_frequency,
config.q_agent_config.eval_episodes, config.output_dir, sim=False)
# Program entrypoint
if __name__ == '__main__':
args = util.parse_args(default_config_path())
if args.configpath is not None:
if not os.path.exists(args.configpath):
write_default_config()
config = util.read_config(args.configpath)
else:
config = default_config()
time_str = str(time.time())
util.create_artefact_dirs(config.output_dir)
logger = util.setup_logger("dqn_vs_random_defense-v5", config.output_dir + "/logs/",
time_str=time_str)
config.logger = logger
config.q_agent_config.logger = logger
config.q_agent_config.to_csv(config.output_dir + "/hyperparameters/" + time_str + ".csv")
train_result, eval_result = Runner.run(config)
if len(train_result.avg_episode_steps) > 0 and len(eval_result.avg_episode_steps) > 0:
train_csv_path = config.output_dir + "/data/" + time_str + "_train" + ".csv"
train_result.to_csv(train_csv_path)
eval_csv_path = config.output_dir + "/data/" + time_str + "_eval" + ".csv"
eval_result.to_csv(eval_csv_path)
plot_csv(config, eval_csv_path, train_csv_path)
| en | 0.708275 | :return: the script path :return: the default output dir :return: the default path to configuration file :return: Default configuration for the experiment Writes the default configuration to a json file :param path: the path to write the configuration to :return: None Plot results from csv files :param config: client config :param eval_csv_path: path to the csv file with evaluation results :param train_csv_path: path to the csv file with training results :return: None # Program entrypoint | 1.87857 | 2 |
linked_list/doubly/crud.py | maransowthri/data-structures-algorithms | 0 | 6625027 | class Node:
def __init__(self, val, next=None, prev=None):
self.val = val
self.next = next
self.prev = prev
class LinkedList:
def __init__(self):
self.head = None
self.tail = None
def __iter__(self):
cur = self.head
while cur:
yield cur
cur = cur.next
def size(self):
node_len = 0
cur = self.head
while cur:
node_len += 1
cur = cur.next
return node_len
def reverse(self):
cur = self.tail
while cur:
yield cur
cur = cur.prev
def index(self, val):
if not self.head:
raise Exception('Linked list does not exist')
cur_index = 0
cur = self.head
while cur:
if cur.val == val:
return cur_index
cur = cur.next
cur_index += 1
raise Exception('Value not exist')
def append(self, val):
new_node = Node(val)
if self.head:
new_node.prev = self.tail
self.tail.next = new_node
self.tail = new_node
else:
self.head = new_node
self.tail = new_node
def insert(self, val, index):
head_size = self.size()
if not 0 <= index <= head_size:
raise Exception("Index out of range")
new_node = Node(val)
if index == 0:
if self.head:
new_node.next = self.head
self.head.prev = new_node
else:
self.tail = new_node
self.head = new_node
elif index == head_size:
new_node.prev = self.tail
self.tail.next = new_node
self.tail = new_node
else:
cur_index = 1
cur = self.head
while index != cur_index:
cur_index += 1
cur = cur.next
new_node.prev = cur
new_node.next = cur.next
cur.next.prev = new_node
cur.next = new_node
def pop(self, index):
head_size = self.size()
if not 0 <= index < head_size:
raise Exception("Index out of range")
if not self.head:
raise Exception("List is empty")
if index == 0:
if self.head == self.tail:
self.head = None
self.tail = None
else:
self.head = self.head.next
self.head.prev = None
else:
cur_index = 0
cur = self.head
while cur_index < index:
cur_index += 1
prev = cur
cur = cur.next
if cur_index == head_size - 1:
self.tail = self.tail.prev
self.tail.next = None
else:
prev.next = cur.next
cur.next.prev = prev
cur.next = None
cur.prev = None
def clear(self):
if not self.head:
raise Exception("List does not exist")
cur = self.head
while cur:
cur.prev = None
cur = cur.next
self.head = None
self.tail = None
linked_list = LinkedList()
input_list = []
input_list = [1, 3]
'''Append'''
for val in input_list:
linked_list.append(val)
'''Insertion'''
linked_list.insert(0, 0)
linked_list.insert(4, 3)
linked_list.insert(2, 2)
'''Searching'''
print('Index of 0 is', linked_list.index(0))
# print('Index of 4 is', linked_list.index(4))
# print('Index of 8 is', linked_list.index(8))
'''Removal'''
linked_list.pop(0)
linked_list.pop(1)
linked_list.pop(2)
'''Clear'''
linked_list.clear()
'''Traversal'''
print('Traversal')
for node in linked_list:
print(node.val)
'''Reversal'''
print('Reversal')
for node in linked_list.reverse():
print(node.val)
'''Head & Tail Values'''
print('First', linked_list.head.val if linked_list.head else None)
print('Last', linked_list.tail.val if linked_list.tail else None)
| class Node:
def __init__(self, val, next=None, prev=None):
self.val = val
self.next = next
self.prev = prev
class LinkedList:
def __init__(self):
self.head = None
self.tail = None
def __iter__(self):
cur = self.head
while cur:
yield cur
cur = cur.next
def size(self):
node_len = 0
cur = self.head
while cur:
node_len += 1
cur = cur.next
return node_len
def reverse(self):
cur = self.tail
while cur:
yield cur
cur = cur.prev
def index(self, val):
if not self.head:
raise Exception('Linked list does not exist')
cur_index = 0
cur = self.head
while cur:
if cur.val == val:
return cur_index
cur = cur.next
cur_index += 1
raise Exception('Value not exist')
def append(self, val):
new_node = Node(val)
if self.head:
new_node.prev = self.tail
self.tail.next = new_node
self.tail = new_node
else:
self.head = new_node
self.tail = new_node
def insert(self, val, index):
head_size = self.size()
if not 0 <= index <= head_size:
raise Exception("Index out of range")
new_node = Node(val)
if index == 0:
if self.head:
new_node.next = self.head
self.head.prev = new_node
else:
self.tail = new_node
self.head = new_node
elif index == head_size:
new_node.prev = self.tail
self.tail.next = new_node
self.tail = new_node
else:
cur_index = 1
cur = self.head
while index != cur_index:
cur_index += 1
cur = cur.next
new_node.prev = cur
new_node.next = cur.next
cur.next.prev = new_node
cur.next = new_node
def pop(self, index):
head_size = self.size()
if not 0 <= index < head_size:
raise Exception("Index out of range")
if not self.head:
raise Exception("List is empty")
if index == 0:
if self.head == self.tail:
self.head = None
self.tail = None
else:
self.head = self.head.next
self.head.prev = None
else:
cur_index = 0
cur = self.head
while cur_index < index:
cur_index += 1
prev = cur
cur = cur.next
if cur_index == head_size - 1:
self.tail = self.tail.prev
self.tail.next = None
else:
prev.next = cur.next
cur.next.prev = prev
cur.next = None
cur.prev = None
def clear(self):
if not self.head:
raise Exception("List does not exist")
cur = self.head
while cur:
cur.prev = None
cur = cur.next
self.head = None
self.tail = None
linked_list = LinkedList()
input_list = []
input_list = [1, 3]
'''Append'''
for val in input_list:
linked_list.append(val)
'''Insertion'''
linked_list.insert(0, 0)
linked_list.insert(4, 3)
linked_list.insert(2, 2)
'''Searching'''
print('Index of 0 is', linked_list.index(0))
# print('Index of 4 is', linked_list.index(4))
# print('Index of 8 is', linked_list.index(8))
'''Removal'''
linked_list.pop(0)
linked_list.pop(1)
linked_list.pop(2)
'''Clear'''
linked_list.clear()
'''Traversal'''
print('Traversal')
for node in linked_list:
print(node.val)
'''Reversal'''
print('Reversal')
for node in linked_list.reverse():
print(node.val)
'''Head & Tail Values'''
print('First', linked_list.head.val if linked_list.head else None)
print('Last', linked_list.tail.val if linked_list.tail else None)
| en | 0.25607 | Append Insertion Searching # print('Index of 4 is', linked_list.index(4)) # print('Index of 8 is', linked_list.index(8)) Removal Clear Traversal Reversal Head & Tail Values | 3.972592 | 4 |
models/L41.py | Totoketchup/Adaptive-MultiSpeaker-Separation | 43 | 6625028 | <reponame>Totoketchup/Adaptive-MultiSpeaker-Separation
# -*- coding: utf-8 -*-
import tensorflow as tf
from utils.ops import BLSTM, Conv1D, Reshape, Normalize, f_props, scope, variable_summaries
from models.network import Separator
class L41Model(Separator):
def __init__(self, graph=None, **kwargs):
kwargs['mask_a'] = 1.0
kwargs['mask_b'] = -1.0
super(L41Model, self).__init__(graph, **kwargs)
with self.graph.as_default():
# Define the speaker vectors to use during training
self.speaker_vectors =tf.Variable(tf.truncated_normal(
[self.num_speakers, self.embedding_size],
stddev=tf.sqrt(2/float(self.embedding_size))), name='speaker_centroids')
self.init_separator()
@scope
def prediction(self):
# L41 network
shape = tf.shape(self.X)
self.true_masks = 1.0 + self.y
X_in = tf.identity(self.X)
layers = [BLSTM(self.layer_size, name='BLSTM_'+str(i), drop_val=self.rdropout) for i in range(self.nb_layers)]
layers_sp = [
Conv1D([1, self.layer_size, self.embedding_size*self.F]),
Reshape([self.B, shape[1], self.F, self.embedding_size])
]
if self.normalize:
layers_sp += [Normalize(3)]
layers += layers_sp
y = f_props(layers, X_in)
return y
@scope
def cost(self):
"""
Construct the cost function op for the negative sampling cost
"""
# Get the embedded T-F vectors from the network
embedding = self.prediction # [B, T, F, E]
# Normalize the speaker vectors and collect the speaker vectors
# corresponding to the speakers in batch
if self.normalize:
speaker_vectors = tf.nn.l2_normalize(self.speaker_vectors, 1)
else:
speaker_vectors = self.speaker_vectors
if self.sampling is None:
I = tf.expand_dims(self.I, axis=2) # [B, S, 1]
# Gathering the speaker_vectors [|S|, E]
Vspeakers = tf.gather_nd(speaker_vectors, I) # [B, S, E]
else:
I = tf.expand_dims(self.I, axis=2)
# Gathering the speaker_vectors [B, S, E]
Vspeakers = tf.gather_nd(speaker_vectors, I)
# Get index of dominant speaker
dominant = tf.argmax(self.y, -1) # [B, T, F]
# [B, TF]
dominant = tf.reshape(dominant, [self.B, -1, 1])
# []
dominant_speaker = tf.gather(self.I, dominant) # [B, TF]
dominant_speaker_vector = tf.gather_nd(tf.expand_dims(speaker_vectors, 1), dominant_speaker) # [B, TF, E]
dominant_speaker_vector = tf.reshape(dominant_speaker_vector, [self.B, -1, self.F, self.embedding_size])
dominant_speaker_vector = tf.expand_dims(dominant_speaker_vector, 3) # [B, T, F, 1, E]
if self.ns_method == 'k-nearest':
# For each speaker vector get the K-neighbors
with tf.name_scope('K-Neighbors'):
# [B, S, 1, E]
Vspeakers_ext = tf.expand_dims(Vspeakers, 2)
# [1, 1, |S|, E]
speaker_vectors_ext = tf.expand_dims(tf.expand_dims(speaker_vectors, 0), 0)
# dot product # [B, S, |S|]
prod_dot = tf.reduce_sum(Vspeakers_ext * speaker_vectors_ext, 3)
# K neighbors [B, S, K]
_, k_neighbors = tf.nn.top_k(prod_dot, k=self.sampling, sorted=False)
k_neighbors = tf.reshape(k_neighbors, [-1, 1])
# K neighbors vectors [B, S, K, E]
k_neighbors_vectors = tf.gather_nd(speaker_vectors, k_neighbors)
k_neighbors_vectors = tf.reshape(k_neighbors_vectors, [self.B, self.S, self.sampling, self.embedding_size])
batch_range = tf.tile(tf.reshape(tf.range(tf.cast(self.B, tf.int64), dtype=tf.int64), shape=[self.B, 1, 1]), [1, tf.shape(dominant)[1], 1])
indices = tf.concat([batch_range, dominant], axis = 2)
# Gathered K-nearest neighbors on each tf bins for the dominant
# [B, T, F, K, E]
vectors_tf = tf.reshape(tf.gather_nd(k_neighbors_vectors, indices)
,[self.B, -1, self.F, self.sampling, self.embedding_size])
elif self.ns_method == 'random':
# Select randomly K other vectors, except the one in the batch
with tf.name_scope('Random'):
ext_I = tf.cast(tf.expand_dims(self.I, 1), tf.int32)
ranges = tf.cast(tf.tile(tf.reshape(tf.range(self.num_speakers), [1, self.num_speakers, 1]), [self.B, 1, 1]), tf.int32)
# [B, S] boolean mask
indices_available = tf.logical_not(tf.reduce_any(tf.equal(ext_I, ranges), -1))
indices_available = tf.boolean_mask(tf.squeeze(ranges), indices_available)
# [B, |S| - S]
indices_available = tf.reshape(indices_available, [self.B, self.num_speakers - self.S])
shuffled_indices = tf.map_fn(lambda x : tf.random_shuffle(x, seed=42) , indices_available)
rand_I = shuffled_indices[:, :self.sampling] # [B, K]
rand_I = tf.expand_dims(rand_I, 2) # [B, K, 1]
# Gathering the speaker_vectors [B, K, E]
Vspeakers_other = tf.gather_nd(speaker_vectors, rand_I)
vectors_tf = tf.reshape(Vspeakers_other, [self.B, 1 , 1, self.sampling, self.embedding_size])
# Additional term for the loss
embedding_ext = tf.expand_dims(embedding, 3)
doto = tf.reduce_sum(vectors_tf * embedding_ext, -1)
c = -tf.log(tf.nn.sigmoid(tf.negative(doto))) # [B, T, F, K]
neg_sampl = tf.reduce_mean(c, -1) # [B, T, F]
# Expand the dimensions in preparation for broadcasting
Vspeakers_broad = tf.expand_dims(Vspeakers, 1)
Vspeakers_broad = tf.expand_dims(Vspeakers_broad, 1)
embedding_broad = tf.expand_dims(embedding, 3)
# Compute the dot product between the embedding vectors and speaker
# vectors
dot = tf.reduce_sum(Vspeakers_broad * embedding_broad, 4)
# Compute the cost for every element
cost = -tf.log(tf.nn.sigmoid(self.y * dot))
# Average the cost over all speakers in the input
cost = tf.reduce_mean(cost, 3)
if self.sampling is not None:
cost += self.ns_rate * neg_sampl
# Average the cost over all batches
cost = tf.reduce_mean(cost, 0)
training_vars = tf.trainable_variables()
for var in training_vars:
if 'prediction' in var.name:
variable_summaries(var)
# Average the cost over all T-F elements. Here is where weighting to
# account for gradient confidence can occur
cost = tf.reduce_mean(cost)
tf.summary.scalar('cost', cost)
#cost = cost + 0.001*self.adapt_front.l*reg
# tf.summary.scalar('regularized', cost)
return cost | # -*- coding: utf-8 -*-
import tensorflow as tf
from utils.ops import BLSTM, Conv1D, Reshape, Normalize, f_props, scope, variable_summaries
from models.network import Separator
class L41Model(Separator):
def __init__(self, graph=None, **kwargs):
kwargs['mask_a'] = 1.0
kwargs['mask_b'] = -1.0
super(L41Model, self).__init__(graph, **kwargs)
with self.graph.as_default():
# Define the speaker vectors to use during training
self.speaker_vectors =tf.Variable(tf.truncated_normal(
[self.num_speakers, self.embedding_size],
stddev=tf.sqrt(2/float(self.embedding_size))), name='speaker_centroids')
self.init_separator()
@scope
def prediction(self):
# L41 network
shape = tf.shape(self.X)
self.true_masks = 1.0 + self.y
X_in = tf.identity(self.X)
layers = [BLSTM(self.layer_size, name='BLSTM_'+str(i), drop_val=self.rdropout) for i in range(self.nb_layers)]
layers_sp = [
Conv1D([1, self.layer_size, self.embedding_size*self.F]),
Reshape([self.B, shape[1], self.F, self.embedding_size])
]
if self.normalize:
layers_sp += [Normalize(3)]
layers += layers_sp
y = f_props(layers, X_in)
return y
@scope
def cost(self):
"""
Construct the cost function op for the negative sampling cost
"""
# Get the embedded T-F vectors from the network
embedding = self.prediction # [B, T, F, E]
# Normalize the speaker vectors and collect the speaker vectors
# corresponding to the speakers in batch
if self.normalize:
speaker_vectors = tf.nn.l2_normalize(self.speaker_vectors, 1)
else:
speaker_vectors = self.speaker_vectors
if self.sampling is None:
I = tf.expand_dims(self.I, axis=2) # [B, S, 1]
# Gathering the speaker_vectors [|S|, E]
Vspeakers = tf.gather_nd(speaker_vectors, I) # [B, S, E]
else:
I = tf.expand_dims(self.I, axis=2)
# Gathering the speaker_vectors [B, S, E]
Vspeakers = tf.gather_nd(speaker_vectors, I)
# Get index of dominant speaker
dominant = tf.argmax(self.y, -1) # [B, T, F]
# [B, TF]
dominant = tf.reshape(dominant, [self.B, -1, 1])
# []
dominant_speaker = tf.gather(self.I, dominant) # [B, TF]
dominant_speaker_vector = tf.gather_nd(tf.expand_dims(speaker_vectors, 1), dominant_speaker) # [B, TF, E]
dominant_speaker_vector = tf.reshape(dominant_speaker_vector, [self.B, -1, self.F, self.embedding_size])
dominant_speaker_vector = tf.expand_dims(dominant_speaker_vector, 3) # [B, T, F, 1, E]
if self.ns_method == 'k-nearest':
# For each speaker vector get the K-neighbors
with tf.name_scope('K-Neighbors'):
# [B, S, 1, E]
Vspeakers_ext = tf.expand_dims(Vspeakers, 2)
# [1, 1, |S|, E]
speaker_vectors_ext = tf.expand_dims(tf.expand_dims(speaker_vectors, 0), 0)
# dot product # [B, S, |S|]
prod_dot = tf.reduce_sum(Vspeakers_ext * speaker_vectors_ext, 3)
# K neighbors [B, S, K]
_, k_neighbors = tf.nn.top_k(prod_dot, k=self.sampling, sorted=False)
k_neighbors = tf.reshape(k_neighbors, [-1, 1])
# K neighbors vectors [B, S, K, E]
k_neighbors_vectors = tf.gather_nd(speaker_vectors, k_neighbors)
k_neighbors_vectors = tf.reshape(k_neighbors_vectors, [self.B, self.S, self.sampling, self.embedding_size])
batch_range = tf.tile(tf.reshape(tf.range(tf.cast(self.B, tf.int64), dtype=tf.int64), shape=[self.B, 1, 1]), [1, tf.shape(dominant)[1], 1])
indices = tf.concat([batch_range, dominant], axis = 2)
# Gathered K-nearest neighbors on each tf bins for the dominant
# [B, T, F, K, E]
vectors_tf = tf.reshape(tf.gather_nd(k_neighbors_vectors, indices)
,[self.B, -1, self.F, self.sampling, self.embedding_size])
elif self.ns_method == 'random':
# Select randomly K other vectors, except the one in the batch
with tf.name_scope('Random'):
ext_I = tf.cast(tf.expand_dims(self.I, 1), tf.int32)
ranges = tf.cast(tf.tile(tf.reshape(tf.range(self.num_speakers), [1, self.num_speakers, 1]), [self.B, 1, 1]), tf.int32)
# [B, S] boolean mask
indices_available = tf.logical_not(tf.reduce_any(tf.equal(ext_I, ranges), -1))
indices_available = tf.boolean_mask(tf.squeeze(ranges), indices_available)
# [B, |S| - S]
indices_available = tf.reshape(indices_available, [self.B, self.num_speakers - self.S])
shuffled_indices = tf.map_fn(lambda x : tf.random_shuffle(x, seed=42) , indices_available)
rand_I = shuffled_indices[:, :self.sampling] # [B, K]
rand_I = tf.expand_dims(rand_I, 2) # [B, K, 1]
# Gathering the speaker_vectors [B, K, E]
Vspeakers_other = tf.gather_nd(speaker_vectors, rand_I)
vectors_tf = tf.reshape(Vspeakers_other, [self.B, 1 , 1, self.sampling, self.embedding_size])
# Additional term for the loss
embedding_ext = tf.expand_dims(embedding, 3)
doto = tf.reduce_sum(vectors_tf * embedding_ext, -1)
c = -tf.log(tf.nn.sigmoid(tf.negative(doto))) # [B, T, F, K]
neg_sampl = tf.reduce_mean(c, -1) # [B, T, F]
# Expand the dimensions in preparation for broadcasting
Vspeakers_broad = tf.expand_dims(Vspeakers, 1)
Vspeakers_broad = tf.expand_dims(Vspeakers_broad, 1)
embedding_broad = tf.expand_dims(embedding, 3)
# Compute the dot product between the embedding vectors and speaker
# vectors
dot = tf.reduce_sum(Vspeakers_broad * embedding_broad, 4)
# Compute the cost for every element
cost = -tf.log(tf.nn.sigmoid(self.y * dot))
# Average the cost over all speakers in the input
cost = tf.reduce_mean(cost, 3)
if self.sampling is not None:
cost += self.ns_rate * neg_sampl
# Average the cost over all batches
cost = tf.reduce_mean(cost, 0)
training_vars = tf.trainable_variables()
for var in training_vars:
if 'prediction' in var.name:
variable_summaries(var)
# Average the cost over all T-F elements. Here is where weighting to
# account for gradient confidence can occur
cost = tf.reduce_mean(cost)
tf.summary.scalar('cost', cost)
#cost = cost + 0.001*self.adapt_front.l*reg
# tf.summary.scalar('regularized', cost)
return cost | en | 0.807033 | # -*- coding: utf-8 -*- # Define the speaker vectors to use during training # L41 network Construct the cost function op for the negative sampling cost # Get the embedded T-F vectors from the network # [B, T, F, E] # Normalize the speaker vectors and collect the speaker vectors # corresponding to the speakers in batch # [B, S, 1] # Gathering the speaker_vectors [|S|, E] # [B, S, E] # Gathering the speaker_vectors [B, S, E] # Get index of dominant speaker # [B, T, F] # [B, TF] # [] # [B, TF] # [B, TF, E] # [B, T, F, 1, E] # For each speaker vector get the K-neighbors # [B, S, 1, E] # [1, 1, |S|, E] # dot product # [B, S, |S|] # K neighbors [B, S, K] # K neighbors vectors [B, S, K, E] # Gathered K-nearest neighbors on each tf bins for the dominant # [B, T, F, K, E] # Select randomly K other vectors, except the one in the batch # [B, S] boolean mask # [B, |S| - S] # [B, K] # [B, K, 1] # Gathering the speaker_vectors [B, K, E] # Additional term for the loss # [B, T, F, K] # [B, T, F] # Expand the dimensions in preparation for broadcasting # Compute the dot product between the embedding vectors and speaker # vectors # Compute the cost for every element # Average the cost over all speakers in the input # Average the cost over all batches # Average the cost over all T-F elements. Here is where weighting to # account for gradient confidence can occur #cost = cost + 0.001*self.adapt_front.l*reg # tf.summary.scalar('regularized', cost) | 2.553409 | 3 |
python/heterocl/mlir/__init__.py | chhzh123/heterocl | 0 | 6625029 | import hcl_mlir
from hcl_mlir.ir import *
print("Done HCL-MLIR initialization") | import hcl_mlir
from hcl_mlir.ir import *
print("Done HCL-MLIR initialization") | none | 1 | 1.210288 | 1 | |
python/test_selection_sort.py | PabloAceG/sorting-algorithms | 0 | 6625030 | <reponame>PabloAceG/sorting-algorithms
import unittest
from selection_sort import sort
from strategy import Order, BadOrderError
__author__ = "<NAME>"
__copyright__ = "Copyright 2020"
__credits__ = [ "<NAME>" ]
__license__ = "Apache License 2.0"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
class SelectionSortTest(unittest.TestCase):
# Order integers
def test_order_list_ten_integers(self):
# The desired order of the array
ordered_arr_int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# See if it does not desorders an ordered array of integers and that
# it is able to order an array of integers
arr = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
arr2 = [9, 5, 10, 1, 3, 2, 4, 8, 7, 6]
self.assertEqual(ordered_arr_int, sort(arr))
self.assertEqual(ordered_arr_int, sort(arr2))
# Order decimals
def test_order_list_ten_decimals(self):
# The desired order of the array
ordered_arr_float = [0.130549262817116, 0.218716609132574,
0.250759585601376, 0.308800541636632,
0.472098705199645, 0.607735863960446,
0.613506363079981, 0.658317309179441,
0.835447955717431, 0.943529879515157]
# See if it does not desorders an ordered array of floats and that
# it is able to order an array of floats
arr = [0.130549262817116, 0.218716609132574,
0.250759585601376, 0.308800541636632,
0.472098705199645, 0.607735863960446,
0.613506363079981, 0.658317309179441,
0.835447955717431, 0.943529879515157]
arr2 = [0.943529879515157, 0.472098705199645,
0.308800541636632, 0.658317309179441,
0.835447955717431, 0.218716609132574,
0.250759585601376, 0.613506363079981,
0.607735863960446, 0.130549262817116]
self.assertEqual(ordered_arr_float, sort(arr))
self.assertEqual(ordered_arr_float, sort(arr2))
# Order characters
def test_order_list_ten_characters(self):
# The desired order of the array
ordered_arr_char = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']
# See if it does not desorders an ordered array of chars and that
# it is able to order an array of chars
arr = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']
arr2 = ['e', 'c', 'j', 'b', 'h', 'f', 'd', 'a', 'g', 'i']
self.assertEqual(ordered_arr_char, sort(arr))
self.assertEqual(ordered_arr_char, sort(arr2))
# Order strings
def test_order_list_ten_strings(self):
# The desired order of the array
ordered_arr_string = ['2ZvVygy7kV', '2uVuM9ogZL',
'3okG2eowaZ', '78lu6g4xPl',
'LaK7Zv0vFn', 'OLfyqOE8nX',
'VVZ1YRcAY3', 'YhHda1XsQS',
'ohS2yW0msn', 'ru3TkQyUWv']
# See if it does not desorders an ordered array of strings and that
# it is able to order an array of strings
arr = ['2ZvVygy7kV', '2uVuM9ogZL',
'3okG2eowaZ', '78lu6g4xPl',
'LaK7Zv0vFn', 'OLfyqOE8nX',
'VVZ1YRcAY3', 'YhHda1XsQS',
'ohS2yW0msn', 'ru3TkQyUWv']
arr2 = ['ohS2yW0msn', 'ru3TkQyUWv',
'YhHda1XsQS', 'VVZ1YRcAY3',
'2uVuM9ogZL', 'OLfyqOE8nX',
'3okG2eowaZ', '78lu6g4xPl',
'2ZvVygy7kV', 'LaK7Zv0vFn']
self.assertEqual(ordered_arr_string, sort(arr))
self.assertEqual(ordered_arr_string, sort(arr2))
# Descending
def test_order_descending(self):
# The desired order of the array
ordered_arr = [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
# See if it is able to sort the array descending order
arr = [9, 5, 0, 1, 3, 2, 4, 8, 7, 6]
self.assertEqual(ordered_arr, sort(arr, order=Order.DESC))
# Empty array
def test_empty_list(self):
# Empty list ordering should return empty list
self.assertEqual([], sort([]))
# Bad Inputs
def test_bad_order(self):
# Bad ordering preference should raise an error
with self.assertRaises(BadOrderError):
sort([], order="BadOrder")
if __name__ == '__main__':
unittest.main()
| import unittest
from selection_sort import sort
from strategy import Order, BadOrderError
__author__ = "<NAME>"
__copyright__ = "Copyright 2020"
__credits__ = [ "<NAME>" ]
__license__ = "Apache License 2.0"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
class SelectionSortTest(unittest.TestCase):
# Order integers
def test_order_list_ten_integers(self):
# The desired order of the array
ordered_arr_int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# See if it does not desorders an ordered array of integers and that
# it is able to order an array of integers
arr = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
arr2 = [9, 5, 10, 1, 3, 2, 4, 8, 7, 6]
self.assertEqual(ordered_arr_int, sort(arr))
self.assertEqual(ordered_arr_int, sort(arr2))
# Order decimals
def test_order_list_ten_decimals(self):
# The desired order of the array
ordered_arr_float = [0.130549262817116, 0.218716609132574,
0.250759585601376, 0.308800541636632,
0.472098705199645, 0.607735863960446,
0.613506363079981, 0.658317309179441,
0.835447955717431, 0.943529879515157]
# See if it does not desorders an ordered array of floats and that
# it is able to order an array of floats
arr = [0.130549262817116, 0.218716609132574,
0.250759585601376, 0.308800541636632,
0.472098705199645, 0.607735863960446,
0.613506363079981, 0.658317309179441,
0.835447955717431, 0.943529879515157]
arr2 = [0.943529879515157, 0.472098705199645,
0.308800541636632, 0.658317309179441,
0.835447955717431, 0.218716609132574,
0.250759585601376, 0.613506363079981,
0.607735863960446, 0.130549262817116]
self.assertEqual(ordered_arr_float, sort(arr))
self.assertEqual(ordered_arr_float, sort(arr2))
# Order characters
def test_order_list_ten_characters(self):
# The desired order of the array
ordered_arr_char = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']
# See if it does not desorders an ordered array of chars and that
# it is able to order an array of chars
arr = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']
arr2 = ['e', 'c', 'j', 'b', 'h', 'f', 'd', 'a', 'g', 'i']
self.assertEqual(ordered_arr_char, sort(arr))
self.assertEqual(ordered_arr_char, sort(arr2))
# Order strings
def test_order_list_ten_strings(self):
# The desired order of the array
ordered_arr_string = ['2ZvVygy7kV', '2uVuM9ogZL',
'3okG2eowaZ', '78lu6g4xPl',
'LaK7Zv0vFn', 'OLfyqOE8nX',
'VVZ1YRcAY3', 'YhHda1XsQS',
'ohS2yW0msn', 'ru3TkQyUWv']
# See if it does not desorders an ordered array of strings and that
# it is able to order an array of strings
arr = ['2ZvVygy7kV', '2uVuM9ogZL',
'3okG2eowaZ', '78lu6g4xPl',
'LaK7Zv0vFn', 'OLfyqOE8nX',
'VVZ1YRcAY3', 'YhHda1XsQS',
'ohS2yW0msn', 'ru3TkQyUWv']
arr2 = ['ohS2yW0msn', 'ru3TkQyUWv',
'YhHda1XsQS', 'VVZ1YRcAY3',
'2uVuM9ogZL', 'OLfyqOE8nX',
'3okG2eowaZ', '78lu6g4xPl',
'2ZvVygy7kV', 'LaK7Zv0vFn']
self.assertEqual(ordered_arr_string, sort(arr))
self.assertEqual(ordered_arr_string, sort(arr2))
# Descending
def test_order_descending(self):
# The desired order of the array
ordered_arr = [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
# See if it is able to sort the array descending order
arr = [9, 5, 0, 1, 3, 2, 4, 8, 7, 6]
self.assertEqual(ordered_arr, sort(arr, order=Order.DESC))
# Empty array
def test_empty_list(self):
# Empty list ordering should return empty list
self.assertEqual([], sort([]))
# Bad Inputs
def test_bad_order(self):
# Bad ordering preference should raise an error
with self.assertRaises(BadOrderError):
sort([], order="BadOrder")
if __name__ == '__main__':
unittest.main() | en | 0.809464 | # Order integers # The desired order of the array # See if it does not desorders an ordered array of integers and that # it is able to order an array of integers # Order decimals # The desired order of the array # See if it does not desorders an ordered array of floats and that # it is able to order an array of floats # Order characters # The desired order of the array # See if it does not desorders an ordered array of chars and that # it is able to order an array of chars # Order strings # The desired order of the array # See if it does not desorders an ordered array of strings and that # it is able to order an array of strings # Descending # The desired order of the array # See if it is able to sort the array descending order # Empty array # Empty list ordering should return empty list # Bad Inputs # Bad ordering preference should raise an error | 3.387326 | 3 |
mosqito/functions/oct3filter/square_and_smooth.py | JosemaAlvarez/MoSQITo | 0 | 6625031 | # -*- coding: utf-8 -*-
"""
@date Created on Fri May 22 2020
@author martin_g for Eomys
"""
# Standard library imports
import numpy as np
from scipy import signal
def square_and_smooth(sig, center_freq, fs):
"""3rd order low-pass filtering (See ISO 532-1 section 6.3)
Parameters
----------
sig : numpy.ndarray
time signal sampled at 48 kHz [pa]
coeff : numpy.ndarray
filter coeeficients
gain : float
filter gain
Outputs
-------
signal_filt : numpy.ndarray
filtered time signal
"""
# Frequency dependent time constant
if center_freq <= 1000:
tau = 2 / (3 * center_freq)
else:
tau = 2 / (3 * 1000)
# Squaring
sig = sig**2
# Three smoothing low-pass filters
a1 = np.exp(-1 / (fs * tau))
b0 = 1 -a1
# zi = signal.lfilter_zi([b0], [1 -a1])
for i in range(3):
sig = signal.lfilter([b0], [1, -a1], sig)
return sig | # -*- coding: utf-8 -*-
"""
@date Created on Fri May 22 2020
@author martin_g for Eomys
"""
# Standard library imports
import numpy as np
from scipy import signal
def square_and_smooth(sig, center_freq, fs):
"""3rd order low-pass filtering (See ISO 532-1 section 6.3)
Parameters
----------
sig : numpy.ndarray
time signal sampled at 48 kHz [pa]
coeff : numpy.ndarray
filter coeeficients
gain : float
filter gain
Outputs
-------
signal_filt : numpy.ndarray
filtered time signal
"""
# Frequency dependent time constant
if center_freq <= 1000:
tau = 2 / (3 * center_freq)
else:
tau = 2 / (3 * 1000)
# Squaring
sig = sig**2
# Three smoothing low-pass filters
a1 = np.exp(-1 / (fs * tau))
b0 = 1 -a1
# zi = signal.lfilter_zi([b0], [1 -a1])
for i in range(3):
sig = signal.lfilter([b0], [1, -a1], sig)
return sig | en | 0.67208 | # -*- coding: utf-8 -*- @date Created on Fri May 22 2020 @author martin_g for Eomys # Standard library imports 3rd order low-pass filtering (See ISO 532-1 section 6.3) Parameters ---------- sig : numpy.ndarray time signal sampled at 48 kHz [pa] coeff : numpy.ndarray filter coeeficients gain : float filter gain Outputs ------- signal_filt : numpy.ndarray filtered time signal # Frequency dependent time constant # Squaring # Three smoothing low-pass filters # zi = signal.lfilter_zi([b0], [1 -a1]) | 2.771978 | 3 |
pyqtgraph/opengl/items/GLVolumeItem.py | robertsj/poropy | 1 | 6625032 | <gh_stars>1-10
from OpenGL.GL import *
from .. GLGraphicsItem import GLGraphicsItem
from pyqtgraph.Qt import QtGui
import numpy as np
__all__ = ['GLVolumeItem']
class GLVolumeItem(GLGraphicsItem):
"""
**Bases:** :class:`GLGraphicsItem <pyqtgraph.opengl.GLGraphicsItem>`
Displays volumetric data.
"""
def __init__(self, data, sliceDensity=1, smooth=True):
"""
============== =======================================================================================
**Arguments:**
data Volume data to be rendered. *Must* be 4D numpy array (x, y, z, RGBA) with dtype=ubyte.
sliceDensity Density of slices to render through the volume. A value of 1 means one slice per voxel.
smooth (bool) If True, the volume slices are rendered with linear interpolation
============== =======================================================================================
"""
self.sliceDensity = sliceDensity
self.smooth = smooth
self.data = data
GLGraphicsItem.__init__(self)
def initializeGL(self):
glEnable(GL_TEXTURE_3D)
self.texture = glGenTextures(1)
glBindTexture(GL_TEXTURE_3D, self.texture)
if self.smooth:
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
else:
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_BORDER)
shape = self.data.shape
glTexImage3D(GL_TEXTURE_3D, 0, GL_RGBA, shape[0], shape[1], shape[2], 0, GL_RGBA, GL_UNSIGNED_BYTE, self.data.transpose((2,1,0,3)))
glDisable(GL_TEXTURE_3D)
self.lists = {}
for ax in [0,1,2]:
for d in [-1, 1]:
l = glGenLists(1)
self.lists[(ax,d)] = l
glNewList(l, GL_COMPILE)
self.drawVolume(ax, d)
glEndList()
def paint(self):
glEnable(GL_TEXTURE_3D)
glBindTexture(GL_TEXTURE_3D, self.texture)
glEnable(GL_DEPTH_TEST)
#glDisable(GL_CULL_FACE)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glEnable( GL_BLEND )
glEnable( GL_ALPHA_TEST )
glColor4f(1,1,1,1)
view = self.view()
center = QtGui.QVector3D(*[x/2. for x in self.data.shape[:3]])
cam = self.mapFromParent(view.cameraPosition()) - center
#print "center", center, "cam", view.cameraPosition(), self.mapFromParent(view.cameraPosition()), "diff", cam
cam = np.array([cam.x(), cam.y(), cam.z()])
ax = np.argmax(abs(cam))
d = 1 if cam[ax] > 0 else -1
glCallList(self.lists[(ax,d)]) ## draw axes
glDisable(GL_TEXTURE_3D)
def drawVolume(self, ax, d):
N = 5
imax = [0,1,2]
imax.remove(ax)
tp = [[0,0,0],[0,0,0],[0,0,0],[0,0,0]]
vp = [[0,0,0],[0,0,0],[0,0,0],[0,0,0]]
nudge = [0.5/x for x in self.data.shape]
tp[0][imax[0]] = 0+nudge[imax[0]]
tp[0][imax[1]] = 0+nudge[imax[1]]
tp[1][imax[0]] = 1-nudge[imax[0]]
tp[1][imax[1]] = 0+nudge[imax[1]]
tp[2][imax[0]] = 1-nudge[imax[0]]
tp[2][imax[1]] = 1-nudge[imax[1]]
tp[3][imax[0]] = 0+nudge[imax[0]]
tp[3][imax[1]] = 1-nudge[imax[1]]
vp[0][imax[0]] = 0
vp[0][imax[1]] = 0
vp[1][imax[0]] = self.data.shape[imax[0]]
vp[1][imax[1]] = 0
vp[2][imax[0]] = self.data.shape[imax[0]]
vp[2][imax[1]] = self.data.shape[imax[1]]
vp[3][imax[0]] = 0
vp[3][imax[1]] = self.data.shape[imax[1]]
slices = self.data.shape[ax] * self.sliceDensity
r = range(slices)
if d == -1:
r = r[::-1]
glBegin(GL_QUADS)
tzVals = np.linspace(nudge[ax], 1.0-nudge[ax], slices)
vzVals = np.linspace(0, self.data.shape[ax], slices)
for i in r:
z = tzVals[i]
w = vzVals[i]
tp[0][ax] = z
tp[1][ax] = z
tp[2][ax] = z
tp[3][ax] = z
vp[0][ax] = w
vp[1][ax] = w
vp[2][ax] = w
vp[3][ax] = w
glTexCoord3f(*tp[0])
glVertex3f(*vp[0])
glTexCoord3f(*tp[1])
glVertex3f(*vp[1])
glTexCoord3f(*tp[2])
glVertex3f(*vp[2])
glTexCoord3f(*tp[3])
glVertex3f(*vp[3])
glEnd()
## Interesting idea:
## remove projection/modelview matrixes, recreate in texture coords.
## it _sorta_ works, but needs tweaking.
#mvm = glGetDoublev(GL_MODELVIEW_MATRIX)
#pm = glGetDoublev(GL_PROJECTION_MATRIX)
#m = QtGui.QMatrix4x4(mvm.flatten()).inverted()[0]
#p = QtGui.QMatrix4x4(pm.flatten()).inverted()[0]
#glMatrixMode(GL_PROJECTION)
#glPushMatrix()
#glLoadIdentity()
#N=1
#glOrtho(-N,N,-N,N,-100,100)
#glMatrixMode(GL_MODELVIEW)
#glLoadIdentity()
#glMatrixMode(GL_TEXTURE)
#glLoadIdentity()
#glMultMatrixf(m.copyDataTo())
#view = self.view()
#w = view.width()
#h = view.height()
#dist = view.opts['distance']
#fov = view.opts['fov']
#nearClip = dist * .1
#farClip = dist * 5.
#r = nearClip * np.tan(fov)
#t = r * h / w
#p = QtGui.QMatrix4x4()
#p.frustum( -r, r, -t, t, nearClip, farClip)
#glMultMatrixf(p.inverted()[0].copyDataTo())
#glBegin(GL_QUADS)
#M=1
#for i in range(500):
#z = i/500.
#w = -i/500.
#glTexCoord3f(-M, -M, z)
#glVertex3f(-N, -N, w)
#glTexCoord3f(M, -M, z)
#glVertex3f(N, -N, w)
#glTexCoord3f(M, M, z)
#glVertex3f(N, N, w)
#glTexCoord3f(-M, M, z)
#glVertex3f(-N, N, w)
#glEnd()
#glDisable(GL_TEXTURE_3D)
#glMatrixMode(GL_PROJECTION)
#glPopMatrix()
| from OpenGL.GL import *
from .. GLGraphicsItem import GLGraphicsItem
from pyqtgraph.Qt import QtGui
import numpy as np
__all__ = ['GLVolumeItem']
class GLVolumeItem(GLGraphicsItem):
"""
**Bases:** :class:`GLGraphicsItem <pyqtgraph.opengl.GLGraphicsItem>`
Displays volumetric data.
"""
def __init__(self, data, sliceDensity=1, smooth=True):
"""
============== =======================================================================================
**Arguments:**
data Volume data to be rendered. *Must* be 4D numpy array (x, y, z, RGBA) with dtype=ubyte.
sliceDensity Density of slices to render through the volume. A value of 1 means one slice per voxel.
smooth (bool) If True, the volume slices are rendered with linear interpolation
============== =======================================================================================
"""
self.sliceDensity = sliceDensity
self.smooth = smooth
self.data = data
GLGraphicsItem.__init__(self)
def initializeGL(self):
glEnable(GL_TEXTURE_3D)
self.texture = glGenTextures(1)
glBindTexture(GL_TEXTURE_3D, self.texture)
if self.smooth:
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
else:
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_BORDER)
shape = self.data.shape
glTexImage3D(GL_TEXTURE_3D, 0, GL_RGBA, shape[0], shape[1], shape[2], 0, GL_RGBA, GL_UNSIGNED_BYTE, self.data.transpose((2,1,0,3)))
glDisable(GL_TEXTURE_3D)
self.lists = {}
for ax in [0,1,2]:
for d in [-1, 1]:
l = glGenLists(1)
self.lists[(ax,d)] = l
glNewList(l, GL_COMPILE)
self.drawVolume(ax, d)
glEndList()
def paint(self):
glEnable(GL_TEXTURE_3D)
glBindTexture(GL_TEXTURE_3D, self.texture)
glEnable(GL_DEPTH_TEST)
#glDisable(GL_CULL_FACE)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glEnable( GL_BLEND )
glEnable( GL_ALPHA_TEST )
glColor4f(1,1,1,1)
view = self.view()
center = QtGui.QVector3D(*[x/2. for x in self.data.shape[:3]])
cam = self.mapFromParent(view.cameraPosition()) - center
#print "center", center, "cam", view.cameraPosition(), self.mapFromParent(view.cameraPosition()), "diff", cam
cam = np.array([cam.x(), cam.y(), cam.z()])
ax = np.argmax(abs(cam))
d = 1 if cam[ax] > 0 else -1
glCallList(self.lists[(ax,d)]) ## draw axes
glDisable(GL_TEXTURE_3D)
def drawVolume(self, ax, d):
N = 5
imax = [0,1,2]
imax.remove(ax)
tp = [[0,0,0],[0,0,0],[0,0,0],[0,0,0]]
vp = [[0,0,0],[0,0,0],[0,0,0],[0,0,0]]
nudge = [0.5/x for x in self.data.shape]
tp[0][imax[0]] = 0+nudge[imax[0]]
tp[0][imax[1]] = 0+nudge[imax[1]]
tp[1][imax[0]] = 1-nudge[imax[0]]
tp[1][imax[1]] = 0+nudge[imax[1]]
tp[2][imax[0]] = 1-nudge[imax[0]]
tp[2][imax[1]] = 1-nudge[imax[1]]
tp[3][imax[0]] = 0+nudge[imax[0]]
tp[3][imax[1]] = 1-nudge[imax[1]]
vp[0][imax[0]] = 0
vp[0][imax[1]] = 0
vp[1][imax[0]] = self.data.shape[imax[0]]
vp[1][imax[1]] = 0
vp[2][imax[0]] = self.data.shape[imax[0]]
vp[2][imax[1]] = self.data.shape[imax[1]]
vp[3][imax[0]] = 0
vp[3][imax[1]] = self.data.shape[imax[1]]
slices = self.data.shape[ax] * self.sliceDensity
r = range(slices)
if d == -1:
r = r[::-1]
glBegin(GL_QUADS)
tzVals = np.linspace(nudge[ax], 1.0-nudge[ax], slices)
vzVals = np.linspace(0, self.data.shape[ax], slices)
for i in r:
z = tzVals[i]
w = vzVals[i]
tp[0][ax] = z
tp[1][ax] = z
tp[2][ax] = z
tp[3][ax] = z
vp[0][ax] = w
vp[1][ax] = w
vp[2][ax] = w
vp[3][ax] = w
glTexCoord3f(*tp[0])
glVertex3f(*vp[0])
glTexCoord3f(*tp[1])
glVertex3f(*vp[1])
glTexCoord3f(*tp[2])
glVertex3f(*vp[2])
glTexCoord3f(*tp[3])
glVertex3f(*vp[3])
glEnd()
## Interesting idea:
## remove projection/modelview matrixes, recreate in texture coords.
## it _sorta_ works, but needs tweaking.
#mvm = glGetDoublev(GL_MODELVIEW_MATRIX)
#pm = glGetDoublev(GL_PROJECTION_MATRIX)
#m = QtGui.QMatrix4x4(mvm.flatten()).inverted()[0]
#p = QtGui.QMatrix4x4(pm.flatten()).inverted()[0]
#glMatrixMode(GL_PROJECTION)
#glPushMatrix()
#glLoadIdentity()
#N=1
#glOrtho(-N,N,-N,N,-100,100)
#glMatrixMode(GL_MODELVIEW)
#glLoadIdentity()
#glMatrixMode(GL_TEXTURE)
#glLoadIdentity()
#glMultMatrixf(m.copyDataTo())
#view = self.view()
#w = view.width()
#h = view.height()
#dist = view.opts['distance']
#fov = view.opts['fov']
#nearClip = dist * .1
#farClip = dist * 5.
#r = nearClip * np.tan(fov)
#t = r * h / w
#p = QtGui.QMatrix4x4()
#p.frustum( -r, r, -t, t, nearClip, farClip)
#glMultMatrixf(p.inverted()[0].copyDataTo())
#glBegin(GL_QUADS)
#M=1
#for i in range(500):
#z = i/500.
#w = -i/500.
#glTexCoord3f(-M, -M, z)
#glVertex3f(-N, -N, w)
#glTexCoord3f(M, -M, z)
#glVertex3f(N, -N, w)
#glTexCoord3f(M, M, z)
#glVertex3f(N, N, w)
#glTexCoord3f(-M, M, z)
#glVertex3f(-N, N, w)
#glEnd()
#glDisable(GL_TEXTURE_3D)
#glMatrixMode(GL_PROJECTION)
#glPopMatrix() | en | 0.388862 | **Bases:** :class:`GLGraphicsItem <pyqtgraph.opengl.GLGraphicsItem>` Displays volumetric data. ============== ======================================================================================= **Arguments:** data Volume data to be rendered. *Must* be 4D numpy array (x, y, z, RGBA) with dtype=ubyte. sliceDensity Density of slices to render through the volume. A value of 1 means one slice per voxel. smooth (bool) If True, the volume slices are rendered with linear interpolation ============== ======================================================================================= #glDisable(GL_CULL_FACE) #print "center", center, "cam", view.cameraPosition(), self.mapFromParent(view.cameraPosition()), "diff", cam ## draw axes ## Interesting idea: ## remove projection/modelview matrixes, recreate in texture coords. ## it _sorta_ works, but needs tweaking. #mvm = glGetDoublev(GL_MODELVIEW_MATRIX) #pm = glGetDoublev(GL_PROJECTION_MATRIX) #m = QtGui.QMatrix4x4(mvm.flatten()).inverted()[0] #p = QtGui.QMatrix4x4(pm.flatten()).inverted()[0] #glMatrixMode(GL_PROJECTION) #glPushMatrix() #glLoadIdentity() #N=1 #glOrtho(-N,N,-N,N,-100,100) #glMatrixMode(GL_MODELVIEW) #glLoadIdentity() #glMatrixMode(GL_TEXTURE) #glLoadIdentity() #glMultMatrixf(m.copyDataTo()) #view = self.view() #w = view.width() #h = view.height() #dist = view.opts['distance'] #fov = view.opts['fov'] #nearClip = dist * .1 #farClip = dist * 5. #r = nearClip * np.tan(fov) #t = r * h / w #p = QtGui.QMatrix4x4() #p.frustum( -r, r, -t, t, nearClip, farClip) #glMultMatrixf(p.inverted()[0].copyDataTo()) #glBegin(GL_QUADS) #M=1 #for i in range(500): #z = i/500. #w = -i/500. #glTexCoord3f(-M, -M, z) #glVertex3f(-N, -N, w) #glTexCoord3f(M, -M, z) #glVertex3f(N, -N, w) #glTexCoord3f(M, M, z) #glVertex3f(N, N, w) #glTexCoord3f(-M, M, z) #glVertex3f(-N, N, w) #glEnd() #glDisable(GL_TEXTURE_3D) #glMatrixMode(GL_PROJECTION) #glPopMatrix() | 2.845592 | 3 |
pyefun/encoding/ebinary/binary.py | nuo010/pyefun | 94 | 6625033 | <reponame>nuo010/pyefun
import binascii
def binary编码(data):
if (type(data) == str):
data_bytes = data.encode("utf-8")
else:
data_bytes = data
return binascii.b2a_hex(data_bytes)
def binary解码(data):
return binascii.a2b_hex(data)
| import binascii
def binary编码(data):
if (type(data) == str):
data_bytes = data.encode("utf-8")
else:
data_bytes = data
return binascii.b2a_hex(data_bytes)
def binary解码(data):
return binascii.a2b_hex(data) | none | 1 | 3.296756 | 3 | |
python/day06.py | VenomPaco/advent-of-code-2020 | 0 | 6625034 | <filename>python/day06.py
import os
# Complexity: O(n * m), n is number of people and m number of questions
def anyone(lines):
groups = []
group = set()
for line in lines:
if line:
for letter in line:
group.add(letter)
else:
groups.append(group)
group = set()
if group:
groups.append(group)
return groups
# Complexity: O(n * m), n is number of people and m number of questions
def everyone(lines):
groups = []
group = {}
members = 0
for line in lines:
if line:
for letter in line:
if letter in group:
group[letter] += 1
else:
group[letter] = 1
members += 1
else:
to_delete = [letter for letter in group if group[letter] != members]
for letter in to_delete:
del group[letter]
groups.append(group)
group = {}
members = 0
if group:
to_delete = [letter for letter in group if group[letter] != members]
for letter in to_delete:
del group[letter]
groups.append(group)
return groups
def parse_file(path):
script_dir = os.path.dirname(__file__)
f = open(os.path.join(script_dir, path), 'r')
return [line.strip() for line in f]
lines = parse_file('../data/day06.txt')
groups_anyone = anyone(lines)
groups_everyone = everyone(lines)
print(sum(map(len, groups_anyone)))
print(sum(map(len, groups_everyone)))
| <filename>python/day06.py
import os
# Complexity: O(n * m), n is number of people and m number of questions
def anyone(lines):
groups = []
group = set()
for line in lines:
if line:
for letter in line:
group.add(letter)
else:
groups.append(group)
group = set()
if group:
groups.append(group)
return groups
# Complexity: O(n * m), n is number of people and m number of questions
def everyone(lines):
groups = []
group = {}
members = 0
for line in lines:
if line:
for letter in line:
if letter in group:
group[letter] += 1
else:
group[letter] = 1
members += 1
else:
to_delete = [letter for letter in group if group[letter] != members]
for letter in to_delete:
del group[letter]
groups.append(group)
group = {}
members = 0
if group:
to_delete = [letter for letter in group if group[letter] != members]
for letter in to_delete:
del group[letter]
groups.append(group)
return groups
def parse_file(path):
script_dir = os.path.dirname(__file__)
f = open(os.path.join(script_dir, path), 'r')
return [line.strip() for line in f]
lines = parse_file('../data/day06.txt')
groups_anyone = anyone(lines)
groups_everyone = everyone(lines)
print(sum(map(len, groups_anyone)))
print(sum(map(len, groups_everyone)))
| en | 0.905692 | # Complexity: O(n * m), n is number of people and m number of questions # Complexity: O(n * m), n is number of people and m number of questions | 3.406482 | 3 |
app.py | pranjalAI/Glass-Detection | 1 | 6625035 | <reponame>pranjalAI/Glass-Detection
from __future__ import division, print_function
# coding=utf-8
import sys
import os
import glob
import re, glob, os,cv2
import numpy as np
import pandas as pd
import glass_detection
from shutil import copyfile
import shutil
from distutils.dir_util import copy_tree
# Keras
#from keras.applications.imagenet_utils import preprocess_input, decode_predictions
#from keras.models import load_model
#from keras.preprocessing import image
# Flask utils
from flask import Flask, redirect, url_for, request, render_template
from werkzeug.utils import secure_filename
from gevent.pywsgi import WSGIServer
# Define a flask app
app = Flask(__name__)
for f in os.listdir("D:\\My ML Projects\\Lenskart Final\\static\\similar_images\\"):
os.remove("D:\\My ML Projects\\Lenskart Final\\static\\similar_images\\"+f)
# Model saved with Keras model.save()
#MODEL_PATH = 'models/model_resnet.h5'
# Load your trained model
#model = load_model(MODEL_PATH)
#model._make_predict_function() # Necessary
# print('Model loaded. Start serving...')
# You can also use pretrained model from Keras
# Check https://keras.io/applications/
#from keras.applications.resnet50 import ResNet50
#model = ResNet50(weights='imagenet')
#model.save('')
print('Model loaded. Check http://127.0.0.1:5000/')
def model_predict(img_path, model):
img = image.load_img(img_path, target_size=(224, 224))
# Preprocessing the image
x = image.img_to_array(img)
# x = np.true_divide(x, 255)
x = np.expand_dims(x, axis=0)
# Be careful how your trained model deals with the input
# otherwise, it won't make correct prediction!
x = preprocess_input(x, mode='caffe')
preds = model.predict(x)
return preds
@app.route('/', methods=['GET'])
def index():
# Main page
return render_template('index.html')
@app.route('/predict', methods=['GET', 'POST'])
def upload():
if request.method == 'POST':
# Get the file from post request
f = request.files['file']
# Save the file to ./uploads
basepath = os.path.dirname(__file__)
file_path = os.path.join(
basepath, 'uploads', secure_filename(f.filename))
f.save(file_path)
# Make prediction
similar_glass_details=glass_detection.getUrl(file_path)
glass_details=similar_glass_details[0]
suggested_url=similar_glass_details[1]
parentclass=similar_glass_details[2]
childclass=similar_glass_details[3]
print("Checking for similar images.......")
#getting similar images
test_image = cv2.imread(file_path)
gray_image = cv2.cvtColor(test_image, cv2.COLOR_BGR2GRAY)
histogram_test = cv2.calcHist([gray_image], [0],
None, [256], [0, 256])
hist_dict={}
for image in os.listdir("D:\\My ML Projects\\lenskart task\\train\\"+str(parentclass)+"\\"):
try:
img_to_compare = cv2.imread("D:\\My ML Projects\\lenskart task\\train\\"+str(parentclass)+"\\"+image)
img_to_compare = cv2.cvtColor(img_to_compare, cv2.COLOR_BGR2GRAY)
img_to_compare_hist = cv2.calcHist([img_to_compare], [0],
None, [256], [0, 256])
c=0
i = 0
while i<len(histogram_test) and i<len(img_to_compare_hist):
c+=(histogram_test[i]-img_to_compare_hist[i])**2
i+= 1
c = c**(1 / 2)
hist_dict[image]=c[0]
except:
print(image)
sort_dict = sorted(hist_dict.items(), key=lambda x: x[1], reverse=False)[1:11]
similar_images=[]
for i in sort_dict:
similar_images.append("D:\\My ML Projects\\lenskart task\\train\\"+str(parentclass)+"\\"+str(i[0]))
for f in os.listdir("D:\\My ML Projects\\Lenskart Final\\static\\similar_images\\"):
os.remove("D:\\My ML Projects\\Lenskart Final\\static\\similar_images\\"+f)
for count, image in enumerate(similar_images):
dst=f"D:\\My ML Projects\\Lenskart Final\\static\\similar_images\\"
#copyfile(image, dst)
shutil.copy(image, dst, follow_symlinks=True)
for count, filename in enumerate(os.listdir("D:\\My ML Projects\\Lenskart Final\\static\\similar_images")):
dst ="Glasses" + str(count+1) + ".jpg"
src ="D:\\My ML Projects\\Lenskart Final\\static\\similar_images\\"+ filename
dst ="D:\\My ML Projects\\Lenskart Final\\static\\similar_images\\"+ dst
os.rename(src, dst)
print(glass_details)
return glass_details
#return suggested_url
#return render_template("index.html",foobar=similar_images)
return None
if __name__ == '__main__':
app.run(debug=False)
| from __future__ import division, print_function
# coding=utf-8
import sys
import os
import glob
import re, glob, os,cv2
import numpy as np
import pandas as pd
import glass_detection
from shutil import copyfile
import shutil
from distutils.dir_util import copy_tree
# Keras
#from keras.applications.imagenet_utils import preprocess_input, decode_predictions
#from keras.models import load_model
#from keras.preprocessing import image
# Flask utils
from flask import Flask, redirect, url_for, request, render_template
from werkzeug.utils import secure_filename
from gevent.pywsgi import WSGIServer
# Define a flask app
app = Flask(__name__)
# Empty the static similar-images folder left over from a previous run.
# NOTE(review): hard-coded absolute Windows path — should come from config.
for f in os.listdir("D:\\My ML Projects\\Lenskart Final\\static\\similar_images\\"):
    os.remove("D:\\My ML Projects\\Lenskart Final\\static\\similar_images\\"+f)
# Model saved with Keras model.save()
#MODEL_PATH = 'models/model_resnet.h5'
# Load your trained model
#model = load_model(MODEL_PATH)
#model._make_predict_function()          # Necessary
# print('Model loaded. Start serving...')
# You can also use pretrained model from Keras
# Check https://keras.io/applications/
#from keras.applications.resnet50 import ResNet50
#model = ResNet50(weights='imagenet')
#model.save('')
print('Model loaded. Check http://127.0.0.1:5000/')
def model_predict(img_path, model):
    """Run ``model`` on the image at ``img_path`` and return raw predictions.

    NOTE(review): dead code as shipped — ``image`` and ``preprocess_input``
    come from the Keras imports that are commented out at the top of this
    file, so calling this raises NameError.  Re-enable those imports (or
    delete this helper) before using it.
    """
    img = image.load_img(img_path, target_size=(224, 224))
    # Preprocessing the image
    x = image.img_to_array(img)
    # x = np.true_divide(x, 255)
    x = np.expand_dims(x, axis=0)
    # Be careful how your trained model deals with the input
    # otherwise, it won't make correct prediction!
    x = preprocess_input(x, mode='caffe')
    preds = model.predict(x)
    return preds
@app.route('/', methods=['GET'])
def index():
    # Main page: serves the upload form; prediction happens via POST /predict.
    return render_template('index.html')
@app.route('/predict', methods=['GET', 'POST'])
def upload():
    """Handle an eyewear image upload and return its predicted details.

    POST: saves the uploaded file under ./uploads, classifies it via
    ``glass_detection.getUrl``, finds the 10 most similar training images
    (Euclidean distance between grayscale 256-bin histograms), copies them
    into the static folder as ``Glasses<i>.jpg`` for the template, and
    returns the glass-details string.  GET: returns None.
    """
    # NOTE(review): absolute Windows paths should come from configuration.
    train_root = "D:\\My ML Projects\\lenskart task\\train"
    similar_dir = "D:\\My ML Projects\\Lenskart Final\\static\\similar_images"
    if request.method == 'POST':
        # Persist the uploaded file next to this script, under ./uploads.
        f = request.files['file']
        basepath = os.path.dirname(__file__)
        file_path = os.path.join(
            basepath, 'uploads', secure_filename(f.filename))
        f.save(file_path)
        # Classify: (details string, suggested URL, parent class, child class).
        similar_glass_details = glass_detection.getUrl(file_path)
        glass_details = similar_glass_details[0]
        suggested_url = similar_glass_details[1]
        parentclass = similar_glass_details[2]
        childclass = similar_glass_details[3]
        print("Checking for similar images.......")
        # Histogram of the query image (grayscale, 256 bins).
        test_image = cv2.imread(file_path)
        gray_image = cv2.cvtColor(test_image, cv2.COLOR_BGR2GRAY)
        histogram_test = cv2.calcHist([gray_image], [0],
                                      None, [256], [0, 256])
        class_dir = os.path.join(train_root, str(parentclass))
        hist_dict = {}
        for image_name in os.listdir(class_dir):
            try:
                candidate = cv2.imread(os.path.join(class_dir, image_name))
                if candidate is None:
                    # Unreadable / non-image file: report and skip.
                    print(image_name)
                    continue
                candidate_gray = cv2.cvtColor(candidate, cv2.COLOR_BGR2GRAY)
                candidate_hist = cv2.calcHist([candidate_gray], [0],
                                              None, [256], [0, 256])
                # Euclidean distance between the two histograms.
                hist_dict[image_name] = float(
                    np.linalg.norm(histogram_test - candidate_hist))
            except Exception:
                # Mirror the original behaviour: report the file, move on.
                print(image_name)
        # Keep the 10 closest matches; index 0 is skipped because it is the
        # query image itself when it belongs to the training set.
        closest = sorted(hist_dict.items(), key=lambda kv: kv[1])[1:11]
        similar_images = [os.path.join(class_dir, name) for name, _ in closest]
        # Reset the static folder, then copy the matches in.
        for leftover in os.listdir(similar_dir):
            os.remove(os.path.join(similar_dir, leftover))
        for image_path in similar_images:
            shutil.copy(image_path, similar_dir, follow_symlinks=True)
        # Give the copies predictable names the template can reference.
        for count, filename in enumerate(os.listdir(similar_dir)):
            os.rename(os.path.join(similar_dir, filename),
                      os.path.join(similar_dir,
                                   "Glasses" + str(count + 1) + ".jpg"))
        print(glass_details)
        return glass_details
    return None
if __name__ == '__main__':
    # Run the Flask development server; debug disabled.
    app.run(debug=False)
setup.py | philipov/powertools | 0 | 6625036 | #!python
#-- setup.py -- powertools
from setuptools import setup
from powertools.setup.arguments import kwargs
import os
# Read the long description shipped next to this file.  Pin the encoding so
# the build does not depend on the platform's locale default.
with open( os.path.join( os.path.dirname( __file__ ), 'DESCRIPTION.rst' ), encoding='utf-8' ) as r_file :
    long_description = r_file.read()
# Forward the shared setup arguments plus the description to setuptools.
setup( **kwargs, long_description=long_description )
#----------------------------------------------------------------------------------------------#
| #!python
#-- setup.py -- powertools
from setuptools import setup
from powertools.setup.arguments import kwargs
import os
# Load the long description from DESCRIPTION.rst next to this setup script.
# NOTE(review): open() uses the platform default encoding here — consider
# passing encoding="utf-8" so builds are locale-independent.
with open( os.path.join( os.path.dirname( __file__ ), 'DESCRIPTION.rst' ) ) as r_file :
    long_description = r_file.read()
# Forward the shared setup arguments plus the description to setuptools.
setup( **kwargs, long_description=long_description )
#----------------------------------------------------------------------------------------------#
| pt | 0.103955 | #!python #-- setup.py -- powertools #----------------------------------------------------------------------------------------------# | 1.511889 | 2 |
chaospy/descriptives/expected.py | Novermars/chaospy | 0 | 6625037 | """Expected value."""
import numpy
import numpoly
def E(poly, dist=None, **kws):
    """
    The expected value of a distribution or polynomial.
    1st order statistics of a probability distribution or polynomial on a given
    probability space.
    Args:
        poly (numpoly.ndpoly, Distribution):
            Input to take expected value on.
        dist (Distribution):
            Defines the space the expected value is taken on. It is ignored if
            ``poly`` is a distribution.
    Returns:
        (numpy.ndarray):
            The expected value of the polynomial or distribution, where
            ``expected.shape == poly.shape``.
    Examples:
        >>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2))
        >>> chaospy.E(dist)
        array([1., 0.])
        >>> q0, q1 = chaospy.variable(2)
        >>> poly = chaospy.polynomial([1, q0, q1, 10*q0*q1-1])
        >>> chaospy.E(poly, dist)
        array([ 1.,  1.,  0., -1.])
    """
    # A lone distribution argument means: expectation of its identity poly.
    if dist is None:
        dist, poly = poly, numpoly.variable(len(poly))
    poly = numpoly.set_dimensions(poly, len(dist))
    # Constants are their own expectation; no moments needed.
    if poly.isconstant():
        return poly.tonumpy()
    # Raw moments of the distribution for each exponent in the polynomial.
    moments = dist.mom(poly.exponents.T, **kws)
    if len(dist) == 1:
        moments = moments[0]
    # E[poly] = sum over monomials of coefficient * E[monomial].
    expected = numpy.zeros(poly.shape)
    for exponent_key, moment in zip(poly.keys, moments):
        expected += poly[exponent_key]*moment
    return expected
| """Expected value."""
import numpy
import numpoly
def E(poly, dist=None, **kws):
    """
    The expected value of a distribution or polynomial.
    1st order statistics of a probability distribution or polynomial on a given
    probability space.
    Args:
        poly (numpoly.ndpoly, Distribution):
            Input to take expected value on.
        dist (Distribution):
            Defines the space the expected value is taken on. It is ignored if
            ``poly`` is a distribution.
    Returns:
        (numpy.ndarray):
            The expected value of the polynomial or distribution, where
            ``expected.shape == poly.shape``.
    Examples:
        >>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2))
        >>> chaospy.E(dist)
        array([1., 0.])
        >>> q0, q1 = chaospy.variable(2)
        >>> poly = chaospy.polynomial([1, q0, q1, 10*q0*q1-1])
        >>> chaospy.E(poly, dist)
        array([ 1.,  1.,  0., -1.])
    """
    # A lone distribution argument means: expectation of its identity poly.
    if dist is None:
        dist, poly = poly, numpoly.variable(len(poly))
    poly = numpoly.set_dimensions(poly, len(dist))
    # Constants are their own expectation; no moments needed.
    if poly.isconstant():
        return poly.tonumpy()
    moments = dist.mom(poly.exponents.T, **kws)
    if len(dist) == 1:
        moments = moments[0]
    # E[poly] = sum over monomials of coefficient * E[monomial].
    out = numpy.zeros(poly.shape)
    for idx, key in enumerate(poly.keys):
        out += poly[key]*moments[idx]
    return out
| en | 0.708794 | Expected value. The expected value of a distribution or polynomial. 1st order statistics of a probability distribution or polynomial on a given probability space. Args: poly (numpoly.ndpoly, Distribution): Input to take expected value on. dist (Distribution): Defines the space the expected value is taken on. It is ignored if ``poly`` is a distribution. Returns: (numpy.ndarray): The expected value of the polynomial or distribution, where ``expected.shape == poly.shape``. Examples: >>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2)) >>> chaospy.E(dist) array([1., 0.]) >>> q0, q1 = chaospy.variable(2) >>> poly = chaospy.polynomial([1, q0, q1, 10*q0*q1-1]) >>> chaospy.E(poly, dist) array([ 1., 1., 0., -1.]) | 3.906318 | 4 |
invenio_records_marc21/records/__init__.py | bengaal/invenio-records-marc21 | 0 | 6625038 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2021 Graz University of Technology.
#
# Invenio-Records-Marc21 is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Marc21 Records module."""
from .api import Marc21Draft, Marc21Record
from .models import DraftMetadata, RecordMetadata
# Public API of this records subpackage.
__all__ = (
    "Marc21Draft",
    "Marc21Record",
    "DraftMetadata",
    "RecordMetadata",
)
| # -*- coding: utf-8 -*-
#
# Copyright (C) 2021 Graz University of Technology.
#
# Invenio-Records-Marc21 is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Marc21 Records module."""
from .api import Marc21Draft, Marc21Record
from .models import DraftMetadata, RecordMetadata
# Public API of this records subpackage.
__all__ = (
    "Marc21Draft",
    "Marc21Record",
    "DraftMetadata",
    "RecordMetadata",
)
| en | 0.722976 | # -*- coding: utf-8 -*- # # Copyright (C) 2021 Graz University of Technology. # # Invenio-Records-Marc21 is free software; you can redistribute it and/or modify it # under the terms of the MIT License; see LICENSE file for more details. Marc21 Records module. | 1.262366 | 1 |
aetros/predict.py | aetros/aetros-cli | 120 | 6625039 | <filename>aetros/predict.py<gh_stars>100-1000
from __future__ import print_function
from __future__ import absolute_import
import simplejson
import numpy as np
import os
from aetros.utils import unpack_full_job_id
from .backend import JobBackend, invalid_json_values
def predict(logger, job_id, file_paths, weights_path=None):
    """Load a finished AETROS job's model and print predictions as JSON.

    Parameters
    ----------
    logger : logging.Logger
        Logger for progress output.
    job_id : str
        Full job id in the form ``owner/model/id``.
    file_paths : list of str
        Input files, one per model input node, in node order.
    weights_path : str, optional
        Explicit weights file; defaults to the job's latest weights.
    """
    # Renamed from ``id`` to avoid shadowing the builtin.
    owner, name, short_id = unpack_full_job_id(job_id)
    job_backend = JobBackend(model_name=owner+'/'+name)
    job_backend.fetch(short_id)
    job_backend.load(short_id)
    job_model = job_backend.get_job_model()
    # The job's git work tree holds the model files the trainer expects.
    os.chdir(job_backend.git.work_tree)
    if not weights_path:
        weights_path = job_model.get_weights_filepath_latest()
    from .Trainer import Trainer
    trainer = Trainer(job_backend)
    job_model.set_input_shape(trainer)
    # Force TensorFlow channel ordering regardless of Keras version/API name.
    import keras.backend
    if hasattr(keras.backend, 'set_image_dim_ordering'):
        keras.backend.set_image_dim_ordering('tf')
    if hasattr(keras.backend, 'set_image_data_format'):
        keras.backend.set_image_data_format('channels_last')
    job_backend.logger.info("Load model and compile ...")
    model = job_model.get_built_model(trainer)
    trainer.model = model
    from aetros.keras import load_weights
    logger.info('Load weights from ' + weights_path)
    load_weights(model, weights_path)
    # Convert each file into the input node it feeds, preserving order.
    inputs = [
        job_model.convert_file_to_input_node(file_path, job_model.get_input_node(idx))
        for idx, file_path in enumerate(file_paths)
    ]
    job_backend.logger.info("Start prediction ...")
    prediction = job_model.predict(trainer, np.array(inputs))
    print(simplejson.dumps(prediction, indent=4, default=invalid_json_values))
# aetros/predict.py
from __future__ import print_function
from __future__ import absolute_import
import simplejson
import numpy as np
import os
from aetros.utils import unpack_full_job_id
from .backend import JobBackend, invalid_json_values
def predict(logger, job_id, file_paths, weights_path=None):
    """Load a finished AETROS job's model and print predictions as JSON.

    ``job_id`` is the full id ``owner/model/id``; ``file_paths`` holds one
    input file per model input node, in node order.  ``weights_path``
    defaults to the job's latest weights file.
    """
    # NOTE(review): the local name ``id`` shadows the builtin.
    owner, name, id = unpack_full_job_id(job_id)
    job_backend = JobBackend(model_name=owner+'/'+name)
    job_backend.fetch(id)
    job_backend.load(id)
    job_model = job_backend.get_job_model()
    # The job's git work tree holds the model files the trainer expects.
    os.chdir(job_backend.git.work_tree)
    if not weights_path:
        weights_path = job_model.get_weights_filepath_latest()
    from .Trainer import Trainer
    trainer = Trainer(job_backend)
    job_model.set_input_shape(trainer)
    # Force TensorFlow channel ordering regardless of Keras version/API name.
    import keras.backend
    if hasattr(keras.backend, 'set_image_dim_ordering'):
        keras.backend.set_image_dim_ordering('tf')
    if hasattr(keras.backend, 'set_image_data_format'):
        keras.backend.set_image_data_format('channels_last')
    job_backend.logger.info("Load model and compile ...")
    model = job_model.get_built_model(trainer)
    trainer.model = model
    from aetros.keras import load_weights
    logger.info('Load weights from ' + weights_path)
    load_weights(model, weights_path)
    inputs = []
    # Convert each file into the input node it feeds, preserving order.
    for idx, file_path in enumerate(file_paths):
        inputs.append(job_model.convert_file_to_input_node(file_path, job_model.get_input_node(idx)))
    job_backend.logger.info("Start prediction ...")
    prediction = job_model.predict(trainer, np.array(inputs))
    print(simplejson.dumps(prediction, indent=4, default=invalid_json_values))
| none | 1 | 2.179523 | 2 | |
lib/streamlit/ReportSession.py | brandonJY/streamlit | 1 | 6625040 | # -*- coding: utf-8 -*-
# Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from enum import Enum
import tornado.gen
import tornado.ioloop
import streamlit.elements.exception_proto as exception_proto
from streamlit import __installation_id__
from streamlit import __version__
from streamlit import caching
from streamlit import config
from streamlit import url_util
from streamlit.UploadedFileManager import UploadedFileManager
from streamlit.DeltaGenerator import DeltaGenerator
from streamlit.Report import Report
from streamlit.ScriptRequestQueue import RerunData
from streamlit.ScriptRequestQueue import ScriptRequest
from streamlit.ScriptRequestQueue import ScriptRequestQueue
from streamlit.ScriptRunner import ScriptRunner
from streamlit.ScriptRunner import ScriptRunnerEvent
from streamlit.credentials import Credentials
from streamlit.logger import get_logger
from streamlit.proto.BlockPath_pb2 import BlockPath
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.proto.Widget_pb2 import WidgetStates
from streamlit.server.server_util import serialize_forward_msg
from streamlit.storage.S3Storage import S3Storage
from streamlit.storage.FileStorage import FileStorage
from streamlit.watcher.LocalSourcesWatcher import LocalSourcesWatcher
LOGGER = get_logger(__name__)
class ReportSessionState(Enum):
    """Lifecycle states of a ReportSession's script execution."""
    REPORT_NOT_RUNNING = "REPORT_NOT_RUNNING"
    REPORT_IS_RUNNING = "REPORT_IS_RUNNING"
    SHUTDOWN_REQUESTED = "SHUTDOWN_REQUESTED"
class ReportSession(object):
    """
    Contains session data for a single "user" of an active report
    (that is, a connected browser tab).
    Each ReportSession has its own Report, root DeltaGenerator, ScriptRunner,
    and widget state.
    A ReportSession is attached to each thread involved in running its Report.
    """
    # Class-wide counter used to assign each session a unique id.
    _next_id = 0
    def __init__(self, ioloop, script_path, command_line):
        """Initialize the ReportSession.
        Parameters
        ----------
        ioloop : tornado.ioloop.IOLoop
            The Tornado IOLoop that we're running within.
        script_path : str
            Path of the Python file from which this report is generated.
        command_line : str
            Command line as input by the user.
        """
        # Each ReportSession gets a unique ID
        self.id = ReportSession._next_id
        ReportSession._next_id += 1
        self._ioloop = ioloop
        self._report = Report(script_path, command_line)
        self._state = ReportSessionState.REPORT_NOT_RUNNING
        self._uploaded_file_mgr = UploadedFileManager()
        self._widget_states = WidgetStates()
        # Watches the script's source files and triggers reruns on change.
        self._local_sources_watcher = LocalSourcesWatcher(
            self._report, self._on_source_file_changed
        )
        self._sent_initialize_message = False
        # Lazily created in _get_storage().
        self._storage = None
        self._maybe_reuse_previous_run = False
        self._run_on_save = config.get_option("server.runOnSave")
        # The ScriptRequestQueue is the means by which we communicate
        # with the active ScriptRunner.
        self._script_request_queue = ScriptRequestQueue()
        self._scriptrunner = None
        LOGGER.debug("ReportSession initialized (id=%s)", self.id)
    def flush_browser_queue(self):
        """Clears the report queue and returns the messages it contained.
        The Server calls this periodically to deliver new messages
        to the browser connected to this report.
        Returns
        -------
        list[ForwardMsg]
            The messages that were removed from the queue and should
            be delivered to the browser.
        """
        return self._report.flush_browser_queue()
    def shutdown(self):
        """Shuts down the ReportSession.
        It's an error to use a ReportSession after it's been shut down.
        """
        if self._state != ReportSessionState.SHUTDOWN_REQUESTED:
            LOGGER.debug("Shutting down (id=%s)", self.id)
            self._uploaded_file_mgr.delete_all_files()
            # Shut down the ScriptRunner, if one is active.
            # self._state must not be set to SHUTDOWN_REQUESTED until
            # after this is called.
            if self._scriptrunner is not None:
                self._enqueue_script_request(ScriptRequest.SHUTDOWN)
            self._state = ReportSessionState.SHUTDOWN_REQUESTED
            self._local_sources_watcher.close()
    def enqueue(self, msg):
        """Enqueues a new ForwardMsg to our browser queue.
        This can be called on both the main thread and a ScriptRunner
        run thread.
        Parameters
        ----------
        msg : ForwardMsg
            The message to enqueue
        """
        if not config.get_option("client.displayEnabled"):
            return
        # Avoid having two maybe_handle_execution_control_request running on
        # top of each other when tracer is installed. This leads to a lock
        # contention.
        if not config.get_option("runner.installTracer"):
            # If we have an active ScriptRunner, signal that it can handle an
            # execution control request. (Copy the scriptrunner reference to
            # avoid it being unset from underneath us, as this function can be
            # called outside the main thread.)
            scriptrunner = self._scriptrunner
            if scriptrunner is not None:
                scriptrunner.maybe_handle_execution_control_request()
        self._report.enqueue(msg)
    def enqueue_exception(self, e):
        """Enqueues an Exception message.
        Parameters
        ----------
        e : BaseException
        """
        # This does a few things:
        # 1) Clears the current report in the browser.
        # 2) Marks the current report as "stopped" in the browser.
        # 3) HACK: Resets any script params that may have been broken (e.g. the
        # command-line when rerunning with wrong argv[0])
        self._on_scriptrunner_event(ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS)
        self._on_scriptrunner_event(ScriptRunnerEvent.SCRIPT_STARTED)
        self._on_scriptrunner_event(ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS)
        msg = ForwardMsg()
        msg.metadata.delta_id = 0
        exception_proto.marshall(msg.delta.new_element.exception, e)
        self.enqueue(msg)
    def request_rerun(self, widget_state=None):
        """Signal that we're interested in running the script.
        If the script is not already running, it will be started immediately.
        Otherwise, a rerun will be requested.
        Parameters
        ----------
        widget_state : dict | None
            The widget state dictionary to run the script with, or None
            to use the widget state from the previous run of the script.
        """
        self._enqueue_script_request(ScriptRequest.RERUN, RerunData(widget_state))
    def _on_source_file_changed(self):
        """One of our source files changed. Schedule a rerun if appropriate."""
        if self._run_on_save:
            self.request_rerun()
        else:
            self._enqueue_file_change_message()
    def _clear_queue(self):
        """Clear all pending ForwardMsgs from the report's browser queue."""
        self._report.clear()
    def _on_scriptrunner_event(self, event, exception=None, widget_states=None):
        """Called when our ScriptRunner emits an event.
        This is *not* called on the main thread.
        Parameters
        ----------
        event : ScriptRunnerEvent
        exception : BaseException | None
            An exception thrown during compilation. Set only for the
            SCRIPT_STOPPED_WITH_COMPILE_ERROR event.
        widget_states : streamlit.proto.Widget_pb2.WidgetStates | None
            The ScriptRunner's final WidgetStates. Set only for the
            SHUTDOWN event.
        """
        LOGGER.debug("OnScriptRunnerEvent: %s", event)
        prev_state = self._state
        if event == ScriptRunnerEvent.SCRIPT_STARTED:
            if self._state != ReportSessionState.SHUTDOWN_REQUESTED:
                self._state = ReportSessionState.REPORT_IS_RUNNING
            if config.get_option("server.liveSave"):
                # Enqueue into the IOLoop so it runs without blocking AND runs
                # on the main thread.
                self._ioloop.spawn_callback(self._save_running_report)
            self._clear_queue()
            self._maybe_enqueue_initialize_message()
            self._enqueue_new_report_message()
        elif (
            event == ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS
            or event == ScriptRunnerEvent.SCRIPT_STOPPED_WITH_COMPILE_ERROR
        ):
            if self._state != ReportSessionState.SHUTDOWN_REQUESTED:
                self._state = ReportSessionState.REPORT_NOT_RUNNING
            script_succeeded = event == ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS
            self._enqueue_report_finished_message(
                ForwardMsg.FINISHED_SUCCESSFULLY
                if script_succeeded
                else ForwardMsg.FINISHED_WITH_COMPILE_ERROR
            )
            if config.get_option("server.liveSave"):
                # Enqueue into the IOLoop so it runs without blocking AND runs
                # on the main thread.
                self._ioloop.spawn_callback(self._save_final_report_and_quit)
            if script_succeeded:
                # When a script completes successfully, we update our
                # LocalSourcesWatcher to account for any source code changes
                # that change which modules should be watched. (This is run on
                # the main thread, because LocalSourcesWatcher is not
                # thread safe.)
                self._ioloop.spawn_callback(
                    self._local_sources_watcher.update_watched_modules
                )
            else:
                # When a script fails to compile, we send along the exception.
                from streamlit.elements import exception_proto
                msg = ForwardMsg()
                exception_proto.marshall(
                    msg.session_event.script_compilation_exception, exception
                )
                self.enqueue(msg)
        elif event == ScriptRunnerEvent.SHUTDOWN:
            # When ScriptRunner shuts down, update our local reference to it,
            # and check to see if we need to spawn a new one. (This is run on
            # the main thread.)
            def on_shutdown():
                self._widget_states = widget_states
                self._scriptrunner = None
                # Because a new ScriptEvent could have been enqueued while the
                # scriptrunner was shutting down, we check to see if we should
                # create a new one. (Otherwise, a newly-enqueued ScriptEvent
                # won't be processed until another event is enqueued.)
                self._maybe_create_scriptrunner()
            self._ioloop.spawn_callback(on_shutdown)
        # Send a message if our run state changed
        report_was_running = prev_state == ReportSessionState.REPORT_IS_RUNNING
        report_is_running = self._state == ReportSessionState.REPORT_IS_RUNNING
        if report_is_running != report_was_running:
            self._enqueue_session_state_changed_message()
    def _enqueue_session_state_changed_message(self):
        """Tell the browser about the current run_on_save / running flags."""
        msg = ForwardMsg()
        msg.session_state_changed.run_on_save = self._run_on_save
        msg.session_state_changed.report_is_running = (
            self._state == ReportSessionState.REPORT_IS_RUNNING
        )
        self.enqueue(msg)
    def _enqueue_file_change_message(self):
        """Tell the browser that a source file changed on disk."""
        LOGGER.debug("Enqueuing report_changed message (id=%s)", self.id)
        msg = ForwardMsg()
        msg.session_event.report_changed_on_disk = True
        self.enqueue(msg)
    def _maybe_enqueue_initialize_message(self):
        """Enqueue the one-time initialize message (config, env, user info)."""
        if self._sent_initialize_message:
            return
        self._sent_initialize_message = True
        msg = ForwardMsg()
        imsg = msg.initialize
        imsg.config.sharing_enabled = config.get_option("global.sharingMode") != "off"
        imsg.config.gather_usage_stats = config.get_option("browser.gatherUsageStats")
        imsg.config.max_cached_message_age = config.get_option(
            "global.maxCachedMessageAge"
        )
        imsg.config.mapbox_token = config.get_option("mapbox.token")
        LOGGER.debug(
            "New browser connection: "
            "gather_usage_stats=%s, "
            "sharing_enabled=%s, "
            "max_cached_message_age=%s",
            imsg.config.gather_usage_stats,
            imsg.config.sharing_enabled,
            imsg.config.max_cached_message_age,
        )
        imsg.environment_info.streamlit_version = __version__
        imsg.environment_info.python_version = ".".join(map(str, sys.version_info))
        imsg.session_state.run_on_save = self._run_on_save
        imsg.session_state.report_is_running = (
            self._state == ReportSessionState.REPORT_IS_RUNNING
        )
        imsg.user_info.installation_id = __installation_id__
        if Credentials.get_current().activation:
            imsg.user_info.email = Credentials.get_current().activation.email
        else:
            imsg.user_info.email = ""
        imsg.command_line = self._report.command_line
        self.enqueue(msg)
    def _enqueue_new_report_message(self):
        """Assign a fresh report id and announce the new report run."""
        self._report.generate_new_id()
        msg = ForwardMsg()
        msg.new_report.id = self._report.report_id
        msg.new_report.name = self._report.name
        msg.new_report.script_path = self._report.script_path
        self.enqueue(msg)
    def _enqueue_report_finished_message(self, status):
        """Enqueues a report_finished ForwardMsg.
        Parameters
        ----------
        status : ReportFinishedStatus
        """
        msg = ForwardMsg()
        msg.report_finished = status
        self.enqueue(msg)
    def handle_rerun_script_request(
        self, command_line=None, widget_state=None, is_preheat=False
    ):
        """Tells the ScriptRunner to re-run its report.
        Parameters
        ----------
        command_line : str | None
            The new command line arguments to run the script with, or None
            to use its previous command line value.
        widget_state : WidgetStates | None
            The WidgetStates protobuf to run the script with, or None
            to use its previous widget states.
        is_preheat: boolean
            True if this ReportSession should run the script immediately, and
            then ignore the next rerun request if it matches the already-ran
            widget state.
        """
        if is_preheat:
            self._maybe_reuse_previous_run = True  # For next time.
        elif self._maybe_reuse_previous_run:
            # If this is a "preheated" ReportSession, reuse the previous run if
            # the widget state matches. But only do this one time ever.
            self._maybe_reuse_previous_run = False
            has_widget_state = (
                widget_state is not None and len(widget_state.widgets) > 0
            )
            if not has_widget_state:
                LOGGER.debug("Skipping rerun since the preheated run is the same")
                return
        self.request_rerun(widget_state)
    def handle_upload_file(self, upload_file):
        """Register a newly uploaded file and rerun with current widget state."""
        self._uploaded_file_mgr.create_or_clear_file(
            widget_id=upload_file.widget_id,
            name=upload_file.name,
            size=upload_file.size,
            last_modified=upload_file.lastModified,
            chunks=upload_file.chunks,
        )
        self.handle_rerun_script_request(widget_state=self._widget_states)
    def handle_upload_file_chunk(self, upload_file_chunk):
        """Feed one chunk of an in-progress upload; rerun when it completes."""
        progress = self._uploaded_file_mgr.process_chunk(
            widget_id=upload_file_chunk.widget_id,
            index=upload_file_chunk.index,
            data=upload_file_chunk.data,
        )
        # progress == 1 means every chunk has arrived.
        if progress == 1:
            self.handle_rerun_script_request(widget_state=self._widget_states)
    def handle_delete_uploaded_file(self, delete_uploaded_file):
        """Delete an uploaded file and rerun with current widget state."""
        self._uploaded_file_mgr.delete_file(widget_id=delete_uploaded_file.widget_id)
        self.handle_rerun_script_request(widget_state=self._widget_states)
    def handle_stop_script_request(self):
        """Tells the ScriptRunner to stop running its report."""
        self._enqueue_script_request(ScriptRequest.STOP)
    def handle_clear_cache_request(self):
        """Clears this report's cache.
        Because this cache is global, it will be cleared for all users.
        """
        # Setting verbose=True causes clear_cache to print to stdout.
        # Since this command was initiated from the browser, the user
        # doesn't need to see the results of the command in their
        # terminal.
        caching.clear_cache()
    def handle_set_run_on_save_request(self, new_value):
        """Changes our run_on_save flag to the given value.
        The browser will be notified of the change.
        Parameters
        ----------
        new_value : bool
            New run_on_save value
        """
        self._run_on_save = new_value
        self._enqueue_session_state_changed_message()
    def _enqueue_script_request(self, request, data=None):
        """Enqueue a ScriptEvent into our ScriptEventQueue.
        If a script thread is not already running, one will be created
        to handle the event.
        Parameters
        ----------
        request : ScriptRequest
            The type of request.
        data : Any
            Data associated with the request, if any.
        """
        if self._state == ReportSessionState.SHUTDOWN_REQUESTED:
            LOGGER.warning("Discarding %s request after shutdown" % request)
            return
        self._script_request_queue.enqueue(request, data)
        self._maybe_create_scriptrunner()
    def _maybe_create_scriptrunner(self):
        """Create a new ScriptRunner if we have unprocessed script requests.
        This is called every time a ScriptRequest is enqueued, and also
        after a ScriptRunner shuts down, in case new requests were enqueued
        during its termination.
        This function should only be called on the main thread.
        """
        if (
            self._state == ReportSessionState.SHUTDOWN_REQUESTED
            or self._scriptrunner is not None
            or not self._script_request_queue.has_request
        ):
            return
        # Create the ScriptRunner, attach event handlers, and start it
        self._scriptrunner = ScriptRunner(
            report=self._report,
            enqueue_forward_msg=self.enqueue,
            widget_states=self._widget_states,
            request_queue=self._script_request_queue,
            uploaded_file_mgr=self._uploaded_file_mgr,
        )
        self._scriptrunner.on_event.connect(self._on_scriptrunner_event)
        self._scriptrunner.start()
    @tornado.gen.coroutine
    def handle_save_request(self, ws):
        """Save serialized version of report deltas to the cloud.
        "Progress" ForwardMsgs will be sent to the client during the upload.
        These messages are sent "out of band" - that is, they don't get
        enqueued into the ReportQueue (because they're not part of the report).
        Instead, they're written directly to the report's WebSocket.
        Parameters
        ----------
        ws : _BrowserWebSocketHandler
            The report's websocket handler.
        """
        @tornado.gen.coroutine
        def progress(percent):
            # Out-of-band progress update, written straight to the socket.
            progress_msg = ForwardMsg()
            progress_msg.upload_report_progress = percent
            yield ws.write_message(serialize_forward_msg(progress_msg), binary=True)
        # Indicate that the save is starting.
        try:
            yield progress(0)
            url = yield self._save_final_report(progress)
            # Indicate that the save is done.
            progress_msg = ForwardMsg()
            progress_msg.report_uploaded = url
            yield ws.write_message(serialize_forward_msg(progress_msg), binary=True)
        except Exception as e:
            # Horrible hack to show something if something breaks.
            err_msg = "%s: %s" % (type(e).__name__, str(e) or "No further details.")
            progress_msg = ForwardMsg()
            progress_msg.report_uploaded = err_msg
            yield ws.write_message(serialize_forward_msg(progress_msg), binary=True)
            LOGGER.warning("Failed to save report:", exc_info=e)
    @tornado.gen.coroutine
    def _save_running_report(self):
        """Serialize the in-progress report and upload it to storage."""
        files = self._report.serialize_running_report_to_files()
        url = yield self._get_storage().save_report_files(self._report.report_id, files)
        if config.get_option("server.liveSave"):
            url_util.print_url("Saved running app", url)
        raise tornado.gen.Return(url)
    @tornado.gen.coroutine
    def _save_final_report(self, progress_coroutine=None):
        """Serialize the finished report and upload it; returns its URL."""
        files = self._report.serialize_final_report_to_files()
        url = yield self._get_storage().save_report_files(
            self._report.report_id, files, progress_coroutine
        )
        if config.get_option("server.liveSave"):
            url_util.print_url("Saved final app", url)
        raise tornado.gen.Return(url)
    @tornado.gen.coroutine
    def _save_final_report_and_quit(self):
        """Save the final report, then stop the IOLoop (exits the server)."""
        yield self._save_final_report()
        self._ioloop.stop()
    def _get_storage(self):
        """Lazily create the storage backend selected by global.sharingMode."""
        if self._storage is None:
            sharing_mode = config.get_option("global.sharingMode")
            if sharing_mode == "s3":
                self._storage = S3Storage()
            elif sharing_mode == "file":
                self._storage = FileStorage()
            else:
                raise RuntimeError("Unsupported sharing mode '%s'" % sharing_mode)
        return self._storage
| # -*- coding: utf-8 -*-
# Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from enum import Enum
import tornado.gen
import tornado.ioloop
import streamlit.elements.exception_proto as exception_proto
from streamlit import __installation_id__
from streamlit import __version__
from streamlit import caching
from streamlit import config
from streamlit import url_util
from streamlit.UploadedFileManager import UploadedFileManager
from streamlit.DeltaGenerator import DeltaGenerator
from streamlit.Report import Report
from streamlit.ScriptRequestQueue import RerunData
from streamlit.ScriptRequestQueue import ScriptRequest
from streamlit.ScriptRequestQueue import ScriptRequestQueue
from streamlit.ScriptRunner import ScriptRunner
from streamlit.ScriptRunner import ScriptRunnerEvent
from streamlit.credentials import Credentials
from streamlit.logger import get_logger
from streamlit.proto.BlockPath_pb2 import BlockPath
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.proto.Widget_pb2 import WidgetStates
from streamlit.server.server_util import serialize_forward_msg
from streamlit.storage.S3Storage import S3Storage
from streamlit.storage.FileStorage import FileStorage
from streamlit.watcher.LocalSourcesWatcher import LocalSourcesWatcher
LOGGER = get_logger(__name__)
class ReportSessionState(Enum):
REPORT_NOT_RUNNING = "REPORT_NOT_RUNNING"
REPORT_IS_RUNNING = "REPORT_IS_RUNNING"
SHUTDOWN_REQUESTED = "SHUTDOWN_REQUESTED"
class ReportSession(object):
"""
Contains session data for a single "user" of an active report
(that is, a connected browser tab).
Each ReportSession has its own Report, root DeltaGenerator, ScriptRunner,
and widget state.
A ReportSession is attached to each thread involved in running its Report.
"""
_next_id = 0
def __init__(self, ioloop, script_path, command_line):
"""Initialize the ReportSession.
Parameters
----------
ioloop : tornado.ioloop.IOLoop
The Tornado IOLoop that we're running within.
script_path : str
Path of the Python file from which this report is generated.
command_line : str
Command line as input by the user.
"""
# Each ReportSession gets a unique ID
self.id = ReportSession._next_id
ReportSession._next_id += 1
self._ioloop = ioloop
self._report = Report(script_path, command_line)
self._state = ReportSessionState.REPORT_NOT_RUNNING
self._uploaded_file_mgr = UploadedFileManager()
self._widget_states = WidgetStates()
self._local_sources_watcher = LocalSourcesWatcher(
self._report, self._on_source_file_changed
)
self._sent_initialize_message = False
self._storage = None
self._maybe_reuse_previous_run = False
self._run_on_save = config.get_option("server.runOnSave")
# The ScriptRequestQueue is the means by which we communicate
# with the active ScriptRunner.
self._script_request_queue = ScriptRequestQueue()
self._scriptrunner = None
LOGGER.debug("ReportSession initialized (id=%s)", self.id)
def flush_browser_queue(self):
"""Clears the report queue and returns the messages it contained.
The Server calls this periodically to deliver new messages
to the browser connected to this report.
Returns
-------
list[ForwardMsg]
The messages that were removed from the queue and should
be delivered to the browser.
"""
return self._report.flush_browser_queue()
def shutdown(self):
"""Shuts down the ReportSession.
It's an error to use a ReportSession after it's been shut down.
"""
if self._state != ReportSessionState.SHUTDOWN_REQUESTED:
LOGGER.debug("Shutting down (id=%s)", self.id)
self._uploaded_file_mgr.delete_all_files()
# Shut down the ScriptRunner, if one is active.
# self._state must not be set to SHUTDOWN_REQUESTED until
# after this is called.
if self._scriptrunner is not None:
self._enqueue_script_request(ScriptRequest.SHUTDOWN)
self._state = ReportSessionState.SHUTDOWN_REQUESTED
self._local_sources_watcher.close()
def enqueue(self, msg):
"""Enqueues a new ForwardMsg to our browser queue.
This can be called on both the main thread and a ScriptRunner
run thread.
Parameters
----------
msg : ForwardMsg
The message to enqueue
"""
if not config.get_option("client.displayEnabled"):
return
# Avoid having two maybe_handle_execution_control_request running on
# top of each other when tracer is installed. This leads to a lock
# contention.
if not config.get_option("runner.installTracer"):
# If we have an active ScriptRunner, signal that it can handle an
# execution control request. (Copy the scriptrunner reference to
# avoid it being unset from underneath us, as this function can be
# called outside the main thread.)
scriptrunner = self._scriptrunner
if scriptrunner is not None:
scriptrunner.maybe_handle_execution_control_request()
self._report.enqueue(msg)
    def enqueue_exception(self, e):
        """Enqueues an Exception message to be shown in the browser.

        Parameters
        ----------
        e : BaseException
            The exception to marshall into a delta message.
        """
        # This does a few things:
        # 1) Clears the current report in the browser.
        # 2) Marks the current report as "stopped" in the browser.
        # 3) HACK: Resets any script params that may have been broken (e.g. the
        #    command-line when rerunning with wrong argv[0])
        # NOTE: this stopped/started/stopped sequence is order-dependent --
        # each call mutates session state and enqueues state-change messages.
        self._on_scriptrunner_event(ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS)
        self._on_scriptrunner_event(ScriptRunnerEvent.SCRIPT_STARTED)
        self._on_scriptrunner_event(ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS)
        msg = ForwardMsg()
        # delta_id 0: presumably targets the report's first delta slot --
        # TODO(review) confirm against the ForwardMsg metadata contract.
        msg.metadata.delta_id = 0
        exception_proto.marshall(msg.delta.new_element.exception, e)
        self.enqueue(msg)
    def request_rerun(self, widget_state=None):
        """Signal that we're interested in running the script.

        If the script is not already running, it will be started immediately.
        Otherwise, a rerun will be requested.

        Parameters
        ----------
        widget_state : dict | None
            The widget state dictionary to run the script with, or None
            to use the widget state from the previous run of the script.
        """
        # _enqueue_script_request spawns a ScriptRunner if none is active.
        self._enqueue_script_request(ScriptRequest.RERUN, RerunData(widget_state))
def _on_source_file_changed(self):
"""One of our source files changed. Schedule a rerun if appropriate."""
if self._run_on_save:
self.request_rerun()
else:
self._enqueue_file_change_message()
def _clear_queue(self):
self._report.clear()
def _on_scriptrunner_event(self, event, exception=None, widget_states=None):
"""Called when our ScriptRunner emits an event.
This is *not* called on the main thread.
Parameters
----------
event : ScriptRunnerEvent
exception : BaseException | None
An exception thrown during compilation. Set only for the
SCRIPT_STOPPED_WITH_COMPILE_ERROR event.
widget_states : streamlit.proto.Widget_pb2.WidgetStates | None
The ScriptRunner's final WidgetStates. Set only for the
SHUTDOWN event.
"""
LOGGER.debug("OnScriptRunnerEvent: %s", event)
prev_state = self._state
if event == ScriptRunnerEvent.SCRIPT_STARTED:
if self._state != ReportSessionState.SHUTDOWN_REQUESTED:
self._state = ReportSessionState.REPORT_IS_RUNNING
if config.get_option("server.liveSave"):
# Enqueue into the IOLoop so it runs without blocking AND runs
# on the main thread.
self._ioloop.spawn_callback(self._save_running_report)
self._clear_queue()
self._maybe_enqueue_initialize_message()
self._enqueue_new_report_message()
elif (
event == ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS
or event == ScriptRunnerEvent.SCRIPT_STOPPED_WITH_COMPILE_ERROR
):
if self._state != ReportSessionState.SHUTDOWN_REQUESTED:
self._state = ReportSessionState.REPORT_NOT_RUNNING
script_succeeded = event == ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS
self._enqueue_report_finished_message(
ForwardMsg.FINISHED_SUCCESSFULLY
if script_succeeded
else ForwardMsg.FINISHED_WITH_COMPILE_ERROR
)
if config.get_option("server.liveSave"):
# Enqueue into the IOLoop so it runs without blocking AND runs
# on the main thread.
self._ioloop.spawn_callback(self._save_final_report_and_quit)
if script_succeeded:
# When a script completes successfully, we update our
# LocalSourcesWatcher to account for any source code changes
# that change which modules should be watched. (This is run on
# the main thread, because LocalSourcesWatcher is not
# thread safe.)
self._ioloop.spawn_callback(
self._local_sources_watcher.update_watched_modules
)
else:
# When a script fails to compile, we send along the exception.
from streamlit.elements import exception_proto
msg = ForwardMsg()
exception_proto.marshall(
msg.session_event.script_compilation_exception, exception
)
self.enqueue(msg)
elif event == ScriptRunnerEvent.SHUTDOWN:
# When ScriptRunner shuts down, update our local reference to it,
# and check to see if we need to spawn a new one. (This is run on
# the main thread.)
def on_shutdown():
self._widget_states = widget_states
self._scriptrunner = None
# Because a new ScriptEvent could have been enqueued while the
# scriptrunner was shutting down, we check to see if we should
# create a new one. (Otherwise, a newly-enqueued ScriptEvent
# won't be processed until another event is enqueued.)
self._maybe_create_scriptrunner()
self._ioloop.spawn_callback(on_shutdown)
# Send a message if our run state changed
report_was_running = prev_state == ReportSessionState.REPORT_IS_RUNNING
report_is_running = self._state == ReportSessionState.REPORT_IS_RUNNING
if report_is_running != report_was_running:
self._enqueue_session_state_changed_message()
def _enqueue_session_state_changed_message(self):
msg = ForwardMsg()
msg.session_state_changed.run_on_save = self._run_on_save
msg.session_state_changed.report_is_running = (
self._state == ReportSessionState.REPORT_IS_RUNNING
)
self.enqueue(msg)
def _enqueue_file_change_message(self):
LOGGER.debug("Enqueuing report_changed message (id=%s)", self.id)
msg = ForwardMsg()
msg.session_event.report_changed_on_disk = True
self.enqueue(msg)
def _maybe_enqueue_initialize_message(self):
if self._sent_initialize_message:
return
self._sent_initialize_message = True
msg = ForwardMsg()
imsg = msg.initialize
imsg.config.sharing_enabled = config.get_option("global.sharingMode") != "off"
imsg.config.gather_usage_stats = config.get_option("browser.gatherUsageStats")
imsg.config.max_cached_message_age = config.get_option(
"global.maxCachedMessageAge"
)
imsg.config.mapbox_token = config.get_option("mapbox.token")
LOGGER.debug(
"New browser connection: "
"gather_usage_stats=%s, "
"sharing_enabled=%s, "
"max_cached_message_age=%s",
imsg.config.gather_usage_stats,
imsg.config.sharing_enabled,
imsg.config.max_cached_message_age,
)
imsg.environment_info.streamlit_version = __version__
imsg.environment_info.python_version = ".".join(map(str, sys.version_info))
imsg.session_state.run_on_save = self._run_on_save
imsg.session_state.report_is_running = (
self._state == ReportSessionState.REPORT_IS_RUNNING
)
imsg.user_info.installation_id = __installation_id__
if Credentials.get_current().activation:
imsg.user_info.email = Credentials.get_current().activation.email
else:
imsg.user_info.email = ""
imsg.command_line = self._report.command_line
self.enqueue(msg)
def _enqueue_new_report_message(self):
self._report.generate_new_id()
msg = ForwardMsg()
msg.new_report.id = self._report.report_id
msg.new_report.name = self._report.name
msg.new_report.script_path = self._report.script_path
self.enqueue(msg)
def _enqueue_report_finished_message(self, status):
"""Enqueues a report_finished ForwardMsg.
Parameters
----------
status : ReportFinishedStatus
"""
msg = ForwardMsg()
msg.report_finished = status
self.enqueue(msg)
    def handle_rerun_script_request(
        self, command_line=None, widget_state=None, is_preheat=False
    ):
        """Tells the ScriptRunner to re-run its report.

        Parameters
        ----------
        command_line : str | None
            The new command line arguments to run the script with, or None
            to use its previous command line value.
            NOTE(review): this parameter is accepted but never used in the
            body below -- confirm whether it should be forwarded.
        widget_state : WidgetStates | None
            The WidgetStates protobuf to run the script with, or None
            to use its previous widget states.
        is_preheat: boolean
            True if this ReportSession should run the script immediately, and
            then ignore the next rerun request if it matches the already-ran
            widget state.
        """
        if is_preheat:
            self._maybe_reuse_previous_run = True  # For next time.
        elif self._maybe_reuse_previous_run:
            # If this is a "preheated" ReportSession, reuse the previous run if
            # the widget state matches. But only do this one time ever.
            self._maybe_reuse_previous_run = False
            has_widget_state = (
                widget_state is not None and len(widget_state.widgets) > 0
            )
            if not has_widget_state:
                LOGGER.debug("Skipping rerun since the preheated run is the same")
                return
        self.request_rerun(widget_state)
def handle_upload_file(self, upload_file):
self._uploaded_file_mgr.create_or_clear_file(
widget_id=upload_file.widget_id,
name=upload_file.name,
size=upload_file.size,
last_modified=upload_file.lastModified,
chunks=upload_file.chunks,
)
self.handle_rerun_script_request(widget_state=self._widget_states)
def handle_upload_file_chunk(self, upload_file_chunk):
progress = self._uploaded_file_mgr.process_chunk(
widget_id=upload_file_chunk.widget_id,
index=upload_file_chunk.index,
data=upload_file_chunk.data,
)
if progress == 1:
self.handle_rerun_script_request(widget_state=self._widget_states)
def handle_delete_uploaded_file(self, delete_uploaded_file):
self._uploaded_file_mgr.delete_file(widget_id=delete_uploaded_file.widget_id)
self.handle_rerun_script_request(widget_state=self._widget_states)
def handle_stop_script_request(self):
"""Tells the ScriptRunner to stop running its report."""
self._enqueue_script_request(ScriptRequest.STOP)
def handle_clear_cache_request(self):
"""Clears this report's cache.
Because this cache is global, it will be cleared for all users.
"""
# Setting verbose=True causes clear_cache to print to stdout.
# Since this command was initiated from the browser, the user
# doesn't need to see the results of the command in their
# terminal.
caching.clear_cache()
def handle_set_run_on_save_request(self, new_value):
"""Changes our run_on_save flag to the given value.
The browser will be notified of the change.
Parameters
----------
new_value : bool
New run_on_save value
"""
self._run_on_save = new_value
self._enqueue_session_state_changed_message()
def _enqueue_script_request(self, request, data=None):
"""Enqueue a ScriptEvent into our ScriptEventQueue.
If a script thread is not already running, one will be created
to handle the event.
Parameters
----------
request : ScriptRequest
The type of request.
data : Any
Data associated with the request, if any.
"""
if self._state == ReportSessionState.SHUTDOWN_REQUESTED:
LOGGER.warning("Discarding %s request after shutdown" % request)
return
self._script_request_queue.enqueue(request, data)
self._maybe_create_scriptrunner()
def _maybe_create_scriptrunner(self):
"""Create a new ScriptRunner if we have unprocessed script requests.
This is called every time a ScriptRequest is enqueued, and also
after a ScriptRunner shuts down, in case new requests were enqueued
during its termination.
This function should only be called on the main thread.
"""
if (
self._state == ReportSessionState.SHUTDOWN_REQUESTED
or self._scriptrunner is not None
or not self._script_request_queue.has_request
):
return
# Create the ScriptRunner, attach event handlers, and start it
self._scriptrunner = ScriptRunner(
report=self._report,
enqueue_forward_msg=self.enqueue,
widget_states=self._widget_states,
request_queue=self._script_request_queue,
uploaded_file_mgr=self._uploaded_file_mgr,
)
self._scriptrunner.on_event.connect(self._on_scriptrunner_event)
self._scriptrunner.start()
@tornado.gen.coroutine
def handle_save_request(self, ws):
"""Save serialized version of report deltas to the cloud.
"Progress" ForwardMsgs will be sent to the client during the upload.
These messages are sent "out of band" - that is, they don't get
enqueued into the ReportQueue (because they're not part of the report).
Instead, they're written directly to the report's WebSocket.
Parameters
----------
ws : _BrowserWebSocketHandler
The report's websocket handler.
"""
@tornado.gen.coroutine
def progress(percent):
progress_msg = ForwardMsg()
progress_msg.upload_report_progress = percent
yield ws.write_message(serialize_forward_msg(progress_msg), binary=True)
# Indicate that the save is starting.
try:
yield progress(0)
url = yield self._save_final_report(progress)
# Indicate that the save is done.
progress_msg = ForwardMsg()
progress_msg.report_uploaded = url
yield ws.write_message(serialize_forward_msg(progress_msg), binary=True)
except Exception as e:
# Horrible hack to show something if something breaks.
err_msg = "%s: %s" % (type(e).__name__, str(e) or "No further details.")
progress_msg = ForwardMsg()
progress_msg.report_uploaded = err_msg
yield ws.write_message(serialize_forward_msg(progress_msg), binary=True)
LOGGER.warning("Failed to save report:", exc_info=e)
@tornado.gen.coroutine
def _save_running_report(self):
files = self._report.serialize_running_report_to_files()
url = yield self._get_storage().save_report_files(self._report.report_id, files)
if config.get_option("server.liveSave"):
url_util.print_url("Saved running app", url)
raise tornado.gen.Return(url)
@tornado.gen.coroutine
def _save_final_report(self, progress_coroutine=None):
files = self._report.serialize_final_report_to_files()
url = yield self._get_storage().save_report_files(
self._report.report_id, files, progress_coroutine
)
if config.get_option("server.liveSave"):
url_util.print_url("Saved final app", url)
raise tornado.gen.Return(url)
@tornado.gen.coroutine
def _save_final_report_and_quit(self):
yield self._save_final_report()
self._ioloop.stop()
def _get_storage(self):
if self._storage is None:
sharing_mode = config.get_option("global.sharingMode")
if sharing_mode == "s3":
self._storage = S3Storage()
elif sharing_mode == "file":
self._storage = FileStorage()
else:
raise RuntimeError("Unsupported sharing mode '%s'" % sharing_mode)
return self._storage
| en | 0.872839 | # -*- coding: utf-8 -*- # Copyright 2018-2020 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Contains session data for a single "user" of an active report (that is, a connected browser tab). Each ReportSession has its own Report, root DeltaGenerator, ScriptRunner, and widget state. A ReportSession is attached to each thread involved in running its Report. Initialize the ReportSession. Parameters ---------- ioloop : tornado.ioloop.IOLoop The Tornado IOLoop that we're running within. script_path : str Path of the Python file from which this report is generated. command_line : str Command line as input by the user. # Each ReportSession gets a unique ID # The ScriptRequestQueue is the means by which we communicate # with the active ScriptRunner. Clears the report queue and returns the messages it contained. The Server calls this periodically to deliver new messages to the browser connected to this report. Returns ------- list[ForwardMsg] The messages that were removed from the queue and should be delivered to the browser. Shuts down the ReportSession. It's an error to use a ReportSession after it's been shut down. # Shut down the ScriptRunner, if one is active. # self._state must not be set to SHUTDOWN_REQUESTED until # after this is called. Enqueues a new ForwardMsg to our browser queue. This can be called on both the main thread and a ScriptRunner run thread. 
Parameters ---------- msg : ForwardMsg The message to enqueue # Avoid having two maybe_handle_execution_control_request running on # top of each other when tracer is installed. This leads to a lock # contention. # If we have an active ScriptRunner, signal that it can handle an # execution control request. (Copy the scriptrunner reference to # avoid it being unset from underneath us, as this function can be # called outside the main thread.) Enqueues an Exception message. Parameters ---------- e : BaseException # This does a few things: # 1) Clears the current report in the browser. # 2) Marks the current report as "stopped" in the browser. # 3) HACK: Resets any script params that may have been broken (e.g. the # command-line when rerunning with wrong argv[0]) Signal that we're interested in running the script. If the script is not already running, it will be started immediately. Otherwise, a rerun will be requested. Parameters ---------- widget_state : dict | None The widget state dictionary to run the script with, or None to use the widget state from the previous run of the script. One of our source files changed. Schedule a rerun if appropriate. Called when our ScriptRunner emits an event. This is *not* called on the main thread. Parameters ---------- event : ScriptRunnerEvent exception : BaseException | None An exception thrown during compilation. Set only for the SCRIPT_STOPPED_WITH_COMPILE_ERROR event. widget_states : streamlit.proto.Widget_pb2.WidgetStates | None The ScriptRunner's final WidgetStates. Set only for the SHUTDOWN event. # Enqueue into the IOLoop so it runs without blocking AND runs # on the main thread. # Enqueue into the IOLoop so it runs without blocking AND runs # on the main thread. # When a script completes successfully, we update our # LocalSourcesWatcher to account for any source code changes # that change which modules should be watched. (This is run on # the main thread, because LocalSourcesWatcher is not # thread safe.) 
# When a script fails to compile, we send along the exception. # When ScriptRunner shuts down, update our local reference to it, # and check to see if we need to spawn a new one. (This is run on # the main thread.) # Because a new ScriptEvent could have been enqueued while the # scriptrunner was shutting down, we check to see if we should # create a new one. (Otherwise, a newly-enqueued ScriptEvent # won't be processed until another event is enqueued.) # Send a message if our run state changed Enqueues a report_finished ForwardMsg. Parameters ---------- status : ReportFinishedStatus Tells the ScriptRunner to re-run its report. Parameters ---------- command_line : str | None The new command line arguments to run the script with, or None to use its previous command line value. widget_state : WidgetStates | None The WidgetStates protobuf to run the script with, or None to use its previous widget states. is_preheat: boolean True if this ReportSession should run the script immediately, and then ignore the next rerun request if it matches the already-ran widget state. # For next time. # If this is a "preheated" ReportSession, reuse the previous run if # the widget state matches. But only do this one time ever. Tells the ScriptRunner to stop running its report. Clears this report's cache. Because this cache is global, it will be cleared for all users. # Setting verbose=True causes clear_cache to print to stdout. # Since this command was initiated from the browser, the user # doesn't need to see the results of the command in their # terminal. Changes our run_on_save flag to the given value. The browser will be notified of the change. Parameters ---------- new_value : bool New run_on_save value Enqueue a ScriptEvent into our ScriptEventQueue. If a script thread is not already running, one will be created to handle the event. Parameters ---------- request : ScriptRequest The type of request. data : Any Data associated with the request, if any. 
Create a new ScriptRunner if we have unprocessed script requests. This is called every time a ScriptRequest is enqueued, and also after a ScriptRunner shuts down, in case new requests were enqueued during its termination. This function should only be called on the main thread. # Create the ScriptRunner, attach event handlers, and start it Save serialized version of report deltas to the cloud. "Progress" ForwardMsgs will be sent to the client during the upload. These messages are sent "out of band" - that is, they don't get enqueued into the ReportQueue (because they're not part of the report). Instead, they're written directly to the report's WebSocket. Parameters ---------- ws : _BrowserWebSocketHandler The report's websocket handler. # Indicate that the save is starting. # Indicate that the save is done. # Horrible hack to show something if something breaks. | 1.585141 | 2 |
hassio-google-drive-backup/backup/debugworker.py | spo0n125/teste | 0 | 6625041 | <gh_stars>0
import asyncio
import json
import socket
import subprocess
from datetime import datetime, timedelta
from aiohttp import ClientSession
from injector import inject, singleton
from backup.config import Config, Setting, VERSION, _DEFAULTS, PRIVATE
from backup.exceptions import KnownError
from backup.util import GlobalInfo, Resolver
from backup.time import Time
from backup.worker import Worker
from backup.logger import getLogger, getHistory
from backup.ha import HaRequests, HaSource
from backup.model import Coordinator
logger = getLogger(__name__)
ERROR_LOG_LENGTH = 30
@singleton
class DebugWorker(Worker):
@inject
def __init__(self, time: Time, info: GlobalInfo, config: Config, resolver: Resolver, session: ClientSession, ha: HaRequests, coord: Coordinator, ha_source: HaSource):
super().__init__("Debug Worker", self.doWork, time, interval=10)
self.time = time
self._info = info
self.config = config
self.ha = ha
self.ha_source = ha_source
self.coord = coord
self.last_dns_update = None
self.dns_info = None
self.last_sent_error = None
self.last_sent_error_time = None
self.resolver = resolver
self.session = session
async def doWork(self):
if not self.last_dns_update or self.time.now() > self.last_dns_update + timedelta(hours=12):
await self.updateDns()
if self.config.get(Setting.SEND_ERROR_REPORTS):
try:
await self.maybeSendErrorReport()
except Exception as e:
# just eat the error
pass
    async def maybeSendErrorReport(self):
        """Send an error (or all-clear) report if the last error changed.

        Only fires when the current error differs from the one last sent:
        a new error produces a full error report; a transition back to
        "no error" produces a small "clear" report with the error's duration.
        """
        error = self._info._last_error
        if error is not None:
            if isinstance(error, KnownError):
                # Known errors are reported by their stable error code.
                error = error.code()
            else:
                error = logger.formatException(error)
        if error != self.last_sent_error:
            self.last_sent_error = error
            if error is not None:
                # Remember when the error started so the eventual "clear"
                # report can include its duration.
                self.last_sent_error_time = self.time.now()
                package = await self.buildErrorReport(error)
            else:
                package = self.buildClearReport()
            logger.info("Sending error report (see settings to disable)")
            headers = {
                'client': self.config.clientIdentifier(),
                'addon_version': VERSION
            }
            async with self.session.post(self.config.get(Setting.ERROR_REPORT_URL), headers=headers, json=package):
                pass
async def updateDns(self):
self.last_dns_update = self.time.now()
try:
# Resolve google's addresses
self.dns_info = await self.getPingInfo()
self._info.setDnsInfo(self.dns_info)
except Exception as e:
self.dns_info = logger.formatException(e)
async def buildErrorReport(self, error):
config_special = {}
for setting in Setting:
if self.config.get(setting) != _DEFAULTS[setting]:
if setting in PRIVATE:
config_special[str(setting)] = "REDACTED"
else:
config_special[str(setting)] = self.config.get(setting)
report = {}
report['config'] = config_special
report['time'] = self.formatDate(self.time.now())
report['start_time'] = self.formatDate(self._info._start_time)
report['addon_version'] = VERSION
report['failure_time'] = self.formatDate(self._info._last_failure_time)
report['failure_count'] = self._info._failures
report['sync_last_start'] = self.formatDate(self._info._last_sync_start)
report['sync_count'] = self._info._syncs
report['sync_success_count'] = self._info._successes
report['sync_last_success'] = self.formatDate(self._info._last_sync_success)
report['upload_count'] = self._info._uploads
report['upload_last_size'] = self._info._last_upload_size
report['upload_last_attempt'] = self.formatDate(self._info._last_upload)
report['debug'] = self._info.debug
report['version'] = VERSION
report['error'] = error
report['client'] = self.config.clientIdentifier()
if self.ha_source.isInitialized():
report["super_version"] = self.ha_source.host_info.get('supervisor', "None")
report["hassos_version"] = self.ha_source.host_info.get('hassos', "None")
report["docker_version"] = self.ha_source.host_info.get('docker', "None")
report["machine"] = self.ha_source.host_info.get('machine', "None")
report["supervisor_channel"] = self.ha_source.host_info.get('channel', "None")
report["arch"] = self.ha_source.super_info.get('arch', "None")
report["timezone"] = self.ha_source.super_info.get('timezone', "None")
report["ha_version"] = self.ha_source.ha_info.get('version', "None")
else:
report["super_version"] = "Uninitialized"
report["arch"] = "Uninitialized"
report["timezone"] = "Uninitialized"
report["ha_version"] = "Uninitialized"
report["snapshots"] = self.coord.buildSnapshotMetrics()
return report
async def buildBugReportData(self, error):
report = await self.buildErrorReport(error)
report['addon_logs'] = "\n".join(b for a, b in list(getHistory(0, False))[-ERROR_LOG_LENGTH:])
try:
report['super_logs'] = "\n".join((await self.ha.getSuperLogs()).split("\n")[-ERROR_LOG_LENGTH:])
except Exception as e:
report['super_logs'] = logger.formatException(e)
try:
report['core_logs'] = "\n".join((await self.ha.getCoreLogs()).split("\n")[-ERROR_LOG_LENGTH:])
except Exception as e:
report['core_logs'] = logger.formatException(e)
return report
def buildClearReport(self):
duration = self.time.now() - self.last_sent_error_time
report = {
'duration': str(duration)
}
return report
def formatDate(self, date: datetime):
if date is None:
return "Never"
else:
return date.isoformat()
async def getPingInfo(self):
who = self.config.get(Setting.DRIVE_HOST_NAME)
ips = await self.resolve(who)
pings = {who: {}}
for ip in ips:
pings[who][ip] = "Unknown"
command = "fping -t 5000 " + " ".join(ips)
# fping each server
process = await asyncio.create_subprocess_shell(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout_data, stderr_data = await process.communicate()
for line in stdout_data.decode().split("\n"):
for host in pings.keys():
for address in pings[host].keys():
if line.startswith(address):
response = line[len(address):].strip()
if response.startswith(":"):
response = response[2:].strip()
if response.startswith("is"):
response = response[3:].strip()
pings[host][address] = response
return pings
async def resolve(self, who: str):
try:
ret = [who]
addresses = await self.resolver.resolve(who, 443, socket.AF_INET)
for address in addresses:
ret.append(address['host'])
return ret
except Exception:
return [who]
| import asyncio
import json
import socket
import subprocess
from datetime import datetime, timedelta
from aiohttp import ClientSession
from injector import inject, singleton
from backup.config import Config, Setting, VERSION, _DEFAULTS, PRIVATE
from backup.exceptions import KnownError
from backup.util import GlobalInfo, Resolver
from backup.time import Time
from backup.worker import Worker
from backup.logger import getLogger, getHistory
from backup.ha import HaRequests, HaSource
from backup.model import Coordinator
logger = getLogger(__name__)
ERROR_LOG_LENGTH = 30
@singleton
class DebugWorker(Worker):
    """Diagnostics worker: keeps DNS/ping info for the Drive host fresh and,
    when the user has enabled it, uploads error reports to the report URL."""
    @inject
    def __init__(self, time: Time, info: GlobalInfo, config: Config, resolver: Resolver, session: ClientSession, ha: HaRequests, coord: Coordinator, ha_source: HaSource):
        """Wire in collaborators and register the worker loop (10 s interval)."""
        super().__init__("Debug Worker", self.doWork, time, interval=10)
        self.time = time
        self._info = info
        self.config = config
        self.ha = ha
        self.ha_source = ha_source
        self.coord = coord
        self.last_dns_update = None  # when DNS/ping info was last refreshed
        self.dns_info = None  # last ping results, or formatted exception text
        self.last_sent_error = None  # last error payload sent upstream
        self.last_sent_error_time = None  # when that error was first sent
        self.resolver = resolver
        self.session = session
    async def doWork(self):
        """One tick: refresh DNS info at most every 12 h, then maybe report errors."""
        if not self.last_dns_update or self.time.now() > self.last_dns_update + timedelta(hours=12):
            await self.updateDns()
        if self.config.get(Setting.SEND_ERROR_REPORTS):
            try:
                await self.maybeSendErrorReport()
            except Exception as e:
                # just eat the error: diagnostics must never break the worker loop
                pass
    async def maybeSendErrorReport(self):
        """POST an error report when the current error differs from the last sent one.

        KnownError instances are reduced to their code; other exceptions to a
        formatted traceback. A transition back to "no error" sends a clear
        report carrying how long the error lasted.
        """
        error = self._info._last_error
        if error is not None:
            if isinstance(error, KnownError):
                error = error.code()
            else:
                error = logger.formatException(error)
        if error != self.last_sent_error:
            self.last_sent_error = error
            if error is not None:
                self.last_sent_error_time = self.time.now()
                package = await self.buildErrorReport(error)
            else:
                package = self.buildClearReport()
            logger.info("Sending error report (see settings to disable)")
            headers = {
                'client': self.config.clientIdentifier(),
                'addon_version': VERSION
            }
            # Response body is ignored; only delivery matters.
            async with self.session.post(self.config.get(Setting.ERROR_REPORT_URL), headers=headers, json=package):
                pass
    async def updateDns(self):
        """Refresh cached ping diagnostics; on failure store the traceback text."""
        self.last_dns_update = self.time.now()
        try:
            # Ping the configured Drive host (see getPingInfo).
            self.dns_info = await self.getPingInfo()
            self._info.setDnsInfo(self.dns_info)
        except Exception as e:
            self.dns_info = logger.formatException(e)
    async def buildErrorReport(self, error):
        """Assemble the error-report payload.

        Only non-default settings are included, with PRIVATE ones redacted.
        Host/supervisor details are added when the HA source is initialized.
        """
        config_special = {}
        for setting in Setting:
            if self.config.get(setting) != _DEFAULTS[setting]:
                if setting in PRIVATE:
                    config_special[str(setting)] = "REDACTED"
                else:
                    config_special[str(setting)] = self.config.get(setting)
        report = {}
        report['config'] = config_special
        report['time'] = self.formatDate(self.time.now())
        report['start_time'] = self.formatDate(self._info._start_time)
        report['addon_version'] = VERSION
        report['failure_time'] = self.formatDate(self._info._last_failure_time)
        report['failure_count'] = self._info._failures
        report['sync_last_start'] = self.formatDate(self._info._last_sync_start)
        report['sync_count'] = self._info._syncs
        report['sync_success_count'] = self._info._successes
        report['sync_last_success'] = self.formatDate(self._info._last_sync_success)
        report['upload_count'] = self._info._uploads
        report['upload_last_size'] = self._info._last_upload_size
        report['upload_last_attempt'] = self.formatDate(self._info._last_upload)
        report['debug'] = self._info.debug
        report['version'] = VERSION
        report['error'] = error
        report['client'] = self.config.clientIdentifier()
        if self.ha_source.isInitialized():
            report["super_version"] = self.ha_source.host_info.get('supervisor', "None")
            report["hassos_version"] = self.ha_source.host_info.get('hassos', "None")
            report["docker_version"] = self.ha_source.host_info.get('docker', "None")
            report["machine"] = self.ha_source.host_info.get('machine', "None")
            report["supervisor_channel"] = self.ha_source.host_info.get('channel', "None")
            report["arch"] = self.ha_source.super_info.get('arch', "None")
            report["timezone"] = self.ha_source.super_info.get('timezone', "None")
            report["ha_version"] = self.ha_source.ha_info.get('version', "None")
        else:
            report["super_version"] = "Uninitialized"
            report["arch"] = "Uninitialized"
            report["timezone"] = "Uninitialized"
            report["ha_version"] = "Uninitialized"
        report["snapshots"] = self.coord.buildSnapshotMetrics()
        return report
    async def buildBugReportData(self, error):
        """Error report plus the last ERROR_LOG_LENGTH lines of addon/supervisor/core logs."""
        report = await self.buildErrorReport(error)
        report['addon_logs'] = "\n".join(b for a, b in list(getHistory(0, False))[-ERROR_LOG_LENGTH:])
        try:
            report['super_logs'] = "\n".join((await self.ha.getSuperLogs()).split("\n")[-ERROR_LOG_LENGTH:])
        except Exception as e:
            report['super_logs'] = logger.formatException(e)
        try:
            report['core_logs'] = "\n".join((await self.ha.getCoreLogs()).split("\n")[-ERROR_LOG_LENGTH:])
        except Exception as e:
            report['core_logs'] = logger.formatException(e)
        return report
    def buildClearReport(self):
        """Payload sent once a previously reported error has cleared."""
        duration = self.time.now() - self.last_sent_error_time
        report = {
            'duration': str(duration)
        }
        return report
    def formatDate(self, date: datetime):
        """ISO-8601 string for *date*, or "Never" when it is None."""
        if date is None:
            return "Never"
        else:
            return date.isoformat()
    async def getPingInfo(self):
        """fping the Drive host and each of its resolved IPs.

        Returns ``{hostname: {address: status}}`` where status is fping's
        per-address verdict (initially "Unknown").
        NOTE(review): the shell command is built from DNS-resolved strings and
        run via create_subprocess_shell -- confirm resolver output cannot
        contain shell metacharacters.
        """
        who = self.config.get(Setting.DRIVE_HOST_NAME)
        ips = await self.resolve(who)
        pings = {who: {}}
        for ip in ips:
            pings[who][ip] = "Unknown"
        command = "fping -t 5000 " + " ".join(ips)
        # fping each server
        process = await asyncio.create_subprocess_shell(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        stdout_data, stderr_data = await process.communicate()
        # Parse lines like "1.2.3.4 : is alive" down to just "alive".
        for line in stdout_data.decode().split("\n"):
            for host in pings.keys():
                for address in pings[host].keys():
                    if line.startswith(address):
                        response = line[len(address):].strip()
                        if response.startswith(":"):
                            response = response[2:].strip()
                        if response.startswith("is"):
                            response = response[3:].strip()
                        pings[host][address] = response
        return pings
async def resolve(self, who: str):
try:
ret = [who]
addresses = await self.resolver.resolve(who, 443, socket.AF_INET)
for address in addresses:
ret.append(address['host'])
return ret
except Exception:
return [who] | en | 0.86828 | # just eat the error # Resolve google's addresses # fping each server | 1.98495 | 2 |
pretsa_FLDM.py | roeselfa/FeatureLearningBasedDistanceMetrics | 1 | 6625042 | <reponame>roeselfa/FeatureLearningBasedDistanceMetrics<gh_stars>1-10
from anytree import AnyNode, PreOrderIter
from featureLearningBasedDistanceMetrics import getDistance
import sys
from scipy.stats import wasserstein_distance
from scipy.stats import normaltest
import pandas as pd
import numpy as np
class PretsaFLDM:
    """PRETSA-style event-log sanitizer using feature-learning based distances.

    Builds a prefix tree over the traces of an event log (one node per
    activity prefix), prunes nodes violating k-anonymity or t-closeness, and
    re-inserts the pruned traces at the closest surviving sequence, where
    closeness is measured by ``getDistance`` over ``eventDistanceMatrix``.
    """
    def __init__(self, eventLog, model, eventDistanceMatrix):
        """Build the prefix tree from *eventLog*.

        :param eventLog: pandas DataFrame with "Case ID", "Activity" and
            "Duration" columns; rows of the same case must be contiguous.
        :param model: feature-learning model, stored on the instance.
        :param eventDistanceMatrix: pairwise event distances consumed by
            ``getDistance`` when re-assigning pruned traces.
        """
        root = AnyNode(id='Root', name="Root", cases=set(), sequence="", annotation=dict())
        current = root
        currentCase = ""
        traceToSequenceDict = dict()
        sequence = None
        self.caseIDColName = "Case ID"
        self.activityColName = "Activity"
        self.annotationColName = "Duration"
        self.constantEventNr = "Event_Nr"
        self.annotationDataOverAll = dict()
        self.normaltest_alpha = 0.05
        self.eventDistanceMatrix = eventDistanceMatrix
        self.model = model
        for index, row in eventLog.iterrows():
            activity = row[self.activityColName]
            annotation = row[self.annotationColName]
            if row[self.caseIDColName] != currentCase:
                # A new case starts: remember the finished case's sequence.
                if not sequence is None:
                    traceToSequenceDict[currentCase] = sequence
                currentCase = row[self.caseIDColName]
                current = root
                current.cases.add(currentCase)
                sequence = ""
            childAlreadyExists = False
            sequence = sequence + "@" + activity
            # Descend into the child for this activity, creating it if absent.
            for child in current.children:
                if child.name == activity:
                    childAlreadyExists = True
                    current = child
            if not childAlreadyExists:
                node = AnyNode(id=index, name=activity, parent=current, cases=set(), sequence=sequence,
                               annotations=dict())
                current = node
            current.cases.add(currentCase)
            current.annotations[currentCase] = annotation
            self.__addAnnotation(annotation, activity)
        # Handle last case
        traceToSequenceDict[currentCase] = sequence
        self.tree = root
        self.traceToSequenceDict = traceToSequenceDict
        self.numberOfTracesOriginal = len(self.tree.cases)
        self.sequentialPrunning = True
        self.__setMaxDifferences()
    def __addAnnotation(self, annotation, activity):
        """Record one annotation value in the activity's global distribution."""
        dataForActivity = self.annotationDataOverAll.get(activity, None)
        if dataForActivity is None:
            self.annotationDataOverAll[activity] = []
            dataForActivity = self.annotationDataOverAll[activity]
        dataForActivity.append(annotation)
    def __setMaxDifferences(self):
        """Cache, per activity, the value range of its global annotation data."""
        self.annotationMaxDifferences = dict()
        for key in self.annotationDataOverAll.keys():
            maxVal = max(self.annotationDataOverAll[key])
            minVal = min(self.annotationDataOverAll[key])
            self.annotationMaxDifferences[key] = abs(maxVal - minVal)
    def __violatesTCloseness(self, activity, annotations, t, cases):
        """True if the node's annotation distribution is farther than *t* from
        the activity's global distribution (range-normalized Wasserstein)."""
        distributionActivity = self.annotationDataOverAll[activity]
        maxDifference = self.annotationMaxDifferences[activity]
        # Consider only data from cases still in node
        distributionEquivalenceClass = []
        casesInClass = cases.intersection(set(annotations.keys()))
        for caseInClass in casesInClass:
            distributionEquivalenceClass.append(annotations[caseInClass])
        if len(distributionEquivalenceClass) == 0:  # No original annotation is left in the node
            return False
        if maxDifference == 0.0:  # All annotations have the same value (most likely 0.0)
            return False
        if (wasserstein_distance(distributionActivity, distributionEquivalenceClass) / maxDifference) >= t:
            return True
        else:
            return False
    def __treePrunning(self, k, t):
        """Cut nodes violating k-anonymity or t-closeness; return the cut traces.

        With ``sequentialPrunning`` enabled, stops after the first pruned node
        so the caller can immediately re-insert its traces.
        """
        cutOutTraces = set()
        for node in PreOrderIter(self.tree):
            if node != self.tree:
                node.cases = node.cases.difference(cutOutTraces)
                if len(node.cases) < k or self.__violatesTCloseness(node.name, node.annotations, t, node.cases):
                    cutOutTraces = cutOutTraces.union(node.cases)
                    current = node.parent
                    node.parent = None
                    # Remove the cut traces from all ancestors as well.
                    while current != self.tree:
                        current.cases = current.cases.difference(cutOutTraces)
                        current = current.parent
                    if self.sequentialPrunning:
                        break
        return cutOutTraces
    def __getAllPotentialSequencesTree(self, tree, sequence):
        """Collect every "@"-joined activity sequence still present in the tree."""
        sequences = set()
        sumCasesChildren = 0
        for child in tree.children:
            sumCasesChildren = sumCasesChildren + len(child.cases)
            childSequence = sequence + "@" + child.name
            sequences = sequences.union(self.__getAllPotentialSequencesTree(child, childSequence))
        # A node with more cases than its children (or a leaf) terminates a sequence.
        if len(tree.cases) > sumCasesChildren or sumCasesChildren == 0:
            sequences.add(sequence)
        return sequences
    def __addCaseToTree(self, trace, sequence):
        """Re-attach *trace* along the existing tree path spelled by *sequence*."""
        if trace != "":
            activities = sequence.split("@")
            currentNode = self.tree
            self.tree.cases.add(trace)
            for activity in activities:
                for child in currentNode.children:
                    if child.name == activity:
                        child.cases.add(trace)
                        currentNode = child
                        break
    def __combineTracesAndTree(self, traces):
        """Assign each pruned trace to the closest surviving tree sequence."""
        # We transform the set of sequences into a list and sort it, to discretize the behaviour of the algorithm
        sequencesTree = list(self.__getAllPotentialSequencesTree(self.tree, ""))
        sequencesTree.sort()
        for trace in traces:
            bestSequence = ""
            lowestDistance = sys.maxsize
            traceSequence = self.traceToSequenceDict[trace]
            for treeSequence in sequencesTree:
                currentDistance = getDistance(traceSequence, treeSequence, self.eventDistanceMatrix)
                if currentDistance < lowestDistance:
                    bestSequence = treeSequence
                    lowestDistance = currentDistance
            self.__addCaseToTree(trace, bestSequence)
    def runPretsa(self, k, t):
        """Run the sanitization; return the set of traces that were re-assigned.

        :param k: k-anonymity threshold (minimum cases per tree node).
        :param t: t-closeness threshold for annotation distributions.
        """
        if self.sequentialPrunning:
            cutOutCases = set()
            cutOutCase = self.__treePrunning(k, t)
            # Prune and re-insert one violating node at a time until stable.
            while len(cutOutCase) > 0:
                self.__combineTracesAndTree(cutOutCase)
                cutOutCases = cutOutCases.union(cutOutCase)
                cutOutCase = self.__treePrunning(k, t)
        else:
            cutOutCases = self.__treePrunning(k, t)
            self.__combineTracesAndTree(cutOutCases)
        return cutOutCases
    def __generateNewAnnotation(self, activity):
        """Sample a synthetic annotation for *activity* from its global data."""
        # normaltest works only with more than 8 samples
        if (len(self.annotationDataOverAll[activity])) >= 8:
            stat, p = normaltest(self.annotationDataOverAll[activity])
        else:
            p = 1.0
        # NOTE(review): p <= alpha REJECTS normality, yet that branch samples a
        # fitted Gaussian while the "looks normal" branch bootstraps -- the
        # condition may be inverted; behavior kept as-is.
        if p <= self.normaltest_alpha:
            mean = np.mean(self.annotationDataOverAll[activity])
            std = np.std(self.annotationDataOverAll[activity])
            randomValue = np.random.normal(mean, std)
        else:
            randomValue = np.random.choice(self.annotationDataOverAll[activity])
        return randomValue
    def getPrivatisedEventLog(self):
        """Flatten the sanitized tree back into an event-log DataFrame.

        Cases lacking an original annotation at a node receive a synthetic
        one. Rows carry "Event_Nr" (= node depth) and are sorted by case id
        and event number.
        """
        eventLog = pd.DataFrame()
        events = []
        for node in PreOrderIter(self.tree):
            if node != self.tree:
                for case in node.cases:
                    event = dict()
                    event[self.activityColName] = node.name
                    event[self.caseIDColName] = case
                    event[self.annotationColName] = node.annotations.get(case, self.__generateNewAnnotation(node.name))
                    event[self.constantEventNr] = node.depth
                    events.append(event)
        eventLog = pd.DataFrame(events)
        if not eventLog.empty:
            eventLog = eventLog.sort_values(by=[self.caseIDColName, self.constantEventNr])
        return eventLog
| from anytree import AnyNode, PreOrderIter
from featureLearningBasedDistanceMetrics import getDistance
import sys
from scipy.stats import wasserstein_distance
from scipy.stats import normaltest
import pandas as pd
import numpy as np
class PretsaFLDM:
    """PRETSA-style event-log sanitizer using feature-learning based distances.

    Builds a prefix tree over the traces of an event log (one node per
    activity prefix), prunes nodes violating k-anonymity or t-closeness, and
    re-inserts the pruned traces at the closest surviving sequence, where
    closeness is measured by ``getDistance`` over ``eventDistanceMatrix``.
    """
    def __init__(self, eventLog, model, eventDistanceMatrix):
        """Build the prefix tree from *eventLog*.

        :param eventLog: pandas DataFrame with "Case ID", "Activity" and
            "Duration" columns; rows of the same case must be contiguous.
        :param model: feature-learning model, stored on the instance.
        :param eventDistanceMatrix: pairwise event distances consumed by
            ``getDistance`` when re-assigning pruned traces.
        """
        root = AnyNode(id='Root', name="Root", cases=set(), sequence="", annotation=dict())
        current = root
        currentCase = ""
        traceToSequenceDict = dict()
        sequence = None
        self.caseIDColName = "Case ID"
        self.activityColName = "Activity"
        self.annotationColName = "Duration"
        self.constantEventNr = "Event_Nr"
        self.annotationDataOverAll = dict()
        self.normaltest_alpha = 0.05
        self.eventDistanceMatrix = eventDistanceMatrix
        self.model = model
        for index, row in eventLog.iterrows():
            activity = row[self.activityColName]
            annotation = row[self.annotationColName]
            if row[self.caseIDColName] != currentCase:
                # A new case starts: remember the finished case's sequence.
                if not sequence is None:
                    traceToSequenceDict[currentCase] = sequence
                currentCase = row[self.caseIDColName]
                current = root
                current.cases.add(currentCase)
                sequence = ""
            childAlreadyExists = False
            sequence = sequence + "@" + activity
            # Descend into the child for this activity, creating it if absent.
            for child in current.children:
                if child.name == activity:
                    childAlreadyExists = True
                    current = child
            if not childAlreadyExists:
                node = AnyNode(id=index, name=activity, parent=current, cases=set(), sequence=sequence,
                               annotations=dict())
                current = node
            current.cases.add(currentCase)
            current.annotations[currentCase] = annotation
            self.__addAnnotation(annotation, activity)
        # Handle last case
        traceToSequenceDict[currentCase] = sequence
        self.tree = root
        self.traceToSequenceDict = traceToSequenceDict
        self.numberOfTracesOriginal = len(self.tree.cases)
        self.sequentialPrunning = True
        self.__setMaxDifferences()
    def __addAnnotation(self, annotation, activity):
        """Record one annotation value in the activity's global distribution."""
        dataForActivity = self.annotationDataOverAll.get(activity, None)
        if dataForActivity is None:
            self.annotationDataOverAll[activity] = []
            dataForActivity = self.annotationDataOverAll[activity]
        dataForActivity.append(annotation)
    def __setMaxDifferences(self):
        """Cache, per activity, the value range of its global annotation data."""
        self.annotationMaxDifferences = dict()
        for key in self.annotationDataOverAll.keys():
            maxVal = max(self.annotationDataOverAll[key])
            minVal = min(self.annotationDataOverAll[key])
            self.annotationMaxDifferences[key] = abs(maxVal - minVal)
    def __violatesTCloseness(self, activity, annotations, t, cases):
        """True if the node's annotation distribution is farther than *t* from
        the activity's global distribution (range-normalized Wasserstein)."""
        distributionActivity = self.annotationDataOverAll[activity]
        maxDifference = self.annotationMaxDifferences[activity]
        # Consider only data from cases still in node
        distributionEquivalenceClass = []
        casesInClass = cases.intersection(set(annotations.keys()))
        for caseInClass in casesInClass:
            distributionEquivalenceClass.append(annotations[caseInClass])
        if len(distributionEquivalenceClass) == 0:  # No original annotation is left in the node
            return False
        if maxDifference == 0.0:  # All annotations have the same value (most likely 0.0)
            return False
        if (wasserstein_distance(distributionActivity, distributionEquivalenceClass) / maxDifference) >= t:
            return True
        else:
            return False
    def __treePrunning(self, k, t):
        """Cut nodes violating k-anonymity or t-closeness; return the cut traces.

        With ``sequentialPrunning`` enabled, stops after the first pruned node
        so the caller can immediately re-insert its traces.
        """
        cutOutTraces = set()
        for node in PreOrderIter(self.tree):
            if node != self.tree:
                node.cases = node.cases.difference(cutOutTraces)
                if len(node.cases) < k or self.__violatesTCloseness(node.name, node.annotations, t, node.cases):
                    cutOutTraces = cutOutTraces.union(node.cases)
                    current = node.parent
                    node.parent = None
                    # Remove the cut traces from all ancestors as well.
                    while current != self.tree:
                        current.cases = current.cases.difference(cutOutTraces)
                        current = current.parent
                    if self.sequentialPrunning:
                        break
        return cutOutTraces
    def __getAllPotentialSequencesTree(self, tree, sequence):
        """Collect every "@"-joined activity sequence still present in the tree."""
        sequences = set()
        sumCasesChildren = 0
        for child in tree.children:
            sumCasesChildren = sumCasesChildren + len(child.cases)
            childSequence = sequence + "@" + child.name
            sequences = sequences.union(self.__getAllPotentialSequencesTree(child, childSequence))
        # A node with more cases than its children (or a leaf) terminates a sequence.
        if len(tree.cases) > sumCasesChildren or sumCasesChildren == 0:
            sequences.add(sequence)
        return sequences
    def __addCaseToTree(self, trace, sequence):
        """Re-attach *trace* along the existing tree path spelled by *sequence*."""
        if trace != "":
            activities = sequence.split("@")
            currentNode = self.tree
            self.tree.cases.add(trace)
            for activity in activities:
                for child in currentNode.children:
                    if child.name == activity:
                        child.cases.add(trace)
                        currentNode = child
                        break
    def __combineTracesAndTree(self, traces):
        """Assign each pruned trace to the closest surviving tree sequence."""
        # We transform the set of sequences into a list and sort it, to discretize the behaviour of the algorithm
        sequencesTree = list(self.__getAllPotentialSequencesTree(self.tree, ""))
        sequencesTree.sort()
        for trace in traces:
            bestSequence = ""
            lowestDistance = sys.maxsize
            traceSequence = self.traceToSequenceDict[trace]
            for treeSequence in sequencesTree:
                currentDistance = getDistance(traceSequence, treeSequence, self.eventDistanceMatrix)
                if currentDistance < lowestDistance:
                    bestSequence = treeSequence
                    lowestDistance = currentDistance
            self.__addCaseToTree(trace, bestSequence)
    def runPretsa(self, k, t):
        """Run the sanitization; return the set of traces that were re-assigned.

        :param k: k-anonymity threshold (minimum cases per tree node).
        :param t: t-closeness threshold for annotation distributions.
        """
        if self.sequentialPrunning:
            cutOutCases = set()
            cutOutCase = self.__treePrunning(k, t)
            # Prune and re-insert one violating node at a time until stable.
            while len(cutOutCase) > 0:
                self.__combineTracesAndTree(cutOutCase)
                cutOutCases = cutOutCases.union(cutOutCase)
                cutOutCase = self.__treePrunning(k, t)
        else:
            cutOutCases = self.__treePrunning(k, t)
            self.__combineTracesAndTree(cutOutCases)
        return cutOutCases
    def __generateNewAnnotation(self, activity):
        """Sample a synthetic annotation for *activity* from its global data."""
        # normaltest works only with more than 8 samples
        if (len(self.annotationDataOverAll[activity])) >= 8:
            stat, p = normaltest(self.annotationDataOverAll[activity])
        else:
            p = 1.0
        # NOTE(review): p <= alpha REJECTS normality, yet that branch samples a
        # fitted Gaussian while the "looks normal" branch bootstraps -- the
        # condition may be inverted; behavior kept as-is.
        if p <= self.normaltest_alpha:
            mean = np.mean(self.annotationDataOverAll[activity])
            std = np.std(self.annotationDataOverAll[activity])
            randomValue = np.random.normal(mean, std)
        else:
            randomValue = np.random.choice(self.annotationDataOverAll[activity])
        return randomValue
def getPrivatisedEventLog(self):
eventLog = pd.DataFrame()
events = []
for node in PreOrderIter(self.tree):
if node != self.tree:
for case in node.cases:
event = dict()
event[self.activityColName] = node.name
event[self.caseIDColName] = case
event[self.annotationColName] = node.annotations.get(case, self.__generateNewAnnotation(node.name))
event[self.constantEventNr] = node.depth
events.append(event)
eventLog = pd.DataFrame(events)
if not eventLog.empty:
eventLog = eventLog.sort_values(by=[self.caseIDColName, self.constantEventNr])
return eventLog | en | 0.866741 | # Handle last case # Consider only data from cases still in node # No original annotation is left in the node # All annotations have the same value(most likely= 0.0) # We transform the set of sequences into a list and sort it, to discretize the behaviour of the algorithm # normaltest works only with more than 8 samples | 2.267054 | 2 |
examples/animation/unchained.py | bsipocz/matplotlib | 1 | 6625043 | """
Comparative path demonstration of frequency from a fake signal of a pulsar.
(mostly known because of the cover for Joy Division's Unknown Pleasures)
Author: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# Figure on a black canvas.
fig = plt.figure(figsize=(8, 8), facecolor='black')
# Single frameless axes filling the figure.
ax = plt.subplot(111, frameon=False)
# 64 rows of uniform noise, 75 samples per row.
signals = np.random.uniform(0, 1, (64, 75))
xs = np.linspace(-1, 1, signals.shape[-1])
# Gaussian envelope shaping each row's amplitude.
envelope = 1.5 * np.exp(-4 * xs * xs)
# One white polyline per row, stacked bottom-up.
artists = []
for row_idx, row in enumerate(signals):
    # Shrink the x extent slightly per row for a cheap perspective effect.
    shrink = 1 - row_idx / 200.
    # Lower rows get thicker strokes.
    stroke = 1.5 - row_idx / 100.0
    artist, = ax.plot(shrink * xs, row_idx + envelope * row, color="w", lw=stroke)
    artists.append(artist)
# Leave headroom so the bottom line is not clipped by its own thickness.
ax.set_ylim(-1, 70)
# Hide all ticks.
ax.set_xticks([])
ax.set_yticks([])
# Title rendered in two pieces so each half can use a different font weight.
ax.text(0.5, 1.0, "MATPLOTLIB ", transform=ax.transAxes,
        ha="right", va="bottom", color="w",
        family="sans-serif", fontweight="light", fontsize=16)
ax.text(0.5, 1.0, "UNCHAINED", transform=ax.transAxes,
        ha="left", va="bottom", color="w",
        family="sans-serif", fontweight="bold", fontsize=16)
def update(*args):
    """Animation step: scroll every row one sample right, prepend fresh noise."""
    signals[:, 1:] = signals[:, :-1]
    signals[:, 0] = np.random.uniform(0, 1, len(signals))
    # Push the new y-data into the existing line artists.
    for row_idx, row in enumerate(signals):
        artists[row_idx].set_ydata(row_idx + envelope * row)
    return artists
# Drive the animation with `update` as the frame callback.
anim = animation.FuncAnimation(fig, update, interval=10)
plt.show()
| """
Comparative path demonstration of frequency from a fake signal of a pulsar.
(mostly known because of the cover for Joy Division's Unknown Pleasures)
Author: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# Figure on a black canvas.
fig = plt.figure(figsize=(8, 8), facecolor='black')
# Single frameless axes filling the figure.
ax = plt.subplot(111, frameon=False)
# 64 rows of uniform noise, 75 samples per row.
signals = np.random.uniform(0, 1, (64, 75))
xs = np.linspace(-1, 1, signals.shape[-1])
# Gaussian envelope shaping each row's amplitude.
envelope = 1.5 * np.exp(-4 * xs * xs)
# One white polyline per row, stacked bottom-up.
artists = []
for row_idx, row in enumerate(signals):
    # Shrink the x extent slightly per row for a cheap perspective effect.
    shrink = 1 - row_idx / 200.
    # Lower rows get thicker strokes.
    stroke = 1.5 - row_idx / 100.0
    artist, = ax.plot(shrink * xs, row_idx + envelope * row, color="w", lw=stroke)
    artists.append(artist)
# Leave headroom so the bottom line is not clipped by its own thickness.
ax.set_ylim(-1, 70)
# Hide all ticks.
ax.set_xticks([])
ax.set_yticks([])
# Title rendered in two pieces so each half can use a different font weight.
ax.text(0.5, 1.0, "MATPLOTLIB ", transform=ax.transAxes,
        ha="right", va="bottom", color="w",
        family="sans-serif", fontweight="light", fontsize=16)
ax.text(0.5, 1.0, "UNCHAINED", transform=ax.transAxes,
        ha="left", va="bottom", color="w",
        family="sans-serif", fontweight="bold", fontsize=16)
def update(*args):
    """Animation step: scroll every row one sample right, prepend fresh noise."""
    signals[:, 1:] = signals[:, :-1]
    signals[:, 0] = np.random.uniform(0, 1, len(signals))
    # Push the new y-data into the existing line artists.
    for row_idx, row in enumerate(signals):
        artists[row_idx].set_ydata(row_idx + envelope * row)
    return artists
# Drive the animation with `update` as the frame callback.
anim = animation.FuncAnimation(fig, update, interval=10)
plt.show()
| en | 0.806894 | Comparative path demonstration of frequency from a fake signal of a pulsar. (mostly known because of the cover for Joy Division's Unknown Pleasures) Author: <NAME> # Create new Figure with black background # Add a subplot with no frame # Generate random data # Generate line plots # Small reduction of the X extents to get a cheap perspective effect # Same for linewidth (thicker strokes on bottom) # Set y limit (or first line is cropped because of thickness) # No ticks # 2 part titles to get different font weights # Update function # Shift all data to the right # Fill-in new values # Update data # Return modified artists # Construct the animation, using the update function as the animation # director. | 2.737608 | 3 |
prints_a_multiplication_table_of_primes_numbers/tabulate_ext.py | mvj3/prints_a_multiplication_table_of_primes_numbers | 1 | 6625044 | # -*- coding: utf-8 -*-
__all__ = ["tabulate_prime"]
"""
Extend `tabulate` module with prime print format.
"""
import tabulate as tabulate_module
from tabulate import DataRow, Line, TableFormat
# Borderless table format: a single dashed rule under the header,
# space-separated cells and no cell padding.
prime_format = TableFormat(
    lineabove=None,
    linebelowheader=Line("", "-", "-", ""),
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", " ", ""),
    datarow=DataRow("", " ", ""),
    padding=0, with_header_hide=None
)
# Register the format so tabulate(tablefmt="prime") can find it.
tabulate_module._table_formats["prime"] = prime_format
# Keep a handle on the unmodified tabulate function.
orig_tabulate = tabulate_module.tabulate
def tabulate_prime(tabular_data):
    """Render *tabular_data* as a multiplication-table-style grid.

    The first row is the header; a "|" separator column is injected after the
    first (label) column and the header underline is joined to it with a "+".
    Only supports this one "prime table" layout.
    """
    # Inject a "|" separator column right after the first (label) column.
    tabular_data = [([row[0]] + ["|"] + row[1:]) for row in tabular_data]
    # Render with the custom "prime" format registered at module import time.
    output = orig_tabulate(tabular_data, headers="firstrow",
                           tablefmt="prime", stralign="right",)
    lines = output.split("\n")
    # Put a "+" where the header underline crosses the "|" separator.
    first_line = lines[0]
    second_line = lines[1]
    sign_idx = first_line.index("|")
    chars_in_line_2 = list(second_line)
    chars_in_line_2[sign_idx] = "+"
    lines[1] = "".join(chars_in_line_2)
    # Trim the underline to the data width (assumes the last row is the widest).
    last_line = lines[-1]
    max_width = len(last_line)
    lines[1] = lines[1][0:max_width]
    # Drop the padding characters on both sides of the separator column.
    lines = [line[0:sign_idx - 2] + line[sign_idx] + line[sign_idx + 2:]
             for line in lines]
    output = "\n".join(lines)
    return output
| # -*- coding: utf-8 -*-
__all__ = ["tabulate_prime"]
"""
Extend `tabulate` module with prime print format.
"""
import tabulate as tabulate_module
from tabulate import DataRow, Line, TableFormat
# Borderless table format: a single dashed rule under the header,
# space-separated cells and no cell padding.
prime_format = TableFormat(
    lineabove=None,
    linebelowheader=Line("", "-", "-", ""),
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", " ", ""),
    datarow=DataRow("", " ", ""),
    padding=0, with_header_hide=None
)
# Register the format so tabulate(tablefmt="prime") can find it.
tabulate_module._table_formats["prime"] = prime_format
# Keep a handle on the unmodified tabulate function.
orig_tabulate = tabulate_module.tabulate
def tabulate_prime(tabular_data):
    """Render *tabular_data* as a multiplication-table-style grid.

    The first row is the header; a "|" separator column is injected after the
    first (label) column and the header underline is joined to it with a "+".
    Only supports this one "prime table" layout.
    """
    # Inject a "|" separator column right after the first (label) column.
    tabular_data = [([row[0]] + ["|"] + row[1:]) for row in tabular_data]
    # Render with the custom "prime" format registered at module import time.
    output = orig_tabulate(tabular_data, headers="firstrow",
                           tablefmt="prime", stralign="right",)
    lines = output.split("\n")
    # Put a "+" where the header underline crosses the "|" separator.
    first_line = lines[0]
    second_line = lines[1]
    sign_idx = first_line.index("|")
    chars_in_line_2 = list(second_line)
    chars_in_line_2[sign_idx] = "+"
    lines[1] = "".join(chars_in_line_2)
    # Trim the underline to the data width (assumes the last row is the widest).
    last_line = lines[-1]
    max_width = len(last_line)
    lines[1] = lines[1][0:max_width]
    # Drop the padding characters on both sides of the separator column.
    lines = [line[0:sign_idx - 2] + line[sign_idx] + line[sign_idx + 2:]
             for line in lines]
    output = "\n".join(lines)
    return output
| en | 0.790497 | # -*- coding: utf-8 -*- Extend `tabulate` module with prime print format. This `tabulate_prime` function only support prime table requirement, just as ETL stuffs. # treat the second column as normal values. # print table as customized format. # add "+" sign to horizontal line row. # align the second horizontal line row. # remote the column after "+" sign | 3.295394 | 3 |
nodes/GCSTools.py | anqixu/sightedturtlesim | 1 | 6625045 | #!/usr/bin/env python
from math import *
M_PI = pi  # C-style alias for math.pi
EARTH_RADIUS_M = 6367500.0  # spherical-Earth radius used by all conversions [m]
DEG_TO_RAD = M_PI/180.0
RAD_TO_DEG = 180.0/M_PI
INVALID_DEGREE_VALUE = 361.0  # sentinel: no valid angle reaches 361 degrees
BTC40_WIDTH = 752  # NOTE(review): BTC40 image dimensions, unused in this module -- confirm consumer
BTC40_HEIGHT = 582
NAN = float('nan')
def EarthMeter2Deg(meter):
  """Convert a great-circle distance in meters to degrees of arc on a spherical Earth."""
  return meter / EARTH_RADIUS_M * RAD_TO_DEG
def EarthDeg2Meter(deg):
  """Convert degrees of arc on a spherical Earth to a great-circle distance in meters."""
  return deg * DEG_TO_RAD * EARTH_RADIUS_M
def Bearing2GCS(startLatDeg, startLonDeg, distMeter, absHeadingDeg):
  """Project a start lat/lon [deg] along a heading [deg] for a distance [m].

  Returns (destLatDeg, destLonDeg). A negative distance is folded into the
  opposite heading so the projection is always forward.
  """
  if distMeter < 0:
    distMeter = -distMeter
    absHeadingDeg = absHeadingDeg + 180.0
  # Angular distance subtended on the sphere.
  DOverR = distMeter / EARTH_RADIUS_M
  startLatRad = startLatDeg * DEG_TO_RAD
  absHeadingRad = absHeadingDeg * DEG_TO_RAD
  # Great-circle "destination point" formulas.
  destLatRad = asin(sin(startLatRad)*cos(DOverR) + \
    cos(startLatRad)*sin(DOverR)*cos(absHeadingRad))
  destLonDeg = startLonDeg + \
    atan2(sin(absHeadingRad)*sin(DOverR)*cos(startLatRad), \
    cos(DOverR) - sin(startLatRad)*sin(destLatRad)) * RAD_TO_DEG
  destLatDeg = destLatRad * RAD_TO_DEG
  return (destLatDeg, destLonDeg)
def Meter2GCS(startLatDeg, startLonDeg, xMeter, yMeter):
  """Offset a lat/lon by a local Cartesian (x, y) displacement in meters.

  Converts the offset to range/bearing and delegates to Bearing2GCS.
  NOTE(review): heading = atan2(x, -y), i.e. +x reads as east and +y as a
  screen-style "down" axis -- confirm against callers before relying on it.
  """
  return Bearing2GCS(startLatDeg, startLonDeg, \
    sqrt(xMeter*xMeter+yMeter*yMeter), atan2(xMeter, -yMeter)*RAD_TO_DEG)
def GCS2Bearing(startLatDeg, startLonDeg, currLatDeg, currLonDeg):
  """Return (distMeter, absHeadingDeg) from one lat/lon [deg] to another.

  Distance is the haversine great-circle distance on a spherical Earth;
  heading is the initial great-circle bearing in degrees.
  """
  startLatRad = startLatDeg * DEG_TO_RAD
  currLatRad = currLatDeg * DEG_TO_RAD
  deltaLonRad = (currLonDeg - startLonDeg) * DEG_TO_RAD
  sinDeltaLatHalf = sin((currLatRad - startLatRad)/2)
  sinDeltaLonHalf = sin(deltaLonRad/2)
  # Haversine term.
  a = sinDeltaLatHalf*sinDeltaLatHalf + \
    cos(startLatRad)*cos(currLatRad)*sinDeltaLonHalf*sinDeltaLonHalf
  distMeter = 2*atan2(sqrt(a), sqrt(1-a)) * EARTH_RADIUS_M
  absHeadingDeg = atan2(sin(deltaLonRad)*cos(currLatRad), \
    cos(startLatRad)*sin(currLatRad) - \
    sin(startLatRad)*cos(currLatRad)*cos(deltaLonRad)) * RAD_TO_DEG
  return (distMeter, absHeadingDeg)
def GCS2Meter(startLatDeg, startLonDeg, currLatDeg, currLonDeg):
  """Return the local Cartesian (x, y) displacement [m] between two lat/lon points.

  Inverse of Meter2GCS: x = d*sin(heading), y = -d*cos(heading).
  """
  bearing = GCS2Bearing(startLatDeg, startLonDeg, currLatDeg, currLonDeg)
  distMeter = bearing[0]
  absHeadingRad = bearing[1] * DEG_TO_RAD
  return (distMeter * sin(absHeadingRad), -distMeter * cos(absHeadingRad))
def getCoordTransform(orientationDeg, pitchDeg, rollDeg, azimuthDeg, elevationDeg):
  """Build a flattened 3x3 transform (row-major list of 9 floats) from vehicle
  attitude (orientation/pitch/roll) and camera pan/tilt (azimuth/elevation),
  all in degrees. The result appears to feed getGroundProjection's T argument
  -- NOTE(review): confirm at the call sites."""
  result = []
  # NOTE: the following simplifications were obtained using MATLAB's symbolic math library
  co = cos(orientationDeg * DEG_TO_RAD)
  so = sin(orientationDeg * DEG_TO_RAD)
  cp = cos(pitchDeg * DEG_TO_RAD)
  sp = sin(pitchDeg * DEG_TO_RAD)
  cr = cos(rollDeg * DEG_TO_RAD)
  sr = sin(rollDeg * DEG_TO_RAD)
  ca = cos(azimuthDeg * DEG_TO_RAD)
  sa = sin(azimuthDeg * DEG_TO_RAD)
  ce = cos(elevationDeg * DEG_TO_RAD)
  se = sin(elevationDeg * DEG_TO_RAD)
  result.append(ca*(co*cr + so*sp*sr) - cp*sa*so)
  result.append(ce*(sa*(co*cr + so*sp*sr) + ca*cp*so) - se*(co*sr - cr*so*sp))
  result.append(ce*(co*sr - cr*so*sp) + se*(sa*(co*cr + so*sp*sr) + ca*cp*so))
  result.append(-ca*(cr*so - co*sp*sr) - co*cp*sa)
  result.append(se*(so*sr + co*cr*sp) - ce*(sa*(cr*so - co*sp*sr) - ca*co*cp))
  result.append(-ce*(so*sr + co*cr*sp) - se*(sa*(cr*so - co*sp*sr) - ca*co*cp))
  result.append(-sa*sp - ca*cp*sr)
  result.append(ce*(ca*sp - cp*sa*sr) - cp*cr*se)
  result.append(se*(ca*sp - cp*sa*sr) + ce*cp*cr)
  return result
def getGroundProjection(T, iPx, jPx, wPx, hPx, horizFOVDeg, altitudeM, aspectRatio = 0):
  """Project pixel (iPx, jPx) of a wPx x hPx image onto the ground plane.

  T is a 9-element flattened transform (see getCoordTransform), horizFOVDeg
  the horizontal field of view and altitudeM the height above ground.
  Returns the ground intersection (xM, yM) in meters, or (NAN, NAN) when the
  viewing ray does not reach the ground. aspectRatio <= 0 means derive it
  from wPx/hPx.
  """
  # NOTE: the following simplifications were obtained using MATLAB's symbolic math library
  half_width = tan(horizFOVDeg/2/180*M_PI)
  if aspectRatio <= 0:
    aspectRatio = wPx/hPx
  half_height = half_width/aspectRatio
  # Normalized image-plane coordinates in [-half, +half].
  iM = (2*iPx/wPx - 1)*half_width
  jM = (1 - 2*jPx/hPx)*half_height
  denom = T[7] + T[6]*iM + T[8]*jM
  if denom >= 0: # the Z coordinate of the transformed line points upwards from the UAV
    return (NAN, NAN)
  xM = -(altitudeM*(T[1] + T[0]*iM + T[2]*jM))/denom
  yM = -(altitudeM*(T[4] + T[3]*iM + T[5]*jM))/denom
  return (xM, yM)
| #!/usr/bin/env python
from math import *
M_PI = pi  # C-style alias for math.pi
EARTH_RADIUS_M = 6367500.0  # spherical-Earth radius used by all conversions [m]
DEG_TO_RAD = M_PI/180.0
RAD_TO_DEG = 180.0/M_PI
INVALID_DEGREE_VALUE = 361.0  # sentinel: no valid angle reaches 361 degrees
BTC40_WIDTH = 752  # NOTE(review): BTC40 image dimensions, unused in this module -- confirm consumer
BTC40_HEIGHT = 582
NAN = float('nan')
def EarthMeter2Deg(meter):
  """Convert a great-circle distance in meters to degrees of arc on a spherical Earth."""
  return meter / EARTH_RADIUS_M * RAD_TO_DEG
def EarthDeg2Meter(deg):
  """Convert degrees of arc on a spherical Earth to a great-circle distance in meters."""
  return deg * DEG_TO_RAD * EARTH_RADIUS_M
def Bearing2GCS(startLatDeg, startLonDeg, distMeter, absHeadingDeg):
  """Project a start lat/lon [deg] along a heading [deg] for a distance [m].

  Returns (destLatDeg, destLonDeg). A negative distance is folded into the
  opposite heading so the projection is always forward.
  """
  if distMeter < 0:
    distMeter = -distMeter
    absHeadingDeg = absHeadingDeg + 180.0
  # Angular distance subtended on the sphere.
  DOverR = distMeter / EARTH_RADIUS_M
  startLatRad = startLatDeg * DEG_TO_RAD
  absHeadingRad = absHeadingDeg * DEG_TO_RAD
  # Great-circle "destination point" formulas.
  destLatRad = asin(sin(startLatRad)*cos(DOverR) + \
    cos(startLatRad)*sin(DOverR)*cos(absHeadingRad))
  destLonDeg = startLonDeg + \
    atan2(sin(absHeadingRad)*sin(DOverR)*cos(startLatRad), \
    cos(DOverR) - sin(startLatRad)*sin(destLatRad)) * RAD_TO_DEG
  destLatDeg = destLatRad * RAD_TO_DEG
  return (destLatDeg, destLonDeg)
def Meter2GCS(startLatDeg, startLonDeg, xMeter, yMeter):
  """Offset a lat/lon by a local Cartesian (x, y) displacement in meters.

  Converts the offset to range/bearing and delegates to Bearing2GCS.
  NOTE(review): heading = atan2(x, -y), i.e. +x reads as east and +y as a
  screen-style "down" axis -- confirm against callers before relying on it.
  """
  return Bearing2GCS(startLatDeg, startLonDeg, \
    sqrt(xMeter*xMeter+yMeter*yMeter), atan2(xMeter, -yMeter)*RAD_TO_DEG)
def GCS2Bearing(startLatDeg, startLonDeg, currLatDeg, currLonDeg):
  """Return (distMeter, absHeadingDeg) from one lat/lon [deg] to another.

  Distance is the haversine great-circle distance on a spherical Earth;
  heading is the initial great-circle bearing in degrees.
  """
  startLatRad = startLatDeg * DEG_TO_RAD
  currLatRad = currLatDeg * DEG_TO_RAD
  deltaLonRad = (currLonDeg - startLonDeg) * DEG_TO_RAD
  sinDeltaLatHalf = sin((currLatRad - startLatRad)/2)
  sinDeltaLonHalf = sin(deltaLonRad/2)
  # Haversine term.
  a = sinDeltaLatHalf*sinDeltaLatHalf + \
    cos(startLatRad)*cos(currLatRad)*sinDeltaLonHalf*sinDeltaLonHalf
  distMeter = 2*atan2(sqrt(a), sqrt(1-a)) * EARTH_RADIUS_M
  absHeadingDeg = atan2(sin(deltaLonRad)*cos(currLatRad), \
    cos(startLatRad)*sin(currLatRad) - \
    sin(startLatRad)*cos(currLatRad)*cos(deltaLonRad)) * RAD_TO_DEG
  return (distMeter, absHeadingDeg)
def GCS2Meter(startLatDeg, startLonDeg, currLatDeg, currLonDeg):
  """Return the local Cartesian (x, y) displacement [m] between two lat/lon points.

  Inverse of Meter2GCS: x = d*sin(heading), y = -d*cos(heading).
  """
  bearing = GCS2Bearing(startLatDeg, startLonDeg, currLatDeg, currLonDeg)
  distMeter = bearing[0]
  absHeadingRad = bearing[1] * DEG_TO_RAD
  return (distMeter * sin(absHeadingRad), -distMeter * cos(absHeadingRad))
def getCoordTransform(orientationDeg, pitchDeg, rollDeg, azimuthDeg, elevationDeg):
  """Build a flattened 3x3 transform (row-major list of 9 floats) from vehicle
  attitude (orientation/pitch/roll) and camera pan/tilt (azimuth/elevation),
  all in degrees. The result appears to feed getGroundProjection's T argument
  -- NOTE(review): confirm at the call sites."""
  result = []
  # NOTE: the following simplifications were obtained using MATLAB's symbolic math library
  co = cos(orientationDeg * DEG_TO_RAD)
  so = sin(orientationDeg * DEG_TO_RAD)
  cp = cos(pitchDeg * DEG_TO_RAD)
  sp = sin(pitchDeg * DEG_TO_RAD)
  cr = cos(rollDeg * DEG_TO_RAD)
  sr = sin(rollDeg * DEG_TO_RAD)
  ca = cos(azimuthDeg * DEG_TO_RAD)
  sa = sin(azimuthDeg * DEG_TO_RAD)
  ce = cos(elevationDeg * DEG_TO_RAD)
  se = sin(elevationDeg * DEG_TO_RAD)
  result.append(ca*(co*cr + so*sp*sr) - cp*sa*so)
  result.append(ce*(sa*(co*cr + so*sp*sr) + ca*cp*so) - se*(co*sr - cr*so*sp))
  result.append(ce*(co*sr - cr*so*sp) + se*(sa*(co*cr + so*sp*sr) + ca*cp*so))
  result.append(-ca*(cr*so - co*sp*sr) - co*cp*sa)
  result.append(se*(so*sr + co*cr*sp) - ce*(sa*(cr*so - co*sp*sr) - ca*co*cp))
  result.append(-ce*(so*sr + co*cr*sp) - se*(sa*(cr*so - co*sp*sr) - ca*co*cp))
  result.append(-sa*sp - ca*cp*sr)
  result.append(ce*(ca*sp - cp*sa*sr) - cp*cr*se)
  result.append(se*(ca*sp - cp*sa*sr) + ce*cp*cr)
  return result
def getGroundProjection(T, iPx, jPx, wPx, hPx, horizFOVDeg, altitudeM, aspectRatio = 0):
    """Project pixel (iPx, jPx) of a wPx x hPx image onto the ground plane.

    :param T: flat 9-element transform from getCoordTransform
    :param horizFOVDeg: horizontal field of view of the camera, degrees
    :param altitudeM: altitude above the ground plane, meters
    :param aspectRatio: optional override; <= 0 means "derive from wPx/hPx"
    :return: ground offsets ``(xM, yM)`` in meters, or ``(NAN, NAN)`` when
        the ray through the pixel does not hit the ground.
    """
    # NOTE: the following simplifications were obtained using MATLAB's symbolic math library
    hw = tan(horizFOVDeg/2/180*M_PI)
    ratio = aspectRatio if aspectRatio > 0 else wPx/hPx
    hh = hw/ratio
    # Normalized image-plane coordinates of the pixel (centered, +v upwards).
    u = (2*iPx/wPx - 1)*hw
    v = (1 - 2*jPx/hPx)*hh
    denom = T[7] + T[6]*u + T[8]*v
    if denom >= 0:  # the Z coordinate of the transformed line points upwards from the UAV
        return (NAN, NAN)
    return (
        -(altitudeM*(T[1] + T[0]*u + T[2]*v))/denom,
        -(altitudeM*(T[4] + T[3]*u + T[5]*v))/denom,
    )
| en | 0.915507 | #!/usr/bin/env python # NOTE: the following simplifications were obtained using MATLAB's symbolic math library # NOTE: the following simplifications were obtained using MATLAB's symbolic math library # the Z coordinate of the transformed line points upwards from the UAV | 2.834785 | 3 |
aiida/tools/graph/age_rules.py | louisponet/aiida-core | 0 | 6625046 | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Rules for the AiiDA Graph Explorer utility"""
from abc import ABCMeta, abstractmethod
from collections import defaultdict
import numpy as np
from aiida import orm
from aiida.tools.graph.age_entities import Basket
from aiida.common.lang import type_check
class Operation(metaclass=ABCMeta):
    """Abstract base class shared by all AGE explorer operations."""

    def __init__(self, max_iterations, track_edges):
        """Initialization method

        :param max_iterations: maximum number of iterations to perform.
        :param bool track_edges: if True, will also track and return the edges traversed.
        """
        self.set_max_iterations(max_iterations)
        self._track_edges = track_edges
        self._iterations_done = None

    def set_max_iterations(self, max_iterations):
        """Set the iteration cap; an int or np.inf for unbounded rules."""
        is_acceptable = isinstance(max_iterations, int) or max_iterations is np.inf
        if not is_acceptable:
            raise TypeError('max_iterations has to be an integer or np.inf')
        self._max_iterations = max_iterations

    @property
    def iterations_done(self):
        """Number of iterations performed by the last run (None before any run)."""
        return self._iterations_done

    @abstractmethod
    def run(self, operational_set):
        """
        Takes the operational_set and overwrites it with the set of nodes that results
        from applying the rule (this might or not include the initial set of nodes as
        well depending on the rule).

        :type operational_set: :py:class:`aiida.tools.graph.age_entities.Basket`
        :param operational_set: initial set of nodes to be overwritten by the rule.
        """
class QueryRule(Operation, metaclass=ABCMeta):
    """Parent class for every rule that implements a query.

    QueryRules take a generic QueryBuilder instance and a set of starting nodes and then
    perform successive iterations of that query, each one from the set of nodes that the
    previous one found. Depending on the class of rule used the final result will be
    either the whole set of nodes traversed (UpdateRule), or only the final set of nodes
    found in the last iteration of the query (ReplaceRule).
    """

    def __init__(self, querybuilder, max_iterations=1, track_edges=False):
        """Initialization method

        :param querybuilder: an instance of the QueryBuilder class from which to take the
            procedure for traversal
        :param int max_iterations: the number of iterations to run this query on (must be
            a finite number for ReplaceRules)
        :param bool track_edges: whether to track which edges are traversed and store them
        :raises ValueError: if the querybuilder carries any projection of its own.
        """
        super().__init__(max_iterations, track_edges=track_edges)

        def get_spec_from_path(queryhelp, idx):
            # Classify the entity at position `idx` of the query path as either
            # 'nodes' or 'groups' (the two entity families the baskets know).
            from aiida.orm.querybuilder import GROUP_ENTITY_TYPE_PREFIX
            if (
                queryhelp['path'][idx]['entity_type'].startswith('node') or
                queryhelp['path'][idx]['entity_type'].startswith('data') or
                queryhelp['path'][idx]['entity_type'].startswith('process') or
                queryhelp['path'][idx]['entity_type'] == ''
            ):
                result = 'nodes'
            elif queryhelp['path'][idx]['entity_type'].startswith(GROUP_ENTITY_TYPE_PREFIX):
                result = 'groups'
            else:
                raise Exception(f"not understood entity from ( {queryhelp['path'][idx]['entity_type']} )")
            return result

        queryhelp = querybuilder.queryhelp

        # Check if there is any projection: the rule adds its own projections
        # in _init_run, so the incoming builder must carry none.
        query_projections = queryhelp['project']
        for projection_key in query_projections:
            if query_projections[projection_key] != []:
                raise ValueError(
                    'The input querybuilder must not have any projections.\n'
                    'Instead, it has the following:\n - Key: {}\n - Val: {}\n'.format(
                        projection_key, query_projections[projection_key]
                    )
                )

        # An empty entity type means "any node"; make that explicit.
        for pathspec in queryhelp['path']:
            if not pathspec['entity_type']:
                pathspec['entity_type'] = 'node.Node.'

        self._qbtemplate = orm.QueryBuilder(**queryhelp)
        queryhelp = self._qbtemplate.queryhelp
        self._first_tag = queryhelp['path'][0]['tag']
        self._last_tag = queryhelp['path'][-1]['tag']
        self._querybuilder = None

        # All of these are set in _init_run:
        self._edge_label = None
        self._edge_keys = None
        self._entity_to_identifier = None

        self._entity_from = get_spec_from_path(queryhelp, 0)
        self._entity_to = get_spec_from_path(queryhelp, -1)
        self._accumulator_set = None

    def set_edge_keys(self, edge_keys):
        """
        Set the edge keys that are use to classify the edges during the run of this query.

        :param edge_keys:
            a list of projections on the edge itself, or a tuple that specifies
            (tag, project) if the projection is not on the edge

        Example: For node-to-node graph traversals, it is often convenient to save
        the information on the links::

            qb = QueryBuilder().append(Node, tag='n1').append(Node, tag='n2')
            rule = RuleSequence(qb, track_edges=True)
            rule.set_edge_keys(['input_id', 'output_id', 'label', 'type'])

            # Now for UUIDS:
            qb = QueryBuilder().append(Node, tag='n1').append(Node, tag='n2')
            rule = RuleSequence(qb, track_edges=True)
            rule.set_edge_keys([('n1','uuid'), ('n2','uuid'), 'label', 'type'])
        """
        # Shallow copy so later mutation of the caller's list has no effect here.
        self._edge_keys = edge_keys[:]

    def _init_run(self, operational_set):
        """Initialization Utility method

        This method initializes a run. It initializes the accumulator_set in order
        for it to only contain the operational_set, and to be of the same kind.
        It also modifies its QueryBuilder instance to give the right results.

        :param operational_set: input with which to initialize the accumulator_set.
        """
        type_check(operational_set, Basket)
        # Reuse an accumulator injected via set_accumulator, if any; otherwise
        # start from a fresh copy of the walkers.
        if self._accumulator_set is not None:
            type_check(self._accumulator_set, Basket)
            self._accumulator_set.empty()
            self._accumulator_set += operational_set
        else:
            self._accumulator_set = operational_set.copy()

        # Copying qbtemplate so there's no problem if it is used again in a later run:
        queryhelp = self._qbtemplate.queryhelp
        self._querybuilder = orm.QueryBuilder(**queryhelp)

        self._entity_to_identifier = operational_set[self._entity_to].identifier

        # Now I add the necessary projections, which is the identifier of the
        # last entity of the QueryBuilder path:
        self._querybuilder.add_projection(self._last_tag, self._entity_to_identifier)

        if self._track_edges:
            # This requires additional projections and the edge_keys, which is a list of tuples (of length 2)
            # that stores the information what I need to project as well, as in (tag, projection)
            projections = defaultdict(list)
            self._edge_keys = []
            self._edge_label = queryhelp['path'][-1]['edge_tag']
            # Need to get the edge_set: This is given by entity1_entity2. Here, the results needs to
            # be sorted somehow in order to ensure that the same key is used when entity_from and
            # entity_to are exchanged.
            edge_set = operational_set.dict['{}_{}'.format(*sorted((self._entity_from, self._entity_to)))]
            # Looping over the edge identifiers to figure out what I need to project and in which
            # order. The order is important! The rules:
            #   r1 = Rule(QueryBuilder().append(Group).append(Node, with_group=Group) and
            #   r2 = Rule(QueryBuilder().append(Node).append(Group, with_node=Node)
            # need still to save their results in the same order (i.e. group_id, node_id).
            # Therefore, I am sorting the edge_keys according to edge_identifiers specified in the edge_set
            for tag, projection in edge_set.edge_identifiers:
                if tag == 'edge':
                    actual_tag = self._edge_label
                elif tag == self._entity_from:
                    actual_tag = self._first_tag
                elif tag == self._entity_to:
                    actual_tag = self._last_tag
                else:
                    # For now I can only specify edge_identifiers as 'edge', ie. project on the edge
                    # itself, or by the entity_from, entity_to keyword, ie. groups or nodes.
                    # One could think of other keywords...
                    raise ValueError(f'This tag ({tag}) is not known')
                self._edge_keys.append((actual_tag, projection))
                projections[actual_tag].append(projection)

            # Telling the QB about the additional projections:
            for proj_tag, projectionlist in projections.items():
                try:
                    self._querybuilder.add_projection(proj_tag, projectionlist)
                except (TypeError, ValueError) as exc:
                    raise KeyError('The projection for the edge-identifier is invalid.\n') from exc

    def _load_results(self, target_set, operational_set):
        """Single application of the rules to the operational set

        :param target_set:
            where the new results will be loaded (it will be first emptied of all previous content).
            There is no returned value for this method.
        :param operational_set: where the results originate from (walkers)
        """
        primkeys = operational_set[self._entity_from].keyset
        target_set.empty()
        if primkeys:
            self._querybuilder.add_filter(
                self._first_tag, {operational_set[self._entity_from].identifier: {
                    'in': primkeys
                }}
            )
            qres = self._querybuilder.dict()
            # These are the new results returned by the query
            target_set[self._entity_to].add_entities([
                item[self._last_tag][self._entity_to_identifier] for item in qres
            ])
            if self._track_edges:
                # As in _init_run, I need the key for the edge_set
                edge_key = '{}_{}'.format(*sorted((self._entity_from, self._entity_to)))
                edge_set = operational_set.dict[edge_key]
                namedtuple_ = edge_set.edge_namedtuple
                target_set[edge_key].add_entities([
                    namedtuple_(*(item[key1][key2] for (key1, key2) in self._edge_keys)) for item in qres
                ])

    def set_accumulator(self, accumulator_set):
        """Inject an external accumulator set to be (re)used by subsequent runs."""
        self._accumulator_set = accumulator_set

    def empty_accumulator(self):
        """Empty the accumulator set, if one is attached."""
        if self._accumulator_set is not None:
            self._accumulator_set.empty()

    # Pylint complains if this is not here, but should be removed asap
    def run(self, operational_set):
        pass
class UpdateRule(QueryRule):
    """Accumulating query rule.

    Every entity visited is collected into a growing set (hence without
    duplication).  Works with either a finite iteration cap or np.inf, in
    which case it stops as soon as an iteration discovers nothing new.
    """

    def run(self, operational_set):
        self._init_run(operational_set)
        self._iterations_done = 0
        batch = operational_set.get_template()
        # The walkers for the next round are only the freshly found entities
        # not yet in the accumulator, so no node is queried from twice and the
        # loop ends once a round yields nothing new.
        while operational_set and self._iterations_done < self._max_iterations:
            self._iterations_done += 1
            self._load_results(batch, operational_set)
            operational_set = batch - self._accumulator_set
            self._accumulator_set += batch
        return self._accumulator_set.copy()
class ReplaceRule(QueryRule):
    """Non-accumulating query rule.

    Each iteration replaces the operational set with the freshly queried
    results, without remembering what was visited before.  Because of that,
    only a finite number of iterations is allowed: on a cyclic graph an
    unbounded ReplaceRule would never terminate.
    """

    def __init__(self, querybuilder, max_iterations=1, track_edges=False):
        """Initialize, rejecting an unbounded iteration count.

        :raises ValueError: if max_iterations is np.inf.
        """
        if max_iterations == np.inf:
            raise ValueError('You cannot have max_iterations to be infinitely large for replace rules')
        super().__init__(querybuilder, max_iterations=max_iterations, track_edges=track_edges)

    def run(self, operational_set):
        self._init_run(operational_set)
        self._iterations_done = 0
        fresh = operational_set.get_template()
        # Each pass discards the previous walkers entirely, even nodes that
        # were already visited.
        while operational_set and self._iterations_done < self._max_iterations:
            self._iterations_done += 1
            self._load_results(fresh, operational_set)
            operational_set = fresh
        return operational_set.copy()
class RuleSaveWalkers(Operation):
    """Stash the current walkers.

    Keeps a pointer to an external stash variable; on run the stash is
    cleared and refilled with the given operational_set, which is then
    passed through unchanged.
    """

    def __init__(self, stash):
        """Initialization method

        :param stash: external variable in which to save the operational_set
        """
        super().__init__(max_iterations=1, track_edges=True)
        self._stash = stash

    def run(self, operational_set):
        self._stash.empty()
        self._stash += operational_set
        return operational_set
class RuleSetWalkers(Operation):
    """Restore the walkers from a stash.

    Keeps a pointer to an external stash variable; on run the given
    operational_set is emptied and refilled from the stash.
    """

    def __init__(self, stash):
        """Initialization method

        :param stash: external variable from which to load into the operational_set
        """
        super().__init__(max_iterations=1, track_edges=True)
        self._stash = stash

    def run(self, operational_set):
        operational_set.empty()
        operational_set += self._stash
        return operational_set
class RuleSequence(Operation):
    """Rule for concatenation

    Rule Sequence is used to concatenate a series of rules together.
    Concatenating querybuilders in a single rule is not enough because
    one might want to stash results to perform two independent operations
    in the starting set instead of a second operation from the results of
    the first (see RuleSetWalkers and RuleSaveWalkers).
    """

    def __init__(self, rules, max_iterations=1):
        """Initialization method

        :param rules: ordered iterable of Operation instances to apply in sequence.
        :param max_iterations: maximum number of passes over the whole sequence.
        :raises TypeError: if any element of rules is not an Operation.
        """
        for rule in rules:
            if not isinstance(rule, Operation):
                raise TypeError('rule has to be an instance of Operation-subclass')
        self._rules = rules
        self._accumulator_set = None
        self._visits_set = None
        super().__init__(max_iterations, track_edges=False)

    def run(self, operational_set):
        """Apply the rule sequence repeatedly until exhaustion or the iteration cap.

        :param operational_set: initial set of walkers (a Basket).
        :return: a copy of the set of all entities visited by any rule.
        """
        type_check(operational_set, Basket)
        # (Re)initialize the accumulator with only the starting set.
        if self._accumulator_set is not None:
            type_check(self._accumulator_set, Basket)
            self._accumulator_set.empty()
            self._accumulator_set += operational_set
        else:
            self._accumulator_set = operational_set.copy()
        # (Re)initialize the visits set likewise.
        if self._visits_set is not None:
            type_check(self._visits_set, Basket)
            self._visits_set.empty()
            self._visits_set += operational_set
        else:
            self._visits_set = operational_set.copy()
        new_results = operational_set.get_template()
        self._iterations_done = 0
        while (operational_set and self._iterations_done < self._max_iterations):
            self._iterations_done += 1
            new_results.empty()
            # Feed the operational_set through every rule in order; each rule's
            # output becomes the next rule's input.  (The index produced by the
            # previous enumerate() was unused, so it was dropped.)
            for rule in self._rules:
                operational_set = rule.run(operational_set)
                new_results += operational_set
                self._visits_set += operational_set
            # I set the operational set to all results that have not been visited yet.
            operational_set = new_results - self._accumulator_set
            self._accumulator_set += new_results
        return self._visits_set.copy()

    def set_accumulator(self, accumulator_set):
        """Set the accumulator set"""
        self._accumulator_set = accumulator_set

    def empty_accumulator(self):
        """Empties the accumulator set"""
        if self._accumulator_set is not None:
            self._accumulator_set.empty()

    def set_visits(self, visits_set):
        """Set the visits set"""
        self._visits_set = visits_set

    def empty_visits(self):
        """Empties the visits set"""
        if self._visits_set is not None:
            self._visits_set.empty()
| # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Rules for the AiiDA Graph Explorer utility"""
from abc import ABCMeta, abstractmethod
from collections import defaultdict
import numpy as np
from aiida import orm
from aiida.tools.graph.age_entities import Basket
from aiida.common.lang import type_check
class Operation(metaclass=ABCMeta):
    """Base class for all AGE explorer classes"""

    def __init__(self, max_iterations, track_edges):
        """Initialization method

        :param max_iterations: maximum number of iterations to perform.
        :param bool track_edges: if True, will also track and return the edges traversed.
        """
        self.set_max_iterations(max_iterations)
        self._track_edges = track_edges
        # None until the first run; concrete rules update it while running.
        self._iterations_done = None

    def set_max_iterations(self, max_iterations):
        """Sets the max iterations

        :param max_iterations: an int, or np.inf for unbounded rules.
        :raises TypeError: if the value is neither an integer nor np.inf.
        """
        if not (isinstance(max_iterations, int) or max_iterations is np.inf):
            raise TypeError('max_iterations has to be an integer or np.inf')
        self._max_iterations = max_iterations

    @property
    def iterations_done(self):
        """Number of iterations performed"""
        return self._iterations_done

    @abstractmethod
    def run(self, operational_set):
        """
        Takes the operational_set and overwrites it with the set of nodes that results
        from applying the rule (this might or not include the initial set of nodes as
        well depending on the rule).

        :type operational_set: :py:class:`aiida.tools.graph.age_entities.Basket`
        :param operational_set: initial set of nodes to be overwritten by the rule.
        """
class QueryRule(Operation, metaclass=ABCMeta):
    """Parent class for every rule that implements a query.

    QueryRules take a generic QueryBuilder instance and a set of starting nodes and then
    perform successive iterations of that query, each one from the set of nodes that the
    previous one found. Depending on the class of rule used the final result will be
    either the whole set of nodes traversed (UpdateRule), or only the final set of nodes
    found in the last iteration of the query (ReplaceRule).
    """

    def __init__(self, querybuilder, max_iterations=1, track_edges=False):
        """Initialization method

        :param querybuilder: an instance of the QueryBuilder class from which to take the
            procedure for traversal
        :param int max_iterations: the number of iterations to run this query on (must be
            a finite number for ReplaceRules)
        :param bool track_edges: whether to track which edges are traversed and store them
        :raises ValueError: if the querybuilder carries any projection of its own.
        """
        super().__init__(max_iterations, track_edges=track_edges)

        def get_spec_from_path(queryhelp, idx):
            # Classify the entity at position `idx` of the query path as either
            # 'nodes' or 'groups' (the two entity families the baskets know).
            from aiida.orm.querybuilder import GROUP_ENTITY_TYPE_PREFIX
            if (
                queryhelp['path'][idx]['entity_type'].startswith('node') or
                queryhelp['path'][idx]['entity_type'].startswith('data') or
                queryhelp['path'][idx]['entity_type'].startswith('process') or
                queryhelp['path'][idx]['entity_type'] == ''
            ):
                result = 'nodes'
            elif queryhelp['path'][idx]['entity_type'].startswith(GROUP_ENTITY_TYPE_PREFIX):
                result = 'groups'
            else:
                raise Exception(f"not understood entity from ( {queryhelp['path'][idx]['entity_type']} )")
            return result

        queryhelp = querybuilder.queryhelp

        # Check if there is any projection: the rule adds its own projections
        # in _init_run, so the incoming builder must carry none.
        query_projections = queryhelp['project']
        for projection_key in query_projections:
            if query_projections[projection_key] != []:
                raise ValueError(
                    'The input querybuilder must not have any projections.\n'
                    'Instead, it has the following:\n - Key: {}\n - Val: {}\n'.format(
                        projection_key, query_projections[projection_key]
                    )
                )

        # An empty entity type means "any node"; make that explicit.
        for pathspec in queryhelp['path']:
            if not pathspec['entity_type']:
                pathspec['entity_type'] = 'node.Node.'

        self._qbtemplate = orm.QueryBuilder(**queryhelp)
        queryhelp = self._qbtemplate.queryhelp
        self._first_tag = queryhelp['path'][0]['tag']
        self._last_tag = queryhelp['path'][-1]['tag']
        self._querybuilder = None

        # All of these are set in _init_run:
        self._edge_label = None
        self._edge_keys = None
        self._entity_to_identifier = None

        self._entity_from = get_spec_from_path(queryhelp, 0)
        self._entity_to = get_spec_from_path(queryhelp, -1)
        self._accumulator_set = None

    def set_edge_keys(self, edge_keys):
        """
        Set the edge keys that are use to classify the edges during the run of this query.

        :param edge_keys:
            a list of projections on the edge itself, or a tuple that specifies
            (tag, project) if the projection is not on the edge

        Example: For node-to-node graph traversals, it is often convenient to save
        the information on the links::

            qb = QueryBuilder().append(Node, tag='n1').append(Node, tag='n2')
            rule = RuleSequence(qb, track_edges=True)
            rule.set_edge_keys(['input_id', 'output_id', 'label', 'type'])

            # Now for UUIDS:
            qb = QueryBuilder().append(Node, tag='n1').append(Node, tag='n2')
            rule = RuleSequence(qb, track_edges=True)
            rule.set_edge_keys([('n1','uuid'), ('n2','uuid'), 'label', 'type'])
        """
        # Shallow copy so later mutation of the caller's list has no effect here.
        self._edge_keys = edge_keys[:]

    def _init_run(self, operational_set):
        """Initialization Utility method

        This method initializes a run. It initializes the accumulator_set in order
        for it to only contain the operational_set, and to be of the same kind.
        It also modifies its QueryBuilder instance to give the right results.

        :param operational_set: input with which to initialize the accumulator_set.
        """
        type_check(operational_set, Basket)
        # Reuse an accumulator injected via set_accumulator, if any; otherwise
        # start from a fresh copy of the walkers.
        if self._accumulator_set is not None:
            type_check(self._accumulator_set, Basket)
            self._accumulator_set.empty()
            self._accumulator_set += operational_set
        else:
            self._accumulator_set = operational_set.copy()

        # Copying qbtemplate so there's no problem if it is used again in a later run:
        queryhelp = self._qbtemplate.queryhelp
        self._querybuilder = orm.QueryBuilder(**queryhelp)

        self._entity_to_identifier = operational_set[self._entity_to].identifier

        # Now I add the necessary projections, which is the identifier of the
        # last entity of the QueryBuilder path:
        self._querybuilder.add_projection(self._last_tag, self._entity_to_identifier)

        if self._track_edges:
            # This requires additional projections and the edge_keys, which is a list of tuples (of length 2)
            # that stores the information what I need to project as well, as in (tag, projection)
            projections = defaultdict(list)
            self._edge_keys = []
            self._edge_label = queryhelp['path'][-1]['edge_tag']
            # Need to get the edge_set: This is given by entity1_entity2. Here, the results needs to
            # be sorted somehow in order to ensure that the same key is used when entity_from and
            # entity_to are exchanged.
            edge_set = operational_set.dict['{}_{}'.format(*sorted((self._entity_from, self._entity_to)))]
            # Looping over the edge identifiers to figure out what I need to project and in which
            # order. The order is important! The rules:
            #   r1 = Rule(QueryBuilder().append(Group).append(Node, with_group=Group) and
            #   r2 = Rule(QueryBuilder().append(Node).append(Group, with_node=Node)
            # need still to save their results in the same order (i.e. group_id, node_id).
            # Therefore, I am sorting the edge_keys according to edge_identifiers specified in the edge_set
            for tag, projection in edge_set.edge_identifiers:
                if tag == 'edge':
                    actual_tag = self._edge_label
                elif tag == self._entity_from:
                    actual_tag = self._first_tag
                elif tag == self._entity_to:
                    actual_tag = self._last_tag
                else:
                    # For now I can only specify edge_identifiers as 'edge', ie. project on the edge
                    # itself, or by the entity_from, entity_to keyword, ie. groups or nodes.
                    # One could think of other keywords...
                    raise ValueError(f'This tag ({tag}) is not known')
                self._edge_keys.append((actual_tag, projection))
                projections[actual_tag].append(projection)

            # Telling the QB about the additional projections:
            for proj_tag, projectionlist in projections.items():
                try:
                    self._querybuilder.add_projection(proj_tag, projectionlist)
                except (TypeError, ValueError) as exc:
                    raise KeyError('The projection for the edge-identifier is invalid.\n') from exc

    def _load_results(self, target_set, operational_set):
        """Single application of the rules to the operational set

        :param target_set:
            where the new results will be loaded (it will be first emptied of all previous content).
            There is no returned value for this method.
        :param operational_set: where the results originate from (walkers)
        """
        primkeys = operational_set[self._entity_from].keyset
        target_set.empty()
        if primkeys:
            self._querybuilder.add_filter(
                self._first_tag, {operational_set[self._entity_from].identifier: {
                    'in': primkeys
                }}
            )
            qres = self._querybuilder.dict()
            # These are the new results returned by the query
            target_set[self._entity_to].add_entities([
                item[self._last_tag][self._entity_to_identifier] for item in qres
            ])
            if self._track_edges:
                # As in _init_run, I need the key for the edge_set
                edge_key = '{}_{}'.format(*sorted((self._entity_from, self._entity_to)))
                edge_set = operational_set.dict[edge_key]
                namedtuple_ = edge_set.edge_namedtuple
                target_set[edge_key].add_entities([
                    namedtuple_(*(item[key1][key2] for (key1, key2) in self._edge_keys)) for item in qres
                ])

    def set_accumulator(self, accumulator_set):
        """Inject an external accumulator set to be (re)used by subsequent runs."""
        self._accumulator_set = accumulator_set

    def empty_accumulator(self):
        """Empty the accumulator set, if one is attached."""
        if self._accumulator_set is not None:
            self._accumulator_set.empty()

    # Pylint complains if this is not here, but should be removed asap
    def run(self, operational_set):
        pass
class UpdateRule(QueryRule):
    """
    The UpdateRule will accumulate every node visited and return it as a set of nodes
    (and thus, without duplication). It can be used requesting both a finite number
    of iterations or an infinite number of iterations (in which case it will stop once
    no new nodes are added to the accumulation set).
    """

    def run(self, operational_set):
        """Run the query iteratively, returning a copy of all accumulated entities."""
        self._init_run(operational_set)
        self._iterations_done = 0
        new_results = operational_set.get_template()
        # The operational_set will be updated with the new_nodes that were not
        # already in the _accumulator_set, so that we are not querying from the
        # same nodes again and the cycle can end when no new nodes are found
        while (operational_set and self._iterations_done < self._max_iterations):
            self._iterations_done += 1
            self._load_results(new_results, operational_set)
            operational_set = new_results - self._accumulator_set
            self._accumulator_set += new_results
        return self._accumulator_set.copy()
class ReplaceRule(QueryRule):
    """
    The ReplaceRule does not accumulate results, but just sets the operational_set to
    new results. Therefore it can only function using a finite number of iterations,
    since it does not keep track of which nodes where visited already (otherwise, if
    it was following a cycle, it would run indefinitely).
    """

    def __init__(self, querybuilder, max_iterations=1, track_edges=False):
        """Initialization method

        :raises ValueError: if max_iterations is np.inf, since a ReplaceRule has
            no termination criterion of its own and must be bounded.
        """
        if max_iterations == np.inf:
            raise ValueError('You cannot have max_iterations to be infinitely large for replace rules')
        super().__init__(querybuilder, max_iterations=max_iterations, track_edges=track_edges)

    def run(self, operational_set):
        """Run the query max_iterations times, returning only the last batch of results."""
        self._init_run(operational_set)
        self._iterations_done = 0
        new_results = operational_set.get_template()
        # The operational_set will be replaced by the new_nodes, even if these
        # were already visited previously.
        while (operational_set and self._iterations_done < self._max_iterations):
            self._iterations_done += 1
            self._load_results(new_results, operational_set)
            operational_set = new_results
        return operational_set.copy()
class RuleSaveWalkers(Operation):
    """Save the Walkers:

    When initialized, this rule will save a pointer to an external stash variable.
    When run, this stash will be emptied and a given operational_set will be saved
    there instead.
    """

    def __init__(self, stash):
        """Initialization method

        :param stash: external variable in which to save the operational_set
        """
        self._stash = stash
        super().__init__(max_iterations=1, track_edges=True)

    def run(self, operational_set):
        """Copy the operational_set into the stash and return it unchanged."""
        self._stash.empty()
        self._stash += operational_set
        return operational_set
class RuleSetWalkers(Operation):
    """Set the Walkers:

    When initialized, this rule will save a pointer to an external stash variable.
    When run, the given operational_set will be emptied and the stash will be
    loaded in it.
    """

    def __init__(self, stash):
        """Initialization method

        :param stash: external variable from which to load into the operational_set
        """
        self._stash = stash
        super().__init__(max_iterations=1, track_edges=True)

    def run(self, operational_set):
        """Replace the content of the operational_set with the stashed walkers."""
        operational_set.empty()
        operational_set += self._stash
        return operational_set
class RuleSequence(Operation):
    """Rule for concatenation

    Rule Sequence is used to concatenate a series of rules together.
    Concatenating querybuilders in a single rule is not enough because
    one might want to stash results to perform two independent operations
    in the starting set instead of a second operation from the results of
    the first (see RuleSetWalkers and RuleSaveWalkers).
    """

    def __init__(self, rules, max_iterations=1):
        """Initialization method

        :param rules: ordered iterable of Operation instances to apply in sequence.
        :param max_iterations: maximum number of passes over the whole sequence.
        :raises TypeError: if any element of rules is not an Operation.
        """
        for rule in rules:
            if not isinstance(rule, Operation):
                raise TypeError('rule has to be an instance of Operation-subclass')
        self._rules = rules
        self._accumulator_set = None
        self._visits_set = None
        super().__init__(max_iterations, track_edges=False)

    def run(self, operational_set):
        """Apply the rule sequence repeatedly until exhaustion or the iteration cap.

        :param operational_set: initial set of walkers (a Basket).
        :return: a copy of the set of all entities visited by any rule.
        """
        type_check(operational_set, Basket)
        # (Re)initialize the accumulator with only the starting set.
        if self._accumulator_set is not None:
            type_check(self._accumulator_set, Basket)
            self._accumulator_set.empty()
            self._accumulator_set += operational_set
        else:
            self._accumulator_set = operational_set.copy()
        # (Re)initialize the visits set likewise.
        if self._visits_set is not None:
            type_check(self._visits_set, Basket)
            self._visits_set.empty()
            self._visits_set += operational_set
        else:
            self._visits_set = operational_set.copy()
        new_results = operational_set.get_template()
        self._iterations_done = 0
        while (operational_set and self._iterations_done < self._max_iterations):
            self._iterations_done += 1
            new_results.empty()
            # Feed the operational_set through every rule in order; each rule's
            # output becomes the next rule's input.  (The index produced by the
            # previous enumerate() was unused, so it was dropped.)
            for rule in self._rules:
                operational_set = rule.run(operational_set)
                new_results += operational_set
                self._visits_set += operational_set
            # I set the operational set to all results that have not been visited yet.
            operational_set = new_results - self._accumulator_set
            self._accumulator_set += new_results
        return self._visits_set.copy()

    def set_accumulator(self, accumulator_set):
        """Set the accumulator set"""
        self._accumulator_set = accumulator_set

    def empty_accumulator(self):
        """Empties the accumulator set"""
        if self._accumulator_set is not None:
            self._accumulator_set.empty()

    def set_visits(self, visits_set):
        """Set the visits set"""
        self._visits_set = visits_set

    def empty_visits(self):
        """Empties the visits set"""
        if self._visits_set is not None:
            self._visits_set.empty()
| en | 0.865591 | # -*- coding: utf-8 -*- ########################################################################### # Copyright (c), The AiiDA team. All rights reserved. # # This file is part of the AiiDA code. # # # # The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### Rules for the AiiDA Graph Explorer utility Base class for all AGE explorer classes Initialization method :param max_iterations: maximum number of iterations to perform. :param bool track_edges: if True, will also track and return the edges traversed. Sets the max iterations Number of iterations performed Takes the operational_set and overwrites it with the set of nodes that results from applying the rule (this might or not include the initial set of nodes as well depending on the rule). :type operational_set: :py:class:`aiida.tools.graph.age_entities.Basket` :param operational_set: initital set of nodes to be overwritten by the rule. Parent class for every rule that implements a query. QueryRules take a generic QueryBuilder instance and a set of starting nodes and then perform successive iterations of that query, each one from the set of nodes that the previous one found. Depending on the class of rule used the final result will be either the whole set of nodes traversed (UpdateRule), or only the final set of nodes found in the last iteration of the query (ReplaceRule). 
Initialization method :param querybuilder: an instance of the QueryBuilder class from which to take the procedure for traversal :param int max_iterations: the number of iterations to run this query on (must be a finite number for ReplaceRules) :param bool track_edges: whether to track which edges are traversed and store them # Check if there is any projection: # All of these are set in _init_run: Set the edge keys that are use to classify the edges during the run of this query. :param edge_keys: a list of projections on the edge itself, or a tuple that specifies (tag, project) if the projection is not on the edge Example: For node-to-node graph traversals, it is often convenient to save the information on the links:: qb = QueryBuilder().append(Node, tag='n1').append(Node, tag='n2') rule = RuleSequence(qb, track_edges=True) rule.set_edge_keys(['input_id', 'output_id', 'label', 'type']) # Now for UUIDS: qb = QueryBuilder().append(Node, tag='n1').append(Node, tag='n2') rule = RuleSequence(qb, track_edges=True) rule.set_edge_keys([('n1','uuid'), ('n2','uuid'), 'label', 'type']) Initialization Utility method This method initializes a run. It initializes the accumulator_set in order for it to only contain the operational_set, and to be of the same kind. This function modifies the its QueryBuilder instance to give the right results. :param operational_set: input with which to initialize the accumulator_set. # Copying qbtemplate so there's no problem if it is used again in a later run: # Now I add the necessary projections, which is the identifier of the # last entity of the QueryBuilder path: # This requires additional projections and the edge_keys, which is a list of tuples (of length 2) # that stores the information what I need to project as well, as in (tag, projection) # Need to get the edge_set: This is given by entity1_entity2. Here, the results needs to # be sorted somehow in order to ensure that the same key is used when entity_from and # entity_to are exchanged. 
# Looping over the edge identifiers to figure out what I need to project and in which # order. The order is important! The rules: # r1 = Rule(QueryBuilder().append(Group).append(Node, with_group=Group) and # r2 = Rule(QueryBuilder().append(Node).append(Group, with_node=Node) # need still to save their results in the same order (i.e. group_id, node_id). # Therefore, I am sorting the edge_keys according to edge_identifiers specified in the edge_set # For now I can only specify edge_identifiers as 'edge', ie. project on the edge # itself, or by the entity_from, entity_to keyword, ie. groups or nodes. # One could think of other keywords... # Telling the QB about the additional projections: Single application of the rules to the operational set :param target_set: where the new results will be loaded (it will be first emptied of all previous content). There is no returned value for this method. :param operational_set: where the results originate from (walkers) # These are the new results returned by the query # As in _init_run, I need the key for the edge_set # Pylint complains if this is not here, but should be removed asap The UpdateRule will accumulate every node visited and return it as a set of nodes (and thus, without duplication). It can be used requesting both a finite number of iterations or an infinite number of iterations (in which case it will stop once no new nodes are added to the accumulation set). # The operational_set will be updated with the new_nodes that were not # already in the _acumulator_set, so that we are not querying from the # same nodes again and the cycle can end when no new nodes are found The ReplaceRule does not accumulate results, but just sets the operational_set to new results. Therefore it can only function using a finite number of iterations, since it does not keep track of which nodes where visited already (otherwise, if it was following a cycle, it would run indefinitely). 
# The operational_set will be replaced by the new_nodes, even if these # were already visited previously. Save the Walkers: When initialized, this rule will save a pointer to an external stash variable. When run, this stash will be emptied and a given operational_set will be saved there instead. Initialization method :param stash: external variable in which to save the operational_set Set the Walkers: When initialized, this rule will save a pointer to an external stash variable. When run, the given operational_set will be emptied and the stash will be loaded in it. Initialization method :param stash: external variable from which to load into the operational_set Rule for concatenation Rule Sequence is used to concatenate a series of rules together. Concatenating querybuilders in a single rule its not enough because one might want to stash results to perform two independent operations in the starting set instead of a second operation from the results of the first (see RuleSetWalkers and RuleSaveWalkers). # I iterate the operational_set through all the rules: # I set the operational set to all results that have not been visited yet. Set the accumulator set Empties the accumulator set Set the visits set Empties the visits set | 2.352404 | 2 |
ImagePaste.py | lukauskas/imagepaste | 6 | 6625047 | <reponame>lukauskas/imagepaste
import sublime
import sublime_plugin
import os
import sys
import re
import datetime
from imagepaste.utils import os_appropriate_utils
def atoi(text):
    """Return *text* converted to an int when it is all digits; otherwise
    return it unchanged (including the empty string)."""
    return int(text) if text.isdigit() else text


def natural_keys(text):
    """Sort key giving human ("natural") ordering of strings with numbers.

    Splits *text* on runs of digits so that e.g. "item10" sorts after
    "item2" when used as ``sorted(items, key=natural_keys)``.
    """
    # Raw string: '\d' in a plain literal is an invalid escape sequence
    # (DeprecationWarning on Python 3.6+).
    return [atoi(chunk) for chunk in re.split(r'(\d+)', text)]
class ImagePasteBase(object):
    """Shared helpers for computing where pasted images are stored.

    Paths are derived from the current view's file name plus the
    ``image_directory_name`` setting from ``imagepaste.sublime-settings``.
    """

    def __init__(self, *args, **kwgs):
        super(ImagePasteBase, self).__init__(*args, **kwgs)
        self.settings = sublime.load_settings('imagepaste.sublime-settings')
        # Directory (relative to the current file) under which images live.
        self.image_directory = self.settings.get('image_directory_name', None)
        self.paste_absolute_path = self.settings.get('paste_absolute_path', False)
        if not self.image_directory:
            # Normalise any falsy setting value ("" etc.) to None.
            self.image_directory = None

    def paste_absolute(self):
        """Return True when absolute paths should be inserted."""
        return self.paste_absolute_path

    def get_current_filename(self):
        """Return the current view's file name without its extension."""
        stem, _extension = os.path.splitext(self.view.file_name())
        return os.path.basename(stem)

    def get_current_dir(self):
        """Return the directory containing the current view's file."""
        return os.path.dirname(self.view.file_name())

    def get_image_directory(self):
        """Return the image directory path relative to the current file."""
        # Sanitize the file name so it is safe as a directory component.
        subdir = re.sub('[^a-zA-Z0-9]+', '_', self.get_current_filename())
        if self.image_directory:
            return os.path.join(self.image_directory, subdir)
        return subdir

    def get_image_abs_directory(self):
        """Return the absolute path of the image directory."""
        return os.path.join(self.get_current_dir(), self.get_image_directory())

    def get_image_path(self):
        """Return (absolute, relative) paths for a new timestamped image."""
        now = datetime.datetime.now()
        filename = '{0:%Y}{0:%m}{0:%d}{0:%H}{0:%M}{0:%S}.png'.format(now)
        abs_path = os.path.join(self.get_image_abs_directory(), filename)
        rel_path = os.path.join(self.get_image_directory(), filename)
        return abs_path, rel_path
class ImagePasteCommand(ImagePasteBase, sublime_plugin.TextCommand):
    """Paste an image from the clipboard into the current view.

    If the clipboard holds image data it is written to the image
    directory and a reference to it is inserted at the cursor
    (Markdown image syntax in Markdown files, the bare path elsewhere);
    otherwise the command falls back to a normal paste.
    """

    def __init__(self, *args, **kwgs):
        super(ImagePasteCommand, self).__init__(*args, **kwgs)
        self.image_data = None   # last clipboard payload pasted (dedup cache)
        self.image_path = ''     # path inserted for that payload
        self.os_utils = os_appropriate_utils(self.settings)

    def run(self, edit):
        view = self.view
        try:
            image_data = self.os_utils.get_clipboard_image()
        except Exception:
            # Fallback to normal paste, then re-raise so the error is visible.
            view.run_command('paste')
            raise
        if not image_data:
            # No image on the clipboard: behave like plain Ctrl+V.
            view.run_command('paste')
            return
        if self.image_data != image_data:
            image_abs_path, image_rel_path = self.save_image(image_data)
            image_path = image_abs_path if self.paste_absolute() else image_rel_path
            if not image_path:
                return
            # Use forward slashes so the path works in HTML/Markdown.
            image_path = image_path.replace('\\', '/')
            self.image_path = image_path
            self.image_data = image_data
        selections = view.sel()
        if not selections:
            return
        # Insert at the first cursor position.
        selection_pos = selections[0].begin()
        if view.scope_name(selection_pos).startswith('text.html.markdown'):
            # BUGFIX: the format template here was the empty string, so
            # nothing was ever inserted in Markdown files. Insert Markdown
            # image syntax instead (presumed original intent — TODO confirm).
            view.insert(edit, selection_pos, '![]({})'.format(self.image_path))
        else:
            view.insert(edit, selection_pos, '{}'.format(self.image_path))

    def save_image(self, data):
        """Write *data* to a timestamped file; return (absolute, relative) paths."""
        image_dir = self.get_image_abs_directory()
        if not os.path.lexists(image_dir):
            os.makedirs(image_dir)
        abs_path, rel_path = self.get_image_path()
        with open(abs_path, 'wb') as f:
            f.write(data)
        # NOTE: an unreachable ``return None, None`` after the with-block
        # return has been removed.
        return abs_path, rel_path
| import sublime
import sublime_plugin
import os
import sys
import re
import datetime
from imagepaste.utils import os_appropriate_utils
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
return [ atoi(c) for c in re.split('(\d+)', text) ]
class ImagePasteBase(object):
def __init__(self, *args, **kwgs):
super(ImagePasteBase, self).__init__(*args, **kwgs)
self.settings = sublime.load_settings('imagepaste.sublime-settings')
# get the image save dirname
self.image_directory = self.settings.get('image_directory_name', None)
self.paste_absolute_path = self.settings.get('paste_absolute_path', False)
if not self.image_directory:
self.image_directory = None
def paste_absolute(self):
return self.paste_absolute_path
def get_current_filename(self):
'''
returns filename without extension
'''
fullpath, extension = os.path.splitext(self.view.file_name())
basename = os.path.basename(fullpath)
return basename
def get_current_dir(self):
return os.path.dirname(self.view.file_name())
def get_image_directory(self):
''' relative path to image directory '''
root_dir = self.image_directory
subdir_name = self.get_current_filename()
# sanitize the name a bit
subdir_name = re.sub('[^a-zA-Z0-9]+', '_', subdir_name)
if root_dir:
return os.path.join(root_dir, subdir_name)
else:
return subdir_name
def get_image_abs_directory(self):
''' full path to image directory '''
return os.path.join(self.get_current_dir(), self.get_image_directory())
def get_image_path(self):
abs_directory = self.get_image_abs_directory()
rel_directory = self.get_image_directory()
now = datetime.datetime.now()
filename = '{0:%Y}{0:%m}{0:%d}{0:%H}{0:%M}{0:%S}.png'.format(now)
abs_path = os.path.join(abs_directory, filename)
rel_path = os.path.join(rel_directory, filename)
return abs_path, rel_path
class ImagePasteCommand(ImagePasteBase, sublime_plugin.TextCommand):
def __init__(self, *args, **kwgs):
super(ImagePasteCommand, self).__init__(*args, **kwgs)
self.image_data = None
self.image_path = ''
self.os_utils = os_appropriate_utils(self.settings)
def run(self, edit):
view = self.view
try:
image_data = self.os_utils.get_clipboard_image()
except Exception:
# Fallback to normal paste
view.run_command('paste')
raise
if not image_data:
# as normal Ctrl+V
view.run_command('paste')
return
if self.image_data != image_data:
image_abs_path, image_rel_path = self.save_image(image_data)
if self.paste_absolute():
image_path = image_abs_path
else:
image_path = image_rel_path
if not image_path:
return
# fix image path for html
image_path = image_path.replace('\\', '/')
self.image_path = image_path
self.image_data = image_data
selections = view.sel()
if not selections:
return
# get the cursor
selection_pos = selections[0].begin()
if view.scope_name(selection_pos).startswith('text.html.markdown'):
view.insert(edit, selection_pos, ''.format(self.image_path))
else:
view.insert(edit, selection_pos, '{}'.format(self.image_path))
def save_image(self, data):
image_dir = self.get_image_abs_directory()
if not os.path.lexists(image_dir):
os.makedirs(image_dir)
abs_path, rel_path = self.get_image_path()
with open(abs_path, 'wb') as f:
f.write(data)
return abs_path, rel_path
return None, None | en | 0.794448 | # get the image save dirname returns filename without extension relative path to image directory # sanitize the name a bit full path to image directory # Fallback to normal paste # as normal Ctrl+V # fix image path for html # get the cursor | 2.557527 | 3 |
application.py | AllieDeford/radremedy | 0 | 6625048 | <gh_stars>0
#!/usr/bin/env python
from remedy.radremedy import create_app
from remedy.bootstrap import strap
from remedy.get_save_data import run
import os
# Select the Flask configuration based on the RAD_PRODUCTION env var.
application, manager = (None, None)
if os.environ.get('RAD_PRODUCTION'):
    print('Running production configuration')
    config_path = 'remedy.config.ProductionConfig'
else:
    print('Running development configuration')
    config_path = 'remedy.config.DevelopmentConfig'
application, manager = create_app(config_path)
@manager.command
def bootstrap():
    """manage.py command: run the bootstrap routine (remedy.bootstrap.strap)."""
    strap(application)
@manager.command
def scrape():
    """manage.py command: run the data-fetching routine (remedy.get_save_data.run)."""
    run(application)
if __name__ == '__main__':
    # Delegate command-line handling to the script manager.
    manager.run()
| #!/usr/bin/env python
from remedy.radremedy import create_app
from remedy.bootstrap import strap
from remedy.get_save_data import run
import os
application, manager = (None, None)
if os.environ.get('RAD_PRODUCTION'):
print('Running production configuration')
application, manager = create_app('remedy.config.ProductionConfig')
else:
print('Running development configuration')
application, manager = create_app('remedy.config.DevelopmentConfig')
@manager.command
def bootstrap():
strap(application)
@manager.command
def scrape():
run(application)
if __name__ == '__main__':
manager.run() | ru | 0.26433 | #!/usr/bin/env python | 1.958965 | 2 |
utils/align_images.py | atapin/Caricature-Your-Face | 27 | 6625049 | import os
import sys
import bz2
import argparse
from keras.utils import get_file
from utils.face_alignment import image_align
from utils.landmarks_detector import LandmarksDetector
import multiprocessing
LANDMARKS_MODEL_URL = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'
def unpack_bz2(src_path):
    """Decompress *src_path* (a ``.bz2`` file) next to itself.

    Returns the path of the decompressed file, i.e. *src_path* minus the
    trailing ``.bz2`` extension.
    """
    dst_path = src_path[:-4]  # strip the ".bz2" suffix
    # Context managers close both file handles deterministically; the
    # original left the BZ2File open until garbage collection.
    with bz2.BZ2File(src_path) as src:
        data = src.read()
    with open(dst_path, 'wb') as fp:
        fp.write(data)
    return dst_path
if __name__ == "__main__":
    """
    Extracts and aligns all faces from images using DLib and a function from original FFHQ dataset preparation step
    python align_images.py /raw_images /aligned_images
    """
    parser = argparse.ArgumentParser(description='Align faces from input images',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('raw_dir', help='Directory with raw images for face alignment')
    parser.add_argument('aligned_dir', help='Directory for storing aligned images')
    parser.add_argument('--output_size', default=1024, help='The dimension of images for input to the model', type=int)
    parser.add_argument('--x_scale', default=1, help='Scaling factor for x dimension', type=float)
    parser.add_argument('--y_scale', default=1, help='Scaling factor for y dimension', type=float)
    parser.add_argument('--em_scale', default=0.1, help='Scaling factor for eye-mouth distance', type=float)
    parser.add_argument('--use_alpha', default=False, help='Add an alpha channel for masking', type=bool)
    args, other_args = parser.parse_known_args()

    # Download (cached) and decompress the dlib landmarks model.
    landmarks_model_path = unpack_bz2(get_file('shape_predictor_68_face_landmarks.dat.bz2',
                                               LANDMARKS_MODEL_URL, cache_subdir='temp'))
    RAW_IMAGES_DIR = args.raw_dir
    ALIGNED_IMAGES_DIR = args.aligned_dir

    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in os.listdir(RAW_IMAGES_DIR):
        print('Aligning %s ...' % img_name)
        try:
            raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
            fn = face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], 1)
            # BUGFIX: the skip check used the bare filename, which is
            # resolved against the CWD; check in the output directory.
            if os.path.isfile(os.path.join(ALIGNED_IMAGES_DIR, fn)):
                continue
            print('Getting landmarks...')
            for i, face_landmarks in enumerate(landmarks_detector.get_landmarks(raw_img_path), start=1):
                try:
                    print('Starting face alignment...')
                    face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
                    aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
                    image_align(raw_img_path, aligned_face_path, face_landmarks,
                                output_size=args.output_size, x_scale=args.x_scale,
                                y_scale=args.y_scale, em_scale=args.em_scale,
                                alpha=args.use_alpha)
                    print('Wrote result %s' % aligned_face_path)
                except Exception:
                    # Narrowed from a bare ``except`` so KeyboardInterrupt
                    # and SystemExit still abort the run.
                    print("Exception in face alignment!")
        except Exception:
            print("Exception in landmark detection!")
import sys
import bz2
import argparse
from keras.utils import get_file
from utils.face_alignment import image_align
from utils.landmarks_detector import LandmarksDetector
import multiprocessing
LANDMARKS_MODEL_URL = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'
def unpack_bz2(src_path):
data = bz2.BZ2File(src_path).read()
dst_path = src_path[:-4]
with open(dst_path, 'wb') as fp:
fp.write(data)
return dst_path
if __name__ == "__main__":
"""
Extracts and aligns all faces from images using DLib and a function from original FFHQ dataset preparation step
python align_images.py /raw_images /aligned_images
"""
parser = argparse.ArgumentParser(description='Align faces from input images', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('raw_dir', help='Directory with raw images for face alignment')
parser.add_argument('aligned_dir', help='Directory for storing aligned images')
parser.add_argument('--output_size', default=1024, help='The dimension of images for input to the model', type=int)
parser.add_argument('--x_scale', default=1, help='Scaling factor for x dimension', type=float)
parser.add_argument('--y_scale', default=1, help='Scaling factor for y dimension', type=float)
parser.add_argument('--em_scale', default=0.1, help='Scaling factor for eye-mouth distance', type=float)
parser.add_argument('--use_alpha', default=False, help='Add an alpha channel for masking', type=bool)
args, other_args = parser.parse_known_args()
landmarks_model_path = unpack_bz2(get_file('shape_predictor_68_face_landmarks.dat.bz2',
LANDMARKS_MODEL_URL, cache_subdir='temp'))
RAW_IMAGES_DIR = args.raw_dir
ALIGNED_IMAGES_DIR = args.aligned_dir
landmarks_detector = LandmarksDetector(landmarks_model_path)
for img_name in os.listdir(RAW_IMAGES_DIR):
print('Aligning %s ...' % img_name)
try:
raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
fn = face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], 1)
if os.path.isfile(fn):
continue
print('Getting landmarks...')
for i, face_landmarks in enumerate(landmarks_detector.get_landmarks(raw_img_path), start=1):
try:
print('Starting face alignment...')
face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
image_align(raw_img_path, aligned_face_path, face_landmarks, output_size=args.output_size, x_scale=args.x_scale, y_scale=args.y_scale, em_scale=args.em_scale, alpha=args.use_alpha)
print('Wrote result %s' % aligned_face_path)
except:
print("Exception in face alignment!")
except:
print("Exception in landmark detection!") | en | 0.647821 | Extracts and aligns all faces from images using DLib and a function from original FFHQ dataset preparation step python align_images.py /raw_images /aligned_images | 2.702927 | 3 |
mozillians/users/api/v2.py | justinpotts/mozillians | 0 | 6625050 | <reponame>justinpotts/mozillians<gh_stars>0
from django.db.models import Q
from django.shortcuts import get_object_or_404
import django_filters
from funfactory.urlresolvers import reverse
from rest_framework import viewsets, serializers
from rest_framework.response import Response
from mozillians.common.helpers import absolutify, markdown
from mozillians.users.managers import PUBLIC
from mozillians.users.models import ExternalAccount, GroupMembership, Language, UserProfile
# Serializers
class ExternalAccountSerializer(serializers.ModelSerializer):
    """Serialize a user's external account: type, identifier and privacy."""
    name = serializers.CharField(source='get_type_display')
    privacy = serializers.CharField(source='get_privacy_display')

    class Meta:
        model = ExternalAccount
        fields = ('type', 'identifier', 'privacy', 'name')

    def transform_type(self, obj, value):
        # Expose the account type in lowercase.
        return value.lower()
class WebsiteSerializer(serializers.ModelSerializer):
    """Serialize a website-type external account as (website, privacy)."""
    website = serializers.CharField(source='identifier')
    privacy = serializers.CharField(source='get_privacy_display')

    class Meta:
        model = ExternalAccount
        fields = ('website', 'privacy')
class LanguageSerializer(serializers.ModelSerializer):
    """Serialize a language with its code and English/native names."""
    english = serializers.CharField(source='get_english')
    native = serializers.CharField(source='get_native')

    class Meta:
        model = Language
        fields = ('code', 'english', 'native')
class AlternateEmailSerializer(serializers.ModelSerializer):
    """Serialize an email-type external account as (email, privacy)."""
    email = serializers.CharField(source='identifier')
    privacy = serializers.CharField(source='get_privacy_display')

    class Meta:
        model = ExternalAccount
        fields = ('email', 'privacy')
class UserProfileSerializer(serializers.HyperlinkedModelSerializer):
    """Compact profile representation used for list endpoints."""
    username = serializers.Field(source='user.username')

    class Meta:
        model = UserProfile
        fields = ('username', 'is_vouched', '_url')
class UserProfileDetailedSerializer(serializers.HyperlinkedModelSerializer):
    """Full profile representation.

    Every field that has a privacy setting on ``UserProfile`` is emitted
    as a ``{'value': ..., 'privacy': ...}`` mapping; fields needing extra
    data get a hand-written ``transform_<field>`` / ``get_<field>`` below.
    """
    username = serializers.Field(source='user.username')
    email = serializers.Field(source='email')
    alternate_emails = AlternateEmailSerializer(many=True, source='alternate_emails')
    country = serializers.SerializerMethodField('get_country')
    region = serializers.SerializerMethodField('get_region')
    city = serializers.SerializerMethodField('get_city')
    external_accounts = ExternalAccountSerializer(many=True, source='accounts')
    languages = LanguageSerializer(many=True, source='languages')
    websites = WebsiteSerializer(many=True, source='websites')
    is_public = serializers.Field(source='is_public')
    url = serializers.SerializerMethodField('get_url')

    class Meta:
        model = UserProfile
        fields = ('username', 'full_name', 'email', 'alternate_emails', 'bio', 'photo',
                  'ircname', 'date_mozillian', 'timezone', 'title', 'story_link', 'languages',
                  'external_accounts', 'websites', 'tshirt', 'is_public', 'is_vouched',
                  '_url', 'url', 'city', 'region', 'country')

    def __init__(self, *args, **kwargs):
        super(UserProfileDetailedSerializer, self).__init__(*args, **kwargs)
        # Install the generic privacy-wrapping transform on every field
        # that has a privacy display method but no hand-written transform.
        for field_name in self.fields.keys():
            transform_name = 'transform_{0}'.format(field_name)
            custom_transform = getattr(self, transform_name, None)
            privacy_display = getattr(
                UserProfile, 'get_privacy_{0}_display'.format(field_name), None)
            if not custom_transform and privacy_display:
                setattr(self, transform_name,
                        self._transform_privacy_wrapper(field_name))

    def _transform_privacy_wrapper(self, field):
        """Build a transform pairing a field's value with its privacy label."""
        def _wrap(obj, value):
            privacy_display = getattr(obj, 'get_privacy_{0}_display'.format(field))
            return {
                'value': value,
                'privacy': privacy_display(),
            }
        return _wrap

    def get_url(self, obj):
        """Absolute URL of the profile page on the site."""
        return absolutify(
            reverse('phonebook:profile_view', kwargs={'username': obj.user.username}))

    def transform_timezone(self, obj, value):
        return {
            'value': value,
            'utc_offset': obj.timezone_offset(),
            'privacy': obj.get_privacy_timezone_display(),
        }

    def transform_bio(self, obj, value):
        return {
            'value': value,
            # Python 2 codebase: unicode() of the rendered Markdown.
            'html': unicode(markdown(value)),
            'privacy': obj.get_privacy_bio_display(),
        }

    def transform_photo(self, obj, value):
        return {
            'value': obj.get_photo_url('300x300'),
            '150x150': obj.get_photo_url('150x150'),
            '300x300': obj.get_photo_url('300x300'),
            '500x500': obj.get_photo_url('500x500'),
            'privacy': obj.get_privacy_photo_display(),
        }

    def transform_tshirt(self, obj, value):
        return {
            'value': obj.tshirt,
            'english': obj.get_tshirt_display(),
            'privacy': obj.get_privacy_tshirt_display(),
        }

    def get_country(self, obj):
        geo = obj.geo_country
        return {
            'code': geo.code if geo else '',
            'value': geo.name if geo else '',
            'privacy': obj.get_privacy_geo_country_display(),
        }

    def get_region(self, obj):
        geo = obj.geo_region
        return {
            'value': geo.name if geo else '',
            'privacy': obj.get_privacy_geo_region_display(),
        }

    def get_city(self, obj):
        geo = obj.geo_city
        return {
            'value': geo.name if geo else '',
            'privacy': obj.get_privacy_geo_city_display(),
        }
# Filters
class UserProfileFilter(django_filters.FilterSet):
    """Query-string filters for the profile list endpoint."""
    city = django_filters.CharFilter(name='geo_city__name')
    region = django_filters.CharFilter(name='geo_region__name')
    country = django_filters.CharFilter(name='geo_country__name')
    country_code = django_filters.CharFilter(name='geo_country__code')
    username = django_filters.CharFilter(name='user__username')
    email = django_filters.MethodFilter(action='filter_emails')
    language = django_filters.CharFilter(name='language__code')
    account = django_filters.CharFilter(name='externalaccount__identifier', distinct=True)
    group = django_filters.MethodFilter(action='filter_group')
    skill = django_filters.CharFilter(name='skills__name')

    class Meta:
        model = UserProfile
        fields = ('is_vouched', 'city', 'region', 'country', 'country_code',
                  'username', 'email', 'ircname', 'full_name', 'language',
                  'account', 'group', 'skill')

    def filter_emails(self, queryset, value):
        """Return users with email matching either primary or alternate email address"""
        matching_accounts = ExternalAccount.objects.filter(
            type=ExternalAccount.TYPE_EMAIL, identifier=value)
        profile_ids = matching_accounts.values_list('user__id', flat=True)
        return self.queryset.filter(
            Q(id__in=profile_ids) | Q(user__email=value)).distinct()

    def filter_group(self, queryset, value):
        """Return users who are full members of the named group."""
        return self.queryset.filter(groups__name=value,
                                    groupmembership__status=GroupMembership.MEMBER)
# Views
class UserProfileViewSet(viewsets.ReadOnlyModelViewSet):
    """
    Returns a list of Mozillians respecting authorization levels
    and privacy settings.
    """
    serializer_class = UserProfileSerializer
    model = UserProfile
    filter_class = UserProfileFilter
    ordering = ('user__username',)

    def get_queryset(self):
        """Restrict profiles to the requester's privacy level."""
        privacy_level = self.request.privacy_level
        queryset = UserProfile.objects.complete()
        if privacy_level == PUBLIC:
            queryset = queryset.public()
        return queryset.privacy_level(privacy_level)

    def retrieve(self, request, pk):
        """Return the detailed representation of a single profile."""
        profile = get_object_or_404(self.get_queryset(), pk=pk)
        return Response(UserProfileDetailedSerializer(profile).data)
| from django.db.models import Q
from django.shortcuts import get_object_or_404
import django_filters
from funfactory.urlresolvers import reverse
from rest_framework import viewsets, serializers
from rest_framework.response import Response
from mozillians.common.helpers import absolutify, markdown
from mozillians.users.managers import PUBLIC
from mozillians.users.models import ExternalAccount, GroupMembership, Language, UserProfile
# Serializers
class ExternalAccountSerializer(serializers.ModelSerializer):
name = serializers.CharField(source='get_type_display')
privacy = serializers.CharField(source='get_privacy_display')
class Meta:
model = ExternalAccount
fields = ('type', 'identifier', 'privacy', 'name')
def transform_type(self, obj, value):
return value.lower()
class WebsiteSerializer(serializers.ModelSerializer):
website = serializers.CharField(source='identifier')
privacy = serializers.CharField(source='get_privacy_display')
class Meta:
model = ExternalAccount
fields = ('website', 'privacy')
class LanguageSerializer(serializers.ModelSerializer):
english = serializers.CharField(source='get_english')
native = serializers.CharField(source='get_native')
class Meta:
model = Language
fields = ('code', 'english', 'native')
class AlternateEmailSerializer(serializers.ModelSerializer):
email = serializers.CharField(source='identifier')
privacy = serializers.CharField(source='get_privacy_display')
class Meta:
model = ExternalAccount
fields = ('email', 'privacy')
class UserProfileSerializer(serializers.HyperlinkedModelSerializer):
username = serializers.Field(source='user.username')
class Meta:
model = UserProfile
fields = ('username', 'is_vouched', '_url')
class UserProfileDetailedSerializer(serializers.HyperlinkedModelSerializer):
username = serializers.Field(source='user.username')
email = serializers.Field(source='email')
alternate_emails = AlternateEmailSerializer(many=True, source='alternate_emails')
country = serializers.SerializerMethodField('get_country')
region = serializers.SerializerMethodField('get_region')
city = serializers.SerializerMethodField('get_city')
external_accounts = ExternalAccountSerializer(many=True, source='accounts')
languages = LanguageSerializer(many=True, source='languages')
websites = WebsiteSerializer(many=True, source='websites')
is_public = serializers.Field(source='is_public')
url = serializers.SerializerMethodField('get_url')
# Add profile URL
class Meta:
model = UserProfile
fields = ('username', 'full_name', 'email', 'alternate_emails', 'bio', 'photo',
'ircname', 'date_mozillian', 'timezone', 'title', 'story_link', 'languages',
'external_accounts', 'websites', 'tshirt', 'is_public', 'is_vouched',
'_url', 'url', 'city', 'region', 'country')
def _transform_privacy_wrapper(self, field):
def _transform_privacy(obj, value):
return {
'value': value,
'privacy': getattr(obj, 'get_privacy_{0}_display'.format(field))()
}
return _transform_privacy
def __init__(self, *args, **kwargs):
super(UserProfileDetailedSerializer, self).__init__(*args, **kwargs)
# If we don't define a custom transform method and if the
# field has a privacy setting, set the transform privacy
# wrapper.
for field in self.fields.keys():
method_name = 'transform_{0}'.format(field)
if ((not getattr(self, method_name, None) and
getattr(UserProfile, 'get_privacy_{0}_display'.format(field), None))):
setattr(self, method_name, self._transform_privacy_wrapper(field))
def get_url(self, obj):
return absolutify(reverse('phonebook:profile_view', kwargs={'username': obj.user.username}))
def transform_timezone(self, obj, value):
return {
'value': value,
'utc_offset': obj.timezone_offset(),
'privacy': obj.get_privacy_timezone_display(),
}
def transform_bio(self, obj, value):
return {
'value': value,
'html': unicode(markdown(value)),
'privacy': obj.get_privacy_bio_display(),
}
def transform_photo(self, obj, value):
return {
'value': obj.get_photo_url('300x300'),
'150x150': obj.get_photo_url('150x150'),
'300x300': obj.get_photo_url('300x300'),
'500x500': obj.get_photo_url('500x500'),
'privacy': obj.get_privacy_photo_display(),
}
def transform_tshirt(self, obj, value):
return {
'value': obj.tshirt,
'english': obj.get_tshirt_display(),
'privacy': obj.get_privacy_tshirt_display(),
}
def get_country(self, obj):
country = obj.geo_country
return {
'code': country.code if country else '',
'value': country.name if country else '',
'privacy': obj.get_privacy_geo_country_display(),
}
def get_region(self, obj):
region = obj.geo_region
return {
'value': region.name if region else '',
'privacy': obj.get_privacy_geo_region_display(),
}
def get_city(self, obj):
city = obj.geo_city
return {
'value': city.name if city else '',
'privacy': obj.get_privacy_geo_city_display(),
}
# Filters
class UserProfileFilter(django_filters.FilterSet):
city = django_filters.CharFilter(name='geo_city__name')
region = django_filters.CharFilter(name='geo_region__name')
country = django_filters.CharFilter(name='geo_country__name')
country_code = django_filters.CharFilter(name='geo_country__code')
username = django_filters.CharFilter(name='user__username')
email = django_filters.MethodFilter(action='filter_emails')
language = django_filters.CharFilter(name='language__code')
account = django_filters.CharFilter(name='externalaccount__identifier', distinct=True)
group = django_filters.MethodFilter(action='filter_group')
skill = django_filters.CharFilter(name='skills__name')
class Meta:
model = UserProfile
fields = ('is_vouched', 'city', 'region', 'country', 'country_code',
'username', 'email', 'ircname', 'full_name', 'language',
'account', 'group', 'skill')
def filter_emails(self, queryset, value):
"""Return users with email matching either primary or alternate email address"""
qs = ExternalAccount.objects.filter(type=ExternalAccount.TYPE_EMAIL, identifier=value)
users = qs.values_list('user__id', flat=True)
query = Q(id__in=users) | Q(user__email=value)
return self.queryset.filter(query).distinct()
def filter_group(self, queryset, value):
membership = GroupMembership.MEMBER
return self.queryset.filter(groups__name=value, groupmembership__status=membership)
# Views
class UserProfileViewSet(viewsets.ReadOnlyModelViewSet):
"""
Returns a list of Mozillians respecting authorization levels
and privacy settings.
"""
serializer_class = UserProfileSerializer
model = UserProfile
filter_class = UserProfileFilter
ordering = ('user__username',)
def get_queryset(self):
queryset = UserProfile.objects.complete()
privacy_level = self.request.privacy_level
if privacy_level == PUBLIC:
queryset = queryset.public()
queryset = queryset.privacy_level(privacy_level)
return queryset
def retrieve(self, request, pk):
user = get_object_or_404(self.get_queryset(), pk=pk)
serializer = UserProfileDetailedSerializer(user)
return Response(serializer.data) | en | 0.742744 | # Serializers # Add profile URL # If we don't define a custom transform method and if the # field has a privacy setting, set the transform privacy # wrapper. # Filters Return users with email matching either primary or alternate email address # Views Returns a list of Mozillians respecting authorization levels and privacy settings. | 1.971756 | 2 |