code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.firefox.options import Options
from time import time, sleep
import re
import requests
import os
import csv
def generate_driver():
options = Options()
options.add_argument("--headless")
driver = webdriver.Firefox(firefox_options=options)
return driver
def site_connect(driver, scrape_number):
attempts = 0
while attempts < 3:
try:
driver.get("https://www.pokemon.com/uk/pokedex/" +
'{:03}'.format(scrape_number))
WebDriverWait(driver, 5).until(
EC.presence_of_element_located((By.CSS_SELECTOR, 'img.active'))
)
return True
except Exception:
print("Failure connecting to https://www.pokemon.com/uk/pokedex/" +
'{:03}'.format(scrape_number))
attempts += 1
def pokedex_write_csv(output_file, filename, fieldnames):
for row in output_file:
with open(filename, "a") as f:
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writerow(row)
def pokedex_parse(driver, scrape_number):
site_connect(driver, scrape_number)
sleep(2)
# scrape variables
pokemon_title = driver.find_element_by_css_selector(
'.pokedex-pokemon-pagination-title > div:nth-child(1)').text
pokemon_name = re.search("^[A-Z][a-z]+", pokemon_title).group(0)
pokemon_number = re.search("[0-9]+$", pokemon_title).group(0)
pokemon_description_y = driver.find_element_by_css_selector(
'p.version-y').text
#pokemon_description_x = driver.find_element_by_css_selector('p.version-x').text
pokemon_height = driver.find_element_by_css_selector(
'div.column-7:nth-child(1) > ul:nth-child(1) > li:nth-child(1) > span:nth-child(2)').text
pokemon_height_m = re.search("^[0-9.]+", pokemon_height).group(0)
pokemon_weight = driver.find_element_by_css_selector(
'div.column-7:nth-child(1) > ul:nth-child(1) > li:nth-child(2) > span:nth-child(2)').text
pokemon_weight_kg = re.search("^[0-9.]+", pokemon_weight).group(0)
pokemon_category = driver.find_element_by_css_selector(
'div.column-7:nth-child(2) > ul:nth-child(1) > li:nth-child(1) > span:nth-child(2)').text
pokemon_stats_hp = driver.find_element_by_xpath(
'/html/body/div[4]/section[3]/div[1]/div[3]/ul/li[1]/ul/li[1]').get_attribute("data-value")
pokemon_stats_attack = driver.find_element_by_xpath(
'/html/body/div[4]/section[3]/div[1]/div[3]/ul/li[2]/ul/li[1]').get_attribute("data-value")
pokemon_stats_defense = driver.find_element_by_xpath(
'/html/body/div[4]/section[3]/div[1]/div[3]/ul/li[3]/ul/li[1]').get_attribute("data-value")
pokemon_stats_spattack = driver.find_element_by_xpath(
'/html/body/div[4]/section[3]/div[1]/div[3]/ul/li[4]/ul/li[1]').get_attribute("data-value")
pokemon_stats_spdefense = driver.find_element_by_xpath(
'/html/body/div[4]/section[3]/div[1]/div[3]/ul/li[5]/ul/li[1]').get_attribute("data-value")
pokemon_stats_speed = driver.find_element_by_xpath(
'/html/body/div[4]/section[3]/div[1]/div[3]/ul/li[6]/ul/li[1]').get_attribute("data-value")
# can this be done with a list comprehension?
pokemon_ability = []
for ability in driver.find_elements_by_class_name('moreInfo'):
# Script returning '' on some abilities - see Gyrados (#130), this is a dirty hack
if ability.text is not '':
pokemon_ability.append(ability.text)
pokemon_type = driver.find_element_by_css_selector(
'.dtm-type > ul:nth-child(2)').text.split("\n")
pokemon_weaknesses = driver.find_element_by_css_selector(
'.dtm-weaknesses > ul:nth-child(2)').text.split("\n")
# grab the image, don't save if it already exists
pokemon_image_url = driver.find_element_by_css_selector(
'img.active').get_attribute('src')
pokemon_path = "images/" + pokemon_number + "_" + pokemon_name + ".png"
if (os.path.isfile(pokemon_path) == False):
with open(pokemon_path, "wb") as f:
r = requests.get(pokemon_image_url)
f.write(r.content)
# add to dictionary
pokedex_entry = {
"number": pokemon_number,
"name": pokemon_name,
"description_y": pokemon_description_y,
"type": pokemon_type,
"weakness": pokemon_weaknesses,
"ability": pokemon_ability,
"category": pokemon_category,
"height": pokemon_height_m,
"weight": pokemon_weight_kg,
"stats_hp": pokemon_stats_hp,
"stats_attack": pokemon_stats_attack,
"stats_defense": pokemon_stats_defense,
"stats_spattack": pokemon_stats_spattack,
"stats_spdefense": pokemon_stats_spdefense,
"stats_speed": pokemon_stats_speed
}
return pokedex_entry
def pokedex_scraper(start=1, finish=150):
# setup variables
pokedex = []
current_pokedex = start
start_time = time()
print("Scraping Pokedex #" +
'{:03}'.format(start) + " to #" + '{:03}'.format(finish))
driver = generate_driver()
driver.get("https://www.pokemon.com/uk/pokedex/")
sleep(2)
# start scraping
while current_pokedex <= finish:
print("Scraping Pokedex #" +
'{:03}'.format(current_pokedex) + "/#" + str(finish) + "...")
pokedex.append(pokedex_parse(driver, current_pokedex))
current_pokedex += 1
# write the CSV
pokedex_fields = [
"number", "name", "description_y",
"type", "weakness", "ability", "category", "height",
"weight", "stats_hp", "stats_attack", "stats_defense",
"stats_spattack", "stats_spdefense", "stats_speed"
]
pokedex_write_csv(pokedex, "pokedex.csv", pokedex_fields)
# pack up
driver.quit()
end_time = time()
elapsed_time = end_time - start_time
print("Total run time: " + str(elapsed_time) + " seconds")
if __name__ == '__main__':
pokedex_scraper() | pokedex_scraper.py | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.firefox.options import Options
from time import time, sleep
import re
import requests
import os
import csv
def generate_driver():
options = Options()
options.add_argument("--headless")
driver = webdriver.Firefox(firefox_options=options)
return driver
def site_connect(driver, scrape_number):
attempts = 0
while attempts < 3:
try:
driver.get("https://www.pokemon.com/uk/pokedex/" +
'{:03}'.format(scrape_number))
WebDriverWait(driver, 5).until(
EC.presence_of_element_located((By.CSS_SELECTOR, 'img.active'))
)
return True
except Exception:
print("Failure connecting to https://www.pokemon.com/uk/pokedex/" +
'{:03}'.format(scrape_number))
attempts += 1
def pokedex_write_csv(output_file, filename, fieldnames):
for row in output_file:
with open(filename, "a") as f:
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writerow(row)
def pokedex_parse(driver, scrape_number):
site_connect(driver, scrape_number)
sleep(2)
# scrape variables
pokemon_title = driver.find_element_by_css_selector(
'.pokedex-pokemon-pagination-title > div:nth-child(1)').text
pokemon_name = re.search("^[A-Z][a-z]+", pokemon_title).group(0)
pokemon_number = re.search("[0-9]+$", pokemon_title).group(0)
pokemon_description_y = driver.find_element_by_css_selector(
'p.version-y').text
#pokemon_description_x = driver.find_element_by_css_selector('p.version-x').text
pokemon_height = driver.find_element_by_css_selector(
'div.column-7:nth-child(1) > ul:nth-child(1) > li:nth-child(1) > span:nth-child(2)').text
pokemon_height_m = re.search("^[0-9.]+", pokemon_height).group(0)
pokemon_weight = driver.find_element_by_css_selector(
'div.column-7:nth-child(1) > ul:nth-child(1) > li:nth-child(2) > span:nth-child(2)').text
pokemon_weight_kg = re.search("^[0-9.]+", pokemon_weight).group(0)
pokemon_category = driver.find_element_by_css_selector(
'div.column-7:nth-child(2) > ul:nth-child(1) > li:nth-child(1) > span:nth-child(2)').text
pokemon_stats_hp = driver.find_element_by_xpath(
'/html/body/div[4]/section[3]/div[1]/div[3]/ul/li[1]/ul/li[1]').get_attribute("data-value")
pokemon_stats_attack = driver.find_element_by_xpath(
'/html/body/div[4]/section[3]/div[1]/div[3]/ul/li[2]/ul/li[1]').get_attribute("data-value")
pokemon_stats_defense = driver.find_element_by_xpath(
'/html/body/div[4]/section[3]/div[1]/div[3]/ul/li[3]/ul/li[1]').get_attribute("data-value")
pokemon_stats_spattack = driver.find_element_by_xpath(
'/html/body/div[4]/section[3]/div[1]/div[3]/ul/li[4]/ul/li[1]').get_attribute("data-value")
pokemon_stats_spdefense = driver.find_element_by_xpath(
'/html/body/div[4]/section[3]/div[1]/div[3]/ul/li[5]/ul/li[1]').get_attribute("data-value")
pokemon_stats_speed = driver.find_element_by_xpath(
'/html/body/div[4]/section[3]/div[1]/div[3]/ul/li[6]/ul/li[1]').get_attribute("data-value")
# can this be done with a list comprehension?
pokemon_ability = []
for ability in driver.find_elements_by_class_name('moreInfo'):
# Script returning '' on some abilities - see Gyrados (#130), this is a dirty hack
if ability.text is not '':
pokemon_ability.append(ability.text)
pokemon_type = driver.find_element_by_css_selector(
'.dtm-type > ul:nth-child(2)').text.split("\n")
pokemon_weaknesses = driver.find_element_by_css_selector(
'.dtm-weaknesses > ul:nth-child(2)').text.split("\n")
# grab the image, don't save if it already exists
pokemon_image_url = driver.find_element_by_css_selector(
'img.active').get_attribute('src')
pokemon_path = "images/" + pokemon_number + "_" + pokemon_name + ".png"
if (os.path.isfile(pokemon_path) == False):
with open(pokemon_path, "wb") as f:
r = requests.get(pokemon_image_url)
f.write(r.content)
# add to dictionary
pokedex_entry = {
"number": pokemon_number,
"name": pokemon_name,
"description_y": pokemon_description_y,
"type": pokemon_type,
"weakness": pokemon_weaknesses,
"ability": pokemon_ability,
"category": pokemon_category,
"height": pokemon_height_m,
"weight": pokemon_weight_kg,
"stats_hp": pokemon_stats_hp,
"stats_attack": pokemon_stats_attack,
"stats_defense": pokemon_stats_defense,
"stats_spattack": pokemon_stats_spattack,
"stats_spdefense": pokemon_stats_spdefense,
"stats_speed": pokemon_stats_speed
}
return pokedex_entry
def pokedex_scraper(start=1, finish=150):
# setup variables
pokedex = []
current_pokedex = start
start_time = time()
print("Scraping Pokedex #" +
'{:03}'.format(start) + " to #" + '{:03}'.format(finish))
driver = generate_driver()
driver.get("https://www.pokemon.com/uk/pokedex/")
sleep(2)
# start scraping
while current_pokedex <= finish:
print("Scraping Pokedex #" +
'{:03}'.format(current_pokedex) + "/#" + str(finish) + "...")
pokedex.append(pokedex_parse(driver, current_pokedex))
current_pokedex += 1
# write the CSV
pokedex_fields = [
"number", "name", "description_y",
"type", "weakness", "ability", "category", "height",
"weight", "stats_hp", "stats_attack", "stats_defense",
"stats_spattack", "stats_spdefense", "stats_speed"
]
pokedex_write_csv(pokedex, "pokedex.csv", pokedex_fields)
# pack up
driver.quit()
end_time = time()
elapsed_time = end_time - start_time
print("Total run time: " + str(elapsed_time) + " seconds")
if __name__ == '__main__':
pokedex_scraper() | 0.247078 | 0.097777 |
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None
import time
from sklearn.externals import joblib
from sklearn.model_selection import StratifiedKFold, GridSearchCV, train_test_split
from sklearn.metrics import roc_auc_score, average_precision_score, make_scorer
from sklearn import linear_model, tree, ensemble
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler
from imblearn.pipeline import Pipeline
from ensampling.bagging import Blagging
n_runs=30 # Define the number of models to be trained
scorer = make_scorer(average_precision_score, needs_threshold=True, average="micro",)#make_scorer(cohen_kappa_score)#'roc_auc'
min_samples_leaf=5
n_estimators=10
criterion='entropy'
max_depth=np.arange(3,45,5)
max_depth=[3,4,5,7,10,15,20,30,50]
dataset='kaggle' #'data/bopredict.csv'
n_folds=5
save_run=10
df = pd.read_csv('data/'+dataset+'.csv')
X = df.drop(['went_on_backorder','sku'],axis=1).values
y = df['went_on_backorder'].values
print("dataset:",dataset)
estimators = [
("Logistic Regression", 'lgst',
linear_model.LogisticRegression(),
{'C':np.logspace(0,3,4),
'penalty':['l1','l2'],
}),
("Decision Tree", 'cart',
tree.DecisionTreeClassifier(min_samples_leaf=min_samples_leaf,
criterion=criterion),
{'max_depth':max_depth,
#'max_features':[3,5,10,None],
#'splitter':['best','random'],
'criterion':['entropy','gini'],
}),
("RandomUnderSampling", 'rus',
Pipeline([('res', RandomUnderSampler()),
('tree', tree.DecisionTreeClassifier(
min_samples_leaf=min_samples_leaf, criterion=criterion))
]),
{'tree__max_depth':max_depth,
}),
("SMOTE", 'smt',
Pipeline([('res', SMOTE()),
('tree', tree.DecisionTreeClassifier(
min_samples_leaf=min_samples_leaf, criterion=criterion))
]),
{'tree__max_depth':max_depth,
}),
("UnderBagging", 'ub',
Blagging(n_estimators=n_estimators,
base_estimator=tree.DecisionTreeClassifier(
min_samples_leaf=min_samples_leaf,criterion=criterion)),
{'max_depth':max_depth,
}),
("RandomForest", "rf",
ensemble.RandomForestClassifier(n_estimators=n_estimators,
min_samples_leaf=min_samples_leaf, criterion=criterion),
{'max_depth':max_depth,
}),
("GradientBoosting", "gb",
ensemble.GradientBoostingClassifier(n_estimators=n_estimators,
min_samples_leaf=min_samples_leaf),
{'max_depth':[10,],
}),
]
for est_full_name, est_name, est, params in estimators:
print ('\n%s\n%s\n' % ('-'*25, est_full_name))
print ('Run\tEst\tScore\tAUROC\tAUPRC\tTime\tBest parameters')
matriz = []
t0 = time.time()
for run in range(n_runs):
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y,
test_size=0.15, random_state=run)
kf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=int(run*9))
gs = GridSearchCV(est, params, cv=kf,# n_iter=n_iter_search,
scoring=scorer, verbose=0,n_jobs=-1)
t1 = time.time()
gs.fit(X_train, y_train)
y_prob0 = gs.best_estimator_.predict_proba(X_train)[:,1]
y_prob = gs.best_estimator_.predict_proba(X_test)[:,1]
roc = roc_auc_score(y_test, y_prob)
pr = average_precision_score(y_test, y_prob)
run_time = time.time() - t1
avg_time = run_time/gs.n_splits_
print ("%i\t%s\t%.4f\t%.4f\t%.4f\t%.2f\t%s" % (run, est_name,
gs.best_score_, roc, pr, avg_time, gs.best_params_))
# get importance
imp = []
mdl = gs.best_estimator_
if est_name in ['ub','sbag']:
imp = np.mean([
e.feature_importances_ for e in mdl.estimators_
], axis=0)
elif est_name in ['rus','smt']:
imp = mdl.named_steps['tree'].feature_importances_
elif est_name == 'lgst':
imp = mdl.coef_.ravel()
else:
imp = mdl.feature_importances_
matriz.append(
{ 'run' : run,
'estimator' : est_name,
'roc' : roc,
'pr' : pr,
'best_params' : gs.best_params_,
'avg_time' : avg_time,
'importance' : imp,
})
if run == save_run:
path = 'results/pkl/'+dataset+'/'+est_name.lower() + '.pkl'
joblib.dump(gs.best_estimator_, path)
print("Elapsed time: %0.3fs" % (time.time()-t0))
# Save results
data = pd.DataFrame(matriz) | model_selection.py | import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None
import time
from sklearn.externals import joblib
from sklearn.model_selection import StratifiedKFold, GridSearchCV, train_test_split
from sklearn.metrics import roc_auc_score, average_precision_score, make_scorer
from sklearn import linear_model, tree, ensemble
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler
from imblearn.pipeline import Pipeline
from ensampling.bagging import Blagging
n_runs=30 # Define the number of models to be trained
scorer = make_scorer(average_precision_score, needs_threshold=True, average="micro",)#make_scorer(cohen_kappa_score)#'roc_auc'
min_samples_leaf=5
n_estimators=10
criterion='entropy'
max_depth=np.arange(3,45,5)
max_depth=[3,4,5,7,10,15,20,30,50]
dataset='kaggle' #'data/bopredict.csv'
n_folds=5
save_run=10
df = pd.read_csv('data/'+dataset+'.csv')
X = df.drop(['went_on_backorder','sku'],axis=1).values
y = df['went_on_backorder'].values
print("dataset:",dataset)
estimators = [
("Logistic Regression", 'lgst',
linear_model.LogisticRegression(),
{'C':np.logspace(0,3,4),
'penalty':['l1','l2'],
}),
("Decision Tree", 'cart',
tree.DecisionTreeClassifier(min_samples_leaf=min_samples_leaf,
criterion=criterion),
{'max_depth':max_depth,
#'max_features':[3,5,10,None],
#'splitter':['best','random'],
'criterion':['entropy','gini'],
}),
("RandomUnderSampling", 'rus',
Pipeline([('res', RandomUnderSampler()),
('tree', tree.DecisionTreeClassifier(
min_samples_leaf=min_samples_leaf, criterion=criterion))
]),
{'tree__max_depth':max_depth,
}),
("SMOTE", 'smt',
Pipeline([('res', SMOTE()),
('tree', tree.DecisionTreeClassifier(
min_samples_leaf=min_samples_leaf, criterion=criterion))
]),
{'tree__max_depth':max_depth,
}),
("UnderBagging", 'ub',
Blagging(n_estimators=n_estimators,
base_estimator=tree.DecisionTreeClassifier(
min_samples_leaf=min_samples_leaf,criterion=criterion)),
{'max_depth':max_depth,
}),
("RandomForest", "rf",
ensemble.RandomForestClassifier(n_estimators=n_estimators,
min_samples_leaf=min_samples_leaf, criterion=criterion),
{'max_depth':max_depth,
}),
("GradientBoosting", "gb",
ensemble.GradientBoostingClassifier(n_estimators=n_estimators,
min_samples_leaf=min_samples_leaf),
{'max_depth':[10,],
}),
]
for est_full_name, est_name, est, params in estimators:
print ('\n%s\n%s\n' % ('-'*25, est_full_name))
print ('Run\tEst\tScore\tAUROC\tAUPRC\tTime\tBest parameters')
matriz = []
t0 = time.time()
for run in range(n_runs):
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y,
test_size=0.15, random_state=run)
kf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=int(run*9))
gs = GridSearchCV(est, params, cv=kf,# n_iter=n_iter_search,
scoring=scorer, verbose=0,n_jobs=-1)
t1 = time.time()
gs.fit(X_train, y_train)
y_prob0 = gs.best_estimator_.predict_proba(X_train)[:,1]
y_prob = gs.best_estimator_.predict_proba(X_test)[:,1]
roc = roc_auc_score(y_test, y_prob)
pr = average_precision_score(y_test, y_prob)
run_time = time.time() - t1
avg_time = run_time/gs.n_splits_
print ("%i\t%s\t%.4f\t%.4f\t%.4f\t%.2f\t%s" % (run, est_name,
gs.best_score_, roc, pr, avg_time, gs.best_params_))
# get importance
imp = []
mdl = gs.best_estimator_
if est_name in ['ub','sbag']:
imp = np.mean([
e.feature_importances_ for e in mdl.estimators_
], axis=0)
elif est_name in ['rus','smt']:
imp = mdl.named_steps['tree'].feature_importances_
elif est_name == 'lgst':
imp = mdl.coef_.ravel()
else:
imp = mdl.feature_importances_
matriz.append(
{ 'run' : run,
'estimator' : est_name,
'roc' : roc,
'pr' : pr,
'best_params' : gs.best_params_,
'avg_time' : avg_time,
'importance' : imp,
})
if run == save_run:
path = 'results/pkl/'+dataset+'/'+est_name.lower() + '.pkl'
joblib.dump(gs.best_estimator_, path)
print("Elapsed time: %0.3fs" % (time.time()-t0))
# Save results
data = pd.DataFrame(matriz) | 0.451568 | 0.259321 |
# %% import libraries
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, recall_score, precision_score
from sklearn.metrics import confusion_matrix, f1_score, roc_curve, roc_auc_score
import matplotlib.pyplot as plt
import seaborn as sns
#%matplotlib inline
%matplotlib qt5
# %% read data and explore
df = pd.read_csv("mushrooms.csv", sep=",")
print("Number of missing values:", len(df[df.isnull().any(axis=1)])) # no missing values!
# %% example rows
df.head()
list(df)
# %% describe data
pd.set_option('display.max_columns', 25) # to see all features
df.describe()
# %% detail of veil-type
df['veil-type'].unique()
# %% remove constant-valued veil-type
df = df.drop(columns=['veil-type'])
pd.set_option('display.max_columns', 0) # change max_columns back
# %% encoding to be able to run ML algorithms
le = LabelEncoder()
df = df.apply(le.fit_transform)
# %% histogram of data
df.hist();
# %% correlation matrix
corr = df.corr()
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values,
annot=True);
# %% check if dataset is balanced
df['class'].unique()
len(df[df['class']==1])/len(df)
# %% name the label and features
label = df['class']
features = df.drop(columns=['class'])
# %% train/Test Split
features_train, features_test, label_train, label_test = train_test_split(features,label, test_size=0.25, random_state = 42, shuffle=True, stratify=label)
features_train, features_valid, label_train, label_valid = train_test_split(features_train,label_train, test_size=(1/3), random_state = 42, shuffle=True, stratify=label_train)
# %% feature importances using RandomForestClassifier
rf = RandomForestClassifier(n_estimators=100, max_depth=2,random_state=0)
rf.fit(features_train, label_train)
feature_names = list(features_train)
print ("Features sorted by their score:\n")
print (sorted(zip(map(lambda x: round(x, 4), rf.feature_importances_), feature_names), reverse=True))
# %% remove features with low importance
df = df.drop(columns=['veil-color'])
df = df.drop(columns=['gill-attachment'])
# %% name the label and features
label = df['class']
features = df.drop(columns=['class'])
# %% train/Test Split
features_train, features_test, label_train, label_test = train_test_split(features,label, test_size=0.25, random_state = 42, shuffle=True, stratify=label)
features_train, features_valid, label_train, label_valid = train_test_split(features_train,label_train, test_size=(1/3), random_state = 42, shuffle=True, stratify=label_train)
# %% Gaussian Naive Bayes
gnb = GaussianNB()
gnb.fit(features_train, label_train)
label_train_pred = gnb.predict(features_train)
print ("\nGaussian NB Performance on Training Data:")
print ("Accuracy: {0:.3f}".format(accuracy_score(label_train, label_train_pred)))
print ("Recall: {0:.3f}".format(recall_score(label_train, label_train_pred)))
print ("Precision: {0:.3f}".format(precision_score(label_train, label_train_pred)))
print ("f1: {0:.3f}".format(f1_score(label_train, label_train_pred)))
print ("Confusion Matrix: \n{}\n".format(confusion_matrix(label_train, label_train_pred)))
label_valid_pred = gnb.predict(features_valid)
print ("\nGaussian NB Performance on Validation Data:")
print ("Accuracy: {0:.3f}".format(accuracy_score(label_valid, label_valid_pred)))
print ("Recall: {0:.3f}".format(recall_score(label_valid, label_valid_pred)))
print ("Precision: {0:.3f}".format(precision_score(label_valid, label_valid_pred)))
print ("f1: {0:.3f}".format(f1_score(label_valid, label_valid_pred)))
print ("Confusion Matrix: \n{}\n".format(confusion_matrix(label_valid, label_valid_pred)))
# %% Logistic Regression
lr = LogisticRegression(random_state=22)
lr.fit(features_train, label_train)
label_train_pred = lr.predict(features_train)
print ("\nLogistic Regression Performance on Training Data:")
print ("Accuracy: {0:.3f}".format(accuracy_score(label_train, label_train_pred)))
print ("Recall: {0:.3f}".format(recall_score(label_train, label_train_pred)))
print ("Precision: {0:.3f}".format(precision_score(label_train, label_train_pred)))
print ("f1: {0:.3f}".format(f1_score(label_train, label_train_pred)))
print ("Confusion Matrix: \n{}\n".format(confusion_matrix(label_train, label_train_pred)))
# %% Logistic Regression - Hyperparameter Tuning
grid={"C":[0.01,0.1,1.0,10.0,100.0,1000.0], "penalty":["l1","l2"]}
lr_cv=GridSearchCV(lr,grid, n_jobs=-1) # use all processors
lr_cv.fit(features_valid,label_valid)
print("Tuned Hyperparameters:",lr_cv.best_params_)
lr = LogisticRegression(random_state=22, C=1000.0, penalty="l2")
lr.fit(features_train, label_train)
label_valid_pred = lr.predict(features_valid)
print ("\nLogistic Regression Performance on Validation Data:")
print ("Accuracy: {0:.3f}".format(accuracy_score(label_valid, label_valid_pred)))
print ("Recall: {0:.3f}".format(recall_score(label_valid, label_valid_pred)))
print ("Precision: {0:.3f}".format(precision_score(label_valid, label_valid_pred)))
print ("f1: {0:.3f}".format(f1_score(label_valid, label_valid_pred)))
print ("Confusion Matrix: \n{}\n".format(confusion_matrix(label_valid, label_valid_pred)))
# %% Decision Tree
dt = DecisionTreeClassifier(random_state=22)
dt.fit(features_train, label_train)
label_train_pred = dt.predict(features_train)
print ("\nDecision Tree Performance on Training Data:")
print ("Accuracy: {}".format(accuracy_score(label_train, label_train_pred)))
print ("Recall: {}".format(recall_score(label_train, label_train_pred)))
print ("Precision: {}".format(precision_score(label_train, label_train_pred)))
print ("Confusion Matrix: \n{}\n".format(confusion_matrix(label_train, label_train_pred)))
label_valid_pred = dt.predict(features_valid) # No hyperparameter tuning
print ("\nDecision Tree Performance on Validation Data:")
print ("Accuracy: {}".format(accuracy_score(label_valid, label_valid_pred)))
print ("Recall: {}".format(recall_score(label_valid, label_valid_pred)))
print ("Precision: {}".format(precision_score(label_valid, label_valid_pred)))
print ("Confusion Matrix: \n{}\n".format(confusion_matrix(label_valid, label_valid_pred)))
# %% Final Performances
label_test_pred_gnb = gnb.predict(features_test)
print ("\nGaussian NB Performance on Test Data:")
print ("Accuracy: {0:.3f}".format(accuracy_score(label_test, label_test_pred_gnb)))
print ("Recall: {0:.3f}".format(recall_score(label_test, label_test_pred_gnb)))
print ("Precision: {0:.3f}".format(precision_score(label_test, label_test_pred_gnb)))
print ("f1: {0:.3f}".format(f1_score(label_test, label_test_pred_gnb)))
print ("Confusion Matrix: \n{}\n".format(confusion_matrix(label_test, label_test_pred_gnb)))
label_test_pred_lr = lr.predict(features_test)
print ("\nLogistic Regression Performance on Test Data:")
print ("Accuracy: {0:.3f}".format(accuracy_score(label_test, label_test_pred_lr)))
print ("Recall: {0:.3f}".format(recall_score(label_test, label_test_pred_lr)))
print ("Precision: {0:.3f}".format(precision_score(label_test, label_test_pred_lr)))
print ("f1: {0:.3f}".format(f1_score(label_test, label_test_pred_lr)))
print ("Confusion Matrix: \n{}\n".format(confusion_matrix(label_test, label_test_pred_lr)))
label_test_pred_dt = dt.predict(features_test)
print ("\nDecision Tree Performance on Test Data:")
print ("Accuracy: {0:.3f}".format(accuracy_score(label_test, label_test_pred_dt)))
print ("Recall: {0:.3f}".format(recall_score(label_test, label_test_pred_dt)))
print ("Precision: {0:.3f}".format(precision_score(label_test, label_test_pred_dt)))
print ("f1: {0:.3f}".format(f1_score(label_test, label_test_pred_dt)))
print ("Confusion Matrix: \n{}\n".format(confusion_matrix(label_test, label_test_pred_dt)))
# %% Plot ROC Curve
plt.figure()
pred = label_test_pred_gnb
label = label_test
fpr, tpr, thresh = roc_curve(label, pred)
auc = roc_auc_score(label, pred)
plt.plot(fpr,tpr,label="Gaussian Naive Bayes, AUC="+str(auc))
pred = label_test_pred_lr
label = label_test
fpr, tpr, thresh = roc_curve(label, pred)
auc = roc_auc_score(label, pred)
plt.plot(fpr,tpr,label="Logistic Regression, AUC="+str(auc))
pred = label_test_pred_dt
label = label_test
fpr, tpr, thresh = roc_curve(label, pred)
auc = roc_auc_score(label, pred)
plt.plot(fpr,tpr,label="Decision Tree, AUC="+str(auc))
plt.legend(loc="lower right", fontsize='xx-large')
plt.show() | Project_Source_Code_Kutay_Erkan.py |
# %% import libraries
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, recall_score, precision_score
from sklearn.metrics import confusion_matrix, f1_score, roc_curve, roc_auc_score
import matplotlib.pyplot as plt
import seaborn as sns
#%matplotlib inline
%matplotlib qt5
# %% read data and explore
df = pd.read_csv("mushrooms.csv", sep=",")
print("Number of missing values:", len(df[df.isnull().any(axis=1)])) # no missing values!
# %% example rows
df.head()
list(df)
# %% describe data
pd.set_option('display.max_columns', 25) # to see all features
df.describe()
# %% detail of veil-type
df['veil-type'].unique()
# %% remove constant-valued veil-type
df = df.drop(columns=['veil-type'])
pd.set_option('display.max_columns', 0) # change max_columns back
# %% encoding to be able to run ML algorithms
le = LabelEncoder()
df = df.apply(le.fit_transform)
# %% histogram of data
df.hist();
# %% correlation matrix
corr = df.corr()
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values,
annot=True);
# %% check if dataset is balanced
df['class'].unique()
len(df[df['class']==1])/len(df)
# %% name the label and features
label = df['class']
features = df.drop(columns=['class'])
# %% train/Test Split
features_train, features_test, label_train, label_test = train_test_split(features,label, test_size=0.25, random_state = 42, shuffle=True, stratify=label)
features_train, features_valid, label_train, label_valid = train_test_split(features_train,label_train, test_size=(1/3), random_state = 42, shuffle=True, stratify=label_train)
# %% feature importances using RandomForestClassifier
rf = RandomForestClassifier(n_estimators=100, max_depth=2,random_state=0)
rf.fit(features_train, label_train)
feature_names = list(features_train)
print ("Features sorted by their score:\n")
print (sorted(zip(map(lambda x: round(x, 4), rf.feature_importances_), feature_names), reverse=True))
# %% remove features with low importance
df = df.drop(columns=['veil-color'])
df = df.drop(columns=['gill-attachment'])
# %% name the label and features
label = df['class']
features = df.drop(columns=['class'])
# %% train/Test Split
features_train, features_test, label_train, label_test = train_test_split(features,label, test_size=0.25, random_state = 42, shuffle=True, stratify=label)
features_train, features_valid, label_train, label_valid = train_test_split(features_train,label_train, test_size=(1/3), random_state = 42, shuffle=True, stratify=label_train)
# %% Gaussian Naive Bayes
gnb = GaussianNB()
gnb.fit(features_train, label_train)
label_train_pred = gnb.predict(features_train)
print ("\nGaussian NB Performance on Training Data:")
print ("Accuracy: {0:.3f}".format(accuracy_score(label_train, label_train_pred)))
print ("Recall: {0:.3f}".format(recall_score(label_train, label_train_pred)))
print ("Precision: {0:.3f}".format(precision_score(label_train, label_train_pred)))
print ("f1: {0:.3f}".format(f1_score(label_train, label_train_pred)))
print ("Confusion Matrix: \n{}\n".format(confusion_matrix(label_train, label_train_pred)))
label_valid_pred = gnb.predict(features_valid)
print ("\nGaussian NB Performance on Validation Data:")
print ("Accuracy: {0:.3f}".format(accuracy_score(label_valid, label_valid_pred)))
print ("Recall: {0:.3f}".format(recall_score(label_valid, label_valid_pred)))
print ("Precision: {0:.3f}".format(precision_score(label_valid, label_valid_pred)))
print ("f1: {0:.3f}".format(f1_score(label_valid, label_valid_pred)))
print ("Confusion Matrix: \n{}\n".format(confusion_matrix(label_valid, label_valid_pred)))
# %% Logistic Regression
lr = LogisticRegression(random_state=22)
lr.fit(features_train, label_train)
label_train_pred = lr.predict(features_train)
print ("\nLogistic Regression Performance on Training Data:")
print ("Accuracy: {0:.3f}".format(accuracy_score(label_train, label_train_pred)))
print ("Recall: {0:.3f}".format(recall_score(label_train, label_train_pred)))
print ("Precision: {0:.3f}".format(precision_score(label_train, label_train_pred)))
print ("f1: {0:.3f}".format(f1_score(label_train, label_train_pred)))
print ("Confusion Matrix: \n{}\n".format(confusion_matrix(label_train, label_train_pred)))
# %% Logistic Regression - Hyperparameter Tuning
# Search C and penalty on the validation set.  NOTE: the default 'lbfgs'
# solver supports only the l2 penalty, so the search estimator uses
# 'liblinear', which handles both l1 and l2.
grid = {"C": [0.01, 0.1, 1.0, 10.0, 100.0, 1000.0], "penalty": ["l1", "l2"]}
lr_cv = GridSearchCV(LogisticRegression(random_state=22, solver="liblinear"),
                     grid, n_jobs=-1)  # use all processors
lr_cv.fit(features_valid, label_valid)
print("Tuned Hyperparameters:", lr_cv.best_params_)
# Refit on the training data with the parameters the search actually found.
# (Previously C=1000.0 / penalty="l2" were hard-coded here, silently
# discarding the GridSearchCV result.)
lr = LogisticRegression(random_state=22, solver="liblinear", **lr_cv.best_params_)
lr.fit(features_train, label_train)
label_valid_pred = lr.predict(features_valid)
print ("\nLogistic Regression Performance on Validation Data:")
print ("Accuracy: {0:.3f}".format(accuracy_score(label_valid, label_valid_pred)))
print ("Recall: {0:.3f}".format(recall_score(label_valid, label_valid_pred)))
print ("Precision: {0:.3f}".format(precision_score(label_valid, label_valid_pred)))
print ("f1: {0:.3f}".format(f1_score(label_valid, label_valid_pred)))
print ("Confusion Matrix: \n{}\n".format(confusion_matrix(label_valid, label_valid_pred)))
# %% Decision Tree
dt = DecisionTreeClassifier(random_state=22)
dt.fit(features_train, label_train)
label_train_pred = dt.predict(features_train)
print ("\nDecision Tree Performance on Training Data:")
# Report the same metric set and {0:.3f} formatting as the other model
# sections; this section previously omitted f1 and printed unrounded values.
print ("Accuracy: {0:.3f}".format(accuracy_score(label_train, label_train_pred)))
print ("Recall: {0:.3f}".format(recall_score(label_train, label_train_pred)))
print ("Precision: {0:.3f}".format(precision_score(label_train, label_train_pred)))
print ("f1: {0:.3f}".format(f1_score(label_train, label_train_pred)))
print ("Confusion Matrix: \n{}\n".format(confusion_matrix(label_train, label_train_pred)))
label_valid_pred = dt.predict(features_valid)  # No hyperparameter tuning
print ("\nDecision Tree Performance on Validation Data:")
print ("Accuracy: {0:.3f}".format(accuracy_score(label_valid, label_valid_pred)))
print ("Recall: {0:.3f}".format(recall_score(label_valid, label_valid_pred)))
print ("Precision: {0:.3f}".format(precision_score(label_valid, label_valid_pred)))
print ("f1: {0:.3f}".format(f1_score(label_valid, label_valid_pred)))
print ("Confusion Matrix: \n{}\n".format(confusion_matrix(label_valid, label_valid_pred)))
# %% Final Performances
# Each fitted model is evaluated exactly once on the held-out test set.
label_test_pred_gnb = gnb.predict(features_test)
print ("\nGaussian NB Performance on Test Data:")
print ("Accuracy: {0:.3f}".format(accuracy_score(label_test, label_test_pred_gnb)))
print ("Recall: {0:.3f}".format(recall_score(label_test, label_test_pred_gnb)))
print ("Precision: {0:.3f}".format(precision_score(label_test, label_test_pred_gnb)))
print ("f1: {0:.3f}".format(f1_score(label_test, label_test_pred_gnb)))
print ("Confusion Matrix: \n{}\n".format(confusion_matrix(label_test, label_test_pred_gnb)))
label_test_pred_lr = lr.predict(features_test)
print ("\nLogistic Regression Performance on Test Data:")
print ("Accuracy: {0:.3f}".format(accuracy_score(label_test, label_test_pred_lr)))
print ("Recall: {0:.3f}".format(recall_score(label_test, label_test_pred_lr)))
print ("Precision: {0:.3f}".format(precision_score(label_test, label_test_pred_lr)))
print ("f1: {0:.3f}".format(f1_score(label_test, label_test_pred_lr)))
print ("Confusion Matrix: \n{}\n".format(confusion_matrix(label_test, label_test_pred_lr)))
label_test_pred_dt = dt.predict(features_test)
print ("\nDecision Tree Performance on Test Data:")
print ("Accuracy: {0:.3f}".format(accuracy_score(label_test, label_test_pred_dt)))
print ("Recall: {0:.3f}".format(recall_score(label_test, label_test_pred_dt)))
print ("Precision: {0:.3f}".format(precision_score(label_test, label_test_pred_dt)))
print ("f1: {0:.3f}".format(f1_score(label_test, label_test_pred_dt)))
print ("Confusion Matrix: \n{}\n".format(confusion_matrix(label_test, label_test_pred_dt)))
# %% Plot ROC Curve
# roc_curve/roc_auc_score need continuous scores; feeding hard 0/1 class
# predictions (as this section previously did) collapses each curve to a
# single operating point and understates AUC.  Use the positive-class
# probability from each model instead.
plt.figure()
for name, model in [("Gaussian Naive Bayes", gnb),
                    ("Logistic Regression", lr),
                    ("Decision Tree", dt)]:
    scores = model.predict_proba(features_test)[:, 1]
    fpr, tpr, thresh = roc_curve(label_test, scores)
    auc = roc_auc_score(label_test, scores)
    plt.plot(fpr, tpr, label=name + ", AUC=" + str(auc))
plt.legend(loc="lower right", fontsize='xx-large')
plt.show()
import datetime
import math
import pytest
import pytz
from mer.constants import mars_year_0_start, sols_per_martian_year
from mer.retimers import EarthDateTime, MarsYearSol, MarsYearSolarLongitude, \
datetime_to_earthdatetime, sols_after_mars_year_0, sols_between_datetimes, \
sols_since_datetime
class TestEarthDateTime:
    """Tests for the EarthDateTime class."""

    class TestNew:
        """Construction and timezone-awareness behaviour."""

        @pytest.fixture
        def utc_edt(self) -> datetime.datetime:
            yield EarthDateTime(2020, 1, 1, 0, 0, 0, 0, tzinfo=pytz.UTC)

        @pytest.fixture
        def eastern_edt(self) -> datetime.datetime:
            eastern = pytz.timezone('US/Eastern')
            yield EarthDateTime(2020, 1, 1, 0, 0, 0, 0, tzinfo=eastern)

        @pytest.fixture
        def native_edt(self) -> datetime.datetime:
            # No tzinfo supplied; compared for equality with the UTC fixture
            # below, so naive inputs are evidently treated as UTC.
            yield EarthDateTime(2020, 1, 1, 0, 0, 0, 0)

        def test_int_input_raises_type_error(self):
            with pytest.raises(TypeError):
                EarthDateTime(2837476)

        def test_datetime_input_raises_type_error(self):
            dt = datetime.datetime(2020, 1, 1, 0, 0, 0)
            with pytest.raises(TypeError):
                EarthDateTime(dt)

        def test_utc_input_equals_unaware_input(self, utc_edt, native_edt):
            assert utc_edt == native_edt

        def test_utc_input_is_not_eastern_input(self, utc_edt, eastern_edt):
            assert utc_edt != eastern_edt

    class TestToFractionalMarsYear:
        """Known Mars-year boundary datetimes map to whole-number years."""

        @pytest.fixture
        def mars_year_10_start(self) -> EarthDateTime:
            yield EarthDateTime(1972, 3, 15, 4, 48, 0)

        @pytest.fixture
        def mars_year_20_start(self) -> EarthDateTime:
            yield EarthDateTime(1991, 1, 4, 21, 50, 24)

        def test_mars_year_10_start_returns_10(self, mars_year_10_start):
            assert mars_year_10_start.to_fractional_mars_year() == \
                pytest.approx(10, abs=0.0001)

        def test_mars_year_20_start_returns_20(self, mars_year_20_start):
            assert mars_year_20_start.to_fractional_mars_year() == \
                pytest.approx(20, abs=0.0001)

    class TestToWholeMarsYear:
        @pytest.fixture
        def positive_date(self) -> EarthDateTime:
            yield EarthDateTime(2000, 1, 1, 0, 0, 0, 0)

        @pytest.fixture
        def negative_date(self) -> EarthDateTime:
            # Predates Mars year 0, so the whole year is negative.
            yield EarthDateTime(1900, 1, 1, 0, 0, 0, 0)

        def test_positive_mars_year_returns_expected_value(self, positive_date):
            assert positive_date.to_whole_mars_year() == 24

        def test_negative_mars_year_returns_expected_value(self, negative_date):
            assert negative_date.to_whole_mars_year() == -29

    class TestToSol:
        @pytest.fixture
        def mars_year_0_start_edt(self) -> EarthDateTime:
            yield datetime_to_earthdatetime(mars_year_0_start)

        @pytest.fixture
        def moment_before_mars_year_0_start(self) -> EarthDateTime:
            # 1 ms before the year boundary, i.e. the last moment of the
            # preceding Mars year.
            yield datetime_to_earthdatetime(mars_year_0_start) - \
                datetime.timedelta(milliseconds=1)

        def test_first_moment_of_year_equals_0(self, mars_year_0_start_edt):
            assert mars_year_0_start_edt.to_sol() == 0

        def test_last_moment_of_mars_year_equals_yearly_sols(
                self, moment_before_mars_year_0_start):
            sol = moment_before_mars_year_0_start.to_sol()
            assert sol == pytest.approx(sols_per_martian_year, abs=0.001)

    class TestToSolarLongitude:
        @pytest.fixture
        def mars_year_0_start_edt(self) -> EarthDateTime:
            yield datetime_to_earthdatetime(mars_year_0_start)

        def test_start_of_mars_year_0_returns_0(self, mars_year_0_start_edt):
            # Compare via sin(Ls) so that 0 and 360 degrees are equivalent.
            ls = mars_year_0_start_edt.to_solar_longitude()
            assert math.sin(math.radians(ls)) == pytest.approx(0, abs=0.001)
class TestMarsYearSol:
    """Tests for the MarsYearSol (Mars year + sol number) retimer."""

    class TestInit:
        def test_int_mars_year_float_sol_raises_no_error(self):
            MarsYearSol(0, 234.567)

        def test_float_mars_year_raises_type_error(self):
            # Mars year must be an int; a float year is rejected even if whole.
            with pytest.raises(TypeError):
                MarsYearSol(14.0, 54)

        def test_first_moment_of_year_raises_no_error(self):
            MarsYearSol(14, 0)

        def test_last_moment_of_year_raises_no_error(self):
            MarsYearSol(14, sols_per_martian_year)

        def test_negative_sol_raises_value_error(self):
            with pytest.raises(ValueError):
                MarsYearSol(14, -0.0001)

        def test_large_sol_raises_value_error(self):
            sol = sols_per_martian_year + 0.0001
            with pytest.raises(ValueError):
                MarsYearSol(14, sol)

    class TestToDatetime:
        def test_sol_0_of_my_0_matches_known_datetime(self):
            assert MarsYearSol(0, 0).to_datetime() == mars_year_0_start

        # Years 4279 / -1039 overflow; 4278 / -1038 do not — presumably the
        # corresponding Earth dates fall outside datetime's supported range
        # (TODO confirm against mer.retimers).
        def test_far_future_date_raises_overflow_error(self) -> None:
            with pytest.raises(OverflowError):
                MarsYearSol(4279, 0).to_datetime()

        def test_not_far_future_date_raises_no_error(self) -> None:
            MarsYearSol(4278, 0).to_datetime()

        def test_far_past_date_raises_overflow_error(self) -> None:
            with pytest.raises(OverflowError):
                MarsYearSol(-1039, 0).to_datetime()

        def test_not_far_past_date_raises_no_error(self) -> None:
            MarsYearSol(-1038, 0).to_datetime()

    class TestToFractionalMarsYear:
        def test_sol_0_returns_mars_year_number(self):
            assert MarsYearSol(14, 0).to_fractional_mars_year() == 14

        def test_last_sol_of_year_returns_1_more_than_year(self):
            mars_year = MarsYearSol(14, sols_per_martian_year).to_fractional_mars_year()
            assert mars_year == pytest.approx(15, abs=0.01)

        def test_midpoint_sol_returns_half_greater_mars_year(self):
            mars_year = MarsYearSol(14, sols_per_martian_year / 2).to_fractional_mars_year()
            assert mars_year == pytest.approx(14.5, abs=0.01)

    class TestToSolarLongitude:
        # Year boundaries compared via sin(Ls) so 0 and 360 are equivalent.
        def test_start_of_year_returns_0(self):
            sin_ls = math.sin(math.radians(MarsYearSol(30, 0).to_solar_longitude()))
            assert sin_ls == pytest.approx(0, abs=0.05)

        def test_end_of_year_returns_0(self):
            sin_ls = math.sin(math.radians(
                MarsYearSol(30, sols_per_martian_year).to_solar_longitude()))
            assert sin_ls == pytest.approx(0, abs=0.05)

        # Reference sol values attributed to the LMD (Mars climate database).
        def test_northern_summer_solstice_matches_lmd_value(self):
            assert MarsYearSol(0, 193.47).to_solar_longitude() == \
                pytest.approx(90, abs=0.1)

        def test_northern_autumn_equinox_matches_lmd_value(self):
            assert MarsYearSol(0, 371.99).to_solar_longitude() == \
                pytest.approx(180, abs=0.2)

        def test_northern_winter_solstice_matches_lmd_value(self):
            assert MarsYearSol(0, 514.76).to_solar_longitude() == \
                pytest.approx(270, abs=0.2)
class TestMarsYearSolarLongitude:
    """Tests for the MarsYearSolarLongitude (Mars year + Ls) retimer."""

    class TestInit:
        def test_int_mars_year_float_ls_raises_no_error(self):
            MarsYearSolarLongitude(0, 234.567)

        def test_float_mars_year_raises_type_error(self):
            with pytest.raises(TypeError):
                MarsYearSolarLongitude(14.0, 54)

        def test_first_moment_of_year_raises_no_error(self):
            MarsYearSolarLongitude(14, 0)

        def test_last_moment_of_year_raises_no_error(self):
            MarsYearSolarLongitude(14, 360)

        def test_negative_ls_raises_value_error(self):
            with pytest.raises(ValueError):
                MarsYearSolarLongitude(14, -0.0001)

        def test_large_ls_raises_value_error(self):
            with pytest.raises(ValueError):
                MarsYearSolarLongitude(14, 360.0001)

    class TestToDatetime:
        # NOTE(review): every test in this inner class exercises MarsYearSol,
        # not MarsYearSolarLongitude — apparently copy-pasted from
        # TestMarsYearSol.TestToDatetime, so MarsYearSolarLongitude.to_datetime()
        # is effectively untested here.  Confirm intent and port these tests
        # to MarsYearSolarLongitude.
        def test_sol_0_of_my_0_matches_known_datetime(self):
            assert MarsYearSol(0, 0).to_datetime() == mars_year_0_start

        def test_far_future_date_raises_overflow_error(self) -> None:
            with pytest.raises(OverflowError):
                MarsYearSol(4279, 0).to_datetime()

        def test_not_far_future_date_raises_no_error(self) -> None:
            MarsYearSol(4278, 0).to_datetime()

        def test_far_past_date_raises_overflow_error(self) -> None:
            with pytest.raises(OverflowError):
                MarsYearSol(-1039, 0).to_datetime()

        def test_not_far_past_date_raises_no_error(self) -> None:
            MarsYearSol(-1038, 0).to_datetime()

    class TestToFractionalMarsYear:
        def test_ls_0_returns_mars_year_number(self):
            assert MarsYearSolarLongitude(14, 0).to_fractional_mars_year() == pytest.approx(14, abs=0.001)

        def test_ls_360_of_year_returns_1_more_than_year(self):
            mars_year = MarsYearSolarLongitude(14, 359.99).to_fractional_mars_year()
            assert mars_year == pytest.approx(15, abs=0.01)

    class TestToSol:
        def test_start_of_year_returns_0(self):
            sol = MarsYearSolarLongitude(30, 0).to_sol()
            assert sol == pytest.approx(0, abs=0.05)

        def test_end_of_year_returns_0(self):
            sol = MarsYearSolarLongitude(30, 359.99).to_sol()
            assert sol == pytest.approx(sols_per_martian_year, abs=0.05)

        # Reference sols attributed to the LMD; inverse of the
        # TestMarsYearSol.TestToSolarLongitude checks.
        def test_northern_summer_solstice_matches_lmd_value(self):
            sol = MarsYearSolarLongitude(0, 90).to_sol()
            assert sol == pytest.approx(193.47, abs=0.2)

        def test_northern_autumn_equinox_matches_lmd_value(self):
            sol = MarsYearSolarLongitude(0, 180).to_sol()
            assert sol == pytest.approx(371.99, abs=0.2)

        def test_northern_winter_solstice_matches_lmd_value(self):
            sol = MarsYearSolarLongitude(0, 270).to_sol()
            assert sol == pytest.approx(514.76, abs=0.2)
class TestDatetimeToEarthdatetime:
    """Tests for the datetime -> EarthDateTime converter."""

    def test_datetime_raises_no_error(self):
        datetime_to_earthdatetime(mars_year_0_start)

    def test_date_raises_type_error(self):
        # A bare date (no time component) must be rejected.
        date = datetime.date(2020, 1, 1)
        with pytest.raises(TypeError):
            datetime_to_earthdatetime(date)
class TestSolsAfterMarsYear0:
    """Tests for sols_after_mars_year_0()."""

    @pytest.fixture
    def maven_arrival_datetime(self) -> datetime.datetime:
        # Per the fixture name, a datetime associated with MAVEN's arrival
        # at Mars — TODO confirm the exact event this date represents.
        yield datetime.datetime(2014, 9, 2, 2, 24, 0, 0, pytz.UTC)

    def test_int_input_raises_type_error(self):
        with pytest.raises(TypeError):
            sols_after_mars_year_0(2000)

    def test_date_input_raises_type_error(self, maven_arrival_datetime):
        with pytest.raises(TypeError):
            sols_after_mars_year_0(maven_arrival_datetime.date())

    def test_start_of_mars_year_0_returns_0(self):
        assert sols_after_mars_year_0(mars_year_0_start) == 0

    def test_maven_arrival_matches_known_value(self, maven_arrival_datetime):
        # Regression pin: exact float from a previously trusted run.
        assert sols_after_mars_year_0(maven_arrival_datetime) == \
            21781.872772174716
class TestSolsBetweenDatetimes:
    """Tests for sols_between_datetimes()."""

    @pytest.fixture
    def generic_datetime(self) -> datetime.datetime:
        yield datetime.datetime(2000, 1, 1, 0, 0, 0)

    @pytest.fixture
    def opportunity_start(self) -> datetime.datetime:
        yield datetime.datetime(2004, 1, 25, 0, 0, 0)

    @pytest.fixture
    def opportunity_end(self) -> datetime.datetime:
        yield datetime.datetime(2018, 6, 10, 0, 0, 0)

    def test_int_first_input_raises_type_error(self, generic_datetime):
        with pytest.raises(TypeError):
            sols_between_datetimes(2000, generic_datetime)

    def test_int_second_input_raises_type_error(self, generic_datetime):
        with pytest.raises(TypeError):
            sols_between_datetimes(generic_datetime, 2000)

    def test_int_both_inputs_raises_type_error(self):
        with pytest.raises(TypeError):
            sols_between_datetimes(2000, 2001)

    def test_identical_datetime_inputs_returns_0(self, generic_datetime):
        assert sols_between_datetimes(generic_datetime, generic_datetime) == 0

    def test_opportunity_length_matches_known_value(
            self, opportunity_start, opportunity_end):
        # Regression pin: exact float from a previously trusted run.
        assert sols_between_datetimes(opportunity_start, opportunity_end) == \
            5109.551211085292
class TestSolsSinceDatetime:
    """Placeholder: tests for sols_since_datetime are not yet written."""
    # Extraction junk ("| mer/tests/test_retimers.py | import datetime") was
    # fused onto the `pass` line here; it was not valid Python and is removed.
    pass
import math
import pytest
import pytz
from mer.constants import mars_year_0_start, sols_per_martian_year
from mer.retimers import EarthDateTime, MarsYearSol, MarsYearSolarLongitude, \
datetime_to_earthdatetime, sols_after_mars_year_0, sols_between_datetimes, \
sols_since_datetime
# NOTE(review): this class is a byte-for-byte duplicate of the identically
# named class defined earlier in this file (dataset-dump artifact).
class TestEarthDateTime:
    """Tests for the EarthDateTime class."""

    class TestNew:
        @pytest.fixture
        def utc_edt(self) -> datetime.datetime:
            yield EarthDateTime(2020, 1, 1, 0, 0, 0, 0, tzinfo=pytz.UTC)

        @pytest.fixture
        def eastern_edt(self) -> datetime.datetime:
            eastern = pytz.timezone('US/Eastern')
            yield EarthDateTime(2020, 1, 1, 0, 0, 0, 0, tzinfo=eastern)

        @pytest.fixture
        def native_edt(self) -> datetime.datetime:
            yield EarthDateTime(2020, 1, 1, 0, 0, 0, 0)

        def test_int_input_raises_type_error(self):
            with pytest.raises(TypeError):
                EarthDateTime(2837476)

        def test_datetime_input_raises_type_error(self):
            dt = datetime.datetime(2020, 1, 1, 0, 0, 0)
            with pytest.raises(TypeError):
                EarthDateTime(dt)

        def test_utc_input_equals_unaware_input(self, utc_edt, native_edt):
            assert utc_edt == native_edt

        def test_utc_input_is_not_eastern_input(self, utc_edt, eastern_edt):
            assert utc_edt != eastern_edt

    class TestToFractionalMarsYear:
        @pytest.fixture
        def mars_year_10_start(self) -> EarthDateTime:
            yield EarthDateTime(1972, 3, 15, 4, 48, 0)

        @pytest.fixture
        def mars_year_20_start(self) -> EarthDateTime:
            yield EarthDateTime(1991, 1, 4, 21, 50, 24)

        def test_mars_year_10_start_returns_10(self, mars_year_10_start):
            assert mars_year_10_start.to_fractional_mars_year() == \
                pytest.approx(10, abs=0.0001)

        def test_mars_year_20_start_returns_20(self, mars_year_20_start):
            assert mars_year_20_start.to_fractional_mars_year() == \
                pytest.approx(20, abs=0.0001)

    class TestToWholeMarsYear:
        @pytest.fixture
        def positive_date(self) -> EarthDateTime:
            yield EarthDateTime(2000, 1, 1, 0, 0, 0, 0)

        @pytest.fixture
        def negative_date(self) -> EarthDateTime:
            yield EarthDateTime(1900, 1, 1, 0, 0, 0, 0)

        def test_positive_mars_year_returns_expected_value(self, positive_date):
            assert positive_date.to_whole_mars_year() == 24

        def test_negative_mars_year_returns_expected_value(self, negative_date):
            assert negative_date.to_whole_mars_year() == -29

    class TestToSol:
        @pytest.fixture
        def mars_year_0_start_edt(self) -> EarthDateTime:
            yield datetime_to_earthdatetime(mars_year_0_start)

        @pytest.fixture
        def moment_before_mars_year_0_start(self) -> EarthDateTime:
            # 1 ms before the year boundary.
            yield datetime_to_earthdatetime(mars_year_0_start) - \
                datetime.timedelta(milliseconds=1)

        def test_first_moment_of_year_equals_0(self, mars_year_0_start_edt):
            assert mars_year_0_start_edt.to_sol() == 0

        def test_last_moment_of_mars_year_equals_yearly_sols(
                self, moment_before_mars_year_0_start):
            sol = moment_before_mars_year_0_start.to_sol()
            assert sol == pytest.approx(sols_per_martian_year, abs=0.001)

    class TestToSolarLongitude:
        @pytest.fixture
        def mars_year_0_start_edt(self) -> EarthDateTime:
            yield datetime_to_earthdatetime(mars_year_0_start)

        def test_start_of_mars_year_0_returns_0(self, mars_year_0_start_edt):
            # Compare via sin(Ls) so 0 and 360 degrees are equivalent.
            ls = mars_year_0_start_edt.to_solar_longitude()
            assert math.sin(math.radians(ls)) == pytest.approx(0, abs=0.001)
# NOTE(review): duplicate of the identically named class defined earlier in
# this file (dataset-dump artifact).
class TestMarsYearSol:
    """Tests for the MarsYearSol (Mars year + sol number) retimer."""

    class TestInit:
        def test_int_mars_year_float_sol_raises_no_error(self):
            MarsYearSol(0, 234.567)

        def test_float_mars_year_raises_type_error(self):
            with pytest.raises(TypeError):
                MarsYearSol(14.0, 54)

        def test_first_moment_of_year_raises_no_error(self):
            MarsYearSol(14, 0)

        def test_last_moment_of_year_raises_no_error(self):
            MarsYearSol(14, sols_per_martian_year)

        def test_negative_sol_raises_value_error(self):
            with pytest.raises(ValueError):
                MarsYearSol(14, -0.0001)

        def test_large_sol_raises_value_error(self):
            sol = sols_per_martian_year + 0.0001
            with pytest.raises(ValueError):
                MarsYearSol(14, sol)

    class TestToDatetime:
        def test_sol_0_of_my_0_matches_known_datetime(self):
            assert MarsYearSol(0, 0).to_datetime() == mars_year_0_start

        def test_far_future_date_raises_overflow_error(self) -> None:
            with pytest.raises(OverflowError):
                MarsYearSol(4279, 0).to_datetime()

        def test_not_far_future_date_raises_no_error(self) -> None:
            MarsYearSol(4278, 0).to_datetime()

        def test_far_past_date_raises_overflow_error(self) -> None:
            with pytest.raises(OverflowError):
                MarsYearSol(-1039, 0).to_datetime()

        def test_not_far_past_date_raises_no_error(self) -> None:
            MarsYearSol(-1038, 0).to_datetime()

    class TestToFractionalMarsYear:
        def test_sol_0_returns_mars_year_number(self):
            assert MarsYearSol(14, 0).to_fractional_mars_year() == 14

        def test_last_sol_of_year_returns_1_more_than_year(self):
            mars_year = MarsYearSol(14, sols_per_martian_year).to_fractional_mars_year()
            assert mars_year == pytest.approx(15, abs=0.01)

        def test_midpoint_sol_returns_half_greater_mars_year(self):
            mars_year = MarsYearSol(14, sols_per_martian_year / 2).to_fractional_mars_year()
            assert mars_year == pytest.approx(14.5, abs=0.01)

    class TestToSolarLongitude:
        def test_start_of_year_returns_0(self):
            sin_ls = math.sin(math.radians(MarsYearSol(30, 0).to_solar_longitude()))
            assert sin_ls == pytest.approx(0, abs=0.05)

        def test_end_of_year_returns_0(self):
            sin_ls = math.sin(math.radians(
                MarsYearSol(30, sols_per_martian_year).to_solar_longitude()))
            assert sin_ls == pytest.approx(0, abs=0.05)

        def test_northern_summer_solstice_matches_lmd_value(self):
            assert MarsYearSol(0, 193.47).to_solar_longitude() == \
                pytest.approx(90, abs=0.1)

        def test_northern_autumn_equinox_matches_lmd_value(self):
            assert MarsYearSol(0, 371.99).to_solar_longitude() == \
                pytest.approx(180, abs=0.2)

        def test_northern_winter_solstice_matches_lmd_value(self):
            assert MarsYearSol(0, 514.76).to_solar_longitude() == \
                pytest.approx(270, abs=0.2)
# NOTE(review): duplicate of the identically named class defined earlier in
# this file (dataset-dump artifact).
class TestMarsYearSolarLongitude:
    """Tests for the MarsYearSolarLongitude (Mars year + Ls) retimer."""

    class TestInit:
        def test_int_mars_year_float_ls_raises_no_error(self):
            MarsYearSolarLongitude(0, 234.567)

        def test_float_mars_year_raises_type_error(self):
            with pytest.raises(TypeError):
                MarsYearSolarLongitude(14.0, 54)

        def test_first_moment_of_year_raises_no_error(self):
            MarsYearSolarLongitude(14, 0)

        def test_last_moment_of_year_raises_no_error(self):
            MarsYearSolarLongitude(14, 360)

        def test_negative_ls_raises_value_error(self):
            with pytest.raises(ValueError):
                MarsYearSolarLongitude(14, -0.0001)

        def test_large_ls_raises_value_error(self):
            with pytest.raises(ValueError):
                MarsYearSolarLongitude(14, 360.0001)

    class TestToDatetime:
        # NOTE(review): these tests exercise MarsYearSol, not
        # MarsYearSolarLongitude — copy-paste from TestMarsYearSol.
        def test_sol_0_of_my_0_matches_known_datetime(self):
            assert MarsYearSol(0, 0).to_datetime() == mars_year_0_start

        def test_far_future_date_raises_overflow_error(self) -> None:
            with pytest.raises(OverflowError):
                MarsYearSol(4279, 0).to_datetime()

        def test_not_far_future_date_raises_no_error(self) -> None:
            MarsYearSol(4278, 0).to_datetime()

        def test_far_past_date_raises_overflow_error(self) -> None:
            with pytest.raises(OverflowError):
                MarsYearSol(-1039, 0).to_datetime()

        def test_not_far_past_date_raises_no_error(self) -> None:
            MarsYearSol(-1038, 0).to_datetime()

    class TestToFractionalMarsYear:
        def test_ls_0_returns_mars_year_number(self):
            assert MarsYearSolarLongitude(14, 0).to_fractional_mars_year() == pytest.approx(14, abs=0.001)

        def test_ls_360_of_year_returns_1_more_than_year(self):
            mars_year = MarsYearSolarLongitude(14, 359.99).to_fractional_mars_year()
            assert mars_year == pytest.approx(15, abs=0.01)

    class TestToSol:
        def test_start_of_year_returns_0(self):
            sol = MarsYearSolarLongitude(30, 0).to_sol()
            assert sol == pytest.approx(0, abs=0.05)

        def test_end_of_year_returns_0(self):
            sol = MarsYearSolarLongitude(30, 359.99).to_sol()
            assert sol == pytest.approx(sols_per_martian_year, abs=0.05)

        def test_northern_summer_solstice_matches_lmd_value(self):
            sol = MarsYearSolarLongitude(0, 90).to_sol()
            assert sol == pytest.approx(193.47, abs=0.2)

        def test_northern_autumn_equinox_matches_lmd_value(self):
            sol = MarsYearSolarLongitude(0, 180).to_sol()
            assert sol == pytest.approx(371.99, abs=0.2)

        def test_northern_winter_solstice_matches_lmd_value(self):
            sol = MarsYearSolarLongitude(0, 270).to_sol()
            assert sol == pytest.approx(514.76, abs=0.2)
# NOTE(review): duplicate of the identically named class defined earlier in
# this file (dataset-dump artifact).
class TestDatetimeToEarthdatetime:
    """Tests for the datetime -> EarthDateTime converter."""

    def test_datetime_raises_no_error(self):
        datetime_to_earthdatetime(mars_year_0_start)

    def test_date_raises_type_error(self):
        date = datetime.date(2020, 1, 1)
        with pytest.raises(TypeError):
            datetime_to_earthdatetime(date)
# NOTE(review): duplicate of the identically named class defined earlier in
# this file (dataset-dump artifact).
class TestSolsAfterMarsYear0:
    """Tests for sols_after_mars_year_0()."""

    @pytest.fixture
    def maven_arrival_datetime(self) -> datetime.datetime:
        yield datetime.datetime(2014, 9, 2, 2, 24, 0, 0, pytz.UTC)

    def test_int_input_raises_type_error(self):
        with pytest.raises(TypeError):
            sols_after_mars_year_0(2000)

    def test_date_input_raises_type_error(self, maven_arrival_datetime):
        with pytest.raises(TypeError):
            sols_after_mars_year_0(maven_arrival_datetime.date())

    def test_start_of_mars_year_0_returns_0(self):
        assert sols_after_mars_year_0(mars_year_0_start) == 0

    def test_maven_arrival_matches_known_value(self, maven_arrival_datetime):
        # Regression pin from a previously trusted run.
        assert sols_after_mars_year_0(maven_arrival_datetime) == \
            21781.872772174716
# NOTE(review): duplicate of the identically named class defined earlier in
# this file (dataset-dump artifact).
class TestSolsBetweenDatetimes:
    """Tests for sols_between_datetimes()."""

    @pytest.fixture
    def generic_datetime(self) -> datetime.datetime:
        yield datetime.datetime(2000, 1, 1, 0, 0, 0)

    @pytest.fixture
    def opportunity_start(self) -> datetime.datetime:
        yield datetime.datetime(2004, 1, 25, 0, 0, 0)

    @pytest.fixture
    def opportunity_end(self) -> datetime.datetime:
        yield datetime.datetime(2018, 6, 10, 0, 0, 0)

    def test_int_first_input_raises_type_error(self, generic_datetime):
        with pytest.raises(TypeError):
            sols_between_datetimes(2000, generic_datetime)

    def test_int_second_input_raises_type_error(self, generic_datetime):
        with pytest.raises(TypeError):
            sols_between_datetimes(generic_datetime, 2000)

    def test_int_both_inputs_raises_type_error(self):
        with pytest.raises(TypeError):
            sols_between_datetimes(2000, 2001)

    def test_identical_datetime_inputs_returns_0(self, generic_datetime):
        assert sols_between_datetimes(generic_datetime, generic_datetime) == 0

    def test_opportunity_length_matches_known_value(
            self, opportunity_start, opportunity_end):
        # Regression pin from a previously trusted run.
        assert sols_between_datetimes(opportunity_start, opportunity_end) == \
            5109.551211085292
# NOTE(review): duplicate of the identically named class defined earlier in
# this file (dataset-dump artifact).
class TestSolsSinceDatetime:
    """Placeholder: tests for sols_since_datetime are not yet written."""
    # Extraction junk ("| 0.668339 | 0.742655") was fused onto the `pass`
    # line here; it was not valid Python and is removed.
    pass
import os
import json
from datetime import datetime
import requests
from .utils import execute
# Per-sub-task copyright-detection states returned by mysystem.
STATUS_COPYRIGHTED = 0
STATUS_UNCOPYRIGHTED = 1
STATUS_UNDETECTED = 2
STATUS_WORKING = 3
# Aggregated verdict across all sub-tasks of one query (see mysystem.query).
OVERALL_WORKING = 3
OVERALL_HAS_COPYRIGHTED = 0
OVERALL_ALL_UNDETECTED = 2
OVERALL_UNDETECTED_UNCOPYRIGHTED = 1
# ==============================================================================
# mysystem
# ==============================================================================
def http_request(url, data=None, params=None, timeout=15, retry=1):
    """Send a GET (or, when ``data`` is given, POST) request.

    The request is attempted up to ``retry`` times.  (The old docstring
    claimed a default of 2 retries, but ``retry=1`` allows exactly one
    attempt.)

    Returns:
        (resp, logs): ``resp`` is the requests response object, or None if
        every attempt failed; ``logs`` is a list of structured dicts, one
        per failed attempt, ready to be replayed into a logger.
    """
    resp = None
    times = 1
    logs = []
    while times <= retry:
        try:
            if data is None:
                resp = requests.get(url, params=params, timeout=timeout)
            else:
                resp = requests.post(url, params=params, data=data, timeout=timeout)
            break
        # ConnectionError and Timeout were previously handled by two
        # byte-identical except blocks; one handler keeps the logging in
        # a single place with no behavioural change.
        except (requests.ConnectionError, requests.Timeout) as e:
            logs.append({
                'level': 'error',
                'action': 'http-request',
                'error': str(e),
                'info': {
                    'url': url,
                    'times': times,
                }
            })
            times += 1
    return resp, logs
class mysystemError(Exception): pass
class mysystem(object):
    """Client that queries mysystem for copyright-detection results.

    Usage:
    =======
        >>> from common.mysystem import mysystem
        >>> mw = mysystem(MEDIA_WISE_USER, MEDIA_WISE_PASSWD,
                          MEDIA_WISE_URL, ALL_MATCHES,
                          MEDIA_WISE_REQ_TIMEOUT, logger)
        >>> mw.query("url_hash#SOME-HASH-STRING")
    """
    def __init__(self, user, passwd, query_url, all_matches, req_timeout, logger=None):
        """Store connection settings; no network traffic happens here."""
        self.logger = logger
        self.USER = user
        # FIX: this line previously read `self.PASSWD = <PASSWORD>`, a
        # credential-redaction artifact that is not valid Python.
        self.PASSWD = passwd
        self.QUERY_URL = query_url
        self.ALL_MATCHES = all_matches
        self.REQ_TIMEOUT = req_timeout

    def query(self, req_hash, uuid=None):
        """Query detection results for ``req_hash``.

        Returns ``(overall, listing)`` where ``overall`` is one of the
        OVERALL_* constants (or None when no verdict can be derived, e.g.
        on a server-side error) and ``listing`` holds one
        ``{'status', 'path'}`` dict per real sub-task result.

        Raises mysystemError when the HTTP request itself fails.

        ::Reference: http://seals.mysite.cn/trac/vdna/wiki/thunder_result_management
        """
        all_matches = 'True' if self.ALL_MATCHES else 'False'
        params = {
            'site_asset_id' : req_hash,
            'all_matches': all_matches
        }
        resp, logs = http_request(self.QUERY_URL, params=params, timeout=self.REQ_TIMEOUT)
        if self.logger:
            # Replay the low-level request logs at their recorded levels.
            for log in logs:
                level = log.pop('level')
                _logger = getattr(self.logger, level)
                _logger("query-vddb-async#%s" % json.dumps(log))
        if resp is None:
            raise mysystemError('Request mysystem failed!')
        # return: None or what self.result() need
        listing = []
        only1_status = None
        tasks_count = None
        if self.logger:
            self.logger.info("query-vddb-async#%s" % json.dumps({
                'action': 'show-response-info',
                'uuid': uuid,
                'info': {
                    'params': params,
                    'resp': resp.text
                }
            }))
        ret_data = resp.json()
        error = ret_data.get('error', None)
        if error:
            if self.logger:
                self.logger.warning("query-vddb-async#%s" % json.dumps({
                    'action': 'mysystem-result-error.',
                    'uuid': uuid,
                    'info': {
                        'error': error,
                        'hash': req_hash
                    }
                }))
            return None, []
        else:
            result = ret_data.get('result', {})
            tasks = result.get('tasks', [])
            tasks_count = result['tasks_count']
            if tasks_count == 1:
                # A single task: its status alone decides the verdict.
                task = tasks[0]
                only1_status = task['status']
            elif tasks_count > 1:
                for task in tasks:
                    # Skip the task that carries the request hash itself;
                    # only real match results go into `listing`.
                    site_asset_ids = task.get('site_asset_id', [])
                    is_tmp_case = False
                    for assert_id in site_asset_ids:
                        if assert_id == req_hash:
                            is_tmp_case = True
                            break
                    if is_tmp_case:
                        continue
                    status = task.get('status', None)
                    extra_infos = task.get('extra_info', [])
                    path = None
                    # e.g. "url_hash#ABC" -> lookup key "url_hash".
                    prefix = req_hash.split('#', 1)[0]
                    for extra_info in extra_infos:
                        _hash = extra_info.get(prefix)
                        if req_hash == _hash:
                            path = extra_info.get('file_path')
                    listing.append({
                        'status': status,
                        'path': path
                    })
        overall = None
        lst_len = len(listing)
        # 1. Only one result
        if tasks_count == 1:
            overall = only1_status
        # 2. Sub tasks is working
        elif lst_len < tasks_count:
            # Fewer real results than tasks => some sub-task still running.
            # (Was the magic number 3; same value as OVERALL_WORKING.)
            overall = OVERALL_WORKING
        # 3. <fake + real> results
        # 4. All real results
        elif lst_len > 0:
            copyrighted_cnt = 0
            uncopyrighted_cnt = 0
            undetected_cnt = 0
            working_cnt = 0
            for item in listing:
                status = item['status']
                if status == STATUS_WORKING:
                    working_cnt += 1
                elif status == STATUS_COPYRIGHTED:
                    copyrighted_cnt += 1
                elif status == STATUS_UNCOPYRIGHTED:
                    uncopyrighted_cnt += 1
                elif status == STATUS_UNDETECTED:
                    undetected_cnt += 1
            # Verdict precedence: working > copyrighted > all-undetected >
            # undetected+uncopyrighted.
            if working_cnt > 0:
                overall = OVERALL_WORKING
            elif copyrighted_cnt > 0:
                overall = OVERALL_HAS_COPYRIGHTED
            elif lst_len == undetected_cnt:
                overall = OVERALL_ALL_UNDETECTED
            elif lst_len == undetected_cnt + uncopyrighted_cnt:
                overall = OVERALL_UNDETECTED_UNCOPYRIGHTED
        if self.logger:
            self.logger.info("query-vddb-async#%s" % json.dumps({
                'action': 'mysystem-result-summary',
                'uuid': uuid,
                'info': {
                    'only1_status': only1_status,
                    'tasks_count': tasks_count,
                    'overall': overall,
                    'listing': listing
                }
            }))
        return overall, listing
# ==============================================================================
# Swift (upload/)
# ==============================================================================
class SwiftInitError(Exception):
    """Raised when creating a Swift container fails (see Swift.create_container)."""
    def __str__(self):
        # Instances carry ad-hoc attributes (cmd, error); show them all.
        return str(self.__dict__)
class SwiftUploadError(Exception):
    """Raised when a Swift upload fails or its arguments are invalid (see Swift.upload)."""
    def __str__(self):
        # Instances carry ad-hoc attributes (cmd, error); show them all.
        return str(self.__dict__)
class _Swift(object):
    # NOTE(review): empty placeholder; appears unused anywhere in this file.
    # Candidate for removal — confirm no external references first.
    pass
class Swift(object):
    """Thin wrapper around the ``swift`` CLI tool for uploading files.

    Usage:
    ======
        >>> swift = Swift('tools/swift', 'http://192.168.200.10:8080/auth/v1.0', 'system:root', 'testpass')
        >>> swift.upload('/tmp/exists-file.txt', 'container')

        file_path = '/tmp/new-file.txt'
        try:
            swift.upload(file_path, content='abcdefXYZ')
            # Other code...
        except SwiftUploadError as e:
            logger.error(mk_logmsg({
                'action': 'Upload swift.',
                'uuid': self.uuid,
                'error': {
                    'message': str(e),
                },
            }))
            raise
        finally:
            if os.path.exists(file_path):
                os.remove(file_path)

    (The previous docstring example called ``upload('container', '/tmp/...')``
    with the arguments reversed; the signature is ``upload(filepath,
    container=None, ...)``.)
    """
    def __init__(self, path, auth, user, key):
        # `path` is the swift CLI binary; auth/user/key map to its
        # -A / -U / -K options.
        self.path = path
        self.auth = auth
        self.user = user
        self.key = key

    def container(self):
        """Default container name: one container per clock hour."""
        return datetime.now().strftime('%Y-%m-%d_%H')

    def create_container(self):
        """Create container."""
        # Dead code below, because tools/swift will automatic create container.
        args = {
            'path': self.path,
            'auth': self.auth,
            'user': self.user,
            'key': self.key,
            'container': self.container()
        }
        cmd = '%(path)s -A %(auth)s -U %(user)s -K %(key)s post %(container)s' % args
        _, _, err = execute(cmd)
        err = err.strip()
        if err:
            ex = SwiftInitError('Init failed!')
            ex.cmd = cmd
            ex.error = err
            raise ex

    def upload(self, filepath, container=None, content=None, retry=2):
        """Upload ``filepath``; when ``content`` is given, write it to
        ``filepath`` first.  Returns the container name used.
        """
        if not os.path.exists(filepath) and content is None:
            raise SwiftUploadError('If you want upload as new file, content required !')
        # BUG FIX: the content type checks previously ran unconditionally,
        # so uploading an existing file with content=None always raised
        # "`content` must be `str` type!" (None is not a str).  Only
        # validate and write when content was actually supplied.
        if content is not None:
            if isinstance(content, unicode):
                content = content.encode('utf-8')
            if not isinstance(content, str):
                raise SwiftUploadError('`content` must be `str` type!')
            with open(filepath, 'w') as f:
                f.write(content)
        if not container:
            container = self.container()
        args = {
            'path': self.path,
            'auth': self.auth,
            'user': self.user,
            'key': self.key,
            'container': container,
            'filepath': filepath,
        }
        cmd = '%(path)s -A %(auth)s -U %(user)s -K %(key)s upload %(container)s "%(filepath)s"' % args
        ok = False
        errors = []
        # Retry the CLI invocation; collect every error for the exception.
        while retry > 0:
            try:
                _, _, err = execute(cmd)
                err = err.strip()
                if not err:
                    ok = True
                    break
                else:
                    errors.append(err)
                    ok = False
                    retry -= 1
            except OSError as e:
                errors.append(e)
                retry -= 1
        if not ok:
            ex = SwiftUploadError('Upload Failed')
            ex.cmd = cmd
            ex.error = errors
            raise ex
        return container
import os
import json
from datetime import datetime
import requests
from .utils import execute
# NOTE(review): duplicate constant block (dataset-dump artifact).
# Per-sub-task copyright-detection states returned by mysystem.
STATUS_COPYRIGHTED = 0
STATUS_UNCOPYRIGHTED = 1
STATUS_UNDETECTED = 2
STATUS_WORKING = 3
# Aggregated verdict across all sub-tasks of one query.
OVERALL_WORKING = 3
OVERALL_HAS_COPYRIGHTED = 0
OVERALL_ALL_UNDETECTED = 2
OVERALL_UNDETECTED_UNCOPYRIGHTED = 1
# ==============================================================================
# mysystem
# ==============================================================================
def http_request(url, data=None, params=None, timeout=15, retry=1):
    """Send a GET (when ``data is None``) or POST request.

    Retries up to ``retry`` times on connection errors and timeouts.
    Note: the default ``retry=1`` means a single attempt — the original
    comment claiming "retry 2 times" was wrong.

    :param url: target URL
    :param data: POST body; if None a GET is issued
    :param params: query-string parameters
    :param timeout: per-request timeout in seconds
    :param retry: maximum number of attempts
    :return: ``(response_or_None, logs)`` where *logs* is a list of
        structured error records, one per failed attempt
    """
    resp = None
    times = 1
    logs = []
    while times <= retry:
        try:
            if data is None:
                resp = requests.get(url, params=params, timeout=timeout)
            else:
                resp = requests.post(url, params=params, data=data, timeout=timeout)
            break
        except (requests.ConnectionError, requests.Timeout) as e:
            # The original had two byte-identical except blocks; merged.
            logs.append({
                'level': 'error',
                'action': 'http-request',
                'error': str(e),
                'info': {
                    'url': url,
                    'times': times,
                }
            })
            times += 1
    return resp, logs
class mysystemError(Exception): pass
class mysystem(object):
    """
    Client for the mysystem copyright-detection query service.

    Usage:
    =======
    >>> from common.mysystem import mysystem
    >>> mw = mysystem(MEDIA_WISE_USER, MEDIA_WISE_PASSWD,
                      MEDIA_WISE_URL, MEDIA_WISE_REQ_TIMEOUT,
                      logger)
    >>> print mw.query("url_hash#SOME-HASH-STRING")
    (False, False)
    """
    def __init__(self, user, passwd, query_url, all_matches, req_timeout, logger=None):
        self.logger = logger
        self.USER = user
        # NOTE(review): '<PASSWORD>' is a redacted placeholder and is NOT
        # valid Python — restore the intended `passwd` assignment here.
        self.PASSWD = <PASSWORD>
        self.QUERY_URL = query_url
        self.ALL_MATCHES = all_matches
        self.REQ_TIMEOUT = req_timeout
    def query(self, req_hash, uuid=None):
        """
        Query detection results for *req_hash*.

        Returns ``(overall, listing)`` where *overall* is one of the
        OVERALL_* constants (or the single task's status, or None) and
        *listing* holds per-task ``{'status', 'path'}`` dicts.
        Returns ``(None, [])`` when the service reports an error.
        Raises mysystemError when no HTTP response was obtained.

        ::Reference: http://seals.mysite.cn/trac/vdna/wiki/thunder_result_management
        """
        all_matches = 'True' if self.ALL_MATCHES else 'False'
        params = {
            'site_asset_id' : req_hash,
            'all_matches': all_matches
        }
        resp, logs = http_request(self.QUERY_URL, params=params, timeout=self.REQ_TIMEOUT)
        if self.logger:
            # Forward each transport-level error record at its own level.
            for log in logs:
                level = log.pop('level')
                _logger = getattr(self.logger, level)
                _logger("query-vddb-async#%s" % json.dumps(log))
        if resp is None:
            raise mysystemError('Request mysystem failed!')
        # return: None or what self.result() need
        listing = []
        only1_status = None
        tasks_count = None
        if self.logger:
            self.logger.info("query-vddb-async#%s" % json.dumps({
                'action': 'show-response-info',
                'uuid': uuid,
                'info': {
                    'params': params,
                    'resp': resp.text
                }
            }))
        ret_data = resp.json()
        error = ret_data.get('error', None)
        if error:
            if self.logger:
                self.logger.warning("query-vddb-async#%s" % json.dumps({
                    'action': 'mysystem-result-error.',
                    'uuid': uuid,
                    'info': {
                        'error': error,
                        'hash': req_hash
                    }
                }))
            return None, []
        else:
            result = ret_data.get('result', {})
            tasks = result.get('tasks', [])
            tasks_count = result['tasks_count']
            if tasks_count == 1:
                task = tasks[0]
                only1_status = task['status']
            elif tasks_count > 1:
                for task in tasks:
                    # Skip tasks whose site_asset_id list contains the
                    # request hash itself.
                    site_asset_ids = task.get('site_asset_id', [])
                    is_tmp_case = False
                    for assert_id in site_asset_ids:
                        if assert_id == req_hash:
                            is_tmp_case = True
                            break
                    if is_tmp_case:
                        continue
                    status = task.get('status', None)
                    extra_infos = task.get('extra_info', [])
                    path = None
                    # prefix is the part of the hash before '#', used as
                    # the lookup key inside each extra_info record.
                    prefix = req_hash.split('#', 1)[0]
                    for extra_info in extra_infos:
                        _hash = extra_info.get(prefix)
                        if req_hash == _hash:
                            path = extra_info.get('file_path')
                    listing.append({
                        'status': status,
                        'path': path
                    })
        overall = None
        lst_len = len(listing)
        # 1. Only one result
        if tasks_count == 1:
            overall = only1_status
        # 2. Sub tasks is working
        elif lst_len < tasks_count:
            overall = 3  # magic number: same value as OVERALL_WORKING
        # 3. <fake + real> results
        # 4. All real results
        elif lst_len > 0:
            # Tally per-status counts, then derive the aggregate verdict.
            copyrighted_cnt = 0
            uncopyrighted_cnt = 0
            undetected_cnt = 0
            working_cnt = 0
            for item in listing:
                status = item['status']
                if status == STATUS_WORKING:
                    working_cnt += 1
                elif status == STATUS_COPYRIGHTED:
                    copyrighted_cnt += 1
                elif status == STATUS_UNCOPYRIGHTED:
                    uncopyrighted_cnt += 1
                elif status == STATUS_UNDETECTED:
                    undetected_cnt += 1
            if working_cnt > 0:
                overall = OVERALL_WORKING
            elif copyrighted_cnt > 0:
                overall = OVERALL_HAS_COPYRIGHTED
            elif lst_len == undetected_cnt:
                overall = OVERALL_ALL_UNDETECTED
            elif lst_len == undetected_cnt + uncopyrighted_cnt:
                overall = OVERALL_UNDETECTED_UNCOPYRIGHTED
        if self.logger:
            self.logger.info("query-vddb-async#%s" % json.dumps({
                'action': 'mysystem-result-summary',
                'uuid': uuid,
                'info': {
                    'only1_status': only1_status,
                    'tasks_count': tasks_count,
                    'overall': overall,
                    'listing': listing
                }
            }))
        return overall, listing
# ==============================================================================
# Swift (upload/)
# ==============================================================================
class SwiftInitError(Exception):
    """Raised when container creation via the swift CLI reports an error."""
    def __str__(self):
        # Show whatever attributes (cmd, error, ...) were attached.
        return str(vars(self))
class SwiftUploadError(Exception):
    """Raised when a swift upload fails or receives bad arguments."""
    def __str__(self):
        # Show whatever attributes (cmd, error, ...) were attached.
        return str(vars(self))
class _Swift(object):
    # NOTE(review): empty placeholder; no references visible in this
    # module chunk — candidate for removal.
    pass
class Swift(object):
    """
    Thin wrapper around the ``tools/swift`` CLI for uploading files.

    Usage:
    ======
    >>> swift = Swift('tools/swift', 'http://192.168.200.10:8080/auth/v1.0', 'system:root', 'testpass')
    >>> swift.upload('/tmp/exists-file.txt', 'container')

    (The original docstring example had the arguments reversed;
    ``upload`` takes the file path first.)

    file_path = '/tmp/new-file.txt'
    try:
        swift.upload(file_path, content='abcdefXYZ')
        # Other code...
    except SwiftUploadError as e:
        logger.error(mk_logmsg({
            'action': 'Upload swift.',
            'uuid': self.uuid,
            'error': {
                'message': str(e),
            }
        }))
        raise
    finally:
        if os.path.exists(file_path):
            os.remove(file_path)
    """
    def __init__(self, path, auth, user, key):
        # path: the swift CLI binary; auth/user/key: swift credentials.
        self.path = path
        self.auth = auth
        self.user = user
        self.key = key

    def container(self):
        """Return the default (hourly) container name, e.g. '2016-01-31_13'."""
        return datetime.now().strftime('%Y-%m-%d_%H')

    def create_container(self):
        """ Create container """
        # Dead code below, because tools/swift will automatic create container.
        args = {
            'path': self.path,
            'auth': self.auth,
            'user': self.user,
            'key': self.key,
            'container': self.container()
        }
        cmd = '%(path)s -A %(auth)s -U %(user)s -K %(key)s post %(container)s' % args
        _, _, err = execute(cmd)
        err = err.strip()
        if err:
            ex = SwiftInitError('Init failed!')
            ex.cmd = cmd
            ex.error = err
            raise ex

    def upload(self, filepath, container=None, content=None, retry=2):
        """Upload *filepath* to swift, writing *content* to it first if given.

        :param filepath: local path of the file to upload
        :param container: target container; defaults to self.container()
        :param content: optional file body (Python 2 str/unicode) to write
        :param retry: number of upload attempts
        :raises SwiftUploadError: on bad arguments or when all attempts fail
        :return: the container the file was uploaded to

        Bug fix: content-type validation now only runs when *content* is
        provided; previously uploading an existing file with content=None
        always raised SwiftUploadError.
        """
        if not os.path.exists(filepath) and content is None:
            raise SwiftUploadError('If you want upload as new file, content required !')
        if content is not None:
            # Normalise unicode to utf-8 bytes (Python 2 `str`).
            if isinstance(content, unicode):
                content = content.encode('utf-8')
            if not isinstance(content, str):
                raise SwiftUploadError('`content` must be `str` type!')
            with open(filepath, 'w') as f:
                f.write(content)
        if not container:
            container = self.container()
        args = {
            'path': self.path,
            'auth': self.auth,
            'user': self.user,
            'key': self.key,
            'container': container,
            'filepath': filepath,
        }
        cmd = '%(path)s -A %(auth)s -U %(user)s -K %(key)s upload %(container)s "%(filepath)s"' % args
        errors = []
        while retry > 0:
            try:
                _, _, err = execute(cmd)
                err = err.strip()
                if not err:
                    break  # success: CLI produced no stderr output
                errors.append(err)
            except OSError as e:
                errors.append(e)
            retry -= 1
        else:
            # Loop exhausted without a successful attempt.
            ex = SwiftUploadError('Upload Failed')
            ex.cmd = cmd
            ex.error = errors
            raise ex
        return container
"""jquery-autocomplete-light widget implementation module."""
from __future__ import unicode_literals
from django import forms
try:
from django.urls import resolve
except ImportError:
from django.core.urlresolvers import resolve
from django.template.loader import render_to_string
from django.utils import safestring
from django.utils.translation import ugettext_lazy as _
from django.utils.safestring import mark_safe
try:
from django.forms.utils import flatatt
except ImportError:
from django.forms.util import flatatt
from dal.widgets import WidgetMixin
__all__ = ['JalWidgetMixin', 'JalChoiceWidget', 'JalMultipleChoiceWidget', 'JalTextWidget']
class JalWidgetMixin(object):
    """Shared media, rendering and attrs for jquery-autocomplete-light widgets."""

    class Media:
        """Automatically include static files for form.media."""
        css = {
            'all': (
                'autocomplete_light/vendor/jal/src/style.css',
            ),
        }
        js = (
            'autocomplete_light/jquery.init.js',
            'autocomplete_light/autocomplete.init.js',
            'autocomplete_light/vendor/jal/src/autocomplete.js',
            'autocomplete_light/vendor/jal/src/widget.js',
            'autocomplete_light/forward.js',
            'autocomplete_light/jal.js',
        )

    autocomplete_function = 'jal'

    @property
    def view(self):
        """Return the view callable's module-level object for ``self.url``.

        Bug fix: ``importlib`` was used without being imported anywhere in
        this module, so accessing this property raised NameError.
        """
        import importlib  # local import: missing from the module's imports
        view_func = resolve(self.url).func
        module = importlib.import_module(view_func.__module__)
        return getattr(module, view_func.__name__)

    def render(self, name, value, attrs=None, renderer=None):
        """Render the widget HTML.

        NOTE(review): *value* and *renderer* are ignored and choices/options
        are rendered empty — confirm selected values should not be
        pre-populated here.
        """
        html = '''
        <span id="{id}-wrapper" {attrs}>
            <span id="{id}-deck" class="deck">
                {choices}
            </span>
            <input type="text" name="{name}-autocomplete" id="{id}-autocomplete" value="" {input_attrs} />
            <select style="display:none" class="value-select" name="{name}" id="{id}" multiple="multiple">
                {options}
            </select>
            <span style="display:none" class="remove">ˣ</span>
            <span style="display:none" class="choice-template">
                <span class="choice prepend-remove append-option-html"></span>
            </span>
        </span>
        '''.format(
            id=attrs['id'],
            name=name,
            attrs=flatatt(self.build_attrs(attrs)),
            input_attrs=flatatt(self.build_input_attrs()),
            choices='',
            options='',
        )
        return mark_safe(html)

    def build_input_attrs(self, **kwargs):
        """Attributes for the visible text input driving the autocomplete."""
        attrs = {
            'class': 'autocomplete vTextField',
            'data-autocomplete-choice-selector': '[data-value]',
            'data-autocomplete-url': self.url,
            'placeholder': _('type some text to search in this autocomplete'),
        }
        return attrs

    def build_attrs(self, *args, **kwargs):
        """Attributes for the wrapper span; marks single vs multiple mode."""
        attrs = super(JalWidgetMixin, self).build_attrs(*args, **kwargs)
        if 'class' not in attrs:
            attrs['class'] = ''
        attrs['class'] += ' autocomplete-light-widget '
        if attrs.get('data-widget-maximum-values', 0) == 1:
            attrs['class'] += ' single'
        else:
            attrs['class'] += ' multiple'
        return attrs
class JalChoiceWidget(JalWidgetMixin, WidgetMixin, forms.Select):
    """Autocomplete widget for selecting zero or one choice."""

    def __init__(self, url=None, forward=None, widget_attrs=None, *args,
                 **kwargs):
        # Initialise the Select machinery first, then the autocomplete
        # mixin state (url / forwarded fields).
        forms.Select.__init__(self, *args, **kwargs)
        WidgetMixin.__init__(self, url=url, forward=forward)
        self.attrs.setdefault('data-widget-maximum-values', 1)
"""jquery-autocomplete-light widget implementation module."""
from __future__ import unicode_literals
from django import forms
try:
from django.urls import resolve
except ImportError:
from django.core.urlresolvers import resolve
from django.template.loader import render_to_string
from django.utils import safestring
from django.utils.translation import ugettext_lazy as _
from django.utils.safestring import mark_safe
try:
from django.forms.utils import flatatt
except ImportError:
from django.forms.util import flatatt
from dal.widgets import WidgetMixin
__all__ = ['JalWidgetMixin', 'JalChoiceWidget', 'JalMultipleChoiceWidget', 'JalTextWidget']
class JalWidgetMixin(object):
    """Shared media, rendering and attrs for jquery-autocomplete-light widgets."""

    class Media:
        """Automatically include static files for form.media."""
        css = {
            'all': (
                'autocomplete_light/vendor/jal/src/style.css',
            ),
        }
        js = (
            'autocomplete_light/jquery.init.js',
            'autocomplete_light/autocomplete.init.js',
            'autocomplete_light/vendor/jal/src/autocomplete.js',
            'autocomplete_light/vendor/jal/src/widget.js',
            'autocomplete_light/forward.js',
            'autocomplete_light/jal.js',
        )

    autocomplete_function = 'jal'

    @property
    def view(self):
        """Return the view callable's module-level object for ``self.url``.

        Bug fix: ``importlib`` was used without being imported anywhere in
        this module, so accessing this property raised NameError.
        """
        import importlib  # local import: missing from the module's imports
        view_func = resolve(self.url).func
        module = importlib.import_module(view_func.__module__)
        return getattr(module, view_func.__name__)

    def render(self, name, value, attrs=None, renderer=None):
        """Render the widget HTML.

        NOTE(review): *value* and *renderer* are ignored and choices/options
        are rendered empty — confirm selected values should not be
        pre-populated here.
        """
        html = '''
        <span id="{id}-wrapper" {attrs}>
            <span id="{id}-deck" class="deck">
                {choices}
            </span>
            <input type="text" name="{name}-autocomplete" id="{id}-autocomplete" value="" {input_attrs} />
            <select style="display:none" class="value-select" name="{name}" id="{id}" multiple="multiple">
                {options}
            </select>
            <span style="display:none" class="remove">ˣ</span>
            <span style="display:none" class="choice-template">
                <span class="choice prepend-remove append-option-html"></span>
            </span>
        </span>
        '''.format(
            id=attrs['id'],
            name=name,
            attrs=flatatt(self.build_attrs(attrs)),
            input_attrs=flatatt(self.build_input_attrs()),
            choices='',
            options='',
        )
        return mark_safe(html)

    def build_input_attrs(self, **kwargs):
        """Attributes for the visible text input driving the autocomplete."""
        attrs = {
            'class': 'autocomplete vTextField',
            'data-autocomplete-choice-selector': '[data-value]',
            'data-autocomplete-url': self.url,
            'placeholder': _('type some text to search in this autocomplete'),
        }
        return attrs

    def build_attrs(self, *args, **kwargs):
        """Attributes for the wrapper span; marks single vs multiple mode."""
        attrs = super(JalWidgetMixin, self).build_attrs(*args, **kwargs)
        if 'class' not in attrs:
            attrs['class'] = ''
        attrs['class'] += ' autocomplete-light-widget '
        if attrs.get('data-widget-maximum-values', 0) == 1:
            attrs['class'] += ' single'
        else:
            attrs['class'] += ' multiple'
        return attrs
class JalChoiceWidget(JalWidgetMixin, WidgetMixin, forms.Select):
    """
    Widget that provides an autocomplete for zero to one choice.
    """
    def __init__(self, url=None, forward=None, widget_attrs=None, *args,
                 **kwargs):
        # Plain Select init first; WidgetMixin wires up the autocomplete
        # url and forwarded fields.
        forms.Select.__init__(self, *args, **kwargs)
        WidgetMixin.__init__(
            self,
            url=url,
            forward=forward
        )
        self.attrs.setdefault('data-widget-maximum-values', 1)
from keras import Input, Model, Sequential
from keras.layers import Flatten, Conv2D, Concatenate, Dense, Activation, concatenate
from keras.optimizers import Adam, SGD
from rl.agents import CEMAgent
from rl.memory import SequentialMemory, EpisodeParameterMemory
from hexagon_agent import *
from random import shuffle
from multi_agent import *
import sys
import hexagon_ui_api
import os
from square_grid import *
import numpy as np
from ppo import *
from episodic_memory import EpisodicMemory
from ai_gym import *
# ______________________________________________________________________________________________________________________________
class EnvDef:
    """Static environment and hyper-parameter constants."""
    HASH_POOL = 10000           # size of the hashed feature pool
    NODE_FEATURE_COUNT = 5      # features per node
    DECISION_ACTION_SPACE = 2   # binary boost decision
    MAX_ROUND = 2000
    CELL_FEATURE = 1
    MAX_GRID_LENGTH = 13
    # NOTE(review): true division yields 7.5 under Python 3 — confirm an
    # integer radius is not expected here.
    RADIUS = (MAX_GRID_LENGTH/2) + 1
    SPATIAL_INPUT = (MAX_GRID_LENGTH, MAX_GRID_LENGTH)
    SPATIAL_OUTPUT = (MAX_GRID_LENGTH * MAX_GRID_LENGTH, )
    WARMUP = (MAX_GRID_LENGTH ** 2) * 5   # PPO warm-up steps
    LR = 0.0001                 # Adam learning rate
    GAME_VERBOSE = False
# __________________________________________________________________________________________________________________________
class DecisionModel:
    """Small dense softmax network for the boost decision agent."""
    def __init__(self, modelName=None):
        """
        :type modelName: str
        """
        # Random suffix avoids clobbering weight files between runs.
        # NOTE(review): `r` comes from a wildcard import (presumably
        # `random`) — confirm.
        self.modelName = modelName if modelName is not None else 'Decision_model_params.h5f' + str(r.uniform(0, 10000))
        model = Sequential()
        model.add(Flatten(input_shape=(1,) + (EnvDef.HASH_POOL * EnvDef.NODE_FEATURE_COUNT,)))
        model.add(Dense(32, activation="relu"))
        model.add(Dense(16, activation="relu"))
        model.add(Dense(EnvDef.DECISION_ACTION_SPACE))
        model.add(Activation('softmax'))
        print(model.summary())
        model.compile(loss="categorical_crossentropy",
                      optimizer='adadelta', metrics=['accuracy'])
        self.model = model
# ______________________________________________________________________________________________________________________________
class AttackModel:
    """PPO actor/critic pair over the spatial grid (Keras functional API)."""
    def __init__(self, modelName='Attack_model_params.h5f'):
        """
        :type modelName: str
        """
        # NOTE(review): the default is not None, so the random-suffix
        # branch is unreachable unless the caller passes modelName=None.
        self.modelName = modelName if modelName is not None else 'Attack_model_params.h5f' + str(r.uniform(0, 10000000))
        # --- actor: conv stack + advantage/old-prediction inputs for the
        # --- PPO surrogate loss --------------------------------------
        state_input = Input(shape=(EnvDef.SPATIAL_INPUT + (1, )))
        advantage = Input(shape=(1,))
        old_prediction = Input(EnvDef.SPATIAL_OUTPUT)
        conv_path = Conv2D(128, (5, 5), padding='same', activation='relu', name='INPUT_ATTACK')(state_input)
        conv_path = Conv2D(64, (3, 3), padding='same', activation='relu')(conv_path)
        conv_path = Conv2D(16, (3, 3), padding='same', activation='relu')(conv_path)
        conv_path = Conv2D(4, (3, 3), padding='same', activation='relu')(conv_path)
        conv_path = Conv2D(1, (3, 3), padding='same', activation='tanh')(conv_path)
        conv_path = Flatten()(conv_path)
        merged = concatenate([conv_path, advantage, old_prediction], axis=1)
        merged = Dense(EnvDef.SPATIAL_OUTPUT[0], activation='tanh')(merged)
        actor_output = Dense(EnvDef.SPATIAL_OUTPUT[0], activation='tanh')(merged)
        model = Model(inputs=[state_input, advantage, old_prediction], outputs=[actor_output])
        model.compile(optimizer=Adam(lr=EnvDef.LR),
                      loss=[proximal_policy_optimization_loss(
                          advantage=advantage,
                          old_prediction=old_prediction)])
        self.model = model
        # --- critic: simple dense value head -------------------------
        critic_input = Input(shape=(EnvDef.SPATIAL_INPUT + (1, )))
        critic_path = Flatten()(critic_input)
        critic_path = Dense(256, activation='relu')(critic_path)
        critic_path = Dense(256, activation='relu')(critic_path)
        critic_out = Dense(1)(critic_path)
        critic = Model(inputs=[critic_input], outputs=[critic_out])
        critic.compile(optimizer=Adam(lr=EnvDef.LR), loss='mse')
        self.critic = critic
    @staticmethod
    def prepare_x(X, batch=False):
        """
        Reshape raw grid data to the actor/critic input shape.
        :type X: ndarray
        :type batch: bool
        :return: reshaped ndarray
        """
        shape = EnvDef.SPATIAL_INPUT + (1, )
        if batch:
            shape = (X.shape[0], ) + shape
        return np.reshape(X, shape)
    @staticmethod
    def prepare_y(Y, batch=False):
        """
        Reshape targets to the flattened output shape plus channel dim.
        :type Y: ndarray
        :type batch: bool
        :return: reshaped ndarray
        """
        shape = EnvDef.SPATIAL_OUTPUT + (1, )
        if batch:
            shape = (Y.shape[0], ) + shape
        return np.reshape(Y, shape)
    @staticmethod
    def process_y(Y):
        """
        Flatten a prediction back to SPATIAL_OUTPUT.
        :type Y: ndarray
        :return: reshaped ndarray
        """
        return np.reshape(Y, EnvDef.SPATIAL_OUTPUT)
# ______________________________________________________________________________________________________________________________
if __name__ == '__main__':
    args = menu()
    # Environment wrapping the hexagon game with hierarchical agents.
    env = HierarchicalCentaurEnv(opponent_randomness=args.randomness,
                                 centaur_boost_likelihood=args.centaur_boost_likelihood,
                                 boosting_off=args.boostingoff, attack_off=args.attackoff,
                                 game_verbose=EnvDef.GAME_VERBOSE)
    np.random.seed(42)
    env.seed(42)
    prc = CentaurDecisionProcessor()
    dec_model = DecisionModel()
    attack_model = AttackModel()
    # NOTE: `prc` is rebound — the decision processor is wrapped into the
    # multi-processor alongside the attack processor.
    prc = MultiProcessor({AgentType.BoostDecision: prc, AgentType.Attack: CentaurAttackProcessor(EnvDef.SPATIAL_INPUT, random_action=args.randomaction)})
    memory = EpisodeParameterMemory(limit=1000, window_length=1)
    # CEM agent handles the discrete boost decision.
    decision_agent = CEMAgent(model=dec_model.model, nb_actions=EnvDef.DECISION_ACTION_SPACE, memory=memory,
                              batch_size=50, nb_steps_warmup=200, train_interval=50, elite_frac=0.05)
    decision_agent.compile()
    memory2 = EpisodicMemory(experience_window_length=100000)
    # PPO agent handles spatial attack actions.
    attack_agent = PPOAgent(nb_actions=EnvDef.SPATIAL_OUTPUT[0],
                            observation_shape=EnvDef.SPATIAL_INPUT + (1, ),
                            actor=attack_model.model,
                            processor=prc.inner_processors[AgentType.Attack],
                            critic=attack_model.critic,
                            memory=memory2, nb_steps_warmup=EnvDef.WARMUP,
                            masker=prc.inner_processors[AgentType.Attack])
    agent = MultiAgent({AgentType.BoostDecision: decision_agent, AgentType.Attack: attack_agent}, processor=prc, save_frequency=0.05)
    if args.model_name is not None:
        agent.inner_agents[AgentType.Attack].load_weights(args.model_name)
    if len(sys.argv) == 1:
        print('Usage: python centaur_ai_gym.py (train|test)')
    elif args.what == 'train':
        hexagon_ui_api.run_in_background()
        agent.fit(env, nb_steps=300 * 1000, visualize=False, verbose=2, interim_filenames={AgentType.Attack: attack_model.modelName})
        agent.save_weights({AgentType.BoostDecision: dec_model.modelName, AgentType.Attack: attack_model.modelName}, overwrite=True)
    elif args.what == 'test':
        hexagon_ui_api.run_in_background()
        agent.test(env, nb_episodes=args.testrounds)
    else:
        print('argument not recognised: ' + sys.argv[1])
from keras import Input, Model, Sequential
from keras.layers import Flatten, Conv2D, Concatenate, Dense, Activation, concatenate
from keras.optimizers import Adam, SGD
from rl.agents import CEMAgent
from rl.memory import SequentialMemory, EpisodeParameterMemory
from hexagon_agent import *
from random import shuffle
from multi_agent import *
import sys
import hexagon_ui_api
import os
from square_grid import *
import numpy as np
from ppo import *
from episodic_memory import EpisodicMemory
from ai_gym import *
# ______________________________________________________________________________________________________________________________
class EnvDef:
    """Static environment and hyper-parameter constants."""
    HASH_POOL = 10000           # size of the hashed feature pool
    NODE_FEATURE_COUNT = 5      # features per node
    DECISION_ACTION_SPACE = 2   # binary boost decision
    MAX_ROUND = 2000
    CELL_FEATURE = 1
    MAX_GRID_LENGTH = 13
    # NOTE(review): true division yields 7.5 under Python 3 — confirm an
    # integer radius is not expected here.
    RADIUS = (MAX_GRID_LENGTH/2) + 1
    SPATIAL_INPUT = (MAX_GRID_LENGTH, MAX_GRID_LENGTH)
    SPATIAL_OUTPUT = (MAX_GRID_LENGTH * MAX_GRID_LENGTH, )
    WARMUP = (MAX_GRID_LENGTH ** 2) * 5   # PPO warm-up steps
    LR = 0.0001                 # Adam learning rate
    GAME_VERBOSE = False
# __________________________________________________________________________________________________________________________
class DecisionModel:
    """Small dense softmax network for the boost decision agent."""
    def __init__(self, modelName=None):
        """
        :type modelName: str
        """
        # Random suffix avoids clobbering weight files between runs.
        # NOTE(review): `r` comes from a wildcard import (presumably
        # `random`) — confirm.
        self.modelName = modelName if modelName is not None else 'Decision_model_params.h5f' + str(r.uniform(0, 10000))
        model = Sequential()
        model.add(Flatten(input_shape=(1,) + (EnvDef.HASH_POOL * EnvDef.NODE_FEATURE_COUNT,)))
        model.add(Dense(32, activation="relu"))
        model.add(Dense(16, activation="relu"))
        model.add(Dense(EnvDef.DECISION_ACTION_SPACE))
        model.add(Activation('softmax'))
        print(model.summary())
        model.compile(loss="categorical_crossentropy",
                      optimizer='adadelta', metrics=['accuracy'])
        self.model = model
# ______________________________________________________________________________________________________________________________
class AttackModel:
    """PPO actor/critic pair over the spatial grid (Keras functional API)."""
    def __init__(self, modelName='Attack_model_params.h5f'):
        """
        :type modelName: str
        """
        # NOTE(review): the default is not None, so the random-suffix
        # branch is unreachable unless the caller passes modelName=None.
        self.modelName = modelName if modelName is not None else 'Attack_model_params.h5f' + str(r.uniform(0, 10000000))
        # --- actor: conv stack + advantage/old-prediction inputs for the
        # --- PPO surrogate loss --------------------------------------
        state_input = Input(shape=(EnvDef.SPATIAL_INPUT + (1, )))
        advantage = Input(shape=(1,))
        old_prediction = Input(EnvDef.SPATIAL_OUTPUT)
        conv_path = Conv2D(128, (5, 5), padding='same', activation='relu', name='INPUT_ATTACK')(state_input)
        conv_path = Conv2D(64, (3, 3), padding='same', activation='relu')(conv_path)
        conv_path = Conv2D(16, (3, 3), padding='same', activation='relu')(conv_path)
        conv_path = Conv2D(4, (3, 3), padding='same', activation='relu')(conv_path)
        conv_path = Conv2D(1, (3, 3), padding='same', activation='tanh')(conv_path)
        conv_path = Flatten()(conv_path)
        merged = concatenate([conv_path, advantage, old_prediction], axis=1)
        merged = Dense(EnvDef.SPATIAL_OUTPUT[0], activation='tanh')(merged)
        actor_output = Dense(EnvDef.SPATIAL_OUTPUT[0], activation='tanh')(merged)
        model = Model(inputs=[state_input, advantage, old_prediction], outputs=[actor_output])
        model.compile(optimizer=Adam(lr=EnvDef.LR),
                      loss=[proximal_policy_optimization_loss(
                          advantage=advantage,
                          old_prediction=old_prediction)])
        self.model = model
        # --- critic: simple dense value head -------------------------
        critic_input = Input(shape=(EnvDef.SPATIAL_INPUT + (1, )))
        critic_path = Flatten()(critic_input)
        critic_path = Dense(256, activation='relu')(critic_path)
        critic_path = Dense(256, activation='relu')(critic_path)
        critic_out = Dense(1)(critic_path)
        critic = Model(inputs=[critic_input], outputs=[critic_out])
        critic.compile(optimizer=Adam(lr=EnvDef.LR), loss='mse')
        self.critic = critic
    @staticmethod
    def prepare_x(X, batch=False):
        """
        Reshape raw grid data to the actor/critic input shape.
        :type X: ndarray
        :type batch: bool
        :return: reshaped ndarray
        """
        shape = EnvDef.SPATIAL_INPUT + (1, )
        if batch:
            shape = (X.shape[0], ) + shape
        return np.reshape(X, shape)
    @staticmethod
    def prepare_y(Y, batch=False):
        """
        Reshape targets to the flattened output shape plus channel dim.
        :type Y: ndarray
        :type batch: bool
        :return: reshaped ndarray
        """
        shape = EnvDef.SPATIAL_OUTPUT + (1, )
        if batch:
            shape = (Y.shape[0], ) + shape
        return np.reshape(Y, shape)
    @staticmethod
    def process_y(Y):
        """
        Flatten a prediction back to SPATIAL_OUTPUT.
        :type Y: ndarray
        :return: reshaped ndarray
        """
        return np.reshape(Y, EnvDef.SPATIAL_OUTPUT)
# ______________________________________________________________________________________________________________________________
if __name__ == '__main__':
    args = menu()
    # Environment wrapping the hexagon game with hierarchical agents.
    env = HierarchicalCentaurEnv(opponent_randomness=args.randomness,
                                 centaur_boost_likelihood=args.centaur_boost_likelihood,
                                 boosting_off=args.boostingoff, attack_off=args.attackoff,
                                 game_verbose=EnvDef.GAME_VERBOSE)
    np.random.seed(42)
    env.seed(42)
    prc = CentaurDecisionProcessor()
    dec_model = DecisionModel()
    attack_model = AttackModel()
    # NOTE: `prc` is rebound — the decision processor is wrapped into the
    # multi-processor alongside the attack processor.
    prc = MultiProcessor({AgentType.BoostDecision: prc, AgentType.Attack: CentaurAttackProcessor(EnvDef.SPATIAL_INPUT, random_action=args.randomaction)})
    memory = EpisodeParameterMemory(limit=1000, window_length=1)
    # CEM agent handles the discrete boost decision.
    decision_agent = CEMAgent(model=dec_model.model, nb_actions=EnvDef.DECISION_ACTION_SPACE, memory=memory,
                              batch_size=50, nb_steps_warmup=200, train_interval=50, elite_frac=0.05)
    decision_agent.compile()
    memory2 = EpisodicMemory(experience_window_length=100000)
    # PPO agent handles spatial attack actions.
    attack_agent = PPOAgent(nb_actions=EnvDef.SPATIAL_OUTPUT[0],
                            observation_shape=EnvDef.SPATIAL_INPUT + (1, ),
                            actor=attack_model.model,
                            processor=prc.inner_processors[AgentType.Attack],
                            critic=attack_model.critic,
                            memory=memory2, nb_steps_warmup=EnvDef.WARMUP,
                            masker=prc.inner_processors[AgentType.Attack])
    agent = MultiAgent({AgentType.BoostDecision: decision_agent, AgentType.Attack: attack_agent}, processor=prc, save_frequency=0.05)
    if args.model_name is not None:
        agent.inner_agents[AgentType.Attack].load_weights(args.model_name)
    if len(sys.argv) == 1:
        print('Usage: python centaur_ai_gym.py (train|test)')
    elif args.what == 'train':
        hexagon_ui_api.run_in_background()
        agent.fit(env, nb_steps=300 * 1000, visualize=False, verbose=2, interim_filenames={AgentType.Attack: attack_model.modelName})
        agent.save_weights({AgentType.BoostDecision: dec_model.modelName, AgentType.Attack: attack_model.modelName}, overwrite=True)
    elif args.what == 'test':
        hexagon_ui_api.run_in_background()
        agent.test(env, nb_episodes=args.testrounds)
    else:
        print('argument not recognised: ' + sys.argv[1])
__author__ = 'pieroit'
from DatabaseHulk import DatabaseHulk
import collections
def parseLine(line):
    """Strip the trailing newline and split a TSV line into fields."""
    stripped = line.rstrip('\r\n')
    return stripped.split('\t')
# make dictionary out of the list of names and their values
# order must be the same
# make dictionary out of the list of names and their values
# order must be the same
def buildRecordObj(cols, vals):
    """Pair each column name with its value by position."""
    return {col: vals[i] for i, col in enumerate(cols)}
if __name__ == '__main__':
    # prepare db
    sqlCredentials = {
        'dbtype': 'mysql',
        'host': 'localhost',
        'user': 'root',
        'password': '<PASSWORD>',
        'dbname': 'wsd2015'
    }
    db = DatabaseHulk(sqlCredentials)
    # Column name -> SQL type for the target table.
    variables = {
        'GoalId': {'type':'int'},
        'GoalName': {'type':'text'},
        'TargetId': {'type':'int'},
        'TargetName': {'type':'text'},
        'IndicatorId': {'type':'int'},
        'IndicatorName': {'type':'text'},
        'IndicatorOrderClause': {'type':'text'},
        'SeriesRowId': {'type':'int'},
        'SeriesName': {'type':'text'},
        'SeriesOrderClause': {'type':'int'},
        'IsMdg': {'type':'int'},
        'LastUpdated': {'type':'text'},
        'CountryId': {'type':'int'},
        'CountryName': {'type':'text'},
        'ISO3Code': {'type':'text'},
        'IsDeveloped': {'type':'int'},
        'MdgRegions': {'type':'text'},
        'isMdgCountry': {'type':'int'},
        'IsFormer': {'type':'int'},
        'IsLDC2014': {'type':'int'},
        'IsLLDC': {'type':'int'},
        'GDPpc2012': {'type':'float'},
        'Population2012': {'type':'int'},
        'Year': {'type':'int'},
        'Value': {'type':'float'},
        'Nature': {'type':'text'},
        'FootnoteId': {'type':'text'},
        'FootnoteText': {'type':'text'}
    }
    # Recreate the table from scratch on every run.
    db.dropTable('wsd2015')
    db.createTable('wsd2015', variables)
    # open file
    # NOTE(review): `file` shadows the builtin and is never closed.
    file = open('data/wsd2015.txt')
    # loop over lines
    count = 0
    columnNames = []
    for line in file:
        if count == 0: #header
            columnNames = parseLine(line)
        else:
            # obtain data record
            recordValues = parseLine(line)
            # create hash
            recordObj = buildRecordObj(columnNames, recordValues)
            # insert into db
            db.insertRecord('wsd2015', recordObj, commit=False)
        count += 1
        if count%10000 == 0:
            print count
            db.commit() # inserting 10000 rows at once for speed
    db.commit() # insert remaining records
__author__ = 'pieroit'
from DatabaseHulk import DatabaseHulk
import collections
def parseLine(line):
    """Strip the trailing newline and split a TSV line into fields."""
    stripped = line.rstrip('\r\n')
    return stripped.split('\t')
# make dictionary out of the list of names and their values
# order must be the same
def buildRecordObj(cols, vals):
    """Pair each column name with its value by position."""
    return {col: vals[i] for i, col in enumerate(cols)}
if __name__ == '__main__':
    # prepare db
    sqlCredentials = {
        'dbtype': 'mysql',
        'host': 'localhost',
        'user': 'root',
        'password': '<PASSWORD>',
        'dbname': 'wsd2015'
    }
    db = DatabaseHulk(sqlCredentials)
    # Column name -> SQL type for the target table.
    variables = {
        'GoalId': {'type':'int'},
        'GoalName': {'type':'text'},
        'TargetId': {'type':'int'},
        'TargetName': {'type':'text'},
        'IndicatorId': {'type':'int'},
        'IndicatorName': {'type':'text'},
        'IndicatorOrderClause': {'type':'text'},
        'SeriesRowId': {'type':'int'},
        'SeriesName': {'type':'text'},
        'SeriesOrderClause': {'type':'int'},
        'IsMdg': {'type':'int'},
        'LastUpdated': {'type':'text'},
        'CountryId': {'type':'int'},
        'CountryName': {'type':'text'},
        'ISO3Code': {'type':'text'},
        'IsDeveloped': {'type':'int'},
        'MdgRegions': {'type':'text'},
        'isMdgCountry': {'type':'int'},
        'IsFormer': {'type':'int'},
        'IsLDC2014': {'type':'int'},
        'IsLLDC': {'type':'int'},
        'GDPpc2012': {'type':'float'},
        'Population2012': {'type':'int'},
        'Year': {'type':'int'},
        'Value': {'type':'float'},
        'Nature': {'type':'text'},
        'FootnoteId': {'type':'text'},
        'FootnoteText': {'type':'text'}
    }
    # Recreate the table from scratch on every run.
    db.dropTable('wsd2015')
    db.createTable('wsd2015', variables)
    # open file
    # NOTE(review): `file` shadows the builtin and is never closed.
    file = open('data/wsd2015.txt')
    # loop over lines
    count = 0
    columnNames = []
    for line in file:
        if count == 0: #header
            columnNames = parseLine(line)
        else:
            # obtain data record
            recordValues = parseLine(line)
            # create hash
            recordObj = buildRecordObj(columnNames, recordValues)
            # insert into db
            db.insertRecord('wsd2015', recordObj, commit=False)
        count += 1
        if count%10000 == 0:
            print count
            db.commit() # inserting 10000 rows at once for speed
    db.commit() # insert remaining records
from sciibo.core.colors import *
from sciibo.core.colors import *
# Define our color pairs
# NOTE(review): create_color and the colour/attribute names come from the
# wildcard import of sciibo.core.colors.
BACKGROUND = create_color(CYAN, BLUE)
# Logos
LOGO = create_color(CYAN, BLUE)
LOGO_SHADOW = create_color(BLACK, BLUE)
LOGO_TEXT = create_color(YELLOW, YELLOW)
LOGO_TEXT_SLIDER = create_color(YELLOW, WHITE)
# Menus
MENU = create_color(CYAN, BLUE)
MENU_TEXT = create_color(WHITE, BLUE)
MENU_TEXT_BOLD = create_color(WHITE, BLUE, BOLD)
# Menu list
MENU_LIST = create_color(WHITE, BLUE)
MENU_LIST_SELECTED = create_color(BLACK, YELLOW)
# Form elements
FORM_LIST = create_color(WHITE, CYAN)
FORM_LIST_SELECTED = create_color(BLUE, WHITE)
FORM_LIST_ACTIVE = create_color(BLACK, YELLOW)
FORM_LIST_ACTIVE_SELECTED = create_color(YELLOW, BLACK)
FORM_BUTTON = create_color(BLACK, WHITE)
FORM_BUTTON_ACTIVE = create_color(BLACK, YELLOW)
FORM_INPUT = create_color(WHITE, CYAN)
FORM_INPUT_ACTIVE = create_color(BLACK, YELLOW)
FORM_INPUT_CURSOR = create_color(YELLOW, BLACK)
FORM_SELECTION = create_color(WHITE, CYAN)
FORM_SELECTION_SELECTED = create_color(BLUE, WHITE)
FORM_SELECTION_ACTIVE = create_color(BLACK, YELLOW)
FORM_SELECTION_ACTIVE_SELECTED = create_color(YELLOW, BLACK)
# Glyphs
GLYPH_3 = create_color(GREEN, BLACK, DIM)
GLYPH_2 = create_color(GREEN, BLACK, BOLD)
GLYPH_1 = create_color(WHITE, BLACK, BOLD)
# Alerts
ALERT_INFO = create_color(WHITE, CYAN)
ALERT_ERROR = create_color(WHITE, RED)
# Notification
NOTIFICATION = create_color(WHITE, BLACK, BOLD)
# Playfield
PLAYFIELD = create_color(CYAN, BLUE)
PLAYFIELD_TEXT = create_color(WHITE, BLUE)
# Cards
CARD_1234 = create_color(WHITE, CYAN, BOLD)
CARD_5678 = create_color(WHITE, GREEN, BOLD)
CARD_9012 = create_color(WHITE, MAGENTA, BOLD)
CARD_SB = create_color(WHITE, RED, BOLD)
CARD_BACK = create_color(YELLOW, YELLOW, BOLD)
# Discards
DISCARD = create_color(BLACK, WHITE, BOLD)
DISCARD_1234 = create_color(BLACK, CYAN)
DISCARD_5678 = create_color(BLACK, GREEN)
DISCARD_9012 = create_color(BLACK, MAGENTA)
DISCARD_SB = create_color(BLACK, RED)
# Statusbar
STATUSBAR = create_color(WHITE, CYAN)
STATUSBAR_ACTIVE = create_color(BLACK, YELLOW)
from sciibo.core.colors import *
# Define our color pairs
# NOTE(review): create_color and the colour/attribute names come from the
# wildcard import of sciibo.core.colors.
BACKGROUND = create_color(CYAN, BLUE)
# Logos
LOGO = create_color(CYAN, BLUE)
LOGO_SHADOW = create_color(BLACK, BLUE)
LOGO_TEXT = create_color(YELLOW, YELLOW)
LOGO_TEXT_SLIDER = create_color(YELLOW, WHITE)
# Menus
MENU = create_color(CYAN, BLUE)
MENU_TEXT = create_color(WHITE, BLUE)
MENU_TEXT_BOLD = create_color(WHITE, BLUE, BOLD)
# Menu list
MENU_LIST = create_color(WHITE, BLUE)
MENU_LIST_SELECTED = create_color(BLACK, YELLOW)
# Form elements
FORM_LIST = create_color(WHITE, CYAN)
FORM_LIST_SELECTED = create_color(BLUE, WHITE)
FORM_LIST_ACTIVE = create_color(BLACK, YELLOW)
FORM_LIST_ACTIVE_SELECTED = create_color(YELLOW, BLACK)
FORM_BUTTON = create_color(BLACK, WHITE)
FORM_BUTTON_ACTIVE = create_color(BLACK, YELLOW)
FORM_INPUT = create_color(WHITE, CYAN)
FORM_INPUT_ACTIVE = create_color(BLACK, YELLOW)
FORM_INPUT_CURSOR = create_color(YELLOW, BLACK)
FORM_SELECTION = create_color(WHITE, CYAN)
FORM_SELECTION_SELECTED = create_color(BLUE, WHITE)
FORM_SELECTION_ACTIVE = create_color(BLACK, YELLOW)
FORM_SELECTION_ACTIVE_SELECTED = create_color(YELLOW, BLACK)
# Glyphs
GLYPH_3 = create_color(GREEN, BLACK, DIM)
GLYPH_2 = create_color(GREEN, BLACK, BOLD)
GLYPH_1 = create_color(WHITE, BLACK, BOLD)
# Alerts
ALERT_INFO = create_color(WHITE, CYAN)
ALERT_ERROR = create_color(WHITE, RED)
# Notification
NOTIFICATION = create_color(WHITE, BLACK, BOLD)
# Playfield
PLAYFIELD = create_color(CYAN, BLUE)
PLAYFIELD_TEXT = create_color(WHITE, BLUE)
# Cards
CARD_1234 = create_color(WHITE, CYAN, BOLD)
CARD_5678 = create_color(WHITE, GREEN, BOLD)
CARD_9012 = create_color(WHITE, MAGENTA, BOLD)
CARD_SB = create_color(WHITE, RED, BOLD)
CARD_BACK = create_color(YELLOW, YELLOW, BOLD)
# Discards
DISCARD = create_color(BLACK, WHITE, BOLD)
DISCARD_1234 = create_color(BLACK, CYAN)
DISCARD_5678 = create_color(BLACK, GREEN)
DISCARD_9012 = create_color(BLACK, MAGENTA)
DISCARD_SB = create_color(BLACK, RED)
# Statusbar
STATUSBAR = create_color(WHITE, CYAN)
STATUSBAR_ACTIVE = create_color(BLACK, YELLOW)
import optparse, os, shutil, subprocess, sys, tempfile
def stop_err( msg ):
sys.stderr.write( '%s\n' % msg )
sys.exit()
def run_process ( cmd, name, tmp_dir, buffsize ):
try:
tmp = tempfile.NamedTemporaryFile( dir=tmp_dir ).name
tmp_stderr = open( tmp, 'wb' )
proc = subprocess.Popen( args=cmd, shell=True, cwd=tmp_dir, stderr=tmp_stderr.fileno() )
returncode = proc.wait()
tmp_stderr.close()
# get stderr, allowing for case where it's very large
tmp_stderr = open( tmp, 'rb' )
stderr = ''
try:
while True:
stderr += tmp_stderr.read( buffsize )
if not stderr or len( stderr ) % buffsize != 0:
break
except OverflowError:
pass
tmp_stderr.close()
if returncode != 0:
raise Exception, stderr
except Exception, e:
raise Exception, 'Error in \'' + name + '\'. \n' + str( e )
def check_output ( output, canBeEmpty ):
if 0 < os.path.getsize( output ):
return True
elif False == canBeEmpty:
raise Exception, 'The output file is empty:' + output
def __main__():
#Parse Command Line
parser = optparse.OptionParser()
parser.add_option( '-a', '--alignmentScore', action='store_true', dest='alignmentScore', default=False, help='split alignments by alignment score instead of mapping quality' )
parser.add_option( '-b', '--bwa', action='store_true', dest='bwa', default=False, help='alignments are from BWA (SOLiD only)' )
parser.add_option( '-c', '--colorSpace', action='store_true', dest='colorSpace', default=False, help='generate reads in color space (SOLiD reads)' )
parser.add_option( '-d', '--scoreFactor', dest='scoreFactor', type='int', help='divide quality/alignment score by this factor' )
parser.add_option( '-g', '--wiggle', dest='wiggle', type='int', help='gap "wiggle"' )
parser.add_option( '-n', '--numReads', dest='numReads', type='int', help='number of raw input paired-end reads (otherwise, inferred from all SAM records present).' )
parser.add_option( '-q', '--minMapq', dest='minMapq', type='int', help='consider only alignments with this mapping quality or greater.' )
parser.add_option( '-z', '--singleEnd', action='store_true', dest='singleEnd', default=False, help='input contains only single end reads' )
parser.add_option( '-S', '--sam', dest='sam', default=None, help='input SAM' )
parser.add_option( '-B', '--bam', dest='bam', default=None, help='input BAM' )
parser.add_option( '-p', '--printIncorrect', action='store_true', dest='printIncorrect', default=False, help='print incorrect alignments' )
parser.add_option( '-s', '--numSnps', dest='numSnps', type="int", help='consider only alignments with the number of specified SNPs' )
parser.add_option( '-e', '--numErrors', dest='numErrors', type="int", default=False, help='consider only alignments with the number of specified errors' )
parser.add_option( '-i', '--indels', action='store_true', dest='indels', default=False, help='consider only alignments with indels' )
parser.add_option( '-o', '--output', dest='output', help='The file to save the output' )
(options, args) = parser.parse_args()
# output version # of tool
try:
tmp = tempfile.NamedTemporaryFile().name
tmp_stdout = open( tmp, 'wb' )
proc = subprocess.Popen( args='dwgsim_eval 2>&1', shell=True, stdout=tmp_stdout )
tmp_stdout.close()
returncode = proc.wait()
stdout = None
for line in open( tmp_stdout.name, 'rb' ):
if line.lower().find( 'version' ) >= 0:
stdout = line.strip()
break
if stdout:
sys.stdout.write( '%s\n' % stdout )
else:
raise Exception
except:
sys.stdout.write( 'Could not determine DWGSIM_EVAL version\n' )
buffsize = 1048576
# make temp directory for dwgsim, requires trailing slash
tmp_dir = '%s/' % tempfile.mkdtemp()
#'generic' options used in all dwgsim commands here
try:
tmp_dir = '%s/' % tempfile.mkdtemp()
dwgsim_eval_cmd = 'dwgsim_eval'
if True == options.alignmentScore:
dwgsim_eval_cmd = dwgsim_eval_cmd + ' -a'
if True == options.bwa:
dwgsim_eval_cmd = dwgsim_eval_cmd + ' -b'
if True == options.colorSpace:
dwgsim_eval_cmd = dwgsim_eval_cmd + ' -c'
use_p = False
if 0 <= options.numSnps:
use_p = True
dwgsim_eval_cmd = dwgsim_eval_cmd + (' -s %s' % options.numSnps)
if 0 <= options.numErrors:
use_p = True
dwgsim_eval_cmd = dwgsim_eval_cmd + (' -e %s' % options.numErrors)
if True == options.indels:
use_p = True
dwgsim_eval_cmd = dwgsim_eval_cmd + ' -i'
if True == use_p or True == options.printIncorrect:
dwgsim_eval_cmd = dwgsim_eval_cmd + ' -p'
if True == options.singleEnd:
dwgsim_eval_cmd = dwgsim_eval_cmd + ' -z'
dwgsim_eval_cmd = '%s -d %s -g %s -n %s -q %s' % (dwgsim_eval_cmd, \
options.scoreFactor, \
options.wiggle, \
options.numReads, \
options.minMapq)
if None != options.sam:
dwgsim_eval_cmd = dwgsim_eval_cmd + ' -S ' + options.sam
elif None != options.bam:
dwgsim_eval_cmd = dwgsim_eval_cmd + ' ' + options.bam
else:
raise Exception, 'Input file was neither a SAM nor BAM'
dwgsim_eval_cmd = dwgsim_eval_cmd + ' > ' + options.output
# need to nest try-except in try-finally to handle 2.4
try:
# dwgsim
run_process ( dwgsim_eval_cmd, 'dwgsim', tmp_dir, buffsize )
# check that there are results in the output file
check_output ( options.output, False )
sys.stdout.write( 'DWGSIM_EVAL successful' )
except Exception, e:
stop_err( 'DWGSIM_EVAL failed.\n' + str( e ) )
finally:
# clean up temp dir
if os.path.exists( tmp_dir ):
shutil.rmtree( tmp_dir )
if __name__=="__main__": __main__() | DWGSIMSrc/scripts/galaxy/dwgsim_eval_wrapper.py | import optparse, os, shutil, subprocess, sys, tempfile
def stop_err( msg ):
sys.stderr.write( '%s\n' % msg )
sys.exit()
def run_process ( cmd, name, tmp_dir, buffsize ):
try:
tmp = tempfile.NamedTemporaryFile( dir=tmp_dir ).name
tmp_stderr = open( tmp, 'wb' )
proc = subprocess.Popen( args=cmd, shell=True, cwd=tmp_dir, stderr=tmp_stderr.fileno() )
returncode = proc.wait()
tmp_stderr.close()
# get stderr, allowing for case where it's very large
tmp_stderr = open( tmp, 'rb' )
stderr = ''
try:
while True:
stderr += tmp_stderr.read( buffsize )
if not stderr or len( stderr ) % buffsize != 0:
break
except OverflowError:
pass
tmp_stderr.close()
if returncode != 0:
raise Exception, stderr
except Exception, e:
raise Exception, 'Error in \'' + name + '\'. \n' + str( e )
def check_output ( output, canBeEmpty ):
if 0 < os.path.getsize( output ):
return True
elif False == canBeEmpty:
raise Exception, 'The output file is empty:' + output
def __main__():
#Parse Command Line
parser = optparse.OptionParser()
parser.add_option( '-a', '--alignmentScore', action='store_true', dest='alignmentScore', default=False, help='split alignments by alignment score instead of mapping quality' )
parser.add_option( '-b', '--bwa', action='store_true', dest='bwa', default=False, help='alignments are from BWA (SOLiD only)' )
parser.add_option( '-c', '--colorSpace', action='store_true', dest='colorSpace', default=False, help='generate reads in color space (SOLiD reads)' )
parser.add_option( '-d', '--scoreFactor', dest='scoreFactor', type='int', help='divide quality/alignment score by this factor' )
parser.add_option( '-g', '--wiggle', dest='wiggle', type='int', help='gap "wiggle"' )
parser.add_option( '-n', '--numReads', dest='numReads', type='int', help='number of raw input paired-end reads (otherwise, inferred from all SAM records present).' )
parser.add_option( '-q', '--minMapq', dest='minMapq', type='int', help='consider only alignments with this mapping quality or greater.' )
parser.add_option( '-z', '--singleEnd', action='store_true', dest='singleEnd', default=False, help='input contains only single end reads' )
parser.add_option( '-S', '--sam', dest='sam', default=None, help='input SAM' )
parser.add_option( '-B', '--bam', dest='bam', default=None, help='input BAM' )
parser.add_option( '-p', '--printIncorrect', action='store_true', dest='printIncorrect', default=False, help='print incorrect alignments' )
parser.add_option( '-s', '--numSnps', dest='numSnps', type="int", help='consider only alignments with the number of specified SNPs' )
parser.add_option( '-e', '--numErrors', dest='numErrors', type="int", default=False, help='consider only alignments with the number of specified errors' )
parser.add_option( '-i', '--indels', action='store_true', dest='indels', default=False, help='consider only alignments with indels' )
parser.add_option( '-o', '--output', dest='output', help='The file to save the output' )
(options, args) = parser.parse_args()
# output version # of tool
try:
tmp = tempfile.NamedTemporaryFile().name
tmp_stdout = open( tmp, 'wb' )
proc = subprocess.Popen( args='dwgsim_eval 2>&1', shell=True, stdout=tmp_stdout )
tmp_stdout.close()
returncode = proc.wait()
stdout = None
for line in open( tmp_stdout.name, 'rb' ):
if line.lower().find( 'version' ) >= 0:
stdout = line.strip()
break
if stdout:
sys.stdout.write( '%s\n' % stdout )
else:
raise Exception
except:
sys.stdout.write( 'Could not determine DWGSIM_EVAL version\n' )
buffsize = 1048576
# make temp directory for dwgsim, requires trailing slash
tmp_dir = '%s/' % tempfile.mkdtemp()
#'generic' options used in all dwgsim commands here
try:
tmp_dir = '%s/' % tempfile.mkdtemp()
dwgsim_eval_cmd = 'dwgsim_eval'
if True == options.alignmentScore:
dwgsim_eval_cmd = dwgsim_eval_cmd + ' -a'
if True == options.bwa:
dwgsim_eval_cmd = dwgsim_eval_cmd + ' -b'
if True == options.colorSpace:
dwgsim_eval_cmd = dwgsim_eval_cmd + ' -c'
use_p = False
if 0 <= options.numSnps:
use_p = True
dwgsim_eval_cmd = dwgsim_eval_cmd + (' -s %s' % options.numSnps)
if 0 <= options.numErrors:
use_p = True
dwgsim_eval_cmd = dwgsim_eval_cmd + (' -e %s' % options.numErrors)
if True == options.indels:
use_p = True
dwgsim_eval_cmd = dwgsim_eval_cmd + ' -i'
if True == use_p or True == options.printIncorrect:
dwgsim_eval_cmd = dwgsim_eval_cmd + ' -p'
if True == options.singleEnd:
dwgsim_eval_cmd = dwgsim_eval_cmd + ' -z'
dwgsim_eval_cmd = '%s -d %s -g %s -n %s -q %s' % (dwgsim_eval_cmd, \
options.scoreFactor, \
options.wiggle, \
options.numReads, \
options.minMapq)
if None != options.sam:
dwgsim_eval_cmd = dwgsim_eval_cmd + ' -S ' + options.sam
elif None != options.bam:
dwgsim_eval_cmd = dwgsim_eval_cmd + ' ' + options.bam
else:
raise Exception, 'Input file was neither a SAM nor BAM'
dwgsim_eval_cmd = dwgsim_eval_cmd + ' > ' + options.output
# need to nest try-except in try-finally to handle 2.4
try:
# dwgsim
run_process ( dwgsim_eval_cmd, 'dwgsim', tmp_dir, buffsize )
# check that there are results in the output file
check_output ( options.output, False )
sys.stdout.write( 'DWGSIM_EVAL successful' )
except Exception, e:
stop_err( 'DWGSIM_EVAL failed.\n' + str( e ) )
finally:
# clean up temp dir
if os.path.exists( tmp_dir ):
shutil.rmtree( tmp_dir )
if __name__=="__main__": __main__() | 0.134804 | 0.079854 |
import secrets, os, bcrypt, subprocess, json, math, shutil
from decouple import config
passwords_dir = 'web/.passwords'
# Directory for display templates (not Flask templates)
templates_dir = 'templates'
services_config_path = 'cache/services.json'
env_file = '.env'
def generate_secret():
secret = secrets.token_hex()
set_config_key('FLASK_SECRET', secret)
return secret
def check_password(username, password):
if not os.path.exists(f"{passwords_dir}/{username}"):
return False
f = open(f"{passwords_dir}/{username}", 'r')
hashed = f.read()
f.close()
return bcrypt.checkpw(password.encode('utf-8'), hashed.encode('utf-8'))
def store_password(username, password):
password = password.<PASSWORD>('<PASSWORD>')
hashed = bcrypt.hashpw(password, bcrypt.gensalt())
if not os.path.exists(passwords_dir):
os.mkdir(passwords_dir)
f = open(f"{passwords_dir}/{username}", 'w')
f.write(hashed.decode())
f.close()
def is_registered(username):
return os.path.exists(f"{passwords_dir}/{username}")
def get_device_info():
with open('/proc/uptime', 'r') as f:
uptime_seconds = float(f.readline().split()[0])
uptime = human_time_from_seconds(uptime_seconds)
one_minute, five_minutes, fifteen_minutes = os.getloadavg()
total, used, free = shutil.disk_usage("/")
return {
"ip_address": subprocess.run(['hostname', '-I'], capture_output=True, text=True).stdout.strip().split()[0],
"uptime": uptime,
"load_average_one_min": str(one_minute),
"load_average_five_min": str(five_minutes),
"load_average_fifteen_min": str(fifteen_minutes),
"disk_usage": "%d / %d GB" % (used // (2**30), total // (2**30)),
"space_available": "%d GB" % (free // (2**30))
}
def human_time_from_seconds(seconds):
seconds_in_day = 24*60*60
seconds_in_hour = 60*60
seconds_in_minute = 60
if (seconds >= seconds_in_day):
days = math.floor(seconds / seconds_in_day)
return str(days) + " " + ("day" if days == 1 else "days")
if (seconds >= seconds_in_hour):
hours = math.floor(seconds / seconds_in_hour)
return str(hours) + " " + ("hour" if hours == 1 else "hours")
if (seconds >= seconds_in_minute):
minutes = math.floor(seconds / seconds_in_minute)
return str(minutes) + " " + ("minute" if minutes == 1 else "minutes")
return 'less than 1 minute'
def get_display():
template = config('TEMPLATE_CONFIG_FILE', default='Default', cast=str)
display_configs = [
{
'name': 'screen_width',
'label': 'Screen Width',
'value': config('SCREEN_WIDTH', default=0, cast=int),
'type': 'int'
},
{
'name': 'screen_height',
'label': 'Screen Height',
'value': config('SCREEN_HEIGHT', default=0, cast=int),
'type': 'int'
},
{
'name': 'grid_margin',
'label': 'Grid Margin',
'value': config('GRID_MARGIN', default=0, cast=int),
'type': 'int'
},
{
'name': 'icon_scale',
'label': 'Icon Scale',
'value': config('ICON_SCALE', default=0, cast=int),
'type': 'float'
},
{
'name': 'font_scale',
'label': 'Font Scale',
'value': config('FONT_SCALE', default=0, cast=float),
'type': 'float'
},
{
'name': 'font_name',
'label': 'Font Name',
'value': config('FONT_NAME', default="", cast=str),
'type': 'str'
},
{
'name': 'fps',
'label': 'FPS',
'value': config('FPS', default=0, cast=int),
'type': 'int'
},
{
'name': 'show_mouse',
'label': 'Show Mouse',
'value': config('SHOW_MOUSE', default=False, cast=bool),
'type': 'bool'
},
{
'name': 'show_device_info',
'label': 'Show Device Info',
'value': config('SHOW_DEVICE_INFO', default=False, cast=bool),
'type': 'bool'
},
{
'name': 'debug_grid',
'label': 'Debug Grid',
'value': config('DEBUG_GRID', default=False, cast=bool),
'type': 'bool'
},
]
return {
"resolution": config('SCREEN_WIDTH', default='', cast=str) + " x " + config('SCREEN_HEIGHT', default='', cast=str),
"template": template if template else 'Default',
"configs": display_configs
}
def format_service_configs(configs):
formatted_configs = []
for config in configs:
formatted_configs.append({
'name': config['name'],
'label': config['label'] if 'label' in config else config['name'].replace("_", " ").capitalize(),
'value': config['value'],
'type': config['type'] if 'type' in config else type(config['value']).__name__
})
return formatted_configs
def get_services(service_name=""):
with open(services_config_path) as json_file:
service_data = json.load(json_file)
if service_name:
for service in service_data['services']:
if service['service'] == service_name:
service['configs'] = format_service_configs(service['configs'])
return service
return False
services = []
for service in service_data['services']:
service['configs'] = format_service_configs(service['configs'])
services.append(service)
return services
# Enables or disables a service
def toggle_service(service_name, enable):
with open(services_config_path) as json_file:
service_data = json.load(json_file)
for i in range(len(service_data['services'])):
if service_data['services'][i]['service'] == service_name:
service_data['services'][i]['enabled'] = enable
update_services(service_data)
return True
def save_service_config(name, data):
with open(services_config_path) as json_file:
service_data = json.load(json_file)
for i in range(len(service_data['services'])):
if service_data['services'][i]['service'] == name:
service_data['services'][i] = data
update_services(service_data)
def update_services(data):
f = open(services_config_path, 'w')
f.write(json.dumps(data, indent=4))
# Get display templates (not Flask templates)
def get_templates():
templates = [
{
'name': 'Default',
'value': 'default'
}
]
if os.path.exists(templates_dir):
files = os.listdir(templates_dir)
for template_file in files:
templates.append({
'name': template_file,
'value': template_file
})
return templates
def set_config_key(key, value):
f = open(env_file, 'r')
lines = f.readlines()
f.close()
config_key_value = key + '=' + value + "\n"
config_value_exists = False
for i in range(len(lines)):
if lines[i].startswith(key):
config_value_exists = True
lines[i] = config_key_value
if not config_value_exists:
f = open(env_file, 'a')
f.write(config_key_value)
else:
f = open(env_file, 'w')
f.writelines(lines)
f.close()
def get_locations():
with open('cache/locations.json') as json_file:
locations_config = json.load(json_file)
return locations_config['locations'] | web/utilities.py | import secrets, os, bcrypt, subprocess, json, math, shutil
from decouple import config
passwords_dir = 'web/.passwords'
# Directory for display templates (not Flask templates)
templates_dir = 'templates'
services_config_path = 'cache/services.json'
env_file = '.env'
def generate_secret():
secret = secrets.token_hex()
set_config_key('FLASK_SECRET', secret)
return secret
def check_password(username, password):
if not os.path.exists(f"{passwords_dir}/{username}"):
return False
f = open(f"{passwords_dir}/{username}", 'r')
hashed = f.read()
f.close()
return bcrypt.checkpw(password.encode('utf-8'), hashed.encode('utf-8'))
def store_password(username, password):
password = password.<PASSWORD>('<PASSWORD>')
hashed = bcrypt.hashpw(password, bcrypt.gensalt())
if not os.path.exists(passwords_dir):
os.mkdir(passwords_dir)
f = open(f"{passwords_dir}/{username}", 'w')
f.write(hashed.decode())
f.close()
def is_registered(username):
return os.path.exists(f"{passwords_dir}/{username}")
def get_device_info():
with open('/proc/uptime', 'r') as f:
uptime_seconds = float(f.readline().split()[0])
uptime = human_time_from_seconds(uptime_seconds)
one_minute, five_minutes, fifteen_minutes = os.getloadavg()
total, used, free = shutil.disk_usage("/")
return {
"ip_address": subprocess.run(['hostname', '-I'], capture_output=True, text=True).stdout.strip().split()[0],
"uptime": uptime,
"load_average_one_min": str(one_minute),
"load_average_five_min": str(five_minutes),
"load_average_fifteen_min": str(fifteen_minutes),
"disk_usage": "%d / %d GB" % (used // (2**30), total // (2**30)),
"space_available": "%d GB" % (free // (2**30))
}
def human_time_from_seconds(seconds):
seconds_in_day = 24*60*60
seconds_in_hour = 60*60
seconds_in_minute = 60
if (seconds >= seconds_in_day):
days = math.floor(seconds / seconds_in_day)
return str(days) + " " + ("day" if days == 1 else "days")
if (seconds >= seconds_in_hour):
hours = math.floor(seconds / seconds_in_hour)
return str(hours) + " " + ("hour" if hours == 1 else "hours")
if (seconds >= seconds_in_minute):
minutes = math.floor(seconds / seconds_in_minute)
return str(minutes) + " " + ("minute" if minutes == 1 else "minutes")
return 'less than 1 minute'
def get_display():
template = config('TEMPLATE_CONFIG_FILE', default='Default', cast=str)
display_configs = [
{
'name': 'screen_width',
'label': 'Screen Width',
'value': config('SCREEN_WIDTH', default=0, cast=int),
'type': 'int'
},
{
'name': 'screen_height',
'label': 'Screen Height',
'value': config('SCREEN_HEIGHT', default=0, cast=int),
'type': 'int'
},
{
'name': 'grid_margin',
'label': 'Grid Margin',
'value': config('GRID_MARGIN', default=0, cast=int),
'type': 'int'
},
{
'name': 'icon_scale',
'label': 'Icon Scale',
'value': config('ICON_SCALE', default=0, cast=int),
'type': 'float'
},
{
'name': 'font_scale',
'label': 'Font Scale',
'value': config('FONT_SCALE', default=0, cast=float),
'type': 'float'
},
{
'name': 'font_name',
'label': 'Font Name',
'value': config('FONT_NAME', default="", cast=str),
'type': 'str'
},
{
'name': 'fps',
'label': 'FPS',
'value': config('FPS', default=0, cast=int),
'type': 'int'
},
{
'name': 'show_mouse',
'label': 'Show Mouse',
'value': config('SHOW_MOUSE', default=False, cast=bool),
'type': 'bool'
},
{
'name': 'show_device_info',
'label': 'Show Device Info',
'value': config('SHOW_DEVICE_INFO', default=False, cast=bool),
'type': 'bool'
},
{
'name': 'debug_grid',
'label': 'Debug Grid',
'value': config('DEBUG_GRID', default=False, cast=bool),
'type': 'bool'
},
]
return {
"resolution": config('SCREEN_WIDTH', default='', cast=str) + " x " + config('SCREEN_HEIGHT', default='', cast=str),
"template": template if template else 'Default',
"configs": display_configs
}
def format_service_configs(configs):
formatted_configs = []
for config in configs:
formatted_configs.append({
'name': config['name'],
'label': config['label'] if 'label' in config else config['name'].replace("_", " ").capitalize(),
'value': config['value'],
'type': config['type'] if 'type' in config else type(config['value']).__name__
})
return formatted_configs
def get_services(service_name=""):
with open(services_config_path) as json_file:
service_data = json.load(json_file)
if service_name:
for service in service_data['services']:
if service['service'] == service_name:
service['configs'] = format_service_configs(service['configs'])
return service
return False
services = []
for service in service_data['services']:
service['configs'] = format_service_configs(service['configs'])
services.append(service)
return services
# Enables or disables a service
def toggle_service(service_name, enable):
with open(services_config_path) as json_file:
service_data = json.load(json_file)
for i in range(len(service_data['services'])):
if service_data['services'][i]['service'] == service_name:
service_data['services'][i]['enabled'] = enable
update_services(service_data)
return True
def save_service_config(name, data):
with open(services_config_path) as json_file:
service_data = json.load(json_file)
for i in range(len(service_data['services'])):
if service_data['services'][i]['service'] == name:
service_data['services'][i] = data
update_services(service_data)
def update_services(data):
f = open(services_config_path, 'w')
f.write(json.dumps(data, indent=4))
# Get display templates (not Flask templates)
def get_templates():
templates = [
{
'name': 'Default',
'value': 'default'
}
]
if os.path.exists(templates_dir):
files = os.listdir(templates_dir)
for template_file in files:
templates.append({
'name': template_file,
'value': template_file
})
return templates
def set_config_key(key, value):
f = open(env_file, 'r')
lines = f.readlines()
f.close()
config_key_value = key + '=' + value + "\n"
config_value_exists = False
for i in range(len(lines)):
if lines[i].startswith(key):
config_value_exists = True
lines[i] = config_key_value
if not config_value_exists:
f = open(env_file, 'a')
f.write(config_key_value)
else:
f = open(env_file, 'w')
f.writelines(lines)
f.close()
def get_locations():
with open('cache/locations.json') as json_file:
locations_config = json.load(json_file)
return locations_config['locations'] | 0.424054 | 0.092811 |
from rest_framework import status
from core.entity.user import User, UserSet
from core.entity.entity_exceptions import EntityNotFoundException
from core.entity.entry_points.authorizations import AuthorizationModule
from ..base_view_test import BaseViewTest
from .synchronization_client import SynchronizationClient
class TestSynchronization(BaseViewTest):
"""
Provides all synchronization test
"""
superuser_required = True
ordinary_user_required = True
support_token = None
synchronization_path = "/api/{version}/account-synchronization/".format(version=BaseViewTest.API_VERSION)
@classmethod
def setUpTestData(cls):
super().setUpTestData()
support = UserSet().get("support")
cls.support_token = AuthorizationModule.issue_token(support)
def test_synchronization_no_auth(self):
"""
Tests whether synchronization process can be performed by a non-authorized user
:return: nothing
"""
response = self.client.post(self.synchronization_path)
self.assertEquals(response.status_code, status.HTTP_401_UNAUTHORIZED, "Unexpected status code")
def test_synchronization_forbidden(self):
"""
Tests whether synchronization is forbidden for ordinary users
:return: nothing
"""
headers = self.get_authorization_headers("ordinary_user")
response = self.client.post(self.synchronization_path, **headers)
self.assertEquals(response.status_code, status.HTTP_403_FORBIDDEN, "Unexpected status code")
def test_synchronization_teapot(self):
"""
Tests whether the user can perform synchronization when all synchronization modules were switched off
:return: nothing
"""
headers = self.get_authorization_headers("superuser")
response = self.client.post(self.synchronization_path, **headers)
self.assertEquals(response.status_code, status.HTTP_503_SERVICE_UNAVAILABLE, "unexpected status code")
def test_synchronization_partial_fail(self):
"""
Tests whether synchronization shall be successful when one of the synchronizing users is currently logged ins
:return: nothing
"""
self.enable_synchronization_module()
sync_client = SynchronizationClient(self.client, self, self.get_authorization_headers("superuser"))
sync_client.synchronize()
self.assertEquals(len(sync_client.details), 1, "There must be one error")
self.assertEquals(sync_client.details[0]['login'], "superuser", "The error must be about the superuser")
self.assertEquals(sync_client.details[0]['name'], "Superuser", "The user's name must be 'superuser'")
self.assertEquals(sync_client.details[0]['surname'], "Superuserov", "The user's surname must be 'Superuserov'")
self.assertEquals(sync_client.details[0]['action'], "remove",
"The error must be occured during the user remove")
user_set = UserSet()
user = user_set.get("superuser")
self.assertEquals(user.name, "Superuser", "The user name must be saved intact")
self.assertEquals(user.surname, "Superuserov", "The user surname must be saved intact")
self.assertGreater(len(user_set), 10, "The users shall be added and downloaded successfully")
user_set.get("support")
with self.assertRaises(EntityNotFoundException,
msg="Ordinary user shall be removed successfully"):
user_set.get("user")
def enable_synchronization_module(self):
"""
Enables the synchronization module
:return: nothing
"""
from core.synchronizations.ihna_employees import IhnaSynchronization
sync = IhnaSynchronization()
sync.is_enabled = True
sync.update()
del BaseViewTest | corefacility/core/test/views/synchronization_test/test_synchronization.py | from rest_framework import status
from core.entity.user import User, UserSet
from core.entity.entity_exceptions import EntityNotFoundException
from core.entity.entry_points.authorizations import AuthorizationModule
from ..base_view_test import BaseViewTest
from .synchronization_client import SynchronizationClient
class TestSynchronization(BaseViewTest):
"""
Provides all synchronization test
"""
superuser_required = True
ordinary_user_required = True
support_token = None
synchronization_path = "/api/{version}/account-synchronization/".format(version=BaseViewTest.API_VERSION)
@classmethod
def setUpTestData(cls):
super().setUpTestData()
support = UserSet().get("support")
cls.support_token = AuthorizationModule.issue_token(support)
def test_synchronization_no_auth(self):
"""
Tests whether synchronization process can be performed by a non-authorized user
:return: nothing
"""
response = self.client.post(self.synchronization_path)
self.assertEquals(response.status_code, status.HTTP_401_UNAUTHORIZED, "Unexpected status code")
def test_synchronization_forbidden(self):
"""
Tests whether synchronization is forbidden for ordinary users
:return: nothing
"""
headers = self.get_authorization_headers("ordinary_user")
response = self.client.post(self.synchronization_path, **headers)
self.assertEquals(response.status_code, status.HTTP_403_FORBIDDEN, "Unexpected status code")
def test_synchronization_teapot(self):
"""
Tests whether the user can perform synchronization when all synchronization modules were switched off
:return: nothing
"""
headers = self.get_authorization_headers("superuser")
response = self.client.post(self.synchronization_path, **headers)
self.assertEquals(response.status_code, status.HTTP_503_SERVICE_UNAVAILABLE, "unexpected status code")
def test_synchronization_partial_fail(self):
"""
Tests whether synchronization shall be successful when one of the synchronizing users is currently logged ins
:return: nothing
"""
self.enable_synchronization_module()
sync_client = SynchronizationClient(self.client, self, self.get_authorization_headers("superuser"))
sync_client.synchronize()
self.assertEquals(len(sync_client.details), 1, "There must be one error")
self.assertEquals(sync_client.details[0]['login'], "superuser", "The error must be about the superuser")
self.assertEquals(sync_client.details[0]['name'], "Superuser", "The user's name must be 'superuser'")
self.assertEquals(sync_client.details[0]['surname'], "Superuserov", "The user's surname must be 'Superuserov'")
self.assertEquals(sync_client.details[0]['action'], "remove",
"The error must be occured during the user remove")
user_set = UserSet()
user = user_set.get("superuser")
self.assertEquals(user.name, "Superuser", "The user name must be saved intact")
self.assertEquals(user.surname, "Superuserov", "The user surname must be saved intact")
self.assertGreater(len(user_set), 10, "The users shall be added and downloaded successfully")
user_set.get("support")
with self.assertRaises(EntityNotFoundException,
msg="Ordinary user shall be removed successfully"):
user_set.get("user")
def enable_synchronization_module(self):
"""
Enables the synchronization module
:return: nothing
"""
from core.synchronizations.ihna_employees import IhnaSynchronization
sync = IhnaSynchronization()
sync.is_enabled = True
sync.update()
del BaseViewTest | 0.746416 | 0.162712 |
import os
import sys
# Locate the fixture directory ("resources/") relative to this test module.
base_dir = os.path.dirname(__file__)
data_dir = os.path.join(base_dir, "resources")
# Make the package root importable when the tests are run directly
# (two levels up from enricher/tests/).
sys.path.extend([os.path.join(base_dir, '../..')])
import unittest
import warnings
import pandas as pd
from enricher.enrich import regulon_utils, regulon_enrichment
# Suppress UserWarning noise during the test run.
warnings.simplefilter("ignore", UserWarning)
def load_test_sif(sif='test.sif'):
    """Load a tab-separated SIF interaction fixture from the resources directory.

    Args:
        sif: File name of the SIF fixture; first column is used as the index.

    Returns:
        pandas.DataFrame of interactions.
    """
    # read_csv with an explicit separator, consistent with load_test_expr
    # (pd.read_table is just read_csv with sep='\t' and was deprecated in
    # pandas 0.24).
    return pd.read_csv(os.path.join(data_dir, sif), index_col=0, sep='\t')
def load_test_expr(expr='test_expr.tsv'):
    """Read the expression-matrix fixture as a DataFrame (rows indexed by
    the first column; tab-separated)."""
    path = os.path.join(data_dir, expr)
    return pd.read_csv(path, sep='\t', index_col=0)
class RegulonUtilsTestCase(unittest.TestCase):
    """Regression tests for ``regulon_utils`` and ``regulon_enrichment``.

    All expected values were derived from the fixture files in
    ``resources/`` (``test.sif`` and ``test_expr.tsv``).
    """

    # Pruning threshold used throughout (was hard-coded 15 in every test).
    PRUNE_THRESHOLD = 15

    # --- shared fixture construction (each test originally repeated this) ---

    def _pruned_regulon(self):
        """Return (expr, filtered_regulon): the transposed expression frame and
        the filtered, pruned regulon built from the fixtures."""
        expr = load_test_expr().T
        filt_sif = regulon_utils.filter_sif(load_test_sif())
        return expr, regulon_utils.prune_regulon(expr, filt_sif, self.PRUNE_THRESHOLD)

    def _tp53_weights(self):
        """Return (expr, regul_weights) with regulon weights assigned for TP53."""
        expr, filtered_regulon = self._pruned_regulon()
        return expr, regulon_utils.regulon_weight_assignment('TP53', expr, filtered_regulon)

    def test_load_test_sif(self):
        # Raw SIF fixture: 1302 interactions x 3 columns.
        self.assertSequenceEqual(load_test_sif().shape, (1302, 3))

    def test_filter_sif(self):
        # Filtering keeps 800 rows, all of type 'controls-expression-of'.
        filtered = regulon_utils.filter_sif(load_test_sif())
        self.assertEqual(filtered.shape[0], 800)
        self.assertEqual(filtered['Type'].unique().tolist()[0], 'controls-expression-of')

    def test_load_test_expr(self):
        # Transposed expression fixture: 6 samples x 8723 genes.
        self.assertSequenceEqual(load_test_expr().T.shape, (6, 8723))

    def test_prune_regulon(self):
        _, filtered_regulon = self._pruned_regulon()
        self.assertSequenceEqual(filtered_regulon.shape, (433, 3))

    def test_regulon_weight_assignment(self):
        _, regul_weights = self._tp53_weights()
        self.assertSequenceEqual(regul_weights.shape, (433, 3))
        self.assertSequenceEqual(regul_weights.columns.tolist(), ['Target', 'MoA', 'likelihood'])
        # Spot-check the first target row of the TP53 regulon.
        first_row = regul_weights.iloc[0, :].tolist()
        self.assertEqual(first_row[0], 'AARS')
        self.assertAlmostEqual(first_row[1], 0.1724812122096268)
        self.assertAlmostEqual(first_row[2], 0.8717434402332361)

    def test_quantile_nes_score(self):
        expr, regul_weights = self._tp53_weights()
        nes = regulon_enrichment.quantile_nes_score(regul_weights, expr.T)
        self.assertSequenceEqual(nes.columns.tolist(),
                                 ['Test_A1', 'Test_A2', 'Test_A3', 'Test_D1', 'Test_D2', 'Test_D3'])
        self.assertAlmostEqual(nes.values.mean(), -1.5392231513623145)

    def test_score_enrichment(self):
        expr, regul_weights = self._tp53_weights()
        nes = regulon_enrichment.quantile_nes_score(regul_weights, expr.T)
        enrichment = regulon_enrichment.score_enrichment('TP53', expr, regul_weights, nes)
        self.assertAlmostEqual(enrichment.values.mean(), -1.6208941454109855)
if __name__ == '__main__':
unittest.main() | enricher/tests/test_regulon.py | import os
import sys
base_dir = os.path.dirname(__file__)
data_dir = os.path.join(base_dir, "resources")
sys.path.extend([os.path.join(base_dir, '../..')])
import unittest
import warnings
import pandas as pd
from enricher.enrich import regulon_utils, regulon_enrichment
warnings.simplefilter("ignore", UserWarning)
def load_test_sif(sif='test.sif'):
    """Load the test interaction network (SIF) from the resource directory."""
    sif_path = os.path.join(data_dir, sif)
    return pd.read_table(sif_path, index_col=0)
def load_test_expr(expr='test_expr.tsv'):
    """Load the tab-separated test expression matrix from the resource directory."""
    return pd.read_csv(os.path.join(data_dir, expr), sep='\t', index_col=0)
class RegulonUtilsTestCase(unittest.TestCase):
    """Regression tests pinning exact shapes and values for the toy regulon dataset."""

    @staticmethod
    def _pruned_regulon():
        """Run the shared sif -> filter -> prune pipeline; return (expr, pruned regulon)."""
        expr = load_test_expr().T
        filtered = regulon_utils.filter_sif(load_test_sif())
        return expr, regulon_utils.prune_regulon(expr, filtered, 15)

    @staticmethod
    def _tp53_weights():
        """Extend the shared pipeline with TP53 weight assignment; return (expr, weights)."""
        expr, pruned = RegulonUtilsTestCase._pruned_regulon()
        return expr, regulon_utils.regulon_weight_assignment('TP53', expr, pruned)

    def test_load_test_sif(self):
        self.assertSequenceEqual(load_test_sif().shape, (1302, 3))

    def test_filter_sif(self):
        filtered = regulon_utils.filter_sif(load_test_sif())
        self.assertEqual(filtered.shape[0], 800)
        self.assertEqual(filtered['Type'].unique().tolist()[0], 'controls-expression-of')

    def test_load_test_expr(self):
        self.assertSequenceEqual(load_test_expr().T.shape, (6, 8723))

    def test_prune_regulon(self):
        _, pruned = self._pruned_regulon()
        self.assertSequenceEqual(pruned.shape, (433, 3))

    def test_regulon_weight_assignment(self):
        _, weights = self._tp53_weights()
        self.assertSequenceEqual(weights.shape, (433, 3))
        self.assertSequenceEqual(weights.columns.tolist(), ['Target', 'MoA', 'likelihood'])
        first = weights.iloc[0, :].tolist()
        self.assertEqual(first[0], 'AARS')
        self.assertAlmostEqual(first[1], 0.1724812122096268)
        self.assertAlmostEqual(first[2], 0.8717434402332361)

    def test_quantile_nes_score(self):
        expr, weights = self._tp53_weights()
        nes = regulon_enrichment.quantile_nes_score(weights, expr.T)
        self.assertSequenceEqual(
            nes.columns.tolist(),
            ['Test_A1', 'Test_A2', 'Test_A3', 'Test_D1', 'Test_D2', 'Test_D3'])
        self.assertAlmostEqual(nes.values.mean(), -1.5392231513623145)

    def test_score_enrichment(self):
        expr, weights = self._tp53_weights()
        nes = regulon_enrichment.quantile_nes_score(weights, expr.T)
        enrichment = regulon_enrichment.score_enrichment('TP53', expr, weights, nes)
        self.assertAlmostEqual(enrichment.values.mean(), -1.6208941454109855)
if __name__ == '__main__':
unittest.main() | 0.233444 | 0.508849 |
from flask import Blueprint, redirect, url_for, render_template, request
from data.models import Person, db
from utils.csv2dict import convert
from constants import attributes as attrs
from datetime import datetime
from os.path import join
# Blueprint carrying all of the application's main routes.
main = Blueprint('main', __name__)
# UI translation strings, populated in place from the bundled CSV;
# keys look like "LANG_RU" (see get_lang, which formats the lookup key).
langs_dict = {}
convert(langs_dict, csv_file=join('languages', 'langs.csv'))
def get_lang(locale='ru'):
    """Return the translation mapping for *locale* (looked up as e.g. 'LANG_RU')."""
    key = f"LANG_{locale.upper()}"
    return langs_dict[key]
def format_datetime(dt):
    """Parse a date string in one of the supported formats.

    Tries DD-MM-YYYY, DD.MM.YYYY and DD/MM/YYYY in turn and returns the
    parsed ``date``; falls back to today's date when nothing matches.

    Bug fix: the original body parsed an undefined name ``s_date`` instead
    of the ``dt`` argument, so every call raised NameError, which the bare
    ``except`` swallowed -- the function always returned the fallback.
    The fallback also returned a ``datetime`` while the success path
    returned a ``date``; it now consistently returns a ``date``.
    """
    date_patterns = ["%d-%m-%Y", '%d.%m.%Y', '%d/%m/%Y']
    for pattern in date_patterns:
        try:
            return datetime.strptime(dt, pattern).date()
        except (ValueError, TypeError):
            # Wrong format, or a non-string spreadsheet cell: try the next pattern.
            pass
    return datetime.now().date()
@main.route("")
def home():
return render_template('index.html', content=['tim', 'joe', 'bill'])
@main.route("/upload", methods=["GET", "POST"])
def upload():
if request.method == "POST":
if request.files:
try:
raw_list = request.get_array(field_name='excel')
except Exception as e:
return render_template('error.html', error_msg='Неверный формат файла')
return str(e)
removed_empty_list = [line for line in raw_list if sum(len(str(elem)) for elem in line)>10]
column_names = [get_lang('ru')[attr] for attr in attrs]
first_row = 0
first_column = 0
while first_row < len(removed_empty_list):
try:
tmp_date = datetime.strptime(str(removed_empty_list[first_row][first_column]), '%d.%m.%Y')
break
except Exception as e:
if first_column == 0:
first_column = 1
else:
first_row += 1
first_column = 0
else:
return render_template('error.html', error_msg='Неверно заполненная таблица')
return 'incorrect_file_format'
column_values = removed_empty_list[first_row:]
for line in column_values:
entry_date = format_datetime(line[first_column])
flight = str(line[first_column+1])
fullname = str(line[first_column+2])
id_number = str(line[first_column+3])
birth_date = format_datetime(line[first_column+4])
passport_number = str(line[first_column+5])
citizenship = str(line[first_column+6])
phone = str(line[first_column+7])
before_arrival = str(line[first_column+8])
region = str(line[first_column+9])
residence = str(line[first_column+10])
work_place = str(line[first_column+11])
found = line[first_column+12].lower()=='да'
hospitalized = line[first_column+13].lower()=='да'
hospital = str(line[first_column+14])
person = Person(entry_date=entry_date, flight=flight, fullname=fullname, id_number=id_number,
passport_number=passport_number, birth_date=birth_date, citizenship=citizenship, phone=phone,
before_arrival=before_arrival, region=region, residence=residence, work_place=work_place,
found=found, hospitalized=hospitalized, hospital=hospital)
db.session.add(person)
db.session.commit()
return render_template("upload_result.html", column_names=column_names, column_values=column_values)
return render_template('upload.html', lang=get_lang('ru'))
@main.route("/show")
def show():
persons = Person.query.all()
return render_template('show.html', attr_names=get_lang('ru'), attrs=attrs, persons=persons)
@main.route("/add", methods=["GET", "POST"])
def add():
if request.method == "POST":
entry_date = datetime.strptime(request.form.get('entry_date'), '%Y-%m-%d')
flight = str(request.form.get('flight'))
fullname = str(request.form.get('fullname'))
id_number = str(request.form.get('id_number'))
birth_date = datetime.strptime(request.form.get('birth_date'), '%Y-%m-%d')
passport_number = str(request.form.get('passport_number'))
citizenship = str(request.form.get('citizenship'))
phone = str(request.form.get('phone'))
before_arrival = str(request.form.get('before_arrival'))
region = str(request.form.get('region'))
residence = str(request.form.get('residence'))
work_place = str(request.form.get('work_place'))
found = request.form.get('found') == 'yes'
hospitalized = request.form.get('hospitalized') == 'yes'
hospital = str(request.form.get('hospital'))
person = Person(entry_date=entry_date, flight=flight, fullname=fullname, id_number=id_number,
passport_number=passport_number, birth_date=birth_date, citizenship=citizenship, phone=phone,
before_arrival=before_arrival, region=region, residence=residence, work_place=work_place,
found=found, hospitalized=hospitalized, hospital=hospital)
db.session.add(person)
db.session.commit()
return render_template('add.html') | main/controllers.py | from flask import Blueprint, redirect, url_for, render_template, request
from data.models import Person, db
from utils.csv2dict import convert
from constants import attributes as attrs
from datetime import datetime
from os.path import join
# Blueprint carrying all of the application's main routes.
main = Blueprint('main', __name__)
# UI translation strings, populated in place from the bundled CSV;
# keys look like "LANG_RU" (see get_lang, which formats the lookup key).
langs_dict = {}
convert(langs_dict, csv_file=join('languages', 'langs.csv'))
def get_lang(locale='ru'):
    """Return the translation mapping for *locale* (looked up as e.g. 'LANG_RU')."""
    key = f"LANG_{locale.upper()}"
    return langs_dict[key]
def format_datetime(dt):
    """Parse a date string in one of the supported formats.

    Tries DD-MM-YYYY, DD.MM.YYYY and DD/MM/YYYY in turn and returns the
    parsed ``date``; falls back to today's date when nothing matches.

    Bug fix: the original body parsed an undefined name ``s_date`` instead
    of the ``dt`` argument, so every call raised NameError, which the bare
    ``except`` swallowed -- the function always returned the fallback.
    The fallback also returned a ``datetime`` while the success path
    returned a ``date``; it now consistently returns a ``date``.
    """
    date_patterns = ["%d-%m-%Y", '%d.%m.%Y', '%d/%m/%Y']
    for pattern in date_patterns:
        try:
            return datetime.strptime(dt, pattern).date()
        except (ValueError, TypeError):
            # Wrong format, or a non-string spreadsheet cell: try the next pattern.
            pass
    return datetime.now().date()
@main.route("")
def home():
return render_template('index.html', content=['tim', 'joe', 'bill'])
@main.route("/upload", methods=["GET", "POST"])
def upload():
if request.method == "POST":
if request.files:
try:
raw_list = request.get_array(field_name='excel')
except Exception as e:
return render_template('error.html', error_msg='Неверный формат файла')
return str(e)
removed_empty_list = [line for line in raw_list if sum(len(str(elem)) for elem in line)>10]
column_names = [get_lang('ru')[attr] for attr in attrs]
first_row = 0
first_column = 0
while first_row < len(removed_empty_list):
try:
tmp_date = datetime.strptime(str(removed_empty_list[first_row][first_column]), '%d.%m.%Y')
break
except Exception as e:
if first_column == 0:
first_column = 1
else:
first_row += 1
first_column = 0
else:
return render_template('error.html', error_msg='Неверно заполненная таблица')
return 'incorrect_file_format'
column_values = removed_empty_list[first_row:]
for line in column_values:
entry_date = format_datetime(line[first_column])
flight = str(line[first_column+1])
fullname = str(line[first_column+2])
id_number = str(line[first_column+3])
birth_date = format_datetime(line[first_column+4])
passport_number = str(line[first_column+5])
citizenship = str(line[first_column+6])
phone = str(line[first_column+7])
before_arrival = str(line[first_column+8])
region = str(line[first_column+9])
residence = str(line[first_column+10])
work_place = str(line[first_column+11])
found = line[first_column+12].lower()=='да'
hospitalized = line[first_column+13].lower()=='да'
hospital = str(line[first_column+14])
person = Person(entry_date=entry_date, flight=flight, fullname=fullname, id_number=id_number,
passport_number=passport_number, birth_date=birth_date, citizenship=citizenship, phone=phone,
before_arrival=before_arrival, region=region, residence=residence, work_place=work_place,
found=found, hospitalized=hospitalized, hospital=hospital)
db.session.add(person)
db.session.commit()
return render_template("upload_result.html", column_names=column_names, column_values=column_values)
return render_template('upload.html', lang=get_lang('ru'))
@main.route("/show")
def show():
persons = Person.query.all()
return render_template('show.html', attr_names=get_lang('ru'), attrs=attrs, persons=persons)
@main.route("/add", methods=["GET", "POST"])
def add():
if request.method == "POST":
entry_date = datetime.strptime(request.form.get('entry_date'), '%Y-%m-%d')
flight = str(request.form.get('flight'))
fullname = str(request.form.get('fullname'))
id_number = str(request.form.get('id_number'))
birth_date = datetime.strptime(request.form.get('birth_date'), '%Y-%m-%d')
passport_number = str(request.form.get('passport_number'))
citizenship = str(request.form.get('citizenship'))
phone = str(request.form.get('phone'))
before_arrival = str(request.form.get('before_arrival'))
region = str(request.form.get('region'))
residence = str(request.form.get('residence'))
work_place = str(request.form.get('work_place'))
found = request.form.get('found') == 'yes'
hospitalized = request.form.get('hospitalized') == 'yes'
hospital = str(request.form.get('hospital'))
person = Person(entry_date=entry_date, flight=flight, fullname=fullname, id_number=id_number,
passport_number=passport_number, birth_date=birth_date, citizenship=citizenship, phone=phone,
before_arrival=before_arrival, region=region, residence=residence, work_place=work_place,
found=found, hospitalized=hospitalized, hospital=hospital)
db.session.add(person)
db.session.commit()
return render_template('add.html') | 0.234757 | 0.104021 |
from asyncio import sleep
from logging import getLogger
import aiologstash
from aiologstash import create_tcp_handler
from async_asgi_testclient import TestClient
from pytest import mark, raises
from yellowbox.extras.logstash import FakeLogstashService
from heksher._version import __version__
from heksher.main import app
@mark.asyncio
async def test_startup_existing_contexts(monkeypatch, sql_service, purge_sql):
    """App starts cleanly when the DB already holds exactly the configured context features."""
    monkeypatch.setenv('HEKSHER_DB_CONNECTION_STRING', sql_service.local_connection_string())
    monkeypatch.setenv('HEKSHER_STARTUP_CONTEXT_FEATURES', 'user;trust;theme')
    with sql_service.connection() as connection:
        connection.execute("""
        INSERT into context_features VALUES ('user', 0), ('trust', 1), ('theme', 2);
        """)
    async with TestClient(app):
        pass
@mark.asyncio
async def test_startup_existing_unexpected_contexts(monkeypatch, sql_service, purge_sql):
    """Startup fails when the DB holds a feature ('color') absent from the configured list."""
    monkeypatch.setenv('HEKSHER_DB_CONNECTION_STRING', sql_service.local_connection_string())
    monkeypatch.setenv('HEKSHER_STARTUP_CONTEXT_FEATURES', 'user;trust;theme')
    with sql_service.connection() as connection:
        connection.execute("""
        INSERT into context_features VALUES ('user', 0), ('trust', 1), ('theme', 2), ('color', 3);
        """)
    with raises(Exception):
        ''' async_asgi_testclient raises Exception for any exception on startup, so we can't be more specific '''
        async with TestClient(app):
            pass
@mark.asyncio
async def test_startup_existing_bad_order(monkeypatch, sql_service, purge_sql):
    """Startup fails when existing features are indexed in a different order than configured."""
    monkeypatch.setenv('HEKSHER_DB_CONNECTION_STRING', sql_service.local_connection_string())
    monkeypatch.setenv('HEKSHER_STARTUP_CONTEXT_FEATURES', 'user;trust;theme')
    with sql_service.connection() as connection:
        connection.execute("""
        INSERT into context_features VALUES ('user', 0), ('trust', 2), ('theme', 1);
        """)
    with raises(Exception):
        async with TestClient(app):
            pass
@mark.asyncio
async def test_startup_existing_contexts_with_bad_indices(monkeypatch, sql_service, purge_sql):
    """Sparse indices in the DB (0, 3, 5) are rewritten to a compact 0..n-1 sequence on startup."""
    monkeypatch.setenv('HEKSHER_DB_CONNECTION_STRING', sql_service.local_connection_string())
    monkeypatch.setenv('HEKSHER_STARTUP_CONTEXT_FEATURES', 'user;trust;theme')
    with sql_service.connection() as connection:
        connection.execute("""
        INSERT into context_features VALUES ('user', 0), ('trust', 3), ('theme', 5);
        """)
    async with TestClient(app):
        pass
    with sql_service.connection() as connection:
        results = connection.execute("""
        SELECT * FROM context_features;
        """)
    rows = {row['name']: row['index'] for row in results}
    assert rows == {
        'user': 0,
        'trust': 1,
        'theme': 2
    }
@mark.asyncio
async def test_startup_existing_contexts_new_contexts(monkeypatch, sql_service, purge_sql):
    """Features missing from the DB are created, and all features indexed in configured order."""
    monkeypatch.setenv('HEKSHER_DB_CONNECTION_STRING', sql_service.local_connection_string())
    monkeypatch.setenv('HEKSHER_STARTUP_CONTEXT_FEATURES', 'user;trust;theme')
    with sql_service.connection() as connection:
        connection.execute("""
        INSERT into context_features VALUES ('trust', 0);
        """)
    async with TestClient(app):
        pass
    with sql_service.connection() as connection:
        results = connection.execute("""
        SELECT * FROM context_features;
        """)
    rows = {row['name']: row['index'] for row in results}
    assert rows == {
        'user': 0,
        'trust': 1,
        'theme': 2
    }
@mark.asyncio
async def test_startup_logstash(monkeypatch, sql_service, purge_sql):
    """Startup wires a logstash TCP handler and stamps records with version + tags.

    Bug fix: the inner mock used ``global handler``, which wrote a module
    global and left the enclosing local ``handler`` as None -- so the real
    handler was never removed from the root logger at the end.  ``nonlocal``
    binds the enclosing variable as intended.
    """
    with FakeLogstashService().start() as logstash:
        monkeypatch.setenv('HEKSHER_LOGSTASH_HOST', logstash.local_host)
        monkeypatch.setenv('HEKSHER_LOGSTASH_PORT', str(logstash.port))
        monkeypatch.setenv('HEKSHER_LOGSTASH_TAGS', 'a:b c:d')
        monkeypatch.setenv('HEKSHER_DB_CONNECTION_STRING', sql_service.local_connection_string())
        monkeypatch.setenv('HEKSHER_STARTUP_CONTEXT_FEATURES', 'user;trust;theme')
        handler = None

        async def mock_create_handler(*args, **kwargs):
            nonlocal handler
            handler = await create_tcp_handler(*args, **kwargs)
            return handler

        monkeypatch.setattr(aiologstash, 'create_tcp_handler', mock_create_handler)
        async with TestClient(app):
            await sleep(0.1)  # wait for logstash records
            # new context features were added, we should be seeing their logs now
            assert logstash.records
            for record in logstash.records:
                assert record['heksher_version'] == __version__
                assert record['a'] == 'b'
                assert record['c'] == 'd'
        getLogger().removeHandler(handler)
from logging import getLogger
import aiologstash
from aiologstash import create_tcp_handler
from async_asgi_testclient import TestClient
from pytest import mark, raises
from yellowbox.extras.logstash import FakeLogstashService
from heksher._version import __version__
from heksher.main import app
@mark.asyncio
async def test_startup_existing_contexts(monkeypatch, sql_service, purge_sql):
    """App starts cleanly when the DB already holds exactly the configured context features."""
    monkeypatch.setenv('HEKSHER_DB_CONNECTION_STRING', sql_service.local_connection_string())
    monkeypatch.setenv('HEKSHER_STARTUP_CONTEXT_FEATURES', 'user;trust;theme')
    with sql_service.connection() as connection:
        connection.execute("""
        INSERT into context_features VALUES ('user', 0), ('trust', 1), ('theme', 2);
        """)
    async with TestClient(app):
        pass
@mark.asyncio
async def test_startup_existing_unexpected_contexts(monkeypatch, sql_service, purge_sql):
    """Startup fails when the DB holds a feature ('color') absent from the configured list."""
    monkeypatch.setenv('HEKSHER_DB_CONNECTION_STRING', sql_service.local_connection_string())
    monkeypatch.setenv('HEKSHER_STARTUP_CONTEXT_FEATURES', 'user;trust;theme')
    with sql_service.connection() as connection:
        connection.execute("""
        INSERT into context_features VALUES ('user', 0), ('trust', 1), ('theme', 2), ('color', 3);
        """)
    with raises(Exception):
        ''' async_asgi_testclient raises Exception for any exception on startup, so we can't be more specific '''
        async with TestClient(app):
            pass
@mark.asyncio
async def test_startup_existing_bad_order(monkeypatch, sql_service, purge_sql):
    """Startup fails when existing features are indexed in a different order than configured."""
    monkeypatch.setenv('HEKSHER_DB_CONNECTION_STRING', sql_service.local_connection_string())
    monkeypatch.setenv('HEKSHER_STARTUP_CONTEXT_FEATURES', 'user;trust;theme')
    with sql_service.connection() as connection:
        connection.execute("""
        INSERT into context_features VALUES ('user', 0), ('trust', 2), ('theme', 1);
        """)
    with raises(Exception):
        async with TestClient(app):
            pass
@mark.asyncio
async def test_startup_existing_contexts_with_bad_indices(monkeypatch, sql_service, purge_sql):
    """Sparse indices in the DB (0, 3, 5) are rewritten to a compact 0..n-1 sequence on startup."""
    monkeypatch.setenv('HEKSHER_DB_CONNECTION_STRING', sql_service.local_connection_string())
    monkeypatch.setenv('HEKSHER_STARTUP_CONTEXT_FEATURES', 'user;trust;theme')
    with sql_service.connection() as connection:
        connection.execute("""
        INSERT into context_features VALUES ('user', 0), ('trust', 3), ('theme', 5);
        """)
    async with TestClient(app):
        pass
    with sql_service.connection() as connection:
        results = connection.execute("""
        SELECT * FROM context_features;
        """)
    rows = {row['name']: row['index'] for row in results}
    assert rows == {
        'user': 0,
        'trust': 1,
        'theme': 2
    }
@mark.asyncio
async def test_startup_existing_contexts_new_contexts(monkeypatch, sql_service, purge_sql):
    """Features missing from the DB are created, and all features indexed in configured order."""
    monkeypatch.setenv('HEKSHER_DB_CONNECTION_STRING', sql_service.local_connection_string())
    monkeypatch.setenv('HEKSHER_STARTUP_CONTEXT_FEATURES', 'user;trust;theme')
    with sql_service.connection() as connection:
        connection.execute("""
        INSERT into context_features VALUES ('trust', 0);
        """)
    async with TestClient(app):
        pass
    with sql_service.connection() as connection:
        results = connection.execute("""
        SELECT * FROM context_features;
        """)
    rows = {row['name']: row['index'] for row in results}
    assert rows == {
        'user': 0,
        'trust': 1,
        'theme': 2
    }
@mark.asyncio
async def test_startup_logstash(monkeypatch, sql_service, purge_sql):
    """Startup wires a logstash TCP handler and stamps records with version + tags.

    Bug fix: the inner mock used ``global handler``, which wrote a module
    global and left the enclosing local ``handler`` as None -- so the real
    handler was never removed from the root logger at the end.  ``nonlocal``
    binds the enclosing variable as intended.
    """
    with FakeLogstashService().start() as logstash:
        monkeypatch.setenv('HEKSHER_LOGSTASH_HOST', logstash.local_host)
        monkeypatch.setenv('HEKSHER_LOGSTASH_PORT', str(logstash.port))
        monkeypatch.setenv('HEKSHER_LOGSTASH_TAGS', 'a:b c:d')
        monkeypatch.setenv('HEKSHER_DB_CONNECTION_STRING', sql_service.local_connection_string())
        monkeypatch.setenv('HEKSHER_STARTUP_CONTEXT_FEATURES', 'user;trust;theme')
        handler = None

        async def mock_create_handler(*args, **kwargs):
            nonlocal handler
            handler = await create_tcp_handler(*args, **kwargs)
            return handler

        monkeypatch.setattr(aiologstash, 'create_tcp_handler', mock_create_handler)
        async with TestClient(app):
            await sleep(0.1)  # wait for logstash records
            # new context features were added, we should be seeing their logs now
            assert logstash.records
            for record in logstash.records:
                assert record['heksher_version'] == __version__
                assert record['a'] == 'b'
                assert record['c'] == 'd'
        getLogger().removeHandler(handler)
import asyncio
import typing as t
from dataclasses import asdict, dataclass, field
from datetime import datetime, timedelta
import discord
from discord.ext import commands
from bot import ModmailBot
from core import checks
from core.models import PermissionLevel, getLogger
from core.thread import Thread
from .utils import async_tasks
# Remove view perms from this role while pinging, so only on-duty mods get the ping.
MOD_TEAM_ROLE_ID = 267629731250176001
log = getLogger(__name__)
@dataclass
class PingConfig:
    """Hold the current ping configuration."""
    # Mongo document id this config is stored under.
    _id: str = field(repr=False, default="ping-delay-config")
    # Message sent into a thread that has had no staff response.
    ping_string: str = "@here"
    # Seconds to wait after a thread opens before the first ping (5 min).
    initial_wait_duration: int = 5 * 60
    # Seconds to wait again once a ping has been delayed (10 min).
    delayed_wait_duration: int = 10 * 60
    # Category IDs in which pings are never sent.
    ignored_categories: list[int] = field(default_factory=list)
@dataclass(frozen=True)
class PingTask:
    """Data about an individual ping later task."""
    when_to_ping: str  # ISO datetime stamp (parsed back with datetime.fromisoformat)
    channel_id: int  # thread channel to ping in
    already_delayed: bool = False  # Whether the PingTask has been delayed already
class PingManager(commands.Cog):
    """A plugin to manage what and when to ping in ModMail threads."""

    def __init__(self, bot: ModmailBot):
        self.bot = bot
        # Resolved in init_plugin once the guild is available.
        self.mod_team_role: t.Optional[discord.Role] = None
        # Loaded from the DB in init_plugin; commands await init_task before use.
        self.config: t.Optional[PingConfig] = None
        self.ping_tasks: t.Optional[list[PingTask]] = None
        self.db = bot.api.get_plugin_partition(self)
        self.init_task = async_tasks.create_task(self.init_plugin(), self.bot.loop)

    async def init_plugin(self) -> None:
        """Fetch the current config and persisted ping tasks from the db."""
        db_config = await self.db.find_one({"_id": "ping-delay-config"})
        # Missing document -> fall back to the PingConfig defaults.
        db_config = db_config or {}
        self.config = PingConfig(**db_config)
        self.mod_team_role = self.bot.guild.get_role(MOD_TEAM_ROLE_ID)
        db_ping_tasks = await self.db.find_one({"_id": "ping-delay-tasks"})
        db_ping_tasks = db_ping_tasks or {}
        self.ping_tasks = [PingTask(**task) for task in db_ping_tasks.get("ping_tasks", [])]
        log.info("Loaded config: %s", self.config)
        log.info("Loaded %d ping tasks", len(self.ping_tasks))
        # Resume ping tasks that were persisted before the last restart.
        for task in self.ping_tasks:
            async_tasks.create_task(self.maybe_ping_later(task), self.bot.loop)

    @commands.group(invoke_without_command=True)
    @checks.has_permissions(PermissionLevel.SUPPORTER)
    async def ping_delay(self, ctx: commands.Context) -> None:
        """Manage when to ping in threads without a staff response."""
        await ctx.send_help(ctx.command)

    @ping_delay.group(name="set", invoke_without_command=True)
    @checks.has_permissions(PermissionLevel.OWNER)
    async def set_delay(self, ctx: commands.Context) -> None:
        """Set the times when to ping in threads without a staff response."""
        await ctx.send_help(ctx.command)

    @set_delay.command(name="initial")
    @checks.has_permissions(PermissionLevel.OWNER)
    async def set_initial(self, ctx: commands.Context, wait_duration: int) -> None:
        """Set the number of seconds to wait after a thread is opened to ping."""
        await self.init_task
        await self.db.find_one_and_update(
            {"_id": "ping-delay-config"},
            {"$set": {"initial_wait_duration": wait_duration}},
            upsert=True,
        )
        self.config.initial_wait_duration = wait_duration
        await ctx.send(f":+1: Set initial ping delay to {wait_duration} seconds.")

    @set_delay.command(name="delayed")
    @checks.has_permissions(PermissionLevel.OWNER)
    async def set_delayed(self, ctx: commands.Context, wait_duration: int) -> None:
        """Set the number of seconds to wait before pinging once a ping has been delayed."""
        await self.init_task
        await self.db.find_one_and_update(
            {"_id": "ping-delay-config"},
            {"$set": {"delayed_wait_duration": wait_duration}},
            upsert=True,
        )
        self.config.delayed_wait_duration = wait_duration
        await ctx.send(f":+1: Set the delayed ping delay to {wait_duration} seconds.")

    @ping_delay.command(name="get")
    @checks.has_permissions(PermissionLevel.SUPPORTER)
    async def get_delay(self, ctx: commands.Context) -> None:
        """Get the number of seconds to wait after a thread is opened to ping."""
        # Bug fix: previously read self.config without awaiting init_task,
        # which fails with AttributeError if init hasn't finished yet.
        await self.init_task
        await ctx.send(
            f"The current ping delay is initial={self.config.initial_wait_duration}s "
            f"delayed={self.config.delayed_wait_duration}s."
        )

    @commands.group(invoke_without_command=True)
    @checks.has_permissions(PermissionLevel.SUPPORTER)
    async def ping_string(self, ctx: commands.Context) -> None:
        """Manage what message to send in threads without a staff response."""
        await ctx.send_help(ctx.command)

    @checks.has_permissions(PermissionLevel.OWNER)
    @ping_string.command(name="set")
    async def set_ping(self, ctx: commands.Context, ping_string: str) -> None:
        """Set what to send after waiting for a thread to be responded to."""
        await self.init_task
        await self.db.find_one_and_update(
            {"_id": "ping-delay-config"},
            {"$set": {"ping_string": ping_string}},
            upsert=True,
        )
        self.config.ping_string = ping_string
        # NOTE(review): allowed_mentions=None means "use the bot's default
        # allowed mentions"; if the intent was to suppress mentions in this
        # confirmation, discord.AllowedMentions.none() is needed -- confirm.
        await ctx.send(f":+1: Set ping string to {ping_string}.", allowed_mentions=None)

    @checks.has_permissions(PermissionLevel.SUPPORTER)
    @ping_string.command(name="get")
    async def get_ping(self, ctx: commands.Context) -> None:
        """Get the message sent in threads without a staff response."""
        # Bug fix: await init_task before reading self.config (was missing).
        await self.init_task
        await ctx.send(f"The ping string is {self.config.ping_string}.", allowed_mentions=None)

    @commands.group(invoke_without_command=True, aliases=("ping_ignored_categories", "ping_ignore"))
    @checks.has_permissions(PermissionLevel.SUPPORTER)
    async def ping_ignore_categories(self, ctx: commands.Context) -> None:
        """Manage what categories never get sent pings in them."""
        await ctx.send_help(ctx.command)

    @checks.has_permissions(PermissionLevel.OWNER)
    @ping_ignore_categories.command(name="add", aliases=("set",))
    async def set_category(self, ctx: commands.Context, category_to_ignore: discord.CategoryChannel) -> None:
        """Add a category to the list of ignored categories."""
        await self.init_task
        if category_to_ignore.id in self.config.ignored_categories:
            await ctx.send(f":x: {category_to_ignore} already in the ignored categories.")
            return
        self.config.ignored_categories.append(category_to_ignore.id)
        await self.db.find_one_and_update(
            {"_id": "ping-delay-config"},
            {"$addToSet": {"ignored_categories": category_to_ignore.id}},
            upsert=True,
        )
        await ctx.send(f":+1: Added {category_to_ignore} to the ignored categories list.")

    @checks.has_permissions(PermissionLevel.SUPPORTER)
    @ping_ignore_categories.command(name="get")
    async def get_category(self, ctx: commands.Context) -> None:
        """Get the list of ignored categories."""
        await self.init_task
        if not self.config.ignored_categories:
            await ctx.send("There are currently no ignored categories.")
            return
        ignored_categories_str = ', '.join(map(str, self.config.ignored_categories))
        await ctx.send(f"The currently ignored categories are: {ignored_categories_str}.")

    @checks.has_permissions(PermissionLevel.OWNER)
    @ping_ignore_categories.command(name="delete", aliases=("remove", "del", "rem"))
    async def del_category(self, ctx: commands.Context, category_to_ignore: discord.CategoryChannel) -> None:
        """Remove a category from the list of ignored categories."""
        await self.init_task
        if category_to_ignore.id not in self.config.ignored_categories:
            await ctx.send(f":x: {category_to_ignore} isn't in the ignored categories list.")
            return
        self.config.ignored_categories.remove(category_to_ignore.id)
        await self.db.find_one_and_update(
            {"_id": "ping-delay-config"},
            {"$pull": {"ignored_categories": category_to_ignore.id}},
            upsert=True,
        )
        await ctx.send(f":+1: Removed {category_to_ignore} from the ignored categories list.")

    async def add_ping_task(self, task: PingTask) -> None:
        """Adds a ping task to the internal cache and to the db, then schedules it."""
        self.ping_tasks.append(task)
        await self.db.find_one_and_update(
            {"_id": "ping-delay-tasks"},
            {"$addToSet": {"ping_tasks": asdict(task)}},
            upsert=True,
        )
        async_tasks.create_task(self.maybe_ping_later(task), self.bot.loop)

    async def remove_ping_task(self, task: PingTask) -> None:
        """Removes a ping task from the internal cache and from the db."""
        self.ping_tasks.remove(task)
        await self.db.find_one_and_update(
            {"_id": "ping-delay-tasks"},
            {"$pull": {"ping_tasks": asdict(task)}},
            upsert=True,
        )

    async def should_ping(self, channel: discord.TextChannel, already_delayed: bool) -> bool:
        """Check if a ping should be sent to a thread depending on current config."""
        if channel.category_id in self.config.ignored_categories:
            log.info("Not pinging in %s as it's currently in an ignored category", channel)
            return False
        has_internal_message = False
        logs = await self.bot.api.get_log(channel.id)
        for message in reversed(logs["messages"]):
            # Look through logged messages in reverse order since replies are likely to be last.
            if message["author"]["mod"] and message["type"] == "thread_message":
                log.info("Not pinging in %s as a mod has sent a reply in the thread.", channel)
                return False
            if message["author"]["mod"]:
                has_internal_message = True
        # Falling out of the above loop means there are no thread replies from mods.
        if has_internal_message and not already_delayed:
            # If there was an internal message, and the ping hasn't already been delayed,
            # delay a ping to be sent later.
            log.info(
                "Delaying pinging in %s by %d seconds as a mod has sent an internal message in the thread.",
                channel,
                self.config.delayed_wait_duration
            )
            ping_task = PingTask(
                when_to_ping=(datetime.utcnow() + timedelta(seconds=self.config.delayed_wait_duration)).isoformat(),
                channel_id=channel.id,
                already_delayed=True
            )
            await self.add_ping_task(ping_task)
            return False
        return True

    async def maybe_ping_later(self, ping_task: PingTask) -> None:
        """Pings conditionally after waiting the configured wait duration."""
        when_to_ping = datetime.fromisoformat(ping_task.when_to_ping)
        now = datetime.utcnow()
        seconds_to_sleep = (when_to_ping - now).total_seconds()
        if seconds_to_sleep < 0:
            log.info("Pinging for %d is overdue, pinging now.", ping_task.channel_id)
        else:
            await asyncio.sleep(seconds_to_sleep)
        if not (channel := self.bot.get_channel(ping_task.channel_id)):
            log.info("Channel closed before we could ping.")
            await self.remove_ping_task(ping_task)
        else:
            channel: discord.TextChannel
            try:
                if await self.should_ping(channel, ping_task.already_delayed):
                    # Remove overwrites for off-duty mods, ping, then add back.
                    await channel.set_permissions(self.mod_team_role, overwrite=None)
                    await channel.send(
                        f"{self.config.ping_string}"
                        f"{' no one has replied yet!' if ping_task.already_delayed else ''}"
                    )
                    await channel.edit(sync_permissions=True)
            except discord.NotFound:
                # Fail silently if the channel gets deleted during processing.
                pass
            finally:
                # Ensure the task always gets removed.
                await self.remove_ping_task(ping_task)

    @commands.Cog.listener()
    async def on_thread_ready(self, thread: Thread, *args) -> None:
        """Schedule a task to check if the bot should ping in the thread after the defined wait duration."""
        await self.init_task
        now = datetime.utcnow()
        ping_task = PingTask(
            when_to_ping=(now + timedelta(seconds=self.config.initial_wait_duration)).isoformat(),
            channel_id=thread.channel.id
        )
        await self.add_ping_task(ping_task)
def setup(bot: ModmailBot) -> None:
    """Entry point used by the bot's plugin loader: register the PingManager cog."""
    cog = PingManager(bot)
    bot.add_cog(cog)
import typing as t
from dataclasses import asdict, dataclass, field
from datetime import datetime, timedelta
import discord
from discord.ext import commands
from bot import ModmailBot
from core import checks
from core.models import PermissionLevel, getLogger
from core.thread import Thread
from .utils import async_tasks
# Remove view perms from this role while pinging, so only on-duty mods get the ping.
MOD_TEAM_ROLE_ID = 267629731250176001
log = getLogger(__name__)
@dataclass
class PingConfig:
    """Hold the current ping configuration."""
    # Mongo document id this config is stored under.
    _id: str = field(repr=False, default="ping-delay-config")
    # Message sent into a thread that has had no staff response.
    ping_string: str = "@here"
    # Seconds to wait after a thread opens before the first ping (5 min).
    initial_wait_duration: int = 5 * 60
    # Seconds to wait again once a ping has been delayed (10 min).
    delayed_wait_duration: int = 10 * 60
    # Category IDs in which pings are never sent.
    ignored_categories: list[int] = field(default_factory=list)
# Frozen so asdict() snapshots used for the db $pull filter stay in sync
# with the cached instance.
@dataclass(frozen=True)
class PingTask:
    """Data about an individual ping later task."""

    when_to_ping: str  # ISO datetime stamp
    channel_id: int
    already_delayed: bool = False  # Whether the PingTask has been delayed already
class PingManager(commands.Cog):
    """A plugin to manage what and when to ping in ModMail threads."""

    def __init__(self, bot: ModmailBot):
        self.bot = bot
        # Populated asynchronously by init_plugin(); commands that need these
        # await self.init_task first.
        self.mod_team_role: t.Optional[discord.Role] = None
        self.config: t.Optional[PingConfig] = None
        self.ping_tasks: t.Optional[list[PingTask]] = None
        self.db = bot.api.get_plugin_partition(self)
        self.init_task = async_tasks.create_task(self.init_plugin(), self.bot.loop)

    async def init_plugin(self) -> None:
        """Fetch the current config and pending ping tasks from the db."""
        db_config = await self.db.find_one({"_id": "ping-delay-config"})
        db_config = db_config or {}
        self.config = PingConfig(**db_config)
        self.mod_team_role = self.bot.guild.get_role(MOD_TEAM_ROLE_ID)

        db_ping_tasks = await self.db.find_one({"_id": "ping-delay-tasks"})
        db_ping_tasks = db_ping_tasks or {}
        self.ping_tasks = [PingTask(**task) for task in db_ping_tasks.get("ping_tasks", [])]

        log.info("Loaded config: %s", self.config)
        log.info("Loaded %d ping tasks", len(self.ping_tasks))

        # Re-schedule tasks that were persisted before the last restart.
        for task in self.ping_tasks:
            async_tasks.create_task(self.maybe_ping_later(task), self.bot.loop)

    @commands.group(invoke_without_command=True)
    @checks.has_permissions(PermissionLevel.SUPPORTER)
    async def ping_delay(self, ctx: commands.Context) -> None:
        """Manage when to ping in threads without a staff response."""
        await ctx.send_help(ctx.command)

    @ping_delay.group(name="set", invoke_without_command=True)
    @checks.has_permissions(PermissionLevel.OWNER)
    async def set_delay(self, ctx: commands.Context) -> None:
        """Set the times when to ping in threads without a staff response."""
        await ctx.send_help(ctx.command)

    @set_delay.command(name="initial")
    @checks.has_permissions(PermissionLevel.OWNER)
    async def set_initial(self, ctx: commands.Context, wait_duration: int) -> None:
        """Set the number of seconds to wait after a thread is opened to ping."""
        await self.init_task

        await self.db.find_one_and_update(
            {"_id": "ping-delay-config"},
            {"$set": {"initial_wait_duration": wait_duration}},
            upsert=True,
        )
        self.config.initial_wait_duration = wait_duration
        await ctx.send(f":+1: Set initial ping delay to {wait_duration} seconds.")

    @set_delay.command(name="delayed")
    @checks.has_permissions(PermissionLevel.OWNER)
    async def set_delayed(self, ctx: commands.Context, wait_duration: int) -> None:
        """Set the number of seconds to wait before re-checking a thread whose ping was delayed."""
        await self.init_task

        await self.db.find_one_and_update(
            {"_id": "ping-delay-config"},
            {"$set": {"delayed_wait_duration": wait_duration}},
            upsert=True,
        )
        self.config.delayed_wait_duration = wait_duration
        await ctx.send(f":+1: Set the delayed ping delay to {wait_duration} seconds.")

    @ping_delay.command(name="get")
    @checks.has_permissions(PermissionLevel.SUPPORTER)
    async def get_delay(self, ctx: commands.Context) -> None:
        """Get the number of seconds to wait after a thread is opened to ping."""
        # Wait for init so self.config is guaranteed to be populated.
        await self.init_task
        await ctx.send(
            f"The current ping delay is initial={self.config.initial_wait_duration}s "
            f"delayed={self.config.delayed_wait_duration}s."
        )

    @commands.group(invoke_without_command=True)
    @checks.has_permissions(PermissionLevel.SUPPORTER)
    async def ping_string(self, ctx: commands.Context) -> None:
        """Manage what message to send in threads without a staff response."""
        await ctx.send_help(ctx.command)

    @checks.has_permissions(PermissionLevel.OWNER)
    @ping_string.command(name="set")
    async def set_ping(self, ctx: commands.Context, ping_string: str) -> None:
        """Set what to send after waiting for a thread to be responded to."""
        await self.init_task

        await self.db.find_one_and_update(
            {"_id": "ping-delay-config"},
            {"$set": {"ping_string": ping_string}},
            upsert=True,
        )
        self.config.ping_string = ping_string
        # allowed_mentions=None falls back to the bot-wide default (which may
        # actually ping); explicitly suppress all mentions when echoing back.
        await ctx.send(
            f":+1: Set ping string to {ping_string}.",
            allowed_mentions=discord.AllowedMentions.none(),
        )

    @checks.has_permissions(PermissionLevel.SUPPORTER)
    @ping_string.command(name="get")
    async def get_ping(self, ctx: commands.Context) -> None:
        """Get the string sent in threads without a staff response."""
        await self.init_task
        await ctx.send(
            f"The ping string is {self.config.ping_string}.",
            allowed_mentions=discord.AllowedMentions.none(),
        )

    @commands.group(invoke_without_command=True, aliases=("ping_ignored_categories", "ping_ignore"))
    @checks.has_permissions(PermissionLevel.SUPPORTER)
    async def ping_ignore_categories(self, ctx: commands.Context) -> None:
        """Manage what categories never get sent pings in them."""
        await ctx.send_help(ctx.command)

    @checks.has_permissions(PermissionLevel.OWNER)
    @ping_ignore_categories.command(name="add", aliases=("set",))
    async def set_category(self, ctx: commands.Context, category_to_ignore: discord.CategoryChannel) -> None:
        """Add a category to the list of ignored categories."""
        await self.init_task

        if category_to_ignore.id in self.config.ignored_categories:
            await ctx.send(f":x: {category_to_ignore} already in the ignored categories.")
            return

        self.config.ignored_categories.append(category_to_ignore.id)
        await self.db.find_one_and_update(
            {"_id": "ping-delay-config"},
            {"$addToSet": {"ignored_categories": category_to_ignore.id}},
            upsert=True,
        )
        await ctx.send(f":+1: Added {category_to_ignore} to the ignored categories list.")

    @checks.has_permissions(PermissionLevel.SUPPORTER)
    @ping_ignore_categories.command(name="get")
    async def get_category(self, ctx: commands.Context) -> None:
        """Get the list of ignored categories."""
        await self.init_task

        if not self.config.ignored_categories:
            await ctx.send("There are currently no ignored categories.")
            return

        ignored_categories_str = ', '.join(map(str, self.config.ignored_categories))
        await ctx.send(f"The currently ignored categories are: {ignored_categories_str}.")

    @checks.has_permissions(PermissionLevel.OWNER)
    @ping_ignore_categories.command(name="delete", aliases=("remove", "del", "rem"))
    async def del_category(self, ctx: commands.Context, category_to_ignore: discord.CategoryChannel) -> None:
        """Remove a category from the list of ignored categories."""
        await self.init_task

        if category_to_ignore.id not in self.config.ignored_categories:
            await ctx.send(f":x: {category_to_ignore} isn't in the ignored categories list.")
            return

        self.config.ignored_categories.remove(category_to_ignore.id)
        await self.db.find_one_and_update(
            {"_id": "ping-delay-config"},
            {"$pull": {"ignored_categories": category_to_ignore.id}},
            upsert=True,
        )
        await ctx.send(f":+1: Removed {category_to_ignore} from the ignored categories list.")

    async def add_ping_task(self, task: PingTask) -> None:
        """Add a ping task to the internal cache and the db, then schedule it."""
        self.ping_tasks.append(task)
        await self.db.find_one_and_update(
            {"_id": "ping-delay-tasks"},
            {"$addToSet": {"ping_tasks": asdict(task)}},
            upsert=True,
        )
        async_tasks.create_task(self.maybe_ping_later(task), self.bot.loop)

    async def remove_ping_task(self, task: PingTask) -> None:
        """Remove a ping task from the internal cache and the db."""
        self.ping_tasks.remove(task)
        await self.db.find_one_and_update(
            {"_id": "ping-delay-tasks"},
            {"$pull": {"ping_tasks": asdict(task)}},
            upsert=True,
        )

    async def should_ping(self, channel: discord.TextChannel, already_delayed: bool) -> bool:
        """Check if a ping should be sent to a thread depending on current config."""
        if channel.category_id in self.config.ignored_categories:
            log.info("Not pinging in %s as it's currently in an ignored category", channel)
            return False

        has_internal_message = False
        logs = await self.bot.api.get_log(channel.id)
        # Look through logged messages in reverse order since replies are likely to be last.
        for message in reversed(logs["messages"]):
            if message["author"]["mod"] and message["type"] == "thread_message":
                log.info("Not pinging in %s as a mod has sent a reply in the thread.", channel)
                return False
            if message["author"]["mod"]:
                has_internal_message = True

        # Falling out of the above loop means there are no thread replies from mods.
        if has_internal_message and not already_delayed:
            # If there was an internal message, and the ping hasn't already been
            # delayed, delay a ping to be sent later (only one delay per thread).
            log.info(
                "Delaying pinging in %s by %d seconds as a mod has sent an internal message in the thread.",
                channel,
                self.config.delayed_wait_duration
            )
            ping_task = PingTask(
                when_to_ping=(datetime.utcnow() + timedelta(seconds=self.config.delayed_wait_duration)).isoformat(),
                channel_id=channel.id,
                already_delayed=True
            )
            await self.add_ping_task(ping_task)
            return False

        return True

    async def maybe_ping_later(self, ping_task: PingTask) -> None:
        """Pings conditionally after waiting the configured wait duration."""
        when_to_ping = datetime.fromisoformat(ping_task.when_to_ping)
        now = datetime.utcnow()
        seconds_to_sleep = (when_to_ping - now).total_seconds()
        if seconds_to_sleep < 0:
            log.info("Pinging for %d is overdue, pinging now.", ping_task.channel_id)
        else:
            await asyncio.sleep(seconds_to_sleep)

        if not (channel := self.bot.get_channel(ping_task.channel_id)):
            log.info("Channel closed before we could ping.")
            await self.remove_ping_task(ping_task)
        else:
            channel: discord.TextChannel
            try:
                if await self.should_ping(channel, ping_task.already_delayed):
                    # Remove overwrites for off-duty mods, ping, then add back.
                    await channel.set_permissions(self.mod_team_role, overwrite=None)
                    await channel.send(
                        f"{self.config.ping_string}"
                        f"{' no one has replied yet!' if ping_task.already_delayed else ''}"
                    )
                    await channel.edit(sync_permissions=True)
            except discord.NotFound:
                # Fail silently if the channel gets deleted during processing.
                pass
            finally:
                # Ensure the task always gets removed.
                await self.remove_ping_task(ping_task)

    @commands.Cog.listener()
    async def on_thread_ready(self, thread: Thread, *args) -> None:
        """Schedule a task to check if the bot should ping in the thread after the defined wait duration."""
        await self.init_task

        now = datetime.utcnow()
        ping_task = PingTask(
            when_to_ping=(now + timedelta(seconds=self.config.initial_wait_duration)).isoformat(),
            channel_id=thread.channel.id
        )
        await self.add_ping_task(ping_task)
def setup(bot: ModmailBot) -> None:
"""Add the PingManager plugin."""
bot.add_cog(PingManager(bot)) | 0.715424 | 0.15925 |
import FWCore.ParameterSet.Config as cms
# Reference ECAL pulse-shape templates for the barrel (EB) and endcap (EE):
# 12 samples each, normalised so the peak sample equals 1.0.
ecal_pulse_shape_templates = cms.PSet(
    EBPulseShapeTemplate = cms.vdouble (
        1.13979e-02, 7.58151e-01, 1.00000e+00, 8.87744e-01, 6.73548e-01, 4.74332e-01, 3.19561e-01, 2.15144e-01, 1.47464e-01, 1.01087e-01, 6.93181e-02, 4.75044e-02
    ) ,
    EEPulseShapeTemplate = cms.vdouble (
        1.16442e-01, 7.56246e-01, 1.00000e+00, 8.97182e-01, 6.86831e-01, 4.91506e-01, 3.44111e-01, 2.45731e-01, 1.74115e-01, 1.23361e-01, 8.74288e-02, 6.19570e-02
    )
)
# Pulse-shape covariance matrices for EB and EE: each is a 12x12 matrix
# flattened row-major into a 144-element vdouble (one matrix row per line).
ecal_pulse_shape_covariances = cms.PSet(
    EBPulseShapeCovariance = cms.vdouble (
        3.001e-06,  1.233e-05,  0.000e+00, -4.416e-06, -4.571e-06, -3.614e-06, -2.636e-06, -1.286e-06, -8.410e-07, -5.296e-07,  0.000e+00,  0.000e+00,
        1.233e-05,  6.154e-05,  0.000e+00, -2.200e-05, -2.309e-05, -1.838e-05, -1.373e-05, -7.334e-06, -5.088e-06, -3.745e-06, -2.428e-06,  0.000e+00,
        0.000e+00,  0.000e+00,  0.000e+00,  0.000e+00,  0.000e+00,  0.000e+00,  0.000e+00,  0.000e+00,  0.000e+00,  0.000e+00,  0.000e+00,  0.000e+00,
       -4.416e-06, -2.200e-05,  0.000e+00,  8.319e-06,  8.545e-06,  6.792e-06,  5.059e-06,  2.678e-06,  1.816e-06,  1.223e-06,  8.245e-07,  5.589e-07,
       -4.571e-06, -2.309e-05,  0.000e+00,  8.545e-06,  9.182e-06,  7.219e-06,  5.388e-06,  2.853e-06,  1.944e-06,  1.324e-06,  9.083e-07,  6.335e-07,
       -3.614e-06, -1.838e-05,  0.000e+00,  6.792e-06,  7.219e-06,  6.016e-06,  4.437e-06,  2.385e-06,  1.636e-06,  1.118e-06,  7.754e-07,  5.556e-07,
       -2.636e-06, -1.373e-05,  0.000e+00,  5.059e-06,  5.388e-06,  4.437e-06,  3.602e-06,  1.917e-06,  1.322e-06,  9.079e-07,  6.529e-07,  4.752e-07,
       -1.286e-06, -7.334e-06,  0.000e+00,  2.678e-06,  2.853e-06,  2.385e-06,  1.917e-06,  1.375e-06,  9.100e-07,  6.455e-07,  4.693e-07,  3.657e-07,
       -8.410e-07, -5.088e-06,  0.000e+00,  1.816e-06,  1.944e-06,  1.636e-06,  1.322e-06,  9.100e-07,  9.115e-07,  6.062e-07,  4.436e-07,  3.422e-07,
       -5.296e-07, -3.745e-06,  0.000e+00,  1.223e-06,  1.324e-06,  1.118e-06,  9.079e-07,  6.455e-07,  6.062e-07,  7.217e-07,  4.862e-07,  3.768e-07,
        0.000e+00, -2.428e-06,  0.000e+00,  8.245e-07,  9.083e-07,  7.754e-07,  6.529e-07,  4.693e-07,  4.436e-07,  4.862e-07,  6.509e-07,  4.418e-07,
        0.000e+00,  0.000e+00,  0.000e+00,  5.589e-07,  6.335e-07,  5.556e-07,  4.752e-07,  3.657e-07,  3.422e-07,  3.768e-07,  4.418e-07,  6.142e-07,
    ),
    EEPulseShapeCovariance = cms.vdouble (
        3.941e-05,  3.333e-05,  0.000e+00, -1.449e-05, -1.661e-05, -1.424e-05, -1.183e-05, -6.842e-06, -4.915e-06, -3.411e-06,  0.000e+00,  0.000e+00,
        3.333e-05,  2.862e-05,  0.000e+00, -1.244e-05, -1.431e-05, -1.233e-05, -1.032e-05, -5.883e-06, -4.154e-06, -2.902e-06, -2.128e-06,  0.000e+00,
        0.000e+00,  0.000e+00,  0.000e+00,  0.000e+00,  0.000e+00,  0.000e+00,  0.000e+00,  0.000e+00,  0.000e+00,  0.000e+00,  0.000e+00,  0.000e+00,
       -1.449e-05, -1.244e-05,  0.000e+00,  5.840e-06,  6.649e-06,  5.720e-06,  4.812e-06,  2.708e-06,  1.869e-06,  1.330e-06,  9.186e-07,  6.446e-07,
       -1.661e-05, -1.431e-05,  0.000e+00,  6.649e-06,  7.966e-06,  6.898e-06,  5.794e-06,  3.157e-06,  2.184e-06,  1.567e-06,  1.084e-06,  7.575e-07,
       -1.424e-05, -1.233e-05,  0.000e+00,  5.720e-06,  6.898e-06,  6.341e-06,  5.347e-06,  2.859e-06,  1.991e-06,  1.431e-06,  9.839e-07,  6.886e-07,
       -1.183e-05, -1.032e-05,  0.000e+00,  4.812e-06,  5.794e-06,  5.347e-06,  4.854e-06,  2.628e-06,  1.809e-06,  1.289e-06,  9.020e-07,  6.146e-07,
       -6.842e-06, -5.883e-06,  0.000e+00,  2.708e-06,  3.157e-06,  2.859e-06,  2.628e-06,  1.863e-06,  1.296e-06,  8.882e-07,  6.108e-07,  4.283e-07,
       -4.915e-06, -4.154e-06,  0.000e+00,  1.869e-06,  2.184e-06,  1.991e-06,  1.809e-06,  1.296e-06,  1.217e-06,  8.669e-07,  5.751e-07,  3.882e-07,
       -3.411e-06, -2.902e-06,  0.000e+00,  1.330e-06,  1.567e-06,  1.431e-06,  1.289e-06,  8.882e-07,  8.669e-07,  9.522e-07,  6.717e-07,  4.293e-07,
        0.000e+00, -2.128e-06,  0.000e+00,  9.186e-07,  1.084e-06,  9.839e-07,  9.020e-07,  6.108e-07,  5.751e-07,  6.717e-07,  7.911e-07,  5.493e-07,
        0.000e+00,  0.000e+00,  0.000e+00,  6.446e-07,  7.575e-07,  6.886e-07,  6.146e-07,  4.283e-07,  3.882e-07,  4.293e-07,  5.493e-07,  7.027e-07,
    )
)
from SimCalorimetry.EcalSimProducers.ecalDigiParameters_cff import *
ecal_pulse_shape_parameters = cms.PSet(
ecal_pulse_shape_templates,
ecal_digi_parameters,
ecal_pulse_shape_covariances
) | RecoLocalCalo/EcalRecProducers/python/ecalPulseShapeParameters_cff.py | import FWCore.ParameterSet.Config as cms
ecal_pulse_shape_templates = cms.PSet(
EBPulseShapeTemplate = cms.vdouble (
1.13979e-02, 7.58151e-01, 1.00000e+00, 8.87744e-01, 6.73548e-01, 4.74332e-01, 3.19561e-01, 2.15144e-01, 1.47464e-01, 1.01087e-01, 6.93181e-02, 4.75044e-02
) ,
EEPulseShapeTemplate = cms.vdouble (
1.16442e-01, 7.56246e-01, 1.00000e+00, 8.97182e-01, 6.86831e-01, 4.91506e-01, 3.44111e-01, 2.45731e-01, 1.74115e-01, 1.23361e-01, 8.74288e-02, 6.19570e-02
)
)
ecal_pulse_shape_covariances = cms.PSet(
EBPulseShapeCovariance = cms.vdouble (
3.001e-06, 1.233e-05, 0.000e+00, -4.416e-06, -4.571e-06, -3.614e-06, -2.636e-06, -1.286e-06, -8.410e-07, -5.296e-07, 0.000e+00, 0.000e+00,
1.233e-05, 6.154e-05, 0.000e+00, -2.200e-05, -2.309e-05, -1.838e-05, -1.373e-05, -7.334e-06, -5.088e-06, -3.745e-06, -2.428e-06, 0.000e+00,
0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00,
-4.416e-06, -2.200e-05, 0.000e+00, 8.319e-06, 8.545e-06, 6.792e-06, 5.059e-06, 2.678e-06, 1.816e-06, 1.223e-06, 8.245e-07, 5.589e-07,
-4.571e-06, -2.309e-05, 0.000e+00, 8.545e-06, 9.182e-06, 7.219e-06, 5.388e-06, 2.853e-06, 1.944e-06, 1.324e-06, 9.083e-07, 6.335e-07,
-3.614e-06, -1.838e-05, 0.000e+00, 6.792e-06, 7.219e-06, 6.016e-06, 4.437e-06, 2.385e-06, 1.636e-06, 1.118e-06, 7.754e-07, 5.556e-07,
-2.636e-06, -1.373e-05, 0.000e+00, 5.059e-06, 5.388e-06, 4.437e-06, 3.602e-06, 1.917e-06, 1.322e-06, 9.079e-07, 6.529e-07, 4.752e-07,
-1.286e-06, -7.334e-06, 0.000e+00, 2.678e-06, 2.853e-06, 2.385e-06, 1.917e-06, 1.375e-06, 9.100e-07, 6.455e-07, 4.693e-07, 3.657e-07,
-8.410e-07, -5.088e-06, 0.000e+00, 1.816e-06, 1.944e-06, 1.636e-06, 1.322e-06, 9.100e-07, 9.115e-07, 6.062e-07, 4.436e-07, 3.422e-07,
-5.296e-07, -3.745e-06, 0.000e+00, 1.223e-06, 1.324e-06, 1.118e-06, 9.079e-07, 6.455e-07, 6.062e-07, 7.217e-07, 4.862e-07, 3.768e-07,
0.000e+00, -2.428e-06, 0.000e+00, 8.245e-07, 9.083e-07, 7.754e-07, 6.529e-07, 4.693e-07, 4.436e-07, 4.862e-07, 6.509e-07, 4.418e-07,
0.000e+00, 0.000e+00, 0.000e+00, 5.589e-07, 6.335e-07, 5.556e-07, 4.752e-07, 3.657e-07, 3.422e-07, 3.768e-07, 4.418e-07, 6.142e-07,
),
EEPulseShapeCovariance = cms.vdouble (
3.941e-05, 3.333e-05, 0.000e+00, -1.449e-05, -1.661e-05, -1.424e-05, -1.183e-05, -6.842e-06, -4.915e-06, -3.411e-06, 0.000e+00, 0.000e+00,
3.333e-05, 2.862e-05, 0.000e+00, -1.244e-05, -1.431e-05, -1.233e-05, -1.032e-05, -5.883e-06, -4.154e-06, -2.902e-06, -2.128e-06, 0.000e+00,
0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00,
-1.449e-05, -1.244e-05, 0.000e+00, 5.840e-06, 6.649e-06, 5.720e-06, 4.812e-06, 2.708e-06, 1.869e-06, 1.330e-06, 9.186e-07, 6.446e-07,
-1.661e-05, -1.431e-05, 0.000e+00, 6.649e-06, 7.966e-06, 6.898e-06, 5.794e-06, 3.157e-06, 2.184e-06, 1.567e-06, 1.084e-06, 7.575e-07,
-1.424e-05, -1.233e-05, 0.000e+00, 5.720e-06, 6.898e-06, 6.341e-06, 5.347e-06, 2.859e-06, 1.991e-06, 1.431e-06, 9.839e-07, 6.886e-07,
-1.183e-05, -1.032e-05, 0.000e+00, 4.812e-06, 5.794e-06, 5.347e-06, 4.854e-06, 2.628e-06, 1.809e-06, 1.289e-06, 9.020e-07, 6.146e-07,
-6.842e-06, -5.883e-06, 0.000e+00, 2.708e-06, 3.157e-06, 2.859e-06, 2.628e-06, 1.863e-06, 1.296e-06, 8.882e-07, 6.108e-07, 4.283e-07,
-4.915e-06, -4.154e-06, 0.000e+00, 1.869e-06, 2.184e-06, 1.991e-06, 1.809e-06, 1.296e-06, 1.217e-06, 8.669e-07, 5.751e-07, 3.882e-07,
-3.411e-06, -2.902e-06, 0.000e+00, 1.330e-06, 1.567e-06, 1.431e-06, 1.289e-06, 8.882e-07, 8.669e-07, 9.522e-07, 6.717e-07, 4.293e-07,
0.000e+00, -2.128e-06, 0.000e+00, 9.186e-07, 1.084e-06, 9.839e-07, 9.020e-07, 6.108e-07, 5.751e-07, 6.717e-07, 7.911e-07, 5.493e-07,
0.000e+00, 0.000e+00, 0.000e+00, 6.446e-07, 7.575e-07, 6.886e-07, 6.146e-07, 4.283e-07, 3.882e-07, 4.293e-07, 5.493e-07, 7.027e-07,
)
)
from SimCalorimetry.EcalSimProducers.ecalDigiParameters_cff import *
ecal_pulse_shape_parameters = cms.PSet(
ecal_pulse_shape_templates,
ecal_digi_parameters,
ecal_pulse_shape_covariances
) | 0.169887 | 0.475544 |
import cv2
import time
import numpy as np

MODE = "BODY25"

# Use equality, not identity: `MODE is "COCO"` compares object identity of two
# string literals and only works by accident of CPython interning.
if MODE == "COCO":
    protoFile = "C:/Users/romanrosh/openpose-1.4.0-win64-gpu-binaries/models/pose/coco/pose_deploy_linevec.prototxt"
    weightsFile = "C:/Users/romanrosh/openpose-1.4.0-win64-gpu-binaries/models/pose/coco/pose_iter_440000.caffemodel"
    nPoints = 18
    POSE_PAIRS = [[1, 0], [1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10], [1, 11], [11, 12],
                  [12, 13], [0, 14], [0, 15], [14, 16], [15, 17]]
elif MODE == "MPI":
    protoFile = "C:/Users/romanrosh/openpose-1.4.0-win64-gpu-binaries/models/pose/mpi/pose_deploy_linevec_faster_4_stages.prototxt"
    weightsFile = "C:/Users/romanrosh/openpose-1.4.0-win64-gpu-binaries/models/pose/mpi/pose_iter_160000.caffemodel"
    nPoints = 15
    POSE_PAIRS = [[0, 1], [1, 2], [2, 3], [3, 4], [1, 5], [5, 6], [6, 7], [1, 14], [14, 8], [8, 9], [9, 10], [14, 11],
                  [11, 12], [12, 13]]
elif MODE == "BODY25":
    # NOTE(review): these paths point at the COCO model files although the
    # mode is BODY25 -- confirm the intended model.
    protoFile = "C:/Users/romanrosh/openpose-1.4.0-win64-gpu-binaries/models/pose/coco/pose_deploy_linevec.prototxt"
    weightsFile = "C:/Users/romanrosh/openpose-1.4.0-win64-gpu-binaries/models/pose/coco/pose_iter_440000.caffemodel"
    nPoints = 25
    POSE_PAIRS = [[1, 0], [1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10], [1, 11], [11, 12],
                  [12, 13], [0, 14], [0, 15], [14, 16], [15, 17],
                  [10, 11], [8, 12], [12, 13], [13, 14], [1, 0], [0, 15], [0, 16], [16, 18], [2, 17], [5, 18], [14, 19],
                  [19, 20], [14, 21], [11, 22], [22, 23], [11, 24]]

frame = cv2.imread("capture.jpg")
frame = cv2.resize(frame, dsize=(1000, 800))
frameCopy = np.copy(frame)
frameWidth = frame.shape[1]
frameHeight = frame.shape[0]
threshold = 0.1

net = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)

t = time.time()
# input image dimensions for the network
inWidth = 368
inHeight = 368
inpBlob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight),
                                (0, 0, 0), swapRB=False, crop=False)

net.setInput(inpBlob)
output = net.forward()
print("time taken by network : {:.3f}".format(time.time() - t))

H = output.shape[2]
W = output.shape[3]

# Empty list to store the detected keypoints
points = []
is_None = False  # True once any keypoint falls below the confidence threshold
df_is_empty = True
for i in range(nPoints):
    # confidence map of corresponding body's part.
    probMap = output[0, i, :, :]

    # Find global maxima of the probMap.
    minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)

    # Scale the point from network resolution back onto the original image.
    x = (frameWidth * point[0]) / W
    y = (frameHeight * point[1]) / H

    if prob > threshold:
        cv2.circle(frameCopy, (int(x), int(y)), 8, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)
        cv2.putText(frameCopy, "{}".format(i), (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2,
                    lineType=cv2.LINE_AA)
        # Add the point to the list if the probability is greater than the threshold
        points.append((int(x), int(y)))
    else:
        points.append(None)
        is_None = True

# Only flatten the keypoints when every part was detected: the original used a
# bare `continue` here, which is a SyntaxError outside a loop.
if not is_None:
    flat_point = [coord for pt in points for coord in pt]
    flat_array = np.array(flat_point) / 400
    point_dict = {i: flat_array[i] for i in np.arange(len(flat_array))}

# Draw Skeleton on the unannotated frame.
for pair in POSE_PAIRS:
    partA, partB = pair
    if points[partA] and points[partB]:
        cv2.line(frame, points[partA], points[partB], (0, 255, 255), 2)
        cv2.circle(frame, points[partA], 8, (0, 0, 255), thickness=-1, lineType=cv2.FILLED)

cv2.imshow('Output-Keypoints', frameCopy)
cv2.imshow('Output-Skeleton', frame)

cv2.imwrite('./destination/Output-Keypoints.jpg', frameCopy)
cv2.imwrite('./destination/Output-Skeleton.jpg', frame)

print("Total time taken : {:.3f}".format(time.time() - t))
cv2.waitKey(0) | OpenPoseImage.py | import cv2
import time
import numpy as np
MODE = "BODY25"
if MODE is "COCO":
protoFile = "C:/Users/romanrosh/openpose-1.4.0-win64-gpu-binaries/models/pose/coco/pose_deploy_linevec.prototxt"
weightsFile = "C:/Users/romanrosh/openpose-1.4.0-win64-gpu-binaries/models/pose/coco/pose_iter_440000.caffemodel"
nPoints = 18
POSE_PAIRS = [[1, 0], [1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10], [1, 11], [11, 12],
[12, 13], [0, 14], [0, 15], [14, 16], [15, 17]]
elif MODE is "MPI":
protoFile = "C:/Users/romanrosh/openpose-1.4.0-win64-gpu-binaries/models/pose/mpi/pose_deploy_linevec_faster_4_stages.prototxt"
weightsFile = "C:/Users/romanrosh/openpose-1.4.0-win64-gpu-binaries/models/pose/mpi/pose_iter_160000.caffemodel"
nPoints = 15
POSE_PAIRS = [[0, 1], [1, 2], [2, 3], [3, 4], [1, 5], [5, 6], [6, 7], [1, 14], [14, 8], [8, 9], [9, 10], [14, 11],
[11, 12], [12, 13]]
elif MODE is "BODY25":
protoFile = "C:/Users/romanrosh/openpose-1.4.0-win64-gpu-binaries/models/pose/coco/pose_deploy_linevec.prototxt"
weightsFile = "C:/Users/romanrosh/openpose-1.4.0-win64-gpu-binaries/models/pose/coco/pose_iter_440000.caffemodel"
nPoints = 25
POSE_PAIRS = [[1, 0], [1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10], [1, 11], [11, 12],
[12, 13], [0, 14], [0, 15], [14, 16], [15, 17],
[10, 11], [8, 12], [12, 13], [13, 14], [1, 0], [0, 15], [0, 16], [16, 18], [2, 17], [5, 18], [14, 19],
[19, 20], [14, 21], [11, 22], [22, 23], [11, 24]]
frame = cv2.imread("capture.jpg")
frame = cv2.resize(frame, dsize=(1000, 800))
frameCopy = np.copy(frame)
frameWidth = frame.shape[1]
frameHeight = frame.shape[0]
threshold = 0.1
net = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)
t = time.time()
# input image dimensions for the network
inWidth = 368
inHeight = 368
inpBlob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight),
(0, 0, 0), swapRB=False, crop=False)
net.setInput(inpBlob)
output = net.forward()
print("time taken by network : {:.3f}".format(time.time() - t))
H = output.shape[2]
W = output.shape[3]
# Empty list to store the detected keypoints
points = []
is_None = False
df_is_empty = True
for i in range(nPoints):
# confidence map of corresponding body's part.
probMap = output[0, i, :, :]
# Find global maxima of the probMap.
minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)
# Scale the point to fit on the original image
x = (frameWidth * point[0]) / W
y = (frameHeight * point[1]) / H
if prob > threshold:
cv2.circle(frameCopy, (int(x), int(y)), 8, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)
cv2.putText(frameCopy, "{}".format(i), (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2,
lineType=cv2.LINE_AA)
# Add the point to the list if the probability is greater than the threshold
points.append((int(x), int(y)))
else:
points.append(None)
is_None = True
if is_None:
continue
flat_point = [e for l in points for e in l]
# print(flat_point)
flat_array = np.array([e for l in points for e in l]) / 400
point_dict = {i: flat_array[i] for i in np.arange(len(flat_array))}
# Draw Skeleton
for pair in POSE_PAIRS:
partA = pair[0]
partB = pair[1]
if points[partA] and points[partB]:
cv2.line(frame, points[partA], points[partB], (0, 255, 255), 2)
cv2.circle(frame, points[partA], 8, (0, 0, 255), thickness=-1, lineType=cv2.FILLED)
cv2.imshow('Output-Keypoints', frameCopy)
cv2.imshow('Output-Skeleton', frame)
cv2.imwrite('./destination/Output-Keypoints.jpg', frameCopy)
cv2.imwrite('./destination/Output-Skeleton.jpg', frame)
print("Total time taken : {:.3f}".format(time.time() - t))
cv2.waitKey(0) | 0.289472 | 0.335895 |
from .base_endpoint import BaseEndpoint
# The base CheckIns endpoint
class CheckInsEndpoint(BaseEndpoint):
    """Base class shared by all Check-Ins API endpoint classes below."""
    pass
# All CheckIns endpoints
class CheckIns(CheckInsEndpoint):
    """An attendance record for an event.

    If someone was checked out, `checked_out_at` will be present.

    You can scope check-ins in a few ways:

    - `regular`s, `guest`s, and `volunteer`s correspond to the option selected when checking in.
    - `attendee`s are `regular`s and `guest`s together.
    - `one_time_guest`s are check-ins which were created without a corresponding person record.
    - `not_one_time_guest`s are check-ins which had a corresponding person record when they were created.
    - `checked_out` are check-ins where `checked_out_at` is present (meaning they were checked out from a station).
    - `first_time`s are check-ins which are the person's first for a given event. (One-time guests are not included here.)
    """
    pass
class EventTimes(CheckInsEndpoint):
    """A time that someone may check in.

    Times are copied from session to session.
    """
    pass
class Events(CheckInsEndpoint):
    """A recurring event which people may attend.

    Each recurrence is an _event period_ (which often corresponds to a week).
    Event periods have _event times_ where people may actually check in.
    """
    pass
class Headcounts(CheckInsEndpoint):
    """A tally of attendees for a given event time and attendance type.

    If one does not exist, the count may have been zero.
    """
    pass
class Labels(CheckInsEndpoint):
    """Labels can be set to print for events (through `EventLabel`s),
    locations (through `LocationLabel`s) or options.

    Label type (security label / name label) is expressed with the
    `prints_for` attribute. `prints_for="Person"` is a name label,
    `prints_for="Group"` is a security label.
    """
    pass
class Passes(CheckInsEndpoint):
    """Enables quick lookup of a person via barcode reader."""
    pass
class People(CheckInsEndpoint):
    """An attendee, volunteer or administrator.

    _Usually_, a person who checked in will be present as a `Person`. In some cases, they may not be present:

    - The person was manually deleted from the admin interface
    - The check-in was created as a "One-time guest" (which doesn't create a corresponding person record)
    """
    pass
class Stations(CheckInsEndpoint):
    """A device where people can be checked in.

    A device may also be connected to a printer
    and print labels for itself or other stations.
    """
    pass
class Themes(CheckInsEndpoint):
"""A custom style which may be applied to stations.
"""
pass | pypco/endpoints/check_ins.py |
from .base_endpoint import BaseEndpoint
# The base CheckIns endpoint
class CheckInsEndpoint(BaseEndpoint): pass
# All CheckIns endpoints
class CheckIns(CheckInsEndpoint):
"""An attendance record for an event.
If someone was checked out, `checked_out_at` will be present.
You can scope check-ins in a few ways:
- `regular`s, `guest`s, and `volunteer`s correspond to the option selected when checking in.
- `attendee`s are `regular`s and `guest`s together.
- `one_time_guest`s are check-ins which were created without a corresponding person record.
- `not_one_time_guest`s are check-ins which had a corresponding person record when they were created.
- `checked_out` are check-ins where `checked_out_at` is present (meaning they were checked out from a station).
- `first_time`s are check-ins which are the person's first for a given event. (One-time guests are not included here.)
"""
pass
class EventTimes(CheckInsEndpoint):
"""A time that someone may check in. Times are copied from session to session.
"""
pass
class Events(CheckInsEndpoint):
"""A recurring event which people may attend.
Each recurrence is an _event period_ (which often corresponds to a week).
Event periods have _event times_ where people may actually check in.
"""
pass
class Headcounts(CheckInsEndpoint):
"""A tally of attendees for a given event time and attendance type.
If one does not exist, the count may have been zero.
"""
pass
class Labels(CheckInsEndpoint):
"""Labels can be set to print for events (through `EventLabel`s),
locations (through `LocationLabel`s) or options.
Label type (security label / name label) is expressed with the
`prints_for` attribute. `prints_for="Person"` is a name label,
`prints_for="Group"` is a security label.
"""
pass
class Passes(CheckInsEndpoint):
"""Enables quick lookup of a person via barcode reader.
"""
pass
class People(CheckInsEndpoint):
"""An attendee, volunteer or administrator.
_Usually_, a person who checked in will be present as a `Person`. In some cases, they may not be present:
- The person was manually deleted from the admin interface
- The check-in was created as a "One-time guest" (which doesn't create a corresponding person record)
"""
pass
class Stations(CheckInsEndpoint):
"""A device where people can be checked in.
A device may also be connected to a printer
and print labels for itself or other stations.
"""
pass
class Themes(CheckInsEndpoint):
"""A custom style which may be applied to stations.
"""
pass | 0.742328 | 0.480357 |
from __future__ import absolute_import, division
import bcrypt
import flask.ext.testing
import functools
import hashlib
import inspect
import os
import shutil
import tempfile
from . import create_app
from . import settings
from .model import db
from .site import Site
def create_tempdir():
    """Create a throw-away directory; return (path, cleanup_callable)."""
    path = tempfile.mkdtemp()

    def cleanup():
        shutil.rmtree(path)

    return path, cleanup
def default_from_self(f):
    """Decorator filling omitted/None keyword args from attributes on ``self``.

    The wrapped method may only be called with keyword arguments; any
    declared parameter (except ``self``) that is missing or ``None`` is
    resolved via ``getattr(self, name)`` instead.
    """
    # inspect.getargspec was removed in Python 3.11; use getfullargspec when
    # available while staying compatible with older interpreters. Computing
    # the spec once at decoration time also avoids re-inspecting per call.
    getspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    arg_names = getspec(f).args[1:]  # skip 'self'

    @functools.wraps(f)
    def method(self, **provided_kw):
        kw = dict()
        for name in arg_names:
            value = provided_kw.pop(name, None)
            # An explicit non-None argument wins; otherwise fall back to self.
            kw[name] = value if value is not None else getattr(self, name)
        return f(self, **kw)
    return method
def ls(path):
    """Return the entries of *path* as a sorted list."""
    return sorted(os.listdir(path))
def make_fileset(directory, format='foo', parts=3, set_idx=0,
                 prefix=None, empty_idx=None, missing_idx=None,
                 missing_md5_idx=None, content_template=None,
                 template='set%(set_idx)s.file%(file_idx)s.%(format)s'):
    """Create a set of test files (plus ``.md5`` sidecars) in *directory*.

    ``empty_idx`` makes that part an empty file, ``missing_idx`` skips the
    part entirely, and ``missing_md5_idx`` skips only that part's md5 file.
    """
    # Apply the prefix and derive the content template ONCE, before the loop:
    # the previous version mutated `template` inside the loop, so the prefix
    # was prepended again on every iteration after the first.
    if prefix is not None:
        template = '{}{}'.format(prefix, template)
    if content_template is None:
        content_template = 'content_{}'.format(template)

    for i in range(parts):
        if i == missing_idx:
            continue
        substitutions = dict(set_idx=set_idx, file_idx=i, format=format)
        filename = template % substitutions
        path = os.path.join(directory, filename)
        content = content_template % substitutions if i != empty_idx else ''
        # Encode explicitly: the handles are binary and hashlib needs bytes
        # (works identically for ASCII content on Python 2 and 3).
        with open(path, 'wb') as f:
            f.write(content.encode('utf-8'))
        if i != missing_md5_idx:
            md5 = hashlib.md5(content.encode('utf-8')).hexdigest()
            with open('{}.md5'.format(path), 'wb') as f:
                f.write('{} {}'.format(md5, filename).encode('utf-8'))
# List the visible (non-dot) entries of a storage directory on the filesystem.
def list_nondot(path):
return [x for x in sorted(os.listdir(path)) if x[0] != '.']
# Default credentials: written into the temporary instance's users.txt
# by FlaskTestCase.create_app() and used by FlaskTestCase.login().
DEFAULT_USERNAME = 'm'
DEFAULT_PASSWORD = 'm'
class FlaskTestCase(flask.ext.testing.TestCase, settings.Testing):
    """Base class for tests running against a throwaway app instance.

    Each test gets a fresh temporary instance directory containing a
    default user and an sqlite database; everything is torn down after
    the test.
    """

    def __call__(self, result=None):
        # flask.ext.testing.TestCase catches exceptions in _pre_setup
        # and create_app, so drive setup/teardown ourselves and report
        # failures instead of letting them be swallowed.
        try:
            self._pre_setup()
            # we skip one level (flask.ext.testing.TestCase.__call__),
            # which would otherwise run _pre_setup again
            super(flask.ext.testing.TestCase, self).__call__(result)
        except Exception:  # was a bare 'except:'; don't trap SystemExit
            import sys
            import traceback
            traceback.print_exc(file=sys.stdout)
        finally:
            self._post_teardown()

    def create_app(self):
        """Build the app on a fresh temporary instance directory."""
        instance_path, self.instance_cleanup = create_tempdir()
        Site(instance_path).init_root()
        if self.DB_SQLITE:
            shutil.copy2(self.DB_SQLITE, instance_path)
        self.SQLALCHEMY_DATABASE_URI = \
            'sqlite:///{path}/db.sqlite'.format(path=instance_path)
        # create the default user with a bcrypt-hashed password
        # NOTE(review): writes text to a binary handle -- fine on
        # Python 2 (where flask.ext lives); revisit if ported to 3.
        with open(os.sep.join([instance_path, 'users.txt']), 'wb') as f:
            f.write('{}:{}\n'.format(
                DEFAULT_USERNAME,
                bcrypt.hashpw(DEFAULT_PASSWORD, bcrypt.gensalt())
            ))
        return create_app(self, INSTANCE_PATH=instance_path)

    def _pre_setup(self):
        super(FlaskTestCase, self)._pre_setup()
        if self.DB_CREATE_ALL:
            db.create_all()

    def _post_teardown(self):
        db.session.remove()
        db.drop_all()
        self.instance_cleanup()
        super(FlaskTestCase, self)._post_teardown()

    def login(self, username=DEFAULT_USERNAME, password=DEFAULT_PASSWORD):
        """POST the login form; defaults to the instance's default user."""
        # BUG FIX: the password default was the literal '<PASSWORD>'
        # placeholder (a syntax error); DEFAULT_PASSWORD matches the
        # credentials create_app() writes into users.txt.
        return self.client.post('/marv/api/_login', data=dict(
            username=username,
            password=password
        ), follow_redirects=True)

    def logout(self):
        return self.client.get('/marv/api/_logout', follow_redirects=True)
from __future__ import absolute_import, division
import bcrypt
import flask.ext.testing
import functools
import hashlib
import inspect
import os
import shutil
import tempfile
from . import create_app
from . import settings
from .model import db
from .site import Site
def create_tempdir():
    """Create a temporary directory.

    Returns a ``(path, cleanup)`` pair; calling ``cleanup()`` removes
    the directory and everything inside it.
    """
    path = tempfile.mkdtemp()
    return path, functools.partial(shutil.rmtree, path)
def default_from_self(f):
    """Decorator: default missing keyword arguments from ``self``.

    The wrapped method accepts keyword arguments only; any parameter
    that is not supplied (or supplied as ``None``) is looked up as an
    attribute of the same name on ``self``.
    """
    # inspect.getargspec was removed in Python 3.11; prefer
    # getfullargspec where it exists while staying Python 2 compatible.
    getspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec

    @functools.wraps(f)
    def method(self, **provided_kw):
        args = getspec(f).args
        kw = dict()
        for k in args[1:]:  # args[0] is 'self'
            v = provided_kw.pop(k, None)
            kw[k] = v if v is not None else getattr(self, k)
        return f(self, **kw)
    return method
def ls(path):
    """Return the directory entries of ``path`` in sorted order."""
    return sorted(os.listdir(path))
def make_fileset(directory, format='foo', parts=3, set_idx=0,
                 prefix=None, empty_idx=None, missing_idx=None,
                 missing_md5_idx=None, content_template=None,
                 template='set%(set_idx)s.file%(file_idx)s.%(format)s'):
    """Create a set of test files in ``directory``, with .md5 sidecars.

    ``parts`` files are created from ``template``; ``empty_idx`` gets
    empty content, ``missing_idx`` is skipped entirely, and
    ``missing_md5_idx`` gets no ``.md5`` sidecar file.
    """
    # Apply the prefix and derive the content template ONCE, up front.
    # Doing this inside the loop (as before) prepended the prefix again
    # on every iteration, producing names like 'p_p_p_set0.file2.foo'.
    if prefix is not None:
        template = '{}{}'.format(prefix, template)
    if content_template is None:
        content_template = 'content_{}'.format(template)
    for i in range(parts):
        if i == missing_idx:
            continue
        substitutions = dict(set_idx=set_idx, file_idx=i, format=format)
        filename = template % substitutions
        path = os.path.join(directory, filename)
        content = content_template % substitutions if i != empty_idx else ''
        # encode explicitly: the files are opened in binary mode, which
        # requires bytes on Python 3 (and still works on Python 2)
        with open(path, 'wb') as f:
            f.write(content.encode('utf-8'))
        if i != missing_md5_idx:
            md5 = hashlib.md5(content.encode('utf-8')).hexdigest()
            with open('{}.md5'.format(path), 'wb') as f:
                f.write('{} {}'.format(md5, filename).encode('utf-8'))
# list md5s of a storage on filesystem
def list_nondot(path):
    """Return sorted directory entries of ``path``, skipping dotfiles."""
    return sorted(entry for entry in os.listdir(path)
                  if not entry.startswith('.'))
# Default credentials: written into the temporary instance's users.txt
# by FlaskTestCase.create_app() and used by FlaskTestCase.login().
DEFAULT_USERNAME = 'm'
DEFAULT_PASSWORD = 'm'
class FlaskTestCase(flask.ext.testing.TestCase, settings.Testing):
    """Base class for tests running against a throwaway app instance.

    Each test gets a fresh temporary instance directory containing a
    default user and an sqlite database; everything is torn down after
    the test.
    """

    def __call__(self, result=None):
        # flask.ext.testing.TestCase catches exceptions in _pre_setup
        # and create_app, so drive setup/teardown ourselves and report
        # failures instead of letting them be swallowed.
        try:
            self._pre_setup()
            # we skip one level (flask.ext.testing.TestCase.__call__),
            # which would otherwise run _pre_setup again
            super(flask.ext.testing.TestCase, self).__call__(result)
        except Exception:  # was a bare 'except:'; don't trap SystemExit
            import sys
            import traceback
            traceback.print_exc(file=sys.stdout)
        finally:
            self._post_teardown()

    def create_app(self):
        """Build the app on a fresh temporary instance directory."""
        instance_path, self.instance_cleanup = create_tempdir()
        Site(instance_path).init_root()
        if self.DB_SQLITE:
            shutil.copy2(self.DB_SQLITE, instance_path)
        self.SQLALCHEMY_DATABASE_URI = \
            'sqlite:///{path}/db.sqlite'.format(path=instance_path)
        # create the default user with a bcrypt-hashed password
        # NOTE(review): writes text to a binary handle -- fine on
        # Python 2 (where flask.ext lives); revisit if ported to 3.
        with open(os.sep.join([instance_path, 'users.txt']), 'wb') as f:
            f.write('{}:{}\n'.format(
                DEFAULT_USERNAME,
                bcrypt.hashpw(DEFAULT_PASSWORD, bcrypt.gensalt())
            ))
        return create_app(self, INSTANCE_PATH=instance_path)

    def _pre_setup(self):
        super(FlaskTestCase, self)._pre_setup()
        if self.DB_CREATE_ALL:
            db.create_all()

    def _post_teardown(self):
        db.session.remove()
        db.drop_all()
        self.instance_cleanup()
        super(FlaskTestCase, self)._post_teardown()

    def login(self, username=DEFAULT_USERNAME, password=DEFAULT_PASSWORD):
        """POST the login form; defaults to the instance's default user."""
        # BUG FIX: the password default was the literal '<PASSWORD>'
        # placeholder (a syntax error); DEFAULT_PASSWORD matches the
        # credentials create_app() writes into users.txt.
        return self.client.post('/marv/api/_login', data=dict(
            username=username,
            password=password
        ), follow_redirects=True)

    def logout(self):
        return self.client.get('/marv/api/_logout', follow_redirects=True)
from pyglet import event
from pyglet.gl import glClearColor, GL_TRIANGLES
from pyglet.window import key, Window
from pyglet.shapes import Rectangle
from pyglet.text import Label, caret
from pyglet.text.document import FormattedDocument
from pyglet.text.layout import IncrementalTextLayout, ScrollableTextLayout
from settings import *
import model
class OrganizerWindow(Window):
    """Main program window, handles the background color and resizing"""
    def __init__(self, organizer, batch, groups):
        if FULLSCREEN:
            super().__init__(caption=WINDOW_TITLE, fullscreen=True)
        else:
            super().__init__(
                WINDOW_WIDTH, WINDOW_HEIGHT, WINDOW_TITLE, resizable=True)
        glClearColor(
            BACKGROUND_COLOR[0]/255, BACKGROUND_COLOR[1]/255,
            BACKGROUND_COLOR[2]/255, 1)
        self.organizer = organizer
        # define callbacks used by caret in text input
        def click_callback(x, y): self.on_click(x, y)
        def enter_callback(): self.on_enter()
        def motion_callback(motion): self.on_motion(motion)
        def find_callback(text): self.on_search(text)
        # create text input and item list
        self.text_input = TextInput(
            batch, groups, click_callback,
            enter_callback, motion_callback, find_callback)
        self.item_list = ItemList(self.text_input.font_height, batch, groups)
        # initialize member variables
        self.batch = batch
        self.prev_w = 0
        self.prev_h = 0
        self.active_drawer = None
        self.found = []
        self.item_drawers = []
        self.text = ''
        self.drawer_selected = False
        self.renaming = False
    def on_draw(self):
        """Window content needs to be redrawn, resize contents if necessary"""
        self.clear()
        if (self.width != self.prev_w or self.height != self.prev_h):
            self.prev_w = self.width
            self.prev_h = self.height
            self.organizer.resize(
                self.prev_w, self.prev_h, self.text_input, self.item_list)
        self.batch.draw()
        # NOTE(review): push_handlers runs once per frame; pyglet pushes
        # a new handler frame on each call, so the handler stack grows
        # while the app runs -- consider moving this to __init__.
        self.push_handlers(self.text_input.caret)
    def on_click(self, x, y):
        """Mouse has been clicked at given coordinate, (de-)select drawer"""
        drawer = self.organizer.get_drawer(x, y)
        if (drawer is not self.active_drawer):
            self.clear_all()
            self.text_input.clear_text()
            self.activate_drawer(drawer, True)
    def on_enter(self):
        """Enter has been pressed, handle adding and renaming items"""
        text = self.text_input.get_text().strip()
        selected = self.item_list.get_selected()
        self.text_input.clear_text()
        if (text != ''):
            if (self.drawer_selected):
                if (self.renaming):
                    item = self.item_list.items[selected]
                    item.name = text
                    item.lower = text.lower()
                    self.renaming = False
                else:
                    self.active_drawer.add_item(text)
                items = self.active_drawer.get_items()
                self.item_list.set_items(items)
            elif (selected >= 0):
                drawer = self.item_drawers[selected]
                self.activate_drawer(drawer, True)
            else:
                self.clear_all()
        elif (self.active_drawer is not None):
            if (self.drawer_selected):
                if (selected >= 0):
                    self.renaming = not self.renaming
                    if (self.renaming):
                        text = self.item_list.items[selected].name
                        self.text_input.set_text(text)
                        self.text_input.caret.position = len(text)
                else:
                    self.active_drawer.highlight(HIGHLIGHT_MASK)
                    self.drawer_selected = False
            else:
                self.active_drawer.highlight(SELECT_MASK)
                self.drawer_selected = True
                items = self.active_drawer.get_items()
                self.item_list.set_items(items)
    def on_key_press(self, symbol, mod):
        """Clear all input on ESC"""
        if (symbol == key.ESCAPE):
            self.clear_all()
            self.text_input.clear_text()
            self.renaming = False
    def on_motion(self, motion):
        """Handle motion input like up, down, left, right and delete"""
        if (not self.renaming):
            if (self.drawer_selected or self.text != ''):
                selected = self.item_list.get_selected()
                if (motion == key.MOTION_DOWN):
                    self.item_list.select(selected + 1)
                elif (motion == key.MOTION_UP):
                    self.item_list.select(selected - 1)
                elif (motion == key.MOTION_DELETE):
                    if (selected >= 0 and self.active_drawer is not None):
                        del self.active_drawer.get_items()[selected]
                        items = self.active_drawer.get_items()
                        self.item_list.set_items(items)
                        self.item_list.select()
                if (self.text != '' and not self.drawer_selected):
                    new_selected = self.item_list.get_selected()
                    if (selected != new_selected):
                        if (selected >= 0):
                            self.item_drawers[selected].highlight(HIGHLIGHT_MASK)
                        if (new_selected >= 0):
                            self.item_drawers[new_selected].highlight(SELECT_MASK)
            elif (self.active_drawer is None):
                self.activate_drawer(self.get_box(0, 0).subelems[-1])
            else:
                self.move(motion)
    def on_search(self, text):
        """Handle the search of the organizer for items"""
        if (not self.drawer_selected):
            if (self.text != text):
                self.clear_all()
                self.text = text
                if (len(text.strip()) >= FIND_MIN_LENGTH):
                    items = []
                    self.item_drawers = []
                    self.found = self.organizer.find(text.lower().split())
                    for drawer, i in self.found:
                        drawer.highlight(HIGHLIGHT_MASK)
                        items += i
                        self.item_drawers += [drawer] * len(i)
                    self.item_list.set_items(items)
    def clear_all(self):
        """Clear all selections, highlights and text input"""
        for drawer, items in self.found:
            drawer.highlight()
        self.found = []
        self.item_list.select()
        self.item_list.set_items()
        if (self.active_drawer is not None):
            self.active_drawer.highlight()
        self.drawer_selected = False
        self.text = ''
    def move(self, motion):
        """Select drawer using the arrow keys"""
        box = self.active_drawer.box
        drawers = box.subelems
        drawer_index = drawers.index(self.active_drawer)
        if (motion == key.DOWN):
            if (drawer_index < len(drawers) - 1):
                self.activate_drawer(drawers[drawer_index + 1])
            else:
                new_box = self.get_box(box.x, box.y - 1)
                if (new_box is not None):
                    self.activate_drawer(new_box.subelems[0])
        elif (motion == key.UP):
            if (drawer_index > 0):
                self.activate_drawer(drawers[drawer_index - 1])
            else:
                new_box = self.get_box(box.x, box.y + box.h)
                if (new_box is not None):
                    self.activate_drawer(new_box.subelems[-1])
        elif (motion == key.LEFT):
            new_box = self.get_box(box.x - 1, box.y)
            if (new_box is not None):
                index = drawer_index * len(new_box.subelems) // len(drawers)
                self.activate_drawer(new_box.subelems[index])
        elif (motion == key.RIGHT):
            new_box = self.get_box(box.x + box.w, box.y)
            if (new_box is not None):
                index = drawer_index * len(new_box.subelems) // len(drawers)
                self.activate_drawer(new_box.subelems[index])
    def activate_drawer(self, drawer, selected=False):
        """Set the active drawer and highlight it accordingly"""
        # BUG FIX: this condition previously compared against
        # self.activate_drawer (this bound method itself), which a
        # drawer can never be, so the branch ran unconditionally;
        # compare against self.active_drawer instead.
        if (drawer is not self.active_drawer):
            self.clear_all()
            if (self.active_drawer is not None):
                self.active_drawer.highlight()
            else:
                self.text_input.clear_text()
            if (drawer is not None):
                if (selected):
                    drawer.highlight(SELECT_MASK)
                else:
                    drawer.highlight(HIGHLIGHT_MASK)
                self.drawer_selected = selected
                items = drawer.get_items()
                self.item_list.set_items(items)
            else:
                self.text_input.clear_text()
                self.drawer_selected = False
            self.active_drawer = drawer
    def get_box(self, x, y):
        """Return the box at a given coordinate"""
        for box in self.organizer.subelems:
            if (x >= box.x and x <= box.x + box.w - 1
                    and y >= box.y and y <= box.y + box.h - 1):
                return box
        return None
class OrganizerGUI(model.Element):
    """Class of organizer objects with resizeable GUI"""
    def __init__(self, organizer, batch, groups):
        # draw organizer background
        self.rect = Rectangle(
            0, 0, 100, 100, color=BOX_COLOR, batch=batch, group=groups[0])
        # create list of BoxGUI objects from boxes
        boxes_gui = []
        for box in organizer.subelems:
            boxes_gui.append(BoxGUI(
                box.subelems, box.x, box.y, box.w, box.h, batch, groups))
        # initialize parent class with newly created boxes_gui
        super().__init__(boxes_gui)
        # organizer extent in abstract block units (not pixels)
        self.w = organizer.w
        self.h = organizer.h
    def resize(self, window_w, window_h, text_input, item_list):
        """Resize the organizer and its subelements to a given window size"""
        try:
            # check width to height ratio to decide which dimension the
            # organizer should fill
            if (window_w / window_h >= self.w / self.h):
                # scale organizer to fill height
                self.rect.height = window_h * (1 - 2 * WINDOW_MARGIN)
                self.rect.width = self.rect.height * self.w / self.h
                self.rect.x = self.rect.y = window_h * WINDOW_MARGIN
                # calculate text input position and width (to the right
                # of the organizer)
                text_x = self.rect.width + 2 * self.rect.x
                text_y = (window_h * (1 - WINDOW_MARGIN)
                          - text_input.rect.height)
                text_w = max(0, window_w - text_x - self.rect.x)
                list_y = text_y - window_h * LIST_OFFSET
            else:
                # scale organizer to fill width; text input goes below
                self.rect.width = text_w = window_w * (1 - 2 * WINDOW_MARGIN)
                self.rect.height = self.rect.width * self.h / self.w
                self.rect.x = text_x = window_w * WINDOW_MARGIN
                self.rect.y = window_h - self.rect.height - self.rect.x
                text_y = (window_h - self.rect.height - 2 * text_x
                          - text_input.rect.height)
                list_y = text_y - window_w * LIST_OFFSET
            list_h = max(0, list_y - window_h * WINDOW_MARGIN)
            # resize text input and the item list below it
            text_input.resize(text_x, text_y, text_w)
            item_list.resize(text_x, list_y, text_w, list_h)
            # resize all subelements as well; block_size converts from
            # abstract block units to pixels
            block_size = self.rect.width / self.w
            bm = block_size * BOX_MARGIN        # box margin in pixels
            dm = block_size * DRAWER_MARGIN     # drawer margin in pixels
            hh = block_size * HANDLE_HEIGHT     # handle height in pixels
            ht = block_size * HANDLE_THICKNESS  # handle thickness in pixels
            for box in self.subelems:
                box_x = box.x * block_size + self.rect.x
                box_y = box.y * block_size + self.rect.y
                box_w = box.w * block_size
                box_h = box.h * block_size
                box.resize(box_x, box_y, box_w, box_h, bm, dm, hh, ht)
        except ZeroDivisionError:
            # a zero window or organizer dimension makes the ratio math
            # divide by zero; skip this resize pass
            pass
    def get_drawer(self, x, y):
        """Return the drawer corresponding to a given coordinate"""
        for box in self.subelems:
            for drawer in box.subelems:
                if (drawer.is_clicked(x, y)):
                    return drawer
        return None
class BoxGUI(model.Element):
    """A box of drawers with a resizeable GUI representation."""

    def __init__(self, drawers, x, y, w, h, batch, groups):
        # position and extent in abstract block units
        self.x = x
        self.y = y
        self.w = w
        self.h = h
        # wrap every drawer in a DrawerGUI and hand the list to the
        # model.Element base class
        super().__init__(
            [DrawerGUI(d.subelems, self, batch, groups) for d in drawers])

    def resize(self, x, y, w, h, bm, dm, hh, ht):
        """Resize the box and all of its drawers to a given pixel size."""
        count = len(self.subelems)
        slot_h = (h - 2 * bm) / count
        inner_w = w - 2 * (bm + dm)
        inner_h = slot_h - 2 * dm
        inner_x = x + bm + dm
        base_y = y + bm + dm
        # index 0 is placed at the highest y slot (reversed stacking)
        for idx, drawer in enumerate(self.subelems):
            drawer.resize(inner_x, base_y + (count - 1 - idx) * slot_h,
                          inner_w, inner_h, hh, ht)
class DrawerGUI(model.Element):
    """Class of drawer objects with resizeable GUI"""
    def __init__(self, items, box, batch, groups):
        super().__init__(items)
        # back-reference to the owning BoxGUI
        self.box = box
        # drawer face; real geometry is filled in by resize()
        self.rect = Rectangle(
            0, 0, 1, 1, color=DRAWER_COLOR, batch=batch, group=groups[1])
        # handle rendered as TRIANGLE_COUNT raw triangles; vertex
        # positions are filled in by resize()
        self.handle = batch.add(
            TRIANGLE_COUNT * 3, GL_TRIANGLES, groups[2], 'v2f', 'c3B')
        self.handle.colors = HANDLE_COLOR * TRIANGLE_COUNT * 3
    def is_clicked(self, x, y):
        """Test if drawer is clicked"""
        return (x >= self.rect.x and x <= self.rect.x + self.rect.width
                and y >= self.rect.y and y <= self.rect.y + self.rect.height)
    def resize(self, x, y, w, h, hh, ht):
        """Resize the drawer to a given pixel size"""
        self.rect.x = x
        self.rect.y = y
        self.rect.height = h
        self.rect.width = w
        # handle geometry: hw is the handle's total width, ox its left
        # edge (centred under the drawer), hhy how far it hangs below
        # the drawer's bottom edge
        hw = w * HANDLE_WIDTH
        ox = x + (w - hw) / 2
        hhy = hh
        # 18 vertices = 6 triangles around the drawer's bottom edge y;
        # the last two triangles form the strip of thickness ht above it
        self.handle.vertices = [
            ox, y, ox + hh, y, ox + hh, y - hhy,
            ox + hh, y - hhy, ox + hh, y, ox + hw - hh, y,
            ox + hh, y - hhy, ox + hw - hh, y, ox + hw - hh, y - hhy,
            ox + hw, y, ox + hw - hh, y, ox + hw - hh, y - hhy,
            ox, y, ox, y + ht, ox + hw, y,
            ox + hw, y, ox, y + ht, ox + hw, y + ht]
    def highlight(self, color_mask=(0, 0, 0)):
        """Highlight the drawer with a given color mask, remove highlighting
        if no input is given"""
        # add the mask channel-wise onto the base colors
        color_rect = []
        color_handle = []
        for i, c in enumerate(color_mask):
            color_rect.append(DRAWER_COLOR[i] + c)
            color_handle.append(HANDLE_COLOR[i] + c)
        self.rect.color = color_rect
        self.handle.colors = color_handle * TRIANGLE_COUNT * 3
    def add_item(self, name, amount=None):
        """Add an item to the drawer"""
        self.subelems.append(model.Item(name, amount))
    def get_items(self):
        """Return all items of the drawer"""
        return self.subelems
    def find(self, str):
        """Return all items containing str together with the drawer"""
        # NOTE(review): parameter 'str' shadows the builtin; renaming it
        # would change the keyword interface, so it is left as-is.
        items = []
        for item in self.subelems:
            items += item.find(str)
        if (items != []):
            return [(self, items)]
        return []
class TextInput:
    """Text input field"""
    def __init__(self, batch, groups,
                 on_click, on_enter, on_motion, on_search):
        # create document (a single space so the style can be applied
        # to a non-empty range)
        self.document = FormattedDocument(' ')
        self.document.set_style(
            0, 1, dict(font_name=FONT_NAME,
                       font_size=FONT_SIZE, color=FONT_COLOR))
        # calculate font height and margin from the styled font
        font = self.document.get_font(0)
        self.font_height = font.ascent - font.descent
        self.margin = self.font_height * TEXT_INPUT_MARGIN
        # create text input; position/width are placeholders until
        # resize() is called
        self.input = IncrementalTextLayout(
            self.document, 100, self.font_height, batch=batch, group=groups[1])
        self.input.x = 100
        self.input.y = 100
        # creating a caret and push it to window handlers
        self.caret = Caret(
            self.input, FONT_COLOR[:3],
            on_click, on_enter, on_motion, on_search)
        self.clear_text()
        # create background rectangle
        self.rect = Rectangle(
            0, 0, 100, self.font_height + 2 * self.margin,
            color=TEXT_INPUT_COLOR, batch=batch, group=groups[0])
    def resize(self, x, y, w):
        """Resize the text input with given coordinates and width"""
        self.rect.x = x
        self.rect.y = y
        self.rect.width = w
        self.input.x = x + self.margin
        self.input.y = y + self.margin
        self.input.width = w - 2 * self.margin
    def get_text(self):
        """Return currently displayed text"""
        return self.document.text
    def set_text(self, text):
        """Set the text to display"""
        self.document.text = text
    def clear_text(self):
        """Clear displayed text"""
        self.document.text = ''
class ItemList:
    """List of items below the text input field drawn to available space"""
    def __init__(self, font_height, batch, groups):
        self.font_height = font_height
        self.margin = font_height * TEXT_INPUT_MARGIN
        self.line_height = self.font_height + 2 * self.margin
        # create document; width is a placeholder until resize()
        self.text_box = Label(
            '', FONT_NAME, FONT_SIZE, width=1,
            multiline=True, anchor_y='top', color=ITEM_FONT_COLOR,
            batch=batch, group=groups[2])
        # create background rectangle
        self.rect = Rectangle(
            0, 0, 100, self.line_height,
            color=ITEM_LIST_COLOR, batch=batch, group=groups[0])
        # create select rectangle (hidden until an item is selected)
        self.select_rect = Rectangle(
            0, 0, 100, self.line_height,
            color=ITEM_SELECT_COLOR, batch=batch, group=groups[1])
        self.select_rect.visible = False
        # initialize member variables
        self.lines = 0
        self.select_num = 0
        self.items = []
        self.max_h = 1
        self.y = 0
    def resize(self, x, y, w, max_h):
        """Resize the list content to given coordinates, width and
        maximum height"""
        self.y = y
        self.max_h = max_h
        self.__update()
        self.rect.x = x
        self.rect.width = w
        self.select_rect.x = x
        self.select_rect.width = w
        self.text_box.x = x + self.margin
        self.text_box.y = y - self.margin
        self.text_box.width = w - 2 * self.margin
    def select(self, num=-1):
        """Select an item via list index; deselect when num is negative"""
        if (num < 0):
            self.select_rect.visible = False
            self.__update()
        elif (num < self.lines):
            self.select_rect.visible = True
            self.select_num = num
            self.__update()
    def get_selected(self):
        """Return index of the selected item, -1 if no item is selected"""
        if (self.select_rect.visible):
            return self.select_num
        return -1
    def set_items(self, items=None):
        """Set list content to a given list of items (empty by default)"""
        # BUG FIX: the default used to be a mutable 'items=[]', which is
        # one shared list across all calls; use a None sentinel instead.
        self.items = items if items is not None else []
        self.__update()
    def __update(self):
        """Update size and list elements"""
        self.lines = min(len(self.items), int(self.max_h // self.line_height))
        h = self.line_height * self.lines
        self.rect.height = h
        self.rect.y = self.y - h
        self.select_rect.y = self.y - self.line_height * (1 + self.select_num)
        self.text_box.text = '\n\n'.join(
            [i.name for i in self.items[:self.lines]])
class Caret(caret.Caret):
    """Custom caret to handle specific events with callbacks"""
    def __init__(self, input, color, on_click, on_enter, on_motion, on_search):
        super().__init__(input, color=color)
        self.prev_x = 0
        self.prev_y = 0
        self.document = input.document
        self.on_click = on_click
        self.on_enter = on_enter
        self.on_motion = on_motion
        self.on_search = on_search
    def on_mouse_press(self, x, y, button, modifiers):
        """Handle mouse press, suppressing repeats at the same position"""
        # BUG FIX: this used 'and', which dropped any click sharing
        # either coordinate with the previous one; 'or' only suppresses
        # an exact repeat of the previous position.
        if (x != self.prev_x or y != self.prev_y):
            self.prev_x = x
            self.prev_y = y
            self.on_click(x, y)
    def on_text(self, text):
        """Handle text input by detecting enter/return events"""
        if (text == CHAR_ENTER):
            self.on_enter()
        else:
            super().on_text(text)
            self.on_search(self.document.text)
        return event.EVENT_HANDLED
    def on_text_motion(self, motion, select=False):
        """Handle motion input events"""
        super().on_text_motion(motion, select)
        self.on_motion(motion)
        self.on_search(self.document.text)
        return event.EVENT_HANDLED
from pyglet.gl import glClearColor, GL_TRIANGLES
from pyglet.window import key, Window
from pyglet.shapes import Rectangle
from pyglet.text import Label, caret
from pyglet.text.document import FormattedDocument
from pyglet.text.layout import IncrementalTextLayout, ScrollableTextLayout
from settings import *
import model
class OrganizerWindow(Window):
    """Main program window, handles the background color and resizing"""
    def __init__(self, organizer, batch, groups):
        if FULLSCREEN:
            super().__init__(caption=WINDOW_TITLE, fullscreen=True)
        else:
            super().__init__(
                WINDOW_WIDTH, WINDOW_HEIGHT, WINDOW_TITLE, resizable=True)
        glClearColor(
            BACKGROUND_COLOR[0]/255, BACKGROUND_COLOR[1]/255,
            BACKGROUND_COLOR[2]/255, 1)
        self.organizer = organizer
        # define callbacks used by caret in text input
        def click_callback(x, y): self.on_click(x, y)
        def enter_callback(): self.on_enter()
        def motion_callback(motion): self.on_motion(motion)
        def find_callback(text): self.on_search(text)
        # create text input and item list
        self.text_input = TextInput(
            batch, groups, click_callback,
            enter_callback, motion_callback, find_callback)
        self.item_list = ItemList(self.text_input.font_height, batch, groups)
        # initialize member variables
        self.batch = batch
        self.prev_w = 0
        self.prev_h = 0
        self.active_drawer = None
        self.found = []
        self.item_drawers = []
        self.text = ''
        self.drawer_selected = False
        self.renaming = False
    def on_draw(self):
        """Window content needs to be redrawn, resize contents if necessary"""
        self.clear()
        if (self.width != self.prev_w or self.height != self.prev_h):
            self.prev_w = self.width
            self.prev_h = self.height
            self.organizer.resize(
                self.prev_w, self.prev_h, self.text_input, self.item_list)
        self.batch.draw()
        # NOTE(review): push_handlers runs once per frame; pyglet pushes
        # a new handler frame on each call, so the handler stack grows
        # while the app runs -- consider moving this to __init__.
        self.push_handlers(self.text_input.caret)
    def on_click(self, x, y):
        """Mouse has been clicked at given coordinate, (de-)select drawer"""
        drawer = self.organizer.get_drawer(x, y)
        if (drawer is not self.active_drawer):
            self.clear_all()
            self.text_input.clear_text()
            self.activate_drawer(drawer, True)
    def on_enter(self):
        """Enter has been pressed, handle adding and renaming items"""
        text = self.text_input.get_text().strip()
        selected = self.item_list.get_selected()
        self.text_input.clear_text()
        if (text != ''):
            if (self.drawer_selected):
                if (self.renaming):
                    item = self.item_list.items[selected]
                    item.name = text
                    item.lower = text.lower()
                    self.renaming = False
                else:
                    self.active_drawer.add_item(text)
                items = self.active_drawer.get_items()
                self.item_list.set_items(items)
            elif (selected >= 0):
                drawer = self.item_drawers[selected]
                self.activate_drawer(drawer, True)
            else:
                self.clear_all()
        elif (self.active_drawer is not None):
            if (self.drawer_selected):
                if (selected >= 0):
                    self.renaming = not self.renaming
                    if (self.renaming):
                        text = self.item_list.items[selected].name
                        self.text_input.set_text(text)
                        self.text_input.caret.position = len(text)
                else:
                    self.active_drawer.highlight(HIGHLIGHT_MASK)
                    self.drawer_selected = False
            else:
                self.active_drawer.highlight(SELECT_MASK)
                self.drawer_selected = True
                items = self.active_drawer.get_items()
                self.item_list.set_items(items)
    def on_key_press(self, symbol, mod):
        """Clear all input on ESC"""
        if (symbol == key.ESCAPE):
            self.clear_all()
            self.text_input.clear_text()
            self.renaming = False
    def on_motion(self, motion):
        """Handle motion input like up, down, left, right and delete"""
        if (not self.renaming):
            if (self.drawer_selected or self.text != ''):
                selected = self.item_list.get_selected()
                if (motion == key.MOTION_DOWN):
                    self.item_list.select(selected + 1)
                elif (motion == key.MOTION_UP):
                    self.item_list.select(selected - 1)
                elif (motion == key.MOTION_DELETE):
                    if (selected >= 0 and self.active_drawer is not None):
                        del self.active_drawer.get_items()[selected]
                        items = self.active_drawer.get_items()
                        self.item_list.set_items(items)
                        self.item_list.select()
                if (self.text != '' and not self.drawer_selected):
                    new_selected = self.item_list.get_selected()
                    if (selected != new_selected):
                        if (selected >= 0):
                            self.item_drawers[selected].highlight(HIGHLIGHT_MASK)
                        if (new_selected >= 0):
                            self.item_drawers[new_selected].highlight(SELECT_MASK)
            elif (self.active_drawer is None):
                self.activate_drawer(self.get_box(0, 0).subelems[-1])
            else:
                self.move(motion)
    def on_search(self, text):
        """Handle the search of the organizer for items"""
        if (not self.drawer_selected):
            if (self.text != text):
                self.clear_all()
                self.text = text
                if (len(text.strip()) >= FIND_MIN_LENGTH):
                    items = []
                    self.item_drawers = []
                    self.found = self.organizer.find(text.lower().split())
                    for drawer, i in self.found:
                        drawer.highlight(HIGHLIGHT_MASK)
                        items += i
                        self.item_drawers += [drawer] * len(i)
                    self.item_list.set_items(items)
    def clear_all(self):
        """Clear all selections, highlights and text input"""
        for drawer, items in self.found:
            drawer.highlight()
        self.found = []
        self.item_list.select()
        self.item_list.set_items()
        if (self.active_drawer is not None):
            self.active_drawer.highlight()
        self.drawer_selected = False
        self.text = ''
    def move(self, motion):
        """Select drawer using the arrow keys"""
        box = self.active_drawer.box
        drawers = box.subelems
        drawer_index = drawers.index(self.active_drawer)
        if (motion == key.DOWN):
            if (drawer_index < len(drawers) - 1):
                self.activate_drawer(drawers[drawer_index + 1])
            else:
                new_box = self.get_box(box.x, box.y - 1)
                if (new_box is not None):
                    self.activate_drawer(new_box.subelems[0])
        elif (motion == key.UP):
            if (drawer_index > 0):
                self.activate_drawer(drawers[drawer_index - 1])
            else:
                new_box = self.get_box(box.x, box.y + box.h)
                if (new_box is not None):
                    self.activate_drawer(new_box.subelems[-1])
        elif (motion == key.LEFT):
            new_box = self.get_box(box.x - 1, box.y)
            if (new_box is not None):
                index = drawer_index * len(new_box.subelems) // len(drawers)
                self.activate_drawer(new_box.subelems[index])
        elif (motion == key.RIGHT):
            new_box = self.get_box(box.x + box.w, box.y)
            if (new_box is not None):
                index = drawer_index * len(new_box.subelems) // len(drawers)
                self.activate_drawer(new_box.subelems[index])
    def activate_drawer(self, drawer, selected=False):
        """Set the active drawer and highlight it accordingly"""
        # BUG FIX: this condition previously compared against
        # self.activate_drawer (this bound method itself), which a
        # drawer can never be, so the branch ran unconditionally;
        # compare against self.active_drawer instead.
        if (drawer is not self.active_drawer):
            self.clear_all()
            if (self.active_drawer is not None):
                self.active_drawer.highlight()
            else:
                self.text_input.clear_text()
            if (drawer is not None):
                if (selected):
                    drawer.highlight(SELECT_MASK)
                else:
                    drawer.highlight(HIGHLIGHT_MASK)
                self.drawer_selected = selected
                items = drawer.get_items()
                self.item_list.set_items(items)
            else:
                self.text_input.clear_text()
                self.drawer_selected = False
            self.active_drawer = drawer
    def get_box(self, x, y):
        """Return the box at a given coordinate"""
        for box in self.organizer.subelems:
            if (x >= box.x and x <= box.x + box.w - 1
                    and y >= box.y and y <= box.y + box.h - 1):
                return box
        return None
class OrganizerGUI(model.Element):
    """Class of organizer objects with resizeable GUI"""
    def __init__(self, organizer, batch, groups):
        # draw organizer background
        self.rect = Rectangle(
            0, 0, 100, 100, color=BOX_COLOR, batch=batch, group=groups[0])
        # create list of BoxGUI objects from boxes
        boxes_gui = []
        for box in organizer.subelems:
            boxes_gui.append(BoxGUI(
                box.subelems, box.x, box.y, box.w, box.h, batch, groups))
        # initialize parent class with newly created boxes_gui
        super().__init__(boxes_gui)
        # organizer extent in abstract block units (not pixels)
        self.w = organizer.w
        self.h = organizer.h
    def resize(self, window_w, window_h, text_input, item_list):
        """Resize the organizer and its subelements to a given window size"""
        try:
            # check width to height ratio to decide which dimension the
            # organizer should fill
            if (window_w / window_h >= self.w / self.h):
                # scale organizer to fill height
                self.rect.height = window_h * (1 - 2 * WINDOW_MARGIN)
                self.rect.width = self.rect.height * self.w / self.h
                self.rect.x = self.rect.y = window_h * WINDOW_MARGIN
                # calculate text input position and width (to the right
                # of the organizer)
                text_x = self.rect.width + 2 * self.rect.x
                text_y = (window_h * (1 - WINDOW_MARGIN)
                          - text_input.rect.height)
                text_w = max(0, window_w - text_x - self.rect.x)
                list_y = text_y - window_h * LIST_OFFSET
            else:
                # scale organizer to fill width; text input goes below
                self.rect.width = text_w = window_w * (1 - 2 * WINDOW_MARGIN)
                self.rect.height = self.rect.width * self.h / self.w
                self.rect.x = text_x = window_w * WINDOW_MARGIN
                self.rect.y = window_h - self.rect.height - self.rect.x
                text_y = (window_h - self.rect.height - 2 * text_x
                          - text_input.rect.height)
                list_y = text_y - window_w * LIST_OFFSET
            list_h = max(0, list_y - window_h * WINDOW_MARGIN)
            # resize text input and the item list below it
            text_input.resize(text_x, text_y, text_w)
            item_list.resize(text_x, list_y, text_w, list_h)
            # resize all subelements as well; block_size converts from
            # abstract block units to pixels
            block_size = self.rect.width / self.w
            bm = block_size * BOX_MARGIN        # box margin in pixels
            dm = block_size * DRAWER_MARGIN     # drawer margin in pixels
            hh = block_size * HANDLE_HEIGHT     # handle height in pixels
            ht = block_size * HANDLE_THICKNESS  # handle thickness in pixels
            for box in self.subelems:
                box_x = box.x * block_size + self.rect.x
                box_y = box.y * block_size + self.rect.y
                box_w = box.w * block_size
                box_h = box.h * block_size
                box.resize(box_x, box_y, box_w, box_h, bm, dm, hh, ht)
        except ZeroDivisionError:
            # a zero window or organizer dimension makes the ratio math
            # divide by zero; skip this resize pass
            pass
    def get_drawer(self, x, y):
        """Return the drawer corresponding to a given coordinate"""
        for box in self.subelems:
            for drawer in box.subelems:
                if (drawer.is_clicked(x, y)):
                    return drawer
        return None
class BoxGUI(model.Element):
    """A box of drawers with a resizeable GUI representation."""

    def __init__(self, drawers, x, y, w, h, batch, groups):
        # position and extent in abstract block units
        self.x = x
        self.y = y
        self.w = w
        self.h = h
        # wrap every drawer in a DrawerGUI and hand the list to the
        # model.Element base class
        super().__init__(
            [DrawerGUI(d.subelems, self, batch, groups) for d in drawers])

    def resize(self, x, y, w, h, bm, dm, hh, ht):
        """Resize the box and all of its drawers to a given pixel size."""
        count = len(self.subelems)
        slot_h = (h - 2 * bm) / count
        inner_w = w - 2 * (bm + dm)
        inner_h = slot_h - 2 * dm
        inner_x = x + bm + dm
        base_y = y + bm + dm
        # index 0 is placed at the highest y slot (reversed stacking)
        for idx, drawer in enumerate(self.subelems):
            drawer.resize(inner_x, base_y + (count - 1 - idx) * slot_h,
                          inner_w, inner_h, hh, ht)
class DrawerGUI(model.Element):
    """Class of drawer objects with resizeable GUI"""
    def __init__(self, items, box, batch, groups):
        super().__init__(items)
        # back-reference to the owning BoxGUI
        self.box = box
        # drawer face; real geometry is filled in by resize()
        self.rect = Rectangle(
            0, 0, 1, 1, color=DRAWER_COLOR, batch=batch, group=groups[1])
        # handle rendered as TRIANGLE_COUNT raw triangles; vertex
        # positions are filled in by resize()
        self.handle = batch.add(
            TRIANGLE_COUNT * 3, GL_TRIANGLES, groups[2], 'v2f', 'c3B')
        self.handle.colors = HANDLE_COLOR * TRIANGLE_COUNT * 3
    def is_clicked(self, x, y):
        """Test if drawer is clicked"""
        return (x >= self.rect.x and x <= self.rect.x + self.rect.width
                and y >= self.rect.y and y <= self.rect.y + self.rect.height)
    def resize(self, x, y, w, h, hh, ht):
        """Resize the drawer to a given pixel size"""
        self.rect.x = x
        self.rect.y = y
        self.rect.height = h
        self.rect.width = w
        # handle geometry: hw is the handle's total width, ox its left
        # edge (centred under the drawer), hhy how far it hangs below
        # the drawer's bottom edge
        hw = w * HANDLE_WIDTH
        ox = x + (w - hw) / 2
        hhy = hh
        # 18 vertices = 6 triangles around the drawer's bottom edge y;
        # the last two triangles form the strip of thickness ht above it
        self.handle.vertices = [
            ox, y, ox + hh, y, ox + hh, y - hhy,
            ox + hh, y - hhy, ox + hh, y, ox + hw - hh, y,
            ox + hh, y - hhy, ox + hw - hh, y, ox + hw - hh, y - hhy,
            ox + hw, y, ox + hw - hh, y, ox + hw - hh, y - hhy,
            ox, y, ox, y + ht, ox + hw, y,
            ox + hw, y, ox, y + ht, ox + hw, y + ht]
    def highlight(self, color_mask=(0, 0, 0)):
        """Highlight the drawer with a given color mask, remove highlighting
        if no input is given"""
        # add the mask channel-wise onto the base colors
        color_rect = []
        color_handle = []
        for i, c in enumerate(color_mask):
            color_rect.append(DRAWER_COLOR[i] + c)
            color_handle.append(HANDLE_COLOR[i] + c)
        self.rect.color = color_rect
        self.handle.colors = color_handle * TRIANGLE_COUNT * 3
    def add_item(self, name, amount=None):
        """Add an item to the drawer"""
        self.subelems.append(model.Item(name, amount))
    def get_items(self):
        """Return all items of the drawer"""
        return self.subelems
    def find(self, str):
        """Return all items containing str together with the drawer"""
        # NOTE(review): parameter 'str' shadows the builtin; renaming it
        # would change the keyword interface, so it is left as-is.
        items = []
        for item in self.subelems:
            items += item.find(str)
        if (items != []):
            return [(self, items)]
        return []
class TextInput:
"""Text input field"""
def __init__(self, batch, groups,
on_click, on_enter, on_motion, on_search):
# create document
self.document = FormattedDocument(' ')
self.document.set_style(
0, 1, dict(font_name=FONT_NAME,
font_size=FONT_SIZE, color=FONT_COLOR))
# calculate font height and margin
font = self.document.get_font(0)
self.font_height = font.ascent - font.descent
self.margin = self.font_height * TEXT_INPUT_MARGIN
# create text input
self.input = IncrementalTextLayout(
self.document, 100, self.font_height, batch=batch, group=groups[1])
self.input.x = 100
self.input.y = 100
# creating a caret and push it to window handlers
self.caret = Caret(
self.input, FONT_COLOR[:3],
on_click, on_enter, on_motion, on_search)
self.clear_text()
# create background rectangle
self.rect = Rectangle(
0, 0, 100, self.font_height + 2 * self.margin,
color=TEXT_INPUT_COLOR, batch=batch, group=groups[0])
def resize(self, x, y, w):
"""Resize the text input with given coordinates and width"""
self.rect.x = x
self.rect.y = y
self.rect.width = w
self.input.x = x + self.margin
self.input.y = y + self.margin
self.input.width = w - 2 * self.margin
def get_text(self):
"""Return currently displayed text"""
return self.document.text
def set_text(self, text):
"""Set the text to display"""
self.document.text = text
def clear_text(self):
"""Clear displayed text"""
self.document.text = ''
class ItemList:
"""List of items below the text input field drawn to available space"""
def __init__(self, font_height, batch, groups):
self.font_height = font_height
self.margin = font_height * TEXT_INPUT_MARGIN
self.line_height = self.font_height + 2 * self.margin
# create document
self.text_box = Label(
'', FONT_NAME, FONT_SIZE, width=1,
multiline=True, anchor_y = 'top', color=ITEM_FONT_COLOR,
batch=batch, group=groups[2])
# create background rectangle
self.rect = Rectangle(
0, 0, 100, self.line_height,
color=ITEM_LIST_COLOR, batch=batch, group=groups[0])
# create select rectangle
self.select_rect = Rectangle(
0, 0, 100, self.line_height,
color=ITEM_SELECT_COLOR, batch=batch, group=groups[1])
self.select_rect.visible = False
# initialze member variables
self.lines = 0
self.select_num = 0
self.items = []
self.max_h = 1
self.y = 0
def resize(self, x, y, w, max_h):
"""Resize the list content to given coordinates, width and
maximum height"""
self.y = y
self.max_h = max_h
self.__update()
self.rect.x = x
self.rect.width = w
self.select_rect.x = x
self.select_rect.width = w
self.text_box.x = x + self.margin
self.text_box.y = y - self.margin
self.text_box.width = w - 2 * self.margin
def select(self, num=-1):
"""Select an item via list index"""
if (num < 0):
self.select_rect.visible = False
self.__update()
elif (num < self.lines):
self.select_rect.visible = True
self.select_num = num
self.__update()
def get_selected(self):
"""Return index of the selected item, -1 if no item is selected"""
if (self.select_rect.visible):
return self.select_num
return -1
def set_items(self, items=[]):
"""Set list content to a given list of items"""
self.items = items
self.__update()
def __update(self):
"""Update size and list elements"""
self.lines = min(len(self.items), int(self.max_h // self.line_height))
h = self.line_height * self.lines
self.rect.height = h
self.rect.y = self.y - h
self.select_rect.y = self.y - self.line_height * (1 + self.select_num)
self.text_box.text = '\n\n'.join([i.name for i in self.items[:self.lines]])
class Caret(caret.Caret):
"""Custom caret to handle specific events with callbacks"""
def __init__(self, input, color, on_click, on_enter, on_motion, on_search):
super().__init__(input, color=color)
self.prev_x = 0
self.prev_y = 0
self.document = input.document
self.on_click = on_click
self.on_enter = on_enter
self.on_motion = on_motion
self.on_search = on_search
def on_mouse_press(self, x, y, button, modifiers):
"""Handle mouse press"""
if (x != self.prev_x and y != self.prev_y):
self.prev_x = x
self.prev_y = y
self.on_click(x, y)
def on_text(self, text):
"""Handle text input by detecting enter/return events"""
if (text == CHAR_ENTER):
self.on_enter()
else:
super().on_text(text)
self.on_search(self.document.text)
return event.EVENT_HANDLED
def on_text_motion(self, motion, select=False):
"""Handle motion input events"""
super().on_text_motion(motion, select)
self.on_motion(motion)
self.on_search(self.document.text)
return event.EVENT_HANDLED | 0.265785 | 0.155271 |
import os
from math import exp
from parsable import parsable
import pomagma.util
import util
SPECS = {}
SPECS['sk'] = {
'binary_probs': {
'APP': 0.374992,
'COMP': 0.198589,
},
'nullary_weights': {
'B': 1.0,
'C': 1.30428,
'CB': 1.35451,
'CI': 1.74145,
'I': 2.21841,
'Y': 2.2918,
'K': 2.6654,
'S': 2.69459,
# 'S B': 3.5036,
# 'F': 3.72682,
# 'S I': 4.12483,
'W': 4.36313,
# 'W B': 4.3719,
# 'W I': 6.21147,
},
}
SPECS['skj'] = {
'binary_probs': {
'APP': 0.374992,
'COMP': 0.198589,
},
'symmetric_probs': {
'JOIN': 0.0569286,
},
'nullary_weights': {
'B': 1.0,
'C': 1.30428,
'CB': 1.35451,
'CI': 1.74145,
'I': 2.21841,
'Y': 2.2918,
'K': 2.6654,
'S': 2.69459,
'J': 2.81965,
'V': 2.87327,
'BOT': 3.0,
'TOP': 3.0,
# 'S B': 3.5036,
'P': 3.69204,
'F': 3.72682,
# 'S I': 4.12483,
'W': 4.36313,
# 'W B': 4.3719,
# 'W I': 6.21147,
'U': 6.3754,
},
}
SPECS['skja'] = {
'binary_probs': {
'APP': 0.374992,
'COMP': 0.198589,
},
'symmetric_probs': {
'JOIN': 0.0569286,
},
'nullary_weights': {
'B': 1.0,
'C': 1.30428,
'CB': 1.35451,
'CI': 1.74145,
'I': 2.21841,
'Y': 2.2918,
'K': 2.6654,
'S': 2.69459,
'J': 2.81965,
'V': 2.87327,
'BOT': 3.0,
'TOP': 3.0,
'DIV': 3.06752,
# 'S B': 3.5036,
'P': 3.69204,
'F': 3.72682,
# 'S I': 4.12483,
'SEMI': 4.18665,
'W': 4.36313,
'UNIT': 4.3634,
# 'W B': 4.3719,
'A': 5.0,
# 'SECTION': 5.0,
# 'RETRACT': 5.0,
'BOOL': 5.21614,
# 'W I': 6.21147,
'U': 6.3754,
'BOOOL': 7.0,
# 'PROD': 12.0,
# 'SUM': 12.0,
# 'MAYBE': 12.0,
# 'SSET': 12.0,
},
}
SPECS['skrj'] = {
'binary_probs': {
'APP': 0.34,
'COMP': 0.18,
},
'symmetric_probs': {
'JOIN': 0.05,
'RAND': 0.05,
},
'nullary_weights': {
'B': 1.0,
'C': 1.30428,
'CB': 1.35451,
'CI': 1.74145,
'I': 2.21841,
'Y': 2.2918,
'K': 2.6654,
'S': 2.69459,
'J': 2.81965,
'R': 2.81965,
'V': 2.87327,
'BOT': 3.0,
'TOP': 3.0,
# 'DIV': 3.06752,
# 'S B': 3.5036,
'P': 3.69204,
'F': 3.72682,
# 'S I': 4.12483,
# 'SEMI': 4.18665,
'W': 4.36313,
# 'UNIT': 4.3634,
# 'W B': 4.3719,
# 'A': 5.0,
# 'SECTION': 5.0,
# 'RETRACT': 5.0,
# 'BOOL': 5.21614,
# 'W I': 6.21147,
'U': 6.3754,
# 'PROD': 12.0,
# 'SUM': 12.0,
# 'MAYBE': 12.0,
# 'SSET': 12.0,
},
}
@parsable
def make(theory):
"""Bootstrap a language from Johann.
Inputs: theory in ['sk', 'skj', 'skja', 'skrj']
"""
spec = SPECS[theory]
nullary_weights = spec.get('nullary_weights', {})
injective_probs = spec.get('injective_probs', {})
binary_probs = spec.get('binary_probs', {})
symmetric_probs = spec.get('symmetric_probs', {})
compound_prob = (
sum(injective_probs.values()) +
sum(binary_probs.values()) +
sum(symmetric_probs.values()))
assert compound_prob < 1
nullary_prob = 1.0 - compound_prob
nullary_probs = {
key: exp(-val)
for key, val in nullary_weights.iteritems()
}
scale = nullary_prob / sum(nullary_probs.values())
for key in nullary_probs.keys():
nullary_probs[key] *= scale
probs = {
'NULLARY': nullary_probs,
'INJECTIVE': injective_probs,
'BINARY': binary_probs,
'SYMMETRIC': symmetric_probs,
}
for arity, group in probs.items():
if not group:
del probs[arity]
with pomagma.util.chdir(os.path.dirname(os.path.abspath(__file__))):
util.json_dump(probs, '{}.json'.format(theory))
# util.compile('{}.json'.format(theory), '{}.language'.format(theory))
if __name__ == '__main__':
parsable() | src/language/bootstrap.py | import os
from math import exp
from parsable import parsable
import pomagma.util
import util
SPECS = {}
SPECS['sk'] = {
'binary_probs': {
'APP': 0.374992,
'COMP': 0.198589,
},
'nullary_weights': {
'B': 1.0,
'C': 1.30428,
'CB': 1.35451,
'CI': 1.74145,
'I': 2.21841,
'Y': 2.2918,
'K': 2.6654,
'S': 2.69459,
# 'S B': 3.5036,
# 'F': 3.72682,
# 'S I': 4.12483,
'W': 4.36313,
# 'W B': 4.3719,
# 'W I': 6.21147,
},
}
SPECS['skj'] = {
'binary_probs': {
'APP': 0.374992,
'COMP': 0.198589,
},
'symmetric_probs': {
'JOIN': 0.0569286,
},
'nullary_weights': {
'B': 1.0,
'C': 1.30428,
'CB': 1.35451,
'CI': 1.74145,
'I': 2.21841,
'Y': 2.2918,
'K': 2.6654,
'S': 2.69459,
'J': 2.81965,
'V': 2.87327,
'BOT': 3.0,
'TOP': 3.0,
# 'S B': 3.5036,
'P': 3.69204,
'F': 3.72682,
# 'S I': 4.12483,
'W': 4.36313,
# 'W B': 4.3719,
# 'W I': 6.21147,
'U': 6.3754,
},
}
SPECS['skja'] = {
'binary_probs': {
'APP': 0.374992,
'COMP': 0.198589,
},
'symmetric_probs': {
'JOIN': 0.0569286,
},
'nullary_weights': {
'B': 1.0,
'C': 1.30428,
'CB': 1.35451,
'CI': 1.74145,
'I': 2.21841,
'Y': 2.2918,
'K': 2.6654,
'S': 2.69459,
'J': 2.81965,
'V': 2.87327,
'BOT': 3.0,
'TOP': 3.0,
'DIV': 3.06752,
# 'S B': 3.5036,
'P': 3.69204,
'F': 3.72682,
# 'S I': 4.12483,
'SEMI': 4.18665,
'W': 4.36313,
'UNIT': 4.3634,
# 'W B': 4.3719,
'A': 5.0,
# 'SECTION': 5.0,
# 'RETRACT': 5.0,
'BOOL': 5.21614,
# 'W I': 6.21147,
'U': 6.3754,
'BOOOL': 7.0,
# 'PROD': 12.0,
# 'SUM': 12.0,
# 'MAYBE': 12.0,
# 'SSET': 12.0,
},
}
SPECS['skrj'] = {
'binary_probs': {
'APP': 0.34,
'COMP': 0.18,
},
'symmetric_probs': {
'JOIN': 0.05,
'RAND': 0.05,
},
'nullary_weights': {
'B': 1.0,
'C': 1.30428,
'CB': 1.35451,
'CI': 1.74145,
'I': 2.21841,
'Y': 2.2918,
'K': 2.6654,
'S': 2.69459,
'J': 2.81965,
'R': 2.81965,
'V': 2.87327,
'BOT': 3.0,
'TOP': 3.0,
# 'DIV': 3.06752,
# 'S B': 3.5036,
'P': 3.69204,
'F': 3.72682,
# 'S I': 4.12483,
# 'SEMI': 4.18665,
'W': 4.36313,
# 'UNIT': 4.3634,
# 'W B': 4.3719,
# 'A': 5.0,
# 'SECTION': 5.0,
# 'RETRACT': 5.0,
# 'BOOL': 5.21614,
# 'W I': 6.21147,
'U': 6.3754,
# 'PROD': 12.0,
# 'SUM': 12.0,
# 'MAYBE': 12.0,
# 'SSET': 12.0,
},
}
@parsable
def make(theory):
"""Bootstrap a language from Johann.
Inputs: theory in ['sk', 'skj', 'skja', 'skrj']
"""
spec = SPECS[theory]
nullary_weights = spec.get('nullary_weights', {})
injective_probs = spec.get('injective_probs', {})
binary_probs = spec.get('binary_probs', {})
symmetric_probs = spec.get('symmetric_probs', {})
compound_prob = (
sum(injective_probs.values()) +
sum(binary_probs.values()) +
sum(symmetric_probs.values()))
assert compound_prob < 1
nullary_prob = 1.0 - compound_prob
nullary_probs = {
key: exp(-val)
for key, val in nullary_weights.iteritems()
}
scale = nullary_prob / sum(nullary_probs.values())
for key in nullary_probs.keys():
nullary_probs[key] *= scale
probs = {
'NULLARY': nullary_probs,
'INJECTIVE': injective_probs,
'BINARY': binary_probs,
'SYMMETRIC': symmetric_probs,
}
for arity, group in probs.items():
if not group:
del probs[arity]
with pomagma.util.chdir(os.path.dirname(os.path.abspath(__file__))):
util.json_dump(probs, '{}.json'.format(theory))
# util.compile('{}.json'.format(theory), '{}.language'.format(theory))
if __name__ == '__main__':
parsable() | 0.397354 | 0.25799 |
import pandas as pd
def read_csv(path=str, sep=';'):
"""
Leer un archivo csv y los transforma a un dataframe
:param path: dirección del archivo csv
:param sep: separador del archvio
:return: dataframe
"""
# Leemos el archivo con la funcioón read_csv()
df = pd.read_csv(path, sep=sep)
return df
def merge_df(df1, df2, how, on=None):
"""
Combina dos datframes
:param df1: primer dataframe
:param df2: segundo dataframe
:param how: tipo de combinación
:param on: columna a combinar
:return: datafre combinado
"""
# Combinamos con la función merge() de pandas
df = pd.merge(df1, df2, how=how, on=[on])
return df
def denormalized_df(path1=str, path2=str, path3=str, sep=';', how=str, on1=None, on2=None):
"""
Desnormaliza 3 dataframes en uno
:param path1: ruta del primer dataframe
:param path2: ruta del segundo dataframe
:param path3: ruta del tercer dataframe
:param sep: delimitador de los archivos csv
:param how: tipo de combinación
:param on1: columna de combinación primer merge
:param on2: columna de combinación segundo merge
:return: dataframe desnormalizado
"""
# Leemos los csv con nuestro método
df1 = read_csv(path1, sep=sep)
df2 = read_csv(path2, sep=sep)
df3 = read_csv(path3, sep=sep)
# Combinamos los dataframes tracks y albums
tracks_albums = merge_df(df1, df2, how, on1)
# Renombremos la columna por la que combinar
tracks_albums = tracks_albums.rename(columns={'artist_id_x': 'artist_id'})
# Combinamos los dataframes trakcs_albums con artist
result = merge_df(tracks_albums, df3, how, on2)
# Renombremos las columnas a nombres más comprensibles
result = result.rename(columns={'name': 'artist_name', 'name_x': 'track_name',
'name_y': 'album_name', 'popularity': 'artist_popularity',
'popularity_x': 'track_popularity', 'popularity_y': 'album_popularity'})
return result | discover/features/build_features.py | import pandas as pd
def read_csv(path=str, sep=';'):
"""
Leer un archivo csv y los transforma a un dataframe
:param path: dirección del archivo csv
:param sep: separador del archvio
:return: dataframe
"""
# Leemos el archivo con la funcioón read_csv()
df = pd.read_csv(path, sep=sep)
return df
def merge_df(df1, df2, how, on=None):
"""
Combina dos datframes
:param df1: primer dataframe
:param df2: segundo dataframe
:param how: tipo de combinación
:param on: columna a combinar
:return: datafre combinado
"""
# Combinamos con la función merge() de pandas
df = pd.merge(df1, df2, how=how, on=[on])
return df
def denormalized_df(path1=str, path2=str, path3=str, sep=';', how=str, on1=None, on2=None):
"""
Desnormaliza 3 dataframes en uno
:param path1: ruta del primer dataframe
:param path2: ruta del segundo dataframe
:param path3: ruta del tercer dataframe
:param sep: delimitador de los archivos csv
:param how: tipo de combinación
:param on1: columna de combinación primer merge
:param on2: columna de combinación segundo merge
:return: dataframe desnormalizado
"""
# Leemos los csv con nuestro método
df1 = read_csv(path1, sep=sep)
df2 = read_csv(path2, sep=sep)
df3 = read_csv(path3, sep=sep)
# Combinamos los dataframes tracks y albums
tracks_albums = merge_df(df1, df2, how, on1)
# Renombremos la columna por la que combinar
tracks_albums = tracks_albums.rename(columns={'artist_id_x': 'artist_id'})
# Combinamos los dataframes trakcs_albums con artist
result = merge_df(tracks_albums, df3, how, on2)
# Renombremos las columnas a nombres más comprensibles
result = result.rename(columns={'name': 'artist_name', 'name_x': 'track_name',
'name_y': 'album_name', 'popularity': 'artist_popularity',
'popularity_x': 'track_popularity', 'popularity_y': 'album_popularity'})
return result | 0.561575 | 0.603435 |
import cherrypy
import re, json
from flask_restful import reqparse, abort, Api, Resource
class OptionsController(Resource):
def __init__(self):
print("starting Options Controller")
def OPTIONS(self, *args, **kargs):
return ""
class CardsController(Resource):
def __init__(self, cdb=None):
self.cdb = cdb
def get(self):
output=dict()
dbfIdList=self.cdb.get_cards()
dictList=list()
output["result"]= "success"
for dbfId in dbfIdList:
dictList.append(self.cdb.get_card(dbfId))
output["cards"]=dictList
return output
def post(self):
# Get json text
the_body= cherrypy.request.body.read().decode()
the_body= json.loads(the_body)
# Determine new dbfId
newID=max(self.cdb.cards.keys())
newID=newID+1
# Create a new list of attributes
myList=list()
# Determine how what type of card is being added.
myList.append(the_body["type"])
myList.append(the_body["name"])
myList.append(the_body["cost"])
myList.append(the_body["rarity"])
myList.append(the_body["class"])
if(the_body["type"]=="MINION"):
myList.append(the_body["attack"])
myList.append(the_body["health"])
myList.append(the_body["url"])
# Set the card given dbfId and list
self.cdb.set_card(newID, myList)
output={'result':'success', "dbfId" : newID}
return output
def DELETE(self):
self.cdb.movies=dict()
output={'result': 'success'}
return output
class CardsKeyController(Resource):
def __init__(self, cdb=None):
self.cdb = cdb
def get(self, dbfId):
output=dict()
output["card"]=self.cdb.get_card(int(dbfId))
output["result"]="success"
return output
def put(self, dbfId):
# Get json text
the_body= cherrypy.request.body.read().decode()
the_body= json.loads(the_body)
# Create a new list of attributes
myList=list()
# Determine how what type of card is being added.
myList.append(the_body["type"])
myList.append(the_body["name"])
myList.append(the_body["cost"])
myList.append(the_body["rarity"])
myList.append(the_body["class"])
if(the_body["type"]=="MINION"):
myList.append(the_body["attack"])
myList.append(the_body["health"])
myList.append(the_body["url"])
self.cdb.set_card(int(dbfId), myList)
output={'result':'success'}
return output
def delete(self, dbfId):
output=self.cdb.delete_card(int(dbfId))
return output
class MinionsController(Resource):
def __init__(self, cdb=None):
self.cdb= cdb
def get(self):
output=dict()
output["result"]= "success"
output["minions"]=self.cdb.get_minions()
return output
class MinionsAttackController(Resource):
def __init__(self, cdb=None):
self.cdb= cdb
def get(self,RANGE):
output=dict()
output["result"]= "success"
low=str(RANGE[0]) + str(RANGE[1])
high=str(RANGE[2]) + str(RANGE[3])
output["minions"]=self.cdb.get_minions_attackRange(int(low),int(high))
return output
class MinionsHealthController(Resource):
def __init__(self, cdb=None):
self.cdb= cdb
def get(self,RANGE):
output=dict()
output["result"]= "success"
low=str(RANGE[0]) + str(RANGE[1])
high=str(RANGE[2]) + str(RANGE[3])
output["minions"]=self.cdb.get_minions_healthRange(int(low),int(high))
return output
class SpellsController(Resource):
def __init__(self, cdb=None):
self.cdb = cdb
def get(self):
output=dict()
output["result"]= "success"
output["spells"]=self.cdb.get_spells()
return output
class CostController(Resource):
def __init__(self, cdb=None):
self.cdb= cdb
def get(self,RANGE):
output=dict()
output["result"]= "success"
low=str(RANGE[0]) + str(RANGE[1])
high=str(RANGE[2]) + str(RANGE[3])
output["cards"]=self.cdb.get_cards_costRange(int(low),int(high))
return output
class NameController(Resource):
def __init__(self, cdb=None):
self.cdb= cdb
def get(self,NAME):
output=dict()
output["result"]= "success"
output["cards"]=self.cdb.get_cards_name(NAME)
return output
class ClassController(Resource):
def __init__(self, cdb=None):
self.cdb= cdb
def get(self,CLASS):
output=dict()
output["result"]= "success"
output["cards"]=self.cdb.get_cards_class(CLASS)
return output
class RarityController(Resource):
def __init__(self, cdb=None):
self.cdb= cdb
def get(self, RARITY):
output=dict()
output["result"]= "success"
output["cards"]=self.cdb.get_cards_rarity(RARITY)
return output | server/controller.py | import cherrypy
import re, json
from flask_restful import reqparse, abort, Api, Resource
class OptionsController(Resource):
def __init__(self):
print("starting Options Controller")
def OPTIONS(self, *args, **kargs):
return ""
class CardsController(Resource):
def __init__(self, cdb=None):
self.cdb = cdb
def get(self):
output=dict()
dbfIdList=self.cdb.get_cards()
dictList=list()
output["result"]= "success"
for dbfId in dbfIdList:
dictList.append(self.cdb.get_card(dbfId))
output["cards"]=dictList
return output
def post(self):
# Get json text
the_body= cherrypy.request.body.read().decode()
the_body= json.loads(the_body)
# Determine new dbfId
newID=max(self.cdb.cards.keys())
newID=newID+1
# Create a new list of attributes
myList=list()
# Determine how what type of card is being added.
myList.append(the_body["type"])
myList.append(the_body["name"])
myList.append(the_body["cost"])
myList.append(the_body["rarity"])
myList.append(the_body["class"])
if(the_body["type"]=="MINION"):
myList.append(the_body["attack"])
myList.append(the_body["health"])
myList.append(the_body["url"])
# Set the card given dbfId and list
self.cdb.set_card(newID, myList)
output={'result':'success', "dbfId" : newID}
return output
def DELETE(self):
self.cdb.movies=dict()
output={'result': 'success'}
return output
class CardsKeyController(Resource):
def __init__(self, cdb=None):
self.cdb = cdb
def get(self, dbfId):
output=dict()
output["card"]=self.cdb.get_card(int(dbfId))
output["result"]="success"
return output
def put(self, dbfId):
# Get json text
the_body= cherrypy.request.body.read().decode()
the_body= json.loads(the_body)
# Create a new list of attributes
myList=list()
# Determine how what type of card is being added.
myList.append(the_body["type"])
myList.append(the_body["name"])
myList.append(the_body["cost"])
myList.append(the_body["rarity"])
myList.append(the_body["class"])
if(the_body["type"]=="MINION"):
myList.append(the_body["attack"])
myList.append(the_body["health"])
myList.append(the_body["url"])
self.cdb.set_card(int(dbfId), myList)
output={'result':'success'}
return output
def delete(self, dbfId):
output=self.cdb.delete_card(int(dbfId))
return output
class MinionsController(Resource):
def __init__(self, cdb=None):
self.cdb= cdb
def get(self):
output=dict()
output["result"]= "success"
output["minions"]=self.cdb.get_minions()
return output
class MinionsAttackController(Resource):
def __init__(self, cdb=None):
self.cdb= cdb
def get(self,RANGE):
output=dict()
output["result"]= "success"
low=str(RANGE[0]) + str(RANGE[1])
high=str(RANGE[2]) + str(RANGE[3])
output["minions"]=self.cdb.get_minions_attackRange(int(low),int(high))
return output
class MinionsHealthController(Resource):
def __init__(self, cdb=None):
self.cdb= cdb
def get(self,RANGE):
output=dict()
output["result"]= "success"
low=str(RANGE[0]) + str(RANGE[1])
high=str(RANGE[2]) + str(RANGE[3])
output["minions"]=self.cdb.get_minions_healthRange(int(low),int(high))
return output
class SpellsController(Resource):
def __init__(self, cdb=None):
self.cdb = cdb
def get(self):
output=dict()
output["result"]= "success"
output["spells"]=self.cdb.get_spells()
return output
class CostController(Resource):
def __init__(self, cdb=None):
self.cdb= cdb
def get(self,RANGE):
output=dict()
output["result"]= "success"
low=str(RANGE[0]) + str(RANGE[1])
high=str(RANGE[2]) + str(RANGE[3])
output["cards"]=self.cdb.get_cards_costRange(int(low),int(high))
return output
class NameController(Resource):
def __init__(self, cdb=None):
self.cdb= cdb
def get(self,NAME):
output=dict()
output["result"]= "success"
output["cards"]=self.cdb.get_cards_name(NAME)
return output
class ClassController(Resource):
def __init__(self, cdb=None):
self.cdb= cdb
def get(self,CLASS):
output=dict()
output["result"]= "success"
output["cards"]=self.cdb.get_cards_class(CLASS)
return output
class RarityController(Resource):
def __init__(self, cdb=None):
self.cdb= cdb
def get(self, RARITY):
output=dict()
output["result"]= "success"
output["cards"]=self.cdb.get_cards_rarity(RARITY)
return output | 0.220678 | 0.067731 |
# Copyright (c) 2016-2020, <NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of ytranslate nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module containing the Repeat function class."""
import wx
from ytranslate import t
from sharp import Function
class Repeat(Function):
"""Function SharpScript '#repeat'.
This function can be used to repeat a command, once or several
times. It has different syntax:
Repeat the last command:
#repeat 1
Repeat three times the 'look' command:
#repeat look 3
"""
description = "Repeat a command X times"
def run(self, times, command=None):
"""Repeat a command."""
if not self.client:
return
client = self.client
panel = client.factory.panel
if not command:
for last in reversed(panel.extensions["history"].commands[:-1]):
if not last.startswith("#") or last.startswith("##"):
command = last
times = int(times)
if command:
for time in range(times):
self.client.write(command)
def display(self, dialog, times="1", command=""):
"""Display the function's arguments."""
l_times = self.t("times", "Number of times to repeat the command")
l_command = self.t("command", "Command to repeat (leave blank " \
"to send the last command in your history")
# Times
l_times = wx.StaticText(dialog, label=l_times)
t_times = wx.TextCtrl(dialog, value=times)
dialog.times = t_times
dialog.top.Add(l_times)
dialog.top.Add(t_times)
# Command
l_command = wx.StaticText(dialog, label=l_command)
t_command = wx.TextCtrl(dialog, value=command)
dialog.command = t_command
dialog.top.Add(l_command)
dialog.top.Add(t_command)
def complete(self, dialog):
"""The user pressed 'ok' in the dialog."""
times = dialog.times.GetValue()
empty_times = self.t("empty_times",
"You didn't specify the number of times you want " \
"this command to repeat. Specify 1 at least.")
invalid_times = self.t("invalid_times",
"The number of times you specified isn't a valid number.")
if not times:
wx.MessageBox(empty_times, t("ui.alert.error"),
wx.OK | wx.ICON_ERROR)
dialog.times.SetFocus()
return None
if not times.isdigit():
wx.MessageBox(invalid_times, t("ui.alert.error"),
wx.OK | wx.ICON_ERROR)
dialog.times.SetFocus()
return None
command = dialog.command.GetValue()
arguments = [times]
if command:
arguments.append(command)
return tuple(arguments) | src/sharp/functions/repeat.py | # Copyright (c) 2016-2020, <NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of ytranslate nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module containing the Repeat function class."""
import wx
from ytranslate import t
from sharp import Function
class Repeat(Function):
"""Function SharpScript '#repeat'.
This function can be used to repeat a command, once or several
times. It has different syntax:
Repeat the last command:
#repeat 1
Repeat three times the 'look' command:
#repeat look 3
"""
description = "Repeat a command X times"
def run(self, times, command=None):
"""Repeat a command."""
if not self.client:
return
client = self.client
panel = client.factory.panel
if not command:
for last in reversed(panel.extensions["history"].commands[:-1]):
if not last.startswith("#") or last.startswith("##"):
command = last
times = int(times)
if command:
for time in range(times):
self.client.write(command)
def display(self, dialog, times="1", command=""):
"""Display the function's arguments."""
l_times = self.t("times", "Number of times to repeat the command")
l_command = self.t("command", "Command to repeat (leave blank " \
"to send the last command in your history")
# Times
l_times = wx.StaticText(dialog, label=l_times)
t_times = wx.TextCtrl(dialog, value=times)
dialog.times = t_times
dialog.top.Add(l_times)
dialog.top.Add(t_times)
# Command
l_command = wx.StaticText(dialog, label=l_command)
t_command = wx.TextCtrl(dialog, value=command)
dialog.command = t_command
dialog.top.Add(l_command)
dialog.top.Add(t_command)
def complete(self, dialog):
"""The user pressed 'ok' in the dialog."""
times = dialog.times.GetValue()
empty_times = self.t("empty_times",
"You didn't specify the number of times you want " \
"this command to repeat. Specify 1 at least.")
invalid_times = self.t("invalid_times",
"The number of times you specified isn't a valid number.")
if not times:
wx.MessageBox(empty_times, t("ui.alert.error"),
wx.OK | wx.ICON_ERROR)
dialog.times.SetFocus()
return None
if not times.isdigit():
wx.MessageBox(invalid_times, t("ui.alert.error"),
wx.OK | wx.ICON_ERROR)
dialog.times.SetFocus()
return None
command = dialog.command.GetValue()
arguments = [times]
if command:
arguments.append(command)
return tuple(arguments) | 0.661486 | 0.059757 |
import os
import sys
import copy
import pytz
import datetime
import traceback
import numpy as np
from django.conf import settings
from django.core import management
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from dining_room import constants
from dining_room.models import User, StudyManagement, StudyAction
from dining_room.models.domain import State, Transition, Suggestions
from dining_room.views import get_next_state_json
# Create the Command class
class Command(BaseCommand):
"""
Fetch the actions data from dropbox, and replay the suggestions the user
might've seen
"""
help = "Load actions data from dropbox replay the suggestions a user might've seen. Run `sync_actions` before this command"
V1_NOISE_USAGE_CONDITIONS = {
User.StudyConditions.BASELINE,
User.StudyConditions.DX_100,
User.StudyConditions.AX_100,
User.StudyConditions.DXAX_100,
}
    def __init__(self, *args, **kwargs):
        """Initialize the command.

        No extra state is set up here; this simply delegates to
        ``BaseCommand.__init__`` and could be removed without effect.
        """
        super().__init__(*args, **kwargs)
def add_arguments(self, parser):
parser.add_argument('-a', '--all', action='store_true', help='Whether to simulate suggestions for all users, or only those with valid / relevant data')
def handle(self, *args, **options):
verbosity = options.get('verbosity')
if options['all']:
users = User.objects.filter(is_staff=False).exclude(studyaction=None)
else:
users = User.objects.filter(
Q(is_staff=False) &
Q(date_survey_completed__isnull=False) &
(Q(ignore_data_reason__isnull=True) | Q(ignore_data_reason=''))
)
self.stdout.write(f"Simulating experiences of {users.count()} users")
# Create a sim user
try:
sim_user = User.objects.get(username='sim_user')
except Exception as e:
sim_user = User.objects.create_user('sim_user', 'sim_user')
sim_user.save()
# Iterate through the users and get their data
for uidx, user in enumerate(users.order_by('study_condition')):
# Assert that the user has actions
assert user.studyaction_set.count() > 0, f"User {user} is missing actions"
# Simulate the user's experience
self._simulate_user(user)
# Print a status message
if verbosity > 0:
self.stdout.write(f"({uidx+1}/{len(users)}) Simulated suggestions for {user}")
# Delete the sim user
sim_user.refresh_from_db()
sim_user.delete()
# Print a completion message
self.stdout.write(self.style.SUCCESS("Suggestions synchronized!"))
def _simulate_user(self, user):
"""Simulate the user's experience"""
actions = user.studyaction_set.order_by('start_timestamp')
# Get the simulated user and reset them
sim_user = User.objects.get(username='sim_user')
sim_user.study_condition = user.study_condition
sim_user.start_condition = user.start_condition
sim_user.rng_state = Suggestions.DEFAULT_RNG_SEED
sim_user.number_state_requests = -1
sim_user.save()
# For each state visited by the user, simulate the suggestions
prev_action = None
start_state = State(user.start_condition.split('.'))
schk = Suggestions() # Just a means to get the optimal alternatives
for action in actions:
next_state = State(eval(action.start_state))
# Get the next state and verify it
if sim_user.study_condition in Command.V1_NOISE_USAGE_CONDITIONS:
# Send an empty user, then update the json with the simulated
# suggestions from the old way of doing things
json = get_next_state_json(start_state.tuple, prev_action, None)
json.update(self._v1_noise_get_suggestions_json(next_state, sim_user))
else:
json = get_next_state_json(start_state.tuple, prev_action, sim_user)
assert tuple(json['server_state_tuple']) == next_state.tuple, \
f"({start_state.tuple}, {prev_action}): {json['server_state_tuple']} != {next_state.tuple}"
# Add the suggestions
if user.show_dx_suggestions:
action.dx_suggestions = json['dx_suggestions']
if user.show_ax_suggestions:
action.ax_suggestions = json['ax_suggestions']
# Add a boolean if the data was corrupted
if user.noise_level > 0:
if (
user.show_dx_suggestions and
schk.ordered_diagnoses(start_state, prev_action)[0] not in action.dx_suggestions
):
action.corrupted_dx_suggestions = True
if (
user.show_ax_suggestions and
schk.optimal_action(start_state, prev_action)[0] not in action.ax_suggestions
):
action.corrupted_ax_suggestions = True
# Save the action
action.save()
prev_action = action.action
start_state = State(eval(action.next_state))
# Simulate the last suggestions call
if sim_user.study_condition in Command.V1_NOISE_USAGE_CONDITIONS:
json = get_next_state_json(start_state.tuple, prev_action, None)
json.update(self._v1_noise_get_suggestions_json(start_state, sim_user))
else:
json = get_next_state_json(start_state.tuple, prev_action, sim_user)
# Check the RNG state
try:
assert sim_user.rng_state == user.rng_state, \
f"Mismatch end state... FML: {user}... {user.rng_state} != {sim_user.rng_state}"
except Exception as e:
self.stdout.write(self.style.ERROR(f"{e}"))
def _v1_noise_get_suggestions_json(self, state, user):
"""Use the old style of garnering suggestions from the server"""
suggestions_json = {}
user.refresh_from_db()
suggestions_provider = Suggestions(user)
def add_noise_and_pad(suggestions, alternatives, pad):
"""The old definition of the noise + pad function"""
pad = pad or len(suggestions)
should_corrupt = (suggestions_provider.rng.uniform() < user.noise_level)
if should_corrupt:
suggestions = suggestions_provider.rng.choice(alternatives, size=len(suggestions), replace=False).tolist()
alternatives = set(alternatives) - set(suggestions)
while len(suggestions) < pad:
suggestions.append(suggestions_provider.rng.choice(list(sorted(alternatives))))
alternatives.discard(suggestions[-1])
return suggestions
# First add the DX suggestions
if user.show_dx_suggestions:
suggestions = suggestions_provider.ordered_diagnoses(state, None, accumulate=True)
else:
suggestions = []
alternatives = [x for x in constants.DIAGNOSES.keys() if x not in suggestions]
suggestions = add_noise_and_pad(
suggestions[:user.study_management.max_dx_suggestions],
alternatives,
user.study_management.max_dx_suggestions if user.study_management.pad_suggestions else None
)
suggestions_json['dx_suggestions'] = suggestions
# Update the RNG
user.rng_state = Suggestions.get_next_rng_seed(suggestions_provider.rng)
user.save()
# Second the AX suggestions
if user.show_ax_suggestions:
suggestions = suggestions_provider.optimal_action(state, None)
else:
suggestions = []
valid_actions = state.get_valid_actions()
alternatives = [k for k, v in valid_actions.items() if (k not in suggestions and v)]
suggestions = add_noise_and_pad(
suggestions[:user.study_management.max_ax_suggestions],
alternatives,
user.study_management.max_ax_suggestions if user.study_management.pad_suggestions else None
)
suggestions_json['ax_suggestions'] = suggestions
# Update the RNG
user.rng_state = Suggestions.get_next_rng_seed(suggestions_provider.rng)
user.save()
# Return the JSON
return suggestions_json | website/management/commands/sync_suggestions.py |
import os
import sys
import copy
import pytz
import datetime
import traceback
import numpy as np
from django.conf import settings
from django.core import management
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from dining_room import constants
from dining_room.models import User, StudyManagement, StudyAction
from dining_room.models.domain import State, Transition, Suggestions
from dining_room.views import get_next_state_json
# Create the Command class
class Command(BaseCommand):
"""
Fetch the actions data from dropbox, and replay the suggestions the user
might've seen
"""
help = "Load actions data from dropbox replay the suggestions a user might've seen. Run `sync_actions` before this command"
V1_NOISE_USAGE_CONDITIONS = {
User.StudyConditions.BASELINE,
User.StudyConditions.DX_100,
User.StudyConditions.AX_100,
User.StudyConditions.DXAX_100,
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def add_arguments(self, parser):
parser.add_argument('-a', '--all', action='store_true', help='Whether to simulate suggestions for all users, or only those with valid / relevant data')
def handle(self, *args, **options):
verbosity = options.get('verbosity')
if options['all']:
users = User.objects.filter(is_staff=False).exclude(studyaction=None)
else:
users = User.objects.filter(
Q(is_staff=False) &
Q(date_survey_completed__isnull=False) &
(Q(ignore_data_reason__isnull=True) | Q(ignore_data_reason=''))
)
self.stdout.write(f"Simulating experiences of {users.count()} users")
# Create a sim user
try:
sim_user = User.objects.get(username='sim_user')
except Exception as e:
sim_user = User.objects.create_user('sim_user', 'sim_user')
sim_user.save()
# Iterate through the users and get their data
for uidx, user in enumerate(users.order_by('study_condition')):
# Assert that the user has actions
assert user.studyaction_set.count() > 0, f"User {user} is missing actions"
# Simulate the user's experience
self._simulate_user(user)
# Print a status message
if verbosity > 0:
self.stdout.write(f"({uidx+1}/{len(users)}) Simulated suggestions for {user}")
# Delete the sim user
sim_user.refresh_from_db()
sim_user.delete()
# Print a completion message
self.stdout.write(self.style.SUCCESS("Suggestions synchronized!"))
def _simulate_user(self, user):
"""Simulate the user's experience"""
actions = user.studyaction_set.order_by('start_timestamp')
# Get the simulated user and reset them
sim_user = User.objects.get(username='sim_user')
sim_user.study_condition = user.study_condition
sim_user.start_condition = user.start_condition
sim_user.rng_state = Suggestions.DEFAULT_RNG_SEED
sim_user.number_state_requests = -1
sim_user.save()
# For each state visited by the user, simulate the suggestions
prev_action = None
start_state = State(user.start_condition.split('.'))
schk = Suggestions() # Just a means to get the optimal alternatives
for action in actions:
next_state = State(eval(action.start_state))
# Get the next state and verify it
if sim_user.study_condition in Command.V1_NOISE_USAGE_CONDITIONS:
# Send an empty user, then update the json with the simulated
# suggestions from the old way of doing things
json = get_next_state_json(start_state.tuple, prev_action, None)
json.update(self._v1_noise_get_suggestions_json(next_state, sim_user))
else:
json = get_next_state_json(start_state.tuple, prev_action, sim_user)
assert tuple(json['server_state_tuple']) == next_state.tuple, \
f"({start_state.tuple}, {prev_action}): {json['server_state_tuple']} != {next_state.tuple}"
# Add the suggestions
if user.show_dx_suggestions:
action.dx_suggestions = json['dx_suggestions']
if user.show_ax_suggestions:
action.ax_suggestions = json['ax_suggestions']
# Add a boolean if the data was corrupted
if user.noise_level > 0:
if (
user.show_dx_suggestions and
schk.ordered_diagnoses(start_state, prev_action)[0] not in action.dx_suggestions
):
action.corrupted_dx_suggestions = True
if (
user.show_ax_suggestions and
schk.optimal_action(start_state, prev_action)[0] not in action.ax_suggestions
):
action.corrupted_ax_suggestions = True
# Save the action
action.save()
prev_action = action.action
start_state = State(eval(action.next_state))
# Simulate the last suggestions call
if sim_user.study_condition in Command.V1_NOISE_USAGE_CONDITIONS:
json = get_next_state_json(start_state.tuple, prev_action, None)
json.update(self._v1_noise_get_suggestions_json(start_state, sim_user))
else:
json = get_next_state_json(start_state.tuple, prev_action, sim_user)
# Check the RNG state
try:
assert sim_user.rng_state == user.rng_state, \
f"Mismatch end state... FML: {user}... {user.rng_state} != {sim_user.rng_state}"
except Exception as e:
self.stdout.write(self.style.ERROR(f"{e}"))
def _v1_noise_get_suggestions_json(self, state, user):
"""Use the old style of garnering suggestions from the server"""
suggestions_json = {}
user.refresh_from_db()
suggestions_provider = Suggestions(user)
def add_noise_and_pad(suggestions, alternatives, pad):
"""The old definition of the noise + pad function"""
pad = pad or len(suggestions)
should_corrupt = (suggestions_provider.rng.uniform() < user.noise_level)
if should_corrupt:
suggestions = suggestions_provider.rng.choice(alternatives, size=len(suggestions), replace=False).tolist()
alternatives = set(alternatives) - set(suggestions)
while len(suggestions) < pad:
suggestions.append(suggestions_provider.rng.choice(list(sorted(alternatives))))
alternatives.discard(suggestions[-1])
return suggestions
# First add the DX suggestions
if user.show_dx_suggestions:
suggestions = suggestions_provider.ordered_diagnoses(state, None, accumulate=True)
else:
suggestions = []
alternatives = [x for x in constants.DIAGNOSES.keys() if x not in suggestions]
suggestions = add_noise_and_pad(
suggestions[:user.study_management.max_dx_suggestions],
alternatives,
user.study_management.max_dx_suggestions if user.study_management.pad_suggestions else None
)
suggestions_json['dx_suggestions'] = suggestions
# Update the RNG
user.rng_state = Suggestions.get_next_rng_seed(suggestions_provider.rng)
user.save()
# Second the AX suggestions
if user.show_ax_suggestions:
suggestions = suggestions_provider.optimal_action(state, None)
else:
suggestions = []
valid_actions = state.get_valid_actions()
alternatives = [k for k, v in valid_actions.items() if (k not in suggestions and v)]
suggestions = add_noise_and_pad(
suggestions[:user.study_management.max_ax_suggestions],
alternatives,
user.study_management.max_ax_suggestions if user.study_management.pad_suggestions else None
)
suggestions_json['ax_suggestions'] = suggestions
# Update the RNG
user.rng_state = Suggestions.get_next_rng_seed(suggestions_provider.rng)
user.save()
# Return the JSON
return suggestions_json
import discord
class Bot(discord.Client):
    """A discord.Client bound to a single guild and a single text channel."""

    def __init__(self, guild_name, channel_id, logger=None, **kwargs):
        """
        Bot for use with a single Discord Guild and Text Channel
        :param guild_name: The name of the Discord Guild to join
        :type guild_name: string
        :param channel_id: The ID of the channel to be active in
        :type channel_id: int
        :param logger: An optional logger used to record bot activity via
            its ``log_message`` method (completing the original truncated doc)
        """
        super().__init__(**kwargs)
        self._main_guild_name = guild_name
        self._main_channel_id = channel_id
        self._logger = logger
        self.main_guild = None      # resolved in on_ready()
        self.main_channel = None    # resolved in on_ready()
        self._on_ready_msg = None
        self._message_handler = None

    def set_ready_message(self, msg):
        """ Set a message to be sent to the channel on connection
        :param msg: Message to be sent
        :type msg: string
        """
        self._on_ready_msg = msg

    def set_message_handler(self, handler):
        """ Set a handler to respond to Discord messages in the main channel.
        The handler must be an async function which takes a discord.Message.
        :param handler: The handler for message
        :type handler: An async function (discord.Message)
        """
        self._message_handler = handler

    async def on_ready(self):
        self._log_info("Connection Established")
        # Locate the configured guild among those the bot has joined
        for guild in self.guilds:
            if guild.name == self._main_guild_name:
                self.main_guild = guild
                break
        # NOTE(review): if the guild is not found, main_guild stays None and
        # the next line raises AttributeError — confirm that is intended.
        self.main_channel = self.main_guild.get_channel(self._main_channel_id)
        if msg := self._on_ready_msg:
            await self.send_message(msg)

    async def on_message(self, message):
        # Only react to the main channel, and ignore the bot's own messages
        if message.channel.id == self._main_channel_id and not message.author.id == self.user.id:
            self._log_info("Received message in main channel. ID: {}".format(message.id))
            # Robustness fix: previously raised TypeError when no message
            # handler had been registered via set_message_handler().
            if self._message_handler is not None:
                await self._message_handler(message)

    async def send_message(self, message):
        """ Send a message to the main channel
        :param message: The text to send to the server
        :type message: string
        """
        self._log_info("Sending message: {}".format(message))
        await self.main_channel.send(message)

    def _log_info(self, info):
        # Logging is optional; silently drop messages when no logger is set.
        # (Extraction residue removed from the final line.)
        if self._logger:
            self._logger.log_message(info)
class Bot(discord.Client):
def __init__(self, guild_name, channel_id, logger=None, **kwargs):
"""
Bot for use with a single Discord Guild and Text Channel
:param guild_name: The name of the Discord Guild to join
:type guild_name: string
:param channel_id: The ID of the channel to be active in
:type channel_id: int
:param logger: An optional logger to
"""
super().__init__(**kwargs)
self._main_guild_name = guild_name
self._main_channel_id = channel_id
self._logger = logger
self.main_guild = None
self.main_channel = None
self._on_ready_msg = None
self._message_handler = None
def set_ready_message(self, msg):
""" Set a message to be sent to the channel on connection
:param msg: Message to be sent
:type msg: string
"""
self._on_ready_msg = msg
def set_message_handler(self, handler):
""" Set a handler to respond to Discord messages in the main channel.
The handler must be an async function which takes a discord.Message.
:param handler: The handler for message
:type handler: An async function (discord.Message)
"""
self._message_handler = handler
async def on_ready(self):
self._log_info("Connection Established")
for guild in self.guilds:
if guild.name == self._main_guild_name:
self.main_guild = guild
break
self.main_channel = self.main_guild.get_channel(self._main_channel_id)
if msg := self._on_ready_msg:
await self.send_message(msg)
async def on_message(self, message):
if message.channel.id == self._main_channel_id and not message.author.id == self.user.id:
self._log_info("Received message in main channel. ID: {}".format(message.id))
await self._message_handler(message)
async def send_message(self, message):
""" Send a message to the main channel
:param message: The text to send to the server
:type message: string
"""
self._log_info("Sending message: {}".format(message))
await self.main_channel.send(message)
def _log_info(self, info):
if self._logger:
self._logger.log_message(info)
import argparse
import os.path
import sys
sys.path.append(os.path.dirname(__file__))
from signing import notarize
from signing.config import CodeSignConfig
def main():
    """Parse arguments, submit each file for notarization, wait for the
    results, and (unless --no-staple was given) staple the tickets."""
    parser = argparse.ArgumentParser(
        description='Notarize and staple an application binary or archive.')
    parser.add_argument(
        '--user',
        '-u',
        required=True,
        help='The username to access the Apple notary service.')
    parser.add_argument(
        '--password',
        '-p',
        required=True,
        help='The password or password reference (e.g. @keychain, see '
        '`xcrun altool -h`) to access the Apple notary service.')
    parser.add_argument(
        '--asc-provider',
        # TYPO FIX: the original help text read "associated with 'with
        # multiple" (duplicated word).
        help='The ASC provider string to be used as the `--asc-provider` '
        'argument to `xcrun altool`, to be used when --user is associated '
        'with multiple Apple developer teams. See `xcrun altool -h`. Run '
        '`iTMSTransporter -m provider -account_type itunes_connect -v off -u '
        'USERNAME -p PASSWORD` to list valid providers.')
    parser.add_argument(
        '--no-staple',
        action='store_true',
        help='Wait for notarization results, but do not staple after '
        'successful notarization.')
    parser.add_argument(
        '--bundle-id',
        required=False,
        help='Force the use of the specified bundle ID when uploading for '
        'notarization, rather than the one from a config.')
    parser.add_argument(
        'file',
        nargs='+',
        help='The file(s) to have notarized. Each file can be a zipped .app '
        'bundle, a .dmg, or a .pkg. `xcrun altool -h` for information on '
        'supported formats.')
    args = parser.parse_args()

    config_class = CodeSignConfig
    if args.bundle_id:
        # Override the config's bundle ID with the one from the command line
        class OverrideBundleIDConfig(CodeSignConfig):

            @property
            def base_bundle_id(self):
                return args.bundle_id

        config_class = OverrideBundleIDConfig

    # The identity/keychain positional fields are unused for notarization
    config = config_class('notused', 'notused', args.user, args.password,
                          args.asc_provider)

    # Submit everything first, then wait for all results together
    uuids = []
    for path in args.file:
        print('Submitting {} for notarization'.format(path))
        uuid = notarize.submit(path, config)
        uuids.append(uuid)

    for uuid in notarize.wait_for_results(uuids, config):
        print('Notarization results acquired for request {}'.format(uuid))

    if not args.no_staple:
        for path in args.file:
            print('Stapling notarization ticket for {}'.format(path))
            notarize.staple(path)


if __name__ == '__main__':
    # (Extraction residue removed from this line.)
    main()
import argparse
import os.path
import sys
sys.path.append(os.path.dirname(__file__))
from signing import notarize
from signing.config import CodeSignConfig
def main():
parser = argparse.ArgumentParser(
description='Notarize and staple an application binary or archive.')
parser.add_argument(
'--user',
'-u',
required=True,
help='The username to access the Apple notary service.')
parser.add_argument(
'--password',
'-p',
required=True,
help='The password or password reference (e.g. @keychain, see '
'`xcrun altool -h`) to access the Apple notary service.')
parser.add_argument(
'--asc-provider',
help='The ASC provider string to be used as the `--asc-provider` '
'argument to `xcrun altool`, to be used when --user is associated with '
'with multiple Apple developer teams. See `xcrun altool -h`. Run '
'`iTMSTransporter -m provider -account_type itunes_connect -v off -u '
'USERNAME -p PASSWORD` to list valid providers.')
parser.add_argument(
'--no-staple',
action='store_true',
help='Wait for notarization results, but do not staple after '
'successful notarization.')
parser.add_argument(
'--bundle-id',
required=False,
help='Force the use of the specified bundle ID when uploading for '
'notarization, rather than the one from a config.')
parser.add_argument(
'file',
nargs='+',
help='The file(s) to have notarized. Each file can be a zipped .app '
'bundle, a .dmg, or a .pkg. `xcrun altool -h` for information on '
'supported formats.')
args = parser.parse_args()
config_class = CodeSignConfig
if args.bundle_id:
class OverrideBundleIDConfig(CodeSignConfig):
@property
def base_bundle_id(self):
return args.bundle_id
config_class = OverrideBundleIDConfig
config = config_class('notused', 'notused', args.user, args.password,
args.asc_provider)
uuids = []
for path in args.file:
print('Submitting {} for notarization'.format(path))
uuid = notarize.submit(path, config)
uuids.append(uuid)
for uuid in notarize.wait_for_results(uuids, config):
print('Notarization results acquired for request {}'.format(uuid))
if not args.no_staple:
for path in args.file:
print('Stapling notarization ticket for {}'.format(path))
notarize.staple(path)
if __name__ == '__main__':
main()
import hashlib
import json
import numpy as np
from .utils import FeatureData
class CMSRecordTest0(FeatureData):
    """Aggregate popularity record (test variant 0) for one CMS file.

    Accumulates total wrapper CPU time and request count for a file, plus a
    'good'/'bad' classification label.
    """

    def __init__(self, data: ('CMSDataPopularity', dict) = None):
        """Build the record from a CMSDataPopularity object or its dict form.

        FIX: the default was a shared mutable ``{}`` (mutable-default
        pitfall); it is now None and normalized inside — same behavior.
        """
        super(CMSRecordTest0, self).__init__()
        if data is None:
            data = {}
        self.__tot_wrap_cpu = 0.0
        self.__tot_requests = 1
        # NOTE(review): this hit counter is not persisted by
        # to_dict/__setstate__/load — confirm that is intentional.
        self.__num_next_window_hits = 0
        self.__class = "UNKNOWN"
        if isinstance(data, CMSDataPopularity):
            for feature, value in data.features:
                self.add_feature(feature, value)
            self.__tot_wrap_cpu += float(data.WrapCPU)
        else:
            if 'features' in data:
                for feature, value in data['features'].items():
                    self.add_feature(feature, value)
            if 'data' in data:
                self.__tot_wrap_cpu += float(data['data']['WrapCPU'])

    def to_dict(self) -> dict:
        """Return a JSON-serializable snapshot of this record."""
        return {
            'tot_wrap_cpu': self.__tot_wrap_cpu,
            'tot_requests': self.__tot_requests,
            'features': self._features,
            'class': self.__class,
            'id': self._id
        }

    def set_class(self, class_: str):
        """Label this record 'good' or 'bad'; returns self for chaining."""
        assert class_ == "good" or class_ == "bad", "Class could be 'good' or 'bad'"
        self.__class = class_
        return self

    def __setstate__(self, state):
        """Make object loaded by pickle."""
        self.__tot_wrap_cpu = state['tot_wrap_cpu']
        self.__tot_requests = state['tot_requests']
        self._features = state['features']
        self.__class = state['class']
        self._id = state['id']
        return self

    def load(self, data: dict) -> 'CMSRecordTest0':
        """Populate this record from a dict produced by to_dict."""
        self.__tot_wrap_cpu = data['tot_wrap_cpu']
        self.__tot_requests = data['tot_requests']
        self._features = data['features']
        self.__class = data['class']
        self._id = data['id']
        return self

    @property
    def record_class(self) -> str:
        """The 'good' / 'bad' / 'UNKNOWN' classification label."""
        return self.__class

    @property
    def tot_wrap_cpu(self) -> float:
        return self.__tot_wrap_cpu

    @property
    def tot_requests(self) -> int:
        return self.__tot_requests

    @property
    def score(self) -> float:
        """Average wrapper CPU time per request."""
        return float(self.__tot_wrap_cpu / self.__tot_requests)

    def inc_hits(self):
        """Record one hit in the next time window; returns self."""
        self.__num_next_window_hits += 1
        return self

    def __add__(self, other: 'CMSRecordTest0'):
        # NOTE: mutates self in place (identical to __iadd__) rather than
        # returning a new object — preserved because callers may rely on it.
        self.__tot_wrap_cpu += other.tot_wrap_cpu
        self.__tot_requests += other.tot_requests
        return self

    def __iadd__(self, other: 'CMSRecordTest0'):
        self.__tot_wrap_cpu += other.tot_wrap_cpu
        self.__tot_requests += other.tot_requests
        return self
class CMSDataPopularity(FeatureData):
    """Feature view of one CMS popularity record.

    Extracts (store_type, campaign, process, file_type) from the record's
    logical file name and validates them against the given filters.
    """

    def __init__(self, data: dict,
                 filters=[
                     ('store_type', lambda elm: elm == "data" or elm == "mc")
                 ]
                 ):
        super(CMSDataPopularity, self).__init__()
        self.__data = data
        self.__id = None
        self.__valid = False
        self.__filters = filters
        self.__extract_features()

    def __setstate__(self, state):
        """Make object loaded by pickle."""
        self.__data = state['data']
        self._features = state['features']
        self.__id = state['id']
        self.__valid = state['valid']
        return self

    def to_dict(self) -> dict:
        """Return a JSON-serializable snapshot of this object."""
        return {
            'data': self.__data,
            'features': self._features,
            'id': self.__id,
            'valid': self.__valid,
        }

    def __bool__(self):
        # An instance is truthy only when it passed the filters
        return self.__valid

    def __getattr__(self, name):
        # Fall back to the raw record's fields for unknown attributes
        if name in self.__data:
            return self.__data[name]
        else:
            raise AttributeError("Attribute '{}' not found...".format(name))

    def __extract_features(self):
        """Parse the logical file name and validate against the filters."""
        cur_file = self.__data['FileName']
        if cur_file != "unknown":
            logical_file_name = [elm for elm in cur_file.split("/") if elm]
            try:
                # Raises ValueError (caught below) when the path is too short
                store_type, campaign, process, file_type = logical_file_name[1:5]
                self.add_feature('store_type', store_type)
                self.add_feature('campaign', campaign)
                self.add_feature('process', process)
                self.add_feature('file_type', file_type)
                # Check validity.  BUG FIX: this read `self.feature[name]`,
                # which does not exist (features are stored in `_features`,
                # as the sibling classes read directly) and would raise
                # AttributeError via __getattr__ at runtime.
                self.__valid = all(
                    [fun(self._features[name]) for name, fun in self.__filters]
                )
                if self.__valid:
                    # NOTE(review): __gen_id is not defined in this class and
                    # name mangling prevents inheriting a base-class `__gen_id`
                    # as-is — confirm where it is meant to come from.
                    self.__gen_id()
            except ValueError as err:
                print(
                    "Cannot extract features from '{}'".format(cur_file))
                print(err)
                pass
class CMSDataPopularityRaw(FeatureData):
    """Raw feature capture of one CMS popularity record.

    Copies a fixed list of fields from the source dict when the record
    passes the given filters (by default: analysis-type jobs only).
    """

    def __init__(
        self,
        data: dict = None,
        feature_list=[
            'FileName',
            'Application',
            'ApplicationVersion',
            'BlockId',
            'BlockName',
            'ExeCPU',
            'FileType',
            'FinishedTimeStamp',
            'GenericType',
            'GridName',
            'InputCollection',
            'InputSE',
            'IsParentFile',
            'JobExecExitCode',
            'JobExecExitTimeStamp',
            'JobId',
            'JobMonitorId',
            'JobType',
            'LumiRanges',
            'NCores',
            'NEventsPerJob',
            'NEvProc',
            'NEvReq',
            'NewGenericType',
            'NewType',
            'NTaskSteps',
            'ProtocolUsed',
            'SchedulerJobIdV2',
            'SchedulerName',
            'SiteName',
            'StartedRunningTimeStamp',
            'StrippedBlocks',
            'StrippedFiles',
            'SubmissionTool',
            'SuccessFlag',
            'TargetCE',
            'TaskId',
            'TaskJobId',
            'TaskMonitorId',
            'Type',
            'UserId',
            'ValidityFlag',
            'WNHostName',
            'WrapCPU',
            'WrapWC',
        ],
        filters=[
            ('Type', lambda elm: elm == "analysis")
        ]
    ):
        """Capture `feature_list` fields from `data` if it passes `filters`.

        FIX: the mutable default ``data={}`` is now None (same falsy
        behavior), and ``__id`` is always initialized so that to_dict()
        cannot raise AttributeError on an empty-constructed instance.
        """
        super(CMSDataPopularityRaw, self).__init__()
        self.__id = None
        self.__valid = False
        if data:
            # The first listed feature (FileName) doubles as the record id
            self.__id = data[feature_list[0]]
            self.__valid = all(
                [fun(data[name]) for name, fun in filters]
            )
            if self.__valid:
                for feature in feature_list:
                    self.add_feature(feature, data[feature])

    def __setstate__(self, state) -> 'CMSDataPopularityRaw':
        """Make object loaded by pickle."""
        self._features = state['features']
        self.__id = state['id']
        self.__valid = state['valid']
        return self

    def to_dict(self) -> dict:
        """Return a JSON-serializable snapshot of this object."""
        return {
            'features': self._features,
            'id': self.__id,
            'valid': self.__valid
        }

    def loads(self, input_string) -> 'CMSDataPopularityRaw':
        """Populate this object from a JSON string produced from to_dict."""
        data = json.loads(input_string)
        self._features = data['features']
        self.__id = data['id']
        self.__valid = data['valid']
        return self

    @property
    def valid(self) -> bool:
        """Whether the source record passed all filters."""
        return self.__valid

    def __bool__(self) -> bool:
        return self.valid

    def __getattr__(self, name):
        # Fall back to the captured features for unknown attributes.
        # (Extraction residue removed from the final line.)
        if name in self._features:
            return self._features[name]
        else:
            raise AttributeError("Attribute '{}' not found...".format(name))
import json
import numpy as np
from .utils import FeatureData
class CMSRecordTest0(FeatureData):
def __init__(self, data: ('CMSDataPopularity', dict) = {}):
super(CMSRecordTest0, self).__init__()
self.__tot_wrap_cpu = 0.0
self.__tot_requests = 1
self.__num_next_window_hits = 0
self.__class = "UNKNOWN"
if isinstance(data, CMSDataPopularity):
for feature, value in data.features:
self.add_feature(feature, value)
self.__tot_wrap_cpu += float(data.WrapCPU)
else:
if 'features' in data:
for feature, value in data['features'].items():
self.add_feature(feature, value)
if 'data' in data:
self.__tot_wrap_cpu += float(data['data']['WrapCPU'])
def to_dict(self) -> dict:
return {
'tot_wrap_cpu': self.__tot_wrap_cpu,
'tot_requests': self.__tot_requests,
'features': self._features,
'class': self.__class,
'id': self._id
}
def set_class(self, class_: str):
assert class_ == "good" or class_ == "bad", "Class could be 'good' or 'bad'"
self.__class = class_
return self
def __setstate__(self, state):
"""Make object loaded by pickle."""
self.__tot_wrap_cpu = state['tot_wrap_cpu']
self.__tot_requests = state['tot_requests']
self._features = state['features']
self.__class = state['class']
self._id = state['id']
return self
def load(self, data: dict) -> 'CMSRecordTest0':
self.__tot_wrap_cpu = data['tot_wrap_cpu']
self.__tot_requests = data['tot_requests']
self._features = data['features']
self.__class = data['class']
self._id = data['id']
return self
@property
def record_class(self) -> str:
return self.__class
@property
def tot_wrap_cpu(self) -> float:
return self.__tot_wrap_cpu
@property
def tot_requests(self) -> int:
return self.__tot_requests
@property
def score(self) -> float:
return float(self.__tot_wrap_cpu / self.__tot_requests)
def inc_hits(self):
self.__num_next_window_hits += 1
return self
def __add__(self, other: 'CMSRecordTest0'):
self.__tot_wrap_cpu += other.tot_wrap_cpu
self.__tot_requests += other.tot_requests
return self
def __iadd__(self, other: 'CMSRecordTest0'):
self.__tot_wrap_cpu += other.tot_wrap_cpu
self.__tot_requests += other.tot_requests
return self
class CMSDataPopularity(FeatureData):
def __init__(self, data: dict,
filters=[
('store_type', lambda elm: elm == "data" or elm == "mc")
]
):
super(CMSDataPopularity, self).__init__()
self.__data = data
self.__id = None
self.__valid = False
self.__filters = filters
self.__extract_features()
def __setstate__(self, state):
"""Make object loaded by pickle."""
self.__data = state['data']
self._features = state['features']
self.__id = state['id']
self.__valid = state['valid']
return self
def to_dict(self) -> dict:
return {
'data': self.__data,
'features': self._features,
'id': self.__id,
'valid': self.__valid,
}
def __bool__(self):
return self.__valid
def __getattr__(self, name):
if name in self.__data:
return self.__data[name]
else:
raise AttributeError("Attribute '{}' not found...".format(name))
def __extract_features(self):
cur_file = self.__data['FileName']
if cur_file != "unknown":
logical_file_name = [elm for elm in cur_file.split("/") if elm]
try:
store_type, campaign, process, file_type = logical_file_name[1:5]
self.add_feature('store_type', store_type)
self.add_feature('campaign', campaign)
self.add_feature('process', process)
self.add_feature('file_type', file_type)
# Check validity
self.__valid = all(
[fun(self.feature[name]) for name, fun in self.__filters]
)
if self.__valid:
self.__gen_id()
except ValueError as err:
print(
"Cannot extract features from '{}'".format(cur_file))
print(err)
pass
class CMSDataPopularityRaw(FeatureData):
def __init__(
self,
data: dict = {},
feature_list=[
'FileName',
'Application',
'ApplicationVersion',
'BlockId',
'BlockName',
'ExeCPU',
'FileType',
'FinishedTimeStamp',
'GenericType',
'GridName',
'InputCollection',
'InputSE',
'IsParentFile',
'JobExecExitCode',
'JobExecExitTimeStamp',
'JobId',
'JobMonitorId',
'JobType',
'LumiRanges',
'NCores',
'NEventsPerJob',
'NEvProc',
'NEvReq',
'NewGenericType',
'NewType',
'NTaskSteps',
'ProtocolUsed',
'SchedulerJobIdV2',
'SchedulerName',
'SiteName',
'StartedRunningTimeStamp',
'StrippedBlocks',
'StrippedFiles',
'SubmissionTool',
'SuccessFlag',
'TargetCE',
'TaskId',
'TaskJobId',
'TaskMonitorId',
'Type',
'UserId',
'ValidityFlag',
'WNHostName',
'WrapCPU',
'WrapWC',
],
filters=[
('Type', lambda elm: elm == "analysis")
]
):
super(CMSDataPopularityRaw, self).__init__()
self.__valid = False
if data:
self.__id = data[feature_list[0]]
self.__valid = all(
[fun(data[name]) for name, fun in filters]
)
if self.__valid:
for feature in feature_list:
self.add_feature(feature, data[feature])
def __setstate__(self, state) -> 'CMSDataPopularityRaw':
"""Make object loaded by pickle."""
self._features = state['features']
self.__id = state['id']
self.__valid = state['valid']
return self
def to_dict(self) -> dict:
return {
'features': self._features,
'id': self.__id,
'valid': self.__valid
}
def loads(self, input_string) -> 'CMSDataPopularityRaw':
data = json.loads(input_string)
self._features = data['features']
self.__id = data['id']
self.__valid = data['valid']
return self
@property
def valid(self) -> bool:
return self.__valid
def __bool__(self) -> bool:
return self.valid
def __getattr__(self, name):
if name in self._features:
return self._features[name]
else:
raise AttributeError("Attribute '{}' not found...".format(name))
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
rest3client is a requests-based library providing simple methods to enable consumption of HTTP REST APIs.
The library further abstracts the underlying requests calls providing HTTP verb equivalent methods for GET, POST, PATCH, PUT and DELETE. The library includes a RESTclient class that implements a consistent approach for processing request responses, extracting error messages from responses, and providing standard headers to request calls. Enabling the consumer to focus on their business logic and less on the complexites of setting up and processing the requests repsonses.
A subclass inheriting RESTclient can override the base methods providing further customization and flexibility. The library supports most popular authentication schemes; including no-auth, basic auth, api-key and token-based.
"""
from pybuilder.core import use_plugin
from pybuilder.core import init
from pybuilder.core import Author
from pybuilder.core import task
from pybuilder.pluginhelper.external_command import ExternalCommandBuilder
# Activate the pybuilder plugins this build relies on.
use_plugin('python.core')
use_plugin('python.unittest')
use_plugin('python.install_dependencies')
use_plugin('python.flake8')
use_plugin('python.coverage')
use_plugin('python.distutils')
use_plugin('filter_resources')
# Project metadata consumed by the distutils plugin when packaging.
name = 'rest3client'
authors = [
    Author('<NAME>', '<EMAIL>')
]
summary = 'A Python class providing primitive methods for enabling consumption of REST APIs'
url = 'https://github.com/soda480/rest3client'
version = '0.0.5'
# Tasks run when `pyb` is invoked with no arguments.
default_task = [
    'clean',
    'analyze',
    'cyclomatic_complexity',
    'package'
]
license = 'Apache License, Version 2.0'
# Reuse the module docstring as the long package description.
description = __doc__
@init
def set_properties(project):
    """Configure unit-test, coverage, flake8, resource-filter and distutils settings."""
    # Scalar build properties, applied in a single pass.
    scalar_properties = {
        'unittest_module_glob': 'test_*.py',
        'coverage_break_build': False,
        'flake8_max_line_length': 120,
        'flake8_verbose_output': True,
        'flake8_break_build': True,
        'flake8_include_scripts': True,
        'flake8_include_test_sources': True,
        'flake8_ignore': 'E501, W503, F401',
    }
    for property_name, property_value in scalar_properties.items():
        project.set_property(property_name, property_value)
    # Substitute ${...} placeholders in the package's resource files.
    project.get_property('filter_resources_glob').extend(['**/rest3client/*'])
    # Dependencies come from the requirements files rather than being listed here.
    project.build_depends_on_requirements('requirements-build.txt')
    project.depends_on_requirements('requirements.txt')
    project.set_property('distutils_classifiers', [
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Environment :: Other Environment',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.6',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: System :: Networking',
        'Topic :: System :: Systems Administration'
    ])
@task('cyclomatic_complexity', description='calculates and publishes cyclomatic complexity')
def cyclomatic_complexity(project, logger):
    """Run radon over the production sources and log the cyclomatic complexity.

    Logs each per-function radon line at debug level and the trailing
    average-complexity summary at info level. Failures are logged, never raised,
    so this task cannot break the build.
    """
    try:
        command = ExternalCommandBuilder('radon', project)
        command.use_argument('cc')
        command.use_argument('-a')
        result = command.run_on_production_source_files(logger)
        if len(result.error_report_lines) > 0:
            logger.error('Errors while running radon, see {0}'.format(result.error_report_file))
        # Bail out early when radon produced no report at all.
        if not result.report_lines:
            return
        for line in result.report_lines[:-1]:
            logger.debug(line.strip())
        # The last line of radon's '-a' output is the average complexity summary.
        average_complexity_line = result.report_lines[-1].strip()
        logger.info(average_complexity_line)
    except Exception as exception:
        # Use the build logger (not print) so the failure lands in the build output
        # alongside every other message this task emits.
        logger.error('Unable to execute cyclomatic complexity due to ERROR: {}'.format(exception))
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
rest3client is a requests-based library providing simple methods to enable consumption of HTTP REST APIs.
The library further abstracts the underlying requests calls providing HTTP verb equivalent methods for GET, POST, PATCH, PUT and DELETE. The library includes a RESTclient class that implements a consistent approach for processing request responses, extracting error messages from responses, and providing standard headers to request calls. This enables the consumer to focus on their business logic and less on the complexities of setting up and processing the request responses.
A subclass inheriting RESTclient can override the base methods providing further customization and flexibility. The library supports most popular authentication schemes; including no-auth, basic auth, api-key and token-based.
"""
from pybuilder.core import use_plugin
from pybuilder.core import init
from pybuilder.core import Author
from pybuilder.core import task
from pybuilder.pluginhelper.external_command import ExternalCommandBuilder
# Activate the pybuilder plugins this build relies on.
use_plugin('python.core')
use_plugin('python.unittest')
use_plugin('python.install_dependencies')
use_plugin('python.flake8')
use_plugin('python.coverage')
use_plugin('python.distutils')
use_plugin('filter_resources')
# Project metadata consumed by the distutils plugin when packaging.
name = 'rest3client'
authors = [
    Author('<NAME>', '<EMAIL>')
]
summary = 'A Python class providing primitive methods for enabling consumption of REST APIs'
url = 'https://github.com/soda480/rest3client'
version = '0.0.5'
# Tasks run when `pyb` is invoked with no arguments.
default_task = [
    'clean',
    'analyze',
    'cyclomatic_complexity',
    'package'
]
license = 'Apache License, Version 2.0'
# Reuse the module docstring as the long package description.
description = __doc__
@init
def set_properties(project):
    """Configure unit-test, coverage, flake8, resource-filter and distutils settings."""
    # Scalar build properties, applied in a single pass.
    scalar_properties = {
        'unittest_module_glob': 'test_*.py',
        'coverage_break_build': False,
        'flake8_max_line_length': 120,
        'flake8_verbose_output': True,
        'flake8_break_build': True,
        'flake8_include_scripts': True,
        'flake8_include_test_sources': True,
        'flake8_ignore': 'E501, W503, F401',
    }
    for property_name, property_value in scalar_properties.items():
        project.set_property(property_name, property_value)
    # Substitute ${...} placeholders in the package's resource files.
    project.get_property('filter_resources_glob').extend(['**/rest3client/*'])
    # Dependencies come from the requirements files rather than being listed here.
    project.build_depends_on_requirements('requirements-build.txt')
    project.depends_on_requirements('requirements.txt')
    project.set_property('distutils_classifiers', [
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Environment :: Other Environment',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.6',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: System :: Networking',
        'Topic :: System :: Systems Administration'
    ])
@task('cyclomatic_complexity', description='calculates and publishes cyclomatic complexity')
def cyclomatic_complexity(project, logger):
    """Run radon over the production sources and log the cyclomatic complexity.

    Logs each per-function radon line at debug level and the trailing
    average-complexity summary at info level. Failures are logged, never raised,
    so this task cannot break the build.
    """
    try:
        command = ExternalCommandBuilder('radon', project)
        command.use_argument('cc')
        command.use_argument('-a')
        result = command.run_on_production_source_files(logger)
        if len(result.error_report_lines) > 0:
            logger.error('Errors while running radon, see {0}'.format(result.error_report_file))
        # Bail out early when radon produced no report at all.
        if not result.report_lines:
            return
        for line in result.report_lines[:-1]:
            logger.debug(line.strip())
        # The last line of radon's '-a' output is the average complexity summary.
        average_complexity_line = result.report_lines[-1].strip()
        logger.info(average_complexity_line)
    except Exception as exception:
        # Use the build logger (not print) so the failure lands in the build output
        # alongside every other message this task emits.
        logger.error('Unable to execute cyclomatic complexity due to ERROR: {}'.format(exception))
class Stack(object):
    """A simple LIFO stack backed by a Python list."""

    def __init__(self):
        """Create an empty stack."""
        self.items = []

    def pushFromList(self, list):
        """Push every element of *list* so that list[0] ends up on top.

        :param list: a list whose items are pushed in reverse order
        """
        for element in reversed(list):
            self.push(element)

    def isEmpty(self):
        """Return True when the stack holds no items."""
        return not self.items

    def push(self, item):
        """Put *item* on top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the top item."""
        return self.items.pop()

    def peek(self, posi=0):
        """Return an item without removing it.

        :param posi: 0 for the top item, 1 for the one just below it, ...
        :returns: the requested item
        """
        return self.items[-1 - posi]

    def __len__(self):
        return len(self.items)

    def __str__(self):
        return str(self.items) + " -> "

    def __add__(self, addList):
        # Concatenation yields a plain list (bottom-to-top) plus addList.
        return self.items + addList
def flatten_list(a, result=None):
    """Flatten a nested list into *result* (a fresh list when not supplied).

    >>> flatten_list([ [1, 2, [3, 4] ], [5, 6], 7])
    [1, 2, 3, 4, 5, 6, 7]
    """
    if result is None:
        result = []
    for element in a:
        if not isinstance(element, list):
            result.append(element)
        else:
            flatten_list(element, result)
    return result
def first_elem(ll):
    """Return the very first atomic element of arbitrarily nested lists/strings.

    :param ll: a scalar, or nested lists/strings
    :returns: the first element

    >>> first_elem(1)
    1
    >>> first_elem([[["ab", 2], [3, 4]], [5, 6]])
    'a'
    """
    if not hasattr(ll, '__contains__'):
        # Not a container: ll is already the element we want.
        return ll
    if type(ll) == str and len(ll) == 1:
        # A single character stops the descent (avoids infinite recursion on str).
        return ll[0]
    return first_elem(ll[0])
def last_elem(ll):
    """Return the very last atomic element of arbitrarily nested lists/strings.

    :param ll: a scalar, or nested lists/strings
    :returns: the last element

    >>> last_elem(1)
    1
    >>> last_elem([[["ab", 2], [3, 4]], [5, 6]])
    6
    """
    if not hasattr(ll, '__contains__'):
        # Not a container: ll is already the element we want.
        return ll
    if type(ll) == str and len(ll) == 1:
        # A single character stops the descent (avoids infinite recursion on str).
        return ll[-1]
    return last_elem(ll[-1])
def expand_list(list_list):
    """Expand a list containing sub-lists into several flat lists.

    Output row i takes element min(i, len(sub)-1) from every sub-list, so a
    short sub-list keeps repeating its last element.

    >>> expand_list([1,2,[3,4],5,[6,7,8]])
    [[1, 2, 3, 5, 6], [1, 2, 4, 5, 7], [1, 2, 4, 5, 8]]
    >>> expand_list([1,2,4,5,6,7,8])
    [[1, 2, 4, 5, 6, 7, 8]]
    """
    inner_lists = [element for element in list_list if type(element) == list]
    try:
        row_count = max([len(inner) for inner in inner_lists])
        rows = [list_list.copy() for _ in range(row_count)]
        for (row_index, row) in enumerate(rows):
            for (col_index, cell) in enumerate(row):
                if type(cell) == list:
                    # Reuse the sub-list's last element once it is exhausted.
                    rows[row_index][col_index] = cell[min(row_index, len(cell) - 1)]
    except ValueError:
        # max() over an empty sequence: no sub-list present, nothing to expand.
        rows = [list_list]
    return rows
def add_in_dict(dict1, dict2):
    """Merge two dictionaries, adding the values of keys present in both.

    :param dict1: first dictionary
    :param dict2: second dictionary
    :returns: merged dictionary; shared keys hold dict1[k] + dict2[k]

    >>> add_in_dict({'a':1, 'b':2}, {'a':3, 'b': 4}) == {'a': 4, 'b': 6}
    True
    """
    merged = dict(dict1)
    for key, value in dict2.items():
        try:
            # In-place add mirrors the original `+=` semantics for mutables.
            merged[key] += value
        except KeyError:
            merged[key] = value
    return merged
def remove_in_dict(d, value=0):
    """Return a copy of *d* without the keys mapped to *value*.

    :param d: the dictionary
    :param value: value whose entries are dropped (default 0)
    :returns: new dictionary without the unwanted value

    >>> remove_in_dict({'b': 1, 'a': 0}) == {'b': 1}
    True
    """
    return {key: kept for (key, kept) in d.items() if kept != value}
def convolution_dict(D1, D2, op = lambda x,y:x*y,\
        op_key = lambda x,y: x + y, \
        commutative = True, op_twice = lambda x,y: x + y):
    """Convolve two dictionaries: combine every key pair and every value pair.

    :param D1: first dictionary
    :param D2: second dictionary
    :param op: operation applied to the two values
    :param op_key: operation applied to the two keys
    :param commutative: treat op_key(a, b) and op_key(b, a) as the same key
    :param op_twice: operation applied when a combined key is produced twice
    :returns: the convolved dictionary

    >>> convolution_dict({"a": 1, "b":3}, {"a":2, "b":4}) == {"aa":2, "ab":10, "bb":12}
    True
    """
    result = {}
    for key_one in sorted(D1.keys()):
        for key_two in sorted(D2.keys()):
            value = op(D1[key_one], D2[key_two])
            forward = op_key(key_one, key_two)
            if forward in result:
                result[forward] = op_twice(result[forward], value)
                continue
            backward = op_key(key_two, key_one)
            if commutative and backward in result:
                # Fold into the already-present mirrored key.
                result[backward] = op_twice(result[backward], value)
            else:
                result[forward] = value
    return result
if __name__ == '__main__':
    # Run the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
# -----------------------------
# Settings for 'vim'
# vim:set autoindent expandtab tabstop=4 shiftwidth=4:
# cursor: 16 del
class Stack(object):
    """A simple LIFO stack backed by a Python list."""

    def __init__(self):
        """Create an empty stack."""
        self.items = []

    def pushFromList(self, list):
        """Push every element of *list* so that list[0] ends up on top.

        :param list: a list whose items are pushed in reverse order
        """
        for element in reversed(list):
            self.push(element)

    def isEmpty(self):
        """Return True when the stack holds no items."""
        return not self.items

    def push(self, item):
        """Put *item* on top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the top item."""
        return self.items.pop()

    def peek(self, posi=0):
        """Return an item without removing it.

        :param posi: 0 for the top item, 1 for the one just below it, ...
        :returns: the requested item
        """
        return self.items[-1 - posi]

    def __len__(self):
        return len(self.items)

    def __str__(self):
        return str(self.items) + " -> "

    def __add__(self, addList):
        # Concatenation yields a plain list (bottom-to-top) plus addList.
        return self.items + addList
def flatten_list(a, result=None):
    """Flatten a nested list into *result* (a fresh list when not supplied).

    >>> flatten_list([ [1, 2, [3, 4] ], [5, 6], 7])
    [1, 2, 3, 4, 5, 6, 7]
    """
    if result is None:
        result = []
    for element in a:
        if not isinstance(element, list):
            result.append(element)
        else:
            flatten_list(element, result)
    return result
def first_elem(ll):
    """Return the very first atomic element of arbitrarily nested lists/strings.

    :param ll: a scalar, or nested lists/strings
    :returns: the first element

    >>> first_elem(1)
    1
    >>> first_elem([[["ab", 2], [3, 4]], [5, 6]])
    'a'
    """
    if not hasattr(ll, '__contains__'):
        # Not a container: ll is already the element we want.
        return ll
    if type(ll) == str and len(ll) == 1:
        # A single character stops the descent (avoids infinite recursion on str).
        return ll[0]
    return first_elem(ll[0])
def last_elem(ll):
    """Return the very last atomic element of arbitrarily nested lists/strings.

    :param ll: a scalar, or nested lists/strings
    :returns: the last element

    >>> last_elem(1)
    1
    >>> last_elem([[["ab", 2], [3, 4]], [5, 6]])
    6
    """
    if not hasattr(ll, '__contains__'):
        # Not a container: ll is already the element we want.
        return ll
    if type(ll) == str and len(ll) == 1:
        # A single character stops the descent (avoids infinite recursion on str).
        return ll[-1]
    return last_elem(ll[-1])
def expand_list(list_list):
    """Expand a list containing sub-lists into several flat lists.

    Output row i takes element min(i, len(sub)-1) from every sub-list, so a
    short sub-list keeps repeating its last element.

    >>> expand_list([1,2,[3,4],5,[6,7,8]])
    [[1, 2, 3, 5, 6], [1, 2, 4, 5, 7], [1, 2, 4, 5, 8]]
    >>> expand_list([1,2,4,5,6,7,8])
    [[1, 2, 4, 5, 6, 7, 8]]
    """
    inner_lists = [element for element in list_list if type(element) == list]
    try:
        row_count = max([len(inner) for inner in inner_lists])
        rows = [list_list.copy() for _ in range(row_count)]
        for (row_index, row) in enumerate(rows):
            for (col_index, cell) in enumerate(row):
                if type(cell) == list:
                    # Reuse the sub-list's last element once it is exhausted.
                    rows[row_index][col_index] = cell[min(row_index, len(cell) - 1)]
    except ValueError:
        # max() over an empty sequence: no sub-list present, nothing to expand.
        rows = [list_list]
    return rows
def add_in_dict(dict1, dict2):
    """Merge two dictionaries, adding the values of keys present in both.

    :param dict1: first dictionary
    :param dict2: second dictionary
    :returns: merged dictionary; shared keys hold dict1[k] + dict2[k]

    >>> add_in_dict({'a':1, 'b':2}, {'a':3, 'b': 4}) == {'a': 4, 'b': 6}
    True
    """
    merged = dict(dict1)
    for key, value in dict2.items():
        try:
            # In-place add mirrors the original `+=` semantics for mutables.
            merged[key] += value
        except KeyError:
            merged[key] = value
    return merged
def remove_in_dict(d, value=0):
    """Return a copy of *d* without the keys mapped to *value*.

    :param d: the dictionary
    :param value: value whose entries are dropped (default 0)
    :returns: new dictionary without the unwanted value

    >>> remove_in_dict({'b': 1, 'a': 0}) == {'b': 1}
    True
    """
    return {key: kept for (key, kept) in d.items() if kept != value}
def convolution_dict(D1, D2, op = lambda x,y:x*y,\
        op_key = lambda x,y: x + y, \
        commutative = True, op_twice = lambda x,y: x + y):
    """Convolve two dictionaries: combine every key pair and every value pair.

    :param D1: first dictionary
    :param D2: second dictionary
    :param op: operation applied to the two values
    :param op_key: operation applied to the two keys
    :param commutative: treat op_key(a, b) and op_key(b, a) as the same key
    :param op_twice: operation applied when a combined key is produced twice
    :returns: the convolved dictionary

    >>> convolution_dict({"a": 1, "b":3}, {"a":2, "b":4}) == {"aa":2, "ab":10, "bb":12}
    True
    """
    result = {}
    for key_one in sorted(D1.keys()):
        for key_two in sorted(D2.keys()):
            value = op(D1[key_one], D2[key_two])
            forward = op_key(key_one, key_two)
            if forward in result:
                result[forward] = op_twice(result[forward], value)
                continue
            backward = op_key(key_two, key_one)
            if commutative and backward in result:
                # Fold into the already-present mirrored key.
                result[backward] = op_twice(result[backward], value)
            else:
                result[forward] = value
    return result
if __name__ == '__main__':
    # Run the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
# -----------------------------
# Settings for 'vim'
# vim:set autoindent expandtab tabstop=4 shiftwidth=4:
# cursor: 16 del
""" Main menu """
import sys
import os
import datetime
import asyncio
import curses
import npyscreen
from error_handler import error_handler
from main_menu_list import MainMenuList
from widgets.net_interfaces_widget import NetInterfacesWidget
# Title shown across the UI; the APP_NAME env var overrides the default brand.
APP_NAME = "{} ASR".format(os.environ.get("APP_NAME", "Speechmatics"))
# pylint: disable=too-many-ancestors
# pylint: disable=too-many-instance-attributes
class MainMenu(npyscreen.ActionFormV2):
    """ Main menu form """
    def __init__(self, *args, **kwargs):
        super(MainMenu, self).__init__(*args, **kwargs)
        # Placeholder shown until the first ping attempt completes.
        self.ping_message = "?????"
        # UTC timestamp of the last successful API ping; None until one succeeds.
        self.last_successful_ping = None
    @error_handler("API exception")
    def populate_status(self):
        """ Fill in the status widget with version, license and disk information """
        # Placeholder values in case the API calls below fail or are slow.
        self.status_bar.entry_widget.values = ["????", ]
        version = self.management_api.about()
        sm_license = self.licensing_api.get_license()
        free_space = self.management_api.get_free_storage()
        self.status_bar.entry_widget.values = [
            "{:<16}{}".format("Version", version.version),
            "{:<16}{}".format("License status", sm_license.status),
            "{:<16}{}".format("License code", sm_license.license_code),
            "{:<16}{}".format("Licensed", sm_license.licensed)
        ]
        for entry in free_space.items:
            self.status_bar.entry_widget.values.append(
                "{:<16}{} -> {}".format("Disk free ", entry.path, entry.bytes)
            )
    @staticmethod
    def call_in_background(target, *, loop=None, executor=None):
        """Schedules and starts target callable as a background task
        If not given, *loop* defaults to the current thread's event loop
        If not given, *executor* defaults to the loop's default executor
        Returns the scheduled task.
        """
        if loop is None:
            loop = asyncio.get_event_loop()
        if callable(target):
            return loop.run_in_executor(executor, target)
        raise TypeError("target must be a callable, not {!r}".format(type(target)))
    def set_ping_message(self):
        """ Ping the API and set the ping message describing the result """
        try:
            if self.last_successful_ping:
                since_last_ping = (datetime.datetime.utcnow() - self.last_successful_ping).total_seconds()
                # Show progress dots between real pings.
                self.ping_message += "."
                # Throttle: only ping the API once every 10 seconds.
                if since_last_ping < 10:
                    return
            self.management_api.about()
            self.ping_message = "API responding"
            self.last_successful_ping = datetime.datetime.utcnow()
        except Exception:  # pylint: disable=broad-except
            if self.last_successful_ping:
                since_good_ping = (datetime.datetime.utcnow() - self.last_successful_ping).total_seconds()
                self.ping_message = "API not responding for {}s".format(int(since_good_ping))
            else:
                # BUGFIX: this previously *called* the string attribute
                # (self.ping_message(...)), raising TypeError; it must assign.
                self.ping_message = "No responses from API since start"
    def update_safe_status(self):
        """
        Update those parts of the status which don't depend on the remote end working.
        Because we know these operations can be done safely without throwing up an error,
        they can be done repeatedly in a loop.
        """
        now_utc = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).replace(microsecond=0).isoformat()
        # Ping in a background executor so a slow API never blocks the UI loop.
        self.call_in_background(self.set_ping_message)
        self.system_status_bar.entry_widget.values = [
            "{:<15}: {}".format("System time (UTC)", now_utc),
            self.ping_message
        ]
        self.network_status_bar.entry_widget.update()
        self.system_status_bar.entry_widget.update()
    def h_refresh(self, data):  # pylint: disable=unused-argument
        """
        Refresh the main menu display.
        Try to avoid calling this too often as the npyscreen docs warn against it.
        """
        self.populate_status()
        self.display()
        self.wg_options.update()
    def create(self):
        """
        Create the main menu, initialize the swagger clients
        """
        # The member is added dynamically
        columns = curses.COLS  # pylint: disable=no-member
        self.management_api = self.parentApp.management_api
        self.licensing_api = self.parentApp.licensing_api
        self.add_handlers({curses.KEY_F5: self.h_refresh})
        centred_title = ("{:^" + str(columns) + "}").format(APP_NAME)
        self.title = self.add(npyscreen.FixedText, name=APP_NAME, value=centred_title, editable=False, relx=1, rely=1,
                              width=int(columns))
        self.wg_options = self.add(MainMenuList, name="main_menu", max_width=12, rely=2)
        self.system_status_bar = self.add(npyscreen.BoxTitle, name="System status", editable=False, rely=2, relx=14,
                                          max_height=4)
        self.network_status_bar = self.add(NetInterfacesWidget, name="Network status", editable=False, rely=6, relx=14,
                                           max_height=8)
        self.status_bar = self.add(npyscreen.BoxTitle, name="ASR status", editable=False, relx=14, rely=14,
                                   max_height=8)
        self.nextrely += 1
    def beforeEditing(self):  # pylint: disable=invalid-name
        """ Called after the screen is created but before editing is allowed """
        self.populate_status()
    def on_ok(self):
        """ Quit app on OK """
        sys.exit(0)
    def on_cancel(self):
        """ Quit app on cancel (TODO: remove cancel button from main screen only) """
        sys.exit(0)
    def while_waiting(self):
        """ Run on a timer to update the status """
        self.update_safe_status()
class Reboot(npyscreen.ActionPopup):
    """ Pop-up menu defining the reboot confirmation screen """
    @error_handler("API Error (management_api)")
    def reboot(self):
        """
        Reboot the appliance via the management API.
        If remote host, use api, if localhost, use real system function (TODO)
        """
        self.parentApp.management_api.hard_reboot()
    def create(self):
        """ Initialize the popup """
        self.add(npyscreen.MultiLine, values=["Choose OK to reboot the system."], editable=False)
    def on_ok(self):
        """ OK selected, reboot the system """
        self.reboot()
        # Return to the previous form after triggering the reboot.
        self.parentApp.switchFormPrevious()
    def on_cancel(self):
        """ Cancel selected, return to previous form """
        self.parentApp.switchFormPrevious()
class Shutdown(npyscreen.ActionPopup):
    """ Popup menu defining the shutdown confirmation screen """
    @error_handler("API Error (management_api)")
    def shutdown(self):
        """
        Shutdown the system using the API or the local shutdown command
        """
        self.parentApp.management_api.hard_shutdown()
    def create(self):
        """ Initialize the confirmation popup """
        self.add(npyscreen.MultiLine, values=["Choose OK to shut down the system."], editable=False)
    def on_ok(self):
        """ OK selected: shut down the system, then return to the previous form """
        self.shutdown()
        self.parentApp.switchFormPrevious()
    def on_cancel(self):
        """ Cancel selected: return to the previous form without shutting down """
        self.parentApp.switchFormPrevious()
""" Main menu """
import sys
import os
import datetime
import asyncio
import curses
import npyscreen
from error_handler import error_handler
from main_menu_list import MainMenuList
from widgets.net_interfaces_widget import NetInterfacesWidget
# Title shown across the UI; the APP_NAME env var overrides the default brand.
APP_NAME = "{} ASR".format(os.environ.get("APP_NAME", "Speechmatics"))
# pylint: disable=too-many-ancestors
# pylint: disable=too-many-instance-attributes
class MainMenu(npyscreen.ActionFormV2):
    """ Main menu form """
    def __init__(self, *args, **kwargs):
        super(MainMenu, self).__init__(*args, **kwargs)
        # Placeholder shown until the first ping attempt completes.
        self.ping_message = "?????"
        # UTC timestamp of the last successful API ping; None until one succeeds.
        self.last_successful_ping = None
    @error_handler("API exception")
    def populate_status(self):
        """ Fill in the status widget with version, license and disk information """
        # Placeholder values in case the API calls below fail or are slow.
        self.status_bar.entry_widget.values = ["????", ]
        version = self.management_api.about()
        sm_license = self.licensing_api.get_license()
        free_space = self.management_api.get_free_storage()
        self.status_bar.entry_widget.values = [
            "{:<16}{}".format("Version", version.version),
            "{:<16}{}".format("License status", sm_license.status),
            "{:<16}{}".format("License code", sm_license.license_code),
            "{:<16}{}".format("Licensed", sm_license.licensed)
        ]
        for entry in free_space.items:
            self.status_bar.entry_widget.values.append(
                "{:<16}{} -> {}".format("Disk free ", entry.path, entry.bytes)
            )
    @staticmethod
    def call_in_background(target, *, loop=None, executor=None):
        """Schedules and starts target callable as a background task
        If not given, *loop* defaults to the current thread's event loop
        If not given, *executor* defaults to the loop's default executor
        Returns the scheduled task.
        """
        if loop is None:
            loop = asyncio.get_event_loop()
        if callable(target):
            return loop.run_in_executor(executor, target)
        raise TypeError("target must be a callable, not {!r}".format(type(target)))
    def set_ping_message(self):
        """ Ping the API and set the ping message describing the result """
        try:
            if self.last_successful_ping:
                since_last_ping = (datetime.datetime.utcnow() - self.last_successful_ping).total_seconds()
                # Show progress dots between real pings.
                self.ping_message += "."
                # Throttle: only ping the API once every 10 seconds.
                if since_last_ping < 10:
                    return
            self.management_api.about()
            self.ping_message = "API responding"
            self.last_successful_ping = datetime.datetime.utcnow()
        except Exception:  # pylint: disable=broad-except
            if self.last_successful_ping:
                since_good_ping = (datetime.datetime.utcnow() - self.last_successful_ping).total_seconds()
                self.ping_message = "API not responding for {}s".format(int(since_good_ping))
            else:
                # BUGFIX: this previously *called* the string attribute
                # (self.ping_message(...)), raising TypeError; it must assign.
                self.ping_message = "No responses from API since start"
    def update_safe_status(self):
        """
        Update those parts of the status which don't depend on the remote end working.
        Because we know these operations can be done safely without throwing up an error,
        they can be done repeatedly in a loop.
        """
        now_utc = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).replace(microsecond=0).isoformat()
        # Ping in a background executor so a slow API never blocks the UI loop.
        self.call_in_background(self.set_ping_message)
        self.system_status_bar.entry_widget.values = [
            "{:<15}: {}".format("System time (UTC)", now_utc),
            self.ping_message
        ]
        self.network_status_bar.entry_widget.update()
        self.system_status_bar.entry_widget.update()
    def h_refresh(self, data):  # pylint: disable=unused-argument
        """
        Refresh the main menu display.
        Try to avoid calling this too often as the npyscreen docs warn against it.
        """
        self.populate_status()
        self.display()
        self.wg_options.update()
    def create(self):
        """
        Create the main menu, initialize the swagger clients
        """
        # The member is added dynamically
        columns = curses.COLS  # pylint: disable=no-member
        self.management_api = self.parentApp.management_api
        self.licensing_api = self.parentApp.licensing_api
        self.add_handlers({curses.KEY_F5: self.h_refresh})
        centred_title = ("{:^" + str(columns) + "}").format(APP_NAME)
        self.title = self.add(npyscreen.FixedText, name=APP_NAME, value=centred_title, editable=False, relx=1, rely=1,
                              width=int(columns))
        self.wg_options = self.add(MainMenuList, name="main_menu", max_width=12, rely=2)
        self.system_status_bar = self.add(npyscreen.BoxTitle, name="System status", editable=False, rely=2, relx=14,
                                          max_height=4)
        self.network_status_bar = self.add(NetInterfacesWidget, name="Network status", editable=False, rely=6, relx=14,
                                           max_height=8)
        self.status_bar = self.add(npyscreen.BoxTitle, name="ASR status", editable=False, relx=14, rely=14,
                                   max_height=8)
        self.nextrely += 1
    def beforeEditing(self):  # pylint: disable=invalid-name
        """ Called after the screen is created but before editing is allowed """
        self.populate_status()
    def on_ok(self):
        """ Quit app on OK """
        sys.exit(0)
    def on_cancel(self):
        """ Quit app on cancel (TODO: remove cancel button from main screen only) """
        sys.exit(0)
    def while_waiting(self):
        """ Run on a timer to update the status """
        self.update_safe_status()
class Reboot(npyscreen.ActionPopup):
    """ Pop-up menu defining the reboot confirmation screen """
    @error_handler("API Error (management_api)")
    def reboot(self):
        """
        Reboot the appliance via the management API.
        If remote host, use api, if localhost, use real system function (TODO)
        """
        self.parentApp.management_api.hard_reboot()
    def create(self):
        """ Initialize the popup """
        self.add(npyscreen.MultiLine, values=["Choose OK to reboot the system."], editable=False)
    def on_ok(self):
        """ OK selected, reboot the system """
        self.reboot()
        # Return to the previous form after triggering the reboot.
        self.parentApp.switchFormPrevious()
    def on_cancel(self):
        """ Cancel selected, return to previous form """
        self.parentApp.switchFormPrevious()
class Shutdown(npyscreen.ActionPopup):
    """ Popup menu defining the shutdown confirmation screen """
    @error_handler("API Error (management_api)")
    def shutdown(self):
        """
        Shutdown the system using the API or the local shutdown command
        """
        self.parentApp.management_api.hard_shutdown()
    def create(self):
        """ Initialize the confirmation popup """
        self.add(npyscreen.MultiLine, values=["Choose OK to shut down the system."], editable=False)
    def on_ok(self):
        """ OK selected: shut down the system, then return to the previous form """
        self.shutdown()
        self.parentApp.switchFormPrevious()
    def on_cancel(self):
        """ Cancel selected: return to the previous form without shutting down """
        self.parentApp.switchFormPrevious()
from json import loads
from logging import getLogger
from os import environ, path
from typing import Any, Callable, Dict, Final, List, Optional
from fastapi import APIRouter, FastAPI
from fastapi.middleware.cors import CORSMiddleware
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.requests import Request
from starlette.routing import Match
logger: Final = getLogger(__file__)
def router_middleware(app: FastAPI, router: APIRouter):
    """Add middleware to a specific router, assumes no router prefix."""

    def deco(func: Callable) -> Callable:
        async def _middleware(request: Request, call_next):
            # Dispatch to the wrapped middleware only when the request scope
            # fully matches one of this router's routes.
            route_matches = (
                route.matches(request.scope)[0] == Match.FULL
                for route in router.routes
            )
            if not any(route_matches):
                # Not one of ours — pass straight through.
                return await call_next(request)
            return await func(request, call_next)

        app.add_middleware(BaseHTTPMiddleware, dispatch=_middleware)
        return func

    return deco
class MiddlewareConfig:
    """Represents a middleware class plus any configuration detail."""

    def __init__(self, middleware: Any, config: Optional[Dict[str, Any]] = None):
        """Store the middleware class; default the config to an empty dict."""
        self.middleware = middleware
        if config is None:
            config = {}
        self.config = config
def append_runtime_middlewares(
    existing_middlewares: List[MiddlewareConfig],
) -> List[MiddlewareConfig]:
    """Add any middlewares specified via environment variable and configure if appropriate."""
    candidates = [_append_cors_middleware(existing_middlewares)]
    # Always return a fresh list; drop candidates that resolved to None.
    return existing_middlewares + [
        candidate for candidate in candidates if candidate is not None
    ]
def _append_cors_middleware(
    existing_middlewares: List[MiddlewareConfig],
) -> Optional[MiddlewareConfig]:
    """Build a CORS MiddlewareConfig from the file named by CORS_CONFIG_LOCATION.

    Returns the new MiddlewareConfig only when the environment variable is set,
    no CORSMiddleware is already configured, and the file exists and parses as
    JSON; every other path falls through and returns None implicitly.
    """
    # True when the caller's list already contains a CORSMiddleware entry.
    has_cors_middleware = (
        len(
            [
                entry
                for entry in existing_middlewares
                if entry.middleware == CORSMiddleware
            ]
        )
        > 0
    )
    cors_config_location_key: Final = "CORS_CONFIG_LOCATION"
    if cors_config_location_key in environ:
        cors_config_path = environ[cors_config_location_key]
        if has_cors_middleware:
            # An explicit CORS setup wins over the runtime-provided config file.
            logger.warning(
                f"CORSMiddleware already configured; ignoring config at {cors_config_path}"
            )
        else:
            logger.info(f"looking for CORS config file at {cors_config_path}")
            if path.exists(cors_config_path):
                try:
                    with open(cors_config_path, "r") as cors_config_file:
                        # The parsed JSON object is used verbatim as
                        # CORSMiddleware keyword arguments.
                        cors_config = loads(cors_config_file.read())
                        logger.debug(f"loaded CORS config {cors_config}")
                        return MiddlewareConfig(CORSMiddleware, cors_config)
                except ValueError as e:
                    logger.error(f"error parsing JSON at {cors_config_path}: {e}")
                except OSError as e:
                    logger.error(f"error reading {cors_config_path}: {e}")
            else:
                logger.warning(f"CORS config not found at {cors_config_path}")
from json import loads
from logging import getLogger
from os import environ, path
from typing import Any, Callable, Dict, Final, List, Optional
from fastapi import APIRouter, FastAPI
from fastapi.middleware.cors import CORSMiddleware
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.requests import Request
from starlette.routing import Match
logger: Final = getLogger(__file__)
def router_middleware(app: FastAPI, router: APIRouter):
"""Add middleware to a specific router, assumes no router prefix."""
def deco(func: Callable) -> Callable:
async def _middleware(request: Request, call_next):
# Check if scopes match
matches = any(
[
route.matches(request.scope)[0] == Match.FULL
for route in router.routes
]
)
if matches: # Run the middleware if they do
return await func(request, call_next)
else: # Otherwise skip the middleware
return await call_next(request)
app.add_middleware(BaseHTTPMiddleware, dispatch=_middleware)
return func
return deco
class MiddlewareConfig:
"""Represents a middleware class plus any configuration detail."""
def __init__(self, middleware: Any, config: Optional[Dict[str, Any]] = None):
"""Defaults config to empty dictionary if not provided."""
self.middleware = middleware
self.config = {} if config is None else config
def append_runtime_middlewares(
existing_middlewares: List[MiddlewareConfig],
) -> List[MiddlewareConfig]:
"""Add any middlewares specified via environment variable and configure if appropriate."""
return existing_middlewares + [
addition
for addition in [_append_cors_middleware(existing_middlewares)]
if addition is not None
]
def _append_cors_middleware(
existing_middlewares: List[MiddlewareConfig],
) -> Optional[MiddlewareConfig]:
has_cors_middleware = (
len(
[
entry
for entry in existing_middlewares
if entry.middleware == CORSMiddleware
]
)
> 0
)
cors_config_location_key: Final = "CORS_CONFIG_LOCATION"
if cors_config_location_key in environ:
cors_config_path = environ[cors_config_location_key]
if has_cors_middleware:
logger.warning(
f"CORSMiddleware already configured; ignoring config at {cors_config_path}"
)
else:
logger.info(f"looking for CORS config file at {cors_config_path}")
if path.exists(cors_config_path):
try:
with open(cors_config_path, "r") as cors_config_file:
cors_config = loads(cors_config_file.read())
logger.debug(f"loaded CORS config {cors_config}")
return MiddlewareConfig(CORSMiddleware, cors_config)
except ValueError as e:
logger.error(f"error parsing JSON at {cors_config_path}: {e}")
except OSError as e:
logger.error(f"error reading {cors_config_path}: {e}")
else:
logger.warning(f"CORS config not found at {cors_config_path}") | 0.876806 | 0.069827 |
import os
from typing import Tuple, Union, Optional
from PIL import Image
from torchvision.datasets import VisionDataset
from torchvision.datasets.folder import is_image_file
from torchvision.transforms import RandomCrop, CenterCrop
from torchvision.transforms.functional import resize
_PIL_IMAGE_MODES_ = ('L', 'F', 'I', 'HSV', 'LAB', 'RGB', 'YCbCr', 'CMYK', 'RGBA', '1')
class ImagesFromFolder(VisionDataset):
"""
Loads images from folder to create dataset. Input and target are equal to loaded image
(possibly with different transforms applied).
"""
def __init__(self, root: str, transform=None, target_transform=None, image_mode: str = 'RGB'):
super(ImagesFromFolder, self).__init__(root,
transform=transform,
target_transform=target_transform)
assert image_mode in _PIL_IMAGE_MODES_, 'Unknown PIL image mode.'
assert os.path.isdir(root), 'Image folder not found.'
self.images = [os.path.join(root, x) for x in os.listdir(root) if is_image_file(x)]
self.mode = image_mode
def __getitem__(self, index):
img = Image.open(self.images[index]).convert(self.mode)
target = img.copy()
if self.transform:
img = self.transform(img)
if self.target_transform:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.images)
class IsrDataset(VisionDataset):
"""
Wraps VisionDataset to create image super resolution dataset. Targets are image crops, inputs
are targets scaled by given factor.
"""
def __init__(
self,
wrapped_dataset: VisionDataset,
scale_factor: int = 2,
output_size: Optional[Union[int, Tuple[int, int]]] = None,
deterministic: bool = False,
scale_mode: int = Image.BICUBIC,
base_image_transform=None,
transform=None,
target_transform=None
):
super(IsrDataset, self).__init__(wrapped_dataset.root,
transform=transform,
target_transform=target_transform)
assert scale_factor > 1, 'scale factor must be >= 2'
self.scale_factor = scale_factor
self.scale_mode = scale_mode
self._dataset = wrapped_dataset
self.base_img_transform = base_image_transform
if output_size is None:
self._crop = None
elif deterministic:
self._crop = CenterCrop(size=output_size)
else:
self._crop = RandomCrop(size=output_size)
def _scale(self, img: Image) -> Image:
width, height = img.size[0] // self.scale_factor, img.size[1] // self.scale_factor
return resize(img, (height, width), self.scale_mode)
def __len__(self):
return len(self._dataset)
def __getitem__(self, item):
base_img = self._dataset[item][0]
if self.base_img_transform is not None:
base_img = self.base_img_transform(base_img)
target = self._crop(base_img) if self._crop else base_img
img = self._scale(target)
if self.target_transform is not None:
target = self.target_transform(target)
if self.transform is not None:
img = self.transform(img)
return img, target | isr/datasets/isr.py | import os
from typing import Tuple, Union, Optional
from PIL import Image
from torchvision.datasets import VisionDataset
from torchvision.datasets.folder import is_image_file
from torchvision.transforms import RandomCrop, CenterCrop
from torchvision.transforms.functional import resize
_PIL_IMAGE_MODES_ = ('L', 'F', 'I', 'HSV', 'LAB', 'RGB', 'YCbCr', 'CMYK', 'RGBA', '1')
class ImagesFromFolder(VisionDataset):
"""
Loads images from folder to create dataset. Input and target are equal to loaded image
(possibly with different transforms applied).
"""
def __init__(self, root: str, transform=None, target_transform=None, image_mode: str = 'RGB'):
super(ImagesFromFolder, self).__init__(root,
transform=transform,
target_transform=target_transform)
assert image_mode in _PIL_IMAGE_MODES_, 'Unknown PIL image mode.'
assert os.path.isdir(root), 'Image folder not found.'
self.images = [os.path.join(root, x) for x in os.listdir(root) if is_image_file(x)]
self.mode = image_mode
def __getitem__(self, index):
img = Image.open(self.images[index]).convert(self.mode)
target = img.copy()
if self.transform:
img = self.transform(img)
if self.target_transform:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.images)
class IsrDataset(VisionDataset):
"""
Wraps VisionDataset to create image super resolution dataset. Targets are image crops, inputs
are targets scaled by given factor.
"""
def __init__(
self,
wrapped_dataset: VisionDataset,
scale_factor: int = 2,
output_size: Optional[Union[int, Tuple[int, int]]] = None,
deterministic: bool = False,
scale_mode: int = Image.BICUBIC,
base_image_transform=None,
transform=None,
target_transform=None
):
super(IsrDataset, self).__init__(wrapped_dataset.root,
transform=transform,
target_transform=target_transform)
assert scale_factor > 1, 'scale factor must be >= 2'
self.scale_factor = scale_factor
self.scale_mode = scale_mode
self._dataset = wrapped_dataset
self.base_img_transform = base_image_transform
if output_size is None:
self._crop = None
elif deterministic:
self._crop = CenterCrop(size=output_size)
else:
self._crop = RandomCrop(size=output_size)
def _scale(self, img: Image) -> Image:
width, height = img.size[0] // self.scale_factor, img.size[1] // self.scale_factor
return resize(img, (height, width), self.scale_mode)
def __len__(self):
return len(self._dataset)
def __getitem__(self, item):
base_img = self._dataset[item][0]
if self.base_img_transform is not None:
base_img = self.base_img_transform(base_img)
target = self._crop(base_img) if self._crop else base_img
img = self._scale(target)
if self.target_transform is not None:
target = self.target_transform(target)
if self.transform is not None:
img = self.transform(img)
return img, target | 0.907972 | 0.486149 |
import re
import math
from os import system
import sys
import traceback
def inlst(lst, value, key=lambda x:x):
for i in range(len(lst)):
each = lst[i]
if key(each) == value:
return i
return -1
recipes = {}
def main():
global recipes
prog = re.compile(r'"(.+?)"')
progcnt = re.compile(r'@count:([0-9]+?)')
print(r''' IC2合成计算器 alpha v1.0.3
作者: 墨滢
详细使用说明见“使用说明.txt”,更新见“更新记录.txt”
发现bug请将截图发给我,十分感谢
''')
# 读取汉化
with open('itempanel.csv','r',encoding='gb18030') as f:
data = f.read().splitlines()
del data[0]
for i in range(len(data)):
each = data[i].split(',')
data[i] = each.copy()
#print(data[0],data[1],data[2])
data.sort(key=lambda x:int(x[2]))
translate = {}
for each in data:
itemname = each[0]
itemmeta = int(each[2])
tranname = each[4]
if itemname not in translate:
translate[itemname] = tranname
translate['%s@%s'%(itemname,itemmeta)] = tranname
print('[INFO]读取到%s条汉化文本'%len(translate))
# 读取OreDict
with open('oredict.csv','r',encoding='gb18030') as f:
data = f.read().splitlines()
del data[0]
oredict = {}
for e in data:
each = e.split(',')
orename = 'OreDict:'+each[0]
itemid = each[2]
itemmeta = each[1]
if itemmeta.count('@') != 0:
itemmeta = itemmeta.split('@')[1]
else:
itemmeta = 0
if orename not in oredict:
oredict[orename] = '%s@%s'%(itemid,itemmeta)
oredict['Fluid:water'] = 'minecraft:water_bucket@*'
print('[INFO]读取到%s条矿物词典规则'%len(oredict))
recipes = {}
# 读取有序合成表
with open('./config/有序.ini','r',encoding='utf-8') as f:
data = f.read()
for key in oredict:
data = data.replace(key,oredict[key])
data = data.replace('\\\n','').replace(' ',' ').splitlines()
for e in data:
e = e.strip()
if len(e) == 0:
continue
if e[0] == ';':
continue
left,right = e.split('=')
left = left.strip()
right = right.strip()
right = right.split(',')
outname = translate[left]
if outname in recipes:
temp = recipes[outname][0]
else:
temp = []
for inp in right:
itemcnt = {}
_t = prog.search(inp)
table = _t.group(1).replace('|','')
inp = inp[_t.span()[1]:].strip()
if inp.count('@count') == 0:
outcnt = 1
else:
_t = progcnt.search(inp)
outcnt = int(_t.group(1))
inp = inp[:_t.span()[0]]
inp = inp.replace('@*','').strip()
inp = inp.split(' ')
for initem in inp:
sign,ininame = initem.split(':',1)
itemcnt[translate[ininame]] = table.count(sign)
temp.append((itemcnt,outcnt))
if outname == '生碳网':
print(temp)
recipes[outname] = temp,'合成台'
print('[INFO]合成表 有序合成: %s条'%len(recipes))
tlrecipes = len(recipes)
# 读取无序合成表
with open('./config/无序.ini','r',encoding='utf-8') as f:
data = f.read()
for key in oredict:
data = data.replace(key+' ',oredict[key]+' ')
data = data.replace(key+'\n',oredict[key]+'\n')
data = data.replace('\\\n','').replace(' ',' ').splitlines()
for e in data:
e = e.strip()
if len(e) == 0:
continue
if e[0] == ';':
continue
left,right = e.split('=')
left = left.strip()
right = right.strip()
right = right.split(',')
outname = translate[left]
if outname in recipes:
temp = recipes[outname][0]
else:
temp = []
for inp in right:
itemcnt = {}
inp = inp.strip()
if inp.count('@count') == 0:
outcnt = 1
else:
_t = progcnt.search(inp)
outcnt = int(_t.group(1))
inp = inp[:_t.span()[0]]
inp = inp.replace('@*','').strip()
inp = inp.split(' ')
for initem in inp:
ininame = translate[initem]
if ininame in itemcnt:
itemcnt[ininame] += 1
else:
itemcnt[ininame] = 1
temp.append((itemcnt,outcnt))
recipes[outname] = temp,'合成台无序'
print('[INFO]合成表 无序合成: %s条'%(len(recipes)-tlrecipes))
tlrecipes = len(recipes)
# 读取高炉合成
recipes[translate['IC2:itemIngot@3']] = [({translate['minecraft:iron_ingot']:1},1)],\
'高炉'
recipes[translate['IC2:itemSlag']] = [({translate['minecraft:iron_ingot']:1},1)],\
'高炉'
print('[INFO]合成表 高炉: %s条'%(len(recipes)-tlrecipes))
tlrecipes = len(recipes)
# 读取压缩机合成
with open('./config/压缩.ini','r',encoding='utf-8') as f:
data = f.read()
for key in oredict:
data = data.replace(key,oredict[key])
data = data.replace('\\\n','').replace(' ',' ').replace('@-1','')\
.replace('@0','').splitlines()
for e in data:
e = e.strip()
if len(e) == 0:
continue
if e[0] == ';':
continue
left,right = e.split('=')
left = left.strip()
right = right.strip()
outname = translate[right]
left = left[::-1]
temp1,temp2 = left.split('@',1)
temp1,temp2 = temp1[::-1],temp2[::-1]
incnt = int(temp1)
inname = translate[temp2]
if outname in recipes:
ttc = recipes[outname][0]
else:
ttc = []
ttc.append(({inname:incnt},1))
recipes[outname] = ttc,'压缩机'
print('[INFO]合成表 压缩机: %s条'%(len(recipes)-tlrecipes))
tlrecipes = len(recipes)
# 读取提取机合成表
with open('./config/提取.ini','r',encoding='utf-8') as f:
data = f.read()
for key in oredict:
data = data.replace(key,oredict[key])
data = data.replace('\\\n','').replace(' ',' ').splitlines()
for e in data:
e = e.strip()
if len(e) == 0:
continue
if e[0] == ';':
continue
left,right = e.split('=')
left = left.strip()
right = right.strip()
inp = right
if inp.count('@count') == 0:
outcnt = 1
else:
_t = progcnt.search(inp)
outcnt = int(_t.group(1))
inp = inp[:_t.span()[0]].strip()
outname = translate[inp]
itemcnt = {}
left = left.replace('@*','').strip()
itemcnt = {translate[left]:1}
if outname in recipes:
ttc = recipes[outname][0]
else:
ttc = []
ttc.append((itemcnt,outcnt))
recipes[outname] = ttc,'提取机'
print('[INFO]合成表 提取机: %s条'%(len(recipes)-tlrecipes))
tlrecipes = len(recipes)
# 读取打粉机合成表
with open('./config/打粉.ini','r',encoding='utf-8') as f:
data = f.read()
for key in oredict:
data = data.replace(key,oredict[key])
data = data.replace('\\\n','').replace(' ',' ').splitlines()
for e in data:
e = e.strip()
if len(e) == 0:
continue
if e[0] == ';':
continue
left,right = e.split('=')
left = left.strip()
right = right.strip()
inp = right
if inp.count('@count') == 0:
outcnt = 1
else:
_t = progcnt.search(inp)
outcnt = int(_t.group(1))
inp = inp[:_t.span()[0]].strip()
outname = translate[inp]
itemcnt = {}
left = left.replace('@*','').strip()
inp = left
if inp.count('@count') == 0:
incnt = 1
else:
_t = progcnt.search(inp)
incnt = _t.group(1)
inp = inp[:_t.span()[0]].strip()
itemcnt = {translate[inp]:incnt}
if outname in recipes:
ttc = recipes[outname][0]
else:
ttc = []
ttc.append((itemcnt,outcnt))
recipes[outname] = ttc,'打粉机'
print('[INFO]合成表 打粉机: %s条'%(len(recipes)-tlrecipes))
tlrecipes = len(recipes)
# 读取金属成型机合成
# 剪切
recipes[translate['IC2:itemCable@5']] = \
[({translate[oredict['OreDict:plateIron']]:1},4)],'金属成型机-剪切'
recipes[translate['IC2:itemCable@10']] = \
[({translate[oredict['OreDict:plateTin']]:1},3)],'金属成型机-剪切'
recipes[translate['IC2:itemCable@1']] = \
[({translate[oredict['OreDict:plateCopper']]:1},3)],'金属成型机-剪切'
recipes[translate['IC2:itemCable@2']] = \
[({translate[oredict['OreDict:plateGold']]:1},4)],'金属成型机-剪切'
# 挤压
recipes[translate['IC2:itemTinCan']] = \
[({translate['IC2:itemCasing@1']:1},1)],'金属成型机-挤压'
recipes[translate['IC2:itemCable@10']] = \
[({translate[oredict['OreDict:ingotTin']]:1},3)],'金属成型机-挤压'
recipes[translate['IC2:itemCable@2']] = \
[({translate['minecraft:gold_ingot']:1},4)],'金属成型机-挤压'
recipes[translate['IC2:itemRecipePart@12']] = \
[({translate[oredict['OreDict:blockSteel']]:1},1)],'金属成型机-挤压'
recipes[translate['IC2:itemCable@1']] = \
[({translate[oredict['OreDict:ingotCopper']]:1},3)],'金属成型机-挤压'
recipes[translate['IC2:itemRecipePart@11']] = \
[({translate['minecraft:iron_block']:1},1)],'金属成型机-挤压'
recipes[translate['IC2:itemCellEmpty@0']] = \
[({translate[oredict['OreDict:plateTin']]:1},3)],'金属成型机-挤压'
recipes[translate['IC2:itemFuelRod']] = \
[({translate[oredict['OreDict:plateIron']]:1},1)],'金属成型机-挤压'
recipes[translate['IC2:blockFenceIron']] = \
[({translate['IC2:itemCasing@4']:1},1)],'金属成型机-挤压'
recipes[translate['IC2:itemCable@5']] = \
[({translate['minecraft:iron_ingot']:1},4)],'金属成型机-挤压'
# 碾压
recipes[translate['IC2:itemPlates@3']] = \
[({translate['minecraft:gold_ingot']:1},1)],'金属成型机-挤压'
recipes[translate['IC2:itemPlates@4']] = \
[({translate['minecraft:iron_ingot']:1},1)],'金属成型机-挤压'
recipes[translate['IC2:itemPlates@1']] = \
[({translate[oredict['OreDict:ingotTin']]:1},1)],'金属成型机-碾压'
recipes[translate['IC2:itemCasing@6']] = \
[({translate[oredict['OreDict:plateLead']]:1},2)],'金属成型机-碾压'
recipes[translate['IC2:itemCasing@5']] = \
[({translate[oredict['OreDict:plateSteel']]:1},2)],'金属成型机-碾压'
recipes[translate['IC2:itemCasing@2']] = \
[({translate[oredict['OreDict:plateBronze']]:1},2)],'金属成型机-碾压'
recipes[translate['IC2:itemCasing@3']] = \
[({translate[oredict['OreDict:plateGold']]:1},2)],'金属成型机-碾压'
recipes[translate['IC2:itemPlates@2']] = \
[({translate[oredict['OreDict:ingotBronze']]:1},1)],'金属成型机-碾压'
recipes[translate['IC2:itemCasing@4']] = \
[({translate[oredict['OreDict:plateIron']]:1},2)],'金属成型机-碾压'
recipes[translate['IC2:itemPlates@5']] = \
[({translate[oredict['OreDict:ingotSteel']]:1},1)],'金属成型机-碾压'
recipes[translate['IC2:itemCasing@0']] = \
[({translate[oredict['OreDict:plateCopper']]:1},2)],'金属成型机-碾压'
recipes[translate['IC2:itemPlates']] = \
[({translate[oredict['OreDict:ingotCopper']]:1},1)],'金属成型机-碾压'
recipes[translate['IC2:itemPlates@6']] = \
[({translate[oredict['OreDict:ingotLead']]:1},1)],'金属成型机-碾压'
recipes[translate['IC2:itemCasing@1']] = \
[({translate[oredict['OreDict:plateTin']]:1},2)],'金属成型机-碾压'
print('[INFO]合成表 金属成型机: %s条'%(len(recipes)-tlrecipes))
tlrecipes = len(recipes)
# 读取高级太阳能合成表
with open('./config/高级太阳能.ini','r',encoding='utf-8') as f:
data = f.read()
for key in oredict:
data = data.replace(key,oredict[key])
data = data.replace('\\\n','').replace(' ',' ').splitlines()
for e in data:
e = e.strip()
if len(e) == 0:
continue
if e[0] == ';':
continue
left,right = e.split('=')
left = left.strip()
right = right.strip()
right = right.split(',')
outname = translate[left]
if outname in recipes:
temp = recipes[outname][0]
else:
temp = []
for inp in right:
itemcnt = {}
_t = prog.search(inp)
table = _t.group(1).replace('|','')
inp = inp[_t.span()[1]:].strip()
if inp.count('@count') == 0:
outcnt = 1
else:
_t = progcnt.search(inp)
outcnt = int(_t.group(1))
inp = inp[:_t.span()[0]]
inp = inp.replace('@*','').strip()
inp = inp.split(' ')
for initem in inp:
sign,ininame = initem.split(':',1)
itemcnt[translate[ininame]] = table.count(sign)
temp.append((itemcnt,outcnt))
if outname == '生碳网':
print(temp)
recipes[outname] = temp,'合成台'
print('[INFO]合成表 高级太阳能: %s条'%(len(recipes)-tlrecipes))
tlrecipes = len(recipes)
# 附加规则
recipes['青铜锭'] = [({'青铜粉':1},1)],'熔炉'
recipes['青铜粉'] = ([({'锡粉': 1, '铜粉': 3}, 4)], '合成台')
recipes['铜粉'] = ([({'铜锭': 1}, 1)], '合成台')
recipes['锡粉'] = ([({'锡锭': 1}, 1)], '合成台')
recipes['铁粉'] = ([({'铁锭': 1}, 1)], '合成台')
recipes['铅粉'] = ([({'铅锭': 1}, 1)], '合成台')
recipes['青金石粉'] = ([({'青金石': 1}, 1)], '合成台')
recipes['防爆石'] = ([({'铁质脚手架':1,'建筑泡沫':1},1)],'建筑泡沫喷枪')
print('[INFO]合成表 其他: %s条'%(len(recipes)-tlrecipes))
with open('raw.ini','r',encoding='utf-8') as f:
raw = f.read().splitlines()
while raw.count(''):
del raw[raw.index('')]
print('[INFO]载入原材料规则: %s条'%(len(raw)))
print('================载入完毕================\n')
while True:
start = input('目标产物全称(输入quit退出):')
if start=='quit':
break
lst = {start:1}
flag = True
dopath = []
ret = {}
while flag:
newlst = {}
for key in lst:
if key not in recipes or key in raw: # 最终产物
if key in ret:
ret[key] += lst[key]
else:
ret[key] = lst[key]
else: # 继续细分
way = recipes[key][0][0]
outcnt = way[1]
ins = way[0]
temp = {}
for item in ins:
temp[item] = math.ceil(lst[key]/outcnt)*ins[item]
if item in newlst:
newlst[item] += math.ceil(lst[key]/outcnt)*ins[item]
else:
newlst[item] = math.ceil(lst[key]/outcnt)*ins[item]
if inlst(dopath, key, lambda x:x[0][0]) == -1:
dopath.insert(0,[[key,outcnt*math.ceil(lst[key]/outcnt)],temp])
else:
index = inlst(dopath, key, lambda x:x[0][0])
dopath[index][0][1] += outcnt*math.ceil(lst[key]/outcnt)
for kk in temp:
dopath[index][1][kk] += temp[kk]
lst = newlst
if len(lst) == 0:
flag = False
print('(合成路线功能目前尚不稳定,内容仅供参考)\n合成路线:')
for each in dopath:
outprint = []
for kk in each[1]:
outprint.append('%s*%s'%(kk,each[1][kk]))
print('\t',' + '.join(outprint),'=>','%s*%s'%(each[0][0],each[0][1]))
print('\n总耗材:')
for kk in ret:
zutext = ''
if ret[kk] >= 64:
if ret[kk]%64:
zutext = '(%s组 %s个)'%(ret[kk]//64,ret[kk]%64)
else:
zutext = '(%s组)'%(ret[kk]//64)
print('\t%s: %s%s'%(kk,ret[kk],zutext))
print('========================================\n')
system('pause')
if __name__ == '__main__':
try:
main()
except BaseException as e:
print('\n\n==========程序发生意外错误==========')
traceback.print_exc()
print('请将此内容截图发送至开发者\n')
system('pause')
sys.exit(1) | recipes_calc.py | import re
import math
from os import system
import sys
import traceback
def inlst(lst, value, key=lambda x:x):
for i in range(len(lst)):
each = lst[i]
if key(each) == value:
return i
return -1
recipes = {}
def main():
global recipes
prog = re.compile(r'"(.+?)"')
progcnt = re.compile(r'@count:([0-9]+?)')
print(r''' IC2合成计算器 alpha v1.0.3
作者: 墨滢
详细使用说明见“使用说明.txt”,更新见“更新记录.txt”
发现bug请将截图发给我,十分感谢
''')
# 读取汉化
with open('itempanel.csv','r',encoding='gb18030') as f:
data = f.read().splitlines()
del data[0]
for i in range(len(data)):
each = data[i].split(',')
data[i] = each.copy()
#print(data[0],data[1],data[2])
data.sort(key=lambda x:int(x[2]))
translate = {}
for each in data:
itemname = each[0]
itemmeta = int(each[2])
tranname = each[4]
if itemname not in translate:
translate[itemname] = tranname
translate['%s@%s'%(itemname,itemmeta)] = tranname
print('[INFO]读取到%s条汉化文本'%len(translate))
# 读取OreDict
with open('oredict.csv','r',encoding='gb18030') as f:
data = f.read().splitlines()
del data[0]
oredict = {}
for e in data:
each = e.split(',')
orename = 'OreDict:'+each[0]
itemid = each[2]
itemmeta = each[1]
if itemmeta.count('@') != 0:
itemmeta = itemmeta.split('@')[1]
else:
itemmeta = 0
if orename not in oredict:
oredict[orename] = '%s@%s'%(itemid,itemmeta)
oredict['Fluid:water'] = 'minecraft:water_bucket@*'
print('[INFO]读取到%s条矿物词典规则'%len(oredict))
recipes = {}
# 读取有序合成表
with open('./config/有序.ini','r',encoding='utf-8') as f:
data = f.read()
for key in oredict:
data = data.replace(key,oredict[key])
data = data.replace('\\\n','').replace(' ',' ').splitlines()
for e in data:
e = e.strip()
if len(e) == 0:
continue
if e[0] == ';':
continue
left,right = e.split('=')
left = left.strip()
right = right.strip()
right = right.split(',')
outname = translate[left]
if outname in recipes:
temp = recipes[outname][0]
else:
temp = []
for inp in right:
itemcnt = {}
_t = prog.search(inp)
table = _t.group(1).replace('|','')
inp = inp[_t.span()[1]:].strip()
if inp.count('@count') == 0:
outcnt = 1
else:
_t = progcnt.search(inp)
outcnt = int(_t.group(1))
inp = inp[:_t.span()[0]]
inp = inp.replace('@*','').strip()
inp = inp.split(' ')
for initem in inp:
sign,ininame = initem.split(':',1)
itemcnt[translate[ininame]] = table.count(sign)
temp.append((itemcnt,outcnt))
if outname == '生碳网':
print(temp)
recipes[outname] = temp,'合成台'
print('[INFO]合成表 有序合成: %s条'%len(recipes))
tlrecipes = len(recipes)
# 读取无序合成表
with open('./config/无序.ini','r',encoding='utf-8') as f:
data = f.read()
for key in oredict:
data = data.replace(key+' ',oredict[key]+' ')
data = data.replace(key+'\n',oredict[key]+'\n')
data = data.replace('\\\n','').replace(' ',' ').splitlines()
for e in data:
e = e.strip()
if len(e) == 0:
continue
if e[0] == ';':
continue
left,right = e.split('=')
left = left.strip()
right = right.strip()
right = right.split(',')
outname = translate[left]
if outname in recipes:
temp = recipes[outname][0]
else:
temp = []
for inp in right:
itemcnt = {}
inp = inp.strip()
if inp.count('@count') == 0:
outcnt = 1
else:
_t = progcnt.search(inp)
outcnt = int(_t.group(1))
inp = inp[:_t.span()[0]]
inp = inp.replace('@*','').strip()
inp = inp.split(' ')
for initem in inp:
ininame = translate[initem]
if ininame in itemcnt:
itemcnt[ininame] += 1
else:
itemcnt[ininame] = 1
temp.append((itemcnt,outcnt))
recipes[outname] = temp,'合成台无序'
print('[INFO]合成表 无序合成: %s条'%(len(recipes)-tlrecipes))
tlrecipes = len(recipes)
# 读取高炉合成
recipes[translate['IC2:itemIngot@3']] = [({translate['minecraft:iron_ingot']:1},1)],\
'高炉'
recipes[translate['IC2:itemSlag']] = [({translate['minecraft:iron_ingot']:1},1)],\
'高炉'
print('[INFO]合成表 高炉: %s条'%(len(recipes)-tlrecipes))
tlrecipes = len(recipes)
# 读取压缩机合成
with open('./config/压缩.ini','r',encoding='utf-8') as f:
data = f.read()
for key in oredict:
data = data.replace(key,oredict[key])
data = data.replace('\\\n','').replace(' ',' ').replace('@-1','')\
.replace('@0','').splitlines()
for e in data:
e = e.strip()
if len(e) == 0:
continue
if e[0] == ';':
continue
left,right = e.split('=')
left = left.strip()
right = right.strip()
outname = translate[right]
left = left[::-1]
temp1,temp2 = left.split('@',1)
temp1,temp2 = temp1[::-1],temp2[::-1]
incnt = int(temp1)
inname = translate[temp2]
if outname in recipes:
ttc = recipes[outname][0]
else:
ttc = []
ttc.append(({inname:incnt},1))
recipes[outname] = ttc,'压缩机'
print('[INFO]合成表 压缩机: %s条'%(len(recipes)-tlrecipes))
tlrecipes = len(recipes)
# 读取提取机合成表
with open('./config/提取.ini','r',encoding='utf-8') as f:
data = f.read()
for key in oredict:
data = data.replace(key,oredict[key])
data = data.replace('\\\n','').replace(' ',' ').splitlines()
for e in data:
e = e.strip()
if len(e) == 0:
continue
if e[0] == ';':
continue
left,right = e.split('=')
left = left.strip()
right = right.strip()
inp = right
if inp.count('@count') == 0:
outcnt = 1
else:
_t = progcnt.search(inp)
outcnt = int(_t.group(1))
inp = inp[:_t.span()[0]].strip()
outname = translate[inp]
itemcnt = {}
left = left.replace('@*','').strip()
itemcnt = {translate[left]:1}
if outname in recipes:
ttc = recipes[outname][0]
else:
ttc = []
ttc.append((itemcnt,outcnt))
recipes[outname] = ttc,'提取机'
print('[INFO]合成表 提取机: %s条'%(len(recipes)-tlrecipes))
tlrecipes = len(recipes)
# 读取打粉机合成表
with open('./config/打粉.ini','r',encoding='utf-8') as f:
data = f.read()
for key in oredict:
data = data.replace(key,oredict[key])
data = data.replace('\\\n','').replace(' ',' ').splitlines()
for e in data:
e = e.strip()
if len(e) == 0:
continue
if e[0] == ';':
continue
left,right = e.split('=')
left = left.strip()
right = right.strip()
inp = right
if inp.count('@count') == 0:
outcnt = 1
else:
_t = progcnt.search(inp)
outcnt = int(_t.group(1))
inp = inp[:_t.span()[0]].strip()
outname = translate[inp]
itemcnt = {}
left = left.replace('@*','').strip()
inp = left
if inp.count('@count') == 0:
incnt = 1
else:
_t = progcnt.search(inp)
incnt = _t.group(1)
inp = inp[:_t.span()[0]].strip()
itemcnt = {translate[inp]:incnt}
if outname in recipes:
ttc = recipes[outname][0]
else:
ttc = []
ttc.append((itemcnt,outcnt))
recipes[outname] = ttc,'打粉机'
print('[INFO]合成表 打粉机: %s条'%(len(recipes)-tlrecipes))
tlrecipes = len(recipes)
# 读取金属成型机合成
# 剪切
recipes[translate['IC2:itemCable@5']] = \
[({translate[oredict['OreDict:plateIron']]:1},4)],'金属成型机-剪切'
recipes[translate['IC2:itemCable@10']] = \
[({translate[oredict['OreDict:plateTin']]:1},3)],'金属成型机-剪切'
recipes[translate['IC2:itemCable@1']] = \
[({translate[oredict['OreDict:plateCopper']]:1},3)],'金属成型机-剪切'
recipes[translate['IC2:itemCable@2']] = \
[({translate[oredict['OreDict:plateGold']]:1},4)],'金属成型机-剪切'
# 挤压
recipes[translate['IC2:itemTinCan']] = \
[({translate['IC2:itemCasing@1']:1},1)],'金属成型机-挤压'
recipes[translate['IC2:itemCable@10']] = \
[({translate[oredict['OreDict:ingotTin']]:1},3)],'金属成型机-挤压'
recipes[translate['IC2:itemCable@2']] = \
[({translate['minecraft:gold_ingot']:1},4)],'金属成型机-挤压'
recipes[translate['IC2:itemRecipePart@12']] = \
[({translate[oredict['OreDict:blockSteel']]:1},1)],'金属成型机-挤压'
recipes[translate['IC2:itemCable@1']] = \
[({translate[oredict['OreDict:ingotCopper']]:1},3)],'金属成型机-挤压'
recipes[translate['IC2:itemRecipePart@11']] = \
[({translate['minecraft:iron_block']:1},1)],'金属成型机-挤压'
recipes[translate['IC2:itemCellEmpty@0']] = \
[({translate[oredict['OreDict:plateTin']]:1},3)],'金属成型机-挤压'
recipes[translate['IC2:itemFuelRod']] = \
[({translate[oredict['OreDict:plateIron']]:1},1)],'金属成型机-挤压'
recipes[translate['IC2:blockFenceIron']] = \
[({translate['IC2:itemCasing@4']:1},1)],'金属成型机-挤压'
recipes[translate['IC2:itemCable@5']] = \
[({translate['minecraft:iron_ingot']:1},4)],'金属成型机-挤压'
# 碾压
recipes[translate['IC2:itemPlates@3']] = \
[({translate['minecraft:gold_ingot']:1},1)],'金属成型机-挤压'
recipes[translate['IC2:itemPlates@4']] = \
[({translate['minecraft:iron_ingot']:1},1)],'金属成型机-挤压'
recipes[translate['IC2:itemPlates@1']] = \
[({translate[oredict['OreDict:ingotTin']]:1},1)],'金属成型机-碾压'
recipes[translate['IC2:itemCasing@6']] = \
[({translate[oredict['OreDict:plateLead']]:1},2)],'金属成型机-碾压'
recipes[translate['IC2:itemCasing@5']] = \
[({translate[oredict['OreDict:plateSteel']]:1},2)],'金属成型机-碾压'
recipes[translate['IC2:itemCasing@2']] = \
[({translate[oredict['OreDict:plateBronze']]:1},2)],'金属成型机-碾压'
recipes[translate['IC2:itemCasing@3']] = \
[({translate[oredict['OreDict:plateGold']]:1},2)],'金属成型机-碾压'
recipes[translate['IC2:itemPlates@2']] = \
[({translate[oredict['OreDict:ingotBronze']]:1},1)],'金属成型机-碾压'
recipes[translate['IC2:itemCasing@4']] = \
[({translate[oredict['OreDict:plateIron']]:1},2)],'金属成型机-碾压'
recipes[translate['IC2:itemPlates@5']] = \
[({translate[oredict['OreDict:ingotSteel']]:1},1)],'金属成型机-碾压'
recipes[translate['IC2:itemCasing@0']] = \
[({translate[oredict['OreDict:plateCopper']]:1},2)],'金属成型机-碾压'
recipes[translate['IC2:itemPlates']] = \
[({translate[oredict['OreDict:ingotCopper']]:1},1)],'金属成型机-碾压'
recipes[translate['IC2:itemPlates@6']] = \
[({translate[oredict['OreDict:ingotLead']]:1},1)],'金属成型机-碾压'
recipes[translate['IC2:itemCasing@1']] = \
[({translate[oredict['OreDict:plateTin']]:1},2)],'金属成型机-碾压'
print('[INFO]合成表 金属成型机: %s条'%(len(recipes)-tlrecipes))
tlrecipes = len(recipes)
# 读取高级太阳能合成表
with open('./config/高级太阳能.ini','r',encoding='utf-8') as f:
data = f.read()
for key in oredict:
data = data.replace(key,oredict[key])
data = data.replace('\\\n','').replace(' ',' ').splitlines()
for e in data:
e = e.strip()
if len(e) == 0:
continue
if e[0] == ';':
continue
left,right = e.split('=')
left = left.strip()
right = right.strip()
right = right.split(',')
outname = translate[left]
if outname in recipes:
temp = recipes[outname][0]
else:
temp = []
for inp in right:
itemcnt = {}
_t = prog.search(inp)
table = _t.group(1).replace('|','')
inp = inp[_t.span()[1]:].strip()
if inp.count('@count') == 0:
outcnt = 1
else:
_t = progcnt.search(inp)
outcnt = int(_t.group(1))
inp = inp[:_t.span()[0]]
inp = inp.replace('@*','').strip()
inp = inp.split(' ')
for initem in inp:
sign,ininame = initem.split(':',1)
itemcnt[translate[ininame]] = table.count(sign)
temp.append((itemcnt,outcnt))
if outname == '生碳网':
print(temp)
recipes[outname] = temp,'合成台'
print('[INFO]合成表 高级太阳能: %s条'%(len(recipes)-tlrecipes))
tlrecipes = len(recipes)
# 附加规则
recipes['青铜锭'] = [({'青铜粉':1},1)],'熔炉'
recipes['青铜粉'] = ([({'锡粉': 1, '铜粉': 3}, 4)], '合成台')
recipes['铜粉'] = ([({'铜锭': 1}, 1)], '合成台')
recipes['锡粉'] = ([({'锡锭': 1}, 1)], '合成台')
recipes['铁粉'] = ([({'铁锭': 1}, 1)], '合成台')
recipes['铅粉'] = ([({'铅锭': 1}, 1)], '合成台')
recipes['青金石粉'] = ([({'青金石': 1}, 1)], '合成台')
recipes['防爆石'] = ([({'铁质脚手架':1,'建筑泡沫':1},1)],'建筑泡沫喷枪')
print('[INFO]合成表 其他: %s条'%(len(recipes)-tlrecipes))
with open('raw.ini','r',encoding='utf-8') as f:
raw = f.read().splitlines()
while raw.count(''):
del raw[raw.index('')]
print('[INFO]载入原材料规则: %s条'%(len(raw)))
print('================载入完毕================\n')
while True:
start = input('目标产物全称(输入quit退出):')
if start=='quit':
break
lst = {start:1}
flag = True
dopath = []
ret = {}
while flag:
newlst = {}
for key in lst:
if key not in recipes or key in raw: # 最终产物
if key in ret:
ret[key] += lst[key]
else:
ret[key] = lst[key]
else: # 继续细分
way = recipes[key][0][0]
outcnt = way[1]
ins = way[0]
temp = {}
for item in ins:
temp[item] = math.ceil(lst[key]/outcnt)*ins[item]
if item in newlst:
newlst[item] += math.ceil(lst[key]/outcnt)*ins[item]
else:
newlst[item] = math.ceil(lst[key]/outcnt)*ins[item]
if inlst(dopath, key, lambda x:x[0][0]) == -1:
dopath.insert(0,[[key,outcnt*math.ceil(lst[key]/outcnt)],temp])
else:
index = inlst(dopath, key, lambda x:x[0][0])
dopath[index][0][1] += outcnt*math.ceil(lst[key]/outcnt)
for kk in temp:
dopath[index][1][kk] += temp[kk]
lst = newlst
if len(lst) == 0:
flag = False
print('(合成路线功能目前尚不稳定,内容仅供参考)\n合成路线:')
for each in dopath:
outprint = []
for kk in each[1]:
outprint.append('%s*%s'%(kk,each[1][kk]))
print('\t',' + '.join(outprint),'=>','%s*%s'%(each[0][0],each[0][1]))
print('\n总耗材:')
for kk in ret:
zutext = ''
if ret[kk] >= 64:
if ret[kk]%64:
zutext = '(%s组 %s个)'%(ret[kk]//64,ret[kk]%64)
else:
zutext = '(%s组)'%(ret[kk]//64)
print('\t%s: %s%s'%(kk,ret[kk],zutext))
print('========================================\n')
system('pause')
if __name__ == '__main__':
try:
main()
except BaseException as e:
print('\n\n==========程序发生意外错误==========')
traceback.print_exc()
print('请将此内容截图发送至开发者\n')
system('pause')
sys.exit(1) | 0.039909 | 0.152979 |
import pytest
import mock
# Django REST Framework
from rest_framework import exceptions
from rest_framework.generics import ListAPIView
# Django
from django.core.urlresolvers import RegexURLResolver, RegexURLPattern
# AWX
from awx.main.views import ApiErrorView
from awx.api.views import JobList, InventorySourceList
HTTP_METHOD_NAMES = [
'get',
'post',
'put',
'patch',
'delete',
'head',
'options',
'trace',
]
@pytest.fixture
def api_view_obj_fixture():
return ApiErrorView()
@pytest.mark.parametrize('method_name', HTTP_METHOD_NAMES)
def test_exception_view_allow_http_methods(method_name):
assert hasattr(ApiErrorView, method_name)
@pytest.mark.parametrize('method_name', HTTP_METHOD_NAMES)
def test_exception_view_raises_exception(api_view_obj_fixture, method_name):
request_mock = mock.MagicMock()
with pytest.raises(exceptions.APIException):
getattr(api_view_obj_fixture, method_name)(request_mock)
@pytest.mark.parametrize('version, supports_post', [(1, True), (2, False)])
def test_disable_post_on_v2_jobs_list(version, supports_post):
job_list = JobList()
job_list.request = mock.MagicMock()
with mock.patch('awx.api.views.get_request_version', return_value=version):
assert ('POST' in job_list.allowed_methods) == supports_post
@pytest.mark.parametrize('version, supports_post', [(1, False), (2, True)])
def test_disable_post_on_v1_inventory_source_list(version, supports_post):
inv_source_list = InventorySourceList()
inv_source_list.request = mock.MagicMock()
with mock.patch('awx.api.views.get_request_version', return_value=version):
assert ('POST' in inv_source_list.allowed_methods) == supports_post
def test_views_have_search_fields():
from awx.api.urls import urlpatterns as api_patterns
patterns = set([])
url_views = set([])
# Add recursive URL patterns
unprocessed = set(api_patterns)
while unprocessed:
to_process = unprocessed.copy()
unprocessed = set([])
for pattern in to_process:
if hasattr(pattern, 'lookup_str') and not pattern.lookup_str.startswith('awx.api'):
continue
patterns.add(pattern)
if isinstance(pattern, RegexURLResolver):
for sub_pattern in pattern.url_patterns:
if sub_pattern not in patterns:
unprocessed.add(sub_pattern)
# Get view classes
for pattern in patterns:
if isinstance(pattern, RegexURLPattern) and hasattr(pattern.callback, 'view_class'):
cls = pattern.callback.view_class
if issubclass(cls, ListAPIView):
url_views.add(pattern.callback.view_class)
# Gather any views that don't have search fields defined
views_missing_search = []
for View in url_views:
view = View()
if not hasattr(view, 'search_fields') or len(view.search_fields) == 0:
views_missing_search.append(view)
if views_missing_search:
raise Exception('{} views do not have search fields defined:\n{}'.format(
len(views_missing_search),
'\n'.join([
v.__class__.__name__ + ' (model: {})'.format(getattr(v, 'model', type(None)).__name__)
for v in views_missing_search
]))
) | awx/main/tests/unit/test_views.py | import pytest
import mock
# Django REST Framework
from rest_framework import exceptions
from rest_framework.generics import ListAPIView
# Django
from django.core.urlresolvers import RegexURLResolver, RegexURLPattern
# AWX
from awx.main.views import ApiErrorView
from awx.api.views import JobList, InventorySourceList
HTTP_METHOD_NAMES = [
'get',
'post',
'put',
'patch',
'delete',
'head',
'options',
'trace',
]
@pytest.fixture
def api_view_obj_fixture():
return ApiErrorView()
@pytest.mark.parametrize('method_name', HTTP_METHOD_NAMES)
def test_exception_view_allow_http_methods(method_name):
assert hasattr(ApiErrorView, method_name)
@pytest.mark.parametrize('method_name', HTTP_METHOD_NAMES)
def test_exception_view_raises_exception(api_view_obj_fixture, method_name):
request_mock = mock.MagicMock()
with pytest.raises(exceptions.APIException):
getattr(api_view_obj_fixture, method_name)(request_mock)
@pytest.mark.parametrize('version, supports_post', [(1, True), (2, False)])
def test_disable_post_on_v2_jobs_list(version, supports_post):
job_list = JobList()
job_list.request = mock.MagicMock()
with mock.patch('awx.api.views.get_request_version', return_value=version):
assert ('POST' in job_list.allowed_methods) == supports_post
@pytest.mark.parametrize('version, supports_post', [(1, False), (2, True)])
def test_disable_post_on_v1_inventory_source_list(version, supports_post):
inv_source_list = InventorySourceList()
inv_source_list.request = mock.MagicMock()
with mock.patch('awx.api.views.get_request_version', return_value=version):
assert ('POST' in inv_source_list.allowed_methods) == supports_post
def test_views_have_search_fields():
from awx.api.urls import urlpatterns as api_patterns
patterns = set([])
url_views = set([])
# Add recursive URL patterns
unprocessed = set(api_patterns)
while unprocessed:
to_process = unprocessed.copy()
unprocessed = set([])
for pattern in to_process:
if hasattr(pattern, 'lookup_str') and not pattern.lookup_str.startswith('awx.api'):
continue
patterns.add(pattern)
if isinstance(pattern, RegexURLResolver):
for sub_pattern in pattern.url_patterns:
if sub_pattern not in patterns:
unprocessed.add(sub_pattern)
# Get view classes
for pattern in patterns:
if isinstance(pattern, RegexURLPattern) and hasattr(pattern.callback, 'view_class'):
cls = pattern.callback.view_class
if issubclass(cls, ListAPIView):
url_views.add(pattern.callback.view_class)
# Gather any views that don't have search fields defined
views_missing_search = []
for View in url_views:
view = View()
if not hasattr(view, 'search_fields') or len(view.search_fields) == 0:
views_missing_search.append(view)
if views_missing_search:
raise Exception('{} views do not have search fields defined:\n{}'.format(
len(views_missing_search),
'\n'.join([
v.__class__.__name__ + ' (model: {})'.format(getattr(v, 'model', type(None)).__name__)
for v in views_missing_search
]))
) | 0.479747 | 0.242351 |
import pygame
import math
pygame.init()
class TowerTutorial(pygame.sprite.Sprite):
def __init__(Self):
Self.image = pygame.image.load("Media/Towers/Machine_Gun/L1 Images/L1_MachineGun0000.tif").convert_alpha()
Self.level = 1
Self.range = 40
Self.damage = 20
Self.visible = False
Self.cooldown = 0
def coordinates(Self , x , y):
Self.x = x
Self.y = y
Self.bulletx = x + 20
Self.bullety = y + 20
def showrange(Self , screen):
Self.visible = True
pygame.draw.circle(screen , ( 255 , 255 ,255) ,[Self.x + 20 , Self.y + 20] , Self.range , 2)
def lookat(Self , enemy):
tevector = ((enemy.x -Self.x) , (enemy.y - Self.y))
tedistance = math.sqrt(math.pow(tevector[0] , 2) + math.pow(tevector[1] , 2))
coste = -tevector[0] / tedistance
angle = int(math.degrees(math.acos(coste)))
if angle > 10 and angle < 100:
angle = "0" + str(angle)
elif angle < 10:
angle = "00" + str(angle)
file = "Media/Towers/Machine_Gun/L1 Images/L1_MachineGun0" + str(angle) + ".tif"
Self.image = pygame.image.load(file).convert_alpha()
def upgrade(Self):
if Self.visible:
if Self.level == 1:
Self.image = pygame.image.load("Media/Towers/Machine_Gun/L2 Images/L2_MachineGun0000.tif").convert_alpha()
Self.level += 1
Self.range = 50
Self.damage = 40
elif Self.level == 2:
Self.image = pygame.image.load("Media/Towers/Machine_Gun/L3 Images/L3_Machine_Gun0000.tif").convert_alpha()
Self.level += 1
Self.range = 70
Self.damage = 60
def shoot(Self , enemy , screen):
if Self.cooldown == 0:
distance = math.sqrt(math.pow((Self.x - enemy.x) , 2)+ math.pow((Self.y - enemy.y),2))
if distance <= Self.range:
bulletdistance = math.sqrt(math.pow((Self.bulletx - enemy.x - 20) , 2)+ math.pow((Self.bullety - enemy.y - 20),2))
btdistance = math.sqrt(math.pow((Self.bulletx - Self.x - 20) , 2)+ math.pow((Self.bullety - Self.y - 20),2))
if bulletdistance < 20:
Self.bulletx = Self.x + 20
Self.bullety = Self.y + 20
enemy.TakeDamage(Self.damage)
Self.cooldown += 1
if btdistance > distance:
Self.bulletx = enemy.x + 20
Self.bullety = enemy.y + 20
enemy.TakeDamage(Self.damage)
pygame.draw.circle(screen , (255 , 255 , 0) , [Self.bulletx , Self.bullety] , 7)
directionx = (enemy.x + 20 - Self.bulletx) / bulletdistance
directiony = (enemy.y + 20 - Self.bullety) / bulletdistance
Self.bulletx += int(directionx * 10)
Self.bullety += int(directiony * 10)
else:
Self.cooldown += 1
if Self.cooldown == 5:
Self.cooldown =0 | TowerTutorial.py | import pygame
import math
pygame.init()
class TowerTutorial(pygame.sprite.Sprite):
def __init__(Self):
Self.image = pygame.image.load("Media/Towers/Machine_Gun/L1 Images/L1_MachineGun0000.tif").convert_alpha()
Self.level = 1
Self.range = 40
Self.damage = 20
Self.visible = False
Self.cooldown = 0
def coordinates(Self , x , y):
Self.x = x
Self.y = y
Self.bulletx = x + 20
Self.bullety = y + 20
def showrange(Self , screen):
Self.visible = True
pygame.draw.circle(screen , ( 255 , 255 ,255) ,[Self.x + 20 , Self.y + 20] , Self.range , 2)
def lookat(Self , enemy):
tevector = ((enemy.x -Self.x) , (enemy.y - Self.y))
tedistance = math.sqrt(math.pow(tevector[0] , 2) + math.pow(tevector[1] , 2))
coste = -tevector[0] / tedistance
angle = int(math.degrees(math.acos(coste)))
if angle > 10 and angle < 100:
angle = "0" + str(angle)
elif angle < 10:
angle = "00" + str(angle)
file = "Media/Towers/Machine_Gun/L1 Images/L1_MachineGun0" + str(angle) + ".tif"
Self.image = pygame.image.load(file).convert_alpha()
def upgrade(Self):
if Self.visible:
if Self.level == 1:
Self.image = pygame.image.load("Media/Towers/Machine_Gun/L2 Images/L2_MachineGun0000.tif").convert_alpha()
Self.level += 1
Self.range = 50
Self.damage = 40
elif Self.level == 2:
Self.image = pygame.image.load("Media/Towers/Machine_Gun/L3 Images/L3_Machine_Gun0000.tif").convert_alpha()
Self.level += 1
Self.range = 70
Self.damage = 60
def shoot(Self , enemy , screen):
if Self.cooldown == 0:
distance = math.sqrt(math.pow((Self.x - enemy.x) , 2)+ math.pow((Self.y - enemy.y),2))
if distance <= Self.range:
bulletdistance = math.sqrt(math.pow((Self.bulletx - enemy.x - 20) , 2)+ math.pow((Self.bullety - enemy.y - 20),2))
btdistance = math.sqrt(math.pow((Self.bulletx - Self.x - 20) , 2)+ math.pow((Self.bullety - Self.y - 20),2))
if bulletdistance < 20:
Self.bulletx = Self.x + 20
Self.bullety = Self.y + 20
enemy.TakeDamage(Self.damage)
Self.cooldown += 1
if btdistance > distance:
Self.bulletx = enemy.x + 20
Self.bullety = enemy.y + 20
enemy.TakeDamage(Self.damage)
pygame.draw.circle(screen , (255 , 255 , 0) , [Self.bulletx , Self.bullety] , 7)
directionx = (enemy.x + 20 - Self.bulletx) / bulletdistance
directiony = (enemy.y + 20 - Self.bullety) / bulletdistance
Self.bulletx += int(directionx * 10)
Self.bullety += int(directiony * 10)
else:
Self.cooldown += 1
if Self.cooldown == 5:
Self.cooldown =0 | 0.34798 | 0.352146 |
from __future__ import (absolute_import, print_function, unicode_literals)
import math
import os
from colorclass import Color, Windows
from terminaltables import AsciiTable
from acli.utils import get_console_dimensions
try:
input = raw_input
except NameError:
pass
Windows.enable(auto_colors=True, reset_atexit=True)
def output_ascii_table(table_title=None,
table_data=None,
inner_heading_row_border=False,
inner_footing_row_border=False,
inner_row_border=False):
"""
@type table_title: unicode
@type table_data: list
@type inner_heading_row_border: bool
@type inner_footing_row_border: bool
@type inner_row_border: bool
"""
table = AsciiTable(table_data)
table.inner_heading_row_border = inner_heading_row_border
table.inner_row_border = inner_row_border
table.inner_footing_row_border = inner_footing_row_border
table.title = table_title
print(table.table)
def output_ascii_table_list(table_title=None,
table_data=None,
table_header=None,
inner_heading_row_border=False,
inner_row_border=False):
"""
@type table_title: unicode
@type table_data: list
@type inner_heading_row_border: bool
@type inner_row_border: bool
@type table_header: list
"""
console_rows, _ = get_console_dimensions()
console_rows = int(console_rows)
full_display_length = len(table_data) + 7
items_per_page = console_rows - 7
num_pages = 0
if full_display_length > console_rows:
try:
num_pages = int(math.ceil(float(len(table_data)) / float(items_per_page)))
except ZeroDivisionError:
exit('Console too small to display.')
if num_pages:
running_count = 0
for page in range(1, num_pages + 1):
page_table_output = list()
page_table_output.insert(0, table_header)
upper = (console_rows + running_count) - 7
if upper > len(table_data):
upper = len(table_data)
for x in range(running_count, upper):
page_table_output.append(table_data[x])
running_count += 1
table = AsciiTable(page_table_output)
table.inner_heading_row_border = inner_heading_row_border
table.inner_row_border = inner_row_border
table.title = table_title
if page != 1:
print('')
print(table.table)
if page < num_pages:
input("Press Enter to continue...")
os.system('clear')
else:
table_data.insert(0, table_header)
table = AsciiTable(table_data)
table.inner_heading_row_border = inner_heading_row_border
table.inner_row_border = inner_row_border
table.title = table_title
print(table.table)
def dash_if_none(item=None):
"""
@type item: object
"""
return str(item) if item else Color('{autoblack}-{/autoblack}')
def get_tags(tags, separator=', '):
"""
@type tags: list
@type separator: unicode
"""
tag_list = list()
for tag in tags:
tag_list.append("{0}:{1}".format(tag.get('Key'), tag.get('Value')))
if tag_list:
return separator.join(tag_list)
def get_name_tag(tags):
"""
@type tags: dict
"""
for tag_name, tag_value in tags.iteritems():
if tag_name == 'Name':
return tag_value
return "-" | lib/acli/output/__init__.py | from __future__ import (absolute_import, print_function, unicode_literals)
import math
import os
from colorclass import Color, Windows
from terminaltables import AsciiTable
from acli.utils import get_console_dimensions
try:
input = raw_input
except NameError:
pass
Windows.enable(auto_colors=True, reset_atexit=True)
def output_ascii_table(table_title=None,
table_data=None,
inner_heading_row_border=False,
inner_footing_row_border=False,
inner_row_border=False):
"""
@type table_title: unicode
@type table_data: list
@type inner_heading_row_border: bool
@type inner_footing_row_border: bool
@type inner_row_border: bool
"""
table = AsciiTable(table_data)
table.inner_heading_row_border = inner_heading_row_border
table.inner_row_border = inner_row_border
table.inner_footing_row_border = inner_footing_row_border
table.title = table_title
print(table.table)
def output_ascii_table_list(table_title=None,
table_data=None,
table_header=None,
inner_heading_row_border=False,
inner_row_border=False):
"""
@type table_title: unicode
@type table_data: list
@type inner_heading_row_border: bool
@type inner_row_border: bool
@type table_header: list
"""
console_rows, _ = get_console_dimensions()
console_rows = int(console_rows)
full_display_length = len(table_data) + 7
items_per_page = console_rows - 7
num_pages = 0
if full_display_length > console_rows:
try:
num_pages = int(math.ceil(float(len(table_data)) / float(items_per_page)))
except ZeroDivisionError:
exit('Console too small to display.')
if num_pages:
running_count = 0
for page in range(1, num_pages + 1):
page_table_output = list()
page_table_output.insert(0, table_header)
upper = (console_rows + running_count) - 7
if upper > len(table_data):
upper = len(table_data)
for x in range(running_count, upper):
page_table_output.append(table_data[x])
running_count += 1
table = AsciiTable(page_table_output)
table.inner_heading_row_border = inner_heading_row_border
table.inner_row_border = inner_row_border
table.title = table_title
if page != 1:
print('')
print(table.table)
if page < num_pages:
input("Press Enter to continue...")
os.system('clear')
else:
table_data.insert(0, table_header)
table = AsciiTable(table_data)
table.inner_heading_row_border = inner_heading_row_border
table.inner_row_border = inner_row_border
table.title = table_title
print(table.table)
def dash_if_none(item=None):
"""
@type item: object
"""
return str(item) if item else Color('{autoblack}-{/autoblack}')
def get_tags(tags, separator=', '):
"""
@type tags: list
@type separator: unicode
"""
tag_list = list()
for tag in tags:
tag_list.append("{0}:{1}".format(tag.get('Key'), tag.get('Value')))
if tag_list:
return separator.join(tag_list)
def get_name_tag(tags):
"""
@type tags: dict
"""
for tag_name, tag_value in tags.iteritems():
if tag_name == 'Name':
return tag_value
return "-" | 0.359814 | 0.132767 |
import ConfigParser
import os
import os.path
from Log import log_main
class VamosLibConfig:
def __init__(self, name, mode='auto', version=0, profile=False, expunge='last_close'):
self.name = name
self.mode = mode
self.version = version
self.profile = profile
self.expunge = expunge
# types of keys
self._keys = {
'mode' : str,
'version' : int,
'profile' : bool,
'expunge' : str
}
def parse_key_value(self, lib_name, kv_str, errors):
"""parse a key value string: k=v,k=v,..."""
kvs = kv_str.split(',')
if len(kvs) == 0:
errors.append("%s: No key,value given in -o option: '%s'" % (lib_name, kv_str))
else:
for kv in kvs:
r = kv.split('=')
if len(r) != 2:
errors.append("%s: Syntax error: '%s'" % (lib_name, kv))
else:
k,v = r
self.set_value(lib_name, k, v, errors)
def set_value(self, lib_name, k, v, errors):
if k in self._keys:
t = self._keys[k]
try:
rv = t(v)
# validate value
check_name = '_check_' + k
if hasattr(self, check_name):
check_func = getattr(self, check_name)
if check_func(rv):
setattr(self, k, rv)
else:
errors.append("%s: invalid '%s' value: '%s'" % (lib_name, k, rv))
# no validation available
else:
setattr(self, k, rv)
except ValueError:
errors.append("%s: invalid '%s' value: '%s' for type %s" % (lib_name, k, v, t))
else:
errors.append("%s: invalid key: '%s'" % (lib_name, k))
def _check_mode(self, v):
return v in ('auto', 'vamos', 'amiga', 'fake', 'off')
def _check_version(self, v):
return v >= 0
def _check_expunge(self, v):
return v in ('last_close', 'shutdown', 'no_mem')
class VamosConfig(ConfigParser.SafeConfigParser):
default_lib = '*.library'
def __init__(self, extra_file=None, skip_defaults=False, args=None, def_data_dir=None):
ConfigParser.SafeConfigParser.__init__(self)
self.def_data_dir = def_data_dir
self.files = []
self.args = args
# keep errors until logging is available
self.errors = []
# prepend extra file
if extra_file != None:
self.files.append(extra_file)
# read default config files (if they exist)
if not skip_defaults:
# add config in current working dir
self.files.append(os.path.join(os.getcwd(),".vamosrc"))
# add config in home directory
self.files.append(os.path.expanduser("~/.vamosrc"))
# read configs
self.found_files = self.read(self.files)
# setup config
self._reset()
self._parse_config()
self._parse_lib_config()
self._parse_args(args)
self._parse_lib_args(args)
self._set_defaults()
def get_lib_config(self, lib_name, sane_name=None, use_default=True):
"""get a configuration object for the given lib"""
# specific lib in config?
if lib_name in self.libs:
return self.libs[lib_name]
# search addtional sane_name?
if sane_name is not None:
if sane_name in self.libs:
return self.libs[sane_name]
# default config
if self.default_lib in self.libs and use_default:
return self.libs[self.default_lib]
# none found
return None
def get_args(self):
"""return the command line arguments"""
return self.args
def log(self):
"""after logging is setup dump info and other remarks"""
if len(self.found_files) == 0:
log_main.info("no config file found: %s" % ",".join(self.files))
else:
log_main.info("read config file: %s" % ",".join(self.found_files))
# dump config
self._dump()
# print recorded errors
if len(self.errors) > 0:
for e in self.errors:
log_main.error("config error: " + e)
def _dump(self):
# main config
for key in sorted(self._keys):
log_main.debug("config: [vamos] %s = %s", key, getattr(self,key))
# lib configs
for lib in sorted(self.libs):
cfg = self.libs[lib]
for key in sorted(cfg._keys):
log_main.debug("config: [%s] %s = %s", lib, key, getattr(cfg,key))
def _reset(self):
# default library config
# make sure exec and dos is taken from vamos
self.libs = {
'*.library' : VamosLibConfig('*.library', 'auto', 40, False),
'exec.library' : VamosLibConfig('exec.library', 'vamos', 40, False),
'dos.library' : VamosLibConfig('dos.library', 'vamos', 40, False),
}
# define keys that can be set
self._keys = {
# logging
'logging' : (str, None),
'verbose' : (int, 0),
'quiet' : (bool, False),
'benchmark' : (bool, False),
'log_file' : (str, None),
# low-level tracing
'instr_trace' : (bool, False),
'memory_trace' : (bool, False),
'internal_memory_trace' : (bool, False),
'reg_dump' : (bool, False),
# cpu emu
'cpu' : (str, "68000"),
'max_cycles' : (int, 0),
'cycles_per_block' : (int, 1000),
# system
'ram_size' : (int, 1024),
'stack_size' : (int, 4),
'hw_access' : (str, "emu"),
'shell' : (bool, False),
# dirs
'data_dir' : (str, self.def_data_dir),
# paths
'cwd' : (str, None),
'pure_ami_paths' : (bool, False)
}
# prefill keys with None
for key in self._keys:
setattr(self, key, None)
def _set_defaults(self):
for key in self._keys:
val = getattr(self, key)
if val is None:
def_val = self._keys[key][1]
setattr(self, key, def_val)
def _check_cpu(self, val):
return val in ('68000','68020','68030',
'000','020','030',
'00','20','30')
def _set_value(self, key, value):
if key in self._keys:
val_type = self._keys[key][0]
try:
rv = val_type(value)
# check value
check_name = '_check_' + key
if hasattr(self, check_name):
check_func = getattr(self, check_name)
if(check_func(rv)):
setattr(self, key, rv)
else:
self.errors.append("Invalid '%s' value: '%s'" % (key, rv))
else:
setattr(self, key, rv)
except ValueError:
self.errors.append("Invalid '%s' type: '%s' must be %s" % (key, value, val_type))
else:
self.errors.append("Invalid key: '%s'" % key)
def _parse_config(self):
# parse [vamos] section
sect = 'vamos'
for key in self._keys:
if self.has_option(sect, key) and getattr(self, key) is None:
value = self.get(sect, key)
self._set_value(key, value)
def _parse_args(self, args):
# get paramters from args (allow to overwrite existing settings)
for key in self._keys:
if hasattr(args, key):
arg_value = getattr(args, key)
if arg_value is not None:
self._set_value(key, arg_value)
def _parse_lib_config(self):
# run through all sections matching [<bla.library>]:
for lib_name in self.sections():
if lib_name.endswith('.library') or lib_name.endswith('.device'):
# check for lib
if lib_name in self.libs:
lib = self.libs[lib_name]
else:
lib = VamosLibConfig(lib_name)
self.libs[lib_name] = lib
# walk through options
for key in self.options(lib_name):
if key in lib._keys:
v = self.get(lib_name, key)
# set value
lib.set_value(lib_name, key, v, self.errors)
else:
self.errors.append("%s: Invalid option: '%s'" % (lib_name, key))
def _parse_lib_args(self, args):
# parse lib options
if hasattr(args, 'lib_options') and args.lib_options != None:
for e in args.lib_options:
# lib+key=value,key=value
r = e.split('+')
if len(r) != 2:
self.errors.append("Syntax error: '%s'" % e)
else:
lib, kv = r
# generate lib name
if lib.endswith('.library') or lib.endswith('.device'):
lib_name = lib
else:
lib_name = lib + '.library'
# find or create config
if lib_name in self.libs:
# use already defined lib
lib_cfg = self.libs[lib_name]
else:
# create new lib
lib_cfg = VamosLibConfig(lib_name)
self.libs[lib_name] = lib_cfg
# parse key value
lib_cfg.parse_key_value(lib_name, kv, self.errors) | dependencies/amitools-0.1.0/amitools/vamos/VamosConfig.py | import ConfigParser
import os
import os.path
from Log import log_main
class VamosLibConfig:
def __init__(self, name, mode='auto', version=0, profile=False, expunge='last_close'):
self.name = name
self.mode = mode
self.version = version
self.profile = profile
self.expunge = expunge
# types of keys
self._keys = {
'mode' : str,
'version' : int,
'profile' : bool,
'expunge' : str
}
def parse_key_value(self, lib_name, kv_str, errors):
"""parse a key value string: k=v,k=v,..."""
kvs = kv_str.split(',')
if len(kvs) == 0:
errors.append("%s: No key,value given in -o option: '%s'" % (lib_name, kv_str))
else:
for kv in kvs:
r = kv.split('=')
if len(r) != 2:
errors.append("%s: Syntax error: '%s'" % (lib_name, kv))
else:
k,v = r
self.set_value(lib_name, k, v, errors)
def set_value(self, lib_name, k, v, errors):
if k in self._keys:
t = self._keys[k]
try:
rv = t(v)
# validate value
check_name = '_check_' + k
if hasattr(self, check_name):
check_func = getattr(self, check_name)
if check_func(rv):
setattr(self, k, rv)
else:
errors.append("%s: invalid '%s' value: '%s'" % (lib_name, k, rv))
# no validation available
else:
setattr(self, k, rv)
except ValueError:
errors.append("%s: invalid '%s' value: '%s' for type %s" % (lib_name, k, v, t))
else:
errors.append("%s: invalid key: '%s'" % (lib_name, k))
def _check_mode(self, v):
return v in ('auto', 'vamos', 'amiga', 'fake', 'off')
def _check_version(self, v):
return v >= 0
def _check_expunge(self, v):
return v in ('last_close', 'shutdown', 'no_mem')
class VamosConfig(ConfigParser.SafeConfigParser):
default_lib = '*.library'
def __init__(self, extra_file=None, skip_defaults=False, args=None, def_data_dir=None):
ConfigParser.SafeConfigParser.__init__(self)
self.def_data_dir = def_data_dir
self.files = []
self.args = args
# keep errors until logging is available
self.errors = []
# prepend extra file
if extra_file != None:
self.files.append(extra_file)
# read default config files (if they exist)
if not skip_defaults:
# add config in current working dir
self.files.append(os.path.join(os.getcwd(),".vamosrc"))
# add config in home directory
self.files.append(os.path.expanduser("~/.vamosrc"))
# read configs
self.found_files = self.read(self.files)
# setup config
self._reset()
self._parse_config()
self._parse_lib_config()
self._parse_args(args)
self._parse_lib_args(args)
self._set_defaults()
def get_lib_config(self, lib_name, sane_name=None, use_default=True):
"""get a configuration object for the given lib"""
# specific lib in config?
if lib_name in self.libs:
return self.libs[lib_name]
# search addtional sane_name?
if sane_name is not None:
if sane_name in self.libs:
return self.libs[sane_name]
# default config
if self.default_lib in self.libs and use_default:
return self.libs[self.default_lib]
# none found
return None
def get_args(self):
"""return the command line arguments"""
return self.args
def log(self):
"""after logging is setup dump info and other remarks"""
if len(self.found_files) == 0:
log_main.info("no config file found: %s" % ",".join(self.files))
else:
log_main.info("read config file: %s" % ",".join(self.found_files))
# dump config
self._dump()
# print recorded errors
if len(self.errors) > 0:
for e in self.errors:
log_main.error("config error: " + e)
def _dump(self):
# main config
for key in sorted(self._keys):
log_main.debug("config: [vamos] %s = %s", key, getattr(self,key))
# lib configs
for lib in sorted(self.libs):
cfg = self.libs[lib]
for key in sorted(cfg._keys):
log_main.debug("config: [%s] %s = %s", lib, key, getattr(cfg,key))
def _reset(self):
# default library config
# make sure exec and dos is taken from vamos
self.libs = {
'*.library' : VamosLibConfig('*.library', 'auto', 40, False),
'exec.library' : VamosLibConfig('exec.library', 'vamos', 40, False),
'dos.library' : VamosLibConfig('dos.library', 'vamos', 40, False),
}
# define keys that can be set
self._keys = {
# logging
'logging' : (str, None),
'verbose' : (int, 0),
'quiet' : (bool, False),
'benchmark' : (bool, False),
'log_file' : (str, None),
# low-level tracing
'instr_trace' : (bool, False),
'memory_trace' : (bool, False),
'internal_memory_trace' : (bool, False),
'reg_dump' : (bool, False),
# cpu emu
'cpu' : (str, "68000"),
'max_cycles' : (int, 0),
'cycles_per_block' : (int, 1000),
# system
'ram_size' : (int, 1024),
'stack_size' : (int, 4),
'hw_access' : (str, "emu"),
'shell' : (bool, False),
# dirs
'data_dir' : (str, self.def_data_dir),
# paths
'cwd' : (str, None),
'pure_ami_paths' : (bool, False)
}
# prefill keys with None
for key in self._keys:
setattr(self, key, None)
def _set_defaults(self):
for key in self._keys:
val = getattr(self, key)
if val is None:
def_val = self._keys[key][1]
setattr(self, key, def_val)
def _check_cpu(self, val):
return val in ('68000','68020','68030',
'000','020','030',
'00','20','30')
def _set_value(self, key, value):
if key in self._keys:
val_type = self._keys[key][0]
try:
rv = val_type(value)
# check value
check_name = '_check_' + key
if hasattr(self, check_name):
check_func = getattr(self, check_name)
if(check_func(rv)):
setattr(self, key, rv)
else:
self.errors.append("Invalid '%s' value: '%s'" % (key, rv))
else:
setattr(self, key, rv)
except ValueError:
self.errors.append("Invalid '%s' type: '%s' must be %s" % (key, value, val_type))
else:
self.errors.append("Invalid key: '%s'" % key)
def _parse_config(self):
# parse [vamos] section
sect = 'vamos'
for key in self._keys:
if self.has_option(sect, key) and getattr(self, key) is None:
value = self.get(sect, key)
self._set_value(key, value)
def _parse_args(self, args):
# get paramters from args (allow to overwrite existing settings)
for key in self._keys:
if hasattr(args, key):
arg_value = getattr(args, key)
if arg_value is not None:
self._set_value(key, arg_value)
def _parse_lib_config(self):
# run through all sections matching [<bla.library>]:
for lib_name in self.sections():
if lib_name.endswith('.library') or lib_name.endswith('.device'):
# check for lib
if lib_name in self.libs:
lib = self.libs[lib_name]
else:
lib = VamosLibConfig(lib_name)
self.libs[lib_name] = lib
# walk through options
for key in self.options(lib_name):
if key in lib._keys:
v = self.get(lib_name, key)
# set value
lib.set_value(lib_name, key, v, self.errors)
else:
self.errors.append("%s: Invalid option: '%s'" % (lib_name, key))
def _parse_lib_args(self, args):
# parse lib options
if hasattr(args, 'lib_options') and args.lib_options != None:
for e in args.lib_options:
# lib+key=value,key=value
r = e.split('+')
if len(r) != 2:
self.errors.append("Syntax error: '%s'" % e)
else:
lib, kv = r
# generate lib name
if lib.endswith('.library') or lib.endswith('.device'):
lib_name = lib
else:
lib_name = lib + '.library'
# find or create config
if lib_name in self.libs:
# use already defined lib
lib_cfg = self.libs[lib_name]
else:
# create new lib
lib_cfg = VamosLibConfig(lib_name)
self.libs[lib_name] = lib_cfg
# parse key value
lib_cfg.parse_key_value(lib_name, kv, self.errors) | 0.26923 | 0.107017 |
from manim import *
from functions import *
import color
class AddingSquares(MovingCameraScene):
def construct(self):
self.camera.background_color = color.BACKGROUND
print('First fibonacci (n value):')
n = int(input())
fibo = fiboarray(2*n+2)
dotGroup1 = VGroup()
for i in range(0, pow(fibo[n],2)):
dotGroup1.add(Dot().set_color(color.BLUE))
dotGroup1.arrange_in_grid(rows=fibo[n], cols=fibo[n], buff=0.09).move_to(LEFT * 4 + DOWN * 0.25 * fibo[n-1]/2)
dotGroup2 = VGroup()
for i in range(0, pow(fibo[n+1],2)):
dotGroup2.add(Dot().set_color(color.RED))
dotGroup2.arrange_in_grid(rows=fibo[n+1], cols=fibo[n+1], buff=0.09)
symbols = VGroup().add(Text('+').move_to(LEFT * (4 - (fibo[n] * 0.125) + fibo[n+1] * 0.125)/2).set_color(BLACK)).add(Text('=').move_to(RIGHT * (4 + fibo[n+1] * 0.125)/2).set_color(BLACK))
labels = VGroup()
labels.add(Tex('$' + str(fibo[n]) + '^2$').set_color(BLACK).move_to(LEFT * 4 + DOWN * 0.25 * (fibo[n+1]/2 + 4)))
labels.add(Tex('$' + str(fibo[n+1]) + '^2$').set_color(BLACK).move_to(DOWN * 0.25 * (fibo[n+1]/2 + 4)))
labels.add(Tex('$f_' + str(n) + '$').set_color(BLACK).move_to(LEFT * 4 + DOWN * 0.25 * (fibo[n+1]/2 + 8)))
labels.add(Tex('$f_' + str(n+1) + '$').set_color(BLACK).move_to(DOWN * 0.25 * (fibo[n+1]/2 + 8)))
self.play(FadeIn(labels,dotGroup1,dotGroup2,symbols[0]))
self.wait(1.5)
dotGroup1Copy = dotGroup1.copy()
dotGroup2Copy = dotGroup2.copy()
labels.add(Tex(str(fibo[2*n+1])).set_color(BLACK).move_to(RIGHT * 6 + UP * 0.5))
labels.add(Tex('$f_' + str(2*n + 1) + '$').set_color(BLACK).move_to(RIGHT * 6 + DOWN * 0.5))
brace = BraceBetweenPoints(RIGHT * 5 + UP * 0.25 * fibo[2*n+1]/2, RIGHT * 5 + DOWN * 0.25 * fibo[2*n+1]/2).set_color(BLACK).rotate(PI)
self.camera.frame.save_state()
camScale = 1
if n == 4:
camScale = 1.2
self.play(self.camera.frame.animate.scale(camScale), FadeIn(symbols[1], labels[4], labels[5], brace), dotGroup1Copy.animate.arrange(DOWN * 0.375).move_to(RIGHT * 4 + UP * 0.1274 * (fibo[2*n+1] - pow(fibo[n], 2))), dotGroup2Copy.animate.arrange(DOWN * 0.375).move_to(RIGHT * 4 + DOWN * 0.1274 * (fibo[2*n+1] - pow(fibo[n+1], 2))))
self.wait(2)
self.play(FadeIn(Square().scale(10).set_fill(color.BACKGROUND).set_opacity(1))) | addingSquares.py | from manim import *
from manim import *
from functions import *
import color


class AddingSquares(MovingCameraScene):
    """Animate the identity f(n)^2 + f(n+1)^2 = f(2n+1) with dot grids."""

    def construct(self):
        self.camera.background_color = color.BACKGROUND
        # The Fibonacci index is read interactively when the scene renders.
        print('First fibonacci (n value):')
        n = int(input())
        fibo = fiboarray(2 * n + 2)  # fiboarray comes from functions.py -- presumably 0-indexed; TODO confirm

        # Blue fibo[n] x fibo[n] grid on the left: f(n)^2 dots.
        dotGroup1 = VGroup()
        for i in range(0, pow(fibo[n], 2)):
            dotGroup1.add(Dot().set_color(color.BLUE))
        dotGroup1.arrange_in_grid(rows=fibo[n], cols=fibo[n], buff=0.09).move_to(LEFT * 4 + DOWN * 0.25 * fibo[n - 1] / 2)

        # Red fibo[n+1] x fibo[n+1] grid in the centre: f(n+1)^2 dots.
        dotGroup2 = VGroup()
        for i in range(0, pow(fibo[n + 1], 2)):
            dotGroup2.add(Dot().set_color(color.RED))
        dotGroup2.arrange_in_grid(rows=fibo[n + 1], cols=fibo[n + 1], buff=0.09)

        # '+' between the grids and '=' before the result column.
        symbols = VGroup().add(Text('+').move_to(LEFT * (4 - (fibo[n] * 0.125) + fibo[n + 1] * 0.125) / 2).set_color(BLACK)).add(Text('=').move_to(RIGHT * (4 + fibo[n + 1] * 0.125) / 2).set_color(BLACK))

        # Labels under each grid: the squared value and the Fibonacci symbol.
        labels = VGroup()
        labels.add(Tex('$' + str(fibo[n]) + '^2$').set_color(BLACK).move_to(LEFT * 4 + DOWN * 0.25 * (fibo[n + 1] / 2 + 4)))
        labels.add(Tex('$' + str(fibo[n + 1]) + '^2$').set_color(BLACK).move_to(DOWN * 0.25 * (fibo[n + 1] / 2 + 4)))
        labels.add(Tex('$f_' + str(n) + '$').set_color(BLACK).move_to(LEFT * 4 + DOWN * 0.25 * (fibo[n + 1] / 2 + 8)))
        labels.add(Tex('$f_' + str(n + 1) + '$').set_color(BLACK).move_to(DOWN * 0.25 * (fibo[n + 1] / 2 + 8)))
        self.play(FadeIn(labels, dotGroup1, dotGroup2, symbols[0]))
        self.wait(1.5)

        # Copies of both grids collapse into a single braced column of
        # f(2n+1) dots on the right.
        dotGroup1Copy = dotGroup1.copy()
        dotGroup2Copy = dotGroup2.copy()
        labels.add(Tex(str(fibo[2 * n + 1])).set_color(BLACK).move_to(RIGHT * 6 + UP * 0.5))
        labels.add(Tex('$f_' + str(2 * n + 1) + '$').set_color(BLACK).move_to(RIGHT * 6 + DOWN * 0.5))
        brace = BraceBetweenPoints(RIGHT * 5 + UP * 0.25 * fibo[2 * n + 1] / 2, RIGHT * 5 + DOWN * 0.25 * fibo[2 * n + 1] / 2).set_color(BLACK).rotate(PI)
        self.camera.frame.save_state()
        # n == 4 produces a column too tall for the default frame; zoom out.
        camScale = 1
        if n == 4:
            camScale = 1.2
        self.play(self.camera.frame.animate.scale(camScale), FadeIn(symbols[1], labels[4], labels[5], brace), dotGroup1Copy.animate.arrange(DOWN * 0.375).move_to(RIGHT * 4 + UP * 0.1274 * (fibo[2 * n + 1] - pow(fibo[n], 2))), dotGroup2Copy.animate.arrange(DOWN * 0.375).move_to(RIGHT * 4 + DOWN * 0.1274 * (fibo[2 * n + 1] - pow(fibo[n + 1], 2))))
        self.wait(2)
        # Fade to a background-coloured square to end the scene cleanly.
        self.play(FadeIn(Square().scale(10).set_fill(color.BACKGROUND).set_opacity(1)))
import multiprocessing as mp
from datetime import datetime, timedelta

import dask.dataframe
import requests
from PIL import ImageFont

import motionshader

if __name__ == '__main__':
    # Motionshader expects a time indexed, sorted, Dask DataFrame with columns
    # containing EPSG:4326 coordinates.  Motionshader is opinionated about
    # using Dask DataFrames, for scaling this process to big data.
    df = dask.dataframe.read_csv('ACLED.csv', usecols=['timestamp', 'longitude', 'latitude', 'event_type'])
    df = df.categorize(columns=['event_type'])
    df['timestamp'] = dask.dataframe.to_datetime(df['timestamp'], unit='s')
    df = df.set_index('timestamp', npartitions=mp.cpu_count(), compute=True).persist()

    # Define a Basemap using a WMS Service and associated layer. Assumes EPSG:4326.
    # The requests.Session() can be customized for authentication or 2-way SSL.
    basemap = motionshader.Basemap(basemap_session=requests.Session(),
                                   basemap_url='https://ows.terrestris.de/osm/service',
                                   basemap_layer='OSM-WMS')

    # The Dataset wraps the Dask DataFrame plus its coordinate/category columns.
    dataset = motionshader.Dataset(data=df, longitude_column='longitude', latitude_column='latitude',
                                   cat_column='event_type')

    # The MotionVideo ties the dataset to the basemap service.
    motion_video = motionshader.MotionVideo(dataset=dataset, basemap=basemap)

    start_datetime, end_datetime = datetime(2020, 1, 1), datetime(2021, 1, 1)
    min_longitude, max_longitude = df.longitude.min().compute(), df.longitude.max().compute()
    min_latitude, max_latitude = df.latitude.min().compute(), df.latitude.max().compute()

    # GeospatialViewport defines where the 'camera' looks on the globe and the
    # resolution of the output.  TemporalPlayback defines the temporal bounds,
    # the temporal size of a frame, and the frames per second: here a single
    # frame contains 7 days of data and steps forward 7 days between frames.
    viewport = motionshader.GeospatialViewport(min_longitude=min_longitude, max_longitude=max_longitude,
                                               min_latitude=min_latitude, max_latitude=max_latitude,
                                               width_pixels=1920, height_pixels=1080)
    playback = motionshader.TemporalPlayback(start_time=start_datetime, end_time=end_datetime,
                                             frame_length=timedelta(days=7),
                                             frame_step=timedelta(days=7), frames_per_second=1)

    # Load the annotation/watermark font once instead of twice.
    label_font = ImageFont.truetype('arial', 14)

    # If a FrameAnnotation is provided to the rendering function, the time range
    # and center coordinate are stamped onto each frame; position, font, color,
    # coordinate order, and date format are all configurable.
    annotation = motionshader.FrameAnnotation(pos_x=10, pos_y=10, font=label_font,
                                              font_color='#000000', lon_lat=True,
                                              date_format='%Y-%m-%dT%H:%M:%S%z')

    # If a Watermark is provided, the given text is stamped onto each frame.
    watermark = motionshader.FrameWatermark(watermark='Rendered using Motionshader', pos_x=10,
                                            pos_y=viewport.height_pixels - 40,
                                            font=label_font,
                                            font_color='#000000')

    # ACLED event type -> point color.
    color_map = {
        'Protests': '#ff33cc',
        'Riots': '#ffff00',
        'Violence against civilians': '#ff0000',
        'Battles': '#66ff33',
        'Strategic developments': '#9933ff',
        'Explosions/Remote violence': '#0000ff'
    }

    # MotionVideo can be output as either a GIF or an MP4.
    motion_video.to_gif(viewport=viewport, playback=playback, file_name='ACLED_Categorical.gif',
                        annotation=annotation, watermark=watermark, scale_points_pixels=1, color_map=color_map)
    motion_video.to_video(viewport=viewport, playback=playback, file_name='ACLED_Categorical.mp4',
                          annotation=annotation, watermark=watermark, scale_points_pixels=1, color_map=color_map)
import multiprocessing as mp
from datetime import datetime, timedelta

import dask.dataframe
import requests
from PIL import ImageFont

import motionshader

if __name__ == '__main__':
    # Motionshader expects a time indexed, sorted, Dask DataFrame with columns
    # containing EPSG:4326 coordinates.  Motionshader is opinionated about
    # using Dask DataFrames, for scaling this process to big data.
    df = dask.dataframe.read_csv('ACLED.csv', usecols=['timestamp', 'longitude', 'latitude', 'event_type'])
    df = df.categorize(columns=['event_type'])
    df['timestamp'] = dask.dataframe.to_datetime(df['timestamp'], unit='s')
    df = df.set_index('timestamp', npartitions=mp.cpu_count(), compute=True).persist()

    # Define a Basemap using a WMS Service and associated layer. Assumes EPSG:4326.
    # The requests.Session() can be customized for authentication or 2-way SSL.
    basemap = motionshader.Basemap(basemap_session=requests.Session(),
                                   basemap_url='https://ows.terrestris.de/osm/service',
                                   basemap_layer='OSM-WMS')

    # The Dataset wraps the Dask DataFrame plus its coordinate/category columns.
    dataset = motionshader.Dataset(data=df, longitude_column='longitude', latitude_column='latitude',
                                   cat_column='event_type')

    # The MotionVideo ties the dataset to the basemap service.
    motion_video = motionshader.MotionVideo(dataset=dataset, basemap=basemap)

    start_datetime, end_datetime = datetime(2020, 1, 1), datetime(2021, 1, 1)
    min_longitude, max_longitude = df.longitude.min().compute(), df.longitude.max().compute()
    min_latitude, max_latitude = df.latitude.min().compute(), df.latitude.max().compute()

    # GeospatialViewport defines where the 'camera' looks on the globe and the
    # resolution of the output.  TemporalPlayback defines the temporal bounds,
    # the temporal size of a frame, and the frames per second: here a single
    # frame contains 7 days of data and steps forward 7 days between frames.
    viewport = motionshader.GeospatialViewport(min_longitude=min_longitude, max_longitude=max_longitude,
                                               min_latitude=min_latitude, max_latitude=max_latitude,
                                               width_pixels=1920, height_pixels=1080)
    playback = motionshader.TemporalPlayback(start_time=start_datetime, end_time=end_datetime,
                                             frame_length=timedelta(days=7),
                                             frame_step=timedelta(days=7), frames_per_second=1)

    # Load the annotation/watermark font once instead of twice.
    label_font = ImageFont.truetype('arial', 14)

    # If a FrameAnnotation is provided to the rendering function, the time range
    # and center coordinate are stamped onto each frame; position, font, color,
    # coordinate order, and date format are all configurable.
    annotation = motionshader.FrameAnnotation(pos_x=10, pos_y=10, font=label_font,
                                              font_color='#000000', lon_lat=True,
                                              date_format='%Y-%m-%dT%H:%M:%S%z')

    # If a Watermark is provided, the given text is stamped onto each frame.
    watermark = motionshader.FrameWatermark(watermark='Rendered using Motionshader', pos_x=10,
                                            pos_y=viewport.height_pixels - 40,
                                            font=label_font,
                                            font_color='#000000')

    # ACLED event type -> point color.
    color_map = {
        'Protests': '#ff33cc',
        'Riots': '#ffff00',
        'Violence against civilians': '#ff0000',
        'Battles': '#66ff33',
        'Strategic developments': '#9933ff',
        'Explosions/Remote violence': '#0000ff'
    }

    # MotionVideo can be output as either a GIF or an MP4.
    motion_video.to_gif(viewport=viewport, playback=playback, file_name='ACLED_Categorical.gif',
                        annotation=annotation, watermark=watermark, scale_points_pixels=1, color_map=color_map)
    motion_video.to_video(viewport=viewport, playback=playback, file_name='ACLED_Categorical.mp4',
                          annotation=annotation, watermark=watermark, scale_points_pixels=1, color_map=color_map)
import pandas as pd
import matplotlib.pyplot as plt

# Read the columns of interest from csv into a pandas dataframe.
data = pd.read_csv('data.csv')[['userid', 'From', 'To']]

# Display the first 10 rows of the dataframe.
print(data.head(10))

# Check whether any column contains NaN values.
print("Presence of NAN values : \n", data.isnull().any())
# Function to remove the letter prefix from column data.
def remove_letter(val):
    """Strip a leading 'F' or 'T' marker from a From/To value.

    Values such as 'F100' or 'T115' become '100' / '115'.  Values without
    a recognised prefix (or empty strings) are returned unchanged; the
    original implementation fell through and returned None for them, which
    broke the later astype(int) conversion.
    """
    if val and (val[0] == 'F' or val[0] == 'T'):
        return val[1:]
    return val
# Use apply to strip the letter prefix from both columns.
data['From'] = data['From'].apply(remove_letter)
data['To'] = data['To'].apply(remove_letter)

# Check the data types of all columns.
print(data.dtypes)

# Convert the object-dtype columns to int.
data['From'] = data['From'].astype(str).astype(int)
data['To'] = data['To'].astype(str).astype(int)

# Keep only one row per user id.
data.drop_duplicates('userid', inplace=True)

# Plotting histograms.
# 1) Histogram of the 'To' column data.
num_bins = 50
to_data = data['To']
legend = ['To']
plt.hist([to_data], bins=num_bins, color=['Orange'])
plt.xlabel("To data")
plt.ylabel("Frequency")
plt.legend(legend)
plt.show()

# 2) Histogram of the 'From' column data.
from_data = data['From']
legend = ['From']
plt.hist([from_data], bins=num_bins, color=['green'])
plt.xlabel("From data")
plt.ylabel("Frequency")
plt.legend(legend)
plt.show()

# 3) Histogram of the 'userid' column data.
userid_data = data['userid']
legend = ['userid']
plt.hist([userid_data], bins=num_bins, color=['Red'])
plt.xlabel("userid")
plt.ylabel("Frequency")
plt.legend(legend)
plt.show()

# Sorting data.
print('Sorting...')

# Plotting boxplots.
plt.boxplot(data['To'])
plt.boxplot(data['From'])

# Sort rows by userid.
data = data.sort_values('userid')

# Region boundaries, spaced 15 apart starting at 99: the region holding
# value 100 gets rank 0, 115 gets rank 1, 132 gets rank 2, and so on.
region_rank = list(range(99, 210, 15))
print(region_rank)
# mark_region calculates the rank of the region a value falls into.
def mark_region(x, ranks=None):
    """Return the 0-based rank of the region that value x falls into.

    ranks defaults to the module-level region_rank boundary list.  A value
    in (ranks[k], ranks[k+1]] gets rank k; values at or below the first
    boundary get -1.  Values above every boundary now get the last rank
    instead of the implicit None the original returned.
    """
    boundaries = region_rank if ranks is None else ranks
    current_rank = -1  # indices start from 0; the first boundary passed bumps this to 0
    for boundary in boundaries:
        if x > boundary:
            current_rank += 1
        else:
            return current_rank
    return current_rank
# Apply mark_region to derive region ranks for both columns.
data['source_to'] = data['To'].apply(mark_region)
data['target_from'] = data['From'].apply(mark_region)

# Confirm the absence of NaN values after the mapping.
print("Presence of NAN values : \n", data.isnull().any())
data.head(100)

max_region = data['source_to'].max()
print(max_region)

# output['links_dict'] tracks every source-region -> target-region path
# taken by users, with a per-pair unique user count.
output = dict()
output.update({'links_dict': dict()})
def update_source_target(src, t):
    """Record one user's src -> t region transition in output['links_dict'].

    links_dict is a nested dict: links_dict[src][t]['unique_users'] counts
    how many users moved from region src to region t.
    """
    try:
        print(src, t)
        # If this source is already in links_dict...
        if src in output['links_dict']:
            if t in output['links_dict'][src]:
                # Source/target pair already seen: bump the user count.
                output['links_dict'][src][t]['unique_users'] += 1
            else:
                # Known source, new target: start its count at 1.
                output['links_dict'][src].update({t: dict({'unique_users': 1})})
        else:
            # New source: create it together with this first target.
            output['links_dict'].update({src: dict({t: dict({'unique_users': 1})})})
    except Exception as e:
        # Best-effort: rows with unusable region values are skipped silently.
        # NOTE(review): consider logging e instead of discarding it.
        pass
data2 = data.apply(lambda x: update_source_target(x['source_to'], x['target_from']), axis=1)

# Print every unique source/target combination that was recorded.
for key, value in output['links_dict'].items():
    print(key, value)

# Flatten links_dict into parallel source / target / value lists.
labels = []
sources = []
targets = []
values = []
for source_key, source_value in output['links_dict'].items():
    for target_key, target_value in output['links_dict'][source_key].items():
        sources.append(source_key)
        targets.append(target_key)
        values.append(target_value['unique_users'])

# Sanity check: every (deduplicated) user should be counted exactly once.
# ('total' instead of the original 'sum', which shadowed the builtin.)
total = 0
for i in values:
    total += i
num_rows = data.shape[0]
print(num_rows == total)

# The 7 region labels are listed twice because a user can travel from and
# to any region (source nodes 0-6, target nodes 7-13).
labels = ['Region One', 'Region Two', 'Region Three',
          'Region Four', 'Region Five', 'Region Six', 'Region Seven',
          'Region One', 'Region Two', 'Region Three', 'Region Four',
          'Region Five', 'Region Six', 'Region Seven']

# In Sankey the source index must be smaller than the target index, so shift
# the target into the second bank of nodes when needed; the region itself
# stays the same.
n = len(sources)
for i in range(n):
    if sources[i] > targets[i]:
        targets[i] += 7

# Drop links whose source and target are the same region.  The values list
# is filtered in lockstep: the original kept all values, leaving them
# misaligned with the filtered sources/targets in the chart below.
rem_indices = []
for i in range(n):
    if sources[i] == targets[i]:
        rem_indices.append(i)
updated_sources = []
updated_targets = []
updated_values = []
for i in range(n):
    if i in rem_indices:
        continue
    updated_sources.append(sources[i])
    updated_targets.append(targets[i])
    updated_values.append(values[i])

# Node colors in HEX format, converted to RGB tuples below.
palette = ['50BE97', 'E4655C', 'FCC865',
           'BFD6DE', '3E5066', '353A3E', 'E6E6E6']
for i, col in enumerate(palette):
    palette[i] = tuple(int(col[i:i + 2], 16) for i in (0, 2, 4))
print(palette)

# Repeat the colors for the second bank of region nodes.
palette = 2 * palette
print(palette)
colors = []
for color in palette:
    colors.append('rgb' + str(color))

# Plotting the Sankey chart.
import plotly.graph_objects as go

fig = go.Figure(data=[go.Sankey(
    node=dict(
        pad=5,
        thickness=10,
        line=dict(color="blue", width=0.5),
        label=labels,
        color=colors
    ),
    link=dict(
        source=updated_sources,
        target=updated_targets,
        value=updated_values,
        hovertemplate='%{value} unique users went from %{source.label} to %{target.label}.<br />'
    ))])
fig.update_layout(autosize=True, title_text="Customer Sankey Chart", font=dict(size=15), plot_bgcolor='white')
fig.show()
import pandas as pd
import matplotlib.pyplot as plt

# Read the columns of interest from csv into a pandas dataframe.
data = pd.read_csv('data.csv')[['userid', 'From', 'To']]

# Display the first 10 rows of the dataframe.
print(data.head(10))

# Check whether any column contains NaN values.
print("Presence of NAN values : \n", data.isnull().any())
# Function to remove the letter prefix from column data.
def remove_letter(val):
    """Strip a leading 'F' or 'T' marker from a From/To value.

    Values such as 'F100' or 'T115' become '100' / '115'.  Values without
    a recognised prefix (or empty strings) are returned unchanged; the
    original implementation fell through and returned None for them, which
    broke the later astype(int) conversion.
    """
    if val and (val[0] == 'F' or val[0] == 'T'):
        return val[1:]
    return val
# Use apply to strip the letter prefix from both columns.
data['From'] = data['From'].apply(remove_letter)
data['To'] = data['To'].apply(remove_letter)

# Check the data types of all columns.
print(data.dtypes)

# Convert the object-dtype columns to int.
data['From'] = data['From'].astype(str).astype(int)
data['To'] = data['To'].astype(str).astype(int)

# Keep only one row per user id.
data.drop_duplicates('userid', inplace=True)

# Plotting histograms.
# 1) Histogram of the 'To' column data.
num_bins = 50
to_data = data['To']
legend = ['To']
plt.hist([to_data], bins=num_bins, color=['Orange'])
plt.xlabel("To data")
plt.ylabel("Frequency")
plt.legend(legend)
plt.show()

# 2) Histogram of the 'From' column data.
from_data = data['From']
legend = ['From']
plt.hist([from_data], bins=num_bins, color=['green'])
plt.xlabel("From data")
plt.ylabel("Frequency")
plt.legend(legend)
plt.show()

# 3) Histogram of the 'userid' column data.
userid_data = data['userid']
legend = ['userid']
plt.hist([userid_data], bins=num_bins, color=['Red'])
plt.xlabel("userid")
plt.ylabel("Frequency")
plt.legend(legend)
plt.show()

# Sorting data.
print('Sorting...')

# Plotting boxplots.
plt.boxplot(data['To'])
plt.boxplot(data['From'])

# Sort rows by userid.
data = data.sort_values('userid')

# Region boundaries, spaced 15 apart starting at 99: the region holding
# value 100 gets rank 0, 115 gets rank 1, 132 gets rank 2, and so on.
region_rank = list(range(99, 210, 15))
print(region_rank)
# mark_region calculates the rank of the region a value falls into.
def mark_region(x, ranks=None):
    """Return the 0-based rank of the region that value x falls into.

    ranks defaults to the module-level region_rank boundary list.  A value
    in (ranks[k], ranks[k+1]] gets rank k; values at or below the first
    boundary get -1.  Values above every boundary now get the last rank
    instead of the implicit None the original returned.
    """
    boundaries = region_rank if ranks is None else ranks
    current_rank = -1  # indices start from 0; the first boundary passed bumps this to 0
    for boundary in boundaries:
        if x > boundary:
            current_rank += 1
        else:
            return current_rank
    return current_rank
# Apply mark_region to derive region ranks for both columns.
data['source_to'] = data['To'].apply(mark_region)
data['target_from'] = data['From'].apply(mark_region)

# Confirm the absence of NaN values after the mapping.
print("Presence of NAN values : \n", data.isnull().any())
data.head(100)

max_region = data['source_to'].max()
print(max_region)

# output['links_dict'] tracks every source-region -> target-region path
# taken by users, with a per-pair unique user count.
output = dict()
output.update({'links_dict': dict()})
def update_source_target(src, t):
    """Record one user's src -> t region transition in output['links_dict'].

    links_dict is a nested dict: links_dict[src][t]['unique_users'] counts
    how many users moved from region src to region t.
    """
    try:
        print(src, t)
        # If this source is already in links_dict...
        if src in output['links_dict']:
            if t in output['links_dict'][src]:
                # Source/target pair already seen: bump the user count.
                output['links_dict'][src][t]['unique_users'] += 1
            else:
                # Known source, new target: start its count at 1.
                output['links_dict'][src].update({t: dict({'unique_users': 1})})
        else:
            # New source: create it together with this first target.
            output['links_dict'].update({src: dict({t: dict({'unique_users': 1})})})
    except Exception as e:
        # Best-effort: rows with unusable region values are skipped silently.
        # NOTE(review): consider logging e instead of discarding it.
        pass
data2 = data.apply(lambda x: update_source_target(x['source_to'], x['target_from']), axis=1)

# Print every unique source/target combination that was recorded.
for key, value in output['links_dict'].items():
    print(key, value)

# Flatten links_dict into parallel source / target / value lists.
labels = []
sources = []
targets = []
values = []
for source_key, source_value in output['links_dict'].items():
    for target_key, target_value in output['links_dict'][source_key].items():
        sources.append(source_key)
        targets.append(target_key)
        values.append(target_value['unique_users'])

# Sanity check: every (deduplicated) user should be counted exactly once.
# ('total' instead of the original 'sum', which shadowed the builtin.)
total = 0
for i in values:
    total += i
num_rows = data.shape[0]
print(num_rows == total)

# The 7 region labels are listed twice because a user can travel from and
# to any region (source nodes 0-6, target nodes 7-13).
labels = ['Region One', 'Region Two', 'Region Three',
          'Region Four', 'Region Five', 'Region Six', 'Region Seven',
          'Region One', 'Region Two', 'Region Three', 'Region Four',
          'Region Five', 'Region Six', 'Region Seven']

# In Sankey the source index must be smaller than the target index, so shift
# the target into the second bank of nodes when needed; the region itself
# stays the same.
n = len(sources)
for i in range(n):
    if sources[i] > targets[i]:
        targets[i] += 7

# Drop links whose source and target are the same region.  The values list
# is filtered in lockstep: the original kept all values, leaving them
# misaligned with the filtered sources/targets in the chart below.
rem_indices = []
for i in range(n):
    if sources[i] == targets[i]:
        rem_indices.append(i)
updated_sources = []
updated_targets = []
updated_values = []
for i in range(n):
    if i in rem_indices:
        continue
    updated_sources.append(sources[i])
    updated_targets.append(targets[i])
    updated_values.append(values[i])

# Node colors in HEX format, converted to RGB tuples below.
palette = ['50BE97', 'E4655C', 'FCC865',
           'BFD6DE', '3E5066', '353A3E', 'E6E6E6']
for i, col in enumerate(palette):
    palette[i] = tuple(int(col[i:i + 2], 16) for i in (0, 2, 4))
print(palette)

# Repeat the colors for the second bank of region nodes.
palette = 2 * palette
print(palette)
colors = []
for color in palette:
    colors.append('rgb' + str(color))

# Plotting the Sankey chart.
import plotly.graph_objects as go

fig = go.Figure(data=[go.Sankey(
    node=dict(
        pad=5,
        thickness=10,
        line=dict(color="blue", width=0.5),
        label=labels,
        color=colors
    ),
    link=dict(
        source=updated_sources,
        target=updated_targets,
        value=updated_values,
        hovertemplate='%{value} unique users went from %{source.label} to %{target.label}.<br />'
    ))])
fig.update_layout(autosize=True, title_text="Customer Sankey Chart", font=dict(size=15), plot_bgcolor='white')
fig.show()
import requests
import json

convert = "USD"
listing_url = "https://api.coinmarketcap.com/v2/listings/"
url_end = "?structure=array&convert=" + convert

# Fetch the full listing once and build a symbol -> CoinMarketCap id map.
request = requests.get(listing_url)
results = request.json()
data = results["data"]
ticker_url_pairs = {}
for currency in data:
    symbol = currency["symbol"]
    url = currency["id"]
    ticker_url_pairs[symbol] = url
print(ticker_url_pairs)

# Interactive loop: look up and pretty-print one ticker per iteration.
while True:
    print()
    # Original prompt had a typo and a stray backslash escape ("cryptosurrencys\:").
    choice = input("Enter the ticker symbol of the cryptocurrency: ")
    choice = choice.upper()
    if choice not in ticker_url_pairs:
        # Unknown symbol: the original crashed with a KeyError here.
        print(f"Unknown ticker symbol: {choice}")
        continue
    ticker_url = f"https://api.coinmarketcap.com/v2/ticker/{str(ticker_url_pairs[choice])}/{url_end}"
    # print(ticker_url)
    request = requests.get(ticker_url)
    results = request.json()
    # print(json.dumps(results, sort_keys=True, indent=4))

    currency = results["data"][0]
    rank = currency["rank"]
    name = currency["name"]
    symbol = currency["symbol"]
    circulating_supply = int(currency["circulating_supply"])
    total_supply = int(currency["total_supply"])
    quotes = currency["quotes"][convert]
    market_cap = quotes["market_cap"]
    hour_change = quotes["percent_change_1h"]
    day_change = quotes["percent_change_24h"]
    week_change = quotes["percent_change_7d"]
    price = quotes["price"]
    volume = quotes["volume_24h"]

    # Thousands-separated display strings.
    volume_string = "{:,}".format(volume)
    market_cap_string = "{:,}".format(market_cap)
    circulating_supply_string = "{:,}".format(circulating_supply)
    total_supply_string = "{:,}".format(total_supply)

    print(f"{str(rank)}: {name} ({symbol})")
    print(f"Market cap: \t\t${market_cap_string}")
    print(f"Price: \t\t\t\t${str(price)}")
    print(f"24h volume: \t\t${volume_string}")
    print(f"Hour change: \t\t{str(hour_change)}%")
    print(f"Day change: \t\t{str(day_change)}%")
    print(f"Week change: \t\t{str(week_change)}%")
    print(f"Total supply: \t\t{total_supply_string}")
    print(f"Circulating supply: {circulating_supply_string}")
    print(f"Percentage of coins in circulation: {str(int(circulating_supply / total_supply * 100))}")
    print()
    choice = input("Again?: (y/n):")
    if choice == "n":
        break
# done!
import requests
import json

convert = "USD"
listing_url = "https://api.coinmarketcap.com/v2/listings/"
url_end = "?structure=array&convert=" + convert

# Fetch the full listing once and build a symbol -> CoinMarketCap id map.
request = requests.get(listing_url)
results = request.json()
data = results["data"]
ticker_url_pairs = {}
for currency in data:
    symbol = currency["symbol"]
    url = currency["id"]
    ticker_url_pairs[symbol] = url
print(ticker_url_pairs)

# Interactive loop: look up and pretty-print one ticker per iteration.
while True:
    print()
    # Original prompt had a typo and a stray backslash escape ("cryptosurrencys\:").
    choice = input("Enter the ticker symbol of the cryptocurrency: ")
    choice = choice.upper()
    if choice not in ticker_url_pairs:
        # Unknown symbol: the original crashed with a KeyError here.
        print(f"Unknown ticker symbol: {choice}")
        continue
    ticker_url = f"https://api.coinmarketcap.com/v2/ticker/{str(ticker_url_pairs[choice])}/{url_end}"
    # print(ticker_url)
    request = requests.get(ticker_url)
    results = request.json()
    # print(json.dumps(results, sort_keys=True, indent=4))

    currency = results["data"][0]
    rank = currency["rank"]
    name = currency["name"]
    symbol = currency["symbol"]
    circulating_supply = int(currency["circulating_supply"])
    total_supply = int(currency["total_supply"])
    quotes = currency["quotes"][convert]
    market_cap = quotes["market_cap"]
    hour_change = quotes["percent_change_1h"]
    day_change = quotes["percent_change_24h"]
    week_change = quotes["percent_change_7d"]
    price = quotes["price"]
    volume = quotes["volume_24h"]

    # Thousands-separated display strings.
    volume_string = "{:,}".format(volume)
    market_cap_string = "{:,}".format(market_cap)
    circulating_supply_string = "{:,}".format(circulating_supply)
    total_supply_string = "{:,}".format(total_supply)

    print(f"{str(rank)}: {name} ({symbol})")
    print(f"Market cap: \t\t${market_cap_string}")
    print(f"Price: \t\t\t\t${str(price)}")
    print(f"24h volume: \t\t${volume_string}")
    print(f"Hour change: \t\t{str(hour_change)}%")
    print(f"Day change: \t\t{str(day_change)}%")
    print(f"Week change: \t\t{str(week_change)}%")
    print(f"Total supply: \t\t{total_supply_string}")
    print(f"Circulating supply: {circulating_supply_string}")
    print(f"Percentage of coins in circulation: {str(int(circulating_supply / total_supply * 100))}")
    print()
    choice = input("Again?: (y/n):")
    if choice == "n":
        break
# done!
import os

import pytest

import tests
from just_bin_it.endpoints.sources import HistogramSource
from tests.doubles.consumer import StubConsumer

INVALID_FB = b"this is an invalid fb message"


class TestHistogramSource:
    """Tests for HistogramSource deserialisation of hs00 FlatBuffers messages."""

    @pytest.fixture(autouse=True)
    def prepare(self):
        # Trick to get the path of the test data regardless of the cwd.
        path = os.path.dirname(tests.__file__)
        with open(os.path.join(path, "example_hs00_fb.dat"), "rb") as f:
            self.valid_fb = f.read()

    def test_if_no_consumer_supplied_then_raises(self):
        with pytest.raises(Exception):
            HistogramSource(None)

    def test_if_no_new_messages_then_no_data(self):
        mock_consumer = StubConsumer(["broker1"], ["topic1"])
        mock_consumer.add_messages([])
        hs = HistogramSource(mock_consumer)

        data = hs.get_new_data()

        assert len(data) == 0

    def test_if_five_new_messages_on_one_topic_then_data_has_five_items(self):
        mock_consumer = StubConsumer(["broker1"], ["topic1"])
        mock_consumer.add_messages([(0, 0, self.valid_fb)] * 5)
        hs = HistogramSource(mock_consumer)

        data = hs.get_new_data()
        _, _, message = data[0]

        assert len(data) == 5
        assert message["source"] == "just-bin-it"
        assert message["timestamp"] == 987_654_321
        assert message["current_shape"] == [50]
        assert len(message["data"]) == 50
        assert len(message["dim_metadata"]) == 1
        assert message["info"] == "hello"
        assert message["dim_metadata"][0]["length"] == 50
        assert len(message["dim_metadata"][0]["bin_boundaries"]) == 51
        assert message["dim_metadata"][0]["bin_boundaries"][0] == 0.0
        assert message["dim_metadata"][0]["bin_boundaries"][50] == 100_000_000.0

    def test_deserialising_invalid_fb_does_not_throw(self):
        mock_consumer = StubConsumer(["broker1"], ["topic1"])
        # NOTE(review): other tests add (partition, offset, payload) tuples;
        # here a bare bytes object is added -- confirm StubConsumer accepts both.
        mock_consumer.add_messages([INVALID_FB])
        hs = HistogramSource(mock_consumer)

        hs.get_new_data()
import os

import pytest

import tests
from just_bin_it.endpoints.sources import HistogramSource
from tests.doubles.consumer import StubConsumer

INVALID_FB = b"this is an invalid fb message"


class TestHistogramSource:
    """Tests for HistogramSource deserialisation of hs00 FlatBuffers messages."""

    @pytest.fixture(autouse=True)
    def prepare(self):
        # Trick to get the path of the test data regardless of the cwd.
        path = os.path.dirname(tests.__file__)
        with open(os.path.join(path, "example_hs00_fb.dat"), "rb") as f:
            self.valid_fb = f.read()

    def test_if_no_consumer_supplied_then_raises(self):
        with pytest.raises(Exception):
            HistogramSource(None)

    def test_if_no_new_messages_then_no_data(self):
        mock_consumer = StubConsumer(["broker1"], ["topic1"])
        mock_consumer.add_messages([])
        hs = HistogramSource(mock_consumer)

        data = hs.get_new_data()

        assert len(data) == 0

    def test_if_five_new_messages_on_one_topic_then_data_has_five_items(self):
        mock_consumer = StubConsumer(["broker1"], ["topic1"])
        mock_consumer.add_messages([(0, 0, self.valid_fb)] * 5)
        hs = HistogramSource(mock_consumer)

        data = hs.get_new_data()
        _, _, message = data[0]

        assert len(data) == 5
        assert message["source"] == "just-bin-it"
        assert message["timestamp"] == 987_654_321
        assert message["current_shape"] == [50]
        assert len(message["data"]) == 50
        assert len(message["dim_metadata"]) == 1
        assert message["info"] == "hello"
        assert message["dim_metadata"][0]["length"] == 50
        assert len(message["dim_metadata"][0]["bin_boundaries"]) == 51
        assert message["dim_metadata"][0]["bin_boundaries"][0] == 0.0
        assert message["dim_metadata"][0]["bin_boundaries"][50] == 100_000_000.0

    def test_deserialising_invalid_fb_does_not_throw(self):
        mock_consumer = StubConsumer(["broker1"], ["topic1"])
        # NOTE(review): other tests add (partition, offset, payload) tuples;
        # here a bare bytes object is added -- confirm StubConsumer accepts both.
        mock_consumer.add_messages([INVALID_FB])
        hs = HistogramSource(mock_consumer)

        hs.get_new_data()
from panda3d.core import Point3
from panda3d.core import Vec3

from wecs.aspects import Aspect
from wecs.aspects import factory
from wecs import panda3d
from wecs import mechanics
from wecs import cefconsole
from wecs.panda3d import aspects


# Each frame, run these systems. This defines the game itself.
system_types = [
    panda3d.LoadModels,  # Loads models, sets up actors, makes them collidable.
    panda3d.PrepareCameras,  # Attach / detach camera pivots to / from models.
    panda3d.UpdateClocks,  # How long is this frame? Update all clocks.
    # What movement do the characters intend to do?
    # panda3d.AcceptInput,  # Input from player, ranges ([-1; 1]), not scaled for time.
    panda3d.Think,  # Input from AIs, the same.
    panda3d.UpdateStamina,  # Cancels move modes if the character is exhausted, "unintending" them.
    panda3d.UpdateCharacter,  # Scale inputs by frame time: "intended movement in this frame".
    # The following systems adjust the intended movement.
    panda3d.Floating,  # Scale by speed for floating.
    panda3d.Walking,  # Scale by speed for walk / run / crouch / sprint.
    panda3d.Inertiing,  # Clamp movement speed delta by inertia.
    panda3d.Bumping,  # Bump into things (and out again).
    panda3d.Falling,  # Fall, or stand on the ground.
    panda3d.Jumping,  # Impart upward impulse.
    panda3d.TurningBackToCamera,  # Head towards where the camera is pointing.
    panda3d.ExecuteMovement,  # Turn intention into actual movement.
    panda3d.AnimateCharacter,
    panda3d.Animate,
    # Movement is done; now update the cameras and console.
    panda3d.ResetMountedCamera,
    panda3d.ReorientObjectCentricCamera,
    panda3d.CollideCamerasWithTerrain,
    cefconsole.UpdateWecsSubconsole,
    cefconsole.WatchEntitiesInSubconsole,
]

# Aspects are basically classes for entities. The map aspect bundles the
# components every piece of level geometry needs.
game_map = Aspect(
    [
        mechanics.Clock,
        panda3d.Position,
        panda3d.Model,
        panda3d.Scene,
        panda3d.CollidableGeometry,
        panda3d.FlattenStrong,
    ],
    overrides={
        mechanics.Clock: dict(clock=panda3d.panda_clock),
        panda3d.Model: dict(model_name='roadE.bam'),
        panda3d.Scene: dict(node=base.render),  # `base` is the ShowBase builtin.
    },
)

# Populate the world with the map, the player character, and a few NPCs.
# Map
map_entity = base.ecs_world.create_entity(name="Level geometry")
game_map.add(map_entity)

# Player avatar with stamina, watched in the console.
player_avatar = Aspect(
    [
        aspects.player_character,
        panda3d.Stamina,
        cefconsole.WatchedEntity,
    ])
player_avatar.add(
    base.ecs_world.create_entity(name="Playerbecca"),
    overrides={
        mechanics.Clock: dict(parent=map_entity._uid),
        panda3d.Position: dict(value=Point3(50, 290, 0)),
    },
)

# Non-moving NPC.
aspects.non_player_character.add(
    base.ecs_world.create_entity(name="Rebecca"),
    overrides={
        panda3d.Position: dict(value=Point3(60, 290, 0)),
        mechanics.Clock: dict(parent=map_entity._uid),
    },
)

# NPC walking in a small circle (constant forward motion plus turning).
aspects.non_player_character.add(
    base.ecs_world.create_entity(name="Roundbecca"),
    overrides={
        panda3d.Position: dict(value=Point3(70, 290, 0)),
        panda3d.ConstantCharacterAI: dict(
            move=Vec3(0.0, 0.25, 0.0),
            heading=-0.5,
        ),
        mechanics.Clock: dict(parent=map_entity._uid),
    },
)

# NPC with Brownian (random-walk) movement.
new_npc = Aspect([aspects.avatar, aspects.npc_mind_brownian])
new_npc.add(
    base.ecs_world.create_entity(name="Randombecca"),
    overrides={
        panda3d.Position: dict(value=Point3(80, 290, 0)),
        mechanics.Clock: dict(parent=map_entity._uid),
    },
)
from panda3d.core import Vec3
from wecs.aspects import Aspect
from wecs.aspects import factory
from wecs import panda3d
from wecs import mechanics
from wecs import cefconsole
from wecs.panda3d import aspects
# Each frame, run these systems. This defines the game itself.
system_types = [
panda3d.LoadModels, # Loads models, sets up actors, makes them collibable.
panda3d.PrepareCameras, # Attach / detach camera pivots to / from models.
panda3d.UpdateClocks, # How long is this frame? Update all clocks.
# What movement do the characters intend to do?
# panda3d.AcceptInput, # Input from player, ranges ([-1; 1]), not scaled for time.
panda3d.Think, # Input from AIs, the same
panda3d.UpdateStamina, # A game mechanic that cancels move modes if the character is exhausted, "unintending" them
panda3d.UpdateCharacter, # Scale inputs by frame time, making them "Intended movement in this frame."
# The following systems adjust the intended movement
panda3d.Floating, # Scale by speed for floating
panda3d.Walking, # Scale by speed for walk / run / crouch / sprint
panda3d.Inertiing, # Clamp movement speed delta by inertia.
panda3d.Bumping, # Bump into things (and out again).
panda3d.Falling, # Fall, or stand on the ground.
panda3d.Jumping, # Impart upward impulse.
panda3d.TurningBackToCamera, # Head towards where the camera is pointing.
panda3d.ExecuteMovement, # Turn intention into actual movement.
panda3d.AnimateCharacter,
panda3d.Animate,
# We're done with character movement, now update the cameras and console.
panda3d.ResetMountedCamera,
panda3d.ReorientObjectCentricCamera,
panda3d.CollideCamerasWithTerrain,
cefconsole.UpdateWecsSubconsole,
cefconsole.WatchEntitiesInSubconsole,
]
# Aspects are basically classes for entities. Here are two that we will use.
game_map = Aspect(
[mechanics.Clock,
panda3d.Position,
panda3d.Model,
panda3d.Scene,
panda3d.CollidableGeometry,
panda3d.FlattenStrong,
],
overrides={
mechanics.Clock: dict(clock=panda3d.panda_clock),
panda3d.Model: dict(model_name='roadE.bam'),
panda3d.Scene: dict(node=base.render),
},
)
# Populate the world with the map, the player character, and a few NPCs
# Map
map_entity = base.ecs_world.create_entity(name="Level geometry")
game_map.add(map_entity)
# Player
player_avatar = Aspect(
[
aspects.player_character,
panda3d.Stamina,
cefconsole.WatchedEntity,
])
player_avatar.add(
base.ecs_world.create_entity(name="Playerbecca"),
overrides={
mechanics.Clock: dict(parent=map_entity._uid),
panda3d.Position: dict(value=Point3(50, 290, 0)),
},
)
# Non-moving NPC
aspects.non_player_character.add(
base.ecs_world.create_entity(name="Rebecca"),
overrides={
panda3d.Position: dict(value=Point3(60, 290, 0)),
mechanics.Clock: dict(parent=map_entity._uid),
},
)
# Small circle NPC
aspects.non_player_character.add(
base.ecs_world.create_entity(name="Roundbecca"),
overrides={
panda3d.Position: dict(value=Point3(70, 290, 0)),
panda3d.ConstantCharacterAI: dict(
move=Vec3(0.0, 0.25, 0.0),
heading=-0.5,
),
mechanics.Clock: dict(parent=map_entity._uid),
},
)
# Brownian NPC
new_npc = Aspect([aspects.avatar, aspects.npc_mind_brownian])
new_npc.add(
base.ecs_world.create_entity(name="Randombecca"),
overrides={
panda3d.Position: dict(value=Point3(80, 290, 0)),
mechanics.Clock: dict(parent=map_entity._uid),
},
) | 0.739422 | 0.443299 |
import contextlib
import urllib.parse
from typing import TYPE_CHECKING, cast
import flask
import peewee
import werkzeug.datastructures
import werkzeug.exceptions
if TYPE_CHECKING:
from typing import Any, Callable, List, Optional, Union
from jinja2 import Environment
from peewee import Model
from werkzeug.datastructures import ImmutableMultiDict
from .typing import ObjectData, Query
__all__ = [
'render_macro',
'prepare_response',
'parse_request',
]
def render_macro(template_name_or_list: 'Union[str, List[str]]', macro: str, **context: 'Any') -> str:
    """Render a single **macro** defined in a Jinja template.

    Args:
        template_name_or_list: Template name, or an iterable of names of
            which the first existing template is used.
        macro: The name of the macro to invoke.

    Keyword Args:
        **context: Variables passed through to the macro call.

    Returns:
        The macro's rendered output.
    """
    env = cast('Environment', flask.current_app.jinja_env)
    selected = env.get_or_select_template(template_name_or_list)  # type: ignore[arg-type]
    return getattr(selected.module, macro)(**context)
def prepare_response(template: 'Union[str, List[str]]') -> 'Callable[[Model], ObjectData]':
    """Prepare response object data.

    Returns a factory that renders a model record into DataTables object
    data with the given ``template``.  The template is expected to define a
    macro ``render_{field_name}`` for each field it wants to render, taking
    a single ``record`` argument.

    Args:
        template: Path to the macro template.

    Returns:
        Callable mapping a record to its rendered object data.

    See Also:
        See :func:`flask_datatables.utils.render_macro` for more information.
    """
    def wrapper(record: peewee.Model) -> 'ObjectData':
        rendered = {}  # type: ObjectData
        for name in record.__data__:
            try:
                rendered[name] = render_macro(template, f'render_{name}', record=record)  # type: ignore[misc]
            except Exception:
                # No matching macro (or rendering failed): fall back to the
                # raw attribute value.
                rendered[name] = getattr(record, name)  # type: ignore[misc]
        return rendered
    return wrapper
def _parse_int(arg: 'Optional[str]') -> int:
"""Parse argument as :obj:`int`.
Args:
arg: Original request argument.
Returns:
Parsed query argument.
"""
if arg is not None:
with contextlib.suppress(Exception):
return int(arg)
return -1
def _parse_bool(arg: 'Optional[str]') -> bool:
"""Parse argument as :obj:`bool`.
Args:
arg: Original request argument.
Returns:
Parsed query argument.
"""
if isinstance(arg, str):
arg = arg.casefold()
if arg == 'true':
return True
if arg == 'false':
return False
return False
def _parse_str(arg: 'Optional[str]') -> str:
"""Parse argument as :obj:`str`.
Args:
arg: Original request argument.
Returns:
Parsed query argument.
"""
if arg is None:
return ''
return arg
def parse_request(args: 'Optional[ImmutableMultiDict]' = None) -> 'Query':
    """Parse :attr:`flask.request.args <flask.Request.args>` as :class:`~tekid.ext.datatables.Query`.

    Args:
        args: Original request arguments. The default value is inferred from
            :attr:`request.args <flask.Request.args>`.

    Returns:
        Parsed query dictionary.
    """
    if args is None:
        args = flask.request.args
    query = {
        'draw': _parse_int(args.get('draw')),
        'columns': [],
        'order': [],
        'start': _parse_int(args.get('start')),
        'length': _parse_int(args.get('length')),
        'search': {
            'value': _parse_str(args.get('search[value]')),
            'regex': _parse_bool(args.get('search[regex]')),
        },
        '_': _parse_int(args.get('_')),
    }  # type: Query
    # Columns are sent flattened as ``columns[i][...]``; walk indices until
    # one is missing.
    index = 0
    while True:
        try:
            data = args[f'columns[{index}][data]']
        except werkzeug.exceptions.BadRequestKeyError:
            break
        query['columns'].append({
            'data': _parse_str(data),
            # BUG FIX: previously this re-read ``columns[i][data]``; in the
            # DataTables protocol the column name lives under
            # ``columns[i][name]``.
            'name': _parse_str(args.get(f'columns[{index}][name]')),
            'searchable': _parse_bool(args.get(f'columns[{index}][searchable]')),
            'orderable': _parse_bool(args.get(f'columns[{index}][orderable]')),
            'search': {
                'value': _parse_str(args.get(f'columns[{index}][search][value]')),
                'regex': _parse_bool(args.get(f'columns[{index}][search][regex]')),
            },
        })
        index += 1
    # Ordering instructions follow the same flattened ``order[i][...]`` scheme.
    index = 0
    while True:
        try:
            column = args[f'order[{index}][column]']
        except werkzeug.exceptions.BadRequestKeyError:
            break
        query['order'].append({
            'column': _parse_int(column),
            'dir': _parse_str(args.get(f'order[{index}][dir]')),  # type: ignore[typeddict-item]
        })
        index += 1
    return query
def build_cache(query_string: 'Optional[str]' = None) -> str:
    """Build a key to cache the query parameters.

    Args:
        query_string: Query parameters in string form. The default value is inferred
            from :attr:`request.query_string <flask.Request.query_string>`.

    Returns:
        A string literal representing the query parameters.
    """
    if query_string is None:
        query_string = flask.request.query_string.decode()
    query_parsed = urllib.parse.parse_qsl(query_string)
    query = werkzeug.datastructures.MultiDict(query_parsed).to_dict()
    # ``draw`` is a per-request nonce, so it must not influence the cache key.
    if 'draw' in query:
        del query['draw']
    # Sort by parameter name so equivalent query strings map to a single key.
    query_sorted = sorted(query.items(), key=lambda kv: kv[0])
    return urllib.parse.urlencode(query_sorted) | flask_datatables/utils.py | import contextlib
import urllib.parse
from typing import TYPE_CHECKING, cast
import flask
import peewee
import werkzeug.datastructures
import werkzeug.exceptions
if TYPE_CHECKING:
from typing import Any, Callable, List, Optional, Union
from jinja2 import Environment
from peewee import Model
from werkzeug.datastructures import ImmutableMultiDict
from .typing import ObjectData, Query
__all__ = [
'render_macro',
'prepare_response',
'parse_request',
]
def render_macro(template_name_or_list: 'Union[str, List[str]]', macro: str, **context: 'Any') -> str:
"""Evaluates and renders a **macro** from the template.
Args:
template_name_or_list: The name of the template to be rendered, or an iterable with template names
the first one existing will be rendered.
macro: The name of macro to be called.
Keyword Args:
**context: The variables that should be available in the context of the template.
Returns:
The rendered macro.
"""
template = cast('Environment', flask.current_app.jinja_env).get_or_select_template(template_name_or_list) # type: ignore[arg-type] # pylint: disable=line-too-long
macro_func = getattr(template.module, macro)
return macro_func(**context)
def prepare_response(template: 'Union[str, List[str]]') -> 'Callable[[Model], ObjectData]':
"""Prepare response object data.
The function returns a wrapper function to use the ``template`` as a factory to
render HTML response blocks. The Jinja templates should have **macro** blocks
for each target field named after ``render_{field_name}`` and takes only one
argument ``record`` as the selected data model record.
Args:
template: Path to the macro template.
Returns:
Prepared response object data.
See Also:
See :func:`flask_datatables.utils.render_macro` for more information.
"""
def wrapper(record: peewee.Model) -> 'ObjectData':
data = {} # type: ObjectData
for field in record.__data__.keys():
try:
data[field] = render_macro(template, f'render_{field}', record=record) # type: ignore[misc]
except Exception:
data[field] = getattr(record, field) # type: ignore[misc]
return data
return wrapper
def _parse_int(arg: 'Optional[str]') -> int:
"""Parse argument as :obj:`int`.
Args:
arg: Original request argument.
Returns:
Parsed query argument.
"""
if arg is not None:
with contextlib.suppress(Exception):
return int(arg)
return -1
def _parse_bool(arg: 'Optional[str]') -> bool:
"""Parse argument as :obj:`bool`.
Args:
arg: Original request argument.
Returns:
Parsed query argument.
"""
if isinstance(arg, str):
arg = arg.casefold()
if arg == 'true':
return True
if arg == 'false':
return False
return False
def _parse_str(arg: 'Optional[str]') -> str:
"""Parse argument as :obj:`str`.
Args:
arg: Original request argument.
Returns:
Parsed query argument.
"""
if arg is None:
return ''
return arg
def parse_request(args: 'Optional[ImmutableMultiDict]' = None) -> 'Query':
    """Parse :attr:`flask.request.args <flask.Request.args>` as :class:`~tekid.ext.datatables.Query`.

    Args:
        args: Original request arguments. The default value is inferred from
            :attr:`request.args <flask.Request.args>`.

    Returns:
        Parsed query dictionary.
    """
    if args is None:
        args = flask.request.args
    query = {
        'draw': _parse_int(args.get('draw')),
        'columns': [],
        'order': [],
        'start': _parse_int(args.get('start')),
        'length': _parse_int(args.get('length')),
        'search': {
            'value': _parse_str(args.get('search[value]')),
            'regex': _parse_bool(args.get('search[regex]')),
        },
        '_': _parse_int(args.get('_')),
    }  # type: Query
    # Columns are sent flattened as ``columns[i][...]``; walk indices until
    # one is missing.
    index = 0
    while True:
        try:
            data = args[f'columns[{index}][data]']
        except werkzeug.exceptions.BadRequestKeyError:
            break
        query['columns'].append({
            'data': _parse_str(data),
            # BUG FIX: previously this re-read ``columns[i][data]``; in the
            # DataTables protocol the column name lives under
            # ``columns[i][name]``.
            'name': _parse_str(args.get(f'columns[{index}][name]')),
            'searchable': _parse_bool(args.get(f'columns[{index}][searchable]')),
            'orderable': _parse_bool(args.get(f'columns[{index}][orderable]')),
            'search': {
                'value': _parse_str(args.get(f'columns[{index}][search][value]')),
                'regex': _parse_bool(args.get(f'columns[{index}][search][regex]')),
            },
        })
        index += 1
    # Ordering instructions follow the same flattened ``order[i][...]`` scheme.
    index = 0
    while True:
        try:
            column = args[f'order[{index}][column]']
        except werkzeug.exceptions.BadRequestKeyError:
            break
        query['order'].append({
            'column': _parse_int(column),
            'dir': _parse_str(args.get(f'order[{index}][dir]')),  # type: ignore[typeddict-item]
        })
        index += 1
    return query
def build_cache(query_string: 'Optional[str]' = None) -> str:
"""Build a key to cache the query parameters.
Args:
query_string: Query parameters in string form. The default value is inferred
from :attr:`request.query_string <flask.Request.query_string>`.
Returns:
A string literal representing the query parameters.
"""
if query_string is None:
query_string = flask.request.query_string.decode()
query_parsed = urllib.parse.parse_qsl(query_string)
query = werkzeug.datastructures.MultiDict(query_parsed).to_dict()
if 'draw' in query:
del query['draw']
query_sorted = sorted(query.items(), key=lambda kv: kv[0])
return urllib.parse.urlencode(query_sorted) | 0.834441 | 0.392075 |
import os
import sys
import logging
import redis
# 项目根目录
BASE_DIR = os.path.dirname(__file__)
"""
自添加USER_AGENT请按照已有数据的格式来添加
"""
USER_AGENT = [
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
'Opera/9.80 (Windows NT 6.1; U; zh-cn) Presto/2.9.168 Version/11.50',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 2.0.50727; SLCC2; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; Tablet PC 2.0; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; InfoPath.3)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ) AppleWebKit/534.12 (KHTML, like Gecko) Maxthon/3.0 Safari/534.12',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; .NET4.0E; SE 2.X MetaSr 1.0)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.3 (KHTML, like Gecko) Chrome/6.0.472.33 Safari/534.3 SE 2.X MetaSr 1.0',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.41 Safari/535.1 QQBrowser/6.9.11079.201',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; .NET4.0E) QQBrowser/6.9.11079.201',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)',
'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)',
'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Tri dent/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64;Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)',
]
# (Translation of the string heading below: "Proxy sites -- add new
# PROXY_URLS entries in the same format as the existing data.")
"""
代理网站:自添加PROXY_URLS请按照已有数据的格式来添加
"""
PROXY_URLS = [
    {'url': 'https://www.xicidaili.com/nn', 'type': 'xici'},
    {'url': 'https://www.xicidaili.com/nt', 'type': 'xici'},
    {'url': 'https://www.xicidaili.com/wn', 'type': 'xici'},
    {'url': 'https://www.xicidaili.com/wt', 'type': 'xici'},
    {'url': 'http://www.xiladaili.com/gaoni/', 'type': 'xila'},
    {'url': 'http://www.xiladaili.com/http/', 'type': 'xila'},
    {'url': 'http://www.xiladaili.com/https/', 'type': 'xila'},
    {'url': 'http://www.xiladaili.com/putong/', 'type': 'xila'},
    {'url': 'https://www.kuaidaili.com/free/intr/', 'type': 'kuaidaili'},
    {'url': 'https://www.kuaidaili.com/free/inha/', 'type': 'kuaidaili'},
    {'url': 'https://www.kuaidaili.com/ops/', 'type': 'kuaidaili_new'},
    {'url': 'http://www.89ip.cn/', 'type': '89ip'},
    {'url': 'http://www.qydaili.com/free/', 'type': 'qydaili'},
    {'url': 'https://ip.ihuan.me/', 'type': 'ihuan'},
    {'url': 'http://www.ip3366.net/', 'type': '3366'},
    {'url': 'http://www.iphai.com/free/ng', 'type': 'iphai'},
    {'url': 'http://www.iphai.com/free/wg', 'type': 'iphai'},
    {'url': 'http://www.iphai.com/free/wp', 'type': 'iphai'},
    {'url': 'http://www.goubanjia.com/', 'type': 'goubanjia'},
    {'url': 'http://www.feiyiproxy.com/?page_id=1457', 'type': 'feiyi'},
    {'url': 'http://www.shenjidaili.com/open/', 'type': 'shenji'},
    {'url': 'http://ip.kxdaili.com/dailiip.html', 'type': 'kaixin'},
    {'url': 'http://www.superfastip.com/welcome/freeIP', 'type': 'jisu'},
    {'url': 'http://ip.jiangxianli.com/', 'type': 'jxl'},
    {'url': 'https://lab.crossincode.com/proxy/', 'type': 'cross'},
    {'url': 'http://www.nimadaili.com/gaoni/', 'type': 'nima'},
    {'url': 'http://www.nimadaili.com/http/', 'type': 'nima'},
    {'url': 'http://www.nimadaili.com/https/', 'type': 'nima'},
    {'url': 'http://www.data5u.com/', 'type': 'da5u'},
    {'url': 'https://raw.githubusercontent.com/fate0/proxylist/master/proxy.list', 'type': 'github'},
    {'url': 'https://proxy.mimvp.com/freeopen.php', 'type': 'mipu'},  # port is shown as an image and needs OCR -- solved
    {'url': 'http://www.xsdaili.com/', 'type': 'xsdaili'},  # requires crawling second-level pages -- solved
    {'url': 'http://www.66ip.cn/mo.php?tqsl=1024', 'type': '66ip'},  # requires JS decryption -- solved
]
# (Translation of the string heading below: "Proxy test sites -- add new
# test URLs in the same format as the existing data.")
"""
测试代理网站:自添加测试代理的url请按照已有数据的格式来添加
"""
TEST_PROXY_URLS = [
    # Mainstream search engines queried for "ip": fairly open, and the
    # reported result is relatively accurate.
    {'url': 'https://www.baidu.com/s?wd=ip', 'type': 'baidu'},
    {'url': 'https://www.sogou.com/web?query=ip', 'type': 'sogou'},
    {'url': 'https://www.so.com/s?q=ip&src=srp&fr=none&psid=2d511001ad6e91af893e0d7e561f1bba', 'type': 'so'},
    {'url': 'https://mijisou.com/?q=ip&category_general=on&time_range=&language=zh-CN&pageno=1', 'type': 'miji'},
    # Sites dedicated to reporting the public IP; do not query them too
    # frequently.
    {'url': 'http://pv.sohu.com/cityjson', 'type': 'sohu'},
    {'url': 'http://ip.taobao.com/ipSearch.html', 'type': 'taobao'},
    {'url': 'https://myip.ipip.net/', 'type': 'myip'},
    {'url': 'http://httpbin.org/ip', 'type': 'httpbin'},
    {'url': 'http://ip.chinaz.com/', 'type': 'chinaz'},
    {'url': 'https://www.ipip.net/ip.html', 'type': 'ipip'},
    {'url': 'https://ip.cn/', 'type': 'ipcn'},
    {'url': 'https://tool.lu/ip/', 'type': 'luip'},
    {'url': 'http://api.online-service.vip/ip/me', 'type': 'onlineservice'},
    {'url': 'https://ip.ttt.sh/', 'type': 'ttt'},
    # {'url': 'http://icanhazip.com/', 'type': 'ican'},  # sometimes returns an IPv6 address, skewing the result
]
# Redis connection pools.
# (The stray string statement below is a no-op left untouched; it says:
# "you may point these at other databases yourself".)
'可以自行设置为其他的数据库'
POOL = redis.ConnectionPool(host='127.0.0.1', max_connections=80, decode_responses=True, db=1)
POOL2 = redis.ConnectionPool(host='127.0.0.1', max_connections=80, decode_responses=True, db=2)
POOL3 = redis.ConnectionPool(host='127.0.0.1', max_connections=80, decode_responses=True, db=3) | video/utils/proxy/config.py |
import os
import sys
import logging
import redis
# 项目根目录
BASE_DIR = os.path.dirname(__file__)
"""
自添加USER_AGENT请按照已有数据的格式来添加
"""
USER_AGENT = [
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
'Opera/9.80 (Windows NT 6.1; U; zh-cn) Presto/2.9.168 Version/11.50',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 2.0.50727; SLCC2; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; Tablet PC 2.0; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; InfoPath.3)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ) AppleWebKit/534.12 (KHTML, like Gecko) Maxthon/3.0 Safari/534.12',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; .NET4.0E; SE 2.X MetaSr 1.0)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.3 (KHTML, like Gecko) Chrome/6.0.472.33 Safari/534.3 SE 2.X MetaSr 1.0',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.41 Safari/535.1 QQBrowser/6.9.11079.201',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; .NET4.0E) QQBrowser/6.9.11079.201',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)',
'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)',
'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Tri dent/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64;Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)',
]
"""
代理网站:自添加PROXY_URLS请按照已有数据的格式来添加
"""
PROXY_URLS = [
{'url': 'https://www.xicidaili.com/nn', 'type': 'xici'},
{'url': 'https://www.xicidaili.com/nt', 'type': 'xici'},
{'url': 'https://www.xicidaili.com/wn', 'type': 'xici'},
{'url': 'https://www.xicidaili.com/wt', 'type': 'xici'},
{'url': 'http://www.xiladaili.com/gaoni/', 'type': 'xila'},
{'url': 'http://www.xiladaili.com/http/', 'type': 'xila'},
{'url': 'http://www.xiladaili.com/https/', 'type': 'xila'},
{'url': 'http://www.xiladaili.com/putong/', 'type': 'xila'},
{'url': 'https://www.kuaidaili.com/free/intr/', 'type': 'kuaidaili'},
{'url': 'https://www.kuaidaili.com/free/inha/', 'type': 'kuaidaili'},
{'url': 'https://www.kuaidaili.com/ops/', 'type': 'kuaidaili_new'},
{'url': 'http://www.89ip.cn/', 'type': '89ip'},
{'url': 'http://www.qydaili.com/free/', 'type': 'qydaili'},
{'url': 'https://ip.ihuan.me/', 'type': 'ihuan'},
{'url': 'http://www.ip3366.net/', 'type': '3366'},
{'url': 'http://www.iphai.com/free/ng', 'type': 'iphai'},
{'url': 'http://www.iphai.com/free/wg', 'type': 'iphai'},
{'url': 'http://www.iphai.com/free/wp', 'type': 'iphai'},
{'url': 'http://www.goubanjia.com/', 'type': 'goubanjia'},
{'url': 'http://www.feiyiproxy.com/?page_id=1457', 'type': 'feiyi'},
{'url': 'http://www.shenjidaili.com/open/', 'type': 'shenji'},
{'url': 'http://ip.kxdaili.com/dailiip.html', 'type': 'kaixin'},
{'url': 'http://www.superfastip.com/welcome/freeIP', 'type': 'jisu'},
{'url': 'http://ip.jiangxianli.com/', 'type': 'jxl'},
{'url': 'https://lab.crossincode.com/proxy/', 'type': 'cross'},
{'url': 'http://www.nimadaili.com/gaoni/', 'type': 'nima'},
{'url': 'http://www.nimadaili.com/http/', 'type': 'nima'},
{'url': 'http://www.nimadaili.com/https/', 'type': 'nima'},
{'url': 'http://www.data5u.com/', 'type': 'da5u'},
{'url': 'https://raw.githubusercontent.com/fate0/proxylist/master/proxy.list', 'type': 'github'},
{'url': 'https://proxy.mimvp.com/freeopen.php', 'type': 'mipu'}, # 需要图片识别端口,已解决
{'url': 'http://www.xsdaili.com/', 'type': 'xsdaili'}, # 需要爬取二级网页,已解决
{'url': 'http://www.66ip.cn/mo.php?tqsl=1024', 'type': '66ip'}, # 需要js解密,已解决
]
"""
测试代理网站:自添加测试代理的url请按照已有数据的格式来添加
"""
TEST_PROXY_URLS = [
# 下面的是主流搜索引擎搜ip的网址,相对比较开放,而且查询结果比较准确
{'url': 'https://www.baidu.com/s?wd=ip', 'type': 'baidu'},
{'url': 'https://www.sogou.com/web?query=ip', 'type': 'sogou'},
{'url': 'https://www.so.com/s?q=ip&src=srp&fr=none&psid=2d511001ad6e91af893e0d7e561f1bba', 'type': 'so'},
{'url': 'https://mijisou.com/?q=ip&category_general=on&time_range=&language=zh-CN&pageno=1', 'type': 'miji'},
# 下面的是专门查询本机公网ip的网址,请求不能过于频繁
{'url': 'http://pv.sohu.com/cityjson', 'type': 'sohu'},
{'url': 'http://ip.taobao.com/ipSearch.html', 'type': 'taobao'},
{'url': 'https://myip.ipip.net/', 'type': 'myip'},
{'url': 'http://httpbin.org/ip', 'type': 'httpbin'},
{'url': 'http://ip.chinaz.com/', 'type': 'chinaz'},
{'url': 'https://www.ipip.net/ip.html', 'type': 'ipip'},
{'url': 'https://ip.cn/', 'type': 'ipcn'},
{'url': 'https://tool.lu/ip/', 'type': 'luip'},
{'url': 'http://api.online-service.vip/ip/me', 'type': 'onlineservice'},
{'url': 'https://ip.ttt.sh/', 'type': 'ttt'},
# {'url': 'http://icanhazip.com/', 'type': 'ican'}, # 该网站有时会返回一个ipv6地址,导致结果有误
]
# redis数据库连接池
'可以自行设置为其他的数据库'
POOL = redis.ConnectionPool(host='127.0.0.1', max_connections=80, decode_responses=True, db=1)
POOL2 = redis.ConnectionPool(host='127.0.0.1', max_connections=80, decode_responses=True, db=2)
POOL3 = redis.ConnectionPool(host='127.0.0.1', max_connections=80, decode_responses=True, db=3) | 0.311636 | 0.058373 |
import jsonschema
from jsonschema.exceptions import ValidationError
from kuryr_libnetwork import schemata
from kuryr_libnetwork.schemata import commons
from kuryr_libnetwork.tests.unit import base
class TestKuryrSchema(base.TestKuryrBase):
    """Unit tests for Kuryr schema."""

    def test_network_id_64_len(self):
        # A 64-character hex id (full Docker network id) satisfies the
        # common 'id' schema definition.
        network_id = '51c75a2515d47edecc3f720bb541e287224416fb66715eb' \
            '7802011d6ffd499f1'
        target_schema = commons.COMMONS[u'definitions'][u'id']
        self._validate_schema(network_id, target_schema)

    def test_network_id_25_len(self):
        # A 25-character short id is also accepted.
        network_id = 'xqqzd9p112o4kvok38n3caxjm'
        target_schema = commons.COMMONS[u'definitions'][u'id']
        self._validate_schema(network_id, target_schema)

    def test_network_id_invalid_charactor(self):
        # Punctuation characters must be rejected by the 'id' schema.
        network_id = '@#qzd9p112o4kvok38n3cax&%'
        target_schema = commons.COMMONS[u'definitions'][u'id']
        self.assertRaises(ValidationError, jsonschema.validate, network_id,
                          target_schema)

    def test_network_id_invalid_length(self):
        # A 29-character id is rejected.  NOTE(review): the exact length
        # rule (25 or 64) is presumed from the two positive tests above --
        # confirm against the schema definition.
        network_id = 'xqqzd9p112o4kvok38n3caxjmabcd'
        target_schema = commons.COMMONS[u'definitions'][u'id']
        self.assertRaises(ValidationError, jsonschema.validate, network_id,
                          target_schema)

    def test_network_create_schema(self):
        # A fully-populated network-create request validates cleanly.
        docker_network_id = '51c75a2515d47edecc3f720bb541e287224416fb66715eb' \
            '7802011d6ffd499f1'
        network_request = {
            'NetworkID': docker_network_id,
            'IPv4Data': [{
                'AddressSpace': 'foo',
                'Pool': '192.168.42.0/24',
                'Gateway': '192.168.42.1/24',
                'AuxAddresses': {}
            }],
            'IPv6Data': [{
                'AddressSpace': 'bar',
                'Pool': 'fe80::/64',
                'Gateway': 'fe80::f816:3eff:fe20:57c3/64',
                'AuxAddresses': {}
            }],
            'Options': {}
        }
        self._validate_schema(network_request, schemata.NETWORK_CREATE_SCHEMA)

    def test_network_create_schema_missing_required(self):
        # Same request minus 'IPv6Data' must raise a validation error.
        docker_network_id = '51c75a2515d47edecc3f720bb541e287224416fb66715eb' \
            '7802011d6ffd499f1'
        net_request = {
            'NetworkID': docker_network_id,
            'IPv4Data': [{
                'AddressSpace': 'foo',
                'Pool': '192.168.42.0/24',
                'Gateway': '192.168.42.1/24',
                'AuxAddresses': {}
            }],
            'Options': {}
        }
        self.assertRaises(ValidationError, jsonschema.validate, net_request,
                          schemata.NETWORK_CREATE_SCHEMA)

    @classmethod
    def _validate_schema(self, target, schema):
        # NOTE(review): decorated ``@classmethod`` but the first parameter is
        # named ``self`` -- it actually receives the *class*.  Should a
        # validation error ever occur, ``self.fail(...)`` would call the
        # unbound TestCase.fail with the message as its ``self`` and error
        # out instead of failing the test cleanly.  Confirm and consider
        # dropping the decorator.
        try:
            jsonschema.validate(target, schema)
        except ValidationError:
            self.fail("Unexpected validation error raised!") | kuryr_libnetwork/tests/unit/test_schema.py |
import jsonschema
from jsonschema.exceptions import ValidationError
from kuryr_libnetwork import schemata
from kuryr_libnetwork.schemata import commons
from kuryr_libnetwork.tests.unit import base
class TestKuryrSchema(base.TestKuryrBase):
"""Unit tests for Kuryr schema."""
def test_network_id_64_len(self):
network_id = '51c75a2515d47edecc3f720bb541e287224416fb66715eb' \
'7802011d6ffd499f1'
target_schema = commons.COMMONS[u'definitions'][u'id']
self._validate_schema(network_id, target_schema)
def test_network_id_25_len(self):
network_id = 'xqqzd9p112o4kvok38n3caxjm'
target_schema = commons.COMMONS[u'definitions'][u'id']
self._validate_schema(network_id, target_schema)
def test_network_id_invalid_charactor(self):
network_id = '@#qzd9p112o4kvok38n3cax&%'
target_schema = commons.COMMONS[u'definitions'][u'id']
self.assertRaises(ValidationError, jsonschema.validate, network_id,
target_schema)
def test_network_id_invalid_length(self):
network_id = 'xqqzd9p112o4kvok38n3caxjmabcd'
target_schema = commons.COMMONS[u'definitions'][u'id']
self.assertRaises(ValidationError, jsonschema.validate, network_id,
target_schema)
def test_network_create_schema(self):
docker_network_id = '51c75a2515d47edecc3f720bb541e287224416fb66715eb' \
'7802011d6ffd499f1'
network_request = {
'NetworkID': docker_network_id,
'IPv4Data': [{
'AddressSpace': 'foo',
'Pool': '192.168.42.0/24',
'Gateway': '192.168.42.1/24',
'AuxAddresses': {}
}],
'IPv6Data': [{
'AddressSpace': 'bar',
'Pool': 'fe80::/64',
'Gateway': 'fe80::f816:3eff:fe20:57c3/64',
'AuxAddresses': {}
}],
'Options': {}
}
self._validate_schema(network_request, schemata.NETWORK_CREATE_SCHEMA)
def test_network_create_schema_missing_required(self):
docker_network_id = '51c75a2515d47edecc3f720bb541e287224416fb66715eb' \
'7802011d6ffd499f1'
net_request = {
'NetworkID': docker_network_id,
'IPv4Data': [{
'AddressSpace': 'foo',
'Pool': '192.168.42.0/24',
'Gateway': '192.168.42.1/24',
'AuxAddresses': {}
}],
'Options': {}
}
self.assertRaises(ValidationError, jsonschema.validate, net_request,
schemata.NETWORK_CREATE_SCHEMA)
@classmethod
def _validate_schema(self, target, schema):
try:
jsonschema.validate(target, schema)
except ValidationError:
self.fail("Unexpected validation error raised!") | 0.6137 | 0.162579 |
import json
import logging
from functools import wraps
import botocore
from decouple import config
import lpipe.contrib.boto3
from lpipe import utils
from lpipe.contrib import mindictive
def build(message_data, message_group_id=None):
    """Create a ``send_message_batch`` entry from a message payload.

    The entry ``Id`` is a hash of the canonical (key-sorted) JSON body;
    ``MessageGroupId`` is attached only when a truthy value is given.
    """
    body = json.dumps(message_data, sort_keys=True)
    entry = {"Id": utils.hash(body), "MessageBody": body}
    if not message_group_id:
        return entry
    entry["MessageGroupId"] = str(message_group_id)
    return entry
def mock_sqs(func):
    """Decorator turning AWS credential/validation errors into no-ops.

    When the ``MOCK_AWS`` setting is truthy, the listed botocore errors are
    logged at debug level and swallowed (the wrapped call returns ``None``);
    otherwise they propagate unchanged.
    """
    aws_errors = (
        botocore.exceptions.NoCredentialsError,
        botocore.exceptions.ClientError,
        botocore.exceptions.NoRegionError,
        botocore.exceptions.ParamValidationError,
    )

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except aws_errors:
            if not config("MOCK_AWS", default=False):
                raise
            log = kwargs.get("logger", logging.getLogger())
            log.debug(
                "Mocked SQS: {}()".format(func),
                function=f"{func}",
                params={"args": f"{args}", "kwargs": f"{kwargs}"},
            )
    return wrapper
@mock_sqs
def batch_put_messages(
queue_url, messages, batch_size=10, message_group_id=None, **kwargs
):
"""Put messages into a sqs queue, batched by the maximum of 10."""
assert batch_size <= 10 # send_message_batch will fail otherwise
client = lpipe.contrib.boto3.client("sqs")
responses = []
for b in utils.batch(messages, batch_size):
responses.append(
utils.call(
client.send_message_batch,
QueueUrl=queue_url,
Entries=[build(message, message_group_id) for message in b],
)
)
return tuple(responses)
def put_message(queue_url, data, message_group_id=None, **kwargs):
return batch_put_messages(
queue_url=queue_url, messages=[data], message_group_id=message_group_id
)
@mock_sqs
def get_queue_url(queue_name):
return utils.call(
lpipe.contrib.boto3.client("sqs").get_queue_url, QueueName=queue_name
)["QueueUrl"]
@mock_sqs
def get_queue_arn(queue_url):
return mindictive.get_nested(
utils.call(
lpipe.contrib.boto3.client("sqs").get_queue_attributes,
QueueUrl=queue_url,
AttributeNames=["QueueArn"],
),
["Attributes", "QueueArn"],
)
@mock_sqs
def delete_message_batch(queue_url, entries):
return utils.call(
lpipe.contrib.boto3.client("sqs").delete_message_batch,
QueueUrl=queue_url,
Entries=entries,
) | lpipe/contrib/sqs.py | import json
import logging
from functools import wraps
import botocore
from decouple import config
import lpipe.contrib.boto3
from lpipe import utils
from lpipe.contrib import mindictive
def build(message_data, message_group_id=None):
data = json.dumps(message_data, sort_keys=True)
msg = {"Id": utils.hash(data), "MessageBody": data}
if message_group_id:
msg["MessageGroupId"] = str(message_group_id)
return msg
def mock_sqs(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except (
botocore.exceptions.NoCredentialsError,
botocore.exceptions.ClientError,
botocore.exceptions.NoRegionError,
botocore.exceptions.ParamValidationError,
):
if config("MOCK_AWS", default=False):
log = kwargs["logger"] if "logger" in kwargs else logging.getLogger()
log.debug(
"Mocked SQS: {}()".format(func),
function=f"{func}",
params={"args": f"{args}", "kwargs": f"{kwargs}"},
)
return
else:
raise
return wrapper
@mock_sqs
def batch_put_messages(
queue_url, messages, batch_size=10, message_group_id=None, **kwargs
):
"""Put messages into a sqs queue, batched by the maximum of 10."""
assert batch_size <= 10 # send_message_batch will fail otherwise
client = lpipe.contrib.boto3.client("sqs")
responses = []
for b in utils.batch(messages, batch_size):
responses.append(
utils.call(
client.send_message_batch,
QueueUrl=queue_url,
Entries=[build(message, message_group_id) for message in b],
)
)
return tuple(responses)
def put_message(queue_url, data, message_group_id=None, **kwargs):
return batch_put_messages(
queue_url=queue_url, messages=[data], message_group_id=message_group_id
)
@mock_sqs
def get_queue_url(queue_name):
return utils.call(
lpipe.contrib.boto3.client("sqs").get_queue_url, QueueName=queue_name
)["QueueUrl"]
@mock_sqs
def get_queue_arn(queue_url):
return mindictive.get_nested(
utils.call(
lpipe.contrib.boto3.client("sqs").get_queue_attributes,
QueueUrl=queue_url,
AttributeNames=["QueueArn"],
),
["Attributes", "QueueArn"],
)
@mock_sqs
def delete_message_batch(queue_url, entries):
return utils.call(
lpipe.contrib.boto3.client("sqs").delete_message_batch,
QueueUrl=queue_url,
Entries=entries,
) | 0.569733 | 0.093719 |
import os
import warnings
from unittest import TestCase
import conf
class TestConfg(TestCase):
def test_read_yml(self):
conf.load('test_resources/conf1.yml')
self.assertEqual(conf.get('message'), 'this is yml')
self.assertEqual(conf.message, 'this is yml')
self.assertEqual(conf.reader.message, 'this is yml')
from conf import message as msg
self.assertEqual(msg, 'this is yml')
def test_read_yml_from_env_var(self):
os.environ['configfile'] = 'test_resources/conf1.yml'
conf.load('configfile')
self.assertEqual(conf.get('message'), 'this is yml')
def test_get_default(self):
conf.load('test_resources/conf1.yml')
self.assertEqual(conf.get('key_does_not_exist'), None)
self.assertEqual(conf.get('key_does_not_exist', 'some_value'),
'some_value')
def test_asdict(self):
conf.load('test_resources/conf1.yml')
d = conf.asdict()
self.assertEqual(d.get('key_does_not_exist', None), None)
self.assertEqual(d.get('key_does_not_exist', 'some_value'),
'some_value')
self.assertEqual(d['message'], 'this is yml')
def test_read_yaml(self):
conf.load('test_resources/conf2.yaml')
self.assertEqual(conf.get('message'), 'this is yml')
def test_read_yaml_uppercases(self):
conf.load('test_resources/conf3.YaMl')
self.assertEqual(conf.get('message'), 'this is yml')
def test_read_json(self):
conf.load('test_resources/conf4.json')
self.assertEqual(conf.get('message'), 'this is json')
def test_read_ini(self):
conf.load('test_resources/conf6.ini')
self.assertEqual(conf.get('main_section')['message'],
'this is ini')
def test_read_default(self):
conf.load('test_resources/default_python_conf')
self.assertEqual(conf.get('main_section')['message'],
'this is default')
def test_warn_if_file_not_found(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
conf.load('no_way_this_file_exists.yml')
assert len(w) == 1
assert 'not found' in str(w[-1].message)
def test_warn_if_file_cannot_be_parsed(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
conf.load('test_resources/conf.chuck_norris')
assert len(w) == 1
assert 'cannot parse' in str(w[-1].message)
def test_warn_if_file_cannot_be_parsed_due_to_content(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
conf.load('test_resources/conf5.json')
assert len(w) == 1
assert 'failed to parse' in str(w[-1].message)
def test_warn_if_filename_is_empty(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
conf.load('')
assert len(w) == 1
assert 'empty name' in str(w[-1].message)
def test_raise_invalid_config(self):
with self.assertRaises(Exception):
conf.load('test_resources/bad.yaml', raise_exception=True)
def test_noraise_invalid_config(self):
conf.load('test_resources/bad.yaml') | tests.py | import os
import warnings
from unittest import TestCase
import conf
class TestConfg(TestCase):
def test_read_yml(self):
conf.load('test_resources/conf1.yml')
self.assertEqual(conf.get('message'), 'this is yml')
self.assertEqual(conf.message, 'this is yml')
self.assertEqual(conf.reader.message, 'this is yml')
from conf import message as msg
self.assertEqual(msg, 'this is yml')
def test_read_yml_from_env_var(self):
os.environ['configfile'] = 'test_resources/conf1.yml'
conf.load('configfile')
self.assertEqual(conf.get('message'), 'this is yml')
def test_get_default(self):
conf.load('test_resources/conf1.yml')
self.assertEqual(conf.get('key_does_not_exist'), None)
self.assertEqual(conf.get('key_does_not_exist', 'some_value'),
'some_value')
def test_asdict(self):
conf.load('test_resources/conf1.yml')
d = conf.asdict()
self.assertEqual(d.get('key_does_not_exist', None), None)
self.assertEqual(d.get('key_does_not_exist', 'some_value'),
'some_value')
self.assertEqual(d['message'], 'this is yml')
def test_read_yaml(self):
conf.load('test_resources/conf2.yaml')
self.assertEqual(conf.get('message'), 'this is yml')
def test_read_yaml_uppercases(self):
conf.load('test_resources/conf3.YaMl')
self.assertEqual(conf.get('message'), 'this is yml')
def test_read_json(self):
conf.load('test_resources/conf4.json')
self.assertEqual(conf.get('message'), 'this is json')
def test_read_ini(self):
conf.load('test_resources/conf6.ini')
self.assertEqual(conf.get('main_section')['message'],
'this is ini')
def test_read_default(self):
conf.load('test_resources/default_python_conf')
self.assertEqual(conf.get('main_section')['message'],
'this is default')
def test_warn_if_file_not_found(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
conf.load('no_way_this_file_exists.yml')
assert len(w) == 1
assert 'not found' in str(w[-1].message)
def test_warn_if_file_cannot_be_parsed(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
conf.load('test_resources/conf.chuck_norris')
assert len(w) == 1
assert 'cannot parse' in str(w[-1].message)
def test_warn_if_file_cannot_be_parsed_due_to_content(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
conf.load('test_resources/conf5.json')
assert len(w) == 1
assert 'failed to parse' in str(w[-1].message)
def test_warn_if_filename_is_empty(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
conf.load('')
assert len(w) == 1
assert 'empty name' in str(w[-1].message)
def test_raise_invalid_config(self):
with self.assertRaises(Exception):
conf.load('test_resources/bad.yaml', raise_exception=True)
def test_noraise_invalid_config(self):
conf.load('test_resources/bad.yaml') | 0.44553 | 0.354768 |
from django.utils import timezone
from meteo.models import MeteoData
from .api import Weathermanager
class CacheManager:
def __init__(self, insee):
self.insee = insee
def _check_if_data_in_db(self):
current_datetime_aware = timezone.localtime(timezone.now())
current_date_aware = current_datetime_aware.date()
get = MeteoData.objects.filter(day=0).filter(insee=self.insee).first()
if get:
if current_date_aware == get.datetime.date():
return True
else:
return False
else:
return False
def get_meteo_data(self):
validator = self._check_if_data_in_db()
if validator:
data = MeteoData.objects.filter(insee=self.insee)
return data
else:
weather = Weathermanager()
old_meteo_data = MeteoData.objects.filter(insee=self.insee)
old_meteo_data.delete()
weather.get_weekly_forecast(self.insee)
data = MeteoData.objects.filter(insee=self.insee)
return data
def jsonify_payload(weather_obj_list):
payload = {}
for day in weather_obj_list:
payload[f"day{day.day}"] = {
"day": day.day,
"weather": day.weather,
"tmin": day.tmin,
"tmax": day.tmax,
}
return payload
def get_meteo_and_city_for_an_event(events, day):
event_images_codes = {}
for event in events:
city = event.customer.city.insee
get_meteo = CacheManager(city)
get_meteo.get_meteo_data()
meteo = MeteoData.objects.filter(datetime__day=day, insee=city)
if meteo:
weathercode = meteo[0].weather
weather_image_code = _transform_weather_code_to_img_code(weathercode)
link_to_img = f"images/{weather_image_code}.svg"
else:
weathercode = 0
weather_image_code = _transform_weather_code_to_img_code(weathercode)
link_to_img = f"images/{weather_image_code}.svg"
event_images_codes[f"{event.id}"] = [event.id, link_to_img]
return event_images_codes
def _transform_weather_code_to_img_code(weathercode):
nbForCloudy = [2, 3, 4, 5]
nbCloudSun = [1, 235]
nbSunSnow = [220, 221, 222]
nbSunRain = [210, 211, 212]
nbCloudRain = [
10,
11,
13,
16,
30,
31,
12,
14,
15,
32,
40,
41,
42,
43,
44,
45,
46,
47,
48,
]
nbStorm = [
100,
101,
102,
103,
104,
105,
106,
107,
108,
120,
121,
122,
123,
124,
125,
126,
127,
128,
130,
131,
132,
133,
134,
135,
136,
137,
138,
]
nbSnow = [
20,
21,
22,
60,
61,
62,
63,
64,
65,
66,
67,
68,
70,
71,
72,
73,
74,
75,
76,
77,
78,
142,
]
if weathercode in nbForCloudy:
return 1
elif weathercode in nbCloudSun:
return 2
elif weathercode in nbSunSnow:
return 3
elif weathercode in nbSunRain:
return 4
elif weathercode in nbCloudRain:
return 5
elif weathercode in nbStorm:
return 6
elif weathercode in nbSnow:
return 7
else:
return 8 | gardenizer/meteo/utils/meteo_data_manager.py | from django.utils import timezone
from meteo.models import MeteoData
from .api import Weathermanager
class CacheManager:
def __init__(self, insee):
self.insee = insee
def _check_if_data_in_db(self):
current_datetime_aware = timezone.localtime(timezone.now())
current_date_aware = current_datetime_aware.date()
get = MeteoData.objects.filter(day=0).filter(insee=self.insee).first()
if get:
if current_date_aware == get.datetime.date():
return True
else:
return False
else:
return False
def get_meteo_data(self):
validator = self._check_if_data_in_db()
if validator:
data = MeteoData.objects.filter(insee=self.insee)
return data
else:
weather = Weathermanager()
old_meteo_data = MeteoData.objects.filter(insee=self.insee)
old_meteo_data.delete()
weather.get_weekly_forecast(self.insee)
data = MeteoData.objects.filter(insee=self.insee)
return data
def jsonify_payload(weather_obj_list):
payload = {}
for day in weather_obj_list:
payload[f"day{day.day}"] = {
"day": day.day,
"weather": day.weather,
"tmin": day.tmin,
"tmax": day.tmax,
}
return payload
def get_meteo_and_city_for_an_event(events, day):
event_images_codes = {}
for event in events:
city = event.customer.city.insee
get_meteo = CacheManager(city)
get_meteo.get_meteo_data()
meteo = MeteoData.objects.filter(datetime__day=day, insee=city)
if meteo:
weathercode = meteo[0].weather
weather_image_code = _transform_weather_code_to_img_code(weathercode)
link_to_img = f"images/{weather_image_code}.svg"
else:
weathercode = 0
weather_image_code = _transform_weather_code_to_img_code(weathercode)
link_to_img = f"images/{weather_image_code}.svg"
event_images_codes[f"{event.id}"] = [event.id, link_to_img]
return event_images_codes
def _transform_weather_code_to_img_code(weathercode):
nbForCloudy = [2, 3, 4, 5]
nbCloudSun = [1, 235]
nbSunSnow = [220, 221, 222]
nbSunRain = [210, 211, 212]
nbCloudRain = [
10,
11,
13,
16,
30,
31,
12,
14,
15,
32,
40,
41,
42,
43,
44,
45,
46,
47,
48,
]
nbStorm = [
100,
101,
102,
103,
104,
105,
106,
107,
108,
120,
121,
122,
123,
124,
125,
126,
127,
128,
130,
131,
132,
133,
134,
135,
136,
137,
138,
]
nbSnow = [
20,
21,
22,
60,
61,
62,
63,
64,
65,
66,
67,
68,
70,
71,
72,
73,
74,
75,
76,
77,
78,
142,
]
if weathercode in nbForCloudy:
return 1
elif weathercode in nbCloudSun:
return 2
elif weathercode in nbSunSnow:
return 3
elif weathercode in nbSunRain:
return 4
elif weathercode in nbCloudRain:
return 5
elif weathercode in nbStorm:
return 6
elif weathercode in nbSnow:
return 7
else:
return 8 | 0.48438 | 0.27597 |
import argparse
import time
import unicurses as uc
import common
from display import CursesDisplay
from api import SpotifyApi
from state import SpotifyState, Config
logger = common.logging.getLogger(__name__)
def get_args():
"""Parse and return the command line arguments."""
parser = argparse.ArgumentParser(description="Terminal remote Spotify player.",
epilog=Config.help(),
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("username", help="username associated with your spotify account (email or user id)")
parser.add_argument("-c --clear_cache",
action="store_true",
default=False,
dest="clear_cache",
help="clear the cache")
parser.add_argument("-a --clear_auth",
action="store_true",
default=False,
dest="clear_auth",
help="clear your authorization tokens")
parser.add_argument("-p --config_path",
default=None,
dest="config_path",
help="pass a configuration file")
return parser.parse_args()
def check_version():
"""Check the version we're running."""
my_version = common.get_version()
latest_version = common.get_master_version()
if my_version and latest_version and (my_version < latest_version):
print("Version {}.{}.{} is now available".format(*latest_version))
print("Run with -c after upgrading to clear your cache!".format(*latest_version))
time.sleep(3)
else:
logger.info("Current version: %s", my_version)
logger.info("Latest version: %s", latest_version)
if __name__ == '__main__':
# Get command line arguments.
args = get_args()
# Clear the console then print the title screen.
common.clear()
print(common.TITLE)
# Check the version we're running.
check_version()
# Clear your auth keys.
if args.clear_auth:
logger.debug("Clearing authorization tokens")
common.clear_auth(args.username)
# Reset the cache.
if args.clear_cache:
logger.debug("Clearing the cache")
common.clear_cache(args.username)
# Parse config file.
logger.debug("Parsing config file %s", args.config_path)
config = Config(args.config_path)
# Spotify API interface.
api = SpotifyApi(args.username)
# Display premium warning.
if not api.is_premium():
print "This is not a Premium account. Some features may not work."
time.sleep(3)
# Create Spotify state.
sp_state = SpotifyState(api, config)
sp_state.load_state()
sp_state.init()
# Initialize the curses screen.
stdscr = uc.initscr()
# Create the display.
display = CursesDisplay(stdscr, sp_state)
# Start the display.
# Clear the screen before raising any Exceptions.
try:
display.start()
except KeyboardInterrupt:
common.clear()
except BaseException:
common.clear()
raise
print(common.PEACE)
# Save the state.
sp_state.save_state()
# Clear the screen to leave a clean terminal.
common.clear() | spotify.py |
import argparse
import time
import unicurses as uc
import common
from display import CursesDisplay
from api import SpotifyApi
from state import SpotifyState, Config
logger = common.logging.getLogger(__name__)
def get_args():
"""Parse and return the command line arguments."""
parser = argparse.ArgumentParser(description="Terminal remote Spotify player.",
epilog=Config.help(),
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("username", help="username associated with your spotify account (email or user id)")
parser.add_argument("-c --clear_cache",
action="store_true",
default=False,
dest="clear_cache",
help="clear the cache")
parser.add_argument("-a --clear_auth",
action="store_true",
default=False,
dest="clear_auth",
help="clear your authorization tokens")
parser.add_argument("-p --config_path",
default=None,
dest="config_path",
help="pass a configuration file")
return parser.parse_args()
def check_version():
"""Check the version we're running."""
my_version = common.get_version()
latest_version = common.get_master_version()
if my_version and latest_version and (my_version < latest_version):
print("Version {}.{}.{} is now available".format(*latest_version))
print("Run with -c after upgrading to clear your cache!".format(*latest_version))
time.sleep(3)
else:
logger.info("Current version: %s", my_version)
logger.info("Latest version: %s", latest_version)
if __name__ == '__main__':
# Get command line arguments.
args = get_args()
# Clear the console then print the title screen.
common.clear()
print(common.TITLE)
# Check the version we're running.
check_version()
# Clear your auth keys.
if args.clear_auth:
logger.debug("Clearing authorization tokens")
common.clear_auth(args.username)
# Reset the cache.
if args.clear_cache:
logger.debug("Clearing the cache")
common.clear_cache(args.username)
# Parse config file.
logger.debug("Parsing config file %s", args.config_path)
config = Config(args.config_path)
# Spotify API interface.
api = SpotifyApi(args.username)
# Display premium warning.
if not api.is_premium():
print "This is not a Premium account. Some features may not work."
time.sleep(3)
# Create Spotify state.
sp_state = SpotifyState(api, config)
sp_state.load_state()
sp_state.init()
# Initialize the curses screen.
stdscr = uc.initscr()
# Create the display.
display = CursesDisplay(stdscr, sp_state)
# Start the display.
# Clear the screen before raising any Exceptions.
try:
display.start()
except KeyboardInterrupt:
common.clear()
except BaseException:
common.clear()
raise
print(common.PEACE)
# Save the state.
sp_state.save_state()
# Clear the screen to leave a clean terminal.
common.clear() | 0.611498 | 0.115461 |
from PIL import Image, ImageChops, ImageEnhance, ImageOps
import requests
import os
from flask import Flask, redirect, jsonify, render_template, request, send_file, send_from_directory
import werkzeug
import datetime
import uuid
app = Flask(__name__)
ALLOWED_IMAGE_EXTENSIONS = set(['png', 'jpg', 'jpeg'])
def process_image(img):
BLEND_PCT = 0.45
#open up the mask
logo = Image.open('mask.png')
logo = logo.convert('RGBA')
#open gradient
gradient = Image.open('mask.jpg')
gradient = gradient.convert('RGBA')
gradient = gradient.resize(img.size, Image.ANTIALIAS)
#make sure logo matches the size of the image
logo_width = min(img.size) / 4
logo_aspect = float(logo.size[1]) / logo.size[0]
logo_size = (logo_width, logo_aspect * logo_width)
logo = logo.resize(map(int, logo_size), Image.ANTIALIAS)
#make sure our image has alpha channel
img = img.convert('RGBA')
#unique name
filename = uuid.uuid4().hex + '.png'
filename = os.path.join('/tmp', filename)
#put in gradient
graded = Image.blend(img, gradient, BLEND_PCT)
#then the logo
logo_bot_right = map(lambda x: x * .95, graded.size)
logo_top_left = map(lambda x, y: x - y, logo_bot_right, logo.size)
graded.paste(logo, map(int, logo_top_left + logo_bot_right), logo)
graded.save(filename, 'PNG')
#send it back
return filename
@app.route('/')
def index():
return render_template('index.html')
@app.route('/hackru', methods=['POST'])
def classify_upload():
try:
#get the image from the request
imagefile = request.files['imagefile']
filename_ = str(datetime.datetime.now()).replace(' ', '_') + \
werkzeug.secure_filename(imagefile.filename)
filename = os.path.join('/tmp', filename_)
#make sure it has the correct file type
if not any(ext in filename for ext in ALLOWED_IMAGE_EXTENSIONS):
return 'Invalid filetype.'
#save the file to /tmp
imagefile.save(filename)
#open the image for Pillow
image = Image.open(filename)
except Exception as err:
#uh oh. Something went wrong.
print 'Uploaded image open error: ' + err
return 'Error: ' + err
#process the image
resultFilename = process_image(image)
#send it back
return send_file(resultFilename, mimetype='image/png', as_attachment=True, attachment_filename='hackrued.png')
@app.route('/templates/<path:path>')
def send_static(path):
return send_from_directory('templates', path)
if __name__ == '__main__':
port = int(os.environ.get("PORT", 8000))
app.run(host='0.0.0.0', debug=True, port=port) | app.py | from PIL import Image, ImageChops, ImageEnhance, ImageOps
import requests
import os
from flask import Flask, redirect, jsonify, render_template, request, send_file, send_from_directory
import werkzeug
import datetime
import uuid
app = Flask(__name__)
ALLOWED_IMAGE_EXTENSIONS = set(['png', 'jpg', 'jpeg'])
def process_image(img):
BLEND_PCT = 0.45
#open up the mask
logo = Image.open('mask.png')
logo = logo.convert('RGBA')
#open gradient
gradient = Image.open('mask.jpg')
gradient = gradient.convert('RGBA')
gradient = gradient.resize(img.size, Image.ANTIALIAS)
#make sure logo matches the size of the image
logo_width = min(img.size) / 4
logo_aspect = float(logo.size[1]) / logo.size[0]
logo_size = (logo_width, logo_aspect * logo_width)
logo = logo.resize(map(int, logo_size), Image.ANTIALIAS)
#make sure our image has alpha channel
img = img.convert('RGBA')
#unique name
filename = uuid.uuid4().hex + '.png'
filename = os.path.join('/tmp', filename)
#put in gradient
graded = Image.blend(img, gradient, BLEND_PCT)
#then the logo
logo_bot_right = map(lambda x: x * .95, graded.size)
logo_top_left = map(lambda x, y: x - y, logo_bot_right, logo.size)
graded.paste(logo, map(int, logo_top_left + logo_bot_right), logo)
graded.save(filename, 'PNG')
#send it back
return filename
@app.route('/')
def index():
return render_template('index.html')
@app.route('/hackru', methods=['POST'])
def classify_upload():
try:
#get the image from the request
imagefile = request.files['imagefile']
filename_ = str(datetime.datetime.now()).replace(' ', '_') + \
werkzeug.secure_filename(imagefile.filename)
filename = os.path.join('/tmp', filename_)
#make sure it has the correct file type
if not any(ext in filename for ext in ALLOWED_IMAGE_EXTENSIONS):
return 'Invalid filetype.'
#save the file to /tmp
imagefile.save(filename)
#open the image for Pillow
image = Image.open(filename)
except Exception as err:
#uh oh. Something went wrong.
print 'Uploaded image open error: ' + err
return 'Error: ' + err
#process the image
resultFilename = process_image(image)
#send it back
return send_file(resultFilename, mimetype='image/png', as_attachment=True, attachment_filename='hackrued.png')
@app.route('/templates/<path:path>')
def send_static(path):
return send_from_directory('templates', path)
if __name__ == '__main__':
port = int(os.environ.get("PORT", 8000))
app.run(host='0.0.0.0', debug=True, port=port) | 0.304559 | 0.201538 |
import pickle
import inspect
import logging
import unittest
import numpy as np
import tensorflow as tf
from neupy import utils, layers, init
from neupy.utils import tensorflow_eval, tensorflow_session, shape_to_tuple
from neupy.layers.base import format_name_if_specified_as_pattern
from helpers import vectors_for_testing
class BaseTestCase(unittest.TestCase):
single_thread = False
verbose = False
random_seed = 0
def eval(self, value):
return tensorflow_eval(value)
def setUp(self):
tf.reset_default_graph()
if self.single_thread:
sess = tensorflow_session()
sess.close()
config = tf.ConfigProto(
allow_soft_placement=True,
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1,
)
tensorflow_session.cache = tf.Session(config=config)
if not self.verbose:
logging.disable(logging.CRITICAL)
# Clean identifiers map for each test
if hasattr(format_name_if_specified_as_pattern, 'counters'):
del format_name_if_specified_as_pattern.counters
utils.reproducible(seed=self.random_seed)
def tearDown(self):
sess = tensorflow_session()
sess.close()
def assertItemsEqual(self, list1, list2):
self.assertEqual(sorted(list1), sorted(list2))
def assertInvalidVectorTrain(self, network, input_vector, target=None,
decimal=5, is_feature1d=True, **train_kwargs):
"""
Method helps test network prediction training using different
types of row or column vector.
"""
input_vectors = vectors_for_testing(input_vector, is_feature1d)
if target is not None:
target_vectors = vectors_for_testing(target, is_feature1d)
input_vectors = zip(input_vectors, target_vectors)
train_args = inspect.getargspec(network.train).args
if 'epochs' in train_args and 'epochs' not in train_kwargs:
train_kwargs['epochs'] = 5
elif 'epsilon' in train_args and 'epsilon' not in train_kwargs:
train_kwargs['epsilon'] = 0.1
for i, X in enumerate(input_vectors, start=1):
if target is None:
network.train(X, **train_kwargs)
else:
network.train(*X, **train_kwargs)
def assertInvalidVectorPred(self, network, input_vector, target,
decimal=5, is_feature1d=True):
"""
Method helps test network prediction procedure using different
types of row or column vector.
"""
test_vectors = vectors_for_testing(input_vector, is_feature1d)
for i, test_vector in enumerate(test_vectors, start=1):
predicted_vector = network.predict(test_vector)
np.testing.assert_array_almost_equal(
predicted_vector, target, decimal=decimal)
def assertPickledNetwork(self, network, X):
stored_network = pickle.dumps(network)
loaded_network = pickle.loads(stored_network)
network_prediction = network.predict(X)
loaded_network_prediction = loaded_network.predict(X)
np.testing.assert_array_almost_equal(
loaded_network_prediction,
network_prediction)
def assertCanNetworkOverfit(self, network_class, epochs=100,
min_accepted_loss=0.001):
x_train = 2 * np.random.random((10, 2)) - 1 # zero centered
y_train = np.random.random((10, 1))
relu_xavier_normal = init.XavierNormal(gain=4)
relu_weight = relu_xavier_normal.sample((2, 20), return_array=True)
xavier_normal = init.XavierNormal(gain=2)
sigmoid_weight = xavier_normal.sample((20, 1), return_array=True)
optimizer = network_class([
layers.Input(2),
layers.Relu(20, weight=relu_weight),
layers.Sigmoid(1, weight=sigmoid_weight),
])
optimizer.train(x_train, y_train, epochs=epochs)
self.assertLess(optimizer.errors.train[-1], min_accepted_loss)
def assertShapesEqual(self, shape1, shape2, *args, **kwargs):
shape1 = shape_to_tuple(shape1)
shape2 = shape_to_tuple(shape2)
self.assertEqual(shape1, shape2, *args, **kwargs) | tests/base.py | import pickle
import inspect
import logging
import unittest
import numpy as np
import tensorflow as tf
from neupy import utils, layers, init
from neupy.utils import tensorflow_eval, tensorflow_session, shape_to_tuple
from neupy.layers.base import format_name_if_specified_as_pattern
from helpers import vectors_for_testing
class BaseTestCase(unittest.TestCase):
single_thread = False
verbose = False
random_seed = 0
def eval(self, value):
return tensorflow_eval(value)
def setUp(self):
tf.reset_default_graph()
if self.single_thread:
sess = tensorflow_session()
sess.close()
config = tf.ConfigProto(
allow_soft_placement=True,
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1,
)
tensorflow_session.cache = tf.Session(config=config)
if not self.verbose:
logging.disable(logging.CRITICAL)
# Clean identifiers map for each test
if hasattr(format_name_if_specified_as_pattern, 'counters'):
del format_name_if_specified_as_pattern.counters
utils.reproducible(seed=self.random_seed)
def tearDown(self):
sess = tensorflow_session()
sess.close()
def assertItemsEqual(self, list1, list2):
self.assertEqual(sorted(list1), sorted(list2))
def assertInvalidVectorTrain(self, network, input_vector, target=None,
decimal=5, is_feature1d=True, **train_kwargs):
"""
Method helps test network prediction training using different
types of row or column vector.
"""
input_vectors = vectors_for_testing(input_vector, is_feature1d)
if target is not None:
target_vectors = vectors_for_testing(target, is_feature1d)
input_vectors = zip(input_vectors, target_vectors)
train_args = inspect.getargspec(network.train).args
if 'epochs' in train_args and 'epochs' not in train_kwargs:
train_kwargs['epochs'] = 5
elif 'epsilon' in train_args and 'epsilon' not in train_kwargs:
train_kwargs['epsilon'] = 0.1
for i, X in enumerate(input_vectors, start=1):
if target is None:
network.train(X, **train_kwargs)
else:
network.train(*X, **train_kwargs)
def assertInvalidVectorPred(self, network, input_vector, target,
decimal=5, is_feature1d=True):
"""
Method helps test network prediction procedure using different
types of row or column vector.
"""
test_vectors = vectors_for_testing(input_vector, is_feature1d)
for i, test_vector in enumerate(test_vectors, start=1):
predicted_vector = network.predict(test_vector)
np.testing.assert_array_almost_equal(
predicted_vector, target, decimal=decimal)
def assertPickledNetwork(self, network, X):
stored_network = pickle.dumps(network)
loaded_network = pickle.loads(stored_network)
network_prediction = network.predict(X)
loaded_network_prediction = loaded_network.predict(X)
np.testing.assert_array_almost_equal(
loaded_network_prediction,
network_prediction)
def assertCanNetworkOverfit(self, network_class, epochs=100,
min_accepted_loss=0.001):
x_train = 2 * np.random.random((10, 2)) - 1 # zero centered
y_train = np.random.random((10, 1))
relu_xavier_normal = init.XavierNormal(gain=4)
relu_weight = relu_xavier_normal.sample((2, 20), return_array=True)
xavier_normal = init.XavierNormal(gain=2)
sigmoid_weight = xavier_normal.sample((20, 1), return_array=True)
optimizer = network_class([
layers.Input(2),
layers.Relu(20, weight=relu_weight),
layers.Sigmoid(1, weight=sigmoid_weight),
])
optimizer.train(x_train, y_train, epochs=epochs)
self.assertLess(optimizer.errors.train[-1], min_accepted_loss)
def assertShapesEqual(self, shape1, shape2, *args, **kwargs):
shape1 = shape_to_tuple(shape1)
shape2 = shape_to_tuple(shape2)
self.assertEqual(shape1, shape2, *args, **kwargs) | 0.745398 | 0.358241 |
import logging, os
from config import Config
from logging.handlers import SMTPHandler, RotatingFileHandler
from flask import Flask, request
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
from flask_mail import Mail
from flask_admin import Admin
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_babel import Babel, lazy_gettext as _l
# Application and core extension setup.  This project uses the simple
# single-app pattern (extensions bound to ``app`` at import time) rather
# than an application factory.
app = Flask(__name__)
app.config.from_object(Config)

# Database and schema migrations (render_as_batch for SQLite ALTER support).
db = SQLAlchemy(app)
migrate = Migrate(app, db, render_as_batch=True)

# Authentication: redirect anonymous users to the auth blueprint's login.
login = LoginManager(app)
login.login_view = 'auth.login'
login.login_message = _l('Please log in to access this page')

# Presentation / mail / timestamps / i18n extensions.
bootstrap = Bootstrap(app)
mail = Mail(app)
moment = Moment(app)
babel = Babel(app)

# Blueprints are imported here, after ``app`` exists, to avoid circular
# imports: the blueprint modules import ``app``/``db`` back from this one.
from app.errors import bp as errors_bp
app.register_blueprint(errors_bp)

from app.auth import bp as auth_bp
app.register_blueprint(auth_bp, url_prefix='/auth')

from app.api import bp as api_bp
app.register_blueprint(api_bp, url_prefix='/api')

from app.main import bp as main_bp
app.register_blueprint(main_bp)

from app import models

# Flask-Admin dashboard: a custom index view guarding /admin, plus plain
# model views for the remaining tables and a link back to the site.
from app.auth.admin import AdminView
from flask_admin.menu import MenuLink
from flask_admin.contrib.sqla import ModelView
from app.models import User, Post, Message, Notification

admin = Admin(app, name='1st Woodhall Blog\'s Admin Dashboard', template_mode='bootstrap3', index_view=AdminView(User,
              db.session, url='/admin', endpoint='admin'))
admin.add_view(ModelView(Post, db.session))
admin.add_view(ModelView(Message, db.session))
admin.add_view(ModelView(Notification, db.session))
admin.add_link(MenuLink(name='Back to Homepage', category='', url='/'))
# Logging configuration.
#
# BUG FIX: the original code unconditionally created and attached a
# RotatingFileHandler *before* the LOG_TO_STDOUT conditional, whose else
# branch then attached a second, identical file handler (every log line
# written twice), and a file handler was attached even when stdout logging
# was requested.  The duplicated unconditional block is removed; the
# conditional alone now decides the destination.
if app.config['LOG_TO_STDOUT']:
    # e.g. on Heroku / containers: log to the process stdout/stderr.
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.INFO)
    app.logger.addHandler(stream_handler)
else:
    # Local deployment: size-rotated log file under ./logs.
    if not os.path.exists('logs'):
        os.mkdir('logs')
    file_handler = RotatingFileHandler('logs/1st_woodhall_blog.log',
                                       maxBytes=10240, backupCount=20)
    file_handler.setFormatter(logging.Formatter(
        '%(asctime)s %(levelname)s: %(message)s '
        '[in %(pathname)s:%(lineno)d]'))
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)

app.logger.setLevel(logging.INFO)
app.logger.info('1st Woodhall Blog Startup')
@babel.localeselector
def get_locale():
    """Pick the best UI language from the request's Accept-Language header.

    Candidates are limited to the languages listed in the app's
    ``LANGUAGES`` config value.
    """
    # BUG FIX: the return line carried a fused dataset-row artifact
    # ("| app/__init__.py | import logging, os") that broke the syntax.
    return request.accept_languages.best_match(app.config['LANGUAGES'])
from config import Config
from logging.handlers import SMTPHandler, RotatingFileHandler
from flask import Flask, request
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
from flask_mail import Mail
from flask_admin import Admin
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_babel import Babel, lazy_gettext as _l
app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)
migrate = Migrate(app, db, render_as_batch=True)
login = LoginManager(app)
login.login_view = 'auth.login'
login.login_message = _l('Please log in to access this page')
bootstrap = Bootstrap(app)
mail = Mail(app)
moment = Moment(app)
babel = Babel(app)
from app.errors import bp as errors_bp
app.register_blueprint(errors_bp)
from app.auth import bp as auth_bp
app.register_blueprint(auth_bp, url_prefix='/auth')
from app.api import bp as api_bp
app.register_blueprint(api_bp, url_prefix='/api')
from app.main import bp as main_bp
app.register_blueprint(main_bp)
from app import models
from app.auth.admin import AdminView
from flask_admin.menu import MenuLink
from flask_admin.contrib.sqla import ModelView
from app.models import User, Post, Message, Notification
admin = Admin(app, name='1st Woodhall Blog\'s Admin Dashboard', template_mode='bootstrap3', index_view=AdminView(User,
db.session, url='/admin', endpoint='admin'))
admin.add_view(ModelView(Post, db.session))
admin.add_view(ModelView(Message, db.session))
admin.add_view(ModelView(Notification, db.session))
admin.add_link(MenuLink(name='Back to Homepage', category='', url='/'))
if not os.path.exists('logs'):
os.mkdir('logs')
file_handler = RotatingFileHandler('logs/1st_woodhall_blog.log', maxBytes=10240, backupCount=20)
file_handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]'))
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
app.logger.info('1st Woodhall Blog Startup')
if app.config['LOG_TO_STDOUT']:
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
app.logger.addHandler(stream_handler)
else:
if not os.path.exists('logs'):
os.mkdir('logs')
file_handler = RotatingFileHandler('logs/1st_woodhall_blog.log',
maxBytes=10240, backupCount=20)
file_handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]'))
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
app.logger.info('1st Woodhall Blog Startup')
@babel.localeselector
def get_locale():
return request.accept_languages.best_match(app.config['LANGUAGES']) | 0.227041 | 0.035013 |
import re
import locale
import util
def strtr(s, repl):
pattern = '|'.join(map(re.escape, sorted(repl, key=len, reverse=True)))
return re.sub(pattern, lambda m: repl[m.group()], s)
class check:
    """Profanity ("mat") filter for chat messages.

    ``__init__`` compiles one large regular expression out of two tables:
    ``self.prefixes`` (optional word prefixes, substituted wherever
    ``%RE_PRETEXT%`` appears) and ``self.badwords`` (obscene word roots).
    ``run()`` normalises a message and searches it with that expression.
    Patterns match both Cyrillic letters and Latin/digit look-alikes;
    the ``_`` placeholder and the ``\pL``/``\PL``/``[:vowel:]`` shorthands
    are rewritten via ``self.trans`` before use.
    """

    def __init__(self, conn, message, settings):
        """Store the context objects and pre-compile the badword pattern.

        :param conn: connection handle; stored only — presumably used by
            callers (verify).
        :param message: message object; only ``message.content`` is read.
        :param settings: settings object; stored only.
        """
        self.conn = conn
        self.message = message
        self.settings = settings
        # Optional prefixes that may precede a badword root, grouped by
        # prefix length.
        self.prefixes = [
            # One-letter prefixes
            '[уyuooаa]_?(?=(е|e|ye|х|x|h))',
            '[вvwfфb]_?(?=(х|п|б|м|г|ж|x|p|m|g|j))',
            # BUG FIX: a comma was missing after this entry, which silently
            # concatenated it with the next pattern into one broken pattern.
            '[вvwfфb]_?[ъь]_?(?=(е|ё|e|ye|yo))',
            '[кk]_?(?=[х])',
            '[сcs]_?(?=[хxhпp])',
            '[сcs]_?[ъь]_?(?=(е|e|ё|ye|yo))',
            '[оo]_?(?=[хxh])',
            # Two-letter prefixes
            '[вvbw]_?[ыyiи]_?',
            '[вvwb]_?[зzсcs]_?(?=(х|п|б|м|г|ж|x|p|m|g|j))',
            '[вvwb]_?[зzсcs]_?[ъь]_?(?=(е|e|ё|ye|yo))',
            '[зzсcs]_?[оoаa]_?',
            '[иiuyвvbw]_?[зzсcs]_?(?=(х|п|б|м|г|ж|x|p|m|g|j|е|ё|e|ye|yo))',
            '[нnh]_?[аaоo]_?',
            '[нnh]_?(е|e|ye|и|i|y|u)_?',
            '[оoаa]_?[бbтtm]_?(?=(х|п|б|м|г|ж|x|p|m|g))',
            '[оoаa]_?[бbтtm]_?[ъь]_?(?=(е|e|ё|ye|yo))',
            '[сcs]_?[оoаa]_?',
            '[сcs]_?[уyu]_?',
            '[пnpдdg]_?[оoаa]_?(?:[бb]?(?=(х|п|б|м|г|ж|x|p|m|g|j|е|ё|e|ye|yo))|[бb]?[ьъ](?=(е|e|ё|ye|yo))|[зzs]?[аa]?)',
            # Three-letter prefixes
            '[бb6]_?[еe]_?[зzсcs]_?',
            '[вvbw]_?[нnh]_?[еeё]_?',
            '[вvbw]_?[оа]_?[зzсcs]_?(?=(х|п|б|м|г|ж|x|p|m|g|j|е|ё|e|ye|yo))',
            '[вvbw]_?[оа]_?[зzсcs]_?[ьъ]_?(?=(х|п|б|м|г|ж|x|p|m|g|j|е|ё|e|ye|yo))',
            '[вvbw]_?[сcs]_?[еeё]_?',
            '[иiuy]_?[зzсcs]_?[оo0аa@]_?',
            '[кk]_?[оo0аa@]_?[еeё]_?',
            '[мm]_?[еeё]_?[жjg]_?',
            '[нnh]_?[аa@оo0]_?[дgdтmt]_?(?=(х|п|б|м|г|ж|x|p|m|g|j|е|ё|e|ye|yo))',
            '[нnh]_?[аa@оo0]_?[дgdтmt]_?[ьъ]_?(?=(е|e|ё|ye|yo))',
            '[нnh]_?[аa@]_?[иiuy]_?',
            '[нnh]_?[иiuy]_?[зzсcs]_?(?=(х|п|б|м|г|ж|x|p|m|g|j|е|ё|e|ye|yo))',
            '[нnh]_?[иiuy]_?[зzсcs]_?[ьъ]_?(?=(е|e|ё|ye|yo))',
            '[оo]_?[бbтmt]_?[оo]_?',
            '[пpn]_?[оo]_?[дdgтmt]_?(?=(х|п|б|м|г|ж|x|p|m|g|j|е|ё|e|ye|yo))',
            '[пpn]_?[оo0]_?[дdgтmt]_?[ьъ]_?(?=(е|e|ё|ye|yo))',
            '[пnp]_?[рpr]_?[аa]_?',
            '[пnp]_?[рpr]_?[еeё]_?',
            '[пnp]_?[рpr]_?[иiuy]_?',
            '[пnp]_?[рpr]_?[оo]_?',
            '[рpr]_?[аa]_?[зzсcs]_?(?=(х|п|б|м|г|ж|x|p|m|g|j|е|ё|e|ye|yo))',
            '[рpr]_?[аa]_?[зzсcs]_?[ьъ]_?(?=(е|e|ё|ye|yo))',
            '[рpr]_?[оo]_?[зzсcs]_?',
            '[дdg]_?[иuiy]_?[сcs]_?',
            '[зz]_?[лl]_?[оo]_?',
            # Four-letter prefixes
            '[вvbw]_?[оo]_?[зzs]_?[оo]_?',
            '[нnh]_?[аa]_?[дdg]_?[оoаa]_?',
            '[нnh]_?[еeё]_?[дdg]_?[оo]_?',
            '[пnp]_?[еeё]_?[рpr]_?[еeё]_?',
            '[пpn]_?[оoаa]_?[дdg]_?[оoаa]_?',
            '[пnp]_?[рpr]_?[еeё]_?[дdg]_?',
            '[рpr]_?[аa]_?[зzсcs]_?[оoаa]_?',
            '[оoаa]_?[дdg]_?[нnh]_?[оoаa]_?',
            '[кk]_?[oо]_?[нnh]_?[оoаa]_?',
            # BUG FIX: a comma was missing after this entry too (it was
            # concatenated with the following pattern).
            '[мm]_?[уyu]_?[дdg]_?[оoаa]_?',
            '[oо]_?[сcs]_?[тtm]_?[оoаa]_?',
            '[дdg]_?[уyu]_?[рpr]_?[оoаa]_?',
            '[хxh]_?[уyu]_?[дdg]_?[оoаa]_?',
            '[мm]_?[уuy]_?[дd]_?[оoaа]_?',
            # Five-letter prefixes
            '[мm]_?[нnh]_?[оo]_?[гg]_?[оoаa]_?',
            '[мm]_?[оo]_?[рpr]_?[дdg]_?[оoаa]_?',
            '[мm]_?[оo]_?[зz]_?[гg]_?[оoаa]_?',
            '[дdg]_?[оoаa]_?[лl]_?[бb]_?[оoаa]_?',
            '[оo]_?[сcs]_?[тtm]_?[рpr]_?[оoаa]_?',
        ]
        # Badword root patterns; the trailing comment is a transliteration
        # of the root each pattern targets.
        self.badwords = [
            '(?<=\PL)%RE_PRETEXT%?[-]?[hхx]_?[уyu]_?[ийiеeёяюjuоo](?<!_hue(?=_)|_hue(?=so_)|_хуе(?=дин)|_hyu(?=ndai_))',  # "khuy"
            '(?<=\PL)[хxh]_?[уyu]_?[лl1]_?[иuiyеeь\']_?',  # "khuli"
            '(?<=\PL)[пp]_?[оo]_?[хxh]_',
            '(?<=\PL)[нn]_?[аa]_?[хxh]_',
            '(?<=\PL)[пp]_?[нnh]_?[хxh]_',
            '(?<=\PL)%RE_PRETEXT%?[-]?[пpрn]_?[иuyiы]_?[зzсcs]_?[дdgж]_?(?:[_]?[:vowel:])',  # "pizda"
            '(?<=\PL)%RE_PRETEXT%?[-]?[бbмm]_?[лl]_?(я|а|a|ya|е|e|э|еа|ea|eа|еa|r)_?(?:_|_?[тдtd])(?<!_б_лет)',  # "blya"
            '(?<=\PL)%RE_PRETEXT%?[-]?(е|e|ё|yo|ye|э)_?(?<!н[eе][её]_|т_е_)[бb6п]_?(?=[уyиuiаa]_|[ыиiоoaаеeёуy][_]?[тtсscлlвvнnрrpкц](?<!_ebo[kt](?=[_])|_буд[уе])(?!ьше|ял_|ятно|еп)|[лl](?:([оoаaыиuyуiяr]|ya))|[нnh][_]?[уyuи]|[кk][_]?[аa]|[сc][_]?[тt]|[тt]_?[аaеeиuiy])',  # "yebat"
            '(?<=\PL)%RE_PRETEXT%?[-]?(?<!с_|т_)[eеёэ]_?[бb6пp](?=_)',  # "yob"
            '(?<=\PL)[пpn]_?[иuyуieе]_?[дdg]_?[eеaаoо]_?[rpр](?=_?[:vowel:]|_)',  # "pidor"
            '(?<=\PL)[пpn]_?[иuyуieе]_?[дdg]_?[rpр](?=_)',  # "pidor"
            '(?<=\PL)[пpn]_?[иuyуieе]_?[тt]_?[aаoо]_?[rpр](?=_?[:vowel:]|_)',  # "pidor"
            '(?<=\PL)[пpn]_?[еe]_?[дdg]_?[иuiy]_?[кk](?=_?[:vowel:]|_)',  # "pedik"
            '(?<=\PL)[мm]_?[уyuи]_?[дdg]_?[аaеe](?<!_myda(?=s_))',  # "mudak"
            '(?<=\PL)[zж]_?h?_?[оo]_?[pпn]_?[:vowel:]',  # "zhopa"
            '(?<=\PL)[мmn]_?[аa]_?[нnh]_?[дdg]_?[:vowel:](?<!манда(?=рин)|manda(?=rin))',  # "manda"
            '(?<=\PL)%RE_PRETEXT%?[-]?[гg]_?[оoаa@]_?[вvwb]_?[нnh]_?[:vowel:]',  # "govno"
            '(?<=\PL)f_?u_?[cс]_?k(?=_)',  # fuck
            '(?<=\PL)%RE_PRETEXT%?[-]?л_?[оo]_?[хxh](?=_)',  # "lokh"
            '(?<=\PL)[^р]?_?[scс]_?[уyuю]_?[kк]_?[:vowel:]',  # "suka"
            '(?<=\PL)[^р]?_?[scс]_?[уyuю]_?(ch|ч)_?[кk]_?[:vowel:]',
            '(?<=\PL)[зz]_?[аa]_?[лl]_?[уyu]_?[пpр]_?[:vowel:]',  # "zalupa"
            '(?<=\PL)[зz]_?[аa]_?[лl]_?[уyuи]_?[пpр]_?[кk]_?[:vowel:]',  # "zalupka"
            '(?<=\PL)%RE_PRETEXT%?[хxh]_?[еe]_?[рpr](?:_?[нnh]_?[:vowel:]|[:vowel:][:consonant:][ь]?|_)(?<!хер(?=[оа]сим[:vowel:]))',  # "kher"
            '(?<=\PL)(ч|ch)_?[лl]_?[ееё]_?[нnh](?=[:vowel:]_|[:vowel:][:consonant:]_|_)',  # "chlen"
            '(?<=\PL)%RE_PRETEXT%?[вvbw]_?[аaоo]_?[гgr]_?[иuiy]_?[нnh]_?(?:[:vowel:])',  # "vagina"
            '(?<=\PL)(ч|ch)_?[мm]_?(?:[:vowel:])(?=_)',  # "chmo"
            '(?<=\PL)(ш|sh|sch)_?[лl]_?[юуuy]_?[хxh]_?(?:[:vowel:]_?)',  # "shlyukha"
            '(?<=\PL)(ш|sh|sch)_?[лl]_?[юуuy]_?(ш|sh|sch)_?',  # "shlyushka"
            '(?<=\PL)(?<!с)[пnp]_?(?!ice)[иyi]_?[сcs]_?(?!ать|ал|ала|али|ало)(?:([ь]_?[:consonant:]_?[:vowel:])|([:vowel:]))',  # "pisya"
            '(?<=\PL)(п|p|n)_?(з|z|s)_?(д|d)_?(ц|ts|s|tc)(?=_)',  # "pzdts"
            '(?<=\PL)[кkc]_?[уuy]_?[рrp]_?[вbw6]_?(?=[:vowel:]_)',  # "kurva"
            '(?<=\PL)[дd]_?[еe]_?[рrp]_?[ьb\']?_?[мm]_?(?=[:vowel:])',  # "dermo"
            '(?<=\PL)[аоao]_?[нn]_?[аa]_?[лl]_?(?=[:vowel:]([:consonant:]_|_)|_)',  # "anal"
        ]
        # Shorthand rewrites applied to the assembled pattern via strtr().
        self.trans = {
            '_' : '\x20',  # space
            '\pL' : '[^\x20\d]',  # any letter
            '\PL' : '[\x20\d]',  # non-letter
            '[:vowel:]' : '[аеиоуыэюяёaeioyur]',  # vowels
            '[:consonant:]' : '[^аеиоуыэюяёaeioyu\x20\d]'  # consonants
        }
        # Substitute the prefix alternation for %RE_PRETEXT%, then expand
        # the shorthands above into real regex syntax.
        self.re_badwords = re.sub(r'%RE_PRETEXT%', '(?:' + '|'.join(self.prefixes) + ')', '|'.join(self.badwords))
        self.re_badwords = strtr(self.re_badwords, self.trans)
def checkUrl(self):
self.ifUrl = re.search(r'http(s)?:\/\/(www\.)?[A-Za-z]{3,}\.[A-Za-z]{,6}\/[A-Za-z]{3,}', self.message.content)
if not self.result == None and not self.ifUrl == None and self.result.start() > self.ifUrl.start() and self.result.start() <= self.ifUrl.end():
return True
return False
#https://regex101.com/r/lKnFhB/1
def run(self):
    """Normalise the message text and search it for profanity.

    Returns True (and logs the match) when a badword is found outside a
    URL, False otherwise.
    """
    self.string = self.message.content
    # Insert spaces before uppercase letters so CamelCase-style masking
    # ("пРивет") is split into separate tokens before lowercasing.
    self.string = ' '.join(re.findall(r'[\S][^A-ZА-ЯЁ]{1,}|[\S]', self.string))
    self.string = ' ' + self.string.lower() + ' '
    # Homoglyph / leetspeak table: map digits, symbols and visually
    # similar Unicode letters onto plain Cyrillic/Latin letters.
    # NOTE(review): a few keys repeat across rows (e.g. '฿', '£', '∑');
    # the last occurrence wins — confirm this is intended.
    self.trans = {
        '4' : 'ч',
        '3' : 'з',
        '0' : 'о',
        '6' : 'б',
        '@' : 'а',
        '$' : 'с',
        '1' : 'и',
        '∃':'е','∄':'е','∅':'о','∈':'е','∉':'е','∊':'е','∋':'э','∌':'э','∍':'э','∏':'п','∑':'е',
        'Ⓐ':'a','Ⓑ':'b','Ⓒ':'c','Ⓓ':'d','Ⓔ':'e','Ⓕ':'f','Ⓖ':'g','Ⓗ':'h','Ⓘ':'i','Ⓙ':'j','Ⓚ':'k','Ⓛ':'l','Ⓜ':'m','Ⓝ':'n','Ⓞ':'o','Ⓟ':'p','Ⓠ':'q','Ⓡ':'r','Ⓢ':'s','Ⓣ':'t','Ⓤ':'u','Ⓥ':'v','Ⓦ':'w','Ⓧ':'x','Ⓨ':'y','Ⓩ':'z',
        'ⓐ':'a','ⓑ':'b','ⓒ':'c','ⓓ':'d','ⓔ':'e','ⓕ':'f','ⓖ':'g','ⓗ':'h','ⓘ':'i','ⓙ':'j','ⓚ':'k','ⓛ':'l','ⓜ':'m','ⓝ':'n','ⓞ':'o','ⓟ':'p','ⓠ':'q','ⓡ':'r','ⓢ':'s','ⓣ':'t','ⓤ':'u','ⓥ':'v','ⓦ':'w','ⓧ':'x','ⓨ':'y','ⓩ':'z',
        '₣' : 'f',
        '¢' : 'c',
        '฿' : 'b',
        '©' : 'c',
        '®' : 'р',
        '!' : 'и',
        'Ꭿ':'a','凡':'a','Ꮨ':'a','∀':'a','₳':'a','Ǻ':'a','ǻ':'a','α':'a','ά':'a','Ά':'a','ẫ':'a','Ắ':'a','ắ':'a','Ằ':'a','ằ':'a','ẳ':'a','Ẵ':'a','ẵ':'a','Ä':'a','ª':'a','ä':'a','Å':'a','À':'a','Á':'a','Â':'a','å':'a','ã':'a','â':'a','à':'a','á':'a','Ã':'a','ᗩ':'a','ᵰ':'a',
        'ℬ' : 'б','Ᏸ' : 'б','β' : 'б','฿' : 'б','ß' : 'б','Ђ' : 'б','ᗷ' : 'б','ᗸ' : 'б','ᗹ' : 'б','ᗽ' : 'б','ᗾ' : 'б','ᗿ' : 'б','Ɓ' : 'б','ƀ' : 'б','ხ' : 'б','方' : 'б','␢' : 'б','Ꮄ' : 'б',
        '☾' : 'c','ℭ' : 'c','ℂ' : 'c','Ç' : 'c','¢' : 'c','ç' : 'c','Č' : 'c','ċ' : 'c','Ċ' : 'c','ĉ' : 'c','ς' : 'c','Ĉ' : 'c','ć' : 'c','Ć' : 'c','č' : 'c','Ḉ' : 'c','ḉ' : 'c','⊂' : 'c','Ꮸ' : 'c','₡' : 'c','¢' : 'c',
        'ᗫ' : 'д','Ɗ' : 'д','Ď' : 'д','ď' : 'д','Đ' : 'д','đ' : 'д','ð' : 'д','∂' : 'д','₫' : 'д','ȡ' : 'д',
        'ℯ' : 'е','ໂ' : 'е','६' : 'е','£' : 'е','Ē' : 'е','℮' : 'е','ē' : 'е','Ė' : 'е','ė' : 'е','Ę' : 'е','ě' : 'е','Ě' : 'е','ę' : 'е','Έ' : 'е','ê' : 'е','ξ' : 'е','Ê' : 'е','È' : 'е','€' : 'е','É' : 'е','∑' : 'е','Ế' : 'е','Ề' : 'е','Ể' : 'е','Ễ' : 'е','é' : 'е','è' : 'е','ع' : 'е','Є' : 'е','є' : 'е','έ' : 'е','ε' : 'е',
        'ℱ' : 'ф','₣' : 'ф','ƒ' : 'ф','∮' : 'ф','Ḟ' : 'ф','ḟ' : 'ф','ჶ' : 'ф','ᶂ' : 'ф','φ' : 'ф',
        'Ꮹ' : 'г','Ꮆ' : 'г','ℊ' : 'г','Ǥ' : 'г','ǥ' : 'г','Ĝ' : 'г','ĝ' : 'г','Ğ' : 'г','ğ' : 'г','Ġ' : 'г','ġ' : 'г','Ģ' : 'г','ģ' : 'г','פ' : 'г','ᶃ' : 'г','₲' : 'г',
        'ℍ' : 'х','ℋ' : 'х','ℎ' : 'х','ℌ' : 'х','ℏ' : 'х','ዙ' : 'х','Ꮵ' : 'х','Ĥ' : 'х','Ħ' : 'х','ħ' : 'х','Ή' : 'х','♅' : 'х','廾' : 'х','Ћ' : 'х','ђ' : 'х','Ḩ' : 'х','Һ' : 'х','ḩ' : 'х','♄' : 'х',
        'ℐ' : 'и','ℑ' : 'и','ί' : 'и','ι' : 'и','Ï' : 'и','Ί' : 'и','Î' : 'и','ì' : 'и','Ì' : 'и','í' : 'и','Í' : 'и','î' : 'и','ϊ' : 'и','ΐ' : 'и','Ĩ' : 'и','ĩ' : 'и','Ī' : 'и','ī' : 'и','Ĭ' : 'и','ĭ' : 'и','İ' : 'и','į' : 'и','Į' : 'и','Ꭵ' : 'и',
        'ჟ' : 'ж','Ĵ' : 'ж','ĵ' : 'ж','ᶖ' : 'ж','ɉ' : 'ж',
        '₭' : 'к','Ꮶ' : 'к','Ќ' : 'к','k' : 'к','ќ' : 'к','ķ' : 'к','Ķ' : 'к','Ҝ' : 'к','ҝ' : 'к','ﻸ' : 'к','ᶄ' : 'к',
        'ℒ' : 'л','ℓ' : 'л','Ŀ' : 'л','ŀ' : 'л','£' : 'л','Ĺ' : 'л','ĺ' : 'л','Ļ' : 'л','ļ' : 'л','λ' : 'л','₤' : 'л','Ł' : 'л','ł' : 'л','ľ' : 'л','Ľ' : 'л','Ḽ' : 'л','ḽ' : 'л','ȴ' : 'л','Ꮭ' : 'л','£' : 'л','Ꮑ' : 'л',
        'ℳ' : 'м','ʍ' : 'м','ᶆ' : 'м','Ḿ' : 'м','ḿ' : 'м','♍' : 'м','ᗰ' : 'м','ᙢ' : 'м','爪' : 'м','♏' : 'м','₥' : 'м',
        'ℕ' : 'н','η' : 'н','ñ' : 'н','ח' : 'н','Ñ' : 'н','ή' : 'н','ŋ' : 'н','Ŋ' : 'н','Ń' : 'н','ń' : 'н','Ņ' : 'н','ņ' : 'н','Ň' : 'н','ň' : 'н','ʼn' : 'н','ȵ' : 'н','ℵ' : 'н','₦' : 'н',
        'ℴ' : 'о','ტ' : 'о','٥' : 'о','Ό' : 'о','ó' : 'о','ό' : 'о','σ' : 'о','ǿ' : 'о','Ǿ' : 'о','Θ' : 'о','ò' : 'о','Ó' : 'о','Ò' : 'о','Ô' : 'о','ô' : 'о','Ö' : 'о','ö' : 'о','Õ' : 'о','õ' : 'о','ờ' : 'о','ớ' : 'о','ọ' : 'о','Ọ' : 'о','ợ' : 'о','Ợ' : 'о','ø' : 'о','Ø' : 'о','Ό' : 'о','Ở' : 'о','Ờ' : 'о','Ớ' : 'о','Ổ' : 'о','ổ' : 'о','Ợ' : 'о','Ō' : 'о','ō' : 'о',
        'ℙ' : 'п','℘' : 'п','þ' : 'п','Þ' : 'п','ρ' : 'п','Ꭾ' : 'п','Ꮅ' : 'п','尸' : 'п','Ҏ' : 'п','ҏ' : 'п','ᶈ' : 'п','₱' : 'п','☧' : 'п','ᖘ' : 'п','ק' : 'п','ァ' : 'п',
        'ℚ' : 'к','q' : 'к','Q' : 'к','ᶐ' : 'к','Ǭ' : 'к','ǭ' : 'к','ჹ' : 'к',
        'ℝ' : 'р','ℜ' : 'р','ℛ' : 'р','℟' : 'р','ჩ' : 'р','ᖇ' : 'р','ř' : 'р','Ř' : 'р','ŗ' : 'р','Ŗ' : 'р','ŕ' : 'р','Ŕ' : 'р','ᶉ' : 'р','Ꮢ' : 'р','尺' : 'р',
        'Ꮥ' : 'с','Ṧ' : 'с','ṧ' : 'с','ȿ' : 'с','ى' : 'с','§' : 'с','Ś' : 'с','ś' : 'с','š' : 'с','Š' : 'с','ş' : 'с','Ş' : 'с','ŝ' : 'с','Ŝ' : 'с','₰' : 'с','∫' : 'с','$' : 'с','ֆ' : 'с',
        '₸' : 'т','†' : 'т','T' : 'т','t' : 'т','τ' : 'т','ΐ' : 'т','Ţ' : 'т','ţ' : 'т','Ť' : 'т','ť' : 'т','ŧ' : 'т','Ŧ' : 'т','ィ' : 'т','干' : 'т','Ṫ' : 'т','ṫ' : 'т','ナ' : 'т','Ꮏ' : 'т','Ꮖ' : 'т','テ' : 'т','₮' : 'т',
        '∪' : 'у','ᙀ' : 'у','Ũ' : 'у','⋒' : 'у','Ủ' : 'у','Ừ' : 'у','Ử' : 'у','Ữ' : 'у','Ự' : 'у','ύ' : 'у','ϋ' : 'у','Ù' : 'у','ú' : 'у','Ú' : 'у','ΰ' : 'у','ù' : 'у','Û' : 'у','û' : 'у','Ü' : 'у','ử' : 'у','ữ' : 'у','ự' : 'у','Џ' : 'у','ü' : 'у','ừ' : 'у','Ũ' : 'у','ũ' : 'у','Ū' : 'у','ū' : 'у','Ŭ' : 'у','ŭ' : 'у','ų' : 'у','Ų' : 'у','ű' : 'у','Ű' : 'у','ů' : 'у','Ů' : 'у',
        '✔' : 'в','✓' : 'в','∨' : 'в','√' : 'в','Ꮙ' : 'в','Ṽ' : 'в','ṽ' : 'в','ᶌ' : 'в','\/' : 'в','℣' : 'в','ʋ' : 'в','₩' : 'в','ẃ' : 'в','Ẃ' : 'в','ẁ' : 'в','Ẁ' : 'в','ẅ' : 'в','ώ' : 'в','ω' : 'в','ŵ' : 'в','Ŵ' : 'в','Ꮤ' : 'в','Ꮃ' : 'в','ฬ' : 'в','ᗯ' : 'в','ᙡ' : 'в','Ẅ' : 'в','ѡ' : 'в','ಎ' : 'в','ಭ' : 'в','Ꮚ' : 'в','Ꮗ' : 'в','ผ' : 'в','ฝ' : 'в','พ' : 'в','ฟ' : 'в',
        'χ' : 'х','×' : 'х','✗' : 'х','✘' : 'х','᙭' : 'х','ჯ' : 'х','Ẍ' : 'х','ẍ' : 'х','ᶍ' : 'х','⏆' : 'х',
        'ɣ' : 'у','Ꭹ' : 'у','Ꮍ' : 'у','Ẏ' : 'у','ẏ' : 'у','ϒ' : 'у','ɤ' : 'у','¥' : 'у','り' : 'у',
        'ℤ' : 'з','ℨ' : 'з','ჳ' : 'з','乙' : 'з','Ẑ' : 'з','ẑ' : 'з','ɀ' : 'з','Ꮓ' : 'з'
    }
    self.string = strtr(self.string, self.trans)
    # Strip punctuation, collapse runs of identical characters
    # ("сууукаа" -> "сука") and fold 'ё' into 'е'.
    self.string = re.sub(r'[\.\,\[\]\(\)\{\}\\\/\?\<\>\"\'\|\=\+\-\*\&\^\%\#\№\:\`\’]', ' ', self.string)
    self.string = re.sub(r'([\s\S])(?=\1)', '', self.string)
    self.string = re.sub(r'ё', 'е', self.string)
    # Search for the profanity patterns themselves.
    self.result = re.search(self.re_badwords, self.string)
    # BUG FIX: the final ``return False`` carried a fused dataset-row
    # artifact ("| modules/matCheck.py | import re") breaking the syntax;
    # also replaced ``not x == None`` with ``is not None``.
    if self.result is not None and not self.checkUrl():
        util.logger(str(self.result), True, True)
        return True
    return False
import locale
import util
def strtr(s, repl):
pattern = '|'.join(map(re.escape, sorted(repl, key=len, reverse=True)))
return re.sub(pattern, lambda m: repl[m.group()], s)
class check:
def __init__(self, conn, message, settings):
self.conn = conn
self.message = message
self.settings = settings
self.prefixes = [
#Из одной буквы
'[уyuooаa]_?(?=(е|e|ye|х|x|h))',
'[вvwfфb]_?(?=(х|п|б|м|г|ж|x|p|m|g|j))',
'[вvwfфb]_?[ъь]_?(?=(е|ё|e|ye|yo))'
'[кk]_?(?=[х])',
'[сcs]_?(?=[хxhпp])',
'[сcs]_?[ъь]_?(?=(е|e|ё|ye|yo))',
'[оo]_?(?=[хxh])',
#Из двух букв
'[вvbw]_?[ыyiи]_?',
'[вvwb]_?[зzсcs]_?(?=(х|п|б|м|г|ж|x|p|m|g|j))',
'[вvwb]_?[зzсcs]_?[ъь]_?(?=(е|e|ё|ye|yo))',
'[зzсcs]_?[оoаa]_?',
'[иiuyвvbw]_?[зzсcs]_?(?=(х|п|б|м|г|ж|x|p|m|g|j|е|ё|e|ye|yo))',
'[нnh]_?[аaоo]_?',
'[нnh]_?(е|e|ye|и|i|y|u)_?',
'[оoаa]_?[бbтtm]_?(?=(х|п|б|м|г|ж|x|p|m|g))',
'[оoаa]_?[бbтtm]_?[ъь]_?(?=(е|e|ё|ye|yo))',
'[сcs]_?[оoаa]_?',
'[сcs]_?[уyu]_?',
'[пnpдdg]_?[оoаa]_?(?:[бb]?(?=(х|п|б|м|г|ж|x|p|m|g|j|е|ё|e|ye|yo))|[бb]?[ьъ](?=(е|e|ё|ye|yo))|[зzs]?[аa]?)',
#Из трёх букв
'[бb6]_?[еe]_?[зzсcs]_?',
'[вvbw]_?[нnh]_?[еeё]_?',
'[вvbw]_?[оа]_?[зzсcs]_?(?=(х|п|б|м|г|ж|x|p|m|g|j|е|ё|e|ye|yo))',
'[вvbw]_?[оа]_?[зzсcs]_?[ьъ]_?(?=(х|п|б|м|г|ж|x|p|m|g|j|е|ё|e|ye|yo))',
'[вvbw]_?[сcs]_?[еeё]_?',
'[иiuy]_?[зzсcs]_?[оo0аa@]_?',
'[кk]_?[оo0аa@]_?[еeё]_?',
'[мm]_?[еeё]_?[жjg]_?',
'[нnh]_?[аa@оo0]_?[дgdтmt]_?(?=(х|п|б|м|г|ж|x|p|m|g|j|е|ё|e|ye|yo))',
'[нnh]_?[аa@оo0]_?[дgdтmt]_?[ьъ]_?(?=(е|e|ё|ye|yo))',
'[нnh]_?[аa@]_?[иiuy]_?',
'[нnh]_?[иiuy]_?[зzсcs]_?(?=(х|п|б|м|г|ж|x|p|m|g|j|е|ё|e|ye|yo))',
'[нnh]_?[иiuy]_?[зzсcs]_?[ьъ]_?(?=(е|e|ё|ye|yo))',
'[оo]_?[бbтmt]_?[оo]_?',
'[пpn]_?[оo]_?[дdgтmt]_?(?=(х|п|б|м|г|ж|x|p|m|g|j|е|ё|e|ye|yo))',
'[пpn]_?[оo0]_?[дdgтmt]_?[ьъ]_?(?=(е|e|ё|ye|yo))',
'[пnp]_?[рpr]_?[аa]_?',
'[пnp]_?[рpr]_?[еeё]_?',
'[пnp]_?[рpr]_?[иiuy]_?',
'[пnp]_?[рpr]_?[оo]_?',
'[рpr]_?[аa]_?[зzсcs]_?(?=(х|п|б|м|г|ж|x|p|m|g|j|е|ё|e|ye|yo))',
'[рpr]_?[аa]_?[зzсcs]_?[ьъ]_?(?=(е|e|ё|ye|yo))',
'[рpr]_?[оo]_?[зzсcs]_?',
'[дdg]_?[иuiy]_?[сcs]_?',
'[зz]_?[лl]_?[оo]_?',
#Из четырёх букв
'[вvbw]_?[оo]_?[зzs]_?[оo]_?',
'[нnh]_?[аa]_?[дdg]_?[оoаa]_?',
'[нnh]_?[еeё]_?[дdg]_?[оo]_?',
'[пnp]_?[еeё]_?[рpr]_?[еeё]_?',
'[пpn]_?[оoаa]_?[дdg]_?[оoаa]_?',
'[пnp]_?[рpr]_?[еeё]_?[дdg]_?',
'[рpr]_?[аa]_?[зzсcs]_?[оoаa]_?',
'[оoаa]_?[дdg]_?[нnh]_?[оoаa]_?',
'[кk]_?[oо]_?[нnh]_?[оoаa]_?',
'[мm]_?[уyu]_?[дdg]_?[оoаa]_?'
'[oо]_?[сcs]_?[тtm]_?[оoаa]_?',
'[дdg]_?[уyu]_?[рpr]_?[оoаa]_?',
'[хxh]_?[уyu]_?[дdg]_?[оoаa]_?',
'[мm]_?[уuy]_?[дd]_?[оoaа]_?',
#Из пяти символов
'[мm]_?[нnh]_?[оo]_?[гg]_?[оoаa]_?',
'[мm]_?[оo]_?[рpr]_?[дdg]_?[оoаa]_?',
'[мm]_?[оo]_?[зz]_?[гg]_?[оoаa]_?',
'[дdg]_?[оoаa]_?[лl]_?[бb]_?[оoаa]_?',
'[оo]_?[сcs]_?[тtm]_?[рpr]_?[оoаa]_?'
]
self.badwords = [
'(?<=\PL)%RE_PRETEXT%?[-]?[hхx]_?[уyu]_?[ийiеeёяюjuоo](?<!_hue(?=_)|_hue(?=so_)|_хуе(?=дин)|_hyu(?=ndai_))', #Хуй
'(?<=\PL)[хxh]_?[уyu]_?[лl1]_?[иuiyеeь\']_?',#хули
'(?<=\PL)[пp]_?[оo]_?[хxh]_',
'(?<=\PL)[нn]_?[аa]_?[хxh]_',
'(?<=\PL)[пp]_?[нnh]_?[хxh]_',
'(?<=\PL)%RE_PRETEXT%?[-]?[пpрn]_?[иuyiы]_?[зzсcs]_?[дdgж]_?(?:[_]?[:vowel:])', #Пизда
'(?<=\PL)%RE_PRETEXT%?[-]?[бbмm]_?[лl]_?(я|а|a|ya|е|e|э|еа|ea|eа|еa|r)_?(?:_|_?[тдtd])(?<!_б_лет)', #Бля
'(?<=\PL)%RE_PRETEXT%?[-]?(е|e|ё|yo|ye|э)_?(?<!н[eе][её]_|т_е_)[бb6п]_?(?=[уyиuiаa]_|[ыиiоoaаеeёуy][_]?[тtсscлlвvнnрrpкц](?<!_ebo[kt](?=[_])|_буд[уе])(?!ьше|ял_|ятно|еп)|[лl](?:([оoаaыиuyуiяr]|ya))|[нnh][_]?[уyuи]|[кk][_]?[аa]|[сc][_]?[тt]|[тt]_?[аaеeиuiy])', #Ебать
'(?<=\PL)%RE_PRETEXT%?[-]?(?<!с_|т_)[eеёэ]_?[бb6пp](?=_)', #ёб
'(?<=\PL)[пpn]_?[иuyуieе]_?[дdg]_?[eеaаoо]_?[rpр](?=_?[:vowel:]|_)',#Пидор
'(?<=\PL)[пpn]_?[иuyуieе]_?[дdg]_?[rpр](?=_)',#Пидор
'(?<=\PL)[пpn]_?[иuyуieе]_?[тt]_?[aаoо]_?[rpр](?=_?[:vowel:]|_)',#Пидор
'(?<=\PL)[пpn]_?[еe]_?[дdg]_?[иuiy]_?[кk](?=_?[:vowel:]|_)',#Педик
'(?<=\PL)[мm]_?[уyuи]_?[дdg]_?[аaеe](?<!_myda(?=s_))',#Мудак
'(?<=\PL)[zж]_?h?_?[оo]_?[pпn]_?[:vowel:]',#Жопа
'(?<=\PL)[мmn]_?[аa]_?[нnh]_?[дdg]_?[:vowel:](?<!манда(?=рин)|manda(?=rin))',#Манда
'(?<=\PL)%RE_PRETEXT%?[-]?[гg]_?[оoаa@]_?[вvwb]_?[нnh]_?[:vowel:]',#Говно
'(?<=\PL)f_?u_?[cс]_?k(?=_)',#Fuck
'(?<=\PL)%RE_PRETEXT%?[-]?л_?[оo]_?[хxh](?=_)',#Лох
'(?<=\PL)[^р]?_?[scс]_?[уyuю]_?[kк]_?[:vowel:]',#Сука
'(?<=\PL)[^р]?_?[scс]_?[уyuю]_?(ch|ч)_?[кk]_?[:vowel:]',
'(?<=\PL)[зz]_?[аa]_?[лl]_?[уyu]_?[пpр]_?[:vowel:]',#Залупа
'(?<=\PL)[зz]_?[аa]_?[лl]_?[уyuи]_?[пpр]_?[кk]_?[:vowel:]',#залупка
'(?<=\PL)%RE_PRETEXT%?[хxh]_?[еe]_?[рpr](?:_?[нnh]_?[:vowel:]|[:vowel:][:consonant:][ь]?|_)(?<!хер(?=[оа]сим[:vowel:]))',#Хер
'(?<=\PL)(ч|ch)_?[лl]_?[ееё]_?[нnh](?=[:vowel:]_|[:vowel:][:consonant:]_|_)',#член
'(?<=\PL)%RE_PRETEXT%?[вvbw]_?[аaоo]_?[гgr]_?[иuiy]_?[нnh]_?(?:[:vowel:])',#вагина
'(?<=\PL)(ч|ch)_?[мm]_?(?:[:vowel:])(?=_)',#чмо
'(?<=\PL)(ш|sh|sch)_?[лl]_?[юуuy]_?[хxh]_?(?:[:vowel:]_?)',#шлюха
'(?<=\PL)(ш|sh|sch)_?[лl]_?[юуuy]_?(ш|sh|sch)_?',#шлюшка
'(?<=\PL)(?<!с)[пnp]_?(?!ice)[иyi]_?[сcs]_?(?!ать|ал|ала|али|ало)(?:([ь]_?[:consonant:]_?[:vowel:])|([:vowel:]))',#Пися/писька
'(?<=\PL)(п|p|n)_?(з|z|s)_?(д|d)_?(ц|ts|s|tc)(?=_)',#пздц
'(?<=\PL)[кkc]_?[уuy]_?[рrp]_?[вbw6]_?(?=[:vowel:]_)',#курва
'(?<=\PL)[дd]_?[еe]_?[рrp]_?[ьb\']?_?[мm]_?(?=[:vowel:])',#дерьмо
'(?<=\PL)[аоao]_?[нn]_?[аa]_?[лl]_?(?=[:vowel:]([:consonant:]_|_)|_)',#анал
]
self.trans = {
'_' : '\x20', #Пробел
'\pL' : '[^\x20\d]', #Буква
'\PL' : '[\x20\d]', #Не буква
'[:vowel:]' : '[аеиоуыэюяёaeioyur]', #Гласные
'[:consonant:]' : '[^аеиоуыэюяёaeioyu\x20\d]' #Согласные
}
self.re_badwords = re.sub(r'%RE_PRETEXT%', '(?:' + '|'.join(self.prefixes) + ')', '|'.join(self.badwords))
self.re_badwords = strtr(self.re_badwords, self.trans)
def checkUrl(self):
self.ifUrl = re.search(r'http(s)?:\/\/(www\.)?[A-Za-z]{3,}\.[A-Za-z]{,6}\/[A-Za-z]{3,}', self.message.content)
if not self.result == None and not self.ifUrl == None and self.result.start() > self.ifUrl.start() and self.result.start() <= self.ifUrl.end():
return True
return False
#https://regex101.com/r/lKnFhB/1
def run(self):
self.string = self.message.content
self.string = ' '.join(re.findall(r'[\S][^A-ZА-ЯЁ]{1,}|[\S]', self.string))
self.string = ' ' + self.string.lower() + ' '
self.trans = {
'4' : 'ч',
'3' : 'з',
'0' : 'о',
'6' : 'б',
'@' : 'а',
'$' : 'с',
'1' : 'и',
'∃':'е','∄':'е','∅':'о','∈':'е','∉':'е','∊':'е','∋':'э','∌':'э','∍':'э','∏':'п','∑':'е',
'Ⓐ':'a','Ⓑ':'b','Ⓒ':'c','Ⓓ':'d','Ⓔ':'e','Ⓕ':'f','Ⓖ':'g','Ⓗ':'h','Ⓘ':'i','Ⓙ':'j','Ⓚ':'k','Ⓛ':'l','Ⓜ':'m','Ⓝ':'n','Ⓞ':'o','Ⓟ':'p','Ⓠ':'q','Ⓡ':'r','Ⓢ':'s','Ⓣ':'t','Ⓤ':'u','Ⓥ':'v','Ⓦ':'w','Ⓧ':'x','Ⓨ':'y','Ⓩ':'z',
'ⓐ':'a','ⓑ':'b','ⓒ':'c','ⓓ':'d','ⓔ':'e','ⓕ':'f','ⓖ':'g','ⓗ':'h','ⓘ':'i','ⓙ':'j','ⓚ':'k','ⓛ':'l','ⓜ':'m','ⓝ':'n','ⓞ':'o','ⓟ':'p','ⓠ':'q','ⓡ':'r','ⓢ':'s','ⓣ':'t','ⓤ':'u','ⓥ':'v','ⓦ':'w','ⓧ':'x','ⓨ':'y','ⓩ':'z',
'₣' : 'f',
'¢' : 'c',
'฿' : 'b',
'©' : 'c',
'®' : 'р',
'!' : 'и',
'Ꭿ':'a','凡':'a','Ꮨ':'a','∀':'a','₳':'a','Ǻ':'a','ǻ':'a','α':'a','ά':'a','Ά':'a','ẫ':'a','Ắ':'a','ắ':'a','Ằ':'a','ằ':'a','ẳ':'a','Ẵ':'a','ẵ':'a','Ä':'a','ª':'a','ä':'a','Å':'a','À':'a','Á':'a','Â':'a','å':'a','ã':'a','â':'a','à':'a','á':'a','Ã':'a','ᗩ':'a','ᵰ':'a',
'ℬ' : 'б','Ᏸ' : 'б','β' : 'б','฿' : 'б','ß' : 'б','Ђ' : 'б','ᗷ' : 'б','ᗸ' : 'б','ᗹ' : 'б','ᗽ' : 'б','ᗾ' : 'б','ᗿ' : 'б','Ɓ' : 'б','ƀ' : 'б','ხ' : 'б','方' : 'б','␢' : 'б','Ꮄ' : 'б',
'☾' : 'c','ℭ' : 'c','ℂ' : 'c','Ç' : 'c','¢' : 'c','ç' : 'c','Č' : 'c','ċ' : 'c','Ċ' : 'c','ĉ' : 'c','ς' : 'c','Ĉ' : 'c','ć' : 'c','Ć' : 'c','č' : 'c','Ḉ' : 'c','ḉ' : 'c','⊂' : 'c','Ꮸ' : 'c','₡' : 'c','¢' : 'c',
'ᗫ' : 'д','Ɗ' : 'д','Ď' : 'д','ď' : 'д','Đ' : 'д','đ' : 'д','ð' : 'д','∂' : 'д','₫' : 'д','ȡ' : 'д',
'ℯ' : 'е','ໂ' : 'е','६' : 'е','£' : 'е','Ē' : 'е','℮' : 'е','ē' : 'е','Ė' : 'е','ė' : 'е','Ę' : 'е','ě' : 'е','Ě' : 'е','ę' : 'е','Έ' : 'е','ê' : 'е','ξ' : 'е','Ê' : 'е','È' : 'е','€' : 'е','É' : 'е','∑' : 'е','Ế' : 'е','Ề' : 'е','Ể' : 'е','Ễ' : 'е','é' : 'е','è' : 'е','ع' : 'е','Є' : 'е','є' : 'е','έ' : 'е','ε' : 'е',
'ℱ' : 'ф','₣' : 'ф','ƒ' : 'ф','∮' : 'ф','Ḟ' : 'ф','ḟ' : 'ф','ჶ' : 'ф','ᶂ' : 'ф','φ' : 'ф',
'Ꮹ' : 'г','Ꮆ' : 'г','ℊ' : 'г','Ǥ' : 'г','ǥ' : 'г','Ĝ' : 'г','ĝ' : 'г','Ğ' : 'г','ğ' : 'г','Ġ' : 'г','ġ' : 'г','Ģ' : 'г','ģ' : 'г','פ' : 'г','ᶃ' : 'г','₲' : 'г',
'ℍ' : 'х','ℋ' : 'х','ℎ' : 'х','ℌ' : 'х','ℏ' : 'х','ዙ' : 'х','Ꮵ' : 'х','Ĥ' : 'х','Ħ' : 'х','ħ' : 'х','Ή' : 'х','♅' : 'х','廾' : 'х','Ћ' : 'х','ђ' : 'х','Ḩ' : 'х','Һ' : 'х','ḩ' : 'х','♄' : 'х',
'ℐ' : 'и','ℑ' : 'и','ί' : 'и','ι' : 'и','Ï' : 'и','Ί' : 'и','Î' : 'и','ì' : 'и','Ì' : 'и','í' : 'и','Í' : 'и','î' : 'и','ϊ' : 'и','ΐ' : 'и','Ĩ' : 'и','ĩ' : 'и','Ī' : 'и','ī' : 'и','Ĭ' : 'и','ĭ' : 'и','İ' : 'и','į' : 'и','Į' : 'и','Ꭵ' : 'и',
'ჟ' : 'ж','Ĵ' : 'ж','ĵ' : 'ж','ᶖ' : 'ж','ɉ' : 'ж',
'₭' : 'к','Ꮶ' : 'к','Ќ' : 'к','k' : 'к','ќ' : 'к','ķ' : 'к','Ķ' : 'к','Ҝ' : 'к','ҝ' : 'к','ﻸ' : 'к','ᶄ' : 'к',
'ℒ' : 'л','ℓ' : 'л','Ŀ' : 'л','ŀ' : 'л','£' : 'л','Ĺ' : 'л','ĺ' : 'л','Ļ' : 'л','ļ' : 'л','λ' : 'л','₤' : 'л','Ł' : 'л','ł' : 'л','ľ' : 'л','Ľ' : 'л','Ḽ' : 'л','ḽ' : 'л','ȴ' : 'л','Ꮭ' : 'л','£' : 'л','Ꮑ' : 'л',
'ℳ' : 'м','ʍ' : 'м','ᶆ' : 'м','Ḿ' : 'м','ḿ' : 'м','♍' : 'м','ᗰ' : 'м','ᙢ' : 'м','爪' : 'м','♏' : 'м','₥' : 'м',
'ℕ' : 'н','η' : 'н','ñ' : 'н','ח' : 'н','Ñ' : 'н','ή' : 'н','ŋ' : 'н','Ŋ' : 'н','Ń' : 'н','ń' : 'н','Ņ' : 'н','ņ' : 'н','Ň' : 'н','ň' : 'н','ʼn' : 'н','ȵ' : 'н','ℵ' : 'н','₦' : 'н',
'ℴ' : 'о','ტ' : 'о','٥' : 'о','Ό' : 'о','ó' : 'о','ό' : 'о','σ' : 'о','ǿ' : 'о','Ǿ' : 'о','Θ' : 'о','ò' : 'о','Ó' : 'о','Ò' : 'о','Ô' : 'о','ô' : 'о','Ö' : 'о','ö' : 'о','Õ' : 'о','õ' : 'о','ờ' : 'о','ớ' : 'о','ọ' : 'о','Ọ' : 'о','ợ' : 'о','Ợ' : 'о','ø' : 'о','Ø' : 'о','Ό' : 'о','Ở' : 'о','Ờ' : 'о','Ớ' : 'о','Ổ' : 'о','ổ' : 'о','Ợ' : 'о','Ō' : 'о','ō' : 'о',
'ℙ' : 'п','℘' : 'п','þ' : 'п','Þ' : 'п','ρ' : 'п','Ꭾ' : 'п','Ꮅ' : 'п','尸' : 'п','Ҏ' : 'п','ҏ' : 'п','ᶈ' : 'п','₱' : 'п','☧' : 'п','ᖘ' : 'п','ק' : 'п','ァ' : 'п',
'ℚ' : 'к','q' : 'к','Q' : 'к','ᶐ' : 'к','Ǭ' : 'к','ǭ' : 'к','ჹ' : 'к',
'ℝ' : 'р','ℜ' : 'р','ℛ' : 'р','℟' : 'р','ჩ' : 'р','ᖇ' : 'р','ř' : 'р','Ř' : 'р','ŗ' : 'р','Ŗ' : 'р','ŕ' : 'р','Ŕ' : 'р','ᶉ' : 'р','Ꮢ' : 'р','尺' : 'р',
'Ꮥ' : 'с','Ṧ' : 'с','ṧ' : 'с','ȿ' : 'с','ى' : 'с','§' : 'с','Ś' : 'с','ś' : 'с','š' : 'с','Š' : 'с','ş' : 'с','Ş' : 'с','ŝ' : 'с','Ŝ' : 'с','₰' : 'с','∫' : 'с','$' : 'с','ֆ' : 'с',
'₸' : 'т','†' : 'т','T' : 'т','t' : 'т','τ' : 'т','ΐ' : 'т','Ţ' : 'т','ţ' : 'т','Ť' : 'т','ť' : 'т','ŧ' : 'т','Ŧ' : 'т','ィ' : 'т','干' : 'т','Ṫ' : 'т','ṫ' : 'т','ナ' : 'т','Ꮏ' : 'т','Ꮖ' : 'т','テ' : 'т','₮' : 'т',
'∪' : 'у','ᙀ' : 'у','Ũ' : 'у','⋒' : 'у','Ủ' : 'у','Ừ' : 'у','Ử' : 'у','Ữ' : 'у','Ự' : 'у','ύ' : 'у','ϋ' : 'у','Ù' : 'у','ú' : 'у','Ú' : 'у','ΰ' : 'у','ù' : 'у','Û' : 'у','û' : 'у','Ü' : 'у','ử' : 'у','ữ' : 'у','ự' : 'у','Џ' : 'у','ü' : 'у','ừ' : 'у','Ũ' : 'у','ũ' : 'у','Ū' : 'у','ū' : 'у','Ŭ' : 'у','ŭ' : 'у','ų' : 'у','Ų' : 'у','ű' : 'у','Ű' : 'у','ů' : 'у','Ů' : 'у',
'✔' : 'в','✓' : 'в','∨' : 'в','√' : 'в','Ꮙ' : 'в','Ṽ' : 'в','ṽ' : 'в','ᶌ' : 'в','\/' : 'в','℣' : 'в','ʋ' : 'в','₩' : 'в','ẃ' : 'в','Ẃ' : 'в','ẁ' : 'в','Ẁ' : 'в','ẅ' : 'в','ώ' : 'в','ω' : 'в','ŵ' : 'в','Ŵ' : 'в','Ꮤ' : 'в','Ꮃ' : 'в','ฬ' : 'в','ᗯ' : 'в','ᙡ' : 'в','Ẅ' : 'в','ѡ' : 'в','ಎ' : 'в','ಭ' : 'в','Ꮚ' : 'в','Ꮗ' : 'в','ผ' : 'в','ฝ' : 'в','พ' : 'в','ฟ' : 'в',
'χ' : 'х','×' : 'х','✗' : 'х','✘' : 'х','᙭' : 'х','ჯ' : 'х','Ẍ' : 'х','ẍ' : 'х','ᶍ' : 'х','⏆' : 'х',
'ɣ' : 'у','Ꭹ' : 'у','Ꮍ' : 'у','Ẏ' : 'у','ẏ' : 'у','ϒ' : 'у','ɤ' : 'у','¥' : 'у','り' : 'у',
'ℤ' : 'з','ℨ' : 'з','ჳ' : 'з','乙' : 'з','Ẑ' : 'з','ẑ' : 'з','ɀ' : 'з','Ꮓ' : 'з'
}
self.string = strtr(self.string, self.trans)
self.string = re.sub(r'[\.\,\[\]\(\)\{\}\\\/\?\<\>\"\'\|\=\+\-\*\&\^\%\#\№\:\`\’]', ' ', self.string)
self.string = re.sub(r'([\s\S])(?=\1)', '', self.string)
self.string = re.sub(r'ё', 'е', self.string)
#Поиск собственно матов
self.result = re.search(self.re_badwords, self.string)
if not self.result == None:
if not self.checkUrl():
util.logger(str(self.result), True, True)
return True
return False
else:
return False | 0.15704 | 0.60326 |
import numpy as np
import os
import unittest
from colour import RGB_COLOURSPACES
from colour_hdri import TESTS_RESOURCES_DIRECTORY
from colour_hdri.generation import image_stack_to_radiance_image
from colour_hdri.calibration import camera_response_functions_Debevec1997
from colour_hdri.utilities import ImageStack, filter_files
# Module metadata.
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2015-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'

__all__ = [
    'FROBISHER_001_DIRECTORY',
    'GENERATION_DIRECTORY',
    'JPG_IMAGES',
    'TestRadianceImage',
]

# Directory holding the "frobisher_001" bracketed JPEG exposures.
FROBISHER_001_DIRECTORY = os.path.join(TESTS_RESOURCES_DIRECTORY,
                                       'frobisher_001')

# Directory holding the pre-computed reference radiance images (*.npy).
GENERATION_DIRECTORY = os.path.join(TESTS_RESOURCES_DIRECTORY, 'colour_hdri',
                                    'generation')

# Paths of the JPEG exposures forming the test image stack.
JPG_IMAGES = filter_files(FROBISHER_001_DIRECTORY, ('jpg', ))
class TestRadianceImage(unittest.TestCase):
    """
    Defines :func:`colour_hdri.generation.radiance.\
image_stack_to_radiance_image` definition unit tests methods.
    """

    def test_radiance_image(self):
        """
        Tests :func:`colour_hdri.generation.\
radiance.image_stack_to_radiance_image` definition.
        """
        # Case 1: merge sRGB-EOTF-decoded (linearised) exposures and
        # compare against the stored linear reference.
        image_stack = ImageStack.from_files(JPG_IMAGES)
        image_stack.data = RGB_COLOURSPACES['sRGB'].cctf_decoding(
            image_stack.data)

        # Lower precision for unit tests under *travis-ci*.
        np.testing.assert_allclose(
            image_stack_to_radiance_image(image_stack),
            np.load(
                os.path.join(GENERATION_DIRECTORY,
                             'test_radiance_image_linear.npy')),
            rtol=0.0001,
            atol=0.0001)

        # Case 2: merge raw exposures using camera response functions
        # recovered with the Debevec (1997) calibration.
        # Lower precision for unit tests under *travis-ci*.
        image_stack = ImageStack.from_files(JPG_IMAGES)
        np.testing.assert_allclose(
            image_stack_to_radiance_image(
                image_stack,
                camera_response_functions=(
                    camera_response_functions_Debevec1997(image_stack))),
            np.load(
                os.path.join(GENERATION_DIRECTORY,
                             'test_radiance_image_crfs.npy')),
            rtol=0.0001,
            atol=0.0001)
# BUG FIX: the ``unittest.main()`` line carried a fused dataset-row
# artifact ("| colour_hdri/... | import numpy as np") breaking the syntax.
if __name__ == '__main__':
    unittest.main()
import os
import unittest
from colour import RGB_COLOURSPACES
from colour_hdri import TESTS_RESOURCES_DIRECTORY
from colour_hdri.generation import image_stack_to_radiance_image
from colour_hdri.calibration import camera_response_functions_Debevec1997
from colour_hdri.utilities import ImageStack, filter_files
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2015-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'FROBISHER_001_DIRECTORY',
'GENERATION_DIRECTORY',
'JPG_IMAGES',
'TestRadianceImage',
]
FROBISHER_001_DIRECTORY = os.path.join(TESTS_RESOURCES_DIRECTORY,
'frobisher_001')
GENERATION_DIRECTORY = os.path.join(TESTS_RESOURCES_DIRECTORY, 'colour_hdri',
'generation')
JPG_IMAGES = filter_files(FROBISHER_001_DIRECTORY, ('jpg', ))
class TestRadianceImage(unittest.TestCase):
    """
    Defines :func:`colour_hdri.generation.radiance.\
    image_stack_to_radiance_image` definition unit tests methods.
    """
    def test_radiance_image(self):
        """
        Tests :func:`colour_hdri.generation.\
        radiance.image_stack_to_radiance_image` definition.

        Two merges are checked against pre-computed ``.npy`` fixtures:
        one on explicitly linearised data, one using camera response
        functions recovered with the Debevec (1997) method.
        """
        image_stack = ImageStack.from_files(JPG_IMAGES)
        # Decode the sRGB CCTF so the merge operates on linear values.
        image_stack.data = RGB_COLOURSPACES['sRGB'].cctf_decoding(
            image_stack.data)
        # Lower precision for unit tests under *travis-ci*.
        np.testing.assert_allclose(
            image_stack_to_radiance_image(image_stack),
            np.load(
                os.path.join(GENERATION_DIRECTORY,
                             'test_radiance_image_linear.npy')),
            rtol=0.0001,
            atol=0.0001)
        # Lower precision for unit tests under *travis-ci*.
        # Fresh, non-decoded stack -- presumably the recovered CRFs take
        # care of the linearisation; confirm against the implementation.
        image_stack = ImageStack.from_files(JPG_IMAGES)
        np.testing.assert_allclose(
            image_stack_to_radiance_image(
                image_stack,
                camera_response_functions=(
                    camera_response_functions_Debevec1997(image_stack))),
            np.load(
                os.path.join(GENERATION_DIRECTORY,
                             'test_radiance_image_crfs.npy')),
            rtol=0.0001,
            atol=0.0001)
if __name__ == '__main__':
unittest.main() | 0.722625 | 0.267041 |
import torch
import bittensor.utils.weight_utils as weight_utils
import pytest
def test_convert_weight_and_uids():
    """Exercise ``convert_weights_and_uids_for_emit`` on valid and invalid input.

    The helper must accept well-formed ``(uids, weights)`` pairs and raise
    ``ValueError`` for a negative weight, a negative uid, or a length
    mismatch; an all-zero weight vector is legal and must not raise.
    """
    uids = torch.tensor(list(range(10)))
    weights = torch.rand(10)
    weight_utils.convert_weights_and_uids_for_emit( uids, weights )
    # min weight < 0
    weights[5] = -1
    # The exception object is not inspected, so no `as` alias is needed.
    with pytest.raises(ValueError):
        weight_utils.convert_weights_and_uids_for_emit( uids, weights )
    # min uid < 0
    weights[5] = 0
    uids[3] = -1
    with pytest.raises(ValueError):
        weight_utils.convert_weights_and_uids_for_emit( uids, weights )
    # len(uids) != len(weights)
    uids[3] = 3
    with pytest.raises(ValueError):
        weight_utils.convert_weights_and_uids_for_emit( uids, weights[1:] )
    # sum(weights) == 0 is legal and must not raise
    weights = torch.zeros(10)
    weight_utils.convert_weights_and_uids_for_emit( uids, weights )
    # random fuzzing for overflow and underflow in the conversion
    for _ in range(5):
        uids = torch.tensor(list(range(10)))
        weights = torch.rand(10)
        weight_utils.convert_weights_and_uids_for_emit( uids, weights )
def test_normalize_with_min_max():
weights = torch.rand(10)
wn = weight_utils.normalize_max_multiple( weights, multiple = 10 )
assert wn.max() / wn.min() <= 11
weights = torch.rand(2)
wn = weight_utils.normalize_max_multiple( weights, multiple = 10 )
assert wn.max() / wn.min() <= 11
weights = torch.randn(10)
wn = weight_utils.normalize_max_multiple( weights, multiple = 10 )
assert wn.max() / wn.min() <= 11
weights = torch.eye(10)[0]
wn = weight_utils.normalize_max_multiple( weights, multiple = 10 )
assert wn.max() / wn.min() <= 11
weights = torch.zeros(10)
wn = weight_utils.normalize_max_multiple( weights, multiple = 10 )
assert wn.max() / wn.min() <= 11
weights = torch.rand(10)
wn = weight_utils.normalize_max_multiple( weights, multiple = 2 )
assert wn.max() / wn.min() <= 3
weights = torch.rand(2)
wn = weight_utils.normalize_max_multiple( weights, multiple = 2 )
assert wn.max() / wn.min() <= 3
weights = torch.randn(10)
wn = weight_utils.normalize_max_multiple( weights, multiple = 2 )
assert wn.max() / wn.min() <= 3
weights = torch.eye(10)[0]
wn = weight_utils.normalize_max_multiple( weights, multiple = 2 )
assert wn.max() / wn.min() <= 3
weights = torch.zeros(10)
wn = weight_utils.normalize_max_multiple( weights, multiple = 2 )
assert wn.max() / wn.min() <= 3 | tests/unit_tests/bittensor_tests/utils/test_weight_utils.py | import torch
import bittensor.utils.weight_utils as weight_utils
import pytest
def test_convert_weight_and_uids():
    """Exercise ``convert_weights_and_uids_for_emit`` on valid and invalid input.

    ``ValueError`` is expected for a negative weight, a negative uid and
    for a uids/weights length mismatch; all-zero weights are accepted.
    """
    uids = torch.tensor(list(range(10)))
    weights = torch.rand(10)
    weight_utils.convert_weights_and_uids_for_emit( uids, weights )
    # min weight < 0 must raise (pytest_wrapped_e is captured but unused)
    weights[5] = -1
    with pytest.raises(ValueError) as pytest_wrapped_e:
        weight_utils.convert_weights_and_uids_for_emit( uids, weights )
    # min uid < 0 must raise
    weights[5] = 0
    uids[3] = -1
    with pytest.raises(ValueError) as pytest_wrapped_e:
        weight_utils.convert_weights_and_uids_for_emit( uids, weights )
    # len(uids) != len(weights) must raise
    uids[3] = 3
    with pytest.raises(ValueError) as pytest_wrapped_e:
        weight_utils.convert_weights_and_uids_for_emit( uids, weights[1:] )
    # sum(weights) == 0 is legal
    weights = torch.zeros(10)
    weight_utils.convert_weights_and_uids_for_emit( uids, weights )
    # test for overflow and underflow via random fuzzing
    for _ in range (5):
        uids = torch.tensor(list(range(10)))
        weights = torch.rand(10)
        weight_utils.convert_weights_and_uids_for_emit( uids, weights )
def test_normalize_with_min_max():
weights = torch.rand(10)
wn = weight_utils.normalize_max_multiple( weights, multiple = 10 )
assert wn.max() / wn.min() <= 11
weights = torch.rand(2)
wn = weight_utils.normalize_max_multiple( weights, multiple = 10 )
assert wn.max() / wn.min() <= 11
weights = torch.randn(10)
wn = weight_utils.normalize_max_multiple( weights, multiple = 10 )
assert wn.max() / wn.min() <= 11
weights = torch.eye(10)[0]
wn = weight_utils.normalize_max_multiple( weights, multiple = 10 )
assert wn.max() / wn.min() <= 11
weights = torch.zeros(10)
wn = weight_utils.normalize_max_multiple( weights, multiple = 10 )
assert wn.max() / wn.min() <= 11
weights = torch.rand(10)
wn = weight_utils.normalize_max_multiple( weights, multiple = 2 )
assert wn.max() / wn.min() <= 3
weights = torch.rand(2)
wn = weight_utils.normalize_max_multiple( weights, multiple = 2 )
assert wn.max() / wn.min() <= 3
weights = torch.randn(10)
wn = weight_utils.normalize_max_multiple( weights, multiple = 2 )
assert wn.max() / wn.min() <= 3
weights = torch.eye(10)[0]
wn = weight_utils.normalize_max_multiple( weights, multiple = 2 )
assert wn.max() / wn.min() <= 3
weights = torch.zeros(10)
wn = weight_utils.normalize_max_multiple( weights, multiple = 2 )
assert wn.max() / wn.min() <= 3 | 0.758868 | 0.708717 |
import os
import torch
import torch.nn as nn
import torch.utils.data as data
import torch.optim as optim
import torchaudio
from asr.data import TextTransform, get_audio_transforms, data_processing
from asr.models import SpeechRecognitionModel
from asr.train import train
from asr.test import test
from asr.utils import IterMeter
from logger.train_logger import TrainLogger
from args.asr_train_arg_parser import ASRTrainArgParser
from saver.model_saver import ModelSaver
from dataset.dataset import Dataset
def main(args):
    """Train the CTC speech-recognition model configured by *args*.

    Builds the train/validation datasets (LibriSpeech or the project
    ``Dataset``), the model, AdamW optimiser, CTC loss and one-cycle LR
    schedule, optionally restores a checkpoint, then runs the
    train/validate/checkpoint epoch loop.
    """
    if args.librispeech:
        print("Loading Librispeech dataset!")
        train_dataset = torchaudio.datasets.LIBRISPEECH(
            args.data_dir, url="train-clean-360", download=True)
        valid_dataset = torchaudio.datasets.LIBRISPEECH(
            args.data_dir, url="test-clean", download=True)
    else:
        train_dataset = Dataset(args, "train", return_pair=args.return_pair)
        valid_dataset = Dataset(args, "val", return_pair=args.return_pair)
    print(f"Training set has {len(train_dataset)} samples. Validation set has {len(valid_dataset)} samples.")
    # train_audio_transforms = get_audio_transforms('train')
    # valid_audio_transforms = get_audio_transforms('valid')
    text_transform = TextTransform()
    # The lambdas bind the split name and text transform into the collate fn.
    train_loader = data.DataLoader(dataset=train_dataset,
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   collate_fn=lambda x: data_processing(
                                       x, "train", text_transform),
                                   num_workers=args.num_workers,
                                   pin_memory=True)
    valid_loader = data.DataLoader(dataset=valid_dataset,
                                   batch_size=args.batch_size,
                                   shuffle=False,
                                   collate_fn=lambda x: data_processing(
                                       x, "valid", text_transform),
                                   num_workers=args.num_workers,
                                   pin_memory=True)
    model = SpeechRecognitionModel(
        args.n_cnn_layers, args.n_rnn_layers, args.rnn_dim,
        args.n_class, args.n_feats, args.stride, args.dropout
    ).to(args.device)
    print('Num Model Parameters', sum(
        [param.nelement() for param in model.parameters()]))
    optimizer = optim.AdamW(model.parameters(), args.lr)
    # blank=28 is the CTC blank index -- presumably a 29-symbol alphabet;
    # TODO confirm against TextTransform.
    criterion = nn.CTCLoss(blank=28).to(args.device)
    scheduler = optim.lr_scheduler.OneCycleLR(optimizer, max_lr=args.lr,
                                              steps_per_epoch=int(
                                                  len(train_loader)),
                                              epochs=args.num_epochs,
                                              anneal_strategy='linear')
    # scheduler = optim.lr_scheduler.ExponentialLR(optimizer, args.gamma)
    saver = ModelSaver(args, max_ckpts=args.max_ckpts,
                       metric_name="test_wer", maximize_metric=False)
    if args.continue_train:
        saver.load_model(model, "SpeechRecognitionModel",
                         args.ckpt_path, optimizer, scheduler)
    elif args.pretrained_ckpt_path:
        saver.load_model(model, "SpeechRecognitionModel",
                         args.pretrained_ckpt_path, None, None)
    # NOTE: checkpoints are loaded before the DataParallel wrap below.
    if torch.cuda.device_count() > 1:
        print(f"Using {torch.cuda.device_count()} GPUs!")
        model = nn.DataParallel(model)
    logger = TrainLogger(args, len(train_loader.dataset))
    logger.log_hparams(args)
    for epoch in range(args.start_epoch, args.num_epochs + 1):
        train(args, model, train_loader, criterion,
              optimizer, scheduler, logger)
        # Validate and checkpoint only every `epochs_per_save` epochs.
        if logger.epoch % args.epochs_per_save == 0:
            metric_dict = test(args, model, valid_loader, criterion, logger)
            saver.save(logger.epoch, model, optimizer, scheduler, args.device,
                       "SpeechRecognitionModel", metric_dict["test_wer"])
        logger.end_epoch()
if __name__ == "__main__":
parser = ASRTrainArgParser()
args = parser.parse_args()
main(args) | asr/main.py | import os
import torch
import torch.nn as nn
import torch.utils.data as data
import torch.optim as optim
import torchaudio
from asr.data import TextTransform, get_audio_transforms, data_processing
from asr.models import SpeechRecognitionModel
from asr.train import train
from asr.test import test
from asr.utils import IterMeter
from logger.train_logger import TrainLogger
from args.asr_train_arg_parser import ASRTrainArgParser
from saver.model_saver import ModelSaver
from dataset.dataset import Dataset
def main(args):
    """Train the CTC speech-recognition model configured by *args*.

    Builds the train/validation datasets (LibriSpeech or the project
    ``Dataset``), the model, AdamW optimiser, CTC loss and one-cycle LR
    schedule, optionally restores a checkpoint, then runs the
    train/validate/checkpoint epoch loop.
    """
    if args.librispeech:
        print("Loading Librispeech dataset!")
        train_dataset = torchaudio.datasets.LIBRISPEECH(
            args.data_dir, url="train-clean-360", download=True)
        valid_dataset = torchaudio.datasets.LIBRISPEECH(
            args.data_dir, url="test-clean", download=True)
    else:
        train_dataset = Dataset(args, "train", return_pair=args.return_pair)
        valid_dataset = Dataset(args, "val", return_pair=args.return_pair)
    print(f"Training set has {len(train_dataset)} samples. Validation set has {len(valid_dataset)} samples.")
    # train_audio_transforms = get_audio_transforms('train')
    # valid_audio_transforms = get_audio_transforms('valid')
    text_transform = TextTransform()
    # The lambdas bind the split name and text transform into the collate fn.
    train_loader = data.DataLoader(dataset=train_dataset,
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   collate_fn=lambda x: data_processing(
                                       x, "train", text_transform),
                                   num_workers=args.num_workers,
                                   pin_memory=True)
    valid_loader = data.DataLoader(dataset=valid_dataset,
                                   batch_size=args.batch_size,
                                   shuffle=False,
                                   collate_fn=lambda x: data_processing(
                                       x, "valid", text_transform),
                                   num_workers=args.num_workers,
                                   pin_memory=True)
    model = SpeechRecognitionModel(
        args.n_cnn_layers, args.n_rnn_layers, args.rnn_dim,
        args.n_class, args.n_feats, args.stride, args.dropout
    ).to(args.device)
    print('Num Model Parameters', sum(
        [param.nelement() for param in model.parameters()]))
    optimizer = optim.AdamW(model.parameters(), args.lr)
    # blank=28 is the CTC blank index -- presumably a 29-symbol alphabet;
    # TODO confirm against TextTransform.
    criterion = nn.CTCLoss(blank=28).to(args.device)
    scheduler = optim.lr_scheduler.OneCycleLR(optimizer, max_lr=args.lr,
                                              steps_per_epoch=int(
                                                  len(train_loader)),
                                              epochs=args.num_epochs,
                                              anneal_strategy='linear')
    # scheduler = optim.lr_scheduler.ExponentialLR(optimizer, args.gamma)
    saver = ModelSaver(args, max_ckpts=args.max_ckpts,
                       metric_name="test_wer", maximize_metric=False)
    if args.continue_train:
        saver.load_model(model, "SpeechRecognitionModel",
                         args.ckpt_path, optimizer, scheduler)
    elif args.pretrained_ckpt_path:
        saver.load_model(model, "SpeechRecognitionModel",
                         args.pretrained_ckpt_path, None, None)
    # NOTE: checkpoints are loaded before the DataParallel wrap below.
    if torch.cuda.device_count() > 1:
        print(f"Using {torch.cuda.device_count()} GPUs!")
        model = nn.DataParallel(model)
    logger = TrainLogger(args, len(train_loader.dataset))
    logger.log_hparams(args)
    for epoch in range(args.start_epoch, args.num_epochs + 1):
        train(args, model, train_loader, criterion,
              optimizer, scheduler, logger)
        # Validate and checkpoint only every `epochs_per_save` epochs.
        if logger.epoch % args.epochs_per_save == 0:
            metric_dict = test(args, model, valid_loader, criterion, logger)
            saver.save(logger.epoch, model, optimizer, scheduler, args.device,
                       "SpeechRecognitionModel", metric_dict["test_wer"])
        logger.end_epoch()
if __name__ == "__main__":
parser = ASRTrainArgParser()
args = parser.parse_args()
main(args) | 0.786418 | 0.401277 |
import numpy as np
import torch
class ppo:
    def __init__(self, model, env):
        """Bind the actor-critic *model* and the environment *env*;
        select CUDA when available, otherwise CPU."""
        self.model = model
        use_cuda = torch.cuda.is_available()
        self.device = torch.device("cuda" if use_cuda else "cpu")
        self.env = env
    def test_env(self, vis=False):
        """Roll out one full episode with the current policy and return
        the accumulated reward; set *vis* to render every step."""
        state = self.env.reset()
        if vis:
            self.env.render()
        done = False
        total_reward = 0
        while not done:
            state = torch.FloatTensor(state).unsqueeze(0).to(self.device)
            # The model returns (action distribution, value); the value
            # head is unused during evaluation.
            dist, _ = self.model(state)
            next_state, reward, done, _ = \
                self.env.step(dist.sample().cpu().numpy()[0])
            state = next_state
            if vis:
                self.env.render()
            total_reward += reward
        return total_reward
@staticmethod
def compute_gae(next_value, rewards, masks, values, gamma=0.99, tau=0.95):
values = values + [next_value]
gae = 0
returns = []
for step in reversed(range(len(rewards))):
delta = rewards[step] + gamma * values[step + 1] * masks[step] - \
values[step] # value: model value,
gae = delta + gamma * tau * masks[step] * gae
returns.insert(0, gae + values[step])
return returns
    @staticmethod
    def ppo_iter(mini_batch_size, states, actions, log_probs,
                 returns, advantage):
        """Yield random mini-batches of transitions for the PPO update.

        Indices are sampled with replacement via ``np.random.randint``,
        so a mini-batch may contain duplicate rows; exactly
        ``batch_size // mini_batch_size`` batches are produced.
        """
        batch_size = states.size(0) # number of states in batch
        for _ in range(batch_size // mini_batch_size):
            rand_ids = np.random.randint(0, batch_size, mini_batch_size)
            # random integers in [0, batch_size), sampled with replacement
            yield states[rand_ids, :], actions[rand_ids, :],\
                log_probs[rand_ids, :], returns[rand_ids, :], \
                advantage[rand_ids, :]
def ppo_update(
self, ppo_epochs, mini_batch_size, states, actions, log_probs, returns,
advantages, optimizer, clip_param=0.2):
for _ in range(ppo_epochs):
for state, action, old_log_probs, return_, advantage in\
ppo.ppo_iter(mini_batch_size, states, actions, log_probs,
returns, advantages):
dist, value = self.model(state)
entropy = dist.entropy().mean()
new_log_probs = dist.log_prob(action)
ratio = (new_log_probs - old_log_probs).exp()
surr1 = ratio * advantage
surr2 = torch.clamp(ratio, 1.0 - clip_param, 1.0 + clip_param)\
* advantage
actor_loss = -torch.min(surr1, surr2).mean()
critic_loss = (return_ - value).pow(2).mean()
loss = 0.5 * critic_loss + actor_loss - 0.001 * entropy
optimizer.zero_grad()
loss.backward()
optimizer.step() | Simulation/ppo_method.py | import numpy as np
import torch
class ppo:
    def __init__(self, model, env):
        """Bind the actor-critic *model* and the environment *env*;
        select CUDA when available, otherwise CPU."""
        self.model = model
        use_cuda = torch.cuda.is_available()
        self.device = torch.device("cuda" if use_cuda else "cpu")
        self.env = env
    def test_env(self, vis=False):
        """Roll out one full episode with the current policy and return
        the accumulated reward; set *vis* to render every step."""
        state = self.env.reset()
        if vis:
            self.env.render()
        done = False
        total_reward = 0
        while not done:
            state = torch.FloatTensor(state).unsqueeze(0).to(self.device)
            # The model returns (action distribution, value); the value
            # head is unused during evaluation.
            dist, _ = self.model(state)
            next_state, reward, done, _ = \
                self.env.step(dist.sample().cpu().numpy()[0])
            state = next_state
            if vis:
                self.env.render()
            total_reward += reward
        return total_reward
    @staticmethod
    def compute_gae(next_value, rewards, masks, values, gamma=0.99, tau=0.95):
        """Return the per-step GAE(lambda) returns (advantage + value).

        *masks* is 0 where an episode terminated, which stops both the
        bootstrap term and the advantage accumulation; *gamma* is the
        discount factor and *tau* the GAE smoothing coefficient.
        """
        values = values + [next_value]
        gae = 0
        returns = []
        for step in reversed(range(len(rewards))):
            delta = rewards[step] + gamma * values[step + 1] * masks[step] - \
                values[step]  # one-step TD error using the critic's values
            gae = delta + gamma * tau * masks[step] * gae
            returns.insert(0, gae + values[step])
        return returns
    @staticmethod
    def ppo_iter(mini_batch_size, states, actions, log_probs,
                 returns, advantage):
        """Yield random mini-batches of transitions for the PPO update.

        Indices are sampled with replacement via ``np.random.randint``,
        so a mini-batch may contain duplicate rows; exactly
        ``batch_size // mini_batch_size`` batches are produced.
        """
        batch_size = states.size(0) # number of states in batch
        for _ in range(batch_size // mini_batch_size):
            rand_ids = np.random.randint(0, batch_size, mini_batch_size)
            # random integers in [0, batch_size), sampled with replacement
            yield states[rand_ids, :], actions[rand_ids, :],\
                log_probs[rand_ids, :], returns[rand_ids, :], \
                advantage[rand_ids, :]
def ppo_update(
self, ppo_epochs, mini_batch_size, states, actions, log_probs, returns,
advantages, optimizer, clip_param=0.2):
for _ in range(ppo_epochs):
for state, action, old_log_probs, return_, advantage in\
ppo.ppo_iter(mini_batch_size, states, actions, log_probs,
returns, advantages):
dist, value = self.model(state)
entropy = dist.entropy().mean()
new_log_probs = dist.log_prob(action)
ratio = (new_log_probs - old_log_probs).exp()
surr1 = ratio * advantage
surr2 = torch.clamp(ratio, 1.0 - clip_param, 1.0 + clip_param)\
* advantage
actor_loss = -torch.min(surr1, surr2).mean()
critic_loss = (return_ - value).pow(2).mean()
loss = 0.5 * critic_loss + actor_loss - 0.001 * entropy
optimizer.zero_grad()
loss.backward()
optimizer.step() | 0.874961 | 0.562237 |
from django.shortcuts import get_object_or_404
from rest_framework import viewsets, status
from rest_framework.response import Response
from rest_framework.decorators import action
from rest_framework.exceptions import ParseError
from users.models import User
from .models import Post, Comment, Like
from .serializers import PostSerializer, CommentSerializer, LikeSerializer
class PostViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows Posts to be viewed or edited.

    Adds photo upload/cleanup on create/update/destroy plus two extra
    actions: ``comments`` (paginated listing) and ``likes`` (toggle).
    """
    queryset = Post.objects.all().order_by('-created')
    serializer_class = PostSerializer
    def create(self, request, *args, **kwargs):
        """
        Create a Post, save file and set user owner.

        Expects ``photo``, ``user_id`` and ``description`` in the request
        data; returns the serialized post with HTTP 201.
        """
        # avoid post without image
        if 'photo' not in request.data:
            raise ParseError("Empty content")
        # obtain user instance
        # NOTE(review): missing 'user_id'/'description' raises KeyError
        # (HTTP 500) rather than a 400 -- consider validating.
        pk = request.data['user_id']
        description = request.data['description']
        user = get_object_or_404(User, id=pk)
        # create user's post and save image
        f = request.data['photo']
        post = Post.objects.create(user=user, description=description)
        post.photo.save(f.name, f, save=True)
        # prepare response
        serializer = PostSerializer(post)
        return Response(serializer.data, status=status.HTTP_201_CREATED)
    def update(self, request, *args, **kwargs):
        """
        Update a Post; when a new ``photo`` is supplied, the old file is
        deleted from storage first so orphans do not accumulate.
        """
        partial = kwargs.pop('partial', False)
        instance = self.get_object()
        if 'photo' in request.data:
            instance.photo.delete(save=False)
        serializer = self.get_serializer(
            instance, data=request.data, partial=partial)
        serializer.is_valid(raise_exception=True)
        self.perform_update(serializer)
        return Response(serializer.data)
    def destroy(self, request, *args, **kwargs):
        """
        Destroy an instance and remove file from S3
        """
        instance = self.get_object()
        instance.photo.delete(save=False)
        instance.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
    @action(detail=True, methods=['get'])
    def comments(self, request, pk=None):
        """
        Get post comments, paginated when pagination is configured.
        """
        post_comments = Comment.objects.filter(post=pk).order_by('created')
        page = self.paginate_queryset(post_comments)
        if page is not None:
            serializer = CommentSerializer(page, many=True)
            return self.get_paginated_response(serializer.data)
        serializer = CommentSerializer(post_comments, many=True)
        return Response(serializer.data)
    @action(detail=True, methods=['put'])
    def likes(self, request, pk=None):
        """
        Like/dislike a post: creates a like (returned with HTTP 200)
        when none exists; deletes an existing like only when ``action``
        is 'dislike'. The existing-like path always answers HTTP 204.
        """
        user_id = request.data['user']
        user = get_object_or_404(User, pk=user_id)
        post = get_object_or_404(Post, pk=pk)
        try:
            like = Like.objects.get(post=post, user=user)
            # NOTE(review): request.data['action'] raises KeyError when
            # absent; an existing like with any other action is a no-op.
            if request.data['action'] == 'dislike':
                like.delete()
        except Like.DoesNotExist:
            like = Like.objects.create(post=post, user=user)
            serializer = LikeSerializer(like)
            return Response(serializer.data)
        return Response(status=status.HTTP_204_NO_CONTENT)
class CommentViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows Comments to be viewed or edited.
"""
queryset = Comment.objects.all().order_by('-created')
serializer_class = CommentSerializer
def create(self, request, *args, **kwargs):
"""
Create a Comment and set user owner
"""
# obtain instances
user_id = request.data['user_id']
post_id = request.data['post_id']
message = request.data['message']
user = get_object_or_404(User, id=user_id)
post = get_object_or_404(Post, uuid=post_id)
# create user's comment
comment = Comment.objects.create(user=user, post=post, message=message)
# prepare response
serializer = self.get_serializer(comment)
return Response(serializer.data, status=status.HTTP_201_CREATED) | petgram/views.py | from django.shortcuts import get_object_or_404
from rest_framework import viewsets, status
from rest_framework.response import Response
from rest_framework.decorators import action
from rest_framework.exceptions import ParseError
from users.models import User
from .models import Post, Comment, Like
from .serializers import PostSerializer, CommentSerializer, LikeSerializer
class PostViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows Posts to be viewed or edited.

    Adds photo upload/cleanup on create/update/destroy plus two extra
    actions: ``comments`` (paginated listing) and ``likes`` (toggle).
    """
    queryset = Post.objects.all().order_by('-created')
    serializer_class = PostSerializer
    def create(self, request, *args, **kwargs):
        """
        Create a Post, save file and set user owner.

        Expects ``photo``, ``user_id`` and ``description`` in the request
        data; returns the serialized post with HTTP 201.
        """
        # avoid post without image
        if 'photo' not in request.data:
            raise ParseError("Empty content")
        # obtain user instance
        # NOTE(review): missing 'user_id'/'description' raises KeyError
        # (HTTP 500) rather than a 400 -- consider validating.
        pk = request.data['user_id']
        description = request.data['description']
        user = get_object_or_404(User, id=pk)
        # create user's post and save image
        f = request.data['photo']
        post = Post.objects.create(user=user, description=description)
        post.photo.save(f.name, f, save=True)
        # prepare response
        serializer = PostSerializer(post)
        return Response(serializer.data, status=status.HTTP_201_CREATED)
    def update(self, request, *args, **kwargs):
        """
        Update a Post; when a new ``photo`` is supplied, the old file is
        deleted from storage first so orphans do not accumulate.
        """
        partial = kwargs.pop('partial', False)
        instance = self.get_object()
        if 'photo' in request.data:
            instance.photo.delete(save=False)
        serializer = self.get_serializer(
            instance, data=request.data, partial=partial)
        serializer.is_valid(raise_exception=True)
        self.perform_update(serializer)
        return Response(serializer.data)
    def destroy(self, request, *args, **kwargs):
        """
        Destroy an instance and remove file from S3
        """
        instance = self.get_object()
        instance.photo.delete(save=False)
        instance.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
    @action(detail=True, methods=['get'])
    def comments(self, request, pk=None):
        """
        Get post comments, paginated when pagination is configured.
        """
        post_comments = Comment.objects.filter(post=pk).order_by('created')
        page = self.paginate_queryset(post_comments)
        if page is not None:
            serializer = CommentSerializer(page, many=True)
            return self.get_paginated_response(serializer.data)
        serializer = CommentSerializer(post_comments, many=True)
        return Response(serializer.data)
    @action(detail=True, methods=['put'])
    def likes(self, request, pk=None):
        """
        Like/dislike a post: creates a like (returned with HTTP 200)
        when none exists; deletes an existing like only when ``action``
        is 'dislike'. The existing-like path always answers HTTP 204.
        """
        user_id = request.data['user']
        user = get_object_or_404(User, pk=user_id)
        post = get_object_or_404(Post, pk=pk)
        try:
            like = Like.objects.get(post=post, user=user)
            # NOTE(review): request.data['action'] raises KeyError when
            # absent; an existing like with any other action is a no-op.
            if request.data['action'] == 'dislike':
                like.delete()
        except Like.DoesNotExist:
            like = Like.objects.create(post=post, user=user)
            serializer = LikeSerializer(like)
            return Response(serializer.data)
        return Response(status=status.HTTP_204_NO_CONTENT)
class CommentViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows Comments to be viewed or edited.
"""
queryset = Comment.objects.all().order_by('-created')
serializer_class = CommentSerializer
def create(self, request, *args, **kwargs):
"""
Create a Comment and set user owner
"""
# obtain instances
user_id = request.data['user_id']
post_id = request.data['post_id']
message = request.data['message']
user = get_object_or_404(User, id=user_id)
post = get_object_or_404(Post, uuid=post_id)
# create user's comment
comment = Comment.objects.create(user=user, post=post, message=message)
# prepare response
serializer = self.get_serializer(comment)
return Response(serializer.data, status=status.HTTP_201_CREATED) | 0.567098 | 0.075007 |
class DoubleLinkedNode:
    """One cached key/value pair; ``pre``/``next`` link list neighbours."""
    def __init__(self, key, value):
        self.key = key
        self.value = value
        self.pre = None
        self.next = None
class LRUCache:
    """Least-recently-used cache: a dict for O(1) lookup plus a doubly
    linked list for O(1) recency updates and eviction.

    The list runs from least recently used (just after ``head``) to most
    recently used (just before ``tail``); both ends are sentinel nodes.
    """
    def __init__(self, capacity):
        """
        :type capacity: int
        """
        self.capacity = capacity
        self.kv = {}        # key -> node
        self.count = 0      # number of live entries
        # Sentinels avoid None checks when linking/unlinking at the ends.
        self.head = DoubleLinkedNode(-1, -1)
        self.tail = DoubleLinkedNode(-1, -1)
        self.head.next = self.tail
        self.tail.pre = self.head
    def get(self, key):
        """
        :type key: int
        :rtype: int
        """
        node = self.kv.get(key)
        if node is None:
            return -1
        # Touch: move the node to the most-recently-used position.
        self.remove(node)
        self.append(node)
        return node.value
    def remove(self, node):
        """Unlink *node* from the list (``kv`` is left untouched)."""
        before, after = node.pre, node.next
        before.next = after
        after.pre = before
        node.pre = None
        node.next = None
    def append(self, node):
        """Link *node* just before ``tail`` (the MRU position)."""
        mru = self.tail.pre
        mru.next = node
        node.pre = mru
        node.next = self.tail
        self.tail.pre = node
    def put(self, key, value):
        """
        :type key: int
        :type value: int
        :rtype: void
        """
        if key in self.kv:
            # Existing key: update in place, then refresh its recency.
            node = self.kv[key]
            node.value = value
            self.remove(node)
        else:
            node = DoubleLinkedNode(key, value)
            self.count += 1
        self.kv[key] = node
        self.append(node)
        # Evict from the LRU end until back within capacity.
        while self.count > self.capacity:
            self.count -= 1
            victim = self.head.next
            self.remove(victim)
            del self.kv[victim.key]
# Your LRUCache object will be instantiated and called as such:
# obj = LRUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
def main():
    """Exercise the cache with the classic LeetCode example sequence.

    ``put`` returns ``None``, so those prints emit ``None``; the inline
    comments give the expected values for the ``get`` calls.
    """
    cache = LRUCache(2)
    print(cache.put(1, 1))
    print(cache.put(2, 2))
    print(cache.get(1)) # 1
    print(cache.put(3, 3))
    print(cache.get(2)) # -1
    print(cache.put(4, 4))
    print(cache.get(1)) # -1
    print(cache.get(3))
    print(cache.get(4))
    print(cache.get(2)) # -1
def test():
    """Replay a LeetCode call sequence by method name, printing results.

    The four ``put`` calls print ``None``; the two ``get`` calls print
    -1 and 3 for this sequence.
    """
    cache = LRUCache(2)
    for call, param in zip(["put", "put", "put", "put", "get", "get"],
                           [[2, 1], [1, 1], [2, 3], [4, 1], [1], [2]]):
        # getattr is the idiomatic replacement for __getattribute__ here.
        print(getattr(cache, call)(*param))
if __name__ == '__main__':
# main()
test() | p146_lru_cache.py | class DoubleLinkedNode:
    def __init__(self, key, value):
        """Store one cache entry; ``pre``/``next`` are the list links,
        assigned by the owning cache when the node is linked in."""
        self.key = key
        self.value = value
        self.pre = None
        self.next = None
class LRUCache:
    """Least-recently-used cache: a dict for O(1) lookup plus a doubly
    linked list (LRU entry just after ``head``, MRU entry just before
    ``tail``) for O(1) recency updates and eviction. ``head`` and
    ``tail`` are sentinel nodes.
    """
    def __init__(self, capacity):
        """
        :type capacity: int
        """
        self.capacity = capacity
        self.kv = dict()
        self.count = 0
        self.head = DoubleLinkedNode(-1, -1)
        self.tail = DoubleLinkedNode(-1, -1)
        self.head.next = self.tail
        self.tail.pre = self.head
    def get(self, key):
        """
        :type key: int
        :rtype: int
        """
        if key not in self.kv:
            return -1
        node = self.kv[key]
        # Touch: move the node to the most-recently-used position.
        self.remove(node)
        self.append(node)
        return node.value
    def remove(self, node):
        # Unlink *node* from the list; self.kv is left untouched.
        node.pre.next = node.next
        node.next.pre = node.pre
        node.pre = node.next = None
    def append(self, node):
        # Insert *node* just before the tail sentinel (MRU position).
        last = self.tail.pre
        last.next = node
        node.pre = last
        node.next = self.tail
        self.tail.pre = node
    def put(self, key, value):
        """
        :type key: int
        :type value: int
        :rtype: void
        """
        if key not in self.kv:
            node = DoubleLinkedNode(key, value)
            self.count += 1
        else:
            # Existing key: update in place, then refresh its recency.
            node = self.kv[key]
            node.value = value
            self.remove(node)
        self.kv[key] = node
        self.append(node)
        # Evict least-recently-used entries while over capacity.
        while self.count > self.capacity:
            self.count -= 1
            remove = self.head.next
            self.remove(remove)
            del self.kv[remove.key]
# Your LRUCache object will be instantiated and called as such:
# obj = LRUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
def main():
    """Exercise the cache with the classic LeetCode example sequence.

    ``put`` returns ``None``, so those prints emit ``None``; the inline
    comments give the expected values for the ``get`` calls.
    """
    cache = LRUCache(2)
    print(cache.put(1, 1))
    print(cache.put(2, 2))
    print(cache.get(1)) # 1
    print(cache.put(3, 3))
    print(cache.get(2)) # -1
    print(cache.put(4, 4))
    print(cache.get(1)) # -1
    print(cache.get(3))
    print(cache.get(4))
    print(cache.get(2)) # -1
def test():
    """Replay a LeetCode call sequence by method name, printing results.

    The four ``put`` calls print ``None``; the two ``get`` calls print
    -1 and 3 for this sequence.
    """
    cache = LRUCache(2)
    for call, param in zip(["put", "put", "put", "put", "get", "get"],
                           [[2, 1], [1, 1], [2, 3], [4, 1], [1], [2]]):
        # getattr is the idiomatic replacement for __getattribute__ here.
        print(getattr(cache, call)(*param))
if __name__ == '__main__':
# main()
test() | 0.661595 | 0.327964 |
import os.path as osp
from jacinle.cli.argument import JacArgumentParser
from jacinle.logging import get_logger
from jacinle.utils.container import GView
from jacinle.utils.meter import GroupMeters
from jacinle.utils.tqdm import tqdm_gofor, get_current_tqdm
from nscl.datasets import get_available_symbolic_datasets, initialize_dataset, get_symbolic_dataset_builder
from nscl.datasets.common.program_executor import execute_program
logger = get_logger(__file__)
# Command-line interface: dataset choice plus data file locations.
parser = JacArgumentParser()
parser.add_argument('--dataset', required=True, choices=get_available_symbolic_datasets(), help='dataset')
parser.add_argument('--data-dir', required=True)
parser.add_argument('--data-scenes-json', type='checked_file')
parser.add_argument('--data-questions-json', type='checked_file')
parser.add_argument('--data-vocab-json', type='checked_file')
args = parser.parse_args()
# Any JSON path not given explicitly defaults to a file inside --data-dir.
if args.data_scenes_json is None:
    args.data_scenes_json = osp.join(args.data_dir, 'scenes.json')
if args.data_questions_json is None:
    args.data_questions_json = osp.join(args.data_dir, 'questions.json')
if args.data_vocab_json is None:
    args.data_vocab_json = osp.join(args.data_dir, 'vocab.json')
def main():
    """Run the symbolic executor over every question and report accuracy.

    On the first wrong prediction this drops into an interactive IPython
    shell and then terminates the process (debugging aid).
    """
    initialize_dataset(args.dataset)
    build_symbolic_dataset = get_symbolic_dataset_builder(args.dataset)
    dataset = build_symbolic_dataset(args)
    dataloader = dataset.make_dataloader(32, False, False, nr_workers=4)
    meters = GroupMeters()
    for idx, feed_dict in tqdm_gofor(dataloader):
        feed_dict = GView(feed_dict)
        for i, (p, s, gt) in enumerate(zip(feed_dict.program_seq, feed_dict.scene, feed_dict.answer)):
            # pred[0] is a status tag ('error' on failure); pred[1] the answer.
            _, pred = execute_program(p, s)
            if pred[0] == 'error':
                raise pred[1]
            if pred[1] != gt:
                print(p)
                print(s)
                # NOTE(review): debugging leftovers -- requires IPython and
                # exits on the first mismatch; remove for batch evaluation.
                from IPython import embed; embed()
                from sys import exit; exit()
            meters.update('accuracy', pred[1] == gt)
        get_current_tqdm().set_description(meters.format_simple('Exec:', 'val', compressed=True))
    logger.critical(meters.format_simple('Symbolic execution test:', 'avg', compressed=False))
if __name__ == '__main__':
main() | scripts/run-symbolic-executor.py | import os.path as osp
from jacinle.cli.argument import JacArgumentParser
from jacinle.logging import get_logger
from jacinle.utils.container import GView
from jacinle.utils.meter import GroupMeters
from jacinle.utils.tqdm import tqdm_gofor, get_current_tqdm
from nscl.datasets import get_available_symbolic_datasets, initialize_dataset, get_symbolic_dataset_builder
from nscl.datasets.common.program_executor import execute_program
logger = get_logger(__file__)
# Command-line interface: dataset choice plus data file locations.
parser = JacArgumentParser()
parser.add_argument('--dataset', required=True, choices=get_available_symbolic_datasets(), help='dataset')
parser.add_argument('--data-dir', required=True)
parser.add_argument('--data-scenes-json', type='checked_file')
parser.add_argument('--data-questions-json', type='checked_file')
parser.add_argument('--data-vocab-json', type='checked_file')
args = parser.parse_args()
# Any JSON path not given explicitly defaults to a file inside --data-dir.
if args.data_scenes_json is None:
    args.data_scenes_json = osp.join(args.data_dir, 'scenes.json')
if args.data_questions_json is None:
    args.data_questions_json = osp.join(args.data_dir, 'questions.json')
if args.data_vocab_json is None:
    args.data_vocab_json = osp.join(args.data_dir, 'vocab.json')
def main():
    """Run the symbolic executor over every question and report accuracy.

    On the first wrong prediction this drops into an interactive IPython
    shell and then terminates the process (debugging aid).
    """
    initialize_dataset(args.dataset)
    build_symbolic_dataset = get_symbolic_dataset_builder(args.dataset)
    dataset = build_symbolic_dataset(args)
    dataloader = dataset.make_dataloader(32, False, False, nr_workers=4)
    meters = GroupMeters()
    for idx, feed_dict in tqdm_gofor(dataloader):
        feed_dict = GView(feed_dict)
        for i, (p, s, gt) in enumerate(zip(feed_dict.program_seq, feed_dict.scene, feed_dict.answer)):
            # pred[0] is a status tag ('error' on failure); pred[1] the answer.
            _, pred = execute_program(p, s)
            if pred[0] == 'error':
                raise pred[1]
            if pred[1] != gt:
                print(p)
                print(s)
                # NOTE(review): debugging leftovers -- requires IPython and
                # exits on the first mismatch; remove for batch evaluation.
                from IPython import embed; embed()
                from sys import exit; exit()
            meters.update('accuracy', pred[1] == gt)
        get_current_tqdm().set_description(meters.format_simple('Exec:', 'val', compressed=True))
    logger.critical(meters.format_simple('Symbolic execution test:', 'avg', compressed=False))
if __name__ == '__main__':
main() | 0.257672 | 0.108378 |
import json
from fuzzywuzzy import fuzz
from utils import db, dt
from utils.cache import cache
from gig._constants import GIG_CACHE_NAME, GIG_CACHE_TIMEOUT
from gig._remote_data import _get_remote_tsv_data
from gig.ent_types import ENTITY_TYPE, get_entity_type
@cache(GIG_CACHE_NAME, GIG_CACHE_TIMEOUT)
def get_entities(entity_type):
    """Get all entity data, for entities of a particular type.
    Args:
        entity_type(str): entity type
    Returns:
        list of entity dicts, with numeric/JSON-like fields coerced
    .. code-block:: python
        >> from gig import ents
        >> entities = ents.get_entities('province')
        >> entities[0]
        {'province_id': 'LK-1', 'name': 'Western',
        'country_id': 'LK', 'fips': 'CE36', 'area': '3709',
        'capital': 'Colombo'}
    """
    def clean_types(d):
        # Coerce numeric TSV string fields into proper Python types.
        if 'area' in d:
            d['area'] = dt.parse_float(d['area'])
        if 'population' in d:
            d['population'] = dt.parse_int(d['population'])
        if 'centroid_altitude' in d:
            try:
                d['centroid_altitude'] = dt.parse_float(d['centroid_altitude'])
            except ValueError:
                # Unparseable altitude falls back to 0 rather than failing.
                d['centroid_altitude'] = 0
        # JSON-like fields are stored with single quotes in the TSV; swap
        # to double quotes before parsing. NOTE(review): this breaks if an
        # embedded string contains an apostrophe -- confirm upstream data.
        for k in ['centroid', 'subs', 'supers', 'ints', 'eqs']:
            if k in d:
                if d[k]:
                    d[k] = json.loads(d[k].replace('\'', '"'))
        return d
    # Drop falsy rows from the remote TSV, then coerce each remaining dict.
    return list(
        map(
            clean_types,
            list(
                filter(
                    lambda x: x,
                    _get_remote_tsv_data('%s.tsv' % (entity_type)),
                )
            ),
        )
    )
@cache(GIG_CACHE_NAME, GIG_CACHE_TIMEOUT)
def get_entity_index(entity_type):
    """Get all entity data, for entities of a particular type.
    Indexed by entity id.
    Args:
        entity_type(str): entity type
    Returns:
        dict mapping entity id to the entity dict
    .. code-block:: python
        >> from gig import ents
        >> entity_index = ents.get_entity_index('province')
        >> entity_index['LK-2']
        {'province_id': 'LK-2', 'name': 'Central',
        'country_id': 'LK', 'fips': 'CE29', 'area': '5584', 'capital': 'Kandy'}
    """
    entities = get_entities(entity_type)
    id_key = db.get_id_key(entity_type)
    # A dict comprehension replaces the previous dict(zip(map(...), ...))
    # chain; duplicate ids keep the last occurrence, exactly as before.
    return {entity[id_key]: entity for entity in entities}
@cache(GIG_CACHE_NAME, GIG_CACHE_TIMEOUT)
def get_entity(entity_id):
    """Get entity by entity id.

    Args:
        entity_id(str): entity id
    Returns:
        entity (dict), or None when the id is unknown

    .. code-block:: python

        >> from gig import ents
        >> ents.get_entity('LK-3')
        {'province_id': 'LK-3', 'name': 'Southern', 'country_id': 'LK',
            'fips': 'CE34', 'area': '5559', 'capital': 'Galle'}
    """
    entity_index = get_entity_index(get_entity_type(entity_id))
    return entity_index.get(entity_id, None)
@cache(GIG_CACHE_NAME, GIG_CACHE_TIMEOUT)
def multiget_entities(entity_ids):
    """Get multiple entities by entity id.

    Args:
        entity_ids(list of str): entity ids
    Returns:
        map of entity id to entity (None for unknown ids)

    .. code-block:: python

        >> from gig import ents
        >> ents.multiget_entities(['LK-1', 'LK-11'])
        {'LK-1': {'province_id': 'LK-1', 'name': 'Western', ...},
         'LK-11': {'district_id': 'LK-11', 'name': 'Colombo', ...}}
    """
    return {entity_id: get_entity(entity_id) for entity_id in entity_ids}
@cache(GIG_CACHE_NAME, GIG_CACHE_TIMEOUT)
def get_entity_ids(entity_type):
    """Get all entity ids of a particular entity type.

    Args:
        entity_type(str): entity type
    Returns:
        entity ids (list)

    .. code-block:: python

        >> from gig import ents
        >> ents.get_entity_ids('province')
        ['LK-1', 'LK-2', 'LK-3', 'LK-4', 'LK-5', 'LK-6',
            'LK-7', 'LK-8', 'LK-9']
    """
    # Iterating a dict yields its keys; list(...) materialises them.
    return list(get_entity_index(entity_type))
@cache(GIG_CACHE_NAME, GIG_CACHE_TIMEOUT)
def get_entities_by_name_fuzzy(
    fuzzy_entity_name,
    filter_entity_type=None,
    filter_parent_id=None,
    limit=5,
    min_fuzz_ratio=80,
):
    """Get entities by fuzzy name search.

    Args:
        fuzzy_entity_name(str): approximate entity name to search for
        filter_entity_type(str): if given, only consider this entity type
        filter_parent_id(str): if given, only consider entities whose 'id'
            field contains this value
        limit(int): maximum number of results to return
        min_fuzz_ratio(int): minimum fuzz.ratio (0-100) to count as a match
    Returns:
        entities (list) that approximately match the entity name,
        best matches first
    """
    matching_entities_info = []
    for entity_type in ENTITY_TYPE.list():
        if filter_entity_type and (filter_entity_type != entity_type):
            continue
        for entity in get_entities(entity_type):
            # NOTE(review): entities elsewhere in this module are keyed as
            # '<type>_id' (e.g. 'province_id'); confirm an 'id' field is
            # actually present here, otherwise this raises KeyError.
            if filter_parent_id and (filter_parent_id not in entity['id']):
                continue
            fuzz_ratio = fuzz.ratio(entity['name'], fuzzy_entity_name)
            if fuzz_ratio >= min_fuzz_ratio:
                matching_entities_info.append((entity, fuzz_ratio))
    # Best match first; slicing covers both the under- and over-limit
    # cases, so the original separate length check was redundant.
    matching_entities_info.sort(key=lambda x: -x[1])
    return [entity for entity, _ in matching_entities_info[:limit]]
if __name__ == '__main__':
    # Smoke test: dump all provinces when run as a script.
    entities = get_entities('province')
    print(entities)
from fuzzywuzzy import fuzz
from utils import db, dt
from utils.cache import cache
from gig._constants import GIG_CACHE_NAME, GIG_CACHE_TIMEOUT
from gig._remote_data import _get_remote_tsv_data
from gig.ent_types import ENTITY_TYPE, get_entity_type
@cache(GIG_CACHE_NAME, GIG_CACHE_TIMEOUT)
def get_entities(entity_type):
"""Get get all entity data, for entities of a particular type.
Args:
entity_type(str): entity type
Returns:
entity data
.. code-block:: python
>> from gig import ents
>> entities = ents.get_entities('province')
>> entities[0]
{'province_id': 'LK-1', 'name': 'Western',
'country_id': 'LK', 'fips': 'CE36', 'area': '3709',
'capital': 'Colombo'}
"""
def clean_types(d):
if 'area' in d:
d['area'] = dt.parse_float(d['area'])
if 'population' in d:
d['population'] = dt.parse_int(d['population'])
if 'centroid_altitude' in d:
try:
d['centroid_altitude'] = dt.parse_float(d['centroid_altitude'])
except ValueError:
d['centroid_altitude'] = 0
for k in ['centroid', 'subs', 'supers', 'ints', 'eqs']:
if k in d:
if d[k]:
d[k] = json.loads(d[k].replace('\'', '"'))
return d
return list(
map(
clean_types,
list(
filter(
lambda x: x,
_get_remote_tsv_data('%s.tsv' % (entity_type)),
)
),
)
)
@cache(GIG_CACHE_NAME, GIG_CACHE_TIMEOUT)
def get_entity_index(entity_type):
"""Get all entity data, for entities of a particular type.
Indexed by entity id.
Args:
entity_type(str): entity type
Returns:
entity data
.. code-block:: python
>> from gig import ents
>> entity_index = ents.get_entity_index('province')
>> entity_index['LK-2']
{'province_id': 'LK-2', 'name': 'Central',
'country_id': 'LK', 'fips': 'CE29', 'area': '5584', 'capital': 'Kandy'}
"""
entities = get_entities(entity_type)
id_key = db.get_id_key(entity_type)
return dict(
zip(
list(
map(
lambda e: e[id_key],
entities,
)
),
entities,
)
)
@cache(GIG_CACHE_NAME, GIG_CACHE_TIMEOUT)
def get_entity(entity_id):
"""Get entity by entity id.
Args:
entity_id(str): entity id
Returns:
entity (dict)
.. code-block:: python
>> from gig import ents
>> ents.get_entity('LK-3')
{'province_id': 'LK-3', 'name': 'Southern', 'country_id': 'LK',
'fips': 'CE34', 'area': '5559', 'capital': 'Galle'}
"""
entity_type = get_entity_type(entity_id)
entity_index = get_entity_index(entity_type)
return entity_index.get(entity_id, None)
@cache(GIG_CACHE_NAME, GIG_CACHE_TIMEOUT)
def multiget_entities(entity_ids):
"""Get multiple entities by entity id.
Args:
entity_ids(list of str): entity_ids id
Returns:
map of entity id to entity
.. code-block:: python
>> from gig import ents
>> ents.multiget_entities(
['LK-1', 'LK-11', 'LK-1127', 'LK-1127015']
)
{'LK-1': {'province_id': 'LK-1', 'name': 'Western',
'country_id': 'LK', 'fips': 'CE36', 'area': '3709',
'capital': 'Colombo'},
'LK-11': {'district_id': 'LK-11', 'name': 'Colombo',
'province_id': 'LK-1', 'ed_id': 'EC-01',
'hasc': 'LK.CO', 'fips': 'CE23', 'area': '642',
'population': '2324349'},
'LK-1127': {'dsd_id': LK-1127', 'name': 'Thimbirigasyaya',
'hasc': 'LK.CO.TH','province_id': 'LK-1', 'district_id': 'LK-11',
'area': '24', 'population': '238057'},
'LK-1127015': {'gnd_id':'LK-1127015', 'name': 'Kurunduwatta',
'province_id': 'LK-1', 'district_id': 'LK-11',
'dsd_id': 'LK-1127', 'pd_id': 'EC-01C', 'gnd_num': 'None'}}
"""
entity_map = {}
for entity_id in entity_ids:
entity_map[entity_id] = get_entity(entity_id)
return entity_map
@cache(GIG_CACHE_NAME, GIG_CACHE_TIMEOUT)
def get_entity_ids(entity_type):
"""Get all entity_ids of a particular entity type.
Args:
entity_type(str): entity type
Returns:
entity ids (list)
.. code-block:: python
>> from gig import ents
>> ents.get_entity_ids('province')
['LK-1', 'LK-2', 'LK-3', 'LK-4', 'LK-5', 'LK-6',
'LK-7', 'LK-8', 'LK-9']
"""
return list(get_entity_index(entity_type).keys())
@cache(GIG_CACHE_NAME, GIG_CACHE_TIMEOUT)
def get_entities_by_name_fuzzy(
fuzzy_entity_name,
filter_entity_type=None,
filter_parent_id=None,
limit=5,
min_fuzz_ratio=80,
):
"""Get entity by fuzzy name search.
Args:
entity_name(str): entity name
limit (int): Maximum number of results to return
Returns:
entities (list) that approximately match the entity name
"""
matching_entities_info = []
for entity_type in ENTITY_TYPE.list():
if filter_entity_type and (filter_entity_type != entity_type):
continue
entities = get_entities(entity_type)
for entity in entities:
if filter_parent_id and (filter_parent_id not in entity['id']):
continue
fuzz_ratio = fuzz.ratio(entity['name'], fuzzy_entity_name)
if fuzz_ratio >= min_fuzz_ratio:
matching_entities_info.append([entity, fuzz_ratio])
matching_entities = list(
map(
lambda x: x[0],
sorted(
matching_entities_info,
key=lambda x: -x[1],
),
)
)
if len(matching_entities) >= limit:
return matching_entities[:limit]
return matching_entities
if __name__ == '__main__':
entities = get_entities('province')
print(entities) | 0.590071 | 0.210644 |
import re
from typing import List
def find_indexes(haystack: List[str], regex: str) -> List[int]:
    """
    Find indexes in a list where a regular expression matches.

    Parameters
    ----------
    haystack
        List of strings.
    regex
        Regular expression to match.

    Returns
    -------
    The indexes where the regular expression was found.
    """
    pattern = re.compile(regex)
    matches = []
    for position, candidate in enumerate(haystack):
        if pattern.search(candidate):
            matches.append(position)
    return matches
def find_style_file(filename, config):
    """Resolve a style file to a path relative to the output directory.

    Looks for ``config[filename]`` first under the configured style path,
    then falls back to the default 'markdownrevealstyle' directory.

    Args:
        filename: config key naming the style file (e.g. 'style_logo').
        config: configuration mapping; 'local_path' is expected to be a
            pathlib-style path (the '/' operator is used on it).

    Returns:
        The path relative to its grandparent directory (i.e. relative to
        the 'out' directory), or None when the file exists in neither
        location.
    """
    outpath = config['local_path'] / 'out'
    filepath = outpath / config['style_path'] / config[filename]
    if not filepath.exists():
        # Fall back to the bundled default style directory.
        filepath = outpath / 'markdownrevealstyle' / config[filename]
    if not filepath.exists():
        return
    return filepath.relative_to(filepath.parents[1])
def tweak_html_footer(html, footer):
    """Insert the footer text right after each reveal container div.

    Returns True when a footer was configured (and inserted), else False.
    """
    if not footer:
        return False
    snippet = f'<div class="markdownreveal_footer">{footer}</div>'
    for index in find_indexes(html, '<div class=\"reveal\">'):
        html.insert(index + 1, snippet)
    return True
def tweak_html_header(html, header):
    """Insert the header text right after each reveal container div."""
    if not header:
        return
    snippet = f'<div class="markdownreveal_header">{header}</div>'
    for index in find_indexes(html, '<div class=\"reveal\">'):
        html.insert(index + 1, snippet)
def tweak_html_warmup(html, config):
    """Insert a warm-up image slide as the first slide, unless disabled."""
    if config.get("no_warmup"):
        return
    warmup = find_style_file('style_warmup', config)
    if not warmup:
        return
    slides_index = find_indexes(html, 'div class="slides"')[0]
    html.insert(slides_index + 1, f'<section><img src="{warmup}" /></section>')
def tweak_html_logo(html, config):
    """Insert the configured logo image after each reveal container div."""
    logo = find_style_file('style_logo', config)
    if not logo:
        return
    snippet = f'<div class="logo"><img src="{logo}" /></div>'
    for index in find_indexes(html, '<div class=\"reveal\">'):
        html.insert(index + 1, snippet)
def tweak_html_background(html, config):
    """Add a data-background attribute to every top-level <section> tag."""
    background = find_style_file('style_background', config)
    if not background:
        return
    replacement = f'<section data-background="{background}"'
    for index in find_indexes(html, '^<section.*'):
        # Only the first '<section' occurrence on the line is rewritten.
        html[index] = html[index].replace('<section', replacement, 1)
def tweak_html_css(html, config):
    """Link the custom CSS file right after the theme stylesheet.

    Returns True when a custom stylesheet was found and inserted.
    """
    css = find_style_file('style_custom_css', config)
    if not css:
        return
    theme_index = find_indexes(html, 'stylesheet.*id="theme"')[0]
    html.insert(theme_index + 1, f'<link rel="stylesheet" href="{css}">')
    return True
def tweak_html_emoji(html):
    """
    Add required scripts to parse emojis and display them with a consistent
    style in all browsers.
    """
    # Twemoji replaces unicode emoji with SVG images after page load.
    # (NOTE(review): the source dump lost the literal's internal
    # indentation; confirm against the original file.)
    text = """
    <script src="https://twemoji.maxcdn.com/v/latest/twemoji.min.js"
            crossorigin="anonymous"></script>
    <script>
        function addEvent(element, eventName, fn) {
            if (element.addEventListener)
                element.addEventListener(eventName, fn, false);
            else if (element.attachEvent)
                element.attachEvent('on' + eventName, fn);
        }
        addEvent(window, 'load', function() {
            twemoji.parse(document.body, {'folder': 'svg', 'ext': '.svg'});
        });
    </script>
    """
    # Inject just before the closing </head> tag.
    html.insert(find_indexes(html, '</head>')[0], text)
def tweak_html(html, config):
    """Apply all configured tweaks to the rendered HTML document.

    The document is processed line-by-line: footer, header, warm-up slide,
    logo, background, custom CSS and emoji support are applied in order,
    and the tweaked lines are rejoined into a single string.
    """
    lines = html.splitlines()
    tweak_html_footer(lines, config['footer'])
    tweak_html_header(lines, config['header'])
    tweak_html_warmup(lines, config)
    tweak_html_logo(lines, config)
    tweak_html_background(lines, config)
    tweak_html_css(lines, config)
    tweak_html_emoji(lines)
    return '\n'.join(lines)
from typing import List
def find_indexes(haystack: List[str], regex: str) -> List[int]:
"""
Find indexes in a list where a regular expression matches.
Parameters
----------
haystack
List of strings.
regex
Regular expression to match.
Returns
-------
The indexes where the regular expression was found.
"""
return [i for i, item in enumerate(haystack) if re.search(regex, item)]
def find_style_file(filename, config):
"""
TODO
"""
outpath = config['local_path'] / 'out'
filepath = outpath / config['style_path'] / config[filename]
if not filepath.exists():
filepath = outpath / 'markdownrevealstyle' / config[filename]
if not filepath.exists():
return
return filepath.relative_to(filepath.parents[1])
def tweak_html_footer(html, footer):
"""
TODO
"""
if not footer:
return False
text = '<div class="markdownreveal_footer">%s</div>' % footer
for index in find_indexes(html, '<div class=\"reveal\">'):
html.insert(index + 1, text)
return True
def tweak_html_header(html, header):
"""
TODO
"""
if not header:
return
text = '<div class="markdownreveal_header">%s</div>' % header
for index in find_indexes(html, '<div class=\"reveal\">'):
html.insert(index + 1, text)
def tweak_html_warmup(html, config):
"""
TODO
"""
if config.get("no_warmup"):
return
fname = find_style_file('style_warmup', config)
if not fname:
return
text = '<section><img src="%s" /></section>' % fname
index = find_indexes(html, 'div class="slides"')[0]
html.insert(index + 1, text)
def tweak_html_logo(html, config):
"""
TODO
"""
fname = find_style_file('style_logo', config)
if not fname:
return
text = '<div class="logo"><img src="%s" /></div>' % fname
for index in find_indexes(html, '<div class=\"reveal\">'):
html.insert(index + 1, text)
def tweak_html_background(html, config):
"""
TODO
"""
fname = find_style_file('style_background', config)
if not fname:
return
for index in find_indexes(html, '^<section.*'):
html[index] = html[index].replace(
'<section', '<section data-background="%s"' % fname, 1
)
def tweak_html_css(html, config):
"""
TODO
"""
fname = find_style_file('style_custom_css', config)
if not fname:
return
index = find_indexes(html, 'stylesheet.*id="theme"')[0]
text = '<link rel="stylesheet" href="%s">' % fname
html.insert(index + 1, text)
return True
def tweak_html_emoji(html):
"""
Add required scripts to parse emojis and display them with a consistent
style in all browsers.
"""
text = """
<script src="https://twemoji.maxcdn.com/v/latest/twemoji.min.js"
crossorigin="anonymous"></script>
<script>
function addEvent(element, eventName, fn) {
if (element.addEventListener)
element.addEventListener(eventName, fn, false);
else if (element.attachEvent)
element.attachEvent('on' + eventName, fn);
}
addEvent(window, 'load', function() {
twemoji.parse(document.body, {'folder': 'svg', 'ext': '.svg'});
});
</script>
"""
html.insert(find_indexes(html, '</head>')[0], text)
def tweak_html(html, config):
"""
TODO
"""
html = html.splitlines()
tweak_html_footer(html, config['footer'])
tweak_html_header(html, config['header'])
tweak_html_warmup(html, config)
tweak_html_logo(html, config)
tweak_html_background(html, config)
tweak_html_css(html, config)
tweak_html_emoji(html)
return '\n'.join(html) | 0.656548 | 0.374448 |
from os import uname
from machine import SPI, Pin, I2C, SoftSPI
from micropython import const
from machine import UART
__version__ = "0.1.2.0"
# ----------------------------------------------------------------------------
class SPIBus(object):
    """SPI bus access."""

    def __init__(self, freq, sck, sdo, sdi=None, spidev=None):
        """Create an SPI bus.

        Args:
            freq: bus clock frequency, in Hz
            sck, sdo: clock and data-out pin numbers
            sdi: optional data-in pin number
            spidev: hardware SPI device id (> 0), a negative value for a
                software (bit-banged) SPI, or None/0 for the default
                hardware bus
        """
        _mi = Pin(sdi) if sdi else None
        _mo = Pin(sdo)
        _sc = Pin(sck)
        # BUG FIX: the original tested `if spidev:` first, so a negative
        # spidev (meant to select SoftSPI) was truthy and never reached the
        # SoftSPI branch, and the default spidev=None fell through to
        # `spidev < 0`, raising TypeError on `None < 0`.
        if spidev is None or spidev == 0:
            self._spi = SPI(baudrate=freq, sck=_sc, mosi=_mo, miso=_mi)
        elif spidev < 0:
            self._spi = SoftSPI(sck=_sc, mosi=_mo, miso=_mi)
        else:
            self._spi = SPI(spidev)
            self._spi.init(baudrate=freq, sck=_sc, mosi=_mo, miso=_mi)

    def deinit(self):
        """Release the underlying SPI peripheral."""
        self._spi.deinit()

    @property
    def bus(self):
        """The underlying machine.SPI / machine.SoftSPI instance."""
        return self._spi

    def write_readinto(self, wbuf, rbuf):
        """Write `wbuf` while simultaneously reading into `rbuf`."""
        self._spi.write_readinto(wbuf, rbuf)

    def write(self, wbuf):
        """Write `wbuf` to the bus."""
        self._spi.write(wbuf)
# ----------------------------------------------------------------------------
class I2CBus(object):
    """I2C bus access."""

    def __init__(self, **kwargs):
        """Create an I2C bus from keyword arguments.

        Keyword Args:
            scl, sda: pin numbers (required)
            freq: bus frequency in Hz (default 400000)
            code: -1 for software I2C, 0/1 for a hardware peripheral; any
                other value selects the software fallback
            scan: if True, scan the bus for devices after initialisation
        """
        self._i2cDevList = []
        freq = 0  # NOTE(review): dead initial value; overwritten below
        do_scan = False if not "scan" in kwargs else kwargs["scan"]
        code = 0 if not "code" in kwargs else kwargs["code"]
        freq = 400000 if not "freq" in kwargs else kwargs["freq"]
        scl = kwargs["scl"]
        sda = kwargs["sda"]
        # Hardware I2C needs a valid code AND MicroPython >= 1.12 (version
        # string parsed from uname); otherwise use the software fallback.
        if not code in [-1,0,1] or float(uname()[2][:4]) < 1.12:
            # Defaults to software implementation of I2C
            self._i2c = I2C(scl=Pin(scl), sda=Pin(sda), freq=freq)
            self._isSoft = True
            codeStr = "Software"
        else:
            # User selected -1=software or 0,1=hardware implementation of I2C
            self._i2c = I2C(code, scl=Pin(scl), sda=Pin(sda), freq=freq)
            self._isSoft = True if code == -1 else False
            codeStr = "Software" if self._isSoft else "Hardware #{0}".format(code)
        s = " frequency is {0} kHz".format(freq/1000) if freq > 0 else ""
        print("{0} I2C bus {1}".format(codeStr, s))
        if do_scan:
            print("Scanning I2C bus ...")
            self._i2cDevList = self._i2c.scan()
            print("... {0} device(s) found ({1})"
                  .format(len(self._i2cDevList), self._i2cDevList))

    def deinit(self):
        # Drop the bus reference; no explicit peripheral teardown here.
        self._i2c = None

    @property
    def bus(self):
        # The underlying machine.I2C instance.
        return self._i2c

    @property
    def deviceAddrList(self):
        # Addresses found by the last bus scan (empty if scan was skipped).
        return self._i2cDevList

    def start(self):
        # Raw start condition; only the software implementation supports it.
        assert self._isSoft, "SoftI2C expected"
        self._i2c.start()

    def stop(self):
        # Raw stop condition; software implementation only.
        assert self._isSoft, "SoftI2C expected"
        self._i2c.stop()

    def write(self, buf):
        # Raw write without addressing; software implementation only.
        assert self._isSoft, "SoftI2C expected"
        self._i2c.write(buf)

    def writeto(self, addr, buf, stop_=True):
        """Write `buf` to device `addr`, sending a stop bit if `stop_`."""
        self._i2c.writeto(addr, buf, stop_)

    def readinto(self, buf):
        """Read len(buf) bytes into `buf` (continuation of a transfer)."""
        self._i2c.readinto(buf)

    def readfrom(self, addr):
        """Read from device `addr` and return the bytes."""
        return self._i2c.readfrom(addr)

    def readfrom_into(self, addr, buf):
        """Read len(buf) bytes from device `addr` into `buf`."""
        self._i2c.readfrom_into(addr, buf)

    def write_then_readinto(self, addr, bufo, bufi, out_start=0, out_end=None,
                            in_start=0, in_end=None, stop_=True):
        """Write bufo[out_start:out_end], then read into bufi[in_start:in_end].

        A temporary buffer is used because readfrom_into fills its whole
        argument; the slice is copied back afterwards.
        """
        self._i2c.writeto(addr, bufo[out_start:out_end], stop_)
        buf = bytearray(bufi[in_start:in_end])
        self._i2c.readfrom_into(addr, buf)
        bufi[in_start:in_end] = buf

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Do not suppress exceptions raised inside the with-block.
        return False
# ---------------------------------------------------------------------------- | platform/esp32/busio.py | from os import uname
from machine import SPI, Pin, I2C, SoftSPI
from micropython import const
from machine import UART
__version__ = "0.1.2.0"
# ----------------------------------------------------------------------------
class SPIBus(object):
"""SPI bus access."""
def __init__(self, freq, sck, sdo, sdi=None, spidev=None):
_mi = Pin(sdi) if sdi else None
_mo = Pin(sdo)
_sc = Pin(sck)
if spidev:
self._spi = SPI(spidev)
self._spi.init(baudrate=freq, sck=_sc, mosi=_mo, miso=_mi)
elif spidev < 0:
self._spi = SoftSPI(sck=_sc, mosi=_mo, miso=_mi)
else:
self._spi = SPI(baudrate=freq, sck=_sc, mosi=_mo, miso=_mi)
def deinit(self):
self._spi.deinit()
@property
def bus(self):
return self._spi
def write_readinto(self, wbuf, rbuf):
self._spi.write_readinto(wbuf, rbuf)
def write(self, wbuf):
self._spi.write(wbuf)
# ----------------------------------------------------------------------------
class I2CBus(object):
"""I2C bus access."""
def __init__(self, **kwargs):
self._i2cDevList = []
freq = 0
do_scan = False if not "scan" in kwargs else kwargs["scan"]
code = 0 if not "code" in kwargs else kwargs["code"]
freq = 400000 if not "freq" in kwargs else kwargs["freq"]
scl = kwargs["scl"]
sda = kwargs["sda"]
if not code in [-1,0,1] or float(uname()[2][:4]) < 1.12:
# Defaults to software implementation of I2C
self._i2c = I2C(scl=Pin(scl), sda=Pin(sda), freq=freq)
self._isSoft = True
codeStr = "Software"
else:
# User selected -1=software or 0,1=hardware implementation of I2C
self._i2c = I2C(code, scl=Pin(scl), sda=Pin(sda), freq=freq)
self._isSoft = True if code == -1 else False
codeStr = "Software" if self._isSoft else "Hardware #{0}".format(code)
s = " frequency is {0} kHz".format(freq/1000) if freq > 0 else ""
print("{0} I2C bus {1}".format(codeStr, s))
if do_scan:
print("Scanning I2C bus ...")
self._i2cDevList = self._i2c.scan()
print("... {0} device(s) found ({1})"
.format(len(self._i2cDevList), self._i2cDevList))
def deinit(self):
self._i2c = None
@property
def bus(self):
return self._i2c
@property
def deviceAddrList(self):
return self._i2cDevList
def start(self):
assert self._isSoft, "SoftI2C expected"
self._i2c.start()
def stop(self):
assert self._isSoft, "SoftI2C expected"
self._i2c.stop()
def write(self, buf):
assert self._isSoft, "SoftI2C expected"
self._i2c.write(buf)
def writeto(self, addr, buf, stop_=True):
self._i2c.writeto(addr, buf, stop_)
def readinto(self, buf):
self._i2c.readinto(buf)
def readfrom(self, addr):
return self._i2c.readfrom(addr)
def readfrom_into(self, addr, buf):
self._i2c.readfrom_into(addr, buf)
def write_then_readinto(self, addr, bufo, bufi, out_start=0, out_end=None,
in_start=0, in_end=None, stop_=True):
self._i2c.writeto(addr, bufo[out_start:out_end], stop_)
buf = bytearray(bufi[in_start:in_end])
self._i2c.readfrom_into(addr, buf)
bufi[in_start:in_end] = buf
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
# ---------------------------------------------------------------------------- | 0.700588 | 0.158272 |
import socket
import time
import logging
from dnslib import *
from treelib import Tree
# UDP server socket for incoming DNS queries on localhost:53
# (binding port 53 usually requires elevated privileges).
udp_socket = socket.socket(type=socket.SOCK_DGRAM)
udp_socket.bind(("127.0.0.1", 53))
# Resolution cache: a tree of domain labels; each node's data is the
# tuple (ip, last_lookup_time, ttl).
cache = Tree()
# Servers queried (in order) when a name is not cached.
# NOTE(review): these are private-range addresses, not the public root
# servers -- confirm they are reachable in the target environment.
ROOT_IP = ["192.168.3.11",
           "172.16.58.3",
           "192.168.3.11",
           "192.168.127.12",
           "172.16.31.10",
           "192.168.127.12",
           "192.168.3.11",
           "172.16.31.10",
           "172.16.58.3",
           "172.16.58.3",
           "172.16.17.32",
           "172.16.31.10",
           "172.16.58.3"]
def init_cache():
    """Seed the cache tree with a root node holding the root-server IPs."""
    global dom_num
    cache.create_node("", 0, data=(ROOT_IP, 0, 0))
    # Next free node identifier (the root consumed id 0).
    dom_num = 1
def find_in_cache(zones):
    """Walk the cache tree along `zones`, creating missing nodes.

    Args:
        zones: domain labels ordered top-down (e.g. ['example', 'www']).

    Returns:
        ((ip, last_lookup_time, ttl), node_id) for the deepest matching
        node; ip is None for freshly created (unresolved) nodes.
    """
    global dom_num
    par_id = 0
    depth = 0
    # BUG FIX: `found` was only assigned inside the loop body, so an empty
    # `zones` list raised NameError below; initialise it explicitly.
    found = False
    for z in zones:
        found = False
        for sub in cache.children(par_id):
            if sub.tag == z:
                par_id = sub.identifier
                found = True
                depth += 1
                break
        if not found:
            break
    if not found:
        # Create the missing tail of the label path with empty data.
        for z in zones[depth:]:
            cache.create_node(z, dom_num, parent=par_id, data=(None, 0, 0))
            par_id = dom_num
            dom_num += 1
            depth += 1
        depth += 1
        # Deeper names get shorter TTLs.
        ttl = int(1e5/depth)
        return (None, time.perf_counter(), ttl), par_id
    else:
        data = cache.get_node(par_id).data
        return data, par_id
def rec_find(domain, ip):
    """Recursively resolve `domain`, starting from the server at `ip`.

    Sends an A-record query; when the response carries no answer records,
    follows the first A record found in the additional (glue) section to
    the next server down the hierarchy.

    Returns:
        The answer as a string, or None when the chain dead-ends.
    """
    que = DNSRecord.question(domain)
    resp = DNSRecord.parse(que.send(ip))
    if not resp.rr:
        # No answers: chase the first A-type (rtype == 1) glue record.
        for record in resp.ar:
            if record.rtype == 1:
                next_ip = str(record.rdata)
                return rec_find(domain, next_ip)
    else:
        return str(resp.rr[0].rdata)
    return None
def resolve(domain):
    """Resolve `domain` to an IP address, using the cache when fresh.

    Returns:
        (ip, ttl) tuple; ip is None when resolution failed.
    """
    global dom_num  # NOTE(review): declared but never rebound here
    # Reversed label path excluding the last label,
    # e.g. 'www.example.com.' -> ['com', 'example', 'www'].
    zones = domain.split('.')[-2::-1]
    (ip, last_time, ttl), par_id = find_in_cache(zones)
    if not ip:
        # Cache miss: try each configured root server until one answers.
        for root_ip in cache.get_node(0).data[0]:
            ip = rec_find(domain, root_ip)
            if ip:
                break
        if ip:
            cache.get_node(par_id).data = (ip, last_time, ttl)
    else:
        # Cache hit: re-resolve when the entry's TTL has expired.
        new_time = time.perf_counter()
        if new_time - last_time > ttl:
            for root_ip in cache.get_node(0).data[0]:
                ip = rec_find(domain, root_ip)
                if ip:
                    break
            if ip:
                cache.get_node(par_id).data = (ip, new_time, ttl)
    return ip, ttl
if __name__ == '__main__':
    init_cache()
    # Log every resolution attempt to the 'log' file, truncating on start.
    logging.basicConfig(level=logging.DEBUG,
                        filename='log',
                        filemode='w',
                        format='%(asctime)s - %(levelname)s - %(message)s',
                        datefmt='%d-%b-%y %H:%M:%S')
    try:
        while True:
            data, addr = udp_socket.recvfrom(1024)
            request = DNSRecord.parse(data)
            header = request.header
            # Only handle standard queries (qr == 0, no error code set).
            if not header.qr and not header.rcode:
                answers = []
                for question in request.questions:
                    domain = question.qname
                    if question.qtype == 1:  # A record
                        ip, ttl = resolve(str(domain))
                        if ip:
                            answers.append(RR(domain, ttl=ttl, rdata=A(ip)))
                            logging.info(str(domain) + " - " + str(ip))
                        else:
                            logging.error(str(domain) + " - IP-address was not found!")
                if not answers:
                    header.rcode = 2  # server failure
                header.ra = 1
                header.qr = 1
                reply = DNSRecord(header, request.questions, answers)
                udp_socket.sendto(reply.pack(), addr)
    except KeyboardInterrupt:
        udp_socket.close()
        exit(0)
import time
import logging
from dnslib import *
from treelib import Tree
udp_socket = socket.socket(type=socket.SOCK_DGRAM)
udp_socket.bind(("127.0.0.1", 53))
cache = Tree()
ROOT_IP = ["192.168.3.11",
"172.16.58.3",
"192.168.3.11",
"192.168.127.12",
"172.16.31.10",
"192.168.127.12",
"192.168.3.11",
"172.16.31.10",
"172.16.58.3",
"172.16.58.3",
"172.16.17.32",
"172.16.31.10",
"172.16.58.3"]
def init_cache():
global dom_num
dom_num = 0
cache.create_node("", 0, data=(ROOT_IP, 0, 0))
dom_num += 1
def find_in_cache(zones):
global dom_num
par_id = 0
depth = 0
for z in zones:
found = False
for sub in cache.children(par_id):
if sub.tag == z:
par_id = sub.identifier
found = True
depth += 1
break
if not found:
break
if not found:
for z in zones[depth:]:
cache.create_node(z, dom_num, parent=par_id, data=(None, 0, 0))
par_id = dom_num
dom_num += 1
depth += 1
depth += 1
ttl = int(1e5/depth)
return (None, time.perf_counter(), ttl), par_id
else:
data = cache.get_node(par_id).data
return data, par_id
def rec_find(domain, ip):
que = DNSRecord.question(domain)
resp = DNSRecord.parse(que.send(ip))
if not resp.rr:
for record in resp.ar:
if record.rtype == 1:
next_ip = str(record.rdata)
return rec_find(domain, next_ip)
else:
return str(resp.rr[0].rdata)
return None
def resolve(domain):
global dom_num
zones = domain.split('.')[-2::-1]
(ip, last_time, ttl), par_id = find_in_cache(zones)
if not ip:
for root_ip in cache.get_node(0).data[0]:
ip = rec_find(domain, root_ip)
if ip:
break
if ip:
cache.get_node(par_id).data = (ip, last_time, ttl)
else:
new_time = time.perf_counter()
if new_time - last_time > ttl:
for root_ip in cache.get_node(0).data[0]:
ip = rec_find(domain, root_ip)
if ip:
break
if ip:
cache.get_node(par_id).data = (ip, new_time, ttl)
return ip, ttl
if __name__ == '__main__':
init_cache()
logging.basicConfig(level=logging.DEBUG,
filename='log',
filemode='w',
format='%(asctime)s - %(levelname)s - %(message)s',
datefmt='%d-%b-%y %H:%M:%S')
try:
while True:
data, addr = udp_socket.recvfrom(1024)
record = DNSRecord.parse(data)
header = record.header
qr = header.qr
rcode = header.rcode
if not qr and not rcode:
answers = []
for que in record.questions:
domain = que.qname
if que.qtype == 1:
ip, ttl = resolve(str(domain))
if ip:
answers.append(RR(domain, ttl=ttl, rdata=A(ip)))
logging.info(str(domain) + " - " + str(ip))
else:
logging.error(str(domain) + " - IP-address was not found!")
if not answers:
header.rcode = 2
header.ra = 1
header.qr = 1
answer = DNSRecord(header, record.questions, answers)
udp_socket.sendto(answer.pack(), addr)
except KeyboardInterrupt:
udp_socket.close()
exit(0) | 0.236252 | 0.187337 |
import multiprocessing as mp
import queue

from pretraining.pretraining_example import create_example, create_context
class AlbertDataWorker(mp.Process):
    """Worker process turning raw paragraphs into pretraining examples.

    Pulls paragraphs from `inqueue`, converts each with `create_example`,
    and pushes the result onto `outqueue` while `should_run` is set.
    """

    def __init__(self, idx, config, inqueue, outqueue, *args, **kwargs):
        super(AlbertDataWorker, self).__init__(*args, **kwargs)
        self.idx = idx            # worker identifier, e.g. "<iter>:<n>"
        self.config = config
        self.context = None       # created lazily inside the child process
        self.inqueue = inqueue
        self.outqueue = outqueue
        # NOTE(review): this flag is cleared from the parent process (see
        # AlbertDataIter.__del__); since run() executes in a separate
        # process, the child's copy never changes -- confirm intended.
        self.should_run = True

    def run(self):
        self.context = self._init_context()
        while self.should_run:
            try:
                par = self.inqueue.get(block=True, timeout=1)
            except queue.Empty:
                # BUG FIX: mp.Queue.get raises queue.Empty on timeout, not
                # multiprocessing.TimeoutError; the original except clause
                # never matched, so the worker died on the first timeout.
                continue
            example = self._create_example(par)
            self.outqueue.put(example)
        self._destroy_context(self.context)

    def _create_example(self, par):
        # Convert one paragraph into a pretraining example.
        return create_example(par, self.config, self.context)

    def _init_context(self):
        # Per-process context; built after the process starts.
        return create_context(self.config)

    def _destroy_context(self, context):
        # Hook for subclasses; the base context needs no teardown.
        pass
class AlbertDataFeeder(mp.Process):
    """Feeder process streaming paragraphs from text files into a queue.

    Input files are plain text with paragraphs separated by blank lines.
    """

    def __init__(self, text_files, queue, *args, **kwargs):
        super(AlbertDataFeeder, self).__init__(*args, **kwargs)
        self.text_files = text_files
        self.queue = queue

    def run(self):
        for file_name in self.text_files:
            with open(file_name) as f:
                for par in self._split_pars(f):
                    self.queue.put(par)

    @staticmethod
    def _split_pars(f):
        """Yield paragraphs (newline-joined groups of non-blank lines).

        BUG FIX: the original appended every line -- including the blank
        separators -- to the current paragraph, so paragraphs carried
        stray empty strings and repeated blank lines yielded empty
        paragraphs; it also dropped the final paragraph when the file did
        not end with a blank line.
        """
        par = []
        for line in f:
            line = line.strip()
            if line:
                par.append(line)
            elif par:
                yield "\n".join(par)
                par = []
        if par:
            yield "\n".join(par)
class AlbertDataGen:
    """Iterable factory for ALBERT pretraining data.

    Each iter() call spins up a fresh feeder/worker pipeline via
    AlbertDataIter.
    """

    def __init__(self, text_files, config, worker_count,
                 worker_class=AlbertDataWorker, feeder_class=AlbertDataFeeder):
        self.text_files = text_files
        self.config = config
        self.worker_count = worker_count
        self.worker_class = worker_class
        self.feeder_class = feeder_class

    def __iter__(self):
        return AlbertDataIter(
            self.text_files,
            self.config,
            self.worker_count,
            self.worker_class,
            self.feeder_class,
        )
class AlbertDataIter:
    """Iterator pulling examples from a feeder/worker pipeline."""

    # Class-level counter assigning a unique id to each iterator.
    id_counter = 0

    def __init__(self, text_files, config, worker_count,
                 worker_class=AlbertDataWorker, feeder_class=AlbertDataFeeder):
        self.text_files = text_files
        self.config = config
        self.worker_count = worker_count
        self.worker_class = worker_class
        self.feeder_class = feeder_class
        self.idx = AlbertDataIter.id_counter
        # BUG FIX: `self.id_counter += 1` created an instance attribute and
        # left the class counter at 0, so every iterator got idx 0.
        AlbertDataIter.id_counter += 1
        self.inqueue = mp.Queue(worker_count)
        self.outqueue = mp.Queue(worker_count)
        self.feeder = feeder_class(text_files, self.inqueue)
        self.workers = [
            worker_class(f"{self.idx}:{i}", config, self.inqueue, self.outqueue)
            for i in range(worker_count)
        ]
        self.feeder.start()
        for worker in self.workers:
            worker.start()

    def __next__(self):
        return self.outqueue.get()

    def __del__(self):
        # NOTE(review): should_run is cleared in the parent process only;
        # worker processes never observe the change (separate address
        # spaces). Consider terminating workers like the feeder -- TODO.
        for worker in self.workers:
            worker.should_run = False
        self.feeder.terminate()
class AlbertDataRecorder:
    """Placeholder for recording generated examples (not implemented yet)."""

    def __init__(self):
        pass
from pretraining.pretraining_example import create_example, create_context
class AlbertDataWorker(mp.Process):
def __init__(self, idx, config, inqueue, outqueue, *args, **kwargs):
super(AlbertDataWorker, self).__init__(*args, **kwargs)
self.idx = idx
self.config = config
self.context = None
self.inqueue = inqueue
self.outqueue = outqueue
self.should_run = True
def run(self):
self.context = self._init_context()
while self.should_run:
try:
par = self.inqueue.get(block=True, timeout=1)
except mp.TimeoutError:
continue
example = self._create_example(par)
self.outqueue.put(example)
self._destroy_context(self.context)
def _create_example(self, par):
return create_example(par, self.config, self.context)
def _init_context(self):
return create_context(self.config)
def _destroy_context(self, context):
pass
class AlbertDataFeeder(mp.Process):
def __init__(self, text_files, queue, *args, **kwargs):
super(AlbertDataFeeder, self).__init__(*args, **kwargs)
self.text_files = text_files
self.queue = queue
def run(self):
for file_name in self.text_files:
with open(file_name) as f:
for par in self._split_pars(f):
self.queue.put(par)
@staticmethod
def _split_pars(f):
par = []
for line in f:
line = line.strip()
if not line:
if par:
yield "\n".join(par)
par = []
par.append(line)
class AlbertDataGen:
def __init__(self, text_files, config, worker_count,
worker_class=AlbertDataWorker, feeder_class=AlbertDataFeeder):
self.text_files = text_files
self.config = config
self.worker_count = worker_count
self.worker_class = worker_class
self.feeder_class = feeder_class
def __iter__(self):
return AlbertDataIter(self.text_files, self.config, self.worker_count,
self.worker_class, self.feeder_class)
class AlbertDataIter:
id_counter = 0
def __init__(self, text_files, config, worker_count,
worker_class=AlbertDataWorker, feeder_class=AlbertDataFeeder):
self.text_files = text_files
self.config = config
self.worker_count = worker_count
self.worker_class = worker_class
self.feeder_class = feeder_class
self.idx = self.id_counter
self.id_counter += 1
self.inqueue = mp.Queue(worker_count)
self.outqueue = mp.Queue(worker_count)
self.feeder = feeder_class(text_files, self.inqueue)
self.workers = [
worker_class(f"{self.idx}:{i}", config, self.inqueue, self.outqueue)
for i in range(worker_count)
]
self.feeder.start()
for worker in self.workers:
worker.start()
def __next__(self):
return self.outqueue.get()
def __del__(self):
for worker in self.workers:
worker.should_run = False
self.feeder.terminate()
class AlbertDataRecorder:
def __init__(self):
pass | 0.488771 | 0.077588 |
import biplist
import code_resources
from exceptions import NotMatched
import copy
import glob
import logging
import os
from os.path import basename, exists, join, splitext
import signable
import shutil
log = logging.getLogger(__name__)
class Bundle(object):
""" A bundle is a standard directory structure, a signable, installable set of files.
Apps are Bundles, but so are some kinds of Frameworks (libraries) """
helpers = []
signable_class = None
entitlements_path = None # Not set for every bundle type # TODO code smell??
@classmethod
def has_platform(cls, plist, platforms):
    """Return True if the bundle's Info.plist targets any of `platforms`.

    Native bundles declare a CFBundleSupportedPlatforms entry. Note that
    starting with iOS 10, simulator framework/test bundles also need to
    be signed (at least ad hoc).
    """
    if platforms is None:
        raise Exception("no platforms?")
    if 'CFBundleSupportedPlatforms' not in plist:
        return False
    supported = plist['CFBundleSupportedPlatforms']
    return any(platform in supported for platform in platforms)
def __init__(self, path, native_platforms):
self.path = path
self.info_path = join(self.path, 'Info.plist')
self.native_platforms = native_platforms # TODO extract this from CFBundleSupportedPlatforms?
if not exists(self.info_path):
raise NotMatched("no Info.plist found; probably not a bundle")
self.info = biplist.readPlist(self.info_path)
self.orig_info = None
if not self._is_native(self.info):
raise NotMatched("not a native bundle")
# will be added later
self.seal_path = None
def get_bundle_id(self):
return self.info['CFBundleIdentifier']
def _is_native(self, info):
return self.__class__.has_platform(info, self.native_platforms)
def get_entitlements_path(self):
return self.entitlements_path
def get_executable_path(self):
""" Path to the main executable. For an app, this is app itself. For
a Framework, this is the main framework """
executable_name = None
if 'CFBundleExecutable' in self.info:
executable_name = self.info['CFBundleExecutable']
else:
executable_name, _ = splitext(basename(self.path))
executable = join(self.path, executable_name)
if not exists(executable):
raise Exception(
'could not find executable for {0}'.format(self.path))
return executable
def update_info_props(self, new_props):
if self.orig_info is None:
self.orig_info = copy.deepcopy(self.info)
changed = False
if ('CFBundleIdentifier' in new_props and
'CFBundleURLTypes' in self.info and
'CFBundleURLTypes' not in new_props):
# The bundle identifier changed. Check CFBundleURLTypes for
# CFBundleURLName values matching the old bundle
# id if it's not being set explicitly
old_bundle_id = self.info['CFBundleIdentifier']
new_bundle_id = new_props['CFBundleIdentifier']
for url_type in self.info['CFBundleURLTypes']:
if 'CFBundleURLName' not in url_type:
continue
if url_type['CFBundleURLName'] == old_bundle_id:
url_type['CFBundleURLName'] = new_bundle_id
changed = True
for key, val in new_props.iteritems():
is_new_key = key not in self.info
if is_new_key or self.info[key] != val:
if is_new_key:
log.warn("Adding new Info.plist key: {}".format(key))
self.info[key] = val
changed = True
if changed:
biplist.writePlist(self.info, self.info_path, binary=True)
else:
self.orig_info = None
def info_props_changed(self):
return self.orig_info is not None
def info_prop_changed(self, key):
if not self.orig_info:
# No props have been changed
return False
if key in self.info and key in self.orig_info and self.info[key] == self.orig_info[key]:
return False
return True
def get_info_prop(self, key):
return self.info[key]
def sign_dylibs(self, cms_signer, path):
""" Sign all the dylibs in this directory """
for dylib_path in glob.glob(join(path, '*.dylib')):
dylib = signable.Dylib(self, dylib_path, cms_signer)
dylib.sign(self, cms_signer)
def resign(self, deep, cms_signer, provisioner):
""" Sign everything in this bundle, in place. If deep is specified, sign
recursively with sub-bundles """
# log.debug("SIGNING: %s" % self.path)
if deep:
plugins_path = join(self.path, 'PlugIns')
if exists(plugins_path):
# sign the appex executables
appex_paths = glob.glob(join(plugins_path, '*.appex'))
for appex_path in appex_paths:
log.debug('working on appex {}'.format(appex_path))
# Appexes are essentially the same as app bundles, for signing purposes
# They could be a different class, but there aren't any differences yet noted.
# They will have the same OS (e.g. iOS, Watch) as their parent
appex = self.__class__(appex_path)
appex.resign(deep, cms_signer, provisioner)
frameworks_path = join(self.path, 'Frameworks')
if exists(frameworks_path):
# log.debug("SIGNING FRAMEWORKS: %s" % frameworks_path)
# sign all the frameworks
for framework_name in os.listdir(frameworks_path):
framework_path = join(frameworks_path, framework_name)
# log.debug("checking for framework: %s" % framework_path)
try:
framework = Framework(framework_path, self.native_platforms)
# log.debug("resigning: %s" % framework_path)
framework.resign(deep, cms_signer, provisioner)
except NotMatched:
# log.debug("not a framework: %s" % framework_path)
continue
# sign all the dylibs under Frameworks
self.sign_dylibs(cms_signer, frameworks_path)
# sign any dylibs in the main directory (rare, but it happens)
self.sign_dylibs(cms_signer, self.path)
# then create the seal
# TODO maybe the app should know what its seal path should be...
self.seal_path = code_resources.make_seal(self.get_executable_path(),
self.path)
# then sign the executable
executable = self.signable_class(self, self.get_executable_path(), cms_signer)
executable.sign(self, cms_signer)
log.debug("Resigned bundle at <%s>", self.path)
class Framework(Bundle):
""" A bundle that comprises reusable code. Similar to an app in that it has
its own resources and metadata. Not like an app because the main executable
doesn't have Entitlements, or an Application hash, and it doesn't have its
own provisioning profile. """
# the executable in this bundle will be a Framework
signable_class = signable.Framework
class App(Bundle):
""" The kind of bundle that is visible as an app to the user.
Contains the provisioning profile, entitlements, etc. """
# the executable in this bundle will be an Executable (i.e. the main
# executable of an app)
signable_class = signable.Executable
def __init__(self, path, native_platforms):
self.entitlements = None # this is a bit ugly, but we have to communicate this down to Codesig
super(App, self).__init__(path, native_platforms)
def provision(self, team_id, provisioner):
identifier = '.'.join([team_id, self.get_bundle_id()])
provisioning_profile_path = provisioner.get_provisioning_profile(identifier)
target_path = join(self.path, 'embedded.mobileprovision')
log.debug("provisioning from {} to {}".format(provisioning_profile_path, target_path))
shutil.copyfile(provisioning_profile_path, target_path)
def entitle(self, team_id, provisioner):
identifier = '.'.join([team_id, self.get_bundle_id()])
self.entitlements = provisioner.get_entitlements(identifier)
def resign(self, deep, cms_signer, provisioner):
""" signs app in place """
# In the typical case, we add entitlements from the pprof into the app's signature
if not cms_signer.is_adhoc():
team_id = cms_signer.get_team_id()
self.provision(team_id, provisioner)
self.entitle(team_id, provisioner)
# actually resign this bundle now
super(App, self).resign(deep, cms_signer, provisioner)
class WatchApp(App):
""" At some point it became possible to ship a Watch app as a complete app, embedded in an IosApp. """
# possible values for CFBundleSupportedPlatforms
native_platforms = ['WatchOS', 'WatchSimulator']
def __init__(self, path):
super(WatchApp, self).__init__(path, self.native_platforms)
class IosApp(App):
""" Represents a normal iOS app. Just an App, except it may also contain a Watch app """
# possible values for CFBundleSupportedPlatforms
native_platforms = ['iPhoneOS', 'iPhoneSimulator']
# TODO this is a bit convoluted
# we keep the class value 'native_platforms' available so the archive precheck can
# call a simple IosApp.is_native() without instantiating the full IosApp.
# We *also* put native_platforms into
# the superclass Bundle, because any frameworks discovered beneath the app also need to be the same platform, and
# the simplest thing is to pass down a "native_platforms" in initialization,
# rather than have two kinds of Frameworks: IosFramework and WatchFramework...
@classmethod
def is_native(cls, info):
return cls.has_platform(info, cls.native_platforms)
def __init__(self, path):
super(IosApp, self).__init__(path, self.native_platforms)
def sign_watch_apps(self, deep, cms_signer, provisioner):
watch_apps_path = join(self.path, 'Watch')
if exists(watch_apps_path):
watch_app_paths = glob.glob(join(watch_apps_path, '*.app'))
for watch_app_path in watch_app_paths:
log.debug("found Watch app at {}".format(watch_app_path))
watch_app = WatchApp(watch_app_path)
watch_app.resign(deep, cms_signer, provisioner)
def resign(self, deep, cms_signer, provisioner):
self.sign_watch_apps(deep, cms_signer, provisioner)
super(IosApp, self).resign(deep, cms_signer, provisioner) | isign/bundle.py | import biplist
import code_resources
from exceptions import NotMatched
import copy
import glob
import logging
import os
from os.path import basename, exists, join, splitext
import signable
import shutil
log = logging.getLogger(__name__)
class Bundle(object):
    """ A bundle is a standard directory structure, a signable, installable set of files.
        Apps are Bundles, but so are some kinds of Frameworks (libraries) """
    helpers = []
    signable_class = None
    entitlements_path = None  # Not set for every bundle type  # TODO code smell??

    @classmethod
    def has_platform(cls, plist, platforms):
        """ Return True if the Info.plist dict *plist* declares any of *platforms*.
            If a bundle is for a native platform, it has these properties in the
            Info.plist. Note that starting with iOS 10, simulator framework/test
            bundles also need to be signed (at least ad hoc).
        """
        if platforms is None:
            raise Exception("no platforms?")
        return (
            'CFBundleSupportedPlatforms' in plist and
            any(map(lambda p: p in plist['CFBundleSupportedPlatforms'], platforms))
        )

    def __init__(self, path, native_platforms):
        """ Read and validate Info.plist; raises NotMatched when *path* is not
            a native bundle directory. """
        self.path = path
        self.info_path = join(self.path, 'Info.plist')
        self.native_platforms = native_platforms  # TODO extract this from CFBundleSupportedPlatforms?
        if not exists(self.info_path):
            raise NotMatched("no Info.plist found; probably not a bundle")
        self.info = biplist.readPlist(self.info_path)
        self.orig_info = None
        if not self._is_native(self.info):
            raise NotMatched("not a native bundle")
        # will be added later, when the seal (CodeResources) is created
        self.seal_path = None

    def get_bundle_id(self):
        return self.info['CFBundleIdentifier']

    def _is_native(self, info):
        # Dispatch through the class so subclasses use their own platform list.
        return self.__class__.has_platform(info, self.native_platforms)

    def get_entitlements_path(self):
        return self.entitlements_path

    def get_executable_path(self):
        """ Path to the main executable. For an app, this is app itself. For
            a Framework, this is the main framework """
        executable_name = None
        if 'CFBundleExecutable' in self.info:
            executable_name = self.info['CFBundleExecutable']
        else:
            # Fall back to the bundle directory's own name, minus extension.
            executable_name, _ = splitext(basename(self.path))
        executable = join(self.path, executable_name)
        if not exists(executable):
            raise Exception(
                'could not find executable for {0}'.format(self.path))
        return executable

    def update_info_props(self, new_props):
        """ Merge *new_props* into Info.plist, writing it back to disk only when
            something actually changed. A deep copy of the original is kept in
            self.orig_info so callers can detect what changed. """
        if self.orig_info is None:
            self.orig_info = copy.deepcopy(self.info)
        changed = False
        if ('CFBundleIdentifier' in new_props and
                'CFBundleURLTypes' in self.info and
                'CFBundleURLTypes' not in new_props):
            # The bundle identifier changed. Check CFBundleURLTypes for
            # CFBundleURLName values matching the old bundle
            # id if it's not being set explicitly
            old_bundle_id = self.info['CFBundleIdentifier']
            new_bundle_id = new_props['CFBundleIdentifier']
            for url_type in self.info['CFBundleURLTypes']:
                if 'CFBundleURLName' not in url_type:
                    continue
                if url_type['CFBundleURLName'] == old_bundle_id:
                    url_type['CFBundleURLName'] = new_bundle_id
                    changed = True
        # BUG FIX: dict.iteritems() does not exist in Python 3; items() works
        # under both Python 2 and 3.
        for key, val in new_props.items():
            is_new_key = key not in self.info
            if is_new_key or self.info[key] != val:
                if is_new_key:
                    # logging's warn() is a deprecated alias of warning().
                    log.warning("Adding new Info.plist key: {}".format(key))
                self.info[key] = val
                changed = True
        if changed:
            biplist.writePlist(self.info, self.info_path, binary=True)
        else:
            self.orig_info = None

    def info_props_changed(self):
        return self.orig_info is not None

    def info_prop_changed(self, key):
        """ True if *key* differs from its value before update_info_props(). """
        if not self.orig_info:
            # No props have been changed
            return False
        if key in self.info and key in self.orig_info and self.info[key] == self.orig_info[key]:
            return False
        return True

    def get_info_prop(self, key):
        return self.info[key]

    def sign_dylibs(self, cms_signer, path):
        """ Sign all the dylibs in this directory """
        for dylib_path in glob.glob(join(path, '*.dylib')):
            dylib = signable.Dylib(self, dylib_path, cms_signer)
            dylib.sign(self, cms_signer)

    def resign(self, deep, cms_signer, provisioner):
        """ Sign everything in this bundle, in place. If deep is specified, sign
            recursively with sub-bundles """
        if deep:
            plugins_path = join(self.path, 'PlugIns')
            if exists(plugins_path):
                # sign the appex executables
                appex_paths = glob.glob(join(plugins_path, '*.appex'))
                for appex_path in appex_paths:
                    log.debug('working on appex {}'.format(appex_path))
                    # Appexes are essentially the same as app bundles, for signing
                    # purposes. They will have the same OS (e.g. iOS, Watch) as
                    # their parent.
                    # NOTE(review): this one-argument constructor only matches App
                    # subclasses (WatchApp/IosApp take just a path); Bundle and
                    # Framework require (path, native_platforms) — confirm deep
                    # resign is never reached on those classes with PlugIns.
                    appex = self.__class__(appex_path)
                    appex.resign(deep, cms_signer, provisioner)
            frameworks_path = join(self.path, 'Frameworks')
            if exists(frameworks_path):
                # sign all the frameworks
                for framework_name in os.listdir(frameworks_path):
                    framework_path = join(frameworks_path, framework_name)
                    try:
                        framework = Framework(framework_path, self.native_platforms)
                        framework.resign(deep, cms_signer, provisioner)
                    except NotMatched:
                        # not a framework; skip it
                        continue
                # sign all the dylibs under Frameworks
                self.sign_dylibs(cms_signer, frameworks_path)
        # sign any dylibs in the main directory (rare, but it happens)
        self.sign_dylibs(cms_signer, self.path)
        # then create the seal
        # TODO maybe the app should know what its seal path should be...
        self.seal_path = code_resources.make_seal(self.get_executable_path(),
                                                  self.path)
        # then sign the executable
        executable = self.signable_class(self, self.get_executable_path(), cms_signer)
        executable.sign(self, cms_signer)
        log.debug("Resigned bundle at <%s>", self.path)
class Framework(Bundle):
    """A bundle of reusable code.

    Like an app it carries its own resources and metadata; unlike an app,
    its main executable has no Entitlements or Application hash, and it
    ships without its own provisioning profile.
    """

    # The main binary of this bundle is signed as a Framework.
    signable_class = signable.Framework
class App(Bundle):
    """ The kind of bundle that is visible as an app to the user.
        Contains the provisioning profile, entitlements, etc. """

    # The main binary of an app is signed as an Executable.
    signable_class = signable.Executable

    def __init__(self, path, native_platforms):
        # Communicated down to Codesig later; a bit ugly but necessary.
        self.entitlements = None
        super(App, self).__init__(path, native_platforms)

    def provision(self, team_id, provisioner):
        """Copy the matching provisioning profile into the app bundle."""
        app_id = '.'.join([team_id, self.get_bundle_id()])
        profile_path = provisioner.get_provisioning_profile(app_id)
        target_path = join(self.path, 'embedded.mobileprovision')
        log.debug("provisioning from {} to {}".format(profile_path, target_path))
        shutil.copyfile(profile_path, target_path)

    def entitle(self, team_id, provisioner):
        """Fetch and remember the entitlements for this app's identifier."""
        app_id = '.'.join([team_id, self.get_bundle_id()])
        self.entitlements = provisioner.get_entitlements(app_id)

    def resign(self, deep, cms_signer, provisioner):
        """ signs app in place """
        # In the typical case, we add entitlements from the pprof into the app's signature
        if not cms_signer.is_adhoc():
            team_id = cms_signer.get_team_id()
            self.provision(team_id, provisioner)
            self.entitle(team_id, provisioner)
        # actually resign this bundle now
        super(App, self).resign(deep, cms_signer, provisioner)
class WatchApp(App):
    """A Watch app shipped as a complete app, embedded inside an IosApp."""

    # Recognized CFBundleSupportedPlatforms values for watch targets.
    native_platforms = ['WatchOS', 'WatchSimulator']

    def __init__(self, path):
        super(WatchApp, self).__init__(path, self.native_platforms)
class IosApp(App):
    """ Represents a normal iOS app. Just an App, except it may also contain a Watch app """

    # possible values for CFBundleSupportedPlatforms
    native_platforms = ['iPhoneOS', 'iPhoneSimulator']

    # TODO a bit convoluted: native_platforms stays on the class so the archive
    # precheck can call IosApp.is_native() without instantiating a full IosApp,
    # *and* it is passed down into Bundle at construction time, because
    # frameworks discovered beneath the app must match the same platform.
    # Passing the value down is simpler than having IosFramework and
    # WatchFramework variants.

    @classmethod
    def is_native(cls, info):
        return cls.has_platform(info, cls.native_platforms)

    def __init__(self, path):
        super(IosApp, self).__init__(path, self.native_platforms)

    def sign_watch_apps(self, deep, cms_signer, provisioner):
        """Resign any Watch apps embedded under <app>/Watch/*.app."""
        watch_dir = join(self.path, 'Watch')
        if not exists(watch_dir):
            return
        for watch_app_path in glob.glob(join(watch_dir, '*.app')):
            log.debug("found Watch app at {}".format(watch_app_path))
            WatchApp(watch_app_path).resign(deep, cms_signer, provisioner)

    def resign(self, deep, cms_signer, provisioner):
        # Watch apps first, then the containing iOS app itself.
        self.sign_watch_apps(deep, cms_signer, provisioner)
        super(IosApp, self).resign(deep, cms_signer, provisioner)
from py_ecc import optimized_bls12_381 as b
from fft import fft
from poly_utils import PrimeField
from multicombs import lincomb
from verkle import WIDTH, DEPTH, MODULUS, root_of_unity_candidates, ROOT_OF_UNITY
import time
ROOT_OF_UNITY2 = root_of_unity_candidates[WIDTH*2]
# default O(n^2) algorithm
def semi_toeplitz_naive(toeplitz_coefficients, x):
    """Multiply the upper-triangular (semi-)Toeplitz matrix defined by
    *toeplitz_coefficients* with the vector *x*, in O(n^2) time."""
    n = len(x)
    return [
        sum(toeplitz_coefficients[j - i] * x[j] for j in range(i, n))
        for i in range(n)
    ]
xext_hat = None

# FFT algorithm
# Toeplitz multiplication via http://www.netlib.org/utk/people/JackDongarra/etemplates/node384.html
def semi_toeplitz_fft(toeplitz_coefficients, x):
    """Multiply a semi-Toeplitz matrix by *x* using FFTs.

    Works for vectors of field elements or of curve points (tuples).
    NOTE(review): the FFT of the extended x-vector is cached in the module
    global ``xext_hat`` and never invalidated, so every call after the
    first assumes the same ``x`` — confirm callers rely on that.
    """
    global xext_hat
    assert len(x) == WIDTH
    if xext_hat is None:
        a = time.time()
        if type(x[0]) == tuple:
            # Curve points: pad with the group identity.
            xext = x + [b.Z1 for a in x]
        else:
            # Field elements: pad with zeros.
            xext = x + [0 * a for a in x]
        xext_hat = fft(xext, MODULUS, ROOT_OF_UNITY2, inv=False)
        print("Toeplitz preprocessing in %.3f seconds" % (time.time() - a))
    # Embed the Toeplitz matrix into a circulant matrix of twice the size.
    text = toeplitz_coefficients[:1] + [0 * a for a in toeplitz_coefficients] + toeplitz_coefficients[:0:-1]
    text_hat = fft(text, MODULUS, ROOT_OF_UNITY2, inv=False)
    yext_hat = [None for i in range(2 * len(x))]
    for i in range(len(xext_hat)):
        if type(xext_hat[0]) == tuple:
            yext_hat[i] = b.multiply(xext_hat[i], text_hat[i])
        else:
            # BUG FIX: was `yext_hat[i] *= text_hat[i]`, which multiplied the
            # None placeholder and raised TypeError in the scalar branch.
            yext_hat[i] = xext_hat[i] * text_hat[i]
    return fft(yext_hat, MODULUS, ROOT_OF_UNITY2, inv=True)[:len(x)]
def generate_all_proofs(values, setup):
    """Compute KZG proofs for all WIDTH evaluation points at once (FK20)."""
    assert len(values) == WIDTH
    print("---")
    print("Generating all proofs using FK20 for width = %d" % WIDTH)
    # Interpolate: recover polynomial coefficients via an inverse FFT.
    t0 = time.time()
    poly_coefs = fft(values, MODULUS, ROOT_OF_UNITY, inv=True)[:0:-1]
    print("Generated polynomial coefficients in %.3f seconds" % (time.time() - t0))
    # Multiply the setup points, arranged as a semi-Toeplitz matrix.
    t0 = time.time()
    h = semi_toeplitz_fft(poly_coefs + [0], setup[0][len(values) - 2::-1] + [b.Z1])
    print("Toeplitz matrix multiplication in %.3f seconds" % (time.time() - t0))
    # One last FFT turns the h values into one proof per evaluation point.
    t0 = time.time()
    proofs = fft(h, MODULUS, ROOT_OF_UNITY)
    print("Final FFT in %.3f seconds" % (time.time() - t0))
    print("---")
    return proofs
from py_ecc import optimized_bls12_381 as b
from fft import fft
from poly_utils import PrimeField
from multicombs import lincomb
from verkle import WIDTH, DEPTH, MODULUS, root_of_unity_candidates, ROOT_OF_UNITY
import time
ROOT_OF_UNITY2 = root_of_unity_candidates[WIDTH*2]
# default O(n^2) algorithm
def semi_toeplitz_naive(toeplitz_coefficients, x):
r = []
for i in range(len(x)):
r.append(sum(toeplitz_coefficients[j - i] * x[j] for j in range(i, len(x))))
return r
xext_hat = None
# FFT algorithm
# Toeplitz multiplication via http://www.netlib.org/utk/people/JackDongarra/etemplates/node384.html
def semi_toeplitz_fft(toeplitz_coefficients, x):
global xext_hat
assert len(x) == WIDTH
if xext_hat == None:
a = time.time()
if type(x[0]) == tuple:
xext = x + [b.Z1 for a in x]
else:
xext = x + [0 * a for a in x]
xext_hat = fft(xext, MODULUS, ROOT_OF_UNITY2, inv=False)
print("Toeplitz preprocessing in %.3f seconds" % (time.time() - a))
text = toeplitz_coefficients[:1] + [0 * a for a in toeplitz_coefficients] + toeplitz_coefficients[:0:-1]
text_hat = fft(text, MODULUS, ROOT_OF_UNITY2, inv=False)
yext_hat = [None for i in range(2*len(x))]
for i in range(len(xext_hat)):
if type(xext_hat[0]) == tuple:
yext_hat[i] = b.multiply(xext_hat[i], text_hat[i])
else:
yext_hat[i] *= text_hat[i]
return fft(yext_hat, MODULUS, ROOT_OF_UNITY2, inv=True)[:len(x)]
def generate_all_proofs(values, setup):
assert len(values) == WIDTH
# Get polynomial coefficients using IFFT
print("---")
print("Generating all proofs using FK20 for width = %d" % WIDTH)
a = time.time()
coefs = fft(values, MODULUS, ROOT_OF_UNITY, inv=True)[:0:-1]
print("Generated polynomial coefficients in %.3f seconds" % (time.time() - a))
a = time.time()
h = semi_toeplitz_fft(coefs + [0], setup[0][len(values)-2::-1] + [b.Z1])
print("Toeplitz matrix multiplication in %.3f seconds" % (time.time() - a))
a = time.time()
r = fft(h, MODULUS, ROOT_OF_UNITY)
print("Final FFT in %.3f seconds" % (time.time() - a))
print("---")
return r | 0.517083 | 0.508483 |
import re
import sys
from datetime import datetime
def main(filename):
    """Read a pre-formatted git log from *filename*, convert it to CSV and
    print the result to stdout."""
    with open(filename, "r") as log_file:
        git_log_text = log_file.read()
    print(process_git_log(git_log_text))
def strip_timezone_offset(timestamp_str):
    """Drop a trailing '+HH:MM' / '-HH:MM' UTC-offset suffix, if present."""
    # Anchored at end of string, so offsets appearing elsewhere are untouched.
    offset_pattern = "[+\-][0-9][0-9]:[0-9][0-9]$"
    return re.sub(offset_pattern, "", timestamp_str)
def get_churn_int_values_even_if_dash(text_number):
    """Parse a git numstat count; git prints '-' for binary files, which
    we count as 1."""
    return 1 if text_number.strip() == "-" else int(text_number)
def get_first_directories_from_filename(file):
    """Return the first four directory components of *file* as a list of
    length 4, padded with '' on the right.

    The last path element (the file name itself) is never included.
    Fixes: removed an unused local (`results`) and replaced the manual
    length ladder with slicing; outputs are unchanged.
    """
    dirs = file.split("/")[:-1][:4]
    return dirs + [""] * (4 - len(dirs))
def process_git_log(log):
    """Convert a '^^'-delimited git log (with --numstat output) into CSV text.

    Each commit starts with a 'hash--epoch--timestamp--author' line; churn
    lines ('insertions<TAB>deletions<TAB>file') begin at line index 3.
    Returns the CSV string, header row included.

    Fixes: builds the output with a list + join instead of quadratic string
    concatenation, and renames locals that shadowed the builtins `hash`
    and `file`. Output is byte-identical to the previous version.
    """
    rows = ["commit_hash,epoch,timestamp,date,year,month,day,author,file,churn_count,dir_1,dir_2,dir_3,dir_4\n"]
    for commit in log.split("^^"):
        if commit == "":
            continue
        commit_lines = commit.split("\n")
        commit_basics_parts = commit_lines[0].split("--")
        commit_hash = commit_basics_parts[0]
        epoch = commit_basics_parts[1]
        tmsp = commit_basics_parts[2]
        # e.g. 2019-12-17T09:16:10-05:00 — drop the offset, then parse.
        tmsp = strip_timezone_offset(tmsp)
        tmsp_date = datetime.strptime(tmsp, "%Y-%m-%dT%H:%M:%S")
        day_only = tmsp_date.date()
        year = tmsp_date.year
        month = tmsp_date.month
        day = tmsp_date.day
        author = commit_basics_parts[3]
        # Churn lines start at index 3; the final split element is the empty
        # string after the trailing newline, hence the -1.
        for churn_line in commit_lines[3:len(commit_lines) - 1]:
            churn_line_parts = churn_line.split("\t")
            insertions = get_churn_int_values_even_if_dash(churn_line_parts[0])
            deletions = get_churn_int_values_even_if_dash(churn_line_parts[1])
            total_churn = insertions + deletions
            file_path = churn_line_parts[2]
            dirs = get_first_directories_from_filename(file_path)
            rows.append(
                f'{commit_hash},{epoch},{tmsp},{day_only},{year},{month},{day},"{author}",{file_path},{total_churn},{dirs[0]},{dirs[1]},{dirs[2]},{dirs[3]}\n'
            )
    return "".join(rows)
if __name__ == "__main__":
    # CLI usage: python git_log_to_csv.py <logfile>
    main(sys.argv[1])
import sys
from datetime import datetime
def main(filename):
git_log_text = ""
with open(filename, "r") as file:
git_log_text = file.read()
result = process_git_log(git_log_text)
print(result)
def strip_timezone_offset(timestamp_str):
timezone_offset_pattern = "[+\-][0-9][0-9]:[0-9][0-9]$"
return re.sub(timezone_offset_pattern, "", timestamp_str)
def get_churn_int_values_even_if_dash(text_number):
metric = 1
if text_number.strip() != "-":
metric = int(text_number)
return metric
def get_first_directories_from_filename(file):
file_dir_parts = file.split("/")
results = []
dir_1 = ""
dir_2 = ""
dir_3 = ""
dir_4 = ""
if len(file_dir_parts) >= 2:
dir_1 = file_dir_parts[0]
if len(file_dir_parts) >= 3:
dir_2 = file_dir_parts[1]
if len(file_dir_parts) >= 4:
dir_3 = file_dir_parts[2]
if len(file_dir_parts) >= 5:
dir_4 = file_dir_parts[3]
return [dir_1, dir_2, dir_3, dir_4]
def process_git_log(log):
commits = log.split("^^")
result = "commit_hash,epoch,timestamp,date,year,month,day,author,file,churn_count,dir_1,dir_2,dir_3,dir_4\n"
for number, commit in enumerate(commits):
if commit != "":
commit_lines = commit.split("\n")
commit_basics = commit_lines[0]
commit_basics_parts = commit_basics.split("--")
hash = commit_basics_parts[0]
epoch = commit_basics_parts[1]
tmsp = commit_basics_parts[2]
# 2019-12-17T09:16:10-05:00
# yyyy-mm-ddT
tmsp = strip_timezone_offset(tmsp)
tmsp_date = datetime.strptime(tmsp, "%Y-%m-%dT%H:%M:%S")
day_only = tmsp_date.date()
year = tmsp_date.year
month = tmsp_date.month
day = tmsp_date.day
author = commit_basics_parts[3]
total_lines = len(commit_lines)
for row_index in range(3, total_lines - 1):
churn_line = commit_lines[row_index]
churn_line_parts = churn_line.split("\t")
insertions = get_churn_int_values_even_if_dash(churn_line_parts[0])
deletions = get_churn_int_values_even_if_dash(churn_line_parts[1])
total_churn = insertions + deletions
file = churn_line_parts[2]
dirs = get_first_directories_from_filename(file)
result = (
result
+ f'{hash},{epoch},{tmsp},{day_only},{year},{month},{day},"{author}",{file},{total_churn},{dirs[0]},{dirs[1]},{dirs[2]},{dirs[3]}\n'
)
return result
if __name__ == "__main__":
filename = sys.argv[1]
main(filename) | 0.234582 | 0.164148 |
import numpy as np
import statsmodels as sm
from ThymeBoost.trend_models.trend_base_class import TrendBaseModel
from pmdarima.arima import auto_arima
class ArimaModel(TrendBaseModel):
"""ARIMA Model from Statsmodels"""
model = 'arima'
def __init__(self):
self.model_params = None
self.fitted = None
def __str__(self):
return f'{self.model}({self.kwargs["arima_order"]})'
def fit(self, y, **kwargs):
"""
Fit the trend component in the boosting loop for a arima model.
Parameters
----------
time_series : TYPE
DESCRIPTION.
**kwargs : TYPE
DESCRIPTION.
Returns
-------
None.
"""
self.kwargs = kwargs
self.order = kwargs['arima_order']
self.arima_trend = kwargs['arima_trend']
bias = kwargs['bias']
if self.order == 'auto':
ar_model = auto_arima(y,
seasonal=False,
error_action='warn',
trace=False,
supress_warnings=True,
stepwise=True,
random_state=20,
n_fits=50)
self.fitted = ar_model.predict_in_sample()
else:
ar_model = sm.tsa.arima.model.ARIMA(y - bias,
order=self.order,
trend=self.arima_trend).fit()
self.fitted = ar_model.predict(start=0, end=len(y) - 1) + bias
self.model_params = (ar_model, bias, len(y))
return self.fitted
def predict(self, forecast_horizon, model_params):
last_point = model_params[2] + forecast_horizon
if self.order == 'auto':
prediction = model_params[0].predict(n_periods=forecast_horizon)
else:
prediction = model_params[0].predict(start=model_params[2] + 1, end=last_point) + \
model_params[1]
return prediction | ThymeBoost/trend_models/arima_trend.py | import numpy as np
import statsmodels as sm
from ThymeBoost.trend_models.trend_base_class import TrendBaseModel
from pmdarima.arima import auto_arima
class ArimaModel(TrendBaseModel):
    """ARIMA trend model: statsmodels ARIMA for an explicit order, or
    pmdarima's auto_arima when arima_order == 'auto'."""
    model = 'arima'

    def __init__(self):
        self.model_params = None  # (fitted model, bias, training length)
        self.fitted = None        # in-sample fitted values

    def __str__(self):
        # NOTE(review): relies on self.kwargs, which only exists after fit()
        # has run — confirm callers never stringify an unfitted model.
        return f'{self.model}({self.kwargs["arima_order"]})'

    def fit(self, y, **kwargs):
        """
        Fit the trend component in the boosting loop for an arima model.

        Parameters
        ----------
        y : array-like
            Series (residual component) to fit.
        **kwargs
            Must contain 'arima_order' (order tuple, or 'auto'),
            'arima_trend' and 'bias'.

        Returns
        -------
        The in-sample fitted values.
        """
        self.kwargs = kwargs
        self.order = kwargs['arima_order']
        self.arima_trend = kwargs['arima_trend']
        bias = kwargs['bias']
        if self.order == 'auto':
            ar_model = auto_arima(y,
                                  seasonal=False,
                                  error_action='warn',
                                  trace=False,
                                  # BUG FIX: was misspelled 'supress_warnings',
                                  # so the option never reached auto_arima.
                                  suppress_warnings=True,
                                  stepwise=True,
                                  random_state=20,
                                  n_fits=50)
            self.fitted = ar_model.predict_in_sample()
        else:
            ar_model = sm.tsa.arima.model.ARIMA(y - bias,
                                                order=self.order,
                                                trend=self.arima_trend).fit()
            self.fitted = ar_model.predict(start=0, end=len(y) - 1) + bias
        self.model_params = (ar_model, bias, len(y))
        return self.fitted

    def predict(self, forecast_horizon, model_params):
        """Forecast *forecast_horizon* steps ahead with the fitted model in
        *model_params* = (model, bias, n_train). Requires fit() to have run
        on this instance (reads self.order)."""
        last_point = model_params[2] + forecast_horizon
        if self.order == 'auto':
            prediction = model_params[0].predict(n_periods=forecast_horizon)
        else:
            # NOTE(review): forecasting starts at n_train + 1 while the
            # in-sample fit ends at n_train - 1, so index n_train is skipped —
            # confirm this one-step offset is intentional.
            prediction = model_params[0].predict(start=model_params[2] + 1, end=last_point) + \
                model_params[1]
        return prediction
import math
import numpy as np
import pandas as pd
import argparse
from util import get_input_label_split, get_accuracy, get_precision, discretize
class NBC:
    """Naive Bayes classifier over a pandas DataFrame.

    Continuous-looking columns are discretized into range tuples at training
    time (via util.discretize); inference then matches test values against
    those ranges. Probabilities are estimated by direct counting (no
    smoothing), so an unseen value zeroes that class's score.
    """

    def __init__(self):
        self.train_data = None    # training DataFrame (mutated during train())
        self.label = ''           # name of the label column
        self.classes = set()      # distinct label values seen during training
        self.prob = {}            # class -> prior probability P(y)
        self.is_discrete = set()  # columns that were discretized into ranges

    def train(self, data, label_name):
        """Fit class priors and discretize continuous columns in place.

        NOTE(review): stores *data* by reference and overwrites discretized
        columns, so the caller's DataFrame is modified — confirm callers
        expect that.
        """
        self.train_data = data
        self.label = label_name
        size = self.train_data.shape[0]
        train_y = self.train_data[self.label].to_numpy()
        for y in train_y:
            self.classes.add(y)
        # Class priors: P(y) = count(y) / N.
        for y in self.classes:
            self.prob[y] = train_y[train_y == y].shape[0] / size
        # discretize columns for more accurate inference
        columns = self.train_data.columns
        for col in columns:
            if col == label_name:
                continue
            discretized_col = discretize(self.train_data[col])
            # Assumes discretize() returns None for columns it leaves
            # untouched — TODO confirm against util.discretize.
            if discretized_col is not None:
                self.is_discrete.add(col)
                self.train_data[col] = discretized_col

    def infer(self, test_data):
        """Predict a label for each row of *test_data*.

        Argmax over P(y) * prod_col P(col_value | y), with conditional
        probabilities estimated by counting within each class's training
        rows. Returns a numpy array (dtype float, so labels are presumably
        numeric — verify against callers).
        """
        size = test_data.shape[0]
        pred = np.zeros(size)
        columns = test_data.columns
        for i in range(size):
            best_y = None
            best_prob = -1
            for y in self.classes:
                prob = 1.
                # Training rows belonging to class y.
                temp = self.train_data[self.train_data[self.label] == y]
                temp_size = len(temp)
                for col in columns:
                    cur_col = temp[col].to_numpy()
                    test_val = test_data[col].iloc[i]
                    if col in self.is_discrete:
                        # Discretized column: map the raw test value onto the
                        # (low, high) range tuple it falls into.
                        vals = np.unique(cur_col)
                        found = False
                        for v in vals:
                            if test_val >= v[0] and test_val <= v[1]:
                                test_val = v
                                found = True
                                break
                        if not found:
                            # dummy range that spans the entire R (or strictly speaking the entire FLOAT)
                            test_val = (float('-inf'), float('inf'))
                        cnt = 0
                        for val in cur_col:
                            if val == test_val:
                                cnt += 1
                        prob *= cnt / temp_size
                    else:
                        # Exact-match count for categorical columns.
                        prob *= len(cur_col[cur_col == test_val]) / temp_size
                prob *= self.prob[y]
                if best_prob < prob:
                    best_y = y
                    best_prob = prob
            pred[i] = best_y
        return pred
# Command-line entry point: train on a CSV file and, in --eval_mode,
# report accuracy/precision on a held-out 20% split.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--file_path', required=True, help='training data file path')
    parser.add_argument('--label_name', type=str, default='label', help='label column name for the input file')
    parser.add_argument('--eval_mode', action='store_true', help='run this in evaluation mode')
    args = parser.parse_args()
    df = pd.read_csv(args.file_path)
    if args.eval_mode:
        # Simple 80/20 chronological split — no shuffling.
        train_sz = int(len(df) * 0.8)
        df_train = df[:train_sz]
        df_test = df[train_sz:]
        nbc = NBC()
        nbc.train(df_train, args.label_name)
        test_x = df_test.drop(columns=[args.label_name])
        test_y = df_test[args.label_name].to_numpy()
        pred = nbc.infer(test_x)
        print(f"accuracy score: {get_accuracy(pred, test_y)}")
        print(f"precision score: {get_precision(pred, test_y)}")
else:
pass | classification/naive_bayes_clas.py | import math
import numpy as np
import pandas as pd
import argparse
from util import get_input_label_split, get_accuracy, get_precision, discretize
class NBC:
    """Naive Bayes classifier over a pandas DataFrame.

    Continuous feature columns are discretized into (low, high) bins via
    ``util.discretize``; conditional probabilities are then estimated by
    counting matching training rows within each class.
    """

    def __init__(self):
        self.train_data = None    # DataFrame of (possibly discretized) training rows
        self.label = ''           # name of the label column
        self.classes = set()      # distinct label values seen during training
        self.prob = {}            # class -> prior probability P(y)
        self.is_discrete = set()  # names of columns that were discretized

    def train(self, data, label_name):
        """Fit class priors and discretize feature columns.

        A copy of *data* is stored so the discretization step does not
        mutate the caller's DataFrame (the original wrote the binned
        columns back into the passed-in frame).
        """
        self.train_data = data.copy()
        self.label = label_name
        size = self.train_data.shape[0]
        train_y = self.train_data[self.label].to_numpy()
        self.classes = set(train_y)
        for y in self.classes:
            # class prior: relative frequency of y among training labels
            self.prob[y] = (train_y == y).sum() / size
        # discretize columns for more accurate inference
        for col in self.train_data.columns:
            if col == label_name:
                continue
            discretized_col = discretize(self.train_data[col])
            if discretized_col is not None:
                self.is_discrete.add(col)
                self.train_data[col] = discretized_col

    def infer(self, test_data):
        """Return a numpy array with one predicted label per row of
        *test_data*: argmax over P(y) * prod_col P(value | y)."""
        columns = test_data.columns
        predictions = []
        for i in range(test_data.shape[0]):
            best_y = None
            best_prob = -1.0  # probabilities are >= 0, so any class wins
            for y in self.classes:
                prob = 1.0
                # training rows belonging to class y
                temp = self.train_data[self.train_data[self.label] == y]
                temp_size = len(temp)
                for col in columns:
                    cur_col = temp[col].to_numpy()
                    test_val = test_data[col].iloc[i]
                    if col in self.is_discrete:
                        test_val = self._match_bin(cur_col, test_val)
                        # bins are tuples in an object-dtype array, so
                        # count with an explicit comparison loop
                        cnt = sum(1 for val in cur_col if val == test_val)
                        prob *= cnt / temp_size
                    else:
                        prob *= len(cur_col[cur_col == test_val]) / temp_size
                prob *= self.prob[y]  # multiply in the class prior
                if best_prob < prob:
                    best_y = y
                    best_prob = prob
            predictions.append(best_y)
        # Building the array at the end (instead of preallocating
        # np.zeros) keeps non-numeric labels such as strings intact.
        return np.array(predictions)

    @staticmethod
    def _match_bin(cur_col, test_val):
        """Map a raw value onto the (low, high) bin containing it, or a
        bin spanning all reals when no training bin matches."""
        for v in np.unique(cur_col):
            if v[0] <= test_val <= v[1]:
                return v
        # dummy range that spans the entire R (or strictly speaking the entire FLOAT)
        return (float('-inf'), float('inf'))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--file_path', required=True, help='training data file path')
parser.add_argument('--label_name', type=str, default='label', help='label column name for the input file')
parser.add_argument('--eval_mode', action='store_true', help='run this in evaluation mode')
args = parser.parse_args()
df = pd.read_csv(args.file_path)
if args.eval_mode:
train_sz = int(len(df) * 0.8)
df_train = df[:train_sz]
df_test = df[train_sz:]
nbc = NBC()
nbc.train(df_train, args.label_name)
test_x = df_test.drop(columns=[args.label_name])
test_y = df_test[args.label_name].to_numpy()
pred = nbc.infer(test_x)
print(f"accuracy score: {get_accuracy(pred, test_y)}")
print(f"precision score: {get_precision(pred, test_y)}")
else:
pass | 0.501465 | 0.333069 |
#Copyright 2015 RAPP
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
PKG='ros_nodes'
import sys
import unittest
import rospy
import roslib
import rospkg
from rapp_platform_ros_communications.srv import (
LightCheckRosSrv,
LightCheckRosSrvRequest,
DoorCheckRosSrv,
DoorCheckRosSrvRequest
)
class HazardDetectionFunc(unittest.TestCase):
    """Handles the hazard detection functional tests.

    Each test resolves the service topic from the ROS parameter server,
    calls the light- or door-check service with a sample image from the
    rapp_testing_tools package, and asserts on the returned value.
    """

    def _call_service(self, param_name, srv_class, req_class, rel_path):
        """Resolve *param_name* to a service topic, wait for the service,
        and call it with the image at rapp_testing_tools + *rel_path*.
        Returns the raw service response."""
        rospack = rospkg.RosPack()
        service_name = rospy.get_param(param_name)
        rospy.wait_for_service(service_name)
        proxy = rospy.ServiceProxy(service_name, srv_class)
        req = req_class()
        req.imageFilename = rospack.get_path('rapp_testing_tools') + rel_path
        return proxy(req)

    def _light_check(self, rel_path):
        """Call the light-check service; return the detected light level."""
        response = self._call_service(
            "rapp_hazard_detection_light_check_topic",
            LightCheckRosSrv, LightCheckRosSrvRequest, rel_path)
        return response.light_level

    def _door_check(self, rel_path):
        """Call the door-check service; return the detected door angle."""
        response = self._call_service(
            "rapp_hazard_detection_door_check_topic",
            DoorCheckRosSrv, DoorCheckRosSrvRequest, rel_path)
        return response.door_angle

    ## Tests light detection with lamp turned on
    def test_lightOn(self):
        light_level = self._light_check(
            '/test_data/hazard_detection_samples/lamp_on.jpg')
        self.assertGreater(light_level, 50)

    ## Tests light detection with lamp turned off
    def test_lightOff(self):
        light_level = self._light_check(
            '/test_data/hazard_detection_samples/lamp_off.jpg')
        self.assertLess(light_level, 50)

    ## Stress test for light detection. 20 calls in a row
    def test_light_stress(self):
        for _ in range(20):
            light_level = self._light_check(
                '/test_data/hazard_detection_samples/lamp_on.jpg')
            self.assertGreater(light_level, 50)

    ## Tests light detection with a non existent image. Should return -1
    def test_light_fileDoesNotExist(self):
        self.assertEqual(
            self._light_check('/test_data/not_existent_file.jpg'), -1)

    ## Tests light detection with an audio file. Should not crash and return -1
    def test_light_fileExistsButItAudio(self):
        self.assertEqual(
            self._light_check('/test_data/silence_sample.wav'), -1)

    ## Tests door detection with door opened
    def test_doorOpen(self):
        self.assertGreater(
            self._door_check('/test_data/hazard_detection_samples/door_2.png'), 1)

    ## Tests door detection with door closed
    def test_doorClosed(self):
        self.assertLess(
            self._door_check('/test_data/hazard_detection_samples/door_4.png'), 2)

    ## Stress test for door detection. 20 calls in a row
    def test_door_stress(self):
        for _ in range(20):
            self.assertGreater(
                self._door_check('/test_data/hazard_detection_samples/door_1.png'), 0)

    ## Tests door detection with a non existent image. Should return -1
    def test_door_fileDoesNotExist(self):
        self.assertEqual(
            self._door_check('/test_data/not_existent_file.jpg'), -1)

    ## Tests door detection with an audio file. Should not crash and return -1
    def test_door_fileExistsButItAudio(self):
        self.assertEqual(
            self._door_check('/test_data/silence_sample.wav'), -1)
## The main function. Initializes the functional tests
if __name__ == '__main__':
import rosunit
rosunit.unitrun(PKG, 'HazardDetectionFunc', HazardDetectionFunc) | rapp_hazard_detection/tests/hazard_detection/functional_tests.py |
#Copyright 2015 RAPP
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
PKG='ros_nodes'
import sys
import unittest
import rospy
import roslib
import rospkg
from rapp_platform_ros_communications.srv import (
LightCheckRosSrv,
LightCheckRosSrvRequest,
DoorCheckRosSrv,
DoorCheckRosSrvRequest
)
class HazardDetectionFunc(unittest.TestCase):
"""Handles the hazard detection functional tests
"""
## Tests light detection with lamp turned on
def test_lightOn(self):
rospack = rospkg.RosPack()
light_service = rospy.get_param("rapp_hazard_detection_light_check_topic")
rospy.wait_for_service(light_service)
lc_service = rospy.ServiceProxy(light_service, LightCheckRosSrv)
req = LightCheckRosSrvRequest()
req.imageFilename = rospack.get_path('rapp_testing_tools') + \
'/test_data/hazard_detection_samples/lamp_on.jpg'
response = lc_service(req)
light_level = response.light_level
self.assertGreater( light_level, 50 )
## Tests light detection with lamp turned off
def test_lightOff(self):
rospack = rospkg.RosPack()
light_service = rospy.get_param("rapp_hazard_detection_light_check_topic")
rospy.wait_for_service(light_service)
lc_service = rospy.ServiceProxy(light_service, LightCheckRosSrv)
req = LightCheckRosSrvRequest()
req.imageFilename = rospack.get_path('rapp_testing_tools') + \
'/test_data/hazard_detection_samples/lamp_off.jpg'
response = lc_service(req)
light_level = response.light_level
self.assertLess( light_level, 50 )
## Stress test for light detection. 20 calls in a row
def test_light_stress(self):
rospack = rospkg.RosPack()
light_service = rospy.get_param("rapp_hazard_detection_light_check_topic")
rospy.wait_for_service(light_service)
lc_service = rospy.ServiceProxy(light_service, LightCheckRosSrv)
req = LightCheckRosSrvRequest()
req.imageFilename = rospack.get_path('rapp_testing_tools') + \
'/test_data/hazard_detection_samples/lamp_on.jpg'
for i in range(0, 20):
response = lc_service(req)
light_level = response.light_level
self.assertGreater( light_level, 50 )
## Tests light detection with a non existent image. Should return -1
def test_light_fileDoesNotExist(self):
rospack = rospkg.RosPack()
light_service = rospy.get_param("rapp_hazard_detection_light_check_topic")
rospy.wait_for_service(light_service)
lc_service = rospy.ServiceProxy(light_service, LightCheckRosSrv)
req = LightCheckRosSrvRequest()
req.imageFilename = rospack.get_path('rapp_testing_tools') + \
'/test_data/not_existent_file.jpg'
response = lc_service(req)
light_level = response.light_level
self.assertEqual( light_level, -1 )
## Tests light detection with an audio file. Should not crash and return -1
def test_light_fileExistsButItAudio(self):
rospack = rospkg.RosPack()
light_service = rospy.get_param("rapp_hazard_detection_light_check_topic")
rospy.wait_for_service(light_service)
lc_service = rospy.ServiceProxy(light_service, LightCheckRosSrv)
req = LightCheckRosSrvRequest()
req.imageFilename = rospack.get_path('rapp_testing_tools') + \
'/test_data/silence_sample.wav'
response = lc_service(req)
light_level = response.light_level
self.assertEqual( light_level, -1 )
## Tests door detection with door opened
def test_doorOpen(self):
rospack = rospkg.RosPack()
door_service = rospy.get_param("rapp_hazard_detection_door_check_topic")
rospy.wait_for_service(door_service)
lc_service = rospy.ServiceProxy(door_service, DoorCheckRosSrv)
req = DoorCheckRosSrvRequest()
req.imageFilename = rospack.get_path('rapp_testing_tools') + \
'/test_data/hazard_detection_samples/door_2.png'
response = lc_service(req)
door_angle = response.door_angle
self.assertGreater( door_angle, 1 )
## Tests door detection with door closed
def test_doorClosed(self):
rospack = rospkg.RosPack()
door_service = rospy.get_param("rapp_hazard_detection_door_check_topic")
rospy.wait_for_service(door_service)
lc_service = rospy.ServiceProxy(door_service, DoorCheckRosSrv)
req = DoorCheckRosSrvRequest()
req.imageFilename = rospack.get_path('rapp_testing_tools') + \
'/test_data/hazard_detection_samples/door_4.png'
response = lc_service(req)
door_angle = response.door_angle
self.assertLess( door_angle, 2 )
## Stress test for door detection. 20 calls in a row
def test_door_stress(self):
rospack = rospkg.RosPack()
door_service = rospy.get_param("rapp_hazard_detection_door_check_topic")
rospy.wait_for_service(door_service)
lc_service = rospy.ServiceProxy(door_service, DoorCheckRosSrv)
req = DoorCheckRosSrvRequest()
req.imageFilename = rospack.get_path('rapp_testing_tools') + \
'/test_data/hazard_detection_samples/door_1.png'
for i in range(0, 20):
response = lc_service(req)
door_angle = response.door_angle
self.assertGreater( door_angle, 0 )
## Tests door detection with a non existent image. Should return -1
def test_door_fileDoesNotExist(self):
rospack = rospkg.RosPack()
door_service = rospy.get_param("rapp_hazard_detection_door_check_topic")
rospy.wait_for_service(door_service)
lc_service = rospy.ServiceProxy(door_service, DoorCheckRosSrv)
req = DoorCheckRosSrvRequest()
req.imageFilename = rospack.get_path('rapp_testing_tools') + \
'/test_data/not_existent_file.jpg'
response = lc_service(req)
door_angle = response.door_angle
self.assertEqual( door_angle, -1 )
## Tests door detection with an audio file. Should not crash and return -1
def test_door_fileExistsButItAudio(self):
rospack = rospkg.RosPack()
door_service = rospy.get_param("rapp_hazard_detection_door_check_topic")
rospy.wait_for_service(door_service)
lc_service = rospy.ServiceProxy(door_service, DoorCheckRosSrv)
req = DoorCheckRosSrvRequest()
req.imageFilename = rospack.get_path('rapp_testing_tools') + \
'/test_data/silence_sample.wav'
response = lc_service(req)
door_angle = response.door_angle
self.assertEqual( door_angle, -1 )
## The main function. Initializes the functional tests
if __name__ == '__main__':
import rosunit
rosunit.unitrun(PKG, 'HazardDetectionFunc', HazardDetectionFunc) | 0.58676 | 0.247703 |
from enum import IntEnum, Enum
# Depending modules
import six
__all__ = [
'SubscriptionStatus',
'SocketStatus',
'MessageTypes'
]
class SubscriptionStatus(IntEnum):
    """Lifecycle states of a single GraphQL subscription."""
    PENDING = 1
    CONNECTED = 2
    CLOSING = 3
    CLOSED = 4
    FAILED = 5
class SocketStatus(IntEnum):
    """Lifecycle states of the underlying websocket connection."""
    CONNECTING = 1
    READY = 2
    CLOSED = 3
class MessageTypes(Enum):
    """AWS AppSync RealTime protocol message types.

    Members compare equal both to other members and to the raw wire
    string (e.g. ``MessageTypes.GQL_DATA == 'data'``), so the ``type``
    field of incoming payloads can be matched directly.
    """

    def __eq__(self, other):
        if self.__class__ is other.__class__:
            return self.value == other.value
        elif (isinstance(other, str)
              or (not six.PY3 and isinstance(other, unicode))):
            return self.value == other
        return NotImplemented

    # Defining __eq__ implicitly sets __hash__ to None, which made the
    # members unhashable (unusable as dict keys / in sets).  Hash by the
    # wire value so hash() stays consistent with the string equality
    # defined above.
    def __hash__(self):
        return hash(self.value)

    # Client -> Server: first message after the handshake; initializes
    # the AWS AppSync RealTime communication.
    GQL_CONNECTION_INIT = 'connection_init'
    # Server -> Client: sent when establishing the connection fails.
    GQL_CONNECTION_ERROR = 'connection_error'
    # Server -> Client: ack for GQL_CONNECTION_INIT.
    GQL_CONNECTION_ACK = 'connection_ack'
    # Client -> Server: registers a subscription.
    GQL_START = 'start'
    # Server -> Client: ack for GQL_START.
    GQL_START_ACK = 'start_ack'
    # Server -> Client: a subscription data message.
    GQL_DATA = 'data'
    # Server -> Client: keep-alive signal.
    GQL_CONNECTION_KEEP_ALIVE = 'ka'
    # Client -> Server: unregisters a subscription.
    GQL_STOP = 'stop'
    # Server -> Client: ack for GQL_STOP.
    GQL_COMPLETE = 'complete'
    # Server -> Client: error messages from AWS AppSync RealTime.
GQL_ERROR = 'error' #Server -> Client | appsync_subscription_manager/types.py | from enum import IntEnum, Enum
# Depending modules
import six
__all__ = [
'SubscriptionStatus',
'SocketStatus',
'MessageTypes'
]
class SubscriptionStatus(IntEnum):
PENDING = 1
CONNECTED = 2
CLOSING = 3
CLOSED = 4
FAILED = 5
class SocketStatus(IntEnum):
CONNECTING = 1
READY = 2
CLOSED = 3
class MessageTypes(Enum):
    """AWS AppSync RealTime protocol message types.

    Members compare equal both to other members and to the raw wire
    string (e.g. ``MessageTypes.GQL_DATA == 'data'``), so the ``type``
    field of incoming payloads can be matched directly.
    """

    def __eq__(self, other):
        if self.__class__ is other.__class__:
            return self.value == other.value
        elif (isinstance(other, str)
              or (not six.PY3 and isinstance(other, unicode))):
            return self.value == other
        return NotImplemented

    # Defining __eq__ implicitly sets __hash__ to None, which made the
    # members unhashable (unusable as dict keys / in sets).  Hash by the
    # wire value so hash() stays consistent with the string equality
    # defined above.
    def __hash__(self):
        return hash(self.value)

    # Client -> Server: first message after the handshake; initializes
    # the AWS AppSync RealTime communication.
    GQL_CONNECTION_INIT = 'connection_init'
    # Server -> Client: sent when establishing the connection fails.
    GQL_CONNECTION_ERROR = 'connection_error'
    # Server -> Client: ack for GQL_CONNECTION_INIT.
    GQL_CONNECTION_ACK = 'connection_ack'
    # Client -> Server: registers a subscription.
    GQL_START = 'start'
    # Server -> Client: ack for GQL_START.
    GQL_START_ACK = 'start_ack'
    # Server -> Client: a subscription data message.
    GQL_DATA = 'data'
    # Server -> Client: keep-alive signal.
    GQL_CONNECTION_KEEP_ALIVE = 'ka'
    # Client -> Server: unregisters a subscription.
    GQL_STOP = 'stop'
    # Server -> Client: ack for GQL_STOP.
    GQL_COMPLETE = 'complete'
    # Server -> Client: error messages from AWS AppSync RealTime.
GQL_ERROR = 'error' #Server -> Client | 0.5144 | 0.074131 |
import time
from typing import (
Any,
Callable,
Coroutine,
Dict,
)
# Third party libraries
import graphene
from graphql.execution.executors.asyncio import AsyncioExecutor
from starlette.applications import Starlette
from starlette.graphql import GraphQLApp
from starlette.middleware import Middleware
from starlette.middleware.cors import CORSMiddleware
from starlette.requests import Request
from starlette.responses import Response
from starlette.routing import Mount, Route
from starlette.staticfiles import StaticFiles
from starlette.templating import Jinja2Templates
import tracers.daemon
import tracers.function
# Local libraries
import server.api.schema.mutation
import server.api.schema.query
import server.authc
import server.utils.aio
# Implementation
TEMPLATING_ENGINE = Jinja2Templates(
directory='templates',
)
def render_template(
    *,
    context: Dict[str, Any],
    template: str,
) -> Callable[[Request], Coroutine[Any, Any, Response]]:
    """Build an async request handler rendering *template* with *context*."""

    @tracers.function.trace(overridden_function=render_template)
    async def render(request: Request) -> Response:
        # Merge the live request into the static context; a duplicate
        # 'request' key in *context* raises, exactly as before.
        merged = dict(request=request, **context)
        return TEMPLATING_ENGINE.TemplateResponse(name=template, context=merged)

    return render
# ASGI application: authentication middleware in front of three routes --
# the templated index page, the GraphQL API (with GraphiQL UI), and the
# static asset mount.
SERVER = Starlette(
    middleware = [
        Middleware(
            cls=server.authc.AuthenticationMidleware,
            # where unauthenticated users are sent
            authentication_path='/authenticate',
            # path prefixes that require an authenticated session
            authentication_required_paths=(
                '/api',
            ),
        ),
    ],
    routes=[
        # landing page rendered from templates/index.html
        Route(
            endpoint=render_template(
                context={},
                template='index.html',
            ),
            path='/',
        ),
        # GraphQL endpoint backed by the asyncio executor
        Route(
            endpoint=GraphQLApp(
                executor_class=AsyncioExecutor,
                graphiql=True,
                schema=graphene.Schema(
                    mutation=server.api.schema.mutation.Mutation,
                    query=server.api.schema.query.Query,
                ),
            ),
            path='/api',
        ),
        # static files served from ../static
        Mount(
            app=StaticFiles(
                directory='../static',
            ),
            name='static',
            path='/static',
        ),
    ],
)
tracers.daemon.start_daemon()
tracers.function.call(time.sleep, 0) | server/server/asgi.py | import time
from typing import (
Any,
Callable,
Coroutine,
Dict,
)
# Third party libraries
import graphene
from graphql.execution.executors.asyncio import AsyncioExecutor
from starlette.applications import Starlette
from starlette.graphql import GraphQLApp
from starlette.middleware import Middleware
from starlette.middleware.cors import CORSMiddleware
from starlette.requests import Request
from starlette.responses import Response
from starlette.routing import Mount, Route
from starlette.staticfiles import StaticFiles
from starlette.templating import Jinja2Templates
import tracers.daemon
import tracers.function
# Local libraries
import server.api.schema.mutation
import server.api.schema.query
import server.authc
import server.utils.aio
# Implementation
TEMPLATING_ENGINE = Jinja2Templates(
directory='templates',
)
def render_template(
*,
context: Dict[str, Any],
template: str,
) -> Callable[[Request], Coroutine[Any, Any, Response]]:
@tracers.function.trace(overridden_function=render_template)
async def render(request: Request) -> Response:
return TEMPLATING_ENGINE.TemplateResponse(
name=template,
context=dict(request=request, **context),
)
return render
SERVER = Starlette(
middleware = [
Middleware(
cls=server.authc.AuthenticationMidleware,
authentication_path='/authenticate',
authentication_required_paths=(
'/api',
),
),
],
routes=[
Route(
endpoint=render_template(
context={},
template='index.html',
),
path='/',
),
Route(
endpoint=GraphQLApp(
executor_class=AsyncioExecutor,
graphiql=True,
schema=graphene.Schema(
mutation=server.api.schema.mutation.Mutation,
query=server.api.schema.query.Query,
),
),
path='/api',
),
Mount(
app=StaticFiles(
directory='../static',
),
name='static',
path='/static',
),
],
)
tracers.daemon.start_daemon()
tracers.function.call(time.sleep, 0) | 0.51562 | 0.110519 |
# Join Google Play metadata (per-package JSON detail files) onto a CSV of
# (package_name, github_repo) matches and collect one record per app.
import argparse
import csv
import logging
import json
from util.parse import parse_package_details, parse_package_to_repos_file
from datetime import datetime

logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s | [%(levelname)s] : %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')

parser = argparse.ArgumentParser()
parser.add_argument("--package_list",
                    type=argparse.FileType('r'),
                    help="The csv file that contins output the match process",
                    required=True)
parser.add_argument("--details_dir",
                    type=str,
                    help="Folder containing json files that store google play metadata",
                    required=True)
parser.add_argument(
    '--output', default=open('new_apps.json', 'w'),
    type=argparse.FileType('w'),
    help='Output file. Default: new_apps.json.')
args = parser.parse_args()

# Load the match CSV: column 0 is the package name, column 1 the
# "owner/repo" slug on GitHub.
csv_reader = csv.reader(args.package_list, delimiter=',')
matched_pkgs = []
matched_dict = {}
for row in csv_reader:
    matched_pkgs.append(row[0].strip())
    # NOTE(review): the dict key is not .strip()'ed while matched_pkgs
    # entries are -- confirm the CSV has no padding around column 0.
    matched_dict[row[0]] = "https://github.com/{}".format(row[1].strip())
args.package_list.close()

n_matched = len(matched_pkgs)
found = 0
details_dir = args.details_dir
result = []
# Scan every details JSON; keep only packages present in the match list.
for package_name, package_details in parse_package_details(details_dir):
    if package_name in matched_pkgs:
        logging.debug(package_name)
        found = found + 1
        # in-place progress bar on stdout
        workdone = found/n_matched
        print("\rProgress: [{0:50s}] {1:.1f}% {2}/{3}".format('#' * int(workdone * 50), workdone*100, found, n_matched), end='', flush=True)
        app_details = package_details.get('details').get('appDetails', None)
        relevant_info = {}
        if app_details:
            # flatten the fields we care about into one output record
            relevant_info['package'] = package_name
            relevant_info['name'] = package_details.get('title')
            relevant_info['summary'] = package_details.get('promotionalDescription')
            relevant_info['last_added_on'] = str(datetime.strptime(app_details.get('uploadDate'), "%b %d, %Y").date())
            relevant_info['last_version_number'] = app_details.get('versionCode')
            relevant_info['last_version_name'] = app_details.get('versionString')
            relevant_info['source_repo'] = matched_dict.get(package_name)
            result.append(relevant_info)
        else:
            logging.warning('Impossible to retrive details from package {}'.format(package_name))
        # stop early once every matched package has been seen
        if n_matched == found:
            break
print(json.dumps(result, indent=4, sort_keys=False), file=args.output) | helpers/get_matched_metadata.py |
import argparse
import csv
import logging
import json
from util.parse import parse_package_details, parse_package_to_repos_file
from datetime import datetime
logging.basicConfig(level=logging.INFO,
format='%(asctime)s | [%(levelname)s] : %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
parser = argparse.ArgumentParser()
parser.add_argument("--package_list",
type=argparse.FileType('r'),
help="The csv file that contins output the match process",
required=True)
parser.add_argument("--details_dir",
type=str,
help="Folder containing json files that store google play metadata",
required=True)
parser.add_argument(
'--output', default=open('new_apps.json', 'w'),
type=argparse.FileType('w'),
help='Output file. Default: new_apps.json.')
args = parser.parse_args()
csv_reader = csv.reader(args.package_list, delimiter=',')
matched_pkgs = []
matched_dict = {}
for row in csv_reader:
matched_pkgs.append(row[0].strip())
matched_dict[row[0]] = "https://github.com/{}".format(row[1].strip())
args.package_list.close()
n_matched = len(matched_pkgs)
found = 0
details_dir = args.details_dir
result = []
for package_name, package_details in parse_package_details(details_dir):
if package_name in matched_pkgs:
logging.debug(package_name)
found = found + 1
workdone = found/n_matched
print("\rProgress: [{0:50s}] {1:.1f}% {2}/{3}".format('#' * int(workdone * 50), workdone*100, found, n_matched), end='', flush=True)
app_details = package_details.get('details').get('appDetails', None)
relevant_info = {}
if app_details:
relevant_info['package'] = package_name
relevant_info['name'] = package_details.get('title')
relevant_info['summary'] = package_details.get('promotionalDescription')
relevant_info['last_added_on'] = str(datetime.strptime(app_details.get('uploadDate'), "%b %d, %Y").date())
relevant_info['last_version_number'] = app_details.get('versionCode')
relevant_info['last_version_name'] = app_details.get('versionString')
relevant_info['source_repo'] = matched_dict.get(package_name)
result.append(relevant_info)
else:
logging.warning('Impossible to retrive details from package {}'.format(package_name))
if n_matched == found:
break
print(json.dumps(result, indent=4, sort_keys=False), file=args.output) | 0.320502 | 0.079317 |
import re
import secrets
import string
from typing import Mapping
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import TYPE_CHECKING
from pyrsistent import field
from pyrsistent import m
from pyrsistent import PMap
from pyrsistent import pmap
from pyrsistent import PVector
from pyrsistent import pvector
from pyrsistent import v
from task_processing.plugins.kubernetes.utils import get_sanitised_kubernetes_name
if TYPE_CHECKING:
from task_processing.plugins.kubernetes.types import DockerVolume
from task_processing.plugins.kubernetes.types import SecretEnvSource
from task_processing.interfaces.task_executor import DefaultTaskConfigInterface
POD_SUFFIX_ALPHABET = string.ascii_lowercase + string.digits
POD_SUFFIX_LENGTH = 6
MAX_POD_NAME_LENGTH = 253
VALID_POD_NAME_REGEX = '[a-z0-9]([.-a-z0-9]*[a-z0-9])?'
VALID_VOLUME_KEYS = {'mode', 'container_path', 'host_path'}
VALID_SECRET_ENV_KEYS = {'secret_name', 'key'}
VALID_CAPABILITIES = {
"AUDIT_CONTROL",
"AUDIT_READ",
"AUDIT_WRITE",
"BLOCK_SUSPEND",
"CHOWN",
"DAC_OVERRIDE",
"DAC_READ_SEARCH",
"FOWNER",
"FSETID",
"IPC_LOCK",
"IPC_OWNER",
"KILL",
"LEASE",
"LINUX_IMMUTABLE",
"MAC_ADMIN",
"MAC_OVERRIDE",
"MKNOD",
"NET_ADMIN",
"NET_BIND_SERVICE",
"NET_BROADCAST",
"NET_RAW",
"SETFCAP",
"SETGID",
"SETPCAP",
"SETUID",
"SYSLOG",
"SYS_ADMIN",
"SYS_BOOT",
"SYS_CHROOT",
"SYS_MODULE",
"SYS_NICE",
"SYS_PACCT",
"SYS_PTRACE",
"SYS_RAWIO",
"SYS_RESOURCE",
"SYS_TIME",
"SYS_TTY_CONFIG",
"WAKE_ALARM",
}
DEFAULT_CAPS_DROP = {
"AUDIT_WRITE",
"CHOWN",
"DAC_OVERRIDE",
"FOWNER",
"FSETID",
"KILL",
"MKNOD",
"NET_BIND_SERVICE",
"NET_RAW",
"SETFCAP",
"SETGID",
"SETPCAP",
"SETUID",
"SYS_CHROOT",
}
VALID_DOCKER_VOLUME_MODES = {"RW", "RO"}
def _generate_pod_suffix() -> str:
    """Return a random lowercase-alphanumeric suffix for pod names."""
    chars = [secrets.choice(POD_SUFFIX_ALPHABET) for _ in range(POD_SUFFIX_LENGTH)]
    return ''.join(chars)
def _valid_volumes(volumes: Sequence["DockerVolume"]) -> Tuple[bool, Optional[str]]:
    """pyrsistent invariant: every volume has exactly the expected keys
    and a valid access mode."""
    for vol in volumes:
        if set(vol.keys()) != VALID_VOLUME_KEYS:
            msg = (
                f'Invalid volume format, must only contain following keys: '
                f'{VALID_VOLUME_KEYS}, got: {vol.keys()}'
            )
            return (False, msg)
        if vol["mode"] not in VALID_DOCKER_VOLUME_MODES:
            msg = f"Invalid mode for volume, must be one of {VALID_DOCKER_VOLUME_MODES}"
            return (False, msg)
    return (True, None)
def _valid_secret_envs(secret_envs: Mapping[str, "SecretEnvSource"]) -> Tuple[bool, Optional[str]]:
    """pyrsistent invariant: each secret env var spec has exactly the
    expected keys."""
    # Note we are not validating existence of secret in k8s here, leave that to creation of pod
    for key, value in secret_envs.items():
        if set(value.keys()) == VALID_SECRET_ENV_KEYS:
            continue
        return (
            False,
            f'Invalid secret environment variable {key}, must only contain following keys: '
            f'{VALID_SECRET_ENV_KEYS}, got: {value.keys()}'
        )
    return (True, None)
def _valid_capabilities(capabilities: Sequence[str]) -> Tuple[bool, Optional[str]]:
    """pyrsistent invariant: every requested capability is a known Linux
    capability name."""
    requested = set(capabilities)
    if not requested.issubset(VALID_CAPABILITIES):
        return (
            False,
            f"Invalid capabilities - got {capabilities} but expected only values from "
            f"{VALID_CAPABILITIES}",
        )
    return (True, None)
class KubernetesTaskConfig(DefaultTaskConfigInterface):
    # Immutable (pyrsistent) record describing one Kubernetes task; the
    # pod name is derived from it as a sanitised "<name>.<uuid>".

    def __invariant__(self):
        # Record-level invariants checked by pyrsistent on construction:
        # the derived pod name must fit and match Kubernetes naming rules.
        return (
            (
                len(get_sanitised_kubernetes_name(self.pod_name)) < MAX_POD_NAME_LENGTH,
                (
                    f'Pod name must have up to {MAX_POD_NAME_LENGTH} characters.'
                )
            ),
            (
                re.match(VALID_POD_NAME_REGEX, get_sanitised_kubernetes_name(self.pod_name)),
                (
                    'Must comply with Kubernetes pod naming standards.'
                )
            )
        )

    # random suffix distinguishing pods created from the same task name
    uuid = field(type=str, initial=_generate_pod_suffix)  # type: ignore
    name = field(type=str, initial="default")
    node_selector = field(type=PMap)
    # Hardcoded for the time being
    restart_policy = "Never"
    # By default, the retrying executor retries 3 times. This task option
    # overrides the executor setting.
    retries = field(
        type=int,
        factory=int,
        mandatory=False,
        invariant=lambda r: (r >= 0, 'retries >= 0')
    )
    # Docker image to run (mandatory)
    image = field(type=str, mandatory=True)
    # command executed in the container; must be non-blank
    command = field(
        type=str,
        mandatory=True,
        invariant=lambda cmd: (cmd.strip() != '', 'empty command is not allowed')
    )
    # host volumes mounted into the container
    volumes = field(
        type=PVector if not TYPE_CHECKING else PVector["DockerVolume"],
        initial=v(),
        factory=pvector,
        invariant=_valid_volumes,
    )
    # resource requests
    cpus = field(
        type=float,
        initial=0.1,
        factory=float,
        invariant=lambda c: (c > 0, 'cpus > 0'))
    memory = field(
        type=float,
        initial=128.0,
        factory=float,
        invariant=lambda m: (m >= 32, 'mem is >= 32'))
    disk = field(
        type=float,
        initial=10.0,
        factory=float,
        invariant=lambda d: (d > 0, 'disk > 0'))
    # plain environment variables
    environment = field(
        type=PMap if not TYPE_CHECKING else PMap[str, str],
        initial=m(),
        factory=pmap,
    )
    # environment variables sourced from Kubernetes secrets
    secret_environment = field(
        type=PMap if not TYPE_CHECKING else PMap[str, 'SecretEnvSource'],
        initial=m(),
        factory=pmap,
        invariant=_valid_secret_envs,
    )
    # Linux capabilities added to / dropped from the container
    cap_add = field(
        type=PVector if not TYPE_CHECKING else PVector[str],
        initial=v(),
        factory=pvector,
        invariant=_valid_capabilities,
    )
    cap_drop = field(
        type=PVector if not TYPE_CHECKING else PVector[str],
        initial=pvector(DEFAULT_CAPS_DROP),
        factory=pvector,
        invariant=_valid_capabilities,
    )

    @property
    def pod_name(self) -> str:
        # "<name>.<uuid>", sanitised for Kubernetes
        return get_sanitised_kubernetes_name(f'{self.name}.{self.uuid}')  # type: ignore

    def set_pod_name(self, pod_name: str):
        """Parse *pod_name* ("<name>.<uuid>") back into name/uuid and
        return an updated copy of this record.

        Raises:
            ValueError: if *pod_name* contains no '.' separator.
        """
        try:
            name, uuid = pod_name.rsplit('.', maxsplit=1)
        except ValueError:
            raise ValueError(f'Invalid format for pod_name {pod_name}')
        return self.set(name=name, uuid=uuid)
import re
import secrets
import string
from typing import Mapping
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import TYPE_CHECKING
from pyrsistent import field
from pyrsistent import m
from pyrsistent import PMap
from pyrsistent import pmap
from pyrsistent import PVector
from pyrsistent import pvector
from pyrsistent import v
from task_processing.plugins.kubernetes.utils import get_sanitised_kubernetes_name
if TYPE_CHECKING:
from task_processing.plugins.kubernetes.types import DockerVolume
from task_processing.plugins.kubernetes.types import SecretEnvSource
from task_processing.interfaces.task_executor import DefaultTaskConfigInterface
# Pod-name suffix generation: POD_SUFFIX_LENGTH random characters from [a-z0-9].
POD_SUFFIX_ALPHABET = string.ascii_lowercase + string.digits
POD_SUFFIX_LENGTH = 6
# Kubernetes limits object names to 253 characters (DNS subdomain rules).
MAX_POD_NAME_LENGTH = 253
# Fixed character-class bug: the previous value '[.-a-z0-9]...' made '.-a' a
# character *range* (0x2E-0x61: matches uppercase and punctuation while
# excluding most lowercase letters). '-' is listed first and '.' stands alone
# so every character is literal.
VALID_POD_NAME_REGEX = '[a-z0-9]([-.a-z0-9]*[a-z0-9])?'
# Exact key sets expected for volume / secret-env mappings (validated below).
VALID_VOLUME_KEYS = {'mode', 'container_path', 'host_path'}
VALID_SECRET_ENV_KEYS = {'secret_name', 'key'}
# Linux capability names accepted in cap_add / cap_drop (see capabilities(7)).
VALID_CAPABILITIES = {
    "AUDIT_CONTROL",
    "AUDIT_READ",
    "AUDIT_WRITE",
    "BLOCK_SUSPEND",
    "CHOWN",
    "DAC_OVERRIDE",
    "DAC_READ_SEARCH",
    "FOWNER",
    "FSETID",
    "IPC_LOCK",
    "IPC_OWNER",
    "KILL",
    "LEASE",
    "LINUX_IMMUTABLE",
    "MAC_ADMIN",
    "MAC_OVERRIDE",
    "MKNOD",
    "NET_ADMIN",
    "NET_BIND_SERVICE",
    "NET_BROADCAST",
    "NET_RAW",
    "SETFCAP",
    "SETGID",
    "SETPCAP",
    "SETUID",
    "SYSLOG",
    "SYS_ADMIN",
    "SYS_BOOT",
    "SYS_CHROOT",
    "SYS_MODULE",
    "SYS_NICE",
    "SYS_PACCT",
    "SYS_PTRACE",
    "SYS_RAWIO",
    "SYS_RESOURCE",
    "SYS_TIME",
    "SYS_TTY_CONFIG",
    "WAKE_ALARM",
}
# Capabilities dropped from containers unless a task explicitly re-adds them.
DEFAULT_CAPS_DROP = {
    "AUDIT_WRITE",
    "CHOWN",
    "DAC_OVERRIDE",
    "FOWNER",
    "FSETID",
    "KILL",
    "MKNOD",
    "NET_BIND_SERVICE",
    "NET_RAW",
    "SETFCAP",
    "SETGID",
    "SETPCAP",
    "SETUID",
    "SYS_CHROOT",
}
# Docker volume access modes: read-write / read-only.
VALID_DOCKER_VOLUME_MODES = {"RW", "RO"}
def _generate_pod_suffix() -> str:
    """Return a random POD_SUFFIX_LENGTH-character suffix from [a-z0-9].

    secrets.choice is used for unpredictable, collision-resistant suffixes.
    """
    # '_' instead of an unused loop variable.
    return ''.join(secrets.choice(POD_SUFFIX_ALPHABET) for _ in range(POD_SUFFIX_LENGTH))
def _valid_volumes(volumes: Sequence["DockerVolume"]) -> Tuple[bool, Optional[str]]:
    """Pyrsistent invariant: each volume mapping must have exactly the
    expected keys and a recognised access mode.

    Returns (True, None) on success, (False, reason) on the first failure.
    """
    for vol in volumes:
        keys = set(vol.keys())
        if keys != VALID_VOLUME_KEYS:
            message = (
                f'Invalid volume format, must only contain following keys: '
                f'{VALID_VOLUME_KEYS}, got: {vol.keys()}'
            )
            return (False, message)
        if vol["mode"] not in VALID_DOCKER_VOLUME_MODES:
            return (
                False,
                f"Invalid mode for volume, must be one of {VALID_DOCKER_VOLUME_MODES}",
            )
    return (True, None)
def _valid_secret_envs(secret_envs: Mapping[str, "SecretEnvSource"]) -> Tuple[bool, Optional[str]]:
    """Pyrsistent invariant: each secret-env source must have exactly the
    expected keys.

    Note we are not validating existence of the secret in k8s here; that is
    left to pod creation.
    """
    for key, value in secret_envs.items():
        if set(value.keys()) == VALID_SECRET_ENV_KEYS:
            continue
        return (
            False,
            f'Invalid secret environment variable {key}, must only contain following keys: '
            f'{VALID_SECRET_ENV_KEYS}, got: {value.keys()}'
        )
    return (True, None)
def _valid_capabilities(capabilities: Sequence[str]) -> Tuple[bool, Optional[str]]:
    """Pyrsistent invariant: every requested capability must be a known
    Linux capability name (subset of VALID_CAPABILITIES)."""
    requested = set(capabilities)
    if not requested.issubset(VALID_CAPABILITIES):
        return (
            False,
            f"Invalid capabilities - got {capabilities} but expected only values from "
            f"{VALID_CAPABILITIES}",
        )
    return (True, None)
class KubernetesTaskConfig(DefaultTaskConfigInterface):
    """Immutable (pyrsistent) task configuration for the Kubernetes plugin.

    The pod name is derived as '<name>.<uuid>' (see pod_name); __invariant__
    enforces length and character constraints on the sanitised result.
    Fix: stray dataset residue fused onto the final return line removed.
    """
    def __invariant__(self):
        # Record-level invariants checked by pyrsistent on every (re)construction.
        # NOTE(review): re.match() anchors only at the start, so the second
        # check accepts any name with a valid *prefix* -- confirm whether
        # fullmatch semantics were intended.
        return (
            (
                len(get_sanitised_kubernetes_name(self.pod_name)) < MAX_POD_NAME_LENGTH,
                (
                    f'Pod name must have up to {MAX_POD_NAME_LENGTH} characters.'
                )
            ),
            (
                re.match(VALID_POD_NAME_REGEX, get_sanitised_kubernetes_name(self.pod_name)),
                (
                    'Must comply with Kubernetes pod naming standards.'
                )
            )
        )
    # Random suffix combined with `name` to form the pod name (see pod_name).
    uuid = field(type=str, initial=_generate_pod_suffix)  # type: ignore
    name = field(type=str, initial="default")
    # Node labels the pod must be scheduled onto.
    node_selector = field(type=PMap)
    # Hardcoded for the time being
    restart_policy = "Never"
    # By default, the retrying executor retries 3 times. This task option
    # overrides the executor setting.
    retries = field(
        type=int,
        factory=int,
        mandatory=False,
        invariant=lambda r: (r >= 0, 'retries >= 0')
    )
    image = field(type=str, mandatory=True)
    command = field(
        type=str,
        mandatory=True,
        invariant=lambda cmd: (cmd.strip() != '', 'empty command is not allowed')
    )
    volumes = field(
        type=PVector if not TYPE_CHECKING else PVector["DockerVolume"],
        initial=v(),
        factory=pvector,
        invariant=_valid_volumes,
    )
    # Resource requests; memory is in MB (minimum 32), disk presumably in GB
    # -- confirm units against the executor.
    cpus = field(
        type=float,
        initial=0.1,
        factory=float,
        invariant=lambda c: (c > 0, 'cpus > 0'))
    memory = field(
        type=float,
        initial=128.0,
        factory=float,
        invariant=lambda m: (m >= 32, 'mem is >= 32'))
    disk = field(
        type=float,
        initial=10.0,
        factory=float,
        invariant=lambda d: (d > 0, 'disk > 0'))
    environment = field(
        type=PMap if not TYPE_CHECKING else PMap[str, str],
        initial=m(),
        factory=pmap,
    )
    # Environment variables sourced from Kubernetes secrets.
    secret_environment = field(
        type=PMap if not TYPE_CHECKING else PMap[str, 'SecretEnvSource'],
        initial=m(),
        factory=pmap,
        invariant=_valid_secret_envs,
    )
    # Linux capabilities added to / dropped from the container.
    cap_add = field(
        type=PVector if not TYPE_CHECKING else PVector[str],
        initial=v(),
        factory=pvector,
        invariant=_valid_capabilities,
    )
    cap_drop = field(
        type=PVector if not TYPE_CHECKING else PVector[str],
        initial=pvector(DEFAULT_CAPS_DROP),
        factory=pvector,
        invariant=_valid_capabilities,
    )

    @property
    def pod_name(self) -> str:
        """Sanitised '<name>.<uuid>' identifier used as the Kubernetes pod name."""
        return get_sanitised_kubernetes_name(f'{self.name}.{self.uuid}')  # type: ignore

    def set_pod_name(self, pod_name: str):
        """Return a new config with name/uuid parsed from '<name>.<uuid>'.

        Raises ValueError when pod_name contains no '.' separator.
        """
        try:
            name, uuid = pod_name.rsplit('.', maxsplit=1)
        except ValueError:
            raise ValueError(f'Invalid format for pod_name {pod_name}')
        return self.set(name=name, uuid=uuid)
from django.db import models
class Role(models.Model):
name = models.CharField(verbose_name='name', max_length=40, primary_key=True)
description = models.CharField(verbose_name='description', max_length=100)
functions = models.CharField(verbose_name='functions', max_length=500)
class Right(models.Model):
userName = models.CharField(verbose_name='userName', max_length=20, primary_key=True)
roleName = models.CharField(verbose_name='roleName', max_length=20)
description = models.CharField(verbose_name='description', max_length=100)
class Function(models.Model):
num = models.AutoField(verbose_name='num', primary_key=True)
name = models.CharField(verbose_name='description', max_length=40)
URL = models.CharField(verbose_name='URL', max_length=100)
description = models.CharField(verbose_name='description', max_length=100)
class Contract(models.Model):
num = models.AutoField(verbose_name='num', primary_key=True)
name = models.CharField(verbose_name='name', max_length=40)
customer = models.CharField(verbose_name='customer', max_length=40)
beginTime = models.DateTimeField(verbose_name='beginTime')
endTime = models.DateTimeField(verbose_name='endTime')
content = models.TextField(verbose_name='content')
userName = models.CharField(verbose_name='userName', max_length=40)
class Process(models.Model):
conNum = models.IntegerField(verbose_name='conNum')
type = models.IntegerField(verbose_name='type')
state = models.IntegerField(verbose_name='state')
userName = models.CharField(verbose_name='userName', max_length=40)
content = models.TextField(verbose_name='content')
time = models.DateTimeField(verbose_name='time')
class State(models.Model):
conNum = models.IntegerField(verbose_name='conNum')
type = models.IntegerField(verbose_name='type')
time = models.DateTimeField(verbose_name='time')
class Log(models.Model):
userName = models.CharField(verbose_name='userName', max_length=40)
content = models.TextField(verbose_name='content')
time = models.DateTimeField(verbose_name='time')
class Customer(models.Model):
num = models.AutoField(verbose_name='num', primary_key=True)
name = models.CharField(verbose_name='name', max_length=40)
address = models.CharField(verbose_name='address', max_length=100)
tel = models.CharField(verbose_name='tel', max_length=20)
fax = models.CharField(verbose_name='fax', max_length=20)
email = models.EmailField(verbose_name='email', max_length=20)
bank = models.CharField(verbose_name='bank', max_length=50)
account = models.CharField(verbose_name='account', max_length=50)
remark = models.CharField(verbose_name='remark', max_length=200)
class Attachment(models.Model):
conNum = models.CharField(verbose_name='conNum', max_length=20, primary_key=True)
cusName = models.CharField(verbose_name='cusName', max_length=100)
fileName = models.CharField(verbose_name='fileName', max_length=100)
path = models.CharField(verbose_name='path', max_length=100)
type = models.CharField(verbose_name='type', max_length=20)
uploadTime = models.DateTimeField(verbose_name='uploadTime')
    file = models.FileField(verbose_name='file')
from django.db import models
class Role(models.Model):
    """A named role; `functions` presumably stores the functions it grants
    as a serialised string -- confirm the format against the views."""
    name = models.CharField(verbose_name='name', max_length=40, primary_key=True)
    description = models.CharField(verbose_name='description', max_length=100)
    functions = models.CharField(verbose_name='functions', max_length=500)
class Right(models.Model):
    """Assignment of a role to a user.

    userName is the primary key, so each user can hold at most one role."""
    userName = models.CharField(verbose_name='userName', max_length=20, primary_key=True)
    roleName = models.CharField(verbose_name='roleName', max_length=20)  # plain string, not a ForeignKey to Role
    description = models.CharField(verbose_name='description', max_length=100)
class Function(models.Model):
    """An application function with its URL, grantable through roles."""
    num = models.AutoField(verbose_name='num', primary_key=True)
    # Fixed apparent copy-paste error: verbose_name was 'description'.
    name = models.CharField(verbose_name='name', max_length=40)
    URL = models.CharField(verbose_name='URL', max_length=100)
    description = models.CharField(verbose_name='description', max_length=100)
class Contract(models.Model):
    """A contract record with its validity period and free-text content."""
    num = models.AutoField(verbose_name='num', primary_key=True)
    name = models.CharField(verbose_name='name', max_length=40)
    customer = models.CharField(verbose_name='customer', max_length=40)  # customer name string, not a ForeignKey
    beginTime = models.DateTimeField(verbose_name='beginTime')
    endTime = models.DateTimeField(verbose_name='endTime')
    content = models.TextField(verbose_name='content')
    userName = models.CharField(verbose_name='userName', max_length=40)  # presumably the creating/owning user -- confirm
class Process(models.Model):
    """One step in a contract's workflow history."""
    conNum = models.IntegerField(verbose_name='conNum')  # Contract.num (plain int, not a ForeignKey)
    type = models.IntegerField(verbose_name='type')      # integer code; meaning not defined in this file
    state = models.IntegerField(verbose_name='state')    # integer code; meaning not defined in this file
    userName = models.CharField(verbose_name='userName', max_length=40)
    content = models.TextField(verbose_name='content')
    time = models.DateTimeField(verbose_name='time')
class State(models.Model):
    """Timestamped state marker for a contract."""
    conNum = models.IntegerField(verbose_name='conNum')  # Contract.num (plain int, not a ForeignKey)
    type = models.IntegerField(verbose_name='type')      # integer code; meaning not defined in this file
    time = models.DateTimeField(verbose_name='time')
class Log(models.Model):
    """Audit log entry: which user did what, and when."""
    userName = models.CharField(verbose_name='userName', max_length=40)
    content = models.TextField(verbose_name='content')
    time = models.DateTimeField(verbose_name='time')
class Customer(models.Model):
    """Customer master data, including contact and bank details."""
    num = models.AutoField(verbose_name='num', primary_key=True)
    name = models.CharField(verbose_name='name', max_length=40)
    address = models.CharField(verbose_name='address', max_length=100)
    tel = models.CharField(verbose_name='tel', max_length=20)
    fax = models.CharField(verbose_name='fax', max_length=20)
    # NOTE(review): max_length=20 is very tight for email addresses -- consider raising.
    email = models.EmailField(verbose_name='email', max_length=20)
    bank = models.CharField(verbose_name='bank', max_length=50)
    account = models.CharField(verbose_name='account', max_length=50)
    remark = models.CharField(verbose_name='remark', max_length=200)
class Attachment(models.Model):
    """A file attached to a contract.

    Fix: stray dataset residue fused onto the final field line removed.
    NOTE(review): conNum as the primary key limits each contract to a single
    attachment -- confirm this is intended.
    """
    conNum = models.CharField(verbose_name='conNum', max_length=20, primary_key=True)
    cusName = models.CharField(verbose_name='cusName', max_length=100)
    fileName = models.CharField(verbose_name='fileName', max_length=100)
    path = models.CharField(verbose_name='path', max_length=100)
    type = models.CharField(verbose_name='type', max_length=20)
    uploadTime = models.DateTimeField(verbose_name='uploadTime')
    file = models.FileField(verbose_name='file')
from django.test import TestCase
from .models import UserProfile
class TestUserProfileModel(TestCase):
def setUp(self):
UserProfile.objects.create(
default_full_name='test default full name',
default_phone_number='test default phone number',
default_street_address1='test street address1',
default_street_address2='test street address2',
default_town_or_city='test town or city',
default_county='test county',
default_postcode='test postcode',
default_country='test country',
)
def test_user_label(self):
profile = UserProfile.objects.get(id=1)
field_label = profile._meta.get_field('user').verbose_name
self.assertEqual(field_label, 'user')
def test_default_full_name_label(self):
profile = UserProfile.objects.get(id=1)
field_label = profile._meta.get_field('default_full_name').verbose_name
self.assertEqual(field_label, 'default full name')
def test_default_full_name_max_length(self):
profile = UserProfile.objects.get(id=1)
max_length = profile._meta.get_field('default_full_name').max_length
self.assertEqual(max_length, 50)
def test_default_phone_number_label(self):
profile = UserProfile.objects.get(id=1)
field_label = profile._meta.get_field(
'default_phone_number').verbose_name
self.assertEqual(field_label, 'default phone number')
def test_default_phone_number_max_length(self):
profile = UserProfile.objects.get(id=1)
max_length = profile._meta.get_field('default_phone_number').max_length
self.assertEqual(max_length, 20)
def test_default_street_address1_label(self):
profile = UserProfile.objects.get(id=1)
field_label = profile._meta.get_field(
'default_street_address1').verbose_name
self.assertEqual(field_label, 'default street address1')
def test_default_street_address1_length(self):
profile = UserProfile.objects.get(id=1)
max_length = profile._meta.get_field(
'default_street_address1').max_length
self.assertEqual(max_length, 80)
def test_default_street_address2_label(self):
profile = UserProfile.objects.get(id=1)
field_label = profile._meta.get_field(
'default_street_address2').verbose_name
self.assertEqual(field_label, 'default street address2')
def test_default_street_address2_length(self):
profile = UserProfile.objects.get(id=1)
max_length = profile._meta.get_field(
'default_street_address2').max_length
self.assertEqual(max_length, 80)
def test_default_town_or_city_label(self):
profile = UserProfile.objects.get(id=1)
field_label = profile._meta.get_field(
'default_town_or_city').verbose_name
self.assertEqual(field_label, 'default town or city')
def test_default_town_or_city_max_length(self):
profile = UserProfile.objects.get(id=1)
max_length = profile._meta.get_field('default_town_or_city').max_length
self.assertEqual(max_length, 40)
def test_default_postcode_label(self):
profile = UserProfile.objects.get(id=1)
field_label = profile._meta.get_field('default_postcode').verbose_name
self.assertEqual(field_label, 'default postcode')
def test_default_postcode_max_length(self):
profile = UserProfile.objects.get(id=1)
max_length = profile._meta.get_field('default_postcode').max_length
self.assertEqual(max_length, 20)
def test_default_county_label(self):
profile = UserProfile.objects.get(id=1)
field_label = profile._meta.get_field('default_county').verbose_name
self.assertEqual(field_label, 'default county')
def test_default_county_max_length(self):
profile = UserProfile.objects.get(id=1)
max_length = profile._meta.get_field('default_county').max_length
self.assertEqual(max_length, 80)
def test_default_default_country_label(self):
profile = UserProfile.objects.get(id=1)
field_label = profile._meta.get_field('default_country').verbose_name
self.assertEqual(field_label, 'default country')
def test_default_country_blank_label(self):
profile = UserProfile.objects.get(id=1)
blank_label = profile._meta.get_field('default_country').blank_label
        self.assertEqual(blank_label, 'Country')
from django.test import TestCase
from .models import UserProfile
class TestUserProfileModel(TestCase):
    """Verify field metadata (verbose names, max lengths) on UserProfile.

    Fixes: every test previously re-fetched the object with a hard-coded
    `get(id=1)` (pk values are not guaranteed); the instance created in
    setUp is now reused. Stray dataset residue fused onto the final
    assertion line removed.
    """

    def setUp(self):
        # NOTE(review): no `user` is supplied here; if UserProfile.user is a
        # required field this create() will fail -- confirm against the model.
        self.profile = UserProfile.objects.create(
            default_full_name='test default full name',
            default_phone_number='test default phone number',
            default_street_address1='test street address1',
            default_street_address2='test street address2',
            default_town_or_city='test town or city',
            default_county='test county',
            default_postcode='test postcode',
            default_country='test country',
        )

    def _field(self, name):
        # Helper: look up a model field's metadata by name.
        return self.profile._meta.get_field(name)

    def test_user_label(self):
        self.assertEqual(self._field('user').verbose_name, 'user')

    def test_default_full_name_label(self):
        self.assertEqual(self._field('default_full_name').verbose_name,
                         'default full name')

    def test_default_full_name_max_length(self):
        self.assertEqual(self._field('default_full_name').max_length, 50)

    def test_default_phone_number_label(self):
        self.assertEqual(self._field('default_phone_number').verbose_name,
                         'default phone number')

    def test_default_phone_number_max_length(self):
        self.assertEqual(self._field('default_phone_number').max_length, 20)

    def test_default_street_address1_label(self):
        self.assertEqual(self._field('default_street_address1').verbose_name,
                         'default street address1')

    def test_default_street_address1_length(self):
        self.assertEqual(self._field('default_street_address1').max_length, 80)

    def test_default_street_address2_label(self):
        self.assertEqual(self._field('default_street_address2').verbose_name,
                         'default street address2')

    def test_default_street_address2_length(self):
        self.assertEqual(self._field('default_street_address2').max_length, 80)

    def test_default_town_or_city_label(self):
        self.assertEqual(self._field('default_town_or_city').verbose_name,
                         'default town or city')

    def test_default_town_or_city_max_length(self):
        self.assertEqual(self._field('default_town_or_city').max_length, 40)

    def test_default_postcode_label(self):
        self.assertEqual(self._field('default_postcode').verbose_name,
                         'default postcode')

    def test_default_postcode_max_length(self):
        self.assertEqual(self._field('default_postcode').max_length, 20)

    def test_default_county_label(self):
        self.assertEqual(self._field('default_county').verbose_name,
                         'default county')

    def test_default_county_max_length(self):
        self.assertEqual(self._field('default_county').max_length, 80)

    def test_default_default_country_label(self):
        self.assertEqual(self._field('default_country').verbose_name,
                         'default country')

    def test_default_country_blank_label(self):
        self.assertEqual(self._field('default_country').blank_label, 'Country')
import os
from pybuildtool import BaseTask, expand_resource
tool_name = __name__
class Task(BaseTask):
name = tool_name
workdir = None
def prepare(self):
cfg = self.conf
args = self.args
c = cfg.get('work_dir')
if c:
self.workdir = expand_resource(self.group, c)
c = cfg.get('mode', os.environ.get('NODE_ENV'))
if not c:
if self.bld.variant in ('prod', 'production'):
c = 'production'
else:
c = 'development'
args.append('--mode=' + c)
self.add_bool_args('debug', 'verbose', 'progress', 'output_pathinfo',
'cache', 'watch_stdin', 'watch_poll', 'hot', 'labeled_modules',
'bail', 'profile', 'optimize_minimize', 'color', 'hide_modules',
'display_modules', 'display_chunks', 'display_entrypoints',
'display_origins', 'display_cached', 'display_cached_assets',
'display_reasons', 'display_depth', 'display_used_exports',
'display_provided_exports', 'display_error_details')
self.add_dict_args('module_bind', 'module_bind_pre', 'module_bind_post',
'define', 'provide', 'resolve_alias', 'resolve_loader_alias',
opt_val_sep=' ')
self.add_int_args('watch_aggregate_timeout', 'optimize_max_chunks',
'optimize_min_chunk_size', 'display_max_modules')
self.add_list_args_multi('devtool', 'plugin', 'display_exclude')
self.add_list_args_multi('env', opt_val_sep='.')
self.add_list_args_multi('resolve_extensions', opt_val_sep=' ')
self.add_path_args('context', 'records_input_path')
self.add_path_list_args_multi('prefetch')
self.add_str_args('output_path', 'output_filename',
'output_chunk_filename', 'output_source_map_filename',
'output_public_path', 'output_jsonp_function', 'output_library',
'output_library_target', 'records_output_path', 'records_path',
'target', 'sort_modules_by', 'sort_chunks_by', 'sort_assets_by',
)
c = cfg.get('config_file')
if c:
args.append('--config=' + expand_resource(self.group, c))
c = cfg.get('entry', {})
for entry_name, entry_js_file in c.items():
args.append('--%s=%s' % (entry_name, expand_resource(
self.group, entry_js_file)))
def perform(self):
if len(self.file_out) > 1:
self.bld.fatal('%s at most produces one output' %\
tool_name.capitalize())
kwargs = {}
if self.workdir is not None:
kwargs['cwd'] = self.workdir
executable = self.env['%s_BIN' % tool_name.upper()]
return self.exec_command(
"{exe} {arg} {in_} {out}".format(
exe=executable,
arg=' '.join(self.args),
in_=' '.join(self.file_in),
out=' '.join(self.file_out),
),
**kwargs)
def configure(conf):
bin_path = 'node_modules/webpack-cli/bin/cli.js'
conf.start_msg("Checking for program '%s'" % tool_name)
if os.path.exists(bin_path):
bin_path = os.path.realpath(bin_path)
conf.end_msg(bin_path)
else:
conf.end_msg('not found', color='YELLOW')
bin_path = conf.find_program('webpack')[0]
    conf.env['%s_BIN' % tool_name.upper()] = bin_path
import os
from pybuildtool import BaseTask, expand_resource
tool_name = __name__
class Task(BaseTask):
    """Build task that invokes webpack(-cli) over the configured inputs."""
    name = tool_name
    # Optional working directory for the webpack invocation (from `work_dir`).
    workdir = None
    def prepare(self):
        """Translate the task's configuration dict into webpack CLI arguments."""
        cfg = self.conf
        args = self.args
        c = cfg.get('work_dir')
        if c:
            self.workdir = expand_resource(self.group, c)
        # Mode resolution order: explicit config, then NODE_ENV, then the
        # build variant ('prod'/'production' -> production, else development).
        c = cfg.get('mode', os.environ.get('NODE_ENV'))
        if not c:
            if self.bld.variant in ('prod', 'production'):
                c = 'production'
            else:
                c = 'development'
        args.append('--mode=' + c)
        # The add_*_args helpers (from BaseTask) map config keys to CLI
        # options; opt_val_sep controls the key/value separator where used.
        self.add_bool_args('debug', 'verbose', 'progress', 'output_pathinfo',
            'cache', 'watch_stdin', 'watch_poll', 'hot', 'labeled_modules',
            'bail', 'profile', 'optimize_minimize', 'color', 'hide_modules',
            'display_modules', 'display_chunks', 'display_entrypoints',
            'display_origins', 'display_cached', 'display_cached_assets',
            'display_reasons', 'display_depth', 'display_used_exports',
            'display_provided_exports', 'display_error_details')
        self.add_dict_args('module_bind', 'module_bind_pre', 'module_bind_post',
            'define', 'provide', 'resolve_alias', 'resolve_loader_alias',
            opt_val_sep=' ')
        self.add_int_args('watch_aggregate_timeout', 'optimize_max_chunks',
            'optimize_min_chunk_size', 'display_max_modules')
        self.add_list_args_multi('devtool', 'plugin', 'display_exclude')
        self.add_list_args_multi('env', opt_val_sep='.')
        self.add_list_args_multi('resolve_extensions', opt_val_sep=' ')
        self.add_path_args('context', 'records_input_path')
        self.add_path_list_args_multi('prefetch')
        self.add_str_args('output_path', 'output_filename',
            'output_chunk_filename', 'output_source_map_filename',
            'output_public_path', 'output_jsonp_function', 'output_library',
            'output_library_target', 'records_output_path', 'records_path',
            'target', 'sort_modules_by', 'sort_chunks_by', 'sort_assets_by',
            )
        c = cfg.get('config_file')
        if c:
            args.append('--config=' + expand_resource(self.group, c))
        # Each `entry` item becomes its own --<entry_name>=<file> argument.
        c = cfg.get('entry', {})
        for entry_name, entry_js_file in c.items():
            args.append('--%s=%s' % (entry_name, expand_resource(
                self.group, entry_js_file)))
    def perform(self):
        """Run webpack; supports at most one output file.

        Returns the exit status of the webpack command.
        """
        if len(self.file_out) > 1:
            self.bld.fatal('%s at most produces one output' %\
                tool_name.capitalize())
        kwargs = {}
        if self.workdir is not None:
            kwargs['cwd'] = self.workdir
        executable = self.env['%s_BIN' % tool_name.upper()]
        return self.exec_command(
            "{exe} {arg} {in_} {out}".format(
                exe=executable,
                arg=' '.join(self.args),
                in_=' '.join(self.file_in),
                out=' '.join(self.file_out),
            ),
            **kwargs)
def configure(conf):
    """Waf configure step: locate the webpack executable.

    Prefers the project-local webpack-cli script under node_modules and
    falls back to a `webpack` found on PATH; the resolved path is stored
    in conf.env['WEBPACK_BIN'] (via the tool-name key).
    Fix: stray dataset residue fused onto the final line removed.
    """
    bin_path = 'node_modules/webpack-cli/bin/cli.js'
    conf.start_msg("Checking for program '%s'" % tool_name)
    if os.path.exists(bin_path):
        bin_path = os.path.realpath(bin_path)
        conf.end_msg(bin_path)
    else:
        conf.end_msg('not found', color='YELLOW')
        bin_path = conf.find_program('webpack')[0]
    conf.env['%s_BIN' % tool_name.upper()] = bin_path
import click
import numpy as np
import rasterio
from scipy.stats import mode
from rio_alpha.utils import (
_convert_rgb,
_compute_continuous,
_debug_mode,
_search_image_edge,
_evaluate_count,
)
def discover_ndv(rgb_orig, debug, verbose):
"""Returns nodata value by calculating mode of RGB array
Parameters
----------
rgb_orig: ndarray
array of input pixels of shape (rows, cols, depth)
debug: Boolean
Enables matplotlib & printing of figures
verbose: Boolean
Prints extra information, like competing candidate values
Returns
-------
list of nodata value candidates or empty string if none found
"""
rgb_mod, rgb_mod_flat = _convert_rgb(rgb_orig)
# Full image mode bincount
mode_vals = mode(rgb_mod_flat)
candidate_original = [int((mode_vals[0])[0, i]) for i in range(3)]
# Find continuous values in RGB array
candidate_continuous, arr = _compute_continuous(rgb_mod, 1)
# If debug mode, print histograms & be verbose
if debug:
click.echo("Original image ndv candidate: %s" % (str(candidate_original)))
click.echo("Filtered image ndv candidate: %s" % (str(candidate_continuous)))
outplot = "/tmp/hist_plot.png"
_debug_mode(rgb_mod_flat, arr, outplot)
# Compare ndv candidates from full & squished image
candidate_list = [
i for i, j in zip(candidate_original, candidate_continuous) if i == j
]
# If candidates from original & filtered images match exactly,
# print value & exit
if len(candidate_list) == 3:
return candidate_list
# If candidates do not match exactly, continue vetting process
# by searching image edge for frequency of each candidate
elif len(candidate_list) < 3:
if verbose:
click.echo(
"Competing ndv candidates...searching "
"image collar for value frequency. "
"Candidate list: %s" % str(candidate_list)
)
count_img_edge_full, count_img_edge_continuous = _search_image_edge(
rgb_mod, candidate_original, candidate_continuous
)
if verbose:
for candidate in (candidate_original, candidate_continuous):
click.echo(
"Candidate value: %s "
"Candidate count: %s "
"Continuous count: %s"
% (
str(candidate),
str(count_img_edge_full),
str(count_img_edge_continuous),
)
)
output = _evaluate_count(
count_img_edge_full, count_img_edge_continuous, verbose
)
return output
else:
raise ValueError("Invalid candidate list {!r}".format(candidate_list))
def determine_nodata(src_path, user_nodata, discovery, debug, verbose):
"""Worker function for determining nodata
Parameters
----------
src_path: string
user_nodata: string/integer
User supplies the nodata value,
input a single value or a string of list
discovery: Boolean
determines nodata if alpha channel does not exist
or internal ndv does not exist
debug: Boolean
Enables matplotlib & printing of figures
verbose: Boolean
Prints extra information, like competing candidate values
Returns
-------
nodata value: string
string(int) or stringified array of values of
len == the number of bands.
For example, string([int(ndv), int(ndv), int(ndv)])
"""
if user_nodata:
return user_nodata
with rasterio.open(src_path, "r") as src:
count = src.count
if count == 4:
return "alpha"
else:
nodata = src.nodata
if nodata is None:
if discovery:
data = np.rollaxis(src.read(), 0, 3)
candidates = discover_ndv(data, debug, verbose)
if len(candidates) != 3:
return ""
else:
return "[{}, {}, {}]".format(*candidates)
else:
return ""
else:
                return "%s" % (str(int(nodata)))
import click
import numpy as np
import rasterio
from scipy.stats import mode
from rio_alpha.utils import (
_convert_rgb,
_compute_continuous,
_debug_mode,
_search_image_edge,
_evaluate_count,
)
def discover_ndv(rgb_orig, debug, verbose):
    """Returns nodata value by calculating mode of RGB array

    Parameters
    ----------
    rgb_orig: ndarray
        array of input pixels of shape (rows, cols, depth)
    debug: Boolean
        Enables matplotlib & printing of figures
    verbose: Boolean
        Prints extra information, like competing candidate values

    Returns
    -------
    list of nodata value candidates or empty string if none found
    """
    rgb_mod, rgb_mod_flat = _convert_rgb(rgb_orig)
    # Full image mode bincount
    # mode() returns (modal values, counts); [0][0, i] extracts the modal
    # value for band i.  NOTE(review): SciPy >= 1.9 changed mode()'s default
    # output shape (keepdims) -- confirm the pinned SciPy version.
    mode_vals = mode(rgb_mod_flat)
    candidate_original = [int((mode_vals[0])[0, i]) for i in range(3)]
    # Find continuous values in RGB array
    candidate_continuous, arr = _compute_continuous(rgb_mod, 1)
    # If debug mode, print histograms & be verbose
    if debug:
        click.echo("Original image ndv candidate: %s" % (str(candidate_original)))
        click.echo("Filtered image ndv candidate: %s" % (str(candidate_continuous)))
        outplot = "/tmp/hist_plot.png"
        _debug_mode(rgb_mod_flat, arr, outplot)
    # Compare ndv candidates from full & squished image
    # (zip truncates to the shorter input, so this list never exceeds 3)
    candidate_list = [
        i for i, j in zip(candidate_original, candidate_continuous) if i == j
    ]
    # If candidates from original & filtered images match exactly,
    # print value & exit
    if len(candidate_list) == 3:
        return candidate_list
    # If candidates do not match exactly, continue vetting process
    # by searching image edge for frequency of each candidate
    elif len(candidate_list) < 3:
        if verbose:
            click.echo(
                "Competing ndv candidates...searching "
                "image collar for value frequency. "
                "Candidate list: %s" % str(candidate_list)
            )
        count_img_edge_full, count_img_edge_continuous = _search_image_edge(
            rgb_mod, candidate_original, candidate_continuous
        )
        if verbose:
            for candidate in (candidate_original, candidate_continuous):
                click.echo(
                    "Candidate value: %s "
                    "Candidate count: %s "
                    "Continuous count: %s"
                    % (
                        str(candidate),
                        str(count_img_edge_full),
                        str(count_img_edge_continuous),
                    )
                )
        output = _evaluate_count(
            count_img_edge_full, count_img_edge_continuous, verbose
        )
        return output
    else:
        # Unreachable in practice (len(candidate_list) <= 3); defensive guard.
        raise ValueError("Invalid candidate list {!r}".format(candidate_list))
def determine_nodata(src_path, user_nodata, discovery, debug, verbose):
    """Worker function for determining nodata

    Parameters
    ----------
    src_path: string
        path to the source raster
    user_nodata: string/integer
        User supplies the nodata value,
        input a single value or a string of list
    discovery: Boolean
        determines nodata if alpha channel does not exist
        or internal ndv does not exist
    debug: Boolean
        Enables matplotlib & printing of figures
    verbose: Boolean
        Prints extra information, like competing candidate values

    Returns
    -------
    nodata value: string
        string(int) or stringified array of values of
        len == the number of bands.
        For example, string([int(ndv), int(ndv), int(ndv)])

    Fixes: stray dataset residue fused onto the final return line removed;
    nesting flattened with guard clauses (behavior unchanged).
    """
    # An explicit user-supplied value wins outright.
    if user_nodata:
        return user_nodata
    with rasterio.open(src_path, "r") as src:
        if src.count == 4:
            # Four bands: treat the fourth as an alpha channel.
            return "alpha"
        nodata = src.nodata
        if nodata is not None:
            return str(int(nodata))
        if not discovery:
            return ""
        # Reorder to (rows, cols, bands) as expected by discover_ndv.
        data = np.rollaxis(src.read(), 0, 3)
        candidates = discover_ndv(data, debug, verbose)
        if len(candidates) != 3:
            return ""
        return "[{}, {}, {}]".format(*candidates)
import json
import io
import datetime
import logging
import boto3
import pandas
import altair
from shared import S3_scraper_index
from twitter_shared import TwitterAPI
from plot_shared import get_chrome_driver
# Arrows prefixed to week-on-week comparisons in tweets: a decrease in
# registered deaths is "good" (down arrow), an increase is "bad" (up arrow).
good_symb = '\u2193'
bad_symb = '\u2191'
def colclean(old):
    """Collapse a verbose NISRA spreadsheet header to its canonical name.

    Returns the first known prefix that *old* starts with, or *old*
    unchanged when no prefix matches.
    """
    known = ('Hospital', 'Care Home', 'Hospice', 'Home', 'Other', 'Total', 'Week Ending')
    return next((prefix for prefix in known if old.startswith(prefix)), old)
def lambda_handler(event, context):
    """Tweet a summary of the latest NISRA weekly deaths spreadsheet.

    `event` is a list of change records ({keyname, url, filedate, ...})
    pointing at newly uploaded Excel files in S3; `context` is the
    (unused) AWS Lambda context object.  Returns a 200 response whose
    body lists what was (or was not) tweeted.
    """
    # Get the secret
    sm = boto3.client('secretsmanager')
    secretobj = sm.get_secret_value(SecretId='ni-covid-tweets')
    secret = json.loads(secretobj['SecretString'])
    tweets = []
    # Download the most recently updated Excel file
    s3 = boto3.client('s3')
    for change in event:
        obj = s3.get_object(Bucket=secret['bucketname'],Key=change['keyname'])['Body']
        stream = io.BytesIO(obj.read())
        # Load test data and add extra fields
        df = pandas.read_excel(stream,engine='openpyxl',sheet_name='Table 7', header=3)
        df.dropna('columns',how='all',inplace=True)
        # Collapse verbose spreadsheet headers to canonical names (colclean).
        df.rename(columns=colclean,inplace=True)
        df.dropna('rows',subset=['Total'],inplace=True)
        # Get the latest dates with values for tests and rolling
        df['date'] = pandas.to_datetime(df['Week Ending'], format='%d/%m/%Y')
        df.sort_values('date', inplace=True)
        latest = df.iloc[-1]
        # Check against previous day's reports
        status = S3_scraper_index(s3, secret['bucketname'], secret['nisra-deaths-index'])
        index = status.get_dict()
        plots = []
        if latest['Total'] == 0:
            tweet = '''No deaths registered in Northern Ireland, week ended {date}
'''.format(
                date=latest['date'].strftime('%A %-d %B %Y'),
            )
        else:
            if latest['Total'] == 1:
                tweet = '''One death registered in Northern Ireland, week ended {date}, in:
'''.format(
                    date=latest['date'].strftime('%A %-d %B %Y')
                )
            else:
                tweet = '''{deaths:,} deaths registered in Northern Ireland, week ended {date}, in:
'''.format(
                    date=latest['date'].strftime('%A %-d %B %Y'),
                    deaths=int(latest['Total'])
                )
            # One bullet per place of death that recorded any deaths.
            for name in ['Hospital', 'Care Home', 'Hospice', 'Home', 'Other']:
                if latest[name] > 0:
                    tweet += '\u2022 %s: %s\n' %(name, int(latest[name]))
            tweet += '\n'
        if len(df) > 1:
            # Week-on-week comparison line with up/down arrow.
            prev = df.iloc[-2]
            diff = latest['Total'] - prev['Total']
            tweet += '''{symb} {diff} {comp} than previous week
'''.format(
                symb=good_symb if diff < 0 else bad_symb,
                diff=abs(int(diff)),
                comp='fewer' if diff < 0 else 'more'
            )
        try:
            driver = get_chrome_driver()
            plots = []
            if driver is None:
                logging.error('Failed to start chrome')
            else:
                # Last 84 days (12 weeks), melted to long form for a
                # stacked area chart by place of death.
                toplot = df[(df['Week Ending'] > df['Week Ending'].max()-pandas.to_timedelta(84, unit='d'))]
                toplot = toplot.drop(columns=['Week of Death','date','Total']).melt(id_vars='Week Ending', var_name='Location', value_name='Deaths')
                print(toplot)
                p = altair.vconcat(
                    altair.Chart(
                        toplot
                    ).mark_area().encode(
                        x = altair.X('Week Ending:T', axis=altair.Axis(title='Week of death')),
                        y = altair.Y('sum(Deaths):Q', axis=altair.Axis(title='Deaths', orient="right", tickMinStep=1)),
                        color=altair.Color('Location', sort=altair.SortField('order',order='descending')),
                    ).properties(
                        height=450,
                        width=800,
                        title='NI COVID-19 Deaths reported by NISRA from %s to %s' %(toplot['Week Ending'].min().strftime('%-d %B %Y'), toplot['Week Ending'].max().strftime('%-d %B %Y'))
                    ),
                ).properties(
                    title=altair.TitleParams(
                        ['Data from NISRA',
                        'https://twitter.com/ni_covid19_data on %s' %datetime.datetime.now().date().strftime('%A %-d %B %Y')],
                        baseline='bottom',
                        orient='bottom',
                        anchor='end',
                        fontWeight='normal',
                        fontSize=10,
                        dy=10
                    ),
                )
                plotname = 'nisra-deaths-time-%s.png'%datetime.datetime.now().date().strftime('%Y-%d-%m')
                plotstore = io.BytesIO()
                p.save(fp=plotstore, format='png', method='selenium', webdriver=driver)
                plotstore.seek(0)
                plots.append({'name': plotname, 'store': plotstore})
        except:
            # Plotting is best-effort: the tweet still goes out without it.
            logging.exception('Error creating plot')
        tweets.append({
            'text': tweet,
            'url': change['url'],
            'notweet': change.get('notweet'),
            'filedate': change['filedate'],
            'plots': plots
        })
    # Suppress exact-duplicate tweet texts, keeping the first occurrence.
    donottweet = []
    if len(tweets) > 1:
        for i in range(1,len(tweets)):
            for j in range(0, i):
                if (tweets[i]['text'] == tweets[j]['text']):
                    donottweet.append(i)
    messages = []
    for idx in range(len(tweets)):
        tweet = tweets[idx]['text'] + tweets[idx]['url']
        if (idx not in donottweet):
            if tweets[idx].get('notweet') is not True:
                api = TwitterAPI(secret['twitter_apikey'], secret['twitter_apisecretkey'], secret['twitter_accesstoken'], secret['twitter_accesstokensecret'])
                upload_ids = api.upload_multiple(tweets[idx]['plots'])
                # NOTE(review): `change` here is whatever the download loop
                # left behind (its last item), not the change matching
                # `idx` — confirm this is intended for multi-change events.
                if change.get('testtweet') is True:
                    # Test mode: send as a DM instead of a public tweet.
                    if len(upload_ids) > 0:
                        resp = api.dm(secret['twitter_dmaccount'], tweet, upload_ids[0])
                    else:
                        resp = api.dm(secret['twitter_dmaccount'], tweet)
                    messages.append('Tweeted DM ID %s' %(resp.id))
                else:
                    if len(upload_ids) > 0:
                        resp = api.tweet(tweet, media_ids=upload_ids)
                    else:
                        resp = api.tweet(tweet)
                    messages.append('Tweeted ID %s, ' %resp.id)
                    # Update the file index
                    for i in range(len(index)):
                        if index[i]['filedate'] == tweets[idx]['filedate']:
                            index[i]['tweet'] = resp.id
                            break
                    status.put_dict(index)
                    messages[-1] += ('updated %s' %secret['nisra-deaths-index'])
            else:
                messages.append('Did not tweet')
                print(tweet)
        else:
            messages.append('Duplicate found %s, did not tweet, ' %tweets[idx]['filedate'])
    return {
        "statusCode": 200,
        "body": json.dumps({
            "message:": messages,
        }),
    }
import io
import datetime
import logging
import boto3
import pandas
import altair
from shared import S3_scraper_index
from twitter_shared import TwitterAPI
from plot_shared import get_chrome_driver
# Tweet arrows: down = fewer deaths than last week, up = more.
good_symb = '\u2193'
bad_symb = '\u2191'
def colclean(old):
    """Map a spreadsheet column header to its canonical prefix name.

    Returns the first known prefix that `old` starts with, else `old`
    unchanged.
    """
    for name in ['Hospital', 'Care Home', 'Hospice', 'Home', 'Other', 'Total', 'Week Ending']:
        if old.startswith(name):
            return name
    return old
def lambda_handler(event, context):
    """Tweet a summary of the latest NISRA weekly deaths spreadsheet.

    `event` is a list of change records ({keyname, url, filedate, ...})
    pointing at newly uploaded Excel files in S3; `context` is the
    (unused) AWS Lambda context object.
    """
    # Get the secret
    sm = boto3.client('secretsmanager')
    secretobj = sm.get_secret_value(SecretId='ni-covid-tweets')
    secret = json.loads(secretobj['SecretString'])
    tweets = []
    # Download the most recently updated Excel file
    s3 = boto3.client('s3')
    for change in event:
        obj = s3.get_object(Bucket=secret['bucketname'],Key=change['keyname'])['Body']
        stream = io.BytesIO(obj.read())
        # Load test data and add extra fields
        df = pandas.read_excel(stream,engine='openpyxl',sheet_name='Table 7', header=3)
        df.dropna('columns',how='all',inplace=True)
        df.rename(columns=colclean,inplace=True)
        df.dropna('rows',subset=['Total'],inplace=True)
        # Get the latest dates with values for tests and rolling
        df['date'] = pandas.to_datetime(df['Week Ending'], format='%d/%m/%Y')
        df.sort_values('date', inplace=True)
        latest = df.iloc[-1]
        # Check against previous day's reports
        status = S3_scraper_index(s3, secret['bucketname'], secret['nisra-deaths-index'])
        index = status.get_dict()
        plots = []
        if latest['Total'] == 0:
            tweet = '''No deaths registered in Northern Ireland, week ended {date}
'''.format(
                date=latest['date'].strftime('%A %-d %B %Y'),
            )
        else:
            if latest['Total'] == 1:
                tweet = '''One death registered in Northern Ireland, week ended {date}, in:
'''.format(
                    date=latest['date'].strftime('%A %-d %B %Y')
                )
            else:
                tweet = '''{deaths:,} deaths registered in Northern Ireland, week ended {date}, in:
'''.format(
                    date=latest['date'].strftime('%A %-d %B %Y'),
                    deaths=int(latest['Total'])
                )
            # One bullet per place of death with any deaths this week.
            for name in ['Hospital', 'Care Home', 'Hospice', 'Home', 'Other']:
                if latest[name] > 0:
                    tweet += '\u2022 %s: %s\n' %(name, int(latest[name]))
            tweet += '\n'
        if len(df) > 1:
            # Week-on-week comparison line.
            prev = df.iloc[-2]
            diff = latest['Total'] - prev['Total']
            tweet += '''{symb} {diff} {comp} than previous week
'''.format(
                symb=good_symb if diff < 0 else bad_symb,
                diff=abs(int(diff)),
                comp='fewer' if diff < 0 else 'more'
            )
        try:
            driver = get_chrome_driver()
            plots = []
            if driver is None:
                logging.error('Failed to start chrome')
            else:
                # Last 84 days (12 weeks), stacked by place of death.
                toplot = df[(df['Week Ending'] > df['Week Ending'].max()-pandas.to_timedelta(84, unit='d'))]
                toplot = toplot.drop(columns=['Week of Death','date','Total']).melt(id_vars='Week Ending', var_name='Location', value_name='Deaths')
                print(toplot)
                p = altair.vconcat(
                    altair.Chart(
                        toplot
                    ).mark_area().encode(
                        x = altair.X('Week Ending:T', axis=altair.Axis(title='Week of death')),
                        y = altair.Y('sum(Deaths):Q', axis=altair.Axis(title='Deaths', orient="right", tickMinStep=1)),
                        color=altair.Color('Location', sort=altair.SortField('order',order='descending')),
                    ).properties(
                        height=450,
                        width=800,
                        title='NI COVID-19 Deaths reported by NISRA from %s to %s' %(toplot['Week Ending'].min().strftime('%-d %B %Y'), toplot['Week Ending'].max().strftime('%-d %B %Y'))
                    ),
                ).properties(
                    title=altair.TitleParams(
                        ['Data from NISRA',
                        'https://twitter.com/ni_covid19_data on %s' %datetime.datetime.now().date().strftime('%A %-d %B %Y')],
                        baseline='bottom',
                        orient='bottom',
                        anchor='end',
                        fontWeight='normal',
                        fontSize=10,
                        dy=10
                    ),
                )
                plotname = 'nisra-deaths-time-%s.png'%datetime.datetime.now().date().strftime('%Y-%d-%m')
                plotstore = io.BytesIO()
                p.save(fp=plotstore, format='png', method='selenium', webdriver=driver)
                plotstore.seek(0)
                plots.append({'name': plotname, 'store': plotstore})
        except:
            # Plotting is best-effort: the tweet still goes out without it.
            logging.exception('Error creating plot')
        tweets.append({
            'text': tweet,
            'url': change['url'],
            'notweet': change.get('notweet'),
            'filedate': change['filedate'],
            'plots': plots
        })
    # Suppress exact-duplicate tweet texts, keeping the first occurrence.
    donottweet = []
    if len(tweets) > 1:
        for i in range(1,len(tweets)):
            for j in range(0, i):
                if (tweets[i]['text'] == tweets[j]['text']):
                    donottweet.append(i)
    messages = []
    for idx in range(len(tweets)):
        tweet = tweets[idx]['text'] + tweets[idx]['url']
        if (idx not in donottweet):
            if tweets[idx].get('notweet') is not True:
                api = TwitterAPI(secret['twitter_apikey'], secret['twitter_apisecretkey'], secret['twitter_accesstoken'], secret['twitter_accesstokensecret'])
                upload_ids = api.upload_multiple(tweets[idx]['plots'])
                # NOTE(review): `change` is the last item of the loop above,
                # not the change matching `idx` — confirm intended.
                if change.get('testtweet') is True:
                    if len(upload_ids) > 0:
                        resp = api.dm(secret['twitter_dmaccount'], tweet, upload_ids[0])
                    else:
                        resp = api.dm(secret['twitter_dmaccount'], tweet)
                    messages.append('Tweeted DM ID %s' %(resp.id))
                else:
                    if len(upload_ids) > 0:
                        resp = api.tweet(tweet, media_ids=upload_ids)
                    else:
                        resp = api.tweet(tweet)
                    messages.append('Tweeted ID %s, ' %resp.id)
                    # Update the file index
                    for i in range(len(index)):
                        if index[i]['filedate'] == tweets[idx]['filedate']:
                            index[i]['tweet'] = resp.id
                            break
                    status.put_dict(index)
                    messages[-1] += ('updated %s' %secret['nisra-deaths-index'])
            else:
                messages.append('Did not tweet')
                print(tweet)
        else:
            messages.append('Duplicate found %s, did not tweet, ' %tweets[idx]['filedate'])
    return {
        "statusCode": 200,
        "body": json.dumps({
            "message:": messages,
        }),
    }
import unittest
import os
from dao.text_contents_dao import TextContentsDAO
class TestTextContentsDAO(unittest.TestCase):
    """Integration tests for TextContentsDAO against a scratch SQLite DB."""

    def setUp(self):
        # Recreate the schema before every test so cases stay independent.
        self.db_addr = "database/test_db.db"
        os.popen(f"sqlite3 {self.db_addr} < database/schema.sql")
        self.text_contents_dao = TextContentsDAO(self.db_addr)

    def tearDown(self):
        # Wipe all rows written by the test.
        self.text_contents_dao.clear_text_contents_table()

    def test_text_contents_are_created_correctly(self):
        text_contents = self.text_contents_dao.get_guild_text_contents(1234)
        self.assertEqual(len(text_contents), 0)
        self.text_contents_dao.create_text_content(1234)
        text_contents = self.text_contents_dao.get_guild_text_contents(1234)
        self.assertEqual(len(text_contents), 1)

    def test_text_content_is_edited_correctly(self):
        self.text_contents_dao.create_text_content(1234, "Test", "TEST")
        text_content = self.text_contents_dao.get_guild_text_contents_by_type(1234, "TEST")
        self.assertEqual(text_content["content"], "Test")
        self.text_contents_dao.edit_text_content(1234, "TEST", "Testing")
        text_content = self.text_contents_dao.get_guild_text_contents_by_type(1234, "TEST")
        self.assertEqual(text_content["content"], "Testing")

    def test_specific_text_contents_are_deleted_correctly(self):
        self.text_contents_dao.create_text_content(1234, "Test", "TEST")
        text_content = self.text_contents_dao.get_guild_text_contents_by_type(1234, "TEST")
        self.assertIsNotNone(text_content)
        self.text_contents_dao.delete_text_content(1234, "TEST")
        text_content = self.text_contents_dao.get_guild_text_contents_by_type(1234, "TEST")
        self.assertIsNone(text_content)

    def test_guild_text_contents_are_deleted_correctly(self):
        # Deleting one guild's rows must not touch another guild's rows.
        self.text_contents_dao.create_text_content(1234)
        self.text_contents_dao.create_text_content(2345)
        text_contents1 = self.text_contents_dao.get_guild_text_contents(1234)
        text_contents2 = self.text_contents_dao.get_guild_text_contents(2345)
        self.assertEqual(len(text_contents1), 1)
        self.assertEqual(len(text_contents2), 1)
        self.text_contents_dao.delete_guild_text_contents(1234)
        text_contents1 = self.text_contents_dao.get_guild_text_contents(1234)
        text_contents2 = self.text_contents_dao.get_guild_text_contents(2345)
        self.assertEqual(len(text_contents1), 0)
        self.assertEqual(len(text_contents2), 1)
import os
from dao.text_contents_dao import TextContentsDAO
class TestTextContentsDAO(unittest.TestCase):
    """Integration tests for TextContentsDAO against a scratch SQLite DB."""

    def setUp(self):
        # Fresh schema per test.
        self.db_addr = "database/test_db.db"
        os.popen(f"sqlite3 {self.db_addr} < database/schema.sql")
        self.text_contents_dao = TextContentsDAO(self.db_addr)

    def tearDown(self):
        self.text_contents_dao.clear_text_contents_table()

    def test_text_contents_are_created_correctly(self):
        text_contents = self.text_contents_dao.get_guild_text_contents(1234)
        self.assertEqual(len(text_contents), 0)
        self.text_contents_dao.create_text_content(1234)
        text_contents = self.text_contents_dao.get_guild_text_contents(1234)
        self.assertEqual(len(text_contents), 1)

    def test_text_content_is_edited_correctly(self):
        self.text_contents_dao.create_text_content(1234, "Test", "TEST")
        text_content = self.text_contents_dao.get_guild_text_contents_by_type(1234, "TEST")
        self.assertEqual(text_content["content"], "Test")
        self.text_contents_dao.edit_text_content(1234, "TEST", "Testing")
        text_content = self.text_contents_dao.get_guild_text_contents_by_type(1234, "TEST")
        self.assertEqual(text_content["content"], "Testing")

    def test_specific_text_contents_are_deleted_correctly(self):
        self.text_contents_dao.create_text_content(1234, "Test", "TEST")
        text_content = self.text_contents_dao.get_guild_text_contents_by_type(1234, "TEST")
        self.assertIsNotNone(text_content)
        self.text_contents_dao.delete_text_content(1234, "TEST")
        text_content = self.text_contents_dao.get_guild_text_contents_by_type(1234, "TEST")
        self.assertIsNone(text_content)

    def test_guild_text_contents_are_deleted_correctly(self):
        # Deletion is scoped to a single guild id.
        self.text_contents_dao.create_text_content(1234)
        self.text_contents_dao.create_text_content(2345)
        text_contents1 = self.text_contents_dao.get_guild_text_contents(1234)
        text_contents2 = self.text_contents_dao.get_guild_text_contents(2345)
        self.assertEqual(len(text_contents1), 1)
        self.assertEqual(len(text_contents2), 1)
        self.text_contents_dao.delete_guild_text_contents(1234)
        text_contents1 = self.text_contents_dao.get_guild_text_contents(1234)
        text_contents2 = self.text_contents_dao.get_guild_text_contents(2345)
        self.assertEqual(len(text_contents1), 0)
        self.assertEqual(len(text_contents2), 1)
import os
import optparse
import pkg_resources
import subprocess
import sys
import zc.buildout.easy_install
import zc.recipe.egg
class TagsMaker(object):
    """zc.buildout recipe that installs a bin/ script running build_tags."""

    def __init__(self, buildout, name, options):
        self.buildout = buildout
        self.name = name
        self.options = options
        # We do this early so the "extends" functionality works before we get
        # to the other options below.
        self._delegated = zc.recipe.egg.Egg(buildout, name, options)
        options['script'] = os.path.join(buildout['buildout']['bin-directory'],
                                         options.get('script', self.name),
                                         )
        if not options.get('working-directory', ''):
            options['location'] = os.path.join(
                buildout['buildout']['parts-directory'], name)

    def install(self):
        options = self.options
        generated = []
        eggs, ws = self._delegated.working_set(('z3c.recipe.tag',))
        wd = options.get('working-directory', '')
        if not wd:
            # No explicit working directory: use (and create) our part dir.
            wd = options['location']
            if os.path.exists(wd):
                assert os.path.isdir(wd)
            else:
                os.mkdir(wd)
                generated.append(wd)
        initialization = initialization_template % (
            self.buildout['buildout']['directory'])
        # Optional [environment] section: exported as os.environ settings.
        env_section = options.get('environment', '').strip()
        if env_section:
            env = self.buildout[env_section]
            for key, value in env.items():
                initialization += env_template % (key, value)
        initialization_section = options.get('initialization', '').strip()
        if initialization_section:
            initialization += initialization_section
        arguments = options.get('defaults', '')
        if arguments:
            arguments = arguments + ' + sys.argv[1:]'
        # NOTE(review): `arguments` is computed but never passed to
        # scripts() below — confirm whether defaults are meant to apply.
        generated.extend(zc.buildout.easy_install.scripts(
            [(options['script'], 'z3c.recipe.tag', 'build_tags')],
            ws, options['executable'],
            self.buildout['buildout']['bin-directory'],
            extra_paths=self._delegated.extra_paths,
            initialization=initialization,
        ))
        return generated

    # Re-running the recipe is identical to installing it.
    update = install
# Prologue injected into the generated bin/ script: normalise argv[0] and
# chdir into the buildout root (filled in via %r) before building tags.
initialization_template = """import os
sys.argv[0] = os.path.abspath(sys.argv[0])
os.chdir(%r)
"""

# One line per [environment] entry, appended to the prologue.
env_template = """os.environ['%s'] = %r
"""
def getpath(candidates):
    """Locate the first of *candidates* that exists on $PATH.

    Candidate names are tried in order; for each name every $PATH entry
    is checked.  Raises RuntimeError when no candidate is found.
    """
    search_dirs = os.environ['PATH'].split(os.pathsep)
    candidate_paths = (
        os.path.join(directory, name)
        for name in candidates
        for directory in search_dirs
    )
    for candidate_path in candidate_paths:
        if os.path.exists(candidate_path):
            return candidate_path
    raise RuntimeError(
        "Can't find executable for any of: %s" % candidates)
class Builder:
    """Runs id-utils / ctags over sys.path and installs the result files."""

    def get_relpaths(self, paths):
        # Convert absolute paths to paths relative to the current directory.
        working_dir = os.getcwd()
        return [os.path.relpath(path, working_dir) for path in paths]

    def __call__(self, targets=None, languages=None, tag_relative=False):
        if not targets:
            targets = ('idutils', 'ctags_vi', 'ctags_emacs') # legacy behavior
        self.languages = languages or ''
        self.tag_relative = tag_relative
        # Index every existing directory on sys.path.
        paths = [path for path in sys.path
                 if os.path.isdir(path)]
        if self.tag_relative:
            # ctags will ignore --tag-relative=yes for absolute paths so we
            # must pass relative paths to it.
            paths = self.get_relpaths(paths)
        self.paths = paths
        results = {}
        for target in targets:
            # Each _build_* returns (tool candidates, argv tail,
            # temp output file, final output file).
            tool_candidates, arguments, source, destination = getattr(
                self, '_build_%s' % (target,))()
            arguments[0:0] = [getpath(tool_candidates)]
            res = subprocess.call(arguments)
            if res == 0:
                # Build into a temp name, then move into place.
                res = subprocess.call(['mv', source, destination])
            results[target] = res
        return results

    def _build_idutils(self):
        # mkid with the package's language map -> ID file.
        return [
            [
                'mkid'
            ], [
                '-m',
                pkg_resources.resource_filename(
                    "z3c.recipe.tag", "id-lang.map.txt"),
                '-o',
                'ID.new'
            ] + self.paths,
            'ID.new',
            'ID']

    def _build_ctags_vi(self):
        res = [['ctags-exuberant', 'ctags'],
               ['-R',
                '--python-kinds=-i',
                '-f',
                'tags.new'] + self.paths,
               'tags.new',
               'tags']
        if self.languages:
            res[1][0:0] = ['--languages=%s' % self.languages]
        if self.tag_relative:
            res[1][0:0] = ['--tag-relative=yes']
        return res

    def _build_ctags_emacs(self):
        # Same as the vi build plus -e, writing the Emacs TAGS filename.
        res = self._build_ctags_vi()
        res[1][0:0] = ['-e']
        res[3] = 'TAGS'
        return res

    def _build_ctags_bbedit(self):
        # BBEdit needs line-number excmds and extra fields; relative tags
        # are explicitly disabled.
        res = self._build_ctags_vi()
        try:
            res[1].remove('--tag-relative=yes')
        except ValueError:
            pass
        res[1][0:0] = [
            '--excmd=number', '--tag-relative=no', '--fields=+a+m+n+S']
        return res
def append_const(option, opt_str, value, parser, const):
    """optparse callback emulating the 'append_const' action.

    ('append_const' was only added in Py 2.5 and this package supported
    2.4.)  Accumulates *const* into parser.values.targets.
    """
    targets = getattr(parser.values, 'targets', None)
    if targets is None:
        targets = []
        parser.values.targets = targets
    targets.append(const)
def build_tags(args=None):
    """Command-line entry point: parse flags and run Builder on the targets."""
    parser = optparse.OptionParser()
    parser.add_option('-l', '--languages', dest='languages',
                      default='-JavaScript',
                      help='ctags comma-separated list of languages. '
                      'defaults to ``-JavaScript``')
    parser.add_option('-e', '--ctags-emacs', action='callback',
                      callback=append_const, callback_args=('ctags_emacs',),
                      help='flag to build emacs ctags ``TAGS`` file')
    parser.add_option('-v', '--ctags-vi', action='callback',
                      callback=append_const, callback_args=('ctags_vi',),
                      help='flag to build vi ctags ``tags`` file')
    parser.add_option('-b', '--ctags-bbedit', action='callback',
                      callback=append_const, callback_args=('ctags_bbedit',),
                      help='flag to build bbedit ctags ``tags`` file')
    parser.add_option('-i', '--idutils', action='callback',
                      callback=append_const, callback_args=('idutils',),
                      help='flag to build idutils ``ID`` file')
    parser.add_option('-r', '--tag-relative', action='store_true',
                      dest='tag_relative', default=False,
                      help=('generate tags with paths relative to'
                            ' tags file instead of absolute paths'
                            ' (works with vim tags only)'))
    options, args = parser.parse_args(args)
    if args:
        parser.error('no arguments accepted')
    # 'targets' only exists if at least one callback option fired.
    targets = getattr(options, 'targets', None)
    if (targets and 'ctags_bbedit' in targets and 'ctags_vi' in targets):
        # Both formats write a file literally named 'tags'.
        parser.error('cannot build both vi and bbedit ctags files (same name)')
    builder = Builder()
    builder(targets, languages=options.languages,
            tag_relative=options.tag_relative)
# Optionally expose 'paver tags' when paver is installed.
try:
    import paver.easy
except ImportError:
    HAS_PAVER = False
else: # pragma: nocover
    HAS_PAVER = True
if HAS_PAVER: # pragma: nocover
    @paver.easy.task
    @paver.easy.consume_args
    def tags(args):
        """Build tags database file for emacs, vim, or bbedit"""
        build_tags(args)
import optparse
import pkg_resources
import subprocess
import sys
import zc.buildout.easy_install
import zc.recipe.egg
class TagsMaker(object):
    """zc.buildout recipe installing a bin/ script that runs build_tags."""

    def __init__(self, buildout, name, options):
        self.buildout = buildout
        self.name = name
        self.options = options
        # We do this early so the "extends" functionality works before we get
        # to the other options below.
        self._delegated = zc.recipe.egg.Egg(buildout, name, options)
        options['script'] = os.path.join(buildout['buildout']['bin-directory'],
                                         options.get('script', self.name),
                                         )
        if not options.get('working-directory', ''):
            options['location'] = os.path.join(
                buildout['buildout']['parts-directory'], name)

    def install(self):
        options = self.options
        generated = []
        eggs, ws = self._delegated.working_set(('z3c.recipe.tag',))
        wd = options.get('working-directory', '')
        if not wd:
            # Default to (and create) this part's directory.
            wd = options['location']
            if os.path.exists(wd):
                assert os.path.isdir(wd)
            else:
                os.mkdir(wd)
                generated.append(wd)
        initialization = initialization_template % (
            self.buildout['buildout']['directory'])
        env_section = options.get('environment', '').strip()
        if env_section:
            env = self.buildout[env_section]
            for key, value in env.items():
                initialization += env_template % (key, value)
        initialization_section = options.get('initialization', '').strip()
        if initialization_section:
            initialization += initialization_section
        arguments = options.get('defaults', '')
        if arguments:
            arguments = arguments + ' + sys.argv[1:]'
        # NOTE(review): `arguments` is never passed to scripts() — confirm.
        generated.extend(zc.buildout.easy_install.scripts(
            [(options['script'], 'z3c.recipe.tag', 'build_tags')],
            ws, options['executable'],
            self.buildout['buildout']['bin-directory'],
            extra_paths=self._delegated.extra_paths,
            initialization=initialization,
        ))
        return generated

    update = install
# Generated-script prologue: absolute argv[0], then chdir to buildout root.
initialization_template = """import os
sys.argv[0] = os.path.abspath(sys.argv[0])
os.chdir(%r)
"""

# Appended once per [environment] entry.
env_template = """os.environ['%s'] = %r
"""
def getpath(candidates):
    """Return the full path of the first candidate found on $PATH.

    Raises RuntimeError when none of the candidate names exist.
    """
    paths = os.environ['PATH'].split(os.pathsep)
    for c in candidates:
        for p in paths:
            full = os.path.join(p, c)
            if os.path.exists(full):
                return full
    raise RuntimeError(
        'Can\'t find executable for any of: %s' % candidates)
class Builder:
    """Runs id-utils / ctags over sys.path and installs the result files."""

    def get_relpaths(self, paths):
        working_dir = os.getcwd()
        return [os.path.relpath(path, working_dir) for path in paths]

    def __call__(self, targets=None, languages=None, tag_relative=False):
        if not targets:
            targets = ('idutils', 'ctags_vi', 'ctags_emacs') # legacy behavior
        self.languages = languages or ''
        self.tag_relative = tag_relative
        paths = [path for path in sys.path
                 if os.path.isdir(path)]
        if self.tag_relative:
            # ctags will ignore --tag-relative=yes for absolute paths so we
            # must pass relative paths to it.
            paths = self.get_relpaths(paths)
        self.paths = paths
        results = {}
        for target in targets:
            # (tool candidates, argv tail, temp file, final file)
            tool_candidates, arguments, source, destination = getattr(
                self, '_build_%s' % (target,))()
            arguments[0:0] = [getpath(tool_candidates)]
            res = subprocess.call(arguments)
            if res == 0:
                res = subprocess.call(['mv', source, destination])
            results[target] = res
        return results

    def _build_idutils(self):
        return [
            [
                'mkid'
            ], [
                '-m',
                pkg_resources.resource_filename(
                    "z3c.recipe.tag", "id-lang.map.txt"),
                '-o',
                'ID.new'
            ] + self.paths,
            'ID.new',
            'ID']

    def _build_ctags_vi(self):
        res = [['ctags-exuberant', 'ctags'],
               ['-R',
                '--python-kinds=-i',
                '-f',
                'tags.new'] + self.paths,
               'tags.new',
               'tags']
        if self.languages:
            res[1][0:0] = ['--languages=%s' % self.languages]
        if self.tag_relative:
            res[1][0:0] = ['--tag-relative=yes']
        return res

    def _build_ctags_emacs(self):
        # vi build plus -e, writing the Emacs TAGS filename.
        res = self._build_ctags_vi()
        res[1][0:0] = ['-e']
        res[3] = 'TAGS'
        return res

    def _build_ctags_bbedit(self):
        res = self._build_ctags_vi()
        try:
            res[1].remove('--tag-relative=yes')
        except ValueError:
            pass
        res[1][0:0] = [
            '--excmd=number', '--tag-relative=no', '--fields=+a+m+n+S']
        return res
def append_const(option, opt_str, value, parser, const):
    # 'append_const' action added in Py 2.5, and we're in 2.4 :-(
    # optparse callback: accumulate `const` into parser.values.targets.
    if getattr(parser.values, 'targets', None) is None:
        parser.values.targets = []
    parser.values.targets.append(const)
def build_tags(args=None):
    """CLI entry point: parse flags and run Builder for the chosen targets."""
    parser = optparse.OptionParser()
    parser.add_option('-l', '--languages', dest='languages',
                      default='-JavaScript',
                      help='ctags comma-separated list of languages. '
                      'defaults to ``-JavaScript``')
    parser.add_option('-e', '--ctags-emacs', action='callback',
                      callback=append_const, callback_args=('ctags_emacs',),
                      help='flag to build emacs ctags ``TAGS`` file')
    parser.add_option('-v', '--ctags-vi', action='callback',
                      callback=append_const, callback_args=('ctags_vi',),
                      help='flag to build vi ctags ``tags`` file')
    parser.add_option('-b', '--ctags-bbedit', action='callback',
                      callback=append_const, callback_args=('ctags_bbedit',),
                      help='flag to build bbedit ctags ``tags`` file')
    parser.add_option('-i', '--idutils', action='callback',
                      callback=append_const, callback_args=('idutils',),
                      help='flag to build idutils ``ID`` file')
    parser.add_option('-r', '--tag-relative', action='store_true',
                      dest='tag_relative', default=False,
                      help=('generate tags with paths relative to'
                            ' tags file instead of absolute paths'
                            ' (works with vim tags only)'))
    options, args = parser.parse_args(args)
    if args:
        parser.error('no arguments accepted')
    targets = getattr(options, 'targets', None)
    if (targets and 'ctags_bbedit' in targets and 'ctags_vi' in targets):
        # both formats write a file literally named 'tags'
        parser.error('cannot build both vi and bbedit ctags files (same name)')
    builder = Builder()
    builder(targets, languages=options.languages,
            tag_relative=options.tag_relative)
# Optionally expose 'paver tags' when paver is installed.
try:
    import paver.easy
except ImportError:
    HAS_PAVER = False
else: # pragma: nocover
    HAS_PAVER = True
if HAS_PAVER: # pragma: nocover
    @paver.easy.task
    @paver.easy.consume_args
    def tags(args):
        """Build tags database file for emacs, vim, or bbedit"""
        build_tags(args)
import aws_handler
import simple_logger
# Manual harness: replays a captured S3/SNS notification against the
# lambda handler (aws_handler.handle) with hand-picked object keys.
logger = simple_logger.logger()
logger.info('runnint test.py')  # NOTE(review): 'runnint' typo in log message
# Previously exercised keys, kept for quick switching:
# key = "dev-client-ben/library/draft/040117_Gemtone_Radiant_Nude_OPP_2_page.pdf"
# key = "dev-client-ben/library/draft/sub_folder/2016_BASF_CC_LysSun_TPP.pdf"
# key = "dev-client-ben/library/draft/AAssembled.pdf"
# key= "dev-client-ben/library/draft/_name_has_space+plus%2Bhiphen%27.pdf"
# key= "dev-client-ben/library/published/_name_has_space+plus%2Bhiphen%27.pdf"
# key = "dev-client-ben/library/draft/CC-FR-17-026-B01.pdf"
# key = "dev-client-ben/library/draft/TestWord.doc"
# key = "dev-client-ben/library/draft/TestWordX.docx"
key = "my-documents/dev-client-devfcarpentier/4887/Pwet.pdf"
# Plain S3 put-notification payload (defined but not passed to handle()).
event = {
    "Records": [
        {
            "eventVersion": "2.0",
            "eventTime": "1970-01-01T00:00:00.000Z",
            "requestParameters": {
                "sourceIPAddress": "127.0.0.1"
            },
            "s3": {
                "configurationId": "testConfigRule",
                "object": {
                    "eTag": "0123456789abcdef0123456789abcdef",
                    "sequencer": "0A1B2C3D4E5F678901",
                    "key": key,
                    "size": 292000
                },
                "bucket": {
                    "arn": "arn:aws:s3:::mybucket",
                    "name": "dev-beehivr-bucket",
                    "ownerIdentity": {
                        "principalId": "EXAMPLE"
                    }
                },
                "s3SchemaVersion": "1.0"
            },
            "responseElements": {
                "x-amz-id-2": "EXAMPLE123/5678abcdefghijklambdaisawesome/mnopqrstuvwxyzABCDEFGH",
                "x-amz-request-id": "EXAMPLE123456789"
            },
            "awsRegion": "us-east-1",
            "eventName": "ObjectCreated:Put",
            "userIdentity": {
                "principalId": "EXAMPLE"
            },
            "eventSource": "aws:s3"
        }
    ]
}
# The same S3 notification wrapped in an SNS envelope, as delivered in
# production; `key` is spliced into the JSON-encoded Message string.
sns_event = {
    "Records": [
        {
            "EventSource": "aws:sns",
            "EventVersion": "1.0",
            "EventSubscriptionArn": "arn:aws:sns:us-east-1:457807691790:dev-pdf-update:351e935b-5013-4129-b6cb-29013d2bb068",
            "Sns": {
                "Type": "Notification",
                "MessageId": "2428029d-a5d6-523b-81f0-5187b5151b61",
                "TopicArn": "arn:aws:sns:us-east-1:457807691790:dev-pdf-update",
                "Subject": "Amazon S3 Notification",
                "Message": "{\"Records\":[{\"eventVersion\":\"2.0\",\"eventSource\":\"aws:s3\",\"awsRegion\":\"us-east-1\",\"eventTime\":\"2018-03-07T16:08:34.929Z\",\"eventName\":\"ObjectCreated:Put\",\"userIdentity\":{\"principalId\":\"AWS:AIDAISHW4LNLCJT4KIH3W\"},\"requestParameters\":{\"sourceIPAddress\":\"192.168.127.12\"},\"responseElements\":{\"x-amz-request-id\":\"F58F47B513EA8109\",\"x-amz-id-2\":\"0lNGzL7p9BfzpkkwJVHpZ9zm0X8ccxlRSAwIlhCFYVjkU7iHBm5MyDcrhfJ4/H3TCzfyKBrDLK8=\"},\"s3\":{\"s3SchemaVersion\":\"1.0\",\"configurationId\":\"2a711272-168f-46bf-ad11-69786c37bdc1\",\"bucket\":{\"name\":\"dev-beehivr-bucket\",\"ownerIdentity\":{\"principalId\":\"A24ERPT90EN829\"},\"arn\":\"arn:aws:s3:::dev-beehivr-bucket\"},\"object\":{\"key\":\"" + key + "\",\"size\":554534,\"eTag\":\"c9ca7a036361a0d47656140ac317e9e2\",\"versionId\":\"rvszlc9KvXXZzZA7T9P.L9t8KGbo7Y8x\",\"sequencer\":\"005AA00E82C26B82F7\"}}}]}",
                "Timestamp": "2018-03-07T16:08:35.076Z",
                "SignatureVersion": "1",
                "Signature": "MqdP86bJvM6isRWIwniQU9TNHOE86wh0ON7f4fxlpLRQprMJTm5h4Yv2kq6OioAJrALP1/isqbJLCqjDByig1yC1sqgHED9UwvfwYRAlad0s7v+JAqa3RP8VYoMcolU278QaUMCx/7pdJATRNuRvxBHs770sg2nv+65ppL/HO5fXmYxDJuNi34bCdhnPlEnQQOkeJQTK6Fc90XyYWNUSrqYEPQfdk4VfaDPmGdl+LWPp4lmyWN0PX6xuydsdoHC+Rydihc6329xRp/BtRlpaarcWkefK5r429VimgiubwTqPDocEVpaqVKdpfIX+KNqyb0ZTCq2vhg+ET64Sv21/yA==",
                "SigningCertUrl": "https://sns.us-east-1.amazonaws.com/SimpleNotificationService-433026a4050d206028891664da859041.pem",
                "UnsubscribeUrl": "https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:457807691790:dev-pdf-update:351e935b-5013-4129-b6cb-29013d2bb068",
                "MessageAttributes": {}
            }
        }
    ]
}
context = {}
aws_handler.handle(sns_event, context)
import simple_logger
# Manual harness: replays a captured SNS-wrapped S3 notification against
# aws_handler.handle.
logger = simple_logger.logger()
logger.info('runnint test.py')  # NOTE(review): 'runnint' typo in log message
# Previously exercised keys, kept for quick switching:
# key = "dev-client-ben/library/draft/040117_Gemtone_Radiant_Nude_OPP_2_page.pdf"
# key = "dev-client-ben/library/draft/sub_folder/2016_BASF_CC_LysSun_TPP.pdf"
# key = "dev-client-ben/library/draft/AAssembled.pdf"
# key= "dev-client-ben/library/draft/_name_has_space+plus%2Bhiphen%27.pdf"
# key= "dev-client-ben/library/published/_name_has_space+plus%2Bhiphen%27.pdf"
# key = "dev-client-ben/library/draft/CC-FR-17-026-B01.pdf"
# key = "dev-client-ben/library/draft/TestWord.doc"
# key = "dev-client-ben/library/draft/TestWordX.docx"
key = "my-documents/dev-client-devfcarpentier/4887/Pwet.pdf"
# Plain S3 put-notification payload (defined but not passed to handle()).
event = {
    "Records": [
        {
            "eventVersion": "2.0",
            "eventTime": "1970-01-01T00:00:00.000Z",
            "requestParameters": {
                "sourceIPAddress": "127.0.0.1"
            },
            "s3": {
                "configurationId": "testConfigRule",
                "object": {
                    "eTag": "0123456789abcdef0123456789abcdef",
                    "sequencer": "0A1B2C3D4E5F678901",
                    "key": key,
                    "size": 292000
                },
                "bucket": {
                    "arn": "arn:aws:s3:::mybucket",
                    "name": "dev-beehivr-bucket",
                    "ownerIdentity": {
                        "principalId": "EXAMPLE"
                    }
                },
                "s3SchemaVersion": "1.0"
            },
            "responseElements": {
                "x-amz-id-2": "EXAMPLE123/5678abcdefghijklambdaisawesome/mnopqrstuvwxyzABCDEFGH",
                "x-amz-request-id": "EXAMPLE123456789"
            },
            "awsRegion": "us-east-1",
            "eventName": "ObjectCreated:Put",
            "userIdentity": {
                "principalId": "EXAMPLE"
            },
            "eventSource": "aws:s3"
        }
    ]
}
# SNS envelope around the same notification; `key` is spliced into Message.
sns_event = {
    "Records": [
        {
            "EventSource": "aws:sns",
            "EventVersion": "1.0",
            "EventSubscriptionArn": "arn:aws:sns:us-east-1:457807691790:dev-pdf-update:351e935b-5013-4129-b6cb-29013d2bb068",
            "Sns": {
                "Type": "Notification",
                "MessageId": "2428029d-a5d6-523b-81f0-5187b5151b61",
                "TopicArn": "arn:aws:sns:us-east-1:457807691790:dev-pdf-update",
                "Subject": "Amazon S3 Notification",
                "Message": "{\"Records\":[{\"eventVersion\":\"2.0\",\"eventSource\":\"aws:s3\",\"awsRegion\":\"us-east-1\",\"eventTime\":\"2018-03-07T16:08:34.929Z\",\"eventName\":\"ObjectCreated:Put\",\"userIdentity\":{\"principalId\":\"AWS:AIDAISHW4LNLCJT4KIH3W\"},\"requestParameters\":{\"sourceIPAddress\":\"192.168.127.12\"},\"responseElements\":{\"x-amz-request-id\":\"F58F47B513EA8109\",\"x-amz-id-2\":\"0lNGzL7p9BfzpkkwJVHpZ9zm0X8ccxlRSAwIlhCFYVjkU7iHBm5MyDcrhfJ4/H3TCzfyKBrDLK8=\"},\"s3\":{\"s3SchemaVersion\":\"1.0\",\"configurationId\":\"2a711272-168f-46bf-ad11-69786c37bdc1\",\"bucket\":{\"name\":\"dev-beehivr-bucket\",\"ownerIdentity\":{\"principalId\":\"A24ERPT90EN829\"},\"arn\":\"arn:aws:s3:::dev-beehivr-bucket\"},\"object\":{\"key\":\"" + key + "\",\"size\":554534,\"eTag\":\"c9ca7a036361a0d47656140ac317e9e2\",\"versionId\":\"rvszlc9KvXXZzZA7T9P.L9t8KGbo7Y8x\",\"sequencer\":\"005AA00E82C26B82F7\"}}}]}",
                "Timestamp": "2018-03-07T16:08:35.076Z",
                "SignatureVersion": "1",
                "Signature": "MqdP86bJvM6isRWIwniQU9TNHOE86wh0ON7f4fxlpLRQprMJTm5h4Yv2kq6OioAJrALP1/isqbJLCqjDByig1yC1sqgHED9UwvfwYRAlad0s7v+JAqa3RP8VYoMcolU278QaUMCx/7pdJATRNuRvxBHs770sg2nv+65ppL/HO5fXmYxDJuNi34bCdhnPlEnQQOkeJQTK6Fc90XyYWNUSrqYEPQfdk4VfaDPmGdl+LWPp4lmyWN0PX6xuydsdoHC+Rydihc6329xRp/BtRlpaarcWkefK5r429VimgiubwTqPDocEVpaqVKdpfIX+KNqyb0ZTCq2vhg+ET64Sv21/yA==",
                "SigningCertUrl": "https://sns.us-east-1.amazonaws.com/SimpleNotificationService-433026a4050d206028891664da859041.pem",
                "UnsubscribeUrl": "https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:457807691790:dev-pdf-update:351e935b-5013-4129-b6cb-29013d2bb068",
                "MessageAttributes": {}
            }
        }
    ]
}
context = {}
aws_handler.handle(sns_event, context)
import numpy as np
from scipy.ndimage import rotate
def check_extent(n, f, e):
if f > n:
d = f - n
f = n
e = e - d
return (e, f)
def make_patches(image, patch_shape=(8, 256, 256)):
nz, nc, ny, nx = image.shape
ez = 0
ex = 0
ey = 0
dz, dy, dx = patch_shape
patches = list()
while ez < nz:
ex = 0
ey = 0
fz = ez + dz
ez, fz = check_extent(nz, fz, ez)
print(ez)
while ey < ny:
ex = 0
fy = ey + dy
ey, fy = check_extent(ny, fy, ey)
#print('y', ey, fy)
while ex < nx:
fx = ex + dx
ex, fx = check_extent(nx, fx, ex)
#print('x', ex, fx, ey, fy, ez, fz)
patch = image[ez:fz, :, ey:fy, ex:fx]
#print(ex, ey, ez, patch.max(axis=(0,2,3)))
patches.append(patch)
ex += dx
ey += dy
ez += dz
#print(ez)
a = np.stack(patches)
return a
def augment_rotate(stack, step, axes=(-1, -2)):
angle = step
res = stack.copy()
while angle < 360:
#print(angle)
r = rotate(stack, angle, axes=axes, reshape=False)
res = np.concatenate([res, r], axis=0)
#print(res.shape)
angle += step
return res #np.concatenate([stack, r], axis = 0)
def reconstruct(patches, w, nx, ny):
image = np.zeros((ny, nx, patches.shape[-1]), dtype=patches.dtype)
xmax = 0
ymax = 0
xok = True
yok = True
patch_index = 0
while xok:
ymax = 0
yok = True
xs = xmax
xmax += w
if xmax >= nx:
xmax = nx
xs = nx - w
xok = False
while yok:
ys = ymax
ymax += w
if ymax > ny:
ymax = ny
ys = ny - w
yok = False
crop = 32
image[ys:ymax, xs:xmax, :] = patches[patch_index]
image[ys:ys + crop, :] = 0
image[:, xs:xs + crop, :] = 0
image[ymax - crop:ymax, :] = 0
image[:, xmax-crop:xmax] = 0
patch_index += 1
#print(patch_index, ys, ymax, xs, xmax, yok, xok)
return image
def patch_to_image(patches, w, nx, ny):
r1 = reconstruct(dp, 256, data.shape[1] , data.shape[0])
r2 = reconstruct(dp2, 256, data.shape[1] - 0, data.shape[0] - 128)
r3 = reconstruct(dp3, 256, data.shape[1] - 128, data.shape[0] - 0)
r4 = reconstruct(dp4, 256, data.shape[1] - 128, data.shape[0] - 128)
rr2 = np.zeros_like(r1)
rr2[128:, :, :] = r2
rr3 = np.zeros_like(r1)
rr3[:, 128:, :] = r3
rr4 = np.zeros_like(r1)
rr4[128:, 128:, :] = r4
rstack = np.stack((r1, rr2, rr3, rr4), axis=0)
r = rstack.max(axis=0) | PrepTrainingSet/make_patches_3d.py | import numpy as np
from scipy.ndimage import rotate
def check_extent(n, f, e):
if f > n:
d = f - n
f = n
e = e - d
return (e, f)
def make_patches(image, patch_shape=(8, 256, 256)):
nz, nc, ny, nx = image.shape
ez = 0
ex = 0
ey = 0
dz, dy, dx = patch_shape
patches = list()
while ez < nz:
ex = 0
ey = 0
fz = ez + dz
ez, fz = check_extent(nz, fz, ez)
print(ez)
while ey < ny:
ex = 0
fy = ey + dy
ey, fy = check_extent(ny, fy, ey)
#print('y', ey, fy)
while ex < nx:
fx = ex + dx
ex, fx = check_extent(nx, fx, ex)
#print('x', ex, fx, ey, fy, ez, fz)
patch = image[ez:fz, :, ey:fy, ex:fx]
#print(ex, ey, ez, patch.max(axis=(0,2,3)))
patches.append(patch)
ex += dx
ey += dy
ez += dz
#print(ez)
a = np.stack(patches)
return a
def augment_rotate(stack, step, axes=(-1, -2)):
angle = step
res = stack.copy()
while angle < 360:
#print(angle)
r = rotate(stack, angle, axes=axes, reshape=False)
res = np.concatenate([res, r], axis=0)
#print(res.shape)
angle += step
return res #np.concatenate([stack, r], axis = 0)
def reconstruct(patches, w, nx, ny):
image = np.zeros((ny, nx, patches.shape[-1]), dtype=patches.dtype)
xmax = 0
ymax = 0
xok = True
yok = True
patch_index = 0
while xok:
ymax = 0
yok = True
xs = xmax
xmax += w
if xmax >= nx:
xmax = nx
xs = nx - w
xok = False
while yok:
ys = ymax
ymax += w
if ymax > ny:
ymax = ny
ys = ny - w
yok = False
crop = 32
image[ys:ymax, xs:xmax, :] = patches[patch_index]
image[ys:ys + crop, :] = 0
image[:, xs:xs + crop, :] = 0
image[ymax - crop:ymax, :] = 0
image[:, xmax-crop:xmax] = 0
patch_index += 1
#print(patch_index, ys, ymax, xs, xmax, yok, xok)
return image
def patch_to_image(patches, w, nx, ny):
r1 = reconstruct(dp, 256, data.shape[1] , data.shape[0])
r2 = reconstruct(dp2, 256, data.shape[1] - 0, data.shape[0] - 128)
r3 = reconstruct(dp3, 256, data.shape[1] - 128, data.shape[0] - 0)
r4 = reconstruct(dp4, 256, data.shape[1] - 128, data.shape[0] - 128)
rr2 = np.zeros_like(r1)
rr2[128:, :, :] = r2
rr3 = np.zeros_like(r1)
rr3[:, 128:, :] = r3
rr4 = np.zeros_like(r1)
rr4[128:, 128:, :] = r4
rstack = np.stack((r1, rr2, rr3, rr4), axis=0)
r = rstack.max(axis=0) | 0.250088 | 0.324663 |
import os
import random
import django
import djangae.environment
from djangae.settings_base import * # noqa
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
if djangae.environment.is_production_environment():
DEBUG = False
SECRET_KEY = ''.join([
random.SystemRandom().choice('<KEY>')
for i in range(50)
])
ALLOWED_HOSTS = ['.appspot.com']
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
else:
DEBUG = True
SECRET_KEY = '&x$ts1u)tx#5zsi84555$(@mydbz06&q23p8=c6fs1!d4%1a^u'
# Application definition
INSTALLED_APPS = (
'djangae',
'django.contrib.admin',
'djangae.contrib.gauth_datastore',
'djangae.contrib.security',
'django.contrib.contenttypes',
'djangae.contrib.contenttypes',
'django.contrib.sessions',
'testapp',
)
MIDDLEWARE = (
'djangae.contrib.security.middleware.AppEngineSecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'djangae.contrib.gauth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'session_csrf.CsrfMiddleware',
)
if tuple(django.VERSION[:2]) < (1, 10):
MIDDLEWARE_CLASSES = MIDDLEWARE
ROOT_URLCONF = 'testapp.urls'
SITE_ID = 1
WSGI_APPLICATION = 'wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'djangae.db.backends.appengine',
},
}
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_ROOT = BASE_DIR + 'static'
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'gauth_datastore.GaeDatastoreUser'
AUTHENTICATION_BACKENDS = (
'djangae.contrib.gauth_datastore.backends.AppEngineUserAPIBackend',
)
# Here because of "You haven't defined a TEMPLATES setting" deprecation message
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [
'templates',
],
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth'
],
'debug': DEBUG,
},
},
]
from djangae.contrib.gauth.settings import * # noqa | testprodapp/testapp/settings.py | import os
import random
import django
import djangae.environment
from djangae.settings_base import * # noqa
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
if djangae.environment.is_production_environment():
DEBUG = False
SECRET_KEY = ''.join([
random.SystemRandom().choice('<KEY>')
for i in range(50)
])
ALLOWED_HOSTS = ['.appspot.com']
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
else:
DEBUG = True
SECRET_KEY = '&x$ts1u)tx#5zsi84555$(@mydbz06&q23p8=c6fs1!d4%1a^u'
# Application definition
INSTALLED_APPS = (
'djangae',
'django.contrib.admin',
'djangae.contrib.gauth_datastore',
'djangae.contrib.security',
'django.contrib.contenttypes',
'djangae.contrib.contenttypes',
'django.contrib.sessions',
'testapp',
)
MIDDLEWARE = (
'djangae.contrib.security.middleware.AppEngineSecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'djangae.contrib.gauth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'session_csrf.CsrfMiddleware',
)
if tuple(django.VERSION[:2]) < (1, 10):
MIDDLEWARE_CLASSES = MIDDLEWARE
ROOT_URLCONF = 'testapp.urls'
SITE_ID = 1
WSGI_APPLICATION = 'wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'djangae.db.backends.appengine',
},
}
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_ROOT = BASE_DIR + 'static'
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'gauth_datastore.GaeDatastoreUser'
AUTHENTICATION_BACKENDS = (
'djangae.contrib.gauth_datastore.backends.AppEngineUserAPIBackend',
)
# Here because of "You haven't defined a TEMPLATES setting" deprecation message
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [
'templates',
],
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth'
],
'debug': DEBUG,
},
},
]
from djangae.contrib.gauth.settings import * # noqa | 0.30549 | 0.058912 |
import mysql.connector
import pandas as pd
from sklearn import preprocessing
class ReadDB:
def __init__(self, *, config, db_config):
self.config = config
self.db_config = db_config
METHOD_COLUMNS = {
'pi': {
'pi_openness': 'PI_Openness',
'pi_conscientiousness': 'PI_Conscientiousness',
'pi_extraversion': 'PI_Extraversion',
'pi_agreeableness': 'PI_Agreeableness',
'pi_neuroticism': 'PI_Neuroticism'
},
'yarkoni': {
'liwc_openness_yarkoni': 'LIWC_Openness_Yarkoni',
'liwc_conscientiousness_yarkoni': 'LIWC_Conscientiousness_Yarkoni',
'liwc_extraversion_yarkoni': 'LIWC_Extraversion_Yarkoni',
'liwc_agreeableness_yarkoni': 'LIWC_Agreeableness_Yarkoni',
'liwc_neuroticism_yarkoni': 'LIWC_Neuroticism_Yarkoni'
},
'golbeck': {
'liwc_openness_golbeck': 'LIWC_Openness_Golbeck',
'liwc_conscientiousness_golbeck': 'LIWC_Conscientiousness_Golbeck',
'liwc_extraversion_golbeck': 'LIWC_Extraversion_Golbeck',
'liwc_agreeableness_golbeck': 'LIWC_Agreeableness_Golbeck',
'liwc_neuroticism_golbeck': 'LIWC_Neuroticism_Golbeck'
}
}
PI_COLUMNS = ['PI_Openness', 'PI_Conscientiousness', 'PI_Extraversion', 'PI_Agreeableness',
'PI_Neuroticism']
YARKONI_COLUMNS = ['LIWC_Openness_Yarkoni', 'LIWC_Conscientiousness_Yarkoni',
'LIWC_Extraversion_Yarkoni', 'LIWC_Agreeableness_Yarkoni',
'LIWC_Neuroticism_Yarkoni']
GOLBECK_COLUMNS = ['LIWC_Openness_Golbeck', 'LIWC_Conscientiousness_Golbeck',
'LIWC_Extraversion_Golbeck', 'LIWC_Agreeableness_Golbeck',
'LIWC_Neuroticism_Golbeck']
def _create_select(self, where, methods):
select = ''
for method in methods:
for nth in range(0, 5):
select += self._create_select_sub(method, nth) + ',\n'
return select[:-2] + '\nFROM {}\n{}'.format(self.config['table'], where)
def _create_select_sub(self, method, nth):
trait = list(self.METHOD_COLUMNS[method].items())[nth]
return '{}{{}} as {}'.format(*trait)
def read_db(self, where, methods=('pi', 'yarkoni', 'golbeck'), raw=True, decimals=1,
user_id=True, scaling=True):
suffix = '_raw' if raw else ''
where = 'WHERE ' + where if where else ''
select = 'SELECT id as user_id,\n' if user_id else 'SELECT \n'
query = select + self._create_select(where, methods).format(*(suffix,) * 15)
results = self.execute_query(query)
cols = [column for sublist in
[list(self.METHOD_COLUMNS[method].values()) for method in methods] for column in
sublist]
if user_id:
cols.insert(0, 'user_id')
dataframe = pd.DataFrame(results, columns=cols)
dataframe = dataframe.apply(pd.to_numeric)
if user_id:
cols.remove('user_id')
if scaling:
scaler = preprocessing.MinMaxScaler(feature_range=(0, 100))
dataframe[cols] = scaler.fit_transform(dataframe[cols])
return dataframe.round(decimals=decimals), cols
def execute_query(self, query):
cnx = mysql.connector.connect(**self.db_config)
cursor = cnx.cursor()
cursor.execute(query)
results = cursor.fetchall()
cnx.close()
return results | stats/db.py | import mysql.connector
import pandas as pd
from sklearn import preprocessing
class ReadDB:
def __init__(self, *, config, db_config):
self.config = config
self.db_config = db_config
METHOD_COLUMNS = {
'pi': {
'pi_openness': 'PI_Openness',
'pi_conscientiousness': 'PI_Conscientiousness',
'pi_extraversion': 'PI_Extraversion',
'pi_agreeableness': 'PI_Agreeableness',
'pi_neuroticism': 'PI_Neuroticism'
},
'yarkoni': {
'liwc_openness_yarkoni': 'LIWC_Openness_Yarkoni',
'liwc_conscientiousness_yarkoni': 'LIWC_Conscientiousness_Yarkoni',
'liwc_extraversion_yarkoni': 'LIWC_Extraversion_Yarkoni',
'liwc_agreeableness_yarkoni': 'LIWC_Agreeableness_Yarkoni',
'liwc_neuroticism_yarkoni': 'LIWC_Neuroticism_Yarkoni'
},
'golbeck': {
'liwc_openness_golbeck': 'LIWC_Openness_Golbeck',
'liwc_conscientiousness_golbeck': 'LIWC_Conscientiousness_Golbeck',
'liwc_extraversion_golbeck': 'LIWC_Extraversion_Golbeck',
'liwc_agreeableness_golbeck': 'LIWC_Agreeableness_Golbeck',
'liwc_neuroticism_golbeck': 'LIWC_Neuroticism_Golbeck'
}
}
PI_COLUMNS = ['PI_Openness', 'PI_Conscientiousness', 'PI_Extraversion', 'PI_Agreeableness',
'PI_Neuroticism']
YARKONI_COLUMNS = ['LIWC_Openness_Yarkoni', 'LIWC_Conscientiousness_Yarkoni',
'LIWC_Extraversion_Yarkoni', 'LIWC_Agreeableness_Yarkoni',
'LIWC_Neuroticism_Yarkoni']
GOLBECK_COLUMNS = ['LIWC_Openness_Golbeck', 'LIWC_Conscientiousness_Golbeck',
'LIWC_Extraversion_Golbeck', 'LIWC_Agreeableness_Golbeck',
'LIWC_Neuroticism_Golbeck']
def _create_select(self, where, methods):
select = ''
for method in methods:
for nth in range(0, 5):
select += self._create_select_sub(method, nth) + ',\n'
return select[:-2] + '\nFROM {}\n{}'.format(self.config['table'], where)
def _create_select_sub(self, method, nth):
trait = list(self.METHOD_COLUMNS[method].items())[nth]
return '{}{{}} as {}'.format(*trait)
def read_db(self, where, methods=('pi', 'yarkoni', 'golbeck'), raw=True, decimals=1,
user_id=True, scaling=True):
suffix = '_raw' if raw else ''
where = 'WHERE ' + where if where else ''
select = 'SELECT id as user_id,\n' if user_id else 'SELECT \n'
query = select + self._create_select(where, methods).format(*(suffix,) * 15)
results = self.execute_query(query)
cols = [column for sublist in
[list(self.METHOD_COLUMNS[method].values()) for method in methods] for column in
sublist]
if user_id:
cols.insert(0, 'user_id')
dataframe = pd.DataFrame(results, columns=cols)
dataframe = dataframe.apply(pd.to_numeric)
if user_id:
cols.remove('user_id')
if scaling:
scaler = preprocessing.MinMaxScaler(feature_range=(0, 100))
dataframe[cols] = scaler.fit_transform(dataframe[cols])
return dataframe.round(decimals=decimals), cols
def execute_query(self, query):
cnx = mysql.connector.connect(**self.db_config)
cursor = cnx.cursor()
cursor.execute(query)
results = cursor.fetchall()
cnx.close()
return results | 0.472683 | 0.263037 |
import nltk
from nltk.collocations import BigramCollocationFinder
from nltk.tokenize.regexp import RegexpTokenizer
def get_bigram_likelihood(statements, stopwords=[], freq_filter=3, nbest=200):
"""
Returns n (likelihood ratio) bi-grams from a group of documents
:param statements: list of strings
:param output_file: output path for saved file
:param freq_filter: filter for # of appearances in bi-gram
:param nbest: likelihood ratio for bi-grams
"""
words = list()
# print('Generating word list...')
#tokenize sentence into words
for statement in statements:
# remove non-words
tokenizer = RegexpTokenizer(r'\w+')
words.extend(tokenizer.tokenize(statement))
bigram_measures = nltk.collocations.BigramAssocMeasures()
bigram_finder = BigramCollocationFinder.from_words(words)
# only bi-grams that appear n+ times
bigram_finder.apply_freq_filter(freq_filter)
# TODO: use custom stop words
stop = nltk.corpus.stopwords.words('turkish') + stopwords
bigram_finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in stop)
bigram_results = bigram_finder.nbest(bigram_measures.likelihood_ratio, nbest)
return bigram_finder.score_ngrams(bigram_measures.likelihood_ratio)
def get_stop_words(one_star_reviews, five_star_reviews):
stopwords = nltk.corpus.stopwords.words('turkish')
one_star_all_words = []
for text in one_star_reviews:
one_star_all_words.extend(nltk.tokenize.word_tokenize(text))
one_star_common_words = nltk.FreqDist(w.lower() for w in one_star_all_words if w not in stopwords)
one_star_common_words = one_star_common_words.most_common(100)
one_star_common_words = [x[0] for x in one_star_common_words]
five_star_all_words = []
for text in five_star_reviews:
five_star_all_words.extend(nltk.tokenize.word_tokenize(text))
five_star_common_words = nltk.FreqDist(w.lower() for w in five_star_all_words if w not in stopwords)
five_star_common_words = five_star_common_words.most_common(100)
five_star_common_words = [x[0] for x in five_star_common_words]
common_words = []
for word in one_star_common_words:
if word in five_star_common_words:
common_words.append(word)
return common_words | external_sources.py | import nltk
from nltk.collocations import BigramCollocationFinder
from nltk.tokenize.regexp import RegexpTokenizer
def get_bigram_likelihood(statements, stopwords=[], freq_filter=3, nbest=200):
"""
Returns n (likelihood ratio) bi-grams from a group of documents
:param statements: list of strings
:param output_file: output path for saved file
:param freq_filter: filter for # of appearances in bi-gram
:param nbest: likelihood ratio for bi-grams
"""
words = list()
# print('Generating word list...')
#tokenize sentence into words
for statement in statements:
# remove non-words
tokenizer = RegexpTokenizer(r'\w+')
words.extend(tokenizer.tokenize(statement))
bigram_measures = nltk.collocations.BigramAssocMeasures()
bigram_finder = BigramCollocationFinder.from_words(words)
# only bi-grams that appear n+ times
bigram_finder.apply_freq_filter(freq_filter)
# TODO: use custom stop words
stop = nltk.corpus.stopwords.words('turkish') + stopwords
bigram_finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in stop)
bigram_results = bigram_finder.nbest(bigram_measures.likelihood_ratio, nbest)
return bigram_finder.score_ngrams(bigram_measures.likelihood_ratio)
def get_stop_words(one_star_reviews, five_star_reviews):
stopwords = nltk.corpus.stopwords.words('turkish')
one_star_all_words = []
for text in one_star_reviews:
one_star_all_words.extend(nltk.tokenize.word_tokenize(text))
one_star_common_words = nltk.FreqDist(w.lower() for w in one_star_all_words if w not in stopwords)
one_star_common_words = one_star_common_words.most_common(100)
one_star_common_words = [x[0] for x in one_star_common_words]
five_star_all_words = []
for text in five_star_reviews:
five_star_all_words.extend(nltk.tokenize.word_tokenize(text))
five_star_common_words = nltk.FreqDist(w.lower() for w in five_star_all_words if w not in stopwords)
five_star_common_words = five_star_common_words.most_common(100)
five_star_common_words = [x[0] for x in five_star_common_words]
common_words = []
for word in one_star_common_words:
if word in five_star_common_words:
common_words.append(word)
return common_words | 0.374562 | 0.261127 |
from neomodel import (db, StructuredNode, StringProperty, IntegerProperty, FloatProperty, ArrayProperty,
RelationshipTo, RelationshipFrom, StructuredRel)
from metrics import metrics_query_latency, metrics_query_in_progress, metrics_query_count
class Has(StructuredRel):
units = IntegerProperty(required=True)
class Product(StructuredNode):
__abstract_node__ = True
name = StringProperty(unique_index=True, required=True)
price = FloatProperty(required=True)
units = IntegerProperty(index=True, default=1)
@classmethod
@metrics_query_latency.time()
@metrics_query_in_progress.track_inprogress()
def find_by_name(cls, name=None):
metrics_query_count.labels(object=type(cls).__name__, method='find_by_name').inc()
return cls.nodes.first_or_none(name=name)
@db.transaction
@metrics_query_latency.time()
@metrics_query_in_progress.track_inprogress()
def save_to_db(self):
metrics_query_count.labels(object=type(self).__name__, method='save_to_db').inc()
self.save()
@db.transaction
@metrics_query_latency.time()
@metrics_query_in_progress.track_inprogress()
def delete_from_db(self):
metrics_query_count.labels(object=type(self).__name__, method='delete_from_db').inc()
self.delete()
def json(self):
pass
# Product category classes
class Pizza(Product):
ingredients = ArrayProperty(StringProperty(), required=True)
# PIZZA_FORMS = {"REDONDA": "REDONDA", "CUADRADA": "CUADRADA"}
form = StringProperty(required=True, choices={"REDONDA": "REDONDA", "CUADRADA": "CUADRADA"})
rel_package = RelationshipFrom('Package', 'HAS', model=Has)
@metrics_query_latency.time()
@metrics_query_in_progress.track_inprogress()
def json(self):
metrics_query_count.labels(object='Pizza', method='json').inc()
return {'name': self.name,
'price': self.price,
'units': self.units,
'ingredients': [ingredient for ingredient in self.ingredients],
'form': self.form}
class Complement(Product):
description = StringProperty(required=True)
ingredients = ArrayProperty(StringProperty(), required=True)
rel_package = RelationshipFrom('Package', 'HAS', model=Has)
@metrics_query_latency.time()
@metrics_query_in_progress.track_inprogress()
def json(self):
metrics_query_count.labels(object='Complement', method='json').inc()
return {'name': self.name,
'price': self.price,
'units': self.units,
'ingredients': [ingredient for ingredient in self.ingredients],
'description': self.description}
class Drink(Product):
brand = StringProperty(required=True)
litres = FloatProperty(required=True)
rel_package = RelationshipFrom('Package', 'HAS', model=Has)
@metrics_query_latency.time()
@metrics_query_in_progress.track_inprogress()
def json(self):
metrics_query_count.labels(object='Drink', method='json').inc()
return {'name': self.name,
'price': self.price,
'units': self.units,
'brand': self.brand,
'litres': self.litres}
class Sauce(Product):
description = StringProperty(required=True)
rel_package = RelationshipFrom('Package', 'HAS', model=Has)
@metrics_query_latency.time()
@metrics_query_in_progress.track_inprogress()
def json(self):
metrics_query_count.labels(object='Sauce', method='json').inc()
return {'name': self.name,
'price': self.price,
'units': self.units,
'description': self.description}
class Package(Product):
pizzas = ArrayProperty(StringProperty())
complements = ArrayProperty(StringProperty())
drinks = ArrayProperty(StringProperty())
sauces = ArrayProperty(StringProperty())
rel_pizza = RelationshipTo('Pizza', 'HAS', model=Has)
rel_complement = RelationshipTo('Complement', 'HAS', model=Has)
rel_drink = RelationshipTo('Drink', 'HAS', model=Has)
rel_sauce = RelationshipTo('Sauce', 'HAS', model=Has)
# noinspection PyTypeChecker
@metrics_query_latency.time()
@metrics_query_in_progress.track_inprogress()
def json(self):
rows_pizzas, col_names = self.cypher(
"MATCH (pkg) WHERE id(pkg)={self} MATCH (pkg)-[r:HAS]->(product:Pizza) RETURN product.name, r.units")
rows_comps, col_names = self.cypher(
"MATCH (pkg) WHERE id(pkg)={self} MATCH (pkg)-[r:HAS]->(product:Complement) RETURN product.name, r.units")
rows_drinks, col_names = self.cypher(
"MATCH (pkg) WHERE id(pkg)={self} MATCH (pkg)-[r:HAS]->(product:Drink) RETURN product.name, r.units")
rows_sauces, col_names = self.cypher(
"MATCH (pkg) WHERE id(pkg)={self} MATCH (pkg)-[r:HAS]->(product:Sauce) RETURN product.name, r.units")
package = {'name': self.name,
'price': self.price,
'units': self.units}
# Dictionary comprehensions
if len(rows_pizzas) != 0:
package['pizzas'] = {row[0]: row[1] for row in rows_pizzas}
if len(rows_comps) != 0:
package['complements'] = {row[0]: row[1] for row in rows_comps}
if len(rows_drinks) != 0:
package['drinks'] = {row[0]: row[1] for row in rows_drinks}
if len(rows_sauces) != 0:
package['sauces'] = {row[0]: row[1] for row in rows_sauces}
metrics_query_count.labels(object='Package', method='json').inc()
return package
class Products:
@classmethod
@metrics_query_latency.time()
@metrics_query_in_progress.track_inprogress()
def json(cls):
metrics_query_count.labels(object='Products', method='json').inc()
return {"Pizzas": [pizza.json() for pizza in Pizza.nodes.all()],
"Complements": [complement.json() for complement in Complement.nodes.all()],
"Drinks": [drink.json() for drink in Drink.nodes.all()],
"Sauces": [sauce.json() for sauce in Sauce.nodes.all()],
"Packages": [package.json() for package in Package.nodes.all()]} | app/models/products.py | from neomodel import (db, StructuredNode, StringProperty, IntegerProperty, FloatProperty, ArrayProperty,
RelationshipTo, RelationshipFrom, StructuredRel)
from metrics import metrics_query_latency, metrics_query_in_progress, metrics_query_count
class Has(StructuredRel):
units = IntegerProperty(required=True)
class Product(StructuredNode):
__abstract_node__ = True
name = StringProperty(unique_index=True, required=True)
price = FloatProperty(required=True)
units = IntegerProperty(index=True, default=1)
@classmethod
@metrics_query_latency.time()
@metrics_query_in_progress.track_inprogress()
def find_by_name(cls, name=None):
metrics_query_count.labels(object=type(cls).__name__, method='find_by_name').inc()
return cls.nodes.first_or_none(name=name)
@db.transaction
@metrics_query_latency.time()
@metrics_query_in_progress.track_inprogress()
def save_to_db(self):
metrics_query_count.labels(object=type(self).__name__, method='save_to_db').inc()
self.save()
@db.transaction
@metrics_query_latency.time()
@metrics_query_in_progress.track_inprogress()
def delete_from_db(self):
metrics_query_count.labels(object=type(self).__name__, method='delete_from_db').inc()
self.delete()
def json(self):
pass
# Product category classes
class Pizza(Product):
ingredients = ArrayProperty(StringProperty(), required=True)
# PIZZA_FORMS = {"REDONDA": "REDONDA", "CUADRADA": "CUADRADA"}
form = StringProperty(required=True, choices={"REDONDA": "REDONDA", "CUADRADA": "CUADRADA"})
rel_package = RelationshipFrom('Package', 'HAS', model=Has)
@metrics_query_latency.time()
@metrics_query_in_progress.track_inprogress()
def json(self):
metrics_query_count.labels(object='Pizza', method='json').inc()
return {'name': self.name,
'price': self.price,
'units': self.units,
'ingredients': [ingredient for ingredient in self.ingredients],
'form': self.form}
class Complement(Product):
description = StringProperty(required=True)
ingredients = ArrayProperty(StringProperty(), required=True)
rel_package = RelationshipFrom('Package', 'HAS', model=Has)
@metrics_query_latency.time()
@metrics_query_in_progress.track_inprogress()
def json(self):
metrics_query_count.labels(object='Complement', method='json').inc()
return {'name': self.name,
'price': self.price,
'units': self.units,
'ingredients': [ingredient for ingredient in self.ingredients],
'description': self.description}
class Drink(Product):
brand = StringProperty(required=True)
litres = FloatProperty(required=True)
rel_package = RelationshipFrom('Package', 'HAS', model=Has)
@metrics_query_latency.time()
@metrics_query_in_progress.track_inprogress()
def json(self):
metrics_query_count.labels(object='Drink', method='json').inc()
return {'name': self.name,
'price': self.price,
'units': self.units,
'brand': self.brand,
'litres': self.litres}
class Sauce(Product):
description = StringProperty(required=True)
rel_package = RelationshipFrom('Package', 'HAS', model=Has)
@metrics_query_latency.time()
@metrics_query_in_progress.track_inprogress()
def json(self):
metrics_query_count.labels(object='Sauce', method='json').inc()
return {'name': self.name,
'price': self.price,
'units': self.units,
'description': self.description}
class Package(Product):
pizzas = ArrayProperty(StringProperty())
complements = ArrayProperty(StringProperty())
drinks = ArrayProperty(StringProperty())
sauces = ArrayProperty(StringProperty())
rel_pizza = RelationshipTo('Pizza', 'HAS', model=Has)
rel_complement = RelationshipTo('Complement', 'HAS', model=Has)
rel_drink = RelationshipTo('Drink', 'HAS', model=Has)
rel_sauce = RelationshipTo('Sauce', 'HAS', model=Has)
# noinspection PyTypeChecker
@metrics_query_latency.time()
@metrics_query_in_progress.track_inprogress()
def json(self):
rows_pizzas, col_names = self.cypher(
"MATCH (pkg) WHERE id(pkg)={self} MATCH (pkg)-[r:HAS]->(product:Pizza) RETURN product.name, r.units")
rows_comps, col_names = self.cypher(
"MATCH (pkg) WHERE id(pkg)={self} MATCH (pkg)-[r:HAS]->(product:Complement) RETURN product.name, r.units")
rows_drinks, col_names = self.cypher(
"MATCH (pkg) WHERE id(pkg)={self} MATCH (pkg)-[r:HAS]->(product:Drink) RETURN product.name, r.units")
rows_sauces, col_names = self.cypher(
"MATCH (pkg) WHERE id(pkg)={self} MATCH (pkg)-[r:HAS]->(product:Sauce) RETURN product.name, r.units")
package = {'name': self.name,
'price': self.price,
'units': self.units}
# Dictionary comprehensions
if len(rows_pizzas) != 0:
package['pizzas'] = {row[0]: row[1] for row in rows_pizzas}
if len(rows_comps) != 0:
package['complements'] = {row[0]: row[1] for row in rows_comps}
if len(rows_drinks) != 0:
package['drinks'] = {row[0]: row[1] for row in rows_drinks}
if len(rows_sauces) != 0:
package['sauces'] = {row[0]: row[1] for row in rows_sauces}
metrics_query_count.labels(object='Package', method='json').inc()
return package
class Products:
@classmethod
@metrics_query_latency.time()
@metrics_query_in_progress.track_inprogress()
def json(cls):
metrics_query_count.labels(object='Products', method='json').inc()
return {"Pizzas": [pizza.json() for pizza in Pizza.nodes.all()],
"Complements": [complement.json() for complement in Complement.nodes.all()],
"Drinks": [drink.json() for drink in Drink.nodes.all()],
"Sauces": [sauce.json() for sauce in Sauce.nodes.all()],
"Packages": [package.json() for package in Package.nodes.all()]} | 0.688887 | 0.213931 |
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from sonia_hardware_states.wait_mission import wait_mission
from sonia_navigation_states.add_pose_to_trajectory import add_pose_to_trajectory
from sonia_navigation_states.init_trajectory import init_trajectory
from sonia_navigation_states.send_to_planner import send_to_planner
from sonia_navigation_states.set_control_mode import set_control_mode
from sonia_navigation_states.wait_target_reached import wait_target_reached
from sonia_navigation_states.yaw_orbit_from_given_point import yaw_orbit_from_given_point
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Wed May 18 2022
@author: <NAME>
'''
class mission_bags_binsSM(Behavior):
    '''
    Mission to create bags for the bins

    Builds one trajectory that alternates six relative 0.5 m descents
    ("go_down_N") with six 360-degree yaw orbits around a point 0.2415 m
    ahead of the vehicle ("rotate_N"), sends it to the planner, and waits
    for the target to be reached.
    NOTE: FlexBE-generated file; the "# x:.. y:.." comments below are
    editor canvas coordinates, not vehicle poses.
    '''

    def __init__(self):
        super(mission_bags_binsSM, self).__init__()
        self.name = 'mission_bags_bins'

        # parameters of this behavior

        # references to used behaviors

        # Additional initialization code can be added inside the following tags
        # [MANUAL_INIT]

        # [/MANUAL_INIT]

        # Behavior comments:

    def create(self):
        """Assemble and return the mission state machine.

        Flow: wait_mission -> set_ctrl_mode -> init_traj ->
        (go_down_N -> rotate_N) x 6 -> send_to_planner -> wait_target.
        Each state's 'trajectory' userdata output is remapped to the next
        state's 'input_traj', so the trajectory is built incrementally.
        NOTE(review): the first state added ('wait_mission') is assumed to
        be the initial state -- confirm before reordering the add() calls.
        """
        # x:1275 y:612, x:97 y:678
        _state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])

        # Additional creation code can be added inside the following tags
        # [MANUAL_CREATE]

        # [/MANUAL_CREATE]

        with _state_machine:
            # x:49 y:100
            OperatableStateMachine.add('wait_mission',
                                       wait_mission(),
                                       transitions={'continue': 'set_ctrl_mode', 'failed': 'failed'},
                                       autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})

            # x:396 y:231
            OperatableStateMachine.add('go_down_1',
                                       add_pose_to_trajectory(positionX=0, positionY=0, positionZ=0.5, orientationX=0, orientationY=0, orientationZ=0, frame=1, speed=0, precision=0, long_rotation=False),
                                       transitions={'continue': 'rotate_1'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'input_traj': 'rotate_0', 'trajectory': 'go_down_1'})

            # x:404 y:429
            OperatableStateMachine.add('go_down_2',
                                       add_pose_to_trajectory(positionX=0, positionY=0, positionZ=0.5, orientationX=0, orientationY=0, orientationZ=0, frame=1, speed=0, precision=0, long_rotation=False),
                                       transitions={'continue': 'rotate_2'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'input_traj': 'rotate_1', 'trajectory': 'go_down_2'})

            # x:642 y:38
            OperatableStateMachine.add('go_down_3',
                                       add_pose_to_trajectory(positionX=0, positionY=0, positionZ=0.5, orientationX=0, orientationY=0, orientationZ=0, frame=1, speed=0, precision=0, long_rotation=False),
                                       transitions={'continue': 'rotate_3'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'input_traj': 'rotate_2', 'trajectory': 'go_down_3'})

            # x:661 y:244
            OperatableStateMachine.add('go_down_4',
                                       add_pose_to_trajectory(positionX=0, positionY=0, positionZ=0.5, orientationX=0, orientationY=0, orientationZ=0, frame=1, speed=0, precision=0, long_rotation=False),
                                       transitions={'continue': 'rotate_4'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'input_traj': 'rotate_3', 'trajectory': 'go_down_4'})

            # x:668 y:431
            OperatableStateMachine.add('go_down_5',
                                       add_pose_to_trajectory(positionX=0, positionY=0, positionZ=0.5, orientationX=0, orientationY=0, orientationZ=0, frame=1, speed=0, precision=0, long_rotation=False),
                                       transitions={'continue': 'rotate_5'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'input_traj': 'rotate_4', 'trajectory': 'go_down_5'})

            # x:181 y:322
            OperatableStateMachine.add('init_traj',
                                       init_trajectory(InterpolationMethod=0),
                                       transitions={'continue': 'go_down_0'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'trajectory': 'init_traj'})

            # x:386 y:133
            OperatableStateMachine.add('rotate_0',
                                       yaw_orbit_from_given_point(pointX=0.2415, pointY=0, rotation=360, speed=1),
                                       transitions={'continue': 'go_down_1'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'input_traj': 'go_down_0', 'trajectory': 'rotate_0'})

            # x:396 y:331
            OperatableStateMachine.add('rotate_1',
                                       yaw_orbit_from_given_point(pointX=0.2415, pointY=0, rotation=360, speed=1),
                                       transitions={'continue': 'go_down_2'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'input_traj': 'go_down_1', 'trajectory': 'rotate_1'})

            # x:405 y:526
            OperatableStateMachine.add('rotate_2',
                                       yaw_orbit_from_given_point(pointX=0.2415, pointY=0, rotation=360, speed=1),
                                       transitions={'continue': 'go_down_3'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'input_traj': 'go_down_2', 'trajectory': 'rotate_2'})

            # x:639 y:151
            OperatableStateMachine.add('rotate_3',
                                       yaw_orbit_from_given_point(pointX=0.2415, pointY=0, rotation=360, speed=1),
                                       transitions={'continue': 'go_down_4'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'input_traj': 'go_down_3', 'trajectory': 'rotate_3'})

            # x:648 y:338
            OperatableStateMachine.add('rotate_4',
                                       yaw_orbit_from_given_point(pointX=0.2415, pointY=0, rotation=360, speed=1),
                                       transitions={'continue': 'go_down_5'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'input_traj': 'go_down_4', 'trajectory': 'rotate_4'})

            # x:651 y:522
            OperatableStateMachine.add('rotate_5',
                                       yaw_orbit_from_given_point(pointX=0.2415, pointY=0, rotation=360, speed=1),
                                       transitions={'continue': 'send_to_planner'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'input_traj': 'go_down_5', 'trajectory': 'rotate_5'})

            # x:1046 y:558
            OperatableStateMachine.add('send_to_planner',
                                       send_to_planner(),
                                       transitions={'continue': 'wait_target', 'failed': 'failed'},
                                       autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off},
                                       remapping={'input_traj': 'rotate_5'})

            # x:127 y:213
            OperatableStateMachine.add('set_ctrl_mode',
                                       set_control_mode(mode=10, timeout=2),
                                       transitions={'continue': 'init_traj', 'failed': 'failed'},
                                       autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})

            # x:1043 y:651
            OperatableStateMachine.add('wait_target',
                                       wait_target_reached(),
                                       transitions={'target_reached': 'finished', 'target_not_reached': 'failed', 'error': 'failed'},
                                       autonomy={'target_reached': Autonomy.Off, 'target_not_reached': Autonomy.Off, 'error': Autonomy.Off})

            # x:383 y:36
            OperatableStateMachine.add('go_down_0',
                                       add_pose_to_trajectory(positionX=0, positionY=0, positionZ=0.5, orientationX=0, orientationY=0, orientationZ=0, frame=1, speed=0, precision=0, long_rotation=False),
                                       transitions={'continue': 'rotate_0'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'input_traj': 'init_traj', 'trajectory': 'go_down_0'})

        return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC] | sonia_flexbe_behaviors/src/sonia_flexbe_behaviors/mission_bags_bins_sm.py |
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from sonia_hardware_states.wait_mission import wait_mission
from sonia_navigation_states.add_pose_to_trajectory import add_pose_to_trajectory
from sonia_navigation_states.init_trajectory import init_trajectory
from sonia_navigation_states.send_to_planner import send_to_planner
from sonia_navigation_states.set_control_mode import set_control_mode
from sonia_navigation_states.wait_target_reached import wait_target_reached
from sonia_navigation_states.yaw_orbit_from_given_point import yaw_orbit_from_given_point
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Wed May 18 2022
@author: <NAME>
'''
class mission_bags_binsSM(Behavior):
    '''
    Mission to create bags for the bins

    Builds one trajectory that alternates six relative 0.5 m descents
    ("go_down_N") with six 360-degree yaw orbits around a point 0.2415 m
    ahead of the vehicle ("rotate_N"), sends it to the planner, and waits
    for the target to be reached.
    NOTE: FlexBE-generated file; the "# x:.. y:.." comments below are
    editor canvas coordinates, not vehicle poses.
    '''

    def __init__(self):
        super(mission_bags_binsSM, self).__init__()
        self.name = 'mission_bags_bins'

        # parameters of this behavior

        # references to used behaviors

        # Additional initialization code can be added inside the following tags
        # [MANUAL_INIT]

        # [/MANUAL_INIT]

        # Behavior comments:

    def create(self):
        """Assemble and return the mission state machine.

        Flow: wait_mission -> set_ctrl_mode -> init_traj ->
        (go_down_N -> rotate_N) x 6 -> send_to_planner -> wait_target.
        Each state's 'trajectory' userdata output is remapped to the next
        state's 'input_traj', so the trajectory is built incrementally.
        NOTE(review): the first state added ('wait_mission') is assumed to
        be the initial state -- confirm before reordering the add() calls.
        """
        # x:1275 y:612, x:97 y:678
        _state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])

        # Additional creation code can be added inside the following tags
        # [MANUAL_CREATE]

        # [/MANUAL_CREATE]

        with _state_machine:
            # x:49 y:100
            OperatableStateMachine.add('wait_mission',
                                       wait_mission(),
                                       transitions={'continue': 'set_ctrl_mode', 'failed': 'failed'},
                                       autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})

            # x:396 y:231
            OperatableStateMachine.add('go_down_1',
                                       add_pose_to_trajectory(positionX=0, positionY=0, positionZ=0.5, orientationX=0, orientationY=0, orientationZ=0, frame=1, speed=0, precision=0, long_rotation=False),
                                       transitions={'continue': 'rotate_1'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'input_traj': 'rotate_0', 'trajectory': 'go_down_1'})

            # x:404 y:429
            OperatableStateMachine.add('go_down_2',
                                       add_pose_to_trajectory(positionX=0, positionY=0, positionZ=0.5, orientationX=0, orientationY=0, orientationZ=0, frame=1, speed=0, precision=0, long_rotation=False),
                                       transitions={'continue': 'rotate_2'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'input_traj': 'rotate_1', 'trajectory': 'go_down_2'})

            # x:642 y:38
            OperatableStateMachine.add('go_down_3',
                                       add_pose_to_trajectory(positionX=0, positionY=0, positionZ=0.5, orientationX=0, orientationY=0, orientationZ=0, frame=1, speed=0, precision=0, long_rotation=False),
                                       transitions={'continue': 'rotate_3'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'input_traj': 'rotate_2', 'trajectory': 'go_down_3'})

            # x:661 y:244
            OperatableStateMachine.add('go_down_4',
                                       add_pose_to_trajectory(positionX=0, positionY=0, positionZ=0.5, orientationX=0, orientationY=0, orientationZ=0, frame=1, speed=0, precision=0, long_rotation=False),
                                       transitions={'continue': 'rotate_4'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'input_traj': 'rotate_3', 'trajectory': 'go_down_4'})

            # x:668 y:431
            OperatableStateMachine.add('go_down_5',
                                       add_pose_to_trajectory(positionX=0, positionY=0, positionZ=0.5, orientationX=0, orientationY=0, orientationZ=0, frame=1, speed=0, precision=0, long_rotation=False),
                                       transitions={'continue': 'rotate_5'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'input_traj': 'rotate_4', 'trajectory': 'go_down_5'})

            # x:181 y:322
            OperatableStateMachine.add('init_traj',
                                       init_trajectory(InterpolationMethod=0),
                                       transitions={'continue': 'go_down_0'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'trajectory': 'init_traj'})

            # x:386 y:133
            OperatableStateMachine.add('rotate_0',
                                       yaw_orbit_from_given_point(pointX=0.2415, pointY=0, rotation=360, speed=1),
                                       transitions={'continue': 'go_down_1'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'input_traj': 'go_down_0', 'trajectory': 'rotate_0'})

            # x:396 y:331
            OperatableStateMachine.add('rotate_1',
                                       yaw_orbit_from_given_point(pointX=0.2415, pointY=0, rotation=360, speed=1),
                                       transitions={'continue': 'go_down_2'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'input_traj': 'go_down_1', 'trajectory': 'rotate_1'})

            # x:405 y:526
            OperatableStateMachine.add('rotate_2',
                                       yaw_orbit_from_given_point(pointX=0.2415, pointY=0, rotation=360, speed=1),
                                       transitions={'continue': 'go_down_3'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'input_traj': 'go_down_2', 'trajectory': 'rotate_2'})

            # x:639 y:151
            OperatableStateMachine.add('rotate_3',
                                       yaw_orbit_from_given_point(pointX=0.2415, pointY=0, rotation=360, speed=1),
                                       transitions={'continue': 'go_down_4'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'input_traj': 'go_down_3', 'trajectory': 'rotate_3'})

            # x:648 y:338
            OperatableStateMachine.add('rotate_4',
                                       yaw_orbit_from_given_point(pointX=0.2415, pointY=0, rotation=360, speed=1),
                                       transitions={'continue': 'go_down_5'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'input_traj': 'go_down_4', 'trajectory': 'rotate_4'})

            # x:651 y:522
            OperatableStateMachine.add('rotate_5',
                                       yaw_orbit_from_given_point(pointX=0.2415, pointY=0, rotation=360, speed=1),
                                       transitions={'continue': 'send_to_planner'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'input_traj': 'go_down_5', 'trajectory': 'rotate_5'})

            # x:1046 y:558
            OperatableStateMachine.add('send_to_planner',
                                       send_to_planner(),
                                       transitions={'continue': 'wait_target', 'failed': 'failed'},
                                       autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off},
                                       remapping={'input_traj': 'rotate_5'})

            # x:127 y:213
            OperatableStateMachine.add('set_ctrl_mode',
                                       set_control_mode(mode=10, timeout=2),
                                       transitions={'continue': 'init_traj', 'failed': 'failed'},
                                       autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})

            # x:1043 y:651
            OperatableStateMachine.add('wait_target',
                                       wait_target_reached(),
                                       transitions={'target_reached': 'finished', 'target_not_reached': 'failed', 'error': 'failed'},
                                       autonomy={'target_reached': Autonomy.Off, 'target_not_reached': Autonomy.Off, 'error': Autonomy.Off})

            # x:383 y:36
            OperatableStateMachine.add('go_down_0',
                                       add_pose_to_trajectory(positionX=0, positionY=0, positionZ=0.5, orientationX=0, orientationY=0, orientationZ=0, frame=1, speed=0, precision=0, long_rotation=False),
                                       transitions={'continue': 'rotate_0'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'input_traj': 'init_traj', 'trajectory': 'go_down_0'})

        return _state_machine

    # Private functions can be added inside the following tags
    # [MANUAL_FUNC]

    # [/MANUAL_FUNC]
import unittest
import os, sys
test_dir = os.path.dirname(__file__)
src_dir = "../"
sys.path.insert(0, os.path.abspath(os.path.join(test_dir, src_dir)))
from bank.accounts import Accounts, CheckingAccount, SavingsAccount
from bank.account_holder import AccountHolder
from bank.banks import Bank
from bank.cards import Card
from bank.exceptions import InsufficientBalance, AccountError, ExceedsLimit
# --- Module-level fixtures shared by every test in BasicTests ---
# One Bank with two account holders: a checking account funded with
# $1000 and a savings account funded with $0.25, each with an active
# card.  The tests mutate this shared state, so they are order
# dependent (hence the test_a_/test_b_/... naming).
bank = Bank()

cormo = AccountHolder(bank, "101", "Mathias", "Cormann")
cormo_checking = CheckingAccount(
    "101-checking-1", "checking", cormo.accounts, "101", opening_balance=1000.00
)
# Card number appears to follow "<prefix>|<account-id>" -- TODO confirm.
cormo_checking_card = Card(
    cormo,
    cormo_checking,
    "Mathias",
    "Cormann",
    "40001|101-checking-1",
    "0101",
    "12-12-2024",
    "432",
    "active",
)

frydy = AccountHolder(bank, "202", "Josh", "Frydenberg")
frydy_savings = SavingsAccount(
    "202-savings-1", "savings", frydy.accounts, "202", opening_balance=0.25
)
# NOTE(review): the card number says "101-savings-1" but the account id
# is "202-savings-1" -- looks like a typo; verify whether card lookup
# depends on this string before changing it.
frydy_savings_card = Card(
    frydy,
    frydy_savings,
    "Josh",
    "Frydenberg",
    "50001|101-savings-1",
    "4321",
    "12-12-2024",
    "342",
    "active",
)

# Sanity check: bank ledger equals the sum of the opening balances.
assert bank.bank_balance == cormo_checking.balance + frydy_savings.balance
class BasicTests(unittest.TestCase):
    """
    Requirements:
    ● Deposit, withdraw and maintain a balance for multiple customers.
    ● Return a customer’s balance and the bank’s total balance.
    ● Prevent customers from withdrawing more money than they have in their account.

    NOTE: tests share the module-level bank/account fixtures and rely on
    unittest's alphabetical ordering (test_a_, test_b_, ...), so they
    are order dependent.
    """

    def test_a_withdraw_deposit(self):
        # ● Deposit, withdraw and maintain a balance for multiple customers.
        # Equal deposits then equal withdrawals must keep the bank ledger
        # consistent with the sum of customer balances at each step.
        trans = bank.deposit_transaction(cormo_checking_card.card_number, 500.00)
        trans2 = bank.deposit_transaction(frydy_savings_card.card_number, 500.00)
        assert bank.bank_balance == cormo_checking.balance + frydy_savings.balance
        trans3 = bank.withdrawal_transaction(cormo_checking_card.card_number, 500.00)
        trans4 = bank.withdrawal_transaction(frydy_savings_card.card_number, 500.00)
        assert bank.bank_balance == cormo_checking.balance + frydy_savings.balance

    def test_b_more_ahs(self):
        # Create 10K accounts, process withdrawals check balances.
        # Uses a fresh local Bank so the shared module fixtures are untouched.
        bank = Bank()
        n = 10000
        for i in range(n):
            ah = AccountHolder(bank, f"{i}1", "Mathias", "Cormann")
            ac = CheckingAccount(
                f"{i}1-checking-1",
                "checking",
                ah.accounts,
                f"{i}1",
                opening_balance=1000.00,
            )
            c = Card(
                ah,
                ac,
                "Mathias",
                "Cormann",
                f"40001|{i}1-checking-1",
                "0101",
                "12-12-2024",
                "432",
                "active",
            )
        print(bank.bank_balance, len(bank.account_holders))
        assert bank.bank_balance == 1000 * n
        # Withdraw half of every opening balance, then re-check the ledger.
        for id, acchldr in bank.account_holders.items():
            bank.withdrawal_transaction(f"40001|{id}-checking-1", 500.00)
        assert bank.bank_balance == 500 * n

    def test_c_balance(self):
        # ● Return a customer’s balance and the bank’s total balance.
        trans = bank.deposit_transaction(cormo_checking_card.card_number, 500.00)
        trans2 = bank.deposit_transaction(cormo_checking_card.card_number, 500.00)
        assert bank.bank_balance == cormo_checking.balance + frydy_savings.balance
        print(f"{bank.institution} balance: ${bank.bank_balance}")
        print(f"{cormo_checking.account_id} balance: ${cormo_checking.balance}")
        print(f"{frydy_savings.account_id} balance: ${frydy_savings.balance}")

    def test_d_overdraw(self):
        # ● Prevent customers from withdrawing more money than they have in their account.
        # Attempt to withdraw $1 over current balance.
        previous_balance = cormo_checking.balance
        trans = bank.withdrawal_transaction(
            cormo_checking_card.card_number, cormo_checking.balance + 1.00
        )
        # Status should be False for an unsuccessful transaction.
        assert not trans["status"]
        # We also get an explanation (the negative balance shown is done on purpose).
        print(trans["error"])
        # There should be no change in balance as the transaction was denied.
        assert previous_balance == cormo_checking.balance

    # Other tests of similar nature.
    def test_e_stat(self):
        # Exceed withdrawal limit eg. max $5000 can be taken per transaction.
        # Deposit required amount.
        bank.deposit_transaction(cormo_checking_card.card_number, 5000.00)
        # Test the limit exception is handled and trans is denied + balance maintained.
        previous_balance = cormo_checking.balance
        trans = bank.withdrawal_transaction(cormo_checking_card.card_number, 5001.00)
        assert not trans["status"]
        print(trans["error"])
        assert previous_balance == cormo_checking.balance

    def test_f_stat(self):
        # Account status if an account has been locked or closed.
        cormo_checking.status = "locked"
        trans = bank.withdrawal_transaction(cormo_checking_card.card_number, 500.00)
        # Transaction should be denied.
        assert not trans["status"]
        print(trans["error"])
        # Reopen account.
        cormo_checking.status = "open"
        trans = bank.withdrawal_transaction(cormo_checking_card.card_number, 500.00)
        # We should have a successful transaction.
        assert trans["status"]
if __name__ == "__main__":
unittest.main() | tests/project_requirements_test.py | import unittest
import os, sys
test_dir = os.path.dirname(__file__)
src_dir = "../"
sys.path.insert(0, os.path.abspath(os.path.join(test_dir, src_dir)))
from bank.accounts import Accounts, CheckingAccount, SavingsAccount
from bank.account_holder import AccountHolder
from bank.banks import Bank
from bank.cards import Card
from bank.exceptions import InsufficientBalance, AccountError, ExceedsLimit
# --- Module-level fixtures shared by every test in BasicTests ---
# One Bank with two account holders: a checking account funded with
# $1000 and a savings account funded with $0.25, each with an active
# card.  The tests mutate this shared state, so they are order
# dependent (hence the test_a_/test_b_/... naming).
bank = Bank()

cormo = AccountHolder(bank, "101", "Mathias", "Cormann")
cormo_checking = CheckingAccount(
    "101-checking-1", "checking", cormo.accounts, "101", opening_balance=1000.00
)
# Card number appears to follow "<prefix>|<account-id>" -- TODO confirm.
cormo_checking_card = Card(
    cormo,
    cormo_checking,
    "Mathias",
    "Cormann",
    "40001|101-checking-1",
    "0101",
    "12-12-2024",
    "432",
    "active",
)

frydy = AccountHolder(bank, "202", "Josh", "Frydenberg")
frydy_savings = SavingsAccount(
    "202-savings-1", "savings", frydy.accounts, "202", opening_balance=0.25
)
# NOTE(review): the card number says "101-savings-1" but the account id
# is "202-savings-1" -- looks like a typo; verify whether card lookup
# depends on this string before changing it.
frydy_savings_card = Card(
    frydy,
    frydy_savings,
    "Josh",
    "Frydenberg",
    "50001|101-savings-1",
    "4321",
    "12-12-2024",
    "342",
    "active",
)

# Sanity check: bank ledger equals the sum of the opening balances.
assert bank.bank_balance == cormo_checking.balance + frydy_savings.balance
class BasicTests(unittest.TestCase):
    """
    Requirements:
    ● Deposit, withdraw and maintain a balance for multiple customers.
    ● Return a customer’s balance and the bank’s total balance.
    ● Prevent customers from withdrawing more money than they have in their account.

    NOTE: tests share the module-level bank/account fixtures and rely on
    unittest's alphabetical ordering (test_a_, test_b_, ...), so they
    are order dependent.
    """

    def test_a_withdraw_deposit(self):
        # ● Deposit, withdraw and maintain a balance for multiple customers.
        # Equal deposits then equal withdrawals must keep the bank ledger
        # consistent with the sum of customer balances at each step.
        trans = bank.deposit_transaction(cormo_checking_card.card_number, 500.00)
        trans2 = bank.deposit_transaction(frydy_savings_card.card_number, 500.00)
        assert bank.bank_balance == cormo_checking.balance + frydy_savings.balance
        trans3 = bank.withdrawal_transaction(cormo_checking_card.card_number, 500.00)
        trans4 = bank.withdrawal_transaction(frydy_savings_card.card_number, 500.00)
        assert bank.bank_balance == cormo_checking.balance + frydy_savings.balance

    def test_b_more_ahs(self):
        # Create 10K accounts, process withdrawals check balances.
        # Uses a fresh local Bank so the shared module fixtures are untouched.
        bank = Bank()
        n = 10000
        for i in range(n):
            ah = AccountHolder(bank, f"{i}1", "Mathias", "Cormann")
            ac = CheckingAccount(
                f"{i}1-checking-1",
                "checking",
                ah.accounts,
                f"{i}1",
                opening_balance=1000.00,
            )
            c = Card(
                ah,
                ac,
                "Mathias",
                "Cormann",
                f"40001|{i}1-checking-1",
                "0101",
                "12-12-2024",
                "432",
                "active",
            )
        print(bank.bank_balance, len(bank.account_holders))
        assert bank.bank_balance == 1000 * n
        # Withdraw half of every opening balance, then re-check the ledger.
        for id, acchldr in bank.account_holders.items():
            bank.withdrawal_transaction(f"40001|{id}-checking-1", 500.00)
        assert bank.bank_balance == 500 * n

    def test_c_balance(self):
        # ● Return a customer’s balance and the bank’s total balance.
        trans = bank.deposit_transaction(cormo_checking_card.card_number, 500.00)
        trans2 = bank.deposit_transaction(cormo_checking_card.card_number, 500.00)
        assert bank.bank_balance == cormo_checking.balance + frydy_savings.balance
        print(f"{bank.institution} balance: ${bank.bank_balance}")
        print(f"{cormo_checking.account_id} balance: ${cormo_checking.balance}")
        print(f"{frydy_savings.account_id} balance: ${frydy_savings.balance}")

    def test_d_overdraw(self):
        # ● Prevent customers from withdrawing more money than they have in their account.
        # Attempt to withdraw $1 over current balance.
        previous_balance = cormo_checking.balance
        trans = bank.withdrawal_transaction(
            cormo_checking_card.card_number, cormo_checking.balance + 1.00
        )
        # Status should be False for an unsuccessful transaction.
        assert not trans["status"]
        # We also get an explanation (the negative balance shown is done on purpose).
        print(trans["error"])
        # There should be no change in balance as the transaction was denied.
        assert previous_balance == cormo_checking.balance

    # Other tests of similar nature.
    def test_e_stat(self):
        # Exceed withdrawal limit eg. max $5000 can be taken per transaction.
        # Deposit required amount.
        bank.deposit_transaction(cormo_checking_card.card_number, 5000.00)
        # Test the limit exception is handled and trans is denied + balance maintained.
        previous_balance = cormo_checking.balance
        trans = bank.withdrawal_transaction(cormo_checking_card.card_number, 5001.00)
        assert not trans["status"]
        print(trans["error"])
        assert previous_balance == cormo_checking.balance

    def test_f_stat(self):
        # Account status if an account has been locked or closed.
        cormo_checking.status = "locked"
        trans = bank.withdrawal_transaction(cormo_checking_card.card_number, 500.00)
        # Transaction should be denied.
        assert not trans["status"]
        print(trans["error"])
        # Reopen account.
        cormo_checking.status = "open"
        trans = bank.withdrawal_transaction(cormo_checking_card.card_number, 500.00)
        # We should have a successful transaction.
        assert trans["status"]
# Run the whole suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
from railrl.launchers.vae_exp_launcher_util import (
train_vae,
train_reprojection_network_and_update_variant,
)
from railrl.launchers.rl_exp_launcher_util import (
tdm_td3_experiment,
tdm_twin_sac_experiment,
ih_td3_experiment,
ih_twin_sac_experiment,
)
def tdm_experiment(variant):
    """Run a TDM experiment described by *variant*.

    Preprocesses the variant, optionally trains the reprojection
    network (only for VAE-based, non-state experiments), then
    dispatches to twin-SAC or TD3 based on the algorithm name.
    """
    experiment_variant_preprocess(variant)
    rl_variant = variant['rl_variant']
    has_vae = 'vae_variant' in variant
    if has_vae and not rl_variant.get('do_state_exp', False):
        train_reprojection_network_and_update_variant(variant)
    algorithm = rl_variant['algorithm'].lower()
    runner = tdm_twin_sac_experiment if 'sac' in algorithm else tdm_td3_experiment
    runner(rl_variant)
def ih_experiment(variant):
    """Run an IH experiment described by *variant*.

    Preprocesses the variant, optionally trains the reprojection
    network (only for VAE-based, non-state experiments), then
    dispatches to twin-SAC or TD3 based on the algorithm name.
    """
    experiment_variant_preprocess(variant)
    rl_variant = variant['rl_variant']
    has_vae = 'vae_variant' in variant
    if has_vae and not rl_variant.get('do_state_exp', False):
        train_reprojection_network_and_update_variant(variant)
    algorithm = rl_variant['algorithm'].lower()
    runner = ih_twin_sac_experiment if 'sac' in algorithm else ih_td3_experiment
    runner(rl_variant)
def vae_experiment(variant):
    """Preprocess *variant* in place, then train its VAE."""
    experiment_variant_preprocess(variant)
    vae_variant = variant["vae_variant"]
    train_vae(vae_variant)
def vae_dataset_experiment(variant):
    """Generate only the VAE training dataset for *variant*.

    The dataset generator may accept its kwargs either expanded as
    keyword arguments or as one positional dict; the calling convention
    is chosen from the generator's signature arity.
    """
    experiment_variant_preprocess(variant)
    from railrl.launchers.vae_exp_launcher_util import generate_vae_dataset
    from inspect import signature

    vae_variant = variant['vae_variant']
    dataset_fn = vae_variant.get('generate_vae_data_fctn', generate_vae_dataset)
    dataset_kwargs = vae_variant['generate_vae_dataset_kwargs']
    takes_kwargs = len(signature(dataset_fn).parameters) > 1
    if takes_kwargs:
        dataset_fn(**dataset_kwargs)
    else:
        dataset_fn(dataset_kwargs)
def reproj_experiment(variant):
    """Train the VAE for *variant*, then its reprojection network."""
    experiment_variant_preprocess(variant)
    vae_variant = variant["vae_variant"]
    train_vae(vae_variant)
    train_reprojection_network_and_update_variant(variant)
def experiment_variant_preprocess(variant):
    """Normalize a top-level experiment variant in place.

    Copies the shared settings (env spec, camera, image size, VAE
    wrapped-env kwargs, video-dump kwargs) from *variant* down into its
    'rl_variant' and optional 'vae_variant' sub-dicts, so downstream
    launchers only ever read their own sub-variant.
    """
    rl_variant = variant['rl_variant']
    vae_variant = variant.get('vae_variant', None)
    # The environment is given either by gym id or by (class, kwargs),
    # never both.
    if 'env_id' in variant:
        assert 'env_class' not in variant
        env_id = variant['env_id']
        rl_variant['env_id'] = env_id
        if vae_variant:
            vae_variant['generate_vae_dataset_kwargs']['env_id'] = env_id
    else:
        env_class = variant['env_class']
        env_kwargs = variant['env_kwargs']
        rl_variant['env_class'] = env_class
        rl_variant['env_kwargs'] = env_kwargs
        if vae_variant:
            vae_variant['generate_vae_dataset_kwargs']['env_class'] = env_class
            vae_variant['generate_vae_dataset_kwargs']['env_kwargs'] = env_kwargs
    # Camera and image size are shared by both sub-variants.
    init_camera = variant.get('init_camera', None)
    imsize = variant.get('imsize', 84)
    rl_variant['imsize'] = imsize
    rl_variant['init_camera'] = init_camera
    if vae_variant:
        vae_variant['generate_vae_dataset_kwargs']['init_camera'] = init_camera
        vae_variant['generate_vae_dataset_kwargs']['imsize'] = imsize
        vae_variant['imsize'] = imsize
        # State-space VAEs read "state" observations instead of images.
        if vae_variant.get('vae_type', None) == "VAE-state":
            vae_wrapped_env_kwargs = rl_variant.get('vae_wrapped_env_kwargs', {})
            vae_wrapped_env_kwargs['vae_input_key_prefix'] = "state"
        import copy
        # Deep-copy so later vae-side mutations don't leak into rl_variant.
        vae_variant['vae_wrapped_env_kwargs'] = copy.deepcopy(rl_variant.get('vae_wrapped_env_kwargs', {}))
        if 'vis_kwargs' in vae_variant and 'granularity' in vae_variant['vis_kwargs']:
            vae_variant['vae_wrapped_env_kwargs']['vis_granularity'] = vae_variant['vis_kwargs']['granularity']
        # NOTE(review): this indexes rl_variant['vae_wrapped_env_kwargs']
        # directly -- assumes the key exists whenever vis granularity is
        # set; confirm callers always provide it.
        if 'vis_kwargs' in rl_variant and 'granularity' in rl_variant['vis_kwargs']:
            rl_variant['vae_wrapped_env_kwargs']['vis_granularity'] = rl_variant['vis_kwargs']['granularity']
        if 'generate_vae_dataset_kwargs' in vae_variant \
                and 'dataset_path' in vae_variant['generate_vae_dataset_kwargs']:
            vae_variant['vae_wrapped_env_kwargs']['vae_dataset_path'] = \
                vae_variant['generate_vae_dataset_kwargs']['dataset_path']
            rl_variant['vae_wrapped_env_kwargs']['vae_dataset_path'] = \
                vae_variant['generate_vae_dataset_kwargs']['dataset_path']
    # Video-dump settings: shared defaults, per-variant column counts.
    dump_video_kwargs = variant.get(
        'dump_video_kwargs',
        dict(
            rows=1,
            pad_length=1,
            pad_color=0,
        ),
    )
    rl_variant['dump_video_kwargs'] = dump_video_kwargs
    rl_variant['dump_video_kwargs']['columns'] = rl_variant['vis_kwargs'].get('num_samples_for_video', 10)
    if vae_variant:
        vae_variant['dump_video_kwargs'] = copy.deepcopy(dump_video_kwargs)
        vae_variant['dump_video_kwargs']['columns'] = vae_variant['vis_kwargs'].get('num_samples_for_video', 10)
train_vae,
train_reprojection_network_and_update_variant,
)
from railrl.launchers.rl_exp_launcher_util import (
tdm_td3_experiment,
tdm_twin_sac_experiment,
ih_td3_experiment,
ih_twin_sac_experiment,
)
def tdm_experiment(variant):
    """Run a TDM experiment described by *variant*.

    Preprocesses the variant, optionally trains the reprojection
    network (only for VAE-based, non-state experiments), then
    dispatches to twin-SAC or TD3 based on the algorithm name.
    """
    experiment_variant_preprocess(variant)
    rl_variant = variant['rl_variant']
    has_vae = 'vae_variant' in variant
    if has_vae and not rl_variant.get('do_state_exp', False):
        train_reprojection_network_and_update_variant(variant)
    algorithm = rl_variant['algorithm'].lower()
    runner = tdm_twin_sac_experiment if 'sac' in algorithm else tdm_td3_experiment
    runner(rl_variant)
def ih_experiment(variant):
    """Run an IH experiment described by *variant*.

    Preprocesses the variant, optionally trains the reprojection
    network (only for VAE-based, non-state experiments), then
    dispatches to twin-SAC or TD3 based on the algorithm name.
    """
    experiment_variant_preprocess(variant)
    rl_variant = variant['rl_variant']
    has_vae = 'vae_variant' in variant
    if has_vae and not rl_variant.get('do_state_exp', False):
        train_reprojection_network_and_update_variant(variant)
    algorithm = rl_variant['algorithm'].lower()
    runner = ih_twin_sac_experiment if 'sac' in algorithm else ih_td3_experiment
    runner(rl_variant)
def vae_experiment(variant):
    """Preprocess *variant* in place, then train its VAE."""
    experiment_variant_preprocess(variant)
    vae_variant = variant["vae_variant"]
    train_vae(vae_variant)
def vae_dataset_experiment(variant):
    """Generate only the VAE training dataset for *variant*.

    The dataset generator may accept its kwargs either expanded as
    keyword arguments or as one positional dict; the calling convention
    is chosen from the generator's signature arity.
    """
    experiment_variant_preprocess(variant)
    from railrl.launchers.vae_exp_launcher_util import generate_vae_dataset
    from inspect import signature

    vae_variant = variant['vae_variant']
    dataset_fn = vae_variant.get('generate_vae_data_fctn', generate_vae_dataset)
    dataset_kwargs = vae_variant['generate_vae_dataset_kwargs']
    takes_kwargs = len(signature(dataset_fn).parameters) > 1
    if takes_kwargs:
        dataset_fn(**dataset_kwargs)
    else:
        dataset_fn(dataset_kwargs)
def reproj_experiment(variant):
    """Train the VAE for *variant*, then its reprojection network."""
    experiment_variant_preprocess(variant)
    vae_variant = variant["vae_variant"]
    train_vae(vae_variant)
    train_reprojection_network_and_update_variant(variant)
def experiment_variant_preprocess(variant):
    """Normalize a top-level experiment variant in place.

    Copies the shared settings (env spec, camera, image size, VAE
    wrapped-env kwargs, video-dump kwargs) from *variant* down into its
    'rl_variant' and optional 'vae_variant' sub-dicts, so downstream
    launchers only ever read their own sub-variant.
    """
    rl_variant = variant['rl_variant']
    vae_variant = variant.get('vae_variant', None)
    # The environment is given either by gym id or by (class, kwargs),
    # never both.
    if 'env_id' in variant:
        assert 'env_class' not in variant
        env_id = variant['env_id']
        rl_variant['env_id'] = env_id
        if vae_variant:
            vae_variant['generate_vae_dataset_kwargs']['env_id'] = env_id
    else:
        env_class = variant['env_class']
        env_kwargs = variant['env_kwargs']
        rl_variant['env_class'] = env_class
        rl_variant['env_kwargs'] = env_kwargs
        if vae_variant:
            vae_variant['generate_vae_dataset_kwargs']['env_class'] = env_class
            vae_variant['generate_vae_dataset_kwargs']['env_kwargs'] = env_kwargs
    # Camera and image size are shared by both sub-variants.
    init_camera = variant.get('init_camera', None)
    imsize = variant.get('imsize', 84)
    rl_variant['imsize'] = imsize
    rl_variant['init_camera'] = init_camera
    if vae_variant:
        vae_variant['generate_vae_dataset_kwargs']['init_camera'] = init_camera
        vae_variant['generate_vae_dataset_kwargs']['imsize'] = imsize
        vae_variant['imsize'] = imsize
        # State-space VAEs read "state" observations instead of images.
        if vae_variant.get('vae_type', None) == "VAE-state":
            vae_wrapped_env_kwargs = rl_variant.get('vae_wrapped_env_kwargs', {})
            vae_wrapped_env_kwargs['vae_input_key_prefix'] = "state"
        import copy
        # Deep-copy so later vae-side mutations don't leak into rl_variant.
        vae_variant['vae_wrapped_env_kwargs'] = copy.deepcopy(rl_variant.get('vae_wrapped_env_kwargs', {}))
        if 'vis_kwargs' in vae_variant and 'granularity' in vae_variant['vis_kwargs']:
            vae_variant['vae_wrapped_env_kwargs']['vis_granularity'] = vae_variant['vis_kwargs']['granularity']
        # NOTE(review): this indexes rl_variant['vae_wrapped_env_kwargs']
        # directly -- assumes the key exists whenever vis granularity is
        # set; confirm callers always provide it.
        if 'vis_kwargs' in rl_variant and 'granularity' in rl_variant['vis_kwargs']:
            rl_variant['vae_wrapped_env_kwargs']['vis_granularity'] = rl_variant['vis_kwargs']['granularity']
        if 'generate_vae_dataset_kwargs' in vae_variant \
                and 'dataset_path' in vae_variant['generate_vae_dataset_kwargs']:
            vae_variant['vae_wrapped_env_kwargs']['vae_dataset_path'] = \
                vae_variant['generate_vae_dataset_kwargs']['dataset_path']
            rl_variant['vae_wrapped_env_kwargs']['vae_dataset_path'] = \
                vae_variant['generate_vae_dataset_kwargs']['dataset_path']
    # Video-dump settings: shared defaults, per-variant column counts.
    dump_video_kwargs = variant.get(
        'dump_video_kwargs',
        dict(
            rows=1,
            pad_length=1,
            pad_color=0,
        ),
    )
    rl_variant['dump_video_kwargs'] = dump_video_kwargs
    rl_variant['dump_video_kwargs']['columns'] = rl_variant['vis_kwargs'].get('num_samples_for_video', 10)
    if vae_variant:
        vae_variant['dump_video_kwargs'] = copy.deepcopy(dump_video_kwargs)
        vae_variant['dump_video_kwargs']['columns'] = vae_variant['vis_kwargs'].get('num_samples_for_video', 10)
from pathlib import Path
from pprint import pprint
import click
from click import style
import tabulate as tabulate_module
from tabulate import tabulate
from .app import App
from .objects import LogLine
from .requests import (CommitRequest, DeploysRequest, JobsRequest,
JobLogRequest, PagesRequest, PagesBuildsRequest,
RunsRequest, StatusRequest)
from .states import Ok
def show_pages(obj):
    """Render the Github Pages summary for *obj* into the app buffer.

    Params
    ------
    obj (Pages): Pages object
    """
    app = App.APP
    heading = "Pages"
    body = [
        [f"{app.style.status(obj.status)} {obj.url}"],
        [f" branch: {obj.branch}, path: {obj.path}"],
    ]
    # First rendered line sits on the title row; the rest are indented
    # underneath it.
    rendered = tabulate(body, tablefmt="rst").splitlines()
    first, rest = rendered[0], rendered[1:]
    app.writer.add_line(style(heading, bold=True), " ", first, before=1)
    app.writer.indent(to=len(heading) + 2)
    app.writer.add_lines(rest)
    app.writer.dedent(reset=True)
def show_deploys(deploys, count: int = 3):
    """Print list of {count} recent deploys to buffer.

    Params
    ------
    deploys (List[Deploy]) : List of Deploy objects
    count (int) : Max number to print
    """
    app = App.APP
    deploys_table = []
    for deploy in deploys[:count]:
        deploys_table.append({
            "OK": app.style.status(deploy.status),
            "date": deploy.date,
            "id": deploy.id,
            "commit": deploy.sha,
            # Pad with blanks when there is no master sha so columns align.
            # NOTE(review): `Sha` is not among this module's visible
            # imports -- confirm it is in scope or this raises NameError.
            "master": deploy.master_sha or " "*Sha.SHORT_LENGTH,
            "message": deploy.message[0:50],
        })
    app.writer.add_block(app.style.header("Repo Deploys"))
    app.writer.add_line()
    app.writer.indent()
    app.writer.add_block(tabulate(deploys_table, headers="keys"))
    app.writer.dedent(reset=True)
def show_runs(runs, deploys, builds, count: int = 8):
    """Print the recent Github Action Workflow Runs to the buffer.

    Each run row is cross-referenced with its matching deploy and Pages
    build (via Run.find_deploy / Run.find_build) so their statuses can
    be shown inline.

    Params
    ------
    runs (List[Run]) : List of workflow Run objects
    deploys (List[Deploy]) : List of repo Deploy objects
    builds (List[Build]) : List of Pages Build objects
    count (int) : Max number to print
    """
    app = App.APP
    runs_table = []
    for run in runs[:count]:
        # Attach the deploy/build corresponding to this run, if any.
        run.find_deploy(deploys)
        run.find_build(builds)
        runs_table.append({
            "#": run.number,
            "date": run.date,
            "id": run.id,
            "commit": run.sha,
            # First line of the commit message, truncated to 30 chars.
            "message": run.message.splitlines()[0][0:30],
            "job": app.style.job(run.last_job),
            "deploy": f"{app.style.status(run.deploy.status)} {run.deploy.sha}" if run.deploy else "",
            "build": f"{app.style.status(run.build.status)} {run.build.id}" if run.build else "",
        })
    app.writer.add_block(app.style.header("Github Action Runs"))
    app.writer.add_line()
    app.writer.add_block(tabulate(runs_table, headers="keys"))
def show_builds(builds, count: int=3):
"""Print the recent Github Pages builds to the buffer.
Params
------
"""
app = App.APP
table = []
margin = len(" ✔ 2020-12-05 219342206 2b45940 ") + 4
wrap_width = app.width - margin
for build in builds[:count]:
table.append({
"OK": app.style.status(build.status),
"date": build.date,
"id": build.id,
"commit": build.sha,
"message": build.commit.message,
})
app.writer.add_block(app.style.header("Github Pages Builds"))
app.writer.add_line()
app.writer.indent()
app.writer.add_block(tabulate(table, headers="keys"))
app.writer.dedent(reset=True)
def show_failed_job_errors(job):
"""Print the error messages if {job} failed.
Params
------
job (Job): workflow run Job object
"""
if job.ok != Ok.fail:
return
app = App.APP
margin = len(f"| 959 | section | {''} |") + 2
wrap_width, idt = app.width - margin, " " * 2
log = job.log
table = []
last_sec, last_group = LogLine(), LogLine()
for err in log.errors:
sec = log.belongs_to(err, "section")
group = log.belongs_to(err, "group")
if sec != last_sec:
last_sec = sec
table.append((sec.num, sec.category, f"{sec.msg} >"))
if group != last_group:
last_group = group
table.append((group.num, group.category, f"{idt}{group.msg} >"))
table.append((
err.num, "error",
click.wrap_text(
f"- {err.msg}",
width=wrap_width,
initial_indent=(idt*3),
subsequent_indent=idt*4,
preserve_paragraphs=True
)
))
app.writer.add_block(app.style.header("Errors"))
app.writer.indent()
app.writer.add_block(tabulate(table, tablefmt="grid", colalign=("right",)))
app.writer.dedent(reset=True)
def show_failed_build(build):
"""Print the stats and error message if {build} failed.
Params
------
build (Build): build object
"""
if build.ok != Ok.fail:
return
app = App.APP
app.info(build)
ctx = click.get_current_context()
table = [
("Build", f"[{style(build.status.name, fg='red')}] {build.date} {build.id}"),
("Error", build.error),
("More info", app.term.link(build.more_info, "Troubleshooting Github Pages Sites")),
]
app.writer.add_block(app.style.header("\u2757 Last Build Failed"))
app.writer.indent()
app.writer.add_block(tabulate(table, tablefmt="rst"))
app.writer.dedent(reset=True)
def show_failed_job(job):
"""Print the stats and error messages if {job} failed.
Params
------
job (Job): workflow run Job object
"""
if job.ok != Ok.fail:
return
app = App.APP
app.writer.add_block(app.style.header("\u2757 Last Run Failed"))
show_failed_job_stats(job)
show_failed_job_errors(job)
def show_failed_job_stats(job):
"""Print the stats if {job} failed.
Params
------
job (Job): workflow run Job object
"""
if job.ok != Ok.fail:
return
minwidth = len("alissa-huskey/python-class > actions") + 2
app = App.APP
table = [
("Workflow", job.parent.workflow_id),
("Run", f"[{style(job.parent.status.name, fg='red')}] {job.parent.id}"),
("Job", f"[{style(job.status.name, fg='red')}] {job.id}"),
("Step", f"[{style(job.last_step.status.name, fg='red')}] {app.style.step(job.last_step, len(str(job.last_step.number)))}"),
("Log", str(job.log_request.filepath.relative_to(App.APP.data_dir)).ljust(minwidth)),
("URL", app.term.link(job.url, f"{App.APP.repo} > actions")),
]
app.writer.indent()
app.writer.add_block(tabulate(table, tablefmt="rst"))
app.writer.dedent(reset=True)
@click.command()
@click.option("--repo", "-r",
help="Github repo connected to Github Pages. (ie 'executablebooks/jupyter-book')")
@click.option("--path", "-p", default=".",
type=click.Path(exists=True, file_okay=False, resolve_path=True),
help="Path to local checkout of github repo.")
@click.option("--data-root", "-d",
help="Where to save downloaded data files.")
@click.option("--local", "-l", is_flag=True, default=False,
help="Don't download updates, even for pending states.'")
@click.option("--refresh", "-r", is_flag=True, default=False,
help="Download updates.")
@click.option("--verbose", "-v", is_flag=True, default=False,
help="Enable verbose mode.")
def main(**kwargs):
"""Show status information about Gihub Pages and Actions"""
app = App(**kwargs)
tabulate_module.PRESERVE_WHITESPACE = True
pages = PagesRequest()
deploys = DeploysRequest()
runs = RunsRequest()
builds = PagesBuildsRequest()
build = builds.data[0]
show_pages(pages.data)
show_runs(runs.data, deploys.data, builds.data)
show_failed_job(runs.data[0].last_job)
show_failed_build(build)
app.writer.paginate()
def run():
"""Run the click command"""
main(auto_envvar_prefix="GHP")
if __name__ == "__main__":
run() | ghp/cli.py | from pathlib import Path
from pprint import pprint
import click
from click import style
import tabulate as tabulate_module
from tabulate import tabulate
from .app import App
from .objects import LogLine
from .requests import (CommitRequest, DeploysRequest, JobsRequest,
JobLogRequest, PagesRequest, PagesBuildsRequest,
RunsRequest, StatusRequest)
from .states import Ok
def show_pages(obj):
"""Print the Github Pages details to buffer.
Params
------
obj (Pages): Pages object
"""
app = App.APP
rows = [
[f"{app.style.status(obj.status)} {obj.url}"],
[f" branch: {obj.branch}, path: {obj.path}"],
]
title = "Pages"
table = tabulate(rows, tablefmt="rst").splitlines()
app.writer.add_line(style(title, bold=True), " ", table.pop(0), before=1)
app.writer.indent(to=len(title)+2)
app.writer.add_lines(table)
app.writer.dedent(reset=True)
def show_deploys(deploys, count: int=3):
"""Print list of {count} recent deploys to buffer.
Params
------
deploys (List[Deploy]) : List of Deploy objects
count (int) : Max number to print
"""
app = App.APP
deploys_table = []
for deploy in deploys[:count]:
deploys_table.append({
"OK": app.style.status(deploy.status),
"date": deploy.date,
"id": deploy.id,
"commit": deploy.sha,
"master": deploy.master_sha or " "*Sha.SHORT_LENGTH,
"message": deploy.message[0:50],
})
app.writer.add_block(app.style.header("Repo Deploys"))
app.writer.add_line()
app.writer.indent()
app.writer.add_block(tabulate(deploys_table, headers="keys"))
app.writer.dedent(reset=True)
def show_runs(runs, deploys, builds, count: int=8):
"""Print the recent Github Action Workflow Runs to the buffer.
Params
------
runs (List[Run]) : List of workflow Run objects
deploys (List[Deploy]) : List of repo Deploy objects
"""
app = App.APP
runs_table = []
for run in runs[:count]:
run.find_deploy(deploys)
run.find_build(builds)
runs_table.append({
"#": run.number,
"date": run.date,
"id": run.id,
"commit": run.sha,
"message": run.message.splitlines()[0][0:30],
"job": app.style.job(run.last_job),
"deploy": f"{app.style.status(run.deploy.status)} {run.deploy.sha}" if run.deploy else "",
"build": f"{app.style.status(run.build.status)} {run.build.id}" if run.build else "",
})
app.writer.add_block(app.style.header("Github Action Runs"))
app.writer.add_line()
app.writer.add_block(tabulate(runs_table, headers="keys"))
def show_builds(builds, count: int=3):
"""Print the recent Github Pages builds to the buffer.
Params
------
"""
app = App.APP
table = []
margin = len(" ✔ 2020-12-05 219342206 2b45940 ") + 4
wrap_width = app.width - margin
for build in builds[:count]:
table.append({
"OK": app.style.status(build.status),
"date": build.date,
"id": build.id,
"commit": build.sha,
"message": build.commit.message,
})
app.writer.add_block(app.style.header("Github Pages Builds"))
app.writer.add_line()
app.writer.indent()
app.writer.add_block(tabulate(table, headers="keys"))
app.writer.dedent(reset=True)
def show_failed_job_errors(job):
"""Print the error messages if {job} failed.
Params
------
job (Job): workflow run Job object
"""
if job.ok != Ok.fail:
return
app = App.APP
margin = len(f"| 959 | section | {''} |") + 2
wrap_width, idt = app.width - margin, " " * 2
log = job.log
table = []
last_sec, last_group = LogLine(), LogLine()
for err in log.errors:
sec = log.belongs_to(err, "section")
group = log.belongs_to(err, "group")
if sec != last_sec:
last_sec = sec
table.append((sec.num, sec.category, f"{sec.msg} >"))
if group != last_group:
last_group = group
table.append((group.num, group.category, f"{idt}{group.msg} >"))
table.append((
err.num, "error",
click.wrap_text(
f"- {err.msg}",
width=wrap_width,
initial_indent=(idt*3),
subsequent_indent=idt*4,
preserve_paragraphs=True
)
))
app.writer.add_block(app.style.header("Errors"))
app.writer.indent()
app.writer.add_block(tabulate(table, tablefmt="grid", colalign=("right",)))
app.writer.dedent(reset=True)
def show_failed_build(build):
"""Print the stats and error message if {build} failed.
Params
------
build (Build): build object
"""
if build.ok != Ok.fail:
return
app = App.APP
app.info(build)
ctx = click.get_current_context()
table = [
("Build", f"[{style(build.status.name, fg='red')}] {build.date} {build.id}"),
("Error", build.error),
("More info", app.term.link(build.more_info, "Troubleshooting Github Pages Sites")),
]
app.writer.add_block(app.style.header("\u2757 Last Build Failed"))
app.writer.indent()
app.writer.add_block(tabulate(table, tablefmt="rst"))
app.writer.dedent(reset=True)
def show_failed_job(job):
"""Print the stats and error messages if {job} failed.
Params
------
job (Job): workflow run Job object
"""
if job.ok != Ok.fail:
return
app = App.APP
app.writer.add_block(app.style.header("\u2757 Last Run Failed"))
show_failed_job_stats(job)
show_failed_job_errors(job)
def show_failed_job_stats(job):
"""Print the stats if {job} failed.
Params
------
job (Job): workflow run Job object
"""
if job.ok != Ok.fail:
return
minwidth = len("alissa-huskey/python-class > actions") + 2
app = App.APP
table = [
("Workflow", job.parent.workflow_id),
("Run", f"[{style(job.parent.status.name, fg='red')}] {job.parent.id}"),
("Job", f"[{style(job.status.name, fg='red')}] {job.id}"),
("Step", f"[{style(job.last_step.status.name, fg='red')}] {app.style.step(job.last_step, len(str(job.last_step.number)))}"),
("Log", str(job.log_request.filepath.relative_to(App.APP.data_dir)).ljust(minwidth)),
("URL", app.term.link(job.url, f"{App.APP.repo} > actions")),
]
app.writer.indent()
app.writer.add_block(tabulate(table, tablefmt="rst"))
app.writer.dedent(reset=True)
@click.command()
@click.option("--repo", "-r",
help="Github repo connected to Github Pages. (ie 'executablebooks/jupyter-book')")
@click.option("--path", "-p", default=".",
type=click.Path(exists=True, file_okay=False, resolve_path=True),
help="Path to local checkout of github repo.")
@click.option("--data-root", "-d",
help="Where to save downloaded data files.")
@click.option("--local", "-l", is_flag=True, default=False,
help="Don't download updates, even for pending states.'")
@click.option("--refresh", "-r", is_flag=True, default=False,
help="Download updates.")
@click.option("--verbose", "-v", is_flag=True, default=False,
help="Enable verbose mode.")
def main(**kwargs):
"""Show status information about Gihub Pages and Actions"""
app = App(**kwargs)
tabulate_module.PRESERVE_WHITESPACE = True
pages = PagesRequest()
deploys = DeploysRequest()
runs = RunsRequest()
builds = PagesBuildsRequest()
build = builds.data[0]
show_pages(pages.data)
show_runs(runs.data, deploys.data, builds.data)
show_failed_job(runs.data[0].last_job)
show_failed_build(build)
app.writer.paginate()
def run():
"""Run the click command"""
main(auto_envvar_prefix="GHP")
if __name__ == "__main__":
run() | 0.25488 | 0.171442 |
from hailtop.utils import (partition, url_basename, url_join, url_scheme,
url_and_params, parse_docker_image_reference, grouped)
from hailtop.utils.utils import digits_needed, unzip, filter_none, flatten
def test_partition_zero_empty():
assert list(partition(0, [])) == []
def test_partition_even_small():
assert list(partition(3, range(3))) == [range(0, 1), range(1, 2), range(2, 3)]
def test_partition_even_big():
assert list(partition(3, range(9))) == [range(0, 3), range(3, 6), range(6, 9)]
def test_partition_uneven_big():
assert list(partition(2, range(9))) == [range(0, 5), range(5, 9)]
def test_partition_toofew():
assert list(partition(6, range(3))) == [range(0, 1), range(1, 2), range(2, 3),
range(3, 3), range(3, 3), range(3, 3)]
def test_url_basename():
assert url_basename('/path/to/file') == 'file'
assert url_basename('https://hail.is/path/to/file') == 'file'
def test_url_join():
assert url_join('/path/to', 'file') == '/path/to/file'
assert url_join('/path/to/', 'file') == '/path/to/file'
assert url_join('/path/to/', '/absolute/file') == '/absolute/file'
assert url_join('https://hail.is/path/to', 'file') == 'https://hail.is/path/to/file'
assert url_join('https://hail.is/path/to/', 'file') == 'https://hail.is/path/to/file'
assert url_join('https://hail.is/path/to/', '/absolute/file') == 'https://hail.is/absolute/file'
def test_url_scheme():
assert url_scheme('https://hail.is/path/to') == 'https'
assert url_scheme('/path/to') == ''
def test_url_and_params():
assert url_and_params('https://example.com/') == ('https://example.com/', {})
assert url_and_params('https://example.com/foo?') == ('https://example.com/foo', {})
assert url_and_params('https://example.com/foo?a=b&c=d') == ('https://example.com/foo', {'a': 'b', 'c': 'd'})
def test_parse_docker_image_reference():
x = parse_docker_image_reference('animage')
assert x.domain is None
assert x.path == 'animage'
assert x.tag is None
assert x.digest is None
assert x.name() == 'animage'
assert str(x) == 'animage'
x = parse_docker_image_reference('hailgenetics/animage')
assert x.domain == 'hailgenetics'
assert x.path == 'animage'
assert x.tag is None
assert x.digest is None
assert x.name() == 'hailgenetics/animage'
assert str(x) == 'hailgenetics/animage'
x = parse_docker_image_reference('localhost:5000/animage')
assert x.domain == 'localhost:5000'
assert x.path == 'animage'
assert x.tag is None
assert x.digest is None
assert x.name() == 'localhost:5000/animage'
assert str(x) == 'localhost:5000/animage'
x = parse_docker_image_reference('localhost:5000/a/b/name')
assert x.domain == 'localhost:5000'
assert x.path == 'a/b/name'
assert x.tag is None
assert x.digest is None
assert x.name() == 'localhost:5000/a/b/name'
assert str(x) == 'localhost:5000/a/b/name'
x = parse_docker_image_reference('localhost:5000/a/b/name:tag')
assert x.domain == 'localhost:5000'
assert x.path == 'a/b/name'
assert x.tag == 'tag'
assert x.digest is None
assert x.name() == 'localhost:5000/a/b/name'
assert str(x) == 'localhost:5000/a/b/name:tag'
x = parse_docker_image_reference('localhost:5000/a/b/name:tag@sha256:abc123')
assert x.domain == 'localhost:5000'
assert x.path == 'a/b/name'
assert x.tag == 'tag'
assert x.digest == 'sha256:abc123'
assert x.name() == 'localhost:5000/a/b/name'
assert str(x) == 'localhost:5000/a/b/name:tag@sha256:abc123'
x = parse_docker_image_reference('localhost:5000/a/b/name@sha256:abc123')
assert x.domain == 'localhost:5000'
assert x.path == 'a/b/name'
assert x.tag is None
assert x.digest == 'sha256:abc123'
assert x.name() == 'localhost:5000/a/b/name'
assert str(x) == 'localhost:5000/a/b/name@sha256:abc123'
x = parse_docker_image_reference('name@sha256:abc123')
assert x.domain is None
assert x.path == 'name'
assert x.tag is None
assert x.digest == 'sha256:abc123'
assert x.name() == 'name'
assert str(x) == 'name@sha256:abc123'
x = parse_docker_image_reference('gcr.io/hail-vdc/batch-worker:123fds312')
assert x.domain == 'gcr.io'
assert x.path == 'hail-vdc/batch-worker'
assert x.tag == '123fds312'
assert x.digest is None
assert x.name() == 'gcr.io/hail-vdc/batch-worker'
assert str(x) == 'gcr.io/hail-vdc/batch-worker:123fds312'
x = parse_docker_image_reference('us-docker.pkg.dev/my-project/my-repo/test-image')
assert x.domain == 'us-docker.pkg.dev'
assert x.path == 'my-project/my-repo/test-image'
assert x.tag is None
assert x.digest is None
assert x.name() == 'us-docker.pkg.dev/my-project/my-repo/test-image'
assert str(x) == 'us-docker.pkg.dev/my-project/my-repo/test-image'
def test_grouped_size_0_groups_9_elements():
try:
list(grouped(0, [1,2,3,4,5,6,7,8,9]))
except ValueError:
pass
else:
assert False
def test_grouped_size_1_groups_9_elements():
actual = list(grouped(1, [1,2,3,4,5,6,7,8,9]))
expected = [[1], [2], [3], [4], [5], [6], [7], [8], [9]]
assert actual == expected
def test_grouped_size_5_groups_9_elements():
actual = list(grouped(5, [1,2,3,4,5,6,7,8,9]))
expected = [[1, 2, 3, 4, 5], [6, 7, 8, 9]]
assert actual == expected
def test_grouped_size_3_groups_0_elements():
actual = list(grouped(3,[]))
expected = []
assert actual == expected
def test_grouped_size_2_groups_1_elements():
actual = list(grouped(2,[1]))
expected = [[1]]
assert actual == expected
def test_grouped_size_1_groups_0_elements():
actual = list(grouped(1,[0]))
expected = [[0]]
assert actual == expected
def test_grouped_size_1_groups_5_elements():
actual = list(grouped(1,['abc', 'def', 'ghi', 'jkl', 'mno']))
expected = [['abc'], ['def'], ['ghi'], ['jkl'], ['mno']]
assert actual == expected
def test_grouped_size_2_groups_5_elements():
actual = list(grouped(2,['abc', 'def', 'ghi', 'jkl', 'mno']))
expected = [['abc', 'def'], ['ghi', 'jkl'], ['mno']]
assert actual == expected
def test_grouped_size_3_groups_6_elements():
actual = list(grouped(3,['abc', 'def', 'ghi', 'jkl', 'mno', '']))
expected = [['abc', 'def', 'ghi'], ['jkl', 'mno', '']]
assert actual == expected
def test_grouped_size_3_groups_7_elements():
actual = list(grouped(3,['abc', 'def', 'ghi', 'jkl', 'mno', 'pqr', 'stu']))
expected = [['abc', 'def', 'ghi'], ['jkl', 'mno', 'pqr'], ['stu']]
assert actual == expected
def test_unzip():
assert unzip([]) == ([], [])
assert unzip([(0, 'a')]) == ([0], ['a'])
assert unzip([(123, '')]) == ([123], [''])
assert unzip([(123, 'abc')]) == ([123], ['abc'])
assert unzip([(123, 456), ('abc', 'def')]) == ([123, 'abc'], [456, 'def'])
assert unzip([(123, 'abc'), (456, 'def'), (789, 'ghi')]) == ([123, 456, 789], ['abc', 'def', 'ghi'])
def test_digits_needed():
assert digits_needed(0) == 1
assert digits_needed(1) == 1
assert digits_needed(12) == 2
assert digits_needed(333) == 3
assert digits_needed(100) == 3
assert digits_needed(3000) == 4
assert digits_needed(50000) == 5
def test_filter_none():
assert filter_none([]) == []
assert filter_none([None, []]) == [[]]
assert filter_none([0, []]) == [0, []]
assert filter_none([1, 2, [None]]) == [1, 2, [None]]
assert filter_none([1, 3.5, 2, 4,]) == [1, 3.5, 2, 4]
assert filter_none([1, 2, 3.0, None, 5]) == [1, 2, 3.0, 5]
assert filter_none(['a', 'b', 'c', None]) == ['a', 'b', 'c']
assert filter_none([None, [None, [None, [None]]]]) == [[None, [None, [None]]]]
def test_flatten():
assert flatten([]) == []
assert flatten([[]]) == []
assert flatten([[], []]) == []
assert flatten([[], [3]]) == [3]
assert flatten([[1, 2, 3], [3], [4, 5]]) == [1, 2, 3, 3, 4, 5]
assert flatten([['a', 'b', 'c'], ['d', 'e']]) == ['a', 'b', 'c', 'd', 'e']
assert flatten([[['a'], ['b']], [[1, 2, 3], [4, 5]]]) == [['a'], ['b'], [1, 2, 3], [4, 5]]
assert flatten([['apples'], ['bannanas'], ['oranges']]) == ['apples', 'bannanas', 'oranges']
assert flatten([['apple', 'bannana'], ['a', 'b', 'c'], [1, 2, 3, 4]]) == ['apple', 'bannana', 'a', 'b', 'c', 1, 2, 3, 4]
assert flatten([['apples'], [''], ['bannanas'], [''], ['oranges'], ['']]) == ['apples', '', 'bannanas', '', 'oranges', ''] | hail/python/test/hailtop/utils/test_utils.py | from hailtop.utils import (partition, url_basename, url_join, url_scheme,
url_and_params, parse_docker_image_reference, grouped)
from hailtop.utils.utils import digits_needed, unzip, filter_none, flatten
def test_partition_zero_empty():
assert list(partition(0, [])) == []
def test_partition_even_small():
assert list(partition(3, range(3))) == [range(0, 1), range(1, 2), range(2, 3)]
def test_partition_even_big():
assert list(partition(3, range(9))) == [range(0, 3), range(3, 6), range(6, 9)]
def test_partition_uneven_big():
assert list(partition(2, range(9))) == [range(0, 5), range(5, 9)]
def test_partition_toofew():
assert list(partition(6, range(3))) == [range(0, 1), range(1, 2), range(2, 3),
range(3, 3), range(3, 3), range(3, 3)]
def test_url_basename():
assert url_basename('/path/to/file') == 'file'
assert url_basename('https://hail.is/path/to/file') == 'file'
def test_url_join():
assert url_join('/path/to', 'file') == '/path/to/file'
assert url_join('/path/to/', 'file') == '/path/to/file'
assert url_join('/path/to/', '/absolute/file') == '/absolute/file'
assert url_join('https://hail.is/path/to', 'file') == 'https://hail.is/path/to/file'
assert url_join('https://hail.is/path/to/', 'file') == 'https://hail.is/path/to/file'
assert url_join('https://hail.is/path/to/', '/absolute/file') == 'https://hail.is/absolute/file'
def test_url_scheme():
assert url_scheme('https://hail.is/path/to') == 'https'
assert url_scheme('/path/to') == ''
def test_url_and_params():
assert url_and_params('https://example.com/') == ('https://example.com/', {})
assert url_and_params('https://example.com/foo?') == ('https://example.com/foo', {})
assert url_and_params('https://example.com/foo?a=b&c=d') == ('https://example.com/foo', {'a': 'b', 'c': 'd'})
def test_parse_docker_image_reference():
x = parse_docker_image_reference('animage')
assert x.domain is None
assert x.path == 'animage'
assert x.tag is None
assert x.digest is None
assert x.name() == 'animage'
assert str(x) == 'animage'
x = parse_docker_image_reference('hailgenetics/animage')
assert x.domain == 'hailgenetics'
assert x.path == 'animage'
assert x.tag is None
assert x.digest is None
assert x.name() == 'hailgenetics/animage'
assert str(x) == 'hailgenetics/animage'
x = parse_docker_image_reference('localhost:5000/animage')
assert x.domain == 'localhost:5000'
assert x.path == 'animage'
assert x.tag is None
assert x.digest is None
assert x.name() == 'localhost:5000/animage'
assert str(x) == 'localhost:5000/animage'
x = parse_docker_image_reference('localhost:5000/a/b/name')
assert x.domain == 'localhost:5000'
assert x.path == 'a/b/name'
assert x.tag is None
assert x.digest is None
assert x.name() == 'localhost:5000/a/b/name'
assert str(x) == 'localhost:5000/a/b/name'
x = parse_docker_image_reference('localhost:5000/a/b/name:tag')
assert x.domain == 'localhost:5000'
assert x.path == 'a/b/name'
assert x.tag == 'tag'
assert x.digest is None
assert x.name() == 'localhost:5000/a/b/name'
assert str(x) == 'localhost:5000/a/b/name:tag'
x = parse_docker_image_reference('localhost:5000/a/b/name:tag@sha256:abc123')
assert x.domain == 'localhost:5000'
assert x.path == 'a/b/name'
assert x.tag == 'tag'
assert x.digest == 'sha256:abc123'
assert x.name() == 'localhost:5000/a/b/name'
assert str(x) == 'localhost:5000/a/b/name:tag@sha256:abc123'
x = parse_docker_image_reference('localhost:5000/a/b/name@sha256:abc123')
assert x.domain == 'localhost:5000'
assert x.path == 'a/b/name'
assert x.tag is None
assert x.digest == 'sha256:abc123'
assert x.name() == 'localhost:5000/a/b/name'
assert str(x) == 'localhost:5000/a/b/name@sha256:abc123'
x = parse_docker_image_reference('name@sha256:abc123')
assert x.domain is None
assert x.path == 'name'
assert x.tag is None
assert x.digest == 'sha256:abc123'
assert x.name() == 'name'
assert str(x) == 'name@sha256:abc123'
x = parse_docker_image_reference('gcr.io/hail-vdc/batch-worker:123fds312')
assert x.domain == 'gcr.io'
assert x.path == 'hail-vdc/batch-worker'
assert x.tag == '123fds312'
assert x.digest is None
assert x.name() == 'gcr.io/hail-vdc/batch-worker'
assert str(x) == 'gcr.io/hail-vdc/batch-worker:123fds312'
x = parse_docker_image_reference('us-docker.pkg.dev/my-project/my-repo/test-image')
assert x.domain == 'us-docker.pkg.dev'
assert x.path == 'my-project/my-repo/test-image'
assert x.tag is None
assert x.digest is None
assert x.name() == 'us-docker.pkg.dev/my-project/my-repo/test-image'
assert str(x) == 'us-docker.pkg.dev/my-project/my-repo/test-image'
def test_grouped_size_0_groups_9_elements():
try:
list(grouped(0, [1,2,3,4,5,6,7,8,9]))
except ValueError:
pass
else:
assert False
def test_grouped_size_1_groups_9_elements():
actual = list(grouped(1, [1,2,3,4,5,6,7,8,9]))
expected = [[1], [2], [3], [4], [5], [6], [7], [8], [9]]
assert actual == expected
def test_grouped_size_5_groups_9_elements():
actual = list(grouped(5, [1,2,3,4,5,6,7,8,9]))
expected = [[1, 2, 3, 4, 5], [6, 7, 8, 9]]
assert actual == expected
def test_grouped_size_3_groups_0_elements():
actual = list(grouped(3,[]))
expected = []
assert actual == expected
def test_grouped_size_2_groups_1_elements():
actual = list(grouped(2,[1]))
expected = [[1]]
assert actual == expected
def test_grouped_size_1_groups_0_elements():
actual = list(grouped(1,[0]))
expected = [[0]]
assert actual == expected
def test_grouped_size_1_groups_5_elements():
actual = list(grouped(1,['abc', 'def', 'ghi', 'jkl', 'mno']))
expected = [['abc'], ['def'], ['ghi'], ['jkl'], ['mno']]
assert actual == expected
def test_grouped_size_2_groups_5_elements():
actual = list(grouped(2,['abc', 'def', 'ghi', 'jkl', 'mno']))
expected = [['abc', 'def'], ['ghi', 'jkl'], ['mno']]
assert actual == expected
def test_grouped_size_3_groups_6_elements():
actual = list(grouped(3,['abc', 'def', 'ghi', 'jkl', 'mno', '']))
expected = [['abc', 'def', 'ghi'], ['jkl', 'mno', '']]
assert actual == expected
def test_grouped_size_3_groups_7_elements():
actual = list(grouped(3,['abc', 'def', 'ghi', 'jkl', 'mno', 'pqr', 'stu']))
expected = [['abc', 'def', 'ghi'], ['jkl', 'mno', 'pqr'], ['stu']]
assert actual == expected
def test_unzip():
assert unzip([]) == ([], [])
assert unzip([(0, 'a')]) == ([0], ['a'])
assert unzip([(123, '')]) == ([123], [''])
assert unzip([(123, 'abc')]) == ([123], ['abc'])
assert unzip([(123, 456), ('abc', 'def')]) == ([123, 'abc'], [456, 'def'])
assert unzip([(123, 'abc'), (456, 'def'), (789, 'ghi')]) == ([123, 456, 789], ['abc', 'def', 'ghi'])
def test_digits_needed():
assert digits_needed(0) == 1
assert digits_needed(1) == 1
assert digits_needed(12) == 2
assert digits_needed(333) == 3
assert digits_needed(100) == 3
assert digits_needed(3000) == 4
assert digits_needed(50000) == 5
def test_filter_none():
assert filter_none([]) == []
assert filter_none([None, []]) == [[]]
assert filter_none([0, []]) == [0, []]
assert filter_none([1, 2, [None]]) == [1, 2, [None]]
assert filter_none([1, 3.5, 2, 4,]) == [1, 3.5, 2, 4]
assert filter_none([1, 2, 3.0, None, 5]) == [1, 2, 3.0, 5]
assert filter_none(['a', 'b', 'c', None]) == ['a', 'b', 'c']
assert filter_none([None, [None, [None, [None]]]]) == [[None, [None, [None]]]]
def test_flatten():
assert flatten([]) == []
assert flatten([[]]) == []
assert flatten([[], []]) == []
assert flatten([[], [3]]) == [3]
assert flatten([[1, 2, 3], [3], [4, 5]]) == [1, 2, 3, 3, 4, 5]
assert flatten([['a', 'b', 'c'], ['d', 'e']]) == ['a', 'b', 'c', 'd', 'e']
assert flatten([[['a'], ['b']], [[1, 2, 3], [4, 5]]]) == [['a'], ['b'], [1, 2, 3], [4, 5]]
assert flatten([['apples'], ['bannanas'], ['oranges']]) == ['apples', 'bannanas', 'oranges']
assert flatten([['apple', 'bannana'], ['a', 'b', 'c'], [1, 2, 3, 4]]) == ['apple', 'bannana', 'a', 'b', 'c', 1, 2, 3, 4]
assert flatten([['apples'], [''], ['bannanas'], [''], ['oranges'], ['']]) == ['apples', '', 'bannanas', '', 'oranges', ''] | 0.630344 | 0.637334 |
import pathlib
import uuid
import astropy.units as u
from specutils import Spectrum1D, SpectrumCollection, SpectralRegion
from jdaviz.core.helpers import ConfigHelper
class SpecViz(ConfigHelper):
"""SpecViz Helper class"""
_default_configuration = 'specviz'
def load_data(self, data, data_label=None, format=None):
"""
Loads a data file or `~specutils.Spectrum1D` object into SpecViz.
Parameters
----------
data : str or `~specutils.Spectrum1D`
Spectrum1D spectra, or path to compatible data file.
data_label : str
The Glue data label found in the ``DataCollection``.
format : str
Loader format specification used to indicate data format in
`~specutils.Spectrum1D.read` io method.
"""
# If no data label is assigned, give it a unique identifier
if data_label is None:
data_label = "specviz_data|" + uuid.uuid4().hex
# If data provided is a path, try opening into a Spectrum1D object
try:
path = pathlib.Path(data)
if path.is_file():
data = Spectrum1D.read(path, format=format)
else:
raise FileNotFoundError("No such file: " + path)
# If not, it must be a Spectrum1D object. Otherwise, it's unsupported
except TypeError:
if type(data) is SpectrumCollection:
raise TypeError("`SpectrumCollection` detected. Please "
"provide a `Spectrum1D`.")
elif type(data) is not Spectrum1D:
raise TypeError("Data is not a Spectrum1D object or compatible file")
self.app.add_data(data, data_label)
self.app.add_data_to_viewer('spectrum-viewer', data_label)
def get_spectra(self):
"""Returns the current data loaded into the main viewer"""
return self.app.get_data_from_viewer('spectrum-viewer')
def get_spectral_regions(self):
"""
Retrieves glue subset objects from the spectrum viewer and converts
them to `~specutils.SpectralRegion` objects.
Returns
-------
spec_regs : dict
Mapping from the names of the subsets to the subsets expressed
as `specutils.SpectralRegion` objects.
"""
regions = self.app.get_subsets_from_viewer('spectrum-viewer')
spec_regs = {}
for name, reg in regions.items():
unit = reg.meta.get('spectral_axis_unit', u.Unit('Angstrom'))
spec_reg = SpectralRegion.from_center(reg.center.x * unit,
reg.width * unit)
spec_regs[name] = spec_reg
return spec_regs | jdaviz/configs/specviz/helper.py | import pathlib
import uuid
import astropy.units as u
from specutils import Spectrum1D, SpectrumCollection, SpectralRegion
from jdaviz.core.helpers import ConfigHelper
class SpecViz(ConfigHelper):
"""SpecViz Helper class"""
_default_configuration = 'specviz'
def load_data(self, data, data_label=None, format=None):
"""
Loads a data file or `~specutils.Spectrum1D` object into SpecViz.
Parameters
----------
data : str or `~specutils.Spectrum1D`
Spectrum1D spectra, or path to compatible data file.
data_label : str
The Glue data label found in the ``DataCollection``.
format : str
Loader format specification used to indicate data format in
`~specutils.Spectrum1D.read` io method.
"""
# If no data label is assigned, give it a unique identifier
if data_label is None:
data_label = "specviz_data|" + uuid.uuid4().hex
# If data provided is a path, try opening into a Spectrum1D object
try:
path = pathlib.Path(data)
if path.is_file():
data = Spectrum1D.read(path, format=format)
else:
raise FileNotFoundError("No such file: " + path)
# If not, it must be a Spectrum1D object. Otherwise, it's unsupported
except TypeError:
if type(data) is SpectrumCollection:
raise TypeError("`SpectrumCollection` detected. Please "
"provide a `Spectrum1D`.")
elif type(data) is not Spectrum1D:
raise TypeError("Data is not a Spectrum1D object or compatible file")
self.app.add_data(data, data_label)
self.app.add_data_to_viewer('spectrum-viewer', data_label)
def get_spectra(self):
"""Returns the current data loaded into the main viewer"""
return self.app.get_data_from_viewer('spectrum-viewer')
def get_spectral_regions(self):
"""
Retrieves glue subset objects from the spectrum viewer and converts
them to `~specutils.SpectralRegion` objects.
Returns
-------
spec_regs : dict
Mapping from the names of the subsets to the subsets expressed
as `specutils.SpectralRegion` objects.
"""
regions = self.app.get_subsets_from_viewer('spectrum-viewer')
spec_regs = {}
for name, reg in regions.items():
unit = reg.meta.get('spectral_axis_unit', u.Unit('Angstrom'))
spec_reg = SpectralRegion.from_center(reg.center.x * unit,
reg.width * unit)
spec_regs[name] = spec_reg
return spec_regs | 0.789964 | 0.342159 |
import logging
import os
import re
import yaml
from BackgroundApp import logging_conf # noqa # nopep8
os.environ["KCFG_KIVY_LOG_LEVEL"] = logging_conf["root"]["level"].lower()
# noqa # nopep8
from kivy.app import App # noqa
from kivy.clock import Clock # noqa
from kivy.config import Config # noqa
from kivy.properties import BooleanProperty, DictProperty, StringProperty # noqa
from kivy.uix.boxlayout import BoxLayout # noqa
from kivy.uix.widget import Widget # noqa
logger = logging.getLogger(__name__)
Config.set("graphics", "resizable", False)
Config.set("graphics", "height", 600)
Config.set("graphics", "width", 800)
class ToggleOption(BoxLayout):
    """A two-state (on/off) settings row."""

    # Current on/off value of the option.
    state = BooleanProperty(True)

    def toggle_state(self):
        """Flip the option between on and off."""
        self.state = not self.state

    def get_setting(self):
        """Return the current value for serialisation."""
        return self.state
class LoggingOption(BoxLayout):
    """A settings row selecting one of the logging level names."""

    # Currently selected logging level.
    state = StringProperty("WARNING")

    def set_state(self, state):
        """Record the level chosen in the UI."""
        self.state = state

    def get_setting(self):
        """Return the selected level for serialisation."""
        return self.state
class TextOption(BoxLayout):
    """A free-text settings row with optional path/executable validation."""

    def get_setting(self):
        """Return the current text for serialisation."""
        return self.input.text

    def on_focus(self):
        # Hook for kv bindings / subclasses; no-op by default.
        return

    def validate_path(self):
        """On focus loss, expand a %VAR% environment reference in the text
        and warn when the resulting path does not exist."""
        if not self.input.focus:
            var = re.search("%(.+?)%", self.input.text)
            if var is not None:
                # BUG FIX: os.environ[...] raised KeyError for an undefined
                # environment variable; warn and leave the text unchanged.
                value = os.environ.get(var.group(1))
                if value is None:
                    logger.warning("Undefined environment variable: %s",
                                   var.group(1))
                else:
                    self.input.text = self.input.text.replace(
                        f"%{var.group(1)}%", value)
            if not os.path.exists(self.input.text) and self.input.text != "":
                logger.warning("Invalid path: %s", self.input.text)

    def validate_executable(self):
        """Validate the path, then enable the arguments field only when the
        text names an .exe (or is empty)."""
        self.validate_path()
        if not self.input.focus:
            # NOTE(review): reaches through the widget tree for the arguments
            # input -- assumes a fixed kv layout; confirm against the kv file.
            arg_inst = self.parent.parent.ids["elite_dangerous.arguments"]
            arg_inst.disabled = not (self.input.text.endswith(".exe")
                                     or self.input.text == "")
class Settings(Widget):
    """Root widget for the settings window.

    Loads settings.yaml on construction, polls the pipe connection to the
    main application twice a second, and writes settings.yaml back on save.
    """

    # Parsed contents of settings.yaml (category -> {setting: value}).
    settings = DictProperty()

    def __init__(self, con):
        # Imported lazily so the kivy window is only created once this
        # widget is actually constructed.
        from kivy.core.window import Window
        self.Window = Window
        self.con = con  # pipe connection to the background application
        Clock.schedule_interval(self.check_queue, 0.5)
        self.load_settings()
        super().__init__()
        self.ids["elite_dangerous.path"].validate_executable()

    def check_queue(self, dt):
        """Poll the pipe for messages from the main application."""
        if self.con.poll():
            msg = self.con.recv()
            if msg == "restore":
                # Minimize-then-restore brings the window to the front.
                self.Window.minimize()
                self.Window.restore()
            elif msg == "closed":
                logger.debug("Main application closed, closing window")
                App.get_running_app().stop()

    def load_settings(self):
        """Read settings.yaml into the `settings` property."""
        with open("settings.yaml", "r") as stream:
            try:
                data = yaml.safe_load(stream)
                self.settings = data
            except yaml.YAMLError as e:
                logger.error("Unable to read settings.yaml, %s", e)

    def save_settings(self):
        """Collect every 'category.setting' widget value, write
        settings.yaml, and notify the main application."""
        # self.convert_paths()
        settings = dict()
        for entry in self.ids:
            # Only ids of the form "category.setting" are persisted.
            if "." in entry:
                cat, sett = entry.split(".")
                if cat not in settings:
                    settings[cat] = dict()
                settings[cat][sett] = self.ids[entry].get_setting()
        with open("settings.yaml", "w") as stream:
            try:
                stream.write(yaml.dump(settings))
            except yaml.YAMLError as e:
                logger.error("Unable to save settings.yaml, %s", e)
        self.con.send("changed_settings")

        def reset(dt):
            # Restore the button label after the confirmation flash.
            self.ids["savebutton"].text = "Save Options"
        self.ids["savebutton"].text = "Saved!"
        Clock.schedule_once(reset, 2)
class SettingsApp(App):
    """Kivy application wrapper around the Settings widget."""

    def __init__(self, con):
        self.con = con  # pipe connection handed through to Settings
        super().__init__()

    def build(self):
        self.title = "Elite Dangerous Rich Presence Settings"
        self.icon = "elite-dangerous-clean.ico"
        return Settings(self.con)

    @staticmethod
    def rgba(r, g, b, a):
        """Scale 0-255 channel values into kivy's 0-1 range."""
        return tuple(channel / 255.0 for channel in (r, g, b, a))

    @staticmethod
    def hex(hex):
        """Convert a '#rrggbb' string into an rgba tuple (alpha = 1.0)."""
        digits = hex[1:]
        rgb = tuple(int(digits[i:i + 2], 16) for i in (0, 2, 4))
        return SettingsApp.rgba(*rgb, 255)

    def on_start(self):
        pass

    def on_stop(self):
        pass
import os
import re
import yaml
from BackgroundApp import logging_conf # noqa # nopep8
os.environ["KCFG_KIVY_LOG_LEVEL"] = logging_conf["root"]["level"].lower()
# noqa # nopep8
from kivy.app import App # noqa
from kivy.clock import Clock # noqa
from kivy.config import Config # noqa
from kivy.properties import BooleanProperty, DictProperty, StringProperty # noqa
from kivy.uix.boxlayout import BoxLayout # noqa
from kivy.uix.widget import Widget # noqa
logger = logging.getLogger(__name__)
Config.set("graphics", "resizable", False)
Config.set("graphics", "height", 600)
Config.set("graphics", "width", 800)
class ToggleOption(BoxLayout):
    """A two-state (on/off) settings row."""

    # Current on/off value of the option.
    state = BooleanProperty(True)

    def toggle_state(self):
        """Flip the option between on and off."""
        self.state = not self.state

    def get_setting(self):
        """Return the current value for serialisation."""
        return self.state
class LoggingOption(BoxLayout):
    """A settings row selecting one of the logging level names."""

    # Currently selected logging level.
    state = StringProperty("WARNING")

    def set_state(self, state):
        """Record the level chosen in the UI."""
        self.state = state

    def get_setting(self):
        """Return the selected level for serialisation."""
        return self.state
class TextOption(BoxLayout):
    """A free-text settings row with optional path/executable validation."""

    def get_setting(self):
        """Return the current text for serialisation."""
        return self.input.text

    def on_focus(self):
        # Hook for kv bindings / subclasses; no-op by default.
        return

    def validate_path(self):
        """On focus loss, expand a %VAR% environment reference in the text
        and warn when the resulting path does not exist."""
        if not self.input.focus:
            var = re.search("%(.+?)%", self.input.text)
            if var is not None:
                # BUG FIX: os.environ[...] raised KeyError for an undefined
                # environment variable; warn and leave the text unchanged.
                value = os.environ.get(var.group(1))
                if value is None:
                    logger.warning("Undefined environment variable: %s",
                                   var.group(1))
                else:
                    self.input.text = self.input.text.replace(
                        f"%{var.group(1)}%", value)
            if not os.path.exists(self.input.text) and self.input.text != "":
                logger.warning("Invalid path: %s", self.input.text)

    def validate_executable(self):
        """Validate the path, then enable the arguments field only when the
        text names an .exe (or is empty)."""
        self.validate_path()
        if not self.input.focus:
            # NOTE(review): reaches through the widget tree for the arguments
            # input -- assumes a fixed kv layout; confirm against the kv file.
            arg_inst = self.parent.parent.ids["elite_dangerous.arguments"]
            arg_inst.disabled = not (self.input.text.endswith(".exe")
                                     or self.input.text == "")
class Settings(Widget):
    """Root widget for the settings window.

    Loads settings.yaml on construction, polls the pipe connection to the
    main application twice a second, and writes settings.yaml back on save.
    """

    # Parsed contents of settings.yaml (category -> {setting: value}).
    settings = DictProperty()

    def __init__(self, con):
        # Imported lazily so the kivy window is only created once this
        # widget is actually constructed.
        from kivy.core.window import Window
        self.Window = Window
        self.con = con  # pipe connection to the background application
        Clock.schedule_interval(self.check_queue, 0.5)
        self.load_settings()
        super().__init__()
        self.ids["elite_dangerous.path"].validate_executable()

    def check_queue(self, dt):
        """Poll the pipe for messages from the main application."""
        if self.con.poll():
            msg = self.con.recv()
            if msg == "restore":
                # Minimize-then-restore brings the window to the front.
                self.Window.minimize()
                self.Window.restore()
            elif msg == "closed":
                logger.debug("Main application closed, closing window")
                App.get_running_app().stop()

    def load_settings(self):
        """Read settings.yaml into the `settings` property."""
        with open("settings.yaml", "r") as stream:
            try:
                data = yaml.safe_load(stream)
                self.settings = data
            except yaml.YAMLError as e:
                logger.error("Unable to read settings.yaml, %s", e)

    def save_settings(self):
        """Collect every 'category.setting' widget value, write
        settings.yaml, and notify the main application."""
        # self.convert_paths()
        settings = dict()
        for entry in self.ids:
            # Only ids of the form "category.setting" are persisted.
            if "." in entry:
                cat, sett = entry.split(".")
                if cat not in settings:
                    settings[cat] = dict()
                settings[cat][sett] = self.ids[entry].get_setting()
        with open("settings.yaml", "w") as stream:
            try:
                stream.write(yaml.dump(settings))
            except yaml.YAMLError as e:
                logger.error("Unable to save settings.yaml, %s", e)
        self.con.send("changed_settings")

        def reset(dt):
            # Restore the button label after the confirmation flash.
            self.ids["savebutton"].text = "Save Options"
        self.ids["savebutton"].text = "Saved!"
        Clock.schedule_once(reset, 2)
class SettingsApp(App):
    """Kivy application wrapper around the Settings widget."""

    def __init__(self, con):
        self.con = con  # pipe connection handed through to Settings
        super().__init__()

    def build(self):
        self.title = "Elite Dangerous Rich Presence Settings"
        self.icon = "elite-dangerous-clean.ico"
        return Settings(self.con)

    @staticmethod
    def rgba(r, g, b, a):
        """Scale 0-255 channel values into kivy's 0-1 range."""
        return tuple(channel / 255.0 for channel in (r, g, b, a))

    @staticmethod
    def hex(hex):
        """Convert a '#rrggbb' string into an rgba tuple (alpha = 1.0)."""
        digits = hex[1:]
        rgb = tuple(int(digits[i:i + 2], 16) for i in (0, 2, 4))
        return SettingsApp.rgba(*rgb, 255)

    def on_start(self):
        pass

    def on_stop(self):
        pass
import argparse
def get_args():
    """Parse the WAF-bypasser command-line options.

    Returns
    -------
    dict
        Parsed option values keyed by lower-case names
        (e.g. "target", "mode", "fuzzing_signature").
    """
    parser = argparse.ArgumentParser(description='OWTF WAF-BYPASSER MODULE')
    parser.add_argument("-X", "--method",
                        dest="METHODS",
                        action='store',
                        nargs="+",
                        help=("""
Specify Method . (Ex: -X GET . The option @@@all@@@ loads all the HTTP methods \
which are listed in ./payload/HTTP/methods.txt). Custom methods can be defined in
this file.
"""))
    parser.add_argument("-C", "--cookie",
                        dest="COOKIE",
                        action='store',
                        help="Insert a cookie value. (Ex --cookie 'var=value')")
    parser.add_argument("-t", "--target",
                        dest="TARGET",
                        action='store',
                        required=True,
                        help="The target url")
    # Fixed help text: the flag is -H/--headers, not "-header".
    parser.add_argument("-H", "--headers",
                        dest="HEADERS",
                        action='store',
                        nargs='*',
                        help="Additional headers (ex -H 'Name:value' 'Name2:value2')")
    parser.add_argument("-L", "--length",
                        dest="LENGTH",
                        action='store',
                        nargs=1,
                        type=int,
                        help="Specify the length of accepted chars. Required in overchar mode")
    parser.add_argument("-d", "--data",
                        dest="DATA",
                        action='store',
                        help="POST data (ex --data 'var=value')")
    parser.add_argument("-cnt", "--contains",
                        dest="CONTAINS",
                        action='store',
                        nargs='+',
                        help=("""
DETECTION METHOD(ex1 -cnt 'signature' \n) Optional Arguments:\n Case sensitive :\n
(ex2)-cnt 'signature' cs
"""))
    parser.add_argument("-rcd", "--response_code",
                        dest="RESP_CODE_DET",
                        action='store',
                        nargs=1,
                        help=("""
DETECTION METHOD(Ex1 -rcd 200 \n) (Ex2 -rcd 400,404)+\n(ex3 rcd 200-400)\n
(Ex4 -rcd 100,200-300)
"""))
    parser.add_argument("-rt", "--response_time",
                        dest="RESPONSE_TIME",
                        action='store',
                        type=float,
                        nargs=1,
                        help="DETECTION METHOD(Ex -rt 30 )")
    parser.add_argument("-r", "--reverse",
                        dest="REVERSE",
                        action='store_true',
                        help="Reverse the detection method. (Negative detection)")
    # Fixed help text: removed the stray "')" typo.
    parser.add_argument("-pl", "--payloads",
                        dest="PAYLOADS",
                        action='store',
                        nargs='*',
                        help="FILE with payloads (Ex: file1, file2)")
    parser.add_argument("-fs", "--fuzzing_signature",
                        dest="FUZZING_SIG",
                        action='store',
                        help="The default fuzzing signature is @@@. You can change it with a custom signature.")
    parser.add_argument("-apv", "--accepted_value",
                        dest="ACCEPTED_VALUE",
                        action='store',
                        help="Accepted parameter value")
    parser.add_argument("-pn", "--param_name",
                        dest="PARAM_NAME",
                        action='store',
                        help="Specify parameter name")
    parser.add_argument("-ps", "--param_source",
                        dest="PARAM_SOURCE",
                        action='store',
                        choices=['URL', 'DATA', 'COOKIE', 'HEADER'],
                        help="Specifies the parameters position.")
    parser.add_argument("-dl", "--delay",
                        dest="DELAY",
                        action='store',
                        type=int,
                        help=("""
Changes the Fuzzing method from asynchronous to synchronous(slower). This allows you
to follow cookies and specify a delay time in seconds before sending a request.
"""))
    parser.add_argument("-fc", "--follow-cookies",
                        dest="FOLLOW_COOKIES",
                        action='store_true',
                        help=("Remember and follow cookies in requests"))
    # Fixed help text: the choice is "show_transform_functions" (the typoed
    # "(show_tranform_function) Showstransformation" misnamed the mode).
    parser.add_argument("-m", "--mode",
                        dest="MODE",
                        required=True,
                        choices=['fuzz', 'detect_chars', 'asp_hpp', 'param_overwriting', "length", "overchar",
                                 "detect_accepted_sources", "content_type_tamper", "show_transform_functions"],
                        action='store',
                        help=("""
Select mode: (fuzz)Fuzzing mode.\n(detect_chars)Detects the available characters and
attempts to find bypasses.\n (asp_hpp)Splits the payload to comma (,) character and
sends using HPP\n(param_overwriting)Overwrites a parameter by using HPP\n (length)
Detects the length of a content placeholder\n (detect_accepted_sources)
Detected the accepted sources of a parameter\n (content_type_tamper)Content type
tampering is changing the Content-Type header and tries to detect anomalies.\n
(show_transform_functions) Shows transformation functions\n(overchar)
Sends the payloads after a stream of whitelisted characters\n
"""))
    args = parser.parse_args()
    # Map the parsed namespace onto the lower-case keys callers expect.
    return {
        "target": args.TARGET,
        "payloads": args.PAYLOADS,
        "headers": args.HEADERS,
        "methods": args.METHODS,
        "data": args.DATA,
        "contains": args.CONTAINS,
        "resp_code_det": args.RESP_CODE_DET,
        "reverse": args.REVERSE,
        "fuzzing_signature": args.FUZZING_SIG,
        "accepted_value": args.ACCEPTED_VALUE,
        "param_name": args.PARAM_NAME,
        "param_source": args.PARAM_SOURCE,
        "delay": args.DELAY,
        "follow_cookies": args.FOLLOW_COOKIES,
        "cookie": args.COOKIE,
        "length": args.LENGTH,
        "response_time": args.RESPONSE_TIME,
        "mode": args.MODE
    }
def get_args():
    """Parse the WAF-bypasser command-line options into a dict."""
    parser = argparse.ArgumentParser(description='OWTF WAF-BYPASSER MODULE')
    # (flags, keyword-arguments) table -- one entry per command-line option.
    option_table = [
        (("-X", "--method"),
         {"dest": "METHODS", "action": 'store', "nargs": "+",
          "help": """
Specify Method . (Ex: -X GET . The option @@@all@@@ loads all the HTTP methods \
which are listed in ./payload/HTTP/methods.txt). Custom methods can be defined in
this file.
"""}),
        (("-C", "--cookie"),
         {"dest": "COOKIE", "action": 'store',
          "help": "Insert a cookie value. (Ex --cookie 'var=value')"}),
        (("-t", "--target"),
         {"dest": "TARGET", "action": 'store', "required": True,
          "help": "The target url"}),
        (("-H", "--headers"),
         {"dest": "HEADERS", "action": 'store', "nargs": '*',
          "help": "Additional headers (ex -header 'Name:value' 'Name2:value2')"}),
        (("-L", "--length"),
         {"dest": "LENGTH", "action": 'store', "nargs": 1, "type": int,
          "help": "Specify the length of accepted chars. Required in overchar mode"}),
        (("-d", "--data"),
         {"dest": "DATA", "action": 'store',
          "help": "POST data (ex --data 'var=value')"}),
        (("-cnt", "--contains"),
         {"dest": "CONTAINS", "action": 'store', "nargs": '+',
          "help": """
DETECTION METHOD(ex1 -cnt 'signature' \n) Optional Arguments:\n Case sensitive :\n
(ex2)-cnt 'signature' cs
"""}),
        (("-rcd", "--response_code"),
         {"dest": "RESP_CODE_DET", "action": 'store', "nargs": 1,
          "help": """
DETECTION METHOD(Ex1 -rcd 200 \n) (Ex2 -rcd 400,404)+\n(ex3 rcd 200-400)\n
(Ex4 -rcd 100,200-300)
"""}),
        (("-rt", "--response_time"),
         {"dest": "RESPONSE_TIME", "action": 'store', "type": float, "nargs": 1,
          "help": "DETECTION METHOD(Ex -rt 30 )"}),
        (("-r", "--reverse"),
         {"dest": "REVERSE", "action": 'store_true',
          "help": "Reverse the detection method. (Negative detection)"}),
        (("-pl", "--payloads"),
         {"dest": "PAYLOADS", "action": 'store', "nargs": '*',
          "help": "FILE with payloads')(Ex file1 , file2)"}),
        (("-fs", "--fuzzing_signature"),
         {"dest": "FUZZING_SIG", "action": 'store',
          "help": "The default fuzzing signature is @@@. You can change it with a custom signature."}),
        (("-apv", "--accepted_value"),
         {"dest": "ACCEPTED_VALUE", "action": 'store',
          "help": "Accepted parameter value"}),
        (("-pn", "--param_name"),
         {"dest": "PARAM_NAME", "action": 'store',
          "help": "Specify parameter name"}),
        (("-ps", "--param_source"),
         {"dest": "PARAM_SOURCE", "action": 'store',
          "choices": ['URL', 'DATA', 'COOKIE', 'HEADER'],
          "help": "Specifies the parameters position."}),
        (("-dl", "--delay"),
         {"dest": "DELAY", "action": 'store', "type": int,
          "help": """
Changes the Fuzzing method from asynchronous to synchronous(slower). This allows you
to follow cookies and specify a delay time in seconds before sending a request.
"""}),
        (("-fc", "--follow-cookies"),
         {"dest": "FOLLOW_COOKIES", "action": 'store_true',
          "help": "Remember and follow cookies in requests"}),
        (("-m", "--mode"),
         {"dest": "MODE", "required": True,
          "choices": ['fuzz', 'detect_chars', 'asp_hpp', 'param_overwriting',
                      "length", "overchar", "detect_accepted_sources",
                      "content_type_tamper", "show_transform_functions"],
          "action": 'store',
          "help": """
Select mode: (fuzz)Fuzzing mode.\n(detect_chars)Detects the available characters and
attempts to find bypasses.\n (asp_hpp)Splits the payload to comma (,) character and
sends using HPP\n(param_overwriting)Overwrites a parameter by using HPP\n (length)
Detects the length of a content placeholder\n (detect_accepted_sources)
Detected the accepted sources of a parameter\n (content_type_tamper)Content type
tampering is changing the Content-Type header and tries to detect anomalies.\n
(show_tranform_function) Showstransformation functions\n(overchar)
Sends the payloads after a stream of whitelisted characters\n
"""}),
    ]
    for flags, options in option_table:
        parser.add_argument(*flags, **options)
    args = parser.parse_args()
    # Map the parsed namespace onto the lower-case keys callers expect.
    return {
        "target": args.TARGET,
        "payloads": args.PAYLOADS,
        "headers": args.HEADERS,
        "methods": args.METHODS,
        "data": args.DATA,
        "contains": args.CONTAINS,
        "resp_code_det": args.RESP_CODE_DET,
        "reverse": args.REVERSE,
        "fuzzing_signature": args.FUZZING_SIG,
        "accepted_value": args.ACCEPTED_VALUE,
        "param_name": args.PARAM_NAME,
        "param_source": args.PARAM_SOURCE,
        "delay": args.DELAY,
        "follow_cookies": args.FOLLOW_COOKIES,
        "cookie": args.COOKIE,
        "length": args.LENGTH,
        "response_time": args.RESPONSE_TIME,
        "mode": args.MODE
    }
from refextract import extract_references_from_file
from refextract import extract_references_from_string
from pathlib2 import Path
import re
import sys
sys.path.insert(0,'../')
sys.path.insert(0,'../database_queries/')
from models import Biblio
from database_queries_biblio import *
def array_to_semicolon_separated(input):
    """Join a list of strings with ';' separators.

    Returns "" for an empty list (the original `input[0]` indexing crashed
    with IndexError on []).
    """
    return ";".join(input)
def extract_citations_from_pdf(name_of_file):
    # Extract the reference list of a PDF with refextract and return it as a
    # single semicolon-separated string (Python 2 code).
    # Returns "No references to extract" when refextract finds nothing.
    references = extract_references_from_file(name_of_file)
    #reference = extract_references_from_string("text.txt")
    print "**************"
    array_of_citations = []
    for element in references:
        # Debug dump of every field refextract produced for this reference.
        for key in element:
            print key
            print element[key]
            print "---"
        #array_of_citations.append(element['raw_ref'][0].replace(u'['+element['linemarker'][0]+u'] ',u''))
        array_of_citations.append(element['raw_ref'][0])
    print "**************"
    if len(array_of_citations)>1:
        string_of_citations = array_to_semicolon_separated(array_of_citations)
    elif len(array_of_citations)==1:
        string_of_citations = array_of_citations[0]
    else:
        string_of_citations = "No references to extract"
    print string_of_citations
    return string_of_citations
#Puede haber pdfs en los que las referencias esten mal. Hay que revisar
#Esto solo detecta en funcion de los numeros entre corchetes, y sigue la numeracion
#Comprobar que no haya corchetes mas alla del inicio en la referencia
def extract_citations_in_database():
selection = Biblio.select(Biblio.bibtex_id).where(Biblio.raw_bibliography.is_null())
global_path = "/Users/Julio/Documents/PhD/Papers/Security/Multi-Step attacks DB/"
for element in selection:
try:
string_of_path = global_path+"Corpus/"+element.bibtex_id+".pdf"
path_to_file = Path(string_of_path)
if not path_to_file.is_file():
string_of_path = global_path+"Papers/"+element.bibtex_id+".pdf"
path_to_file = Path(string_of_path)
if path_to_file.is_file():
string_with_references = extract_citations_from_pdf(string_of_path)
else:
print "NOT WORKED"
string_with_references = "NOT WORKED"
else:
string_with_references = extract_citations_from_pdf(string_of_path)
update_raw_bibliography(element.bibtex_id,string_with_references)
except TypeError,IndexError:
print "Not worked for: "+element.bibtex_id
update_raw_bibliography(element.bibtex_id,"SOME ERROR OF THE PROGRAM")
def main():
    """Entry point: run the database-wide citation extraction."""
    #extract_citations_from_pdf("test.pdf")
    extract_citations_in_database()
if __name__ == "__main__":
sys.exit(main()) | extract_references/test_extract_references.py | from refextract import extract_references_from_file
from refextract import extract_references_from_string
from pathlib2 import Path
import re
import sys
sys.path.insert(0,'../')
sys.path.insert(0,'../database_queries/')
from models import Biblio
from database_queries_biblio import *
def array_to_semicolon_separated(input):
    """Join a list of strings with ';' separators.

    Returns "" for an empty list (the original `input[0]` indexing crashed
    with IndexError on []).
    """
    return ";".join(input)
def extract_citations_from_pdf(name_of_file):
    # Extract the reference list of a PDF with refextract and return it as a
    # single semicolon-separated string (Python 2 code).
    # Returns "No references to extract" when refextract finds nothing.
    references = extract_references_from_file(name_of_file)
    #reference = extract_references_from_string("text.txt")
    print "**************"
    array_of_citations = []
    for element in references:
        # Debug dump of every field refextract produced for this reference.
        for key in element:
            print key
            print element[key]
            print "---"
        #array_of_citations.append(element['raw_ref'][0].replace(u'['+element['linemarker'][0]+u'] ',u''))
        array_of_citations.append(element['raw_ref'][0])
    print "**************"
    if len(array_of_citations)>1:
        string_of_citations = array_to_semicolon_separated(array_of_citations)
    elif len(array_of_citations)==1:
        string_of_citations = array_of_citations[0]
    else:
        string_of_citations = "No references to extract"
    print string_of_citations
    return string_of_citations
#Puede haber pdfs en los que las referencias esten mal. Hay que revisar
#Esto solo detecta en funcion de los numeros entre corchetes, y sigue la numeracion
#Comprobar que no haya corchetes mas alla del inicio en la referencia
def extract_citations_in_database():
selection = Biblio.select(Biblio.bibtex_id).where(Biblio.raw_bibliography.is_null())
global_path = "/Users/Julio/Documents/PhD/Papers/Security/Multi-Step attacks DB/"
for element in selection:
try:
string_of_path = global_path+"Corpus/"+element.bibtex_id+".pdf"
path_to_file = Path(string_of_path)
if not path_to_file.is_file():
string_of_path = global_path+"Papers/"+element.bibtex_id+".pdf"
path_to_file = Path(string_of_path)
if path_to_file.is_file():
string_with_references = extract_citations_from_pdf(string_of_path)
else:
print "NOT WORKED"
string_with_references = "NOT WORKED"
else:
string_with_references = extract_citations_from_pdf(string_of_path)
update_raw_bibliography(element.bibtex_id,string_with_references)
except TypeError,IndexError:
print "Not worked for: "+element.bibtex_id
update_raw_bibliography(element.bibtex_id,"SOME ERROR OF THE PROGRAM")
def main():
    """Entry point: run the database-wide citation extraction."""
    #extract_citations_from_pdf("test.pdf")
    extract_citations_in_database()
if __name__ == "__main__":
sys.exit(main()) | 0.179567 | 0.225097 |