code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
from sklearn.model_selection import train_test_split

# Code starts here
# NOTE(review): `path` (and `user_data_dir` further down) is not defined in
# this file -- presumably injected by the hosting notebook/grader
# environment; confirm before running standalone.
data = pd.read_csv(path)
# Features = everything except the row id and the target column.
X = data.drop(columns=['customer.id', 'paid.back.loan'])
y = data['paid.back.loan']
# 70/30 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
# Code ends here

# --------------
# Importing header files
import matplotlib.pyplot as plt

# Code starts here
# Class balance of the target in the training split.
fully_paid = y_train.value_counts()
plt.bar(fully_paid.index, fully_paid)
plt.show()
# Code ends here

# --------------
# Importing header files
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder

# Code starts here
# Strip the trailing '%' so the interest rate parses as a number, then
# convert from percent to fraction (e.g. '11.5%' -> 0.115).
replace = lambda x: x.replace('%', '')
X_train['int.rate'] = pd.to_numeric(X_train['int.rate'].apply(replace)) / 100
X_test['int.rate'] = pd.to_numeric(X_test['int.rate'].apply(replace)) / 100
# Split the training features into numeric and categorical frames.
num_df = X_train.select_dtypes(include=[np.number])
cat_df = X_train.select_dtypes(exclude=[np.number])
# Code ends here

# --------------
# Importing header files
import seaborn as sns

# Code starts here
# One boxplot per numeric feature, split by target class.
cols = list(num_df.columns)
fig, axes = plt.subplots(9, 1, figsize=(10, 10))
for i in range(9):
    sns.boxplot(x=y_train, y=num_df[cols[i]], ax=axes[i])
# Code ends here

# --------------
# Code starts here
# Count plots for the first four categorical features, split by target.
cols = cat_df.columns
fig, axes = plt.subplots(2, 2, figsize=(10, 10))
for i in range(2):
    for j in range(2):
        sns.countplot(x=X_train[cols[i * 2 + j]], hue=y_train, ax=axes[i, j])
# Code ends here

# --------------
# Importing header files
from sklearn.tree import DecisionTreeClassifier

# Code starts here
# Label-encode every categorical feature.  The encoder is fit on the train
# column and immediately reused on the matching test column.
# NOTE(review): unseen categories in the test split would raise here --
# acceptable for this dataset, but worth confirming.
le = LabelEncoder()
for i in range(len(cols)):
    X_train[cols[i]] = le.fit_transform(X_train[cols[i]])
    X_test[cols[i]] = le.transform(X_test[cols[i]])
# Binarise the target: No -> 0, Yes -> 1.
y_train.replace({'No': 0, 'Yes': 1}, inplace=True)
y_test.replace({'No': 0, 'Yes': 1}, inplace=True)
# Baseline, un-tuned decision tree.
model = DecisionTreeClassifier(random_state=0)
model.fit(X_train, y_train)
acc = model.score(X_test, y_test)
# Code ends here

# --------------
# Importing header files
from sklearn.model_selection import GridSearchCV

# Parameter grid
parameter_grid = {'max_depth': np.arange(3, 10), 'min_samples_leaf': range(10, 50, 10)}

# Code starts here
# 5-fold grid search over tree depth and minimum leaf size.
model_2 = DecisionTreeClassifier(random_state=0)
p_tree = GridSearchCV(estimator=model_2, param_grid=parameter_grid, cv=5)
p_tree.fit(X_train, y_train)
acc_2 = p_tree.score(X_test, y_test)
# Code ends here

# --------------
# Importing header files
from io import StringIO
from sklearn.tree import export_graphviz
from sklearn import tree
from sklearn import metrics
from IPython.display import Image
import pydotplus

# Code starts here
# Render the best tree found by the grid search.
# NOTE(review): class_names are given in ascending class order, so index 0
# labels class 0.  The encoding above maps No -> 0, which would make class 0
# "not paid back" -- these names look swapped; confirm against the grader's
# expected output before changing.
dot_data = export_graphviz(decision_tree=p_tree.best_estimator_, out_file=None,
                           feature_names=X.columns, filled=True,
                           class_names=['loan_paid_back_yes', 'loan_paid_back_no'])
graph_big = pydotplus.graph_from_dot_data(dot_data)

# show graph - do not delete/modify the code below this line
img_path = user_data_dir + '/file.png'
graph_big.write_png(img_path)
plt.figure(figsize=(20, 15))
plt.imshow(plt.imread(img_path))
plt.axis('off')
plt.show()
# Code ends here | Decision-Tree-Loan-Defaulters/code.py |
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
from sklearn.model_selection import train_test_split
# Code starts here
data = pd.read_csv(path)
X = data.drop(columns=['customer.id','paid.back.loan'])
y = data['paid.back.loan']
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3,random_state=0)
# Code ends here
# --------------
#Importing header files
import matplotlib.pyplot as plt
# Code starts here
fully_paid = y_train.value_counts()
plt.bar(fully_paid.index,fully_paid)
plt.show()
# Code ends here
# --------------
#Importing header files
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
# Code starts here
replace = lambda x: x.replace('%','')
X_train['int.rate'] = X_train['int.rate'].apply(replace)
X_train['int.rate'] = pd.to_numeric(X_train['int.rate'])/100
X_test['int.rate'] = pd.to_numeric(X_test['int.rate'].apply(replace))/100
num_df = X_train.select_dtypes(include=[np.number])
cat_df = X_train.select_dtypes(exclude=[np.number])
#print(X_train.info(),num_df,cat_df)
# Code ends here
# --------------
#Importing header files
import seaborn as sns
# Code starts here
cols = list(num_df.columns)
fig, axes = plt.subplots(9,1, figsize=(10,10))
for i in range(9):
sns.boxplot(x=y_train, y=num_df[cols[i]], ax=axes[i])
# Code ends here
# --------------
# Code starts here
cols = cat_df.columns
fig, axes = plt.subplots(2,2, figsize=(10,10))
for i in range(2):
for j in range(2):
sns.countplot(x=X_train[cols[i*2+j]],hue=y_train,ax=axes[i,j])
# Code ends here
# --------------
#Importing header files
from sklearn.tree import DecisionTreeClassifier
# Code starts here
le = LabelEncoder()
for i in range(len(cols)):
X_train[cols[i]] = le.fit_transform(X_train[cols[i]])
X_test[cols[i]] = le.transform(X_test[cols[i]])
y_train.replace({'No':0,'Yes':1},inplace=True)
y_test.replace({'No':0,'Yes':1},inplace=True)
model = DecisionTreeClassifier(random_state=0)
model.fit(X_train,y_train)
#y_pred = model.predict(X_test)
acc = model.score(X_test,y_test)
# Code ends here
# --------------
#Importing header files
from sklearn.model_selection import GridSearchCV
#Parameter grid
parameter_grid = {'max_depth': np.arange(3,10), 'min_samples_leaf': range(10,50,10)}
# Code starts here
model_2 = DecisionTreeClassifier(random_state=0)
p_tree = GridSearchCV(estimator=model_2,param_grid=parameter_grid,cv=5)
p_tree.fit(X_train,y_train)
acc_2 = p_tree.score(X_test,y_test)
# Code ends here
# --------------
#Importing header files
from io import StringIO
from sklearn.tree import export_graphviz
from sklearn import tree
from sklearn import metrics
from IPython.display import Image
import pydotplus
# Code starts here
dot_data = export_graphviz(decision_tree=p_tree.best_estimator_, out_file=None, feature_names=X.columns, filled = True, class_names=['loan_paid_back_yes','loan_paid_back_no'])
graph_big = pydotplus.graph_from_dot_data(dot_data)
# show graph - do not delete/modify the code below this line
img_path = user_data_dir+'/file.png'
graph_big.write_png(img_path)
plt.figure(figsize=(20,15))
plt.imshow(plt.imread(img_path))
plt.axis('off')
plt.show()
# Code ends here | 0.289472 | 0.325494 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the blog app.

    Creates Author, Category, Tag and Post; Post has foreign keys to
    Author/Category (CASCADE delete) and a many-to-many link to Tag.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=30)),
                ('last_name', models.CharField(max_length=30)),
                ('nick_name', models.CharField(max_length=60)),
                ('email', models.EmailField(max_length=254)),
                ('bio', models.TextField(help_text='Brief introduction about the author')),
            ],
        ),
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
            ],
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=60)),
                ('body', models.TextField()),
                ('created_date', models.DateTimeField(auto_now_add=True)),
                ('last_modified_date', models.DateTimeField(auto_now=True)),
                ('slug', models.SlugField(max_length=60)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Author')),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Category')),
            ],
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
            ],
        ),
        # Added after Tag's CreateModel so the M2M table can reference it.
        migrations.AddField(
            model_name='post',
            name='tags',
            field=models.ManyToManyField(to='blog.Tag'),
        ),
    ]
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=30)),
('last_name', models.CharField(max_length=30)),
('nick_name', models.CharField(max_length=60)),
('email', models.EmailField(max_length=254)),
('bio', models.TextField(help_text='Brief introduction about the author')),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=60)),
('body', models.TextField()),
('created_date', models.DateTimeField(auto_now_add=True)),
('last_modified_date', models.DateTimeField(auto_now=True)),
('slug', models.SlugField(max_length=60)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Author')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Category')),
],
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
],
),
migrations.AddField(
model_name='post',
name='tags',
field=models.ManyToManyField(to='blog.Tag'),
),
] | 0.598195 | 0.165694 |
from spy_state import *
import sys, re
# All these calls are based on the print statements in legion_logging.h
# Raw strings so backslash escapes (\[ \{ \w) reach the regex engine
# literally instead of being (invalid) Python string escapes, which raise
# warnings/errors on Python 3.
prefix = r"\[[0-9]+ - [0-9a-f]+\] \{\w+\}\{legion_spy\}: "
# Calls for the shape of region trees
top_index_pat = re.compile(prefix + r"Index Space (?P<uid>[0-9]+)")
index_part_pat = re.compile(prefix + r"Index Partition (?P<pid>[0-9]+) (?P<uid>[0-9]+) (?P<disjoint>[0-1]) (?P<color>[0-9]+)")
index_subspace_pat = re.compile(prefix + r"Index Subspace (?P<pid>[0-9]+) (?P<uid>[0-9]+) (?P<color>[0-9]+)")
field_space_pat = re.compile(prefix + r"Field Space (?P<uid>[0-9]+)")
field_create_pat = re.compile(prefix + r"Field Creation (?P<uid>[0-9]+) (?P<fid>[0-9]+)")
region_pat = re.compile(prefix + r"Region (?P<iid>[0-9]+) (?P<fid>[0-9]+) (?P<tid>[0-9]+)")
# Logger calls for operations
top_task_pat = re.compile(prefix + r"Top Task (?P<uid>[0-9]+) (?P<tid>[0-9]+)")
task_pat = re.compile(prefix + r"Task Operation (?P<uid>[0-9]+) (?P<tid>[0-9]+) (?P<pid>[0-9]+) (?P<ctx>[0-9]+)")
mapping_pat = re.compile(prefix + r"Mapping Operation (?P<uid>[0-9]+) (?P<pid>[0-9]+) (?P<ctx>[0-9]+)")
deletion_pat = re.compile(prefix + r"Deletion Operation (?P<uid>[0-9]+) (?P<pid>[0-9]+) (?P<ctx>[0-9]+)")
task_name_pat = re.compile(prefix + r"Task Name (?P<uid>[0-9]+) (?P<name>\w+)")
# Logger calls for logical mapping dependence analysis
requirement_pat = re.compile(prefix + r"Logical Requirement (?P<uid>[0-9]+) (?P<index>[0-9]+) (?P<is_reg>[0-1]) (?P<ispace>[0-9]+) (?P<fspace>[0-9]+) (?P<tid>[0-9]+) (?P<priv>[0-9]+) (?P<coher>[0-9]+) (?P<redop>[0-9]+)")
req_field_pat = re.compile(prefix + r"Logical Requirement Field (?P<uid>[0-9]+) (?P<index>[0-9]+) (?P<fid>[0-9]+)")
mapping_dep_pat = re.compile(prefix + r"Mapping Dependence (?P<pid>[0-9]+) (?P<ctx>[0-9]+) (?P<prev_id>[0-9]+) (?P<pidx>[0-9]+) (?P<next_id>[0-9]+) (?P<nidx>[0-9]+) (?P<dtype>[0-9]+)")
# Logger calls for events
event_event_pat = re.compile(prefix + r"Event Event (?P<idone>[0-9]+) (?P<genone>[0-9]+) (?P<idtwo>[0-9]+) (?P<gentwo>[0-9]+)")
task_event_pat = re.compile(prefix + r"Task Events (?P<uid>[0-9]+) (?P<index_space>[0-1]) (?P<point>[0-9]+) (?P<startid>[0-9]+) (?P<startgen>[0-9]+) (?P<termid>[0-9]+) (?P<termgen>[0-9]+)")
index_term_pat = re.compile(prefix + r"Index Termination (?P<uid>[0-9]+) (?P<termid>[0-9]+) (?P<termgen>[0-9]+)")
copy_event_pat = re.compile(prefix + r"Copy Events (?P<srcid>[0-9]+) (?P<dstid>[0-9]+) (?P<srcloc>[0-9]+) (?P<dstloc>[0-9]+) (?P<index>[0-9]+) (?P<field>[0-9]+) (?P<tree>[0-9]+) (?P<startid>[0-9]+) (?P<startgen>[0-9]+) (?P<termid>[0-9]+) (?P<termgen>[0-9]+) (?P<mask>[0-9a-f]+)")
map_event_pat = re.compile(prefix + r"Map Events (?P<uid>[0-9]+) (?P<startid>[0-9]+) (?P<startgen>[0-9]+) (?P<termid>[0-9]+) (?P<termgen>[0-9]+)")
def parse_log_file(file_name, trees, ops, events):
    """Parse a Legion Spy log file and populate the given state objects.

    Each line is matched against the module-level regexes in order; the
    first pattern that matches dispatches to the corresponding callback on
    `trees` (region-tree shape), `ops` (operations), or `events` (event
    graph).  Returns the number of lines that matched any pattern.

    Fixes vs. the original: the Python 2-only `<>` operator is replaced by
    `is not None` (so the file parses on Python 3), and the log is opened
    with `with` so the handle is closed even if a callback raises.
    """
    matches = 0
    with open(file_name, "r") as log:
        for line in log:
            # Optimistically count the line; undo at the bottom if nothing
            # matched (preserves the original counting scheme).
            matches = matches + 1
            # Region tree shapes
            m = top_index_pat.match(line)
            if m is not None:
                trees.add_index_space(int(m.group('uid')))
                continue
            m = index_part_pat.match(line)
            if m is not None:
                trees.add_index_partition(int(m.group('pid')), int(m.group('uid')),
                                          int(m.group('disjoint')) == 1, int(m.group('color')))
                continue
            m = index_subspace_pat.match(line)
            if m is not None:
                trees.add_index_subspace(int(m.group('pid')), int(m.group('uid')), int(m.group('color')))
                continue
            m = field_space_pat.match(line)
            if m is not None:
                trees.add_field_space(int(m.group('uid')))
                continue
            m = field_create_pat.match(line)
            if m is not None:
                trees.add_field(int(m.group('uid')), int(m.group('fid')))
                continue
            m = region_pat.match(line)
            if m is not None:
                trees.add_region(int(m.group('iid')), int(m.group('fid')), int(m.group('tid')))
                continue
            # Operations
            m = top_task_pat.match(line)
            if m is not None:
                ops.add_top_task(int(m.group('uid')), int(m.group('tid')))
                continue
            m = task_pat.match(line)
            if m is not None:
                ops.add_task(int(m.group('uid')), int(m.group('tid')),
                             int(m.group('pid')), int(m.group('ctx')))
                continue
            m = mapping_pat.match(line)
            if m is not None:
                ops.add_mapping(int(m.group('uid')), int(m.group('pid')), int(m.group('ctx')))
                continue
            m = deletion_pat.match(line)
            if m is not None:
                ops.add_deletion(int(m.group('uid')), int(m.group('pid')), int(m.group('ctx')))
                continue
            m = task_name_pat.match(line)
            if m is not None:
                ops.add_name(int(m.group('uid')), m.group('name'))
                continue
            # Mapping dependence analysis
            m = requirement_pat.match(line)
            if m is not None:
                ops.add_requirement(int(m.group('uid')), int(m.group('index')),
                                    int(m.group('is_reg')) == 1, int(m.group('ispace')),
                                    int(m.group('fspace')), int(m.group('tid')),
                                    int(m.group('priv')), int(m.group('coher')),
                                    int(m.group('redop')))
                continue
            m = req_field_pat.match(line)
            if m is not None:
                ops.add_req_field(int(m.group('uid')), int(m.group('index')), int(m.group('fid')))
                continue
            m = mapping_dep_pat.match(line)
            if m is not None:
                ops.add_mapping_dependence(int(m.group('pid')), int(m.group('ctx')),
                                           int(m.group('prev_id')), int(m.group('pidx')),
                                           int(m.group('next_id')), int(m.group('nidx')),
                                           int(m.group('dtype')))
                continue
            # Event analysis
            m = event_event_pat.match(line)
            if m is not None:
                events.add_event_dependence(int(m.group('idone')), int(m.group('genone')),
                                            int(m.group('idtwo')), int(m.group('gentwo')))
                continue
            m = task_event_pat.match(line)
            if m is not None:
                events.add_task_instance(int(m.group('uid')), int(m.group('index_space')) == 1,
                                         int(m.group('point')), int(m.group('startid')),
                                         int(m.group('startgen')), int(m.group('termid')),
                                         int(m.group('termgen')))
                continue
            m = index_term_pat.match(line)
            if m is not None:
                events.add_index_term(int(m.group('uid')), int(m.group('termid')), int(m.group('termgen')))
                continue
            m = copy_event_pat.match(line)
            if m is not None:
                events.add_copy_instance(int(m.group('srcid')), int(m.group('dstid')),
                                         int(m.group('srcloc')), int(m.group('dstloc')),
                                         int(m.group('index')), int(m.group('field')),
                                         int(m.group('tree')), int(m.group('startid')),
                                         int(m.group('startgen')), int(m.group('termid')),
                                         int(m.group('termgen')), m.group('mask'))
                continue
            m = map_event_pat.match(line)
            if m is not None:
                events.add_map_instance(int(m.group('uid')), int(m.group('startid')),
                                        int(m.group('startgen')), int(m.group('termid')),
                                        int(m.group('termgen')))
                continue
            # If we made it here we didn't match
            matches = matches - 1
    return matches
from spy_state import *
import sys, re
# All these calls are based on the print statements in legion_logging.h
prefix = "\[[0-9]+ - [0-9a-f]+\] \{\w+\}\{legion_spy\}: "
# Calls for the shape of region trees
top_index_pat = re.compile(prefix+"Index Space (?P<uid>[0-9]+)")
index_part_pat = re.compile(prefix+"Index Partition (?P<pid>[0-9]+) (?P<uid>[0-9]+) (?P<disjoint>[0-1]) (?P<color>[0-9]+)")
index_subspace_pat = re.compile(prefix+"Index Subspace (?P<pid>[0-9]+) (?P<uid>[0-9]+) (?P<color>[0-9]+)")
field_space_pat = re.compile(prefix+"Field Space (?P<uid>[0-9]+)")
field_create_pat = re.compile(prefix+"Field Creation (?P<uid>[0-9]+) (?P<fid>[0-9]+)")
region_pat = re.compile(prefix+"Region (?P<iid>[0-9]+) (?P<fid>[0-9]+) (?P<tid>[0-9]+)")
# Logger calls for operations
top_task_pat = re.compile(prefix+"Top Task (?P<uid>[0-9]+) (?P<tid>[0-9]+)")
task_pat = re.compile(prefix+"Task Operation (?P<uid>[0-9]+) (?P<tid>[0-9]+) (?P<pid>[0-9]+) (?P<ctx>[0-9]+)")
mapping_pat = re.compile(prefix+"Mapping Operation (?P<uid>[0-9]+) (?P<pid>[0-9]+) (?P<ctx>[0-9]+)")
deletion_pat = re.compile(prefix+"Deletion Operation (?P<uid>[0-9]+) (?P<pid>[0-9]+) (?P<ctx>[0-9]+)")
task_name_pat = re.compile(prefix+"Task Name (?P<uid>[0-9]+) (?P<name>\w+)")
# Logger calls for logical mapping dependence analysis
requirement_pat = re.compile(prefix+"Logical Requirement (?P<uid>[0-9]+) (?P<index>[0-9]+) (?P<is_reg>[0-1]) (?P<ispace>[0-9]+) (?P<fspace>[0-9]+) (?P<tid>[0-9]+) (?P<priv>[0-9]+) (?P<coher>[0-9]+) (?P<redop>[0-9]+)")
req_field_pat = re.compile(prefix+"Logical Requirement Field (?P<uid>[0-9]+) (?P<index>[0-9]+) (?P<fid>[0-9]+)")
mapping_dep_pat = re.compile(prefix+"Mapping Dependence (?P<pid>[0-9]+) (?P<ctx>[0-9]+) (?P<prev_id>[0-9]+) (?P<pidx>[0-9]+) (?P<next_id>[0-9]+) (?P<nidx>[0-9]+) (?P<dtype>[0-9]+)")
# Logger calls for events
event_event_pat = re.compile(prefix+"Event Event (?P<idone>[0-9]+) (?P<genone>[0-9]+) (?P<idtwo>[0-9]+) (?P<gentwo>[0-9]+)")
task_event_pat = re.compile(prefix+"Task Events (?P<uid>[0-9]+) (?P<index_space>[0-1]) (?P<point>[0-9]+) (?P<startid>[0-9]+) (?P<startgen>[0-9]+) (?P<termid>[0-9]+) (?P<termgen>[0-9]+)")
index_term_pat = re.compile(prefix+"Index Termination (?P<uid>[0-9]+) (?P<termid>[0-9]+) (?P<termgen>[0-9]+)")
copy_event_pat = re.compile(prefix+"Copy Events (?P<srcid>[0-9]+) (?P<dstid>[0-9]+) (?P<srcloc>[0-9]+) (?P<dstloc>[0-9]+) (?P<index>[0-9]+) (?P<field>[0-9]+) (?P<tree>[0-9]+) (?P<startid>[0-9]+) (?P<startgen>[0-9]+) (?P<termid>[0-9]+) (?P<termgen>[0-9]+) (?P<mask>[0-9a-f]+)")
map_event_pat = re.compile(prefix+"Map Events (?P<uid>[0-9]+) (?P<startid>[0-9]+) (?P<startgen>[0-9]+) (?P<termid>[0-9]+) (?P<termgen>[0-9]+)")
def parse_log_file(file_name, trees, ops, events):
log = open(file_name, "r")
matches = 0
for line in log:
matches = matches + 1
# Region tree shapes
m = top_index_pat.match(line)
if m <> None:
trees.add_index_space(int(m.group('uid')))
continue
m = index_part_pat.match(line)
if m <> None:
trees.add_index_partition(int(m.group('pid')), int(m.group('uid')), True if (int(m.group('disjoint'))) == 1 else False, int(m.group('color')))
continue
m = index_subspace_pat.match(line)
if m <> None:
trees.add_index_subspace(int(m.group('pid')), int(m.group('uid')), int(m.group('color')))
continue
m = field_space_pat.match(line)
if m <> None:
trees.add_field_space(int(m.group('uid')))
continue
m = field_create_pat.match(line)
if m <> None:
trees.add_field(int(m.group('uid')), int(m.group('fid')))
continue
m = region_pat.match(line)
if m <> None:
trees.add_region(int(m.group('iid')), int(m.group('fid')), int(m.group('tid')))
continue
# Operations
m = top_task_pat.match(line)
if m <> None:
ops.add_top_task(int(m.group('uid')), int(m.group('tid')))
continue
m = task_pat.match(line)
if m <> None:
ops.add_task(int(m.group('uid')), int(m.group('tid')), int(m.group('pid')), int(m.group('ctx')))
continue
m = mapping_pat.match(line)
if m <> None:
ops.add_mapping(int(m.group('uid')), int(m.group('pid')), int (m.group('ctx')))
continue
m = deletion_pat.match(line)
if m <> None:
ops.add_deletion(int(m.group('uid')), int(m.group('pid')), int(m.group('ctx')))
continue
m = task_name_pat.match(line)
if m <> None:
ops.add_name(int(m.group('uid')), m.group('name'))
continue
# Mapping dependence analysis
m = requirement_pat.match(line)
if m <> None:
ops.add_requirement(int(m.group('uid')), int(m.group('index')), True if (int(m.group('is_reg')))==1 else False, int(m.group('ispace')), int(m.group('fspace')), int(m.group('tid')), int(m.group('priv')), int(m.group('coher')), int(m.group('redop')))
continue
m = req_field_pat.match(line)
if m <> None:
ops.add_req_field(int(m.group('uid')), int(m.group('index')), int(m.group('fid')))
continue
m = mapping_dep_pat.match(line)
if m <> None:
ops.add_mapping_dependence(int(m.group('pid')), int(m.group('ctx')), int(m.group('prev_id')), int(m.group('pidx')), int(m.group('next_id')), int(m.group('nidx')), int(m.group('dtype')))
continue
# Event analysis
m = event_event_pat.match(line)
if m <> None:
events.add_event_dependence(int(m.group('idone')), int(m.group('genone')), int(m.group('idtwo')), int(m.group('gentwo')))
continue
m = task_event_pat.match(line)
if m <> None:
events.add_task_instance(int(m.group('uid')), True if (int(m.group('index_space')))==1 else False, int(m.group('point')), int(m.group('startid')), int(m.group('startgen')), int(m.group('termid')), int(m.group('termgen')))
continue
m = index_term_pat.match(line)
if m <> None:
events.add_index_term(int(m.group('uid')), int(m.group('termid')), int(m.group('termgen')))
continue
m = copy_event_pat.match(line)
if m <> None:
events.add_copy_instance(int(m.group('srcid')), int(m.group('dstid')), int(m.group('srcloc')), int(m.group('dstloc')), int(m.group('index')), int(m.group('field')), int(m.group('tree')), int(m.group('startid')), int(m.group('startgen')), int(m.group('termid')), int(m.group('termgen')), m.group('mask'))
continue
m = map_event_pat.match(line)
if m <> None:
events.add_map_instance(int(m.group('uid')), int(m.group('startid')), int(m.group('startgen')), int(m.group('termid')), int(m.group('termgen')))
continue
# If we made it here we didn't match
matches = matches - 1
log.close()
return matches | 0.149811 | 0.344526 |
from logging.config import dictConfig
from reports.core import BaseBidsUtility, NEW_ALG_DATE, CHANGE_2019_DATE
from reports.helpers import (
thresholds_headers,
get_arguments_parser,
read_config
)
from reports.helpers import DEFAULT_TIMEZONE, DEFAULT_MODE
class InvoicesUtility(BaseBidsUtility):
    """Build the "invoices" billing report for one broker and period.

    Counters are grouped per billing algorithm era: index 0 for the oldest
    algorithm, 1-4 for tenders between NEW_ALG_DATE and CHANGE_2019_DATE,
    and 5-9 for tenders started after CHANGE_2019_DATE.
    """

    number_of_ranges = 6
    number_of_counters = 10

    def __init__(self, broker, period, config,
                 timezone=DEFAULT_TIMEZONE, mode=DEFAULT_MODE):
        super(InvoicesUtility, self).__init__(
            broker, period, config,
            operation='invoices', timezone=timezone, mode=mode)
        self.headers = thresholds_headers(
            self.config.thresholds
        )

    @staticmethod
    def get_counter_line(record):
        """Return the counter row index appropriate for this record's era."""
        state = record.get('state', '')
        if state:
            if record["startdate"] >= CHANGE_2019_DATE:  # counters 5-9 for tenders started after 2019-08-22
                return state + 4
            return state  # counter 1-4 for tenders NEW_ALG_DATE< and <2019-08-22
        else:
            return 0  # the oldest alg tender counters

    def get_payment_year(self, record):
        """
        Returns the costs version applicable for the specific record
        find them in the config by their keys (2017 and 2019)
        we didn't have 2016 in the legacy version of this code
        """
        if record["startdate"] >= CHANGE_2019_DATE:
            return 2019
        return 2017

    def row(self, record):
        """Bucket one record's payment into the matching counter cell."""
        value, rate = self.convert_value(record)
        payment_year = self.get_payment_year(record)
        payment = self.get_payment(value, year=payment_year)
        p = self.config.payments(payment_year)
        c = self.counters[self.get_counter_line(record)]
        for i, x in enumerate(p):
            if payment == x:
                msg = 'Invoices: bill {} for tender {} '\
                      'with value {}'.format(payment, record['tender'], value)
                self.Logger.info(msg)
                c[i] += 1

    def rows(self):
        """Yield the report rows: one section per billing-algorithm era."""
        for resp in self.response:  # prepare counters
            self.row(resp['value'])
        costs_2017 = self.config.payments(grid=2017)
        # Oldest-algorithm section: counters, costs, and their products.
        for row in [
            [self.version_headers[0]],
            self.counters[0],
            costs_2017,
            [c * v for c, v in zip(self.counters[0], costs_2017)],
        ]:
            yield row
        yield [self.version_headers[1]]
        for row in self.get_2017_algorithm_rows(self.counters[1], self.counters[2], self.counters[3]):
            yield row
        yield [self.version_headers[2]]
        for row in self.get_2017_algorithm_rows(self.counters[5], self.counters[6], self.counters[7], costs_year=2019):
            yield row

    def get_2017_algorithm_rows(self, *lines, **kwargs):
        """Yield the three raw counter lines, the a-b-c total, the cost grid
        for ``costs_year`` (default 2017), and the total*cost row."""
        a_line, b_line, c_line = lines
        for line in lines:
            yield line
        total_line = [a - b - c for a, b, c in zip(a_line, b_line, c_line)]
        yield total_line
        costs_line = self.config.payments(grid=kwargs.get("costs_year", 2017))
        yield costs_line
        yield [c * v for c, v in zip(total_line, costs_line)]
def run():
    """CLI entry point: parse arguments, configure logging from the config
    file, and run the invoices utility for the requested broker/period."""
    parser = get_arguments_parser()
    args = parser.parse_args()
    config = read_config(args.config)
    dictConfig(config)
    utility = InvoicesUtility(
        args.broker, args.period,
        config, timezone=args.timezone, mode=args.mode)
    utility.run()
if __name__ == "__main__":
run() | reports/utilities/invoices.py | from logging.config import dictConfig
from reports.core import BaseBidsUtility, NEW_ALG_DATE, CHANGE_2019_DATE
from reports.helpers import (
thresholds_headers,
get_arguments_parser,
read_config
)
from reports.helpers import DEFAULT_TIMEZONE, DEFAULT_MODE
class InvoicesUtility(BaseBidsUtility):
number_of_ranges = 6
number_of_counters = 10
def __init__(self, broker, period, config,
timezone=DEFAULT_TIMEZONE, mode=DEFAULT_MODE):
super(InvoicesUtility, self).__init__(
broker, period, config,
operation='invoices', timezone=timezone, mode=mode)
self.headers = thresholds_headers(
self.config.thresholds
)
@staticmethod
def get_counter_line(record):
state = record.get('state', '')
if state:
if record["startdate"] >= CHANGE_2019_DATE: # counters 5-9 for tenders started after 2019-08-22
return state + 4
return state # counter 1-4 for tenders NEW_ALG_DATE< and <2019-08-22
else:
return 0 # the oldest alg tender counters
def get_payment_year(self, record):
"""
Returns the costs version applicable for the specific record
find them in the config by their keys (2017 and 2019)
we didn't have 2016 in the legacy version of this code
"""
if record["startdate"] >= CHANGE_2019_DATE:
return 2019
return 2017
def row(self, record):
value, rate = self.convert_value(record)
payment_year = self.get_payment_year(record)
payment = self.get_payment(value, year=payment_year)
p = self.config.payments(payment_year)
c = self.counters[self.get_counter_line(record)]
for i, x in enumerate(p):
if payment == x:
msg = 'Invoices: bill {} for tender {} '\
'with value {}'.format(payment, record['tender'], value)
self.Logger.info(msg)
c[i] += 1
def rows(self):
for resp in self.response: # prepare counters
self.row(resp['value'])
costs_2017 = self.config.payments(grid=2017)
for row in [
[self.version_headers[0]],
self.counters[0],
costs_2017,
[c * v for c, v in zip(self.counters[0], costs_2017)],
]:
yield row
yield [self.version_headers[1]]
for row in self.get_2017_algorithm_rows(self.counters[1], self.counters[2], self.counters[3]):
yield row
yield [self.version_headers[2]]
for row in self.get_2017_algorithm_rows(self.counters[5], self.counters[6], self.counters[7], costs_year=2019):
yield row
def get_2017_algorithm_rows(self, *lines, **kwargs):
a_line, b_line, c_line = lines
for line in lines:
yield line
total_line = [a - b - c for a, b, c in zip(a_line, b_line, c_line)]
yield total_line
costs_line = self.config.payments(grid=kwargs.get("costs_year", 2017))
yield costs_line
yield [c * v for c, v in zip(total_line, costs_line)]
def run():
parser = get_arguments_parser()
args = parser.parse_args()
config = read_config(args.config)
dictConfig(config)
utility = InvoicesUtility(
args.broker, args.period,
config, timezone=args.timezone, mode=args.mode)
utility.run()
if __name__ == "__main__":
run() | 0.645567 | 0.168549 |
import os
import pandas as pd
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial import distance
from IPython.display import clear_output
from itertools import combinations
import multiprocessing as mp
import logging
import argparse
import time
'''
This script will compare action sequences between participants taken on the same structure, within a phase
see in block_silhouette_sequences.ipynb
- input to that, is dfa:
- see this function: get_aggregate_distances_btw_ppts: takes as input df_structure, which contains action sequences for all participants for a given structure, in a given phase, i.e., the groups in this: groupby(['targetName','phase_extended']
'''
### GLOBAL VARS AND DICTS
# The eight silhouette structures participants build in the experiment.
targets = ['hand_selected_004', 'hand_selected_005', 'hand_selected_006',
'hand_selected_008', 'hand_selected_009', 'hand_selected_011',
'hand_selected_012', 'hand_selected_016']
# Short display name per target: its trailing numeric suffix (e.g. '004').
prettyTarg = dict(zip(sorted(targets), [i.split('_')[-1] for i in sorted(targets)]))
# Phase ordering for display/sorting: pre=1, repetitions, post=4.
prettyPhase = {'pre':1, 'repetition 1': 2, 'repetition 2': 3, 'post': 4}
### GENERAL HELPERS
def make_dir_if_not_exists(dir_name):
    """Create *dir_name* (including parents) if needed and return it.

    Uses ``exist_ok=True`` so there is no window between the existence
    check and the creation (the original check-then-create pattern could
    race with a concurrent worker creating the same directory).
    """
    os.makedirs(dir_name, exist_ok=True)
    return dir_name
def batch(iterable, n=1):
    """Yield successive slices of *iterable* of size at most *n*.

    The final chunk is shorter when ``len(iterable)`` is not a multiple of
    *n*.  The explicit ``min`` clamp from the original is unnecessary --
    Python slicing already clamps the stop index.
    """
    length = len(iterable)
    for start in range(0, length, n):
        yield iterable[start:start + n]
### SPECIALIZED HELPERS
def optimal_sort(distance_matrix):
    """Permute the columns of *distance_matrix* by the minimum-cost
    row-to-column assignment (Hungarian algorithm), so the diagonal of the
    returned matrix holds the optimally-matched pair distances."""
    _, col_assignment = linear_sum_assignment(distance_matrix)
    return distance_matrix[:, col_assignment]
def get_distance_matrix(A,B,distance_measure=distance.euclidean, truncating = True):
'''
Returns distance matrix truncated to shortest sequence
'''
# truncate to length of smaller set of actions
n_actions = min(len(A),len(B))
if truncating:
A = A.iloc[0:n_actions]
B = B.iloc[0:n_actions]
start = time.time()
AB = pd.concat([A[['x','y','w','h']], B[['x','y','w','h']]], axis=0)
fullmat = distance.pdist(AB, metric='euclidean')
ABmat = distance.squareform(fullmat)[:n_actions,n_actions:]
end = time.time()
elapsed = end-start
return ABmat
def compute_transformed_distance(I, J):
    """Mean per-action distance between action sequences I and J after
    optimally matching J's actions to I's (Hungarian assignment on the
    truncated pairwise distance matrix)."""
    dmat = get_distance_matrix(I, J)
    return np.mean(np.diag(optimal_sort(dmat)))
def get_aggregate_distances_btw_ppts(B, out_path, distance_measure=distance.euclidean):
    '''
    Compute the between-participant variance of action sequences for one
    structure/phase/condition group and append it to a CSV.

    B : DataFrame with all participants' action sequences for a single
        target_phase_condition group (one gameID per participant).
    out_path : CSV file the (target_phase_condition, var) row is appended to.
    distance_measure : NOTE(review): currently unused -- the pairwise
        distance is computed inside compute_transformed_distance; kept for
        interface compatibility, confirm intent.

    Returns the variance across all participant pairs.
    '''
    ## get phase dists: optimally-matched distance for every pair of participants
    start = time.time()
    combos = list(combinations(B.gameID.unique(), 2))
    phase_dists = [compute_transformed_distance(B[B['gameID'] == i], B[B['gameID'] == j])
                   for (i, j) in combos]
    elapsed = time.time() - start
    print('Analyzing optimal distance for {} | {} sec.'.format(
        B['target_phase_condition'].unique()[0], np.round(elapsed, 3)))
    ## calculate variance
    # NOTE(review): denominator is n_participants**2, not the number of
    # pairs -- matches the legacy behavior; confirm this is intended.
    sum_sq_diffs = np.sum(np.square(phase_dists))
    var = sum_sq_diffs / (B['gameID'].nunique() ** 2)
    ## create a single-row df: (target_phase_condition, var)
    df = pd.DataFrame([B['target_phase_condition'].unique()[0], var]).transpose()
    df.columns = ['target_phase_condition', 'var']
    ## append to file; write the header only when the file is empty
    with open(out_path, 'a') as f:
        df.to_csv(f, mode='a', header=f.tell() == 0)
    return var
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--in_path', type=str,
                        help='input csv', default='../results/csv/block_silhouette_Exp2Pilot3_all_dfa.csv')
    parser.add_argument('--batch_dir', type=str,
                        help='where to save action batches', default='../results/csv/action_batches')
    parser.add_argument('--out_path', type=str,
                        help='output file', default='../results/csv/block_silhouette_Exp2Pilot3_transformedActionDistances.csv')
    args = parser.parse_args()

    ## load in CSV
    D = pd.read_csv(args.in_path)

    ## assign unique target-phase-condition identifiers
    D = (D.assign(target_phase_condition = D.apply(lambda x:
        '{}_{}_{}'.format(prettyTarg[x['targetName']], prettyPhase[x['phase_extended']], x['condition']), axis=1)))

    ## set up batching system
    dir_name = make_dir_if_not_exists(args.batch_dir)

    ## divide CSV data into batches to ease parallelization
    tpc_ids = D.target_phase_condition.unique()
    for i, curr_batch in enumerate(tpc_ids):
        t = D[D['target_phase_condition'] == curr_batch]
        # target_phase_condition is formatted '<target>_<phase>_<condition>'
        parts = t['target_phase_condition'].unique()[0].split('_')
        target, phase, cond = parts[0], parts[1], parts[2]
        out_path = os.path.join(args.batch_dir, 'actions_target-{}_phase-{}_cond-{}.csv'.format(target, phase, cond))
        t.to_csv(out_path, index=False)
        print('Saved batch to {}'.format(out_path))
        clear_output(wait=True)
    # FIX: report the number of batches, not the last 0-based loop index
    # (the old message under-counted by one and crashed nothing but misled).
    print('Done! Saved {} batches in total.'.format(len(tpc_ids)))

    ## Group by phase and targetName, apply spatial-distance measure and
    ## aggregate by taking the mean of the diagonal -- one process per batch.
    ## (Renamed the loop variable: it previously shadowed the batch() helper.)
    tpc_batches = [os.path.join(os.path.abspath(args.batch_dir), fname) for fname in os.listdir(args.batch_dir)]
    logger = mp.get_logger()          # loop-invariant: configure logging once
    logger.setLevel(logging.INFO)
    jobs = []
    for batch_path in tpc_batches:
        B = pd.read_csv(batch_path)
        p = mp.Process(target=get_aggregate_distances_btw_ppts, args=(B, args.out_path,))
        jobs.append(p)
        p.start()
    # FIX: wait for every worker so the output CSV is complete when we exit.
    for p in jobs:
        p.join()
import pandas as pd
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial import distance
from IPython.display import clear_output
from itertools import combinations
import multiprocessing as mp
import logging
import argparse
import time
'''
This script will compare action sequences between participants taken on the same structure, within a phase
see in block_silhouette_sequences.ipynb
- input to that, is dfa:
- see this function: get_aggregate_distances_btw_ppts: takes as input df_structure, which contains action sequences for all participants for a given structure, in a given phase, i.e., the groups in this: groupby(['targetName','phase_extended']
'''
### GLOBAL VARS AND DICTS
targets = ['hand_selected_004', 'hand_selected_005', 'hand_selected_006',
'hand_selected_008', 'hand_selected_009', 'hand_selected_011',
'hand_selected_012', 'hand_selected_016']
prettyTarg = dict(zip(sorted(targets), [i.split('_')[-1] for i in sorted(targets)]))
prettyPhase = {'pre':1, 'repetition 1': 2, 'repetition 2': 3, 'post': 4}
### GENERAL HELPERS
def make_dir_if_not_exists(dir_name):
if not os.path.exists(dir_name):
os.makedirs(dir_name)
return dir_name
def batch(iterable, n=1):
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx:min(ndx + n, l)]
### SPECIALIZED HELPERS
def optimal_sort(distance_matrix):
optimal_assignment = linear_sum_assignment(distance_matrix)[1]
sorted_matrix = distance_matrix[:, optimal_assignment]
return sorted_matrix
def get_distance_matrix(A,B,distance_measure=distance.euclidean, truncating = True):
'''
Returns distance matrix truncated to shortest sequence
'''
# truncate to length of smaller set of actions
n_actions = min(len(A),len(B))
if truncating:
A = A.iloc[0:n_actions]
B = B.iloc[0:n_actions]
start = time.time()
AB = pd.concat([A[['x','y','w','h']], B[['x','y','w','h']]], axis=0)
fullmat = distance.pdist(AB, metric='euclidean')
ABmat = distance.squareform(fullmat)[:n_actions,n_actions:]
end = time.time()
elapsed = end-start
return ABmat
def compute_transformed_distance(I, J):
return np.mean(np.diag(optimal_sort(get_distance_matrix(I,J))))
def get_aggregate_distances_btw_ppts(B, out_path, distance_measure=distance.euclidean):
'''
Group is a dataframe (usually groupby of targetName and gameID)
distance_measure is the distance between action vectors [x,y,w,h]
Returns the variance between participants for a given structure and phase
'''
## get phase dists
start = time.time()
combos = list(combinations(B.gameID.unique(),2))
phase_dists = [compute_transformed_distance(B[B['gameID']==i], B[B['gameID']==j]) for (i, j) in combos]
end = time.time()
elapsed = end - start
print('Analyzing optimal distance for {} | {} sec.'.format(B['target_phase_condition'].unique()[0], np.round(elapsed,3)))
## calculate variance
sum_sq_diffs = np.sum(np.square(phase_dists))
var = sum_sq_diffs/(B['gameID'].nunique()**2)
## create df
df = pd.DataFrame([B['target_phase_condition'].unique()[0], var]).transpose()
df.columns = ['target_phase_condition', 'var']
## save out to file
with open(out_path, 'a') as f:
df.to_csv(f, mode='a', header=f.tell()==0)
return var
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--in_path', type=str,
help='input csv', default='../results/csv/block_silhouette_Exp2Pilot3_all_dfa.csv')
parser.add_argument('--batch_dir', type=str,
help='where to save action batches', default='../results/csv/action_batches')
parser.add_argument('--out_path', type=str,
help='output file', default='../results/csv/block_silhouette_Exp2Pilot3_transformedActionDistances.csv')
args = parser.parse_args()
## load in CSV
D = pd.read_csv(args.in_path)
## assign unique target-phase-condition identifiers
D = (D.assign(target_phase_condition = D.apply(lambda x:
'{}_{}_{}'.format(prettyTarg[x['targetName']], prettyPhase[x['phase_extended']], x['condition']),axis=1)))
## set up batching system
dir_name = make_dir_if_not_exists(args.batch_dir)
## divide CSV data into batches to ease parallelization
tpc_ids = D.target_phase_condition.unique()
for i, curr_batch in enumerate(tpc_ids):
t = D[D['target_phase_condition']==curr_batch]
target = t['target_phase_condition'].unique()[0].split('_')[0]
phase = t['target_phase_condition'].unique()[0].split('_')[1]
cond = t['target_phase_condition'].unique()[0].split('_')[2]
out_path = os.path.join(args.batch_dir,'actions_target-{}_phase-{}_cond-{}.csv'.format(target, phase, cond))
t.to_csv(out_path, index=False)
print('Saved batch to {}'.format(out_path))
clear_output(wait=True)
print('Done! Saved {} batches in total.'.format(i))
## Group by phase and targetName, apply spatial-distance measure and aggregate by taking mean of the diagonal
tpc_batches = [os.path.join(os.path.abspath(args.batch_dir),batch) for batch in os.listdir(args.batch_dir)]
jobs = []
for batch_ind, batch in enumerate(tpc_batches):
B = pd.read_csv(batch)
logger = mp.get_logger()
logger.setLevel(logging.INFO)
p = mp.Process(target=get_aggregate_distances_btw_ppts, args=(B,args.out_path,))
jobs.append(p)
p.start() | 0.612657 | 0.400632 |
from typing import Optional
import starstruct
from starstruct.element import register, Element
from starstruct.modes import Mode
class Escapor:
    """Byte sequences used to frame an escaped list of packed messages.

    Each of ``start``, ``separator`` and ``end`` falls back to ``b''``
    when it was not supplied at construction time.
    """

    def __init__(self, start=None, separator=None, end=None, opts=None):
        self._start = start
        self._separator = separator
        self._end = end
        self._opts = opts

    @property
    def start(self):
        """Opening byte sequence (``b''`` when unset)."""
        return b'' if self._start is None else self._start

    @property
    def separator(self):
        """Per-item separator byte sequence (``b''`` when unset)."""
        return b'' if self._separator is None else self._separator

    @property
    def end(self):
        """Terminating byte sequence (``b''`` when unset)."""
        return b'' if self._end is None else self._end
@register
class ElementEscaped(Element):
    """
    A StarStruct element whose value is a list of sub-messages packed with
    explicit start/separator/end byte sequences (see :class:`Escapor`).

    The field definition has the form
    ``(name, message, {'escape': {'start': ..., 'separator': ..., 'end': ...}})``.
    """

    def __init__(self, field: list, mode: Optional[Mode]=Mode.Native, alignment: Optional[int]=1):
        """
        Initialize a StarStruct element object.

        :param field: the fields passed into the constructor of the element
        :param mode: the mode in which to pack the bytes
        :param alignment: number of bytes to align to
        """
        # All of the type checks have already been performed by the class
        # factory
        self.name = field[0]

        # Escaped elements don't use the normal struct format, the format is
        # a StarStruct.Message object, but change the mode to match the
        # current mode.
        self.format = field[1]
        self.escapor = Escapor(**field[2]['escape'])

        self._mode = mode
        self._alignment = alignment
        self.update(mode, alignment)

    @staticmethod
    def valid(field: tuple) -> bool:
        """
        See :py:func:`starstruct.element.Element.valid`

        :param field: The items to determine the structure of the element
        """
        # A valid escaped field is (name, Message, {'escape': {...}}).
        if len(field) == 3:
            return isinstance(field[1], starstruct.message.Message) \
                and isinstance(field[2], dict) \
                and 'escape' in field[2]
        return False

    def validate(self, msg):
        """
        Ensure that the supplied message contains the required information for
        this element object to operate.

        All elements that are Variable must reference valid Length elements.
        """
        # TODO: Any validation needed here?
        pass

    def update(self, mode=None, alignment=None):
        """
        Change the mode/alignment of the struct format.

        Passing ``None`` for either argument leaves that setting unchanged.
        """
        # FIX: the original guards tested the *old* values
        # (``if self._mode is not None``), so calling update() with no
        # arguments clobbered a configured mode/alignment with None.
        if mode is not None:
            self._mode = mode
        if alignment is not None:
            self._alignment = alignment
        self.format.update(self._mode, self._alignment)

    def pack(self, msg):
        """Pack the provided values into the supplied buffer."""
        # When packing use the length of the current element to determine
        # how many elements to pack, not the length element of the message
        # (which should not be specified manually).
        iterator = msg[self.name]
        if not isinstance(iterator, list):
            iterator = [iterator]

        # Layout: start, then <item><separator> for every item, then end.
        ret = self.escapor.start
        for item in iterator:
            ret += self.format.pack(item)
            ret += self.escapor.separator
        ret += self.escapor.end

        # There is no need to make sure that the packed data is properly
        # aligned, because that should already be done by the individual
        # messages that have been packed.
        return ret

    def unpack(self, msg, buf):
        """Unpack data from the supplied buffer using the initialized format."""
        ret = []

        # Consume (and require) the starting sequence.
        if buf[:len(self.escapor.start)] == self.escapor.start:
            buf = buf[len(self.escapor.start):]
        else:
            raise ValueError('Buf did not start with expected start sequence: {0}'.format(
                self.escapor.start.decode()))

        unused = buf
        while True:
            (val, unused) = self.format.unpack_partial(unused)
            ret.append(val)

            # Every item must be followed by the separator sequence.
            if unused[:len(self.escapor.separator)] == self.escapor.separator:
                unused = unused[len(self.escapor.separator):]
            else:
                raise ValueError('Buf did not separate with expected separate sequence: {0}'.format(
                    self.escapor.separator.decode()))

            # Stop once the end sequence is reached.
            # NOTE(review): with an empty ``end`` this matches immediately,
            # so only a single item is ever unpacked -- confirm intended.
            if unused[:len(self.escapor.end)] == self.escapor.end:
                unused = unused[len(self.escapor.end):]
                break

        # There is no need to make sure that the unpacked data consumes a
        # properly aligned number of bytes because that should already be done
        # by the individual messages that have been unpacked.
        return (ret, unused)

    def make(self, msg):
        """Return the expected "made" value"""
        # Apply the sub-message's make() to every value in the list.
        return [self.format.make(val) for val in msg[self.name]]
from typing import Optional
import starstruct
from starstruct.element import register, Element
from starstruct.modes import Mode
class Escapor:
def __init__(self, start=None, separator=None, end=None, opts=None):
self._start = start
self._separator = separator
self._end = end
self._opts = opts
@property
def start(self):
if self._start is not None:
return self._start
else:
return b''
@property
def separator(self):
if self._separator is not None:
return self._separator
else:
return b''
@property
def end(self):
if self._end is not None:
return self._end
else:
return b''
@register
class ElementEscaped(Element):
"""
Initialize a StarStruct element object.
:param field: The fields passed into the constructor of the element
:param mode: The mode in which to pack the bytes
:param alignment: Number of bytes to align to
"""
def __init__(self, field: list, mode: Optional[Mode]=Mode.Native, alignment: Optional[int]=1):
# All of the type checks have already been performed by the class
# factory
self.name = field[0]
# Escaped elements don't use the normal struct format, the format is
# a StarStruct.Message object, but change the mode to match the
# current mode.
self.format = field[1]
self.escapor = Escapor(**field[2]['escape'])
self._mode = mode
self._alignment = alignment
self.update(mode, alignment)
@staticmethod
def valid(field: tuple) -> bool:
"""
See :py:func:`starstruct.element.Element.valid`
:param field: The items to determine the structure of the element
"""
if len(field) == 3:
return isinstance(field[1], starstruct.message.Message) \
and isinstance(field[2], dict) \
and 'escape' in field[2].keys()
else:
return False
def validate(self, msg):
"""
Ensure that the supplied message contains the required information for
this element object to operate.
All elements that are Variable must reference valid Length elements.
"""
# TODO: Any validation needed here?
pass
def update(self, mode=None, alignment=None):
"""change the mode of the struct format"""
if self._mode is not None:
self._mode = mode
if self._alignment is not None:
self._alignment = alignment
self.format.update(self._mode, self._alignment)
def pack(self, msg):
"""Pack the provided values into the supplied buffer."""
# When packing use the length of the current element to determine
# how many elements to pack, not the length element of the message
# (which should not be specified manually).
iterator = msg[self.name]
if not isinstance(iterator, list):
iterator = [iterator]
ret = self.escapor.start
for item in iterator:
ret += self.format.pack(item)
ret += self.escapor.separator
ret += self.escapor.end
# There is no need to make sure that the packed data is properly
# aligned, because that should already be done by the individual
# messages that have been packed.
return ret
def unpack(self, msg, buf):
"""Unpack data from the supplied buffer using the initialized format."""
# When unpacking a variable element, reference the already unpacked
# length field to determine how many elements need unpacked.
ret = []
# Check the starting value
if buf[:len(self.escapor.start)] == self.escapor.start:
buf = buf[len(self.escapor.start):]
else:
raise ValueError('Buf did not start with expected start sequence: {0}'.format(
self.escapor.start.decode()))
unused = buf
while True:
(val, unused) = self.format.unpack_partial(unused)
ret.append(val)
if unused[:len(self.escapor.separator)] == self.escapor.separator:
unused = unused[len(self.escapor.separator):]
else:
raise ValueError('Buf did not separate with expected separate sequence: {0}'.format(
self.escapor.separator.decode()))
if unused[:len(self.escapor.end)] == self.escapor.end:
unused = unused[len(self.escapor.end):]
break
# There is no need to make sure that the unpacked data consumes a
# properly aligned number of bytes because that should already be done
# by the individual messages that have been unpacked.
return (ret, unused)
def make(self, msg):
"""Return the expected "made" value"""
ret = []
for val in msg[self.name]:
ret.append(self.format.make(val))
return ret | 0.84317 | 0.384103 |
import asyncio
import time as ttime
from bluesky_queueserver.manager.task_results import TaskResults
def test_TaskResults_update_uid():
    """
    TaskResults: the task-results UID is a string and changes when updated.
    """

    async def testing():
        tr = TaskResults()
        previous_uid = tr.task_results_uid
        assert isinstance(previous_uid, str)
        # Regenerating the UID must produce a different value.
        tr._update_task_results_uid()
        assert tr.task_results_uid != previous_uid

    asyncio.run(testing())
def test_TaskResults_add_running_task():
    """
    TaskResults: tests for ``add_running_task`` and ``clear_running_tasks``.
    """

    async def testing():
        tr = TaskResults()
        assert tr._running_tasks == {}
        uid = tr.task_results_uid
        # Payload defaults to {} when not supplied.
        await tr.add_running_task(task_uid="abc")
        await tr.add_running_task(task_uid="def", payload={"some_value": 10})
        assert len(tr._running_tasks) == 2
        assert tr._running_tasks["abc"]["payload"] == {}
        assert isinstance(tr._running_tasks["abc"]["time"], float)
        assert tr._running_tasks["def"]["payload"] == {"some_value": 10}
        assert isinstance(tr._running_tasks["def"]["time"], float)

        await tr.clear_running_tasks()
        assert tr._running_tasks == {}

        # UID is not expected to change
        assert tr.task_results_uid == uid

    asyncio.run(testing())
def test_TaskResults_remove_running_task():
    """
    TaskResults: tests for ``remove_running_task``
    """

    async def testing():
        tr = TaskResults()
        assert tr._running_tasks == {}
        uid = tr.task_results_uid
        await tr.add_running_task(task_uid="abc")
        await tr.add_running_task(task_uid="def", payload={"some_value": 10})
        assert len(tr._running_tasks) == 2
        # Removing one task leaves the other entry untouched.
        await tr.remove_running_task(task_uid="abc")
        assert len(tr._running_tasks) == 1
        assert tr._running_tasks["def"]["payload"] == {"some_value": 10}
        assert isinstance(tr._running_tasks["def"]["time"], float)

        # UID is not expected to change
        assert tr.task_results_uid == uid

    asyncio.run(testing())
def test_TaskResults_add_completed_task():
    """
    TaskResults: tests for ``add_completed_task`` and ``clear``.
    (Docstring fixed: it previously named ``add_running_task``/``clear_running_task``.)
    """

    async def testing():
        tr = TaskResults()
        uid1 = tr.task_results_uid

        # Add running tasks. The running tasks should be removed as completed tasks
        # with the same UID are added.
        await tr.add_running_task(task_uid="abc", payload={"some_value": "arbitrary_payload"})
        await tr.add_running_task(task_uid="def", payload={"some_value": "arbitrary_payload"})
        # Adding a running task does not regenerate the results UID.
        assert tr.task_results_uid == uid1
        assert tr._completed_tasks_time == []
        assert tr._completed_tasks_data == {}
        assert len(tr._running_tasks) == 2

        await tr.add_completed_task(task_uid="abc")
        uid2 = tr.task_results_uid
        await tr.add_completed_task(task_uid="def", payload={"some_value": 10})
        uid3 = tr.task_results_uid
        assert len(tr._completed_tasks_time) == 2
        assert len(tr._completed_tasks_data) == 2
        assert len(tr._running_tasks) == 0
        # Each completed task regenerates the results UID.
        assert uid1 != uid2
        assert uid2 != uid3
        assert tr._completed_tasks_data["abc"]["payload"] == {}
        assert isinstance(tr._completed_tasks_data["abc"]["time"], float)
        assert tr._completed_tasks_time[0]["task_uid"] == "abc"
        assert tr._completed_tasks_time[0]["time"] == tr._completed_tasks_data["abc"]["time"]
        assert tr._completed_tasks_data["def"]["payload"] == {"some_value": 10}
        assert isinstance(tr._completed_tasks_data["def"]["time"], float)
        assert tr._completed_tasks_time[1]["task_uid"] == "def"
        assert tr._completed_tasks_time[1]["time"] == tr._completed_tasks_data["def"]["time"]

        await tr.clear()
        assert tr._completed_tasks_time == []
        assert tr._completed_tasks_data == {}

        # UID is not expected to change
        assert tr.task_results_uid == uid3

    asyncio.run(testing())
def test_TaskResults_clear():
    """
    TaskResults: ``clear`` empties the running and completed task containers.
    """

    async def testing():
        tr = TaskResults()
        await tr.add_running_task(task_uid="abc", payload={"some_value": "arbitrary_payload"})
        await tr.add_running_task(task_uid="def", payload={"some_value": "arbitrary_payload"})
        await tr.add_completed_task(task_uid="abc")
        # One task completed, one still running.
        assert len(tr._completed_tasks_time) == 1
        assert len(tr._completed_tasks_data) == 1
        assert len(tr._running_tasks) == 1

        uid_before = tr.task_results_uid
        await tr.clear()
        assert tr._running_tasks == {}
        assert tr._completed_tasks_time == []
        assert tr._completed_tasks_data == {}
        # Clearing does not regenerate the UID.
        assert tr.task_results_uid == uid_before

    asyncio.run(testing())
def test_TaskResults_clean_completed_tasks_1():
    """
    TaskResults: tests for ``clean_completed_tasks``
    (timing-sensitive: sleeps interact with retention_time=1).
    """

    async def testing():
        tr = TaskResults(retention_time=1)  # Intentionally set short retention time
        await tr.add_completed_task(task_uid="abc")
        assert len(tr._completed_tasks_data) == 1
        assert len(tr._completed_tasks_time) == 1
        await tr.clean_completed_tasks()  # No effect
        assert len(tr._completed_tasks_data) == 1
        assert len(tr._completed_tasks_time) == 1

        ttime.sleep(0.8)
        # 'add_completed_task' is expected to 'clean' the task list, but there are no expired tasks yet.
        await tr.add_completed_task(task_uid="def", payload={"some_value": 10})
        assert len(tr._completed_tasks_data) == 2
        assert len(tr._completed_tasks_time) == 2

        ttime.sleep(0.5)
        # Task 'abc' has now been retained ~1.3 s > retention_time.
        await tr.clean_completed_tasks()  # Should remove the 1st task
        assert len(tr._completed_tasks_data) == 1
        assert len(tr._completed_tasks_time) == 1

        ttime.sleep(0.8)
        await tr.clean_completed_tasks()  # Should remove the 2nd task
        assert len(tr._completed_tasks_data) == 0
        assert len(tr._completed_tasks_time) == 0

    asyncio.run(testing())
def test_TaskResults_clean_completed_tasks_2():
    """
    TaskResults: tests that ``clean_completed_tasks`` is implicitly called when a completed task is added.
    """

    async def testing():
        tr = TaskResults(retention_time=1)  # Intentionally set short retention time
        await tr.add_completed_task(task_uid="abc")
        assert len(tr._completed_tasks_data) == 1
        assert len(tr._completed_tasks_time) == 1

        ttime.sleep(1.5)
        # Adds the 2nd task, but removes the 1st (because it is expired)
        await tr.add_completed_task(task_uid="def", payload={"some_value": 10})
        assert len(tr._completed_tasks_data) == 1
        assert len(tr._completed_tasks_time) == 1
        assert tr._completed_tasks_time[0]["task_uid"] == "def"
        assert list(tr._completed_tasks_data.keys())[0] == "def"

    asyncio.run(testing())
def test_TaskResults_get_task_info():
    """
    TaskResults: ``get_task_info`` reports 'running', 'completed' or 'not_found'.
    """

    async def testing():
        tr = TaskResults(retention_time=1)  # Intentionally set short retention time
        await tr.add_running_task(task_uid="abc", payload={"some_value": 5})
        await tr.add_running_task(task_uid="def", payload={"some_value": 10})
        # 'def' moves from running to completed; its payload is replaced.
        await tr.add_completed_task(task_uid="def", payload={"some_value": 20})

        status, payload = await tr.get_task_info(task_uid="abc")
        assert status == "running"
        assert payload == {"some_value": 5}

        status, payload = await tr.get_task_info(task_uid="def")
        assert status == "completed"
        assert payload == {"some_value": 20}

        status, payload = await tr.get_task_info(task_uid="gih")
        assert status == "not_found"
        assert payload == {}

    asyncio.run(testing())
import time as ttime
from bluesky_queueserver.manager.task_results import TaskResults
def test_TaskResults_update_uid():
"""
TaskResults: Test that task result UID is updated.
"""
async def testing():
tr = TaskResults()
uid = tr.task_results_uid
assert isinstance(uid, str)
tr._update_task_results_uid()
assert tr.task_results_uid != uid
asyncio.run(testing())
def test_TaskResults_add_running_task():
"""
TaskResults: tests for ``add_running_task``, ``clear_running_task``
"""
async def testing():
tr = TaskResults()
assert tr._running_tasks == {}
uid = tr.task_results_uid
await tr.add_running_task(task_uid="abc")
await tr.add_running_task(task_uid="def", payload={"some_value": 10})
assert len(tr._running_tasks) == 2
assert tr._running_tasks["abc"]["payload"] == {}
assert isinstance(tr._running_tasks["abc"]["time"], float)
assert tr._running_tasks["def"]["payload"] == {"some_value": 10}
assert isinstance(tr._running_tasks["def"]["time"], float)
await tr.clear_running_tasks()
assert tr._running_tasks == {}
# UID is not expected to change
assert tr.task_results_uid == uid
asyncio.run(testing())
def test_TaskResults_remove_running_task():
"""
TaskResults: tests for ``remove_running_task``
"""
async def testing():
tr = TaskResults()
assert tr._running_tasks == {}
uid = tr.task_results_uid
await tr.add_running_task(task_uid="abc")
await tr.add_running_task(task_uid="def", payload={"some_value": 10})
assert len(tr._running_tasks) == 2
await tr.remove_running_task(task_uid="abc")
assert len(tr._running_tasks) == 1
assert tr._running_tasks["def"]["payload"] == {"some_value": 10}
assert isinstance(tr._running_tasks["def"]["time"], float)
# UID is not expected to change
assert tr.task_results_uid == uid
asyncio.run(testing())
def test_TaskResults_add_completed_task():
"""
TaskResults: tests for ``add_running_task``, ``clear_running_task``
"""
async def testing():
tr = TaskResults()
uid1 = tr.task_results_uid
# Add running tasks. The running tasks should be removed as completed tasks
# with the same UID are added.
await tr.add_running_task(task_uid="abc", payload={"some_value": "arbitrary_payload"})
await tr.add_running_task(task_uid="def", payload={"some_value": "arbitrary_payload"})
assert tr.task_results_uid == uid1
assert tr._completed_tasks_time == []
assert tr._completed_tasks_data == {}
assert len(tr._running_tasks) == 2
await tr.add_completed_task(task_uid="abc")
uid2 = tr.task_results_uid
await tr.add_completed_task(task_uid="def", payload={"some_value": 10})
uid3 = tr.task_results_uid
assert len(tr._completed_tasks_time) == 2
assert len(tr._completed_tasks_data) == 2
assert len(tr._running_tasks) == 0
assert uid1 != uid2
assert uid2 != uid3
assert tr._completed_tasks_data["abc"]["payload"] == {}
assert isinstance(tr._completed_tasks_data["abc"]["time"], float)
assert tr._completed_tasks_time[0]["task_uid"] == "abc"
assert tr._completed_tasks_time[0]["time"] == tr._completed_tasks_data["abc"]["time"]
assert tr._completed_tasks_data["def"]["payload"] == {"some_value": 10}
assert isinstance(tr._completed_tasks_data["def"]["time"], float)
assert tr._completed_tasks_time[1]["task_uid"] == "def"
assert tr._completed_tasks_time[1]["time"] == tr._completed_tasks_data["def"]["time"]
await tr.clear()
assert tr._completed_tasks_time == []
assert tr._completed_tasks_data == {}
# UID is not expected to change
assert tr.task_results_uid == uid3
asyncio.run(testing())
def test_TaskResults_clear():
"""
TaskResults: tests for ``clear``
"""
async def testing():
tr = TaskResults()
await tr.add_running_task(task_uid="abc", payload={"some_value": "arbitrary_payload"})
await tr.add_running_task(task_uid="def", payload={"some_value": "arbitrary_payload"})
await tr.add_completed_task(task_uid="abc")
assert len(tr._completed_tasks_time) == 1
assert len(tr._completed_tasks_data) == 1
assert len(tr._running_tasks) == 1
uid = tr.task_results_uid
await tr.clear()
assert tr._running_tasks == {}
assert tr._completed_tasks_time == []
assert tr._completed_tasks_data == {}
# UID is not expected to change
assert tr.task_results_uid == uid
asyncio.run(testing())
def test_TaskResults_clean_completed_tasks_1():
"""
TaskResults: tests for ``clean_completed_tasks``
"""
async def testing():
tr = TaskResults(retention_time=1) # Intentionally set short retention time
await tr.add_completed_task(task_uid="abc")
assert len(tr._completed_tasks_data) == 1
assert len(tr._completed_tasks_time) == 1
await tr.clean_completed_tasks() # No effect
assert len(tr._completed_tasks_data) == 1
assert len(tr._completed_tasks_time) == 1
ttime.sleep(0.8)
# 'add_completed_task' is expected to 'clean' tha task list, but there are no expired tasks yet.
await tr.add_completed_task(task_uid="def", payload={"some_value": 10})
assert len(tr._completed_tasks_data) == 2
assert len(tr._completed_tasks_time) == 2
ttime.sleep(0.5)
await tr.clean_completed_tasks() # Should remove the 1st task
assert len(tr._completed_tasks_data) == 1
assert len(tr._completed_tasks_time) == 1
ttime.sleep(0.8)
await tr.clean_completed_tasks() # Should remove the 2nd task
assert len(tr._completed_tasks_data) == 0
assert len(tr._completed_tasks_time) == 0
asyncio.run(testing())
def test_TaskResults_clean_completed_tasks_2():
"""
TaskResults: tests that ``clean_completed_tasks`` is implicitely called when completed task is added.
"""
async def testing():
tr = TaskResults(retention_time=1) # Intentionally set short retention time
await tr.add_completed_task(task_uid="abc")
assert len(tr._completed_tasks_data) == 1
assert len(tr._completed_tasks_time) == 1
ttime.sleep(1.5)
# Adds the 2nd task, but removes the 1st (because it is expired)
await tr.add_completed_task(task_uid="def", payload={"some_value": 10})
assert len(tr._completed_tasks_data) == 1
assert len(tr._completed_tasks_time) == 1
assert tr._completed_tasks_time[0]["task_uid"] == "def"
assert list(tr._completed_tasks_data.keys())[0] == "def"
asyncio.run(testing())
def test_TaskResults_get_task_info():
"""
TaskResults: ``get_task_info``.
"""
async def testing():
tr = TaskResults(retention_time=1) # Intentionally set short retention time
await tr.add_running_task(task_uid="abc", payload={"some_value": 5})
await tr.add_running_task(task_uid="def", payload={"some_value": 10})
await tr.add_completed_task(task_uid="def", payload={"some_value": 20})
status, payload = await tr.get_task_info(task_uid="abc")
assert status == "running"
assert payload == {"some_value": 5}
status, payload = await tr.get_task_info(task_uid="def")
assert status == "completed"
assert payload == {"some_value": 20}
status, payload = await tr.get_task_info(task_uid="gih")
assert status == "not_found"
assert payload == {}
asyncio.run(testing()) | 0.655446 | 0.581095 |
from datetime import datetime
import boto3
from botocore.client import ClientError
import requests
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from saleor.product.models import ProductImage
class Command(BaseCommand):
    """Send product images to the background-remover API.

    ``--mode migration`` moves images from ``--source`` to ``--target``;
    ``--mode backup`` additionally copies originals into ``--backup`` first.
    Images are selected by product creation date range.
    """

    version = "1.0"

    def add_arguments(self, parser):
        parser.add_argument('--start_date', type=str, help='product creation start date')
        parser.add_argument('--end_date', type=str, help='product creation end date')
        parser.add_argument('--source', type=str, help='s3 source bucket')
        parser.add_argument('--target', type=str, help='s3 target bucket')
        parser.add_argument('--backup', type=str, help='s3 backup bucket')
        parser.add_argument('--mode', type=str, help='processing mode')

    def handle(self, *args, **options):
        """Entry point: validate inputs, then dispatch on ``--mode``."""
        self.start_date = options['start_date']
        self.end_date = options['end_date']
        self.source = options['source']
        self.target = options['target']
        self.backup = options['backup']
        self.mode = options['mode']
        self.validate_dates()
        if self.mode == 'backup':
            self.validate_bucket(self.backup)
            self.process_images_backup_mode()
        elif self.mode == 'migration':
            self.process_images_migration_mode()
        else:
            # FIX: an unrecognized/missing mode previously did nothing, silently.
            raise CommandError(
                "Unknown mode. Use `--mode backup` or `--mode migration`."
            )

    def process_images_migration_mode(self):
        """POST the selected images to the remover API in migration mode."""
        self._post_images('migration', {})

    def process_images_backup_mode(self):
        """POST the selected images to the remover API in backup mode."""
        self._post_images('backup', {"backup": self.backup})

    def _post_images(self, mode, extra_data):
        # Shared request logic for both modes (previously duplicated verbatim).
        images = self.get_images()
        url = f'{settings.REMOVER_API_URL}/process_images/{mode}'
        headers = {
            "X-API-KEY": settings.REMOVER_API_KEY
        }
        data = {
            "source": self.source,
            "target": self.target,
            **extra_data,
            "images": images
        }
        response = requests.post(
            url=url,
            json=data,
            headers=headers
        )
        print(response.json())

    def get_images(self):
        """Return image names for colored (non-white), non-bra products
        created within the [start_date, end_date] window."""
        images = ProductImage.objects.raw('''
            select
                ppi.id,
                ppi.image,
                ppi.ppoi
            from
                product_product pp,
                product_productimage ppi,
                product_producttype pt,
                product_productvariant pv,
                product_assignedproductattribute paa,
                product_assignedproductattribute_values paav,
                product_attributevalue pav,
                product_attribute pa
            where
                pp.id = ppi.product_id
                and pp.product_type_id = pt.id
                and pp.id = pv.product_id
                and pp.id = paa.product_id
                and paa.id = paav.assignedproductattribute_id
                and paav.attributevalue_id = pav.id
                and pav.attribute_id = pa.id
                and cast(pp.created_at as date) between %s and %s
                and pa."name" = 'Kolor'
                and pav."name" != 'biały'
                and pt."name" not like 'Biustonosz%%'
            order by pv.sku
        ''', [self.start_date, self.end_date])
        return [image.image.name for image in images]

    def validate_bucket(self, bucket):
        """Raise CommandError unless ``bucket`` exists and is accessible."""
        s3 = boto3.resource('s3')
        try:
            s3.meta.client.head_bucket(Bucket=bucket)
        except ClientError:
            raise CommandError(
                "Wrong backup bucket name. "
            )

    def validate_dates(self):
        """Check that both dates are present, parseable and correctly ordered."""
        if not self.start_date:
            raise CommandError(
                "Unknown start date. "
                "Use `--start_date` flag "
                "eg. --start_date '2021-08-17'"
            )
        if not self.end_date:
            raise CommandError(
                "Unknown end_date date. "
                "Use `--end_date` flag "
                "eg. --end_date '2021-08-17'"
            )
        try:
            start_date = datetime.strptime(self.start_date, "%Y-%m-%d")
        except ValueError:
            # FIX: this branch previously reported "Wrong end date" /
            # `--end_date`, even though it is the START date that failed.
            raise CommandError(
                "Wrong start date. "
                "`--start_date` flag should be in format eg. `2021-08-17`"
            )
        try:
            end_date = datetime.strptime(self.end_date, "%Y-%m-%d")
        except ValueError:
            raise CommandError(
                "Wrong end date. "
                "`--end_date` flag should be in format eg. `2021-08-17`"
            )
        if start_date > end_date:
            raise CommandError(
                "Provided start date is greater than end date."
            )
import boto3
from botocore.client import ClientError
import requests
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from saleor.product.models import ProductImage
class Command(BaseCommand):
version = "1.0"
def add_arguments(self, parser):
parser.add_argument('--start_date', type=str, help='product creation start date')
parser.add_argument('--end_date', type=str, help='product creation end date')
parser.add_argument('--source', type=str, help='s3 source bucket')
parser.add_argument('--target', type=str, help='s3 target bucket')
parser.add_argument('--backup', type=str, help='s3 backup bucket')
parser.add_argument('--mode', type=str, help='processing mode')
def handle(self, *args, **options):
self.start_date = options['start_date']
self.end_date = options['end_date']
self.source = options['source']
self.target = options['target']
self.backup = options['backup']
self.mode = options['mode']
self.validate_dates()
if self.mode == 'backup':
self.validate_bucket(self.backup)
self.process_images_backup_mode()
elif self.mode == 'migration':
self.process_images_migration_mode()
def process_images_migration_mode(self):
images = self.get_images()
url = f'{settings.REMOVER_API_URL}/process_images/migration'
headers = {
"X-API-KEY": settings.REMOVER_API_KEY
}
data = {
"source": self.source,
"target": self.target,
"images": images
}
response = requests.post(
url=url,
json=data,
headers=headers
)
print(response.json())
def process_images_backup_mode(self):
images = self.get_images()
url = f'{settings.REMOVER_API_URL}/process_images/backup'
headers = {
"X-API-KEY": settings.REMOVER_API_KEY
}
data = {
"source": self.source,
"target": self.target,
"backup": self.backup,
"images": images
}
response = requests.post(
url=url,
json=data,
headers=headers
)
print(response.json())
def get_images(self):
images = ProductImage.objects.raw('''
select
ppi.id,
ppi.image,
ppi.ppoi
from
product_product pp,
product_productimage ppi,
product_producttype pt,
product_productvariant pv,
product_assignedproductattribute paa,
product_assignedproductattribute_values paav,
product_attributevalue pav,
product_attribute pa
where
pp.id = ppi.product_id
and pp.product_type_id = pt.id
and pp.id = pv.product_id
and pp.id = paa.product_id
and paa.id = paav.assignedproductattribute_id
and paav.attributevalue_id = pav.id
and pav.attribute_id = pa.id
and cast(pp.created_at as date) between %s and %s
and pa."name" = 'Kolor'
and pav."name" != 'biały'
and pt."name" not like 'Biustonosz%%'
order by pv.sku
''', [self.start_date, self.end_date])
images_list = [image.image.name for image in images]
return images_list
def validate_bucket(self, bucket):
s3 = boto3.resource('s3')
try:
s3.meta.client.head_bucket(Bucket=bucket)
except ClientError:
raise CommandError(
"Wrong backup bucket name. "
)
def validate_dates(self):
if not self.start_date:
raise CommandError(
"Unknown start date. "
"Use `--start_date` flag "
"eg. --start_date '2021-08-17'"
)
if not self.end_date:
raise CommandError(
"Unknown end_date date. "
"Use `--end_date` flag "
"eg. --end_date '2021-08-17'"
)
try:
start_date = datetime.strptime(self.start_date, "%Y-%m-%d")
except ValueError:
raise CommandError(
"Wrong end date. "
"`--end_date` flag should be in format eg. `2021-08-17`"
)
try:
end_date = datetime.strptime(self.end_date, "%Y-%m-%d")
except ValueError:
raise CommandError(
"Wrong end date. "
"`--end_date` flag should be in format eg. `2021-08-17`"
)
if start_date > end_date:
raise CommandError(
"Provided start date is greater than end date."
) | 0.494385 | 0.082254 |
import sqlalchemy
from flask_taxonomies.constants import INCLUDE_DELETED, INCLUDE_DESCENDANTS, \
INCLUDE_DESCENDANTS_COUNT, INCLUDE_STATUS, INCLUDE_SELF
from flask_taxonomies.models import TaxonomyTerm, TermStatusEnum, Representation
from flask_taxonomies.proxies import current_flask_taxonomies
from flask_taxonomies.term_identification import TermIdentification
from flask_taxonomies.views.common import build_descendants
from flask_taxonomies.views.paginator import Paginator
from flask import current_app
def get_taxonomy_json(code=None,
slug=None,
prefer: Representation = Representation("taxonomy"),
page=None,
size=None,
status_code=200,
q=None,
request=None):
taxonomy = current_flask_taxonomies.get_taxonomy(code)
prefer = taxonomy.merge_select(prefer)
if request:
current_flask_taxonomies.permissions.taxonomy_term_read.enforce(request=request,
taxonomy=taxonomy,
slug=slug)
if INCLUDE_DELETED in prefer:
status_cond = sqlalchemy.sql.true()
else:
status_cond = TaxonomyTerm.status == TermStatusEnum.alive
return_descendants = INCLUDE_DESCENDANTS in prefer
if return_descendants:
query = current_flask_taxonomies.descendants_or_self(
TermIdentification(taxonomy=code, slug=slug),
levels=prefer.options.get('levels', None),
status_cond=status_cond,
return_descendants_count=INCLUDE_DESCENDANTS_COUNT in prefer,
return_descendants_busy_count=INCLUDE_STATUS in prefer
)
else:
query = current_flask_taxonomies.filter_term(
TermIdentification(taxonomy=code, slug=slug),
status_cond=status_cond,
return_descendants_count=INCLUDE_DESCENDANTS_COUNT in prefer,
return_descendants_busy_count=INCLUDE_STATUS in prefer
)
if q:
query = current_flask_taxonomies.apply_term_query(query, q, code)
paginator = Paginator(
prefer,
query, page if return_descendants else None,
size if return_descendants else None,
json_converter=lambda data:
build_descendants(data, prefer, root_slug=None),
allow_empty=INCLUDE_SELF not in prefer, single_result=INCLUDE_SELF in prefer,
has_query=q is not None
)
return paginator
def taxonomy_term_to_json(term):
"""
Converts taxonomy term to default JSON. Use only if the term
has ancestors pre-populated, otherwise it is not an efficient
implementation - use the one from API instead.
:param term: term to serialize
:return: array of json terms
"""
ret = []
while term:
data = {
**(term.extra_data or {}),
'slug': term.slug,
'level': term.level + 1,
}
if term.obsoleted_by_id:
data['obsoleted_by'] = term.obsoleted_by.slug
data['links'] = {
'self': 'https://' + \
current_app.config['SERVER_NAME'] + \
current_app.config['FLASK_TAXONOMIES_URL_PREFIX'] + \
term.slug
}
ret.append(data)
term = term.parent
return ret | oarepo_taxonomies/utils.py | import sqlalchemy
from flask_taxonomies.constants import INCLUDE_DELETED, INCLUDE_DESCENDANTS, \
INCLUDE_DESCENDANTS_COUNT, INCLUDE_STATUS, INCLUDE_SELF
from flask_taxonomies.models import TaxonomyTerm, TermStatusEnum, Representation
from flask_taxonomies.proxies import current_flask_taxonomies
from flask_taxonomies.term_identification import TermIdentification
from flask_taxonomies.views.common import build_descendants
from flask_taxonomies.views.paginator import Paginator
from flask import current_app
def get_taxonomy_json(code=None,
slug=None,
prefer: Representation = Representation("taxonomy"),
page=None,
size=None,
status_code=200,
q=None,
request=None):
taxonomy = current_flask_taxonomies.get_taxonomy(code)
prefer = taxonomy.merge_select(prefer)
if request:
current_flask_taxonomies.permissions.taxonomy_term_read.enforce(request=request,
taxonomy=taxonomy,
slug=slug)
if INCLUDE_DELETED in prefer:
status_cond = sqlalchemy.sql.true()
else:
status_cond = TaxonomyTerm.status == TermStatusEnum.alive
return_descendants = INCLUDE_DESCENDANTS in prefer
if return_descendants:
query = current_flask_taxonomies.descendants_or_self(
TermIdentification(taxonomy=code, slug=slug),
levels=prefer.options.get('levels', None),
status_cond=status_cond,
return_descendants_count=INCLUDE_DESCENDANTS_COUNT in prefer,
return_descendants_busy_count=INCLUDE_STATUS in prefer
)
else:
query = current_flask_taxonomies.filter_term(
TermIdentification(taxonomy=code, slug=slug),
status_cond=status_cond,
return_descendants_count=INCLUDE_DESCENDANTS_COUNT in prefer,
return_descendants_busy_count=INCLUDE_STATUS in prefer
)
if q:
query = current_flask_taxonomies.apply_term_query(query, q, code)
paginator = Paginator(
prefer,
query, page if return_descendants else None,
size if return_descendants else None,
json_converter=lambda data:
build_descendants(data, prefer, root_slug=None),
allow_empty=INCLUDE_SELF not in prefer, single_result=INCLUDE_SELF in prefer,
has_query=q is not None
)
return paginator
def taxonomy_term_to_json(term):
"""
Converts taxonomy term to default JSON. Use only if the term
has ancestors pre-populated, otherwise it is not an efficient
implementation - use the one from API instead.
:param term: term to serialize
:return: array of json terms
"""
ret = []
while term:
data = {
**(term.extra_data or {}),
'slug': term.slug,
'level': term.level + 1,
}
if term.obsoleted_by_id:
data['obsoleted_by'] = term.obsoleted_by.slug
data['links'] = {
'self': 'https://' + \
current_app.config['SERVER_NAME'] + \
current_app.config['FLASK_TAXONOMIES_URL_PREFIX'] + \
term.slug
}
ret.append(data)
term = term.parent
return ret | 0.558327 | 0.133754 |
import numpy as np
from datetime import datetime, timedelta
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import sys
from tools_TC202010 import read_score
def main( top='', stime=datetime(2020, 9, 1, 0 ), etime=datetime(2020, 9, 1, 0) ):
time = stime
while time < etime:
data_ = read_score( top=top, time=time )
if time == stime:
# initiate a dictionary
data = dict( data_ )
data.update( {'time': [ stime, stime ] } )
else:
for key in data.keys():
if key == 'time':
data[key] = data[key] + [ time, time ]
else:
data[key] = data[key] + data_[key]
time += timedelta( hours=6 )
fig, ( ( ax1, ax2, ax3, ax4, ax5)) = plt.subplots( 5, 1, figsize=( 8, 9.5 ) )
ax_l = [ ax1, ax2, ax3, ax4, ax5]
tit_l = [ 'U', 'V', 'T', 'PS', 'Q' ]
ymax_l = [ 50000, 50000, 5000, 1000, 5000 ]
ymin_l = [ 0, 0, 0, 0, 0 ]
for key in data.keys():
if 'NOBS_U' in key:
ax = ax1
elif 'NOBS_V' in key:
ax = ax2
elif 'NOBS_T' in key:
ax = ax3
elif 'NOBS_PS' in key:
ax = ax4
elif 'NOBS_Q' in key:
ax = ax5
else:
print( "skip ", key )
continue
ls = 'solid'
c = 'k'
ax.plot( data['time'], data[key], color=c, ls=ls )
stime_ = stime - timedelta( hours=stime.hour )
etime_ = etime - timedelta( hours=etime.hour )
for i, ax in enumerate( ax_l ):
ax.text( 0.5, 0.99, tit_l[i],
fontsize=13, transform=ax.transAxes,
ha="center",
va='top',
)
# ax.hlines( y=0.0, xmin=stime_, xmax=etime_, ls='dotted',
# color='k', lw=1.0 )
ax.set_xlim( stime_, etime_ )
ax.set_ylim( ymin_l[i], ymax_l[i] )
if i == 4:
ax.xaxis.set_major_locator( mdates.HourLocator(interval=24) )
#ax.xaxis.set_major_formatter( mdates.DateFormatter('%d%H\n%m/%d') )
ax.xaxis.set_major_formatter( mdates.DateFormatter('%d') )
#ax.xaxis.set_major_formatter( mdates.DateFormatter('%m/%d') )
else:
ax.set_xticks([], [])
plt.show()
sys.exit()
# time = stime
stime = datetime( 2020, 8, 16, 6, 0 )
etime = datetime( 2020, 9, 2, 0, 0 )
top = "/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/scale-5.4.3/OUTPUT/TC2020/D1/D1_20210629"
#stime = datetime( 2017, 6, 16, 6, 0 )
#etime = datetime( 2017, 7, 5, 0, 0 )
#top = "/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/scale-5.4.3/OUTPUT/KYUSHU2017_D1_20210629"
time = stime
main( top=top, stime=stime, etime=etime, ) | src/nobs_tseris.py | import numpy as np
from datetime import datetime, timedelta
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import sys
from tools_TC202010 import read_score
def main( top='', stime=datetime(2020, 9, 1, 0 ), etime=datetime(2020, 9, 1, 0) ):
time = stime
while time < etime:
data_ = read_score( top=top, time=time )
if time == stime:
# initiate a dictionary
data = dict( data_ )
data.update( {'time': [ stime, stime ] } )
else:
for key in data.keys():
if key == 'time':
data[key] = data[key] + [ time, time ]
else:
data[key] = data[key] + data_[key]
time += timedelta( hours=6 )
fig, ( ( ax1, ax2, ax3, ax4, ax5)) = plt.subplots( 5, 1, figsize=( 8, 9.5 ) )
ax_l = [ ax1, ax2, ax3, ax4, ax5]
tit_l = [ 'U', 'V', 'T', 'PS', 'Q' ]
ymax_l = [ 50000, 50000, 5000, 1000, 5000 ]
ymin_l = [ 0, 0, 0, 0, 0 ]
for key in data.keys():
if 'NOBS_U' in key:
ax = ax1
elif 'NOBS_V' in key:
ax = ax2
elif 'NOBS_T' in key:
ax = ax3
elif 'NOBS_PS' in key:
ax = ax4
elif 'NOBS_Q' in key:
ax = ax5
else:
print( "skip ", key )
continue
ls = 'solid'
c = 'k'
ax.plot( data['time'], data[key], color=c, ls=ls )
stime_ = stime - timedelta( hours=stime.hour )
etime_ = etime - timedelta( hours=etime.hour )
for i, ax in enumerate( ax_l ):
ax.text( 0.5, 0.99, tit_l[i],
fontsize=13, transform=ax.transAxes,
ha="center",
va='top',
)
# ax.hlines( y=0.0, xmin=stime_, xmax=etime_, ls='dotted',
# color='k', lw=1.0 )
ax.set_xlim( stime_, etime_ )
ax.set_ylim( ymin_l[i], ymax_l[i] )
if i == 4:
ax.xaxis.set_major_locator( mdates.HourLocator(interval=24) )
#ax.xaxis.set_major_formatter( mdates.DateFormatter('%d%H\n%m/%d') )
ax.xaxis.set_major_formatter( mdates.DateFormatter('%d') )
#ax.xaxis.set_major_formatter( mdates.DateFormatter('%m/%d') )
else:
ax.set_xticks([], [])
plt.show()
sys.exit()
# time = stime
stime = datetime( 2020, 8, 16, 6, 0 )
etime = datetime( 2020, 9, 2, 0, 0 )
top = "/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/scale-5.4.3/OUTPUT/TC2020/D1/D1_20210629"
#stime = datetime( 2017, 6, 16, 6, 0 )
#etime = datetime( 2017, 7, 5, 0, 0 )
#top = "/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/scale-5.4.3/OUTPUT/KYUSHU2017_D1_20210629"
time = stime
main( top=top, stime=stime, etime=etime, ) | 0.161155 | 0.399519 |
import numpy as np
import pytest
import opexebo
from opexebo.general import accumulate_spatial as func
print("=== tests_general_accumulate_spatial ===")
def test_invalid_inputs():
# No `arena_size` keyword
with pytest.raises(TypeError):
pos = np.random.rand(100)
func(pos)
# Misdefined bins
with pytest.raises(KeyError):
pos = np.random.rand(100)
func(pos, arena_size=1, bin_number=10, bin_width=2.5)
# Misdefined `limit` keyword
with pytest.raises(ValueError):
post = np.random.rand(100)
func(post, arena_size=1, limits="abc")
def test_1d_input():
arena_size = 80
pos = np.random.rand(1000) * arena_size
bin_width = 2.32
limits = (np.nanmin(pos), np.nanmax(pos) * 1.0001)
hist, edges = func(pos, arena_size=arena_size, limits=limits, bin_width=bin_width)
assert hist.ndim == 1
assert hist.size == opexebo.general.bin_width_to_bin_number(arena_size, bin_width)
assert edges.size == hist.size + 1
assert pos.size == np.sum(hist)
def test_2d_input():
arena_size = np.array((80, 120))
pos = (np.random.rand(1000, 2) * arena_size).transpose()
limits = (0, 80.001, 0, 120.001)
bin_width = 4.3
hist, (edge_x, edge_y) = func(
pos, arena_size=arena_size, limits=limits, bin_width=bin_width
)
assert edge_x[0] == limits[0]
assert hist.ndim == 2
for i in range(hist.ndim):
# Note: the array is transposed, so the shape swaps order
# print(hist.shape)
# print(opexebo.general.bin_width_to_bin_number(arena_size, bin_width))
# print(edge_x[0], edge_x[1])
# print(np.min(pos[0]), np.max(pos[0]))
assert (
hist.shape[i]
== opexebo.general.bin_width_to_bin_number(arena_size, bin_width)[i - 1]
)
assert pos.shape[1] == np.sum(hist)
def test_2d_bin_number():
arena_size = np.array((80, 120))
pos = (np.random.rand(1000, 2) * arena_size).transpose()
limits = (0, 80.001, 0, 120.001)
bin_number = (8, 12)
hist, (edge_x, edge_y) = func(
pos, arena_size=arena_size, limits=limits, bin_number=bin_number
)
assert edge_x.size == bin_number[0] + 1
assert edge_y.size == bin_number[1] + 1
assert pos.shape[1] == np.sum(hist)
bin_number = 8
hist, (edge_x, edge_y) = func(
pos, arena_size=arena_size, limits=limits, bin_number=bin_number
)
assert edge_x.size == edge_y.size == bin_number + 1
assert pos.shape[1] == np.sum(hist)
def test_2d_bin_edges():
arena_size = np.array((80, 120))
pos = (np.random.rand(1000, 2) * arena_size).transpose()
limits = (0, 80.001, 0, 120.001)
bin_edges = [np.arange(arena_size[i] + 1) for i in range(2)]
hist, edges = func(pos, arena_size=arena_size, limits=limits, bin_edges=bin_edges)
for i in range(2):
assert np.array_equal(edges[i], bin_edges[i])
# Also test that passing in an array instead of a list works:
bin_edges = np.array(bin_edges)
hist, edges = func(pos, arena_size=arena_size, limits=limits, bin_edges=bin_edges)
for i in range(2):
assert np.array_equal(edges[i], bin_edges[i])
if __name__ == "__main__":
test_invalid_inputs()
test_2d_input()
test_2d_bin_number()
test_2d_bin_edges() | opexebo/tests/test_general/test_accumulateSpatial.py | import numpy as np
import pytest
import opexebo
from opexebo.general import accumulate_spatial as func
print("=== tests_general_accumulate_spatial ===")
def test_invalid_inputs():
# No `arena_size` keyword
with pytest.raises(TypeError):
pos = np.random.rand(100)
func(pos)
# Misdefined bins
with pytest.raises(KeyError):
pos = np.random.rand(100)
func(pos, arena_size=1, bin_number=10, bin_width=2.5)
# Misdefined `limit` keyword
with pytest.raises(ValueError):
post = np.random.rand(100)
func(post, arena_size=1, limits="abc")
def test_1d_input():
arena_size = 80
pos = np.random.rand(1000) * arena_size
bin_width = 2.32
limits = (np.nanmin(pos), np.nanmax(pos) * 1.0001)
hist, edges = func(pos, arena_size=arena_size, limits=limits, bin_width=bin_width)
assert hist.ndim == 1
assert hist.size == opexebo.general.bin_width_to_bin_number(arena_size, bin_width)
assert edges.size == hist.size + 1
assert pos.size == np.sum(hist)
def test_2d_input():
arena_size = np.array((80, 120))
pos = (np.random.rand(1000, 2) * arena_size).transpose()
limits = (0, 80.001, 0, 120.001)
bin_width = 4.3
hist, (edge_x, edge_y) = func(
pos, arena_size=arena_size, limits=limits, bin_width=bin_width
)
assert edge_x[0] == limits[0]
assert hist.ndim == 2
for i in range(hist.ndim):
# Note: the array is transposed, so the shape swaps order
# print(hist.shape)
# print(opexebo.general.bin_width_to_bin_number(arena_size, bin_width))
# print(edge_x[0], edge_x[1])
# print(np.min(pos[0]), np.max(pos[0]))
assert (
hist.shape[i]
== opexebo.general.bin_width_to_bin_number(arena_size, bin_width)[i - 1]
)
assert pos.shape[1] == np.sum(hist)
def test_2d_bin_number():
arena_size = np.array((80, 120))
pos = (np.random.rand(1000, 2) * arena_size).transpose()
limits = (0, 80.001, 0, 120.001)
bin_number = (8, 12)
hist, (edge_x, edge_y) = func(
pos, arena_size=arena_size, limits=limits, bin_number=bin_number
)
assert edge_x.size == bin_number[0] + 1
assert edge_y.size == bin_number[1] + 1
assert pos.shape[1] == np.sum(hist)
bin_number = 8
hist, (edge_x, edge_y) = func(
pos, arena_size=arena_size, limits=limits, bin_number=bin_number
)
assert edge_x.size == edge_y.size == bin_number + 1
assert pos.shape[1] == np.sum(hist)
def test_2d_bin_edges():
arena_size = np.array((80, 120))
pos = (np.random.rand(1000, 2) * arena_size).transpose()
limits = (0, 80.001, 0, 120.001)
bin_edges = [np.arange(arena_size[i] + 1) for i in range(2)]
hist, edges = func(pos, arena_size=arena_size, limits=limits, bin_edges=bin_edges)
for i in range(2):
assert np.array_equal(edges[i], bin_edges[i])
# Also test that passing in an array instead of a list works:
bin_edges = np.array(bin_edges)
hist, edges = func(pos, arena_size=arena_size, limits=limits, bin_edges=bin_edges)
for i in range(2):
assert np.array_equal(edges[i], bin_edges[i])
if __name__ == "__main__":
test_invalid_inputs()
test_2d_input()
test_2d_bin_number()
test_2d_bin_edges() | 0.402862 | 0.746809 |
import os
import platform
import subprocess
def _pv_linux_machine(machine):
if machine == 'x86_64':
return machine
cpu_info = subprocess.check_output(['cat', '/proc/cpuinfo']).decode()
hardware_info = [x for x in cpu_info.split('\n') if 'Hardware' in x][0]
model_info = [x for x in cpu_info.split('\n') if 'model name' in x][0]
if 'BCM' in hardware_info:
if 'rev 7' in model_info:
return 'arm11'
elif 'rev 5' in model_info:
return 'cortex-a7'
elif 'rev 4' in model_info:
return 'cortex-a53'
elif 'rev 3' in model_info:
return 'cortex-a72'
elif 'AM33' in hardware_info:
return 'beaglebone'
else:
raise NotImplementedError('unsupported CPU:\n%s' % cpu_info)
def _pv_platform():
pv_system = platform.system()
if pv_system not in {'Darwin', 'Linux', 'Windows'}:
raise ValueError("unsupported system '%s'" % pv_system)
if pv_system == 'Linux':
pv_machine = _pv_linux_machine(platform.machine())
else:
pv_machine = platform.machine()
return pv_system, pv_machine
_PV_SYSTEM, _PV_MACHINE = _pv_platform()
_RASPBERRY_PI_MACHINES = {'arm11', 'cortex-a7', 'cortex-a53', 'cortex-a72'}
def _abs_path(rel_path):
return os.path.join(os.path.dirname(__file__), '../../../', rel_path)
def _rhino_library_path():
if _PV_SYSTEM == 'Darwin':
return _abs_path('lib/mac/x86_64/libpv_rhino.dylib')
elif _PV_SYSTEM == 'Linux':
if _PV_MACHINE == 'x86_64':
return _abs_path('lib/linux/x86_64/libpv_rhino.so')
elif _PV_MACHINE in _RASPBERRY_PI_MACHINES:
return _abs_path('lib/raspberry-pi/%s/libpv_rhino.so' % _PV_MACHINE)
elif _PV_MACHINE == 'beaglebone':
return _abs_path('lib/beaglebone/libpv_rhino.so')
elif _PV_SYSTEM == 'Windows':
return _abs_path('lib/windows/amd64/libpv_rhino.dll')
raise NotImplementedError('unsupported platform')
RHINO_LIBRARY_PATH = _rhino_library_path()
def _porcupine_library_path():
if _PV_SYSTEM == 'Darwin':
return _abs_path('resources/porcupine/lib/mac/x86_64/libpv_porcupine.dylib')
elif _PV_SYSTEM == 'Linux':
if _PV_MACHINE == 'x86_64':
return _abs_path('resources/porcupine/lib/linux/x86_64/libpv_porcupine.so')
elif _PV_MACHINE in _RASPBERRY_PI_MACHINES:
return _abs_path('resources/porcupine/lib/raspberry-pi/%s/libpv_porcupine.so' % _PV_MACHINE)
elif _PV_MACHINE == 'beaglebone':
return _abs_path('resources/porcupine/lib/beaglebone/libpv_porcupine.so')
elif _PV_SYSTEM == 'Windows':
return _abs_path('resources/porcupine/lib/windows/amd64/libpv_porcupine.dll')
raise NotImplementedError('unsupported platform')
PORCUPINE_LIBRARY_PATH = _porcupine_library_path()
RHINO_MODEL_FILE_PATH = _abs_path('lib/common/rhino_params.pv')
PORCUPINE_MODEL_FILE_PATH = _abs_path('resources/porcupine/lib/common/porcupine_params.pv')
def _context_files_subdir():
if _PV_SYSTEM == 'Darwin':
return 'mac'
elif _PV_SYSTEM == 'Linux':
if _PV_MACHINE == 'x86_64':
return 'linux'
elif _PV_MACHINE in _RASPBERRY_PI_MACHINES:
return 'raspberry-pi'
elif _PV_MACHINE == 'beaglebone':
return 'beaglebone'
elif _PV_SYSTEM == 'Windows':
return 'windows'
raise NotImplementedError('unsupported platform')
def _context_file_paths():
context_files_dir = _abs_path('resources/contexts/%s' % _context_files_subdir())
res = dict()
for x in os.listdir(context_files_dir):
res[x.rsplit('_', maxsplit=1)[0]] = os.path.join(context_files_dir, x)
return res
CONTEXT_FILE_PATHS = _context_file_paths()
CONTEXTS = CONTEXT_FILE_PATHS.keys()
def _keyword_files_subdir():
if _PV_SYSTEM == 'Darwin':
return 'mac'
elif _PV_SYSTEM == 'Linux':
if _PV_MACHINE == 'x86_64':
return 'linux'
elif _PV_MACHINE in _RASPBERRY_PI_MACHINES:
return 'raspberry-pi'
elif _PV_MACHINE == 'beaglebone':
return 'beaglebone'
elif _PV_SYSTEM == 'Windows':
return 'windows'
raise NotImplementedError('unsupported platform')
def _keyword_file_paths():
keyword_files_dir = _abs_path('resources/porcupine/resources/keyword_files/%s' % _keyword_files_subdir())
res = dict()
for x in os.listdir(keyword_files_dir):
if '_compressed' not in x:
res[x.rsplit('_')[0]] = os.path.join(keyword_files_dir, x)
return res
KEYWORD_FILE_PATHS = _keyword_file_paths()
KEYWORDS = KEYWORD_FILE_PATHS.keys() | resources/util/python/util.py | import os
import platform
import subprocess
def _pv_linux_machine(machine):
if machine == 'x86_64':
return machine
cpu_info = subprocess.check_output(['cat', '/proc/cpuinfo']).decode()
hardware_info = [x for x in cpu_info.split('\n') if 'Hardware' in x][0]
model_info = [x for x in cpu_info.split('\n') if 'model name' in x][0]
if 'BCM' in hardware_info:
if 'rev 7' in model_info:
return 'arm11'
elif 'rev 5' in model_info:
return 'cortex-a7'
elif 'rev 4' in model_info:
return 'cortex-a53'
elif 'rev 3' in model_info:
return 'cortex-a72'
elif 'AM33' in hardware_info:
return 'beaglebone'
else:
raise NotImplementedError('unsupported CPU:\n%s' % cpu_info)
def _pv_platform():
pv_system = platform.system()
if pv_system not in {'Darwin', 'Linux', 'Windows'}:
raise ValueError("unsupported system '%s'" % pv_system)
if pv_system == 'Linux':
pv_machine = _pv_linux_machine(platform.machine())
else:
pv_machine = platform.machine()
return pv_system, pv_machine
_PV_SYSTEM, _PV_MACHINE = _pv_platform()
_RASPBERRY_PI_MACHINES = {'arm11', 'cortex-a7', 'cortex-a53', 'cortex-a72'}
def _abs_path(rel_path):
return os.path.join(os.path.dirname(__file__), '../../../', rel_path)
def _rhino_library_path():
if _PV_SYSTEM == 'Darwin':
return _abs_path('lib/mac/x86_64/libpv_rhino.dylib')
elif _PV_SYSTEM == 'Linux':
if _PV_MACHINE == 'x86_64':
return _abs_path('lib/linux/x86_64/libpv_rhino.so')
elif _PV_MACHINE in _RASPBERRY_PI_MACHINES:
return _abs_path('lib/raspberry-pi/%s/libpv_rhino.so' % _PV_MACHINE)
elif _PV_MACHINE == 'beaglebone':
return _abs_path('lib/beaglebone/libpv_rhino.so')
elif _PV_SYSTEM == 'Windows':
return _abs_path('lib/windows/amd64/libpv_rhino.dll')
raise NotImplementedError('unsupported platform')
RHINO_LIBRARY_PATH = _rhino_library_path()
def _porcupine_library_path():
if _PV_SYSTEM == 'Darwin':
return _abs_path('resources/porcupine/lib/mac/x86_64/libpv_porcupine.dylib')
elif _PV_SYSTEM == 'Linux':
if _PV_MACHINE == 'x86_64':
return _abs_path('resources/porcupine/lib/linux/x86_64/libpv_porcupine.so')
elif _PV_MACHINE in _RASPBERRY_PI_MACHINES:
return _abs_path('resources/porcupine/lib/raspberry-pi/%s/libpv_porcupine.so' % _PV_MACHINE)
elif _PV_MACHINE == 'beaglebone':
return _abs_path('resources/porcupine/lib/beaglebone/libpv_porcupine.so')
elif _PV_SYSTEM == 'Windows':
return _abs_path('resources/porcupine/lib/windows/amd64/libpv_porcupine.dll')
raise NotImplementedError('unsupported platform')
PORCUPINE_LIBRARY_PATH = _porcupine_library_path()
RHINO_MODEL_FILE_PATH = _abs_path('lib/common/rhino_params.pv')
PORCUPINE_MODEL_FILE_PATH = _abs_path('resources/porcupine/lib/common/porcupine_params.pv')
def _context_files_subdir():
if _PV_SYSTEM == 'Darwin':
return 'mac'
elif _PV_SYSTEM == 'Linux':
if _PV_MACHINE == 'x86_64':
return 'linux'
elif _PV_MACHINE in _RASPBERRY_PI_MACHINES:
return 'raspberry-pi'
elif _PV_MACHINE == 'beaglebone':
return 'beaglebone'
elif _PV_SYSTEM == 'Windows':
return 'windows'
raise NotImplementedError('unsupported platform')
def _context_file_paths():
context_files_dir = _abs_path('resources/contexts/%s' % _context_files_subdir())
res = dict()
for x in os.listdir(context_files_dir):
res[x.rsplit('_', maxsplit=1)[0]] = os.path.join(context_files_dir, x)
return res
CONTEXT_FILE_PATHS = _context_file_paths()
CONTEXTS = CONTEXT_FILE_PATHS.keys()
def _keyword_files_subdir():
if _PV_SYSTEM == 'Darwin':
return 'mac'
elif _PV_SYSTEM == 'Linux':
if _PV_MACHINE == 'x86_64':
return 'linux'
elif _PV_MACHINE in _RASPBERRY_PI_MACHINES:
return 'raspberry-pi'
elif _PV_MACHINE == 'beaglebone':
return 'beaglebone'
elif _PV_SYSTEM == 'Windows':
return 'windows'
raise NotImplementedError('unsupported platform')
def _keyword_file_paths():
keyword_files_dir = _abs_path('resources/porcupine/resources/keyword_files/%s' % _keyword_files_subdir())
res = dict()
for x in os.listdir(keyword_files_dir):
if '_compressed' not in x:
res[x.rsplit('_')[0]] = os.path.join(keyword_files_dir, x)
return res
KEYWORD_FILE_PATHS = _keyword_file_paths()
KEYWORDS = KEYWORD_FILE_PATHS.keys() | 0.312475 | 0.088072 |
class Day4:
def part1(self):
min = 356261
max = 846303
password = <PASSWORD>
possibleCount = 0
while password <= max:
passwordText = str(password)
isPossible = False
stringIndex = 0
while stringIndex < len(str(max)) - 1:
if passwordText[stringIndex] == passwordText[stringIndex + 1]:
isPossible = True
if passwordText[stringIndex] > passwordText[stringIndex + 1]:
isPossible = False
break
stringIndex += 1
if isPossible:
possibleCount += 1
password += 1
print("Day 4, part 1: " + str(possibleCount))
def part2(self):
min = 356261
max = 846303
password = <PASSWORD>
possibleCount = 0
while password <= max:
passwordText = str(password)
isPossible = True
stringIndex = 0
if not self.doesHaveSameDigits(passwordText):
password += 1
continue
while stringIndex < len(str(max)) - 1:
if passwordText[stringIndex] > passwordText[stringIndex + 1]:
isPossible = False
break
stringIndex += 1
if isPossible:
possibleCount += 1
password += 1
print("Day 4, part 2: " + str(possibleCount))
def doesHaveSameDigits(self, passwordText) -> bool:
usedDigits = []
index = 0
while index < len(passwordText) - 1:
if passwordText[index] == passwordText[index + 1]:
if not passwordText[index] in usedDigits:
if index < len(passwordText) - 2:
if passwordText[index] == passwordText[index + 2]:
index += 1
usedDigits.append(passwordText[index])
continue
else:
return True
else:
return True
usedDigits.append(passwordText[index])
index += 1
return False | AOC2019/Day4.py | class Day4:
def part1(self):
min = 356261
max = 846303
password = <PASSWORD>
possibleCount = 0
while password <= max:
passwordText = str(password)
isPossible = False
stringIndex = 0
while stringIndex < len(str(max)) - 1:
if passwordText[stringIndex] == passwordText[stringIndex + 1]:
isPossible = True
if passwordText[stringIndex] > passwordText[stringIndex + 1]:
isPossible = False
break
stringIndex += 1
if isPossible:
possibleCount += 1
password += 1
print("Day 4, part 1: " + str(possibleCount))
def part2(self):
min = 356261
max = 846303
password = <PASSWORD>
possibleCount = 0
while password <= max:
passwordText = str(password)
isPossible = True
stringIndex = 0
if not self.doesHaveSameDigits(passwordText):
password += 1
continue
while stringIndex < len(str(max)) - 1:
if passwordText[stringIndex] > passwordText[stringIndex + 1]:
isPossible = False
break
stringIndex += 1
if isPossible:
possibleCount += 1
password += 1
print("Day 4, part 2: " + str(possibleCount))
def doesHaveSameDigits(self, passwordText) -> bool:
usedDigits = []
index = 0
while index < len(passwordText) - 1:
if passwordText[index] == passwordText[index + 1]:
if not passwordText[index] in usedDigits:
if index < len(passwordText) - 2:
if passwordText[index] == passwordText[index + 2]:
index += 1
usedDigits.append(passwordText[index])
continue
else:
return True
else:
return True
usedDigits.append(passwordText[index])
index += 1
return False | 0.132739 | 0.395893 |
import numpy as np
from models.model import BaseModel
from tqdm.autonotebook import tqdm
from xgboost import XGBRegressor
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
class Boosting(BaseModel):
    """Gradient-boosted tree regressor (XGBoost) with optional grid-search
    cross-validation and a simple online (predict-then-retrain) mode.
    """

    def __init__(self, generator, cfg, **kwargs):
        """Initialize from a data generator and a config object.

        Parameters
        ----------
        generator : object exposing ``X`` (features) and ``y`` (targets).
        cfg : config exposing ``params`` (XGBRegressor kwargs), ``l``
            (grid of ``reg_lambda`` values) and ``splits`` (CV fold count).
        """
        super().__init__(generator, cfg, **kwargs)
        self.params = self.cfg.params
        self.build_model()

    def prep_data(self, X):
        """No preprocessing is required for tree-based boosting."""
        pass

    def build_model(self):
        """(Re)instantiate the underlying regressor from ``self.params``."""
        self.model = XGBRegressor(booster="gbtree", **self.params)

    def train(self, cv=False):
        """Fit the model on the generator's training data.

        When ``cv`` is True, grid-search ``reg_lambda`` x ``max_depth`` x
        ``n_estimators`` with k-fold CV scored by negative MAE, adopt the
        configuration with the best *median* fold score, then fit.
        """
        if cv:
            res, idx = [], []
            for lmbd in tqdm(self.cfg.l):
                for d in tqdm(range(1, 9), leave=False):
                    for e in tqdm(range(10, 110, 10), leave=False):
                        model = XGBRegressor(booster="gbtree", reg_lambda=lmbd,
                                             max_depth=d, n_estimators=e)
                        kfold = KFold(n_splits=self.cfg.splits)
                        results = cross_val_score(
                            model, self.generator.X, self.generator.y,
                            cv=kfold, scoring="neg_mean_absolute_error")
                        res.append(results.copy())
                        idx.append((lmbd, d, e))
            # neg MAE: closer to zero is better, hence argmax over medians.
            best = idx[np.argmax(np.median(np.array(res), axis=1))]
            self.params = dict(zip(["reg_lambda", "max_depth", "n_estimators"], best))
            self.build_model()
        self.model.fit(self.generator.X, self.generator.y)

    def predict(self, X, y=None, online=False):
        """Predict targets for ``X``.

        With ``online=False`` (default) this simply delegates to the fitted
        model. With ``online=True`` each row of ``X`` is predicted by a model
        trained on the original training data plus all previously seen rows,
        after which the true label is appended and the model is refit.

        Bug fixed vs. the original: ``model`` was referenced inside the loop
        before it was ever assigned, raising ``NameError`` on the first
        iteration; the model is now fit on the training data up front. A
        stray ``print(data.shape)`` debug line was also removed.
        """
        if not online:
            return self.model.predict(X)
        assert y is not None, "if online provide a y"
        preds = []
        data = self.generator.X.values.copy()
        labels = self.generator.y.values.copy()
        # NOTE(review): online mode hard-codes its hyperparameters instead of
        # using self.params — kept as-is to preserve the original behavior.
        model = XGBRegressor(booster="gbtree", reg_lambda=0.005,
                             max_depth=1, n_estimators=30)
        model.fit(data, labels)
        for x_row, y_true in zip(X.values, y.values):
            x_row = x_row[np.newaxis, ...]
            preds.append(model.predict(x_row))
            data = np.concatenate([data, x_row])
            labels = np.append(labels, y_true)
            model = XGBRegressor(booster="gbtree", reg_lambda=0.005,
                                 max_depth=1, n_estimators=30)
            model.fit(data, labels)
        return np.concatenate(preds)
from models.model import BaseModel
from tqdm.autonotebook import tqdm
from xgboost import XGBRegressor
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
class Boosting(BaseModel):
def __init__ (self, generator, cfg, **kwargs):
super().__init__(generator, cfg, **kwargs)
self.params = self.cfg.params
self.build_model()
def prep_data(self, X):
pass
def build_model(self):
self.model = XGBRegressor(booster = "gbtree", **self.params)
def train(self, cv=False):
if cv:
res, idx = [], []
for lmbd in tqdm(self.cfg.l):
for d in tqdm(range(1,9), leave=False):
for e in tqdm(range(10, 110, 10), leave=False):
model = XGBRegressor(booster = "gbtree", reg_lambda=lmbd, max_depth=d, n_estimators=e)
kfold = KFold(n_splits=self.cfg.splits)
results = cross_val_score(model, self.generator.X, self.generator.y, cv=kfold, scoring="neg_mean_absolute_error")
res.append(results.copy())
idx.append((lmbd, d, e))
self.params = dict(zip(["reg_lambda", "max_depth", "n_estimators" ], idx[np.argmax(np.median(np.array(res), axis=1))]))
self.build_model()
self.model.fit(self.generator.X, self.generator.y)
def predict (self, X, y=None, online=False):
if online:
assert(online and not (y is None)), "if online provide a y"
preds = []
data = self.generator.X.values.copy()
labels = self.generator.y.values.copy()
for x, y in zip(X.values, y.values):
x = x[np.newaxis, ...]
preds.append(model.predict(x))
data, labels = np.concatenate([data, x]), np.append(labels, y)
print(data.shape)
model = XGBRegressor(booster = "gbtree", reg_lambda=0.005, max_depth=1, n_estimators=30)
model.fit(data, labels)
return np.concatenate(preds)
else:
return self.model.predict(X) | 0.463687 | 0.220091 |
import numpy as np
import matplotlib.pyplot as plt
def df_to_matrix(df, quantity, replicate_identifier):
""" Construct a matrix from a Dataframe for a given vector-valued
quantity in which each vector replicate is labeled by a unique
replicate identifier.
Parameters
----------
df : DataFrame
Dataframe containing table of results from DLS microrheology
analysis for a single condition
quantity : str
Name of variable to plot as defined in the Dataframe
replicate_identifier : str
Name of quantity to average over
Returns
-------
M : 2-d array
Matrix where each row is a replicate of the vector quantity.
"""
# Construct a matrix in which each row represents a frequency sweep
ids = set(df[replicate_identifier].values)
M_list = []
list_len = []
for idx in ids:
xi = df[df[replicate_identifier] == idx][quantity].values
list_len.append(len(xi))
M_list.append(xi)
# Ensure all rows are the same length
limit = np.min(np.array(list_len))
M_list = list(m[0:limit] for m in M_list)
M = np.vstack(M_list)
return M
def bootstrap_matrix_byrows(M, n_bootstrap, estimator):
""" Gets bootstrap samples of an estimator for frequency (or time)
sweep data from a matrix containing all vectors for a given quantity
over all replicates.
Parameters
----------
M : 2-d array
Matrix of all values for a given quantity over all replicates
n_bootstrap : int
Number of points for bootstrap
estimator : callable function
Function for evaluating center of distribution
Returns
-------
M_bootstrap : 2-d array
Matrix in which each row represents a frequency sweep
"""
n_rep = M.shape[0]
M_bootstrap = np.zeros((n_bootstrap, M.shape[1]))
M_sample = np.zeros(M.shape)
for i in range(n_bootstrap):
inds = np.random.randint(0, n_rep, n_rep)
M_sample = M[inds, :]
M_bootstrap[i, :] = estimator(M_sample, axis=0)
return M_bootstrap
def bootstrap_freq_sweep(df, quantity, replicate_identifier,
n_bootstrap, estimator=np.mean):
""" Gets bootstrap samples of an estimator for frequency (or time)
sweep data from a Dataframe. The Dataframe is assumed to contain a
vector for a given quantity in which each vector replicate is
labeled by a unique replicate identifier.
Parameters
----------
df : DataFrame
Dataframe containing table of results from DLS microrheology
analysis for a single condition
quantity : str
Name of variable to plot as defined in the Dataframe
replicate_identifier : str
Name of quantity to average over
n_bootstrap : int
Number of points for bootstrap
estimator : callable function
Function for evaluating center of distribution
Returns
-------
M_bootstrap : 2-d array
Matrix in which each row represents a frequency sweep
"""
ids = set(df[replicate_identifier].values)
M_list = []
list_len = []
for idx in ids:
xi = df[df[replicate_identifier] == idx][quantity].values
list_len.append(len(xi))
M_list.append(xi)
limit = np.min(np.array(list_len))
M_list = list(m[0:limit] for m in M_list)
M = np.vstack(M_list)
# Get a matrix of bootstrapped row-wise averages given by the estimator
M_bootstrap = bootstrap_matrix_byrows(M, n_bootstrap, estimator)
return M_bootstrap
def bootstrap_freq_sweep_ci(df, quantity, replicate_identifier,
n_bootstrap, ci, estimator=np.mean):
""" Gets bootstrap confidence interval for an estimator of
frequency sweep (or time sweep) data. Boot strap can be either a
percentile bootstrap of an estimator of a studentized bootstrap
of the mean
Parameters
----------
df : DataFrame
Dataframe containing table of results from DLS microrheology
analysis for a single condition
quantity : str
Name of variable to plot as defined in the Dataframe
replicate_identifier : str
Name of quantity to average over
n_bootstrap : int
Number of points for bootstrap
ci : float
Percent of distribution included in error bars
estimator : callable function, `optional`
Function for evaluating center of distribution
Returns
-------
ci_low : 1-d array
Vector of the lower bound of the confidence interval over
entire frequency range.
ci_high : 1-d array
Vector of the upper bound of the confidence interval
over entire frequency range.
"""
M_bootstrap = bootstrap_freq_sweep(df, quantity, replicate_identifier,
n_bootstrap, estimator=estimator)
ci_low = np.percentile(M_bootstrap, 50.-ci/2., axis=0)
ci_high = np.percentile(M_bootstrap, 50.+ci/2., axis=0)
return [ci_low, ci_high]
def plot_replicates_from_df(df, my_quantity, plot_ci=True, myci=68.,
estimator=np.mean, color='m', ls='-',
err_alpha=0.25, err_lw=2.5, identifier='replicate'):
""" Plot a given quantity from the Dataframe, averaging across
all replicates in that Dataframe.
Parameters
----------
df : DataFrame
Dataframe containing table of results from DLS microrheology
analysis for a single condition
my_quantity : str
Name of variable to plot as defined in the Dataframe
plot_ci : boolean, `optional`
If `True`, plot error bars
myci : float, `optional`
Percent of distribution plotted in error bars
estimator : callable function, `optional`
Function for evaluating main plotted value
color : str, `optional`
Color of plotted line
ls : str, `optional`
Linestyle of plotted line
err_alpha : float, `optional`
Transparency level of error bars
err_lw : float, `optional`
Linewidth of error bar outlines
identifier : str, `optional`
Name of quantity to average over
"""
y_matrix = df_to_matrix(df, my_quantity, identifier)
replicates = df['replicate'].values
time = df[df['replicate'] == replicates[0]]['omega'].values[0:np.shape(y_matrix)[1]]
ci = bootstrap_freq_sweep_ci(df, my_quantity, identifier, 10000,
myci, estimator=estimator)
ci_low = ci[0]
ci_high = ci[1]
y_mu = estimator(y_matrix, axis=0)
plt.plot(time, y_mu, color=color, ls=ls)
time = np.array(time, dtype=float)
ci_low = np.array(ci_low, dtype=float)
ci_high = np.array(ci_high, dtype=float)
if plot_ci:
plt.fill_between(time, ci_low, ci_high, color=color,
alpha=err_alpha, linewidth=err_lw)
def add_w_scaling(omega, scaling, w_b, placement):
""" Plot a given scaling on complex modulus plot to compared against the complex modulus of a sample.
Parameters
----------
omega : 1-d array
Vector of frequency range covered by the complex modulus
plotted in the plot
scaling : list of float
List of 2 floats, where the first number is numerator
of fraction and second is denominator of fraction
w_b : float
Value of complex modulus plotted where scaling should appear
on the plot
placement : list of float
First element in list is lower bound of scaling line,
second element in list is upper bound of scaling line,
where both elements are values between 0 and 1. The
value of the first element should be less than the value
of the second element
"""
lolim = np.int(len(omega)*placement[0])
hilim = np.int(len(omega)*placement[1])
omega = np.array(omega,dtype=float)
g_scale = np.float_power(omega, scaling[0]/scaling[1])*w_b/np.float_power(omega[lolim],scaling[0]/scaling[1])
plt.plot(omega[lolim:hilim], g_scale[lolim:hilim], ls='--', color='k',linewidth=2)
model = np.int(0.6*(lolim+hilim))
plt.text(omega[model],g_scale[model]*1.6,
'$\omega^{%(top)s/%(bot)s}$'%{'top':np.int(scaling[0]),
'bot':np.int(scaling[1])},fontsize=12) | dlsmicro/backend/plot_tools.py | import numpy as np
import matplotlib.pyplot as plt
def df_to_matrix(df, quantity, replicate_identifier):
""" Construct a matrix from a Dataframe for a given vector-valued
quantity in which each vector replicate is labeled by a unique
replicate identifier.
Parameters
----------
df : DataFrame
Dataframe containing table of results from DLS microrheology
analysis for a single condition
quantity : str
Name of variable to plot as defined in the Dataframe
replicate_identifier : str
Name of quantity to average over
Returns
-------
M : 2-d array
Matrix where each row is a replicate of the vector quantity.
"""
# Construct a matrix in which each row represents a frequency sweep
ids = set(df[replicate_identifier].values)
M_list = []
list_len = []
for idx in ids:
xi = df[df[replicate_identifier] == idx][quantity].values
list_len.append(len(xi))
M_list.append(xi)
# Ensure all rows are the same length
limit = np.min(np.array(list_len))
M_list = list(m[0:limit] for m in M_list)
M = np.vstack(M_list)
return M
def bootstrap_matrix_byrows(M, n_bootstrap, estimator):
""" Gets bootstrap samples of an estimator for frequency (or time)
sweep data from a matrix containing all vectors for a given quantity
over all replicates.
Parameters
----------
M : 2-d array
Matrix of all values for a given quantity over all replicates
n_bootstrap : int
Number of points for bootstrap
estimator : callable function
Function for evaluating center of distribution
Returns
-------
M_bootstrap : 2-d array
Matrix in which each row represents a frequency sweep
"""
n_rep = M.shape[0]
M_bootstrap = np.zeros((n_bootstrap, M.shape[1]))
M_sample = np.zeros(M.shape)
for i in range(n_bootstrap):
inds = np.random.randint(0, n_rep, n_rep)
M_sample = M[inds, :]
M_bootstrap[i, :] = estimator(M_sample, axis=0)
return M_bootstrap
def bootstrap_freq_sweep(df, quantity, replicate_identifier,
n_bootstrap, estimator=np.mean):
""" Gets bootstrap samples of an estimator for frequency (or time)
sweep data from a Dataframe. The Dataframe is assumed to contain a
vector for a given quantity in which each vector replicate is
labeled by a unique replicate identifier.
Parameters
----------
df : DataFrame
Dataframe containing table of results from DLS microrheology
analysis for a single condition
quantity : str
Name of variable to plot as defined in the Dataframe
replicate_identifier : str
Name of quantity to average over
n_bootstrap : int
Number of points for bootstrap
estimator : callable function
Function for evaluating center of distribution
Returns
-------
M_bootstrap : 2-d array
Matrix in which each row represents a frequency sweep
"""
ids = set(df[replicate_identifier].values)
M_list = []
list_len = []
for idx in ids:
xi = df[df[replicate_identifier] == idx][quantity].values
list_len.append(len(xi))
M_list.append(xi)
limit = np.min(np.array(list_len))
M_list = list(m[0:limit] for m in M_list)
M = np.vstack(M_list)
# Get a matrix of bootstrapped row-wise averages given by the estimator
M_bootstrap = bootstrap_matrix_byrows(M, n_bootstrap, estimator)
return M_bootstrap
def bootstrap_freq_sweep_ci(df, quantity, replicate_identifier,
n_bootstrap, ci, estimator=np.mean):
""" Gets bootstrap confidence interval for an estimator of
frequency sweep (or time sweep) data. Boot strap can be either a
percentile bootstrap of an estimator of a studentized bootstrap
of the mean
Parameters
----------
df : DataFrame
Dataframe containing table of results from DLS microrheology
analysis for a single condition
quantity : str
Name of variable to plot as defined in the Dataframe
replicate_identifier : str
Name of quantity to average over
n_bootstrap : int
Number of points for bootstrap
ci : float
Percent of distribution included in error bars
estimator : callable function, `optional`
Function for evaluating center of distribution
Returns
-------
ci_low : 1-d array
Vector of the lower bound of the confidence interval over
entire frequency range.
ci_high : 1-d array
Vector of the upper bound of the confidence interval
over entire frequency range.
"""
M_bootstrap = bootstrap_freq_sweep(df, quantity, replicate_identifier,
n_bootstrap, estimator=estimator)
ci_low = np.percentile(M_bootstrap, 50.-ci/2., axis=0)
ci_high = np.percentile(M_bootstrap, 50.+ci/2., axis=0)
return [ci_low, ci_high]
def plot_replicates_from_df(df, my_quantity, plot_ci=True, myci=68.,
estimator=np.mean, color='m', ls='-',
err_alpha=0.25, err_lw=2.5, identifier='replicate'):
""" Plot a given quantity from the Dataframe, averaging across
all replicates in that Dataframe.
Parameters
----------
df : DataFrame
Dataframe containing table of results from DLS microrheology
analysis for a single condition
my_quantity : str
Name of variable to plot as defined in the Dataframe
plot_ci : boolean, `optional`
If `True`, plot error bars
myci : float, `optional`
Percent of distribution plotted in error bars
estimator : callable function, `optional`
Function for evaluating main plotted value
color : str, `optional`
Color of plotted line
ls : str, `optional`
Linestyle of plotted line
err_alpha : float, `optional`
Transparency level of error bars
err_lw : float, `optional`
Linewidth of error bar outlines
identifier : str, `optional`
Name of quantity to average over
"""
y_matrix = df_to_matrix(df, my_quantity, identifier)
replicates = df['replicate'].values
time = df[df['replicate'] == replicates[0]]['omega'].values[0:np.shape(y_matrix)[1]]
ci = bootstrap_freq_sweep_ci(df, my_quantity, identifier, 10000,
myci, estimator=estimator)
ci_low = ci[0]
ci_high = ci[1]
y_mu = estimator(y_matrix, axis=0)
plt.plot(time, y_mu, color=color, ls=ls)
time = np.array(time, dtype=float)
ci_low = np.array(ci_low, dtype=float)
ci_high = np.array(ci_high, dtype=float)
if plot_ci:
plt.fill_between(time, ci_low, ci_high, color=color,
alpha=err_alpha, linewidth=err_lw)
def add_w_scaling(omega, scaling, w_b, placement):
""" Plot a given scaling on complex modulus plot to compared against the complex modulus of a sample.
Parameters
----------
omega : 1-d array
Vector of frequency range covered by the complex modulus
plotted in the plot
scaling : list of float
List of 2 floats, where the first number is numerator
of fraction and second is denominator of fraction
w_b : float
Value of complex modulus plotted where scaling should appear
on the plot
placement : list of float
First element in list is lower bound of scaling line,
second element in list is upper bound of scaling line,
where both elements are values between 0 and 1. The
value of the first element should be less than the value
of the second element
"""
lolim = np.int(len(omega)*placement[0])
hilim = np.int(len(omega)*placement[1])
omega = np.array(omega,dtype=float)
g_scale = np.float_power(omega, scaling[0]/scaling[1])*w_b/np.float_power(omega[lolim],scaling[0]/scaling[1])
plt.plot(omega[lolim:hilim], g_scale[lolim:hilim], ls='--', color='k',linewidth=2)
model = np.int(0.6*(lolim+hilim))
plt.text(omega[model],g_scale[model]*1.6,
'$\omega^{%(top)s/%(bot)s}$'%{'top':np.int(scaling[0]),
'bot':np.int(scaling[1])},fontsize=12) | 0.895065 | 0.699408 |
import time
import grove_rgb_lcd
sleep = time.sleep
setText = grove_rgb_lcd.setText
setText_norefresh = grove_rgb_lcd.setText_norefresh
setRGB = grove_rgb_lcd.setRGB
class LCDControl(object):
def __init__(self, red = 100, green = 100, blue = 100):
#Set default background colour
self.red = red
self.green = green
self.blue = blue
self.rgb(self.red, self.green, self.blue)
#Send text to LCD with refresh but no scroll
def text(self, text):
setText(text)
#Send text to LCD with no refresh and no scroll
def text_norefresh(self, text):
setText_norefresh(text)
#Refresh LCD
def refresh(self):
self.text("")
#Send text to LCD with scroll.
#cycles = the number of complete scrolling cycles of the text (1 to 10)
#speed = speed of scolling (1 to 5)
def text_scroll(self, text, cycles = 1, speed = 1):
try:
if cycles < 1 or cycles > 10:
raise ValueError("Cycles value must be between 1 an 10.")
if speed < 1 or speed > 10:
raise ValueError("Speed value must be between 1 an 5.")
length = len(text)
if length > 32: #Scroll required
scroll_text = text + " "
length = len(scroll_text)
for i in range(cycles):
for s in range(length):
self.text_norefresh(scroll_text)
#Move first character to the end of the string
char = scroll_text[0]
scroll_text = scroll_text[1: length] + char
sleep(0.1 / (speed * 0.25))
self.text_norefresh(scroll_text)
else:
#No scroll required since text fully fits onto display
self.text(text)
except ValueError as e:
print e.args[0]
exit()
#Set RGB values for background display
def rgb(self, red, green ,blue):
setRGB(red, green, blue)
#Prompt with input and input echo
#prompt = text string requesting input (max 16 characters)
def input(self, prompt):
try:
if len(prompt) > 16:
raise Exception("Prompt cannot be longer than 16 characters.")
self.text(prompt + "\n")
reply = raw_input(prompt + " ")
self.text(prompt + "\n" + reply)
return(reply)
except Exception as e:
print e.args[0]
exit()
# An example of what the class can do
if __name__ == "__main__":
lcd = LCDControl(100, 20, 20)
lcd.text_scroll("This is an LCD screen scrolling example.", 1, 3 )
sleep(5)
lcd.rgb(50, 50, 50)
name = lcd.input("Name please:")
print("Name = " + name)
sleep(1)
while True:
age = lcd.input("Age please:")
try:
age = int(age)
break
except ValueError:
print "Integer please"
print("Age = %d" % age)
sleep(1)
lcd.rgb(100, 20, 20)
lcd.text_scroll("Well, hello %s, you're not looking bad for %d years old." % (name, age), 2, 3)
sleep(2)
lcd.refresh()
lcd.rgb(0, 0, 0) | LCD_Screen_Control.py |
import time
import grove_rgb_lcd
sleep = time.sleep
setText = grove_rgb_lcd.setText
setText_norefresh = grove_rgb_lcd.setText_norefresh
setRGB = grove_rgb_lcd.setRGB
class LCDControl(object):
def __init__(self, red = 100, green = 100, blue = 100):
#Set default background colour
self.red = red
self.green = green
self.blue = blue
self.rgb(self.red, self.green, self.blue)
#Send text to LCD with refresh but no scroll
def text(self, text):
setText(text)
#Send text to LCD with no refresh and no scroll
def text_norefresh(self, text):
setText_norefresh(text)
#Refresh LCD
def refresh(self):
self.text("")
#Send text to LCD with scroll.
#cycles = the number of complete scrolling cycles of the text (1 to 10)
#speed = speed of scolling (1 to 5)
def text_scroll(self, text, cycles = 1, speed = 1):
try:
if cycles < 1 or cycles > 10:
raise ValueError("Cycles value must be between 1 an 10.")
if speed < 1 or speed > 10:
raise ValueError("Speed value must be between 1 an 5.")
length = len(text)
if length > 32: #Scroll required
scroll_text = text + " "
length = len(scroll_text)
for i in range(cycles):
for s in range(length):
self.text_norefresh(scroll_text)
#Move first character to the end of the string
char = scroll_text[0]
scroll_text = scroll_text[1: length] + char
sleep(0.1 / (speed * 0.25))
self.text_norefresh(scroll_text)
else:
#No scroll required since text fully fits onto display
self.text(text)
except ValueError as e:
print e.args[0]
exit()
#Set RGB values for background display
def rgb(self, red, green ,blue):
setRGB(red, green, blue)
#Prompt with input and input echo
#prompt = text string requesting input (max 16 characters)
def input(self, prompt):
try:
if len(prompt) > 16:
raise Exception("Prompt cannot be longer than 16 characters.")
self.text(prompt + "\n")
reply = raw_input(prompt + " ")
self.text(prompt + "\n" + reply)
return(reply)
except Exception as e:
print e.args[0]
exit()
# An example of what the class can do
if __name__ == "__main__":
lcd = LCDControl(100, 20, 20)
lcd.text_scroll("This is an LCD screen scrolling example.", 1, 3 )
sleep(5)
lcd.rgb(50, 50, 50)
name = lcd.input("Name please:")
print("Name = " + name)
sleep(1)
while True:
age = lcd.input("Age please:")
try:
age = int(age)
break
except ValueError:
print "Integer please"
print("Age = %d" % age)
sleep(1)
lcd.rgb(100, 20, 20)
lcd.text_scroll("Well, hello %s, you're not looking bad for %d years old." % (name, age), 2, 3)
sleep(2)
lcd.refresh()
lcd.rgb(0, 0, 0) | 0.344443 | 0.11737 |
import os
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
try:
import keyring
except:
keyring = None
class GUI(object):
UI_DATA_PATH = os.path.join(os.path.dirname(__file__), "..", "ui")
def __init__(self):
self.builder = Gtk.Builder()
self.builder.add_from_file(os.path.join(self.UI_DATA_PATH, "mountn.glade"))
def get_file(self, parent=None, title="Open...", action = Gtk.FileChooserAction.OPEN):
if action == Gtk.FileChooserAction.SAVE:
buttons=(Gtk.STOCK_CANCEL,Gtk.ResponseType.CANCEL,Gtk.STOCK_SAVE,Gtk.ResponseType.OK)
elif action == Gtk.FileChooserAction.CREATE_FOLDER:
buttons=(Gtk.STOCK_CANCEL,Gtk.ResponseType.CANCEL, Gtk.STOCK_OPEN,Gtk.ResponseType.OK)
else:
buttons=(Gtk.STOCK_CANCEL,Gtk.ResponseType.CANCEL,Gtk.STOCK_OPEN,Gtk.ResponseType.OK)
dialog = Gtk.FileChooserDialog(title,
parent,
action,
buttons)
dialog.set_default_response(Gtk.ResponseType.OK)
filter = Gtk.FileFilter()
filter.set_name("All files")
filter.add_pattern("*")
dialog.add_filter(filter)
response = dialog.run()
filename = None
if response == Gtk.ResponseType.OK:
filename = dialog.get_filename()
dialog.destroy()
return filename
def get_password(self, parent=None, message="", save_id=None):
"""
Display a dialog with a text entry.
Returns the text, or None if canceled.
"""
if keyring and save_id:
password = keyring.get_password("mountn:"+save_id, "")
if password:
return password
dialog = self.builder.get_object("dlg_password")
chk_save = self.builder.get_object("chk_save")
txt_pass = self.builder.get_object("txt_password")
if not (keyring and save_id):
chk_save.hide()
dialog.set_default_response(Gtk.ResponseType.OK)
dialog.set_keep_above(True)
response = dialog.run()
dialog.hide()
if response == Gtk.ResponseType.OK:
password = txt_pass.get_text().decode("utf8")
if password:
if keyring and save_id:
keyring.set_password("<PASSWORD>:"+save_id,"", password)
return password
return None
gui = GUI() | mountn/gui.py | import os
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
try:
import keyring
except:
keyring = None
class GUI(object):
UI_DATA_PATH = os.path.join(os.path.dirname(__file__), "..", "ui")
def __init__(self):
self.builder = Gtk.Builder()
self.builder.add_from_file(os.path.join(self.UI_DATA_PATH, "mountn.glade"))
def get_file(self, parent=None, title="Open...", action = Gtk.FileChooserAction.OPEN):
if action == Gtk.FileChooserAction.SAVE:
buttons=(Gtk.STOCK_CANCEL,Gtk.ResponseType.CANCEL,Gtk.STOCK_SAVE,Gtk.ResponseType.OK)
elif action == Gtk.FileChooserAction.CREATE_FOLDER:
buttons=(Gtk.STOCK_CANCEL,Gtk.ResponseType.CANCEL, Gtk.STOCK_OPEN,Gtk.ResponseType.OK)
else:
buttons=(Gtk.STOCK_CANCEL,Gtk.ResponseType.CANCEL,Gtk.STOCK_OPEN,Gtk.ResponseType.OK)
dialog = Gtk.FileChooserDialog(title,
parent,
action,
buttons)
dialog.set_default_response(Gtk.ResponseType.OK)
filter = Gtk.FileFilter()
filter.set_name("All files")
filter.add_pattern("*")
dialog.add_filter(filter)
response = dialog.run()
filename = None
if response == Gtk.ResponseType.OK:
filename = dialog.get_filename()
dialog.destroy()
return filename
def get_password(self, parent=None, message="", save_id=None):
"""
Display a dialog with a text entry.
Returns the text, or None if canceled.
"""
if keyring and save_id:
password = keyring.get_password("mountn:"+save_id, "")
if password:
return password
dialog = self.builder.get_object("dlg_password")
chk_save = self.builder.get_object("chk_save")
txt_pass = self.builder.get_object("txt_password")
if not (keyring and save_id):
chk_save.hide()
dialog.set_default_response(Gtk.ResponseType.OK)
dialog.set_keep_above(True)
response = dialog.run()
dialog.hide()
if response == Gtk.ResponseType.OK:
password = txt_pass.get_text().decode("utf8")
if password:
if keyring and save_id:
keyring.set_password("<PASSWORD>:"+save_id,"", password)
return password
return None
gui = GUI() | 0.291787 | 0.071494 |
import asyncio
from typing import Union
import discord
from redbot.core import commands
from redbot.core.utils.chat_formatting import box
class SupportCommands(commands.Cog):
    # Parent command group for all support-ticket configuration subcommands.
    # Restricted to guild admins and unusable in DMs; the body is intentionally
    # empty because subcommands (registered via @support.command below) do the work.
    @commands.group(name="supportset", aliases=["sset"])
    @commands.guild_only()
    @commands.admin()
    async def support(self, ctx: commands.Context):
        """Base support settings"""
        pass
# Check running button tasks and update guild task if exists
async def refresh_tasks(self, guild_id: str):
for task in asyncio.all_tasks():
if guild_id == task.get_name():
task.cancel()
await self.add_components()
@support.command(name="view")
async def view_settings(self, ctx: commands.Context):
"""View support settings"""
conf = await self.config.guild(ctx.guild).all()
category = self.bot.get_channel(conf["category"])
if not category:
category = conf['category']
button_channel = self.bot.get_channel(conf['channel_id'])
if button_channel:
button_channel = button_channel.mention
else:
button_channel = conf['channel_id']
msg = f"`Ticket Category: `{category}\n" \
f"`Button MessageID: `{conf['message_id']}\n" \
f"`Button Channel: `{button_channel}\n" \
f"`Max Tickets: `{conf['max_tickets']}\n" \
f"`Button Content: `{conf['button_content']}\n" \
f"`Button Emoji: `{conf['emoji']}\n" \
f"`DM Alerts: `{conf['dm']}\n" \
f"`Users can Rename: `{conf['user_can_rename']}\n" \
f"`Users can Close: `{conf['user_can_close']}\n" \
f"`Users can Manage: `{conf['user_can_manage']}\n" \
f"`Save Transcripts: `{conf['transcript']}\n" \
f"`Auto Close: `{conf['auto_close']}\n" \
f"`Ticket Name: `{conf['ticket_name']}\n"
log = conf["log"]
if log:
lchannel = ctx.guild.get_channel(log)
if lchannel:
msg += f"`Log Channel: `{lchannel.mention}\n"
else:
msg += f"`Log Channel: `{log}\n"
support = conf["support"]
suproles = ""
if support:
for role_id in support:
role = ctx.guild.get_role(role_id)
if role:
suproles += f"{role.mention}\n"
blacklist = conf["blacklist"]
busers = ""
if blacklist:
for user_id in blacklist:
user = ctx.guild.get_member(user_id)
if user:
busers += f"{user.name}-{user.id}\n"
else:
busers += f"LeftGuild-{user_id}\n"
embed = discord.Embed(
title="Support Settings",
description=msg,
color=discord.Color.random()
)
if suproles:
embed.add_field(
name="Support Roles",
value=suproles,
inline=False
)
if busers:
embed.add_field(
name="Blacklisted Users",
value=busers,
inline=False
)
if conf["message"] != "{default}":
embed.add_field(
name="Ticket Message",
value=box(conf["message"]),
inline=False
)
await ctx.send(embed=embed)
@support.command(name="category")
async def category(self, ctx: commands.Context, category: discord.CategoryChannel):
"""Set the category ticket channels will be created in"""
if not category.permissions_for(ctx.guild.me).manage_channels:
return await ctx.send(
"I do not have 'Manage Channels' permissions in that category"
)
await self.config.guild(ctx.guild).category.set(category.id)
await ctx.send(f"Tickets will now be created in the {category.name} category")
@support.command(name="supportmessage")
async def set_support_button_message(self, ctx: commands.Context, message_id: discord.Message):
"""
Set the support ticket message
The support button will be added to this message
"""
if not message_id.channel.permissions_for(ctx.guild.me).view_channel:
return await ctx.send("I cant see that channel")
if not message_id.channel.permissions_for(ctx.guild.me).read_messages:
return await ctx.send("I cant read messages in that channel")
if not message_id.channel.permissions_for(ctx.guild.me).read_message_history:
return await ctx.send("I cant read message history in that channel")
if message_id.author.id != self.bot.user.id:
return await ctx.send("I can only add buttons to my own messages!")
await self.config.guild(ctx.guild).message_id.set(message_id.id)
await self.config.guild(ctx.guild).channel_id.set(message_id.channel.id)
await ctx.send("Support ticket message has been set!")
await self.refresh_tasks(str(ctx.guild.id))
@support.command(name="ticketmessage")
async def set_support_ticket_message(self, ctx: commands.Context, *, message: str):
"""
Set the message sent when a ticket is opened
You can include any of these in the message to be replaced by their value when the message is sent
`{username}` - Person's Discord username
`{mention}` - This will mention the user
`{id}` - This is the ID of the user that created the ticket
You can set this to {default} to restore original settings
"""
if len(message) > 1024:
return await ctx.send("Message length is too long! Must be less than 1024 chars")
await self.config.guild(ctx.guild).message.set(message)
if message.lower() == "default":
await ctx.send("Message has been reset to default")
else:
await ctx.send("Message has been set!")
@support.command(name="supportrole")
async def set_support_role(self, ctx: commands.Context, *, role: discord.Role):
"""
Add/Remove ticket support roles (one at a time)
To remove a role, simply run this command with it again to remove it
"""
async with self.config.guild(ctx.guild).support() as roles:
if role.id in roles:
roles.remove(role.id)
await ctx.send(f"{role.name} has been removed from support roles")
else:
roles.append(role.id)
await ctx.send(f"{role.name} has been added to support roles")
@support.command(name="blacklist")
async def set_user_blacklist(self, ctx: commands.Context, *, user: discord.Member):
"""
Add/Remove users from the blacklist
Users in the blacklist will not be able to create a ticket
"""
async with self.config.guild(ctx.guild).blacklist() as bl:
if user.id in bl:
bl.remove(user.id)
await ctx.send(f"{user.name} has been removed from the blacklist")
else:
bl.append(user.id)
await ctx.send(f"{user.name} has been added to the blacklist")
@support.command(name="maxtickets")
async def set_max_tickets(self, ctx: commands.Context, max_tickets: int):
"""Set the max amount of tickets a user can have opened"""
await self.config.guild(ctx.guild).max_tickets.set(max_tickets)
await ctx.tick()
    @support.command(name="logchannel")
    async def set_log_channel(self, ctx: commands.Context, *, log_channel: discord.TextChannel):
        """Set the log channel"""
        # Only the channel ID is persisted; it is resolved back to a channel
        # object when settings are displayed or logs are sent.
        await self.config.guild(ctx.guild).log.set(log_channel.id)
        await ctx.tick()
@support.command(name="buttoncontent")
async def set_button_content(self, ctx: commands.Context, *, button_content: str):
"""Set what you want the support button to say"""
if len(button_content) <= 80:
await self.config.guild(ctx.guild).button_content.set(button_content)
await ctx.tick()
await self.refresh_tasks(str(ctx.guild.id))
else:
await ctx.send("Button content is too long! Must be less than 80 characters")
@support.command(name="buttoncolor")
async def set_button_color(self, ctx: commands.Context, button_color: str):
"""Set button color(red/blue/green/grey only)"""
c = button_color.lower()
valid = ["red", "blue", "green", "grey", "gray"]
if c not in valid:
return await ctx.send("That is not a valid color, must be red, blue, green, or grey")
await self.config.guild(ctx.guild).bcolor.set(c)
await ctx.tick()
await self.refresh_tasks(str(ctx.guild.id))
@support.command(name="buttonemoji")
async def set_button_emoji(self, ctx: commands.Context, emoji: Union[discord.Emoji, discord.PartialEmoji]):
"""Set a button emoji"""
await self.config.guild(ctx.guild).emoji.set(str(emoji))
await ctx.tick()
await self.refresh_tasks(str(ctx.guild.id))
@support.command(name="tname")
async def set_def_ticket_name(self, ctx: commands.Context, *, default_name: str):
"""
Set the default ticket channel name
You can include the following in the name
`{num}` - Ticket number
`{user}` - user's name
`{id}` - user's ID
`{shortdate}` - mm-dd
`{longdate}` - mm-dd-yyyy
`{time}` - hh-mm AM/PM according to bot host system time
You can set this to {default} to use default "Ticket-Username
"""
await self.config.guild(ctx.guild).ticket_name.set(default_name)
await ctx.tick()
# TOGGLES --------------------------------------------------------------------------------
@support.command(name="ticketembed")
async def toggle_ticket_embed(self, ctx: commands.Context):
"""
(Toggle) Ticket message as an embed
When user opens a ticket, the formatted message will be an embed instead
"""
toggle = await self.config.guild(ctx.guild).embeds()
if toggle:
await self.config.guild(ctx.guild).embeds.set(False)
await ctx.send("Ticket message embeds have been **Disabled**")
else:
await self.config.guild(ctx.guild).embeds.set(True)
await ctx.send("Ticket message embeds have been **Enabled**")
@support.command(name="dm")
async def toggle_dms(self, ctx: commands.Context):
"""(Toggle) The bot sending DM's for ticket alerts"""
toggle = await self.config.guild(ctx.guild).dm()
if toggle:
await self.config.guild(ctx.guild).dm.set(False)
await ctx.send("DM alerts have been **Disabled**")
else:
await self.config.guild(ctx.guild).dm.set(True)
await ctx.send("DM alerts have been **Enabled**")
@support.command(name="selfrename")
async def toggle_rename(self, ctx: commands.Context):
"""(Toggle) If users can rename their own tickets"""
toggle = await self.config.guild(ctx.guild).user_can_rename()
if toggle:
await self.config.guild(ctx.guild).user_can_rename.set(False)
await ctx.send("User can no longer rename their support channel")
else:
await self.config.guild(ctx.guild).user_can_rename.set(True)
await ctx.send("User can now rename their support channel")
@support.command(name="selfclose")
async def toggle_selfclose(self, ctx: commands.Context):
"""(Toggle) If users can close their own tickets"""
toggle = await self.config.guild(ctx.guild).user_can_close()
if toggle:
await self.config.guild(ctx.guild).user_can_close.set(False)
await ctx.send("User can no longer close their support channel")
else:
await self.config.guild(ctx.guild).user_can_close.set(True)
await ctx.send("User can now close their support channel")
@support.command(name="selfmanage")
async def toggle_selfmanage(self, ctx: commands.Context):
"""
(Toggle) If users can manage their own tickets
Users will be able to add/remove others to their support ticket
"""
toggle = await self.config.guild(ctx.guild).user_can_manage()
if toggle:
await self.config.guild(ctx.guild).user_can_manage.set(False)
await ctx.send("User can no longer manage their support channel")
else:
await self.config.guild(ctx.guild).user_can_manage.set(True)
await ctx.send("User can now manage their support channel")
@support.command(name="autoclose")
async def toggle_autoclose(self, ctx: commands.Context):
"""(Toggle) Auto ticket close if user leaves guild"""
toggle = await self.config.guild(ctx.guild).auto_close()
if toggle:
await self.config.guild(ctx.guild).auto_close.set(False)
await ctx.send("Tickets will no longer be closed if a user leaves the guild")
else:
await self.config.guild(ctx.guild).auto_close.set(True)
await ctx.send("Tickets will now be closed if a user leaves the guild")
@support.command(name="transcript")
async def toggle_transcript(self, ctx: commands.Context):
"""
(Toggle) Ticket transcripts
Closed tickets will have their transcripts uploaded to the log channel
"""
toggle = await self.config.guild(ctx.guild).transcript()
if toggle:
await self.config.guild(ctx.guild).transcript.set(False)
await ctx.send("Transcripts of closed tickets will no longer be saved")
else:
await self.config.guild(ctx.guild).transcript.set(True)
await ctx.send("Transcripts of closed tickets will now be saved") | support/commands.py | import asyncio
from typing import Union
import discord
from redbot.core import commands
from redbot.core.utils.chat_formatting import box
class SupportCommands(commands.Cog):
@commands.group(name="supportset", aliases=["sset"])
@commands.guild_only()
@commands.admin()
async def support(self, ctx: commands.Context):
"""Base support settings"""
pass
# Check running button tasks and update guild task if exists
async def refresh_tasks(self, guild_id: str):
for task in asyncio.all_tasks():
if guild_id == task.get_name():
task.cancel()
await self.add_components()
@support.command(name="view")
async def view_settings(self, ctx: commands.Context):
"""View support settings"""
conf = await self.config.guild(ctx.guild).all()
category = self.bot.get_channel(conf["category"])
if not category:
category = conf['category']
button_channel = self.bot.get_channel(conf['channel_id'])
if button_channel:
button_channel = button_channel.mention
else:
button_channel = conf['channel_id']
msg = f"`Ticket Category: `{category}\n" \
f"`Button MessageID: `{conf['message_id']}\n" \
f"`Button Channel: `{button_channel}\n" \
f"`Max Tickets: `{conf['max_tickets']}\n" \
f"`Button Content: `{conf['button_content']}\n" \
f"`Button Emoji: `{conf['emoji']}\n" \
f"`DM Alerts: `{conf['dm']}\n" \
f"`Users can Rename: `{conf['user_can_rename']}\n" \
f"`Users can Close: `{conf['user_can_close']}\n" \
f"`Users can Manage: `{conf['user_can_manage']}\n" \
f"`Save Transcripts: `{conf['transcript']}\n" \
f"`Auto Close: `{conf['auto_close']}\n" \
f"`Ticket Name: `{conf['ticket_name']}\n"
log = conf["log"]
if log:
lchannel = ctx.guild.get_channel(log)
if lchannel:
msg += f"`Log Channel: `{lchannel.mention}\n"
else:
msg += f"`Log Channel: `{log}\n"
support = conf["support"]
suproles = ""
if support:
for role_id in support:
role = ctx.guild.get_role(role_id)
if role:
suproles += f"{role.mention}\n"
blacklist = conf["blacklist"]
busers = ""
if blacklist:
for user_id in blacklist:
user = ctx.guild.get_member(user_id)
if user:
busers += f"{user.name}-{user.id}\n"
else:
busers += f"LeftGuild-{user_id}\n"
embed = discord.Embed(
title="Support Settings",
description=msg,
color=discord.Color.random()
)
if suproles:
embed.add_field(
name="Support Roles",
value=suproles,
inline=False
)
if busers:
embed.add_field(
name="Blacklisted Users",
value=busers,
inline=False
)
if conf["message"] != "{default}":
embed.add_field(
name="Ticket Message",
value=box(conf["message"]),
inline=False
)
await ctx.send(embed=embed)
@support.command(name="category")
async def category(self, ctx: commands.Context, category: discord.CategoryChannel):
"""Set the category ticket channels will be created in"""
if not category.permissions_for(ctx.guild.me).manage_channels:
return await ctx.send(
"I do not have 'Manage Channels' permissions in that category"
)
await self.config.guild(ctx.guild).category.set(category.id)
await ctx.send(f"Tickets will now be created in the {category.name} category")
@support.command(name="supportmessage")
async def set_support_button_message(self, ctx: commands.Context, message_id: discord.Message):
"""
Set the support ticket message
The support button will be added to this message
"""
if not message_id.channel.permissions_for(ctx.guild.me).view_channel:
return await ctx.send("I cant see that channel")
if not message_id.channel.permissions_for(ctx.guild.me).read_messages:
return await ctx.send("I cant read messages in that channel")
if not message_id.channel.permissions_for(ctx.guild.me).read_message_history:
return await ctx.send("I cant read message history in that channel")
if message_id.author.id != self.bot.user.id:
return await ctx.send("I can only add buttons to my own messages!")
await self.config.guild(ctx.guild).message_id.set(message_id.id)
await self.config.guild(ctx.guild).channel_id.set(message_id.channel.id)
await ctx.send("Support ticket message has been set!")
await self.refresh_tasks(str(ctx.guild.id))
@support.command(name="ticketmessage")
async def set_support_ticket_message(self, ctx: commands.Context, *, message: str):
"""
Set the message sent when a ticket is opened
You can include any of these in the message to be replaced by their value when the message is sent
`{username}` - Person's Discord username
`{mention}` - This will mention the user
`{id}` - This is the ID of the user that created the ticket
You can set this to {default} to restore original settings
"""
if len(message) > 1024:
return await ctx.send("Message length is too long! Must be less than 1024 chars")
await self.config.guild(ctx.guild).message.set(message)
if message.lower() == "default":
await ctx.send("Message has been reset to default")
else:
await ctx.send("Message has been set!")
@support.command(name="supportrole")
async def set_support_role(self, ctx: commands.Context, *, role: discord.Role):
"""
Add/Remove ticket support roles (one at a time)
To remove a role, simply run this command with it again to remove it
"""
async with self.config.guild(ctx.guild).support() as roles:
if role.id in roles:
roles.remove(role.id)
await ctx.send(f"{role.name} has been removed from support roles")
else:
roles.append(role.id)
await ctx.send(f"{role.name} has been added to support roles")
@support.command(name="blacklist")
async def set_user_blacklist(self, ctx: commands.Context, *, user: discord.Member):
"""
Add/Remove users from the blacklist
Users in the blacklist will not be able to create a ticket
"""
async with self.config.guild(ctx.guild).blacklist() as bl:
if user.id in bl:
bl.remove(user.id)
await ctx.send(f"{user.name} has been removed from the blacklist")
else:
bl.append(user.id)
await ctx.send(f"{user.name} has been added to the blacklist")
@support.command(name="maxtickets")
async def set_max_tickets(self, ctx: commands.Context, max_tickets: int):
"""Set the max amount of tickets a user can have opened"""
await self.config.guild(ctx.guild).max_tickets.set(max_tickets)
await ctx.tick()
@support.command(name="logchannel")
async def set_log_channel(self, ctx: commands.Context, *, log_channel: discord.TextChannel):
"""Set the log channel"""
await self.config.guild(ctx.guild).log.set(log_channel.id)
await ctx.tick()
@support.command(name="buttoncontent")
async def set_button_content(self, ctx: commands.Context, *, button_content: str):
"""Set what you want the support button to say"""
if len(button_content) <= 80:
await self.config.guild(ctx.guild).button_content.set(button_content)
await ctx.tick()
await self.refresh_tasks(str(ctx.guild.id))
else:
await ctx.send("Button content is too long! Must be less than 80 characters")
@support.command(name="buttoncolor")
async def set_button_color(self, ctx: commands.Context, button_color: str):
"""Set button color(red/blue/green/grey only)"""
c = button_color.lower()
valid = ["red", "blue", "green", "grey", "gray"]
if c not in valid:
return await ctx.send("That is not a valid color, must be red, blue, green, or grey")
await self.config.guild(ctx.guild).bcolor.set(c)
await ctx.tick()
await self.refresh_tasks(str(ctx.guild.id))
@support.command(name="buttonemoji")
async def set_button_emoji(self, ctx: commands.Context, emoji: Union[discord.Emoji, discord.PartialEmoji]):
"""Set a button emoji"""
await self.config.guild(ctx.guild).emoji.set(str(emoji))
await ctx.tick()
await self.refresh_tasks(str(ctx.guild.id))
@support.command(name="tname")
async def set_def_ticket_name(self, ctx: commands.Context, *, default_name: str):
"""
Set the default ticket channel name
You can include the following in the name
`{num}` - Ticket number
`{user}` - user's name
`{id}` - user's ID
`{shortdate}` - mm-dd
`{longdate}` - mm-dd-yyyy
`{time}` - hh-mm AM/PM according to bot host system time
You can set this to {default} to use default "Ticket-Username
"""
await self.config.guild(ctx.guild).ticket_name.set(default_name)
await ctx.tick()
# TOGGLES --------------------------------------------------------------------------------
@support.command(name="ticketembed")
async def toggle_ticket_embed(self, ctx: commands.Context):
"""
(Toggle) Ticket message as an embed
When user opens a ticket, the formatted message will be an embed instead
"""
toggle = await self.config.guild(ctx.guild).embeds()
if toggle:
await self.config.guild(ctx.guild).embeds.set(False)
await ctx.send("Ticket message embeds have been **Disabled**")
else:
await self.config.guild(ctx.guild).embeds.set(True)
await ctx.send("Ticket message embeds have been **Enabled**")
@support.command(name="dm")
async def toggle_dms(self, ctx: commands.Context):
"""(Toggle) The bot sending DM's for ticket alerts"""
toggle = await self.config.guild(ctx.guild).dm()
if toggle:
await self.config.guild(ctx.guild).dm.set(False)
await ctx.send("DM alerts have been **Disabled**")
else:
await self.config.guild(ctx.guild).dm.set(True)
await ctx.send("DM alerts have been **Enabled**")
@support.command(name="selfrename")
async def toggle_rename(self, ctx: commands.Context):
"""(Toggle) If users can rename their own tickets"""
toggle = await self.config.guild(ctx.guild).user_can_rename()
if toggle:
await self.config.guild(ctx.guild).user_can_rename.set(False)
await ctx.send("User can no longer rename their support channel")
else:
await self.config.guild(ctx.guild).user_can_rename.set(True)
await ctx.send("User can now rename their support channel")
@support.command(name="selfclose")
async def toggle_selfclose(self, ctx: commands.Context):
"""(Toggle) If users can close their own tickets"""
toggle = await self.config.guild(ctx.guild).user_can_close()
if toggle:
await self.config.guild(ctx.guild).user_can_close.set(False)
await ctx.send("User can no longer close their support channel")
else:
await self.config.guild(ctx.guild).user_can_close.set(True)
await ctx.send("User can now close their support channel")
@support.command(name="selfmanage")
async def toggle_selfmanage(self, ctx: commands.Context):
"""
(Toggle) If users can manage their own tickets
Users will be able to add/remove others to their support ticket
"""
toggle = await self.config.guild(ctx.guild).user_can_manage()
if toggle:
await self.config.guild(ctx.guild).user_can_manage.set(False)
await ctx.send("User can no longer manage their support channel")
else:
await self.config.guild(ctx.guild).user_can_manage.set(True)
await ctx.send("User can now manage their support channel")
@support.command(name="autoclose")
async def toggle_autoclose(self, ctx: commands.Context):
"""(Toggle) Auto ticket close if user leaves guild"""
toggle = await self.config.guild(ctx.guild).auto_close()
if toggle:
await self.config.guild(ctx.guild).auto_close.set(False)
await ctx.send("Tickets will no longer be closed if a user leaves the guild")
else:
await self.config.guild(ctx.guild).auto_close.set(True)
await ctx.send("Tickets will now be closed if a user leaves the guild")
@support.command(name="transcript")
async def toggle_transcript(self, ctx: commands.Context):
"""
(Toggle) Ticket transcripts
Closed tickets will have their transcripts uploaded to the log channel
"""
toggle = await self.config.guild(ctx.guild).transcript()
if toggle:
await self.config.guild(ctx.guild).transcript.set(False)
await ctx.send("Transcripts of closed tickets will no longer be saved")
else:
await self.config.guild(ctx.guild).transcript.set(True)
await ctx.send("Transcripts of closed tickets will now be saved") | 0.680772 | 0.073863 |
import os
import sys
import argparse
try:
assert (sys.version_info[0] == 3)
except:
sys.stderr.write("Please use Python-3.4 to run this program. Exiting now ...\n");
sys.exit(1)
def create_directory(directory):
if not os.path.isdir(directory):
os.system('mkdir %s' % directory)
else:
sys.stderr.write('Unable to create directory %s, it already exists. Overwriting!\n' % directory)
def create_parser():
""" Parse arguments """
parser = argparse.ArgumentParser(description="""
Perform enrichment analysis for a given comparisons listing (see wiki page:).
""", formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-c', '--comparisons', help='provide comparisons file.', required=True)
parser.add_argument('-f', '--feature_matrix', help='feature matrix in appropriate format.', required=True)
parser.add_argument('-o', '--output', help='output directory of results.', required=True)
args = parser.parse_args()
return args
# parse arguments
args = create_parser()
comparisons_file = args.comparisons
feature_matrix = args.feature_matrix
output_directory = os.path.abspath(args.output) + '/'
create_directory(output_directory)
strainlists_dir = output_directory + 'strainlists/'
create_directory(strainlists_dir)
enrichments_dir = output_directory + 'enrichments/'
create_directory(enrichments_dir)
scripts_directory = os.path.dirname(os.path.realpath(__file__))
enrichment_program = scripts_directory + '/FeatureMatrixToFisherExact.py'
tmp = {}
comp_id_to_name = {}
with open(comparisons_file) as ocf:
for line in ocf:
line = line.strip()
if not line: continue
ls = line.split('=')
if line.startswith('//') and len(tmp) > 0:
gAf = strainlists_dir + tmp['ID'] + '_groupA.txt'
gBf = strainlists_dir + tmp['ID'] + '_groupB.txt'
ogAf = open(gAf, 'w')
ogBf = open(gBf, 'w')
ogAf.write('\n'.join(tmp['GroupA'].split(',')) + '\n')
ogBf.write('\n'.join(tmp['GroupB'].split(',')) + '\n')
ogAf.close(); ogBf.close()
enrichment_result = enrichments_dir + tmp['ID'] + '.txt'
enrichment_cmd = ['python', enrichment_program, '-i', feature_matrix, '-a', gAf, '-b', gBf, '-o', enrichment_result]
os.system(' '.join(enrichment_cmd))
comp_id_to_name[tmp['ID']] = tmp['Name']
tmp = {}
else:
tmp[ls[0]] = '='.join(ls[1:])
if len(tmp) > 0:
gAf = strainlists_dir + tmp['ID'] + '_groupA.txt'
gBf = strainlists_dir + tmp['ID'] + '_groupB.txt'
ogAf = open(gAf, 'w')
ogBf = open(gBf, 'w')
ogAf.write('\n'.join(tmp['GroupA'].split(',')) + '\n')
ogBf.write('\n'.join(tmp['GroupB'].split(',')) + '\n')
ogAf.close();
ogBf.close()
enrichment_result = enrichments_dir + tmp['ID'] + '.txt'
enrichment_cmd = ['python', enrichment_program, '-i', feature_matrix, '-a', gAf, '-b', gBf, '-o', enrichment_result]
os.system(' '.join(enrichment_cmd))
comp_id_to_name[tmp['ID']] = tmp['Name']
final_results = open(output_directory + 'consolidated_results.txt', 'w')
final_filtered_results = open(output_directory + 'consolidated_results.filt.txt', 'w')
for j, f in enumerate(os.listdir(enrichments_dir)):
comparison_name = comp_id_to_name[f.split('.txt')[0]]
with open(enrichments_dir + f) as of:
for i, line in enumerate(of):
if i == 0 and j == 0:
final_results.write('comparison\t' + line)
final_filtered_results.write('comparison\t' + line)
continue
elif i == 0: continue
line = line.strip()
ls = line.split('\t')
qvalue = float(ls[2])
groupA_prop = float(ls[-2])
groupB_prop = float(ls[-1])
final_results.write(comparison_name + '\t' + line + '\n')
if qvalue < 0.05 and ( (groupA_prop >= 0.75 and groupB_prop <= 0.25) or (groupA_prop <= 0.25 and groupB_prop >= 0.75) ):
final_filtered_results.write(comparison_name + '\t' + line + '\n')
final_results.close()
final_filtered_results.close() | runFeatureTests.py | import os
import sys
import argparse
try:
assert (sys.version_info[0] == 3)
except:
sys.stderr.write("Please use Python-3.4 to run this program. Exiting now ...\n");
sys.exit(1)
def create_directory(directory):
if not os.path.isdir(directory):
os.system('mkdir %s' % directory)
else:
sys.stderr.write('Unable to create directory %s, it already exists. Overwriting!\n' % directory)
def create_parser():
""" Parse arguments """
parser = argparse.ArgumentParser(description="""
Perform enrichment analysis for a given comparisons listing (see wiki page:).
""", formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-c', '--comparisons', help='provide comparisons file.', required=True)
parser.add_argument('-f', '--feature_matrix', help='feature matrix in appropriate format.', required=True)
parser.add_argument('-o', '--output', help='output directory of results.', required=True)
args = parser.parse_args()
return args
# parse arguments
args = create_parser()
comparisons_file = args.comparisons
feature_matrix = args.feature_matrix
output_directory = os.path.abspath(args.output) + '/'
create_directory(output_directory)
strainlists_dir = output_directory + 'strainlists/'
create_directory(strainlists_dir)
enrichments_dir = output_directory + 'enrichments/'
create_directory(enrichments_dir)
scripts_directory = os.path.dirname(os.path.realpath(__file__))
enrichment_program = scripts_directory + '/FeatureMatrixToFisherExact.py'
tmp = {}
comp_id_to_name = {}
with open(comparisons_file) as ocf:
for line in ocf:
line = line.strip()
if not line: continue
ls = line.split('=')
if line.startswith('//') and len(tmp) > 0:
gAf = strainlists_dir + tmp['ID'] + '_groupA.txt'
gBf = strainlists_dir + tmp['ID'] + '_groupB.txt'
ogAf = open(gAf, 'w')
ogBf = open(gBf, 'w')
ogAf.write('\n'.join(tmp['GroupA'].split(',')) + '\n')
ogBf.write('\n'.join(tmp['GroupB'].split(',')) + '\n')
ogAf.close(); ogBf.close()
enrichment_result = enrichments_dir + tmp['ID'] + '.txt'
enrichment_cmd = ['python', enrichment_program, '-i', feature_matrix, '-a', gAf, '-b', gBf, '-o', enrichment_result]
os.system(' '.join(enrichment_cmd))
comp_id_to_name[tmp['ID']] = tmp['Name']
tmp = {}
else:
tmp[ls[0]] = '='.join(ls[1:])
if len(tmp) > 0:
gAf = strainlists_dir + tmp['ID'] + '_groupA.txt'
gBf = strainlists_dir + tmp['ID'] + '_groupB.txt'
ogAf = open(gAf, 'w')
ogBf = open(gBf, 'w')
ogAf.write('\n'.join(tmp['GroupA'].split(',')) + '\n')
ogBf.write('\n'.join(tmp['GroupB'].split(',')) + '\n')
ogAf.close();
ogBf.close()
enrichment_result = enrichments_dir + tmp['ID'] + '.txt'
enrichment_cmd = ['python', enrichment_program, '-i', feature_matrix, '-a', gAf, '-b', gBf, '-o', enrichment_result]
os.system(' '.join(enrichment_cmd))
comp_id_to_name[tmp['ID']] = tmp['Name']
final_results = open(output_directory + 'consolidated_results.txt', 'w')
final_filtered_results = open(output_directory + 'consolidated_results.filt.txt', 'w')
for j, f in enumerate(os.listdir(enrichments_dir)):
comparison_name = comp_id_to_name[f.split('.txt')[0]]
with open(enrichments_dir + f) as of:
for i, line in enumerate(of):
if i == 0 and j == 0:
final_results.write('comparison\t' + line)
final_filtered_results.write('comparison\t' + line)
continue
elif i == 0: continue
line = line.strip()
ls = line.split('\t')
qvalue = float(ls[2])
groupA_prop = float(ls[-2])
groupB_prop = float(ls[-1])
final_results.write(comparison_name + '\t' + line + '\n')
if qvalue < 0.05 and ( (groupA_prop >= 0.75 and groupB_prop <= 0.25) or (groupA_prop <= 0.25 and groupB_prop >= 0.75) ):
final_filtered_results.write(comparison_name + '\t' + line + '\n')
final_results.close()
final_filtered_results.close() | 0.16043 | 0.148386 |
"""Benchmark for the dataflow GGNN pipeline."""
import contextlib
import os
import pathlib
import sys
import tempfile
import warnings
from typing import Iterator

from sklearn.exceptions import UndefinedMetricWarning
from tqdm import tqdm

from labm8.py import app
from labm8.py import ppar
from labm8.py import prof
from programl.models.ggnn.ggnn import Ggnn
from programl.proto import epoch_pb2
from programl.task.dataflow.ggnn_batch_builder import DataflowGgnnBatchBuilder
from programl.task.dataflow.graph_loader import DataflowGraphLoader
from programl.test.py.plugins import llvm_program_graph
from programl.test.py.plugins import llvm_reachability_features
# Command-line flags controlling the benchmark workload sizes.
app.DEFINE_integer("graph_count", None, "The number of graphs to load.")
app.DEFINE_integer("batch_size", 10000, "The size of batches.")
app.DEFINE_integer(
    "train_batch_count", 3, "The number of batches for testing model training"
)
app.DEFINE_integer(
    # Fixed copy-pasted help text: this flag sizes the inference benchmark.
    "test_batch_count", 3, "The number of batches for testing model inference"
)
FLAGS = app.FLAGS
@contextlib.contextmanager
def data_directory() -> Iterator[pathlib.Path]:
    """Create a temporary dataset directory laid out for dataflow benchmarks.

    Yields the directory root, containing `graphs`, `train`, `val` and `test`
    symlinks to the test LLVM-IR program graphs, plus a `labels/reachability`
    symlink to the precomputed reachability features. Everything is removed
    when the context exits.

    Note: the return annotation was `pathlib.Path`, but the undecorated
    generator yields paths, so `Iterator[pathlib.Path]` is the correct
    annotation for a `@contextlib.contextmanager` function.
    """
    with tempfile.TemporaryDirectory() as d:
        d = pathlib.Path(d)
        (d / "labels").mkdir()
        # Reuse the same read-only test graphs for every epoch split.
        os.symlink(llvm_program_graph.LLVM_IR_GRAPHS, d / "graphs")
        os.symlink(llvm_program_graph.LLVM_IR_GRAPHS, d / "train")
        os.symlink(llvm_program_graph.LLVM_IR_GRAPHS, d / "val")
        os.symlink(llvm_program_graph.LLVM_IR_GRAPHS, d / "test")
        os.symlink(
            llvm_reachability_features.LLVM_REACHABILITY_FEATURES,
            d / "labels" / "reachability",
        )
        yield d
def GraphLoader(path: pathlib.Path, use_cdfg: bool = False):
    """Construct a TRAIN-epoch reachability `DataflowGraphLoader` over `path`.

    Args:
        path: Dataset root directory (e.g. as produced by `data_directory`).
        use_cdfg: Whether to convert loaded graphs to CDFG form.
    """
    # NOTE(review): the logfile handle opened here is never closed in this
    # function; presumably DataflowGraphLoader takes ownership — confirm.
    return DataflowGraphLoader(
        path=path,
        epoch_type=epoch_pb2.TRAIN,
        analysis="reachability",
        min_graph_count=FLAGS.graph_count or 1,  # require at least one graph
        max_graph_count=FLAGS.graph_count,  # None = no upper bound
        logfile=open(path / "graph_reader_log.txt", "w"),
        use_cdfg=use_cdfg,
    )
def BatchBuilder(
    graph_loader, vocab, max_batch_count=None, use_cdfg: bool = False
):
    """Build a `DataflowGgnnBatchBuilder` over `graph_loader` using `vocab`.

    The per-batch node budget comes from --batch_size; `max_batch_count`
    optionally caps how many batches are produced.
    """
    builder_kwargs = {
        "graph_loader": graph_loader,
        "vocabulary": vocab,
        "max_node_size": FLAGS.batch_size,
        "max_batch_count": max_batch_count,
        "use_cdfg": use_cdfg,
    }
    return DataflowGgnnBatchBuilder(**builder_kwargs)
def Vocab():
    """Return a minimal vocabulary mapping the empty token to index 0."""
    vocabulary = {}
    vocabulary[""] = 0
    return vocabulary
def Print(msg):
    """Print `msg` preceded by a blank line, then flush stdout immediately."""
    sys.stdout.write("\n%s\n" % msg)
    sys.stdout.flush()
def Main():
    """Benchmark graph loading, batch construction, GGNN training and inference.

    Runs against a temporary dataset of test LLVM-IR graphs created by
    `data_directory`. Each stage is timed with `prof.Profile` and shows
    progress via tqdm; workload sizes come from the module flags.
    """
    # NOTE(github.com/ChrisCummins/ProGraML/issues/13): F1 score computation
    # warns that it is undefined when there are missing instances from a class,
    # which is fine for our usage.
    warnings.filterwarnings("ignore", category=UndefinedMetricWarning)
    with data_directory() as path:
        Print("=== BENCHMARK 1: Loading graphs from filesystem ===")
        graph_loader = GraphLoader(path)
        # Drain the loader through a background-threaded iterator so the
        # timing reflects producer throughput, not blocking reads.
        graphs = ppar.ThreadedIterator(graph_loader, max_queue_size=100)
        with prof.Profile("Benchmark graph loader"):
            for _ in tqdm(graphs, unit=" graphs"):
                pass
        app.Log(1, "Skip count: %s", graph_loader.skip_count)
        Print(
            "=== BENCHMARK 1: Loading graphs from filesystem and converting to CDFG ==="
        )
        graph_loader = GraphLoader(path, use_cdfg=True)
        graphs = ppar.ThreadedIterator(graph_loader, max_queue_size=100)
        with prof.Profile("Benchmark CDFG graph loader"):
            for _ in tqdm(graphs, unit=" graphs"):
                pass
        app.Log(1, "Skip count: %s", graph_loader.skip_count)
        Print("=== BENCHMARK 2: Batch construction ===")
        batches = BatchBuilder(GraphLoader(path), Vocab())
        batches = ppar.ThreadedIterator(batches, max_queue_size=100)
        cached_batches = []
        with prof.Profile("Benchmark batch construction"):
            for batch in tqdm(batches, unit=" batches"):
                cached_batches.append(batch)
        Print("=== BENCHMARK 2: CDFG batch construction ===")
        batches = BatchBuilder(
            GraphLoader(path, use_cdfg=True), Vocab(), use_cdfg=True
        )
        batches = ppar.ThreadedIterator(batches, max_queue_size=100)
        # NOTE(review): this rebinding discards the plain batches cached above,
        # so benchmarks 3 and 4 below run on the CDFG batches — confirm that
        # this is intended.
        cached_batches = []
        with prof.Profile("Benchmark batch construction"):
            for batch in tqdm(batches, unit=" batches"):
                cached_batches.append(batch)
        Print("=== BENCHMARK 3: Model training ===")
        # Binary node classification (reachable / not-reachable); no
        # graph-level features or labels.
        model = Ggnn(
            vocabulary=Vocab(),
            node_y_dimensionality=2,
            graph_y_dimensionality=0,
            graph_x_dimensionality=0,
            use_selector_embeddings=True,
        )
        with prof.Profile("Benchmark training (prebuilt batches)"):
            model.RunBatches(
                epoch_pb2.TRAIN,
                cached_batches[: FLAGS.train_batch_count],
                log_prefix="Train",
                total_graph_count=sum(
                    b.graph_count for b in cached_batches[: FLAGS.train_batch_count]
                ),
            )
        # Same training workload, but with batch construction done on the fly.
        with prof.Profile("Benchmark training"):
            model.RunBatches(
                epoch_pb2.TRAIN,
                BatchBuilder(GraphLoader(path), Vocab(), FLAGS.train_batch_count),
                log_prefix="Train",
            )
        Print("=== BENCHMARK 4: Model inference ===")
        model = Ggnn(
            vocabulary=Vocab(),
            test_only=True,
            node_y_dimensionality=2,
            graph_y_dimensionality=0,
            graph_x_dimensionality=0,
            use_selector_embeddings=True,
        )
        with prof.Profile("Benchmark inference (prebuilt batches)"):
            model.RunBatches(
                epoch_pb2.TEST,
                cached_batches[: FLAGS.test_batch_count],
                log_prefix="Val",
                total_graph_count=sum(
                    b.graph_count for b in cached_batches[: FLAGS.test_batch_count]
                ),
            )
        with prof.Profile("Benchmark inference"):
            model.RunBatches(
                epoch_pb2.TEST,
                BatchBuilder(GraphLoader(path), Vocab(), FLAGS.test_batch_count),
                log_prefix="Val",
            )
if __name__ == "__main__":
app.Run(Main) | programl/test/benchmarks/benchmark_dataflow_ggnn.py | """Benchmark for the dataflow GGNN pipeline."""
import contextlib
import os
import pathlib
import sys
import tempfile
import warnings
from sklearn.exceptions import UndefinedMetricWarning
from tqdm import tqdm
from labm8.py import app
from labm8.py import ppar
from labm8.py import prof
from programl.models.ggnn.ggnn import Ggnn
from programl.proto import epoch_pb2
from programl.task.dataflow.ggnn_batch_builder import DataflowGgnnBatchBuilder
from programl.task.dataflow.graph_loader import DataflowGraphLoader
from programl.test.py.plugins import llvm_program_graph
from programl.test.py.plugins import llvm_reachability_features
# Command-line flags controlling the benchmark workload sizes.
app.DEFINE_integer("graph_count", None, "The number of graphs to load.")
app.DEFINE_integer("batch_size", 10000, "The size of batches.")
app.DEFINE_integer(
    "train_batch_count", 3, "The number of batches for testing model training"
)
app.DEFINE_integer(
    # Fixed copy-pasted help text: this flag sizes the inference benchmark.
    "test_batch_count", 3, "The number of batches for testing model inference"
)
FLAGS = app.FLAGS
@contextlib.contextmanager
def data_directory() -> Iterator[pathlib.Path]:
    """Create a temporary dataset directory laid out for dataflow benchmarks.

    Yields the directory root, containing `graphs`, `train`, `val` and `test`
    symlinks to the test LLVM-IR program graphs, plus a `labels/reachability`
    symlink to the precomputed reachability features. Everything is removed
    when the context exits.

    Note: the return annotation was `pathlib.Path`, but the undecorated
    generator yields paths, so `Iterator[pathlib.Path]` is the correct
    annotation for a `@contextlib.contextmanager` function.
    """
    with tempfile.TemporaryDirectory() as d:
        d = pathlib.Path(d)
        (d / "labels").mkdir()
        # Reuse the same read-only test graphs for every epoch split.
        os.symlink(llvm_program_graph.LLVM_IR_GRAPHS, d / "graphs")
        os.symlink(llvm_program_graph.LLVM_IR_GRAPHS, d / "train")
        os.symlink(llvm_program_graph.LLVM_IR_GRAPHS, d / "val")
        os.symlink(llvm_program_graph.LLVM_IR_GRAPHS, d / "test")
        os.symlink(
            llvm_reachability_features.LLVM_REACHABILITY_FEATURES,
            d / "labels" / "reachability",
        )
        yield d
def GraphLoader(path: pathlib.Path, use_cdfg: bool = False):
    """Construct a TRAIN-epoch reachability `DataflowGraphLoader` over `path`.

    Args:
        path: Dataset root directory (e.g. as produced by `data_directory`).
        use_cdfg: Whether to convert loaded graphs to CDFG form.
    """
    # NOTE(review): the logfile handle opened here is never closed in this
    # function; presumably DataflowGraphLoader takes ownership — confirm.
    return DataflowGraphLoader(
        path=path,
        epoch_type=epoch_pb2.TRAIN,
        analysis="reachability",
        min_graph_count=FLAGS.graph_count or 1,  # require at least one graph
        max_graph_count=FLAGS.graph_count,  # None = no upper bound
        logfile=open(path / "graph_reader_log.txt", "w"),
        use_cdfg=use_cdfg,
    )
def BatchBuilder(
    graph_loader, vocab, max_batch_count=None, use_cdfg: bool = False
):
    """Build a `DataflowGgnnBatchBuilder` over `graph_loader` using `vocab`.

    Args:
        graph_loader: Source of program graphs to batch.
        vocab: Token vocabulary mapping.
        max_batch_count: Optional cap on the number of batches produced.
        use_cdfg: Whether the batches are built from CDFG graphs.
    """
    return DataflowGgnnBatchBuilder(
        graph_loader=graph_loader,
        vocabulary=vocab,
        max_node_size=FLAGS.batch_size,  # per-batch node budget
        max_batch_count=max_batch_count,
        use_cdfg=use_cdfg,
    )
def Vocab():
    """Return a minimal vocabulary: only the empty token, at index 0."""
    return dict.fromkeys([""], 0)
def Print(msg):
    """Emit a blank separator line followed by `msg`, flushing stdout."""
    print("", msg, sep="\n", flush=True)
def Main():
# NOTE(github.com/ChrisCummins/ProGraML/issues/13): F1 score computation
# warns that it is undefined when there are missing instances from a class,
# which is fine for our usage.
warnings.filterwarnings("ignore", category=UndefinedMetricWarning)
with data_directory() as path:
Print("=== BENCHMARK 1: Loading graphs from filesystem ===")
graph_loader = GraphLoader(path)
graphs = ppar.ThreadedIterator(graph_loader, max_queue_size=100)
with prof.Profile("Benchmark graph loader"):
for _ in tqdm(graphs, unit=" graphs"):
pass
app.Log(1, "Skip count: %s", graph_loader.skip_count)
Print(
"=== BENCHMARK 1: Loading graphs from filesystem and converting to CDFG ==="
)
graph_loader = GraphLoader(path, use_cdfg=True)
graphs = ppar.ThreadedIterator(graph_loader, max_queue_size=100)
with prof.Profile("Benchmark CDFG graph loader"):
for _ in tqdm(graphs, unit=" graphs"):
pass
app.Log(1, "Skip count: %s", graph_loader.skip_count)
Print("=== BENCHMARK 2: Batch construction ===")
batches = BatchBuilder(GraphLoader(path), Vocab())
batches = ppar.ThreadedIterator(batches, max_queue_size=100)
cached_batches = []
with prof.Profile("Benchmark batch construction"):
for batch in tqdm(batches, unit=" batches"):
cached_batches.append(batch)
Print("=== BENCHMARK 2: CDFG batch construction ===")
batches = BatchBuilder(
GraphLoader(path, use_cdfg=True), Vocab(), use_cdfg=True
)
batches = ppar.ThreadedIterator(batches, max_queue_size=100)
cached_batches = []
with prof.Profile("Benchmark batch construction"):
for batch in tqdm(batches, unit=" batches"):
cached_batches.append(batch)
Print("=== BENCHMARK 3: Model training ===")
model = Ggnn(
vocabulary=Vocab(),
node_y_dimensionality=2,
graph_y_dimensionality=0,
graph_x_dimensionality=0,
use_selector_embeddings=True,
)
with prof.Profile("Benchmark training (prebuilt batches)"):
model.RunBatches(
epoch_pb2.TRAIN,
cached_batches[: FLAGS.train_batch_count],
log_prefix="Train",
total_graph_count=sum(
b.graph_count for b in cached_batches[: FLAGS.train_batch_count]
),
)
with prof.Profile("Benchmark training"):
model.RunBatches(
epoch_pb2.TRAIN,
BatchBuilder(GraphLoader(path), Vocab(), FLAGS.train_batch_count),
log_prefix="Train",
)
Print("=== BENCHMARK 4: Model inference ===")
model = Ggnn(
vocabulary=Vocab(),
test_only=True,
node_y_dimensionality=2,
graph_y_dimensionality=0,
graph_x_dimensionality=0,
use_selector_embeddings=True,
)
with prof.Profile("Benchmark inference (prebuilt batches)"):
model.RunBatches(
epoch_pb2.TEST,
cached_batches[: FLAGS.test_batch_count],
log_prefix="Val",
total_graph_count=sum(
b.graph_count for b in cached_batches[: FLAGS.test_batch_count]
),
)
with prof.Profile("Benchmark inference"):
model.RunBatches(
epoch_pb2.TEST,
BatchBuilder(GraphLoader(path), Vocab(), FLAGS.test_batch_count),
log_prefix="Val",
)
if __name__ == "__main__":
app.Run(Main) | 0.59561 | 0.280244 |
import torch
from torch import nn, transpose
from torch.autograd import Variable
from torch.nn import functional as F
class FCNet(nn.Module):
    """Per-task two-layer MLP regressor.

    Each task gets its own small head (inp_size -> 128 -> 16 -> 1) applied to
    the input vector (time-averaged over dim 1 when inp_len > 1).
    """
    def __init__(self, shape, task_num):
        """shape: (batch, inp_len, inp_size); task_num: number of output heads."""
        super(FCNet, self).__init__()
        print('Intializing FCNet...')
        self.inp_len = shape[1]
        self.inp_size = shape[2]
        self.task_num = task_num
        self.hidden_dim_1 = 128
        self.hidden_dim_2 = 16
        self.fc1_lst = nn.ModuleList()
        self.fc2_lst = nn.ModuleList()
        self.fc3_lst = nn.ModuleList()
        for _ in range(self.task_num):
            self.fc1_lst.append(nn.Linear(in_features=self.inp_size, out_features=self.hidden_dim_1))
            self.fc2_lst.append(nn.Linear(in_features=self.hidden_dim_1, out_features=self.hidden_dim_2))
            self.fc3_lst.append(nn.Linear(in_features=self.hidden_dim_2, out_features=1))

    def forward(self, x):
        """Return (outputs, feature_vecs, None): per-task scalar predictions
        and the 16-dim hidden features they were computed from."""
        if self.inp_len > 1:
            x = x.mean(dim=1)  # average over the sequence dimension
        outputs = []
        feature_vecs = []
        for i in range(self.task_num):
            # BUG FIX: the original overwrote `x` with the 16-dim hidden
            # activation, so every task after the first fed the wrong tensor
            # into fc1 (shape mismatch crash for task_num > 1). Use a
            # per-task local, as AutoregressiveFCNet already does.
            task_x = F.relu(self.fc1_lst[i](x))
            task_x = F.relu(self.fc2_lst[i](task_x))
            feature_vecs.append(task_x)
            outputs.append(self.fc3_lst[i](task_x).reshape(-1))
        return outputs, feature_vecs, None
class AutoregressiveFCNet(nn.Module):
    """Per-task MLP regressor that appends two autoregressive features
    to the (time-averaged) input vector before each task head."""

    def __init__(self, shape, task_num):
        """shape: (batch, inp_len, inp_size); task_num: number of heads."""
        super(AutoregressiveFCNet, self).__init__()
        print('Intializing AutoregressiveFCNet...')
        self.inp_len = shape[1]
        # +2 input features for the concatenated autoregressive signal
        self.inp_size = shape[2] + 2
        self.task_num = task_num
        self.hidden_dim_1 = 128
        self.hidden_dim_2 = 16
        self.fc1_lst = nn.ModuleList()
        self.fc2_lst = nn.ModuleList()
        self.fc3_lst = nn.ModuleList()
        for _ in range(self.task_num):
            self.fc1_lst.append(nn.Linear(in_features=self.inp_size, out_features=self.hidden_dim_1))
            self.fc2_lst.append(nn.Linear(in_features=self.hidden_dim_1, out_features=self.hidden_dim_2))
            self.fc3_lst.append(nn.Linear(in_features=self.hidden_dim_2, out_features=1))

    def forward(self, x, auto_x):
        """Return (outputs, feature_vecs, None) given input x and the
        2-feature autoregressive tensor auto_x."""
        if self.inp_len > 1:
            x = x.mean(dim=1)  # collapse the sequence dimension
        outputs, feature_vecs = [], []
        for fc1, fc2, fc3 in zip(self.fc1_lst, self.fc2_lst, self.fc3_lst):
            combined = torch.cat([x, auto_x], 1)  # adding autoregressive features
            hidden = F.relu(fc1(combined))
            hidden = F.relu(fc2(hidden))
            feature_vecs.append(hidden)
            outputs.append(fc3(hidden).reshape(-1))
        return outputs, feature_vecs, None
class SimpleMultiTaskResNet(nn.Module):
    """1-D residual CNN trunk shared across tasks, with one small FC head
    per task.

    Input is (batch, inp_len, inp_size) and is transposed to channels-first
    for Conv1d. The flatten size int(1024 * 13) hard-codes the sequence
    length left after the three stride-2 convs -- this ties the model to a
    specific inp_len (presumably ~100; TODO confirm against the data
    pipeline). Optionally learns a soft per-position attention map first.
    """
    def __init__(self, shape, task_num, get_attention_maps=False):
        super(SimpleMultiTaskResNet, self).__init__()
        print('Intializing SimpleMultiTaskResNet...')
        self.get_attention_maps = get_attention_maps  # if True, forward also returns att_x
        self.inp_len = shape[1]   # sequence length
        self.inp_size = shape[2]  # features per position (Conv1d channels)
        self.task_num = task_num
        self.hidden_dim = 128
        self.fc2_dim = 128
        self.fc3_dim = 16
        if self.get_attention_maps:
            # two convs producing a per-position attention distribution
            self.att_conv1 = nn.Conv1d(in_channels=self.inp_size, out_channels=self.inp_size, kernel_size=5, padding=2, stride=1)
            #self.att_bn1 = nn.BatchNorm1d(self.inp_size)
            self.att_conv2 = nn.Conv1d(in_channels=self.inp_size, out_channels=self.inp_size, kernel_size=3, padding=1, stride=1)
            #self.att_bn2 = nn.BatchNorm1d(self.inp_size)
        # stem: widen to 256 channels, first stride-2 downsample
        self.conv11 = nn.Conv1d(in_channels=self.inp_size, out_channels=128, kernel_size=5, padding=1, stride=1)
        self.bn11 = nn.BatchNorm1d(128)
        self.conv12 = nn.Conv1d(in_channels=128, out_channels=256, kernel_size=3, padding=1, stride=2)
        self.bn12 = nn.BatchNorm1d(256)
        # residual block at 256 channels
        self.conv21 = nn.Conv1d(in_channels=256, out_channels=256, kernel_size=3, padding=1, stride=1)
        self.bn21 = nn.BatchNorm1d(256)
        self.conv22 = nn.Conv1d(in_channels=256, out_channels=256, kernel_size=3, padding=1, stride=1)
        self.bn22 = nn.BatchNorm1d(256)
        # stride-2 downsample to 512 channels
        self.conv3 = nn.Conv1d(in_channels=256, out_channels=512, kernel_size=3, padding=1, stride=2)
        self.bn3 = nn.BatchNorm1d(512)
        # residual block at 512 channels
        self.conv41 = nn.Conv1d(in_channels=512, out_channels=512, kernel_size=3, padding=1, stride=1)
        self.bn41 = nn.BatchNorm1d(512)
        self.conv42 = nn.Conv1d(in_channels=512, out_channels=512, kernel_size=3, padding=1, stride=1)
        self.bn42 = nn.BatchNorm1d(512)
        # stride-2 downsample to 1024 channels
        self.conv5 = nn.Conv1d(in_channels=512, out_channels=1024, kernel_size=3, padding=1, stride=2)
        self.bn5 = nn.BatchNorm1d(1024)
        # residual block at 1024 channels
        self.conv61 = nn.Conv1d(in_channels=1024, out_channels=1024, kernel_size=3, padding=1, stride=1)
        self.bn61 = nn.BatchNorm1d(1024)
        self.conv62 = nn.Conv1d(in_channels=1024, out_channels=1024, kernel_size=3, padding=1, stride=1)
        self.bn62 = nn.BatchNorm1d(1024)
        # one FC head (flattened trunk -> 128 -> 16 -> 1) per task
        self.fc1_lst = nn.ModuleList()
        self.fc2_lst = nn.ModuleList()
        self.fc3_lst = nn.ModuleList()
        for _ in range(self.task_num):
            self.fc1_lst.append(nn.Linear(in_features=int(1024 * 13), out_features=self.fc2_dim))
            self.fc2_lst.append(nn.Linear(in_features=self.fc2_dim, out_features=self.fc3_dim))
            self.fc3_lst.append(nn.Linear(in_features=self.fc3_dim, out_features=1))
    def forward(self, x: Variable) -> (Variable):
        """Return (outputs, feature_vecs, att_x-or-None)."""
        x = transpose(x, 1, 2)  # (batch, len, feat) -> (batch, feat, len) for Conv1d
        if self.get_attention_maps:
            #att_x = F.relu(self.att_bn1(self.att_conv1(x)))
            #att_x = F.relu(self.att_bn2(self.att_conv2(att_x)))
            att_x = F.relu(self.att_conv1(x))
            att_x = F.relu(self.att_conv2(att_x))
            att_x = F.softmax(att_x, dim=2)  # normalize over positions
            #att_x = torch.sigmoid(att_x) #* F.softmax(att_x.mean(dim=1).unsqueeze(1), dim=2).expand_as(x)
            #x = x * att_x.expand_as(x)
            x = x * att_x  # gate input by learned attention
        x = F.relu(self.bn11(self.conv11(x)))
        x = F.relu(self.bn12(self.conv12(x)))
        res = x  # identity shortcut for residual block 1
        x = F.relu(self.bn21(self.conv21(x)))
        x = F.relu(self.bn22(self.conv22(x)))
        x += res
        x = F.relu(self.bn3(self.conv3(x)))
        res = x  # shortcut for residual block 2
        x = F.relu(self.bn41(self.conv41(x)))
        x = F.relu(self.bn42(self.conv42(x)))
        x += res
        x = F.relu(self.bn5(self.conv5(x)))
        res = x  # shortcut for residual block 3
        x = F.relu(self.bn61(self.conv61(x)))
        x = F.relu(self.bn62(self.conv62(x)))
        x += res
        x = x.view(-1, int(1024 * 13))  # flatten; assumes length 13 remains
        outputs = []
        feature_vecs = []
        for i in range(self.task_num):
            task_x = F.relu(self.fc1_lst[i](x))
            task_x = F.relu(self.fc2_lst[i](task_x))
            feature_vecs.append(task_x)
            outputs.append(self.fc3_lst[i](task_x).reshape(-1))
        if self.get_attention_maps: return outputs, feature_vecs, att_x
        return outputs, feature_vecs, None
class MultiTaskCNN(nn.Module):
    """Multi-task 1-D CNN with learned linear shortcut connections.

    Each stage downsamples with a stride-2 conv and adds a dense projection
    (w1..w5) of the previous stage's flattened activations as a shortcut.
    The hard-coded widths (128*51, 256*27, ...) tie the model to a specific
    input length (inp_len ~= 100 -- TODO confirm against the data pipeline).
    One FC head per task produces a scalar prediction.
    """
    def __init__(self, shape, task_num):
        """shape: (batch, inp_len, inp_size); task_num: number of heads."""
        # BUG FIX: the original called super(SimpleMultiTaskResNet, self),
        # which raises TypeError at construction time since self is not a
        # SimpleMultiTaskResNet instance.
        super(MultiTaskCNN, self).__init__()
        print('Intializing MultiTaskCNN...')
        self.inp_len = shape[1]
        self.inp_size = shape[2]
        self.task_num = task_num
        self.hidden_dim = 128
        self.fc2_dim = 128
        self.conv_base = nn.Conv1d(in_channels=self.inp_size, out_channels=128, kernel_size=5, padding=3, stride=2)
        # stage 1: 128ch/len51 -> 256ch/len27, shortcut w1
        self.bn11 = nn.BatchNorm1d(128)
        self.conv11 = nn.Conv1d(in_channels=128, out_channels=256, kernel_size=3, padding=2, stride=2)
        self.bn12 = nn.BatchNorm1d(256)
        self.conv12 = nn.Conv1d(in_channels=256, out_channels=256, kernel_size=3, padding=1, stride=1)
        self.w1 = nn.Linear(in_features=128 * 51, out_features=256 * 27)
        # stage 2: len27 -> len15, shortcut w2
        self.bn21 = nn.BatchNorm1d(256)
        self.conv21 = nn.Conv1d(in_channels=256, out_channels=256, kernel_size=3, padding=2, stride=2)
        self.bn22 = nn.BatchNorm1d(256)
        self.conv22 = nn.Conv1d(in_channels=256, out_channels=256, kernel_size=3, padding=1, stride=1)
        self.w2 = nn.Linear(in_features=256 * 27, out_features=256 * 15)
        # stage 3: 256ch/len15 -> 512ch/len9, shortcut w3
        self.bn31 = nn.BatchNorm1d(256)
        self.conv31 = nn.Conv1d(in_channels=256, out_channels=512, kernel_size=3, padding=2, stride=2)
        self.bn32 = nn.BatchNorm1d(512)
        self.conv32 = nn.Conv1d(in_channels=512, out_channels=512, kernel_size=3, padding=1, stride=1)
        self.w3 = nn.Linear(in_features=256 * 15, out_features=512 * 9)
        # stage 4: 512ch/len9 -> 1024ch/len6, shortcut w4
        self.bn41 = nn.BatchNorm1d(512)
        self.conv41 = nn.Conv1d(in_channels=512, out_channels=1024, kernel_size=3, padding=2, stride=2)
        self.bn42 = nn.BatchNorm1d(1024)
        self.conv42 = nn.Conv1d(in_channels=1024, out_channels=1024, kernel_size=3, padding=1, stride=1)
        self.w4 = nn.Linear(in_features=512 * 9, out_features=1024 * 6)
        # stage 5: len6 -> len3, shortcut w5
        self.bn51 = nn.BatchNorm1d(1024)
        self.conv51 = nn.Conv1d(in_channels=1024, out_channels=1024, kernel_size=3, padding=1, stride=2)
        self.bn52 = nn.BatchNorm1d(1024)
        self.conv52 = nn.Conv1d(in_channels=1024, out_channels=1024, kernel_size=3, padding=1, stride=1)
        self.w5 = nn.Linear(in_features=1024 * 6, out_features=1024 * 3)
        # per-task heads (flattened trunk -> 128 -> 1)
        self.fc1_lst = nn.ModuleList()
        self.fc2_lst = nn.ModuleList()
        for _ in range(self.task_num):
            self.fc1_lst.append(nn.Linear(in_features=int(1024 * 3), out_features=self.fc2_dim))
            self.fc2_lst.append(nn.Linear(in_features=self.fc2_dim, out_features=1))

    def forward(self, x):
        """Return a list of per-task scalar predictions (one tensor per task).

        NOTE(review): unlike the sibling models, this forward returns only
        `outputs` (no feature vectors) -- kept for interface compatibility.
        """
        x = F.relu(self.conv_base(transpose(x, 1, 2)))  # to channels-first, then stem
        res = x.view(-1, 128 * 51)
        x = self.conv11(F.relu(self.bn11(x)))  # pre-activation ordering (BN+ReLU before conv)
        x = self.conv12(F.relu(self.bn12(x)))
        x += self.w1(res).view(-1, 256, 27)  # dense shortcut
        res = x.view(-1, 256 * 27)
        x = self.conv21(F.relu(self.bn21(x)))
        x = self.conv22(F.relu(self.bn22(x)))
        x += self.w2(res).view(-1, 256, 15)
        res = x.view(-1, 256 * 15)
        x = self.conv31(F.relu(self.bn31(x)))
        x = self.conv32(F.relu(self.bn32(x)))
        x += self.w3(res).view(-1, 512, 9)
        res = x.view(-1, 512 * 9)
        x = self.conv41(F.relu(self.bn41(x)))
        x = self.conv42(F.relu(self.bn42(x)))
        x += self.w4(res).view(-1, 1024, 6)
        res = x.view(-1, 1024 * 6)
        x = self.conv51(F.relu(self.bn51(x)))
        x = self.conv52(F.relu(self.bn52(x)))
        x += self.w5(res).view(-1, 1024, 3)
        x = x.view(-1, int(1024 * 3))
        outputs = []
        for i in range(self.task_num):
            task_x = F.relu(self.fc1_lst[i](x))
            outputs.append(self.fc2_lst[i](task_x).reshape(-1))
        return outputs
class AutoregressiveMultiTaskResNet(nn.Module):
    """Residual CNN trunk (same as SimpleMultiTaskResNet) whose flattened
    features are concatenated with 2 autoregressive features per task,
    passed through one shared fc1, then per-task fc2/fc3 heads.

    The flatten size int(1024 * 13) hard-codes the post-conv sequence
    length -- assumes a specific inp_len (presumably ~100; TODO confirm).
    """
    def __init__(self, shape, task_num, get_attention_maps=False):
        super(AutoregressiveMultiTaskResNet, self).__init__()
        print('Intializing AutoregressiveMultiTaskResNet...')
        self.get_attention_maps = get_attention_maps  # if True, forward also returns att_x
        self.inp_len = shape[1]   # sequence length
        self.inp_size = shape[2]  # features per position (Conv1d channels)
        self.task_num = task_num
        self.hidden_dim = 128
        self.fc2_dim = 128
        self.fc3_dim = 16
        if self.get_attention_maps:
            # two convs producing a per-position attention distribution
            self.att_conv1 = nn.Conv1d(in_channels=self.inp_size, out_channels=self.inp_size, kernel_size=5, padding=2, stride=1)
            self.att_conv2 = nn.Conv1d(in_channels=self.inp_size, out_channels=self.inp_size, kernel_size=3, padding=1, stride=1)
        # stem: widen to 256 channels, first stride-2 downsample
        self.conv11 = nn.Conv1d(in_channels=self.inp_size, out_channels=128, kernel_size=5, padding=1, stride=1)
        self.bn11 = nn.BatchNorm1d(128)
        self.conv12 = nn.Conv1d(in_channels=128, out_channels=256, kernel_size=3, padding=1, stride=2)
        self.bn12 = nn.BatchNorm1d(256)
        # residual block at 256 channels
        self.conv21 = nn.Conv1d(in_channels=256, out_channels=256, kernel_size=3, padding=1, stride=1)
        self.bn21 = nn.BatchNorm1d(256)
        self.conv22 = nn.Conv1d(in_channels=256, out_channels=256, kernel_size=3, padding=1, stride=1)
        self.bn22 = nn.BatchNorm1d(256)
        # stride-2 downsample to 512 channels
        self.conv3 = nn.Conv1d(in_channels=256, out_channels=512, kernel_size=3, padding=1, stride=2)
        self.bn3 = nn.BatchNorm1d(512)
        # residual block at 512 channels
        self.conv41 = nn.Conv1d(in_channels=512, out_channels=512, kernel_size=3, padding=1, stride=1)
        self.bn41 = nn.BatchNorm1d(512)
        self.conv42 = nn.Conv1d(in_channels=512, out_channels=512, kernel_size=3, padding=1, stride=1)
        self.bn42 = nn.BatchNorm1d(512)
        # stride-2 downsample to 1024 channels
        self.conv5 = nn.Conv1d(in_channels=512, out_channels=1024, kernel_size=3, padding=1, stride=2)
        self.bn5 = nn.BatchNorm1d(1024)
        # residual block at 1024 channels
        self.conv61 = nn.Conv1d(in_channels=1024, out_channels=1024, kernel_size=3, padding=1, stride=1)
        self.bn61 = nn.BatchNorm1d(1024)
        self.conv62 = nn.Conv1d(in_channels=1024, out_channels=1024, kernel_size=3, padding=1, stride=1)
        self.bn62 = nn.BatchNorm1d(1024)
        # shared fc1 consumes trunk features + 2 autoregressive values per task
        self.fc1 = nn.Linear(in_features=int(1024 * 13) + 2 * self.task_num, out_features=self.fc2_dim)
        #self.fc1_lst = nn.ModuleList()
        self.fc2_lst = nn.ModuleList()
        self.fc3_lst = nn.ModuleList()
        for _ in range(self.task_num):
            #self.fc1_lst.append(nn.Linear(in_features=int(1024 * 13) + 2, out_features=self.fc2_dim))
            self.fc2_lst.append(nn.Linear(in_features=self.fc2_dim, out_features=self.fc3_dim))
            self.fc3_lst.append(nn.Linear(in_features=self.fc3_dim, out_features=1))
    def forward(self, x, auto_x):
        """Return (outputs, feature_vecs, att_x-or-None); auto_x carries the
        autoregressive features concatenated onto the flattened trunk."""
        x = transpose(x, 1, 2)  # (batch, len, feat) -> (batch, feat, len) for Conv1d
        if self.get_attention_maps:
            #att_x = F.relu(self.att_bn1(self.att_conv1(x)))
            #att_x = F.relu(self.att_bn2(self.att_conv2(att_x)))
            att_x = F.relu(self.att_conv1(x))
            att_x = F.relu(self.att_conv2(att_x))
            att_x = F.softmax(att_x, dim=2)  # normalize over positions
            #att_x = torch.sigmoid(att_x) #* F.softmax(att_x.mean(dim=1).unsqueeze(1), dim=2).expand_as(x)
            #x = x * att_x.expand_as(x)
            x = x * att_x  # gate input by learned attention
        x = F.relu(self.bn11(self.conv11(x)))
        x = F.relu(self.bn12(self.conv12(x)))
        res = x  # identity shortcut for residual block 1
        x = F.relu(self.bn21(self.conv21(x)))
        x = F.relu(self.bn22(self.conv22(x)))
        x += res
        x = F.relu(self.bn3(self.conv3(x)))
        res = x  # shortcut for residual block 2
        x = F.relu(self.bn41(self.conv41(x)))
        x = F.relu(self.bn42(self.conv42(x)))
        x += res
        x = F.relu(self.bn5(self.conv5(x)))
        res = x  # shortcut for residual block 3
        x = F.relu(self.bn61(self.conv61(x)))
        x = F.relu(self.bn62(self.conv62(x)))
        x += res
        x = x.view(-1, int(1024 * 13))  # flatten; assumes length 13 remains
        ar_x = torch.cat([x, auto_x], dim=1) # adding autoregressive features
        ar_x = F.relu(self.fc1(ar_x))
        outputs = []
        feature_vecs = []
        for i in range(self.task_num):
            task_x = F.relu(self.fc2_lst[i](ar_x))
            #task_x = torch.cat([torch.zeros(x.size()).cuda(), auto_x], 1)
            #task_x = torch.cat([x, auto_x], dim=1) # adding autoregressive features
            #task_x = F.relu(self.fc1_lst[i](task_x))
            #task_x = F.relu(self.fc2_lst[i](task_x))
            feature_vecs.append(task_x)
            outputs.append(self.fc3_lst[i](task_x).reshape(-1))
        if self.get_attention_maps: return outputs, feature_vecs, att_x
return outputs, feature_vecs, None | DIGDriver/region_model/nets/cnn_predictors.py | import torch
from torch import nn, transpose
from torch.autograd import Variable
from torch.nn import functional as F
class FCNet(nn.Module):
    """Per-task two-layer MLP regressor.

    Each task gets its own small head (inp_size -> 128 -> 16 -> 1) applied to
    the input vector (time-averaged over dim 1 when inp_len > 1).
    """
    def __init__(self, shape, task_num):
        """shape: (batch, inp_len, inp_size); task_num: number of output heads."""
        super(FCNet, self).__init__()
        print('Intializing FCNet...')
        self.inp_len = shape[1]
        self.inp_size = shape[2]
        self.task_num = task_num
        self.hidden_dim_1 = 128
        self.hidden_dim_2 = 16
        self.fc1_lst = nn.ModuleList()
        self.fc2_lst = nn.ModuleList()
        self.fc3_lst = nn.ModuleList()
        for _ in range(self.task_num):
            self.fc1_lst.append(nn.Linear(in_features=self.inp_size, out_features=self.hidden_dim_1))
            self.fc2_lst.append(nn.Linear(in_features=self.hidden_dim_1, out_features=self.hidden_dim_2))
            self.fc3_lst.append(nn.Linear(in_features=self.hidden_dim_2, out_features=1))

    def forward(self, x):
        """Return (outputs, feature_vecs, None): per-task scalar predictions
        and the 16-dim hidden features they were computed from."""
        if self.inp_len > 1:
            x = x.mean(dim=1)  # average over the sequence dimension
        outputs = []
        feature_vecs = []
        for i in range(self.task_num):
            # BUG FIX: the original overwrote `x` with the 16-dim hidden
            # activation, so every task after the first fed the wrong tensor
            # into fc1 (shape mismatch crash for task_num > 1). Use a
            # per-task local, as AutoregressiveFCNet already does.
            task_x = F.relu(self.fc1_lst[i](x))
            task_x = F.relu(self.fc2_lst[i](task_x))
            feature_vecs.append(task_x)
            outputs.append(self.fc3_lst[i](task_x).reshape(-1))
        return outputs, feature_vecs, None
class AutoregressiveFCNet(nn.Module):
    """Per-task MLP regressor that appends two autoregressive features
    to the (time-averaged) input vector before each task head."""

    def __init__(self, shape, task_num):
        """shape: (batch, inp_len, inp_size); task_num: number of heads."""
        super(AutoregressiveFCNet, self).__init__()
        print('Intializing AutoregressiveFCNet...')
        self.inp_len = shape[1]
        # +2 input features for the concatenated autoregressive signal
        self.inp_size = shape[2] + 2
        self.task_num = task_num
        self.hidden_dim_1 = 128
        self.hidden_dim_2 = 16
        self.fc1_lst = nn.ModuleList()
        self.fc2_lst = nn.ModuleList()
        self.fc3_lst = nn.ModuleList()
        for _ in range(self.task_num):
            self.fc1_lst.append(nn.Linear(in_features=self.inp_size, out_features=self.hidden_dim_1))
            self.fc2_lst.append(nn.Linear(in_features=self.hidden_dim_1, out_features=self.hidden_dim_2))
            self.fc3_lst.append(nn.Linear(in_features=self.hidden_dim_2, out_features=1))

    def forward(self, x, auto_x):
        """Return (outputs, feature_vecs, None) given input x and the
        2-feature autoregressive tensor auto_x."""
        if self.inp_len > 1:
            x = x.mean(dim=1)  # collapse the sequence dimension
        outputs, feature_vecs = [], []
        for fc1, fc2, fc3 in zip(self.fc1_lst, self.fc2_lst, self.fc3_lst):
            combined = torch.cat([x, auto_x], 1)  # adding autoregressive features
            hidden = F.relu(fc1(combined))
            hidden = F.relu(fc2(hidden))
            feature_vecs.append(hidden)
            outputs.append(fc3(hidden).reshape(-1))
        return outputs, feature_vecs, None
class SimpleMultiTaskResNet(nn.Module):
    """1-D residual CNN trunk shared across tasks, with one small FC head
    per task.

    Input is (batch, inp_len, inp_size) and is transposed to channels-first
    for Conv1d. The flatten size int(1024 * 13) hard-codes the sequence
    length left after the three stride-2 convs -- this ties the model to a
    specific inp_len (presumably ~100; TODO confirm against the data
    pipeline). Optionally learns a soft per-position attention map first.
    """
    def __init__(self, shape, task_num, get_attention_maps=False):
        super(SimpleMultiTaskResNet, self).__init__()
        print('Intializing SimpleMultiTaskResNet...')
        self.get_attention_maps = get_attention_maps  # if True, forward also returns att_x
        self.inp_len = shape[1]   # sequence length
        self.inp_size = shape[2]  # features per position (Conv1d channels)
        self.task_num = task_num
        self.hidden_dim = 128
        self.fc2_dim = 128
        self.fc3_dim = 16
        if self.get_attention_maps:
            # two convs producing a per-position attention distribution
            self.att_conv1 = nn.Conv1d(in_channels=self.inp_size, out_channels=self.inp_size, kernel_size=5, padding=2, stride=1)
            #self.att_bn1 = nn.BatchNorm1d(self.inp_size)
            self.att_conv2 = nn.Conv1d(in_channels=self.inp_size, out_channels=self.inp_size, kernel_size=3, padding=1, stride=1)
            #self.att_bn2 = nn.BatchNorm1d(self.inp_size)
        # stem: widen to 256 channels, first stride-2 downsample
        self.conv11 = nn.Conv1d(in_channels=self.inp_size, out_channels=128, kernel_size=5, padding=1, stride=1)
        self.bn11 = nn.BatchNorm1d(128)
        self.conv12 = nn.Conv1d(in_channels=128, out_channels=256, kernel_size=3, padding=1, stride=2)
        self.bn12 = nn.BatchNorm1d(256)
        # residual block at 256 channels
        self.conv21 = nn.Conv1d(in_channels=256, out_channels=256, kernel_size=3, padding=1, stride=1)
        self.bn21 = nn.BatchNorm1d(256)
        self.conv22 = nn.Conv1d(in_channels=256, out_channels=256, kernel_size=3, padding=1, stride=1)
        self.bn22 = nn.BatchNorm1d(256)
        # stride-2 downsample to 512 channels
        self.conv3 = nn.Conv1d(in_channels=256, out_channels=512, kernel_size=3, padding=1, stride=2)
        self.bn3 = nn.BatchNorm1d(512)
        # residual block at 512 channels
        self.conv41 = nn.Conv1d(in_channels=512, out_channels=512, kernel_size=3, padding=1, stride=1)
        self.bn41 = nn.BatchNorm1d(512)
        self.conv42 = nn.Conv1d(in_channels=512, out_channels=512, kernel_size=3, padding=1, stride=1)
        self.bn42 = nn.BatchNorm1d(512)
        # stride-2 downsample to 1024 channels
        self.conv5 = nn.Conv1d(in_channels=512, out_channels=1024, kernel_size=3, padding=1, stride=2)
        self.bn5 = nn.BatchNorm1d(1024)
        # residual block at 1024 channels
        self.conv61 = nn.Conv1d(in_channels=1024, out_channels=1024, kernel_size=3, padding=1, stride=1)
        self.bn61 = nn.BatchNorm1d(1024)
        self.conv62 = nn.Conv1d(in_channels=1024, out_channels=1024, kernel_size=3, padding=1, stride=1)
        self.bn62 = nn.BatchNorm1d(1024)
        # one FC head (flattened trunk -> 128 -> 16 -> 1) per task
        self.fc1_lst = nn.ModuleList()
        self.fc2_lst = nn.ModuleList()
        self.fc3_lst = nn.ModuleList()
        for _ in range(self.task_num):
            self.fc1_lst.append(nn.Linear(in_features=int(1024 * 13), out_features=self.fc2_dim))
            self.fc2_lst.append(nn.Linear(in_features=self.fc2_dim, out_features=self.fc3_dim))
            self.fc3_lst.append(nn.Linear(in_features=self.fc3_dim, out_features=1))
    def forward(self, x: Variable) -> (Variable):
        """Return (outputs, feature_vecs, att_x-or-None)."""
        x = transpose(x, 1, 2)  # (batch, len, feat) -> (batch, feat, len) for Conv1d
        if self.get_attention_maps:
            #att_x = F.relu(self.att_bn1(self.att_conv1(x)))
            #att_x = F.relu(self.att_bn2(self.att_conv2(att_x)))
            att_x = F.relu(self.att_conv1(x))
            att_x = F.relu(self.att_conv2(att_x))
            att_x = F.softmax(att_x, dim=2)  # normalize over positions
            #att_x = torch.sigmoid(att_x) #* F.softmax(att_x.mean(dim=1).unsqueeze(1), dim=2).expand_as(x)
            #x = x * att_x.expand_as(x)
            x = x * att_x  # gate input by learned attention
        x = F.relu(self.bn11(self.conv11(x)))
        x = F.relu(self.bn12(self.conv12(x)))
        res = x  # identity shortcut for residual block 1
        x = F.relu(self.bn21(self.conv21(x)))
        x = F.relu(self.bn22(self.conv22(x)))
        x += res
        x = F.relu(self.bn3(self.conv3(x)))
        res = x  # shortcut for residual block 2
        x = F.relu(self.bn41(self.conv41(x)))
        x = F.relu(self.bn42(self.conv42(x)))
        x += res
        x = F.relu(self.bn5(self.conv5(x)))
        res = x  # shortcut for residual block 3
        x = F.relu(self.bn61(self.conv61(x)))
        x = F.relu(self.bn62(self.conv62(x)))
        x += res
        x = x.view(-1, int(1024 * 13))  # flatten; assumes length 13 remains
        outputs = []
        feature_vecs = []
        for i in range(self.task_num):
            task_x = F.relu(self.fc1_lst[i](x))
            task_x = F.relu(self.fc2_lst[i](task_x))
            feature_vecs.append(task_x)
            outputs.append(self.fc3_lst[i](task_x).reshape(-1))
        if self.get_attention_maps: return outputs, feature_vecs, att_x
        return outputs, feature_vecs, None
class MultiTaskCNN(nn.Module):
    """Multi-task 1-D CNN with learned linear shortcut connections.

    Each stage downsamples with a stride-2 conv and adds a dense projection
    (w1..w5) of the previous stage's flattened activations as a shortcut.
    The hard-coded widths (128*51, 256*27, ...) tie the model to a specific
    input length (inp_len ~= 100 -- TODO confirm against the data pipeline).
    One FC head per task produces a scalar prediction.
    """
    def __init__(self, shape, task_num):
        """shape: (batch, inp_len, inp_size); task_num: number of heads."""
        # BUG FIX: the original called super(SimpleMultiTaskResNet, self),
        # which raises TypeError at construction time since self is not a
        # SimpleMultiTaskResNet instance.
        super(MultiTaskCNN, self).__init__()
        print('Intializing MultiTaskCNN...')
        self.inp_len = shape[1]
        self.inp_size = shape[2]
        self.task_num = task_num
        self.hidden_dim = 128
        self.fc2_dim = 128
        self.conv_base = nn.Conv1d(in_channels=self.inp_size, out_channels=128, kernel_size=5, padding=3, stride=2)
        # stage 1: 128ch/len51 -> 256ch/len27, shortcut w1
        self.bn11 = nn.BatchNorm1d(128)
        self.conv11 = nn.Conv1d(in_channels=128, out_channels=256, kernel_size=3, padding=2, stride=2)
        self.bn12 = nn.BatchNorm1d(256)
        self.conv12 = nn.Conv1d(in_channels=256, out_channels=256, kernel_size=3, padding=1, stride=1)
        self.w1 = nn.Linear(in_features=128 * 51, out_features=256 * 27)
        # stage 2: len27 -> len15, shortcut w2
        self.bn21 = nn.BatchNorm1d(256)
        self.conv21 = nn.Conv1d(in_channels=256, out_channels=256, kernel_size=3, padding=2, stride=2)
        self.bn22 = nn.BatchNorm1d(256)
        self.conv22 = nn.Conv1d(in_channels=256, out_channels=256, kernel_size=3, padding=1, stride=1)
        self.w2 = nn.Linear(in_features=256 * 27, out_features=256 * 15)
        # stage 3: 256ch/len15 -> 512ch/len9, shortcut w3
        self.bn31 = nn.BatchNorm1d(256)
        self.conv31 = nn.Conv1d(in_channels=256, out_channels=512, kernel_size=3, padding=2, stride=2)
        self.bn32 = nn.BatchNorm1d(512)
        self.conv32 = nn.Conv1d(in_channels=512, out_channels=512, kernel_size=3, padding=1, stride=1)
        self.w3 = nn.Linear(in_features=256 * 15, out_features=512 * 9)
        # stage 4: 512ch/len9 -> 1024ch/len6, shortcut w4
        self.bn41 = nn.BatchNorm1d(512)
        self.conv41 = nn.Conv1d(in_channels=512, out_channels=1024, kernel_size=3, padding=2, stride=2)
        self.bn42 = nn.BatchNorm1d(1024)
        self.conv42 = nn.Conv1d(in_channels=1024, out_channels=1024, kernel_size=3, padding=1, stride=1)
        self.w4 = nn.Linear(in_features=512 * 9, out_features=1024 * 6)
        # stage 5: len6 -> len3, shortcut w5
        self.bn51 = nn.BatchNorm1d(1024)
        self.conv51 = nn.Conv1d(in_channels=1024, out_channels=1024, kernel_size=3, padding=1, stride=2)
        self.bn52 = nn.BatchNorm1d(1024)
        self.conv52 = nn.Conv1d(in_channels=1024, out_channels=1024, kernel_size=3, padding=1, stride=1)
        self.w5 = nn.Linear(in_features=1024 * 6, out_features=1024 * 3)
        # per-task heads (flattened trunk -> 128 -> 1)
        self.fc1_lst = nn.ModuleList()
        self.fc2_lst = nn.ModuleList()
        for _ in range(self.task_num):
            self.fc1_lst.append(nn.Linear(in_features=int(1024 * 3), out_features=self.fc2_dim))
            self.fc2_lst.append(nn.Linear(in_features=self.fc2_dim, out_features=1))

    def forward(self, x):
        """Return a list of per-task scalar predictions (one tensor per task).

        NOTE(review): unlike the sibling models, this forward returns only
        `outputs` (no feature vectors) -- kept for interface compatibility.
        """
        x = F.relu(self.conv_base(transpose(x, 1, 2)))  # to channels-first, then stem
        res = x.view(-1, 128 * 51)
        x = self.conv11(F.relu(self.bn11(x)))  # pre-activation ordering (BN+ReLU before conv)
        x = self.conv12(F.relu(self.bn12(x)))
        x += self.w1(res).view(-1, 256, 27)  # dense shortcut
        res = x.view(-1, 256 * 27)
        x = self.conv21(F.relu(self.bn21(x)))
        x = self.conv22(F.relu(self.bn22(x)))
        x += self.w2(res).view(-1, 256, 15)
        res = x.view(-1, 256 * 15)
        x = self.conv31(F.relu(self.bn31(x)))
        x = self.conv32(F.relu(self.bn32(x)))
        x += self.w3(res).view(-1, 512, 9)
        res = x.view(-1, 512 * 9)
        x = self.conv41(F.relu(self.bn41(x)))
        x = self.conv42(F.relu(self.bn42(x)))
        x += self.w4(res).view(-1, 1024, 6)
        res = x.view(-1, 1024 * 6)
        x = self.conv51(F.relu(self.bn51(x)))
        x = self.conv52(F.relu(self.bn52(x)))
        x += self.w5(res).view(-1, 1024, 3)
        x = x.view(-1, int(1024 * 3))
        outputs = []
        for i in range(self.task_num):
            task_x = F.relu(self.fc1_lst[i](x))
            outputs.append(self.fc2_lst[i](task_x).reshape(-1))
        return outputs
class AutoregressiveMultiTaskResNet(nn.Module):
    """Residual CNN trunk (same as SimpleMultiTaskResNet) whose flattened
    features are concatenated with 2 autoregressive features per task,
    passed through one shared fc1, then per-task fc2/fc3 heads.

    The flatten size int(1024 * 13) hard-codes the post-conv sequence
    length -- assumes a specific inp_len (presumably ~100; TODO confirm).
    """
    def __init__(self, shape, task_num, get_attention_maps=False):
        super(AutoregressiveMultiTaskResNet, self).__init__()
        print('Intializing AutoregressiveMultiTaskResNet...')
        self.get_attention_maps = get_attention_maps  # if True, forward also returns att_x
        self.inp_len = shape[1]   # sequence length
        self.inp_size = shape[2]  # features per position (Conv1d channels)
        self.task_num = task_num
        self.hidden_dim = 128
        self.fc2_dim = 128
        self.fc3_dim = 16
        if self.get_attention_maps:
            # two convs producing a per-position attention distribution
            self.att_conv1 = nn.Conv1d(in_channels=self.inp_size, out_channels=self.inp_size, kernel_size=5, padding=2, stride=1)
            self.att_conv2 = nn.Conv1d(in_channels=self.inp_size, out_channels=self.inp_size, kernel_size=3, padding=1, stride=1)
        # stem: widen to 256 channels, first stride-2 downsample
        self.conv11 = nn.Conv1d(in_channels=self.inp_size, out_channels=128, kernel_size=5, padding=1, stride=1)
        self.bn11 = nn.BatchNorm1d(128)
        self.conv12 = nn.Conv1d(in_channels=128, out_channels=256, kernel_size=3, padding=1, stride=2)
        self.bn12 = nn.BatchNorm1d(256)
        # residual block at 256 channels
        self.conv21 = nn.Conv1d(in_channels=256, out_channels=256, kernel_size=3, padding=1, stride=1)
        self.bn21 = nn.BatchNorm1d(256)
        self.conv22 = nn.Conv1d(in_channels=256, out_channels=256, kernel_size=3, padding=1, stride=1)
        self.bn22 = nn.BatchNorm1d(256)
        # stride-2 downsample to 512 channels
        self.conv3 = nn.Conv1d(in_channels=256, out_channels=512, kernel_size=3, padding=1, stride=2)
        self.bn3 = nn.BatchNorm1d(512)
        # residual block at 512 channels
        self.conv41 = nn.Conv1d(in_channels=512, out_channels=512, kernel_size=3, padding=1, stride=1)
        self.bn41 = nn.BatchNorm1d(512)
        self.conv42 = nn.Conv1d(in_channels=512, out_channels=512, kernel_size=3, padding=1, stride=1)
        self.bn42 = nn.BatchNorm1d(512)
        # stride-2 downsample to 1024 channels
        self.conv5 = nn.Conv1d(in_channels=512, out_channels=1024, kernel_size=3, padding=1, stride=2)
        self.bn5 = nn.BatchNorm1d(1024)
        # residual block at 1024 channels
        self.conv61 = nn.Conv1d(in_channels=1024, out_channels=1024, kernel_size=3, padding=1, stride=1)
        self.bn61 = nn.BatchNorm1d(1024)
        self.conv62 = nn.Conv1d(in_channels=1024, out_channels=1024, kernel_size=3, padding=1, stride=1)
        self.bn62 = nn.BatchNorm1d(1024)
        # shared fc1 consumes trunk features + 2 autoregressive values per task
        self.fc1 = nn.Linear(in_features=int(1024 * 13) + 2 * self.task_num, out_features=self.fc2_dim)
        #self.fc1_lst = nn.ModuleList()
        self.fc2_lst = nn.ModuleList()
        self.fc3_lst = nn.ModuleList()
        for _ in range(self.task_num):
            #self.fc1_lst.append(nn.Linear(in_features=int(1024 * 13) + 2, out_features=self.fc2_dim))
            self.fc2_lst.append(nn.Linear(in_features=self.fc2_dim, out_features=self.fc3_dim))
            self.fc3_lst.append(nn.Linear(in_features=self.fc3_dim, out_features=1))
    def forward(self, x, auto_x):
        """Return (outputs, feature_vecs, att_x-or-None); auto_x carries the
        autoregressive features concatenated onto the flattened trunk."""
        x = transpose(x, 1, 2)  # (batch, len, feat) -> (batch, feat, len) for Conv1d
        if self.get_attention_maps:
            #att_x = F.relu(self.att_bn1(self.att_conv1(x)))
            #att_x = F.relu(self.att_bn2(self.att_conv2(att_x)))
            att_x = F.relu(self.att_conv1(x))
            att_x = F.relu(self.att_conv2(att_x))
            att_x = F.softmax(att_x, dim=2)  # normalize over positions
            #att_x = torch.sigmoid(att_x) #* F.softmax(att_x.mean(dim=1).unsqueeze(1), dim=2).expand_as(x)
            #x = x * att_x.expand_as(x)
            x = x * att_x  # gate input by learned attention
        x = F.relu(self.bn11(self.conv11(x)))
        x = F.relu(self.bn12(self.conv12(x)))
        res = x  # identity shortcut for residual block 1
        x = F.relu(self.bn21(self.conv21(x)))
        x = F.relu(self.bn22(self.conv22(x)))
        x += res
        x = F.relu(self.bn3(self.conv3(x)))
        res = x  # shortcut for residual block 2
        x = F.relu(self.bn41(self.conv41(x)))
        x = F.relu(self.bn42(self.conv42(x)))
        x += res
        x = F.relu(self.bn5(self.conv5(x)))
        res = x  # shortcut for residual block 3
        x = F.relu(self.bn61(self.conv61(x)))
        x = F.relu(self.bn62(self.conv62(x)))
        x += res
        x = x.view(-1, int(1024 * 13))  # flatten; assumes length 13 remains
        ar_x = torch.cat([x, auto_x], dim=1) # adding autoregressive features
        ar_x = F.relu(self.fc1(ar_x))
        outputs = []
        feature_vecs = []
        for i in range(self.task_num):
            task_x = F.relu(self.fc2_lst[i](ar_x))
            #task_x = torch.cat([torch.zeros(x.size()).cuda(), auto_x], 1)
            #task_x = torch.cat([x, auto_x], dim=1) # adding autoregressive features
            #task_x = F.relu(self.fc1_lst[i](task_x))
            #task_x = F.relu(self.fc2_lst[i](task_x))
            feature_vecs.append(task_x)
            outputs.append(self.fc3_lst[i](task_x).reshape(-1))
        if self.get_attention_maps: return outputs, feature_vecs, att_x
        return outputs, feature_vecs, None
import os
import json
import logging
logger = logging.getLogger('amr_postprocessing')
def get_default_amr():
    """Fallback AMR graph returned when a parse is missing or invalid."""
    return '(w / want-01 :ARG0 (b / boy) :ARG1 (g / go-01 :ARG0 b))'
def write_to_file(lst, file_new):
    """Write each item of *lst* to *file_new* as one stripped line.

    Fix: dropped the redundant out_f.close() -- the `with` block already
    closes the file.
    """
    with open(file_new, 'w', encoding='utf-8') as out_f:
        for line in lst:
            out_f.write(line.strip() + '\n')
def get_files_by_ext(direc, ext):
    """Function that traverses a directory and returns all files that match a certain extension"""
    matches = []
    for root, _dirs, files in os.walk(direc):
        matches.extend(os.path.join(root, name) for name in files if name.endswith(ext))
    return matches
def tokenize_line(line):
    """Pad every parenthesis with spaces, then collapse whitespace runs."""
    spaced = line.replace('(', ' ( ')
    spaced = spaced.replace(')', ' ) ')
    return " ".join(spaced.split())
def reverse_tokenize(new_line):
    """Undo tokenize_line: repeatedly glue parentheses back onto their neighbours."""
    while (' )' in new_line) or ('( ' in new_line):  # restore tokenizing
        new_line = new_line.replace(' )', ')')
        new_line = new_line.replace('( ', '(')
    return new_line
def load_dict(d):
    """Load a JSON dictionary from path *d*.

    Used to load the reference dict (based on training data) that settles
    disputes based on frequency. Fix: removed the redundant in_f.close()
    inside the `with` block and collapsed the awkward multi-line open call.
    """
    with open(d, 'r', encoding='utf-8') as in_f:
        return json.load(in_f)
def add_to_dict(d, key, base):
    """Function to add key to dictionary, either add base or start with base"""
    if key not in d:
        d[key] = base
    else:
        d[key] += base
    return d
def countparens(text):
    """ proper nested parens counting """
    depth = 0
    for ch in text:
        if ch == "(":
            depth += 1
        elif ch == ")":
            depth -= 1
            # a close before its open makes the string invalid immediately
            if depth < 0:
                return False
    return depth == 0
def valid_amr(amr_text):
    """Return True iff *amr_text* is a well-formed AMR string.

    Cheap parenthesis-balance check first, then the smatch AMR parser.
    Fixes: the MAJOR WARNING log line was unreachable (it followed a
    `return False`); `except (AttributeError, Exception)` collapsed to
    `except Exception` (Exception already covers AttributeError); dropped
    the unreachable trailing `return True`.
    """
    from . import amr
    if not countparens(amr_text):  # wrong parentheses, return false
        return False
    try:
        theamr = amr.AMR.parse_AMR_line(amr_text)
        if theamr is None:
            logger.error(f"MAJOR WARNING: couldn't build amr out of {amr_text} using smatch code")
            return False
        return True
    except Exception as e:
        logger.error(e)
        return False
return True | amr_seq2seq/utils/amr_utils.py | import os
import json
import logging
logger = logging.getLogger('amr_postprocessing')
def get_default_amr():
    """Fallback AMR graph returned when a parse is missing or invalid."""
    return '(w / want-01 :ARG0 (b / boy) :ARG1 (g / go-01 :ARG0 b))'
def write_to_file(lst, file_new):
    """Write each item of *lst* to *file_new* as one stripped line.

    Fix: dropped the redundant out_f.close() -- the `with` block already
    closes the file.
    """
    with open(file_new, 'w', encoding='utf-8') as out_f:
        for line in lst:
            out_f.write(line.strip() + '\n')
def get_files_by_ext(direc, ext):
    """Function that traverses a directory and returns all files that match a certain extension"""
    matches = []
    for root, _dirs, files in os.walk(direc):
        matches.extend(os.path.join(root, name) for name in files if name.endswith(ext))
    return matches
def tokenize_line(line):
    """Pad every parenthesis with spaces, then collapse whitespace runs."""
    spaced = line.replace('(', ' ( ')
    spaced = spaced.replace(')', ' ) ')
    return " ".join(spaced.split())
def reverse_tokenize(new_line):
    """Undo tokenize_line: repeatedly glue parentheses back onto their neighbours."""
    while (' )' in new_line) or ('( ' in new_line):  # restore tokenizing
        new_line = new_line.replace(' )', ')')
        new_line = new_line.replace('( ', '(')
    return new_line
def load_dict(d):
    """Load a JSON dictionary from path *d*.

    Used to load the reference dict (based on training data) that settles
    disputes based on frequency. Fix: removed the redundant in_f.close()
    inside the `with` block and collapsed the awkward multi-line open call.
    """
    with open(d, 'r', encoding='utf-8') as in_f:
        return json.load(in_f)
def add_to_dict(d, key, base):
    """Function to add key to dictionary, either add base or start with base"""
    if key not in d:
        d[key] = base
    else:
        d[key] += base
    return d
def countparens(text):
    """Return True iff parentheses in *text* are balanced and properly nested."""
    depth = 0
    for ch in text:
        # '(' raises the nesting depth, ')' lowers it, other chars are neutral
        depth += (ch == "(") - (ch == ")")
        if depth < 0:  # a closer appeared before its opener
            return False
    return depth == 0
def valid_amr(amr_text):
    """Return True when *amr_text* is balanced and parses as an AMR graph.

    Uses the smatch AMR parser; a None parse result or any exception is
    logged and reported as invalid.
    """
    from . import amr
    if not countparens(amr_text):  # unbalanced parentheses can never parse
        return False
    try:
        theamr = amr.AMR.parse_AMR_line(amr_text)
    except Exception as e:  # BUG FIX: ``(AttributeError, Exception)`` was redundant
        logger.error(e)
        return False
    if theamr is None:
        # BUG FIX: this warning previously sat *after* ``return False`` and
        # was therefore unreachable.
        logger.error(f"MAJOR WARNING: couldn't build amr out of {amr_text} using smatch code")
        return False
    return True
return True | 0.40251 | 0.152158 |
from model import *
from utils import *
from evaluate import *
from dataloader import *
def load_data(args):
    """Read an indexed training file and build batched tensors.

    Args:
        args: CLI arguments (sys.argv[1:]): model path, char_to_idx,
            word_to_idx, tag_to_idx, training data path, ..., num_epochs.

    Returns:
        (batch, cti, wti, itt): list of (xc, xw, y0) tensor triples plus
        the char/word/tag vocabulary maps.
    """
    data = dataloader()
    batch = []
    cti = load_tkn_to_idx(args[1]) # char_to_idx
    wti = load_tkn_to_idx(args[2]) # word_to_idx
    itt = load_idx_to_tkn(args[3]) # idx_to_tkn
    print("loading %s..." % args[4])
    with open(args[4]) as fo:
        # HRE presumably toggles hierarchical mode: blocks separated by a
        # blank line when HRE == 1, single lines otherwise -- TODO confirm.
        text = fo.read().strip().split("\n" * (HRE + 1))
    for block in text:
        data.append_row()
        for line in block.split("\n"):
            # each line: space-separated "charids:wordid" tokens, TAB, tag ids
            x, y = line.split("\t")
            x = [x.split(":") for x in x.split(" ")]
            y = tuple(map(int, y.split(" ")))
            # char ids are "+"-joined ints; the word id is a single int
            xc, xw = zip(*[(list(map(int, xc.split("+"))), int(xw)) for xc, xw in x])
            data.append_item(xc = xc, xw = xw, y0 = y)
    for _batch in data.split():
        xc, xw, y0, lens = _batch.xc, _batch.xw, _batch.y0, _batch.lens
        xc, xw = data.tensor(bc = xc, bw = xw, lens = lens)
        # sos=True: gold sequence presumably gets a start token -- see dataloader
        _, y0 = data.tensor(bw = y0, sos = True)
        batch.append((xc, xw, y0))
    print("data size: %d" % len(data.y0))
    print("batch size: %d" % BATCH_SIZE)
    return batch, cti, wti, itt
def train(args):
    """Train the rnn_crf tagger, checkpointing and optionally evaluating.

    Args:
        args: sys.argv[1:] -- [model, char_to_idx, word_to_idx, tag_to_idx,
            training_data, (validation_data,) num_epochs].
    """
    num_epochs = int(args[-1])
    batch, cti, wti, itt = load_data(args)
    model = rnn_crf(len(cti), len(wti), len(itt))
    optim = torch.optim.Adam(model.parameters(), lr = LEARNING_RATE)
    print(model)
    # resume from an existing checkpoint if the model file is present
    epoch = load_checkpoint(args[0], model) if isfile(args[0]) else 0
    # FIX: raw string -- "\." in a plain string is an invalid escape sequence
    filename = re.sub(r"\.epoch[0-9]+$", "", args[0])
    print("training model...")
    for ei in range(epoch + 1, epoch + num_epochs + 1):
        loss_sum = 0
        timer = time()
        for xc, xw, y0 in batch:
            optim.zero_grad()  # BUG FIX: without this, gradients accumulate across batches
            loss = model(xc, xw, y0) # forward pass and compute loss
            loss.backward() # compute gradients
            optim.step() # update parameters
            loss_sum += loss.item()
        timer = time() - timer
        loss_sum /= len(batch)
        # non-multiple of SAVE_EVERY (and not the last epoch): log only,
        # otherwise write a real checkpoint file
        if ei % SAVE_EVERY and ei != epoch + num_epochs:
            save_checkpoint("", None, ei, loss_sum, timer)
        else:
            save_checkpoint(filename, model, ei, loss_sum, timer)
        if len(args) == 7 and (ei % EVAL_EVERY == 0 or ei == epoch + num_epochs):
            evaluate(predict(model, cti, wti, itt, args[5]), True)
            model.train()  # predict() presumably switches to eval mode; restore training mode
        print()
if __name__ == "__main__":
if len(sys.argv) not in [7, 8]:
sys.exit("Usage: %s model char_to_idx word_to_idx tag_to_idx training_data (validation_data) num_epoch" % sys.argv[0])
train(sys.argv[1:]) | lstm/train.py | from model import *
from utils import *
from evaluate import *
from dataloader import *
def load_data(args):
data = dataloader()
batch = []
cti = load_tkn_to_idx(args[1]) # char_to_idx
wti = load_tkn_to_idx(args[2]) # word_to_idx
itt = load_idx_to_tkn(args[3]) # idx_to_tkn
print("loading %s..." % args[4])
with open(args[4]) as fo:
text = fo.read().strip().split("\n" * (HRE + 1))
for block in text:
data.append_row()
for line in block.split("\n"):
x, y = line.split("\t")
x = [x.split(":") for x in x.split(" ")]
y = tuple(map(int, y.split(" ")))
xc, xw = zip(*[(list(map(int, xc.split("+"))), int(xw)) for xc, xw in x])
data.append_item(xc = xc, xw = xw, y0 = y)
for _batch in data.split():
xc, xw, y0, lens = _batch.xc, _batch.xw, _batch.y0, _batch.lens
xc, xw = data.tensor(bc = xc, bw = xw, lens = lens)
_, y0 = data.tensor(bw = y0, sos = True)
batch.append((xc, xw, y0))
print("data size: %d" % len(data.y0))
print("batch size: %d" % BATCH_SIZE)
return batch, cti, wti, itt
def train(args):
num_epochs = int(args[-1])
batch, cti, wti, itt = load_data(args)
model = rnn_crf(len(cti), len(wti), len(itt))
optim = torch.optim.Adam(model.parameters(), lr = LEARNING_RATE)
print(model)
epoch = load_checkpoint(args[0], model) if isfile(args[0]) else 0
filename = re.sub("\.epoch[0-9]+$", "", args[0])
print("training model...")
for ei in range(epoch + 1, epoch + num_epochs + 1):
loss_sum = 0
timer = time()
for xc, xw, y0 in batch:
loss = model(xc, xw, y0) # forward pass and compute loss
loss.backward() # compute gradients
optim.step() # update parameters
loss_sum += loss.item()
timer = time() - timer
loss_sum /= len(batch)
if ei % SAVE_EVERY and ei != epoch + num_epochs:
save_checkpoint("", None, ei, loss_sum, timer)
else:
save_checkpoint(filename, model, ei, loss_sum, timer)
if len(args) == 7 and (ei % EVAL_EVERY == 0 or ei == epoch + num_epochs):
evaluate(predict(model, cti, wti, itt, args[5]), True)
model.train()
print()
if __name__ == "__main__":
if len(sys.argv) not in [7, 8]:
sys.exit("Usage: %s model char_to_idx word_to_idx tag_to_idx training_data (validation_data) num_epoch" % sys.argv[0])
train(sys.argv[1:]) | 0.435661 | 0.351311 |
__author__ = 'wangqiang'
'''
基于多线程实现1对多的websocket
一个server,多个client
'''
import websockets
import threading
import asyncio
import time
import uuid
import random
def start_server(host, port):
    """Run the websocket echo server on its own event loop; blocks forever."""
    event_loop = asyncio.new_event_loop()
    asyncio.set_event_loop(event_loop)
    event_loop.run_until_complete(websockets.serve(server, host, port))
    # Without run_forever() the loop would return and the server would exit
    # immediately after binding.
    event_loop.run_forever()
def start_server_thread(host, port):
    """Launch start_server in a background thread and return the thread."""
    server_thread = threading.Thread(target=start_server, args=(host, port))
    server_thread.start()
    print(f"Serve ready at {host}:{port}")
    return server_thread
async def server(websocket, path):
    """Echo handler: timestamp each received message and send it back.

    Loops until the connection closes (or any error occurs).
    """
    while True:
        try:
            recv_text = await websocket.recv()
            # BUG FIX: format string was "%H:%M%S" (missing colon between
            # minutes and seconds).
            t = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            echo = f"Server got message: {recv_text} at {t}"
            await websocket.send(echo)
        except Exception as exp:
            # BUG FIX: only websockets' ConnectionClosed carries .code --
            # accessing exp.code unconditionally crashed on other errors,
            # and non-1000 close codes previously busy-looped forever.
            code = getattr(exp, "code", None)
            if code == 1000:
                print(f"connection close with {code} for reason {exp.reason}")
            break
async def client(uri, name):
    """Connect to *uri*, send five random messages, print the echoes, close."""
    # Pause one second so the server port is up before connecting.
    # BUG FIX: time.sleep() blocks the whole event loop inside a coroutine;
    # use asyncio.sleep() instead.
    await asyncio.sleep(1)
    async with websockets.connect(uri) as websocket:
        await websocket.send(f"{name} connect server")
        for _ in range(5):
            message = str(uuid.uuid4())
            await websocket.send(f"{name} send {message}")
            recv_text = await websocket.recv()
            print(f">{recv_text}")
            await asyncio.sleep(random.randint(1, 3))
        await websocket.close(reason=f"{name} close connection")
def start_client(uri, name):
    """Drive one client coroutine to completion on a private event loop."""
    client_loop = asyncio.new_event_loop()
    asyncio.set_event_loop(client_loop)
    client_loop.run_until_complete(client(uri, name))
def start_client_threads(uri, count):
    """Run *count* clients concurrently, one thread each, and wait for all."""
    workers = []
    for i in range(count):
        worker = threading.Thread(target=start_client, args=(uri, f"Client-{i}"))
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()
def start_client_threads_delay(uri, count, delay):
    """Wait *delay* seconds, then run *count* delayed clients in threads."""
    time.sleep(delay)
    workers = [
        threading.Thread(target=start_client, args=(uri, f"DelayClient-{i}"))
        for i in range(count)
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
if __name__ == '__main__':
# 启动websocket服务端
t_server = start_server_thread("localhost", "40002")
start_client_threads("ws://localhost:40002", 10)
# 模拟中途连接websocket
start_client_threads_delay("ws://localhost:40002", 5, 10)
t_server.join() | open_modules/about_websocket_threading.py | __author__ = 'wangqiang'
'''
基于多线程实现1对多的websocket
一个server,多个client
'''
import websockets
import threading
import asyncio
import time
import uuid
import random
def start_server(host, port):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(websockets.serve(server, host, port))
# 如果没有run_forever,server会立即退出
loop.run_forever()
def start_server_thread(host, port):
t = threading.Thread(target=start_server, args=(host, port))
t.start()
print(f"Serve ready at {host}:{port}")
return t
async def server(websocket, path):
while True:
try:
recv_text = await websocket.recv()
t = time.strftime("%Y-%m-%d %H:%M%S", time.localtime())
echo = f"Server got message: {recv_text} at {t}"
await websocket.send(echo)
except Exception as exp:
if exp.code == 1000:
print(f"connection close with {exp.code} for reason {exp.reason}")
break
async def client(uri, name):
# 暂停一秒,确保端口已经启动
time.sleep(1)
async with websockets.connect(uri) as websocket:
await websocket.send(f"{name} connect server")
for i in range(5):
message = str(uuid.uuid4())
await websocket.send(f"{name} send {message}")
recv_text = await websocket.recv()
print(f">{recv_text}")
time.sleep(random.randint(1, 3))
await websocket.close(reason=f"{name} close connection")
def start_client(uri, name):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(client(uri, name))
def start_client_threads(uri, count):
threads = [threading.Thread(target=start_client, args=(uri, f"Client-{i}")) for i in range(count)]
[t.start() for t in threads]
[t.join() for t in threads]
def start_client_threads_delay(uri, count, delay):
time.sleep(delay)
threads = [threading.Thread(target=start_client, args=(uri, f"DelayClient-{i}")) for i in range(count)]
[t.start() for t in threads]
[t.join() for t in threads]
if __name__ == '__main__':
# 启动websocket服务端
t_server = start_server_thread("localhost", "40002")
start_client_threads("ws://localhost:40002", 10)
# 模拟中途连接websocket
start_client_threads_delay("ws://localhost:40002", 5, 10)
t_server.join() | 0.21158 | 0.105579 |
ACTIVE_DETECTORS=('H2','H1','AX','L1','L2', 'CDD')
#FITS=('Ar41:linear','Ar40:linear', 'Ar39:parabolic','Ar38:parabolic','Ar37:parabolic','Ar36:parabolic')
def main():
    """Measurement sequence with a pre-measurement 'whiff' of the gas.

    NOTE(review): this is a pychron pyscript -- names such as info, open,
    close, sleep, mx, eqtime, whiff, multicollect are injected into the
    script namespace by the pychron runtime, not defined in this file
    ('open'/'close' operate on valves and shadow the Python builtins).
    """
    #simulate CO2 analysis
    #open('T')
    #sleep(5)
    #close('L')
    #display information with info(msg)
    info('unknown measurement script')
    if mx.peakcenter.before:
        peak_center(detector=mx.peakcenter.detector,isotope=mx.peakcenter.isotope)
    #open a plot panel for this detectors
    activate_detectors(*ACTIVE_DETECTORS)
    if mx.baseline.before:
        baselines(ncounts=mx.baseline.counts,mass=mx.baseline.mass, detector=mx.baseline.detector)
    #position mass spectrometer
    position_magnet(mx.multicollect.isotope, detector=mx.multicollect.detector)
    #gas is staged behind inlet
    #make a pipette volume
    close('S')
    sleep(1)
    # short equilibration used only for the whiff measurement
    meqtime = mx.whiff.eqtime
    equil(meqtime, False)
    result = whiff(ncounts=mx.whiff.counts, conditionals=mx.whiff.conditionals)
    info('Whiff result={}'.format(result))
    # count-weighting factor applied below; currently fixed at 1.0
    wab=1.0
    if result=='run_remainder':
        # whiff conditionals chose to admit the remainder of the gas
        open('R')
        open('S')
        # 'eqtime' is runtime-provided; wait out the remaining equilibration
        sleep(eqtime-meqtime)
        close('R')
        post_equilibration()
    elif result=='pump':
        # whiff conditionals chose to discard this aliquot and restart
        reset_measurement(ACTIVE_DETECTORS)
        activate_detectors(*ACTIVE_DETECTORS)
        #pump out spectrometer and sniff volume
        open('R')
        open(mx.equilibration.outlet)
        sleep(15)
        #close(mx.equilibration.outlet)
        close('R')
        sleep(1)
        open('S')
        sleep(2)
        close('T')
        sleep(2)
        close(mx.equilibration.outlet)
        equil(eqtime)
    multicollect(ncounts=mx.multicollect.counts*wab, integration_time=1)
    if mx.baseline.after:
        baselines(ncounts=mx.baseline.counts*wab, mass=mx.baseline.mass, detector=mx.baseline.detector,
                  settling_time=mx.baseline.settling_time)
    if mx.peakcenter.after:
        activate_detectors(*mx.peakcenter.detectors, **{'peak_center':True})
        peak_center(detector=mx.peakcenter.detector,isotope=mx.peakcenter.isotope)
    info('finished measure script')
def equil(eqt, do_post=True, set_tzero=True):
    """Equilibrate the spectrometer for *eqt* seconds, sniffing meanwhile.

    Args:
        eqt: equilibration time in seconds.
        do_post: run the post-equilibration script once eqt has elapsed.
        set_tzero: reset the measurement time origin when the inlet opens.
    """
    #post equilibration script triggered after eqtime elapsed
    #equilibrate is non blocking
    #so use either a sniff of sleep as a placeholder until eq finished
    equilibrate(eqtime=eqt, do_post_equilibration=do_post,
                inlet=mx.equilibration.inlet, outlet=mx.equilibration.outlet)
    if set_tzero:
        #equilibrate returns immediately after the inlet opens
        set_time_zero(0)
    sniff(eqt)
    #set default regression
    set_fits()
    set_baseline_fits()
#========================EOF============================================================== | docs/user_guide/operation/scripts/examples/argus/measurement/jan_cocktail_whiff.py | ACTIVE_DETECTORS=('H2','H1','AX','L1','L2', 'CDD')
#FITS=('Ar41:linear','Ar40:linear', 'Ar39:parabolic','Ar38:parabolic','Ar37:parabolic','Ar36:parabolic')
def main():
#simulate CO2 analysis
#open('T')
#sleep(5)
#close('L')
#display information with info(msg)
info('unknown measurement script')
if mx.peakcenter.before:
peak_center(detector=mx.peakcenter.detector,isotope=mx.peakcenter.isotope)
#open a plot panel for this detectors
activate_detectors(*ACTIVE_DETECTORS)
if mx.baseline.before:
baselines(ncounts=mx.baseline.counts,mass=mx.baseline.mass, detector=mx.baseline.detector)
#position mass spectrometer
position_magnet(mx.multicollect.isotope, detector=mx.multicollect.detector)
#gas is staged behind inlet
#make a pipette volume
close('S')
sleep(1)
meqtime = mx.whiff.eqtime
equil(meqtime, False)
result = whiff(ncounts=mx.whiff.counts, conditionals=mx.whiff.conditionals)
info('Whiff result={}'.format(result))
wab=1.0
if result=='run_remainder':
open('R')
open('S')
sleep(eqtime-meqtime)
close('R')
post_equilibration()
elif result=='pump':
reset_measurement(ACTIVE_DETECTORS)
activate_detectors(*ACTIVE_DETECTORS)
#pump out spectrometer and sniff volume
open('R')
open(mx.equilibration.outlet)
sleep(15)
#close(mx.equilibration.outlet)
close('R')
sleep(1)
open('S')
sleep(2)
close('T')
sleep(2)
close(mx.equilibration.outlet)
equil(eqtime)
multicollect(ncounts=mx.multicollect.counts*wab, integration_time=1)
if mx.baseline.after:
baselines(ncounts=mx.baseline.counts*wab, mass=mx.baseline.mass, detector=mx.baseline.detector,
settling_time=mx.baseline.settling_time)
if mx.peakcenter.after:
activate_detectors(*mx.peakcenter.detectors, **{'peak_center':True})
peak_center(detector=mx.peakcenter.detector,isotope=mx.peakcenter.isotope)
info('finished measure script')
def equil(eqt, do_post=True, set_tzero=True):
#post equilibration script triggered after eqtime elapsed
#equilibrate is non blocking
#so use either a sniff of sleep as a placeholder until eq finished
equilibrate(eqtime=eqt, do_post_equilibration=do_post,
inlet=mx.equilibration.inlet, outlet=mx.equilibration.outlet)
if set_tzero:
#equilibrate returns immediately after the inlet opens
set_time_zero(0)
sniff(eqt)
#set default regression
set_fits()
set_baseline_fits()
#========================EOF============================================================== | 0.284874 | 0.282858 |
import json
from argparse import ArgumentParser
from pathlib import Path
from typing import Set
import numpy as np
import pandas as pd
from loguru import logger
from utils.constants import (CATEGORICAL, FLOAT, INTEGER, NUMERICAL, ORDINAL)
from utils.utils import json_numpy_serialzer
# Please define the set of the ordinal attributes which values can be
# automatically sorted (using the sorted() python function)
IMPLICIT_ORDINAL_ATTRIBUTES = {'age'}
# Please define the set of the ordinal attributes which values are ordered
# manually
EXPLICIT_ORDINAL_ATTRIBUTES = {
'education': ['Preschool', '1st-4th', '5th-6th', '7th-8th', '9th', '10th',
'11th', '12th', 'HS-grad', 'Prof-school', 'Assoc-acdm',
'Assoc-voc', 'Some-college', 'Bachelors', 'Masters',
'Doctorate']}
ORDINAL_ATTRIBUTES = IMPLICIT_ORDINAL_ATTRIBUTES.union(
set(EXPLICIT_ORDINAL_ATTRIBUTES.keys()))
OUTPUT_FILE_SUFFIX = '.json'
JSON_SPACE_INDENT = 2
def main():
    """Generate the json metadata file for a csv dataset.

    Reads the csv given by --dataset, infers each column's type, and
    writes a ``{'columns': [...]}`` json file next to the dataset (or to
    --output when given).
    """
    # Parse the arguments
    argparser = ArgumentParser()
    argparser.add_argument('--dataset', '-i', type=str, required=True,
                           help='Path to the dataset in csv format')
    argparser.add_argument('--output', '-o', type=str,
                           help='Path where to write the json metadata file')
    args = argparser.parse_args()
    # Load the dataset
    logger.info(f'Loading the data from {args.dataset}')
    dataset_path = Path(args.dataset)
    dataset = pd.read_csv(dataset_path, header=0)
    logger.debug(f'Sample of the loaded dataset:\n{dataset}')
    dataset.info()
    # Generate the metadata of each attribute
    logger.info('Generating the metadata of the attributes')
    attributes = []
    for column in dataset.columns:
        # Get the numpy type of the column
        numpy_type = dataset[column].dtype
        logger.debug(f'{column} has the numpy type {numpy_type}')
        # Infer its type among (Integer, Float, Ordinal, Categorical)
        inferred_type = infer_type(column, numpy_type, ORDINAL_ATTRIBUTES)
        column_infos = {'name': column, 'type': inferred_type}
        logger.debug(column_infos)
        # If the type is numerical, set the min and max value
        if inferred_type in NUMERICAL:
            column_infos['min'] = dataset[column].min()
            column_infos['max'] = dataset[column].max()
        else:
            # Explicitly-ordinal values come from the hand-ordered table;
            # otherwise the sorted list of values observed in the dataset.
            ordered_values = EXPLICIT_ORDINAL_ATTRIBUTES.get(
                column, sorted(dataset[column].unique()))
            column_infos['size'] = len(ordered_values)
            # Metadata files expect ordinal/categorical values as strings,
            # so cast numeric values. BUG FIX: guard the empty case --
            # ordered_values[0] raised IndexError on an empty column.
            if ordered_values and isinstance(ordered_values[0], np.number):
                ordered_values = [str(value) for value in ordered_values]
            column_infos['i2s'] = ordered_values
        attributes.append(column_infos)
    # Write the json metadata file
    if args.output:
        output_path = args.output
    else:
        output_path = dataset_path.with_name(
            dataset_path.stem + OUTPUT_FILE_SUFFIX)
    # FIX: log typo 'Writting' corrected; mode 'w+' (read/write) replaced
    # with plain 'w' since the file is only written.
    logger.info(f'Writing the metadata to {output_path}')
    with open(output_path, 'w') as json_output_file:
        json.dump({'columns': attributes}, json_output_file,
                  indent=JSON_SPACE_INDENT, default=json_numpy_serialzer)
def infer_type(column: str, numpy_type: np.dtype, ordinal_attributes: Set[str]
               ) -> str:
    """Infer the type of an attribute given its numpy dtype.

    Args:
        column: The name of the column.
        numpy_type: The numpy dtype of the column. (FIX: the annotation was
            ``str``, but callers pass ``dataset[column].dtype``.)
        ordinal_attributes: The set of the ordinal attributes.

    Returns:
        One of the ORDINAL / INTEGER / FLOAT / CATEGORICAL constants.
    """
    # Ordinal membership overrides the dtype-based inference.
    if column in ordinal_attributes:
        return ORDINAL
    if np.issubdtype(numpy_type, np.integer):
        return INTEGER
    if np.issubdtype(numpy_type, np.floating):
        return FLOAT
    return CATEGORICAL
if __name__ == "__main__":
main() | executables/generate_metadata_file.py | import json
from argparse import ArgumentParser
from pathlib import Path
from typing import Set
import numpy as np
import pandas as pd
from loguru import logger
from utils.constants import (CATEGORICAL, FLOAT, INTEGER, NUMERICAL, ORDINAL)
from utils.utils import json_numpy_serialzer
# Please define the set of the ordinal attributes which values can be
# automatically sorted (using the sorted() python function)
IMPLICIT_ORDINAL_ATTRIBUTES = {'age'}
# Please define the set of the ordinal attributes which values are ordered
# manually
EXPLICIT_ORDINAL_ATTRIBUTES = {
'education': ['Preschool', '1st-4th', '5th-6th', '7th-8th', '9th', '10th',
'11th', '12th', 'HS-grad', 'Prof-school', 'Assoc-acdm',
'Assoc-voc', 'Some-college', 'Bachelors', 'Masters',
'Doctorate']}
ORDINAL_ATTRIBUTES = IMPLICIT_ORDINAL_ATTRIBUTES.union(
set(EXPLICIT_ORDINAL_ATTRIBUTES.keys()))
OUTPUT_FILE_SUFFIX = '.json'
JSON_SPACE_INDENT = 2
def main():
"""Generate the json metadata file."""
# Parse the arguments
argparser = ArgumentParser()
argparser.add_argument('--dataset', '-i', type=str, required=True,
help='Path to the dataset in csv format')
argparser.add_argument('--output', '-o', type=str,
help='Path where to write the json metadata file')
args = argparser.parse_args()
# Load the dataset
logger.info(f'Loading the data from {args.dataset}')
dataset_path = Path(args.dataset)
dataset = pd.read_csv(dataset_path, header=0)
logger.debug(f'Sample of the loaded dataset:\n{dataset}')
dataset.info()
# Generate the metadata of each attribute
logger.info('Generating the metadata of the attributes')
attributes = []
for column in dataset.columns:
# Get the numpy type of the column
numpy_type = dataset[column].dtype
logger.debug(f'{column} has the numpy type {numpy_type}')
# Infer its type among (Integer, Float, Ordinal, Categorical)
inferred_type = infer_type(column, numpy_type, ORDINAL_ATTRIBUTES)
column_infos = {'name': column, 'type': inferred_type}
logger.debug(column_infos)
# If the type is numerical, set the min and max value
if inferred_type in NUMERICAL:
column_infos['min'] = dataset[column].min()
column_infos['max'] = dataset[column].max()
else:
# If the type is explicitely ordinal, we retrieve its ordered
# values which are set manually in EXPLICIT_ORDINAL_ATTRIBUTES.
# Otherwise (implicit ordinal or categorical), we get the sorted
# list of values from the dataset (the second parameter of get()).
ordered_values = EXPLICIT_ORDINAL_ATTRIBUTES.get(
column, sorted(dataset[column].unique()))
column_infos['size'] = len(ordered_values)
# If the values are numbers, we cast them to strings as the
# metadata configuration files seem to have the values of ordinal
# and categorical attributes specified as strings
if isinstance(ordered_values[0], np.number):
ordered_values = [str(value) for value in ordered_values]
column_infos['i2s'] = ordered_values
attributes.append(column_infos)
# Write the json metadata file
if args.output:
output_path = args.output
else:
output_path = dataset_path.with_name(
dataset_path.stem + OUTPUT_FILE_SUFFIX)
logger.info(f'Writting the metadata to {output_path}')
with open(output_path, 'w+') as json_output_file:
json.dump({'columns': attributes}, json_output_file,
indent=JSON_SPACE_INDENT, default=json_numpy_serialzer)
def infer_type(column: str, numpy_type: str, ordinal_attributes: Set[str]
) -> str:
"""Infer the type of an attribute given its numpy type.
Args:
column: The name of the column.
numpy_type: The numpy type of the column.
ordinal_attributes: The set of the ordinal attributes.
"""
if column in ordinal_attributes:
return ORDINAL
if np.issubdtype(numpy_type, np.integer):
return INTEGER
if np.issubdtype(numpy_type, np.floating):
return FLOAT
return CATEGORICAL
if __name__ == "__main__":
main() | 0.844794 | 0.239199 |
import numpy as np
import matplotlib.pyplot as plt
from pandas.io.parsers import read_csv
import scipy.optimize as opt
from sklearn.preprocessing import PolynomialFeatures
def load_csv(file_name):
    """Read a headerless CSV file and return its contents as a float ndarray."""
    return read_csv(file_name, header=None).values.astype(float)
def gradient(thetas, XX, Y, lamb):
    """Gradient of the regularised logistic cost with respect to thetas."""
    sample_count = np.shape(XX)[0]
    residual = h(thetas, XX) - Y
    grad = (1/len(Y)) * np.dot(XX.T, residual)
    # NOTE: the bias column's theta is regularised as well here (matches cost)
    return grad + (lamb/sample_count) * np.c_[thetas]
def cost(thetas, X, Y, lamb):
    """Regularised cross-entropy cost for logistic regression."""
    sample_count = np.shape(X)[0]
    predictions = h(thetas, X)
    log_loss = (-1/sample_count) * (np.dot(Y.T, np.log(predictions))
                                    + np.dot((1-Y).T, np.log(1-predictions)))
    # L2 penalty; includes the bias term, consistent with gradient()
    return log_loss + (lamb/(2*sample_count)) * (thetas**2).sum()
def sigmoid(Z):
    """Element-wise logistic function 1 / (1 + e^-Z)."""
    return 1 / (1 + np.exp(-Z))
def h(thetas, X):
    """Hypothesis: sigmoid of the linear scores, shaped as a column vector."""
    linear_scores = np.dot(X, thetas)
    return np.c_[sigmoid(linear_scores)]
def show_decision_boundary(thetas, X, Y, poly):
    """Scatter the data and draw the 0.5-probability decision contour.

    Saves the figure to images/regresion_logistic_regularized.png and
    shows it.

    Args:
        thetas: fitted parameter vector.
        X: (m, 2) raw (un-expanded) feature matrix.
        Y: (m, 1) array of 0/1 labels.
        poly: the PolynomialFeatures transformer used to expand X.
    """
    plt.figure()
    x1_min, x1_max = X[:, 0].min(), X[:, 0].max()
    x2_min, x2_max = X[:, 1].min(), X[:, 1].max()
    xx1, xx2 = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    # NOTE(review): this local ``h`` shadows the module-level hypothesis
    # function h() -- consider renaming.
    h = sigmoid(poly.fit_transform(np.c_[xx1.ravel(), xx2.ravel()]).dot(thetas))
    h = h.reshape(xx1.shape)
    positives = np.where(Y == 1)
    negatives = np.where(Y == 0)
    plt.scatter(X[positives, 0], X[positives, 1], marker='+', color='blue')
    plt.scatter(X[negatives, 0], X[negatives, 1], color='red')
    plt.contour(xx1, xx2, h, [0.5], linewidths=1, colors='g')
    plt.savefig("images/regresion_logistic_regularized.png")
    plt.show()
    plt.close()
def evaluate(thetas, X, Y, degree):
    """Classification accuracy of *thetas* on (X, Y) after polynomial expansion."""
    features = PolynomialFeatures(degree).fit_transform(X)
    predictions = h(thetas, features)
    # predicted positive but labelled 0 / predicted negative but labelled 1
    passed_missed = np.logical_and(predictions >= 0.5, Y == 0).sum()
    failed_missed = np.logical_and(predictions < 0.5, Y == 1).sum()
    total = predictions.shape[0]
    return (total - (passed_missed + failed_missed)) / total
def train(X, Y, degree=2, lamb=1, verbose=True):
    """Fit regularised logistic regression on polynomial features of X.

    Args:
        X: (m, k) raw feature matrix.
        Y: (m, 1) array of 0/1 labels.
        degree: polynomial expansion degree.
        lamb: L2 regularisation strength.
        verbose: forward fmin_tnc progress messages when True.

    Returns:
        The optimised theta vector (1-D ndarray).
    """
    poly = PolynomialFeatures(degree)
    X_poly = poly.fit_transform(X)
    # FIX: removed the unused local ``m`` (number of samples was never read)
    n = np.shape(X_poly)[1]
    thetas = np.zeros((n, 1), dtype=float)  # initial guess: all zeros
    result = opt.fmin_tnc(func=cost, x0=thetas, fprime=gradient,
                          args=(X_poly, Y, lamb), disp=5 if verbose else 0)
    # fmin_tnc returns (x_opt, nfeval, rc); keep only the parameters
    return result[0]
def main():
    """Load the dataset, fit the classifier, report accuracy, plot the boundary."""
    # DATA PREPROCESSING
    datos = load_csv("data/ex2data2.csv")
    X = datos[:, :-1]
    Y = datos[:, -1][np.newaxis].T
    degree = 2
    thetas = train(X, Y, degree=degree)
    print("Accuracy: ", evaluate(thetas, X, Y, degree)*100, "%")
    # BUG FIX: ``poly`` was undefined here (NameError at runtime); build the
    # same transformer train() used internally -- show_decision_boundary
    # calls fit_transform itself.
    poly = PolynomialFeatures(degree)
    show_decision_boundary(thetas, X, Y, poly)
if __name__ == "__main__":
main() | src/regression/regresion_logistic_regularized.py | import numpy as np
import matplotlib.pyplot as plt
from pandas.io.parsers import read_csv
import scipy.optimize as opt
from sklearn.preprocessing import PolynomialFeatures
def load_csv(file_name):
values = read_csv(file_name, header=None).values
return values.astype(float)
def gradient(thetas, XX, Y, lamb):
m = np.shape(XX)[0]
H = h(thetas, XX)
grad = (1/len(Y)) * np.dot(XX.T, H-Y)
grad += (lamb/m) * np.c_[thetas]
return grad
def cost(thetas, X, Y, lamb):
m = np.shape(X)[0]
H = h(thetas, X)
c = (-1/m) * (np.dot(Y.T, np.log(H)) + np.dot((1-Y).T, np.log(1-H)))
c += (lamb/(2*m)) * (thetas**2).sum()
return c
def sigmoid(Z):
return 1/(1 + np.e**(-Z))
def h(thetas, X):
return np.c_[sigmoid(np.dot(X, thetas))]
def show_decision_boundary(thetas, X, Y, poly):
plt.figure()
x1_min, x1_max = X[:, 0].min(), X[:, 0].max()
x2_min, x2_max = X[:, 1].min(), X[:, 1].max()
xx1, xx2 = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
h = sigmoid(poly.fit_transform(np.c_[xx1.ravel(), xx2.ravel()]).dot(thetas))
h = h.reshape(xx1.shape)
positives = np.where(Y == 1)
negatives = np.where(Y == 0)
plt.scatter(X[positives, 0], X[positives, 1], marker='+', color='blue')
plt.scatter(X[negatives, 0], X[negatives, 1], color='red')
plt.contour(xx1, xx2, h, [0.5], linewidths=1, colors='g')
plt.savefig("images/regresion_logistic_regularized.png")
plt.show()
plt.close()
def evaluate(thetas, X, Y, degree):
poly = PolynomialFeatures(degree)
X_poly = poly.fit_transform(X)
result = h(thetas, X_poly)
passed_missed = np.logical_and((result >= 0.5), (Y == 0)).sum()
failed_missed = np.logical_and((result < 0.5), (Y == 1)).sum()
errors = (passed_missed + failed_missed)
return (result.shape[0] - errors) / (result.shape[0])
def train(X, Y, degree=2, lamb=1, verbose = True):
poly = PolynomialFeatures(degree)
X_poly = poly.fit_transform(X)
m = np.shape(X_poly)[0]
n = np.shape(X_poly)[1]
thetas = np.zeros((n, 1), dtype=float)
result = opt.fmin_tnc(func=cost, x0=thetas, fprime=gradient, args=(X_poly, Y, lamb), disp = 5 if verbose else 0)
thetas = result[0]
return thetas
def main():
# DATA PREPROCESSING
datos = load_csv("data/ex2data2.csv")
X = datos[:, :-1]
Y = datos[:, -1][np.newaxis].T
thetas = train(X, Y)
print("Accuracy: ", evaluate(thetas, X, Y, 2)*100, "%")
show_decision_boundary(thetas, X, Y, poly)
if __name__ == "__main__":
main() | 0.577138 | 0.588889 |
import cv2
import base64
import numpy as np
import pandas as pd
from unittest import TestCase, main
from ..core.constants import IMAGE_ID_COL, RLE_MASK_COL, DEFAULT_IMAGE_SIZE
from ..core.utils import (decode_rle, rescale, check_square_size,
get_image_rle_masks, decode_image_b64,
check_image_rgb, convert_history,
ImageMaskDownsampler)
class ConvertHistoryTest(TestCase):
    """
    Tests `utils.convert_history` function.
    """

    def test_convert_history(self):
        """
        Tests that the function produces an
        identical history dictionary with the
        correct datatypes.
        """
        # NOTE(review): assertDictEqual compares with ==, and
        # np.float64(0.1) == 0.1, so this passes even if values stay
        # np.float64; an explicit type check would strengthen the test.
        test_case_history = {
            'loss': [np.float64(0.1), np.float64(0.02)],
            'val_loss': [np.float64(0.11), np.float64(0.019)]}
        expected = {'loss': [0.1, 0.02], 'val_loss': [0.11, 0.019]}
        self.assertDictEqual(
            convert_history(test_case_history),
            expected)
class CheckImageRgbTest(TestCase):
    """
    Tests `utils.check_image_rgb` function.
    """

    def test_is_rgb(self):
        """
        Tests that exception is not raised for a 3-channel image.
        """
        test_case_image = np.zeros(shape=(10, 10, 3))
        check_image_rgb(test_case_image)

    def test_not_rgb(self):
        """
        Tests that exception is raised for non-RGB shapes.
        """
        for test_case_shape in ((10, 10), (20, 20, 1)):
            with self.assertRaises(ValueError):
                # BUG FIX: this previously called check_square_size(), so
                # check_image_rgb's failure path was never exercised.
                check_image_rgb(np.zeros(shape=test_case_shape))
class CheckSquareSizeTest(TestCase):
    """
    Tests `utils.check_square_size` function.
    """

    def test_is_square(self):
        """
        Tests that exception is not raised.
        """
        # square (width == height) sizes must be accepted silently
        for test_case_size in ((1, 1), (20, 20), (40, 40)):
            check_square_size(test_case_size)

    def test_not_square(self):
        """
        Tests that exception is raised.
        """
        # non-square sizes must be rejected with ValueError
        for test_case_size in ((1, 2), (20, 24)):
            with self.assertRaises(ValueError):
                check_square_size(test_case_size)
class DecodeRleTest(TestCase):
    """
    Tests `utils.decode_rle` function.
    """

    def test_decode_rle(self):
        """
        Tests correct decoding.
        """
        # Two runs of 5 pixels starting at positions 11 and 20.
        # NOTE(review): the expected mask implies 1-based, column-major
        # pixel numbering -- confirm against decode_rle's implementation.
        test_case_rle = '11 5 20 5'
        test_case_size = (6, 6)
        expected = np.array([
            [0, 0, 1, 0, 0, 0],
            [0, 0, 1, 1, 0, 0],
            [0, 0, 1, 1, 0, 0],
            [0, 0, 0, 1, 0, 0],
            [0, 1, 0, 1, 0, 0],
            [0, 1, 0, 1, 0, 0]])
        np.testing.assert_array_equal(
            decode_rle(test_case_rle, test_case_size), expected)
class RescaleTest(TestCase):
    """
    Tests `utils.rescale` function.
    """

    def test_rescale(self):
        """
        Test correct rescaling.
        """
        # a 10x10 identity image at 20% scale should collapse to 2x2
        test_case_image = (np.eye(10, 10) * 255).astype('uint8')
        test_case_scale = 0.2
        expected = np.array([[255, 0], [0, 255]])
        np.testing.assert_array_equal(
            rescale(test_case_image, test_case_scale), expected)
class GetImageRleMasksTest(TestCase):
    """
    Tests `utils.get_image_rle_masks` function.
    """

    def test_get_image_rle_masks(self):
        """
        Tests encoded masks are correctly computed.
        """
        # Masks belonging to the same image id are concatenated into one
        # RLE string; a NaN (image with no mask) passes through unchanged.
        test_case_ground_truth = pd.DataFrame({
            IMAGE_ID_COL: ['test_1', 'test_2', 'test_2', 'test_3'],
            RLE_MASK_COL: [np.nan, '10 17', '59 2', '0']})
        expected = pd.DataFrame({
            IMAGE_ID_COL: ['test_1', 'test_2', 'test_3'],
            RLE_MASK_COL: [np.nan, '10 17 59 2', '0']})
        pd.testing.assert_frame_equal(
            get_image_rle_masks(test_case_ground_truth),
            expected)
class ImageMaskDownsamplerTest(TestCase):
    """
    Tests `utils.ImageMaskDownsampler`.
    """

    def test_size_not_correct(self):
        """
        Tests that exception is raised when mask sizes are not correct.
        """
        for test_case_output_size in ((128, 129), (129, 129)):
            with self.assertRaises(ValueError):
                _ = ImageMaskDownsampler(output_size=test_case_output_size)

    def test_downsample(self):
        """
        Tests `utils.ImageMaskDownsampler.downsample` correctly
        downsamples masks.
        """
        # Corner pixels set so downsampling must preserve both extremes.
        # FIX: removed the dead (128, 128) downsampler that was created
        # before the loop and immediately shadowed inside it.
        test_case_mask = np.zeros(shape=DEFAULT_IMAGE_SIZE)
        test_case_mask[0, 0] = 1
        test_case_mask[-1, -1] = 1
        for test_case_output_size in ((128, 128), (2, 2), (1, 1)):
            downsampler = ImageMaskDownsampler(output_size=test_case_output_size)
            result = downsampler.downsample(test_case_mask)
            expected = np.zeros(shape=test_case_output_size)
            expected[0, 0] = 1
            expected[-1, -1] = 1
            np.testing.assert_array_equal(result, expected)
class DecodeImageB64Test(TestCase):
    """
    Tests `utils.decode_image_b64` function.
    """

    def test_decode_image_b64(self):
        """
        Tests that the image is correctly decoded.
        """
        # Round trip: image -> png bytes -> base64 string -> decoded image.
        test_case_image = np.zeros(shape=(50, 50, 3))
        test_case_image_bytes = cv2.imencode('.png', test_case_image)[1].tobytes()
        test_case_image_b64 = base64.b64encode(test_case_image_bytes).decode()
        result = decode_image_b64(test_case_image_b64)
        np.testing.assert_array_equal(result, test_case_image)
if __name__ == '__main__':
main() | asdc/tests/test_utils.py |
import cv2
import base64
import numpy as np
import pandas as pd
from unittest import TestCase, main
from ..core.constants import IMAGE_ID_COL, RLE_MASK_COL, DEFAULT_IMAGE_SIZE
from ..core.utils import (decode_rle, rescale, check_square_size,
get_image_rle_masks, decode_image_b64,
check_image_rgb, convert_history,
ImageMaskDownsampler)
class ConvertHistoryTest(TestCase):
"""
Tests `utils.convert_history` function.
"""
def test_convert_history(self):
"""
Tests that the function produces an
identical history dictionary with the
correct datatypes.
"""
test_case_history = {
'loss': [np.float64(0.1), np.float64(0.02)],
'val_loss': [np.float64(0.11), np.float64(0.019)]}
expected = {'loss': [0.1, 0.02], 'val_loss': [0.11, 0.019]}
self.assertDictEqual(
convert_history(test_case_history),
expected)
class CheckImageRgbTest(TestCase):
"""
Tests `utils.check_image_rgb` function.
"""
def test_is_rgb(self):
"""
Tests that exception is not raised.
"""
test_case_image = np.zeros(shape=(10, 10, 3))
check_image_rgb(test_case_image)
def test_not_rgb(self):
"""
Tests that exception is raised.
"""
for test_case_shape in ((10, 10), (20, 20, 1)):
with self.assertRaises(ValueError):
check_square_size(np.zeros(shape=test_case_shape))
class CheckSquareSizeTest(TestCase):
    """Tests `utils.check_square_size` function."""

    def test_is_square(self):
        """Tests that square (height == width) sizes pass without raising."""
        for test_case_size in ((1, 1), (20, 20), (40, 40)):
            check_square_size(test_case_size)

    def test_not_square(self):
        """Tests that non-square sizes raise ValueError."""
        for test_case_size in ((1, 2), (20, 24)):
            with self.assertRaises(ValueError):
                check_square_size(test_case_size)
class DecodeRleTest(TestCase):
    """Tests `utils.decode_rle` function."""

    def test_decode_rle(self):
        """Tests correct decoding.

        The RLE string is a flat sequence of (start, length) pairs;
        pixel positions are 1-indexed in column-major (Fortran) order,
        as the expected mask below demonstrates.
        """
        test_case_rle = '11 5 20 5'
        test_case_size = (6, 6)
        expected = np.array([
            [0, 0, 1, 0, 0, 0],
            [0, 0, 1, 1, 0, 0],
            [0, 0, 1, 1, 0, 0],
            [0, 0, 0, 1, 0, 0],
            [0, 1, 0, 1, 0, 0],
            [0, 1, 0, 1, 0, 0]])
        np.testing.assert_array_equal(
            decode_rle(test_case_rle, test_case_size), expected)
class RescaleTest(TestCase):
    """Tests `utils.rescale` function."""

    def test_rescale(self):
        """Tests correct rescaling.

        A 10x10 identity-diagonal image at scale 0.2 must shrink to 2x2
        while preserving the diagonal pattern.
        """
        test_case_image = (np.eye(10, 10) * 255).astype('uint8')
        test_case_scale = 0.2
        expected = np.array([[255, 0], [0, 255]])
        np.testing.assert_array_equal(
            rescale(test_case_image, test_case_scale), expected)
class GetImageRleMasksTest(TestCase):
    """Tests `utils.get_image_rle_masks` function."""

    def test_get_image_rle_masks(self):
        """Tests encoded masks are correctly computed.

        Multiple RLE entries for the same image id are merged into one
        space-joined RLE string; a NaN (no-mask) entry is preserved.
        """
        test_case_ground_truth = pd.DataFrame({
            IMAGE_ID_COL: ['test_1', 'test_2', 'test_2', 'test_3'],
            RLE_MASK_COL: [np.nan, '10 17', '59 2', '0']})
        expected = pd.DataFrame({
            IMAGE_ID_COL: ['test_1', 'test_2', 'test_3'],
            RLE_MASK_COL: [np.nan, '10 17 59 2', '0']})
        pd.testing.assert_frame_equal(
            get_image_rle_masks(test_case_ground_truth),
            expected)
class ImageMaskDownsamplerTest(TestCase):
    """Tests `utils.ImageMaskDownsampler`."""

    def test_size_not_correct(self):
        """Tests that exception is raised for invalid output sizes."""
        # NOTE(review): (129, 129) is square yet expected to raise —
        # presumably sizes must evenly divide DEFAULT_IMAGE_SIZE; confirm
        # against the ImageMaskDownsampler implementation.
        for test_case_output_size in ((128, 129), (129, 129)):
            with self.assertRaises(ValueError):
                _ = ImageMaskDownsampler(output_size=test_case_output_size)

    def test_downsample(self):
        """Tests `utils.ImageMaskDownsampler.downsample` correctly
        downsamples masks: set corner pixels must survive downsampling
        to each tested output size.
        """
        # BUG FIX: removed a dead `ImageMaskDownsampler(output_size=(128, 128))`
        # assignment that was immediately shadowed inside the loop.
        test_case_mask = np.zeros(shape=DEFAULT_IMAGE_SIZE)
        test_case_mask[0, 0] = 1
        test_case_mask[-1, -1] = 1
        for test_case_output_size in ((128, 128), (2, 2), (1, 1)):
            downsampler = ImageMaskDownsampler(output_size=test_case_output_size)
            result = downsampler.downsample(test_case_mask)
            expected = np.zeros(shape=test_case_output_size)
            expected[0, 0] = 1
            expected[-1, -1] = 1
            np.testing.assert_array_equal(result, expected)
class DecodeImageB64Test(TestCase):
    """Tests `utils.decode_image_b64` function."""

    def test_decode_image_b64(self):
        """Round-trips a black 50x50 RGB image through PNG + base64.

        The image is PNG-encoded with OpenCV, base64-encoded to a string,
        then decoded back; PNG is lossless, so the decoded pixels must
        match the original array exactly.
        """
        test_case_image = np.zeros(shape=(50, 50, 3))
        test_case_image_bytes = cv2.imencode('.png', test_case_image)[1].tobytes()
        test_case_image_b64 = base64.b64encode(test_case_image_bytes).decode()
        result = decode_image_b64(test_case_image_b64)
        np.testing.assert_array_equal(result, test_case_image)
if __name__ == '__main__':
    main()
"""Augmentation ops."""
import functools
import random
from third_party import augment_ops
from third_party import data_util as simclr_ops
from third_party import rand_augment as randaug
import tensorflow as tf
def base_augment(is_training=True, **kwargs):
  """Base (resize and crop) augmentation spec.

  Args:
    is_training: Whether to build the training-time pipeline.
    **kwargs: Must contain `size`, the target image size.

  Returns:
    A list of (op_name, op_kwargs) tuples: resize (+ crop when training).
  """
  size = kwargs.get('size')
  pad = int(0.125 * size)  # Crop padding is 1/8 of the image size.
  if is_training:
    return [('resize', {'size': size}), ('crop', {'size': pad})]
  return [('resize', {'size': size})]
def crop_and_resize_augment(is_training=True, **kwargs):
  """Random crop-and-resize augmentation spec.

  Args:
    is_training: Whether to build the training-time pipeline.
    **kwargs: `size` (target size) and optional `min_scale` (default 0.4).

  Returns:
    A list of (op_name, op_kwargs) tuples.
  """
  size = kwargs.get('size')
  min_scale = kwargs.get('min_scale', 0.4)
  if not is_training:
    return [('resize', {'size': size})]
  return [('crop_and_resize', {'size': size, 'min_scale': min_scale})]
def jitter_augment(aug=None, is_training=True, **kwargs):
  """Appends a color-jitter op to an augmentation spec list.

  Args:
    aug: Existing list of (op_name, op_kwargs) tuples, or None.
    is_training: When False, the spec is returned unchanged.
    **kwargs: Optional `brightness`, `contrast`, `saturation`, `hue`.

  Returns:
    The spec list, extended with a 'jitter' op when training.
  """
  aug = [] if aug is None else aug
  if not is_training:
    return aug
  jitter_params = {
      'brightness': kwargs.get('brightness', 0.125),
      'contrast': kwargs.get('contrast', 0.4),
      'saturation': kwargs.get('saturation', 0.4),
      'hue': kwargs.get('hue', 0),
  }
  return aug + [('jitter', jitter_params)]
def cutout_augment(aug=None, is_training=True, **kwargs):
  """Appends a cutout op to an augmentation spec list.

  Args:
    aug: Existing list of (op_name, op_kwargs) tuples, or None.
    is_training: When False, the spec is returned unchanged.
    **kwargs: Optional `scale` (default 0.5).

  Returns:
    The spec list, extended with a 'cutout' op when training.
  """
  aug = [] if aug is None else aug
  if not is_training:
    return aug
  return aug + [('cutout', {'scale': kwargs.get('scale', 0.5)})]
def randerase_augment(aug=None, is_training=True, **kwargs):
  """Appends a random-erase op to an augmentation spec list.

  Args:
    aug: Existing list of (op_name, op_kwargs) tuples, or None.
    is_training: When False, the spec is returned unchanged.
    **kwargs: Optional `scale` (default 0.3); used for both scale bounds.

  Returns:
    The spec list, extended with a 'randerase' op when training.
  """
  aug = [] if aug is None else aug
  if not is_training:
    return aug
  erase_scale = kwargs.get('scale', 0.3)
  return aug + [('randerase', {'scale': (erase_scale, erase_scale),
                               'ratio': 1.0})]
def hflip_augment(aug=None, is_training=True, **kwargs):
  """Appends a horizontal-flip op to an augmentation spec list."""
  del kwargs  # Unused; accepted for a uniform builder signature.
  aug = [] if aug is None else aug
  return aug + [('hflip', {})] if is_training else aug
def rotate90_augment(aug=None, is_training=True, **kwargs):
  """Appends a 90-degree rotation op to an augmentation spec list."""
  del kwargs  # Unused; accepted for a uniform builder signature.
  aug = [] if aug is None else aug
  return aug + [('rotate90', {})] if is_training else aug
def rotate180_augment(aug=None, is_training=True, **kwargs):
  """Appends a 180-degree rotation op to an augmentation spec list."""
  del kwargs  # Unused; accepted for a uniform builder signature.
  aug = [] if aug is None else aug
  return aug + [('rotate180', {})] if is_training else aug
def rotate270_augment(aug=None, is_training=True, **kwargs):
  """Appends a 270-degree rotation op to an augmentation spec list."""
  del kwargs  # Unused; accepted for a uniform builder signature.
  aug = [] if aug is None else aug
  return aug + [('rotate270', {})] if is_training else aug
def blur_augment(aug=None, is_training=True, **kwargs):
  """Appends a random-blur op to an augmentation spec list.

  Args:
    aug: Existing list of (op_name, op_kwargs) tuples, or None.
    is_training: When False, the spec is returned unchanged.
    **kwargs: Optional `prob` (default 0.5).

  Returns:
    The spec list, extended with a 'blur' op when training.
  """
  aug = [] if aug is None else aug
  if not is_training:
    return aug
  return aug + [('blur', {'prob': kwargs.get('prob', 0.5)})]
def randaugment(aug=None, is_training=True, **kwargs):
  """Appends a RandAugment op to an augmentation spec list.

  Args:
    aug: Existing list of (op_name, op_kwargs) tuples, or None.
    is_training: When False, the spec is returned unchanged.
    **kwargs: Optional `num_layers`, `prob_to_apply`, `magnitude`,
      `num_levels`, `mode`, `size`.

  Returns:
    The spec list, extended with a 'randaug' op when training.
  """
  aug = [] if aug is None else aug
  if not is_training:
    return aug
  randaug_params = {
      'num_layers': kwargs.get('num_layers', 2),
      'prob_to_apply': kwargs.get('prob_to_apply', 0.5),
      'magnitude': kwargs.get('magnitude', None),
      'num_levels': kwargs.get('num_levels', None),
      'size': kwargs.get('size', None),
      'mode': kwargs.get('mode', 'all'),
  }
  return aug + [('randaug', randaug_params)]
class CutOut(object):
  """Cutout: zeroes a random square region of the image during training."""

  def __init__(self, scale=0.5, random_scale=False):
    # scale: side of the cutout square as a fraction of the image side.
    # random_scale: if True, sample the fraction uniformly in [0, scale).
    self.scale = scale
    self.random_scale = random_scale

  @staticmethod
  def cutout(image, scale=0.5):
    """Applies Cutout.

    Args:
      image: A 3D tensor (width, height, depth).
      scale: A scalar for the width or height ratio for cutout region.

    Returns:
      A 3D tensor (width, height, depth) after cutout.
    """
    img_shape = tf.shape(image)
    img_height, img_width = img_shape[-3], img_shape[-2]
    img_height = tf.cast(img_height, dtype=tf.float32)
    img_width = tf.cast(img_width, dtype=tf.float32)
    cutout_size = (img_height * scale, img_width * scale)
    # Never let the cutout collapse below one pixel per dimension.
    cutout_size = (tf.maximum(1.0,
                              cutout_size[0]), tf.maximum(1.0, cutout_size[1]))

    def _create_cutout_mask():
      # Sample the cutout center anywhere in the image; the box is clipped
      # at the borders, so cutouts near an edge are smaller than requested.
      height_loc = tf.round(
          tf.random.uniform(shape=[], minval=0, maxval=img_height))
      width_loc = tf.round(
          tf.random.uniform(shape=[], minval=0, maxval=img_width))
      upper_coord = (tf.maximum(0.0, height_loc - cutout_size[0] // 2),
                     tf.maximum(0.0, width_loc - cutout_size[1] // 2))
      lower_coord = (tf.minimum(img_height, height_loc + cutout_size[0] // 2),
                     tf.minimum(img_width, width_loc + cutout_size[1] // 2))
      mask_height = lower_coord[0] - upper_coord[0]
      mask_width = lower_coord[1] - upper_coord[1]
      # Mask is 0 inside the cutout box and 1 elsewhere; multiplying it
      # into the image zeroes the box.
      padding_dims = ((upper_coord[0], img_height - lower_coord[0]),
                      (upper_coord[1], img_width - lower_coord[1]))
      mask = tf.zeros((mask_height, mask_width), dtype=tf.float32)
      mask = tf.pad(
          mask, tf.cast(padding_dims, dtype=tf.int32), constant_values=1.0)
      return tf.expand_dims(mask, -1)

    return _create_cutout_mask() * image

  def __call__(self, image, is_training=True):
    if is_training:
      if self.random_scale:
        scale = tf.random.uniform(shape=[], minval=0.0, maxval=self.scale)
      else:
        scale = self.scale
    # `scale` is defined (and used) only on the training path; at eval the
    # conditional below returns the image untouched.
    return self.cutout(image, scale) if is_training else image
class RandomErase(object):
  """RandomErasing.

  Similar to Cutout, but supports various sizes and aspect ratios of the
  erased rectangle, and an arbitrary fill value.
  """

  def __init__(self, scale=(0.02, 0.3), ratio=3.3, value=0.0):
    # scale: (min, max) fraction of the image area to erase.
    # ratio: maximum aspect ratio of the rectangle; must be >= 1 (the
    #   inverse ratio is sampled with probability 0.5 in `cutout`).
    # value: fill value for the erased region.
    self.scale = scale
    self.ratio = ratio
    self.value = value
    assert self.ratio >= 1

  @staticmethod
  def cutout(image, scale=(0.02, 0.3), ratio=3.3, value=0.0):
    """Applies Cutout with various sizes and aspect ratios of rectangle.

    Args:
      image: A 3D tensor (width, height, depth).
      scale: A tuple for ratio of cutout region.
      ratio: A scalar for aspect ratio.
      value: A value to fill in cutout region.

    Returns:
      A 3D tensor (width, height, depth) after cutout.
    """
    image_height = tf.shape(image)[0]
    image_width = tf.shape(image)[1]
    image_depth = tf.shape(image)[2]

    # Sample the center location in the image where the zero mask will be
    # applied.
    def _cutout(img):
      # Sample the erased area as a fraction of the image area and a random
      # aspect ratio in [1, ratio], inverted half the time.
      area = tf.cast(image_height * image_width, tf.float32)
      erase_area = tf.random.uniform(
          shape=[], minval=scale[0], maxval=scale[1]) * area
      aspect_ratio = tf.random.uniform(shape=[], minval=1, maxval=ratio)
      aspect_ratio = tf.cond(
          tf.random.uniform(shape=[]) > 0.5, lambda: aspect_ratio,
          lambda: 1.0 / aspect_ratio)
      # Rectangle sides derived from area and aspect ratio, clamped so the
      # rectangle always fits inside the image.
      pad_h = tf.cast(
          tf.math.round(tf.math.sqrt(erase_area * aspect_ratio)),
          dtype=tf.int32)
      pad_h = tf.minimum(pad_h, image_height - 1)
      pad_w = tf.cast(
          tf.math.round(tf.math.sqrt(erase_area / aspect_ratio)),
          dtype=tf.int32)
      pad_w = tf.minimum(pad_w, image_width - 1)
      # NOTE: despite the names, these are the rectangle's top-left row and
      # column (sampled so the rectangle stays inside the image).
      cutout_center_height = tf.random.uniform(
          shape=[], minval=0, maxval=image_height - pad_h, dtype=tf.int32)
      cutout_center_width = tf.random.uniform(
          shape=[], minval=0, maxval=image_width - pad_w, dtype=tf.int32)
      # Build a mask that is 0 inside the rectangle and 1 outside by padding
      # a zero block out to the full image size.
      lower_pad = cutout_center_height
      upper_pad = tf.maximum(0, image_height - cutout_center_height - pad_h)
      left_pad = cutout_center_width
      right_pad = tf.maximum(0, image_width - cutout_center_width - pad_w)
      cutout_shape = [
          image_height - (lower_pad + upper_pad),
          image_width - (left_pad + right_pad)
      ]
      padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
      mask = tf.pad(
          tf.zeros(cutout_shape, dtype=img.dtype),
          padding_dims,
          constant_values=1)
      mask = tf.expand_dims(mask, -1)
      mask = tf.tile(mask, [1, 1, image_depth])
      # Replace masked pixels with the fill value.
      img = tf.where(
          tf.equal(mask, 0),
          tf.ones_like(img, dtype=img.dtype) * value, img)
      return img

    return _cutout(image)

  def __call__(self, image, is_training=True):
    # Erasing is applied only during training.
    return self.cutout(image, self.scale, self.ratio,
                       self.value) if is_training else image
class Resize(object):
  """Resizes images to a fixed (height, width)."""

  def __init__(self, size, method=tf.image.ResizeMethod.BILINEAR):
    """Initializes the op.

    Args:
      size: An int, or a list/tuple of one or two ints, giving the output
        (height, width).
      method: A `tf.image.ResizeMethod` used for interpolation.
    """
    self.size = self._check_input(size)
    self.method = method

  def _check_input(self, size):
    """Normalizes `size` into a (height, width) tuple.

    BUG FIX: the original raised TypeError for a valid two-element
    sequence such as (224, 224) — only ints and one-element sequences
    were accepted, contradicting the error message (and sibling
    `RandomCrop`, which accepts longer sequences). Length-2 sequences
    are now accepted.

    Raises:
      TypeError: If `size` is not an int or a 1/2-element list/tuple.
    """
    if isinstance(size, int):
      return (size, size)
    if isinstance(size, (list, tuple)):
      if len(size) == 1:
        return tuple(size) * 2
      if len(size) == 2:
        return tuple(size)
    raise TypeError('size must be an integer or list/tuple of integers')

  def __call__(self, image, is_training=True):
    # NOTE(review): resizing is skipped at eval time; presumably eval
    # images already have the target size — confirm with the eval pipeline.
    return tf.image.resize(
        image, self.size, method=self.method) if is_training else image
class RandomCrop(object):
  """Reflect-pads the image, then randomly crops back to its original size."""

  def __init__(self, size):
    # `size` is the per-side padding amount, not the crop size.
    self.pad = self._check_input(size)

  def _check_input(self, size):
    """Checks pad shape.

    Args:
      size: Scalar, list or tuple for pad size.

    Returns:
      A tuple for pad size.
    """
    if isinstance(size, int):
      size = (size, size)
    elif isinstance(size, (list, tuple)):
      # A length-2 sequence passes through unchanged; longer sequences are
      # truncated to their first two entries.
      if len(size) == 1:
        size = tuple(size) * 2
      elif len(size) > 2:
        size = tuple(size[:2])
    else:
      raise TypeError('size must be an integer or list/tuple of integers')
    return size

  def __call__(self, image, is_training=True):
    if is_training:
      img_size = image.shape[-3:]
      # Pad height and width with reflection (channels untouched), then
      # crop a window of the original size at a random offset.
      image = tf.pad(
          image, [[self.pad[0]] * 2, [self.pad[1]] * 2, [0] * 2],
          mode='REFLECT')
      image = tf.image.random_crop(image, img_size)
    return image
class RandomCropAndResize(object):
  """Randomly crops a sub-window and resizes it to a fixed output size."""

  def __init__(self, size, min_scale=0.4):
    """Initializes the op.

    Args:
      size: An int, or a list/tuple of one or two ints, giving the output
        (height, width).
      min_scale: Minimum crop side as a fraction of the input height.
    """
    self.min_scale = min_scale
    self.size = self._check_input(size)

  def _check_input(self, size):
    """Normalizes `size` into a (height, width) tuple.

    BUG FIX: the original raised TypeError for a valid two-element
    sequence such as (224, 224) — only ints and one-element sequences
    were accepted, contradicting the error message. Length-2 sequences
    are now accepted.

    Raises:
      TypeError: If `size` is not an int or a 1/2-element list/tuple.
    """
    if isinstance(size, int):
      return (size, size)
    if isinstance(size, (list, tuple)):
      if len(size) == 1:
        return tuple(size) * 2
      if len(size) == 2:
        return tuple(size)
    raise TypeError('size must be an integer or list/tuple of integers')

  def __call__(self, image, is_training=True):
    if is_training:
      # Sample the crop side in [min_scale * H, H], crop a square window
      # (clamped to the image width), then resize to the target size.
      width = tf.random.uniform(
          shape=[],
          minval=tf.cast(image.shape[0] * self.min_scale, dtype=tf.int32),
          maxval=image.shape[0] + 1,
          dtype=tf.int32)
      size = (width, tf.minimum(width, image.shape[1]), image.shape[2])
      image = tf.image.random_crop(image, size)
      image = tf.image.resize(image, size=self.size)
    return image
class RandomFlipLeftRight(object):
  """Randomly mirrors the image horizontally during training."""

  def __call__(self, image, is_training=True):
    if not is_training:
      return image
    return tf.image.random_flip_left_right(image)
class ColorJitter(object):
  """Applies color jittering.

  This op is equivalent to the following:
  https://pytorch.org/docs/stable/torchvision/transforms.html#torchvision.transforms.ColorJitter
  """

  def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
    # Each attribute is None when that jitter is a no-op (strength 0).
    self.brightness = self._check_input(brightness)
    self.contrast = self._check_input(contrast, center=1)
    self.saturation = self._check_input(saturation, center=1)
    self.hue = self._check_input(hue, bound=0.5)

  def _check_input(self, value, center=None, bound=None):
    """Normalizes a jitter strength.

    Args:
      value: Requested jitter strength.
      center: If given, the strength becomes the range
        [center - value, center + value].
      bound: If given, `value` is clipped to at most `bound`.

    Returns:
      None when the jitter is a no-op, otherwise a scalar strength or a
      [lo, hi] range when `center` is set.
    """
    if bound is not None:
      value = min(value, bound)
    if center is not None:
      value = [center - value, center + value]
      if value[0] == value[1] == center:
        return None
    elif value == 0:
      return None
    return value

  def _get_transforms(self):
    """Get randomly shuffled transform ops."""
    transforms = []
    if self.brightness is not None:
      transforms.append(
          functools.partial(
              tf.image.random_brightness, max_delta=self.brightness))
    if self.contrast is not None:
      transforms.append(
          functools.partial(
              tf.image.random_contrast,
              lower=self.contrast[0],
              upper=self.contrast[1]))
    if self.saturation is not None:
      transforms.append(
          functools.partial(
              tf.image.random_saturation,
              lower=self.saturation[0],
              upper=self.saturation[1]))
    if self.hue is not None:
      transforms.append(
          functools.partial(tf.image.random_hue, max_delta=self.hue))
    # Application order is re-randomized on every call.
    random.shuffle(transforms)
    return transforms

  def __call__(self, image, is_training=True):
    if not is_training:
      return image
    for transform in self._get_transforms():
      image = transform(image)
    return image
class Rotate90(object):
  """Rotates the image by 90 degrees (one `tf.image.rot90` step) in training."""

  def __call__(self, image, is_training=True):
    if not is_training:
      return image
    return tf.image.rot90(image, k=1)
class Rotate180(object):
  """Rotates the image by 180 degrees (two `tf.image.rot90` steps) in training."""

  def __call__(self, image, is_training=True):
    if not is_training:
      return image
    return tf.image.rot90(image, k=2)
class Rotate270(object):
  """Rotates the image by 270 degrees (three `tf.image.rot90` steps) in training."""

  def __call__(self, image, is_training=True):
    if not is_training:
      return image
    return tf.image.rot90(image, k=3)
class RandomBlur(object):
  """Applies random Gaussian blur (SimCLR-style) during training."""

  def __init__(self, prob=0.5):
    # prob: probability of applying the blur, forwarded to random_blur.
    self.prob = prob

  def __call__(self, image, is_training=True):
    # BUG FIX: the original condition was inverted (`if is_training:
    # return image`), so blur was skipped during training and applied at
    # eval time — the opposite of every other op in this file.
    if not is_training:
      return image
    return simclr_ops.random_blur(
        image, image.shape[0], image.shape[1], p=self.prob)
class RandAugment(randaug.RandAugment):
  """RandAugment over a configurable op subset.

  Extends the third-party RandAugment with:
    * op subsets selected by `mode` ('ALL', 'COLOR', 'GEO', 'CUTOUT'),
    * a translation magnitude scaled to the input image size,
    * channel-group handling so stacked inputs (depth = 3*k) get the same
      op applied to every 3-channel slice.
  """

  def __init__(self,
               num_layers=1,
               prob_to_apply=None,
               magnitude=None,
               num_levels=10,
               size=32,
               mode='all'):
    super(RandAugment, self).__init__(
        num_layers=num_layers,
        prob_to_apply=prob_to_apply,
        magnitude=magnitude,
        num_levels=num_levels)
    # override TRANSLATE_CONST: max translation in pixels, roughly 0.3 of
    # the image size. NOTE: this mutates a module-level constant, so it
    # affects every RandAugment instance in the process.
    if size == 32:
      randaug.TRANSLATE_CONST = 10.
    elif size == 96:
      randaug.TRANSLATE_CONST = 30.
    elif size == 128:
      randaug.TRANSLATE_CONST = 40.
    elif size == 256:
      randaug.TRANSLATE_CONST = 100.
    else:
      randaug.TRANSLATE_CONST = int(0.3 * size)
    # NOTE(review): 'CUTOUT' is accepted but missing from the message.
    assert mode.upper() in [
        'ALL', 'COLOR', 'GEO', 'CUTOUT'
    ], 'RandAugment mode should be `All`, `COLOR` or `GEO`'
    self.mode = mode.upper()
    self._register_ops()
    if mode.upper() == 'CUTOUT':
      self.cutout_ops = CutOut(scale=0.5, random_scale=True)

  def _generate_branch_fn(self, image, level):
    """Builds one zero-arg branch per registered op for `tf.switch_case`."""
    branch_fns = []
    for augment_op_name in self.ra_ops:
      augment_fn = augment_ops.NAME_TO_FUNC[augment_op_name]
      level_to_args_fn = randaug.LEVEL_TO_ARG[augment_op_name]

      # Loop variables are bound as defaults so each branch captures its
      # own op rather than the last loop iteration's.
      def _branch_fn(image=image,
                     augment_fn=augment_fn,
                     level_to_args_fn=level_to_args_fn):
        args = [image] + list(level_to_args_fn(level))
        return augment_fn(*args)

      branch_fns.append(_branch_fn)
    return branch_fns

  def _apply_one_layer(self, image):
    """Applies one level of augmentation to the image."""
    level = self._get_level()
    branch_index = tf.random.uniform(
        shape=[], maxval=len(self.ra_ops), dtype=tf.int32)
    # Split stacked inputs (depth = 3*k) and apply the same randomly chosen
    # op, at the same level, to each 3-channel slice.
    num_concat = image.shape[2] // 3
    images = tf.split(image, num_concat, axis=-1)
    aug_images = []
    for image_slice in images:
      branch_fns = self._generate_branch_fn(image_slice, level)
      # pylint: disable=cell-var-from-loop
      aug_image_slice = tf.switch_case(
          branch_index, branch_fns, default=lambda: image_slice)
      aug_images.append(aug_image_slice)
    aug_image = tf.concat(aug_images, axis=-1)
    if self.prob_to_apply is not None:
      # Keep the augmented result only with probability `prob_to_apply`.
      return tf.cond(
          tf.random.uniform(shape=[], dtype=tf.float32) < self.prob_to_apply,
          lambda: aug_image, lambda: image)
    else:
      return aug_image

  def _register_ops(self):
    """Populates `self.ra_ops` with the op names for the current mode."""
    if self.mode == 'ALL':
      # Color/intensity ops plus geometric ops.
      self.ra_ops = [
          'AutoContrast',
          'Equalize',
          'Posterize',
          'Solarize',
          'Color',
          'Contrast',
          'Brightness',
          'Identity',
          'Invert',
          'Sharpness',
          'SolarizeAdd',
      ]
      self.ra_ops += [
          'Rotate',
          'ShearX',
          'ShearY',
          'TranslateX',
          'TranslateY',
      ]
    elif self.mode == 'CUTOUT':
      # Same op set as 'ALL'; cutout itself is mixed in by `__call__`.
      self.ra_ops = [
          'AutoContrast',
          'Equalize',
          'Posterize',
          'Solarize',
          'Color',
          'Contrast',
          'Brightness',
          'Identity',
          'Invert',
          'Sharpness',
          'SolarizeAdd',
      ]
      self.ra_ops += [
          'Rotate',
          'ShearX',
          'ShearY',
          'TranslateX',
          'TranslateY',
      ]
    elif self.mode == 'COLOR':
      self.ra_ops = [
          'AutoContrast',
          'Equalize',
          'Posterize',
          'Solarize',
          'Color',
          'Contrast',
          'Brightness',
          'Identity',
          'Invert',
          'Sharpness',
          'SolarizeAdd',
      ]
    elif self.mode == 'GEO':
      self.ra_ops = [
          'Rotate',
          'ShearX',
          'ShearY',
          'TranslateX',
          'TranslateY',
          'Identity',
      ]
    else:
      raise NotImplementedError

  def wrap(self, image):
    """Maps float pixels in [-1, 1] to uint8 in [0, 255]."""
    image += tf.constant(1.0, image.dtype)
    image *= tf.constant(255.0 / 2.0, image.dtype)
    image = tf.saturate_cast(image, tf.uint8)
    return image

  def unwrap(self, image):
    """Maps uint8 pixels in [0, 255] back to float32 in [-1, 1]."""
    image = tf.cast(image, tf.float32)
    image /= tf.constant(255.0 / 2.0, image.dtype)
    image -= tf.constant(1.0, image.dtype)
    return image

  def _apply_cutout(self, image):
    """Applies CutOut to a wrapped (uint8) image, honoring prob_to_apply."""
    # Cutout assumes pixels are in [-1, 1].
    aug_image = self.unwrap(image)
    aug_image = self.cutout_ops(aug_image)
    aug_image = self.wrap(aug_image)
    if self.prob_to_apply is not None:
      return tf.cond(
          tf.random.uniform(shape=[], dtype=tf.float32) < self.prob_to_apply,
          lambda: aug_image, lambda: image)
    else:
      return aug_image

  def __call__(self, image, is_training=True):
    if not is_training:
      return image
    image = self.wrap(image)
    if self.mode == 'CUTOUT':
      for _ in range(self.num_layers):
        # Makes an exception for cutout: with probability 1/(num_ops + 1)
        # apply cutout instead of a registered op.
        image = tf.cond(
            tf.random.uniform(shape=[], dtype=tf.float32) < tf.divide(
                tf.constant(1.0), tf.cast(
                    len(self.ra_ops) + 1, dtype=tf.float32)),
            lambda: self._apply_cutout(image),
            lambda: self._apply_one_layer(image))
      return self.unwrap(image)
    else:
      for _ in range(self.num_layers):
        image = self._apply_one_layer(image)
      return self.unwrap(image)
import functools
import random
from third_party import augment_ops
from third_party import data_util as simclr_ops
from third_party import rand_augment as randaug
import tensorflow as tf
def base_augment(is_training=True, **kwargs):
"""Base (resize and crop) augmentation."""
size, pad_size = kwargs.get('size'), int(0.125 * kwargs.get('size'))
if is_training:
return [
('resize', {
'size': size
}),
('crop', {
'size': pad_size
}),
]
return [('resize', {'size': size})]
def crop_and_resize_augment(is_training=True, **kwargs):
"""Random crop and resize augmentation."""
size = kwargs.get('size')
min_scale = kwargs.get('min_scale', 0.4)
if is_training:
return [
('crop_and_resize', {
'size': size,
'min_scale': min_scale
}),
]
return [('resize', {'size': size})]
def jitter_augment(aug=None, is_training=True, **kwargs):
"""Color jitter augmentation."""
if aug is None:
aug = []
if is_training:
brightness = kwargs.get('brightness', 0.125)
contrast = kwargs.get('contrast', 0.4)
saturation = kwargs.get('saturation', 0.4)
hue = kwargs.get('hue', 0)
return aug + [('jitter', {
'brightness': brightness,
'contrast': contrast,
'saturation': saturation,
'hue': hue
})]
return aug
def cutout_augment(aug=None, is_training=True, **kwargs):
"""Cutout augmentation."""
if aug is None:
aug = []
if is_training:
scale = kwargs.get('scale', 0.5)
return aug + [('cutout', {'scale': scale})]
return aug
def randerase_augment(aug=None, is_training=True, **kwargs):
"""Random erase augmentation."""
if aug is None:
aug = []
if is_training:
scale = kwargs.get('scale', 0.3)
return aug + [('randerase', {'scale': (scale, scale), 'ratio': 1.0})]
return aug
def hflip_augment(aug=None, is_training=True, **kwargs):
"""Horizontal flip augmentation."""
del kwargs
if aug is None:
aug = []
if is_training:
return aug + [('hflip', {})]
return aug
def rotate90_augment(aug=None, is_training=True, **kwargs):
"""Rotation by 90 degree augmentation."""
del kwargs
if aug is None:
aug = []
if is_training:
return aug + [('rotate90', {})]
return aug
def rotate180_augment(aug=None, is_training=True, **kwargs):
"""Rotation by 180 degree augmentation."""
del kwargs
if aug is None:
aug = []
if is_training:
return aug + [('rotate180', {})]
return aug
def rotate270_augment(aug=None, is_training=True, **kwargs):
"""Rotation by 270 degree augmentation."""
del kwargs
if aug is None:
aug = []
if is_training:
return aug + [('rotate270', {})]
return aug
def blur_augment(aug=None, is_training=True, **kwargs):
"""Blur augmentation."""
if aug is None:
aug = []
if is_training:
prob = kwargs.get('prob', 0.5)
return aug + [('blur', {'prob': prob})]
return aug
def randaugment(aug=None, is_training=True, **kwargs):
"""Randaugment."""
if aug is None:
aug = []
if is_training:
num_layers = kwargs.get('num_layers', 2)
prob_to_apply = kwargs.get('prob_to_apply', 0.5)
magnitude = kwargs.get('magnitude', None)
num_levels = kwargs.get('num_levels', None)
mode = kwargs.get('mode', 'all')
size = kwargs.get('size', None)
return aug + [('randaug', {
'num_layers': num_layers,
'prob_to_apply': prob_to_apply,
'magnitude': magnitude,
'num_levels': num_levels,
'size': size,
'mode': mode
})]
return aug
class CutOut(object):
"""Cutout."""
def __init__(self, scale=0.5, random_scale=False):
self.scale = scale
self.random_scale = random_scale
@staticmethod
def cutout(image, scale=0.5):
"""Applies Cutout.
Args:
image: A 3D tensor (width, height, depth).
scale: A scalar for the width or height ratio for cutout region.
Returns:
A 3D tensor (width, height, depth) after cutout.
"""
img_shape = tf.shape(image)
img_height, img_width = img_shape[-3], img_shape[-2]
img_height = tf.cast(img_height, dtype=tf.float32)
img_width = tf.cast(img_width, dtype=tf.float32)
cutout_size = (img_height * scale, img_width * scale)
cutout_size = (tf.maximum(1.0,
cutout_size[0]), tf.maximum(1.0, cutout_size[1]))
def _create_cutout_mask():
height_loc = tf.round(
tf.random.uniform(shape=[], minval=0, maxval=img_height))
width_loc = tf.round(
tf.random.uniform(shape=[], minval=0, maxval=img_width))
upper_coord = (tf.maximum(0.0, height_loc - cutout_size[0] // 2),
tf.maximum(0.0, width_loc - cutout_size[1] // 2))
lower_coord = (tf.minimum(img_height, height_loc + cutout_size[0] // 2),
tf.minimum(img_width, width_loc + cutout_size[1] // 2))
mask_height = lower_coord[0] - upper_coord[0]
mask_width = lower_coord[1] - upper_coord[1]
padding_dims = ((upper_coord[0], img_height - lower_coord[0]),
(upper_coord[1], img_width - lower_coord[1]))
mask = tf.zeros((mask_height, mask_width), dtype=tf.float32)
mask = tf.pad(
mask, tf.cast(padding_dims, dtype=tf.int32), constant_values=1.0)
return tf.expand_dims(mask, -1)
return _create_cutout_mask() * image
def __call__(self, image, is_training=True):
if is_training:
if self.random_scale:
scale = tf.random.uniform(shape=[], minval=0.0, maxval=self.scale)
else:
scale = self.scale
return self.cutout(image, scale) if is_training else image
class RandomErase(object):
"""RandomErasing.
Similar to Cutout, but supports various sizes and aspect ratios of rectangle.
"""
def __init__(self, scale=(0.02, 0.3), ratio=3.3, value=0.0):
self.scale = scale
self.ratio = ratio
self.value = value
assert self.ratio >= 1
@staticmethod
def cutout(image, scale=(0.02, 0.3), ratio=3.3, value=0.0):
"""Applies Cutout with various sizes and aspect ratios of rectangle.
Args:
image: A 3D tensor (width, height, depth).
scale: A tuple for ratio of cutout region.
ratio: A scalar for aspect ratio.
value: A value to fill in cutout region.
Returns:
A 3D tensor (width, height, depth) after cutout.
"""
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
image_depth = tf.shape(image)[2]
# Sample the center location in the image where the zero mask will be
# applied.
def _cutout(img):
area = tf.cast(image_height * image_width, tf.float32)
erase_area = tf.random.uniform(
shape=[], minval=scale[0], maxval=scale[1]) * area
aspect_ratio = tf.random.uniform(shape=[], minval=1, maxval=ratio)
aspect_ratio = tf.cond(
tf.random.uniform(shape=[]) > 0.5, lambda: aspect_ratio,
lambda: 1.0 / aspect_ratio)
pad_h = tf.cast(
tf.math.round(tf.math.sqrt(erase_area * aspect_ratio)),
dtype=tf.int32)
pad_h = tf.minimum(pad_h, image_height - 1)
pad_w = tf.cast(
tf.math.round(tf.math.sqrt(erase_area / aspect_ratio)),
dtype=tf.int32)
pad_w = tf.minimum(pad_w, image_width - 1)
cutout_center_height = tf.random.uniform(
shape=[], minval=0, maxval=image_height - pad_h, dtype=tf.int32)
cutout_center_width = tf.random.uniform(
shape=[], minval=0, maxval=image_width - pad_w, dtype=tf.int32)
lower_pad = cutout_center_height
upper_pad = tf.maximum(0, image_height - cutout_center_height - pad_h)
left_pad = cutout_center_width
right_pad = tf.maximum(0, image_width - cutout_center_width - pad_w)
cutout_shape = [
image_height - (lower_pad + upper_pad),
image_width - (left_pad + right_pad)
]
padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
mask = tf.pad(
tf.zeros(cutout_shape, dtype=img.dtype),
padding_dims,
constant_values=1)
mask = tf.expand_dims(mask, -1)
mask = tf.tile(mask, [1, 1, image_depth])
img = tf.where(
tf.equal(mask, 0),
tf.ones_like(img, dtype=img.dtype) * value, img)
return img
return _cutout(image)
def __call__(self, image, is_training=True):
return self.cutout(image, self.scale, self.ratio,
self.value) if is_training else image
class Resize(object):
  """Resizes images to a fixed (height, width)."""

  def __init__(self, size, method=tf.image.ResizeMethod.BILINEAR):
    """Initializes the op.

    Args:
      size: An int, or a list/tuple of one or two ints, giving the output
        (height, width).
      method: A `tf.image.ResizeMethod` used for interpolation.
    """
    self.size = self._check_input(size)
    self.method = method

  def _check_input(self, size):
    """Normalizes `size` into a (height, width) tuple.

    BUG FIX: the original raised TypeError for a valid two-element
    sequence such as (224, 224); length-2 sequences are now accepted.

    Raises:
      TypeError: If `size` is not an int or a 1/2-element list/tuple.
    """
    if isinstance(size, int):
      return (size, size)
    if isinstance(size, (list, tuple)):
      if len(size) == 1:
        return tuple(size) * 2
      if len(size) == 2:
        return tuple(size)
    raise TypeError('size must be an integer or list/tuple of integers')

  def __call__(self, image, is_training=True):
    # NOTE(review): resizing is skipped at eval time; presumably eval
    # images already have the target size — confirm with the eval pipeline.
    return tf.image.resize(
        image, self.size, method=self.method) if is_training else image
class RandomCrop(object):
"""Random Crop."""
def __init__(self, size):
self.pad = self._check_input(size)
def _check_input(self, size):
"""Checks pad shape.
Args:
size: Scalar, list or tuple for pad size.
Returns:
A tuple for pad size.
"""
if isinstance(size, int):
size = (size, size)
elif isinstance(size, (list, tuple)):
if len(size) == 1:
size = tuple(size) * 2
elif len(size) > 2:
size = tuple(size[:2])
else:
raise TypeError('size must be an integer or list/tuple of integers')
return size
def __call__(self, image, is_training=True):
if is_training:
img_size = image.shape[-3:]
image = tf.pad(
image, [[self.pad[0]] * 2, [self.pad[1]] * 2, [0] * 2],
mode='REFLECT')
image = tf.image.random_crop(image, img_size)
return image
class RandomCropAndResize(object):
  """Randomly crops a sub-window and resizes it to a fixed output size."""

  def __init__(self, size, min_scale=0.4):
    """Initializes the op.

    Args:
      size: An int, or a list/tuple of one or two ints, giving the output
        (height, width).
      min_scale: Minimum crop side as a fraction of the input height.
    """
    self.min_scale = min_scale
    self.size = self._check_input(size)

  def _check_input(self, size):
    """Normalizes `size` into a (height, width) tuple.

    BUG FIX: the original raised TypeError for a valid two-element
    sequence such as (224, 224); length-2 sequences are now accepted.

    Raises:
      TypeError: If `size` is not an int or a 1/2-element list/tuple.
    """
    if isinstance(size, int):
      return (size, size)
    if isinstance(size, (list, tuple)):
      if len(size) == 1:
        return tuple(size) * 2
      if len(size) == 2:
        return tuple(size)
    raise TypeError('size must be an integer or list/tuple of integers')

  def __call__(self, image, is_training=True):
    if is_training:
      # Sample the crop side in [min_scale * H, H], crop a square window
      # (clamped to the image width), then resize to the target size.
      width = tf.random.uniform(
          shape=[],
          minval=tf.cast(image.shape[0] * self.min_scale, dtype=tf.int32),
          maxval=image.shape[0] + 1,
          dtype=tf.int32)
      size = (width, tf.minimum(width, image.shape[1]), image.shape[2])
      image = tf.image.random_crop(image, size)
      image = tf.image.resize(image, size=self.size)
    return image
class RandomFlipLeftRight(object):
def __init__(self):
pass
def __call__(self, image, is_training=True):
return tf.image.random_flip_left_right(image) if is_training else image
class ColorJitter(object):
"""Applies color jittering.
This op is equivalent to the following:
https://pytorch.org/docs/stable/torchvision/transforms.html#torchvision.transforms.ColorJitter
"""
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
self.brightness = self._check_input(brightness)
self.contrast = self._check_input(contrast, center=1)
self.saturation = self._check_input(saturation, center=1)
self.hue = self._check_input(hue, bound=0.5)
def _check_input(self, value, center=None, bound=None):
if bound is not None:
value = min(value, bound)
if center is not None:
value = [center - value, center + value]
if value[0] == value[1] == center:
return None
elif value == 0:
return None
return value
def _get_transforms(self):
"""Get randomly shuffled transform ops."""
transforms = []
if self.brightness is not None:
transforms.append(
functools.partial(
tf.image.random_brightness, max_delta=self.brightness))
if self.contrast is not None:
transforms.append(
functools.partial(
tf.image.random_contrast,
lower=self.contrast[0],
upper=self.contrast[1]))
if self.saturation is not None:
transforms.append(
functools.partial(
tf.image.random_saturation,
lower=self.saturation[0],
upper=self.saturation[1]))
if self.hue is not None:
transforms.append(
functools.partial(tf.image.random_hue, max_delta=self.hue))
random.shuffle(transforms)
return transforms
def __call__(self, image, is_training=True):
if not is_training:
return image
for transform in self._get_transforms():
image = transform(image)
return image
class Rotate90(object):
def __init__(self):
pass
def __call__(self, image, is_training=True):
return tf.image.rot90(image, k=1) if is_training else image
class Rotate180(object):
def __init__(self):
pass
def __call__(self, image, is_training=True):
return tf.image.rot90(image, k=2) if is_training else image
class Rotate270(object):
def __init__(self):
pass
def __call__(self, image, is_training=True):
return tf.image.rot90(image, k=3) if is_training else image
class RandomBlur(object):
  """Applies random Gaussian blur (SimCLR-style) during training."""

  def __init__(self, prob=0.5):
    # prob: probability of applying the blur, forwarded to random_blur.
    self.prob = prob

  def __call__(self, image, is_training=True):
    # BUG FIX: the original condition was inverted (`if is_training:
    # return image`), so blur was skipped during training and applied at
    # eval time — the opposite of every other op in this file.
    if not is_training:
      return image
    return simclr_ops.random_blur(
        image, image.shape[0], image.shape[1], p=self.prob)
class RandAugment(randaug.RandAugment):
"""RandAugment."""
def __init__(self,
num_layers=1,
prob_to_apply=None,
magnitude=None,
num_levels=10,
size=32,
mode='all'):
super(RandAugment, self).__init__(
num_layers=num_layers,
prob_to_apply=prob_to_apply,
magnitude=magnitude,
num_levels=num_levels)
# override TRANSLATE_CONST
if size == 32:
randaug.TRANSLATE_CONST = 10.
elif size == 96:
randaug.TRANSLATE_CONST = 30.
elif size == 128:
randaug.TRANSLATE_CONST = 40.
elif size == 256:
randaug.TRANSLATE_CONST = 100.
else:
randaug.TRANSLATE_CONST = int(0.3 * size)
assert mode.upper() in [
'ALL', 'COLOR', 'GEO', 'CUTOUT'
], 'RandAugment mode should be `All`, `COLOR` or `GEO`'
self.mode = mode.upper()
self._register_ops()
if mode.upper() == 'CUTOUT':
self.cutout_ops = CutOut(scale=0.5, random_scale=True)
def _generate_branch_fn(self, image, level):
branch_fns = []
for augment_op_name in self.ra_ops:
augment_fn = augment_ops.NAME_TO_FUNC[augment_op_name]
level_to_args_fn = randaug.LEVEL_TO_ARG[augment_op_name]
def _branch_fn(image=image,
augment_fn=augment_fn,
level_to_args_fn=level_to_args_fn):
args = [image] + list(level_to_args_fn(level))
return augment_fn(*args)
branch_fns.append(_branch_fn)
return branch_fns
  def _apply_one_layer(self, image):
    """Applies one level of augmentation to the image."""
    # Sample one magnitude level and one op index, shared across all slices
    # so every frame receives the same transform.
    level = self._get_level()
    branch_index = tf.random.uniform(
        shape=[], maxval=len(self.ra_ops), dtype=tf.int32)
    # The channel axis may hold several RGB frames concatenated together
    # (channels divisible by 3); split and augment each 3-channel slice.
    # assumes image.shape[2] is statically known and a multiple of 3 —
    # TODO(review): confirm against callers.
    num_concat = image.shape[2] // 3
    images = tf.split(image, num_concat, axis=-1)
    aug_images = []
    for image_slice in images:
      branch_fns = self._generate_branch_fn(image_slice, level)
      # pylint: disable=cell-var-from-loop
      aug_image_slice = tf.switch_case(
          branch_index, branch_fns, default=lambda: image_slice)
      aug_images.append(aug_image_slice)
    # Reassemble the augmented slices along the channel axis.
    aug_image = tf.concat(aug_images, axis=-1)
    if self.prob_to_apply is not None:
      # Keep the original image with probability (1 - prob_to_apply).
      return tf.cond(
          tf.random.uniform(shape=[], dtype=tf.float32) < self.prob_to_apply,
          lambda: aug_image, lambda: image)
    else:
      return aug_image
def _register_ops(self):
if self.mode == 'ALL':
self.ra_ops = [
'AutoContrast',
'Equalize',
'Posterize',
'Solarize',
'Color',
'Contrast',
'Brightness',
'Identity',
'Invert',
'Sharpness',
'SolarizeAdd',
]
self.ra_ops += [
'Rotate',
'ShearX',
'ShearY',
'TranslateX',
'TranslateY',
]
elif self.mode == 'CUTOUT':
self.ra_ops = [
'AutoContrast',
'Equalize',
'Posterize',
'Solarize',
'Color',
'Contrast',
'Brightness',
'Identity',
'Invert',
'Sharpness',
'SolarizeAdd',
]
self.ra_ops += [
'Rotate',
'ShearX',
'ShearY',
'TranslateX',
'TranslateY',
]
elif self.mode == 'COLOR':
self.ra_ops = [
'AutoContrast',
'Equalize',
'Posterize',
'Solarize',
'Color',
'Contrast',
'Brightness',
'Identity',
'Invert',
'Sharpness',
'SolarizeAdd',
]
elif self.mode == 'GEO':
self.ra_ops = [
'Rotate',
'ShearX',
'ShearY',
'TranslateX',
'TranslateY',
'Identity',
]
else:
raise NotImplementedError
def wrap(self, image):
image += tf.constant(1.0, image.dtype)
image *= tf.constant(255.0 / 2.0, image.dtype)
image = tf.saturate_cast(image, tf.uint8)
return image
def unwrap(self, image):
image = tf.cast(image, tf.float32)
image /= tf.constant(255.0 / 2.0, image.dtype)
image -= tf.constant(1.0, image.dtype)
return image
def _apply_cutout(self, image):
# Cutout assumes pixels are in [-1, 1].
aug_image = self.unwrap(image)
aug_image = self.cutout_ops(aug_image)
aug_image = self.wrap(aug_image)
if self.prob_to_apply is not None:
return tf.cond(
tf.random.uniform(shape=[], dtype=tf.float32) < self.prob_to_apply,
lambda: aug_image, lambda: image)
else:
return aug_image
def __call__(self, image, is_training=True):
if not is_training:
return image
image = self.wrap(image)
if self.mode == 'CUTOUT':
for _ in range(self.num_layers):
# Makes an exception for cutout.
image = tf.cond(
tf.random.uniform(shape=[], dtype=tf.float32) < tf.divide(
tf.constant(1.0), tf.cast(
len(self.ra_ops) + 1, dtype=tf.float32)),
lambda: self._apply_cutout(image),
lambda: self._apply_one_layer(image))
return self.unwrap(image)
else:
for _ in range(self.num_layers):
image = self._apply_one_layer(image)
return self.unwrap(image) | 0.912869 | 0.240067 |
from math import radians
import arcade
import pymunk
from pymunk import Space
from ev3dev2simulator.obstacle.Board import Board
from ev3dev2simulator.state.RobotState import RobotState
from ev3dev2simulator.obstacle.Border import Border
from ev3dev2simulator.obstacle.Bottle import Bottle
from ev3dev2simulator.obstacle.Edge import Edge
from ev3dev2simulator.obstacle.Lake import Lake
from ev3dev2simulator.obstacle.Rock import Rock
class WorldState:
def __init__(self, config):
self.sprite_list = arcade.SpriteList()
self.obstacles = []
self.static_obstacles = []
self.touch_obstacles = []
self.falling_obstacles = []
self.color_obstacles = []
self.robots = []
self.space = Space()
self.space.damping = 0.1
self.board_width = int(config['board_width'])
self.board_height = int(config['board_height'])
board_color = eval(config['board_color'])
board = Board(self.board_width / 2, self.board_height / 2, self.board_width, self.board_height, board_color)
self.static_obstacles.append(board)
for robot_conf in config['robots']:
self.robots.append(RobotState(robot_conf))
edge = Edge(self.board_width, self.board_height)
self.static_obstacles.append(edge)
self.falling_obstacles.append(edge)
for key, value in config['obstacles'].items():
if value['type'] == 'lake':
lake = Lake.from_config(value)
self.static_obstacles.append(lake)
if lake.hole is not None:
self.falling_obstacles.append(lake.hole)
self.color_obstacles.append(lake)
elif value['type'] == 'rock':
rock = Rock.from_config(value)
self.obstacles.append(rock)
self.touch_obstacles.append(rock)
elif value['type'] == 'border':
border = Border.from_config(self.board_width, self.board_height, value)
self.static_obstacles.append(border)
self.color_obstacles.append(border)
elif value['type'] == 'bottle':
bottle = Bottle.from_config(value)
self.obstacles.append(bottle)
self.touch_obstacles.append(bottle)
else:
print("unknown obstacle type")
self.color_obstacles.append(board)
self.selected_object = None
def reset(self):
for obstacle in self.obstacles:
obstacle.reset()
def setup_pymunk_shapes(self, scale):
for idx, robot in enumerate(self.robots):
robot.setup_pymunk_shapes(scale)
for shape in robot.get_shapes():
shape.filter = pymunk.ShapeFilter(group=idx+5)
self.space.add(shape)
self.space.add(robot.body)
for obstacle in self.obstacles:
obstacle.create_shape(scale)
self.space.add(obstacle.body)
self.space.add(obstacle.shape)
def setup_visuals(self, scale):
for obstacle in self.static_obstacles:
obstacle.create_shape(scale)
touch_sprites = arcade.SpriteList()
for obstacle in self.obstacles:
obstacle.create_sprite(scale)
self.sprite_list.append(obstacle.sprite)
touch_sprites.append(obstacle.sprite)
for robot in self.robots:
robot.setup_visuals(scale)
robot.set_color_obstacles(self.color_obstacles)
robot.set_touch_obstacles(touch_sprites)
robot.set_falling_obstacles(self.falling_obstacles)
def set_object_at_position_as_selected(self, pos):
max_distance = 15
poly = self.space.point_query_nearest(pos, max_distance, pymunk.ShapeFilter()).shape
if hasattr(poly, 'body'):
self.selected_object = poly.body
def move_selected_object(self, dx, dy):
if self.selected_object:
self.selected_object.position += (dx, dy)
def rotate_selected_object(self, dx):
self.selected_object.angle += radians(dx)
def unselect_object(self):
self.selected_object = None
def get_robots(self) -> [RobotState]:
return self.robots | ev3dev2simulator/state/WorldState.py | from math import radians
import arcade
import pymunk
from pymunk import Space
from ev3dev2simulator.obstacle.Board import Board
from ev3dev2simulator.state.RobotState import RobotState
from ev3dev2simulator.obstacle.Border import Border
from ev3dev2simulator.obstacle.Bottle import Bottle
from ev3dev2simulator.obstacle.Edge import Edge
from ev3dev2simulator.obstacle.Lake import Lake
from ev3dev2simulator.obstacle.Rock import Rock
class WorldState:
def __init__(self, config):
self.sprite_list = arcade.SpriteList()
self.obstacles = []
self.static_obstacles = []
self.touch_obstacles = []
self.falling_obstacles = []
self.color_obstacles = []
self.robots = []
self.space = Space()
self.space.damping = 0.1
self.board_width = int(config['board_width'])
self.board_height = int(config['board_height'])
board_color = eval(config['board_color'])
board = Board(self.board_width / 2, self.board_height / 2, self.board_width, self.board_height, board_color)
self.static_obstacles.append(board)
for robot_conf in config['robots']:
self.robots.append(RobotState(robot_conf))
edge = Edge(self.board_width, self.board_height)
self.static_obstacles.append(edge)
self.falling_obstacles.append(edge)
for key, value in config['obstacles'].items():
if value['type'] == 'lake':
lake = Lake.from_config(value)
self.static_obstacles.append(lake)
if lake.hole is not None:
self.falling_obstacles.append(lake.hole)
self.color_obstacles.append(lake)
elif value['type'] == 'rock':
rock = Rock.from_config(value)
self.obstacles.append(rock)
self.touch_obstacles.append(rock)
elif value['type'] == 'border':
border = Border.from_config(self.board_width, self.board_height, value)
self.static_obstacles.append(border)
self.color_obstacles.append(border)
elif value['type'] == 'bottle':
bottle = Bottle.from_config(value)
self.obstacles.append(bottle)
self.touch_obstacles.append(bottle)
else:
print("unknown obstacle type")
self.color_obstacles.append(board)
self.selected_object = None
def reset(self):
for obstacle in self.obstacles:
obstacle.reset()
def setup_pymunk_shapes(self, scale):
for idx, robot in enumerate(self.robots):
robot.setup_pymunk_shapes(scale)
for shape in robot.get_shapes():
shape.filter = pymunk.ShapeFilter(group=idx+5)
self.space.add(shape)
self.space.add(robot.body)
for obstacle in self.obstacles:
obstacle.create_shape(scale)
self.space.add(obstacle.body)
self.space.add(obstacle.shape)
def setup_visuals(self, scale):
for obstacle in self.static_obstacles:
obstacle.create_shape(scale)
touch_sprites = arcade.SpriteList()
for obstacle in self.obstacles:
obstacle.create_sprite(scale)
self.sprite_list.append(obstacle.sprite)
touch_sprites.append(obstacle.sprite)
for robot in self.robots:
robot.setup_visuals(scale)
robot.set_color_obstacles(self.color_obstacles)
robot.set_touch_obstacles(touch_sprites)
robot.set_falling_obstacles(self.falling_obstacles)
def set_object_at_position_as_selected(self, pos):
max_distance = 15
poly = self.space.point_query_nearest(pos, max_distance, pymunk.ShapeFilter()).shape
if hasattr(poly, 'body'):
self.selected_object = poly.body
def move_selected_object(self, dx, dy):
if self.selected_object:
self.selected_object.position += (dx, dy)
def rotate_selected_object(self, dx):
self.selected_object.angle += radians(dx)
def unselect_object(self):
self.selected_object = None
def get_robots(self) -> [RobotState]:
return self.robots | 0.536556 | 0.360067 |
import pytz
import pytest
from itertools import product
from unittest.mock import Mock
from datetime import datetime, timezone, date
from originexample.common import Unit
from originexample.auth import User
from originexample.agreements.queries import AgreementQuery
from originexample.agreements.models import TradeAgreement, AgreementState
user1 = User(
id=1,
sub='28a7240c-088e-4659-bd66-d76afb8c762f',
name='User 1',
company='Company 1',
email='<EMAIL>',
phone='11111111',
access_token='access_token',
refresh_token='<PASSWORD>_token',
token_expire=datetime(2030, 1, 1, 0, 0, 0),
)
user2 = User(
id=2,
sub='972cfd2e-cbd3-42e6-8e0e-c0c5c502f25f',
name='User 2',
company='Company 2',
email='<EMAIL>',
phone='22222222',
access_token='access_token',
refresh_token='access_token',
token_expire=datetime(2030, 1, 1, 0, 0, 0),
)
user3 = User(
id=3,
sub='7169e62d-e349-4af2-9587-6027a4e86cf9',
name='User 3',
company='Company 3',
email='<EMAIL>',
phone='33333333',
access_token='access_token',
refresh_token='<PASSWORD>_token',
token_expire=datetime(2030, 1, 1, 0, 0, 0),
)
user4 = User(
id=4,
sub='7eca644f-b6df-42e5-b6ae-03cb49067<PASSWORD>',
name='<NAME>',
company='Company 4',
email='<EMAIL>',
phone='44444444',
access_token='<PASSWORD>',
refresh_token='<PASSWORD>',
token_expire=datetime(2030, 1, 1, 0, 0, 0),
)
@pytest.fixture(scope='module')
def seeded_session(session):
"""
Returns a Session object with Ggo + User data seeded for testing
"""
# Dependencies
session.add(user1)
session.add(user2)
session.add(user3)
session.add(user4)
# Input for combinations
users = (
# (user_proposed, user_from, user_to)
(user1, user1, user2),
(user2, user1, user2),
(user1, user1, user3),
(user3, user1, user3),
(user2, user2, user1),
(user1, user2, user1),
(user2, user2, user3),
(user3, user2, user3),
(user3, user3, user1),
(user1, user3, user1),
(user3, user3, user2),
(user2, user3, user2),
)
technologies = (None, 'Wind', 'Marine')
states = (
AgreementState.PENDING,
AgreementState.ACCEPTED,
AgreementState.DECLINED,
AgreementState.CANCELLED,
AgreementState.WITHDRAWN,
)
dates = (
# (date_from, date_to)
(date(2020, 1, 1), date(2020, 1, 31)),
(date(2020, 1, 1), date(2020, 2, 29)),
(date(2020, 2, 1), date(2020, 3, 31)),
)
# Combinations
combinations = product(users, technologies, states, dates)
# Seed Agreements
for i, ((user_propose, user_from, user_to), tech, state, (date_from, date_to)) in enumerate(combinations, start=1):
session.add(TradeAgreement(
id=i,
public_id=str(i),
user_proposed_id=user_propose.id,
user_proposed=user_propose,
user_from_id=user_from.id,
user_from=user_from,
user_to_id=user_to.id,
user_to=user_to,
state=state,
date_from=date_from,
date_to=date_to,
amount=100,
unit=Unit.Wh,
technologies=[tech] if tech else None,
reference='some-reference',
))
if i % 250 == 0:
session.flush()
session.commit()
yield session
# -- TEST CASES --------------------------------------------------------------
@pytest.mark.parametrize('public_id', ('1', '2'))
def test__AgreementQuery__has_public_id__TradeAgreement_exists__returns_correct_agreement(
seeded_session, public_id):
query = AgreementQuery(seeded_session) \
.has_public_id(public_id)
assert query.count() == 1
assert query.one().public_id == public_id
@pytest.mark.parametrize('public_id', ('-1', '0', 'asd'))
def test__AgreementQuery__has_public_id__TradeAgreement_does_not_exist__returs_nothing(
seeded_session, public_id):
query = AgreementQuery(seeded_session) \
.has_public_id(public_id)
assert query.count() == 0
assert query.one_or_none() is None
@pytest.mark.parametrize('user', (user1, user2, user3))
def test__AgreementQuery__belongs_to__TradeAgreement_exists__returns_correct_agreements(
seeded_session, user):
query = AgreementQuery(seeded_session) \
.belongs_to(user)
assert query.count() > 0
assert all(user.id in (ag.user_from_id, ag.user_to_id) for ag in query.all())
def test__AgreementQuery__belongs_to__TradeAgreement_does_not_exist__returs_nothing(seeded_session):
query = AgreementQuery(seeded_session) \
.belongs_to(user4)
assert query.count() == 0
@pytest.mark.parametrize('user', (user1, user2, user3))
def test__AgreementQuery__is_proposed_by__TradeAgreement_exists__returns_correct_agreements(
seeded_session, user):
query = AgreementQuery(seeded_session) \
.is_proposed_by(user)
assert query.count() > 0
assert all(ag.user_proposed_id == user.id for ag in query.all())
def test__AgreementQuery__is_proposed_by__TradeAgreement_does_not_exist__returs_nothing(seeded_session):
query = AgreementQuery(seeded_session) \
.is_proposed_by(user4)
assert query.count() == 0
@pytest.mark.parametrize('user', (user1, user2, user3))
def test__AgreementQuery__is_proposed_to__TradeAgreement_exists__returns_correct_agreements(
seeded_session, user):
query = AgreementQuery(seeded_session) \
.is_proposed_to(user)
assert query.count() > 0
assert all(ag.user_proposed_id != user.id for ag in query.all())
assert all(user.id in (ag.user_from_id, ag.user_to_id) for ag in query.all())
def test__AgreementQuery__is_proposed_to__TradeAgreement_does_not_exist__returs_nothing(seeded_session):
query = AgreementQuery(seeded_session) \
.is_proposed_to(user4)
assert query.count() == 0
@pytest.mark.parametrize('user', (user1, user2, user3))
def test__AgreementQuery__is_awaiting_response_by__TradeAgreement_exists__returns_correct_agreements(
seeded_session, user):
query = AgreementQuery(seeded_session) \
.is_awaiting_response_by(user)
assert query.count() > 0
assert all(ag.state is AgreementState.PENDING for ag in query.all())
assert all(ag.user_proposed_id != user.id for ag in query.all())
assert all(user.id in (ag.user_from_id, ag.user_to_id) for ag in query.all())
def test__AgreementQuery__is_awaiting_response_by__TradeAgreement_does_not_exist__returs_nothing(seeded_session):
query = AgreementQuery(seeded_session) \
.is_awaiting_response_by(user4)
assert query.count() == 0
@pytest.mark.parametrize('user', (user1, user2, user3))
def test__AgreementQuery__is_inbound_to__TradeAgreement_exists__returns_correct_agreements(
seeded_session, user):
query = AgreementQuery(seeded_session) \
.is_inbound_to(user)
assert query.count() > 0
assert all(ag.user_to_id == user.id for ag in query.all())
def test__AgreementQuery__is_inbound_to__TradeAgreement_does_not_exist__returs_nothing(seeded_session):
query = AgreementQuery(seeded_session) \
.is_inbound_to(user4)
assert query.count() == 0
@pytest.mark.parametrize('user', (user1, user2, user3))
def test__AgreementQuery__is_outbound_from__TradeAgreement_exists__returns_correct_agreements(
seeded_session, user):
query = AgreementQuery(seeded_session) \
.is_outbound_from(user)
assert query.count() > 0
assert all(ag.user_from_id == user.id for ag in query.all())
def test__AgreementQuery__is_outbound_from__TradeAgreement_does_not_exist__returs_nothing(seeded_session):
query = AgreementQuery(seeded_session) \
.is_outbound_from(user4)
assert query.count() == 0
def test__AgreementQuery__is_pending__returns_correct_agreements(seeded_session):
query = AgreementQuery(seeded_session) \
.is_pending()
assert query.count() > 0
assert all(ag.state is AgreementState.PENDING for ag in query.all())
def test__AgreementQuery__is_accepted__returns_correct_agreements(seeded_session):
query = AgreementQuery(seeded_session) \
.is_accepted()
assert query.count() > 0
assert all(ag.state is AgreementState.ACCEPTED for ag in query.all())
def test__AgreementQuery__is_active__returns_correct_agreements(seeded_session):
query = AgreementQuery(seeded_session) \
.is_active()
assert query.count() > 0
assert all(ag.state is AgreementState.ACCEPTED for ag in query.all())
# -- is_elibigle_to_trade() --------------------------------------------------
@pytest.mark.parametrize('ggo_technology, ggo_begin', (
('Wind', datetime(2020, 1, 1, 0, 0, 0, tzinfo=timezone.utc)),
('Wind', datetime(2020, 1, 31, 21, 0, 0, tzinfo=timezone.utc)),
('Wind', datetime(2020, 2, 1, 0, 0, 0, tzinfo=timezone.utc)),
('Wind', datetime(2020, 2, 29, 21, 0, 0, tzinfo=timezone.utc)),
('Wind', datetime(2020, 3, 1, 0, 0, 0, tzinfo=timezone.utc)),
('Wind', datetime(2020, 3, 31, 21, 0, 0, tzinfo=timezone.utc)),
))
def test__AgreementQuery__is_elibigle_to_trade__TradeAgreement_exists__returns_correct_agreements(
seeded_session, ggo_technology, ggo_begin):
# Arrange
ggo = Mock(begin=ggo_begin, technology=ggo_technology, issue_gsrn=None)
# Act
query = AgreementQuery(seeded_session) \
.is_elibigle_to_trade(ggo)
# Assert
assert query.count() > 0
assert all(ag.date_from <= ggo_begin.astimezone(pytz.timezone('Europe/Copenhagen')).date() <= ag.date_to for ag in query.all())
assert all(ag.technologies in (None, []) or ggo_technology in ag.technologies for ag in query.all())
@pytest.mark.parametrize('ggo_begin', (
datetime(2020, 1, 1, 0, 0, 0, tzinfo=timezone.utc),
datetime(2020, 1, 31, 23, 0, 0, tzinfo=timezone.utc),
datetime(2020, 2, 1, 0, 0, 0, tzinfo=timezone.utc),
datetime(2020, 2, 29, 23, 0, 0, tzinfo=timezone.utc),
datetime(2020, 3, 1, 0, 0, 0, tzinfo=timezone.utc),
datetime(2020, 3, 31, 21, 0, 0, tzinfo=timezone.utc),
))
def test__AgreementQuery__is_elibigle_to_trade__technology_does_not_exists__returns_only_agreements_without_technology(
seeded_session, ggo_begin):
# Arrange
ggo = Mock(begin=ggo_begin, technology='nonexisting-technology', issue_gsrn=None)
# Act
query = AgreementQuery(seeded_session) \
.is_elibigle_to_trade(ggo)
# Assert
assert query.count() > 0
assert all(ag.technologies in (None, []) for ag in query.all())
@pytest.mark.parametrize('ggo_begin', (
datetime(2019, 12, 31, 21, 0, 0, tzinfo=timezone.utc),
datetime(2020, 4, 1, 0, 0, 0, tzinfo=timezone.utc),
))
def test__AgreementQuery__is_elibigle_to_trade__ggo_date_is_outside_agreements__returns_nothing(
seeded_session, ggo_begin):
# Arrange
ggo = Mock(begin=ggo_begin, technology='Wind', issue_gsrn=None)
# Act
query = AgreementQuery(seeded_session) \
.is_elibigle_to_trade(ggo)
# Assert
assert query.count() == 0 | src/tests/agreements/AgreementQuery_test.py | import pytz
import pytest
from itertools import product
from unittest.mock import Mock
from datetime import datetime, timezone, date
from originexample.common import Unit
from originexample.auth import User
from originexample.agreements.queries import AgreementQuery
from originexample.agreements.models import TradeAgreement, AgreementState
user1 = User(
id=1,
sub='28a7240c-088e-4659-bd66-d76afb8c762f',
name='User 1',
company='Company 1',
email='<EMAIL>',
phone='11111111',
access_token='access_token',
refresh_token='<PASSWORD>_token',
token_expire=datetime(2030, 1, 1, 0, 0, 0),
)
user2 = User(
id=2,
sub='972cfd2e-cbd3-42e6-8e0e-c0c5c502f25f',
name='User 2',
company='Company 2',
email='<EMAIL>',
phone='22222222',
access_token='access_token',
refresh_token='access_token',
token_expire=datetime(2030, 1, 1, 0, 0, 0),
)
user3 = User(
id=3,
sub='7169e62d-e349-4af2-9587-6027a4e86cf9',
name='User 3',
company='Company 3',
email='<EMAIL>',
phone='33333333',
access_token='access_token',
refresh_token='<PASSWORD>_token',
token_expire=datetime(2030, 1, 1, 0, 0, 0),
)
user4 = User(
id=4,
sub='7eca644f-b6df-42e5-b6ae-03cb49067<PASSWORD>',
name='<NAME>',
company='Company 4',
email='<EMAIL>',
phone='44444444',
access_token='<PASSWORD>',
refresh_token='<PASSWORD>',
token_expire=datetime(2030, 1, 1, 0, 0, 0),
)
@pytest.fixture(scope='module')
def seeded_session(session):
"""
Returns a Session object with Ggo + User data seeded for testing
"""
# Dependencies
session.add(user1)
session.add(user2)
session.add(user3)
session.add(user4)
# Input for combinations
users = (
# (user_proposed, user_from, user_to)
(user1, user1, user2),
(user2, user1, user2),
(user1, user1, user3),
(user3, user1, user3),
(user2, user2, user1),
(user1, user2, user1),
(user2, user2, user3),
(user3, user2, user3),
(user3, user3, user1),
(user1, user3, user1),
(user3, user3, user2),
(user2, user3, user2),
)
technologies = (None, 'Wind', 'Marine')
states = (
AgreementState.PENDING,
AgreementState.ACCEPTED,
AgreementState.DECLINED,
AgreementState.CANCELLED,
AgreementState.WITHDRAWN,
)
dates = (
# (date_from, date_to)
(date(2020, 1, 1), date(2020, 1, 31)),
(date(2020, 1, 1), date(2020, 2, 29)),
(date(2020, 2, 1), date(2020, 3, 31)),
)
# Combinations
combinations = product(users, technologies, states, dates)
# Seed Agreements
for i, ((user_propose, user_from, user_to), tech, state, (date_from, date_to)) in enumerate(combinations, start=1):
session.add(TradeAgreement(
id=i,
public_id=str(i),
user_proposed_id=user_propose.id,
user_proposed=user_propose,
user_from_id=user_from.id,
user_from=user_from,
user_to_id=user_to.id,
user_to=user_to,
state=state,
date_from=date_from,
date_to=date_to,
amount=100,
unit=Unit.Wh,
technologies=[tech] if tech else None,
reference='some-reference',
))
if i % 250 == 0:
session.flush()
session.commit()
yield session
# -- TEST CASES --------------------------------------------------------------
@pytest.mark.parametrize('public_id', ('1', '2'))
def test__AgreementQuery__has_public_id__TradeAgreement_exists__returns_correct_agreement(
seeded_session, public_id):
query = AgreementQuery(seeded_session) \
.has_public_id(public_id)
assert query.count() == 1
assert query.one().public_id == public_id
@pytest.mark.parametrize('public_id', ('-1', '0', 'asd'))
def test__AgreementQuery__has_public_id__TradeAgreement_does_not_exist__returs_nothing(
seeded_session, public_id):
query = AgreementQuery(seeded_session) \
.has_public_id(public_id)
assert query.count() == 0
assert query.one_or_none() is None
@pytest.mark.parametrize('user', (user1, user2, user3))
def test__AgreementQuery__belongs_to__TradeAgreement_exists__returns_correct_agreements(
seeded_session, user):
query = AgreementQuery(seeded_session) \
.belongs_to(user)
assert query.count() > 0
assert all(user.id in (ag.user_from_id, ag.user_to_id) for ag in query.all())
def test__AgreementQuery__belongs_to__TradeAgreement_does_not_exist__returs_nothing(seeded_session):
query = AgreementQuery(seeded_session) \
.belongs_to(user4)
assert query.count() == 0
@pytest.mark.parametrize('user', (user1, user2, user3))
def test__AgreementQuery__is_proposed_by__TradeAgreement_exists__returns_correct_agreements(
seeded_session, user):
query = AgreementQuery(seeded_session) \
.is_proposed_by(user)
assert query.count() > 0
assert all(ag.user_proposed_id == user.id for ag in query.all())
def test__AgreementQuery__is_proposed_by__TradeAgreement_does_not_exist__returs_nothing(seeded_session):
query = AgreementQuery(seeded_session) \
.is_proposed_by(user4)
assert query.count() == 0
@pytest.mark.parametrize('user', (user1, user2, user3))
def test__AgreementQuery__is_proposed_to__TradeAgreement_exists__returns_correct_agreements(
seeded_session, user):
query = AgreementQuery(seeded_session) \
.is_proposed_to(user)
assert query.count() > 0
assert all(ag.user_proposed_id != user.id for ag in query.all())
assert all(user.id in (ag.user_from_id, ag.user_to_id) for ag in query.all())
def test__AgreementQuery__is_proposed_to__TradeAgreement_does_not_exist__returs_nothing(seeded_session):
query = AgreementQuery(seeded_session) \
.is_proposed_to(user4)
assert query.count() == 0
@pytest.mark.parametrize('user', (user1, user2, user3))
def test__AgreementQuery__is_awaiting_response_by__TradeAgreement_exists__returns_correct_agreements(
seeded_session, user):
query = AgreementQuery(seeded_session) \
.is_awaiting_response_by(user)
assert query.count() > 0
assert all(ag.state is AgreementState.PENDING for ag in query.all())
assert all(ag.user_proposed_id != user.id for ag in query.all())
assert all(user.id in (ag.user_from_id, ag.user_to_id) for ag in query.all())
def test__AgreementQuery__is_awaiting_response_by__TradeAgreement_does_not_exist__returs_nothing(seeded_session):
query = AgreementQuery(seeded_session) \
.is_awaiting_response_by(user4)
assert query.count() == 0
@pytest.mark.parametrize('user', (user1, user2, user3))
def test__AgreementQuery__is_inbound_to__TradeAgreement_exists__returns_correct_agreements(
seeded_session, user):
query = AgreementQuery(seeded_session) \
.is_inbound_to(user)
assert query.count() > 0
assert all(ag.user_to_id == user.id for ag in query.all())
def test__AgreementQuery__is_inbound_to__TradeAgreement_does_not_exist__returs_nothing(seeded_session):
query = AgreementQuery(seeded_session) \
.is_inbound_to(user4)
assert query.count() == 0
@pytest.mark.parametrize('user', (user1, user2, user3))
def test__AgreementQuery__is_outbound_from__TradeAgreement_exists__returns_correct_agreements(
seeded_session, user):
query = AgreementQuery(seeded_session) \
.is_outbound_from(user)
assert query.count() > 0
assert all(ag.user_from_id == user.id for ag in query.all())
def test__AgreementQuery__is_outbound_from__TradeAgreement_does_not_exist__returs_nothing(seeded_session):
query = AgreementQuery(seeded_session) \
.is_outbound_from(user4)
assert query.count() == 0
def test__AgreementQuery__is_pending__returns_correct_agreements(seeded_session):
query = AgreementQuery(seeded_session) \
.is_pending()
assert query.count() > 0
assert all(ag.state is AgreementState.PENDING for ag in query.all())
def test__AgreementQuery__is_accepted__returns_correct_agreements(seeded_session):
query = AgreementQuery(seeded_session) \
.is_accepted()
assert query.count() > 0
assert all(ag.state is AgreementState.ACCEPTED for ag in query.all())
def test__AgreementQuery__is_active__returns_correct_agreements(seeded_session):
query = AgreementQuery(seeded_session) \
.is_active()
assert query.count() > 0
assert all(ag.state is AgreementState.ACCEPTED for ag in query.all())
# -- is_elibigle_to_trade() --------------------------------------------------
@pytest.mark.parametrize('ggo_technology, ggo_begin', (
('Wind', datetime(2020, 1, 1, 0, 0, 0, tzinfo=timezone.utc)),
('Wind', datetime(2020, 1, 31, 21, 0, 0, tzinfo=timezone.utc)),
('Wind', datetime(2020, 2, 1, 0, 0, 0, tzinfo=timezone.utc)),
('Wind', datetime(2020, 2, 29, 21, 0, 0, tzinfo=timezone.utc)),
('Wind', datetime(2020, 3, 1, 0, 0, 0, tzinfo=timezone.utc)),
('Wind', datetime(2020, 3, 31, 21, 0, 0, tzinfo=timezone.utc)),
))
def test__AgreementQuery__is_elibigle_to_trade__TradeAgreement_exists__returns_correct_agreements(
seeded_session, ggo_technology, ggo_begin):
# Arrange
ggo = Mock(begin=ggo_begin, technology=ggo_technology, issue_gsrn=None)
# Act
query = AgreementQuery(seeded_session) \
.is_elibigle_to_trade(ggo)
# Assert
assert query.count() > 0
assert all(ag.date_from <= ggo_begin.astimezone(pytz.timezone('Europe/Copenhagen')).date() <= ag.date_to for ag in query.all())
assert all(ag.technologies in (None, []) or ggo_technology in ag.technologies for ag in query.all())
@pytest.mark.parametrize('ggo_begin', (
datetime(2020, 1, 1, 0, 0, 0, tzinfo=timezone.utc),
datetime(2020, 1, 31, 23, 0, 0, tzinfo=timezone.utc),
datetime(2020, 2, 1, 0, 0, 0, tzinfo=timezone.utc),
datetime(2020, 2, 29, 23, 0, 0, tzinfo=timezone.utc),
datetime(2020, 3, 1, 0, 0, 0, tzinfo=timezone.utc),
datetime(2020, 3, 31, 21, 0, 0, tzinfo=timezone.utc),
))
def test__AgreementQuery__is_elibigle_to_trade__technology_does_not_exists__returns_only_agreements_without_technology(
seeded_session, ggo_begin):
# Arrange
ggo = Mock(begin=ggo_begin, technology='nonexisting-technology', issue_gsrn=None)
# Act
query = AgreementQuery(seeded_session) \
.is_elibigle_to_trade(ggo)
# Assert
assert query.count() > 0
assert all(ag.technologies in (None, []) for ag in query.all())
@pytest.mark.parametrize('ggo_begin', (
datetime(2019, 12, 31, 21, 0, 0, tzinfo=timezone.utc),
datetime(2020, 4, 1, 0, 0, 0, tzinfo=timezone.utc),
))
def test__AgreementQuery__is_elibigle_to_trade__ggo_date_is_outside_agreements__returns_nothing(
seeded_session, ggo_begin):
# Arrange
ggo = Mock(begin=ggo_begin, technology='Wind', issue_gsrn=None)
# Act
query = AgreementQuery(seeded_session) \
.is_elibigle_to_trade(ggo)
# Assert
assert query.count() == 0 | 0.478285 | 0.259122 |
import numpy as np
import tensorflow as tf
import lucid.optvis.render as render
import itertools
from lucid.misc.gradient_override import gradient_override_map
def maxpool_override():
def MaxPoolGrad(op, grad):
inp = op.inputs[0]
op_args = [
op.get_attr("ksize"),
op.get_attr("strides"),
op.get_attr("padding"),
]
smooth_out = tf.nn.avg_pool(inp ** 2, *op_args) / (
1e-2 + tf.nn.avg_pool(tf.abs(inp), *op_args)
)
inp_smooth_grad = tf.gradients(smooth_out, [inp], grad)[0]
return inp_smooth_grad
return {"MaxPool": MaxPoolGrad}
def get_acts(model, layer_name, obses):
with tf.Graph().as_default(), tf.Session():
t_obses = tf.placeholder_with_default(
obses.astype(np.float32), (None, None, None, None)
)
T = render.import_model(model, t_obses, t_obses)
t_acts = T(layer_name)
return t_acts.eval()
def default_score_fn(t):
return tf.reduce_sum(t, axis=list(range(len(t.shape)))[1:])
def get_grad_or_attr(
model,
layer_name,
prev_layer_name,
obses,
*,
act_dir=None,
act_poses=None,
score_fn=default_score_fn,
grad_or_attr,
override=None,
integrate_steps=1
):
with tf.Graph().as_default(), tf.Session(), gradient_override_map(override or {}):
t_obses = tf.placeholder_with_default(
obses.astype(np.float32), (None, None, None, None)
)
T = render.import_model(model, t_obses, t_obses)
t_acts = T(layer_name)
if prev_layer_name is None:
t_acts_prev = t_obses
else:
t_acts_prev = T(prev_layer_name)
if act_dir is not None:
t_acts = act_dir[None, None, None] * t_acts
if act_poses is not None:
t_acts = tf.gather_nd(
t_acts,
tf.concat([tf.range(obses.shape[0])[..., None], act_poses], axis=-1),
)
t_scores = score_fn(t_acts)
assert len(t_scores.shape) >= 1, "score_fn should not reduce the batch dim"
t_score = tf.reduce_sum(t_scores)
t_grad = tf.gradients(t_score, [t_acts_prev])[0]
if integrate_steps > 1:
acts_prev = t_acts_prev.eval()
grad = (
sum(
[
t_grad.eval(feed_dict={t_acts_prev: acts_prev * alpha})
for alpha in np.linspace(0, 1, integrate_steps + 1)[1:]
]
)
/ integrate_steps
)
else:
acts_prev = None
grad = t_grad.eval()
if grad_or_attr == "grad":
return grad
elif grad_or_attr == "attr":
if acts_prev is None:
acts_prev = t_acts_prev.eval()
return acts_prev * grad
else:
raise NotImplementedError
def get_attr(model, layer_name, prev_layer_name, obses, **kwargs):
kwargs["grad_or_attr"] = "attr"
return get_grad_or_attr(model, layer_name, prev_layer_name, obses, **kwargs)
def get_grad(model, layer_name, obses, **kwargs):
kwargs["grad_or_attr"] = "grad"
return get_grad_or_attr(model, layer_name, None, obses, **kwargs)
def get_paths(acts, nmf, *, max_paths, integrate_steps):
acts_reduced = nmf.transform(acts)
residual = acts - nmf.inverse_transform(acts_reduced)
combs = itertools.combinations(range(nmf.features), nmf.features // 2)
if nmf.features % 2 == 0:
combs = np.array([comb for comb in combs if 0 in comb])
else:
combs = np.array(list(combs))
if max_paths is None:
splits = combs
else:
num_splits = min((max_paths + 1) // 2, combs.shape[0])
splits = combs[
np.random.choice(combs.shape[0], size=num_splits, replace=False), :
]
for i, split in enumerate(splits):
indices = np.zeros(nmf.features)
indices[split] = 1.0
indices = indices[tuple(None for _ in range(acts_reduced.ndim - 1))]
complements = [False, True]
if max_paths is not None and i * 2 + 1 == max_paths:
complements = [np.random.choice(complements)]
for complement in complements:
path = []
for alpha in np.linspace(0, 1, integrate_steps + 1)[1:]:
if complement:
coordinates = (1.0 - indices) * alpha ** 2 + indices * (
1.0 - (1.0 - alpha) ** 2
)
else:
coordinates = indices * alpha ** 2 + (1.0 - indices) * (
1.0 - (1.0 - alpha) ** 2
)
path.append(
nmf.inverse_transform(acts_reduced * coordinates) + residual * alpha
)
yield path
def get_multi_path_attr(
model,
layer_name,
prev_layer_name,
obses,
prev_nmf,
*,
act_dir=None,
act_poses=None,
score_fn=default_score_fn,
override=None,
max_paths=50,
integrate_steps=10
):
with tf.Graph().as_default(), tf.Session(), gradient_override_map(override or {}):
t_obses = tf.placeholder_with_default(
obses.astype(np.float32), (None, None, None, None)
)
T = render.import_model(model, t_obses, t_obses)
t_acts = T(layer_name)
if prev_layer_name is None:
t_acts_prev = t_obses
else:
t_acts_prev = T(prev_layer_name)
if act_dir is not None:
t_acts = act_dir[None, None, None] * t_acts
if act_poses is not None:
t_acts = tf.gather_nd(
t_acts,
tf.concat([tf.range(obses.shape[0])[..., None], act_poses], axis=-1),
)
t_scores = score_fn(t_acts)
assert len(t_scores.shape) >= 1, "score_fn should not reduce the batch dim"
t_score = tf.reduce_sum(t_scores)
t_grad = tf.gradients(t_score, [t_acts_prev])[0]
acts_prev = t_acts_prev.eval()
path_acts = get_paths(
acts_prev, prev_nmf, max_paths=max_paths, integrate_steps=integrate_steps
)
deltas_of_path = lambda path: np.array(
[b - a for a, b in zip([np.zeros_like(acts_prev)] + path[:-1], path)]
)
grads_of_path = lambda path: np.array(
[t_grad.eval(feed_dict={t_acts_prev: acts}) for acts in path]
)
path_attrs = map(
lambda path: (deltas_of_path(path) * grads_of_path(path)).sum(axis=0),
path_acts,
)
total_attr = 0
num_paths = 0
for attr in path_attrs:
total_attr += attr
num_paths += 1
return total_attr / num_paths | lucid/scratch/rl_util/attribution.py | import numpy as np
import tensorflow as tf
import lucid.optvis.render as render
import itertools
from lucid.misc.gradient_override import gradient_override_map
def maxpool_override():
def MaxPoolGrad(op, grad):
inp = op.inputs[0]
op_args = [
op.get_attr("ksize"),
op.get_attr("strides"),
op.get_attr("padding"),
]
smooth_out = tf.nn.avg_pool(inp ** 2, *op_args) / (
1e-2 + tf.nn.avg_pool(tf.abs(inp), *op_args)
)
inp_smooth_grad = tf.gradients(smooth_out, [inp], grad)[0]
return inp_smooth_grad
return {"MaxPool": MaxPoolGrad}
def get_acts(model, layer_name, obses):
with tf.Graph().as_default(), tf.Session():
t_obses = tf.placeholder_with_default(
obses.astype(np.float32), (None, None, None, None)
)
T = render.import_model(model, t_obses, t_obses)
t_acts = T(layer_name)
return t_acts.eval()
def default_score_fn(t):
return tf.reduce_sum(t, axis=list(range(len(t.shape)))[1:])
def get_grad_or_attr(
model,
layer_name,
prev_layer_name,
obses,
*,
act_dir=None,
act_poses=None,
score_fn=default_score_fn,
grad_or_attr,
override=None,
integrate_steps=1
):
with tf.Graph().as_default(), tf.Session(), gradient_override_map(override or {}):
t_obses = tf.placeholder_with_default(
obses.astype(np.float32), (None, None, None, None)
)
T = render.import_model(model, t_obses, t_obses)
t_acts = T(layer_name)
if prev_layer_name is None:
t_acts_prev = t_obses
else:
t_acts_prev = T(prev_layer_name)
if act_dir is not None:
t_acts = act_dir[None, None, None] * t_acts
if act_poses is not None:
t_acts = tf.gather_nd(
t_acts,
tf.concat([tf.range(obses.shape[0])[..., None], act_poses], axis=-1),
)
t_scores = score_fn(t_acts)
assert len(t_scores.shape) >= 1, "score_fn should not reduce the batch dim"
t_score = tf.reduce_sum(t_scores)
t_grad = tf.gradients(t_score, [t_acts_prev])[0]
if integrate_steps > 1:
acts_prev = t_acts_prev.eval()
grad = (
sum(
[
t_grad.eval(feed_dict={t_acts_prev: acts_prev * alpha})
for alpha in np.linspace(0, 1, integrate_steps + 1)[1:]
]
)
/ integrate_steps
)
else:
acts_prev = None
grad = t_grad.eval()
if grad_or_attr == "grad":
return grad
elif grad_or_attr == "attr":
if acts_prev is None:
acts_prev = t_acts_prev.eval()
return acts_prev * grad
else:
raise NotImplementedError
def get_attr(model, layer_name, prev_layer_name, obses, **kwargs):
kwargs["grad_or_attr"] = "attr"
return get_grad_or_attr(model, layer_name, prev_layer_name, obses, **kwargs)
def get_grad(model, layer_name, obses, **kwargs):
kwargs["grad_or_attr"] = "grad"
return get_grad_or_attr(model, layer_name, None, obses, **kwargs)
def get_paths(acts, nmf, *, max_paths, integrate_steps):
acts_reduced = nmf.transform(acts)
residual = acts - nmf.inverse_transform(acts_reduced)
combs = itertools.combinations(range(nmf.features), nmf.features // 2)
if nmf.features % 2 == 0:
combs = np.array([comb for comb in combs if 0 in comb])
else:
combs = np.array(list(combs))
if max_paths is None:
splits = combs
else:
num_splits = min((max_paths + 1) // 2, combs.shape[0])
splits = combs[
np.random.choice(combs.shape[0], size=num_splits, replace=False), :
]
for i, split in enumerate(splits):
indices = np.zeros(nmf.features)
indices[split] = 1.0
indices = indices[tuple(None for _ in range(acts_reduced.ndim - 1))]
complements = [False, True]
if max_paths is not None and i * 2 + 1 == max_paths:
complements = [np.random.choice(complements)]
for complement in complements:
path = []
for alpha in np.linspace(0, 1, integrate_steps + 1)[1:]:
if complement:
coordinates = (1.0 - indices) * alpha ** 2 + indices * (
1.0 - (1.0 - alpha) ** 2
)
else:
coordinates = indices * alpha ** 2 + (1.0 - indices) * (
1.0 - (1.0 - alpha) ** 2
)
path.append(
nmf.inverse_transform(acts_reduced * coordinates) + residual * alpha
)
yield path
def get_multi_path_attr(
model,
layer_name,
prev_layer_name,
obses,
prev_nmf,
*,
act_dir=None,
act_poses=None,
score_fn=default_score_fn,
override=None,
max_paths=50,
integrate_steps=10
):
with tf.Graph().as_default(), tf.Session(), gradient_override_map(override or {}):
t_obses = tf.placeholder_with_default(
obses.astype(np.float32), (None, None, None, None)
)
T = render.import_model(model, t_obses, t_obses)
t_acts = T(layer_name)
if prev_layer_name is None:
t_acts_prev = t_obses
else:
t_acts_prev = T(prev_layer_name)
if act_dir is not None:
t_acts = act_dir[None, None, None] * t_acts
if act_poses is not None:
t_acts = tf.gather_nd(
t_acts,
tf.concat([tf.range(obses.shape[0])[..., None], act_poses], axis=-1),
)
t_scores = score_fn(t_acts)
assert len(t_scores.shape) >= 1, "score_fn should not reduce the batch dim"
t_score = tf.reduce_sum(t_scores)
t_grad = tf.gradients(t_score, [t_acts_prev])[0]
acts_prev = t_acts_prev.eval()
path_acts = get_paths(
acts_prev, prev_nmf, max_paths=max_paths, integrate_steps=integrate_steps
)
deltas_of_path = lambda path: np.array(
[b - a for a, b in zip([np.zeros_like(acts_prev)] + path[:-1], path)]
)
grads_of_path = lambda path: np.array(
[t_grad.eval(feed_dict={t_acts_prev: acts}) for acts in path]
)
path_attrs = map(
lambda path: (deltas_of_path(path) * grads_of_path(path)).sum(axis=0),
path_acts,
)
total_attr = 0
num_paths = 0
for attr in path_attrs:
total_attr += attr
num_paths += 1
return total_attr / num_paths | 0.669205 | 0.413892 |
import model
def izpis_igre(igra):
tekst = (
'========================================'
'Število preostalih poskusov: {stevilo_preostalih_poskusov} \n\n'
' {pravilni_del_gesla}\n\n'
'Neuspeli poskusi: {neuspeli_poskusi}\n\n'
'========================================'
).format(
stevilo_preostalih_poskusov=model.STEVILO_DOVOLJENIH_NAPAK - igra.stevilo_napak() + 1,
pravilni_del_gesla=igra.pravilni_del_gesla(),
neuspeli_poskusi=igra.nepravilni_ugibi()
)
return tekst
def izpis_zmage(igra):
tekst = (
'Wipiiii, zmaga! Geslo je bilo: {geslo} \n\n'
).format(
geslo=igra.pravilni_del_gesla()
)
return tekst
def izpis_poraza(igra):
tekst = (
'Booooo, poraz! Geslo je bilo: {geslo} \n\n'
).format(
geslo=igra.geslo()
)
return tekst
def zahtevaj_vnos():
return input('Črka:')
def izpis_napake():
return '\n###### Ugiba se ena črka naenkrat\n\n'
def izpis_napake_znak():
return '\n###### Ugiba naj ne vsebuje posebnih znakov\n\n'
def pozeni_vmesnik():
igra = model.novaigra()
while True:
# najprej izpisemo stanje, da vidimo, koliko črk je ipd.
print(izpis_igre(igra))
#čakamo na črko od uporabnika
poskus = zahtevaj_vnos()
rezultat_ugiba = igra.ugibaj(poskus)
if rezultat_ugiba == model.VEC_KOT_CRKA:
print(izpis_napake())
elif rezultat_ugiba == model.POSEBEN_ZNAK:
print(izpis_napake_znak())
elif rezultat_ugiba == model.ZMAGA:
print(izpis_zmage(igra))
ponovni_zagon = ("za ponovni zagon vpišite 1.\n").strip()
if ponovni_zagon == "1":
igra = model.novaigra
else:
break
elif rezultat_ugiba == model.PORAZ:
print(izpis_poraza(igra))
ponovni_zagon = ("za ponovni zagon vpišite 1.\n").strip()
if ponovni_zagon == "1":
igra = model.novaigra
else:
break
#zaženi igro
pozeni_vmesnik() | tekstovni_vmesnik.py | import model
def izpis_igre(igra):
tekst = (
'========================================'
'Število preostalih poskusov: {stevilo_preostalih_poskusov} \n\n'
' {pravilni_del_gesla}\n\n'
'Neuspeli poskusi: {neuspeli_poskusi}\n\n'
'========================================'
).format(
stevilo_preostalih_poskusov=model.STEVILO_DOVOLJENIH_NAPAK - igra.stevilo_napak() + 1,
pravilni_del_gesla=igra.pravilni_del_gesla(),
neuspeli_poskusi=igra.nepravilni_ugibi()
)
return tekst
def izpis_zmage(igra):
tekst = (
'Wipiiii, zmaga! Geslo je bilo: {geslo} \n\n'
).format(
geslo=igra.pravilni_del_gesla()
)
return tekst
def izpis_poraza(igra):
tekst = (
'Booooo, poraz! Geslo je bilo: {geslo} \n\n'
).format(
geslo=igra.geslo()
)
return tekst
def zahtevaj_vnos():
return input('Črka:')
def izpis_napake():
return '\n###### Ugiba se ena črka naenkrat\n\n'
def izpis_napake_znak():
return '\n###### Ugiba naj ne vsebuje posebnih znakov\n\n'
def pozeni_vmesnik():
igra = model.novaigra()
while True:
# najprej izpisemo stanje, da vidimo, koliko črk je ipd.
print(izpis_igre(igra))
#čakamo na črko od uporabnika
poskus = zahtevaj_vnos()
rezultat_ugiba = igra.ugibaj(poskus)
if rezultat_ugiba == model.VEC_KOT_CRKA:
print(izpis_napake())
elif rezultat_ugiba == model.POSEBEN_ZNAK:
print(izpis_napake_znak())
elif rezultat_ugiba == model.ZMAGA:
print(izpis_zmage(igra))
ponovni_zagon = ("za ponovni zagon vpišite 1.\n").strip()
if ponovni_zagon == "1":
igra = model.novaigra
else:
break
elif rezultat_ugiba == model.PORAZ:
print(izpis_poraza(igra))
ponovni_zagon = ("za ponovni zagon vpišite 1.\n").strip()
if ponovni_zagon == "1":
igra = model.novaigra
else:
break
#zaženi igro
pozeni_vmesnik() | 0.08947 | 0.283707 |
from __future__ import print_function
import numpy as np
def generate_mult_function_batch_compile(k_list, l_list, m_list, mult_table_vals, n_dims,
product_name, product_mask=None, cuda=False):
"""
Takes a given product and generates the code for a function that evaluates it
"""
if product_mask is None:
k_list_copy = k_list
l_list_copy = l_list
m_list_copy = m_list
mult_table_vals_copy = mult_table_vals
else:
k_list_copy = np.zeros(product_mask.shape[0], dtype=np.int64)
l_list_copy = np.zeros(product_mask.shape[0], dtype=np.int64)
m_list_copy = np.zeros(product_mask.shape[0], dtype=np.int64)
mult_table_vals_copy = np.zeros(product_mask.shape[0])
for i in range(product_mask.shape[0]):
k_list_copy[i] = k_list[product_mask[i]]
l_list_copy[i] = l_list[product_mask[i]]
m_list_copy[i] = m_list[product_mask[i]]
mult_table_vals_copy[i] = mult_table_vals[product_mask[i]]
# Sort them by l list
arg_list = np.argsort(l_list_copy)
def get_output_func_f_string(l_value):
if cuda:
f_string = '@cuda.jit(device=True)\n'
else:
f_string = '@njit\n'
fname = product_name + '_o' + str(l_value)
f_string += 'def ' + fname + '(value, other_value):\n'
f_string += ' return 0'
for ind in arg_list:
l = l_list_copy[ind]
if l == l_value:
k = k_list_copy[ind]
m = m_list_copy[ind]
mtv = mult_table_vals_copy[ind]
f_string += ' + ' + str(mtv) + '*value[' + str(k) + ']*other_value[' + str(m) + ']'
return f_string
total_string = ''
if cuda:
totalfuncstring = '@cuda.jit(device=True)\n'
else:
totalfuncstring = '@njit\n'
totalfuncstring += 'def ' + product_name + '(value, other_value, output):\n'
for i in range(n_dims):
total_string += get_output_func_f_string(i) + '\n\n'
f_name = product_name + '_o' + str(i)
totalfuncstring += ' output[' + str(i) + '] = ' + f_name + '(value,other_value)\n'
total_string += totalfuncstring
return total_string
def write_mult_function_batch_compile(k_list, l_list, m_list, mult_table_vals, n_dims, product_name, file_obj,
product_mask=None, cuda=False):
"""
Takes a given product and generates the code for a function that evaluates it, saves this to file
"""
total_string = generate_mult_function_batch_compile(k_list, l_list, m_list, mult_table_vals, n_dims,
product_name, product_mask=product_mask, cuda=cuda)
print(total_string, file=file_obj)
def write_algebra(file_name, layout, cuda=False):
"""
Writes the functions implementing gmt, omt and imt for a given layout into file_name
"""
with open(file_name, 'w') as file_obj:
# Write the preamble
print('import numpy as np\nfrom numba import njit, cuda\n\n', file=file_obj)
# Write the gmt
write_mult_function_batch_compile(layout.k_list, layout.l_list, layout.m_list, layout.mult_table_vals,
layout.gaDims, 'gmt_func', file_obj, cuda=cuda)
# Write the omt
write_mult_function_batch_compile(layout.k_list, layout.l_list, layout.m_list, layout.mult_table_vals,
layout.gaDims, 'omt_func', file_obj,
product_mask=layout.omt_prod_mask, cuda=cuda)
# Write the imt
write_mult_function_batch_compile(layout.k_list, layout.l_list, layout.m_list, layout.mult_table_vals,
layout.gaDims, 'imt_func', file_obj,
product_mask=layout.imt_prod_mask, cuda=cuda)
if __name__ == '__main__':
from clifford.g3c import *
file_name = 'tools/g3c/cuda_products.py'
write_algebra(file_name, layout, cuda=True) | clifford/code_gen.py | from __future__ import print_function
import numpy as np
def generate_mult_function_batch_compile(k_list, l_list, m_list, mult_table_vals, n_dims,
product_name, product_mask=None, cuda=False):
"""
Takes a given product and generates the code for a function that evaluates it
"""
if product_mask is None:
k_list_copy = k_list
l_list_copy = l_list
m_list_copy = m_list
mult_table_vals_copy = mult_table_vals
else:
k_list_copy = np.zeros(product_mask.shape[0], dtype=np.int64)
l_list_copy = np.zeros(product_mask.shape[0], dtype=np.int64)
m_list_copy = np.zeros(product_mask.shape[0], dtype=np.int64)
mult_table_vals_copy = np.zeros(product_mask.shape[0])
for i in range(product_mask.shape[0]):
k_list_copy[i] = k_list[product_mask[i]]
l_list_copy[i] = l_list[product_mask[i]]
m_list_copy[i] = m_list[product_mask[i]]
mult_table_vals_copy[i] = mult_table_vals[product_mask[i]]
# Sort them by l list
arg_list = np.argsort(l_list_copy)
def get_output_func_f_string(l_value):
if cuda:
f_string = '@cuda.jit(device=True)\n'
else:
f_string = '@njit\n'
fname = product_name + '_o' + str(l_value)
f_string += 'def ' + fname + '(value, other_value):\n'
f_string += ' return 0'
for ind in arg_list:
l = l_list_copy[ind]
if l == l_value:
k = k_list_copy[ind]
m = m_list_copy[ind]
mtv = mult_table_vals_copy[ind]
f_string += ' + ' + str(mtv) + '*value[' + str(k) + ']*other_value[' + str(m) + ']'
return f_string
total_string = ''
if cuda:
totalfuncstring = '@cuda.jit(device=True)\n'
else:
totalfuncstring = '@njit\n'
totalfuncstring += 'def ' + product_name + '(value, other_value, output):\n'
for i in range(n_dims):
total_string += get_output_func_f_string(i) + '\n\n'
f_name = product_name + '_o' + str(i)
totalfuncstring += ' output[' + str(i) + '] = ' + f_name + '(value,other_value)\n'
total_string += totalfuncstring
return total_string
def write_mult_function_batch_compile(k_list, l_list, m_list, mult_table_vals, n_dims, product_name, file_obj,
product_mask=None, cuda=False):
"""
Takes a given product and generates the code for a function that evaluates it, saves this to file
"""
total_string = generate_mult_function_batch_compile(k_list, l_list, m_list, mult_table_vals, n_dims,
product_name, product_mask=product_mask, cuda=cuda)
print(total_string, file=file_obj)
def write_algebra(file_name, layout, cuda=False):
"""
Writes the functions implementing gmt, omt and imt for a given layout into file_name
"""
with open(file_name, 'w') as file_obj:
# Write the preamble
print('import numpy as np\nfrom numba import njit, cuda\n\n', file=file_obj)
# Write the gmt
write_mult_function_batch_compile(layout.k_list, layout.l_list, layout.m_list, layout.mult_table_vals,
layout.gaDims, 'gmt_func', file_obj, cuda=cuda)
# Write the omt
write_mult_function_batch_compile(layout.k_list, layout.l_list, layout.m_list, layout.mult_table_vals,
layout.gaDims, 'omt_func', file_obj,
product_mask=layout.omt_prod_mask, cuda=cuda)
# Write the imt
write_mult_function_batch_compile(layout.k_list, layout.l_list, layout.m_list, layout.mult_table_vals,
layout.gaDims, 'imt_func', file_obj,
product_mask=layout.imt_prod_mask, cuda=cuda)
if __name__ == '__main__':
from clifford.g3c import *
file_name = 'tools/g3c/cuda_products.py'
write_algebra(file_name, layout, cuda=True) | 0.45181 | 0.179495 |
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, unique=True, verbose_name='Name')),
('slug', models.SlugField(unique=True, verbose_name='Category slug')),
],
options={
'verbose_name_plural': 'Categories',
'verbose_name': 'Category',
},
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_name', models.CharField(max_length=50, verbose_name='name')),
('user_email', models.EmailField(blank=True, max_length=254, verbose_name='email')),
('user_url', models.URLField(blank=True, verbose_name='website')),
('content', models.TextField(verbose_name='comment')),
('created', models.DateTimeField(default=datetime.datetime.now, verbose_name='comment created')),
('is_approved', models.BooleanField(default=False, verbose_name='comment approved')),
('like', models.IntegerField(default=0, verbose_name='Likes')),
('dislike', models.IntegerField(default=0, verbose_name='Dislikes')),
('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='blog.Comment', verbose_name='parent comment')),
],
options={
'ordering': ['-created'],
},
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50, verbose_name='Title')),
('slug', models.SlugField(unique=True, verbose_name='Post slug')),
('tease', models.TextField(blank=True, verbose_name='Tease (summary)')),
('body', models.TextField()),
('draft', models.BooleanField(default=True, verbose_name='Is draft')),
('is_comment_allowed', models.BooleanField(default=True, verbose_name='Allowed')),
('created_at', models.DateTimeField(default=datetime.datetime.now, verbose_name='Date of creation')),
('published_at', models.DateTimeField(default=datetime.datetime.now, verbose_name='Date of publication')),
('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='entries', to='blog.Category')),
],
options={
'ordering': ['-published_at'],
},
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, unique=True)),
('slug', models.SlugField(unique=True, verbose_name='Tag slug')),
],
),
migrations.AddField(
model_name='post',
name='tag',
field=models.ManyToManyField(related_name='metategs', to='blog.Tag'),
),
migrations.AddField(
model_name='comment',
name='post',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Post', verbose_name='related post'),
),
] | blog/migrations/0001_initial.py | from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, unique=True, verbose_name='Name')),
('slug', models.SlugField(unique=True, verbose_name='Category slug')),
],
options={
'verbose_name_plural': 'Categories',
'verbose_name': 'Category',
},
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_name', models.CharField(max_length=50, verbose_name='name')),
('user_email', models.EmailField(blank=True, max_length=254, verbose_name='email')),
('user_url', models.URLField(blank=True, verbose_name='website')),
('content', models.TextField(verbose_name='comment')),
('created', models.DateTimeField(default=datetime.datetime.now, verbose_name='comment created')),
('is_approved', models.BooleanField(default=False, verbose_name='comment approved')),
('like', models.IntegerField(default=0, verbose_name='Likes')),
('dislike', models.IntegerField(default=0, verbose_name='Dislikes')),
('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='blog.Comment', verbose_name='parent comment')),
],
options={
'ordering': ['-created'],
},
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50, verbose_name='Title')),
('slug', models.SlugField(unique=True, verbose_name='Post slug')),
('tease', models.TextField(blank=True, verbose_name='Tease (summary)')),
('body', models.TextField()),
('draft', models.BooleanField(default=True, verbose_name='Is draft')),
('is_comment_allowed', models.BooleanField(default=True, verbose_name='Allowed')),
('created_at', models.DateTimeField(default=datetime.datetime.now, verbose_name='Date of creation')),
('published_at', models.DateTimeField(default=datetime.datetime.now, verbose_name='Date of publication')),
('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='entries', to='blog.Category')),
],
options={
'ordering': ['-published_at'],
},
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, unique=True)),
('slug', models.SlugField(unique=True, verbose_name='Tag slug')),
],
),
migrations.AddField(
model_name='post',
name='tag',
field=models.ManyToManyField(related_name='metategs', to='blog.Tag'),
),
migrations.AddField(
model_name='comment',
name='post',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Post', verbose_name='related post'),
),
] | 0.592313 | 0.143548 |
import os
import sys
import json
import types
import shutil
import pickle
import joblib
import xxhash
import inspect
import logging
from glob import glob
from time import time
from pathlib import Path
from functools import partial, wraps
CACHE_DIR = Path.home()/'.niq'
def load_cache(cache_path):
try:
return joblib.load(open(cache_path, 'rb'))
except:
return None
def save_cache(cache_path, result):
cache_dir = os.path.dirname(cache_path)
if not os.path.exists(cache_dir):
os.mkdir(cache_dir)
joblib.dump(result, open(cache_path, 'wb'))
def cache(func=None, cache_dir=CACHE_DIR):
'''Cache result of function call on disk
Support multiple positional and keyword arguments'''
if func is None:
return partial(cache, cache_dir=cache_dir)
@wraps(func)
def memoized_func(*args, **kwargs):
func_id = identify_func(func, args, kwargs)
use_cache = os.environ.get('NIQ_CACHE', '0') == '1'
refresh_list = os.environ.get('NIQ_REFRESH', '').split(',')
cache_path = os.path.join(cache_dir, func_id)
if use_cache:
if func.__qualname__ in refresh_list:
result = None
else:
result = load_cache(cache_path)
if result is None:
result = func(*args, **kwargs)
save_cache(cache_path, result)
else:
result = func(*args, **kwargs)
return result
return memoized_func
def howlong(func):
'''Decorator to print a function's execution time
Time taken for the most recent call to the decorated function can be accessed via the `last_run` attribute'''
@wraps(func)
def timed_func(*args, **kwargs):
start_time = time()
result = func(*args, **kwargs)
stop_time = time()
timed_func.last_run = stop_time - start_time
print(f'Calling {func.__qualname__} took {timed_func.last_run}')
return result
return timed_func
def identify(x):
'''Return an hex digest of the input'''
return xxhash.xxh64(pickle.dumps(x), seed=0).hexdigest()
def identify_func(func, args, kwargs):
if '.' in func.__qualname__ and not inspect.ismethod(func):
args = args[1:]
return identify((func.__qualname__, args, kwargs)) | niq/_niq.py | import os
import sys
import json
import types
import shutil
import pickle
import joblib
import xxhash
import inspect
import logging
from glob import glob
from time import time
from pathlib import Path
from functools import partial, wraps
CACHE_DIR = Path.home()/'.niq'
def load_cache(cache_path):
try:
return joblib.load(open(cache_path, 'rb'))
except:
return None
def save_cache(cache_path, result):
cache_dir = os.path.dirname(cache_path)
if not os.path.exists(cache_dir):
os.mkdir(cache_dir)
joblib.dump(result, open(cache_path, 'wb'))
def cache(func=None, cache_dir=CACHE_DIR):
'''Cache result of function call on disk
Support multiple positional and keyword arguments'''
if func is None:
return partial(cache, cache_dir=cache_dir)
@wraps(func)
def memoized_func(*args, **kwargs):
func_id = identify_func(func, args, kwargs)
use_cache = os.environ.get('NIQ_CACHE', '0') == '1'
refresh_list = os.environ.get('NIQ_REFRESH', '').split(',')
cache_path = os.path.join(cache_dir, func_id)
if use_cache:
if func.__qualname__ in refresh_list:
result = None
else:
result = load_cache(cache_path)
if result is None:
result = func(*args, **kwargs)
save_cache(cache_path, result)
else:
result = func(*args, **kwargs)
return result
return memoized_func
def howlong(func):
'''Decorator to print a function's execution time
Time taken for the most recent call to the decorated function can be accessed via the `last_run` attribute'''
@wraps(func)
def timed_func(*args, **kwargs):
start_time = time()
result = func(*args, **kwargs)
stop_time = time()
timed_func.last_run = stop_time - start_time
print(f'Calling {func.__qualname__} took {timed_func.last_run}')
return result
return timed_func
def identify(x):
'''Return an hex digest of the input'''
return xxhash.xxh64(pickle.dumps(x), seed=0).hexdigest()
def identify_func(func, args, kwargs):
if '.' in func.__qualname__ and not inspect.ismethod(func):
args = args[1:]
return identify((func.__qualname__, args, kwargs)) | 0.302082 | 0.054024 |
from momiji.modules import permissions
import discord
from discord.ext import commands
from momiji.reusables import get_member_helpers
class Utilities(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name="ban_member", brief="Ban a member")
@commands.check(permissions.is_admin)
@commands.check(permissions.is_not_ignored)
async def ban_member(self, ctx, user_id, *, reason=None):
"""
Ban a member
"""
if self.bot.shadow_guild:
guild = self.bot.shadow_guild
await ctx.send(f"using a guild {guild.name}")
else:
guild = ctx.guild
if not guild:
guild = self.bot.shadow_guild
if not guild:
await ctx.send("command not typed in a guild and no shadow guild set")
return
user = discord.Object(int(user_id))
try:
await guild.ban(user=user, reason=reason)
except Exception as e:
await ctx.send(e)
await ctx.send(f"banned {user_id} with reason `{str(reason)}`")
@commands.command(name="mass_nick", brief="Nickname every user")
@commands.check(permissions.is_admin)
@commands.check(permissions.is_not_ignored)
@commands.guild_only()
async def mass_nick(self, ctx, nickname=None):
"""
Give a same nickname to every server member.
If you don't specify anything it will remove all nicknames.
"""
async with ctx.channel.typing():
for member in ctx.guild.members:
try:
await member.edit(nick=nickname)
except Exception as e:
await ctx.send(member.name)
await ctx.send(e)
await ctx.send("Done")
@commands.command(name="prune_role", brief="Remove this role from every member")
@commands.check(permissions.is_admin)
@commands.check(permissions.is_not_ignored)
@commands.guild_only()
async def prune_role(self, ctx, role_name):
"""
Remove a specified role from every member who has it
"""
async with ctx.channel.typing():
role = discord.utils.get(ctx.guild.roles, name=role_name)
for member in role.members:
await member.remove_roles(role, reason=f"pruned role `{role_name}`")
await ctx.send("Done")
@commands.command(name="clean_member_roles", brief="Take all roles away from a member")
@commands.check(permissions.is_admin)
@commands.check(permissions.is_not_ignored)
@commands.guild_only()
async def clean_member_roles(self, ctx, user_id):
"""
Take away every role a member has
"""
member = get_member_helpers.get_member_guaranteed(ctx, user_id)
if member:
try:
await member.edit(roles=[])
await ctx.send("Done")
except:
await ctx.send("no perms to change nickname and/or remove roles")
def setup(bot):
bot.add_cog(Utilities(bot)) | momiji/cogs/Utilities.py | from momiji.modules import permissions
import discord
from discord.ext import commands
from momiji.reusables import get_member_helpers
class Utilities(commands.Cog):
    """Admin moderation commands: banning, mass nicknaming and role cleanup."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(name="ban_member", brief="Ban a member")
    @commands.check(permissions.is_admin)
    @commands.check(permissions.is_not_ignored)
    async def ban_member(self, ctx, user_id, *, reason=None):
        """
        Ban a member
        """
        # Prefer the configured shadow guild, then the guild the command
        # was typed in.
        if self.bot.shadow_guild:
            guild = self.bot.shadow_guild
            await ctx.send(f"using a guild {guild.name}")
        else:
            guild = ctx.guild
            if not guild:
                guild = self.bot.shadow_guild
            if not guild:
                await ctx.send("command not typed in a guild and no shadow guild set")
                return
        # discord.Object allows banning by raw id without fetching the user.
        user = discord.Object(int(user_id))
        try:
            await guild.ban(user=user, reason=reason)
        except Exception as e:
            await ctx.send(e)
            return  # bug fix: don't report success when the ban failed
        await ctx.send(f"banned {user_id} with reason `{str(reason)}`")

    @commands.command(name="mass_nick", brief="Nickname every user")
    @commands.check(permissions.is_admin)
    @commands.check(permissions.is_not_ignored)
    @commands.guild_only()
    async def mass_nick(self, ctx, nickname=None):
        """
        Give a same nickname to every server member.
        If you don't specify anything it will remove all nicknames.
        """
        async with ctx.channel.typing():
            for member in ctx.guild.members:
                try:
                    await member.edit(nick=nickname)
                except Exception as e:
                    # Report the member we could not rename and keep going.
                    await ctx.send(member.name)
                    await ctx.send(e)
        await ctx.send("Done")

    @commands.command(name="prune_role", brief="Remove this role from every member")
    @commands.check(permissions.is_admin)
    @commands.check(permissions.is_not_ignored)
    @commands.guild_only()
    async def prune_role(self, ctx, role_name):
        """
        Remove a specified role from every member who has it
        """
        async with ctx.channel.typing():
            role = discord.utils.get(ctx.guild.roles, name=role_name)
            if role is None:
                # bug fix: the original crashed with AttributeError when the
                # role name did not exist in the guild.
                await ctx.send(f"no role named `{role_name}` found")
                return
            for member in role.members:
                await member.remove_roles(role, reason=f"pruned role `{role_name}`")
        await ctx.send("Done")

    @commands.command(name="clean_member_roles", brief="Take all roles away from a member")
    @commands.check(permissions.is_admin)
    @commands.check(permissions.is_not_ignored)
    @commands.guild_only()
    async def clean_member_roles(self, ctx, user_id):
        """
        Take away every role a member has
        """
        member = get_member_helpers.get_member_guaranteed(ctx, user_id)
        if member:
            try:
                # Setting an empty role list strips every role at once.
                await member.edit(roles=[])
                await ctx.send("Done")
            except Exception:
                # bug fix: was a bare ``except:``; the old message mentioned
                # nicknames, which this command never touches.
                await ctx.send("no perms to remove roles from this member")
def setup(bot):
bot.add_cog(Utilities(bot)) | 0.378229 | 0.080394 |
import turtle
import random
import time
# Draw the cherry tree's trunk and branches (recursive).
def tree(branch, t):
    """Recursively draw one branch of length ``branch`` with turtle ``t``.

    Colour and pen width depend on the remaining length: long segments are
    thick brown trunk, shorter ones are thin white/pink blossom tips.
    """
    time.sleep(0.0008)  # slow the animation down a little
    if branch > 3:  # stop recursing once the branch is too short to matter
        if 8 <= branch <= 12:
            # Medium branch: 1/3 chance white blossom, otherwise pink.
            if random.randint(0, 2) == 0:
                t.color('snow')
            else:
                t.color('lightcoral')
            t.pensize(branch / 3)
        elif branch < 8:
            # Thin tip: 50/50 white or pink.
            if random.randint(0, 1) == 0:
                t.color('snow')
            else:
                t.color('lightcoral')
            t.pensize(branch / 2)
        else:
            # Thick part of the tree: brown trunk.
            t.color('sienna')
            t.pensize(branch / 10)
        t.forward(branch)
        # Randomised branching angles and child lengths.
        a = 1.5 * random.random()
        t.right(20 * a)
        b = 1.5 * random.random()
        tree(branch - 10 * b, t)
        t.left(40 * a)
        tree(branch - 10 * b, t)
        t.right(20 * a)
        # Walk back to the branch origin without drawing.
        t.up()
        t.backward(branch)
        t.down()
# Falling petals scattered around the tree.
def petal(m, t):
    """Draw ``m`` petals at random offsets, restoring the pen after each."""

    def draw_one():
        # Random displacement from the current pen position.
        dy = 200 - 380 * random.random()
        dx = 10 - 20 * random.random()
        t.up()
        t.forward(dx)
        t.left(90)
        t.forward(dy)
        t.down()
        t.color('lightcoral')
        t.circle(1)  # a petal is just a tiny circle
        # Retrace the moves so the pen ends where it started.
        t.up()
        t.backward(dy)
        t.right(90)
        t.backward(dx)

    for _ in range(m):
        draw_one()
def write(t):
    """Write the greeting poem below the tree with turtle ``t``."""
    t.up()
    t.goto(0, -110)  # position the text under the trunk
    t.pencolor('black')
    t.write("Ivy J.\n\n暖春三月,樱花纷纷落落,\n花瓣唱着歌,飘向你心窝。\n愿它的香气能令你的心情快乐,\n愿你拥有樱花般灿烂的生活!^_^",
            font=('华文楷体', 16, 'italic'))
"""
.Turtle:注意字母的大写,用于生成一个 turtle 对象
.mainloop:保持画布窗口不消失,用在最后
.mode:"logo",初始位置指向北(上);"standard",初始位置指向东方(右)
.fillcolor:设置要填充的颜色
.color(p1, p2):p1 画笔整体颜色; p2 由画笔画出的图形的填充颜色
turtle.backward(distance):沿当前反方向,画笔绘制distance距离
turtle.forward(distance):沿当前方向,画笔绘制distance距离
turtle.right(degree):顺时针移动degree度
turtle.left(degree):逆时针移动degree度
.seth/setheading(angle):设置当前朝向为angle角度,若模式为“logo”,则顺时针旋转;若模式为“standard”,则逆时针旋转
.heading:返回当前放置的角度
.pu/penup/up:抬笔
.pd/pendown/down:落笔
.goto/setposition/setpos:移动到相对于画布中心点的坐标位置(x,y),画布是一个以初始位置为原点的坐标系
.setx/sety:保持一个坐标不变,移到到另一个坐标,移动的距离是相对于原点来计算的
.xcor/ycor:返回当前箭头所处位置的橫纵坐标
.home:让画笔回到初始位置(原点),同时绘制
.reset:抹去之前所有的痕迹,重新绘画,恢复箭头的初始状态
.clear:抹去之前所有的痕迹,但是保持箭头的初始状态
.circle:一个输入参数时画圆,两个时画弧长,三个参数时画多边形
.pensize:设置画笔大小
.speed:设置画笔移动速度,0为最快速度
.undo:撤销上一次操作
.write:绘制文本
.getscreen:获取画布对象,对画布进行操作
"""
try:
myWin = turtle.Screen()
myWin.title("樱花 ^_^")
myWin.tracer(5, 2)
# 隐藏画笔
turtle.hideturtle()
turtle.setx(-120)
turtle.left(90)
# 抹去之前所有的痕迹,但是保持箭头的初始状态
turtle.clear()
turtle.up()
turtle.backward(150)
turtle.down()
turtle.color('sienna')
# 画樱花的躯干
tree(60, turtle)
# 掉落的花瓣
petal(210, turtle)
# 写字
write(turtle)
turtle.done()
except (turtle.Terminator, BaseException):
pass | cherry/cherry.py | import turtle
import random
import time
# Draw the cherry tree's trunk and branches (recursive).
def tree(branch, t):
    """Recursively draw one branch of length ``branch`` with turtle ``t``.

    Colour and pen width depend on the remaining length: long segments are
    thick brown trunk, shorter ones are thin white/pink blossom tips.
    """
    time.sleep(0.0008)  # slow the animation down a little
    if branch > 3:  # stop recursing once the branch is too short to matter
        if 8 <= branch <= 12:
            # Medium branch: 1/3 chance white blossom, otherwise pink.
            if random.randint(0, 2) == 0:
                t.color('snow')
            else:
                t.color('lightcoral')
            t.pensize(branch / 3)
        elif branch < 8:
            # Thin tip: 50/50 white or pink.
            if random.randint(0, 1) == 0:
                t.color('snow')
            else:
                t.color('lightcoral')
            t.pensize(branch / 2)
        else:
            # Thick part of the tree: brown trunk.
            t.color('sienna')
            t.pensize(branch / 10)
        t.forward(branch)
        # Randomised branching angles and child lengths.
        a = 1.5 * random.random()
        t.right(20 * a)
        b = 1.5 * random.random()
        tree(branch - 10 * b, t)
        t.left(40 * a)
        tree(branch - 10 * b, t)
        t.right(20 * a)
        # Walk back to the branch origin without drawing.
        t.up()
        t.backward(branch)
        t.down()
# Falling petals scattered around the tree.
def petal(m, t):
    """Draw ``m`` petals at random offsets, restoring the pen after each."""

    def draw_one():
        # Random displacement from the current pen position.
        dy = 200 - 380 * random.random()
        dx = 10 - 20 * random.random()
        t.up()
        t.forward(dx)
        t.left(90)
        t.forward(dy)
        t.down()
        t.color('lightcoral')
        t.circle(1)  # a petal is just a tiny circle
        # Retrace the moves so the pen ends where it started.
        t.up()
        t.backward(dy)
        t.right(90)
        t.backward(dx)

    for _ in range(m):
        draw_one()
def write(t):
    """Write the greeting poem below the tree with turtle ``t``."""
    t.up()
    t.goto(0, -110)  # position the text under the trunk
    t.pencolor('black')
    t.write("Ivy J.\n\n暖春三月,樱花纷纷落落,\n花瓣唱着歌,飘向你心窝。\n愿它的香气能令你的心情快乐,\n愿你拥有樱花般灿烂的生活!^_^",
            font=('华文楷体', 16, 'italic'))
"""
.Turtle:注意字母的大写,用于生成一个 turtle 对象
.mainloop:保持画布窗口不消失,用在最后
.mode:"logo",初始位置指向北(上);"standard",初始位置指向东方(右)
.fillcolor:设置要填充的颜色
.color(p1, p2):p1 画笔整体颜色; p2 由画笔画出的图形的填充颜色
turtle.backward(distance):沿当前反方向,画笔绘制distance距离
turtle.forward(distance):沿当前方向,画笔绘制distance距离
turtle.right(degree):顺时针移动degree度
turtle.left(degree):逆时针移动degree度
.seth/setheading(angle):设置当前朝向为angle角度,若模式为“logo”,则顺时针旋转;若模式为“standard”,则逆时针旋转
.heading:返回当前放置的角度
.pu/penup/up:抬笔
.pd/pendown/down:落笔
.goto/setposition/setpos:移动到相对于画布中心点的坐标位置(x,y),画布是一个以初始位置为原点的坐标系
.setx/sety:保持一个坐标不变,移到到另一个坐标,移动的距离是相对于原点来计算的
.xcor/ycor:返回当前箭头所处位置的橫纵坐标
.home:让画笔回到初始位置(原点),同时绘制
.reset:抹去之前所有的痕迹,重新绘画,恢复箭头的初始状态
.clear:抹去之前所有的痕迹,但是保持箭头的初始状态
.circle:一个输入参数时画圆,两个时画弧长,三个参数时画多边形
.pensize:设置画笔大小
.speed:设置画笔移动速度,0为最快速度
.undo:撤销上一次操作
.write:绘制文本
.getscreen:获取画布对象,对画布进行操作
"""
try:
myWin = turtle.Screen()
myWin.title("樱花 ^_^")
myWin.tracer(5, 2)
# 隐藏画笔
turtle.hideturtle()
turtle.setx(-120)
turtle.left(90)
# 抹去之前所有的痕迹,但是保持箭头的初始状态
turtle.clear()
turtle.up()
turtle.backward(150)
turtle.down()
turtle.color('sienna')
# 画樱花的躯干
tree(60, turtle)
# 掉落的花瓣
petal(210, turtle)
# 写字
write(turtle)
turtle.done()
except (turtle.Terminator, BaseException):
pass | 0.165965 | 0.349366 |
from sklearn import datasets
from sklearn import svm
from sklearn.model_selection import cross_validate
import numpy as np
import matplotlib.pyplot as plt
def plot_svc(model):
    """Plot the decision function for a 2D SVC"""
    # Draw onto the current axes; the data scatter is assumed to have been
    # plotted already, so the axis limits reflect the data range.
    ax = plt.gca()
    xlim = ax.get_xlim()
    ylim = ax.get_ylim()
    # create grid to evaluate model
    x = np.linspace(xlim[0], xlim[1], 30)
    y = np.linspace(ylim[0], ylim[1], 30)
    Y, X = np.meshgrid(y, x)
    xy = np.vstack([X.ravel(), Y.ravel()]).T
    P = model.decision_function(xy).reshape(X.shape)
    # plot decision boundary and margins
    ax.contour(X, Y, P, colors='k',
               levels=[-1, 0, 1], alpha=0.5,  # margins at +/-1, boundary at 0
               linestyles=['--', '-', '--'])
    # plot support vectors
    ax.scatter(model.support_vectors_[:, 0],
               model.support_vectors_[:, 1],
               s=300, linewidth=1, facecolors='none');
    # Restore the limits that contour/scatter may have expanded.
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
def kfold_svm(X, y, number_of_folds=5, kernel='rbf', C=1, gamma='auto', degree=5, coef0=0.0):
    """Cross-validate an SVC and return (best fold estimator, its test accuracy)."""
    model = svm.SVC(kernel=kernel, C=C, gamma=gamma, degree=degree, coef0=coef0)
    results = cross_validate(model, X, y, cv=number_of_folds, scoring='accuracy',
                             return_estimator=True, return_train_score=True)
    print("train_scores: " )
    print(results['train_score'])
    print()
    print("test_scores: " )
    print(results['test_score'])
    # Pick the fold whose held-out score was highest.
    best = int(np.argmax(results['test_score']))
    return results['estimator'][best], results['test_score'][best]
def test_case_evaluator(test_case):
    """Build the dataset and fit the SVM configuration for one numbered case.

    Returns (x, y, best_classifier, best_score).

    Raises ValueError for an unknown case number (the original if/elif chain
    fell through and crashed with UnboundLocalError on the return line).
    """
    # Dataset generators, shared across several cases.
    circles_easy = (datasets.make_circles,
                    dict(n_samples=NUMBER_OF_SAMPLES, noise=0.1, random_state=42, factor=0.2))
    circles_hard = (datasets.make_circles,
                    dict(n_samples=NUMBER_OF_SAMPLES, noise=0.1, random_state=30, factor=0.8))
    moons = (datasets.make_moons,
             dict(n_samples=NUMBER_OF_SAMPLES, noise=0.1, random_state=32))
    blobs_tight = (datasets.make_blobs,
                   dict(n_samples=NUMBER_OF_SAMPLES, centers=2, cluster_std=1, random_state=150))
    blobs_wide = (datasets.make_blobs,
                  dict(n_samples=NUMBER_OF_SAMPLES, centers=2, cluster_std=2, random_state=175))
    # case number -> (dataset spec, kfold_svm keyword arguments)
    cases = {
        0: (circles_easy, dict(kernel='rbf', C=1, gamma=1)),
        1: (circles_easy, dict(kernel='poly', C=1, degree=5, coef0=0.5)),
        2: (circles_easy, dict(kernel='linear', C=1)),  # which doesn't work fine
        10: (circles_hard, dict(kernel='rbf', C=1, gamma=1)),
        11: (circles_hard, dict(kernel='rbf', C=1, gamma=1000)),  # overfit
        12: (circles_hard, dict(kernel='linear', C=1)),  # very bad
        13: (circles_hard, dict(kernel='poly', C=1, degree=5, coef0=0.5)),
        20: (moons, dict(kernel='rbf', C=1, gamma=1)),
        21: (moons, dict(kernel='rbf', C=1, gamma=1000)),  # very overfit
        22: (moons, dict(kernel='poly', C=1, degree=3, coef0=0.5)),
        23: (moons, dict(kernel='linear', C=1)),
        24: (moons, dict(kernel='rbf', C=0.0001, gamma=1)),  # very unsensitive
        30: (blobs_tight, dict(kernel='rbf', C=1, gamma=1)),
        31: (blobs_tight, dict(kernel='poly', C=1, degree=3, coef0=0.5)),
        32: (blobs_tight, dict(kernel='linear', C=1)),
        40: (blobs_wide, dict(kernel='rbf', C=1, gamma=1)),  # a liitle overfit
        41: (blobs_wide, dict(kernel='poly', C=1, degree=3, coef0=0.5)),
        42: (blobs_wide, dict(kernel='linear', C=1)),
    }
    try:
        (maker, data_kwargs), svm_kwargs = cases[test_case]
    except KeyError:
        raise ValueError(f"unknown test case: {test_case}") from None
    x, y = maker(**data_kwargs)
    clf, best_score = kfold_svm(x, y, **svm_kwargs)
    return x, y, clf, best_score
# ---- interactive driver ------------------------------------------------
NUMBER_OF_SAMPLES = 2000  # points per generated dataset

# Menu of available dataset/kernel experiments.
print("Choose a test case: (input number) \n")
print("0. two circles with rbf kernel")
print("1. two circles with poly degree 5 kernel")
print("2. two circles with linear kernel which doesn't work good")
print("10. two more difficult circles with rbf kernel")
print("11. two more difficult circles with rbf kernel that overfits")
print("12. two more difficult circles with linear kernel whcih doesn't work good")
print("13. two more difficult circles with poly degree 5 kernel whcih doesn't work good")
print("20. moon (crescent) with rbf kernel")
print("21. moon (crescent) with rbf kernel with very overfit")
print("22. moon (crescent) with poly degree 3 kernel")
print("23. moon (crescent) with linear kernel which is not very good")
print("24. moon (crescent) with rbf kernel with low sensitivity")
print("30. two blobs with low std and rbf kernel")
print("31. two blobs with low std and poly degree 3 kernel")
print("32. two blobs with low std and linear kernel")
print("40. two blobs with more std and rbf kernel")
print("41. two blobs with more std and poly degree 3 kernel")
print("42. two blobs with more std and linear kernel")
test_case = int(input("input the test case you want: "))
x, y, clf, best_score = test_case_evaluator(test_case)
# Scatter the data first so plot_svc can reuse the axis limits.
plt.scatter(x[:, 0], x[:, 1], c=y, s=50, cmap='autumn')
plot_svc(clf)
plt.show()
plt.clf()
print("best score: "+ str(best_score)) | SVM/1- First_Part/main.py | from sklearn import datasets
from sklearn import svm
from sklearn.model_selection import cross_validate
import numpy as np
import matplotlib.pyplot as plt
def plot_svc(model):
    """Plot the decision function for a 2D SVC"""
    # Draw onto the current axes; the data scatter is assumed to have been
    # plotted already, so the axis limits reflect the data range.
    ax = plt.gca()
    xlim = ax.get_xlim()
    ylim = ax.get_ylim()
    # create grid to evaluate model
    x = np.linspace(xlim[0], xlim[1], 30)
    y = np.linspace(ylim[0], ylim[1], 30)
    Y, X = np.meshgrid(y, x)
    xy = np.vstack([X.ravel(), Y.ravel()]).T
    P = model.decision_function(xy).reshape(X.shape)
    # plot decision boundary and margins
    ax.contour(X, Y, P, colors='k',
               levels=[-1, 0, 1], alpha=0.5,  # margins at +/-1, boundary at 0
               linestyles=['--', '-', '--'])
    # plot support vectors
    ax.scatter(model.support_vectors_[:, 0],
               model.support_vectors_[:, 1],
               s=300, linewidth=1, facecolors='none');
    # Restore the limits that contour/scatter may have expanded.
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
def kfold_svm(X, y, number_of_folds=5, kernel='rbf', C=1, gamma='auto', degree=5, coef0=0.0):
    """Cross-validate an SVC and return (best fold estimator, its test accuracy)."""
    model = svm.SVC(kernel=kernel, C=C, gamma=gamma, degree=degree, coef0=coef0)
    results = cross_validate(model, X, y, cv=number_of_folds, scoring='accuracy',
                             return_estimator=True, return_train_score=True)
    print("train_scores: " )
    print(results['train_score'])
    print()
    print("test_scores: " )
    print(results['test_score'])
    # Pick the fold whose held-out score was highest.
    best = int(np.argmax(results['test_score']))
    return results['estimator'][best], results['test_score'][best]
def test_case_evaluator(test_case):
    """Build the dataset and fit the SVM configuration for one numbered case.

    Returns (x, y, best_classifier, best_score).

    Raises ValueError for an unknown case number (the original if/elif chain
    fell through and crashed with UnboundLocalError on the return line).
    """
    # Dataset generators, shared across several cases.
    circles_easy = (datasets.make_circles,
                    dict(n_samples=NUMBER_OF_SAMPLES, noise=0.1, random_state=42, factor=0.2))
    circles_hard = (datasets.make_circles,
                    dict(n_samples=NUMBER_OF_SAMPLES, noise=0.1, random_state=30, factor=0.8))
    moons = (datasets.make_moons,
             dict(n_samples=NUMBER_OF_SAMPLES, noise=0.1, random_state=32))
    blobs_tight = (datasets.make_blobs,
                   dict(n_samples=NUMBER_OF_SAMPLES, centers=2, cluster_std=1, random_state=150))
    blobs_wide = (datasets.make_blobs,
                  dict(n_samples=NUMBER_OF_SAMPLES, centers=2, cluster_std=2, random_state=175))
    # case number -> (dataset spec, kfold_svm keyword arguments)
    cases = {
        0: (circles_easy, dict(kernel='rbf', C=1, gamma=1)),
        1: (circles_easy, dict(kernel='poly', C=1, degree=5, coef0=0.5)),
        2: (circles_easy, dict(kernel='linear', C=1)),  # which doesn't work fine
        10: (circles_hard, dict(kernel='rbf', C=1, gamma=1)),
        11: (circles_hard, dict(kernel='rbf', C=1, gamma=1000)),  # overfit
        12: (circles_hard, dict(kernel='linear', C=1)),  # very bad
        13: (circles_hard, dict(kernel='poly', C=1, degree=5, coef0=0.5)),
        20: (moons, dict(kernel='rbf', C=1, gamma=1)),
        21: (moons, dict(kernel='rbf', C=1, gamma=1000)),  # very overfit
        22: (moons, dict(kernel='poly', C=1, degree=3, coef0=0.5)),
        23: (moons, dict(kernel='linear', C=1)),
        24: (moons, dict(kernel='rbf', C=0.0001, gamma=1)),  # very unsensitive
        30: (blobs_tight, dict(kernel='rbf', C=1, gamma=1)),
        31: (blobs_tight, dict(kernel='poly', C=1, degree=3, coef0=0.5)),
        32: (blobs_tight, dict(kernel='linear', C=1)),
        40: (blobs_wide, dict(kernel='rbf', C=1, gamma=1)),  # a liitle overfit
        41: (blobs_wide, dict(kernel='poly', C=1, degree=3, coef0=0.5)),
        42: (blobs_wide, dict(kernel='linear', C=1)),
    }
    try:
        (maker, data_kwargs), svm_kwargs = cases[test_case]
    except KeyError:
        raise ValueError(f"unknown test case: {test_case}") from None
    x, y = maker(**data_kwargs)
    clf, best_score = kfold_svm(x, y, **svm_kwargs)
    return x, y, clf, best_score
# ---- interactive driver ------------------------------------------------
NUMBER_OF_SAMPLES = 2000  # points per generated dataset

# Menu of available dataset/kernel experiments.
print("Choose a test case: (input number) \n")
print("0. two circles with rbf kernel")
print("1. two circles with poly degree 5 kernel")
print("2. two circles with linear kernel which doesn't work good")
print("10. two more difficult circles with rbf kernel")
print("11. two more difficult circles with rbf kernel that overfits")
print("12. two more difficult circles with linear kernel whcih doesn't work good")
print("13. two more difficult circles with poly degree 5 kernel whcih doesn't work good")
print("20. moon (crescent) with rbf kernel")
print("21. moon (crescent) with rbf kernel with very overfit")
print("22. moon (crescent) with poly degree 3 kernel")
print("23. moon (crescent) with linear kernel which is not very good")
print("24. moon (crescent) with rbf kernel with low sensitivity")
print("30. two blobs with low std and rbf kernel")
print("31. two blobs with low std and poly degree 3 kernel")
print("32. two blobs with low std and linear kernel")
print("40. two blobs with more std and rbf kernel")
print("41. two blobs with more std and poly degree 3 kernel")
print("42. two blobs with more std and linear kernel")
test_case = int(input("input the test case you want: "))
x, y, clf, best_score = test_case_evaluator(test_case)
# Scatter the data first so plot_svc can reuse the axis limits.
plt.scatter(x[:, 0], x[:, 1], c=y, s=50, cmap='autumn')
plot_svc(clf)
plt.show()
plt.clf()
print("best score: "+ str(best_score)) | 0.698535 | 0.553747 |
import matplotlib.pyplot as plt
import numpy as np
def get_energy(J, spins):
    """Total energy of a 2-D Ising lattice with periodic boundaries.

    Each of the four shifted lattices contributes one neighbour per site,
    so every bond is counted twice — hence the final division by 2.
    """
    neighbour_sum = sum(
        np.roll(spins, shift, axis=axis)
        for shift in (1, -1)
        for axis in (0, 1)
    )
    return -J * np.sum(spins * neighbour_sum) / 2
def deltaE(J, spins, i, j):
    """Energy change from flipping spin (i, j) on a periodic lattice.

    Bug fix: the original wrapped indices with the module-level global ``N``,
    so it silently computed wrong neighbours (or crashed) for any lattice
    whose size differed from the global. Use the array's own shape instead.
    """
    rows, cols = spins.shape
    flip = -spins[i, j]
    # Only the four nearest neighbours interact with the flipped spin.
    delta = -2*J*flip*(spins[(i+1) % rows, j] + spins[(i-1) % rows, j] +
                       spins[i, (j+1) % cols] + spins[i, (j-1) % cols])
    return delta
def do_gibbs_sampling(interaction, spins, energy, temperature, n_samples):
    """Run ``n_samples`` single-spin Metropolis updates in place.

    Returns the (mutated) spin array and the updated running energy.
    """
    rows = spins.shape[0]
    cols = spins.shape[1]
    for _ in range(n_samples):
        r = np.random.randint(rows)
        c = np.random.randint(cols)
        delta = deltaE(interaction, spins, r, c)
        # Metropolis rule: always accept downhill moves; otherwise accept
        # with probability exp(-delta / T). The short-circuit keeps the
        # RNG call pattern identical to the original.
        if delta < 0 or np.exp(-delta / temperature) > np.random.random():
            spins[r, c] = -spins[r, c]
            energy += delta
    return spins, energy
def plot_probabilities(energies, T, bins=10):
    """Plot normalised Boltzmann weights of the sampled energies at temperature T.

    NOTE(review): the ``bins`` parameter is currently unused.
    """
    probabilities = np.exp(-np.array(sorted(energies))/T)
    Z = probabilities.sum()  # normalisation over the sample
    probabilities /= Z
    plt.plot(sorted(energies), probabilities)
    plt.ylim(0, 1.2*probabilities.max())
    plt.show()
def get_energy_distribution(N, temperature, interaction, n_runs, burnin_time,
                            n_samples, n_sample_distance):
    """Collect equilibrium energy samples from repeated Metropolis runs.

    Each run starts from a random N x N configuration, discards
    ``burnin_time`` updates, then records the energy ``n_samples`` times,
    ``n_sample_distance`` updates apart.
    """
    energy_list = []
    for run in range(n_runs):
        print("Run %d" % run)
        spins = np.random.choice([-1, 1], size=(N, N))
        energy = get_energy(interaction, spins)
        # Burn-in so sampling starts near equilibrium.
        spins, energy = do_gibbs_sampling(interaction, spins, energy,
                                          temperature, burnin_time)
        for _ in range(n_samples):
            # Space samples out to reduce autocorrelation.
            spins, energy = do_gibbs_sampling(interaction, spins, energy,
                                              temperature, n_sample_distance)
            energy_list.append(energy)
    return energy_list
# ---- simulation parameters --------------------------------------------
temperature = 5
N = 5                     # lattice is N x N
interaction = 1.0         # coupling constant J
burnin_time = 10000       # updates discarded before sampling
n_sample_distance = 1000  # updates between recorded samples
n_samples = 100
n_runs = 50
energies = get_energy_distribution(N, temperature, interaction, n_runs,
                                   burnin_time, n_samples, n_sample_distance)
plot_probabilities(energies, temperature, bins=50) | Code/Ising_energy_Gibbs_sampling.py | import matplotlib.pyplot as plt
import numpy as np
def get_energy(J, spins):
    """Total energy of a 2-D Ising lattice with periodic boundaries.

    Each of the four shifted lattices contributes one neighbour per site,
    so every bond is counted twice — hence the final division by 2.
    """
    neighbour_sum = sum(
        np.roll(spins, shift, axis=axis)
        for shift in (1, -1)
        for axis in (0, 1)
    )
    return -J * np.sum(spins * neighbour_sum) / 2
def deltaE(J, spins, i, j):
    """Energy change from flipping spin (i, j) on a periodic lattice.

    Bug fix: the original wrapped indices with the module-level global ``N``,
    so it silently computed wrong neighbours (or crashed) for any lattice
    whose size differed from the global. Use the array's own shape instead.
    """
    rows, cols = spins.shape
    flip = -spins[i, j]
    # Only the four nearest neighbours interact with the flipped spin.
    delta = -2*J*flip*(spins[(i+1) % rows, j] + spins[(i-1) % rows, j] +
                       spins[i, (j+1) % cols] + spins[i, (j-1) % cols])
    return delta
def do_gibbs_sampling(interaction, spins, energy, temperature, n_samples):
    """Run ``n_samples`` single-spin Metropolis updates in place.

    Returns the (mutated) spin array and the updated running energy.
    """
    rows = spins.shape[0]
    cols = spins.shape[1]
    for _ in range(n_samples):
        r = np.random.randint(rows)
        c = np.random.randint(cols)
        delta = deltaE(interaction, spins, r, c)
        # Metropolis rule: always accept downhill moves; otherwise accept
        # with probability exp(-delta / T). The short-circuit keeps the
        # RNG call pattern identical to the original.
        if delta < 0 or np.exp(-delta / temperature) > np.random.random():
            spins[r, c] = -spins[r, c]
            energy += delta
    return spins, energy
def plot_probabilities(energies, T, bins=10):
    """Plot normalised Boltzmann weights of the sampled energies at temperature T.

    NOTE(review): the ``bins`` parameter is currently unused.
    """
    probabilities = np.exp(-np.array(sorted(energies))/T)
    Z = probabilities.sum()  # normalisation over the sample
    probabilities /= Z
    plt.plot(sorted(energies), probabilities)
    plt.ylim(0, 1.2*probabilities.max())
    plt.show()
def get_energy_distribution(N, temperature, interaction, n_runs, burnin_time,
                            n_samples, n_sample_distance):
    """Collect equilibrium energy samples from repeated Metropolis runs.

    Each run starts from a random N x N configuration, discards
    ``burnin_time`` updates, then records the energy ``n_samples`` times,
    ``n_sample_distance`` updates apart.
    """
    energy_list = []
    for run in range(n_runs):
        print("Run %d" % run)
        spins = np.random.choice([-1, 1], size=(N, N))
        energy = get_energy(interaction, spins)
        # Burn-in so sampling starts near equilibrium.
        spins, energy = do_gibbs_sampling(interaction, spins, energy,
                                          temperature, burnin_time)
        for _ in range(n_samples):
            # Space samples out to reduce autocorrelation.
            spins, energy = do_gibbs_sampling(interaction, spins, energy,
                                              temperature, n_sample_distance)
            energy_list.append(energy)
    return energy_list
# ---- simulation parameters --------------------------------------------
temperature = 5
N = 5                     # lattice is N x N
interaction = 1.0         # coupling constant J
burnin_time = 10000       # updates discarded before sampling
n_sample_distance = 1000  # updates between recorded samples
n_samples = 100
n_runs = 50
energies = get_energy_distribution(N, temperature, interaction, n_runs,
                                   burnin_time, n_samples, n_sample_distance)
plot_probabilities(energies, temperature, bins=50) | 0.566498 | 0.500183 |
from colorama import Fore, Style, init
from json import load, load
init(autoreset=True)
# Application data (version string etc.).
file = "data\data.json"  # NOTE(review): mixes \\ and bare \ path styles — prefer raw strings or pathlib
with open(file, "r") as f:
    data = load(f)
    version_ = data["version"]
# Third-party API keys.
with open("data\\api_key.json", "r") as a:
    ak = load(a)
    apikey = ak["api_key"]
    apikey_2 = ak["api_key_moviedb"]
# User-facing configuration (colours, callsign, ...).
with open("data\\config.json", "r") as j:
    dcj = load(j)
    bordercolor = dcj["bordercolor"]
    logocolor = dcj["logocolor"]
    callsign = dcj["callsign"]
    channelnukename = dcj["channelnukename"]
# Discord bot token, kept in its own file.
with open("data\\token.discord", "r") as d:
    dcf = load(d)
    token = dcf["token"]
# Translate the configured colour names into colorama codes via a single
# lookup table (replaces two long if/elif chains).
COLOR_CODES = {
    "red": Fore.RED,
    "lightred": Fore.LIGHTRED_EX,
    "blue": Fore.BLUE,
    "lightblue": Fore.LIGHTBLUE_EX,
    "green": Fore.GREEN,
    "lightgreen": Fore.LIGHTGREEN_EX,
    "grey": Fore.LIGHTBLACK_EX,
    "cyan": Fore.CYAN,
    "lightcyan": Fore.LIGHTCYAN_EX,
    "white": Fore.WHITE,
    "lightwhite": Fore.LIGHTWHITE_EX,
    "yellow": Fore.YELLOW,
    "lightyellow": Fore.LIGHTYELLOW_EX,
    "magenta": Fore.MAGENTA,
    "lightmagenta": Fore.LIGHTMAGENTA_EX,
}
# Unknown names fall back to white. This also fixes a bug in the original
# logo chain, which tested ``bordercolor == "lightwhite"`` when deciding
# the *logo* colour, so "lightwhite" never worked for the logo.
bordercolor = COLOR_CODES.get(bordercolor, Fore.WHITE)
logocolor = COLOR_CODES.get(logocolor, Fore.WHITE)
# Logo
class main:
def hellomessage():
sra = Style.RESET_ALL
print(logocolor + """
░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░
░░░░░░░░░ ░░░░░░░░░░░░░░░░ ░░░░░░░░░░░░░░░░░░░░░░░░░
▒▒▒▒▒▒▒▒▒ ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ ▒▒ ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒
▒▒ ▒▒ ▒▒ ▒ ▒▒▒ ▒ ▒▒▒▒▒ ▒ ▒▒▒▒▒▒ ▒▒▒▒
▓ ▓▓▓▓▓ ▓ ▓▓▓ ▓ ▓▓ ▓ ▓▓ ▓▓ ▓▓ ▓▓▓ ▓
▓▓▓ ▓▓ ▓▓▓▓▓▓▓ ▓▓▓ ▓ ▓▓ ▓▓ ▓ ▓
▓▓▓▓▓ ▓ ▓ ▓▓▓▓▓▓ ▓▓▓ ▓ ▓▓ ▓▓ ▓ ▓▓▓▓▓▓▓▓
█ ██ ██ ████ ████ █ █ ██ ███ ███
████████████████████ █████████████████████████████████
""")
print(f" {bordercolor}╔══════════════════╗{sra} ")
print(f" {bordercolor}║{Fore.LIGHTRED_EX}Made by {Fore.LIGHTCYAN_EX}@Skyline69{bordercolor}║{sra}")
print(f" {bordercolor}║{sra} Version: {Fore.YELLOW}{version_}{bordercolor} ║{sra}") | src/submain.py | from colorama import Fore, Style, init
from json import load, load
init(autoreset=True)
# Application data (version string etc.).
file = "data\data.json"  # NOTE(review): mixes \\ and bare \ path styles — prefer raw strings or pathlib
with open(file, "r") as f:
    data = load(f)
    version_ = data["version"]
# Third-party API keys.
with open("data\\api_key.json", "r") as a:
    ak = load(a)
    apikey = ak["api_key"]
    apikey_2 = ak["api_key_moviedb"]
# User-facing configuration (colours, callsign, ...).
with open("data\\config.json", "r") as j:
    dcj = load(j)
    bordercolor = dcj["bordercolor"]
    logocolor = dcj["logocolor"]
    callsign = dcj["callsign"]
    channelnukename = dcj["channelnukename"]
# Discord bot token, kept in its own file.
with open("data\\token.discord", "r") as d:
    dcf = load(d)
    token = dcf["token"]
# Translate the configured colour names into colorama codes via a single
# lookup table (replaces two long if/elif chains).
COLOR_CODES = {
    "red": Fore.RED,
    "lightred": Fore.LIGHTRED_EX,
    "blue": Fore.BLUE,
    "lightblue": Fore.LIGHTBLUE_EX,
    "green": Fore.GREEN,
    "lightgreen": Fore.LIGHTGREEN_EX,
    "grey": Fore.LIGHTBLACK_EX,
    "cyan": Fore.CYAN,
    "lightcyan": Fore.LIGHTCYAN_EX,
    "white": Fore.WHITE,
    "lightwhite": Fore.LIGHTWHITE_EX,
    "yellow": Fore.YELLOW,
    "lightyellow": Fore.LIGHTYELLOW_EX,
    "magenta": Fore.MAGENTA,
    "lightmagenta": Fore.LIGHTMAGENTA_EX,
}
# Unknown names fall back to white. This also fixes a bug in the original
# logo chain, which tested ``bordercolor == "lightwhite"`` when deciding
# the *logo* colour, so "lightwhite" never worked for the logo.
bordercolor = COLOR_CODES.get(bordercolor, Fore.WHITE)
logocolor = COLOR_CODES.get(logocolor, Fore.WHITE)
# Logo
class main:
def hellomessage():
sra = Style.RESET_ALL
print(logocolor + """
░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░
░░░░░░░░░ ░░░░░░░░░░░░░░░░ ░░░░░░░░░░░░░░░░░░░░░░░░░
▒▒▒▒▒▒▒▒▒ ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ ▒▒ ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒
▒▒ ▒▒ ▒▒ ▒ ▒▒▒ ▒ ▒▒▒▒▒ ▒ ▒▒▒▒▒▒ ▒▒▒▒
▓ ▓▓▓▓▓ ▓ ▓▓▓ ▓ ▓▓ ▓ ▓▓ ▓▓ ▓▓ ▓▓▓ ▓
▓▓▓ ▓▓ ▓▓▓▓▓▓▓ ▓▓▓ ▓ ▓▓ ▓▓ ▓ ▓
▓▓▓▓▓ ▓ ▓ ▓▓▓▓▓▓ ▓▓▓ ▓ ▓▓ ▓▓ ▓ ▓▓▓▓▓▓▓▓
█ ██ ██ ████ ████ █ █ ██ ███ ███
████████████████████ █████████████████████████████████
""")
print(f" {bordercolor}╔══════════════════╗{sra} ")
print(f" {bordercolor}║{Fore.LIGHTRED_EX}Made by {Fore.LIGHTCYAN_EX}@Skyline69{bordercolor}║{sra}")
print(f" {bordercolor}║{sra} Version: {Fore.YELLOW}{version_}{bordercolor} ║{sra}") | 0.260107 | 0.172886 |
import os
import math
import random
import numpy as np
import tensorflow as tf
import cv2
slim = tf.contrib.slim
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import sys
sys.path.append('../')
from nets import ssd_vgg_300, ssd_common
from preprocessing import ssd_vgg_preprocessing
import visualization
# TF1-style interactive session; allow_growth avoids grabbing all GPU memory.
gpu_options = tf.GPUOptions(allow_growth = True)
config = tf.ConfigProto(log_device_placement = False, gpu_options = gpu_options)
isess = tf.InteractiveSession(config = config)
# Input is warped to the SSD 300x300 input resolution.
net_shape = (300, 300)
data_format = 'NHWC'
img_input = tf.placeholder(tf.uint8, shape=(None, None, 3))
image_pre, labels_pre, bboxes_pre, bbox_img = ssd_vgg_preprocessing.preprocess_for_eval(
    img_input, None, None, net_shape, data_format, resize=ssd_vgg_preprocessing.Resize.WARP_RESIZE)
image_4d = tf.expand_dims(image_pre, 0)  # add a batch dimension
# Define the SSD model.
reuse = True if 'ssd_net' in locals() else None  # reuse variables on re-run (notebook-style guard)
ssd_net = ssd_vgg_300.SSDNet()
with slim.arg_scope(ssd_net.arg_scope(data_format=data_format)):
    predictions, localisations, _, _ = ssd_net.net(image_4d, is_training=False, reuse=reuse)
# Restore SSD model.
ckpt_filename = './logs/model.ckpt-62962'
# ckpt_filename = '../checkpoints/VGG_VOC0712_SSD_300x300_ft_iter_120000.ckpt'
isess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.restore(isess, ckpt_filename)
# SSD default anchor boxes.
ssd_anchors = ssd_net.anchors(net_shape)
# Main image processing routine.
def process_image(img, select_threshold=0.5, nms_threshold=.45, net_shape=(300, 300)):
    """Run the SSD network on one image and return (classes, scores, bboxes).

    Applies score thresholding, top-k sorting, NMS and clipping. Relies on
    the module-level session and graph tensors defined above.
    """
    # Run SSD network.
    rimg, rpredictions, rlocalisations, rbbox_img = isess.run([image_4d, predictions, localisations, bbox_img],
                                                              feed_dict={img_input: img})
    # Get classes and bboxes from the net outputs.
    rclasses, rscores, rbboxes = ssd_common.ssd_bboxes_select(
            rpredictions, rlocalisations, ssd_anchors,
            select_threshold=select_threshold, img_shape=net_shape, num_classes=21, decode=True)
    rbboxes = ssd_common.bboxes_clip(rbbox_img, rbboxes)
    rclasses, rscores, rbboxes = ssd_common.bboxes_sort(rclasses, rscores, rbboxes, top_k=400)
    rclasses, rscores, rbboxes = ssd_common.bboxes_nms(rclasses, rscores, rbboxes, nms_threshold=nms_threshold)
    # Resize bboxes to original image shape. Note: useless for Resize.WARP!
    rbboxes = ssd_common.bboxes_resize(rbbox_img, rbboxes)
    return rclasses, rscores, rbboxes
# Test on some demo image and visualize output.
path = './demo/'
image_names = sorted(os.listdir(path))
for i in range(10):
img = mpimg.imread(path + image_names[i])
rclasses, rscores, rbboxes = process_image(img)
# visualization.bboxes_draw_on_img(img, rclasses, rscores, rbboxes, visualization.colors_plasma)
visualization.plt_bboxes(img, rclasses, rscores, rbboxes) | ssd_visualize.py | import os
import math
import random
import numpy as np
import tensorflow as tf
import cv2
slim = tf.contrib.slim
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import sys
sys.path.append('../')
from nets import ssd_vgg_300, ssd_common
from preprocessing import ssd_vgg_preprocessing
import visualization
# TF1-style interactive session; allow_growth avoids grabbing all GPU memory.
gpu_options = tf.GPUOptions(allow_growth = True)
config = tf.ConfigProto(log_device_placement = False, gpu_options = gpu_options)
isess = tf.InteractiveSession(config = config)
# Input is warped to the SSD 300x300 input resolution.
net_shape = (300, 300)
data_format = 'NHWC'
img_input = tf.placeholder(tf.uint8, shape=(None, None, 3))
image_pre, labels_pre, bboxes_pre, bbox_img = ssd_vgg_preprocessing.preprocess_for_eval(
    img_input, None, None, net_shape, data_format, resize=ssd_vgg_preprocessing.Resize.WARP_RESIZE)
image_4d = tf.expand_dims(image_pre, 0)  # add a batch dimension
# Define the SSD model.
reuse = True if 'ssd_net' in locals() else None  # reuse variables on re-run (notebook-style guard)
ssd_net = ssd_vgg_300.SSDNet()
with slim.arg_scope(ssd_net.arg_scope(data_format=data_format)):
    predictions, localisations, _, _ = ssd_net.net(image_4d, is_training=False, reuse=reuse)
# Restore SSD model.
ckpt_filename = './logs/model.ckpt-62962'
# ckpt_filename = '../checkpoints/VGG_VOC0712_SSD_300x300_ft_iter_120000.ckpt'
isess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.restore(isess, ckpt_filename)
# SSD default anchor boxes.
ssd_anchors = ssd_net.anchors(net_shape)
# Main image processing routine.
def process_image(img, select_threshold=0.5, nms_threshold=.45, net_shape=(300, 300)):
    """Run the SSD network on one image and return (classes, scores, bboxes).

    Applies score thresholding, top-k sorting, NMS and clipping. Relies on
    the module-level session and graph tensors defined above.
    """
    # Run SSD network.
    rimg, rpredictions, rlocalisations, rbbox_img = isess.run([image_4d, predictions, localisations, bbox_img],
                                                              feed_dict={img_input: img})
    # Get classes and bboxes from the net outputs.
    rclasses, rscores, rbboxes = ssd_common.ssd_bboxes_select(
            rpredictions, rlocalisations, ssd_anchors,
            select_threshold=select_threshold, img_shape=net_shape, num_classes=21, decode=True)
    rbboxes = ssd_common.bboxes_clip(rbbox_img, rbboxes)
    rclasses, rscores, rbboxes = ssd_common.bboxes_sort(rclasses, rscores, rbboxes, top_k=400)
    rclasses, rscores, rbboxes = ssd_common.bboxes_nms(rclasses, rscores, rbboxes, nms_threshold=nms_threshold)
    # Resize bboxes to original image shape. Note: useless for Resize.WARP!
    rbboxes = ssd_common.bboxes_resize(rbbox_img, rbboxes)
    return rclasses, rscores, rbboxes
# Test on some demo image and visualize output.
path = './demo/'
image_names = sorted(os.listdir(path))
for i in range(10):
img = mpimg.imread(path + image_names[i])
rclasses, rscores, rbboxes = process_image(img)
# visualization.bboxes_draw_on_img(img, rclasses, rscores, rbboxes, visualization.colors_plasma)
visualization.plt_bboxes(img, rclasses, rscores, rbboxes) | 0.412885 | 0.202148 |
from federatedml.evaluation import Evaluation
from sklearn.metrics import roc_auc_score
import numpy as np
import unittest
class TestClassificationEvaluaction(unittest.TestCase):
def assertFloatEqual(self,op1, op2):
diff = np.abs(op1 - op2)
self.assertLess(diff, 1e-6)
def test_auc(self):
y_true = np.array([0,0,1,1])
y_predict = np.array([0.1,0.4,0.35,0.8])
ground_true_auc = 0.75
eva = Evaluation("binary")
auc = eva.auc(y_true,y_predict)
auc = round(auc,2)
self.assertFloatEqual(auc, ground_true_auc)
def test_ks(self):
y_true = np.array([1,1,1,1,1,1,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0])
y_predict = np.array([0.42,0.73,0.55,0.37,0.57,0.70,0.25,0.23,0.46,0.62,0.76,0.46,0.55,0.56,0.56,0.38,0.37,0.73,0.77,0.21,0.39])
ground_true_ks = 0.75
eva = Evaluation("binary")
ks = eva.ks(y_true,y_predict)
ks = round(ks,2)
self.assertFloatEqual(ks, ground_true_ks)
def test_lift(self):
y_true = np.array([1,1,0,0,0,1,1,0,0,1])
y_predict = np.array([0.57,0.70,0.25,0.30,0.46,0.62,0.76,0.46,0.35,0.56])
dict_score = { "0":{0:0,1:1},"0.4":{0:2,1:1.43},"0.6":{0:1.43,1:2} }
eva = Evaluation("binary")
split_thresholds = [0,0.4,0.6]
lifts = eva.lift(y_true,y_predict,thresholds=split_thresholds)
fix_lifts = []
for lift in lifts:
fix_lift = [ round(pos,2) for pos in lift ]
fix_lifts.append(fix_lift)
for i in range(len(split_thresholds)):
score_0 = dict_score[str(split_thresholds[i])][0]
score_1 = dict_score[str(split_thresholds[i])][1]
pos_lift = fix_lifts[i]
self.assertEqual(len(pos_lift), 2)
self.assertFloatEqual(score_0, pos_lift[0])
self.assertFloatEqual(score_1, pos_lift[1])
def test_precision(self):
y_true = np.array([1,1,0,0,0,1,1,0,0,1])
y_predict = np.array([0.57,0.70,0.25,0.30,0.46,0.62,0.76,0.46,0.35,0.56])
dict_score = { "0.4":{0:1,1:0.71},"0.6":{0:0.71,1:1} }
eva = Evaluation("binary")
split_thresholds = [0.4,0.6]
prec_values = eva.precision(y_true,y_predict,thresholds=split_thresholds)
fix_prec_values = []
for prec_value in prec_values:
fix_prec_value = [ round(pos,2) for pos in prec_value ]
fix_prec_values.append(fix_prec_value)
for i in range(len(split_thresholds)):
score_0 = dict_score[str(split_thresholds[i])][0]
score_1 = dict_score[str(split_thresholds[i])][1]
pos_prec_value = fix_prec_values[i]
self.assertEqual(len(pos_prec_value), 2)
self.assertFloatEqual(score_0, pos_prec_value[0])
self.assertFloatEqual(score_1, pos_prec_value[1])
def test_recall(self):
y_true = np.array([1,1,0,0,0,1,1,0,0,1])
y_predict = np.array([0.57,0.70,0.25,0.31,0.46,0.62,0.76,0.46,0.35,0.56])
dict_score = { "0.3":{0:0.2,1:1},"0.4":{0:0.6,1:1} }
eva = Evaluation("binary")
split_thresholds = [0.3,0.4]
recalls = eva.recall(y_true,y_predict,thresholds=split_thresholds)
round_recalls = []
for recall in recalls:
round_recall = [ round(pos,2) for pos in recall ]
round_recalls.append(round_recall)
for i in range(len(split_thresholds)):
score_0 = dict_score[str(split_thresholds[i])][0]
score_1 = dict_score[str(split_thresholds[i])][1]
pos_recall = round_recalls[i]
self.assertEqual(len(pos_recall), 2)
self.assertFloatEqual(score_0, pos_recall[0])
self.assertFloatEqual(score_1, pos_recall[1])
def test_bin_accuracy(self):
y_true = np.array([1,1,0,0,0,1,1,0,0,1])
y_predict = np.array([0.57,0.70,0.25,0.31,0.46,0.62,0.76,0.46,0.35,0.56])
gt_score = {"0.3":0.6, "0.5":1.0, "0.7":0.7 }
split_thresholds = [0.3,0.5,0.7]
eva = Evaluation("binary")
acc = eva.accuracy(y_true,y_predict,thresholds=split_thresholds)
for i in range(len(split_thresholds)):
score = gt_score[str(split_thresholds[i])]
self.assertFloatEqual(score, acc[i])
def test_multi_accuracy(self):
y_true = np.array([1,1,1,1,1,2,2,2,2,2,3,3,3,3,3,4,4,4,4,4])
y_predict = [1,1,2,2,3,2,1,1,1,1,3,3,3,3,2,4,4,4,4,4]
gt_score = 0.6
gt_number = 12
eva = Evaluation("multi")
acc = eva.accuracy(y_true,y_predict)
self.assertFloatEqual(gt_score, acc)
acc_number = eva.accuracy(y_true,y_predict,normalize=False)
self.assertEqual(acc_number, gt_number)
def test_multi_recall(self):
y_true = np.array([1,1,1,1,1,2,2,2,2,2,3,3,3,3,3,4,4,4,4,4,5,5,5,5,5])
y_predict = np.array([1,1,2,2,3,2,1,1,1,1,3,3,3,3,2,4,4,4,4,4,6,6,6,6,6])
gt_score = {"1":0.4, "3":0.8, "4":1.0,"6":0,"7":-1}
eva = Evaluation("multi")
result_filter = [1,3,4,6,7]
recall_scores = eva.recall(y_true,y_predict,result_filter=result_filter)
for i in range(len(result_filter)):
score = gt_score[str(result_filter[i])]
self.assertFloatEqual(score, recall_scores[i])
def test_multi_precision(self):
y_true = np.array([1,1,1,1,1,2,2,2,2,2,3,3,3,3,3,4,4,4,4,4,5,5,5,5,5])
y_predict = np.array([1,1,2,2,3,2,1,1,1,1,3,3,3,3,2,4,4,4,4,4,6,6,6,6,6])
gt_score = {"2":0.25, "3":0.8, "5":0,"6":0,"7":-1}
eva = Evaluation("multi")
result_filter = [2,3,5,6,7]
precision_scores = eva.precision(y_true,y_predict,result_filter=result_filter)
for i in range(len(result_filter)):
score = gt_score[str(result_filter[i])]
self.assertFloatEqual(score, precision_scores[i])
if __name__ == '__main__':
unittest.main() | federatedml/evaluation/test/evaluation_test.py |
from federatedml.evaluation import Evaluation
from sklearn.metrics import roc_auc_score
import numpy as np
import unittest
class TestClassificationEvaluaction(unittest.TestCase):
def assertFloatEqual(self,op1, op2):
diff = np.abs(op1 - op2)
self.assertLess(diff, 1e-6)
def test_auc(self):
y_true = np.array([0,0,1,1])
y_predict = np.array([0.1,0.4,0.35,0.8])
ground_true_auc = 0.75
eva = Evaluation("binary")
auc = eva.auc(y_true,y_predict)
auc = round(auc,2)
self.assertFloatEqual(auc, ground_true_auc)
def test_ks(self):
y_true = np.array([1,1,1,1,1,1,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0])
y_predict = np.array([0.42,0.73,0.55,0.37,0.57,0.70,0.25,0.23,0.46,0.62,0.76,0.46,0.55,0.56,0.56,0.38,0.37,0.73,0.77,0.21,0.39])
ground_true_ks = 0.75
eva = Evaluation("binary")
ks = eva.ks(y_true,y_predict)
ks = round(ks,2)
self.assertFloatEqual(ks, ground_true_ks)
def test_lift(self):
y_true = np.array([1,1,0,0,0,1,1,0,0,1])
y_predict = np.array([0.57,0.70,0.25,0.30,0.46,0.62,0.76,0.46,0.35,0.56])
dict_score = { "0":{0:0,1:1},"0.4":{0:2,1:1.43},"0.6":{0:1.43,1:2} }
eva = Evaluation("binary")
split_thresholds = [0,0.4,0.6]
lifts = eva.lift(y_true,y_predict,thresholds=split_thresholds)
fix_lifts = []
for lift in lifts:
fix_lift = [ round(pos,2) for pos in lift ]
fix_lifts.append(fix_lift)
for i in range(len(split_thresholds)):
score_0 = dict_score[str(split_thresholds[i])][0]
score_1 = dict_score[str(split_thresholds[i])][1]
pos_lift = fix_lifts[i]
self.assertEqual(len(pos_lift), 2)
self.assertFloatEqual(score_0, pos_lift[0])
self.assertFloatEqual(score_1, pos_lift[1])
def test_precision(self):
y_true = np.array([1,1,0,0,0,1,1,0,0,1])
y_predict = np.array([0.57,0.70,0.25,0.30,0.46,0.62,0.76,0.46,0.35,0.56])
dict_score = { "0.4":{0:1,1:0.71},"0.6":{0:0.71,1:1} }
eva = Evaluation("binary")
split_thresholds = [0.4,0.6]
prec_values = eva.precision(y_true,y_predict,thresholds=split_thresholds)
fix_prec_values = []
for prec_value in prec_values:
fix_prec_value = [ round(pos,2) for pos in prec_value ]
fix_prec_values.append(fix_prec_value)
for i in range(len(split_thresholds)):
score_0 = dict_score[str(split_thresholds[i])][0]
score_1 = dict_score[str(split_thresholds[i])][1]
pos_prec_value = fix_prec_values[i]
self.assertEqual(len(pos_prec_value), 2)
self.assertFloatEqual(score_0, pos_prec_value[0])
self.assertFloatEqual(score_1, pos_prec_value[1])
def test_recall(self):
y_true = np.array([1,1,0,0,0,1,1,0,0,1])
y_predict = np.array([0.57,0.70,0.25,0.31,0.46,0.62,0.76,0.46,0.35,0.56])
dict_score = { "0.3":{0:0.2,1:1},"0.4":{0:0.6,1:1} }
eva = Evaluation("binary")
split_thresholds = [0.3,0.4]
recalls = eva.recall(y_true,y_predict,thresholds=split_thresholds)
round_recalls = []
for recall in recalls:
round_recall = [ round(pos,2) for pos in recall ]
round_recalls.append(round_recall)
for i in range(len(split_thresholds)):
score_0 = dict_score[str(split_thresholds[i])][0]
score_1 = dict_score[str(split_thresholds[i])][1]
pos_recall = round_recalls[i]
self.assertEqual(len(pos_recall), 2)
self.assertFloatEqual(score_0, pos_recall[0])
self.assertFloatEqual(score_1, pos_recall[1])
def test_bin_accuracy(self):
y_true = np.array([1,1,0,0,0,1,1,0,0,1])
y_predict = np.array([0.57,0.70,0.25,0.31,0.46,0.62,0.76,0.46,0.35,0.56])
gt_score = {"0.3":0.6, "0.5":1.0, "0.7":0.7 }
split_thresholds = [0.3,0.5,0.7]
eva = Evaluation("binary")
acc = eva.accuracy(y_true,y_predict,thresholds=split_thresholds)
for i in range(len(split_thresholds)):
score = gt_score[str(split_thresholds[i])]
self.assertFloatEqual(score, acc[i])
def test_multi_accuracy(self):
y_true = np.array([1,1,1,1,1,2,2,2,2,2,3,3,3,3,3,4,4,4,4,4])
y_predict = [1,1,2,2,3,2,1,1,1,1,3,3,3,3,2,4,4,4,4,4]
gt_score = 0.6
gt_number = 12
eva = Evaluation("multi")
acc = eva.accuracy(y_true,y_predict)
self.assertFloatEqual(gt_score, acc)
acc_number = eva.accuracy(y_true,y_predict,normalize=False)
self.assertEqual(acc_number, gt_number)
def test_multi_recall(self):
y_true = np.array([1,1,1,1,1,2,2,2,2,2,3,3,3,3,3,4,4,4,4,4,5,5,5,5,5])
y_predict = np.array([1,1,2,2,3,2,1,1,1,1,3,3,3,3,2,4,4,4,4,4,6,6,6,6,6])
gt_score = {"1":0.4, "3":0.8, "4":1.0,"6":0,"7":-1}
eva = Evaluation("multi")
result_filter = [1,3,4,6,7]
recall_scores = eva.recall(y_true,y_predict,result_filter=result_filter)
for i in range(len(result_filter)):
score = gt_score[str(result_filter[i])]
self.assertFloatEqual(score, recall_scores[i])
def test_multi_precision(self):
y_true = np.array([1,1,1,1,1,2,2,2,2,2,3,3,3,3,3,4,4,4,4,4,5,5,5,5,5])
y_predict = np.array([1,1,2,2,3,2,1,1,1,1,3,3,3,3,2,4,4,4,4,4,6,6,6,6,6])
gt_score = {"2":0.25, "3":0.8, "5":0,"6":0,"7":-1}
eva = Evaluation("multi")
result_filter = [2,3,5,6,7]
precision_scores = eva.precision(y_true,y_predict,result_filter=result_filter)
for i in range(len(result_filter)):
score = gt_score[str(result_filter[i])]
self.assertFloatEqual(score, precision_scores[i])
if __name__ == '__main__':
unittest.main() | 0.475605 | 0.543166 |
from props import getNode
import comms.events
from mission.task.task import Task
import mission.task.state
class Preflight(Task):
def __init__(self, config_node):
Task.__init__(self)
self.task_node = getNode("/task", True)
self.preflight_node = getNode("/task/preflight", True)
self.ap_node = getNode("/autopilot", True)
self.targets_node = getNode("/autopilot/targets", True)
self.imu_node = getNode("/sensors/imu", True)
self.flight_node = getNode("/controls/flight", True)
#self.saved_fcs_mode = ""
self.timer = 0.0
self.duration_sec = 60.0
self.name = config_node.getString("name")
self.nickname = config_node.getString("nickname")
# copy to /task/preflight
if config_node.hasChild("duration_sec"):
self.duration_sec = config_node.getFloat("duration_sec")
self.preflight_node.setFloat("duration_sec", self.duration_sec)
def activate(self):
self.active = True
# save existing state
mission.task.state.save(modes=True)
if not self.task_node.getBool("is_airborne"):
# set fcs mode to roll+pitch, aka vanity mode? :-)
self.ap_node.setString("mode", "roll+pitch")
self.targets_node.setFloat("roll_deg", 0.0)
self.targets_node.setFloat("pitch_deg", 0.0)
self.flight_node.setFloat("flaps_setpoint", 0.0)
# reset timer
self.timer = 0.0
else:
# we are airborne, don't change modes and configure timer
# to be already expired
self.timer = self.preflight_node.getFloat("duration_sec") + 1.0
comms.events.log("mission", "preflight")
def update(self, dt):
if not self.active:
return False
# print "preflight & updating"
self.timer += dt
def is_complete(self):
# print "timer=%.1f duration=%.1f" % (self.timer, self.duration_sec)
# complete when timer expires or we sense we are airborne
# (sanity check!)
done = False
if self.timer >= self.preflight_node.getFloat("duration_sec") or \
self.task_node.getBool("is_airborne"):
done = True
return done
def close(self):
# restore the previous state
mission.task.state.restore()
self.active = False
return True | src/mission/task/preflight.py | from props import getNode
import comms.events
from mission.task.task import Task
import mission.task.state
class Preflight(Task):
def __init__(self, config_node):
Task.__init__(self)
self.task_node = getNode("/task", True)
self.preflight_node = getNode("/task/preflight", True)
self.ap_node = getNode("/autopilot", True)
self.targets_node = getNode("/autopilot/targets", True)
self.imu_node = getNode("/sensors/imu", True)
self.flight_node = getNode("/controls/flight", True)
#self.saved_fcs_mode = ""
self.timer = 0.0
self.duration_sec = 60.0
self.name = config_node.getString("name")
self.nickname = config_node.getString("nickname")
# copy to /task/preflight
if config_node.hasChild("duration_sec"):
self.duration_sec = config_node.getFloat("duration_sec")
self.preflight_node.setFloat("duration_sec", self.duration_sec)
def activate(self):
self.active = True
# save existing state
mission.task.state.save(modes=True)
if not self.task_node.getBool("is_airborne"):
# set fcs mode to roll+pitch, aka vanity mode? :-)
self.ap_node.setString("mode", "roll+pitch")
self.targets_node.setFloat("roll_deg", 0.0)
self.targets_node.setFloat("pitch_deg", 0.0)
self.flight_node.setFloat("flaps_setpoint", 0.0)
# reset timer
self.timer = 0.0
else:
# we are airborne, don't change modes and configure timer
# to be already expired
self.timer = self.preflight_node.getFloat("duration_sec") + 1.0
comms.events.log("mission", "preflight")
def update(self, dt):
if not self.active:
return False
# print "preflight & updating"
self.timer += dt
def is_complete(self):
# print "timer=%.1f duration=%.1f" % (self.timer, self.duration_sec)
# complete when timer expires or we sense we are airborne
# (sanity check!)
done = False
if self.timer >= self.preflight_node.getFloat("duration_sec") or \
self.task_node.getBool("is_airborne"):
done = True
return done
def close(self):
# restore the previous state
mission.task.state.restore()
self.active = False
return True | 0.294621 | 0.121921 |
from django.contrib import messages
from django.contrib.auth import login, authenticate
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm
from django.shortcuts import render, redirect
from duolingo import Duolingo, DuolingoException
from .forms import NewUserForm
from .models import DuoData
def homepage(request):
return render(request, "home.html", {})
@login_required
def profile(request):
if request.method == "POST":
username, password = request.POST['username'], request.POST['password']
if not DuoData.objects.filter(user_id=request.user.id).exists():
try:
duo_user = Duolingo(username, password)
except DuolingoException:
messages.error(request, "Login failed.")
return redirect(profile)
words_by_language, translations, languages, lang_abrv = {}, {}, duo_user.get_languages(), {}
for lang in languages:
lang_abrv[lang] = duo_user.get_abbreviation_of(lang)
for abrv in lang_abrv.values():
words_by_language[abrv] = duo_user.get_known_words(abrv)
for source in words_by_language:
translations[source] = duo_user.get_translations(target='en', source=source,
words=words_by_language[source])
user_info = duo_user.get_user_info()
DuoData.objects.get_or_create(user_id=request.user.id,
username=username,
duo_id=user_info['id'],
fullname=user_info['fullname'],
bio=user_info['bio'],
location=user_info['location'],
account_created=user_info['created'].strip('\n'),
avatar=str(user_info['avatar']) + '/xxlarge',
known_words=words_by_language,
translations=translations,
languages=languages,
lang_abrv=lang_abrv)
return render(request, "profile.html", {'duo_user': DuoData.objects.filter(user_id=request.user.id).first()})
@login_required
def known_words(request):
lang_selection = None
if 'lang_selection_btn' in request.POST:
lang_selection = DuoData.objects.get(user_id=request.user.id).lang_abrv[request.POST['lang_selection_btn']]
request.session['lang_selection'] = lang_selection
elif 'random_study_btn' in request.POST:
return redirect('flashcard')
return render(request, "known_words.html",
{'duo_user': DuoData.objects.filter(user_id=request.user.id).first(),
'lang_selection': lang_selection})
@login_required
def flashcard(request):
if not request.session.get('lang_selection'):
request.session['lang_selection'] = list(DuoData.objects.get(user_id=request.user.id).lang_abrv.values())[0]
card_side = "front"
word = None
if 'front' in request.POST:
card_side = 'back'
word = request.POST['front']
elif 'back' in request.POST:
card_side = 'front'
return render(request, "flashcard.html",
{'duo_user': DuoData.objects.filter(user_id=request.user.id).first(),
'card_side': card_side, 'lang_selection': request.session['lang_selection'],
'translate_params': [request.session['lang_selection'], word]})
def register_request(request):
if request.method == "POST":
form = NewUserForm(request.POST)
if form.is_valid():
user = form.save()
login(request, user)
messages.success(request, "Registration successful.")
return redirect("homepage")
messages.error(request, "Unsuccessful registration. Invalid information.")
form = NewUserForm()
return render(request, "auth/register.html", {"register_form": form})
def login_request(request):
if request.method == "POST":
form = AuthenticationForm(request, data=request.POST)
if form.is_valid():
username = form.cleaned_data.get("username")
password = form.cleaned_data.get("password")
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
messages.info(request, f"You are now logged in as {username}.")
return redirect("homepage")
else:
messages.error(request, "Invalid username or password.")
else:
messages.error(request, "Invalid username or password.")
form = AuthenticationForm()
return render(request, "auth/login.html", {"login_form": form}) | DuoVocabFE/views.py | from django.contrib import messages
from django.contrib.auth import login, authenticate
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm
from django.shortcuts import render, redirect
from duolingo import Duolingo, DuolingoException
from .forms import NewUserForm
from .models import DuoData
def homepage(request):
return render(request, "home.html", {})
@login_required
def profile(request):
if request.method == "POST":
username, password = request.POST['username'], request.POST['password']
if not DuoData.objects.filter(user_id=request.user.id).exists():
try:
duo_user = Duolingo(username, password)
except DuolingoException:
messages.error(request, "Login failed.")
return redirect(profile)
words_by_language, translations, languages, lang_abrv = {}, {}, duo_user.get_languages(), {}
for lang in languages:
lang_abrv[lang] = duo_user.get_abbreviation_of(lang)
for abrv in lang_abrv.values():
words_by_language[abrv] = duo_user.get_known_words(abrv)
for source in words_by_language:
translations[source] = duo_user.get_translations(target='en', source=source,
words=words_by_language[source])
user_info = duo_user.get_user_info()
DuoData.objects.get_or_create(user_id=request.user.id,
username=username,
duo_id=user_info['id'],
fullname=user_info['fullname'],
bio=user_info['bio'],
location=user_info['location'],
account_created=user_info['created'].strip('\n'),
avatar=str(user_info['avatar']) + '/xxlarge',
known_words=words_by_language,
translations=translations,
languages=languages,
lang_abrv=lang_abrv)
return render(request, "profile.html", {'duo_user': DuoData.objects.filter(user_id=request.user.id).first()})
@login_required
def known_words(request):
lang_selection = None
if 'lang_selection_btn' in request.POST:
lang_selection = DuoData.objects.get(user_id=request.user.id).lang_abrv[request.POST['lang_selection_btn']]
request.session['lang_selection'] = lang_selection
elif 'random_study_btn' in request.POST:
return redirect('flashcard')
return render(request, "known_words.html",
{'duo_user': DuoData.objects.filter(user_id=request.user.id).first(),
'lang_selection': lang_selection})
@login_required
def flashcard(request):
if not request.session.get('lang_selection'):
request.session['lang_selection'] = list(DuoData.objects.get(user_id=request.user.id).lang_abrv.values())[0]
card_side = "front"
word = None
if 'front' in request.POST:
card_side = 'back'
word = request.POST['front']
elif 'back' in request.POST:
card_side = 'front'
return render(request, "flashcard.html",
{'duo_user': DuoData.objects.filter(user_id=request.user.id).first(),
'card_side': card_side, 'lang_selection': request.session['lang_selection'],
'translate_params': [request.session['lang_selection'], word]})
def register_request(request):
if request.method == "POST":
form = NewUserForm(request.POST)
if form.is_valid():
user = form.save()
login(request, user)
messages.success(request, "Registration successful.")
return redirect("homepage")
messages.error(request, "Unsuccessful registration. Invalid information.")
form = NewUserForm()
return render(request, "auth/register.html", {"register_form": form})
def login_request(request):
if request.method == "POST":
form = AuthenticationForm(request, data=request.POST)
if form.is_valid():
username = form.cleaned_data.get("username")
password = form.cleaned_data.get("password")
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
messages.info(request, f"You are now logged in as {username}.")
return redirect("homepage")
else:
messages.error(request, "Invalid username or password.")
else:
messages.error(request, "Invalid username or password.")
form = AuthenticationForm()
return render(request, "auth/login.html", {"login_form": form}) | 0.34632 | 0.089614 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from torch.autograd import Variable
import numpy as np
import time
import os
import copy
import argparse
from PIL import Image
from scipy.spatial.distance import cdist
from sklearn.metrics import confusion_matrix
from utils_incremental.utils_pytorch import *
def get_ref_features(self, inputs, outputs):
global ref_features
ref_features = inputs[0]
#ref_features = F.adaptive_avg_pool2d(ref_features, 8).view(ref_features.size(0), -1)
def get_cur_features(self, inputs, outputs):
global cur_features
cur_features = inputs[0]
#cur_features = F.adaptive_avg_pool2d(cur_features, 3).view(cur_features.size(0), -1)
def compute_confusion_matrix(tg_model, tg_feature_model, class_means, evalloader, print_info=False, device=None):
if device is None:
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
tg_model.eval()
tg_feature_model.eval()
correct = 0
correct_icarl = 0
correct_ncm = 0
total = 0
num_classes = tg_model.fc.out_features
cm = np.zeros((3, num_classes, num_classes))
all_targets = []
all_predicted = []
all_predicted_icarl = []
all_predicted_ncm = []
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(evalloader):
inputs, targets = inputs.cuda(), targets.cuda()
total += targets.size(0)
all_targets.append(targets.cpu().numpy())
outputs = tg_model(inputs)
_, predicted = outputs.max(1)
correct += predicted.eq(targets).sum().item()
all_predicted.append(predicted.cpu().numpy())
outputs_feature = np.squeeze(tg_feature_model(inputs).cpu().numpy())
# Compute score for iCaRL
sqd_icarl = cdist(class_means[:,:,0].T, outputs_feature, 'sqeuclidean')
score_icarl = torch.from_numpy((-sqd_icarl).T).cuda()#to(device)
_, predicted_icarl = score_icarl.max(1)
correct_icarl += predicted_icarl.eq(targets).sum().item()
all_predicted_icarl.append(predicted_icarl.cpu().numpy())
# Compute score for NCM
sqd_ncm = cdist(class_means[:,:,1].T, outputs_feature, 'sqeuclidean')
score_ncm = torch.from_numpy((-sqd_ncm).T).cuda()#to(device)
_, predicted_ncm = score_ncm.max(1)
correct_ncm += predicted_ncm.eq(targets).sum().item()
all_predicted_ncm.append(predicted_ncm.cpu().numpy())
# print(sqd_icarl.shape, score_icarl.shape, predicted_icarl.shape, \
# sqd_ncm.shape, score_ncm.shape, predicted_ncm.shape)
cm[0, :, :] = confusion_matrix(np.concatenate(all_targets), np.concatenate(all_predicted))
cm[1, :, :] = confusion_matrix(np.concatenate(all_targets), np.concatenate(all_predicted_icarl))
cm[2, :, :] = confusion_matrix(np.concatenate(all_targets), np.concatenate(all_predicted_ncm))
if print_info:
print(" top 1 accuracy CNN :\t\t{:.2f} %".format( 100.*correct/total ))
print(" top 1 accuracy iCaRL :\t\t{:.2f} %".format( 100.*correct_icarl/total ))
print(" top 1 accuracy NCM :\t\t{:.2f} %".format( 100.*correct_ncm/total ))
print(" top 1 accuracy CNN :\t\t{:.2f} %".format( 100.*np.mean(np.diag(cm[0])/np.sum(cm[0],axis=1)) ))
print(" top 1 accuracy iCaRL :\t\t{:.2f} %".format( 100.*np.mean(np.diag(cm[1])/np.sum(cm[1],axis=1)) ))
print(" top 1 accuracy NCM :\t\t{:.2f} %".format( 100.*np.mean(np.diag(cm[2])/np.sum(cm[2],axis=1)) ))
return cm | utils_incremental/compute_confusion_matrix.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from torch.autograd import Variable
import numpy as np
import time
import os
import copy
import argparse
from PIL import Image
from scipy.spatial.distance import cdist
from sklearn.metrics import confusion_matrix
from utils_incremental.utils_pytorch import *
def get_ref_features(self, inputs, outputs):
global ref_features
ref_features = inputs[0]
#ref_features = F.adaptive_avg_pool2d(ref_features, 8).view(ref_features.size(0), -1)
def get_cur_features(self, inputs, outputs):
global cur_features
cur_features = inputs[0]
#cur_features = F.adaptive_avg_pool2d(cur_features, 3).view(cur_features.size(0), -1)
def compute_confusion_matrix(tg_model, tg_feature_model, class_means, evalloader, print_info=False, device=None):
if device is None:
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
tg_model.eval()
tg_feature_model.eval()
correct = 0
correct_icarl = 0
correct_ncm = 0
total = 0
num_classes = tg_model.fc.out_features
cm = np.zeros((3, num_classes, num_classes))
all_targets = []
all_predicted = []
all_predicted_icarl = []
all_predicted_ncm = []
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(evalloader):
inputs, targets = inputs.cuda(), targets.cuda()
total += targets.size(0)
all_targets.append(targets.cpu().numpy())
outputs = tg_model(inputs)
_, predicted = outputs.max(1)
correct += predicted.eq(targets).sum().item()
all_predicted.append(predicted.cpu().numpy())
outputs_feature = np.squeeze(tg_feature_model(inputs).cpu().numpy())
# Compute score for iCaRL
sqd_icarl = cdist(class_means[:,:,0].T, outputs_feature, 'sqeuclidean')
score_icarl = torch.from_numpy((-sqd_icarl).T).cuda()#to(device)
_, predicted_icarl = score_icarl.max(1)
correct_icarl += predicted_icarl.eq(targets).sum().item()
all_predicted_icarl.append(predicted_icarl.cpu().numpy())
# Compute score for NCM
sqd_ncm = cdist(class_means[:,:,1].T, outputs_feature, 'sqeuclidean')
score_ncm = torch.from_numpy((-sqd_ncm).T).cuda()#to(device)
_, predicted_ncm = score_ncm.max(1)
correct_ncm += predicted_ncm.eq(targets).sum().item()
all_predicted_ncm.append(predicted_ncm.cpu().numpy())
# print(sqd_icarl.shape, score_icarl.shape, predicted_icarl.shape, \
# sqd_ncm.shape, score_ncm.shape, predicted_ncm.shape)
cm[0, :, :] = confusion_matrix(np.concatenate(all_targets), np.concatenate(all_predicted))
cm[1, :, :] = confusion_matrix(np.concatenate(all_targets), np.concatenate(all_predicted_icarl))
cm[2, :, :] = confusion_matrix(np.concatenate(all_targets), np.concatenate(all_predicted_ncm))
if print_info:
print(" top 1 accuracy CNN :\t\t{:.2f} %".format( 100.*correct/total ))
print(" top 1 accuracy iCaRL :\t\t{:.2f} %".format( 100.*correct_icarl/total ))
print(" top 1 accuracy NCM :\t\t{:.2f} %".format( 100.*correct_ncm/total ))
print(" top 1 accuracy CNN :\t\t{:.2f} %".format( 100.*np.mean(np.diag(cm[0])/np.sum(cm[0],axis=1)) ))
print(" top 1 accuracy iCaRL :\t\t{:.2f} %".format( 100.*np.mean(np.diag(cm[1])/np.sum(cm[1],axis=1)) ))
print(" top 1 accuracy NCM :\t\t{:.2f} %".format( 100.*np.mean(np.diag(cm[2])/np.sum(cm[2],axis=1)) ))
return cm | 0.550124 | 0.513607 |
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time_created', models.DateTimeField(blank=True, default=datetime.datetime.now)),
('image', models.ImageField(upload_to='images/')),
('message', models.CharField(blank=True, max_length=80)),
('name', models.CharField(max_length=80)),
('caption', models.TextField(blank=True)),
('profile', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('profile_pic', models.ImageField(upload_to='images/')),
('bio', models.CharField(blank=True, max_length=100)),
('user', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Likes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='amos.image')),
('likes', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment', models.TextField(blank=True)),
('comment_title', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('image', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='comment', to='amos.image')),
],
),
] | amos/migrations/0001_initial.py |
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time_created', models.DateTimeField(blank=True, default=datetime.datetime.now)),
('image', models.ImageField(upload_to='images/')),
('message', models.CharField(blank=True, max_length=80)),
('name', models.CharField(max_length=80)),
('caption', models.TextField(blank=True)),
('profile', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('profile_pic', models.ImageField(upload_to='images/')),
('bio', models.CharField(blank=True, max_length=100)),
('user', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Likes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='amos.image')),
('likes', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment', models.TextField(blank=True)),
('comment_title', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('image', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='comment', to='amos.image')),
],
),
] | 0.486088 | 0.173533 |
from collections import OrderedDict
from MDRSREID.Loss_Meter.ID_loss import IDLoss
from MDRSREID.Loss_Meter.ID_smooth_loss import IDSmoothLoss
from MDRSREID.Loss_Meter.triplet_loss import TripletLoss
from MDRSREID.Loss_Meter.triplet_loss import TripletHardLoss
from MDRSREID.Loss_Meter.permutation_loss import PermutationLoss
from MDRSREID.Loss_Meter.verification_loss import VerificationLoss
from MDRSREID.Loss_Meter.PGFA_loss import PGFALoss
from MDRSREID.Loss_Meter.Seg_loss import SegLoss
from MDRSREID.Loss_Meter.Multi_Seg_loss import MultiSegLoss
from MDRSREID.Loss_Meter.Multi_Seg_GP_loss import MultiSegGPLoss
from MDRSREID.Loss_Meter.invariance_loss import InvNet
def loss_function_creation(cfg, tb_writer):
loss_functions = OrderedDict()
if cfg.id_loss.use:
loss_functions[cfg.id_loss.name] = IDLoss(cfg.id_loss, tb_writer)
if cfg.id_smooth_loss.use:
cfg.id_smooth_loss.device = cfg.device
cfg.id_smooth_loss.num_classes = cfg.model.num_classes # cfg.model.num_classes
loss_functions[cfg.id_smooth_loss.name] = IDSmoothLoss(cfg.id_smooth_loss, tb_writer)
if cfg.tri_loss.use:
loss_functions[cfg.tri_loss.name] = TripletLoss(cfg.tri_loss, tb_writer)
if cfg.tri_hard_loss.use:
loss_functions[cfg.tri_hard_loss.name] = TripletHardLoss(cfg.tri_hard_loss, tb_writer)
if cfg.permutation_loss.use:
cfg.permutation_loss.device = cfg.device
loss_functions[cfg.permutation_loss.name] = PermutationLoss(cfg.permutation_loss, tb_writer)
if cfg.verification_loss.use:
loss_functions[cfg.verification_loss.name] = VerificationLoss(cfg.verification_loss, tb_writer)
if cfg.pgfa_loss.use:
loss_functions[cfg.pgfa_loss.name] = PGFALoss(cfg.pgfa_loss, tb_writer)
if cfg.src_seg_loss.use:
loss_functions[cfg.src_seg_loss.name] = SegLoss(cfg.src_seg_loss, tb_writer)
if cfg.src_multi_seg_loss.use:
loss_functions[cfg.src_multi_seg_loss.name] = MultiSegLoss(cfg.src_multi_seg_loss, tb_writer)
if cfg.src_multi_seg_gp_loss.use:
loss_functions[cfg.src_multi_seg_gp_loss.name] = MultiSegGPLoss(cfg.src_multi_seg_gp_loss, tb_writer)
if cfg.inv_loss.use:
cfg.inv_loss.device = cfg.device
cfg.inv_loss.num_classes = cfg.dataset.train.target.num_classes
loss_functions[cfg.inv_loss.name] = InvNet(cfg.inv_loss, tb_writer).to(cfg.device)
return loss_functions | MDRSREID/Trainer/loss_function_creation/__init__.py | from collections import OrderedDict
from MDRSREID.Loss_Meter.ID_loss import IDLoss
from MDRSREID.Loss_Meter.ID_smooth_loss import IDSmoothLoss
from MDRSREID.Loss_Meter.triplet_loss import TripletLoss
from MDRSREID.Loss_Meter.triplet_loss import TripletHardLoss
from MDRSREID.Loss_Meter.permutation_loss import PermutationLoss
from MDRSREID.Loss_Meter.verification_loss import VerificationLoss
from MDRSREID.Loss_Meter.PGFA_loss import PGFALoss
from MDRSREID.Loss_Meter.Seg_loss import SegLoss
from MDRSREID.Loss_Meter.Multi_Seg_loss import MultiSegLoss
from MDRSREID.Loss_Meter.Multi_Seg_GP_loss import MultiSegGPLoss
from MDRSREID.Loss_Meter.invariance_loss import InvNet
def loss_function_creation(cfg, tb_writer):
loss_functions = OrderedDict()
if cfg.id_loss.use:
loss_functions[cfg.id_loss.name] = IDLoss(cfg.id_loss, tb_writer)
if cfg.id_smooth_loss.use:
cfg.id_smooth_loss.device = cfg.device
cfg.id_smooth_loss.num_classes = cfg.model.num_classes # cfg.model.num_classes
loss_functions[cfg.id_smooth_loss.name] = IDSmoothLoss(cfg.id_smooth_loss, tb_writer)
if cfg.tri_loss.use:
loss_functions[cfg.tri_loss.name] = TripletLoss(cfg.tri_loss, tb_writer)
if cfg.tri_hard_loss.use:
loss_functions[cfg.tri_hard_loss.name] = TripletHardLoss(cfg.tri_hard_loss, tb_writer)
if cfg.permutation_loss.use:
cfg.permutation_loss.device = cfg.device
loss_functions[cfg.permutation_loss.name] = PermutationLoss(cfg.permutation_loss, tb_writer)
if cfg.verification_loss.use:
loss_functions[cfg.verification_loss.name] = VerificationLoss(cfg.verification_loss, tb_writer)
if cfg.pgfa_loss.use:
loss_functions[cfg.pgfa_loss.name] = PGFALoss(cfg.pgfa_loss, tb_writer)
if cfg.src_seg_loss.use:
loss_functions[cfg.src_seg_loss.name] = SegLoss(cfg.src_seg_loss, tb_writer)
if cfg.src_multi_seg_loss.use:
loss_functions[cfg.src_multi_seg_loss.name] = MultiSegLoss(cfg.src_multi_seg_loss, tb_writer)
if cfg.src_multi_seg_gp_loss.use:
loss_functions[cfg.src_multi_seg_gp_loss.name] = MultiSegGPLoss(cfg.src_multi_seg_gp_loss, tb_writer)
if cfg.inv_loss.use:
cfg.inv_loss.device = cfg.device
cfg.inv_loss.num_classes = cfg.dataset.train.target.num_classes
loss_functions[cfg.inv_loss.name] = InvNet(cfg.inv_loss, tb_writer).to(cfg.device)
return loss_functions | 0.729905 | 0.108803 |
import math
def distanceOfPosition(pos1, pos2):
"""To get the distance between 2 given 3D points."""
return math.sqrt(pow(pos1[0]-pos2[0],2) + pow(pos1[1]-pos2[1],2) + pow(pos1[2]-pos2[2],2))
class Matrix:
def __init__(self, n):
self._rowlist = list()
self._size = n
i = 0
while i < n:
row = [0]*n
self._rowlist.append(row)
i += 1
def __str__(self):
res = ""
for row in self._rowlist:
for element in row:
res += str(element) + ' '
res += '\n'
return res
def get(self, r, c):
"""Get an element with the row and column number, start from 0"""
return self._rowlist[r][c]
def set(self, r, c, v):
self._rowlist[r][c] = v
def createSubMatrix(self, i, j):
"""Create a new matrix by deleting the row i and the column j"""
m = Matrix(self._size-1)
x = 0
y = 0
while x < self._size:
while y < self._size:
#print("boucle: x=",x,", y=",y)
if x < i and y < j:
m._rowlist[x][y] = self._rowlist[x][y]
elif x < i and y > j:
m._rowlist[x][y-1] = self._rowlist[x][y]
elif x > i and y < j:
m._rowlist[x-1][y] = self._rowlist[x][y]
elif x > i and y > j:
m._rowlist[x-1][y-1] = self._rowlist[x][y]
y += 1
y = 0
x += 1
#print("subMatrix")
#print("i=",i,", j=",j)
#print(self)
#print(m)
return m
def determinant(self):
#print("determinant")
#print(self)
if self._size == 2:
return self._rowlist[0][0]*self._rowlist[1][1] - self._rowlist[0][1]*self._rowlist[1][0]
else:
res = 0
i = 0
while i < len(self._rowlist[0]):
res += pow(-1, i) * self._rowlist[0][i] * self.createSubMatrix(0,i).determinant()
i += 1
return res
def transpose(self):
matrix = Matrix(self._size)
i = 0
j = 0
while i < matrix._size:
while j < matrix._size:
matrix._rowlist[i][j] = self._rowlist[j][i]
j += 1
j = 0
i += 1
return matrix
def minor(self, i, j):
return self.createSubMatrix(i,j).determinant()
def adjoint(self):
i = 0
j = 0
# create a matrix of cofactors
matrix = Matrix(self._size)
while i < self._size:
while j < self._size:
matrix._rowlist[i][j] = pow(-1, i+j) * self.minor(i,j)
j += 1
j = 0
i += 1
return matrix.transpose()
def inverted(self):
"""Get an inversed copy of the matrix.
a) Find the determinant of A -- |A|, shouldn't be 0
b) Find the adjoint of A -- adj(A)
c) The formula: Inv(A) = adj(A)/|A|"""
det = self.determinant()
if det == 0:
print("Impossible to get inverse, det=0")
return None
else:
m = self.adjoint()
i = 0
j = 0
while i < m._size:
while j < m._size:
m._rowlist[i][j] = m._rowlist[i][j] / det
j += 1
j = 0
i += 1
return m
if __name__ == "__main__":
# test for 4
m = Matrix(4)
m.set(0,0,1)
m.set(0,1,3)
m.set(0,2,1)
m.set(0,3,1)
m.set(1,0,1)
m.set(1,1,1)
m.set(1,2,2)
m.set(1,3,2)
m.set(2,0,2)
m.set(2,1,3)
m.set(2,2,4)
m.set(2,3,4)
m.set(3,0,1)
m.set(3,1,5)
m.set(3,2,7)
m.set(3,3,2)
print(m)
print(m.inverted())
# test for 3
m = Matrix(3)
m.set(0,0,1)
m.set(0,1,3)
m.set(0,2,1)
m.set(1,0,1)
m.set(1,1,1)
m.set(1,2,2)
m.set(2,0,2)
m.set(2,1,3)
m.set(2,2,4)
print(m)
print(m.inverted())
# test for distance
print(distanceOfPosition([0.0, 0.0, 0.0], [28.7, -9.6, 55.2])) | recoUtils.py | import math
def distanceOfPosition(pos1, pos2):
"""To get the distance between 2 given 3D points."""
return math.sqrt(pow(pos1[0]-pos2[0],2) + pow(pos1[1]-pos2[1],2) + pow(pos1[2]-pos2[2],2))
class Matrix:
def __init__(self, n):
self._rowlist = list()
self._size = n
i = 0
while i < n:
row = [0]*n
self._rowlist.append(row)
i += 1
def __str__(self):
res = ""
for row in self._rowlist:
for element in row:
res += str(element) + ' '
res += '\n'
return res
def get(self, r, c):
"""Get an element with the row and column number, start from 0"""
return self._rowlist[r][c]
def set(self, r, c, v):
self._rowlist[r][c] = v
def createSubMatrix(self, i, j):
"""Create a new matrix by deleting the row i and the column j"""
m = Matrix(self._size-1)
x = 0
y = 0
while x < self._size:
while y < self._size:
#print("boucle: x=",x,", y=",y)
if x < i and y < j:
m._rowlist[x][y] = self._rowlist[x][y]
elif x < i and y > j:
m._rowlist[x][y-1] = self._rowlist[x][y]
elif x > i and y < j:
m._rowlist[x-1][y] = self._rowlist[x][y]
elif x > i and y > j:
m._rowlist[x-1][y-1] = self._rowlist[x][y]
y += 1
y = 0
x += 1
#print("subMatrix")
#print("i=",i,", j=",j)
#print(self)
#print(m)
return m
def determinant(self):
#print("determinant")
#print(self)
if self._size == 2:
return self._rowlist[0][0]*self._rowlist[1][1] - self._rowlist[0][1]*self._rowlist[1][0]
else:
res = 0
i = 0
while i < len(self._rowlist[0]):
res += pow(-1, i) * self._rowlist[0][i] * self.createSubMatrix(0,i).determinant()
i += 1
return res
def transpose(self):
matrix = Matrix(self._size)
i = 0
j = 0
while i < matrix._size:
while j < matrix._size:
matrix._rowlist[i][j] = self._rowlist[j][i]
j += 1
j = 0
i += 1
return matrix
def minor(self, i, j):
return self.createSubMatrix(i,j).determinant()
def adjoint(self):
i = 0
j = 0
# create a matrix of cofactors
matrix = Matrix(self._size)
while i < self._size:
while j < self._size:
matrix._rowlist[i][j] = pow(-1, i+j) * self.minor(i,j)
j += 1
j = 0
i += 1
return matrix.transpose()
def inverted(self):
"""Get an inversed copy of the matrix.
a) Find the determinant of A -- |A|, shouldn't be 0
b) Find the adjoint of A -- adj(A)
c) The formula: Inv(A) = adj(A)/|A|"""
det = self.determinant()
if det == 0:
print("Impossible to get inverse, det=0")
return None
else:
m = self.adjoint()
i = 0
j = 0
while i < m._size:
while j < m._size:
m._rowlist[i][j] = m._rowlist[i][j] / det
j += 1
j = 0
i += 1
return m
if __name__ == "__main__":
# test for 4
m = Matrix(4)
m.set(0,0,1)
m.set(0,1,3)
m.set(0,2,1)
m.set(0,3,1)
m.set(1,0,1)
m.set(1,1,1)
m.set(1,2,2)
m.set(1,3,2)
m.set(2,0,2)
m.set(2,1,3)
m.set(2,2,4)
m.set(2,3,4)
m.set(3,0,1)
m.set(3,1,5)
m.set(3,2,7)
m.set(3,3,2)
print(m)
print(m.inverted())
# test for 3
m = Matrix(3)
m.set(0,0,1)
m.set(0,1,3)
m.set(0,2,1)
m.set(1,0,1)
m.set(1,1,1)
m.set(1,2,2)
m.set(2,0,2)
m.set(2,1,3)
m.set(2,2,4)
print(m)
print(m.inverted())
# test for distance
print(distanceOfPosition([0.0, 0.0, 0.0], [28.7, -9.6, 55.2])) | 0.533884 | 0.653251 |
import functools
import httplib
import Queue
import os
import re
import string
import socket
import sys
import threading
import telnetlib
import time
import urllib
if __name__ == '__main__':
try:
soapTemplate = """<?xml version="1.0" encoding="UTF-8"?>
<SOAP-ENV:Envelope
SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/">
<SOAP-ENV:Body>
<m:X_SendIRCC xmlns:m="urn:schemas-sony-com:service:IRCC:1">
<IRCCCode>AAAAAQAAAAEAAAASAw==</IRCCCode>
</m:X_SendIRCC>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
"""
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# INPUT YOUR CONFIGURATION HERE
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
televisionIP = "192.168.178.32"
televisionPort = 80
conn = httplib.HTTPConnection(televisionIP, televisionPort)
conn.connect()
print "Attempting to register with CERS API... GUID device ID"
conn.request("GET", "/cers/api/register?name=indigoRemote®istrationType=new&deviceId=34c43339-af3d-40e7-b1b2-743331375368c")
responseToGUID = conn.getresponse()
print "CERS GUID Headers: " + str(responseToGUID.getheaders())
print "CERS GUID Response: [" + str(responseToGUID.status) + "] " + responseToGUID.read()
print "Attempting to register with CERS API... GUID device ID AND Auth Cookie"
conn.request("GET", "/cers/api/register?name=indigoRemote®istrationType=new&deviceId=34c43339-af3d-40e7-b1b2-743331375368c")
conn.putheader("Cookie", "auth=3d76f00d8c7e4473fbc8c3d952a33756f863427596fc76c4395367bea25b3288")
conn.endheaders()
responseToGUIDCookie = conn.getresponse()
print "CERS GUID/Cookie Headers: " + str(responseToGUIDCookie.getheaders())
print "CERS GUID/Cookie Response: [" + str(responseToGUIDCookie.status) + "] " + responseToGUIDCookie.read()
#print "Sending IR Code With X-CERS-DEVICE Headers"
#conn.putrequest('POST', "/sony/IRCC")
#conn.putheader("Host", televisionIP + ":" + str(televisionPort))
#conn.putheader("Content-type", "text/xml; charset=\"UTF-8\"")
#conn.putheader("X-CERS-DEVICE-INFO", "Duncanware (IndigoPlugin)")
#conn.putheader("X-CERS-DEVICE-ID", "DuncanwareRemote:34c43339-af3d-40e7-b1b2-743331375368c")
#conn.putheader("Cookie", "auth=3d76f00d8c7e4473fbc8c3d952a33756f863427596fc76c4395367bea25b3288")
#conn.putheader("SOAPAction", "\"urn:schemas-sony-com:service:IRCC:1#X_SendIRCC\"")
#conn.putheader("Content-Length", "%d" % len(soapTemplate))
#conn.endheaders()
#conn.send(soapTemplate)
#responseToREST = conn.getresponse()
#print "Response: [" + str(responseToREST.status) + "] " + responseToREST.read()
#print ""
#print "Sending IR Code With SideView User Agent Header"
#conn2 = httplib.HTTPConnection(televisionIP, televisionPort)
#conn2.connect()
#conn2.putrequest('POST', "/sony/IRCC")
#conn2.putheader("Host", televisionIP + ":" + str(televisionPort))
#conn2.putheader("Content-type", "text/xml; charset=\"UTF-8\"")
#conn2.putheader("User-Agent", "TVSideView/2.0.1 CFNetwork/672.0.8 Darwin/14.0.0")
#conn2.putheader("Cookie", "auth=3d76f00d8c7e4473fbc8c3d952a33756f863427596fc76c4395367bea25b3288")
#conn2.putheader("SOAPAction", "\"urn:schemas-sony-com:service:IRCC:1#X_SendIRCC\"")
#conn2.putheader("Content-Length", "%d" % len(soapTemplate))
#conn2.endheaders()
#conn2.send(soapTemplate)
#responseToREST = conn2.getresponse()
#print "Response: [" + str(responseToREST.status) + "] " + responseToREST.read()
#print ""
#print "Attempting to read System Information..."
#payload = '{"id":20,"method":"getSystemInformation","version":"1.0","params":[]}'
#sysInfoConn = httplib.HTTPConnection(televisionIP, televisionPort)
#sysInfoConn.connect()
#sysInfoConn.putrequest('POST', "/sony/system")
#sysInfoConn.putheader("Content-type", "application/json")
#sysInfoConn.putheader("Cookie", "auth=3d76f<PASSWORD>c7e4473fbc8c3d952a33756f863427596fc76c4395367bea25b3288")
#sysInfoConn.putheader("Content-Length", "%d" % len(payload))
#sysInfoConn.endheaders()
#sysInfoConn.send(payload)
#responseToSysInfo = sysInfoConn.getresponse()
#print "Response: [" + str(responseToSysInfo.status) + "] " + responseToSysInfo.read()
#print ""
#print "Attempting to read Remote Control Information..."
#payload = '{"id":20,"method":"getRemoteControllerInfo","version":"1.0","params":[]}'
#remoteInfoConn = httplib.HTTPConnection(televisionIP, televisionPort)
#remoteInfoConn.connect()
#remoteInfoConn.putrequest('POST', "/sony/system")
#remoteInfoConn.putheader("Content-type", "application/json")
#remoteInfoConn.putheader("Content-Length", "%d" % len(payload))
#remoteInfoConn.endheaders()
#remoteInfoConn.send(payload)
#responseToRemoteInfo = remoteInfoConn.getresponse()
#print "Response: [" + str(responseToRemoteInfo.status) + "] " + responseToRemoteInfo.read()
#print ""
#regconn = httplib.HTTPConnection(televisionIP, televisionPort)
#regconn.connect()
#regconn.request("GET", "/cers/api/register?name=indigoRemote®istrationType=new&deviceId=MediaRemote%3A" + fakeMAC)
#responseToReg = regconn.getresponse()
#print "Response to Registration: [" + str(responseToReg.status) + "] " + responseToReg.read()
except Exception as e:
print "Exception: " + str(e) | Documentation/test scripts/test_bravia_tv.py | import functools
import httplib
import Queue
import os
import re
import string
import socket
import sys
import threading
import telnetlib
import time
import urllib
if __name__ == '__main__':
try:
soapTemplate = """<?xml version="1.0" encoding="UTF-8"?>
<SOAP-ENV:Envelope
SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/">
<SOAP-ENV:Body>
<m:X_SendIRCC xmlns:m="urn:schemas-sony-com:service:IRCC:1">
<IRCCCode>AAAAAQAAAAEAAAASAw==</IRCCCode>
</m:X_SendIRCC>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
"""
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# INPUT YOUR CONFIGURATION HERE
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
televisionIP = "192.168.178.32"
televisionPort = 80
conn = httplib.HTTPConnection(televisionIP, televisionPort)
conn.connect()
print "Attempting to register with CERS API... GUID device ID"
conn.request("GET", "/cers/api/register?name=indigoRemote®istrationType=new&deviceId=34c43339-af3d-40e7-b1b2-743331375368c")
responseToGUID = conn.getresponse()
print "CERS GUID Headers: " + str(responseToGUID.getheaders())
print "CERS GUID Response: [" + str(responseToGUID.status) + "] " + responseToGUID.read()
print "Attempting to register with CERS API... GUID device ID AND Auth Cookie"
conn.request("GET", "/cers/api/register?name=indigoRemote®istrationType=new&deviceId=34c43339-af3d-40e7-b1b2-743331375368c")
conn.putheader("Cookie", "auth=3d76f00d8c7e4473fbc8c3d952a33756f863427596fc76c4395367bea25b3288")
conn.endheaders()
responseToGUIDCookie = conn.getresponse()
print "CERS GUID/Cookie Headers: " + str(responseToGUIDCookie.getheaders())
print "CERS GUID/Cookie Response: [" + str(responseToGUIDCookie.status) + "] " + responseToGUIDCookie.read()
#print "Sending IR Code With X-CERS-DEVICE Headers"
#conn.putrequest('POST', "/sony/IRCC")
#conn.putheader("Host", televisionIP + ":" + str(televisionPort))
#conn.putheader("Content-type", "text/xml; charset=\"UTF-8\"")
#conn.putheader("X-CERS-DEVICE-INFO", "Duncanware (IndigoPlugin)")
#conn.putheader("X-CERS-DEVICE-ID", "DuncanwareRemote:34c43339-af3d-40e7-b1b2-743331375368c")
#conn.putheader("Cookie", "auth=3d76f00d8c7e4473fbc8c3d952a33756f863427596fc76c4395367bea25b3288")
#conn.putheader("SOAPAction", "\"urn:schemas-sony-com:service:IRCC:1#X_SendIRCC\"")
#conn.putheader("Content-Length", "%d" % len(soapTemplate))
#conn.endheaders()
#conn.send(soapTemplate)
#responseToREST = conn.getresponse()
#print "Response: [" + str(responseToREST.status) + "] " + responseToREST.read()
#print ""
#print "Sending IR Code With SideView User Agent Header"
#conn2 = httplib.HTTPConnection(televisionIP, televisionPort)
#conn2.connect()
#conn2.putrequest('POST', "/sony/IRCC")
#conn2.putheader("Host", televisionIP + ":" + str(televisionPort))
#conn2.putheader("Content-type", "text/xml; charset=\"UTF-8\"")
#conn2.putheader("User-Agent", "TVSideView/2.0.1 CFNetwork/672.0.8 Darwin/14.0.0")
#conn2.putheader("Cookie", "auth=3d76f00d8c7e4473fbc8c3d952a33756f863427596fc76c4395367bea25b3288")
#conn2.putheader("SOAPAction", "\"urn:schemas-sony-com:service:IRCC:1#X_SendIRCC\"")
#conn2.putheader("Content-Length", "%d" % len(soapTemplate))
#conn2.endheaders()
#conn2.send(soapTemplate)
#responseToREST = conn2.getresponse()
#print "Response: [" + str(responseToREST.status) + "] " + responseToREST.read()
#print ""
#print "Attempting to read System Information..."
#payload = '{"id":20,"method":"getSystemInformation","version":"1.0","params":[]}'
#sysInfoConn = httplib.HTTPConnection(televisionIP, televisionPort)
#sysInfoConn.connect()
#sysInfoConn.putrequest('POST', "/sony/system")
#sysInfoConn.putheader("Content-type", "application/json")
#sysInfoConn.putheader("Cookie", "auth=3d76f<PASSWORD>c7e4473fbc8c3d952a33756f863427596fc76c4395367bea25b3288")
#sysInfoConn.putheader("Content-Length", "%d" % len(payload))
#sysInfoConn.endheaders()
#sysInfoConn.send(payload)
#responseToSysInfo = sysInfoConn.getresponse()
#print "Response: [" + str(responseToSysInfo.status) + "] " + responseToSysInfo.read()
#print ""
#print "Attempting to read Remote Control Information..."
#payload = '{"id":20,"method":"getRemoteControllerInfo","version":"1.0","params":[]}'
#remoteInfoConn = httplib.HTTPConnection(televisionIP, televisionPort)
#remoteInfoConn.connect()
#remoteInfoConn.putrequest('POST', "/sony/system")
#remoteInfoConn.putheader("Content-type", "application/json")
#remoteInfoConn.putheader("Content-Length", "%d" % len(payload))
#remoteInfoConn.endheaders()
#remoteInfoConn.send(payload)
#responseToRemoteInfo = remoteInfoConn.getresponse()
#print "Response: [" + str(responseToRemoteInfo.status) + "] " + responseToRemoteInfo.read()
#print ""
#regconn = httplib.HTTPConnection(televisionIP, televisionPort)
#regconn.connect()
#regconn.request("GET", "/cers/api/register?name=indigoRemote®istrationType=new&deviceId=MediaRemote%3A" + fakeMAC)
#responseToReg = regconn.getresponse()
#print "Response to Registration: [" + str(responseToReg.status) + "] " + responseToReg.read()
except Exception as e:
print "Exception: " + str(e) | 0.048824 | 0.05498 |
import csv
import pandas as pd
import json
import spacy
import re
from spacy.lang.en import English
from spacy.pipeline import EntityRuler
from spacy import displacy
from collections import Counter
# Import data yg akan dilakukan ekstraksi informasi
data = pd.read_csv("data/desa.csv", encoding='utf-8', index_col=0)
ekstrak = pd.read_csv("hasil_rbf.csv", index_col=0)
file_reader = data[data['Provinsi'] == 'DKI Jakarta'].reset_index().iloc[:, 1:]
kab = file_reader['Kabupaten'].drop_duplicates().reset_index()['Kabupaten']
kec = file_reader['Kecamatan'].drop_duplicates().reset_index()['Kecamatan']
des = file_reader['Desa'].drop_duplicates().reset_index()['Desa']
# Membuat pattern untuk ekstraksi lokasi
patterns, patterns1, patterns2 = [], [], [
{'label': 'ID', 'pattern': 'Jakarta'}]
def createPattern(data):
patte = []
for pp in data:
for ss in pp.split(" ("):
pat = []
if ss.__contains__(" / "):
#pat += [{'lower':s.lower().replace(")", "")} for sss in ss.split(" / ") for s in sss.split()]
patte += [{'label': 'GPE', 'pattern': ss.replace(" / ", "")}]
elif ss.__contains__("/"):
#pat += [{'lower':s.lower().replace(")", "")} for sss in ss.split("/") for s in sss.split()]
patte += [{'label': 'GPE', 'pattern': ss.replace("/", "")}]
else:
# pat += [{'lower':s.lower().replace(")", "")} for s in ss.split()]
patte += [{'label': 'GPE', 'pattern': ss.replace(")", "")}]
# patte.append({'label':'ORG', 'pattern':pat})
return patte
patterns += createPattern(kec)
patterns += createPattern(kab)
patterns += createPattern(des)
# Memanggil fungsi nlp untuk memulai ekstraksi informasi
nlp = spacy.blank('en')
# Menambahkan daftar lokasi pada spacy
ruler = EntityRuler(nlp)
ruler.add_patterns(patterns)
nlp.add_pipe(ruler)
# Import data konten berita yg akan dilakukan ekstraksi informasi
files = open('data/content_berita.csv', 'r', encoding='utf-8')
file_reader = csv.reader(files)
# Ekstraksi Lokasi
lokasi = [list(dict.fromkeys([x.text for x in nlp(row['filtering1']).ents]))
for idx, row in file_reader.iterrows()]
# Ekstraksi Tanggal
def deteksi_tanggal(stc, wrd=False):
if wrd:
sentences = stc.split()
return [w for ss in sentences for w in re.findall("\d+/\d+/\d+", ss) if w]
else:
return [w for w in re.findall("\d+/\d+/\d+", stc) if w]
tanggal = [list(dict.fromkeys(deteksi_tanggal(row['filtering1'])))
for idx, row in file_reader.iterrows()]
# Menggabungkan data hasil ekstraksi
file_reader['lokasi'] = lokasi
file_reader['tanggal'] = tanggal
hasil = file_reader[['filtering1', 'rbf', 'lokasi',
'tanggal']].reset_index().iloc[:, 1:]
h = hasil[hasil['rbf'] == 1].reset_index().iloc[:, 1:]
result = h[h.tanggal.map(len) > 0].reset_index().iloc[:, 1:]
result['c'] = [1 if 'Jakarta' in loc else 0 for loc in result['lokasi']]
res = result[result['c'] == 1].reset_index().iloc[:, 1:-1]
res = res[res.lokasi.map(len) > 1].reset_index().iloc[:, 1:]
# Data hasil ekstraksi
fix = res.drop_duplicates().reset_index().iloc[:, 1:]
fix.to_csv("siap dipetakan_new_80%.csv") | script/news-analysis/Ekstraksi.py | import csv
import pandas as pd
import json
import spacy
import re
from spacy.lang.en import English
from spacy.pipeline import EntityRuler
from spacy import displacy
from collections import Counter
# Import data yg akan dilakukan ekstraksi informasi
data = pd.read_csv("data/desa.csv", encoding='utf-8', index_col=0)
ekstrak = pd.read_csv("hasil_rbf.csv", index_col=0)
file_reader = data[data['Provinsi'] == 'DKI Jakarta'].reset_index().iloc[:, 1:]
kab = file_reader['Kabupaten'].drop_duplicates().reset_index()['Kabupaten']
kec = file_reader['Kecamatan'].drop_duplicates().reset_index()['Kecamatan']
des = file_reader['Desa'].drop_duplicates().reset_index()['Desa']
# Membuat pattern untuk ekstraksi lokasi
patterns, patterns1, patterns2 = [], [], [
{'label': 'ID', 'pattern': 'Jakarta'}]
def createPattern(data):
patte = []
for pp in data:
for ss in pp.split(" ("):
pat = []
if ss.__contains__(" / "):
#pat += [{'lower':s.lower().replace(")", "")} for sss in ss.split(" / ") for s in sss.split()]
patte += [{'label': 'GPE', 'pattern': ss.replace(" / ", "")}]
elif ss.__contains__("/"):
#pat += [{'lower':s.lower().replace(")", "")} for sss in ss.split("/") for s in sss.split()]
patte += [{'label': 'GPE', 'pattern': ss.replace("/", "")}]
else:
# pat += [{'lower':s.lower().replace(")", "")} for s in ss.split()]
patte += [{'label': 'GPE', 'pattern': ss.replace(")", "")}]
# patte.append({'label':'ORG', 'pattern':pat})
return patte
patterns += createPattern(kec)
patterns += createPattern(kab)
patterns += createPattern(des)
# Memanggil fungsi nlp untuk memulai ekstraksi informasi
nlp = spacy.blank('en')
# Menambahkan daftar lokasi pada spacy
ruler = EntityRuler(nlp)
ruler.add_patterns(patterns)
nlp.add_pipe(ruler)
# Import data konten berita yg akan dilakukan ekstraksi informasi
files = open('data/content_berita.csv', 'r', encoding='utf-8')
file_reader = csv.reader(files)
# Ekstraksi Lokasi
lokasi = [list(dict.fromkeys([x.text for x in nlp(row['filtering1']).ents]))
for idx, row in file_reader.iterrows()]
# Ekstraksi Tanggal
def deteksi_tanggal(stc, wrd=False):
if wrd:
sentences = stc.split()
return [w for ss in sentences for w in re.findall("\d+/\d+/\d+", ss) if w]
else:
return [w for w in re.findall("\d+/\d+/\d+", stc) if w]
tanggal = [list(dict.fromkeys(deteksi_tanggal(row['filtering1'])))
for idx, row in file_reader.iterrows()]
# Menggabungkan data hasil ekstraksi
file_reader['lokasi'] = lokasi
file_reader['tanggal'] = tanggal
hasil = file_reader[['filtering1', 'rbf', 'lokasi',
'tanggal']].reset_index().iloc[:, 1:]
h = hasil[hasil['rbf'] == 1].reset_index().iloc[:, 1:]
result = h[h.tanggal.map(len) > 0].reset_index().iloc[:, 1:]
result['c'] = [1 if 'Jakarta' in loc else 0 for loc in result['lokasi']]
res = result[result['c'] == 1].reset_index().iloc[:, 1:-1]
res = res[res.lokasi.map(len) > 1].reset_index().iloc[:, 1:]
# Data hasil ekstraksi
fix = res.drop_duplicates().reset_index().iloc[:, 1:]
fix.to_csv("siap dipetakan_new_80%.csv") | 0.221603 | 0.207897 |
"""Azure Service helpers."""
import logging
from tempfile import NamedTemporaryFile
from adal.adal_error import AdalError
from azure.common import AzureException
from azure.core.exceptions import HttpResponseError
from msrest.exceptions import ClientException
from providers.azure.client import AzureClientFactory
LOG = logging.getLogger(__name__)
class AzureServiceError(Exception):
    """Error raised when an interaction with the Azure services fails."""
class AzureCostReportNotFound(Exception):
    """Error raised when an expected Azure cost report cannot be located."""
class AzureService:
"""A class to handle interactions with the Azure services."""
def __init__(
self,
tenant_id,
client_id,
client_secret,
resource_group_name,
storage_account_name,
subscription_id=None,
cloud="public",
):
"""Establish connection information."""
self._resource_group_name = resource_group_name
self._storage_account_name = storage_account_name
self._factory = AzureClientFactory(subscription_id, tenant_id, client_id, client_secret, cloud)
if not self._factory.subscription_id:
raise AzureServiceError("Azure Service missing subscription id.")
self._cloud_storage_account = self._factory.cloud_storage_account(resource_group_name, storage_account_name)
if not self._factory.credentials:
raise AzureServiceError("Azure Service credentials are not configured.")
def get_cost_export_for_key(self, key, container_name):
"""Get the latest cost export file from given storage account container."""
report = None
try:
container_client = self._cloud_storage_account.get_container_client(container_name)
blob_list = container_client.list_blobs(name_starts_with=key)
except (AdalError, AzureException, ClientException) as error:
raise AzureServiceError("Failed to download cost export. Error: ", str(error))
for blob in blob_list:
if key == blob.name:
report = blob
break
if not report:
message = f"No cost report for report name {key} found in container {container_name}."
raise AzureCostReportNotFound(message)
return report
def download_cost_export(self, key, container_name, destination=None):
"""Download the latest cost export file from a given storage container."""
cost_export = self.get_cost_export_for_key(key, container_name)
file_path = destination
if not destination:
temp_file = NamedTemporaryFile(delete=False, suffix=".csv")
file_path = temp_file.name
try:
blob_client = self._cloud_storage_account.get_blob_client(container_name, cost_export.name)
with open(file_path, "wb") as blob_download:
blob_download.write(blob_client.download_blob().readall())
except (AdalError, AzureException, ClientException, IOError) as error:
raise AzureServiceError("Failed to download cost export. Error: ", str(error))
return file_path
def get_latest_cost_export_for_path(self, report_path, container_name):
"""Get the latest cost export file from given storage account container."""
latest_report = None
if not container_name:
message = "Unable to gather latest export as container name is not provided."
LOG.warning(message)
raise AzureCostReportNotFound(message)
try:
container_client = self._cloud_storage_account.get_container_client(container_name)
blob_list = container_client.list_blobs(name_starts_with=report_path)
for blob in blob_list:
if report_path in blob.name and not latest_report:
latest_report = blob
elif report_path in blob.name and blob.last_modified > latest_report.last_modified:
latest_report = blob
if not latest_report:
message = f"No cost report found in container {container_name} for " f"path {report_path}."
raise AzureCostReportNotFound(message)
return latest_report
except (AdalError, AzureException, ClientException) as error:
raise AzureServiceError("Failed to download cost export. Error: ", str(error))
except HttpResponseError as httpError:
if httpError.status_code == 403:
message = (
"An authorization error occurred attempting to gather latest export"
f" in container {container_name} for "
f"path {report_path}."
)
else:
message = (
"Unknown error occurred attempting to gather latest export"
f" in container {container_name} for "
f"path {report_path}."
)
error_msg = message + f" Azure Error: {httpError}."
LOG.warning(error_msg)
raise AzureCostReportNotFound(message)
def describe_cost_management_exports(self):
"""List cost management export."""
scope = f"/subscriptions/{self._factory.subscription_id}"
expected_resource_id = (
f"/subscriptions/{self._factory.subscription_id}/resourceGroups/"
f"{self._resource_group_name}/providers/Microsoft.Storage/"
f"storageAccounts/{self._storage_account_name}"
)
export_reports = []
try:
cost_management_client = self._factory.cost_management_client
management_reports = cost_management_client.exports.list(scope)
for report in management_reports.value:
if report.delivery_info.destination.resource_id == expected_resource_id:
report_def = {
"name": report.name,
"container": report.delivery_info.destination.container,
"directory": report.delivery_info.destination.root_folder_path,
}
export_reports.append(report_def)
except (AdalError, AzureException, ClientException) as exc:
raise AzureCostReportNotFound(exc)
return export_reports | koku/masu/external/downloader/azure/azure_service.py | """Azure Service helpers."""
import logging
from tempfile import NamedTemporaryFile
from adal.adal_error import AdalError
from azure.common import AzureException
from azure.core.exceptions import HttpResponseError
from msrest.exceptions import ClientException
from providers.azure.client import AzureClientFactory
LOG = logging.getLogger(__name__)
class AzureServiceError(Exception):
"""Raised when errors are encountered from Azure."""
pass
class AzureCostReportNotFound(Exception):
"""Raised when Azure cost report is not found."""
pass
class AzureService:
"""A class to handle interactions with the Azure services."""
def __init__(
self,
tenant_id,
client_id,
client_secret,
resource_group_name,
storage_account_name,
subscription_id=None,
cloud="public",
):
"""Establish connection information."""
self._resource_group_name = resource_group_name
self._storage_account_name = storage_account_name
self._factory = AzureClientFactory(subscription_id, tenant_id, client_id, client_secret, cloud)
if not self._factory.subscription_id:
raise AzureServiceError("Azure Service missing subscription id.")
self._cloud_storage_account = self._factory.cloud_storage_account(resource_group_name, storage_account_name)
if not self._factory.credentials:
raise AzureServiceError("Azure Service credentials are not configured.")
def get_cost_export_for_key(self, key, container_name):
"""Get the latest cost export file from given storage account container."""
report = None
try:
container_client = self._cloud_storage_account.get_container_client(container_name)
blob_list = container_client.list_blobs(name_starts_with=key)
except (AdalError, AzureException, ClientException) as error:
raise AzureServiceError("Failed to download cost export. Error: ", str(error))
for blob in blob_list:
if key == blob.name:
report = blob
break
if not report:
message = f"No cost report for report name {key} found in container {container_name}."
raise AzureCostReportNotFound(message)
return report
def download_cost_export(self, key, container_name, destination=None):
"""Download the latest cost export file from a given storage container."""
cost_export = self.get_cost_export_for_key(key, container_name)
file_path = destination
if not destination:
temp_file = NamedTemporaryFile(delete=False, suffix=".csv")
file_path = temp_file.name
try:
blob_client = self._cloud_storage_account.get_blob_client(container_name, cost_export.name)
with open(file_path, "wb") as blob_download:
blob_download.write(blob_client.download_blob().readall())
except (AdalError, AzureException, ClientException, IOError) as error:
raise AzureServiceError("Failed to download cost export. Error: ", str(error))
return file_path
def get_latest_cost_export_for_path(self, report_path, container_name):
"""Get the latest cost export file from given storage account container."""
latest_report = None
if not container_name:
message = "Unable to gather latest export as container name is not provided."
LOG.warning(message)
raise AzureCostReportNotFound(message)
try:
container_client = self._cloud_storage_account.get_container_client(container_name)
blob_list = container_client.list_blobs(name_starts_with=report_path)
for blob in blob_list:
if report_path in blob.name and not latest_report:
latest_report = blob
elif report_path in blob.name and blob.last_modified > latest_report.last_modified:
latest_report = blob
if not latest_report:
message = f"No cost report found in container {container_name} for " f"path {report_path}."
raise AzureCostReportNotFound(message)
return latest_report
except (AdalError, AzureException, ClientException) as error:
raise AzureServiceError("Failed to download cost export. Error: ", str(error))
except HttpResponseError as httpError:
if httpError.status_code == 403:
message = (
"An authorization error occurred attempting to gather latest export"
f" in container {container_name} for "
f"path {report_path}."
)
else:
message = (
"Unknown error occurred attempting to gather latest export"
f" in container {container_name} for "
f"path {report_path}."
)
error_msg = message + f" Azure Error: {httpError}."
LOG.warning(error_msg)
raise AzureCostReportNotFound(message)
def describe_cost_management_exports(self):
"""List cost management export."""
scope = f"/subscriptions/{self._factory.subscription_id}"
expected_resource_id = (
f"/subscriptions/{self._factory.subscription_id}/resourceGroups/"
f"{self._resource_group_name}/providers/Microsoft.Storage/"
f"storageAccounts/{self._storage_account_name}"
)
export_reports = []
try:
cost_management_client = self._factory.cost_management_client
management_reports = cost_management_client.exports.list(scope)
for report in management_reports.value:
if report.delivery_info.destination.resource_id == expected_resource_id:
report_def = {
"name": report.name,
"container": report.delivery_info.destination.container,
"directory": report.delivery_info.destination.root_folder_path,
}
export_reports.append(report_def)
except (AdalError, AzureException, ClientException) as exc:
raise AzureCostReportNotFound(exc)
return export_reports | 0.848235 | 0.092606 |
import shapes
import pygame
class Brush:
"""
Brush class
"""
def __init__(self, colour, width):
"""
Constructor, assigns values
Args:
colour (tuple): RGB values for the brush colour
width (int): width of brush
"""
self._colour = colour
self._width = width
def make_brush_stroke(self, position):
"""
Creates a brush stroke when the brush is used
Args:
position (tuple): the coordinates that the brush is on to make the mark
"""
return BrushStroke(self._colour, self._width, position)
def get_colour(self):
"""
returns the colour of the brush
"""
return self._colour
def get_width(self):
"""
returns the width of the brush
"""
return self._width
def set_colour(self, new_colour):
"""
sets the colour of the brush
args:
new_colour (tuple): the new colour of the brush in RGB
"""
self._colour = new_colour
def set_width(self, new_width):
"""
sets the width of the brush
args:
new_wdith (int): the new width of the brush
"""
self._width = new_width
class Eraser(Brush):
"""
Eraser, inherits from Brush
"""
def __init__(self, width):
"""
Eraser constructor
Args:
width (int): width of eraser
height (int): height of eraser
"""
super().__init__((255, 255, 255), width)
class BrushStroke(Brush, shapes.Shape):
"""
The mark that the Brush class would make on the canvas
"""
def __init__(self, colour, width, coordinates):
"""
Constructor for the BrushStroke
Args:
colour (tuple): the RGB value of the brush mark
width (int): the width of the mark
height (int): the height of the mark
coordinates (tuple): the position of the mark on the brush'es canvas
"""
super().__init__(colour, width)
self._coordinates = coordinates
def get_coordinates(self):
"""
returns the mark's position
"""
return self._coordinates
def draw(self, screen):
"""
Draws the brush mark on the canvas
Args:
screen (pygame.surface): the pygame surface the brush mark will be drawn on
"""
pygame.draw.circle(
screen,
self._colour,
self._coordinates,
(int(self._width / 2)),
0)
def mark(self, canvas):
"""
Marks the brushstroke on the canvas
Args:
canvas (list): 3d array keeping track of each pixel on the board
Returns:
list: the updated canvas
"""
for x in range(self._coordinates[0] - self._width,
self._coordinates[0] + self._width):
for y in range(
self._coordinates[1] - self._width, self._coordinates[1] + self._width):
if (((x - self._coordinates[0]) * (x - self._coordinates[0])) + (
(y - self._coordinates[1]) * (y - self._coordinates[1]))) < (self._width * self._width):
canvas[y - 115][x - 200] = self._colour
return canvas
def fill(canvas, point, colour):
"""
Fills an area of the canvas
Args:
canvas (list): the canvsa to fill something on
point (tuple): the point to start filling at
colour (list): the colour to fill with
Returns:
list: the newly filled canvas
"""
original_colour = canvas[point[0]][point[1]]
mock_queue = []
mock_queue.append(point)
while len(mock_queue) > 0:
new_point = mock_queue.pop(0)
canvas[new_point[0]][new_point[1]] = colour
if (new_point[0] + 1 < len(canvas)) and (canvas[new_point[0] + 1]
[new_point[1]] == original_colour):
mock_queue.append((new_point[0] + 1, new_point[1]))
if (new_point[0] - 1 >= 0) and (canvas[new_point[0] - 1]
[new_point[1]] == original_colour):
mock_queue.append((new_point[0] - 1, new_point[1]))
if (new_point[1] + 1 < len(canvas[0])) and (canvas[new_point[0]]
[new_point[1] + 1] == original_colour):
mock_queue.append((new_point[0], new_point[1] + 1))
if (new_point[1] + 1 >= 0) and (canvas[new_point[0]]
[new_point[1] - 1] == original_colour):
mock_queue.append((new_point[0], new_point[1] - 1))
return canvas | src/brushes.py | import shapes
import pygame
class Brush:
"""
Brush class
"""
def __init__(self, colour, width):
"""
Constructor, assigns values
Args:
colour (tuple): RGB values for the brush colour
width (int): width of brush
"""
self._colour = colour
self._width = width
def make_brush_stroke(self, position):
"""
Creates a brush stroke when the brush is used
Args:
position (tuple): the coordinates that the brush is on to make the mark
"""
return BrushStroke(self._colour, self._width, position)
def get_colour(self):
"""
returns the colour of the brush
"""
return self._colour
def get_width(self):
"""
returns the width of the brush
"""
return self._width
def set_colour(self, new_colour):
"""
sets the colour of the brush
args:
new_colour (tuple): the new colour of the brush in RGB
"""
self._colour = new_colour
def set_width(self, new_width):
"""
sets the width of the brush
args:
new_wdith (int): the new width of the brush
"""
self._width = new_width
class Eraser(Brush):
"""
Eraser, inherits from Brush
"""
def __init__(self, width):
"""
Eraser constructor
Args:
width (int): width of eraser
height (int): height of eraser
"""
super().__init__((255, 255, 255), width)
class BrushStroke(Brush, shapes.Shape):
"""
The mark that the Brush class would make on the canvas
"""
def __init__(self, colour, width, coordinates):
"""
Constructor for the BrushStroke
Args:
colour (tuple): the RGB value of the brush mark
width (int): the width of the mark
height (int): the height of the mark
coordinates (tuple): the position of the mark on the brush'es canvas
"""
super().__init__(colour, width)
self._coordinates = coordinates
def get_coordinates(self):
"""
returns the mark's position
"""
return self._coordinates
def draw(self, screen):
"""
Draws the brush mark on the canvas
Args:
screen (pygame.surface): the pygame surface the brush mark will be drawn on
"""
pygame.draw.circle(
screen,
self._colour,
self._coordinates,
(int(self._width / 2)),
0)
def mark(self, canvas):
"""
Marks the brushstroke on the canvas
Args:
canvas (list): 3d array keeping track of each pixel on the board
Returns:
list: the updated canvas
"""
for x in range(self._coordinates[0] - self._width,
self._coordinates[0] + self._width):
for y in range(
self._coordinates[1] - self._width, self._coordinates[1] + self._width):
if (((x - self._coordinates[0]) * (x - self._coordinates[0])) + (
(y - self._coordinates[1]) * (y - self._coordinates[1]))) < (self._width * self._width):
canvas[y - 115][x - 200] = self._colour
return canvas
def fill(canvas, point, colour):
"""
Fills an area of the canvas
Args:
canvas (list): the canvsa to fill something on
point (tuple): the point to start filling at
colour (list): the colour to fill with
Returns:
list: the newly filled canvas
"""
original_colour = canvas[point[0]][point[1]]
mock_queue = []
mock_queue.append(point)
while len(mock_queue) > 0:
new_point = mock_queue.pop(0)
canvas[new_point[0]][new_point[1]] = colour
if (new_point[0] + 1 < len(canvas)) and (canvas[new_point[0] + 1]
[new_point[1]] == original_colour):
mock_queue.append((new_point[0] + 1, new_point[1]))
if (new_point[0] - 1 >= 0) and (canvas[new_point[0] - 1]
[new_point[1]] == original_colour):
mock_queue.append((new_point[0] - 1, new_point[1]))
if (new_point[1] + 1 < len(canvas[0])) and (canvas[new_point[0]]
[new_point[1] + 1] == original_colour):
mock_queue.append((new_point[0], new_point[1] + 1))
if (new_point[1] + 1 >= 0) and (canvas[new_point[0]]
[new_point[1] - 1] == original_colour):
mock_queue.append((new_point[0], new_point[1] - 1))
return canvas | 0.897415 | 0.547404 |
from .. import Tag
from ..render import HRenderer
import threading
import os,json
from starlette.applications import Starlette
from starlette.responses import HTMLResponse
from starlette.routing import Route,WebSocketRoute
from starlette.endpoints import WebSocketEndpoint
import socket
def isFree(ip, port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(1)
return not (s.connect_ex((ip,port)) == 0)
class DevApp(Starlette):
""" DEV APP, Runner specialized for development process. Features :
* autoreload on file changes
* refresh UI/HTML/client part, after server autoreloaded
* console.log/info in devtools, for all exchanges
* uvicorn debug
* js error() method auto implemented (popup with skip/refresh)
Simple ASync Web Server (with starlette) with WebSocket interactions with HTag.
Open the rendering in a browser tab.
The instance is an ASGI htag app
"""
def __init__(self,tagClass:type):
assert issubclass(tagClass,Tag)
#/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\
# add a Static Template, for displaying beautiful full error on UI ;-)
#/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\ #TODO: perhaps something integrated in hrenderer
t=Tag.H.div( _style="z-index:10000000000;position:fixed;top:10px;left:10px;background:#F00;padding:8px;border:1px solid yellow" )
t <= Tag.H.a("X",_href="#",_onclick="this.parentNode.remove()",_style="color:yellow;text-decoration:none",_title="Forget error (skip)")
t <= " "
t <= Tag.H.a("REFRESH",_href="#",_onclick="window.location.reload()",_style="color:yellow;text-decoration:none",_title="Restart the UI part by refreshing it")
t <= Tag.H.pre()
template = Tag.H.template(t,_id="DevAppError")
#/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\
js = """
window.error=function(txt) {
var clone = document.importNode(document.querySelector("#DevAppError").content, true);
clone.querySelector("pre").innerHTML = txt
document.body.appendChild(clone)
}
async function interact( o ) {
let packet = JSON.stringify(o)
console.info("[htag interact]",packet.length,o)
ws.send( packet );
}
var ws = new WebSocket("ws://"+document.location.host+"/ws");
ws.onopen = function() {console.info("[htag start]");start()};
ws.onclose = function() {document.body.innerHTML="Refreshing";window.location.reload()}
ws.onmessage = function(e) {
let data = JSON.parse(e.data);
console.info("[htag action]",e.data.length,data)
action( data );
};
"""
self.renderer=HRenderer(tagClass, js, lambda: os._exit(0), fullerror=True, statics=[template,])
class WsInteract(WebSocketEndpoint):
encoding = "json"
async def on_receive(this, websocket, data):
actions = await self.renderer.interact(data["id"],data["method"],data["args"],data["kargs"])
await websocket.send_text( json.dumps(actions) )
Starlette.__init__(self,debug=True, routes=[
Route('/', self.GET, methods=["GET"]),
WebSocketRoute("/ws", WsInteract),
])
async def GET(self,request):
return HTMLResponse( str(self.renderer) )
def run(self, host="127.0.0.1", port=8000, openBrowser=True): # localhost, by default !!
""" example `app.run(__name__)` """
import uvicorn,webbrowser
import inspect,sys
from pathlib import Path
try:
fi= inspect.getframeinfo(sys._getframe(1))
stem = Path(fi.filename).stem
instanceName = fi.code_context[0].strip().split(".")[0]
except Exception as e:
print("Can't run DevApp :",e)
sys.exit(-1)
fileapp = stem+":"+instanceName
url = f"http://{host}:{port}"
print("="*79)
print(f"Start Uvicorn Reloader for '{fileapp}' ({url})")
print("="*79)
if openBrowser:
webbrowser.open_new_tab(url)
uvicorn.run(fileapp,host=host,port=port,reload=True,debug=True) | htag/runners/devapp.py |
from .. import Tag
from ..render import HRenderer
import threading
import os,json
from starlette.applications import Starlette
from starlette.responses import HTMLResponse
from starlette.routing import Route,WebSocketRoute
from starlette.endpoints import WebSocketEndpoint
import socket
def isFree(ip, port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(1)
return not (s.connect_ex((ip,port)) == 0)
class DevApp(Starlette):
""" DEV APP, Runner specialized for development process. Features :
* autoreload on file changes
* refresh UI/HTML/client part, after server autoreloaded
* console.log/info in devtools, for all exchanges
* uvicorn debug
* js error() method auto implemented (popup with skip/refresh)
Simple ASync Web Server (with starlette) with WebSocket interactions with HTag.
Open the rendering in a browser tab.
The instance is an ASGI htag app
"""
def __init__(self,tagClass:type):
assert issubclass(tagClass,Tag)
#/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\
# add a Static Template, for displaying beautiful full error on UI ;-)
#/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\ #TODO: perhaps something integrated in hrenderer
t=Tag.H.div( _style="z-index:10000000000;position:fixed;top:10px;left:10px;background:#F00;padding:8px;border:1px solid yellow" )
t <= Tag.H.a("X",_href="#",_onclick="this.parentNode.remove()",_style="color:yellow;text-decoration:none",_title="Forget error (skip)")
t <= " "
t <= Tag.H.a("REFRESH",_href="#",_onclick="window.location.reload()",_style="color:yellow;text-decoration:none",_title="Restart the UI part by refreshing it")
t <= Tag.H.pre()
template = Tag.H.template(t,_id="DevAppError")
#/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\
js = """
window.error=function(txt) {
var clone = document.importNode(document.querySelector("#DevAppError").content, true);
clone.querySelector("pre").innerHTML = txt
document.body.appendChild(clone)
}
async function interact( o ) {
let packet = JSON.stringify(o)
console.info("[htag interact]",packet.length,o)
ws.send( packet );
}
var ws = new WebSocket("ws://"+document.location.host+"/ws");
ws.onopen = function() {console.info("[htag start]");start()};
ws.onclose = function() {document.body.innerHTML="Refreshing";window.location.reload()}
ws.onmessage = function(e) {
let data = JSON.parse(e.data);
console.info("[htag action]",e.data.length,data)
action( data );
};
"""
self.renderer=HRenderer(tagClass, js, lambda: os._exit(0), fullerror=True, statics=[template,])
class WsInteract(WebSocketEndpoint):
encoding = "json"
async def on_receive(this, websocket, data):
actions = await self.renderer.interact(data["id"],data["method"],data["args"],data["kargs"])
await websocket.send_text( json.dumps(actions) )
Starlette.__init__(self,debug=True, routes=[
Route('/', self.GET, methods=["GET"]),
WebSocketRoute("/ws", WsInteract),
])
async def GET(self,request):
return HTMLResponse( str(self.renderer) )
def run(self, host="127.0.0.1", port=8000, openBrowser=True): # localhost, by default !!
""" example `app.run(__name__)` """
import uvicorn,webbrowser
import inspect,sys
from pathlib import Path
try:
fi= inspect.getframeinfo(sys._getframe(1))
stem = Path(fi.filename).stem
instanceName = fi.code_context[0].strip().split(".")[0]
except Exception as e:
print("Can't run DevApp :",e)
sys.exit(-1)
fileapp = stem+":"+instanceName
url = f"http://{host}:{port}"
print("="*79)
print(f"Start Uvicorn Reloader for '{fileapp}' ({url})")
print("="*79)
if openBrowser:
webbrowser.open_new_tab(url)
uvicorn.run(fileapp,host=host,port=port,reload=True,debug=True) | 0.309963 | 0.06951 |
from datetime import timedelta
from logging import getLogger
from typing import Callable, Union, List, Tuple, Optional
from zstandard import ZstdDecompressor # type: ignore
from penguin_judge.check_result import equal_binary
from penguin_judge.models import (
JudgeStatus, Submission, JudgeResult, transaction, scoped_session)
from penguin_judge.judge import (
T, JudgeDriver, JudgeTask, JudgeTestInfo, AgentTestResult, AgentError)
LOGGER = getLogger(__name__)
def run(judge_class: Callable[[], JudgeDriver],
task: JudgeTask) -> JudgeStatus:
LOGGER.info('judge start (contest_id: {}, problem_id: {}, '
'submission_id: {}, user_id: {}'.format(
task.contest_id, task.problem_id, task.id, task.user_id))
zctx = ZstdDecompressor()
try:
task.code = zctx.decompress(task.code)
for test in task.tests:
test.input = zctx.decompress(test.input)
test.output = zctx.decompress(test.output)
except Exception:
LOGGER.warning('decompress failed', exc_info=True)
with transaction() as s:
return _update_submission_status(s, task,
JudgeStatus.InternalError)
with judge_class() as judge:
ret = _prepare(judge, task)
if ret:
return ret
if task.compile_image_name:
ret = _compile(judge, task)
if ret:
return ret
ret = _tests(judge, task)
LOGGER.info('judge finished (submission_id={}): {}'.format(task.id, ret))
return ret
def _prepare(judge: JudgeDriver, task: JudgeTask) -> Union[JudgeStatus, None]:
try:
judge.prepare(task)
return None
except Exception:
LOGGER.warning('prepare failed', exc_info=True)
with transaction() as s:
return _update_submission_status(
s, task, JudgeStatus.InternalError)
def _compile(judge: JudgeDriver, task: JudgeTask) -> Union[JudgeStatus, None]:
try:
ret = judge.compile(task)
except Exception:
LOGGER.warning('compile failed', exc_info=True)
ret = JudgeStatus.InternalError
if isinstance(ret, JudgeStatus):
with transaction() as s:
_update_submission_status(s, task, ret)
s.query(JudgeResult).filter(
JudgeResult.contest_id == task.contest_id,
JudgeResult.problem_id == task.problem_id,
JudgeResult.submission_id == task.id
).update({
JudgeResult.status: ret}, synchronize_session=False)
LOGGER.info('judge failed (submission_id={}): {}'.format(
task.id, ret))
return ret
task.code, task.compile_time = ret.binary, timedelta(seconds=ret.time)
return None
def _tests(judge: JudgeDriver, task: JudgeTask) -> JudgeStatus:
judge_results: List[
Tuple[JudgeStatus, Optional[timedelta], Optional[int]]] = []
def judge_test_cmpl(
test: JudgeTestInfo,
resp: Union[AgentTestResult, AgentError]
) -> None:
time: Optional[timedelta] = None
memory_kb: Optional[int] = None
if isinstance(resp, AgentTestResult):
if resp.time is not None:
time = timedelta(seconds=resp.time)
if resp.memory_bytes is not None:
memory_kb = resp.memory_bytes // 1024
if equal_binary(test.output, resp.output):
status = JudgeStatus.Accepted
else:
status = JudgeStatus.WrongAnswer
else:
status = JudgeStatus.from_str(resp.kind)
judge_results.append((status, time, memory_kb))
with transaction() as s:
s.query(JudgeResult).filter(
JudgeResult.contest_id == task.contest_id,
JudgeResult.problem_id == task.problem_id,
JudgeResult.submission_id == task.id,
JudgeResult.test_id == test.id,
).update({
JudgeResult.status: status,
JudgeResult.time: time,
JudgeResult.memory: memory_kb,
}, synchronize_session=False)
def start_test_func(test_id: str) -> None:
with transaction() as s:
s.query(JudgeResult).filter(
JudgeResult.contest_id == task.contest_id,
JudgeResult.problem_id == task.problem_id,
JudgeResult.submission_id == task.id,
JudgeResult.test_id == test_id,
).update({
JudgeResult.status: JudgeStatus.Running
}, synchronize_session=False)
try:
judge.tests(task, start_test_func, judge_test_cmpl)
except Exception:
LOGGER.warning(
'test failed (submission_id={})'.format(task.id), exc_info=True)
judge_results.append((JudgeStatus.InternalError, None, None))
def get_submission_status() -> JudgeStatus:
judge_status = set([s for s, _, _ in judge_results])
if len(judge_status) == 1:
return list(judge_status)[0]
for x in (JudgeStatus.InternalError, JudgeStatus.RuntimeError,
JudgeStatus.WrongAnswer, JudgeStatus.MemoryLimitExceeded,
JudgeStatus.TimeLimitExceeded,
JudgeStatus.OutputLimitExceeded):
if x in judge_status:
return x
return JudgeStatus.InternalError # pragma: no cover
def max_value(lst: List[T]) -> Optional[T]:
ret = None
for x in lst:
if x is None:
continue
if ret is None or ret < x:
ret = x
return ret
submission_status = get_submission_status()
max_time = max_value([t for _, t, _ in judge_results])
max_memory = max_value([m for _, _, m in judge_results])
with transaction() as s:
s.query(Submission).filter(
Submission.contest_id == task.contest_id,
Submission.problem_id == task.problem_id,
Submission.id == task.id
).update({
Submission.status: submission_status,
Submission.compile_time: task.compile_time,
Submission.max_time: max_time,
Submission.max_memory: max_memory,
}, synchronize_session=False)
return submission_status
def _update_submission_status(
s: scoped_session, task: JudgeTask, status: JudgeStatus
) -> JudgeStatus:
s.query(Submission).filter(
Submission.contest_id == task.contest_id,
Submission.problem_id == task.problem_id,
Submission.id == task.id,
).update({Submission.status: status}, synchronize_session=False)
return status | backend/penguin_judge/judge/main.py | from datetime import timedelta
from logging import getLogger
from typing import Callable, Union, List, Tuple, Optional
from zstandard import ZstdDecompressor # type: ignore
from penguin_judge.check_result import equal_binary
from penguin_judge.models import (
JudgeStatus, Submission, JudgeResult, transaction, scoped_session)
from penguin_judge.judge import (
T, JudgeDriver, JudgeTask, JudgeTestInfo, AgentTestResult, AgentError)
LOGGER = getLogger(__name__)
def run(judge_class: Callable[[], JudgeDriver],
task: JudgeTask) -> JudgeStatus:
LOGGER.info('judge start (contest_id: {}, problem_id: {}, '
'submission_id: {}, user_id: {}'.format(
task.contest_id, task.problem_id, task.id, task.user_id))
zctx = ZstdDecompressor()
try:
task.code = zctx.decompress(task.code)
for test in task.tests:
test.input = zctx.decompress(test.input)
test.output = zctx.decompress(test.output)
except Exception:
LOGGER.warning('decompress failed', exc_info=True)
with transaction() as s:
return _update_submission_status(s, task,
JudgeStatus.InternalError)
with judge_class() as judge:
ret = _prepare(judge, task)
if ret:
return ret
if task.compile_image_name:
ret = _compile(judge, task)
if ret:
return ret
ret = _tests(judge, task)
LOGGER.info('judge finished (submission_id={}): {}'.format(task.id, ret))
return ret
def _prepare(judge: JudgeDriver, task: JudgeTask) -> Union[JudgeStatus, None]:
try:
judge.prepare(task)
return None
except Exception:
LOGGER.warning('prepare failed', exc_info=True)
with transaction() as s:
return _update_submission_status(
s, task, JudgeStatus.InternalError)
def _compile(judge: JudgeDriver, task: JudgeTask) -> Union[JudgeStatus, None]:
try:
ret = judge.compile(task)
except Exception:
LOGGER.warning('compile failed', exc_info=True)
ret = JudgeStatus.InternalError
if isinstance(ret, JudgeStatus):
with transaction() as s:
_update_submission_status(s, task, ret)
s.query(JudgeResult).filter(
JudgeResult.contest_id == task.contest_id,
JudgeResult.problem_id == task.problem_id,
JudgeResult.submission_id == task.id
).update({
JudgeResult.status: ret}, synchronize_session=False)
LOGGER.info('judge failed (submission_id={}): {}'.format(
task.id, ret))
return ret
task.code, task.compile_time = ret.binary, timedelta(seconds=ret.time)
return None
def _tests(judge: JudgeDriver, task: JudgeTask) -> JudgeStatus:
judge_results: List[
Tuple[JudgeStatus, Optional[timedelta], Optional[int]]] = []
def judge_test_cmpl(
test: JudgeTestInfo,
resp: Union[AgentTestResult, AgentError]
) -> None:
time: Optional[timedelta] = None
memory_kb: Optional[int] = None
if isinstance(resp, AgentTestResult):
if resp.time is not None:
time = timedelta(seconds=resp.time)
if resp.memory_bytes is not None:
memory_kb = resp.memory_bytes // 1024
if equal_binary(test.output, resp.output):
status = JudgeStatus.Accepted
else:
status = JudgeStatus.WrongAnswer
else:
status = JudgeStatus.from_str(resp.kind)
judge_results.append((status, time, memory_kb))
with transaction() as s:
s.query(JudgeResult).filter(
JudgeResult.contest_id == task.contest_id,
JudgeResult.problem_id == task.problem_id,
JudgeResult.submission_id == task.id,
JudgeResult.test_id == test.id,
).update({
JudgeResult.status: status,
JudgeResult.time: time,
JudgeResult.memory: memory_kb,
}, synchronize_session=False)
def start_test_func(test_id: str) -> None:
with transaction() as s:
s.query(JudgeResult).filter(
JudgeResult.contest_id == task.contest_id,
JudgeResult.problem_id == task.problem_id,
JudgeResult.submission_id == task.id,
JudgeResult.test_id == test_id,
).update({
JudgeResult.status: JudgeStatus.Running
}, synchronize_session=False)
try:
judge.tests(task, start_test_func, judge_test_cmpl)
except Exception:
LOGGER.warning(
'test failed (submission_id={})'.format(task.id), exc_info=True)
judge_results.append((JudgeStatus.InternalError, None, None))
def get_submission_status() -> JudgeStatus:
judge_status = set([s for s, _, _ in judge_results])
if len(judge_status) == 1:
return list(judge_status)[0]
for x in (JudgeStatus.InternalError, JudgeStatus.RuntimeError,
JudgeStatus.WrongAnswer, JudgeStatus.MemoryLimitExceeded,
JudgeStatus.TimeLimitExceeded,
JudgeStatus.OutputLimitExceeded):
if x in judge_status:
return x
return JudgeStatus.InternalError # pragma: no cover
def max_value(lst: List[T]) -> Optional[T]:
ret = None
for x in lst:
if x is None:
continue
if ret is None or ret < x:
ret = x
return ret
submission_status = get_submission_status()
max_time = max_value([t for _, t, _ in judge_results])
max_memory = max_value([m for _, _, m in judge_results])
with transaction() as s:
s.query(Submission).filter(
Submission.contest_id == task.contest_id,
Submission.problem_id == task.problem_id,
Submission.id == task.id
).update({
Submission.status: submission_status,
Submission.compile_time: task.compile_time,
Submission.max_time: max_time,
Submission.max_memory: max_memory,
}, synchronize_session=False)
return submission_status
def _update_submission_status(
s: scoped_session, task: JudgeTask, status: JudgeStatus
) -> JudgeStatus:
s.query(Submission).filter(
Submission.contest_id == task.contest_id,
Submission.problem_id == task.problem_id,
Submission.id == task.id,
).update({Submission.status: status}, synchronize_session=False)
return status | 0.745028 | 0.23014 |
import re
from markdown.blockprocessors import ParagraphProcessor
from markdown.extensions import Extension
from markdown.util import etree
from .utils import markdown_ordered_dict_prepend
class EmbeddingProcessor(ParagraphProcessor):
RE = re.compile(r'!\[embed(\?(?P<params>.*))?\]\((?P<url>[^\)]+)\)')
YOUTUBE_LINK_PATTERN = re.compile(r'youtu\.?be')
VIMEO_LINK_PATTERN = re.compile(r'(https?://)?(www.)?(player.)?vimeo.com/([a-z]*/)*(?P<id>[0-9]{6,11})[?]?.*')
YOUTUBE_PATTERNS = [
re.compile(r'youtu\.be/(?P<id>\w+)'), # youtu.be/<id>
re.compile(r'(\?|&)v=(?P<id>\w+)'), # ?v=<id> | &v=<id>
re.compile(r'embed/(?P<id>\w+)'), # embed/<id>
re.compile(r'/v/(?P<id>\w+)'), # /v/<id>
]
YOUTUBE_EMBED_TEMPLATE = 'https://www.youtube.com/embed/%s'
VIMEO_EMBED_TEMPLATE = 'https://player.vimeo.com/video/%s'
def process_embed_url(self, url):
youtube_match = self.YOUTUBE_LINK_PATTERN.search(url)
if youtube_match:
for pattern in self.YOUTUBE_PATTERNS:
match = pattern.search(url)
if not match:
continue
return self.YOUTUBE_EMBED_TEMPLATE % match.group('id')
return url
vimeo_match = self.VIMEO_LINK_PATTERN.search(url)
if vimeo_match:
return self.VIMEO_EMBED_TEMPLATE % vimeo_match.group('id')
return url
def test(self, parent, block):
return bool(self.RE.match(block))
def run(self, parent, blocks):
block = blocks.pop(0)
block_match = self.RE.match(block)
el = etree.SubElement(parent, 'iframe')
el.set('class', 'embed')
el.set('webkitallowfullscreen', '')
el.set('mozallowfullscreen', '')
el.set('allowfullscreen', '')
el.set('frameborder', '0')
el.set('width', '100%')
el.set('src', self.process_embed_url(block_match.groupdict()['url']))
params = block_match.groupdict()['params'] or ''
for param in params.split('&'):
param = param.split('=')
if len(param) == 2:
el.set(*param)
class EmbeddingExtension(Extension):
def extendMarkdown(self, md, md_globals):
# Inserting to the top of inline patterns to avoid conflicts with images pattern
markdown_ordered_dict_prepend(md.parser.blockprocessors, 'embed', EmbeddingProcessor(md.parser))
def makeExtension(*args, **kwargs):
return EmbeddingExtension(*args, **kwargs) | iwg_blog/markdown_extensions/embedding.py | import re
from markdown.blockprocessors import ParagraphProcessor
from markdown.extensions import Extension
from markdown.util import etree
from .utils import markdown_ordered_dict_prepend
class EmbeddingProcessor(ParagraphProcessor):
RE = re.compile(r'!\[embed(\?(?P<params>.*))?\]\((?P<url>[^\)]+)\)')
YOUTUBE_LINK_PATTERN = re.compile(r'youtu\.?be')
VIMEO_LINK_PATTERN = re.compile(r'(https?://)?(www.)?(player.)?vimeo.com/([a-z]*/)*(?P<id>[0-9]{6,11})[?]?.*')
YOUTUBE_PATTERNS = [
re.compile(r'youtu\.be/(?P<id>\w+)'), # youtu.be/<id>
re.compile(r'(\?|&)v=(?P<id>\w+)'), # ?v=<id> | &v=<id>
re.compile(r'embed/(?P<id>\w+)'), # embed/<id>
re.compile(r'/v/(?P<id>\w+)'), # /v/<id>
]
YOUTUBE_EMBED_TEMPLATE = 'https://www.youtube.com/embed/%s'
VIMEO_EMBED_TEMPLATE = 'https://player.vimeo.com/video/%s'
def process_embed_url(self, url):
youtube_match = self.YOUTUBE_LINK_PATTERN.search(url)
if youtube_match:
for pattern in self.YOUTUBE_PATTERNS:
match = pattern.search(url)
if not match:
continue
return self.YOUTUBE_EMBED_TEMPLATE % match.group('id')
return url
vimeo_match = self.VIMEO_LINK_PATTERN.search(url)
if vimeo_match:
return self.VIMEO_EMBED_TEMPLATE % vimeo_match.group('id')
return url
def test(self, parent, block):
return bool(self.RE.match(block))
def run(self, parent, blocks):
block = blocks.pop(0)
block_match = self.RE.match(block)
el = etree.SubElement(parent, 'iframe')
el.set('class', 'embed')
el.set('webkitallowfullscreen', '')
el.set('mozallowfullscreen', '')
el.set('allowfullscreen', '')
el.set('frameborder', '0')
el.set('width', '100%')
el.set('src', self.process_embed_url(block_match.groupdict()['url']))
params = block_match.groupdict()['params'] or ''
for param in params.split('&'):
param = param.split('=')
if len(param) == 2:
el.set(*param)
class EmbeddingExtension(Extension):
def extendMarkdown(self, md, md_globals):
# Inserting to the top of inline patterns to avoid conflicts with images pattern
markdown_ordered_dict_prepend(md.parser.blockprocessors, 'embed', EmbeddingProcessor(md.parser))
def makeExtension(*args, **kwargs):
return EmbeddingExtension(*args, **kwargs) | 0.390708 | 0.127598 |
import re
import unittest
import pytest
from cognite.client.data_classes import ContextualizationJob
from cognite.client.exceptions import ModelFailedException
from cognite.experimental import CogniteClient
from cognite.experimental.data_classes import PNIDDetectionList, PNIDDetectResults
from tests.utils import jsgz_load
COGNITE_CLIENT = CogniteClient()
PNIDAPI = COGNITE_CLIENT.pnid_parsing
@pytest.fixture
def mock_detect(rsps):
response_body = {"jobId": 789, "status": "Queued"}
rsps.add(
rsps.POST,
PNIDAPI._get_base_url_with_base_path() + PNIDAPI._RESOURCE_PATH + "/detect",
status=200,
json=response_body,
)
yield rsps
@pytest.fixture
def mock_extract_pattern(rsps):
response_body = {"jobId": 456, "status": "Queued"}
rsps.add(
rsps.POST,
PNIDAPI._get_base_url_with_base_path() + PNIDAPI._RESOURCE_PATH + "/extractpattern",
status=200,
json=response_body,
)
yield rsps
@pytest.fixture
def mock_convert(rsps):
response_body = {"jobId": 345, "status": "Queued"}
rsps.add(
rsps.POST,
PNIDAPI._get_base_url_with_base_path() + PNIDAPI._RESOURCE_PATH + "/convert",
status=200,
json=response_body,
)
yield rsps
@pytest.fixture
def mock_status_detect_ok(rsps):
response_body = {
"jobId": 123,
"status": "Completed",
"items": [
{"text": "a", "boundingBox": {"xMin": 0, "xMax": 1, "yMin": 0, "yMax": 1}, "entities": [{"name": "a"}]}
],
"fileId": 123432423,
"fileExternalId": "123432423",
}
rsps.add(
rsps.GET,
re.compile(PNIDAPI._get_base_url_with_base_path() + PNIDAPI._RESOURCE_PATH + "/detect" + "/\\d+"),
status=200,
json=response_body,
)
yield rsps
@pytest.fixture
def mock_status_pattern_ok(rsps):
response_body = {
"jobId": 456,
"status": "Completed",
"items": [],
"fileId": 123432423,
"fileExternalId": "123432423",
}
rsps.add(
rsps.GET,
re.compile(PNIDAPI._get_base_url_with_base_path() + PNIDAPI._RESOURCE_PATH + "/extractpattern" + "/\\d+"),
status=200,
json=response_body,
)
yield rsps
@pytest.fixture
def mock_status_convert_ok(rsps):
response_body = {
"jobId": 123,
"status": "Completed",
"svgUrl": "svg.url.com",
"pngUrl": "png.url.com",
"fileId": 123432423,
"fileExternalId": "123432423",
}
rsps.add(
rsps.GET,
re.compile(PNIDAPI._get_base_url_with_base_path() + PNIDAPI._RESOURCE_PATH + "/convert" + "/\\d+"),
status=200,
json=response_body,
)
yield rsps
@pytest.fixture
def mock_status_failed(rsps):
response_body = {"jobId": 123, "status": "Failed", "errorMessage": "error message"}
rsps.add(
rsps.GET,
re.compile(PNIDAPI._get_base_url_with_base_path() + PNIDAPI._RESOURCE_PATH + "/\\d+"),
status=200,
json=response_body,
)
yield rsps
class TestPNIDParsing:
def test_detect_entities_str(self, mock_detect, mock_status_detect_ok):
entities = ["a", "b"]
file_id = 123432423
job = PNIDAPI.detect(
file_id=file_id, entities=entities, name_mapping={"a": "c"}, partial_match=False, min_tokens=3
)
assert isinstance(job, ContextualizationJob)
assert "items" in job.result
assert 789 == job.job_id
assert "Completed" == job.status
n_detect_calls = 0
n_status_calls = 0
for call in mock_detect.calls:
if "detect" in call.request.url and call.request.method == "POST":
n_detect_calls += 1
assert {
"entities": entities,
"fileId": file_id,
"nameMapping": {"a": "c"},
"partialMatch": False,
"minTokens": 3,
"searchField": "name",
} == jsgz_load(call.request.body)
else:
n_status_calls += 1
assert "/789" in call.request.url
assert 1 == n_detect_calls
assert 1 == n_status_calls
def test_detect_entities_dict(self, mock_detect, mock_status_detect_ok):
entities = [{"name": "a"}, {"name": "b"}]
file_id = 123432423
job = PNIDAPI.detect(
file_id=file_id, entities=entities, name_mapping={"a": "c"}, partial_match=False, min_tokens=3
)
assert isinstance(job, ContextualizationJob)
assert "items" in job.result
assert 789 == job.job_id
assert "Completed" == job.status
assert 1 == len(job.matches)
assert [{"name": "a"}] == job.matches[0].entities
assert "a" == job.matches[0].text
n_detect_calls = 0
n_status_calls = 0
for call in mock_detect.calls:
if "detect" in call.request.url and call.request.method == "POST":
n_detect_calls += 1
assert {
"entities": [{"name": "a"}, {"name": "b"}],
"fileId": file_id,
"nameMapping": {"a": "c"},
"partialMatch": False,
"minTokens": 3,
"searchField": "name",
} == jsgz_load(call.request.body)
else:
n_status_calls += 1
assert "/789" in call.request.url
assert 1 == n_detect_calls
assert 1 == n_status_calls
def test_extract_pattern(self, mock_extract_pattern, mock_status_pattern_ok):
patterns = ["ab{1,2}"]
file_id = 123432423
job = PNIDAPI.extract_pattern(file_id=file_id, patterns=patterns)
assert isinstance(job, ContextualizationJob)
assert "Queued" == job.status
assert "items" in job.result
assert "Completed" == job.status
assert 456 == job.job_id
n_extract_pattern_calls = 0
n_status_calls = 0
for call in mock_extract_pattern.calls:
if "extractpattern" in call.request.url and call.request.method == "POST":
n_extract_pattern_calls += 1
assert {"patterns": patterns, "fileId": file_id} == jsgz_load(call.request.body)
else:
n_status_calls += 1
assert "/456" in call.request.url
assert 1 == n_extract_pattern_calls
assert 1 == n_status_calls
def test_convert(self, mock_convert, mock_status_convert_ok):
items = [
{
"text": "21-PT-1019",
"boundingBox": {
"xMax": 0.5895183277794608,
"xMin": 0.573159648591336,
"yMax": 0.3737254901960784,
"yMin": 0.3611764705882352,
},
}
]
file_id = 123432423
job = PNIDAPI.convert(file_id=file_id, items=items, grayscale=True)
assert isinstance(job, ContextualizationJob)
assert "Queued" == job.status
assert "svgUrl" in job.result
assert "Completed" == job.status
assert 345 == job.job_id
n_convert_calls = 0
n_status_calls = 0
for call in mock_convert.calls:
if "convert" in call.request.url and call.request.method == "POST":
n_convert_calls += 1
assert {"fileId": file_id, "items": items, "grayscale": True,} == jsgz_load(call.request.body)
else:
n_status_calls += 1
assert "/345" in call.request.url
assert 1 == n_convert_calls
assert 1 == n_status_calls
def test_file_external_id(self, mock_detect, mock_status_detect_ok):
entities = [{"name": "a"}, {"name": "b"}]
file_external_id = "123432423"
job = PNIDAPI.detect(
file_external_id=file_external_id,
entities=entities,
name_mapping={"a": "c"},
partial_match=False,
min_tokens=3,
)
assert isinstance(job, PNIDDetectResults)
assert isinstance(job._repr_html_(), str)
assert "fileId" in job.result
assert "fileExternalId" in job.result
assert file_external_id == job.file_external_id
assert isinstance(job.matches, PNIDDetectionList)
assert "Completed" == job.status | tests/tests_unit/test_contextualization/test_pnid_parsing.py | import re
import unittest
import pytest
from cognite.client.data_classes import ContextualizationJob
from cognite.client.exceptions import ModelFailedException
from cognite.experimental import CogniteClient
from cognite.experimental.data_classes import PNIDDetectionList, PNIDDetectResults
from tests.utils import jsgz_load
COGNITE_CLIENT = CogniteClient()
PNIDAPI = COGNITE_CLIENT.pnid_parsing
@pytest.fixture
def mock_detect(rsps):
response_body = {"jobId": 789, "status": "Queued"}
rsps.add(
rsps.POST,
PNIDAPI._get_base_url_with_base_path() + PNIDAPI._RESOURCE_PATH + "/detect",
status=200,
json=response_body,
)
yield rsps
@pytest.fixture
def mock_extract_pattern(rsps):
response_body = {"jobId": 456, "status": "Queued"}
rsps.add(
rsps.POST,
PNIDAPI._get_base_url_with_base_path() + PNIDAPI._RESOURCE_PATH + "/extractpattern",
status=200,
json=response_body,
)
yield rsps
@pytest.fixture
def mock_convert(rsps):
response_body = {"jobId": 345, "status": "Queued"}
rsps.add(
rsps.POST,
PNIDAPI._get_base_url_with_base_path() + PNIDAPI._RESOURCE_PATH + "/convert",
status=200,
json=response_body,
)
yield rsps
@pytest.fixture
def mock_status_detect_ok(rsps):
response_body = {
"jobId": 123,
"status": "Completed",
"items": [
{"text": "a", "boundingBox": {"xMin": 0, "xMax": 1, "yMin": 0, "yMax": 1}, "entities": [{"name": "a"}]}
],
"fileId": 123432423,
"fileExternalId": "123432423",
}
rsps.add(
rsps.GET,
re.compile(PNIDAPI._get_base_url_with_base_path() + PNIDAPI._RESOURCE_PATH + "/detect" + "/\\d+"),
status=200,
json=response_body,
)
yield rsps
@pytest.fixture
def mock_status_pattern_ok(rsps):
response_body = {
"jobId": 456,
"status": "Completed",
"items": [],
"fileId": 123432423,
"fileExternalId": "123432423",
}
rsps.add(
rsps.GET,
re.compile(PNIDAPI._get_base_url_with_base_path() + PNIDAPI._RESOURCE_PATH + "/extractpattern" + "/\\d+"),
status=200,
json=response_body,
)
yield rsps
@pytest.fixture
def mock_status_convert_ok(rsps):
response_body = {
"jobId": 123,
"status": "Completed",
"svgUrl": "svg.url.com",
"pngUrl": "png.url.com",
"fileId": 123432423,
"fileExternalId": "123432423",
}
rsps.add(
rsps.GET,
re.compile(PNIDAPI._get_base_url_with_base_path() + PNIDAPI._RESOURCE_PATH + "/convert" + "/\\d+"),
status=200,
json=response_body,
)
yield rsps
@pytest.fixture
def mock_status_failed(rsps):
response_body = {"jobId": 123, "status": "Failed", "errorMessage": "error message"}
rsps.add(
rsps.GET,
re.compile(PNIDAPI._get_base_url_with_base_path() + PNIDAPI._RESOURCE_PATH + "/\\d+"),
status=200,
json=response_body,
)
yield rsps
class TestPNIDParsing:
def test_detect_entities_str(self, mock_detect, mock_status_detect_ok):
entities = ["a", "b"]
file_id = 123432423
job = PNIDAPI.detect(
file_id=file_id, entities=entities, name_mapping={"a": "c"}, partial_match=False, min_tokens=3
)
assert isinstance(job, ContextualizationJob)
assert "items" in job.result
assert 789 == job.job_id
assert "Completed" == job.status
n_detect_calls = 0
n_status_calls = 0
for call in mock_detect.calls:
if "detect" in call.request.url and call.request.method == "POST":
n_detect_calls += 1
assert {
"entities": entities,
"fileId": file_id,
"nameMapping": {"a": "c"},
"partialMatch": False,
"minTokens": 3,
"searchField": "name",
} == jsgz_load(call.request.body)
else:
n_status_calls += 1
assert "/789" in call.request.url
assert 1 == n_detect_calls
assert 1 == n_status_calls
def test_detect_entities_dict(self, mock_detect, mock_status_detect_ok):
entities = [{"name": "a"}, {"name": "b"}]
file_id = 123432423
job = PNIDAPI.detect(
file_id=file_id, entities=entities, name_mapping={"a": "c"}, partial_match=False, min_tokens=3
)
assert isinstance(job, ContextualizationJob)
assert "items" in job.result
assert 789 == job.job_id
assert "Completed" == job.status
assert 1 == len(job.matches)
assert [{"name": "a"}] == job.matches[0].entities
assert "a" == job.matches[0].text
n_detect_calls = 0
n_status_calls = 0
for call in mock_detect.calls:
if "detect" in call.request.url and call.request.method == "POST":
n_detect_calls += 1
assert {
"entities": [{"name": "a"}, {"name": "b"}],
"fileId": file_id,
"nameMapping": {"a": "c"},
"partialMatch": False,
"minTokens": 3,
"searchField": "name",
} == jsgz_load(call.request.body)
else:
n_status_calls += 1
assert "/789" in call.request.url
assert 1 == n_detect_calls
assert 1 == n_status_calls
def test_extract_pattern(self, mock_extract_pattern, mock_status_pattern_ok):
patterns = ["ab{1,2}"]
file_id = 123432423
job = PNIDAPI.extract_pattern(file_id=file_id, patterns=patterns)
assert isinstance(job, ContextualizationJob)
assert "Queued" == job.status
assert "items" in job.result
assert "Completed" == job.status
assert 456 == job.job_id
n_extract_pattern_calls = 0
n_status_calls = 0
for call in mock_extract_pattern.calls:
if "extractpattern" in call.request.url and call.request.method == "POST":
n_extract_pattern_calls += 1
assert {"patterns": patterns, "fileId": file_id} == jsgz_load(call.request.body)
else:
n_status_calls += 1
assert "/456" in call.request.url
assert 1 == n_extract_pattern_calls
assert 1 == n_status_calls
def test_convert(self, mock_convert, mock_status_convert_ok):
items = [
{
"text": "21-PT-1019",
"boundingBox": {
"xMax": 0.5895183277794608,
"xMin": 0.573159648591336,
"yMax": 0.3737254901960784,
"yMin": 0.3611764705882352,
},
}
]
file_id = 123432423
job = PNIDAPI.convert(file_id=file_id, items=items, grayscale=True)
assert isinstance(job, ContextualizationJob)
assert "Queued" == job.status
assert "svgUrl" in job.result
assert "Completed" == job.status
assert 345 == job.job_id
n_convert_calls = 0
n_status_calls = 0
for call in mock_convert.calls:
if "convert" in call.request.url and call.request.method == "POST":
n_convert_calls += 1
assert {"fileId": file_id, "items": items, "grayscale": True,} == jsgz_load(call.request.body)
else:
n_status_calls += 1
assert "/345" in call.request.url
assert 1 == n_convert_calls
assert 1 == n_status_calls
def test_file_external_id(self, mock_detect, mock_status_detect_ok):
entities = [{"name": "a"}, {"name": "b"}]
file_external_id = "123432423"
job = PNIDAPI.detect(
file_external_id=file_external_id,
entities=entities,
name_mapping={"a": "c"},
partial_match=False,
min_tokens=3,
)
assert isinstance(job, PNIDDetectResults)
assert isinstance(job._repr_html_(), str)
assert "fileId" in job.result
assert "fileExternalId" in job.result
assert file_external_id == job.file_external_id
assert isinstance(job.matches, PNIDDetectionList)
assert "Completed" == job.status | 0.382372 | 0.204521 |
from s3iamcli.cli_response import CLIResponse
class UserLoginProfile:
def __init__(self, iam_client, cli_args):
self.iam_client = iam_client
self.cli_args = cli_args
def create(self):
if(self.cli_args.name is None):
message = "User name is required for user login-profile creation"
CLIResponse.send_error_out(message)
if(self.cli_args.password is None):
message = "User password is required for user login-profile creation"
CLIResponse.send_error_out(message)
user_args = {}
user_args['UserName'] = self.cli_args.name
user_args['Password'] = self.cli_args.password
user_args['PasswordResetRequired'] = False
if(self.cli_args.password_reset_required):
user_args['PasswordResetRequired'] = True
try:
result = self.iam_client.create_login_profile(**user_args)
except Exception as ex:
message = "Failed to create userloginprofile.\n"
message += str(ex)
CLIResponse.send_error_out(message)
profile = (result['LoginProfile'])
print("Login Profile %s %s %s" % (profile['CreateDate'], profile['PasswordResetRequired'], profile['UserName']))
def get(self):
if(self.cli_args.name is None):
message = "User name is required for getting Login Profile"
CLIResponse.send_error_out(message)
user_args = {}
user_args['UserName'] = self.cli_args.name
try:
result = self.iam_client.get_login_profile(**user_args)
except Exception as ex:
message = "Failed to get Login Profile for "+ user_args['UserName'] + "\n"
message += str(ex)
CLIResponse.send_error_out(message)
profile = (result['LoginProfile'])
print("Login Profile %s %s %s" % (profile['CreateDate'], profile['PasswordResetRequired'], profile['UserName']))
def update(self):
if(self.cli_args.name is None):
message = "UserName is required for UpdateUserLoginProfile"
CLIResponse.send_error_out(message)
user_args = {}
user_args['UserName'] = self.cli_args.name
if(not self.cli_args.password is None):
user_args['Password'] = <PASSWORD>
user_args['PasswordResetRequired'] = False
if(self.cli_args.password_reset_required):
user_args['PasswordResetRequired'] = True
if(self.cli_args.password is None) and (self.cli_args.password_reset_required is False) and (self.cli_args.no_password_reset_required is False):
message = "Please provide password or password-reset flag"
CLIResponse.send_error_out(message)
try:
result = self.iam_client.update_login_profile(**user_args)
message = "UpdateUserLoginProfile is successful"
CLIResponse.send_success_out(message)
except Exception as ex:
message = "UpdateUserLoginProfile failed\n"
message += str(ex)
CLIResponse.send_error_out(message)
def changepassword(self):
if(self.cli_args.old_password is None):
message = "OldPassword is required for changing user password"
CLIResponse.send_error_out(message)
if(self.cli_args.new_password is None):
message = "NewPassword is required for changing user password"
CLIResponse.send_error_out(message)
user_args = {}
user_args['OldPassword'] = self.cli_args.old_password
user_args['NewPassword'] = self.cli_args.new_password
try:
result = self.iam_client.change_password(**user_args)
message = "ChangePassword is successful"
CLIResponse.send_success_out(message)
except Exception as ex:
message = "ChangePassword failed\n"
message += str(ex)
CLIResponse.send_error_out(message) | auth-utils/s3iamcli/s3iamcli/userloginprofile.py |
from s3iamcli.cli_response import CLIResponse
class UserLoginProfile:
def __init__(self, iam_client, cli_args):
self.iam_client = iam_client
self.cli_args = cli_args
def create(self):
if(self.cli_args.name is None):
message = "User name is required for user login-profile creation"
CLIResponse.send_error_out(message)
if(self.cli_args.password is None):
message = "User password is required for user login-profile creation"
CLIResponse.send_error_out(message)
user_args = {}
user_args['UserName'] = self.cli_args.name
user_args['Password'] = self.cli_args.password
user_args['PasswordResetRequired'] = False
if(self.cli_args.password_reset_required):
user_args['PasswordResetRequired'] = True
try:
result = self.iam_client.create_login_profile(**user_args)
except Exception as ex:
message = "Failed to create userloginprofile.\n"
message += str(ex)
CLIResponse.send_error_out(message)
profile = (result['LoginProfile'])
print("Login Profile %s %s %s" % (profile['CreateDate'], profile['PasswordResetRequired'], profile['UserName']))
def get(self):
if(self.cli_args.name is None):
message = "User name is required for getting Login Profile"
CLIResponse.send_error_out(message)
user_args = {}
user_args['UserName'] = self.cli_args.name
try:
result = self.iam_client.get_login_profile(**user_args)
except Exception as ex:
message = "Failed to get Login Profile for "+ user_args['UserName'] + "\n"
message += str(ex)
CLIResponse.send_error_out(message)
profile = (result['LoginProfile'])
print("Login Profile %s %s %s" % (profile['CreateDate'], profile['PasswordResetRequired'], profile['UserName']))
def update(self):
if(self.cli_args.name is None):
message = "UserName is required for UpdateUserLoginProfile"
CLIResponse.send_error_out(message)
user_args = {}
user_args['UserName'] = self.cli_args.name
if(not self.cli_args.password is None):
user_args['Password'] = <PASSWORD>
user_args['PasswordResetRequired'] = False
if(self.cli_args.password_reset_required):
user_args['PasswordResetRequired'] = True
if(self.cli_args.password is None) and (self.cli_args.password_reset_required is False) and (self.cli_args.no_password_reset_required is False):
message = "Please provide password or password-reset flag"
CLIResponse.send_error_out(message)
try:
result = self.iam_client.update_login_profile(**user_args)
message = "UpdateUserLoginProfile is successful"
CLIResponse.send_success_out(message)
except Exception as ex:
message = "UpdateUserLoginProfile failed\n"
message += str(ex)
CLIResponse.send_error_out(message)
def changepassword(self):
if(self.cli_args.old_password is None):
message = "OldPassword is required for changing user password"
CLIResponse.send_error_out(message)
if(self.cli_args.new_password is None):
message = "NewPassword is required for changing user password"
CLIResponse.send_error_out(message)
user_args = {}
user_args['OldPassword'] = self.cli_args.old_password
user_args['NewPassword'] = self.cli_args.new_password
try:
result = self.iam_client.change_password(**user_args)
message = "ChangePassword is successful"
CLIResponse.send_success_out(message)
except Exception as ex:
message = "ChangePassword failed\n"
message += str(ex)
CLIResponse.send_error_out(message) | 0.223547 | 0.039379 |
import os
import sys
import time
import click
import signal
import requests
from requests.compat import urljoin
from prometheus_client import start_http_server
from prometheus_client.core import REGISTRY
from ecs_container_exporter.utils import create_metric, task_metric_tags, TASK_CONTAINER_NAME_TAG
from ecs_container_exporter.cpu_metrics import calculate_cpu_metrics
from ecs_container_exporter.memory_metrics import calculate_memory_metrics
from ecs_container_exporter.io_metrics import calculate_io_metrics
from ecs_container_exporter.network_metrics import calculate_network_metrics
import logging
log = logging.getLogger(__name__)
class ECSContainerExporter(object):
include_containers = []
exclude_containers = []
# 1 - healthy, 0 - unhealthy
exporter_status = 1
# initial task metrics that do not change
static_task_metrics = []
# individual container tags
task_container_tags = {}
# task limits
task_cpu_limit = 0
task_mem_limit = 0
# individual container limits
task_container_limits = {}
# the Task level metrics are included by default
include_container_ids = [TASK_CONTAINER_NAME_TAG]
def __init__(self, metadata_url=None, include_containers=None, exclude_containers=None, http_timeout=60):
self.task_metadata_url = urljoin(metadata_url + '/', 'task')
# For testing
# self.task_stats_url = urljoin(metadata_url + '/', 'stats')
self.task_stats_url = urljoin(metadata_url + '/', 'task/stats')
if exclude_containers:
self.exclude_containers = exclude_containers
if include_containers:
self.include_containers = include_containers
self.http_timeout = http_timeout
self.log = logging.getLogger(__name__)
self.log.info(f'Exporter initialized with '
f'metadata_url: {self.task_metadata_url}, '
f'task_stats_url: {self.task_stats_url}, '
f'http_timeout: {self.http_timeout}, '
f'include_containers: {self.include_containers}, '
f'exclude_containers: {self.exclude_containers}')
self.collect_static_metrics()
REGISTRY.register(self)
def collect_static_metrics(self):
while True:
# some wait for the task to be in running state
time.sleep(5)
try:
response = requests.get(self.task_metadata_url, timeout=self.http_timeout)
except requests.exceptions.Timeout:
msg = f'Metadata url {self.task_metadata_url} timed out after {self.http_timeout} seconds'
self.exporter_status = 0
self.log.exception(msg)
continue
except requests.exceptions.RequestException:
msg = f'Error fetching from Metadata url {self.task_metadata_url}'
self.exporter_status = 0
self.log.exception(msg)
continue
if response.status_code != 200:
msg = f'Url {self.task_metadata_url} responded with {response.status_code} HTTP code'
self.exporter_status = 0
self.log.error(msg)
continue
try:
metadata = response.json()
except ValueError:
msg = f'Cannot decode metadata url {self.task_metadata_url} response {response.text}'
self.exporter_status = 0
self.log.error(msg, exc_info=True)
continue
if metadata.get('KnownStatus') != 'RUNNING':
self.log.warning(f'ECS Task not yet in RUNNING state, current status is: {metadata["KnownStatus"]}')
continue
else:
break
self.log.debug(f'Discovered Task metadata: {metadata}')
self.parse_task_metadata(metadata)
def parse_task_metadata(self, metadata):
self.static_task_metrics = []
self.task_container_tags = {}
self.task_container_limits = {}
# task cpu/mem limit
task_tag = task_metric_tags()
self.task_cpu_limit, self.task_mem_limit = self.cpu_mem_limit(metadata)
metric = create_metric('cpu_limit', self.task_cpu_limit, task_tag, 'gauge', 'Task CPU limit')
self.static_task_metrics.append(metric)
metric = create_metric('mem_limit', self.task_mem_limit, task_tag, 'gauge', 'Task Memory limit')
self.static_task_metrics.append(metric)
# container tags and limits
for container in metadata['Containers']:
container_id = container['DockerId']
container_name = container['Name']
if self.should_process_container(container_name,
self.include_containers,
self.exclude_containers):
self.log.info(f'Processing stats for container: {container_name} - {container_id}')
self.include_container_ids.append(container_id)
else:
self.log.info(f'Excluding container: {container_name} - {container_id} as per exclusion')
self.task_container_tags[container_id] = {'container_name': container_name}
# container cpu/mem limit
cpu_value, mem_value = self.cpu_mem_limit(container)
self.task_container_limits[container_id] = {'cpu': cpu_value,
'mem': mem_value}
if container_id in self.include_container_ids:
metric = create_metric('cpu_limit', cpu_value, self.task_container_tags[container_id],
'gauge', 'Limit in percent of the CPU usage')
self.static_task_metrics.append(metric)
metric = create_metric('mem_limit', mem_value, self.task_container_tags[container_id],
'gauge', 'Limit in memory usage in MBs')
self.static_task_metrics.append(metric)
def should_process_container(self, container_name, include_containers, exclude_containers):
if container_name in exclude_containers:
return False
else:
if include_containers:
if container_name in include_containers:
return True
else:
return False
else:
return True
def cpu_mem_limit(self, metadata):
# normalise to `cpu shares`
cpu_limit = metadata.get('Limits', {}).get('CPU', 0) * 1024
mem_limit = metadata.get('Limits', {}).get('Memory', 0)
return (
cpu_limit, mem_limit
)
# every http request gets data from here
def collect(self):
container_metrics = self.collect_container_metrics()
# exporter status metric
metric = create_metric('exporter_status', self.exporter_status, {},
'gauge', 'Exporter Status')
container_metrics.append(metric)
return self.static_task_metrics + container_metrics
def collect_container_metrics(self):
metrics = []
try:
request = requests.get(self.task_stats_url)
except requests.exceptions.Timeout:
msg = f'Task stats url {self.task_stats_url} timed out after {self.http_timeout} seconds'
self.exporter_status = 0
self.log.warning(msg)
return metrics
except requests.exceptions.RequestException:
msg = f'Error fetching from task stats url {self.task_stats_url}'
self.exporter_status = 0
self.log.warning(msg)
return metrics
if request.status_code != 200:
msg = f'Url {self.task_stats_url} responded with {request.status_code} HTTP code'
self.exporter_status = 0
self.log.error(msg)
return metrics
try:
stats = request.json()
self.exporter_status = 1
except ValueError:
msg = 'Cannot decode task stats {self.task_stats_url} url response {request.text}'
self.exporter_status = 0
self.log.warning(msg, exc_info=True)
return metrics
container_metrics_all = self.parse_container_metadata(stats,
self.task_cpu_limit,
self.task_container_limits,
self.task_container_tags)
# flatten and filter excluded containers
filtered_container_metrics = []
for metrics_by_container in container_metrics_all:
for container_id, metrics in metrics_by_container.items():
if container_id in self.include_container_ids:
filtered_container_metrics.extend(metrics)
return filtered_container_metrics
def parse_container_metadata(self, stats, task_cpu_limit,
task_container_limits, task_container_tags):
"""
More details on the exposed docker metrics
https://github.com/moby/moby/blob/c1d090fcc88fa3bc5b804aead91ec60e30207538/api/types/stats.go
"""
container_metrics_all = []
try:
# CPU metrics
container_metrics_all.append(
calculate_cpu_metrics(stats,
task_cpu_limit,
task_container_limits,
task_container_tags)
)
# Memory metrics
container_metrics_all.append(
calculate_memory_metrics(stats, task_container_tags)
)
# I/O metrics
container_metrics_all.append(
calculate_io_metrics(stats, task_container_tags)
)
# network metrics
container_metrics_all.append(
calculate_network_metrics(stats, task_container_tags)
)
except Exception as e:
self.log.warning("Could not retrieve metrics for {}: {}".format(task_container_tags, e), exc_info=True)
self.exporter_status = 1
return container_metrics_all
def shutdown(sig_number, frame):
log.info("Recevied signal {}, Shuttting down".format(sig_number))
sys.exit(0)
# CLI entry point: validates configuration, wires up signal handling and
# logging, registers the exporter and serves the Prometheus endpoint.
@click.command()
@click.option('--metadata-url', envvar='ECS_CONTAINER_METADATA_URI', type=str, default=None,
              help='Override ECS Metadata Url')
@click.option('--exporter-port', envvar='EXPORTER_PORT', type=int, default=9545,
              help='Change exporter listen port')
@click.option('--include', envvar='INCLUDE', type=str, default=None,
              help='Comma seperated list of container names to include, or use env var INCLUDE')
@click.option('--exclude', envvar='EXCLUDE', type=str, default=None,
              help='Comma seperated list of container names to exclude, or use env var EXCLUDE')
@click.option('--log-level', envvar='LOG_LEVEL', type=str, default='INFO',
              help='Log level, default: INFO')
def main(
    metadata_url=None, exporter_port=9545, include=None, exclude=None, log_level='INFO'
):
    # The metadata url is mandatory: it is how the exporter finds its task.
    if not metadata_url:
        sys.exit('AWS environment variable ECS_CONTAINER_METADATA_URI not found '
                 'nor is --metadata-url set')
    # Exit cleanly when the container is stopped (SIGTERM) or interrupted.
    signal.signal(signal.SIGTERM, shutdown)
    signal.signal(signal.SIGINT, shutdown)
    logging.basicConfig(
        format='%(asctime)s:%(levelname)s:%(message)s',
    )
    logging.getLogger().setLevel(
        getattr(logging, log_level.upper())
    )
    # CLI/env values arrive as comma-separated strings; split into lists.
    if exclude:
        exclude = exclude.strip().split(',')
    if include:
        include = include.strip().split(',')
    # Constructing the exporter registers it with the Prometheus REGISTRY,
    # so the instance itself does not need to be kept.
    ECSContainerExporter(metadata_url=metadata_url,
                         include_containers=include,
                         exclude_containers=exclude)
    # Start up the server to expose the metrics.
    start_http_server(int(exporter_port))
    # Keep the main thread alive; the HTTP server runs in the background.
    while True:
        time.sleep(10)
if __name__ == '__main__':
main() | ecs_container_exporter/main.py | import os
import sys
import time
import click
import signal
import requests
from requests.compat import urljoin
from prometheus_client import start_http_server
from prometheus_client.core import REGISTRY
from ecs_container_exporter.utils import create_metric, task_metric_tags, TASK_CONTAINER_NAME_TAG
from ecs_container_exporter.cpu_metrics import calculate_cpu_metrics
from ecs_container_exporter.memory_metrics import calculate_memory_metrics
from ecs_container_exporter.io_metrics import calculate_io_metrics
from ecs_container_exporter.network_metrics import calculate_network_metrics
import logging
log = logging.getLogger(__name__)
class ECSContainerExporter(object):
    """Prometheus custom collector exposing ECS task/container metrics.

    On construction it blocks until the ECS task metadata endpoint reports
    the task as RUNNING, records the static (unchanging) task metrics, and
    registers itself with the Prometheus REGISTRY; per-scrape metrics are
    produced by collect().
    """

    # Class-level defaults; the mutable ones are re-bound per instance in
    # __init__ so instances never share (and mutate) the same objects.
    include_containers = []
    exclude_containers = []
    # 1 - healthy, 0 - unhealthy
    exporter_status = 1
    # initial task metrics that do not change
    static_task_metrics = []
    # individual container tags
    task_container_tags = {}
    # task limits
    task_cpu_limit = 0
    task_mem_limit = 0
    # individual container limits
    task_container_limits = {}
    # the Task level metrics are included by default
    include_container_ids = [TASK_CONTAINER_NAME_TAG]

    def __init__(self, metadata_url=None, include_containers=None, exclude_containers=None, http_timeout=60):
        self.task_metadata_url = urljoin(metadata_url + '/', 'task')
        # For testing
        # self.task_stats_url = urljoin(metadata_url + '/', 'stats')
        self.task_stats_url = urljoin(metadata_url + '/', 'task/stats')
        # BUG fix: re-bind the mutable class attributes on the instance;
        # previously e.g. include_container_ids was appended to on the
        # class object and therefore shared across instances.
        self.static_task_metrics = []
        self.task_container_tags = {}
        self.task_container_limits = {}
        self.include_container_ids = [TASK_CONTAINER_NAME_TAG]
        if exclude_containers:
            self.exclude_containers = exclude_containers
        if include_containers:
            self.include_containers = include_containers
        self.http_timeout = http_timeout
        self.log = logging.getLogger(__name__)
        self.log.info(f'Exporter initialized with '
                      f'metadata_url: {self.task_metadata_url}, '
                      f'task_stats_url: {self.task_stats_url}, '
                      f'http_timeout: {self.http_timeout}, '
                      f'include_containers: {self.include_containers}, '
                      f'exclude_containers: {self.exclude_containers}')
        self.collect_static_metrics()
        REGISTRY.register(self)

    def collect_static_metrics(self):
        """Poll the metadata url until the task is RUNNING, then parse it."""
        while True:
            # some wait for the task to be in running state
            time.sleep(5)
            try:
                response = requests.get(self.task_metadata_url, timeout=self.http_timeout)
            except requests.exceptions.Timeout:
                msg = f'Metadata url {self.task_metadata_url} timed out after {self.http_timeout} seconds'
                self.exporter_status = 0
                self.log.exception(msg)
                continue
            except requests.exceptions.RequestException:
                msg = f'Error fetching from Metadata url {self.task_metadata_url}'
                self.exporter_status = 0
                self.log.exception(msg)
                continue
            if response.status_code != 200:
                msg = f'Url {self.task_metadata_url} responded with {response.status_code} HTTP code'
                self.exporter_status = 0
                self.log.error(msg)
                continue
            try:
                metadata = response.json()
            except ValueError:
                msg = f'Cannot decode metadata url {self.task_metadata_url} response {response.text}'
                self.exporter_status = 0
                self.log.error(msg, exc_info=True)
                continue
            if metadata.get('KnownStatus') != 'RUNNING':
                self.log.warning(f'ECS Task not yet in RUNNING state, current status is: {metadata["KnownStatus"]}')
                continue
            else:
                break
        self.log.debug(f'Discovered Task metadata: {metadata}')
        self.parse_task_metadata(metadata)

    def parse_task_metadata(self, metadata):
        """Record static task/container metrics, container tags and limits."""
        self.static_task_metrics = []
        self.task_container_tags = {}
        self.task_container_limits = {}
        # task cpu/mem limit
        task_tag = task_metric_tags()
        self.task_cpu_limit, self.task_mem_limit = self.cpu_mem_limit(metadata)
        metric = create_metric('cpu_limit', self.task_cpu_limit, task_tag, 'gauge', 'Task CPU limit')
        self.static_task_metrics.append(metric)
        metric = create_metric('mem_limit', self.task_mem_limit, task_tag, 'gauge', 'Task Memory limit')
        self.static_task_metrics.append(metric)
        # container tags and limits
        for container in metadata['Containers']:
            container_id = container['DockerId']
            container_name = container['Name']
            if self.should_process_container(container_name,
                                             self.include_containers,
                                             self.exclude_containers):
                self.log.info(f'Processing stats for container: {container_name} - {container_id}')
                self.include_container_ids.append(container_id)
            else:
                self.log.info(f'Excluding container: {container_name} - {container_id} as per exclusion')
            # Tags/limits are recorded even for excluded containers; the
            # filtering happens later against include_container_ids.
            self.task_container_tags[container_id] = {'container_name': container_name}
            # container cpu/mem limit
            cpu_value, mem_value = self.cpu_mem_limit(container)
            self.task_container_limits[container_id] = {'cpu': cpu_value,
                                                        'mem': mem_value}
            if container_id in self.include_container_ids:
                metric = create_metric('cpu_limit', cpu_value, self.task_container_tags[container_id],
                                       'gauge', 'Limit in percent of the CPU usage')
                self.static_task_metrics.append(metric)
                metric = create_metric('mem_limit', mem_value, self.task_container_tags[container_id],
                                       'gauge', 'Limit in memory usage in MBs')
                self.static_task_metrics.append(metric)

    def should_process_container(self, container_name, include_containers, exclude_containers):
        """Exclusion wins; otherwise the include list (when given) must match."""
        if container_name in exclude_containers:
            return False
        else:
            if include_containers:
                if container_name in include_containers:
                    return True
                else:
                    return False
            else:
                return True

    def cpu_mem_limit(self, metadata):
        """Return (cpu_limit, mem_limit) from a task/container metadata dict."""
        # normalise to `cpu shares`
        cpu_limit = metadata.get('Limits', {}).get('CPU', 0) * 1024
        mem_limit = metadata.get('Limits', {}).get('Memory', 0)
        return (
            cpu_limit, mem_limit
        )

    # every http request gets data from here
    def collect(self):
        """Prometheus collector entry point: static + live + status metrics."""
        container_metrics = self.collect_container_metrics()
        # exporter status metric
        metric = create_metric('exporter_status', self.exporter_status, {},
                               'gauge', 'Exporter Status')
        container_metrics.append(metric)
        return self.static_task_metrics + container_metrics

    def collect_container_metrics(self):
        """Fetch the task stats endpoint and convert it into metric families."""
        metrics = []
        try:
            # BUG fix: pass the configured timeout; without it requests
            # never times out and the Timeout handler below (whose message
            # references http_timeout) could not trigger.
            request = requests.get(self.task_stats_url, timeout=self.http_timeout)
        except requests.exceptions.Timeout:
            msg = f'Task stats url {self.task_stats_url} timed out after {self.http_timeout} seconds'
            self.exporter_status = 0
            self.log.warning(msg)
            return metrics
        except requests.exceptions.RequestException:
            msg = f'Error fetching from task stats url {self.task_stats_url}'
            self.exporter_status = 0
            self.log.warning(msg)
            return metrics
        if request.status_code != 200:
            msg = f'Url {self.task_stats_url} responded with {request.status_code} HTTP code'
            self.exporter_status = 0
            self.log.error(msg)
            return metrics
        try:
            stats = request.json()
            self.exporter_status = 1
        except ValueError:
            # BUG fix: this message was missing its f-string prefix, so the
            # placeholders were logged literally.
            msg = f'Cannot decode task stats {self.task_stats_url} url response {request.text}'
            self.exporter_status = 0
            self.log.warning(msg, exc_info=True)
            return metrics
        container_metrics_all = self.parse_container_metadata(stats,
                                                              self.task_cpu_limit,
                                                              self.task_container_limits,
                                                              self.task_container_tags)
        # flatten and filter excluded containers
        filtered_container_metrics = []
        for metrics_by_container in container_metrics_all:
            for container_id, container_metric_list in metrics_by_container.items():
                if container_id in self.include_container_ids:
                    filtered_container_metrics.extend(container_metric_list)
        return filtered_container_metrics

    def parse_container_metadata(self, stats, task_cpu_limit,
                                 task_container_limits, task_container_tags):
        """
        Build per-container metric groups (cpu, memory, io, network).

        More details on the exposed docker metrics
        https://github.com/moby/moby/blob/c1d090fcc88fa3bc5b804aead91ec60e30207538/api/types/stats.go
        """
        container_metrics_all = []
        try:
            # CPU metrics
            container_metrics_all.append(
                calculate_cpu_metrics(stats,
                                      task_cpu_limit,
                                      task_container_limits,
                                      task_container_tags)
            )
            # Memory metrics
            container_metrics_all.append(
                calculate_memory_metrics(stats, task_container_tags)
            )
            # I/O metrics
            container_metrics_all.append(
                calculate_io_metrics(stats, task_container_tags)
            )
            # network metrics
            container_metrics_all.append(
                calculate_network_metrics(stats, task_container_tags)
            )
        except Exception as e:
            self.log.warning("Could not retrieve metrics for {}: {}".format(task_container_tags, e), exc_info=True)
            # BUG fix: mark unhealthy (0) on failure; previously set 1 here,
            # unlike every other error handler in this class.
            self.exporter_status = 0
        return container_metrics_all
def shutdown(sig_number, frame):
    """Signal handler: log the received signal and exit cleanly."""
    # BUG fix: corrected "Recevied"/"Shuttting" typos in the log message.
    log.info("Received signal {}, Shutting down".format(sig_number))
    sys.exit(0)
# CLI entry point: validates configuration, wires up signal handling and
# logging, registers the exporter and serves the Prometheus endpoint.
@click.command()
@click.option('--metadata-url', envvar='ECS_CONTAINER_METADATA_URI', type=str, default=None,
              help='Override ECS Metadata Url')
@click.option('--exporter-port', envvar='EXPORTER_PORT', type=int, default=9545,
              help='Change exporter listen port')
@click.option('--include', envvar='INCLUDE', type=str, default=None,
              help='Comma seperated list of container names to include, or use env var INCLUDE')
@click.option('--exclude', envvar='EXCLUDE', type=str, default=None,
              help='Comma seperated list of container names to exclude, or use env var EXCLUDE')
@click.option('--log-level', envvar='LOG_LEVEL', type=str, default='INFO',
              help='Log level, default: INFO')
def main(
    metadata_url=None, exporter_port=9545, include=None, exclude=None, log_level='INFO'
):
    # The metadata url is mandatory: it is how the exporter finds its task.
    if not metadata_url:
        sys.exit('AWS environment variable ECS_CONTAINER_METADATA_URI not found '
                 'nor is --metadata-url set')
    # Exit cleanly when the container is stopped (SIGTERM) or interrupted.
    signal.signal(signal.SIGTERM, shutdown)
    signal.signal(signal.SIGINT, shutdown)
    logging.basicConfig(
        format='%(asctime)s:%(levelname)s:%(message)s',
    )
    logging.getLogger().setLevel(
        getattr(logging, log_level.upper())
    )
    # CLI/env values arrive as comma-separated strings; split into lists.
    if exclude:
        exclude = exclude.strip().split(',')
    if include:
        include = include.strip().split(',')
    # Constructing the exporter registers it with the Prometheus REGISTRY,
    # so the instance itself does not need to be kept.
    ECSContainerExporter(metadata_url=metadata_url,
                         include_containers=include,
                         exclude_containers=exclude)
    # Start up the server to expose the metrics.
    start_http_server(int(exporter_port))
    # Keep the main thread alive; the HTTP server runs in the background.
    while True:
        time.sleep(10)
if __name__ == '__main__':
main() | 0.335024 | 0.062875 |
import math, os, sys, unittest
sys.path.append(os.path.join('..'))
from twyg.geom import Vector2
# Shorthand radian/degree converters used throughout the tests.
deg = math.degrees
rad = math.radians
class TestEvalExpr(unittest.TestCase):
def assert_equals(self, a, b):
self.assertTrue(abs(a - b) < 1e-12)
def test_constructor_cartesian1(self):
v = Vector2(3, -4)
self.assert_equals(5, v.m)
self.assert_equals(53.13010235415598, deg(v.a))
def test_constructor_cartesian2(self):
v = Vector2(4, -4)
self.assert_equals(5.6568542494923806, v.m)
self.assert_equals(45.0, deg(v.a))
def test_normalize(self):
v = Vector2(4, -4)
self.assert_equals(5.65685424949238, v.m)
self.assert_equals(45.0, deg(v.a))
v.normalize()
self.assert_equals(1.0, v.m)
self.assert_equals(45.0, deg(v.a))
def test_rotate_positive(self):
v = Vector2(4, -4)
v.rotate(rad(-15))
self.assert_equals(30.0, deg(v.a))
def test_rotate_negative(self):
v = Vector2(4, -4)
v.rotate(rad(30))
self.assert_equals(75.0, deg(v.a))
def test_constructor_polar(self):
v = Vector2(angle=rad(30), m=1)
self.assert_equals(30.0, deg(v.a))
self.assert_equals(1.0, v.m)
self.assert_equals(0.86602540378443, v.x)
self.assert_equals(-0.5, v.y)
def test_constructor_copy(self):
v1 = Vector2(angle=rad(30), m=1)
v2 = Vector2(v1)
self.assert_equals(v2.x, v1.x)
self.assert_equals(v2.y, v1.y)
self.assert_equals(v2.m, v1.m)
self.assert_equals(v2.a, v1.a)
def test_scalar_multiply_right(self):
v = Vector2(3, 2)
m, a = v.m, v.a
v = v * 2
self.assert_equals(a, v.a)
self.assert_equals(m * 2, v.m)
def test_scalar_multiply_left(self):
v = Vector2(3, 2)
m, a = v.m, v.a
v = 2 * v
self.assert_equals(a, v.a)
self.assert_equals(m * 2, v.m)
def test_scalar_multiply_and_assign(self):
v = Vector2(3, 2)
m, a = v.m, v.a
v *= 2
self.assert_equals(a, v.a)
self.assert_equals(m * 2, v.m)
def test_scalar_divide_and_assign(self):
v = Vector2(3, 2)
m, a = v.m, v.a
v /= 2
self.assert_equals(a, v.a)
self.assert_equals(m / 2, v.m)
def test_scalar_divide_right(self):
v = Vector2(3, 2)
m, a = v.m, v.a
v = v / 2
self.assert_equals(a, v.a)
self.assert_equals(m / 2, v.m)
if __name__ == '__main__':
unittest.main() | twyg/tests/geom_test.py | import math, os, sys, unittest
sys.path.append(os.path.join('..'))
from twyg.geom import Vector2
# Shorthand radian/degree converters used throughout the tests.
deg = math.degrees
rad = math.radians
class TestEvalExpr(unittest.TestCase):
def assert_equals(self, a, b):
self.assertTrue(abs(a - b) < 1e-12)
def test_constructor_cartesian1(self):
v = Vector2(3, -4)
self.assert_equals(5, v.m)
self.assert_equals(53.13010235415598, deg(v.a))
def test_constructor_cartesian2(self):
v = Vector2(4, -4)
self.assert_equals(5.6568542494923806, v.m)
self.assert_equals(45.0, deg(v.a))
def test_normalize(self):
v = Vector2(4, -4)
self.assert_equals(5.65685424949238, v.m)
self.assert_equals(45.0, deg(v.a))
v.normalize()
self.assert_equals(1.0, v.m)
self.assert_equals(45.0, deg(v.a))
def test_rotate_positive(self):
v = Vector2(4, -4)
v.rotate(rad(-15))
self.assert_equals(30.0, deg(v.a))
def test_rotate_negative(self):
v = Vector2(4, -4)
v.rotate(rad(30))
self.assert_equals(75.0, deg(v.a))
def test_constructor_polar(self):
v = Vector2(angle=rad(30), m=1)
self.assert_equals(30.0, deg(v.a))
self.assert_equals(1.0, v.m)
self.assert_equals(0.86602540378443, v.x)
self.assert_equals(-0.5, v.y)
def test_constructor_copy(self):
v1 = Vector2(angle=rad(30), m=1)
v2 = Vector2(v1)
self.assert_equals(v2.x, v1.x)
self.assert_equals(v2.y, v1.y)
self.assert_equals(v2.m, v1.m)
self.assert_equals(v2.a, v1.a)
def test_scalar_multiply_right(self):
v = Vector2(3, 2)
m, a = v.m, v.a
v = v * 2
self.assert_equals(a, v.a)
self.assert_equals(m * 2, v.m)
def test_scalar_multiply_left(self):
v = Vector2(3, 2)
m, a = v.m, v.a
v = 2 * v
self.assert_equals(a, v.a)
self.assert_equals(m * 2, v.m)
def test_scalar_multiply_and_assign(self):
v = Vector2(3, 2)
m, a = v.m, v.a
v *= 2
self.assert_equals(a, v.a)
self.assert_equals(m * 2, v.m)
def test_scalar_divide_and_assign(self):
v = Vector2(3, 2)
m, a = v.m, v.a
v /= 2
self.assert_equals(a, v.a)
self.assert_equals(m / 2, v.m)
def test_scalar_divide_right(self):
v = Vector2(3, 2)
m, a = v.m, v.a
v = v / 2
self.assert_equals(a, v.a)
self.assert_equals(m / 2, v.m)
if __name__ == '__main__':
unittest.main() | 0.581778 | 0.708824 |
import sys
import os
import time
from signal import SIGTERM
class Daemon:
    """Minimal double-fork UNIX daemon helper.

    NOTE(review): stdin/stdout/stderr paths are stored but the standard
    file descriptors are never actually redirected anywhere in this class —
    confirm whether that is intentional.
    """

    def __init__(self, stdout='/dev/null', stderr=None, stdin='/dev/null'):
        self.stdout = stdout
        self.stderr = stderr
        self.stdin = stdin
        self.startmsg = 'started with pid {}'

    def deamonize(self, pidfile=None):
        """Detach from the controlling terminal via the double-fork trick.

        Writes the daemon's pid to *pidfile* when one is given.
        """
        try:
            pid = os.fork()
            if pid > 0:
                sys.exit(0)  # Exit first parent.
        except OSError as exc:
            # BUG fix: `exc.self.strerror` raised AttributeError; the OSError
            # message lives in `exc.strerror`.
            sys.stderr.write("fork #1 failed: ({}) {}\n".format(exc.errno, exc.strerror))
            sys.exit(1)
        # Decouple from parent environment.
        os.chdir("/")
        os.umask(0)
        os.setsid()
        # Do second fork.
        try:
            pid = os.fork()
            if pid > 0:
                sys.exit(0)  # Exit second parent.
        except OSError as exc:
            print(exc)
            # BUG fix: same `exc.self.strerror` -> `exc.strerror` correction.
            sys.stderr.write("fork #2 failed: ({}) {}\n".format(exc.errno, exc.strerror))
            sys.exit(1)
        # Open file descriptors and print start message
        if not self.stderr:
            self.stderr = self.stdout
        pid = str(os.getpid())
        sys.stderr.write("\n{}\n".format(self.startmsg.format(pid)))
        sys.stderr.flush()
        if pidfile:
            with open(pidfile, 'w+') as f:
                f.write("{}\n".format(pid))

    def startstop(self, action, pidfile='pid.txt'):
        """Dispatch 'start', 'stop' or 'restart'.

        'stop'/'restart' signal the pid recorded in *pidfile* until it is
        gone; 'start' (including the second half of a restart) daemonizes
        the current process. Any other action exits with status 2.
        """
        try:
            with open(pidfile) as pf:
                pid = int(pf.read().strip())
        except (IOError, ValueError):
            pid = None
        if 'stop' == action or 'restart' == action:
            if not pid:
                mess = "Could not stop, pid file '{}' missing.\n"
                sys.stderr.write(mess.format(pidfile))
                sys.exit(1)
            try:
                # Keep signalling until the process disappears (os.kill then
                # raises "No such process").
                while 1:
                    os.kill(pid, SIGTERM)
                    time.sleep(1)
            except OSError as exc:
                exc = str(exc)
                if exc.find("No such process") > 0:
                    os.remove(pidfile)
                    if 'stop' == action:
                        sys.exit(0)
                    action = 'start'
                    pid = None
                else:
                    print(str(exc))
                    sys.exit(1)
        # BUG fix: this was `elif`, so 'restart' (which rewrites action to
        # 'start' above) fell through to sys.exit(2) and never restarted.
        if 'start' == action:
            if pid:
                # BUG fix: corrected "aborded" typo in the user message.
                mess = "Start aborted since pid file '{}' exists.\n"
                sys.stderr.write(mess.format(pidfile))
                sys.exit(1)
            self.deamonize(pidfile)
            return
        sys.exit(2)

    def start(self, function, *args, **kwargs):
        """Daemonize, then run *function* with the given arguments."""
        print("Start unix daemon...")
        self.startstop("start", pidfile='/tmp/deamonize.pid')
        if function:
            function(*args, **kwargs)

    def stop(self):
        """Stop the daemon recorded in the pid file."""
        print("Stop unix daemon...")
        self.startstop("stop", pidfile='/tmp/deamonize.pid')
import os
import time
from signal import SIGTERM
class Daemon:
    """Minimal double-fork UNIX daemon helper.

    NOTE(review): stdin/stdout/stderr paths are stored but the standard
    file descriptors are never actually redirected anywhere in this class —
    confirm whether that is intentional.
    """

    def __init__(self, stdout='/dev/null', stderr=None, stdin='/dev/null'):
        self.stdout = stdout
        self.stderr = stderr
        self.stdin = stdin
        self.startmsg = 'started with pid {}'

    def deamonize(self, pidfile=None):
        """Detach from the controlling terminal via the double-fork trick.

        Writes the daemon's pid to *pidfile* when one is given.
        """
        try:
            pid = os.fork()
            if pid > 0:
                sys.exit(0)  # Exit first parent.
        except OSError as exc:
            # BUG fix: `exc.self.strerror` raised AttributeError; the OSError
            # message lives in `exc.strerror`.
            sys.stderr.write("fork #1 failed: ({}) {}\n".format(exc.errno, exc.strerror))
            sys.exit(1)
        # Decouple from parent environment.
        os.chdir("/")
        os.umask(0)
        os.setsid()
        # Do second fork.
        try:
            pid = os.fork()
            if pid > 0:
                sys.exit(0)  # Exit second parent.
        except OSError as exc:
            print(exc)
            # BUG fix: same `exc.self.strerror` -> `exc.strerror` correction.
            sys.stderr.write("fork #2 failed: ({}) {}\n".format(exc.errno, exc.strerror))
            sys.exit(1)
        # Open file descriptors and print start message
        if not self.stderr:
            self.stderr = self.stdout
        pid = str(os.getpid())
        sys.stderr.write("\n{}\n".format(self.startmsg.format(pid)))
        sys.stderr.flush()
        if pidfile:
            with open(pidfile, 'w+') as f:
                f.write("{}\n".format(pid))

    def startstop(self, action, pidfile='pid.txt'):
        """Dispatch 'start', 'stop' or 'restart'.

        'stop'/'restart' signal the pid recorded in *pidfile* until it is
        gone; 'start' (including the second half of a restart) daemonizes
        the current process. Any other action exits with status 2.
        """
        try:
            with open(pidfile) as pf:
                pid = int(pf.read().strip())
        except (IOError, ValueError):
            pid = None
        if 'stop' == action or 'restart' == action:
            if not pid:
                mess = "Could not stop, pid file '{}' missing.\n"
                sys.stderr.write(mess.format(pidfile))
                sys.exit(1)
            try:
                # Keep signalling until the process disappears (os.kill then
                # raises "No such process").
                while 1:
                    os.kill(pid, SIGTERM)
                    time.sleep(1)
            except OSError as exc:
                exc = str(exc)
                if exc.find("No such process") > 0:
                    os.remove(pidfile)
                    if 'stop' == action:
                        sys.exit(0)
                    action = 'start'
                    pid = None
                else:
                    print(str(exc))
                    sys.exit(1)
        # BUG fix: this was `elif`, so 'restart' (which rewrites action to
        # 'start' above) fell through to sys.exit(2) and never restarted.
        if 'start' == action:
            if pid:
                # BUG fix: corrected "aborded" typo in the user message.
                mess = "Start aborted since pid file '{}' exists.\n"
                sys.stderr.write(mess.format(pidfile))
                sys.exit(1)
            self.deamonize(pidfile)
            return
        sys.exit(2)

    def start(self, function, *args, **kwargs):
        """Daemonize, then run *function* with the given arguments."""
        print("Start unix daemon...")
        self.startstop("start", pidfile='/tmp/deamonize.pid')
        if function:
            function(*args, **kwargs)

    def stop(self):
        """Stop the daemon recorded in the pid file."""
        print("Stop unix daemon...")
        self.startstop("stop", pidfile='/tmp/deamonize.pid')
import requests
import json
def ocr_space_file(filename, overlay=False, api_key='<KEY>', language='eng'):
    """OCR.space API request with a local file.

    :param filename: path of the image file to OCR.
    :param overlay: whether the OCR.space overlay is required in the response.
    :param api_key: OCR.space API key.
    :param language: OCR language code
                     (available codes listed on https://ocr.space/OCRAPI).
    :return: decoded response body (JSON text).
    """
    form_data = {
        'isOverlayRequired': overlay,
        'apikey': api_key,
        'language': language,
    }
    # The file path doubles as the multipart field name.
    with open(filename, 'rb') as image_file:
        response = requests.post(
            'https://api.ocr.space/parse/image',
            files={filename: image_file},
            data=form_data,
        )
    return response.content.decode()
def ocr_space_url(url, overlay=False, api_key='<KEY>', language='eng'):
    """OCR.space API request with a remote file.

    :param url: image url to OCR.
    :param overlay: whether the OCR.space overlay is required in the response.
    :param api_key: OCR.space API key.
    :param language: OCR language code
                     (available codes listed on https://ocr.space/OCRAPI).
    :return: decoded response body (JSON text).
    """
    form_data = {
        'url': url,
        'isOverlayRequired': overlay,
        'apikey': api_key,
        'language': language,
    }
    response = requests.post(
        'https://api.ocr.space/parse/image',
        data=form_data,
    )
    return response.content.decode()
# Use examples:
test_file = ocr_space_file(filename='ocr.jpg', language='pol')
#test_url = ocr_space_url(url='http://i.imgur.com/31d5L5y.jpg')
print(test_file)
# print(test_url)
# test_file is the raw JSON response *string*; this prints its 20th char.
print(test_file[19])
# NOTE(review): dumps() of a str followed by loads() just round-trips the
# same string — final_dictionary below is a str, not a dict, so the
# .index() calls are substring searches, not key lookups.
ini_string = json.dumps(test_file)
print ("initial 1st dictionary", ini_string)
print ("type of ini_object", type(ini_string))
print("\n ")
final_dictionary = json.loads(ini_string)
print ("final dictionary", str(final_dictionary))
print ("type of final_dictionary", type(final_dictionary))
# Escaped sequences to strip ('\r') and to rewrite as real newlines ('\n').
bad_chars=['\\r']
chg = ['\\n']
if 'ParsedText' in final_dictionary:
    print("hi")
    print(final_dictionary.index('ParsedText'))
    print(final_dictionary.index('ErrorMessage'))
    # Slice the text between the ParsedText value and the ErrorMessage key.
    print(final_dictionary[final_dictionary.index('ParsedText')+12:final_dictionary.index('ErrorMessage')-2])
    final_dictionary = final_dictionary[final_dictionary.index('ParsedText')+12:final_dictionary.index('ErrorMessage')-2]
for i in bad_chars:
    final_dictionary = final_dictionary.replace(i,' ')
for i in chg :
    final = final_dictionary.replace(i, ' \n')
print("\n")
print(final) | OCR _TEST.py | import requests
import json
def ocr_space_file(filename, overlay=False, api_key='<KEY>', language='eng'):
    """OCR.space API request with a local file.

    :param filename: path of the image file to OCR.
    :param overlay: whether the OCR.space overlay is required in the response.
    :param api_key: OCR.space API key.
    :param language: OCR language code
                     (available codes listed on https://ocr.space/OCRAPI).
    :return: decoded response body (JSON text).
    """
    form_data = {
        'isOverlayRequired': overlay,
        'apikey': api_key,
        'language': language,
    }
    # The file path doubles as the multipart field name.
    with open(filename, 'rb') as image_file:
        response = requests.post(
            'https://api.ocr.space/parse/image',
            files={filename: image_file},
            data=form_data,
        )
    return response.content.decode()
def ocr_space_url(url, overlay=False, api_key='<KEY>', language='eng'):
    """OCR.space API request with a remote file.

    :param url: image url to OCR.
    :param overlay: whether the OCR.space overlay is required in the response.
    :param api_key: OCR.space API key.
    :param language: OCR language code
                     (available codes listed on https://ocr.space/OCRAPI).
    :return: decoded response body (JSON text).
    """
    form_data = {
        'url': url,
        'isOverlayRequired': overlay,
        'apikey': api_key,
        'language': language,
    }
    response = requests.post(
        'https://api.ocr.space/parse/image',
        data=form_data,
    )
    return response.content.decode()
# Use examples:
test_file = ocr_space_file(filename='ocr.jpg', language='pol')
#test_url = ocr_space_url(url='http://i.imgur.com/31d5L5y.jpg')
print(test_file)
# print(test_url)
# test_file is the raw JSON response *string*; this prints its 20th char.
print(test_file[19])
# NOTE(review): dumps() of a str followed by loads() just round-trips the
# same string — final_dictionary below is a str, not a dict, so the
# .index() calls are substring searches, not key lookups.
ini_string = json.dumps(test_file)
print ("initial 1st dictionary", ini_string)
print ("type of ini_object", type(ini_string))
print("\n ")
final_dictionary = json.loads(ini_string)
print ("final dictionary", str(final_dictionary))
print ("type of final_dictionary", type(final_dictionary))
# Escaped sequences to strip ('\r') and to rewrite as real newlines ('\n').
bad_chars=['\\r']
chg = ['\\n']
if 'ParsedText' in final_dictionary:
    print("hi")
    print(final_dictionary.index('ParsedText'))
    print(final_dictionary.index('ErrorMessage'))
    # Slice the text between the ParsedText value and the ErrorMessage key.
    print(final_dictionary[final_dictionary.index('ParsedText')+12:final_dictionary.index('ErrorMessage')-2])
    final_dictionary = final_dictionary[final_dictionary.index('ParsedText')+12:final_dictionary.index('ErrorMessage')-2]
for i in bad_chars:
    final_dictionary = final_dictionary.replace(i,' ')
for i in chg :
    final = final_dictionary.replace(i, ' \n')
print("\n")
print(final) | 0.47171 | 0.247726 |
from data_cleaner import DataCleaner
import pandas as pd
import sys
# input_path = "contratos_vigentes_2015.csv"
# DEFAULT_OUTPUT_PATH = "clear_contratos_vigentes_2015.csv"
# Default input/output file names (used when no CLI arguments are given).
DEFAULT_INPUT_PATH = "contratos-raw.csv"
DEFAULT_OUTPUT_PATH_VIGENTE = "contratos-2015-clean.csv"
DEFAULT_OUTPUT_PATH1_HISTORICO = "contratos-historico-clean.csv"

# Declarative cleaning rules consumed by DataCleaner.clean().
RULES = [
    # Title-case person/organisation name fields.
    {
        "nombre_propio": [
            {"field": "financiacion"},
            {"field": "nombre_organismo"},
            {"field": "apellido"},
            {"field": "nombre"},
        ]
    },
    # Normalise date fields to YYYY/MM/DD.
    {
        "fecha_simple": [
            {"field": "alta_fecha", "time_format": "YYYY/MM/DD"},
            {"field": "mod_fecha", "time_format": "YYYY/MM/DD"},
        ]
    },
    # Expand the "Serv" abbreviation to "Servicios".
    {
        "reemplazar": [
            {"field": "locacion",
             "replacements": {"Servicios": ["Serv"],
                              }
             }
        ]
    },
    # Canonicalise the funding-source labels.
    {
        "reemplazar": [
            {"field": "financiacion",
             "replacements": {"Dec. 1421/2002 - Convenio Dec.1133/09": ["Dec. 1133/2009"],
                              "Dec. 1421/2002 - Convenio Conae": ["Convenio Conae"],
                              "Dec. 1421/2002 - Convenio Docifa": ["Convenio Docifa"],
                              "Dec. 1421/2002 - Convenio Pecifa": ["Convenio Pecifa"],
                              "Dec. 1421/2002 - Conven<NAME>": ["<NAME>"],
                              "Dec. 1421/2002 - Convenio Sigen": ["Convenio Sigen"],
                              "Dec. 2345/2008 - Fin. Int. B I D": ["Fin. Int. B I D"],
                              "Dec. 2345/2008 - Fin. Int. B I R F": ["Fin. Int. B I R F"],
                              "Dec. 2345/2008 - Fin. Int. B M": ["Fin. Int. B M"],
                              "Dec. 2345/2008 - Fin. Int. Fonplata": ["Fin. Int. Fonplata"],
                              "Dec. 2345/2008 - Fin. Int. P N U D": ["Fin. Int. P N U D"],
                              "Dec. 2345/2008 - Fin. Int. U E": ["Fin. Int. U E"],
                              "Dec. 1421/2002 (arts. 93/99 LCT)": [u"Ley Nº 20744"],
                              }
             }
        ]
    },
    # Rename columns to their published names.
    {"renombrar_columnas": [
        {"field": "alta_fecha", "new_field": "fecha_alta_registro_rcpc"},
        {"field": "mod_fecha", "new_field": "fecha_modificacion_registro_rcpc"},
        {"field": "id_unico", "new_field": "id_organismo"},
    ]},
    # Drop columns that must not be published.
    {"remover_columnas": [
        {"field": "estudios"},
        {"field": "titulo"},
        {"field": "nivel_grado"},
        {"field": "nacimiento"}
    ]}
]
def custom_cleaning_before_rules(dc):
    """Custom cleanup hook applied to the object before the rules.

    Args:
        dc (DataCleaner): Data cleaner object with data loaded.
    """
    pass


def custom_cleaning_after_rules(dc):
    """Custom cleanup hook applied to the object after the rules.

    Args:
        dc (DataCleaner): Data cleaner object with data loaded.
    """
    pass
def clean_file(input_path, output_path):
    """Clean the input data, writing two new clean files.

    Reads *input_path*, applies RULES plus the custom hooks, then writes
    the records in force during 2015 to *output_path* and the historic
    records to DEFAULT_OUTPUT_PATH1_HISTORICO.
    """
    print("Comenzando limpieza...")
    dc = DataCleaner(input_path, encoding='latin1')
    custom_cleaning_before_rules(dc)
    dc.clean(RULES)
    custom_cleaning_after_rules(dc)
    y = 2015
    dc.df.hasta = pd.to_datetime(dc.df.hasta, yearfirst=True)
    dc.df.desde = pd.to_datetime(dc.df.desde, yearfirst=True)
    # Contracts in force during year y: starting in y, ending in y, or
    # spanning it entirely.
    gii = dc.df.desde.dt.year == y
    gif = dc.df.hasta.dt.year == y
    gis = (dc.df.desde.dt.year < y) & (dc.df.hasta.dt.year > y)
    givig = gii | gif | gis
    df1 = dc.df[givig].copy()
    # BUG fix: corrected "cantida" typo in the two messages below.
    print("La cantidad de registros 2015 es: ")
    print(givig.sum())
    # Historic records: everything that does not *start* in 2016.
    # NOTE(review): confirm this is the intended definition of "historico".
    gin2016 = dc.df.desde.dt.year == 2016
    df2 = dc.df[~gin2016].copy()
    print("La cantidad de registros historicos es: ")
    print((~gin2016).sum())
    # BUG fixes: honour the output_path argument (it was silently ignored
    # in favour of DEFAULT_OUTPUT_PATH_VIGENTE) and use pandas' `sep=`
    # keyword (`separator=` is not a valid DataFrame.to_csv argument).
    df1.to_csv(
        output_path, encoding=dc.OUTPUT_ENCODING,
        sep=dc.OUTPUT_SEPARATOR,
        quotechar=dc.OUTPUT_QUOTECHAR, index=False)
    df2.to_csv(
        DEFAULT_OUTPUT_PATH1_HISTORICO, encoding=dc.OUTPUT_ENCODING,
        sep=dc.OUTPUT_SEPARATOR,
        quotechar=dc.OUTPUT_QUOTECHAR, index=False)
    print("Limpieza finalizada exitosamente!")
if __name__ == '__main__':
if len(sys.argv) == 1:
clean_file(DEFAULT_INPUT_PATH, DEFAULT_OUTPUT_PATH_VIGENTE)
elif len(sys.argv) == 2:
clean_file(sys.argv[1], DEFAULT_OUTPUT_PATH_VIGENTE)
elif len(sys.argv) == 3:
clean_file(sys.argv[1], sys.argv[2])
else:
print("{} no es una cantidad de argumentos aceptada.".format(
len(sys.argv) - 1
)) | contratos/cleaner-contratos.py | from data_cleaner import DataCleaner
import pandas as pd
import sys
# input_path = "contratos_vigentes_2015.csv"
# DEFAULT_OUTPUT_PATH = "clear_contratos_vigentes_2015.csv"
# Default input/output file names (used when no CLI arguments are given).
DEFAULT_INPUT_PATH = "contratos-raw.csv"
DEFAULT_OUTPUT_PATH_VIGENTE = "contratos-2015-clean.csv"
DEFAULT_OUTPUT_PATH1_HISTORICO = "contratos-historico-clean.csv"

# Declarative cleaning rules consumed by DataCleaner.clean().
RULES = [
    # Title-case person/organisation name fields.
    {
        "nombre_propio": [
            {"field": "financiacion"},
            {"field": "nombre_organismo"},
            {"field": "apellido"},
            {"field": "nombre"},
        ]
    },
    # Normalise date fields to YYYY/MM/DD.
    {
        "fecha_simple": [
            {"field": "alta_fecha", "time_format": "YYYY/MM/DD"},
            {"field": "mod_fecha", "time_format": "YYYY/MM/DD"},
        ]
    },
    # Expand the "Serv" abbreviation to "Servicios".
    {
        "reemplazar": [
            {"field": "locacion",
             "replacements": {"Servicios": ["Serv"],
                              }
             }
        ]
    },
    # Canonicalise the funding-source labels.
    {
        "reemplazar": [
            {"field": "financiacion",
             "replacements": {"Dec. 1421/2002 - Convenio Dec.1133/09": ["Dec. 1133/2009"],
                              "Dec. 1421/2002 - Convenio Conae": ["Convenio Conae"],
                              "Dec. 1421/2002 - Convenio Docifa": ["Convenio Docifa"],
                              "Dec. 1421/2002 - Convenio Pecifa": ["Convenio Pecifa"],
                              "Dec. 1421/2002 - Conven<NAME>": ["<NAME>"],
                              "Dec. 1421/2002 - Convenio Sigen": ["Convenio Sigen"],
                              "Dec. 2345/2008 - Fin. Int. B I D": ["Fin. Int. B I D"],
                              "Dec. 2345/2008 - Fin. Int. B I R F": ["Fin. Int. B I R F"],
                              "Dec. 2345/2008 - Fin. Int. B M": ["Fin. Int. B M"],
                              "Dec. 2345/2008 - Fin. Int. Fonplata": ["Fin. Int. Fonplata"],
                              "Dec. 2345/2008 - Fin. Int. P N U D": ["Fin. Int. P N U D"],
                              "Dec. 2345/2008 - Fin. Int. U E": ["Fin. Int. U E"],
                              "Dec. 1421/2002 (arts. 93/99 LCT)": [u"Ley Nº 20744"],
                              }
             }
        ]
    },
    # Rename columns to their published names.
    {"renombrar_columnas": [
        {"field": "alta_fecha", "new_field": "fecha_alta_registro_rcpc"},
        {"field": "mod_fecha", "new_field": "fecha_modificacion_registro_rcpc"},
        {"field": "id_unico", "new_field": "id_organismo"},
    ]},
    # Drop columns that must not be published.
    {"remover_columnas": [
        {"field": "estudios"},
        {"field": "titulo"},
        {"field": "nivel_grado"},
        {"field": "nacimiento"}
    ]}
]
def custom_cleaning_before_rules(dc):
    """Custom cleanup hook applied to the object before the rules.

    Args:
        dc (DataCleaner): Data cleaner object with data loaded.
    """
    pass


def custom_cleaning_after_rules(dc):
    """Custom cleanup hook applied to the object after the rules.

    Args:
        dc (DataCleaner): Data cleaner object with data loaded.
    """
    pass
def clean_file(input_path, output_path):
    """Clean the input data, writing two new clean files.

    Reads *input_path*, applies RULES plus the custom hooks, then writes
    the records in force during 2015 to *output_path* and the historic
    records to DEFAULT_OUTPUT_PATH1_HISTORICO.
    """
    print("Comenzando limpieza...")
    dc = DataCleaner(input_path, encoding='latin1')
    custom_cleaning_before_rules(dc)
    dc.clean(RULES)
    custom_cleaning_after_rules(dc)
    y = 2015
    dc.df.hasta = pd.to_datetime(dc.df.hasta, yearfirst=True)
    dc.df.desde = pd.to_datetime(dc.df.desde, yearfirst=True)
    # Contracts in force during year y: starting in y, ending in y, or
    # spanning it entirely.
    gii = dc.df.desde.dt.year == y
    gif = dc.df.hasta.dt.year == y
    gis = (dc.df.desde.dt.year < y) & (dc.df.hasta.dt.year > y)
    givig = gii | gif | gis
    df1 = dc.df[givig].copy()
    # BUG fix: corrected "cantida" typo in the two messages below.
    print("La cantidad de registros 2015 es: ")
    print(givig.sum())
    # Historic records: everything that does not *start* in 2016.
    # NOTE(review): confirm this is the intended definition of "historico".
    gin2016 = dc.df.desde.dt.year == 2016
    df2 = dc.df[~gin2016].copy()
    print("La cantidad de registros historicos es: ")
    print((~gin2016).sum())
    # BUG fixes: honour the output_path argument (it was silently ignored
    # in favour of DEFAULT_OUTPUT_PATH_VIGENTE) and use pandas' `sep=`
    # keyword (`separator=` is not a valid DataFrame.to_csv argument).
    df1.to_csv(
        output_path, encoding=dc.OUTPUT_ENCODING,
        sep=dc.OUTPUT_SEPARATOR,
        quotechar=dc.OUTPUT_QUOTECHAR, index=False)
    df2.to_csv(
        DEFAULT_OUTPUT_PATH1_HISTORICO, encoding=dc.OUTPUT_ENCODING,
        sep=dc.OUTPUT_SEPARATOR,
        quotechar=dc.OUTPUT_QUOTECHAR, index=False)
    print("Limpieza finalizada exitosamente!")
if __name__ == '__main__':
if len(sys.argv) == 1:
clean_file(DEFAULT_INPUT_PATH, DEFAULT_OUTPUT_PATH_VIGENTE)
elif len(sys.argv) == 2:
clean_file(sys.argv[1], DEFAULT_OUTPUT_PATH_VIGENTE)
elif len(sys.argv) == 3:
clean_file(sys.argv[1], sys.argv[2])
else:
print("{} no es una cantidad de argumentos aceptada.".format(
len(sys.argv) - 1
)) | 0.233881 | 0.317955 |
import os
import shutil
import eos.log
import eos.tools
import eos.util
def _check_return_code(code):
if code != 0:
raise RuntimeError("repository operation failed")
def _remove_directory(directory):
if os.path.exists(directory):
shutil.rmtree(directory)
def _execute(command):
    """Run *command*, honouring the configured eos verbosity level."""
    # Verbosity > 1 echoes the command; verbosity <= 2 suppresses its output.
    print_command = eos.verbosity() > 1
    quiet = eos.verbosity() <= 2
    return eos.util.execute_command(command, print_command, quiet)


def _execute_and_capture_output(command):
    """Run *command* and capture its output.

    Callers in this module unpack the result as (return_code, stdout, stderr).
    """
    print_command = eos.verbosity() > 1
    return eos.util.execute_command_capture_output(command, print_command)
# -----
def hg_repo_exists(directory):
# Is a more robust check possible? https://trac.sagemath.org/ticket/12128 says no.
return os.path.exists(os.path.join(directory, ".hg"))
def git_repo_exists(directory):
return (
os.path.exists(os.path.join(directory, ".git"))
and _execute_and_capture_output(eos.tools.command_git() + " -C " + directory + " rev-parse --git-dir")[0] == 0
)
def hg_clone(url, directory):
return _execute(eos.tools.command_hg() + " clone " + url + " " + directory)
def hg_pull(directory):
return _execute(eos.tools.command_hg() + " pull -R " + directory)
def hg_purge(directory):
return _execute(eos.tools.command_hg() + " purge -R " + directory + " --all --config extensions.purge=")
def hg_update_to_revision(directory, revision=None):
if revision is None:
revision = ""
return _execute(eos.tools.command_hg() + " update -R " + directory + " -C " + revision)
def hg_update_to_branch_tip(directory, branch):
return _execute(eos.tools.command_hg() + " update -R " + directory + " -C " + branch)
def hg_verify_commit_hash(directory, expected_commit_hash):
rcode, out, err = _execute_and_capture_output(eos.tools.command_hg() + " -R " + directory + " --debug id -i")
if rcode != 0:
return False
current_commit_hash = out
hash_match = expected_commit_hash in current_commit_hash
return hash_match
# -----
def git_clone(url, directory):
return _execute(eos.tools.command_git() + " clone --recursive " + url + " " + directory)
def git_fetch(directory):
return _execute(eos.tools.command_git() + " -C " + directory + " fetch --recurse-submodules")
def git_pull(directory):
return _execute(eos.tools.command_git() + " -C " + directory + " pull --recurse-submodules")
def git_clean(directory):
return _execute(eos.tools.command_git() + " -C " + directory + " clean -fxd")
def git_checkout(directory, branch=None):
if branch is None:
branch = "" # making this effectively a no-op
return _execute(eos.tools.command_git() + " -C " + directory + " checkout " + branch)
def git_submodule_update(directory):
return _execute(eos.tools.command_git() + " -C " + directory + " submodule update")
def git_reset_to_revision(directory, revision=None):
if revision is None:
revision = "HEAD"
return _execute(eos.tools.command_git() + " -C " + directory + " reset --hard " + revision)
def git_verify_commit_hash(directory, expected_commit_hash):
rcode, out, err = _execute_and_capture_output(eos.tools.command_git() + " -C " + directory + " rev-parse HEAD")
if rcode != 0:
return False
current_commit_hash = out
hash_match = expected_commit_hash in current_commit_hash
return hash_match
# -----
def svn_checkout(url, directory):
return _execute(eos.tools.command_svn() + " checkout " + url + " " + directory)
# -----
def update_state_git(url, dst_dir, branch=None, revision=None):
if not git_repo_exists(dst_dir):
if url is None:
return False
_remove_directory(dst_dir)
_check_return_code(git_clone(url, dst_dir))
else:
_check_return_code(git_clean(dst_dir))
if url is not None:
_check_return_code(git_fetch(dst_dir))
if revision and revision != "":
_check_return_code(git_reset_to_revision(dst_dir, revision))
else:
if not branch or branch == "":
branch = "master"
_check_return_code(git_checkout(dst_dir, branch))
if url is not None:
_check_return_code(git_pull(dst_dir))
_check_return_code(git_submodule_update(dst_dir))
if eos.util.is_sha1(revision) and not git_verify_commit_hash(dst_dir, revision):
eos.log_error("SHA1 hash check failed")
return False
return True
def update_state_hg(url, dst_dir, branch=None, revision=None):
if not hg_repo_exists(dst_dir):
if url is None:
return False
_remove_directory(dst_dir)
_check_return_code(hg_clone(url, dst_dir))
else:
_check_return_code(hg_purge(dst_dir))
if url is not None:
_check_return_code(hg_pull(dst_dir))
if revision and revision != "":
_check_return_code(hg_update_to_revision(dst_dir, revision))
else:
if not branch or branch == "":
branch = "default"
_check_return_code(hg_update_to_branch_tip(dst_dir, branch))
if eos.util.is_sha1(revision) and not hg_verify_commit_hash(dst_dir, revision):
eos.log_error("SHA1 hash check failed")
return False
return True
def update_state_svn(url, dst_dir, revision=None):
_remove_directory(dst_dir)
_check_return_code(svn_checkout(url, dst_dir))
if revision and revision != "":
eos.log_error("cannot update SVN repository to revision")
return False
return True
def update_state(repo_type, url, name, dst_dir, branch=None, revision=None):
eos.log_verbose(
"Updating repository for '"
+ name
+ "' (url = "
+ (url if url is not None else "")
+ ", target_dir = "
+ dst_dir
+ ")"
)
try:
if repo_type == "git":
success = update_state_git(url, dst_dir, branch=branch, revision=revision)
elif repo_type == "hg":
success = update_state_hg(url, dst_dir, branch=branch, revision=revision)
elif repo_type == "svn":
if url is None:
eos.log_error("cannot execute local operations on SVN repository")
return False
success = update_state_svn(url, dst_dir, revision=revision)
else:
eos.log_error("unknown repository type '" + repo_type + "'")
return False
except RuntimeError:
return False
return success | eos/repo.py | import os
import shutil
import eos.log
import eos.tools
import eos.util
def _check_return_code(code):
if code != 0:
raise RuntimeError("repository operation failed")
def _remove_directory(directory):
if os.path.exists(directory):
shutil.rmtree(directory)
def _execute(command):
print_command = eos.verbosity() > 1
quiet = eos.verbosity() <= 2
return eos.util.execute_command(command, print_command, quiet)
def _execute_and_capture_output(command):
print_command = eos.verbosity() > 1
return eos.util.execute_command_capture_output(command, print_command)
# -----
def hg_repo_exists(directory):
# Is a more robust check possible? https://trac.sagemath.org/ticket/12128 says no.
return os.path.exists(os.path.join(directory, ".hg"))
def git_repo_exists(directory):
return (
os.path.exists(os.path.join(directory, ".git"))
and _execute_and_capture_output(eos.tools.command_git() + " -C " + directory + " rev-parse --git-dir")[0] == 0
)
def hg_clone(url, directory):
return _execute(eos.tools.command_hg() + " clone " + url + " " + directory)
def hg_pull(directory):
return _execute(eos.tools.command_hg() + " pull -R " + directory)
def hg_purge(directory):
return _execute(eos.tools.command_hg() + " purge -R " + directory + " --all --config extensions.purge=")
def hg_update_to_revision(directory, revision=None):
if revision is None:
revision = ""
return _execute(eos.tools.command_hg() + " update -R " + directory + " -C " + revision)
def hg_update_to_branch_tip(directory, branch):
return _execute(eos.tools.command_hg() + " update -R " + directory + " -C " + branch)
def hg_verify_commit_hash(directory, expected_commit_hash):
rcode, out, err = _execute_and_capture_output(eos.tools.command_hg() + " -R " + directory + " --debug id -i")
if rcode != 0:
return False
current_commit_hash = out
hash_match = expected_commit_hash in current_commit_hash
return hash_match
# -----
def git_clone(url, directory):
return _execute(eos.tools.command_git() + " clone --recursive " + url + " " + directory)
def git_fetch(directory):
return _execute(eos.tools.command_git() + " -C " + directory + " fetch --recurse-submodules")
def git_pull(directory):
return _execute(eos.tools.command_git() + " -C " + directory + " pull --recurse-submodules")
def git_clean(directory):
return _execute(eos.tools.command_git() + " -C " + directory + " clean -fxd")
def git_checkout(directory, branch=None):
if branch is None:
branch = "" # making this effectively a no-op
return _execute(eos.tools.command_git() + " -C " + directory + " checkout " + branch)
def git_submodule_update(directory):
return _execute(eos.tools.command_git() + " -C " + directory + " submodule update")
def git_reset_to_revision(directory, revision=None):
if revision is None:
revision = "HEAD"
return _execute(eos.tools.command_git() + " -C " + directory + " reset --hard " + revision)
def git_verify_commit_hash(directory, expected_commit_hash):
rcode, out, err = _execute_and_capture_output(eos.tools.command_git() + " -C " + directory + " rev-parse HEAD")
if rcode != 0:
return False
current_commit_hash = out
hash_match = expected_commit_hash in current_commit_hash
return hash_match
# -----
def svn_checkout(url, directory):
return _execute(eos.tools.command_svn() + " checkout " + url + " " + directory)
# -----
def update_state_git(url, dst_dir, branch=None, revision=None):
if not git_repo_exists(dst_dir):
if url is None:
return False
_remove_directory(dst_dir)
_check_return_code(git_clone(url, dst_dir))
else:
_check_return_code(git_clean(dst_dir))
if url is not None:
_check_return_code(git_fetch(dst_dir))
if revision and revision != "":
_check_return_code(git_reset_to_revision(dst_dir, revision))
else:
if not branch or branch == "":
branch = "master"
_check_return_code(git_checkout(dst_dir, branch))
if url is not None:
_check_return_code(git_pull(dst_dir))
_check_return_code(git_submodule_update(dst_dir))
if eos.util.is_sha1(revision) and not git_verify_commit_hash(dst_dir, revision):
eos.log_error("SHA1 hash check failed")
return False
return True
def update_state_hg(url, dst_dir, branch=None, revision=None):
if not hg_repo_exists(dst_dir):
if url is None:
return False
_remove_directory(dst_dir)
_check_return_code(hg_clone(url, dst_dir))
else:
_check_return_code(hg_purge(dst_dir))
if url is not None:
_check_return_code(hg_pull(dst_dir))
if revision and revision != "":
_check_return_code(hg_update_to_revision(dst_dir, revision))
else:
if not branch or branch == "":
branch = "default"
_check_return_code(hg_update_to_branch_tip(dst_dir, branch))
if eos.util.is_sha1(revision) and not hg_verify_commit_hash(dst_dir, revision):
eos.log_error("SHA1 hash check failed")
return False
return True
def update_state_svn(url, dst_dir, revision=None):
_remove_directory(dst_dir)
_check_return_code(svn_checkout(url, dst_dir))
if revision and revision != "":
eos.log_error("cannot update SVN repository to revision")
return False
return True
def update_state(repo_type, url, name, dst_dir, branch=None, revision=None):
eos.log_verbose(
"Updating repository for '"
+ name
+ "' (url = "
+ (url if url is not None else "")
+ ", target_dir = "
+ dst_dir
+ ")"
)
try:
if repo_type == "git":
success = update_state_git(url, dst_dir, branch=branch, revision=revision)
elif repo_type == "hg":
success = update_state_hg(url, dst_dir, branch=branch, revision=revision)
elif repo_type == "svn":
if url is None:
eos.log_error("cannot execute local operations on SVN repository")
return False
success = update_state_svn(url, dst_dir, revision=revision)
else:
eos.log_error("unknown repository type '" + repo_type + "'")
return False
except RuntimeError:
return False
return success | 0.343562 | 0.127761 |
import jinja2
import cherrypy
import platform
from pymongo import MongoClient
from pymongo import ReadPreference
from engine.tools import IgnoreRequestFilter
from engine.tools import secureheaders
cherrypy.tools.secureheaders = cherrypy.Tool(
"before_finalize", secureheaders, priority=60)
from engine.tools import HazelcastSession
cherrypy.lib.sessions.HazelcastSession = HazelcastSession
from engine.modules.auth import Auth
cherrypy.tools.check_login = cherrypy.Tool("before_handler", Auth.check_login)
from engine.modules.heartbeat import Heartbeat
from engine.modules.notes import Notes
from engine.modules.vk import VK
class Application(object):
""" Main application class """
def __init__(self, template_engine, modules):
self.template_engine = template_engine
self.module_list = list()
for module in modules:
setattr(self, module, modules[module])
if modules[module].MODULE_NAME is not None:
item = dict()
item["path"] = module
item["name"] = modules[module].MODULE_NAME
item["instance"] = modules[module]
self.module_list.append(item)
@cherrypy.expose
@cherrypy.tools.check_login()
def index(self):
""" Index """
return self.template_engine.get_template(
"index.html"
).render(
user=cherrypy.session.get("login", None),
generator=platform.node(),
modules=self.module_list
)
def main():
""" Main (entry point) """
template_engine = jinja2.Environment(loader=jinja2.FileSystemLoader(
"/usr/src/app/template"))
mongo = MongoClient(
["mongo1", "mongo2", "mongo3"],
replicaSet="rs0",
read_preference=ReadPreference.PRIMARY_PREFERRED,
readConcernLevel="majority",
w=2, wtimeout=3000, j=True
)
modules = {
"heartbeat": Heartbeat(),
"auth": Auth(template_engine, mongo),
"notes": Notes(template_engine, mongo),
"vk": VK(template_engine, mongo)
}
config = "S.H.I.V.A..conf"
cherrypy.config.update(config)
application = cherrypy.tree.mount(
Application(template_engine, modules),
"/",
config
)
application.log.access_log.addFilter(
IgnoreRequestFilter("GET /heartbeat/index"))
cherrypy.engine.signals.subscribe()
cherrypy.engine.start()
cherrypy.engine.block()
if __name__ == "__main__":
main() | containers/shiva/S.H.I.V.A..py | import jinja2
import cherrypy
import platform
from pymongo import MongoClient
from pymongo import ReadPreference
from engine.tools import IgnoreRequestFilter
from engine.tools import secureheaders
cherrypy.tools.secureheaders = cherrypy.Tool(
"before_finalize", secureheaders, priority=60)
from engine.tools import HazelcastSession
cherrypy.lib.sessions.HazelcastSession = HazelcastSession
from engine.modules.auth import Auth
cherrypy.tools.check_login = cherrypy.Tool("before_handler", Auth.check_login)
from engine.modules.heartbeat import Heartbeat
from engine.modules.notes import Notes
from engine.modules.vk import VK
class Application(object):
""" Main application class """
def __init__(self, template_engine, modules):
self.template_engine = template_engine
self.module_list = list()
for module in modules:
setattr(self, module, modules[module])
if modules[module].MODULE_NAME is not None:
item = dict()
item["path"] = module
item["name"] = modules[module].MODULE_NAME
item["instance"] = modules[module]
self.module_list.append(item)
@cherrypy.expose
@cherrypy.tools.check_login()
def index(self):
""" Index """
return self.template_engine.get_template(
"index.html"
).render(
user=cherrypy.session.get("login", None),
generator=platform.node(),
modules=self.module_list
)
def main():
""" Main (entry point) """
template_engine = jinja2.Environment(loader=jinja2.FileSystemLoader(
"/usr/src/app/template"))
mongo = MongoClient(
["mongo1", "mongo2", "mongo3"],
replicaSet="rs0",
read_preference=ReadPreference.PRIMARY_PREFERRED,
readConcernLevel="majority",
w=2, wtimeout=3000, j=True
)
modules = {
"heartbeat": Heartbeat(),
"auth": Auth(template_engine, mongo),
"notes": Notes(template_engine, mongo),
"vk": VK(template_engine, mongo)
}
config = "S.H.I.V.A..conf"
cherrypy.config.update(config)
application = cherrypy.tree.mount(
Application(template_engine, modules),
"/",
config
)
application.log.access_log.addFilter(
IgnoreRequestFilter("GET /heartbeat/index"))
cherrypy.engine.signals.subscribe()
cherrypy.engine.start()
cherrypy.engine.block()
if __name__ == "__main__":
main() | 0.367838 | 0.112918 |
import os
from collections import OrderedDict
def create_param(is_reverse, data):
line_id = data[0]
try:
if is_reverse:
param = data[1]
src_line = [data[0], data[1]]
else:
param = data[1] + ',' + data[2]
src_line = [data[0], data[1], data[2]]
return {'id': line_id, 'param': param, 'src_line': src_line}
except Exception:
pass
def parse_file(is_reverse):
input_data = []
file_name = os.path.join('csv', 'input.csv')
with open(file_name, encoding='utf8') as input_file:
for line in input_file:
data = line.split(';')
new_data = []
for item in data:
new_data.append(item.strip())
param = create_param(is_reverse, new_data)
input_data.append(param)
return input_data
def parse_result(is_reverse, line_id, resp):
try:
data = resp['response']['GeoObjectCollection']['featureMember'][0]['GeoObject']
address_data = get_address_components(data)
address = list(address_data.values())
if is_reverse:
coord = data['Point']['pos']
return [line_id] + coord.split() + address
else:
return [line_id] + address
except Exception:
return [line_id] + ['Н/Д']
def get_address_components(data):
result = OrderedDict()
result['country'] = None
result['province'] = None
result['area'] = None
result['locality'] = None
result['street'] = None
result['house'] = None
for key in result.keys():
for component in data['metaDataProperty']['GeocoderMetaData']['Address']['Components']:
if component['kind'] == key:
if result[key]:
result[key] += ';' + component['name']
else:
result[key] = component['name']
return result
def write_data(file, data):
file_name = os.path.join('csv', f'{file}.csv')
with open(file_name, 'a', encoding='utf8') as output_file:
for item in data:
line = ';'.join(item)
output_file.write(line + '\n') | geocoder/util.py | import os
from collections import OrderedDict
def create_param(is_reverse, data):
line_id = data[0]
try:
if is_reverse:
param = data[1]
src_line = [data[0], data[1]]
else:
param = data[1] + ',' + data[2]
src_line = [data[0], data[1], data[2]]
return {'id': line_id, 'param': param, 'src_line': src_line}
except Exception:
pass
def parse_file(is_reverse):
input_data = []
file_name = os.path.join('csv', 'input.csv')
with open(file_name, encoding='utf8') as input_file:
for line in input_file:
data = line.split(';')
new_data = []
for item in data:
new_data.append(item.strip())
param = create_param(is_reverse, new_data)
input_data.append(param)
return input_data
def parse_result(is_reverse, line_id, resp):
try:
data = resp['response']['GeoObjectCollection']['featureMember'][0]['GeoObject']
address_data = get_address_components(data)
address = list(address_data.values())
if is_reverse:
coord = data['Point']['pos']
return [line_id] + coord.split() + address
else:
return [line_id] + address
except Exception:
return [line_id] + ['Н/Д']
def get_address_components(data):
result = OrderedDict()
result['country'] = None
result['province'] = None
result['area'] = None
result['locality'] = None
result['street'] = None
result['house'] = None
for key in result.keys():
for component in data['metaDataProperty']['GeocoderMetaData']['Address']['Components']:
if component['kind'] == key:
if result[key]:
result[key] += ';' + component['name']
else:
result[key] = component['name']
return result
def write_data(file, data):
file_name = os.path.join('csv', f'{file}.csv')
with open(file_name, 'a', encoding='utf8') as output_file:
for item in data:
line = ';'.join(item)
output_file.write(line + '\n') | 0.203708 | 0.387864 |
import config as config # configurables file
import os, sys
import datetime as dt
import asyncio
import smtplib
import RH.Reports.RH_functions as rh # robinhood processes
import RH.Reports.APP_functions as app # application processes
import RH.Process_routes as routes # routes
import RH.Log.logger as logger
async def rh_login():
""" Login to Robinhood every 15-minutes.
"""
timer = 60 * 15 # 15 minutes
while True:
await asyncio.sleep(timer)
rh.app().connect_robinhood() # connect to rH
async def email_connect(e_client, e_server):
""" Refreshes email client and server objects.
"""
timer = 60 * 15 # 15 minutes
while True:
await asyncio.sleep(timer)
e_client = app.email_server().connect_gmail()
e_server = app.email_server().connect_smtp()
async def process_mail(e_client, e_server):
""" Processes email.
Routes are defined in "Process_routes.py"
"""
while True:
await asyncio.sleep(0.1)
# Try to read message. If error, reconnect and try again.
try:
UNREAD_MSG = app.email_server().get_unread_mail(e_client=e_client, inbox_name='Inbox')
except Exception as e:
# Refresh connection and read msg again
e_client = app.email_server().connect_gmail()
e_server = app.email_server().connect_smtp()
UNREAD_MSG = app.email_server().get_unread_mail(e_client=e_client, inbox_name='Inbox')
# Process message
routes.PROCESS_UNREAD_MSG(
unread_email=UNREAD_MSG,
email_client=e_client,
email_server=e_server,
)
if __name__=='__main__':
# Launch application
sys.stdout.write('Starting Robin-Texts')
# Log
LOGGER = logger.log() # initialize class
LOG = LOGGER.log # log
LOG.info('Log initialized')
# Async processes
try:
# Initial load
rh.app().connect_robinhood()
e_client = app.email_server().connect_gmail()
e_server = app.email_server().connect_smtp()
# Create tasks
APP = asyncio.get_event_loop() # scheduler
APP.create_task(rh_login()) # refresh RH connection
APP.create_task(email_connect(e_client, e_server)) # refresh email connections
APP.create_task(process_mail(e_client, e_server))
# Launch tasks
APP.run_forever()
except KeyboardInterrupt:
LOG.exception('EXIT -- Keyboard interruption')
except Exception as error:
LOG.exception('ERROR --{}'.format(error))
# Close application
APP.close() # close out
LOG.info('Closing log')
LOGGER.close() | APP.py | import config as config # configurables file
import os, sys
import datetime as dt
import asyncio
import smtplib
import RH.Reports.RH_functions as rh # robinhood processes
import RH.Reports.APP_functions as app # application processes
import RH.Process_routes as routes # routes
import RH.Log.logger as logger
async def rh_login():
""" Login to Robinhood every 15-minutes.
"""
timer = 60 * 15 # 15 minutes
while True:
await asyncio.sleep(timer)
rh.app().connect_robinhood() # connect to rH
async def email_connect(e_client, e_server):
""" Refreshes email client and server objects.
"""
timer = 60 * 15 # 15 minutes
while True:
await asyncio.sleep(timer)
e_client = app.email_server().connect_gmail()
e_server = app.email_server().connect_smtp()
async def process_mail(e_client, e_server):
""" Processes email.
Routes are defined in "Process_routes.py"
"""
while True:
await asyncio.sleep(0.1)
# Try to read message. If error, reconnect and try again.
try:
UNREAD_MSG = app.email_server().get_unread_mail(e_client=e_client, inbox_name='Inbox')
except Exception as e:
# Refresh connection and read msg again
e_client = app.email_server().connect_gmail()
e_server = app.email_server().connect_smtp()
UNREAD_MSG = app.email_server().get_unread_mail(e_client=e_client, inbox_name='Inbox')
# Process message
routes.PROCESS_UNREAD_MSG(
unread_email=UNREAD_MSG,
email_client=e_client,
email_server=e_server,
)
if __name__=='__main__':
# Launch application
sys.stdout.write('Starting Robin-Texts')
# Log
LOGGER = logger.log() # initialize class
LOG = LOGGER.log # log
LOG.info('Log initialized')
# Async processes
try:
# Initial load
rh.app().connect_robinhood()
e_client = app.email_server().connect_gmail()
e_server = app.email_server().connect_smtp()
# Create tasks
APP = asyncio.get_event_loop() # scheduler
APP.create_task(rh_login()) # refresh RH connection
APP.create_task(email_connect(e_client, e_server)) # refresh email connections
APP.create_task(process_mail(e_client, e_server))
# Launch tasks
APP.run_forever()
except KeyboardInterrupt:
LOG.exception('EXIT -- Keyboard interruption')
except Exception as error:
LOG.exception('ERROR --{}'.format(error))
# Close application
APP.close() # close out
LOG.info('Closing log')
LOGGER.close() | 0.151843 | 0.052912 |
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.functional import cached_property
class People(models.Model):
entity = models.ForeignKey(
'register.Entity',
related_name='%(class)s' + 's',
on_delete=models.CASCADE,
)
first_name = models.CharField(max_length=40)
last_name = models.CharField(max_length=40)
email = models.EmailField(max_length=200, null=True, blank=True, db_index=True)
birth_date = models.DateField(db_index=True)
doc = models.CharField(max_length=20, null=True, blank=True, db_index=True)
home_phone_number = models.CharField(max_length=20, null=True, blank=True)
cell_phone_number = models.CharField(max_length=20, null=True, blank=True)
added_by = models.ForeignKey(
settings.AUTH_USER_MODEL,
related_name='created_%(class)s' + 's',
on_delete=models.SET_NULL,
null=True,
blank=True,
editable=False
)
address = models.ForeignKey(
'register.Address',
related_name='%(class)s' + 's',
on_delete=models.SET_NULL,
null=True,
blank=True
)
date_added = models.DateTimeField(auto_now_add=True)
date_changed = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
@cached_property
def full_name(self):
return self.first_name + ' ' + self.last_name
@cached_property
def phone_number(self):
return self.cell_phone_number or self.home_phone_number
@cached_property
def public_id(self):
return '{date}{id:04}'.format(
date=self.date_added.strftime('%y%m%d'),
id=self.id,
)
def __str__(self):
return self.first_name
class Patient(People):
HOLDER = 'holder'
DEPENDENT = 'dependent'
TYPE_CHOICES = (
(HOLDER, 'Holder'),
(DEPENDENT, 'Dependent')
)
AFFILIATED = 'affiliated'
DISAFFILIATED = 'disaffiliated'
ABEYANCE = 'abeyance'
STATUS_CHOICES = (
(AFFILIATED, 'Affiliated'),
(DISAFFILIATED, 'Disaffiliated'),
(ABEYANCE, 'Abeyance'),
)
type = models.CharField(max_length=20, choices=TYPE_CHOICES, db_index=True)
status = models.CharField(max_length=20, choices=STATUS_CHOICES, db_index=True)
holder = models.ForeignKey(
'people.Patient',
related_name='dependents',
null=True,
blank=True,
on_delete=models.SET_NULL
)
def clean(self):
if self.type == self.DEPENDENT and not self.holder:
raise ValidationError('Holder must be selected for dependents.')
class Professional(People):
ACTIVE = 'active'
INACTIVE = 'inactive'
ON_HOLD = 'on_hold'
STATUS_CHOICES = (
(ACTIVE, 'Active'),
(INACTIVE, 'Inactive'),
(ON_HOLD, 'On hold'),
)
transfer_value = models.DecimalField(max_digits=8, decimal_places=2)
registration_number = models.CharField(max_length=20, unique=True, db_index=True)
service_phone_number = models.CharField(max_length=20, null=True, blank=True)
status = models.CharField(max_length=20, choices=STATUS_CHOICES, db_index=True)
procedures = models.ManyToManyField(
'attendance.Procedure',
related_name='professionals',
)
@cached_property
def phone_number(self):
return self.service_phone_number or self.cell_phone_number or self.home_phone_number | people/models.py | from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.functional import cached_property
class People(models.Model):
entity = models.ForeignKey(
'register.Entity',
related_name='%(class)s' + 's',
on_delete=models.CASCADE,
)
first_name = models.CharField(max_length=40)
last_name = models.CharField(max_length=40)
email = models.EmailField(max_length=200, null=True, blank=True, db_index=True)
birth_date = models.DateField(db_index=True)
doc = models.CharField(max_length=20, null=True, blank=True, db_index=True)
home_phone_number = models.CharField(max_length=20, null=True, blank=True)
cell_phone_number = models.CharField(max_length=20, null=True, blank=True)
added_by = models.ForeignKey(
settings.AUTH_USER_MODEL,
related_name='created_%(class)s' + 's',
on_delete=models.SET_NULL,
null=True,
blank=True,
editable=False
)
address = models.ForeignKey(
'register.Address',
related_name='%(class)s' + 's',
on_delete=models.SET_NULL,
null=True,
blank=True
)
date_added = models.DateTimeField(auto_now_add=True)
date_changed = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
@cached_property
def full_name(self):
return self.first_name + ' ' + self.last_name
@cached_property
def phone_number(self):
return self.cell_phone_number or self.home_phone_number
@cached_property
def public_id(self):
return '{date}{id:04}'.format(
date=self.date_added.strftime('%y%m%d'),
id=self.id,
)
def __str__(self):
return self.first_name
class Patient(People):
HOLDER = 'holder'
DEPENDENT = 'dependent'
TYPE_CHOICES = (
(HOLDER, 'Holder'),
(DEPENDENT, 'Dependent')
)
AFFILIATED = 'affiliated'
DISAFFILIATED = 'disaffiliated'
ABEYANCE = 'abeyance'
STATUS_CHOICES = (
(AFFILIATED, 'Affiliated'),
(DISAFFILIATED, 'Disaffiliated'),
(ABEYANCE, 'Abeyance'),
)
type = models.CharField(max_length=20, choices=TYPE_CHOICES, db_index=True)
status = models.CharField(max_length=20, choices=STATUS_CHOICES, db_index=True)
holder = models.ForeignKey(
'people.Patient',
related_name='dependents',
null=True,
blank=True,
on_delete=models.SET_NULL
)
def clean(self):
if self.type == self.DEPENDENT and not self.holder:
raise ValidationError('Holder must be selected for dependents.')
class Professional(People):
ACTIVE = 'active'
INACTIVE = 'inactive'
ON_HOLD = 'on_hold'
STATUS_CHOICES = (
(ACTIVE, 'Active'),
(INACTIVE, 'Inactive'),
(ON_HOLD, 'On hold'),
)
transfer_value = models.DecimalField(max_digits=8, decimal_places=2)
registration_number = models.CharField(max_length=20, unique=True, db_index=True)
service_phone_number = models.CharField(max_length=20, null=True, blank=True)
status = models.CharField(max_length=20, choices=STATUS_CHOICES, db_index=True)
procedures = models.ManyToManyField(
'attendance.Procedure',
related_name='professionals',
)
@cached_property
def phone_number(self):
return self.service_phone_number or self.cell_phone_number or self.home_phone_number | 0.560734 | 0.108095 |
from marshmallow import EXCLUDE
from marshmallow_jsonapi import Schema as __Schema, SchemaOpts as __SchemaOpts
from starlette.applications import Starlette
class BaseSchemaOpts(__SchemaOpts):
""" An adaptation of marshmallow-jsonapi Flask SchemaOpts for use with Starlette. """
def __init__(self, meta, *args, **kwargs):
if getattr(meta, 'self_url', None):
raise ValueError(
'Use `self_route` instead of `self_url` when using the Starlette extension.'
)
if getattr(meta, 'self_url_kwargs', None):
raise ValueError(
'Use `self_route_kwargs` instead of `self_url_kwargs` when using the Starlette extension.'
)
if getattr(meta, 'self_url_many', None):
raise ValueError(
'Use `self_route_many` instead of `self_url_many` when using the Starlette extension.'
)
if (
getattr(meta, 'self_route_kwargs', None)
and not getattr(meta, 'self_route', None)
):
raise ValueError(
'Must specify `self_route` Meta option when `self_route_kwargs` is specified.'
)
# Transfer Starlette options to URL options
meta.self_url = getattr(meta, 'self_route', None)
meta.self_url_kwargs = getattr(meta, 'self_route_kwargs', None)
meta.self_url_many = getattr(meta, 'self_route_many', None)
super().__init__(meta, *args, **kwargs)
self.unknown = getattr(meta, 'unknown', EXCLUDE)
class JSONAPISchema(__Schema):
OPTIONS_CLASS = BaseSchemaOpts
class Meta:
"""
Options object that takes the same options as `marshmallow-jsonapi.Schema`,
but instead of ``self_url``, ``self_url_kwargs`` and ``self_url_many``
has the following options to resolve the URLs from Starlette route names:
* ``self_route`` - Route name to resolve the self URL link from.
* ``self_route_kwargs`` - Replacement fields for ``self_route``. String
attributes enclosed in ``< >`` will be interpreted as attributes to
pull from the schema data.
* ``self_route_many`` - Route name to resolve the self URL link when a
collection of resources is returned.
"""
pass
def __init__(self, *args, **kwargs):
self.app = kwargs.pop('app', None) # type: Starlette
super().__init__(*args, **kwargs)
def generate_url(self, link, **kwargs):
if self.app and isinstance(self.app, Starlette) and link:
return self.app.url_path_for(link, **kwargs)
return None
def get_resource_links(self, item):
""" Override the marshmallow-jsonapi implementation to check for None links. """
links = super().get_resource_links(item)
if links and isinstance(links, dict) and links.get('self'):
return links
return None | starlette_jsonapi/schema.py | from marshmallow import EXCLUDE
from marshmallow_jsonapi import Schema as __Schema, SchemaOpts as __SchemaOpts
from starlette.applications import Starlette
class BaseSchemaOpts(__SchemaOpts):
    """Schema options mapping Starlette route names onto marshmallow-jsonapi
    URL options.

    Rejects the raw ``self_url*`` Meta options in favour of their
    ``self_route*`` counterparts, then copies the route options into the URL
    options that the base class understands.
    """

    def __init__(self, meta, *args, **kwargs):
        # Reject each legacy URL option with its dedicated message, in the
        # same order the original checks ran.
        for legacy_option, message in (
            ('self_url',
             'Use `self_route` instead of `self_url` when using the Starlette extension.'),
            ('self_url_kwargs',
             'Use `self_route_kwargs` instead of `self_url_kwargs` when using the Starlette extension.'),
            ('self_url_many',
             'Use `self_route_many` instead of `self_url_many` when using the Starlette extension.'),
        ):
            if getattr(meta, legacy_option, None):
                raise ValueError(message)
        route = getattr(meta, 'self_route', None)
        route_kwargs = getattr(meta, 'self_route_kwargs', None)
        if route_kwargs and not route:
            raise ValueError(
                'Must specify `self_route` Meta option when `self_route_kwargs` is specified.'
            )
        # Map the Starlette route options onto the URL options the base
        # class reads.
        meta.self_url = route
        meta.self_url_kwargs = route_kwargs
        meta.self_url_many = getattr(meta, 'self_route_many', None)
        super().__init__(meta, *args, **kwargs)
        # Default to ignoring unknown fields during deserialization.
        self.unknown = getattr(meta, 'unknown', EXCLUDE)
class JSONAPISchema(__Schema):
OPTIONS_CLASS = BaseSchemaOpts
class Meta:
    """
    Options object that takes the same options as `marshmallow-jsonapi.Schema`,
    but instead of ``self_url``, ``self_url_kwargs`` and ``self_url_many``
    has the following options to resolve the URLs from Starlette route names:
    * ``self_route`` - Route name to resolve the self URL link from.
    * ``self_route_kwargs`` - Replacement fields for ``self_route``. String
      attributes enclosed in ``< >`` will be interpreted as attributes to
      pull from the schema data.
    * ``self_route_many`` - Route name to resolve the self URL link when a
      collection of resources is returned.
    """
    # Redundant `pass` removed: the docstring alone is a valid class body.
def __init__(self, *args, **kwargs):
    """Detach the optional ``app`` keyword (the Starlette application)
    before handing the remaining arguments to marshmallow-jsonapi."""
    app = kwargs.pop('app', None)  # type: Starlette
    self.app = app
    super().__init__(*args, **kwargs)
def generate_url(self, link, **kwargs):
    """Turn a Starlette route name into a URL path.

    Returns None when no Starlette app is attached or ``link`` is empty.
    """
    app = self.app
    if not app or not isinstance(app, Starlette) or not link:
        return None
    return app.url_path_for(link, **kwargs)
def get_resource_links(self, item):
""" Override the marshmallow-jsonapi implementation to check for None links. """
links = super().get_resource_links(item)
if links and isinstance(links, dict) and links.get('self'):
return links
return None | 0.797281 | 0.086903 |
from __future__ import annotations
import typing as t
from dataclasses import dataclass, field
from arg_services.graph.v1 import graph_pb2
from arguebuf.models import Userdata
from arguebuf.models.metadata import Metadata
from arguebuf.schema import ova
from arguebuf.services import dt, utils
from pendulum.datetime import DateTime
@dataclass()
class Resource:
text: t.Any  # parsed text content; concrete type depends on the `nlp` callable
title: t.Optional[str] = None  # human-readable resource title
source: t.Optional[str] = None  # origin of the text (e.g. document source)
timestamp: t.Optional[DateTime] = None  # creation/import time, if known
metadata: Metadata = field(default_factory=Metadata)  # structured metadata
userdata: Userdata = field(default_factory=dict)  # free-form user mapping
_id: str = field(default_factory=utils.uuid)  # stable id, exposed via `id`
@property
def id(self) -> str:
    """Read-only identifier assigned at construction time."""
    return self._id
@property
def plain_text(self) -> str:
    """Generate a string from Resource object."""
    # utils.xstr handles the str conversion (presumably None-safe — confirm
    # against arguebuf.services.utils).
    return utils.xstr(self.text)
def to_protobuf(self) -> graph_pb2.Resource:
    """Export Resource object into a Graph's Resource object in PROTOBUF format."""
    obj = graph_pb2.Resource(
        text=self.plain_text, metadata=self.metadata.to_protobuf()
    )
    obj.userdata.update(self.userdata)
    # Optional scalars are only written when truthy; protobuf fields
    # cannot carry None.
    if title := self.title:
        obj.title = title
    if source := self.source:
        obj.source = source
    # NOTE(review): self.timestamp is not exported here even though
    # from_protobuf reads obj.timestamp — confirm whether intentional.
    return obj
@classmethod
def from_protobuf(
    cls,
    id: str,
    obj: graph_pb2.Resource,
    nlp: t.Optional[t.Callable[[str], t.Any]] = None,
) -> Resource:
    """Generate Resource object from PROTOBUF format Graph's Resource object."""
    # Positional arguments must follow the dataclass field order:
    # text, title, source, timestamp, metadata, userdata, _id.
    return cls(
        utils.parse(obj.text, nlp),
        obj.title,
        obj.source,
        dt.from_protobuf(obj.timestamp),
        Metadata.from_protobuf(obj.metadata),
        dict(obj.userdata.items()),
        id,
    )
@classmethod
def from_ova(
cls, obj: ova.Analysis, nlp: t.Optional[t.Callable[[str], t.Any]]
) -> Resource:
return cls(
utils.parse(obj.get("plain_txt"), nlp),
obj.get("documentTitle"),
obj.get("documentSource"),
dt.from_format(obj.get("documentDate"), ova.DATE_FORMAT_ANALYSIS),
) | arguebuf/models/resource.py | from __future__ import annotations
import typing as t
from dataclasses import dataclass, field
from arg_services.graph.v1 import graph_pb2
from arguebuf.models import Userdata
from arguebuf.models.metadata import Metadata
from arguebuf.schema import ova
from arguebuf.services import dt, utils
from pendulum.datetime import DateTime
@dataclass()
class Resource:
text: t.Any  # parsed text content; concrete type depends on the `nlp` callable
title: t.Optional[str] = None  # human-readable resource title
source: t.Optional[str] = None  # origin of the text (e.g. document source)
timestamp: t.Optional[DateTime] = None  # creation/import time, if known
metadata: Metadata = field(default_factory=Metadata)  # structured metadata
userdata: Userdata = field(default_factory=dict)  # free-form user mapping
_id: str = field(default_factory=utils.uuid)  # stable id, exposed via `id`
@property
def id(self) -> str:
    """Stable identifier assigned when the resource was created."""
    identifier = self._id
    return identifier
@property
def plain_text(self) -> str:
    """Plain-text rendering of the parsed ``text`` payload."""
    raw = self.text
    # utils.xstr performs the str conversion (presumably None-safe — confirm
    # against arguebuf.services.utils).
    return utils.xstr(raw)
def to_protobuf(self) -> graph_pb2.Resource:
    """Serialize this resource into a ``graph_pb2.Resource`` message."""
    message = graph_pb2.Resource(
        text=self.plain_text, metadata=self.metadata.to_protobuf()
    )
    message.userdata.update(self.userdata)
    # Optional scalars are only assigned when truthy; protobuf fields
    # cannot carry None.
    if self.title:
        message.title = self.title
    if self.source:
        message.source = self.source
    # NOTE(review): self.timestamp is not serialized here even though
    # from_protobuf reads obj.timestamp — confirm whether intentional.
    return message
@classmethod
def from_protobuf(
    cls,
    id: str,
    obj: graph_pb2.Resource,
    nlp: t.Optional[t.Callable[[str], t.Any]] = None,
) -> Resource:
    """Rebuild a Resource from its protobuf message and an explicit ``id``."""
    parsed_text = utils.parse(obj.text, nlp)
    timestamp = dt.from_protobuf(obj.timestamp)
    metadata = Metadata.from_protobuf(obj.metadata)
    userdata = dict(obj.userdata.items())
    # Positional arguments follow the dataclass field order.
    return cls(parsed_text, obj.title, obj.source, timestamp, metadata, userdata, id)
@classmethod
def from_ova(
cls, obj: ova.Analysis, nlp: t.Optional[t.Callable[[str], t.Any]]
) -> Resource:
return cls(
utils.parse(obj.get("plain_txt"), nlp),
obj.get("documentTitle"),
obj.get("documentSource"),
dt.from_format(obj.get("documentDate"), ova.DATE_FORMAT_ANALYSIS),
) | 0.898461 | 0.16872 |
import certifi
import os
import socket
import ssl
from functools import lru_cache
from os.path import dirname, abspath
from pydantic import BaseSettings
host_name = socket.gethostname()
class Settings(BaseSettings):
    """
    application settings
    """
    # uvicorn settings
    uvicorn_app: str = "bluebutton.asgi:app"
    uvicorn_host: str = "0.0.0.0"
    uvicorn_port: int = 5200
    uvicorn_reload: bool = False
    # general certificate settings
    # path to "standard" CA certificates
    certificate_authority_path: str = certifi.where()
    certificate_verify: bool = False
    # bluebutton package settings
    bluebutton_ca_file: str = certifi.where()
    # NOTE(review): default None contradicts the `str` annotation; should be
    # Optional[str] (pydantic v1 silently widens it).
    bluebutton_ca_path: str = None
    bluebutton_cert_name: str = "lfh-bluebutton-client.pem"
    bluebutton_cert_key_name: str = "lfh-bluebutton-client.key"
    bluebutton_config_directory: str = "/home/lfh/bluebutton/config"
    bluebutton_logging_config_path: str = "logging.yaml"
    bluebutton_rate_limit: str = "5/second"
    bluebutton_timing_enabled: bool = False
    # LFH Blue Button 2.0 Client Endpoint
    # NOTE(review): computed once at class-definition time from the *default*
    # uvicorn_port; an environment override of uvicorn_port will not update it.
    bluebutton_authorize_callback: str = f"https://localhost:{uvicorn_port}/bluebutton/authorize_callback"
    # CMS Blue Button 2.0 Endpoints and settings
    cms_authorize_url: str = "https://sandbox.bluebutton.cms.gov/v2/o/authorize/"
    cms_token_url: str = "https://sandbox.bluebutton.cms.gov/v2/o/token/"
    cms_base_url: str = "https://sandbox.bluebutton.cms.gov/v2/fhir/"
    cms_scopes: str = "patient/Patient.read patient/Coverage.read patient/ExplanationOfBenefit.read"
    # SECURITY(review): client credentials belong in the environment / .env
    # file, not in source control.
    cms_client_id: str = "kAMZfgm43Y27HhCTJ2sZyttdV5pFvGyFvaboXqEf"
    cms_client_secret: str = "<KEY>"
    return_cms_result: bool = False
    # LFH connect FHIR url
    # NOTE(review): missing annotation — pydantic v1 treats this as a plain
    # class attribute, so it cannot be overridden via the environment.
    lfh_fhir_url = "https://localhost:5000/fhir"
    class Config:
        # names are case-insensitive; values may also come from the .env
        # file two directories above this module
        case_sensitive = False
        env_file = os.path.join(dirname(dirname(abspath(__file__))), ".env")
@lru_cache()
def get_settings() -> Settings:
    """Returns the settings instance"""
    # lru_cache makes this a process-wide singleton; environment variables
    # are therefore read only on the first call.
    return Settings()
@lru_cache()
def get_ssl_context(ssl_purpose: ssl.Purpose) -> ssl.SSLContext:
"""
Returns a SSL Context configured for server auth with the certificate path
:param ssl_purpose:
"""
settings = get_settings()
ssl_context = ssl.create_default_context(ssl_purpose)
ssl_context.load_verify_locations(
cafile=settings.bluebutton_ca_file, capath=settings.bluebutton_ca_path
)
return ssl_context | blue-button.py/bluebutton/config.py | import certifi
import os
import socket
import ssl
from functools import lru_cache
from os.path import dirname, abspath
from typing import Optional

from pydantic import BaseSettings
host_name = socket.gethostname()
class Settings(BaseSettings):
    """
    application settings

    Field values may be overridden via environment variables or the ``.env``
    file configured in ``Config`` below (names are case-insensitive).
    """
    # uvicorn settings
    uvicorn_app: str = "bluebutton.asgi:app"
    uvicorn_host: str = "0.0.0.0"
    uvicorn_port: int = 5200
    uvicorn_reload: bool = False
    # general certificate settings
    # path to "standard" CA certificates
    certificate_authority_path: str = certifi.where()
    certificate_verify: bool = False
    # bluebutton package settings
    bluebutton_ca_file: str = certifi.where()
    # Fixed: the default is None, so the annotation must be Optional[str]
    # (pydantic v1 only tolerated the bare `str` annotation by implicitly
    # widening it).
    bluebutton_ca_path: Optional[str] = None
    bluebutton_cert_name: str = "lfh-bluebutton-client.pem"
    bluebutton_cert_key_name: str = "lfh-bluebutton-client.key"
    bluebutton_config_directory: str = "/home/lfh/bluebutton/config"
    bluebutton_logging_config_path: str = "logging.yaml"
    bluebutton_rate_limit: str = "5/second"
    bluebutton_timing_enabled: bool = False
    # LFH Blue Button 2.0 Client Endpoint
    # NOTE(review): computed once at class-definition time from the *default*
    # uvicorn_port; an environment override of uvicorn_port will not update it.
    bluebutton_authorize_callback: str = f"https://localhost:{uvicorn_port}/bluebutton/authorize_callback"
    # CMS Blue Button 2.0 Endpoints and settings
    cms_authorize_url: str = "https://sandbox.bluebutton.cms.gov/v2/o/authorize/"
    cms_token_url: str = "https://sandbox.bluebutton.cms.gov/v2/o/token/"
    cms_base_url: str = "https://sandbox.bluebutton.cms.gov/v2/fhir/"
    cms_scopes: str = "patient/Patient.read patient/Coverage.read patient/ExplanationOfBenefit.read"
    # SECURITY(review): client credentials belong in the environment / .env
    # file, not in source control; these defaults are sandbox values.
    cms_client_id: str = "kAMZfgm43Y27HhCTJ2sZyttdV5pFvGyFvaboXqEf"
    cms_client_secret: str = "<KEY>"
    return_cms_result: bool = False
    # LFH connect FHIR url
    # Fixed: the annotation was missing, so pydantic v1 treated this as a
    # plain class attribute rather than a configurable settings field.
    lfh_fhir_url: str = "https://localhost:5000/fhir"
    class Config:
        # names are case-insensitive; values may also come from the .env
        # file two directories above this module
        case_sensitive = False
        env_file = os.path.join(dirname(dirname(abspath(__file__))), ".env")
@lru_cache()
def get_settings() -> Settings:
    """Returns the settings instance"""
    # Cached: the Settings object (and hence the environment) is read once
    # per process.
    return Settings()
@lru_cache()
def get_ssl_context(ssl_purpose: ssl.Purpose) -> ssl.SSLContext:
"""
Returns a SSL Context configured for server auth with the certificate path
:param ssl_purpose:
"""
settings = get_settings()
ssl_context = ssl.create_default_context(ssl_purpose)
ssl_context.load_verify_locations(
cafile=settings.bluebutton_ca_file, capath=settings.bluebutton_ca_path
)
return ssl_context | 0.394084 | 0.080864 |
from __future__ import unicode_literals
import frappe
from frappe.utils import cint
import pandas
from operator import itemgetter
def execute(filters=None):
    """Report entry point called by Frappe; delegates to get_data.

    Returns (columns, data) suitable for Frappe's script-report renderer.
    """
    return get_data(filters)
def get_conditions(filters):
    """Build the SQL WHERE clause for the optional report filters.

    Only %(name)s placeholders are embedded; frappe.db.sql later substitutes
    the actual values from the same ``filters`` mapping, so no SQL is built
    from raw values.
    """
    where_clause = []
    if filters.get("from_date"):
        where_clause += ["date(comm.creation) >= %(from_date)s"]
    if filters.get("to_date"):
        where_clause += ["date(comm.creation) <= %(to_date)s"]
    if filters.get("communication_medium"):
        where_clause += ["comm.communication_medium = %(communication_medium)s"]
    # Empty string (not " where ") when no filters are active.
    return " where " + " and ".join(where_clause) if where_clause else ""
def get_data(filters):
filters.today = frappe.utils.getdate()
data = frappe.db.sql(
"""
with fn as
(
select
dl.link_name customer, cml.link_name contact,
datediff(%(today)s,communication_date) days_since_last_communication, communication_medium
from
tabCommunication comm
inner join `tabCommunication Link` cml on cml.parent = comm.name
and cml.link_doctype = 'Contact'
left outer join `tabDynamic Link` dl on dl.link_doctype = 'Customer'
and dl.parenttype = 'Contact'
and dl.parent = cml.link_name
{where_conditions}
and not exists (
select 1 from tabEvent e
where comm.reference_doctype = 'Event'
and e.name = comm.reference_name and e.status = 'Open'
)
)
select
fn.customer, fn.contact, communication_medium, min(fn.days_since_last_communication) days_since_last_communication
from
fn
where
fn.customer is not null
group by
fn.customer, fn.contact, communication_medium
""".format(
where_conditions=get_conditions(filters)
),
filters,
as_dict=True,
# debug=True,
)
if not data:
return [], []
df = pandas.DataFrame.from_records(data)
df1 = pandas.pivot_table(
df,
values="days_since_last_communication",
index=["customer", "contact"],
columns=["communication_medium"],
aggfunc="count",
margins=True,
margins_name="Total",
)
df1.drop(index="Total", axis=0, inplace=True)
df2 = pandas.pivot_table(
df,
values="days_since_last_communication",
index=["customer", "contact"],
aggfunc=min,
)
df3 = df1.join(df2, rsuffix="__")
df3 = df3.reset_index().fillna(0)
columns = [
dict(
label="Customer",
fieldname="customer",
fieldtype="Link",
options="Customer",
width=165,
),
dict(
label="Contact",
fieldname="contact",
fieldtype="Link",
options="Contact",
width=165,
),
]
columns += [
dict(
label="Days Since Last Communication"
if col == "days_since_last_communication"
else col,
fieldname=col,
fieldtype="Int",
width=95,
)
for col in df3.columns
if not col in ["customer", "contact"]
]
data = df3.to_dict("records")
return columns, data | npro/npro/report/customer_contactwise_communication_analysis/customer_contactwise_communication_analysis.py |
from __future__ import unicode_literals
import frappe
from frappe.utils import cint
import pandas
from operator import itemgetter
def execute(filters=None):
    """Frappe script-report entry point; all work happens in get_data.

    Returns (columns, data) for the report renderer.
    """
    return get_data(filters)
def get_conditions(filters):
    """Build the SQL WHERE clause for the optional report filters.

    Only parameter placeholders are embedded; frappe.db.sql substitutes the
    actual values from the same ``filters`` mapping.
    """
    clause_by_filter = (
        ("from_date", "date(comm.creation) >= %(from_date)s"),
        ("to_date", "date(comm.creation) <= %(to_date)s"),
        ("communication_medium", "comm.communication_medium = %(communication_medium)s"),
    )
    active = [clause for name, clause in clause_by_filter if filters.get(name)]
    if not active:
        return ""
    return " where " + " and ".join(active)
def get_data(filters):
filters.today = frappe.utils.getdate()
data = frappe.db.sql(
"""
with fn as
(
select
dl.link_name customer, cml.link_name contact,
datediff(%(today)s,communication_date) days_since_last_communication, communication_medium
from
tabCommunication comm
inner join `tabCommunication Link` cml on cml.parent = comm.name
and cml.link_doctype = 'Contact'
left outer join `tabDynamic Link` dl on dl.link_doctype = 'Customer'
and dl.parenttype = 'Contact'
and dl.parent = cml.link_name
{where_conditions}
and not exists (
select 1 from tabEvent e
where comm.reference_doctype = 'Event'
and e.name = comm.reference_name and e.status = 'Open'
)
)
select
fn.customer, fn.contact, communication_medium, min(fn.days_since_last_communication) days_since_last_communication
from
fn
where
fn.customer is not null
group by
fn.customer, fn.contact, communication_medium
""".format(
where_conditions=get_conditions(filters)
),
filters,
as_dict=True,
# debug=True,
)
if not data:
return [], []
df = pandas.DataFrame.from_records(data)
df1 = pandas.pivot_table(
df,
values="days_since_last_communication",
index=["customer", "contact"],
columns=["communication_medium"],
aggfunc="count",
margins=True,
margins_name="Total",
)
df1.drop(index="Total", axis=0, inplace=True)
df2 = pandas.pivot_table(
df,
values="days_since_last_communication",
index=["customer", "contact"],
aggfunc=min,
)
df3 = df1.join(df2, rsuffix="__")
df3 = df3.reset_index().fillna(0)
columns = [
dict(
label="Customer",
fieldname="customer",
fieldtype="Link",
options="Customer",
width=165,
),
dict(
label="Contact",
fieldname="contact",
fieldtype="Link",
options="Contact",
width=165,
),
]
columns += [
dict(
label="Days Since Last Communication"
if col == "days_since_last_communication"
else col,
fieldname=col,
fieldtype="Int",
width=95,
)
for col in df3.columns
if not col in ["customer", "contact"]
]
data = df3.to_dict("records")
return columns, data | 0.66769 | 0.177526 |
import json
import datetime
import subprocess as sp
import boto3
import time
import sys
from gpiozero import LED
from gpiozero import Button
# Red led = gpio 24
# Green led = gpio 18
# Button = gpio 3
class RpiHandler:
    """Toggles an AirPlay (RPiPlay) server from a GPIO button press and logs
    each session transition to a DynamoDB table.

    Wiring (BCM numbering): green LED = GPIO 18, red LED = GPIO 24,
    button = GPIO 3.
    """
    def __init__(self, table_name, username):
        self.state = False  # True while the AirPlay server is running
        self.start_time = datetime.datetime.now()
        self.green_led = LED(18)
        self.red_led = LED(24)
        self.button = Button(3)
        self.counter = 0  # button presses / session toggles
        self.restarts = 0  # completed start/stop cycles
        self.table_name = table_name
        self.user = username
        self.session_status = ""  # "active" / "inactive"
    # build one DynamoDB log item describing the current session state
    def generate_log(self):
        """Build one DynamoDB log item describing the current session state."""
        time_delta = datetime.datetime.now() - self.start_time
        # FIXME(review): the JSON document is assembled by string
        # concatenation, so a username (or status) containing a double quote
        # or backslash makes json.loads fail. Build the dict directly instead.
        data_str = '{"RpiDateTime":"00:00:0123","RpiUser":"'+self.user+'","RpiSession":"'+str(self.counter)+'","RpiSessionStatus": "'+self.session_status+'","RpiDuration":"00:00:0123","RpiFault": "none","RpiRestarts":"'+str(self.restarts)+'"}'
        data_json = json.loads(data_str)
        data_json["RpiDateTime"] = str(self.start_time)
        data_json["RpiDuration"] = str(time_delta)
        return data_json
    def handle(self):
        """Button callback: toggle the AirPlay server state, drive the LEDs,
        and write a log item to DynamoDB."""
        print("button press")
        self.state = not self.state
        self.counter+=1
        table = self.dynamo_get_table(self.table_name)
        if self.state:
            # session start: green LED on
            print("Green LED on.")
            self.green_led.on()
            self.red_led.off()
            self.session_status = "active"
            # construct and send the "active" log
            print("Sending initial log to AWS...")
            data = self.generate_log()
            print("generated log: ", data)
            self.dynamo_put(table, data)
            # blink the green LED once to acknowledge the upload
            self.green_led.off()
            time.sleep(.5)
            self.green_led.on()
            # start AirPlay server as background process
            print("Starting AirPlay Server...")
            sp.Popen("/home/pi/Downloads/RPiPlay/build/rpiplay", shell=True, stdout=sp.PIPE, stderr=sp.PIPE)
            print("Check your IPhone for RPiPlay in the AirPlay menu.")
            print("To turn off AirPlay Server press the button again.")
        else:
            # session stop: red LED on
            print("Red LED on.")
            self.green_led.off()
            self.red_led.on()
            self.session_status = "inactive"
            # stop airplay server
            print("Stopping AirPlay Server...")
            # NOTE(review): `cmd` is never used; the list form below is what runs.
            cmd = "pkill -f rpiplay"
            sp.run(["pkill","-f","rpiplay"])
            print("AirPlay server stopped.")
            # construct and send the "inactive" log
            print("Sending concluding log to AWS...")
            data = self.generate_log()
            print("generated log: ", data)
            self.dynamo_put(table, data)
            print("To start the AirPlay server again press the button.")
            self.restarts+=1
    def dynamo_get_table(self, name):
        """Return a DynamoDB Table resource for ``name``.

        Table attributes are lazy-loaded; printing creation_date_time below
        forces one request, so it also validates the table and credentials.
        """
        dynamodb = boto3.resource('dynamodb')
        table = dynamodb.Table(name)
        print(table.creation_date_time)
        return table
    # put a single log item into the given table
    def dynamo_put(self, table, data):
        """Write ``data`` as one item into ``table`` and echo the response."""
        request = table.put_item(Item=data)
        print(request)
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
print("Welcome to the RPi and AWS controller!")
print("press your button to start the AirPlay server")
flag = True
username = "test user"
if len(sys.argv) > 1:
username = str(sys.argv[1])
rpi = RpiHandler("rpi-aws-log", username)
while(flag):
rpi.button.when_pressed = rpi.handle | rpi/gpiocontroller.py |
import json
import datetime
import subprocess as sp
import boto3
import time
import sys
from gpiozero import LED
from gpiozero import Button
# Red led = gpio 24
# Green led = gpio 18
# Button = gpio 3
class RpiHandler:
    """Toggle an AirPlay (RPiPlay) server with a GPIO button and log sessions.

    Hardware (BCM numbering): green LED on GPIO 18, red LED on GPIO 24,
    push button on GPIO 3. Every toggle writes one log item to a DynamoDB
    table describing the session transition.
    """

    def __init__(self, table_name, username):
        self.state = False  # True while the AirPlay server is running
        self.start_time = datetime.datetime.now()
        self.green_led = LED(18)
        self.red_led = LED(24)
        self.button = Button(3)
        self.counter = 0  # session toggles so far
        self.restarts = 0  # completed start/stop cycles
        self.table_name = table_name
        self.user = username
        self.session_status = ""  # "active" / "inactive"

    def generate_log(self):
        """Build one DynamoDB log item for the current session state.

        Fixed: the item is now built as a dict instead of concatenating a
        JSON string and re-parsing it, so usernames containing quotes or
        backslashes no longer break json.loads.
        """
        time_delta = datetime.datetime.now() - self.start_time
        return {
            "RpiDateTime": str(self.start_time),
            "RpiUser": self.user,
            "RpiSession": str(self.counter),
            "RpiSessionStatus": self.session_status,
            "RpiDuration": str(time_delta),
            "RpiFault": "none",
            "RpiRestarts": str(self.restarts),
        }

    def handle(self):
        """Button callback: flip the server state, drive the LEDs, and log."""
        print("button press")
        self.state = not self.state
        self.counter += 1
        table = self.dynamo_get_table(self.table_name)
        if self.state:
            # Session start: green LED on, launch RPiPlay, log "active".
            print("Green LED on.")
            self.green_led.on()
            self.red_led.off()
            self.session_status = "active"
            print("Sending initial log to AWS...")
            data = self.generate_log()
            print("generated log: ", data)
            self.dynamo_put(table, data)
            # Blink the green LED once to acknowledge the upload.
            self.green_led.off()
            time.sleep(.5)
            self.green_led.on()
            # Start the AirPlay server as a background process.
            print("Starting AirPlay Server...")
            sp.Popen("/home/pi/Downloads/RPiPlay/build/rpiplay", shell=True, stdout=sp.PIPE, stderr=sp.PIPE)
            print("Check your IPhone for RPiPlay in the AirPlay menu.")
            print("To turn off AirPlay Server press the button again.")
        else:
            # Session stop: red LED on, kill RPiPlay, log "inactive".
            print("Red LED on.")
            self.green_led.off()
            self.red_led.on()
            self.session_status = "inactive"
            print("Stopping AirPlay Server...")
            # Fixed: dropped the unused `cmd` string; the list form runs.
            sp.run(["pkill", "-f", "rpiplay"])
            print("AirPlay server stopped.")
            print("Sending concluding log to AWS...")
            data = self.generate_log()
            print("generated log: ", data)
            self.dynamo_put(table, data)
            print("To start the AirPlay server again press the button.")
            self.restarts += 1

    def dynamo_get_table(self, name):
        """Return a lazily-loaded DynamoDB Table resource called ``name``.

        Printing creation_date_time forces one request, which also validates
        that the table exists and credentials work.
        """
        dynamodb = boto3.resource('dynamodb')
        table = dynamodb.Table(name)
        print(table.creation_date_time)
        return table

    def dynamo_put(self, table, data):
        """Write one log item to DynamoDB and echo the raw response."""
        response = table.put_item(Item=data)
        print(response)
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
print("Welcome to the RPi and AWS controller!")
print("press your button to start the AirPlay server")
flag = True
username = "test user"
if len(sys.argv) > 1:
username = str(sys.argv[1])
rpi = RpiHandler("rpi-aws-log", username)
while(flag):
rpi.button.when_pressed = rpi.handle | 0.275325 | 0.111121 |
class TstNode(object):
    """One node of a ternary search trie: a character code ``key``, an
    optional payload ``value`` for keys ending here, and left/mid/right
    subtree links."""

    key = None
    value = None
    mid = None
    left = None
    right = None

    def __init__(self, key=None, value=None, left=None, right=None, mid=None):
        # Plain assignment replaces the previous `if arg is not None`
        # boilerplate: every default is None, so attribute reads behave
        # exactly as before.
        self.key = key
        self.value = value
        self.mid = mid
        self.left = left
        self.right = right


def char_at(s, index):
    """Character code of ``s[index]``, or -1 once ``index`` is past the end.

    The -1 sentinel lets trie code compare an exhausted key against any real
    character code without special-casing.
    """
    if len(s) <= index:
        return -1
    return ord(s[index])


class TernarySearchTrie(object):
    """String-keyed symbol table backed by a ternary search trie."""

    root = None
    N = 0  # number of stored keys

    def put(self, key, value):
        """Store ``value`` under ``key`` (overwrites an existing value)."""
        self.root = self._put(self.root, key, value, 0)

    def _put(self, x, key, value, d):
        c = char_at(key, d)
        if x is None:
            x = TstNode(key=c, value=None)
        compared = c - x.key
        if compared < 0:
            # Left/right moves do not consume a character, so d is unchanged.
            x.left = self._put(x.left, key, value, d)
        elif compared > 0:
            x.right = self._put(x.right, key, value, d)
        elif len(key) - 1 > d:
            x.mid = self._put(x.mid, key, value, d + 1)
        else:
            if x.value is None:
                self.N += 1
            x.value = value
        return x

    def get(self, key):
        """Return the value stored under ``key``, or None."""
        x = self._get(self.root, key, 0)
        return None if x is None else x.value

    def _get(self, x, key, d):
        # Returns the terminal *node* for key's character path (it may carry
        # value None when the key is only a prefix of stored keys).
        if x is None:
            return None
        c = char_at(key, d)
        compared = c - x.key
        if compared < 0:
            return self._get(x.left, key, d)
        if compared > 0:
            return self._get(x.right, key, d)
        if len(key) - 1 > d:
            return self._get(x.mid, key, d + 1)
        return x

    def delete(self, key):
        """Remove ``key``'s value (a no-op when the key is absent)."""
        self.root = self._delete(self.root, key, 0)

    def _delete(self, x, key, d):
        # Fixed: the old code nulled the terminal node unconditionally, which
        # destroyed the whole mid-subtree (deleting "a" also lost "ab") and
        # decremented N even for keys that were never stored. Now only the
        # value is cleared, N changes only when a value existed, and nodes
        # are pruned only when they carry no value and no children.
        if x is None:
            return None
        c = char_at(key, d)
        compared = c - x.key
        if compared < 0:
            x.left = self._delete(x.left, key, d)
        elif compared > 0:
            x.right = self._delete(x.right, key, d)
        elif len(key) - 1 > d:
            x.mid = self._delete(x.mid, key, d + 1)
        else:
            if x.value is not None:
                self.N -= 1
            x.value = None
        if x.value is None and x.left is None and x.mid is None and x.right is None:
            return None
        return x

    def contains_key(self, key):
        """True when ``key`` itself (not merely a prefix) stores a value.

        Fixed: previously any existing node made this return True, so bare
        prefixes of stored keys were falsely reported as members.
        """
        x = self._get(self.root, key, 0)
        return x is not None and x.value is not None

    def size(self):
        """Number of keys currently stored."""
        return self.N

    def is_empty(self):
        return self.N == 0

    def keys(self):
        """All stored keys (trie walk order; not guaranteed sorted)."""
        queue = []
        self.collect(self.root, '', queue)
        return queue

    def values(self):
        """Values of all stored keys, in the same walk order as ``keys()``."""
        queue = []
        self.collect_values(self.root, queue)
        return queue

    def collect(self, x, prefix, queue):
        """Accumulate full key strings for the subtree rooted at ``x``."""
        if x is None:
            return
        if x.value is not None:
            queue.append(prefix + chr(x.key))
        self.collect(x.left, prefix, queue)
        self.collect(x.mid, prefix + chr(x.key), queue)
        self.collect(x.right, prefix, queue)

    def collect_values(self, x, queue):
        """Accumulate stored values for the subtree rooted at ``x``."""
        if x is None:
            return
        if x.value is not None:
            queue.append(x.value)
        self.collect_values(x.left, queue)
        self.collect_values(x.mid, queue)
        self.collect_values(x.right, queue)
class TernarySearchSet(TernarySearchTrie):
def add(self, key):
    """Insert ``key`` into the set (stored with a dummy value of 0)."""
    self.put(key, 0)
def contains(self, key):
    """Set-membership test; delegates to the underlying trie."""
    return self.contains_key(key)
def to_array(self):
return self.keys() | pysie/dsl/set.py | class TstNode(object):
key = None
value = None
mid = None
left = None
right = None
def __init__(self, key=None, value=None, left=None, right=None, mid=None):
if key is not None:
self.key = key
if value is not None:
self.value = value
if mid is not None:
self.mid = mid
if left is not None:
self.left = left
if right is not None:
self.right = right
def char_at(s, index):
    """Character code of ``s[index]``, or -1 once the index runs past the end."""
    return ord(s[index]) if index < len(s) else -1
class TernarySearchTrie(object):
    """String-keyed symbol table implemented as a ternary search trie."""
    root = None
    N = 0  # number of stored keys
    def put(self, key, value):
        """Store ``value`` under ``key`` (overwrites an existing value)."""
        self.root = self._put(self.root, key, value, 0)
    def _put(self, x, key, value, d):
        c = char_at(key, d)
        if x is None:
            x = TstNode(key=c, value=None)
        compared = c - x.key
        if compared < 0:
            # left/right moves do not consume a character, so d is unchanged
            x.left = self._put(x.left, key, value, d)
        elif compared > 0:
            x.right = self._put(x.right, key, value, d)
        else:
            if len(key) - 1 > d:
                x.mid = self._put(x.mid, key, value, d + 1)
            else:
                # count only newly stored keys, not overwrites
                if x.value is None:
                    self.N += 1
                x.value = value
        return x
    def get(self, key):
        """Return the value stored under ``key``, or None."""
        x = self._get(self.root, key, 0)
        if x is None:
            return None
        return x.value
    def _get(self, x, key, d):
        # Returns the terminal *node*; it may carry value None when key is
        # only a prefix of stored keys.
        c = char_at(key, d)
        if x is None:
            return None
        compared = c - x.key
        if compared < 0:
            return self._get(x.left, key, d)
        elif compared > 0:
            return self._get(x.right, key, d)
        else:
            if len(key) - 1 > d:
                return self._get(x.mid, key, d + 1)
            else:
                return x
    def delete(self, key):
        """Remove ``key`` from the trie."""
        self.root = self._delete(self.root, key, 0)
    def _delete(self, x, key, d):
        if x is None:
            return None
        c = char_at(key, d)
        compared = c - x.key
        if compared < 0:
            x.left = self._delete(x.left, key, d)
        elif compared > 0:
            x.right = self._delete(x.right, key, d)
        else:
            if len(key) - 1 > d:
                x.mid = self._delete(x.mid, key, d + 1)
            else:
                # FIXME(review): this removes the node unconditionally, which
                # (a) decrements N even when the key was never stored, and
                # (b) discards x.mid, destroying every longer key that shares
                # this prefix (e.g. deleting "a" also loses "ab").
                self.N -= 1
                x = None
        return x
    def contains_key(self, key):
        """Membership test.

        FIXME(review): returns True for any existing node, so bare prefixes
        of stored keys (value None) are falsely reported as members.
        """
        x = self._get(self.root, key, 0)
        if x is None:
            return False
        return True
    def size(self):
        """Number of keys currently stored."""
        return self.N
    def is_empty(self):
        return self.N == 0
    def keys(self):
        """All stored keys (trie walk order; not guaranteed sorted)."""
        queue = []
        self.collect(self.root, '', queue)
        return queue
    def values(self):
        """Values of all stored keys, in the same walk order as keys()."""
        queue = []
        self.collect_values(self.root, queue)
        return queue
    def collect(self, x, prefix, queue):
        """Accumulate full key strings for the subtree rooted at ``x``."""
        if x is None:
            return
        if x.value is not None:
            queue.append(prefix + chr(x.key))
        self.collect(x.left, prefix, queue)
        self.collect(x.mid, prefix + chr(x.key), queue)
        self.collect(x.right, prefix, queue)
    def collect_values(self, x, queue):
        """Accumulate stored values for the subtree rooted at ``x``."""
        if x is None:
            return
        if x.value is not None:
            queue.append(x.value)
        self.collect_values(x.left, queue)
        self.collect_values(x.mid, queue)
        self.collect_values(x.right, queue)
class TernarySearchSet(TernarySearchTrie):
def add(self, key):
    """Add ``key`` to the set; a dummy value of 0 marks membership."""
    self.put(key, 0)
def contains(self, key):
    """Return True when ``key`` is a member of the set."""
    return self.contains_key(key)
def to_array(self):
return self.keys() | 0.619932 | 0.264982 |
import os
import json
from datetime import datetime
from collections import Counter, defaultdict
FAIL_TAGS = ('UNDEFINED', 'PRECALC', 'BUILDING', 'NOT_SOLID', 'VOLUME',
'BBOX_Z', 'BBOX_XY', 'TIMEOUT')
class Report:
    '''Summarizes test result data.
    Also organizes parameters which failed to produce a valid gear for further
    case reproduction and debugging.
    '''
    def __init__(self, filename):
        # The filename encodes run info: ..._<branch>_<gitsha>_<timestamp>.json
        info = filename.split('_')
        self.branch = info[-3]
        self.git_sha = info[-2]
        date_str = info[-1].split('.')[0]
        self.dt = datetime.strptime(date_str, '%Y%m%d%H%M%S')
        # FIXME(review): the file handle is never closed; prefer
        # `with open(filename) as fp: data = json.load(fp)`.
        data = json.loads(open(filename).read())
        self.n_passed = data['summary']['passed']
        self.n_failed = data['summary']['failed']
        self.n_total = data['summary']['total']
        self.duration = data['duration']
        self.fail_tags = Counter()  # tag -> total count
        self.fail_tags_per_test = defaultdict(Counter)  # test -> tag -> count
        self.fails = defaultdict(lambda: defaultdict(list))  # test -> tag -> [params]
        for test in data['tests']:
            if test['outcome'] == 'passed':
                continue
            tag = test['metadata']['failure_tag']
            self.fail_tags[tag] += 1
            test_name = test['nodeid'].split('::')[-2]
            params = test['metadata']['gear_params']
            self.fails[test_name][tag].append(params)
            self.fail_tags_per_test[test_name][tag] += 1
    def print_summary(self):
        """Print a human-readable run summary plus a per-test tag matrix."""
        dt = self.dt.strftime('%d-%m-%Y, %H:%M:%S')
        prc = self.n_passed / self.n_total * 100.0
        fprc = 100.0 - prc
        # split duration into hours / minutes / seconds
        secs = int(self.duration)
        hrs = secs // (60 * 60)
        secs %= (60 * 60)
        mins = secs // 60
        secs %= 60
        print(f'branch: {self.branch}')
        print(f'git SHA: {self.git_sha}')
        print(f'date & time: {dt}')
        print(f'running time: {hrs} hours, {mins} minutes, {secs} seconds')
        print(f'passed: {self.n_passed} ({prc:.2f}%)')
        print(f'failed: {self.n_failed} ({fprc:.2f}%)')
        # matrix header: one fixed-width column per failure tag
        header = ' ' * 26
        for tag in FAIL_TAGS:
            header = header + f'{tag:>10}'
        print(header)
        for tname, tags in self.fail_tags_per_test.items():
            line = f'{tname:<26}'
            for tag in FAIL_TAGS:
                n = tags[tag]
                line = line + f'{n:>10}'
            print(line)
    @staticmethod
    def gather_reports(dir_):
        """Load every ``*.json`` report in ``dir_``, newest first."""
        reps = []
        for fname in os.listdir(dir_):
            fname = os.path.join(dir_, fname)
            if os.path.isfile(fname) and fname.endswith('.json'):
                reps.append(Report(fname))
        reps.sort(key=lambda e: e.dt, reverse=True)
        return reps
if __name__ == '__main__':
reps = Report.gather_reports('./reports')
print('\n')
for rep in reps:
rep.print_summary()
print()
print('=' * 106)
print('=' * 106)
print() | tests/report.py | import os
import json
from datetime import datetime
from collections import Counter, defaultdict
FAIL_TAGS = ('UNDEFINED', 'PRECALC', 'BUILDING', 'NOT_SOLID', 'VOLUME',
'BBOX_Z', 'BBOX_XY', 'TIMEOUT')
class Report:
    '''Summarizes test result data.

    Parses one pytest JSON report whose filename encodes run metadata
    (``..._<branch>_<git-sha>_<YYYYmmddHHMMSS>.json``), aggregates failure
    tags per test, and keeps the failing parameter sets around for case
    reproduction and debugging.
    '''

    def __init__(self, filename):
        # The filename encodes run info: ..._<branch>_<gitsha>_<timestamp>.json
        info = filename.split('_')
        self.branch = info[-3]
        self.git_sha = info[-2]
        date_str = info[-1].split('.')[0]
        self.dt = datetime.strptime(date_str, '%Y%m%d%H%M%S')
        # Fixed: read the report through a context manager instead of the
        # previous `json.loads(open(filename).read())`, which leaked the
        # file handle.
        with open(filename) as fp:
            data = json.load(fp)
        self.n_passed = data['summary']['passed']
        self.n_failed = data['summary']['failed']
        self.n_total = data['summary']['total']
        self.duration = data['duration']
        self.fail_tags = Counter()  # tag -> total count
        self.fail_tags_per_test = defaultdict(Counter)  # test -> tag -> count
        self.fails = defaultdict(lambda: defaultdict(list))  # test -> tag -> [params]
        for test in data['tests']:
            if test['outcome'] == 'passed':
                continue
            tag = test['metadata']['failure_tag']
            self.fail_tags[tag] += 1
            test_name = test['nodeid'].split('::')[-2]
            params = test['metadata']['gear_params']
            self.fails[test_name][tag].append(params)
            self.fail_tags_per_test[test_name][tag] += 1

    def print_summary(self):
        """Print a human-readable run summary plus a per-test tag matrix."""
        dt = self.dt.strftime('%d-%m-%Y, %H:%M:%S')
        prc = self.n_passed / self.n_total * 100.0
        fprc = 100.0 - prc
        # Split the duration into hours / minutes / seconds.
        secs = int(self.duration)
        hrs = secs // (60 * 60)
        secs %= (60 * 60)
        mins = secs // 60
        secs %= 60
        print(f'branch: {self.branch}')
        print(f'git SHA: {self.git_sha}')
        print(f'date & time: {dt}')
        print(f'running time: {hrs} hours, {mins} minutes, {secs} seconds')
        print(f'passed: {self.n_passed} ({prc:.2f}%)')
        print(f'failed: {self.n_failed} ({fprc:.2f}%)')
        # Matrix header: one fixed-width column per failure tag.
        header = ' ' * 26
        for tag in FAIL_TAGS:
            header = header + f'{tag:>10}'
        print(header)
        for tname, tags in self.fail_tags_per_test.items():
            line = f'{tname:<26}'
            for tag in FAIL_TAGS:
                n = tags[tag]
                line = line + f'{n:>10}'
            print(line)

    @staticmethod
    def gather_reports(dir_):
        """Load every ``*.json`` report in ``dir_``, newest first."""
        reps = []
        for fname in os.listdir(dir_):
            fname = os.path.join(dir_, fname)
            if os.path.isfile(fname) and fname.endswith('.json'):
                reps.append(Report(fname))
        reps.sort(key=lambda e: e.dt, reverse=True)
        return reps
if __name__ == '__main__':
reps = Report.gather_reports('./reports')
print('\n')
for rep in reps:
rep.print_summary()
print()
print('=' * 106)
print('=' * 106)
print() | 0.269133 | 0.135518 |
import csv, json, pandas as pd
import os, sys, requests, datetime, time
import zipfile, io
import lxml.html as lhtml
import lxml.html.clean as lhtmlclean
import warnings
from pandas.core.common import SettingWithCopyWarning
warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)
class QualClient:
"""
QualClient is a python wrapper the provides convenient access to data
exports directly from Qualtrics into Pandas for further manipulation.
The client in intiated with an API Token, and API URL
It provides 3 Primary functions-
QualClient.pull_survey_meta():
Pulls down a complete list of your surveys and addtional parameters
such as isActive, Creation Date, Mod Date, Name, and IDs
QualClient.pull_definition(survey_id):
survey_id : str
Takes the supplied survey_id and returns a df with the
survey's defintion info, which identifies things like the
questions asked, question text, question order, and IDs
QualClient.pull_results(survey_id):
survey_id : str
Take the supplied survey_id and returns a df of all of the responses
to the survey, with both the raw text and encoding of the response.
This functionalty actually downloads and unzips files from Qualtrics, so be
aware that it might take a moment to return the finalized data.
DF takes the shape of a long table with one response per row.
Example Usage:
client = QualClient(API_Token, API_url)
definitions = client.survey(survey_id)
print(definitions.head())
"""
def __init__(self, api_token, api_url):
self.api_token = api_token
self.headers = {
'x-api-token': self.api_token,
'content-type': "application/json",
'cache-control': "no-cache"
}
self.api_url = api_url
self.survey_url = self.api_url + 'surveys/'
self.definition_url = self.api_url + 'survey-definitions/'
self.response_url = self.api_url + 'responseexports/'
self.failed_responses = ["cancelled", "failed"]
def pull_survey_meta(self):
arrQualtricsSurveys = []
arrSurveyName = []
arrSurveyActive = []
arrCreation = []
arrMod = []
def GetQualtricsSurveys(qualtricsSurveysURL):
response = requests.get(url=qualtricsSurveysURL,
headers=self.headers)
jsonResponse = response.json()
nextPage = jsonResponse['result']['nextPage']
arrQualtricsSurveys.extend(
[srv['id'] for srv in jsonResponse['result']['elements']])
arrSurveyName.extend(
[srv['name'] for srv in jsonResponse['result']['elements']])
arrSurveyActive.extend([
srv['isActive'] for srv in jsonResponse['result']['elements']
])
arrCreation.extend([
srv['creationDate']
for srv in jsonResponse['result']['elements']
])
arrMod.extend([
srv['lastModified']
for srv in jsonResponse['result']['elements']
])
#Contains nextPage
if (nextPage is not None):
GetQualtricsSurveys(nextPage)
GetQualtricsSurveys(self.survey_url)
df = pd.DataFrame({
'SurveyID': arrQualtricsSurveys,
'Survey_Name': arrSurveyName,
'IsActive': arrSurveyActive,
'Created': arrCreation,
'LastModified': arrMod
})
return df
def pull_definition(self, survey_id):
response = json.loads(
requests.get(
url=self.definition_url + survey_id,
headers=self.headers).content.decode("utf-8"))['result']
question = pd.json_normalize(response['Questions']).melt()
flow = pd.json_normalize(response['SurveyFlow']['Flow'])
if ("EmbeddedData" in flow.columns or "Flow" in flow.columns):
flow.rename(columns={
'ID': 'BlockID',
'Type': 'FlowType'
},
inplace=True)
if not 'BlockID' in flow.columns:
flow['BlockID'] = ""
flow = flow[[
'EmbeddedData', 'FlowID', 'BlockID', 'Flow', 'FlowType'
]].reset_index()
flow.rename(columns={'index': 'FlowSort'}, inplace=True)
flow_block = flow[(
flow.EmbeddedData.isnull() == True)].EmbeddedData.apply(
pd.Series).merge(
flow, right_index=True,
left_index=True).drop(["EmbeddedData"], axis=1).melt(
id_vars=[
'FlowSort', 'FlowID', 'BlockID', 'FlowType'
],
value_name="EmbeddedData")
embed = flow[(
flow.EmbeddedData.isnull() == False)].EmbeddedData.apply(
pd.Series).merge(
flow, right_index=True,
left_index=True).drop(["EmbeddedData"], axis=1).melt(
id_vars=[
'FlowSort', 'FlowID', 'BlockID', 'FlowType'
],
value_name="EmbeddedData")
embed = embed.EmbeddedData.apply(pd.Series).merge(
embed, right_index=True,
left_index=True).drop(["EmbeddedData"],
axis=1).dropna(subset=['Field', 'Type'])
embed = embed[[
'FlowSort', 'FlowID', 'BlockID', 'FlowType', 'Field', 'Type',
'Value'
]]
embed = embed.sort_values(by=['FlowSort'])
combined = flow_block.merge(
embed,
how='outer',
on=['FlowSort', 'FlowID', 'BlockID',
'FlowType']).sort_values(by=['FlowSort'])
combined.drop(["variable", "EmbeddedData"], axis=1, inplace=True)
combined.drop_duplicates(inplace=True)
else:
flow = flow[['FlowID', 'Type']].reset_index()
flow.columns = ['FlowSort', 'FlowID', 'BlockID', 'FlowType']
flow['Field'] = ''
flow['Type'] = ''
flow['Value'] = ''
combined = flow
blocks = pd.json_normalize(response['Blocks']).melt()
blocks[["BlockID",
"BlockSettings"]] = blocks.variable.str.split('.',
1,
expand=True)
blocks = blocks[~blocks['BlockSettings'].str.contains('Options')
& ~blocks['BlockSettings'].str.contains('SubType')]
blocks = blocks.pivot(index='BlockID',
columns='BlockSettings',
values='value')
blocks = blocks['BlockElements'].apply(pd.Series).merge(
blocks, right_index=True,
left_index=True).drop(['BlockElements'], axis=1).melt(
id_vars=['ID', 'Type', 'Description'],
value_name="BlockElement").dropna()
blocks.rename(columns={'ID': 'BlockID'}, inplace=True)
blocks['ElementType'] = blocks['BlockElement']
blocks['ElementType'] = blocks['ElementType'].apply(
lambda x: x['Type'])
blocks['QID'] = blocks['BlockElement'].apply(
lambda x: x['QuestionID'] if 'QuestionID' in x else "")
blocks = blocks.drop(['BlockElement'], axis=1)
blocks.rename(
columns=(lambda x: 'BlockElementSort' if x == 'variable' else
('Block' + x
if (('Block' in x) == False and x != 'QID') else x)),
inplace=True)
blocks = combined.merge(blocks, on='BlockID', how='right')
extract = question[(
question.variable.str.contains('.Language.') == False)]
extract[["QID", "QPath"]] = extract.variable.str.split('.',
1,
expand=True)
extract[["QPath",
"ChoiceSetting"]] = extract.QPath.str.rsplit('.',
1,
expand=True)
extract['value'] = extract.apply(
lambda x: response['Questions'][x.QID]['Labels']
if (x.QPath.startswith("Labels.") == True) else x['value'],
axis=1)
extract['ChoiceSetting'] = extract.apply(
lambda x: None
if (x.QPath.startswith("Labels.") == True) else x.ChoiceSetting,
axis=1)
extract['QPath'] = extract.apply(
lambda x: "Labels"
if (x.QPath.startswith("Labels.") == True) else x.QPath,
axis=1)
question_pvt = extract[(extract.ChoiceSetting.isnull() == True)]
question_pvt = question_pvt.pivot_table(index=['QID'],
columns=['QPath'],
values='value',
aggfunc='first').reset_index()
question_settings = extract[
(extract.QPath.str.contains("Choices.") == False)
& (extract.QPath.str.contains("Answers.") == False)]
choice_settings = question_settings[(
question_settings.ChoiceSetting.str.replace(
'-', '').str.isnumeric() == True)]
question_settings = question_settings[(
question_settings.ChoiceSetting.str.replace(
'-', '').str.isnumeric() == False)]
question_settings['QPath'] = question_settings.apply(
lambda x: x['QPath'] + "." + x['ChoiceSetting'], axis=1)
question_settings['QPath'] = question_settings.apply(
lambda x: x['QPath'].split('.', 2)[0] + "." + x['QPath'].split(
'.', 2)[2]
if "AdditionalQuestions" in x['QPath'] else x['QPath'],
axis=1)
question_settings = question_settings.drop(
columns=['variable', 'ChoiceSetting'])
question_settings = question_settings.pivot_table(
index=['QID'], columns=['QPath'], values='value',
aggfunc='first').reset_index()
question_pvt = question_pvt.merge(question_settings,
how='left',
on='QID')
if (choice_settings.empty == False):
choice_settings['CQID'] = choice_settings.apply(
lambda x: x['QID'] + '-' + x['ChoiceSetting']
if ((x['ChoiceSetting'] is not None) & (
(x['ChoiceSetting']).isnumeric())) else x['QID'],
axis=1)
choice_settings.drop(columns=['variable', 'QID'])
choice_settings = choice_settings.pivot_table(
index=['CQID'],
columns=['QPath'],
values='value',
aggfunc='first').reset_index()
answers = extract[(extract.QPath.str.contains("Answers.") == True)]
if (answers.empty == False):
answers[["QPath",
"CRecode"]] = answers.QPath.str.split('.', 1, expand=True)
answers['CRecode'] = answers['CRecode'].apply(
lambda x: '#' + x.split('.')[0] + '-' + x.split('.')[2]
if "Answers" in x else x)
answers['AnswerSort'] = 1
answers['AnswerSort'] = answers.groupby(
'QID')['AnswerSort'].cumsum()
answers = answers.drop(columns=['variable', 'ChoiceSetting'])
choices_pvt = extract[(extract.QPath.str.contains("Choices.") == True)]
choices_pvt[["QPath",
"CRecode"]] = choices_pvt.QPath.str.split('.',
1,
expand=True)
choices_pvt["IChoiceSetting"] = choices_pvt["CRecode"].apply(
lambda x: None if x is None else (x.split('.', 1)[1]
if x.count('.') > 0 else ""))
choices_pvt["ChoiceSetting"] = choices_pvt.apply(
lambda x: x['IChoiceSetting'] + "." + x['ChoiceSetting']
if "Image" in str(x['IChoiceSetting']) else x['ChoiceSetting'],
axis=1)
choices_pvt["PGRGrpIdx"] = choices_pvt["CRecode"].apply(
lambda x: None if x is None else x.split('.', 1)[0]
if 'Choices' in x else None)
choices_pvt["PGRChoiceIdx"] = choices_pvt["CRecode"].apply(
lambda x: None if x is None else x.rsplit('.', 1)[1]
if "Choices" in x else None)
choices_pvt["CRecode"] = choices_pvt["CRecode"].apply(
lambda x: None if x is None else (x.split('.', 1)[0]
if x.count('.') > 0 else x))
choices_pvt["CRecode"] = choices_pvt.apply(
lambda x: x["CRecode"] if x["PGRChoiceIdx"] is None else "#" + x[
"CRecode"] + "-" + x["PGRChoiceIdx"],
axis=1)
choices_pvt["CQID"] = choices_pvt.apply(
lambda x: x["QID"]
if x["CRecode"] is None else x["QID"] + x["CRecode"]
if "#" in x["CRecode"] else x["QID"] + "-" + x["CRecode"],
axis=1)
choices_pvt = choices_pvt.pivot_table(index=['CQID', 'QID'],
columns=['ChoiceSetting'],
values='value',
aggfunc='first').reset_index()
if (choice_settings.empty == False):
choices_pvt = choices_pvt.merge(choice_settings,
on='CQID',
how='left')
choices_order = extract[(extract.QPath == "ChoiceOrder")]
choices_order = choices_order.value.apply(pd.Series).merge(
choices_order, right_index=True, left_index=True).drop(
["value", "QPath", "variable", "ChoiceSetting"],
axis=1).melt(id_vars=['QID'], value_name="CRecode").dropna()
choices_order.columns = ['QID', 'ChoiceOrder', 'CRecode']
choices_order['CQID'] = choices_order['QID'] + "-" + choices_order[
'CRecode'].astype(str)
### Combine SVF - Blocks - Questions - Choices - ChoiceOrder
svFlattened = choices_pvt.merge(choices_order, how='left', on='CQID')
svFlattened = svFlattened.drop(columns="QID_y")
svFlattened = svFlattened.rename(columns={'QID_x': 'QID'})
svFlattened = question_pvt.merge(svFlattened, how='outer', on='QID')
svFlattened = blocks.merge(svFlattened, how='left', on='QID')
svFlattened['QuestionText'] = svFlattened['QuestionText_Unsafe'].apply(
lambda x: "" if x == "" else lhtmlclean.Cleaner(
style=True).clean_html(lhtml.fromstring(str(x))).text_content(
).replace("nan", "").strip())
svFlattened['Display'] = svFlattened['Display'].apply(
lambda x: "" if x == "" else lhtmlclean.Cleaner(
style=True).clean_html(lhtml.fromstring(str(x))).text_content(
).replace("nan", "").strip())
svFlattened['CQID'] = svFlattened.apply(
lambda x: x.CQID if "QID" in str(x.CQID) else x.Field
if pd.isnull(x.Field) == False else x.QID
if pd.isnull(x.QID) == False else "",
axis=1)
svFlattened = svFlattened.drop(
columns=['AnswerOrder', 'ChoiceOrder_x'], errors='ignore')
csvfilteredColumns = [
'FlowSort', 'FlowID', 'BlockElementSort', 'BlockDescription',
'QID', 'CQID', 'QuestionText', 'QuestionType', 'Selector',
'SubSelector', 'DataExportTag', 'ChoiceDataExportTags_y',
'Display', 'Image.Display', 'Image.ImageLocation',
'VariableNaming', 'ChoiceOrder_y', 'CRecode'
]
for x in csvfilteredColumns:
if (x not in svFlattened.columns):
svFlattened[x] = ''
svFlattenedFiltered = svFlattened[csvfilteredColumns].drop_duplicates(
subset='CQID', ignore_index=True)
# only return filtered, do we need to return the result unfiltered?
return svFlattenedFiltered
def pull_results(self, survey_id):
def pull_file(label, survey_id):
file_type = lambda x: "With Labels" if label == True else "Without Labels"
parameters = "{\"format\": \"csv\", \"useLabels\": "\
+ (str(label)).lower() + ", \"surveyId\": \""\
+ survey_id + "\"" + ", \"endDate\":\"" \
+ str(datetime.datetime.utcnow().isoformat()[0:19]) + "Z\"}"
response = requests.post(url=self.response_url,
headers=self.headers,
data=parameters)
responseFileID = response.json()["result"]["id"]
if (responseFileID is not None):
response = requests.get(url=self.response_url +
responseFileID,
headers=self.headers)
responseFileStatus = response.json()["result"]["status"]
while (responseFileStatus == "in progress"):
time.sleep(5)
response = requests.get(url=self.response_url +
responseFileID,
headers=self.headers)
responseFileStatus = response.json()["result"]["status"]
completion_rate = response.json(
)['result']['percentComplete']
print(
f"File Request ({file_type(label)}) - {completion_rate}%"
)
if (responseFileStatus in self.failed_responses):
print("Error Network Issue / Failed Request : " + survey_id)
responseFileDownload = response.json()["result"]["file"]
response = requests.get(url=responseFileDownload,
headers=self.headers)
else:
print('No Response file ID, please check the survey ID')
with zipfile.ZipFile(io.BytesIO(response.content),
mode='r') as file:
download = file.read(list(file.NameToInfo.keys())[0]).decode()
df = pd.read_csv(io.StringIO(download), low_memory=False)
return df
wlExport = pull_file(True, survey_id)
nlExport = pull_file(False, survey_id)
mdQID = pd.melt(wlExport.iloc[[1]])
mdQID.columns = ["QRecode", "QID"]
mdQID["QID"] = mdQID["QID"].apply(
lambda x: json.loads(x.replace("'", "\""))["ImportId"])
wlExport = wlExport.iloc[2:]
nlExport = nlExport.iloc[2:]
print("Exports are finished - Working on combining them...")
wlExport = wlExport.rename(
columns=lambda x: "ResponseID" if x == "ResponseId" else x)
mdTxtResp = pd.melt(wlExport, id_vars=["ResponseID"])
mdTxtResp.columns = ["ResponseID", "QRecode", "TxtRespAnswer"]
#Join Back ResponseID Values
mdRespIDs = pd.melt(wlExport, value_vars=["ResponseID"])
mdRespIDs["TxtRespAnswer"] = mdRespIDs["value"]
mdRespIDs.columns = ["QRecode", "ResponseID", "TxtRespAnswer"]
def IsNumeric(x):
try:
float(x)
except (ValueError):
return ""
return x
#Merge Text w. Response ID Values
ndTxtResp = mdTxtResp.merge(mdRespIDs, how='outer')
nlExport = nlExport.rename(
columns=lambda x: "ResponseID" if x == "ResponseId" else x)
mdNumResp = pd.melt(nlExport, id_vars=["ResponseID"])
mdNumResp.columns = ["ResponseID", "QRecode", "NumRespAnswer"]
mdNumResp["NumRespAnswer"] = mdNumResp["NumRespAnswer"].apply(
lambda x: IsNumeric(x))
#Merge Text w. Num Resp Values
ndTextNumResp = mdNumResp.merge(ndTxtResp, how='outer')
#Merge Results w. QID // ndQColumns.merge for QIDs + QText
ndResultsFlat = mdQID.merge(ndTextNumResp, how='outer')
ndResultsFlat["SurveyID"] = survey_id
#Use Recodes for QID for non Questions
ndResultsFlat["QID"] = ndResultsFlat.apply(
lambda x: x['QID'] if "QID" in str(x['QID']) else x['QRecode'],
axis=1)
#NumAns != TextAns QCID = QID + Recode
ndResultsFlat["CQID"] = ndResultsFlat.apply(
lambda x: x['QID'].rsplit("-", 1)[0] + "-" + str(x['NumRespAnswer']
).split('.', 1)[0]
if x['NumRespAnswer'] != x['TxtRespAnswer'] and "QID" in x["QID"]
and pd.isnull(x['TxtRespAnswer']) == False and "TEXT" not in x[
"QID"] and "#" not in x["QID"] and '-' not in x["QID"] else x[
'QID'].rsplit("-", 1)[0]
if "#" in x['QID'] else x['QID'].replace('-Group', '').replace(
'-Rank', '').replace('-TEXT', ''),
axis=1)
# Loop & Merge
ndResultsFlat["CQID"] = ndResultsFlat.apply(
lambda x: "QID" + x["CQID"].replace("-xyValues-x", "").replace(
"-xyValues-y", "").split("_QID", 1)[1]
if "_QID" in x["CQID"] else x["CQID"],
axis=1)
del wlExport, nlExport
print("Done")
return ndResultsFlat | qualclient/qualclient.py | import csv, json, pandas as pd
import os, sys, requests, datetime, time
import zipfile, io
import lxml.html as lhtml
import lxml.html.clean as lhtmlclean
import warnings
from pandas.core.common import SettingWithCopyWarning
warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)
class QualClient:
"""
QualClient is a python wrapper the provides convenient access to data
exports directly from Qualtrics into Pandas for further manipulation.
The client in intiated with an API Token, and API URL
It provides 3 Primary functions-
QualClient.pull_survey_meta():
Pulls down a complete list of your surveys and addtional parameters
such as isActive, Creation Date, Mod Date, Name, and IDs
QualClient.pull_definition(survey_id):
survey_id : str
Takes the supplied survey_id and returns a df with the
survey's defintion info, which identifies things like the
questions asked, question text, question order, and IDs
QualClient.pull_results(survey_id):
survey_id : str
Take the supplied survey_id and returns a df of all of the responses
to the survey, with both the raw text and encoding of the response.
This functionalty actually downloads and unzips files from Qualtrics, so be
aware that it might take a moment to return the finalized data.
DF takes the shape of a long table with one response per row.
Example Usage:
client = QualClient(API_Token, API_url)
definitions = client.survey(survey_id)
print(definitions.head())
"""
def __init__(self, api_token, api_url):
self.api_token = api_token
self.headers = {
'x-api-token': self.api_token,
'content-type': "application/json",
'cache-control': "no-cache"
}
self.api_url = api_url
self.survey_url = self.api_url + 'surveys/'
self.definition_url = self.api_url + 'survey-definitions/'
self.response_url = self.api_url + 'responseexports/'
self.failed_responses = ["cancelled", "failed"]
def pull_survey_meta(self):
arrQualtricsSurveys = []
arrSurveyName = []
arrSurveyActive = []
arrCreation = []
arrMod = []
def GetQualtricsSurveys(qualtricsSurveysURL):
response = requests.get(url=qualtricsSurveysURL,
headers=self.headers)
jsonResponse = response.json()
nextPage = jsonResponse['result']['nextPage']
arrQualtricsSurveys.extend(
[srv['id'] for srv in jsonResponse['result']['elements']])
arrSurveyName.extend(
[srv['name'] for srv in jsonResponse['result']['elements']])
arrSurveyActive.extend([
srv['isActive'] for srv in jsonResponse['result']['elements']
])
arrCreation.extend([
srv['creationDate']
for srv in jsonResponse['result']['elements']
])
arrMod.extend([
srv['lastModified']
for srv in jsonResponse['result']['elements']
])
#Contains nextPage
if (nextPage is not None):
GetQualtricsSurveys(nextPage)
GetQualtricsSurveys(self.survey_url)
df = pd.DataFrame({
'SurveyID': arrQualtricsSurveys,
'Survey_Name': arrSurveyName,
'IsActive': arrSurveyActive,
'Created': arrCreation,
'LastModified': arrMod
})
return df
def pull_definition(self, survey_id):
response = json.loads(
requests.get(
url=self.definition_url + survey_id,
headers=self.headers).content.decode("utf-8"))['result']
question = pd.json_normalize(response['Questions']).melt()
flow = pd.json_normalize(response['SurveyFlow']['Flow'])
if ("EmbeddedData" in flow.columns or "Flow" in flow.columns):
flow.rename(columns={
'ID': 'BlockID',
'Type': 'FlowType'
},
inplace=True)
if not 'BlockID' in flow.columns:
flow['BlockID'] = ""
flow = flow[[
'EmbeddedData', 'FlowID', 'BlockID', 'Flow', 'FlowType'
]].reset_index()
flow.rename(columns={'index': 'FlowSort'}, inplace=True)
flow_block = flow[(
flow.EmbeddedData.isnull() == True)].EmbeddedData.apply(
pd.Series).merge(
flow, right_index=True,
left_index=True).drop(["EmbeddedData"], axis=1).melt(
id_vars=[
'FlowSort', 'FlowID', 'BlockID', 'FlowType'
],
value_name="EmbeddedData")
embed = flow[(
flow.EmbeddedData.isnull() == False)].EmbeddedData.apply(
pd.Series).merge(
flow, right_index=True,
left_index=True).drop(["EmbeddedData"], axis=1).melt(
id_vars=[
'FlowSort', 'FlowID', 'BlockID', 'FlowType'
],
value_name="EmbeddedData")
embed = embed.EmbeddedData.apply(pd.Series).merge(
embed, right_index=True,
left_index=True).drop(["EmbeddedData"],
axis=1).dropna(subset=['Field', 'Type'])
embed = embed[[
'FlowSort', 'FlowID', 'BlockID', 'FlowType', 'Field', 'Type',
'Value'
]]
embed = embed.sort_values(by=['FlowSort'])
combined = flow_block.merge(
embed,
how='outer',
on=['FlowSort', 'FlowID', 'BlockID',
'FlowType']).sort_values(by=['FlowSort'])
combined.drop(["variable", "EmbeddedData"], axis=1, inplace=True)
combined.drop_duplicates(inplace=True)
else:
flow = flow[['FlowID', 'Type']].reset_index()
flow.columns = ['FlowSort', 'FlowID', 'BlockID', 'FlowType']
flow['Field'] = ''
flow['Type'] = ''
flow['Value'] = ''
combined = flow
blocks = pd.json_normalize(response['Blocks']).melt()
blocks[["BlockID",
"BlockSettings"]] = blocks.variable.str.split('.',
1,
expand=True)
blocks = blocks[~blocks['BlockSettings'].str.contains('Options')
& ~blocks['BlockSettings'].str.contains('SubType')]
blocks = blocks.pivot(index='BlockID',
columns='BlockSettings',
values='value')
blocks = blocks['BlockElements'].apply(pd.Series).merge(
blocks, right_index=True,
left_index=True).drop(['BlockElements'], axis=1).melt(
id_vars=['ID', 'Type', 'Description'],
value_name="BlockElement").dropna()
blocks.rename(columns={'ID': 'BlockID'}, inplace=True)
blocks['ElementType'] = blocks['BlockElement']
blocks['ElementType'] = blocks['ElementType'].apply(
lambda x: x['Type'])
blocks['QID'] = blocks['BlockElement'].apply(
lambda x: x['QuestionID'] if 'QuestionID' in x else "")
blocks = blocks.drop(['BlockElement'], axis=1)
blocks.rename(
columns=(lambda x: 'BlockElementSort' if x == 'variable' else
('Block' + x
if (('Block' in x) == False and x != 'QID') else x)),
inplace=True)
blocks = combined.merge(blocks, on='BlockID', how='right')
extract = question[(
question.variable.str.contains('.Language.') == False)]
extract[["QID", "QPath"]] = extract.variable.str.split('.',
1,
expand=True)
extract[["QPath",
"ChoiceSetting"]] = extract.QPath.str.rsplit('.',
1,
expand=True)
extract['value'] = extract.apply(
lambda x: response['Questions'][x.QID]['Labels']
if (x.QPath.startswith("Labels.") == True) else x['value'],
axis=1)
extract['ChoiceSetting'] = extract.apply(
lambda x: None
if (x.QPath.startswith("Labels.") == True) else x.ChoiceSetting,
axis=1)
extract['QPath'] = extract.apply(
lambda x: "Labels"
if (x.QPath.startswith("Labels.") == True) else x.QPath,
axis=1)
question_pvt = extract[(extract.ChoiceSetting.isnull() == True)]
question_pvt = question_pvt.pivot_table(index=['QID'],
columns=['QPath'],
values='value',
aggfunc='first').reset_index()
question_settings = extract[
(extract.QPath.str.contains("Choices.") == False)
& (extract.QPath.str.contains("Answers.") == False)]
choice_settings = question_settings[(
question_settings.ChoiceSetting.str.replace(
'-', '').str.isnumeric() == True)]
question_settings = question_settings[(
question_settings.ChoiceSetting.str.replace(
'-', '').str.isnumeric() == False)]
question_settings['QPath'] = question_settings.apply(
lambda x: x['QPath'] + "." + x['ChoiceSetting'], axis=1)
question_settings['QPath'] = question_settings.apply(
lambda x: x['QPath'].split('.', 2)[0] + "." + x['QPath'].split(
'.', 2)[2]
if "AdditionalQuestions" in x['QPath'] else x['QPath'],
axis=1)
question_settings = question_settings.drop(
columns=['variable', 'ChoiceSetting'])
question_settings = question_settings.pivot_table(
index=['QID'], columns=['QPath'], values='value',
aggfunc='first').reset_index()
question_pvt = question_pvt.merge(question_settings,
how='left',
on='QID')
if (choice_settings.empty == False):
choice_settings['CQID'] = choice_settings.apply(
lambda x: x['QID'] + '-' + x['ChoiceSetting']
if ((x['ChoiceSetting'] is not None) & (
(x['ChoiceSetting']).isnumeric())) else x['QID'],
axis=1)
choice_settings.drop(columns=['variable', 'QID'])
choice_settings = choice_settings.pivot_table(
index=['CQID'],
columns=['QPath'],
values='value',
aggfunc='first').reset_index()
answers = extract[(extract.QPath.str.contains("Answers.") == True)]
if (answers.empty == False):
answers[["QPath",
"CRecode"]] = answers.QPath.str.split('.', 1, expand=True)
answers['CRecode'] = answers['CRecode'].apply(
lambda x: '#' + x.split('.')[0] + '-' + x.split('.')[2]
if "Answers" in x else x)
answers['AnswerSort'] = 1
answers['AnswerSort'] = answers.groupby(
'QID')['AnswerSort'].cumsum()
answers = answers.drop(columns=['variable', 'ChoiceSetting'])
choices_pvt = extract[(extract.QPath.str.contains("Choices.") == True)]
choices_pvt[["QPath",
"CRecode"]] = choices_pvt.QPath.str.split('.',
1,
expand=True)
choices_pvt["IChoiceSetting"] = choices_pvt["CRecode"].apply(
lambda x: None if x is None else (x.split('.', 1)[1]
if x.count('.') > 0 else ""))
choices_pvt["ChoiceSetting"] = choices_pvt.apply(
lambda x: x['IChoiceSetting'] + "." + x['ChoiceSetting']
if "Image" in str(x['IChoiceSetting']) else x['ChoiceSetting'],
axis=1)
choices_pvt["PGRGrpIdx"] = choices_pvt["CRecode"].apply(
lambda x: None if x is None else x.split('.', 1)[0]
if 'Choices' in x else None)
choices_pvt["PGRChoiceIdx"] = choices_pvt["CRecode"].apply(
lambda x: None if x is None else x.rsplit('.', 1)[1]
if "Choices" in x else None)
choices_pvt["CRecode"] = choices_pvt["CRecode"].apply(
lambda x: None if x is None else (x.split('.', 1)[0]
if x.count('.') > 0 else x))
choices_pvt["CRecode"] = choices_pvt.apply(
lambda x: x["CRecode"] if x["PGRChoiceIdx"] is None else "#" + x[
"CRecode"] + "-" + x["PGRChoiceIdx"],
axis=1)
choices_pvt["CQID"] = choices_pvt.apply(
lambda x: x["QID"]
if x["CRecode"] is None else x["QID"] + x["CRecode"]
if "#" in x["CRecode"] else x["QID"] + "-" + x["CRecode"],
axis=1)
choices_pvt = choices_pvt.pivot_table(index=['CQID', 'QID'],
columns=['ChoiceSetting'],
values='value',
aggfunc='first').reset_index()
if (choice_settings.empty == False):
choices_pvt = choices_pvt.merge(choice_settings,
on='CQID',
how='left')
choices_order = extract[(extract.QPath == "ChoiceOrder")]
choices_order = choices_order.value.apply(pd.Series).merge(
choices_order, right_index=True, left_index=True).drop(
["value", "QPath", "variable", "ChoiceSetting"],
axis=1).melt(id_vars=['QID'], value_name="CRecode").dropna()
choices_order.columns = ['QID', 'ChoiceOrder', 'CRecode']
choices_order['CQID'] = choices_order['QID'] + "-" + choices_order[
'CRecode'].astype(str)
### Combine SVF - Blocks - Questions - Choices - ChoiceOrder
svFlattened = choices_pvt.merge(choices_order, how='left', on='CQID')
svFlattened = svFlattened.drop(columns="QID_y")
svFlattened = svFlattened.rename(columns={'QID_x': 'QID'})
svFlattened = question_pvt.merge(svFlattened, how='outer', on='QID')
svFlattened = blocks.merge(svFlattened, how='left', on='QID')
svFlattened['QuestionText'] = svFlattened['QuestionText_Unsafe'].apply(
lambda x: "" if x == "" else lhtmlclean.Cleaner(
style=True).clean_html(lhtml.fromstring(str(x))).text_content(
).replace("nan", "").strip())
svFlattened['Display'] = svFlattened['Display'].apply(
lambda x: "" if x == "" else lhtmlclean.Cleaner(
style=True).clean_html(lhtml.fromstring(str(x))).text_content(
).replace("nan", "").strip())
svFlattened['CQID'] = svFlattened.apply(
lambda x: x.CQID if "QID" in str(x.CQID) else x.Field
if pd.isnull(x.Field) == False else x.QID
if pd.isnull(x.QID) == False else "",
axis=1)
svFlattened = svFlattened.drop(
columns=['AnswerOrder', 'ChoiceOrder_x'], errors='ignore')
csvfilteredColumns = [
'FlowSort', 'FlowID', 'BlockElementSort', 'BlockDescription',
'QID', 'CQID', 'QuestionText', 'QuestionType', 'Selector',
'SubSelector', 'DataExportTag', 'ChoiceDataExportTags_y',
'Display', 'Image.Display', 'Image.ImageLocation',
'VariableNaming', 'ChoiceOrder_y', 'CRecode'
]
for x in csvfilteredColumns:
if (x not in svFlattened.columns):
svFlattened[x] = ''
svFlattenedFiltered = svFlattened[csvfilteredColumns].drop_duplicates(
subset='CQID', ignore_index=True)
# only return filtered, do we need to return the result unfiltered?
return svFlattenedFiltered
def pull_results(self, survey_id):
def pull_file(label, survey_id):
file_type = lambda x: "With Labels" if label == True else "Without Labels"
parameters = "{\"format\": \"csv\", \"useLabels\": "\
+ (str(label)).lower() + ", \"surveyId\": \""\
+ survey_id + "\"" + ", \"endDate\":\"" \
+ str(datetime.datetime.utcnow().isoformat()[0:19]) + "Z\"}"
response = requests.post(url=self.response_url,
headers=self.headers,
data=parameters)
responseFileID = response.json()["result"]["id"]
if (responseFileID is not None):
response = requests.get(url=self.response_url +
responseFileID,
headers=self.headers)
responseFileStatus = response.json()["result"]["status"]
while (responseFileStatus == "in progress"):
time.sleep(5)
response = requests.get(url=self.response_url +
responseFileID,
headers=self.headers)
responseFileStatus = response.json()["result"]["status"]
completion_rate = response.json(
)['result']['percentComplete']
print(
f"File Request ({file_type(label)}) - {completion_rate}%"
)
if (responseFileStatus in self.failed_responses):
print("Error Network Issue / Failed Request : " + survey_id)
responseFileDownload = response.json()["result"]["file"]
response = requests.get(url=responseFileDownload,
headers=self.headers)
else:
print('No Response file ID, please check the survey ID')
with zipfile.ZipFile(io.BytesIO(response.content),
mode='r') as file:
download = file.read(list(file.NameToInfo.keys())[0]).decode()
df = pd.read_csv(io.StringIO(download), low_memory=False)
return df
wlExport = pull_file(True, survey_id)
nlExport = pull_file(False, survey_id)
mdQID = pd.melt(wlExport.iloc[[1]])
mdQID.columns = ["QRecode", "QID"]
mdQID["QID"] = mdQID["QID"].apply(
lambda x: json.loads(x.replace("'", "\""))["ImportId"])
wlExport = wlExport.iloc[2:]
nlExport = nlExport.iloc[2:]
print("Exports are finished - Working on combining them...")
wlExport = wlExport.rename(
columns=lambda x: "ResponseID" if x == "ResponseId" else x)
mdTxtResp = pd.melt(wlExport, id_vars=["ResponseID"])
mdTxtResp.columns = ["ResponseID", "QRecode", "TxtRespAnswer"]
#Join Back ResponseID Values
mdRespIDs = pd.melt(wlExport, value_vars=["ResponseID"])
mdRespIDs["TxtRespAnswer"] = mdRespIDs["value"]
mdRespIDs.columns = ["QRecode", "ResponseID", "TxtRespAnswer"]
def IsNumeric(x):
try:
float(x)
except (ValueError):
return ""
return x
#Merge Text w. Response ID Values
ndTxtResp = mdTxtResp.merge(mdRespIDs, how='outer')
nlExport = nlExport.rename(
columns=lambda x: "ResponseID" if x == "ResponseId" else x)
mdNumResp = pd.melt(nlExport, id_vars=["ResponseID"])
mdNumResp.columns = ["ResponseID", "QRecode", "NumRespAnswer"]
mdNumResp["NumRespAnswer"] = mdNumResp["NumRespAnswer"].apply(
lambda x: IsNumeric(x))
#Merge Text w. Num Resp Values
ndTextNumResp = mdNumResp.merge(ndTxtResp, how='outer')
#Merge Results w. QID // ndQColumns.merge for QIDs + QText
ndResultsFlat = mdQID.merge(ndTextNumResp, how='outer')
ndResultsFlat["SurveyID"] = survey_id
#Use Recodes for QID for non Questions
ndResultsFlat["QID"] = ndResultsFlat.apply(
lambda x: x['QID'] if "QID" in str(x['QID']) else x['QRecode'],
axis=1)
#NumAns != TextAns QCID = QID + Recode
ndResultsFlat["CQID"] = ndResultsFlat.apply(
lambda x: x['QID'].rsplit("-", 1)[0] + "-" + str(x['NumRespAnswer']
).split('.', 1)[0]
if x['NumRespAnswer'] != x['TxtRespAnswer'] and "QID" in x["QID"]
and pd.isnull(x['TxtRespAnswer']) == False and "TEXT" not in x[
"QID"] and "#" not in x["QID"] and '-' not in x["QID"] else x[
'QID'].rsplit("-", 1)[0]
if "#" in x['QID'] else x['QID'].replace('-Group', '').replace(
'-Rank', '').replace('-TEXT', ''),
axis=1)
# Loop & Merge
ndResultsFlat["CQID"] = ndResultsFlat.apply(
lambda x: "QID" + x["CQID"].replace("-xyValues-x", "").replace(
"-xyValues-y", "").split("_QID", 1)[1]
if "_QID" in x["CQID"] else x["CQID"],
axis=1)
del wlExport, nlExport
print("Done")
return ndResultsFlat | 0.36376 | 0.256966 |
def spaceRect(rect,y):
    """Shift every rectangle coordinate at height y or above up by one.

    Mutates *rect* (a list of two-element [a, b] pairs) in place.
    """
    for pair in rect:
        for side in (0, 1):
            if pair[side] >= y:
                pair[side] += 1
def spaceColumn(col,y):
    """Shift every column entry at height y or above up by one (in place)."""
    for idx, val in enumerate(col):
        if val >= y:
            col[idx] = val + 1
def findMax(rect):
    """Return the largest coordinate appearing in *rect*, or -1 when empty."""
    best = -1
    for pair in rect:
        for side in (0, 1):
            if pair[side] > best:
                best = pair[side]
    return best
def braidToRect(br,n):
    """Convert a braid word into a rectangular (grid) diagram.

    *br* is a sequence of generators (sign, strand) as produced by rdBraid;
    sign 0 appears to encode one crossing orientation and 1 the other --
    TODO confirm against callers.  *n* is the number of strands.  Returns a
    list of [a, b] coordinate pairs.

    NOTE(review): Python 2 code -- range(n) must return a *list* here,
    because spaceColumn mutates it in place; under Python 3 this raises
    TypeError on assignment.
    """
    start=range(n)
    end=range(n)
    rect=[]
    for gen in br:
        if gen[0]==0:
            height=end[gen[1]]
            height2=end[gen[1]+1]
            spaceColumn(start,height)
            spaceRect(rect,height)
            rect.append([height,height2+1])
            end[gen[1]+1]=height+1
            spaceColumn(end,height+2)
        else:
            spaceColumn(start,end[gen[1]+1]+1)
            spaceRect(rect,end[gen[1]+1]+1)
            rect.append([end[gen[1]],end[gen[1]+1]+1])
            tmp=end[gen[1]+1]
            spaceColumn(end,tmp)
            end[gen[1]]=tmp
##        print (start,end,rect)
    # Close the diagram: wrap the accumulated rectangles with one
    # [start, .] / [end, .] pair per strand, outermost strand first.
    mx=findMax(rect)
    for i in range(len(start)):
        rect=[[start[len(start)-i-1],mx+i+1]]+rect+[[end[len(start)-i-1],mx+i+1]]
    return rect
##print braidToRect([[0,0],[1,0]],2)
def elim(tab):
    """Return a copy of *tab* with falsy entries (e.g. empty strings) removed."""
    return [entry for entry in tab if entry]
def rdBraid(s):
    """Parse a braid word string like "{1,-2,3}" into (generators, strand count).

    Each integer k becomes (1, k-1) when positive and (0, -k-1) when
    negative; the second element of the result is max(|k|) + 1.
    """
    gens = []
    width = -1
    for token in s[1:-1].split(","):
        value = int(token)
        if value < 0:
            gens.append((0, -value - 1))
            width = max(width, -value)
        else:
            gens.append((1, value - 1))
            width = max(width, value)
    return (gens, width + 1)
###############application
import pickle
if __name__ == "__main__":
    pass
##
##    br=open("braidList.txt","r")
##    rawList=[elim(kn.split(" ")) for kn in br.read().split("\n")]
##    br.close()
##    atlas=dict()
##    import simplify.diagSimplify
##
##    for kn in rawList:
##        tmp=rdBraid(kn[2])
##        atlas[(int(kn[0]),int(kn[1]))]=simplify.diagSimplify.simplify(
##            braidToRect(tmp[0],tmp[1]),5000)
##        if len(atlas)%100==0: print len(atlas)
##    ##the result is the knot dico called atlas!
##    sav=open("knotAtlas.pic","wb")
##    pickle.dump(atlas,sav)
##    sav.close()
##    print "Atlas ready"
# Load the previously pickled knot atlas (built by the commented-out code above).
# NOTE(review): this runs unconditionally at import time and fails if the
# pickle file is missing; consider guarding it under __main__ as well.
av=open("knotAtlas.pic","rb")
atlas=pickle.load(av)
av.close()
# Dump the atlas to text: one "(i, j): diagram" line per knot, stopping at
# the first missing index j for each i.  Python 2 (print statement, has_key).
if __name__ == "__main__":
    s=""
    print atlas[(7,2)]
    for i in range(13):
        for j in range(1,len(atlas)+1):
            if atlas.has_key((i,j)):
                s+=str((i,j))+": "+str(atlas[(i,j)])+"\n"
            else:
                break
        print "passage"
    av=open("knotAtlasV1.txt","w")
    av.write(s)
    av.close()
##print atlas | src/KnotTheory/HFK-Zurich/braid2rect.py | def spaceRect(rect,y):
for i in range(len(rect)):
for b in [0,1]:
if rect[i][b]>=y:
rect[i][b]+=1
def spaceColumn(col,y):
    """Shift every column entry at height y or above up by one (in place)."""
    for idx, val in enumerate(col):
        if val >= y:
            col[idx] = val + 1
def findMax(rect):
    """Return the largest coordinate appearing in *rect*, or -1 when empty."""
    best = -1
    for pair in rect:
        for side in (0, 1):
            if pair[side] > best:
                best = pair[side]
    return best
def braidToRect(br,n):
    """Convert a braid word into a rectangular (grid) diagram.

    *br* is a sequence of generators (sign, strand) as produced by rdBraid;
    sign 0 appears to encode one crossing orientation and 1 the other --
    TODO confirm against callers.  *n* is the number of strands.  Returns a
    list of [a, b] coordinate pairs.

    NOTE(review): Python 2 code -- range(n) must return a *list* here,
    because spaceColumn mutates it in place; under Python 3 this raises
    TypeError on assignment.
    """
    start=range(n)
    end=range(n)
    rect=[]
    for gen in br:
        if gen[0]==0:
            height=end[gen[1]]
            height2=end[gen[1]+1]
            spaceColumn(start,height)
            spaceRect(rect,height)
            rect.append([height,height2+1])
            end[gen[1]+1]=height+1
            spaceColumn(end,height+2)
        else:
            spaceColumn(start,end[gen[1]+1]+1)
            spaceRect(rect,end[gen[1]+1]+1)
            rect.append([end[gen[1]],end[gen[1]+1]+1])
            tmp=end[gen[1]+1]
            spaceColumn(end,tmp)
            end[gen[1]]=tmp
##        print (start,end,rect)
    # Close the diagram: wrap the accumulated rectangles with one
    # [start, .] / [end, .] pair per strand, outermost strand first.
    mx=findMax(rect)
    for i in range(len(start)):
        rect=[[start[len(start)-i-1],mx+i+1]]+rect+[[end[len(start)-i-1],mx+i+1]]
    return rect
##print braidToRect([[0,0],[1,0]],2)
def elim(tab):
    """Return a copy of *tab* with falsy entries (e.g. empty strings) removed."""
    return [entry for entry in tab if entry]
def rdBraid(s):
    """Parse a braid word string like "{1,-2,3}" into (generators, strand count).

    Each integer k becomes (1, k-1) when positive and (0, -k-1) when
    negative; the second element of the result is max(|k|) + 1.
    """
    gens = []
    width = -1
    for token in s[1:-1].split(","):
        value = int(token)
        if value < 0:
            gens.append((0, -value - 1))
            width = max(width, -value)
        else:
            gens.append((1, value - 1))
            width = max(width, value)
    return (gens, width + 1)
###############application
import pickle
if __name__ == "__main__":
    pass
##
##    br=open("braidList.txt","r")
##    rawList=[elim(kn.split(" ")) for kn in br.read().split("\n")]
##    br.close()
##    atlas=dict()
##    import simplify.diagSimplify
##
##    for kn in rawList:
##        tmp=rdBraid(kn[2])
##        atlas[(int(kn[0]),int(kn[1]))]=simplify.diagSimplify.simplify(
##            braidToRect(tmp[0],tmp[1]),5000)
##        if len(atlas)%100==0: print len(atlas)
##    ##the result is the knot dico called atlas!
##    sav=open("knotAtlas.pic","wb")
##    pickle.dump(atlas,sav)
##    sav.close()
##    print "Atlas ready"
# Load the previously pickled knot atlas (built by the commented-out code above).
# NOTE(review): this runs unconditionally at import time and fails if the
# pickle file is missing; consider guarding it under __main__ as well.
av=open("knotAtlas.pic","rb")
atlas=pickle.load(av)
av.close()
# Dump the atlas to text: one "(i, j): diagram" line per knot, stopping at
# the first missing index j for each i.  Python 2 (print statement, has_key).
if __name__ == "__main__":
    s=""
    print atlas[(7,2)]
    for i in range(13):
        for j in range(1,len(atlas)+1):
            if atlas.has_key((i,j)):
                s+=str((i,j))+": "+str(atlas[(i,j)])+"\n"
            else:
                break
        print "passage"
    av=open("knotAtlasV1.txt","w")
    av.write(s)
    av.close()
##print atlas | 0.049359 | 0.267197 |
from typing import Any, Dict, List, Tuple, Union
from ..logger import get_logger
log = get_logger("DB-Util")
def convert_to_db_list(orig_list: Union[Tuple[Any, ...], List[Any]]) -> Dict[str, Any]:
    """Encode a native list or tuple as a DynamoDB "L" attribute value.

    Tuples are encoded as "L" too since DynamoDB has no tuple type, and
    ``None`` entries are stored as the string ``"None"``.

    Parameters
    ----------
    orig_list: Union[Tuple[Any, ...], List[Any]]
        The native sequence to encode.

    Returns
    -------
    Dict[str, Any]
        The DynamoDB list: {'L': [<list elements>]}.
    """
    encoded: List[Any] = []
    for item in orig_list:
        if isinstance(item, str):
            encoded.append({"S": item})
        elif isinstance(item, (int, float)):
            # NOTE: bools also land here (bool is a subclass of int).
            encoded.append({"N": str(item)})
        elif isinstance(item, (list, tuple)):
            encoded.append(convert_to_db_list(item))
        elif isinstance(item, dict):
            encoded.append(convert_to_db_dict(item))
        elif item is None:
            encoded.append({"S": "None"})
        else:
            raise RuntimeError("Cannot convert %s (%s)" % (str(item), type(item)))
    return {"L": encoded}
def convert_to_db_dict(orig_dict: Dict[str, Any]) -> Dict[str, Any]:
    """Convert a dict to the DynamoDB dict form.

    ``None`` values are stored as the string ``"None"``; numbers (including
    bools, which are int subclasses) become "N" strings.

    Parameters
    ----------
    orig_dict: Dict[str, Any]
        The native dict.

    Returns
    -------
    new_dict: Dict[str, Any]
        The DynamoDB dict: {'M': {<dict elements>}}.

    Raises
    ------
    RuntimeError
        If a value has a type with no DynamoDB encoding here.
    """
    new_dict: Dict[str, Any] = {}
    for key, val in orig_dict.items():
        if isinstance(val, str):
            new_dict[key] = {"S": val}
        elif isinstance(val, (int, float)):
            new_dict[key] = {"N": str(val)}
        elif isinstance(val, (list, tuple)):
            new_dict[key] = convert_to_db_list(val)
        elif isinstance(val, dict):
            new_dict[key] = convert_to_db_dict(val)
        elif val is None:
            new_dict[key] = {"S": "None"}
        else:
            # Fixed typo in the error message ("Cnanot" -> "Cannot").
            raise RuntimeError("Cannot convert %s (%s)" % (str(val), type(val)))
    return {"M": new_dict}
def convert_to_list(db_list: Dict[str, Any]) -> List[Any]:
    """Decode a DynamoDB "L" attribute value into a native list.

    "S" values equal to "None" decode to ``None``; "N" values decode to
    ``float``.

    Parameters
    ----------
    db_list: Dict[str, Any]
        A DynamoDB list: {'L': [<list elements>]}.

    Returns
    -------
    List[Any]
        The decoded native list.

    Raises
    ------
    RuntimeError
        If *db_list* is not an "L" value or an element tag is unknown.
    """
    if "L" not in db_list:
        raise RuntimeError("Not a DynamoDB list: %s" % (str(db_list)))
    decoded: List[Any] = []
    for entry in db_list["L"]:
        assert len(entry) == 1
        tag = next(iter(entry.keys()))
        if tag == "S":
            decoded.append(None if entry[tag] == "None" else str(entry[tag]))
        elif tag == "N":
            decoded.append(float(entry[tag]))
        elif tag == "L":
            decoded.append(convert_to_list(entry))
        elif tag == "M":
            decoded.append(convert_to_dict(entry))
        else:
            raise RuntimeError("Cannot convert %s (%s)" % (str(entry), tag))
    return decoded
def convert_to_dict(db_dict: Dict[str, Any]) -> Dict[str, Any]:
    """Convert a DynamoDB dict to a native dict.

    "S" values equal to "None" decode to ``None``; "N" values decode to float.

    Parameters
    ----------
    db_dict: Dict[str, Any]
        A DynamoDB dict: {'M': {<dict elements>}}.

    Returns
    -------
    new_dict: Dict[str, Any]
        A native dict.
    """
    if "M" not in db_dict:
        raise RuntimeError("Not a DynamoDB dict: %s" % str(db_dict))
    new_dict: Dict[str, Any] = {}
    for key, elt in db_dict["M"].items():
        # NOTE(review): unlike convert_to_list there is no assert that each
        # entry has exactly one key; a multi-key entry silently uses its first.
        dtype = list(elt.keys())[0]
        if dtype == "S":
            new_dict[key] = str(elt[dtype]) if elt[dtype] != "None" else None
        elif dtype == "N":
            new_dict[key] = float(elt[dtype])
        elif dtype == "L":
            new_dict[key] = convert_to_list(elt)
        elif dtype == "M":
            new_dict[key] = convert_to_dict(elt)
        else:
            raise RuntimeError("Cannot convert %s (%s)" % (str(elt), dtype))
    # NOTE(review): the text after "new_dict" below looks like dataset-export
    # residue fused onto this line, not part of the program.
    return new_dict | lorien/database/util.py | from typing import Any, Dict, List, Tuple, Union
from ..logger import get_logger
log = get_logger("DB-Util")
def convert_to_db_list(orig_list: Union[Tuple[Any, ...], List[Any]]) -> Dict[str, Any]:
    """Encode a native list or tuple as a DynamoDB "L" attribute value.

    Tuples are encoded as "L" too since DynamoDB has no tuple type, and
    ``None`` entries are stored as the string ``"None"``.

    Parameters
    ----------
    orig_list: Union[Tuple[Any, ...], List[Any]]
        The native sequence to encode.

    Returns
    -------
    Dict[str, Any]
        The DynamoDB list: {'L': [<list elements>]}.
    """
    encoded: List[Any] = []
    for item in orig_list:
        if isinstance(item, str):
            encoded.append({"S": item})
        elif isinstance(item, (int, float)):
            # NOTE: bools also land here (bool is a subclass of int).
            encoded.append({"N": str(item)})
        elif isinstance(item, (list, tuple)):
            encoded.append(convert_to_db_list(item))
        elif isinstance(item, dict):
            encoded.append(convert_to_db_dict(item))
        elif item is None:
            encoded.append({"S": "None"})
        else:
            raise RuntimeError("Cannot convert %s (%s)" % (str(item), type(item)))
    return {"L": encoded}
def convert_to_db_dict(orig_dict: Dict[str, Any]) -> Dict[str, Any]:
    """Convert a dict to the DynamoDB dict form.

    ``None`` values are stored as the string ``"None"``; numbers (including
    bools, which are int subclasses) become "N" strings.

    Parameters
    ----------
    orig_dict: Dict[str, Any]
        The native dict.

    Returns
    -------
    new_dict: Dict[str, Any]
        The DynamoDB dict: {'M': {<dict elements>}}.

    Raises
    ------
    RuntimeError
        If a value has a type with no DynamoDB encoding here.
    """
    new_dict: Dict[str, Any] = {}
    for key, val in orig_dict.items():
        if isinstance(val, str):
            new_dict[key] = {"S": val}
        elif isinstance(val, (int, float)):
            new_dict[key] = {"N": str(val)}
        elif isinstance(val, (list, tuple)):
            new_dict[key] = convert_to_db_list(val)
        elif isinstance(val, dict):
            new_dict[key] = convert_to_db_dict(val)
        elif val is None:
            new_dict[key] = {"S": "None"}
        else:
            # Fixed typo in the error message ("Cnanot" -> "Cannot").
            raise RuntimeError("Cannot convert %s (%s)" % (str(val), type(val)))
    return {"M": new_dict}
def convert_to_list(db_list: Dict[str, Any]) -> List[Any]:
    """Decode a DynamoDB "L" attribute value into a native list.

    "S" values equal to "None" decode to ``None``; "N" values decode to
    ``float``.

    Parameters
    ----------
    db_list: Dict[str, Any]
        A DynamoDB list: {'L': [<list elements>]}.

    Returns
    -------
    List[Any]
        The decoded native list.

    Raises
    ------
    RuntimeError
        If *db_list* is not an "L" value or an element tag is unknown.
    """
    if "L" not in db_list:
        raise RuntimeError("Not a DynamoDB list: %s" % (str(db_list)))
    decoded: List[Any] = []
    for entry in db_list["L"]:
        assert len(entry) == 1
        tag = next(iter(entry.keys()))
        if tag == "S":
            decoded.append(None if entry[tag] == "None" else str(entry[tag]))
        elif tag == "N":
            decoded.append(float(entry[tag]))
        elif tag == "L":
            decoded.append(convert_to_list(entry))
        elif tag == "M":
            decoded.append(convert_to_dict(entry))
        else:
            raise RuntimeError("Cannot convert %s (%s)" % (str(entry), tag))
    return decoded
def convert_to_dict(db_dict: Dict[str, Any]) -> Dict[str, Any]:
    """Convert a DynamoDB dict to a native dict.

    "S" values equal to "None" decode to ``None``; "N" values decode to float.

    Parameters
    ----------
    db_dict: Dict[str, Any]
        A DynamoDB dict: {'M': {<dict elements>}}.

    Returns
    -------
    new_dict: Dict[str, Any]
        A native dict.
    """
    if "M" not in db_dict:
        raise RuntimeError("Not a DynamoDB dict: %s" % str(db_dict))
    new_dict: Dict[str, Any] = {}
    for key, elt in db_dict["M"].items():
        # NOTE(review): unlike convert_to_list there is no assert that each
        # entry has exactly one key; a multi-key entry silently uses its first.
        dtype = list(elt.keys())[0]
        if dtype == "S":
            new_dict[key] = str(elt[dtype]) if elt[dtype] != "None" else None
        elif dtype == "N":
            new_dict[key] = float(elt[dtype])
        elif dtype == "L":
            new_dict[key] = convert_to_list(elt)
        elif dtype == "M":
            new_dict[key] = convert_to_dict(elt)
        else:
            raise RuntimeError("Cannot convert %s (%s)" % (str(elt), dtype))
    # NOTE(review): the trailing numbers below look like dataset-export
    # residue fused onto this line, not part of the program.
    return new_dict | 0.873032 | 0.364071
from pygame import mixer # Playing sound
from gtts import gTTS, gTTSError
from mutagen.mp3 import MP3
from multiprocessing import Process, Queue, Manager
from audioplayer import AudioPlayer
import time
import uuid
import os
import gtts.tokenizer.symbols as sym
# Add custom abbreviations for pt-br.
# Each pair maps a chat shorthand to the word gTTS should actually speak.
new_abbreviations = [
    ("krl", "caralho"),
    ("blz", "beleza"),
    ("lib", "libe")
]
sym.SUB_PAIRS.extend(new_abbreviations)
# Define sounds path and init mixer.
# NOTE(review): the output device name is hard-coded; this requires the
# VB-Audio Virtual Cable driver to be installed or init will fail.
SOUNDS_PATH = "sounds"
mixer.init(devicename="CABLE Input (VB-Audio Virtual Cable)")
class TaskHandler(Process):
    """Background process that turns queued text messages into spoken audio."""
    def __init__(self, tasks: Queue, settings: Manager):
        super(TaskHandler, self).__init__()
        self.queue = tasks  # messages
        self.settings = settings  # UI user configs
        self._sound_list = []
        self.running = True
    def run(self) -> None:
        # Blocks on the queue; each item is synthesized then played in order.
        print("Starting TaskHandler")
        while self.running:
            item = self.queue.get()  # TODO: send a dict whose key describes the task instead of sending the text directly
            print(item)
            try:
                path = text_to_voice(item, self.settings["lang"])
                play_sound(path)
            except AssertionError:
                print("deixa de zoar krl")
            except gTTSError as err:
                print(f"Error while saving file: \n{err.msg}")
    def stop(self):
        # terminate() force-kills the process even if run() is blocked on
        # queue.get(); setting self.running alone would not unblock it.
        print("Stoping TaskHandler...")
        self.running = False
        self.terminate()
        self.join()
def play_sound(path=""):
    """Play the mp3 at *path* on the virtual cable and the local speakers.

    Blocks for the clip's duration so overlapping playback cannot start.
    """
    duration = get_sound_duration(path)
    mixer.music.load(path)  # Load the mp3 on the virtual-cable mixer
    mixer.music.play()
    player = AudioPlayer(path)  # play sound in local speakers
    player.play()
    time.sleep(duration)  # wait until the end
def text_to_voice(text="", language="pt-br"):
    """Synthesize *text* with gTTS and save it under SOUNDS_PATH.

    Returns the path of the generated mp3; a random UUID in the file name
    avoids collisions between consecutive messages.
    """
    out_path = f"{SOUNDS_PATH}/voice-{uuid.uuid4()}.mp3"
    speech = gTTS(text, lang=language)
    speech.save(out_path)
    return out_path
def create_sound_dir():
    """Ensure the SOUNDS_PATH directory exists.

    Uses os.makedirs(exist_ok=True): race-free (no check-then-create gap
    like the previous isdir/mkdir pair) and it also creates intermediate
    directories should SOUNDS_PATH ever become a nested path.
    """
    os.makedirs(SOUNDS_PATH, exist_ok=True)
def get_sound_duration(path):
    """Return the duration in seconds of the mp3 at *path*."""
    return MP3(path).info.length
def init_settings():
    """Create the shared (cross-process) settings dict with defaults."""
    settings = Manager().dict()
    settings["lang"] = "pt-br"
    # NOTE(review): the text after "settings" below looks like dataset-export
    # residue fused onto this line, not part of the program.
    return settings | bg.py | from pygame import mixer # Playing sound
from gtts import gTTS, gTTSError
from mutagen.mp3 import MP3
from multiprocessing import Process, Queue, Manager
from audioplayer import AudioPlayer
import time
import uuid
import os
import gtts.tokenizer.symbols as sym
# Add custom abbreviations for pt-br.
# Each pair maps a chat shorthand to the word gTTS should actually speak.
new_abbreviations = [
    ("krl", "caralho"),
    ("blz", "beleza"),
    ("lib", "libe")
]
sym.SUB_PAIRS.extend(new_abbreviations)
# Define sounds path and init mixer.
# NOTE(review): the output device name is hard-coded; this requires the
# VB-Audio Virtual Cable driver to be installed or init will fail.
SOUNDS_PATH = "sounds"
mixer.init(devicename="CABLE Input (VB-Audio Virtual Cable)")
class TaskHandler(Process):
    """Background process that turns queued text messages into spoken audio."""
    def __init__(self, tasks: Queue, settings: Manager):
        super(TaskHandler, self).__init__()
        self.queue = tasks  # messages
        self.settings = settings  # UI user configs
        self._sound_list = []
        self.running = True
    def run(self) -> None:
        # Blocks on the queue; each item is synthesized then played in order.
        print("Starting TaskHandler")
        while self.running:
            item = self.queue.get()  # TODO: send a dict whose key describes the task instead of sending the text directly
            print(item)
            try:
                path = text_to_voice(item, self.settings["lang"])
                play_sound(path)
            except AssertionError:
                print("deixa de zoar krl")
            except gTTSError as err:
                print(f"Error while saving file: \n{err.msg}")
    def stop(self):
        # terminate() force-kills the process even if run() is blocked on
        # queue.get(); setting self.running alone would not unblock it.
        print("Stoping TaskHandler...")
        self.running = False
        self.terminate()
        self.join()
def play_sound(path=""):
    """Play the mp3 at *path* on the virtual cable and the local speakers.

    Blocks for the clip's duration so overlapping playback cannot start.
    """
    duration = get_sound_duration(path)
    mixer.music.load(path)  # Load the mp3 on the virtual-cable mixer
    mixer.music.play()
    player = AudioPlayer(path)  # play sound in local speakers
    player.play()
    time.sleep(duration)  # wait until the end
def text_to_voice(text="", language="pt-br"):
    """Synthesize *text* with gTTS and save it under SOUNDS_PATH.

    Returns the path of the generated mp3; a random UUID in the file name
    avoids collisions between consecutive messages.
    """
    out_path = f"{SOUNDS_PATH}/voice-{uuid.uuid4()}.mp3"
    speech = gTTS(text, lang=language)
    speech.save(out_path)
    return out_path
def create_sound_dir():
    """Ensure the SOUNDS_PATH directory exists.

    Uses os.makedirs(exist_ok=True): race-free (no check-then-create gap
    like the previous isdir/mkdir pair) and it also creates intermediate
    directories should SOUNDS_PATH ever become a nested path.
    """
    os.makedirs(SOUNDS_PATH, exist_ok=True)
def get_sound_duration(path):
    """Return the duration in seconds of the mp3 at *path*."""
    return MP3(path).info.length
def init_settings():
    """Create the shared (cross-process) settings dict with defaults."""
    settings = Manager().dict()
    settings["lang"] = "pt-br"
    # NOTE(review): the trailing numbers below look like dataset-export
    # residue fused onto this line, not part of the program.
    return settings | 0.285671 | 0.074467
__all__ = ["tripleFromMetadataXML",
"decodeTriple",
"ChemistryLookupError" ]
import xml.etree.ElementTree as ET, os.path
from pkg_resources import Requirement, resource_filename
from collections import OrderedDict
# Raised whenever chemistry information cannot be loaded or looked up.
class ChemistryLookupError(Exception): pass
def _loadBarcodeMappingsFromFile(mapFile):
    """Parse mapping.xml into an OrderedDict keyed by
    (BindingKit, SequencingKit, SoftwareVersion) -> SequencingChemistry.
    """
    try:
        tree = ET.parse(mapFile)
        root = tree.getroot()
        mappingElements = root.findall("Mapping")
        mappings = OrderedDict()
        # NOTE(review): mapKeys is never used below; dead local.
        mapKeys = ["BindingKit", "SequencingKit", "SoftwareVersion", "SequencingChemistry"]
        for mapElement in mappingElements:
            bindingKit = mapElement.find("BindingKit").text
            sequencingKit = mapElement.find("SequencingKit").text
            softwareVersion = mapElement.find("SoftwareVersion").text
            sequencingChemistry = mapElement.find("SequencingChemistry").text
            mappings[(bindingKit, sequencingKit, softwareVersion)] = sequencingChemistry
        return mappings
    except:
        # Python 2 raise syntax; bare except reports any failure as a lookup error.
        raise ChemistryLookupError, "Error loading chemistry mapping xml"
def _loadBarcodeMappings():
    """Load the chemistry mapping table bundled with the pbcore package."""
    xml_path = resource_filename(Requirement.parse('pbcore'),
                                 'pbcore/chemistry/resources/mapping.xml')
    return _loadBarcodeMappingsFromFile(xml_path)
# Module-level cache: the mapping table is parsed once at import time.
_BARCODE_MAPPINGS = _loadBarcodeMappings()
def tripleFromMetadataXML(metadataXmlPath):
    """
    Scrape the triple from the metadata.xml, or exception if the file
    or the relevant contents are not found
    """
    nsd = {None: "http://pacificbiosciences.com/PAP/Metadata.xsd",
           "pb": "http://pacificbiosciences.com/PAP/Metadata.xsd"}
    try:
        tree = ET.parse(metadataXmlPath)
        root = tree.getroot()
        bindingKit = root.find("pb:BindingKit/pb:PartNumber", namespaces=nsd).text
        sequencingKit = root.find("pb:SequencingKit/pb:PartNumber", namespaces=nsd).text
        # Keep only the first two dot components of the instrument control
        # version (e.g. "2.3.0.1" -> "2.3").  The original comment claimed
        # three components, but the slice below keeps [0:2].
        instrumentControlVersion = root.find("pb:InstCtrlVer", namespaces=nsd).text
        verComponents = instrumentControlVersion.split(".")[0:2]
        instrumentControlVersion = ".".join(verComponents)
        return (bindingKit, sequencingKit, instrumentControlVersion)
    except Exception as e:
        # Python 2 raise syntax; any failure is surfaced as a lookup error.
        raise ChemistryLookupError, \
            ("Could not find, or extract chemistry information from, %s" % (metadataXmlPath,))
def decodeTriple(bindingKit, sequencingKit, softwareVersion):
    """
    Return the name of the chemistry configuration given the
    configuration triple that was recorded on the instrument.
    Falls back to "unknown" when the triple is not in the bundled table.
    """
    # NOTE(review): the text after the ")" below looks like dataset-export
    # residue fused onto this line, not part of the program.
    return _BARCODE_MAPPINGS.get((bindingKit, sequencingKit, softwareVersion), "unknown") | pbcore/chemistry/chemistry.py |
__all__ = ["tripleFromMetadataXML",
"decodeTriple",
"ChemistryLookupError" ]
import xml.etree.ElementTree as ET, os.path
from pkg_resources import Requirement, resource_filename
from collections import OrderedDict
# Raised whenever chemistry information cannot be loaded or looked up.
class ChemistryLookupError(Exception): pass
def _loadBarcodeMappingsFromFile(mapFile):
    """Parse mapping.xml into an OrderedDict keyed by
    (BindingKit, SequencingKit, SoftwareVersion) -> SequencingChemistry.
    """
    try:
        tree = ET.parse(mapFile)
        root = tree.getroot()
        mappingElements = root.findall("Mapping")
        mappings = OrderedDict()
        # NOTE(review): mapKeys is never used below; dead local.
        mapKeys = ["BindingKit", "SequencingKit", "SoftwareVersion", "SequencingChemistry"]
        for mapElement in mappingElements:
            bindingKit = mapElement.find("BindingKit").text
            sequencingKit = mapElement.find("SequencingKit").text
            softwareVersion = mapElement.find("SoftwareVersion").text
            sequencingChemistry = mapElement.find("SequencingChemistry").text
            mappings[(bindingKit, sequencingKit, softwareVersion)] = sequencingChemistry
        return mappings
    except:
        # Python 2 raise syntax; bare except reports any failure as a lookup error.
        raise ChemistryLookupError, "Error loading chemistry mapping xml"
def _loadBarcodeMappings():
    """Load the chemistry mapping table bundled with the pbcore package."""
    xml_path = resource_filename(Requirement.parse('pbcore'),
                                 'pbcore/chemistry/resources/mapping.xml')
    return _loadBarcodeMappingsFromFile(xml_path)
# Module-level cache: the mapping table is parsed once at import time.
_BARCODE_MAPPINGS = _loadBarcodeMappings()
def tripleFromMetadataXML(metadataXmlPath):
    """
    Scrape the triple from the metadata.xml, or exception if the file
    or the relevant contents are not found
    """
    nsd = {None: "http://pacificbiosciences.com/PAP/Metadata.xsd",
           "pb": "http://pacificbiosciences.com/PAP/Metadata.xsd"}
    try:
        tree = ET.parse(metadataXmlPath)
        root = tree.getroot()
        bindingKit = root.find("pb:BindingKit/pb:PartNumber", namespaces=nsd).text
        sequencingKit = root.find("pb:SequencingKit/pb:PartNumber", namespaces=nsd).text
        # Keep only the first two dot components of the instrument control
        # version (e.g. "2.3.0.1" -> "2.3").  The original comment claimed
        # three components, but the slice below keeps [0:2].
        instrumentControlVersion = root.find("pb:InstCtrlVer", namespaces=nsd).text
        verComponents = instrumentControlVersion.split(".")[0:2]
        instrumentControlVersion = ".".join(verComponents)
        return (bindingKit, sequencingKit, instrumentControlVersion)
    except Exception as e:
        # Python 2 raise syntax; any failure is surfaced as a lookup error.
        raise ChemistryLookupError, \
            ("Could not find, or extract chemistry information from, %s" % (metadataXmlPath,))
def decodeTriple(bindingKit, sequencingKit, softwareVersion):
    """
    Return the name of the chemistry configuration given the
    configuration triple that was recorded on the instrument.
    Falls back to "unknown" when the triple is not in the bundled table.
    """
    # NOTE(review): the trailing numbers below look like dataset-export
    # residue fused onto this line, not part of the program.
    return _BARCODE_MAPPINGS.get((bindingKit, sequencingKit, softwareVersion), "unknown") | 0.578448 | 0.113949
from twitter import Twitter, OAuth, TwitterHTTPError
import os
from twitter_info import *
# put the full path and file name of the file you want to store your "already followed"
# list in
ALREADY_FOLLOWED_FILE = "already-followed.csv"
t = Twitter(auth=OAuth(OAUTH_TOKEN, OAUTH_SECRET,
CONSUMER_KEY, CONSUMER_SECRET))
def search_tweets(q, count=100, result_type="recent"):
    """Search Twitter for tweets matching *q* (hashtag, word, etc.).

    Returns the raw search response from the Twitter API client.
    """
    return t.search.tweets(q=q, count=count, result_type=result_type)
def auto_fav(q, count=100, result_type="recent"):
    """Favorite tweets that match *q*, skipping the bot's own tweets.

    Errors (e.g. an already-favorited tweet) are printed and the loop
    continues with the next tweet.
    """
    search_result = search_tweets(q, count, result_type)
    for tweet in search_result["statuses"]:
        try:
            # never favorite our own tweets
            if tweet["user"]["screen_name"] == TWITTER_HANDLE:
                continue
            # distinct name (the original rebound `result` inside the loop)
            fav = t.favorites.create(_id=tweet["id"])
            print("favorited: %s" % (fav["text"].encode("utf-8")))
        # thrown e.g. when the tweet was already favorited
        except TwitterHTTPError as e:
            print("error: %s" % (str(e)))
def auto_rt(q, count=100, result_type="recent"):
    """Retweet tweets that match *q*, skipping the bot's own tweets.

    Errors (e.g. an already-retweeted tweet) are printed and the loop
    continues with the next tweet.
    """
    search_result = search_tweets(q, count, result_type)
    for tweet in search_result["statuses"]:
        try:
            # never retweet our own tweets
            if tweet["user"]["screen_name"] == TWITTER_HANDLE:
                continue
            # distinct name (the original rebound `result` inside the loop)
            rt = t.statuses.retweet(id=tweet["id"])
            print("retweeted: %s" % (rt["text"].encode("utf-8")))
        # thrown e.g. when the tweet was already retweeted
        except TwitterHTTPError as e:
            print("error: %s" % (str(e)))
def get_do_not_follow_list():
    """Return the set of user IDs the bot has already followed.

    Reads IDs (one per line) from ALREADY_FOLLOWED_FILE, creating the file
    if it does not exist.  Blank or whitespace-only lines are skipped, so a
    trailing newline or hand-edited file no longer crashes int() parsing
    (the original called int(line) on every line unconditionally).
    """
    # make sure the "already followed" file exists
    if not os.path.isfile(ALREADY_FOLLOWED_FILE):
        with open(ALREADY_FOLLOWED_FILE, "w") as out_file:
            out_file.write("")
    with open(ALREADY_FOLLOWED_FILE) as in_file:
        return {int(line) for line in in_file if line.strip()}
def auto_follow(q, count=100, result_type="recent"):
    """Follow anyone who tweets about *q* (hashtag, word, etc.).

    Skips the bot itself, accounts already followed, and accounts on the
    "already followed" list.  Aborts the program on any API error other
    than being blocked.
    """
    search_result = search_tweets(q, count, result_type)
    following = set(t.friends.ids(screen_name=TWITTER_HANDLE)["ids"])
    do_not_follow = get_do_not_follow_list()
    for tweet in search_result["statuses"]:
        user = tweet["user"]
        try:
            if (user["screen_name"] != TWITTER_HANDLE
                    and user["id"] not in following
                    and user["id"] not in do_not_follow):
                t.friendships.create(user_id=user["id"], follow=False)
                following.add(user["id"])
                print("followed %s" % (user["screen_name"]))
        except TwitterHTTPError as e:
            print("error: %s" % (str(e)))
            # quit on error unless it's because someone blocked me
            if "blocked" not in str(e).lower():
                quit()
def auto_follow_followers_for_user(user_screen_name, count=100):
    """Follow up to *count* of the followers of *user_screen_name*.

    Accounts already followed or on the "already followed" list are skipped.
    """
    following = set(t.friends.ids(screen_name=TWITTER_HANDLE)["ids"])
    candidates = set(t.followers.ids(screen_name=user_screen_name)["ids"][:count])
    do_not_follow = get_do_not_follow_list()
    # set difference keeps exactly the IDs the original per-item checks allowed
    for user_id in candidates - following - do_not_follow:
        try:
            t.friendships.create(user_id=user_id, follow=False)
            print("followed %s" % user_id)
        except TwitterHTTPError as e:
            print("error: %s" % (str(e)))
def auto_follow_followers():
    """Follow back everyone who follows you but whom you don't follow yet."""
    following = set(t.friends.ids(screen_name=TWITTER_HANDLE)["ids"])
    followers = set(t.followers.ids(screen_name=TWITTER_HANDLE)["ids"])
    for user_id in followers - following:
        try:
            t.friendships.create(user_id=user_id, follow=False)
        except Exception as e:
            print("error: %s" % (str(e)))
def auto_unfollow_nonfollowers():
    """
    Unfollows everyone who hasn't followed you back
    """
    following = set(t.friends.ids(screen_name=TWITTER_HANDLE)["ids"])
    followers = set(t.followers.ids(screen_name=TWITTER_HANDLE)["ids"])
    # put user IDs here that you want to keep following even if they don't
    # follow you back
    users_keep_following = set([])
    not_following_back = following - followers
    # make sure the "already followed" file exists
    if not os.path.isfile(ALREADY_FOLLOWED_FILE):
        with open(ALREADY_FOLLOWED_FILE, "w") as out_file:
            out_file.write("")
    # update the "already followed" file with users who didn't follow back:
    # merge the current non-followers with the IDs already recorded on disk
    already_followed = set(not_following_back)
    af_list = []
    with open(ALREADY_FOLLOWED_FILE) as in_file:
        for line in in_file:
            af_list.append(int(line))
    already_followed.update(set(af_list))
    del af_list
    # rewrite the file with the merged set, one ID per line
    with open(ALREADY_FOLLOWED_FILE, "w") as out_file:
        for val in already_followed:
            out_file.write(str(val) + "\n")
    # finally drop everyone who didn't follow back (minus the keep list)
    for user_id in not_following_back:
        if user_id not in users_keep_following:
            t.friendships.destroy(user_id=user_id)
            print("unfollowed %d" % (user_id))
def auto_mute_following():
    """Mute everyone you are following who is not already muted.

    IDs added to users_keep_unmuted are exempt.
    """
    following = set(t.friends.ids(screen_name=TWITTER_HANDLE)["ids"])
    muted = set(t.mutes.users.ids(screen_name=TWITTER_HANDLE)["ids"])
    # put user IDs of people you do not want to mute here
    users_keep_unmuted = set([])
    for user_id in (following - muted) - users_keep_unmuted:
        t.mutes.users.create(user_id=user_id)
        print("muted %d" % (user_id))
def auto_unmute():
    """
    Unmutes everyone that you have muted
    """
    muted = set(t.mutes.users.ids(screen_name=TWITTER_HANDLE)["ids"])
    # put user IDs of people you want to remain muted here
    users_keep_muted = set([])
    # unmute all (original comment wrongly said "mute all")
    for user_id in muted:
        if user_id not in users_keep_muted:
            t.mutes.users.destroy(user_id=user_id)
            # NOTE(review): the text after the ")" below looks like
            # dataset-export residue fused onto this line.
            print("unmuted %d" % (user_id)) | twitter/twitter_follow_bot.py | from twitter import Twitter, OAuth, TwitterHTTPError
import os
from twitter_info import *
# put the full path and file name of the file you want to store your "already followed"
# list in
ALREADY_FOLLOWED_FILE = "already-followed.csv"
t = Twitter(auth=OAuth(OAUTH_TOKEN, OAUTH_SECRET,
CONSUMER_KEY, CONSUMER_SECRET))
def search_tweets(q, count=100, result_type="recent"):
    """Search Twitter for tweets matching *q* (hashtag, word, etc.).

    Returns the raw search response from the Twitter API client.
    """
    return t.search.tweets(q=q, count=count, result_type=result_type)
def auto_fav(q, count=100, result_type="recent"):
    """Favorite tweets that match *q*, skipping the bot's own tweets.

    Errors (e.g. an already-favorited tweet) are printed and the loop
    continues with the next tweet.
    """
    search_result = search_tweets(q, count, result_type)
    for tweet in search_result["statuses"]:
        try:
            # never favorite our own tweets
            if tweet["user"]["screen_name"] == TWITTER_HANDLE:
                continue
            # distinct name (the original rebound `result` inside the loop)
            fav = t.favorites.create(_id=tweet["id"])
            print("favorited: %s" % (fav["text"].encode("utf-8")))
        # thrown e.g. when the tweet was already favorited
        except TwitterHTTPError as e:
            print("error: %s" % (str(e)))
def auto_rt(q, count=100, result_type="recent"):
    """Retweet tweets that match *q*, skipping the bot's own tweets.

    Errors (e.g. an already-retweeted tweet) are printed and the loop
    continues with the next tweet.
    """
    search_result = search_tweets(q, count, result_type)
    for tweet in search_result["statuses"]:
        try:
            # never retweet our own tweets
            if tweet["user"]["screen_name"] == TWITTER_HANDLE:
                continue
            # distinct name (the original rebound `result` inside the loop)
            rt = t.statuses.retweet(id=tweet["id"])
            print("retweeted: %s" % (rt["text"].encode("utf-8")))
        # thrown e.g. when the tweet was already retweeted
        except TwitterHTTPError as e:
            print("error: %s" % (str(e)))
def get_do_not_follow_list():
    """Return the set of user IDs the bot has already followed.

    Reads IDs (one per line) from ALREADY_FOLLOWED_FILE, creating the file
    if it does not exist.  Blank or whitespace-only lines are skipped, so a
    trailing newline or hand-edited file no longer crashes int() parsing
    (the original called int(line) on every line unconditionally).
    """
    # make sure the "already followed" file exists
    if not os.path.isfile(ALREADY_FOLLOWED_FILE):
        with open(ALREADY_FOLLOWED_FILE, "w") as out_file:
            out_file.write("")
    with open(ALREADY_FOLLOWED_FILE) as in_file:
        return {int(line) for line in in_file if line.strip()}
def auto_follow(q, count=100, result_type="recent"):
    """Follow anyone who tweets about *q* (hashtag, word, etc.).

    Skips the bot itself, accounts already followed, and accounts on the
    "already followed" list.  Aborts the program on any API error other
    than being blocked.
    """
    search_result = search_tweets(q, count, result_type)
    following = set(t.friends.ids(screen_name=TWITTER_HANDLE)["ids"])
    do_not_follow = get_do_not_follow_list()
    for tweet in search_result["statuses"]:
        user = tweet["user"]
        try:
            if (user["screen_name"] != TWITTER_HANDLE
                    and user["id"] not in following
                    and user["id"] not in do_not_follow):
                t.friendships.create(user_id=user["id"], follow=False)
                following.add(user["id"])
                print("followed %s" % (user["screen_name"]))
        except TwitterHTTPError as e:
            print("error: %s" % (str(e)))
            # quit on error unless it's because someone blocked me
            if "blocked" not in str(e).lower():
                quit()
def auto_follow_followers_for_user(user_screen_name, count=100):
    """Follow up to *count* of the followers of *user_screen_name*.

    Accounts already followed or on the "already followed" list are skipped.
    """
    following = set(t.friends.ids(screen_name=TWITTER_HANDLE)["ids"])
    candidates = set(t.followers.ids(screen_name=user_screen_name)["ids"][:count])
    do_not_follow = get_do_not_follow_list()
    # set difference keeps exactly the IDs the original per-item checks allowed
    for user_id in candidates - following - do_not_follow:
        try:
            t.friendships.create(user_id=user_id, follow=False)
            print("followed %s" % user_id)
        except TwitterHTTPError as e:
            print("error: %s" % (str(e)))
def auto_follow_followers():
    """Follow back everyone who follows you but whom you don't follow yet."""
    following = set(t.friends.ids(screen_name=TWITTER_HANDLE)["ids"])
    followers = set(t.followers.ids(screen_name=TWITTER_HANDLE)["ids"])
    for user_id in followers - following:
        try:
            t.friendships.create(user_id=user_id, follow=False)
        except Exception as e:
            print("error: %s" % (str(e)))
def auto_unfollow_nonfollowers():
    """
    Unfollows everyone who hasn't followed you back
    """
    following = set(t.friends.ids(screen_name=TWITTER_HANDLE)["ids"])
    followers = set(t.followers.ids(screen_name=TWITTER_HANDLE)["ids"])
    # put user IDs here that you want to keep following even if they don't
    # follow you back
    users_keep_following = set([])
    not_following_back = following - followers
    # make sure the "already followed" file exists
    if not os.path.isfile(ALREADY_FOLLOWED_FILE):
        with open(ALREADY_FOLLOWED_FILE, "w") as out_file:
            out_file.write("")
    # update the "already followed" file with users who didn't follow back:
    # merge the current non-followers with the IDs already recorded on disk
    already_followed = set(not_following_back)
    af_list = []
    with open(ALREADY_FOLLOWED_FILE) as in_file:
        for line in in_file:
            af_list.append(int(line))
    already_followed.update(set(af_list))
    del af_list
    # rewrite the file with the merged set, one ID per line
    with open(ALREADY_FOLLOWED_FILE, "w") as out_file:
        for val in already_followed:
            out_file.write(str(val) + "\n")
    # finally drop everyone who didn't follow back (minus the keep list)
    for user_id in not_following_back:
        if user_id not in users_keep_following:
            t.friendships.destroy(user_id=user_id)
            print("unfollowed %d" % (user_id))
def auto_mute_following():
    """Mute everyone you are following who is not already muted.

    IDs added to users_keep_unmuted are exempt.
    """
    following = set(t.friends.ids(screen_name=TWITTER_HANDLE)["ids"])
    muted = set(t.mutes.users.ids(screen_name=TWITTER_HANDLE)["ids"])
    # put user IDs of people you do not want to mute here
    users_keep_unmuted = set([])
    for user_id in (following - muted) - users_keep_unmuted:
        t.mutes.users.create(user_id=user_id)
        print("muted %d" % (user_id))
def auto_unmute():
    """
    Unmutes everyone that you have muted
    """
    muted = set(t.mutes.users.ids(screen_name=TWITTER_HANDLE)["ids"])
    # put user IDs of people you want to remain muted here
    users_keep_muted = set([])
    # unmute all (original comment wrongly said "mute all")
    for user_id in muted:
        if user_id not in users_keep_muted:
            t.mutes.users.destroy(user_id=user_id)
            # NOTE(review): the trailing numbers below look like
            # dataset-export residue fused onto this line.
            print("unmuted %d" % (user_id)) | 0.319758 | 0.085939
from bs4 import BeautifulSoup
from Crypto.PublicKey import RSA
from argparse import ArgumentParser
import requests
import os
from . import __version__
FACTOR_DB_URL = 'http://factordb.com/index.php'
def get_int_href(href) -> int:
    """Resolve a FactorDB number link to the integer it points at.

    *href* is a BeautifulSoup anchor tag whose href ends in "...=<id>/".
    Fetches the FactorDB page for that id and parses the full number out
    of the second table on the page.
    """
    number_id = href.get('href').split('=')[1].strip('/')
    page = requests.get(FACTOR_DB_URL, params={'showid': number_id})
    soup = BeautifulSoup(page.text, 'html.parser')
    cell = soup.findAll('table')[1].findAll('tr')[2].findAll('td')[1]
    digits = cell.get_text().replace(os.linesep, '').replace('\n', '')
    return int(digits)
def factor_db(n) -> list:
    """Look up the two prime factors of semiprime *n* on factordb.com.

    Returns [p, q] on success, or None (after printing a message) when the
    lookup fails or *n* is not a fully-factored semiprime.
    """
    try:
        response = requests.get(FACTOR_DB_URL, params={'query': n})
        soup = BeautifulSoup(response.text, 'html.parser')
    # narrowed from a bare except: do not trap SystemExit/KeyboardInterrupt
    except Exception:
        print('Network connection failed.')
        return
    try:
        factors = soup.findAll('table')[1].findAll('tr')[2].findAll('td')[2].findAll('a')
    # narrowed from a bare except: page layout changed or unexpected response
    except Exception:
        print('Failed to parse data. Maybe the HTML of FactorDB has changed.')
        return
    if(len(factors) != 3):
        print('Factorization not found on FactorDB or n is not semiprime.')
        return
    factors = list(map(get_int_href, factors))
    factors.remove(int(n))
    return factors
def calculate_d(e, p, q) -> int:
# Modified method from: https://stackoverflow.com/questions/23279208/calculate-d-from-n-e-p-q-in-rsa
# Answer by: https://stackoverflow.com/users/448810/user448810
m = (p-1)*(q-1)
a, b, u = 0, m, 1
while e > 0:
q = b // e # integer division
e, a, b, u = b % e, u, e, a - q * u
if b == 1:
return a % m
raise ValueError("Must be coprime.")
def print_or_save(key, path=None):
key = key.exportKey(format='PEM')
if(not isinstance(key,str)):
key = key.decode('utf-8')
if(not path):
print()
print(key)
else:
with open(path,'w') as f:
f.write(key)
def gen_private(p, q, n=None, e = 0x10001, save_path=None):
if(p<q):
p,q = q,p # OpenSSL
if(not n):
n = p * q
d = calculate_d(e,p,q)
key = RSA.construct((n,e,d,p,q))
print_or_save(key, save_path)
def gen_public(n, e=0x10001, save_path = None):
key = RSA.construct((n,e))
print_or_save(key, save_path)
def resolve_pqne(args)->tuple:
if(not args.n and args.p and args.q):
return int(args.p), int(args.q), int(args.p) * int(args.q), int(args.e)
elif(args.n and args.p and not args.q):
return int(args.p), int(args.n) // int(args.p), int(args.n), int(args.e)
elif(args.n and not args.p and args.q):
return int(args.n) // int(args.q), int(args.q), int(args.n), int(args.e)
elif(args.n):
return None, None, int(args.n), int(args.e)
def gen_from_value(pqne, pub_key=False, save_path=None):
p, q, n, e = pqne
if(pub_key):
gen_public(n, e, save_path)
return
if(not p and not q):
print('Getting factors from factorDB.com ...')
pq = factor_db(n)
if(not pq):
return
p, q = pq
gen_private(p,q,n,e,save_path)
def gen_from_key(key_path, pub_key=False, save_path=None):
with open(key_path,'r') as f:
key = RSA.import_key(f.read())
if(pub_key):
print_or_save(key.publickey(), save_path)
elif(key.has_private()):
print_or_save(key, save_path)
else:
gen_from_value((None, None, key.n, key.e), save_path=save_path)
def main():
parser = ArgumentParser(description='Genarate private key from public key using FactorDB.com or p, q')
parser.add_argument('-k','--key', dest='key', metavar='PATH', help='Try generating from key file.')
parser.add_argument('-x','--gen-public', dest='gen_pub', action='store_true',help='Genarate public key file insted of private.')
parser.add_argument('-o','--out', dest='out_path', metavar='PATH', help='Save key into a file instead of printing.')
parser.add_argument('-p', dest='p', metavar='VALUE',help='1st prime value of RSA key.')
parser.add_argument('-q', dest='q', metavar='VALUE',help='2nd prime value of RSA key.')
parser.add_argument('-n', dest='n', metavar='VALUE',help='n value of RSA key.')
parser.add_argument('-e', dest='e', metavar='VALUE',help='Public exponent value of RSA key.', default='65537')
parser.add_argument('-v','--version',action='version',version='v'+str(__version__))
args = parser.parse_args()
if(args.key):
gen_from_key(args.key, args.gen_pub, args.out_path)
return
pqne = resolve_pqne(args)
if(pqne == None):
print('Invalid argument combination.\nYou must provide either n or a key.\n')
parser.print_usage()
return
gen_from_value(pqne, args.gen_pub, args.out_path)
if __name__ == "__main__":
main() | rsapwn/rsapwn.py | from bs4 import BeautifulSoup
from Crypto.PublicKey import RSA
from argparse import ArgumentParser
import requests
import os
from . import __version__
FACTOR_DB_URL = 'http://factordb.com/index.php'
def get_int_href(href) -> int:
nid = href.get('href').split('=')[1].strip('/')
response = requests.get(FACTOR_DB_URL, params = {'showid': nid})
soup = BeautifulSoup(response.text, 'html.parser')
number = soup.findAll('table')[1].findAll('tr')[2].findAll('td')[1].get_text().replace(os.linesep, '').replace('\n','')
number = int(number)
return number
def factor_db(n) -> list:
try:
response = requests.get(FACTOR_DB_URL, params = {'query': n})
soup = BeautifulSoup(response.text, 'html.parser')
except:
print('Network connection failed.')
return
try:
factors = soup.findAll('table')[1].findAll('tr')[2].findAll('td')[2].findAll('a')
except:
print('Failed to parse data. Maybe the HTML of FactorDB has changed.')
return
if(len(factors) != 3):
print('Factorization not found on FactorDB or n is not semiprime.')
return
factors = list(map(get_int_href, factors))
factors.remove(int(n))
return factors
def calculate_d(e, p, q) -> int:
# Modified method from: https://stackoverflow.com/questions/23279208/calculate-d-from-n-e-p-q-in-rsa
# Answer by: https://stackoverflow.com/users/448810/user448810
m = (p-1)*(q-1)
a, b, u = 0, m, 1
while e > 0:
q = b // e # integer division
e, a, b, u = b % e, u, e, a - q * u
if b == 1:
return a % m
raise ValueError("Must be coprime.")
def print_or_save(key, path=None):
key = key.exportKey(format='PEM')
if(not isinstance(key,str)):
key = key.decode('utf-8')
if(not path):
print()
print(key)
else:
with open(path,'w') as f:
f.write(key)
def gen_private(p, q, n=None, e = 0x10001, save_path=None):
if(p<q):
p,q = q,p # OpenSSL
if(not n):
n = p * q
d = calculate_d(e,p,q)
key = RSA.construct((n,e,d,p,q))
print_or_save(key, save_path)
def gen_public(n, e=0x10001, save_path = None):
key = RSA.construct((n,e))
print_or_save(key, save_path)
def resolve_pqne(args)->tuple:
if(not args.n and args.p and args.q):
return int(args.p), int(args.q), int(args.p) * int(args.q), int(args.e)
elif(args.n and args.p and not args.q):
return int(args.p), int(args.n) // int(args.p), int(args.n), int(args.e)
elif(args.n and not args.p and args.q):
return int(args.n) // int(args.q), int(args.q), int(args.n), int(args.e)
elif(args.n):
return None, None, int(args.n), int(args.e)
def gen_from_value(pqne, pub_key=False, save_path=None):
p, q, n, e = pqne
if(pub_key):
gen_public(n, e, save_path)
return
if(not p and not q):
print('Getting factors from factorDB.com ...')
pq = factor_db(n)
if(not pq):
return
p, q = pq
gen_private(p,q,n,e,save_path)
def gen_from_key(key_path, pub_key=False, save_path=None):
with open(key_path,'r') as f:
key = RSA.import_key(f.read())
if(pub_key):
print_or_save(key.publickey(), save_path)
elif(key.has_private()):
print_or_save(key, save_path)
else:
gen_from_value((None, None, key.n, key.e), save_path=save_path)
def main():
parser = ArgumentParser(description='Genarate private key from public key using FactorDB.com or p, q')
parser.add_argument('-k','--key', dest='key', metavar='PATH', help='Try generating from key file.')
parser.add_argument('-x','--gen-public', dest='gen_pub', action='store_true',help='Genarate public key file insted of private.')
parser.add_argument('-o','--out', dest='out_path', metavar='PATH', help='Save key into a file instead of printing.')
parser.add_argument('-p', dest='p', metavar='VALUE',help='1st prime value of RSA key.')
parser.add_argument('-q', dest='q', metavar='VALUE',help='2nd prime value of RSA key.')
parser.add_argument('-n', dest='n', metavar='VALUE',help='n value of RSA key.')
parser.add_argument('-e', dest='e', metavar='VALUE',help='Public exponent value of RSA key.', default='65537')
parser.add_argument('-v','--version',action='version',version='v'+str(__version__))
args = parser.parse_args()
if(args.key):
gen_from_key(args.key, args.gen_pub, args.out_path)
return
pqne = resolve_pqne(args)
if(pqne == None):
print('Invalid argument combination.\nYou must provide either n or a key.\n')
parser.print_usage()
return
gen_from_value(pqne, args.gen_pub, args.out_path)
if __name__ == "__main__":
main() | 0.463201 | 0.081374 |
from collections import defaultdict
from dataclasses import dataclass
from functools import total_ordering
from itertools import groupby
from json import loads
from optparse import OptionParser
from sys import getdefaultencoding
from jsonpath_ng import parse
from tabulate import tabulate
@dataclass(order=True, frozen=True)
class Iter(object):
size: int
@dataclass(order=True, frozen=True)
class Dict(object):
size: int
def kv_flatten(obj):
if isinstance(obj, dict):
yield tuple(), Dict(len(obj))
for prefix, sub in obj.items():
for key, value in kv_flatten(sub):
yield (prefix, *key), value
elif isinstance(obj, (list, tuple, set)):
yield tuple(), Iter(len(obj))
for element in obj:
for key, value in kv_flatten(element):
yield ("*", *key), value
else:
yield tuple(), obj
def kv_diff(objs):
objs = iter(objs)
keys = set()
keys.update(kv_flatten(next(objs)))
for i, obj in enumerate(objs):
i += 1
news = set(kv_flatten(obj))
for missing in keys - news:
print(i, "missing", missing)
for addition in news - keys:
print(i, "addition", addition)
if keys == news:
print(i, "match")
keys |= news
new_trie = lambda: defaultdict(new_trie)
def trie_insert(trie, key, value):
curr = trie
for part in key:
curr = curr[part]
if None not in curr:
curr[None] = list()
curr[None].append(value)
def trie_items(trie):
for key in trie:
if key is None:
yield tuple(), trie[key]
continue
for test in trie_items(trie[key]):
prefix, value = test
yield (key, *prefix), value
@total_ordering
class MinType(object):
def __le__(self, other):
return True
def __eq__(self, other):
return self is other
Min = MinType()
min_sortkey = lambda x: (Min, Min) if x is None else (str(type(x)), x)
def json_path(key):
path = ["$"]
for part in key:
if part == "*":
path.append("[*]")
else:
path.append(f".{part}")
return "".join(path)
def expand_path(path):
expanded = []
for part in path.split("."):
if path == "$":
pass
elif part.startswith("[") and part.endswith("]"):
expanded.append("*")
else:
expanded.append(part)
return expanded
def analyze(obj, *, path="$"):
root = new_trie()
jp = parse(path)
for match in jp.find(obj):
for key, value in kv_flatten(match.value):
prefix = expand_path(str(match.full_path))
trie_insert(root, (*prefix, *key), value)
results = []
for key, values in trie_items(root):
for i, (t, group) in enumerate(groupby(sorted(values, key=min_sortkey), type)):
group = list(group)
row = []
if i == 0:
row.append(json_path(key))
else:
row.append("")
row.extend(
[
t.__name__,
len(group),
len(set(group)),
min(group) if t is not type(None) else "",
max(group) if t is not type(None) else "",
]
)
results.append(row)
print(
tabulate(
results,
headers=["Key", "Type", "Values", "Distinct", "Min", "Max"],
tablefmt="simple",
)
)
def main():
parser = OptionParser(prog="json-analyze")
parser.add_option(
"-f", "--file", dest="filename", help="JSON file to analyze", metavar="FILE"
)
parser.add_option(
"-e",
"--encoding",
dest="encoding",
help="Text Encoding",
metavar="CODEC",
default=getdefaultencoding(),
)
parser.add_option(
"-p",
"--path",
dest="path",
help="JSON path applied before the analysis",
metavar="PATH",
default="$",
)
(options, args) = parser.parse_args()
if options.filename:
with open(options.filename, "r", encoding=options.encoding) as fh:
analyze(loads(fh.read()), path=options.path) | pythonProject1/venv/Lib/site-packages/json_analyze.py | from collections import defaultdict
from dataclasses import dataclass
from functools import total_ordering
from itertools import groupby
from json import loads
from optparse import OptionParser
from sys import getdefaultencoding
from jsonpath_ng import parse
from tabulate import tabulate
@dataclass(order=True, frozen=True)
class Iter(object):
size: int
@dataclass(order=True, frozen=True)
class Dict(object):
size: int
def kv_flatten(obj):
if isinstance(obj, dict):
yield tuple(), Dict(len(obj))
for prefix, sub in obj.items():
for key, value in kv_flatten(sub):
yield (prefix, *key), value
elif isinstance(obj, (list, tuple, set)):
yield tuple(), Iter(len(obj))
for element in obj:
for key, value in kv_flatten(element):
yield ("*", *key), value
else:
yield tuple(), obj
def kv_diff(objs):
objs = iter(objs)
keys = set()
keys.update(kv_flatten(next(objs)))
for i, obj in enumerate(objs):
i += 1
news = set(kv_flatten(obj))
for missing in keys - news:
print(i, "missing", missing)
for addition in news - keys:
print(i, "addition", addition)
if keys == news:
print(i, "match")
keys |= news
new_trie = lambda: defaultdict(new_trie)
def trie_insert(trie, key, value):
curr = trie
for part in key:
curr = curr[part]
if None not in curr:
curr[None] = list()
curr[None].append(value)
def trie_items(trie):
for key in trie:
if key is None:
yield tuple(), trie[key]
continue
for test in trie_items(trie[key]):
prefix, value = test
yield (key, *prefix), value
@total_ordering
class MinType(object):
def __le__(self, other):
return True
def __eq__(self, other):
return self is other
Min = MinType()
min_sortkey = lambda x: (Min, Min) if x is None else (str(type(x)), x)
def json_path(key):
path = ["$"]
for part in key:
if part == "*":
path.append("[*]")
else:
path.append(f".{part}")
return "".join(path)
def expand_path(path):
expanded = []
for part in path.split("."):
if path == "$":
pass
elif part.startswith("[") and part.endswith("]"):
expanded.append("*")
else:
expanded.append(part)
return expanded
def analyze(obj, *, path="$"):
root = new_trie()
jp = parse(path)
for match in jp.find(obj):
for key, value in kv_flatten(match.value):
prefix = expand_path(str(match.full_path))
trie_insert(root, (*prefix, *key), value)
results = []
for key, values in trie_items(root):
for i, (t, group) in enumerate(groupby(sorted(values, key=min_sortkey), type)):
group = list(group)
row = []
if i == 0:
row.append(json_path(key))
else:
row.append("")
row.extend(
[
t.__name__,
len(group),
len(set(group)),
min(group) if t is not type(None) else "",
max(group) if t is not type(None) else "",
]
)
results.append(row)
print(
tabulate(
results,
headers=["Key", "Type", "Values", "Distinct", "Min", "Max"],
tablefmt="simple",
)
)
def main():
parser = OptionParser(prog="json-analyze")
parser.add_option(
"-f", "--file", dest="filename", help="JSON file to analyze", metavar="FILE"
)
parser.add_option(
"-e",
"--encoding",
dest="encoding",
help="Text Encoding",
metavar="CODEC",
default=getdefaultencoding(),
)
parser.add_option(
"-p",
"--path",
dest="path",
help="JSON path applied before the analysis",
metavar="PATH",
default="$",
)
(options, args) = parser.parse_args()
if options.filename:
with open(options.filename, "r", encoding=options.encoding) as fh:
analyze(loads(fh.read()), path=options.path) | 0.386995 | 0.211091 |
__author__ = "<NAME> <<EMAIL>>"
__copyright__ = """\
Copyright (c) 2005-2013 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import errno
import os
import sys
import socket
from thor.loop import EventSource, schedule
class TcpConnection(EventSource):
"""
An asynchronous TCP connection.
Emits:
- data (chunk): incoming data
- close (): the other party has closed the connection
- pause (bool): whether the connection has been paused
It will emit the 'data' even every time incoming data is
available;
> def process(data):
> print "got some data:", data
> tcp_conn.on('data', process)
When you want to write to the connection, just write to it:
> tcp_conn.write(data)
If you want to close the connection from your side, just call close:
> tcp_conn.close()
Note that this will flush any data already written.
If the other side closes the connection, The 'close' event will be
emitted;
> def handle_close():
> print "oops, they don't like us any more..."
> tcp_conn.on('close', handle_close)
If you write too much data to the connection and the buffers fill up,
pause_cb will be emitted with True to tell you to stop sending data
temporarily;
> def handle_pause(paused):
> if paused:
> # stop sending data
> else:
> # it's OK to start again
> tcp_conn.on('pause', handle_pause)
Note that this is advisory; if you ignore it, the data will still be
buffered, but the buffer will grow.
Likewise, if you want to pause the connection because your buffers
are full, call pause;
> tcp_conn.pause(True)
but don't forget to tell it when it's OK to send data again;
> tcp_conn.pause(False)
NOTE that connections are paused to start with; if you want to start
getting data from them, you'll need to pause(False).
"""
# TODO: play with various buffer sizes
write_bufsize = 16
read_bufsize = 1024 * 16
_block_errs = set([
(BlockingIOError, errno.EAGAIN),
(BlockingIOError, errno.EWOULDBLOCK),
(TimeoutError, errno.ETIMEDOUT)])
_close_errs = set([
(OSError, errno.EBADF),
(OSError, errno.ENOTCONN),
(ConnectionResetError, errno.ECONNRESET),
(BrokenPipeError, errno.ESHUTDOWN),
(BrokenPipeError, errno.EPIPE),
(ConnectionAbortedError, errno.ECONNABORTED),
(ConnectionRefusedError, errno.ECONNREFUSED)])
def __init__(self, sock, host, port, loop=None):
EventSource.__init__(self, loop)
self.socket = sock
self.host = host
self.port = port
self.tcp_connected = True # we assume a connected socket
self._input_paused = True # we start with input paused
self._output_paused = False
self._closing = False
self._write_buffer = []
self.register_fd(sock.fileno())
self.on('readable', self.handle_read)
self.on('writable', self.handle_write)
self.on('close', self.handle_close)
def __repr__(self):
status = [self.__class__.__module__ + "." + self.__class__.__name__]
status.append(self.tcp_connected and 'connected' or 'disconnected')
status.append('%s:%s' % (self.host, self.port))
if self._input_paused:
status.append('input paused')
if self._output_paused:
status.append('output paused')
if self._closing:
status.append('closing')
if self._write_buffer:
status.append('%s write buffered' % len(self._write_buffer))
return "<%s at %#x>" % (", ".join(status), id(self))
def handle_read(self):
"The connection has data read for reading"
try:
# TODO: look into recv_into (but see python issue7827)
data = self.socket.recv(self.read_bufsize)
except Exception as why:
err = (type(why), why.errno)
if err in self._block_errs:
return
elif err in self._close_errs:
self.emit('close')
return
else:
raise
if data == b'':
self.emit('close')
else:
self.emit('data', data)
# TODO: try using buffer; see
# http://itamarst.org/writings/pycon05/fast.html
def handle_write(self):
"The connection is ready for writing; write any buffered data."
if len(self._write_buffer) > 0:
data = b''.join(self._write_buffer)
try:
sent = self.socket.send(data) if len(data) > 0 else 0
except Exception as why:
err = (type(why), why.errno)
if err in self._block_errs:
return
elif err in self._close_errs:
self.emit('close')
return
else:
raise
if sent < len(data):
self._write_buffer = [data[sent:]]
else:
self._write_buffer = []
if self._output_paused and \
len(self._write_buffer) < self.write_bufsize:
self._output_paused = False
self.emit('pause', False)
if self._closing:
self.close()
if len(self._write_buffer) == 0:
self.event_del('writable')
def handle_close(self):
"""
The connection has been closed by the other side.
"""
self.tcp_connected = False
# TODO: make sure removing close doesn't cause problems.
self.removeListeners('readable', 'writable', 'close')
self.unregister_fd()
self.socket.close()
def write(self, data):
"Write data to the connection."
self._write_buffer.append(data)
if len(self._write_buffer) > self.write_bufsize:
self._output_paused = True
self.emit('pause', True)
self.event_add('writable')
def pause(self, paused):
"""
Temporarily stop/start reading from the connection and pushing
it to the app.
"""
if paused:
self.event_del('readable')
else:
self.event_add('readable')
self._input_paused = paused
def close(self):
"Flush buffered data (if any) and close the connection."
self.pause(True)
if len(self._write_buffer) > 0:
self._closing = True
else:
self.handle_close()
# TODO: should loop stop automatically close all conns?
class TcpServer(EventSource):
"""
An asynchronous TCP server.
Emits:
- connect (tcp_conn): upon connection
To start listening:
> s = TcpServer(host, port)
> s.on('connect', conn_handler)
conn_handler is called every time a new client connects.
"""
def __init__(self, host, port, sock=None, loop=None):
EventSource.__init__(self, loop)
self.host = host
self.port = port
self.sock = sock or server_listen(host, port)
self.on('readable', self.handle_accept)
self.register_fd(self.sock.fileno(), 'readable')
schedule(0, self.emit, 'start')
def handle_accept(self):
try:
conn, addr = self.sock.accept()
except (TypeError, IndexError):
# sometimes accept() returns None if we have
# multiple processes listening
return
conn.setblocking(False)
self.create_conn(conn, addr[0], addr[1])
def create_conn(self, sock, host, port):
tcp_conn = TcpConnection(sock, host, port, self._loop)
self.emit('connect', tcp_conn)
# TODO: should loop stop close listening sockets?
def shutdown(self):
"Stop accepting requests and close the listening socket."
self.removeListeners('readable')
self.sock.close()
self.emit('stop')
# TODO: emit close?
def server_listen(host, port, backlog=None):
"Return a socket listening to host:port."
# TODO: IPV6
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(False)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
sock.listen(backlog or socket.SOMAXCONN)
return sock
class TcpClient(EventSource):
"""
An asynchronous TCP client.
Emits:
- connect (tcp_conn): upon connection
- connect_error (err_type, err_id, err_str): if there's a problem
before getting a connection. err_type is socket.error or
socket.gaierror; err_id is the specific error encountered, and
err_str is its textual description.
To connect to a server:
> c = TcpClient()
> c.on('connect', conn_handler)
> c.on('connect_error', error_handler)
> c.connect(host, port)
conn_handler will be called with the tcp_conn as the argument
when the connection is made.
"""
def __init__(self, loop=None):
EventSource.__init__(self, loop)
self.host = None
self.port = None
self._timeout_ev = None
self._error_sent = False
# TODO: IPV6
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setblocking(False)
self.on('error', self.handle_conn_error)
self.register_fd(self.sock.fileno(), 'writable')
self.event_add('error')
def connect(self, host, port, connect_timeout=None):
"""
Connect to host:port (with an optional connect timeout)
and emit 'connect' when connected, or 'connect_error' in
the case of an error.
"""
self.host = host
self.port = port
self.once('writable', self.handle_connect)
# TODO: use socket.getaddrinfo(); needs to be non-blocking.
try:
err = self.sock.connect_ex((host, port))
except socket.gaierror as why:
self.handle_conn_error(type(why), [why.errno, why.strerror])
return
except socket.error as why:
self.handle_conn_error(type(why), [why.errno, why.strerror])
return
if err != errno.EINPROGRESS:
self.handle_conn_error(socket.error, [err, os.strerror(err)])
return
if connect_timeout:
self._timeout_ev = self._loop.schedule(
connect_timeout,
self.handle_conn_error,
TimeoutError,
[errno.ETIMEDOUT, os.strerror(errno.ETIMEDOUT)],
True)
def create_conn(self):
tcp_conn = TcpConnection(self.sock, self.host, self.port, self._loop)
self.emit('connect', tcp_conn)
def handle_connect(self):
self.unregister_fd()
if self._timeout_ev:
self._timeout_ev.delete()
if self._error_sent:
return
err = self.sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err:
self.handle_conn_error(socket.error, [err, os.strerror(err)])
else:
self.create_conn()
def handle_conn_error(self, err_type=None, why=None, close=False):
"""
Handle a connect error.
@err_type - e.g., socket.error; defaults to socket.error
@why - tuple of [err_id, err_str]
@close - whether the error means the socket should be closed
"""
if self._timeout_ev:
self._timeout_ev.delete()
if self._error_sent:
return
if err_type is None:
err_type = socket.error
err_id = self.sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
err_str = os.strerror(err_id)
else:
err_id = why[0]
err_str = why[1]
self._error_sent = True
self.unregister_fd()
self.emit('connect_error', err_type, err_id, err_str)
if close:
self.sock.close()
if __name__ == "__main__":
# quick demo server
from thor.loop import run, stop
server = TcpServer('localhost', int(sys.argv[-1]))
def handle_conn(conn):
conn.pause(False)
def echo(chunk):
if chunk.decode().strip().lower() in ['quit', 'stop']:
stop()
else:
conn.write(("-> %s" % chunk).encode())
conn.on('data', echo)
server.on('connect', handle_conn)
run() | thor/tcp.py | __author__ = "<NAME> <<EMAIL>>"
__copyright__ = """\
Copyright (c) 2005-2013 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import errno
import os
import sys
import socket
from thor.loop import EventSource, schedule
class TcpConnection(EventSource):
"""
An asynchronous TCP connection.
Emits:
- data (chunk): incoming data
- close (): the other party has closed the connection
- pause (bool): whether the connection has been paused
It will emit the 'data' even every time incoming data is
available;
> def process(data):
> print "got some data:", data
> tcp_conn.on('data', process)
When you want to write to the connection, just write to it:
> tcp_conn.write(data)
If you want to close the connection from your side, just call close:
> tcp_conn.close()
Note that this will flush any data already written.
If the other side closes the connection, The 'close' event will be
emitted;
> def handle_close():
> print "oops, they don't like us any more..."
> tcp_conn.on('close', handle_close)
If you write too much data to the connection and the buffers fill up,
pause_cb will be emitted with True to tell you to stop sending data
temporarily;
> def handle_pause(paused):
> if paused:
> # stop sending data
> else:
> # it's OK to start again
> tcp_conn.on('pause', handle_pause)
Note that this is advisory; if you ignore it, the data will still be
buffered, but the buffer will grow.
Likewise, if you want to pause the connection because your buffers
are full, call pause;
> tcp_conn.pause(True)
but don't forget to tell it when it's OK to send data again;
> tcp_conn.pause(False)
NOTE that connections are paused to start with; if you want to start
getting data from them, you'll need to pause(False).
"""
# TODO: play with various buffer sizes
write_bufsize = 16
read_bufsize = 1024 * 16
_block_errs = set([
(BlockingIOError, errno.EAGAIN),
(BlockingIOError, errno.EWOULDBLOCK),
(TimeoutError, errno.ETIMEDOUT)])
_close_errs = set([
(OSError, errno.EBADF),
(OSError, errno.ENOTCONN),
(ConnectionResetError, errno.ECONNRESET),
(BrokenPipeError, errno.ESHUTDOWN),
(BrokenPipeError, errno.EPIPE),
(ConnectionAbortedError, errno.ECONNABORTED),
(ConnectionRefusedError, errno.ECONNREFUSED)])
def __init__(self, sock, host, port, loop=None):
EventSource.__init__(self, loop)
self.socket = sock
self.host = host
self.port = port
self.tcp_connected = True # we assume a connected socket
self._input_paused = True # we start with input paused
self._output_paused = False
self._closing = False
self._write_buffer = []
self.register_fd(sock.fileno())
self.on('readable', self.handle_read)
self.on('writable', self.handle_write)
self.on('close', self.handle_close)
def __repr__(self):
status = [self.__class__.__module__ + "." + self.__class__.__name__]
status.append(self.tcp_connected and 'connected' or 'disconnected')
status.append('%s:%s' % (self.host, self.port))
if self._input_paused:
status.append('input paused')
if self._output_paused:
status.append('output paused')
if self._closing:
status.append('closing')
if self._write_buffer:
status.append('%s write buffered' % len(self._write_buffer))
return "<%s at %#x>" % (", ".join(status), id(self))
def handle_read(self):
"The connection has data read for reading"
try:
# TODO: look into recv_into (but see python issue7827)
data = self.socket.recv(self.read_bufsize)
except Exception as why:
err = (type(why), why.errno)
if err in self._block_errs:
return
elif err in self._close_errs:
self.emit('close')
return
else:
raise
if data == b'':
self.emit('close')
else:
self.emit('data', data)
# TODO: try using buffer; see
# http://itamarst.org/writings/pycon05/fast.html
def handle_write(self):
"The connection is ready for writing; write any buffered data."
if len(self._write_buffer) > 0:
data = b''.join(self._write_buffer)
try:
sent = self.socket.send(data) if len(data) > 0 else 0
except Exception as why:
err = (type(why), why.errno)
if err in self._block_errs:
return
elif err in self._close_errs:
self.emit('close')
return
else:
raise
if sent < len(data):
self._write_buffer = [data[sent:]]
else:
self._write_buffer = []
if self._output_paused and \
len(self._write_buffer) < self.write_bufsize:
self._output_paused = False
self.emit('pause', False)
if self._closing:
self.close()
if len(self._write_buffer) == 0:
self.event_del('writable')
def handle_close(self):
"""
The connection has been closed by the other side.
"""
self.tcp_connected = False
# TODO: make sure removing close doesn't cause problems.
self.removeListeners('readable', 'writable', 'close')
self.unregister_fd()
self.socket.close()
def write(self, data):
"Write data to the connection."
self._write_buffer.append(data)
if len(self._write_buffer) > self.write_bufsize:
self._output_paused = True
self.emit('pause', True)
self.event_add('writable')
def pause(self, paused):
"""
Temporarily stop/start reading from the connection and pushing
it to the app.
"""
if paused:
self.event_del('readable')
else:
self.event_add('readable')
self._input_paused = paused
def close(self):
"Flush buffered data (if any) and close the connection."
self.pause(True)
if len(self._write_buffer) > 0:
self._closing = True
else:
self.handle_close()
# TODO: should loop stop automatically close all conns?
class TcpServer(EventSource):
    """
    An asynchronous TCP server.
    Emits:
    - connect (tcp_conn): upon connection
    To start listening:
    > s = TcpServer(host, port)
    > s.on('connect', conn_handler)
    conn_handler is called every time a new client connects.
    """
    def __init__(self, host, port, sock=None, loop=None):
        """
        @param host: interface to listen on
        @param port: TCP port to listen on
        @param sock: optional pre-made listening socket to reuse
        @param loop: optional event loop (defaults per EventSource)
        """
        EventSource.__init__(self, loop)
        self.host = host
        self.port = port
        # Reuse a caller-supplied listening socket, or create one.
        self.sock = sock or server_listen(host, port)
        self.on('readable', self.handle_accept)
        self.register_fd(self.sock.fileno(), 'readable')
        # Emit 'start' on the next tick so listeners attached after
        # construction still observe it.
        schedule(0, self.emit, 'start')
    def handle_accept(self):
        """Accept a pending connection and hand it to create_conn."""
        try:
            conn, addr = self.sock.accept()
        except (TypeError, IndexError):
            # sometimes accept() returns None if we have
            # multiple processes listening
            return
        conn.setblocking(False)
        self.create_conn(conn, addr[0], addr[1])
    def create_conn(self, sock, host, port):
        """Wrap an accepted socket in a TcpConnection and emit 'connect'."""
        tcp_conn = TcpConnection(sock, host, port, self._loop)
        self.emit('connect', tcp_conn)
    # TODO: should loop stop close listening sockets?
    def shutdown(self):
        "Stop accepting requests and close the listening socket."
        self.removeListeners('readable')
        self.sock.close()
        self.emit('stop')
    # TODO: emit close?
def server_listen(host, port, backlog=None):
    """Create and return a non-blocking socket listening on host:port."""
    # TODO: IPV6
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.setblocking(False)
    # Allow quick restarts without waiting for TIME_WAIT to expire.
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.bind((host, port))
    if not backlog:
        backlog = socket.SOMAXCONN
    listener.listen(backlog)
    return listener
class TcpClient(EventSource):
    """
    An asynchronous TCP client.
    Emits:
    - connect (tcp_conn): upon connection
    - connect_error (err_type, err_id, err_str): if there's a problem
      before getting a connection. err_type is socket.error or
      socket.gaierror; err_id is the specific error encountered, and
      err_str is its textual description.
    To connect to a server:
    > c = TcpClient()
    > c.on('connect', conn_handler)
    > c.on('connect_error', error_handler)
    > c.connect(host, port)
    conn_handler will be called with the tcp_conn as the argument
    when the connection is made.
    """
    def __init__(self, loop=None):
        EventSource.__init__(self, loop)
        self.host = None
        self.port = None
        # Pending connect-timeout event, if one was scheduled.
        self._timeout_ev = None
        # Guard so 'connect_error' is emitted at most once.
        self._error_sent = False
        # TODO: IPV6
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setblocking(False)
        self.on('error', self.handle_conn_error)
        self.register_fd(self.sock.fileno(), 'writable')
        self.event_add('error')
    def connect(self, host, port, connect_timeout=None):
        """
        Connect to host:port (with an optional connect timeout)
        and emit 'connect' when connected, or 'connect_error' in
        the case of an error.
        """
        self.host = host
        self.port = port
        # A non-blocking connect completes when the socket becomes writable.
        self.once('writable', self.handle_connect)
        # TODO: use socket.getaddrinfo(); needs to be non-blocking.
        try:
            err = self.sock.connect_ex((host, port))
        except socket.gaierror as why:
            self.handle_conn_error(type(why), [why.errno, why.strerror])
            return
        except socket.error as why:
            self.handle_conn_error(type(why), [why.errno, why.strerror])
            return
        # NOTE(review): an immediate success (err == 0) also falls into this
        # error branch — confirm whether connect_ex can return 0 here on any
        # supported platform.
        if err != errno.EINPROGRESS:
            self.handle_conn_error(socket.error, [err, os.strerror(err)])
            return
        if connect_timeout:
            # Fail the attempt if it doesn't finish within the timeout.
            self._timeout_ev = self._loop.schedule(
                connect_timeout,
                self.handle_conn_error,
                TimeoutError,
                [errno.ETIMEDOUT, os.strerror(errno.ETIMEDOUT)],
                True)
    def create_conn(self):
        """Wrap the now-connected socket in a TcpConnection and emit it."""
        tcp_conn = TcpConnection(self.sock, self.host, self.port, self._loop)
        self.emit('connect', tcp_conn)
    def handle_connect(self):
        """The socket became writable: check whether the connect succeeded."""
        self.unregister_fd()
        if self._timeout_ev:
            self._timeout_ev.delete()
        if self._error_sent:
            # An error (e.g. timeout) already fired for this attempt.
            return
        # SO_ERROR holds the deferred result of the non-blocking connect.
        err = self.sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if err:
            self.handle_conn_error(socket.error, [err, os.strerror(err)])
        else:
            self.create_conn()
    def handle_conn_error(self, err_type=None, why=None, close=False):
        """
        Handle a connect error.
        @err_type - e.g., socket.error; defaults to socket.error
        @why - tuple of [err_id, err_str]
        @close - whether the error means the socket should be closed
        """
        if self._timeout_ev:
            self._timeout_ev.delete()
        if self._error_sent:
            return
        if err_type is None:
            # No explicit error supplied; read the socket's pending error.
            err_type = socket.error
            err_id = self.sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
            err_str = os.strerror(err_id)
        else:
            err_id = why[0]
            err_str = why[1]
        self._error_sent = True
        self.unregister_fd()
        self.emit('connect_error', err_type, err_id, err_str)
        if close:
            self.sock.close()
if __name__ == "__main__":
    # quick demo server
    from thor.loop import run, stop
    # Port comes from the last command-line argument.
    server = TcpServer('localhost', int(sys.argv[-1]))
    def handle_conn(conn):
        # Start delivering 'data' events for this connection.
        conn.pause(False)
        def echo(chunk):
            # 'quit'/'stop' shuts the demo down; anything else is echoed.
            if chunk.decode().strip().lower() in ['quit', 'stop']:
                stop()
            else:
                conn.write(("-> %s" % chunk).encode())
        conn.on('data', echo)
    server.on('connect', handle_conn)
run() | 0.359926 | 0.15084 |
from datetime import datetime
import logging
from prpy.planning.base import Tags
from prpy.util import GetTrajectoryTags
def get_logger():
    """
    Return the metrics logger.
    @return metrics logger
    """
    # logging.getLogger returns the same singleton for a given name.
    return logging.getLogger('planning_metrics')
def setup_logger():
    """
    Configure metrics logger.
    @return metrics logger
    """
    # One log file per run, named after the start time.
    log_path = datetime.now().strftime('trial_%Y%m%d_%H%M.log')
    # Same named logger that get_logger() returns.
    metrics_logger = logging.getLogger('planning_metrics')
    handler = logging.FileHandler(log_path)
    # date/time plus message
    handler.setFormatter(
        logging.Formatter('%(asctime)s %(message)s', '%Y%m%d %H:%M:%S'))
    metrics_logger.addHandler(handler)
    metrics_logger.setLevel(logging.INFO)
    return metrics_logger
def _log_data(path, action_name, header, tag, log_metadata=False):
    """
    Log data about a path or trajectory.
    @param path: trajectory after postprocessing
    @param action_name: name of Action that generated the trajectory
    @param header: one-letter header for logs
    @param tag: tag to filter trajectory tags with
    @param log_metadata: True if metadata should be logged
    """
    tags = GetTrajectoryTags(path)
    fields = [header, action_name, tags.get(tag, 'unknown')]
    if log_metadata:
        # Planner/method metadata, when requested by the caller.
        fields.append(tags.get(Tags.PLANNER, 'unknown'))
        fields.append(tags.get(Tags.METHOD, 'unknown'))
    get_logger().info(' '.join(str(field) for field in fields))
def log_plan_data(path, action_name):
    """
    Log timing and metadata about planning of a path or trajectory.
    @param path: trajectory after postprocessing
    @param action_name: name of Action that generated the trajectory
    """
    # 'P' header marks planning entries; planner/method metadata included.
    _log_data(path, action_name, 'P', Tags.PLAN_TIME, log_metadata=True)
def log_postprocess_data(traj, action_name):
    """
    Log timing and metadata about postprocessing of a path or trajectory.
    @param traj: trajectory after postprocessing
    @param action_name: name of Action that generated the trajectory
    """
    # 'S' header marks postprocessing entries.
    _log_data(traj, action_name, 'S', Tags.POSTPROCESS_TIME, log_metadata=True)
def log_execution_data(traj, action_name):
"""
Log timing data about execution of a trajectory or path.
@param traj: trajectory to log
@param action_name: name of Action that generated the trajectory
"""
_log_data(traj, action_name, 'E', Tags.EXECUTION_TIME) | src/magi/logging_utils.py |
from datetime import datetime
import logging
from prpy.planning.base import Tags
from prpy.util import GetTrajectoryTags
def get_logger():
"""
Return the metrics logger.
@return metrics logger
"""
metrics_logger = logging.getLogger('planning_metrics')
return metrics_logger
def setup_logger():
"""
Configure metrics logger.
@return metrics logger
"""
logfile = datetime.now().strftime('trial_%Y%m%d_%H%M.log')
metrics_logger = get_logger()
hdlr = logging.FileHandler('%s' % logfile)
formatter = logging.Formatter('%(asctime)s %(message)s',
'%Y%m%d %H:%M:%S') # date/time plus message
hdlr.setFormatter(formatter)
metrics_logger.addHandler(hdlr)
metrics_logger.setLevel(logging.INFO)
return metrics_logger
def _log_data(path, action_name, header, tag, log_metadata=False):
"""
Log data about a path or trajectory.
@param path: trajectory after postprocessing
@param action_name: name of Action that generated the trajectory
@param header: one-letter header for logs
@param tag: tag to filter trajectory tags with
@param log_metadata: True if metadata should be logged
"""
logger = get_logger()
path_tags = GetTrajectoryTags(path)
log_data = [header, action_name, path_tags.get(tag, 'unknown')]
if log_metadata:
log_data += [
path_tags.get(Tags.PLANNER, 'unknown'),
path_tags.get(Tags.METHOD, 'unknown')
]
logger.info(' '.join([str(v) for v in log_data]))
def log_plan_data(path, action_name):
"""
Log timing and metadata about planning of a path or trajectory.
@param path: trajectory after postprocessing
@param action_name: name of Action that generated the trajectory
"""
_log_data(path, action_name, 'P', Tags.PLAN_TIME, log_metadata=True)
def log_postprocess_data(traj, action_name):
"""
Log timing and metadata about postprocessing of a path or trajectory.
@param traj: trajectory after postprocessing
@param action_name: name of Action that generated the trajectory
"""
_log_data(traj, action_name, 'S', Tags.POSTPROCESS_TIME, log_metadata=True)
def log_execution_data(traj, action_name):
"""
Log timing data about execution of a trajectory or path.
@param traj: trajectory to log
@param action_name: name of Action that generated the trajectory
"""
_log_data(traj, action_name, 'E', Tags.EXECUTION_TIME) | 0.83762 | 0.174621 |
from pythonjsonlogger import jsonlogger
import logging
# Distributed tracing
# OpenTelemetry python imports
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.exporter.otlp.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace.export import BatchExportSpanProcessor
# Intrumentation libraries for tracing
from opentelemetry.instrumentation.flask import FlaskInstrumentor
from opentelemetry.instrumentation.requests import RequestsInstrumentor
# Flask and friends
from flask import Flask, request
import werkzeug
# HTTP client library
import requests
# Other libraries
import os

# Initialize the tracing machinery
resource = Resource({"service.name": "service1"})
OTEL_AGENT = os.getenv('OTEL_AGENT', "otel-agent")
otlp_exporter = OTLPSpanExporter(endpoint=OTEL_AGENT + ":4317", insecure=True)
trace.set_tracer_provider(TracerProvider(resource=resource))
tracer = trace.get_tracer(__name__)
span_processor = BatchExportSpanProcessor(otlp_exporter)
trace.get_tracer_provider().add_span_processor(span_processor)

# Set up the logging really early before initialization
# of the Flask app instance
logger = logging.getLogger()
logHandler = logging.StreamHandler()
formatter = jsonlogger.JsonFormatter()
logHandler.setFormatter(formatter)
logger.addHandler(logHandler)
logger.setLevel(logging.DEBUG)

# The Flask app must exist before it can be instrumented.
app = Flask(__name__)

# Setup the instrumentation for the Flask app
# and Requests library
# BUG FIX: these two lines previously ran before `app` was defined,
# raising NameError at import time.
FlaskInstrumentor().instrument_app(app)
RequestsInstrumentor().instrument()
# setup middleware to log the request
# before handling it
@app.before_request
def record_request():
    """Log every incoming request: path, method, content type, JSON body."""
    request_body = "{}"
    if request.method == "POST":
        if request.content_type == "application/json":
            # BUG FIX: request.json is already a parsed object, so the old
            # json.loads(request.json) raised TypeError on every JSON POST.
            # get_json(silent=True) tolerates malformed bodies (returns None).
            request_body = request.get_json(silent=True)
    # BUG FIX: corrected 'receieved' typo in the log message.
    logger.info('Request received', extra={
        'request_path': request.path,
        'request_method': request.method,
        'request_content_type': request.content_type,
        'request_body': request_body,
    })
# setup middleware to log the response before
# sending it back to the client
@app.after_request
def record_response(response):
    """Log the outcome of each request just before it is returned."""
    log_fields = {
        'request_path': request.path,
        'response_status': response.status_code,
    }
    logger.info('Request processed', extra=log_fields)
    # Flask requires after_request handlers to return the response.
    return response
def do_stuff():
    """Call the downstream service2 endpoint and return its HTTP response."""
    return requests.get('http://service2:5000')
@app.route('/')
def index():
    """Proxy the root request to service2, wrapped in a tracing span."""
    # We create a span here
    with tracer.start_as_current_span("service2-request"):
        data = do_stuff()
        return data.text, 200
@app.errorhandler(werkzeug.exceptions.HTTPException)
def handle_500(error):
    """Convert an HTTPException into a generic 500 response."""
    # NOTE(review): this maps *every* HTTPException (including 404s) to a
    # 500 — confirm that is intended rather than returning error.code.
    return "Something went wrong", 500
@app.route('/honeypot/')
def test1():
    """Deliberately raise ZeroDivisionError to exercise error handling."""
    1/0
return 'lol' | demo-app/service1/app.py | from pythonjsonlogger import jsonlogger
import logging
# Distributed tracing
# OpenTelemetry python imports
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.exporter.otlp.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace.export import BatchExportSpanProcessor
# Intrumentation libraries for tracing
from opentelemetry.instrumentation.flask import FlaskInstrumentor
from opentelemetry.instrumentation.requests import RequestsInstrumentor
# Flask and friends
from flask import Flask, request
import werkzeug
# HTTP client library
import requests
# Other libraries
import os
# Initialize the tracing machinery
resource = Resource({"service.name": "service1"})
OTEL_AGENT = os.getenv('OTEL_AGENT', "otel-agent")
otlp_exporter = OTLPSpanExporter(endpoint=OTEL_AGENT + ":4317", insecure=True)
trace.set_tracer_provider(TracerProvider(resource=resource))
tracer = trace.get_tracer(__name__)
span_processor = BatchExportSpanProcessor(otlp_exporter)
trace.get_tracer_provider().add_span_processor(span_processor)
# Setup the instrumentation for the Flask app
# and Requests library
# NOTE(review): `app` is not defined until below — this call raises
# NameError at import time; instrumentation must run after Flask(__name__).
FlaskInstrumentor().instrument_app(app)
RequestsInstrumentor().instrument()
# Set up the logging really early before initialization
# of the Flask app instance
logger = logging.getLogger()
logHandler = logging.StreamHandler()
formatter = jsonlogger.JsonFormatter()
logHandler.setFormatter(formatter)
logger.addHandler(logHandler)
logger.setLevel(logging.DEBUG)
app = Flask(__name__)
# setup middleware to log the request
# before handling it
@app.before_request
def record_request():
    # Placeholder body for non-POST / non-JSON requests.
    request_body = "{}"
    if request.method == "POST":
        if request.content_type == "application/json":
            # NOTE(review): request.json is already parsed, and `json` is
            # never imported — json.loads(request.json) fails at runtime.
            request_body = json.loads(request.json)
    logger.info('Request receieved', extra={
        'request_path': request.path,
        'request_method': request.method,
        'request_content_type': request.content_type,
        'request_body': request_body,
    })
# setup middleware to log the response before
# sending it back to the client
@app.after_request
def record_response(response):
    # Log final status per request path.
    logger.info('Request processed', extra={
        'request_path': request.path,
        'response_status': response.status_code
    })
    return response
def do_stuff():
    # Synchronous call to the downstream service.
    return requests.get('http://service2:5000')
@app.route('/')
def index():
    # We create a span here
    with tracer.start_as_current_span("service2-request"):
        data = do_stuff()
        return data.text, 200
@app.errorhandler(werkzeug.exceptions.HTTPException)
def handle_500(error):
    # NOTE(review): every HTTPException becomes a 500 — confirm intended.
    return "Something went wrong", 500
@app.route('/honeypot/')
def test1():
    # Deliberate ZeroDivisionError; the following return is unreachable.
    1/0
return 'lol' | 0.536556 | 0.077622 |
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn.manifold as manifold
from sklearn.decomposition import PCA, TruncatedSVD, RandomizedPCA
from pandas.tools.plotting import parallel_coordinates
from bokeh_plots import scatter_with_hover
from bokeh_server import bokeh_server
sns.set_context('poster')
sns.set_color_codes()
# Default scatter styling shared by the matplotlib plots below.
plot_kwargs = {'alpha': 0.25, 's': 50, 'linewidth': 0}
color_palette = sns.color_palette('deep', 8)
# Maps algorithm names to their scikit-learn estimator classes.
algorithm_class_dict = {
    'mds': manifold.MDS,
    'tsne': manifold.TSNE,
    'pca': PCA,
}
# Default constructor kwargs for each projection algorithm.
algorithm_kwargs_dict = {
    'mds': dict(n_components=2, max_iter=100, n_init=1, random_state=0),
    'tsne': dict(n_components=2, init='pca', random_state=0),
    'pca': dict(n_components=2)
}
def plot_2d(data, labels=None, probabilities=None, algorithm='tsne', algorithm_kwargs=None):
    """
    Project data to 2-D (if needed) and scatter-plot it with matplotlib.

    @param data: 2-D array of samples; projected when it has > 2 columns
    @param labels: optional cluster labels (x < 0 drawn grey)
    @param probabilities: optional per-point values used to desaturate colors
    @param algorithm: key into algorithm_class_dict ('tsne', 'mds', 'pca')
    @param algorithm_kwargs: constructor kwargs overriding the defaults
    """
    if data.shape[1] > 2:
        algorithm_class = algorithm_class_dict[algorithm]
        if algorithm_kwargs:
            algorithm = algorithm_class(**algorithm_kwargs)
        else:
            algorithm = algorithm_class(**algorithm_kwargs_dict[algorithm])
        Y = algorithm.fit_transform(data)
    else:
        Y = data
    color_palette = sns.color_palette('deep', len(np.unique(labels)))
    if labels is not None:
        cluster_colors = [color_palette[x] if x >= 0 else
                          (0.5, 0.5, 0.5) for
                          x in labels]
        # BUG FIX: np.isfinite(probabilities) returns an array, which is
        # ambiguous in a boolean context (raises ValueError for len > 1);
        # reduce with np.all(), as bokeh_plot_2d already does.
        if probabilities is not None and np.all(np.isfinite(probabilities)):
            cluster_member_colors = [sns.desaturate(x, p) for x, p in
                                     zip(cluster_colors, probabilities)]
        else:
            cluster_member_colors = cluster_colors
    else:
        cluster_member_colors = 'b'
    plt.scatter(Y[:, 0], Y[:, 1], c=cluster_member_colors, **plot_kwargs)
    # Hide both axes; the projection coordinates are not meaningful.
    frame = plt.gca()
    frame.get_xaxis().set_visible(False)
    frame.get_yaxis().set_visible(False)
    plt.show()
def bokeh_plot_2d(data, labels=None, probabilities=None, algorithm='tsne', algorithm_kwargs=None, untransformed_data=None):
    """
    Project data to 2-D (if needed) and show an interactive bokeh scatter.

    @param data: 2-D array of samples; projected when it has > 2 columns
    @param labels: optional cluster labels (x < 0 drawn grey)
    @param probabilities: optional per-point values used to desaturate colors
    @param algorithm: key into algorithm_class_dict ('tsne', 'mds', 'pca')
    @param algorithm_kwargs: constructor kwargs overriding the defaults
    @param untransformed_data: optional DataFrame whose columns become
        hover-tooltip fields
    """
    if data.shape[1] > 2:
        # Pre-reduce very wide data with PCA before slower projections.
        if data.shape[1] > 32 and algorithm != 'pca':
            data = RandomizedPCA(n_components=32).fit_transform(data)
        algorithm_class = algorithm_class_dict[algorithm]
        if algorithm_kwargs:
            algorithm = algorithm_class(**algorithm_kwargs)
        else:
            algorithm = algorithm_class(**algorithm_kwargs_dict[algorithm])
        Y = algorithm.fit_transform(data)
    else:
        Y = data
    color_palette = sns.color_palette('deep', len(np.unique(labels)))
    if labels is not None:
        cluster_colors = [color_palette[x] if x >= 0 else
                          (0.5, 0.5, 0.5) for
                          x in labels]
        if probabilities is not None and np.all(np.isfinite(probabilities)):
            cluster_member_colors = [sns.desaturate(x, p) for x, p in
                                     zip(cluster_colors, probabilities)]
        else:
            cluster_member_colors = cluster_colors
        # bokeh wants hex color strings rather than RGB tuples.
        cluster_member_colors = [mpl.colors.rgb2hex(rgb) for rgb in cluster_member_colors]
    else:
        cluster_member_colors = 'b'
    if untransformed_data is not None:
        original_columns = untransformed_data.columns.tolist()
        df = untransformed_data.copy()
        df['proj1'] = Y[:, 0]
        df['proj2'] = Y[:, 1]
    else:
        # No original frame supplied: synthesize x0..xN columns for hover.
        original_columns = []
        data_dict = {}
        # NOTE(review): xrange is Python 2 only — this module appears to
        # target py2 (see pandas.tools.plotting import).
        for column in xrange(data.shape[1]):
            colname = 'x%i' % column
            original_columns.append(colname)
            data_dict[colname] = data[:, column]
        data_dict.update({'proj1': Y[:, 0], 'proj2': Y[:, 1]})
        df = pd.DataFrame(data_dict)
    with bokeh_server(name='comp') as server:
        q = scatter_with_hover(df, 'proj1', 'proj2', cols=original_columns, color=cluster_member_colors, alpha=0.5, size=5)
        server.show(q)
def project_data(data, algorithm='tsne', algorithm_kwargs=None, n_components=2):
    """
    Project data down to n_components dimensions.

    @param data: 2-D array; returned unchanged when it already has
        <= n_components columns
    @param algorithm: key into algorithm_class_dict ('tsne', 'mds', 'pca')
    @param algorithm_kwargs: constructor kwargs overriding the defaults
    @param n_components: target dimensionality
    @return projected array, or `data` itself when no projection is needed
    """
    if data.shape[1] > n_components:
        algorithm_class = algorithm_class_dict[algorithm]
        if algorithm_kwargs:
            # BUG FIX: copy before overriding n_components so the caller's
            # dict is not mutated.
            kwargs = dict(algorithm_kwargs, n_components=n_components)
        else:
            # BUG FIX: the old code took a *shallow* copy of
            # algorithm_kwargs_dict and then wrote into the nested dict,
            # permanently mutating the module-level defaults.
            kwargs = dict(algorithm_kwargs_dict[algorithm],
                          n_components=n_components)
        return algorithm_class(**kwargs).fit_transform(data)
    else:
        return data
def plot_parallel_coordinates(data, labels, n_components=10, algorithm='tsne', algorithm_kwargs=None, show_average=False):
    """
    Draw a parallel-coordinates plot of the data, colored by cluster label.

    Points labeled -1 (noise) are excluded from the plot.

    @param data: DataFrame of features (not modified)
    @param labels: cluster label per row
    @param show_average: if True, plot per-cluster means instead of rows
    """
    # n_components/algorithm/algorithm_kwargs are accepted for API
    # compatibility but currently unused.
    # BUG FIX: work on a copy — the old code added a 'y' column to the
    # caller's DataFrame.
    df = data.copy()
    df['y'] = labels
    if show_average:
        df = df.groupby('y').mean()
        df['y'] = df.index
    parallel_coordinates(df[df['y'] != -1], 'y')
    plt.show()
def prep_for_d3(data, cluster, filename):
    """
    Attach a t-SNE projection and cluster labels to `data` for a d3 export.

    Mutates `data` in place, adding name/group/y1/y2 columns; the frame is
    then written out as CSV.

    @param data: DataFrame of features (modified in place)
    @param cluster: fitted clusterer exposing labels_
    @param filename: destination CSV path
    """
    Y = project_data(data.values, algorithm='tsne')
    data['name'] = cluster.labels_
    # Human-readable group name, e.g. 'group_3'.
    data['name'] = data['name'].apply(lambda x: 'group_{}'.format(x))
    data['group'] = cluster.labels_
    data['y1'] = Y[:, 0]
    data['y2'] = Y[:, 1]
data.to_csv(filename, index_label='index') | examples/plot.py | import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn.manifold as manifold
from sklearn.decomposition import PCA, TruncatedSVD, RandomizedPCA
from pandas.tools.plotting import parallel_coordinates
from bokeh_plots import scatter_with_hover
from bokeh_server import bokeh_server
sns.set_context('poster')
sns.set_color_codes()
plot_kwargs = {'alpha': 0.25, 's': 50, 'linewidth': 0}
color_palette = sns.color_palette('deep', 8)
algorithm_class_dict = {
'mds': manifold.MDS,
'tsne': manifold.TSNE,
'pca': PCA,
}
algorithm_kwargs_dict = {
'mds': dict(n_components=2, max_iter=100, n_init=1, random_state=0),
'tsne': dict(n_components=2, init='pca', random_state=0),
'pca': dict(n_components=2)
}
def plot_2d(data, labels=None, probabilities=None, algorithm='tsne', algorithm_kwargs=None):
    """Project data to 2-D (if needed) and scatter-plot it with matplotlib."""
    if data.shape[1] > 2:
        algorithm_class = algorithm_class_dict[algorithm]
        if algorithm_kwargs:
            algorithm = algorithm_class(**algorithm_kwargs)
        else:
            algorithm = algorithm_class(**algorithm_kwargs_dict[algorithm])
        Y = algorithm.fit_transform(data)
    else:
        Y = data
    color_palette = sns.color_palette('deep', len(np.unique(labels)))
    if labels is not None:
        cluster_colors = [color_palette[x] if x >= 0 else
                          (0.5, 0.5, 0.5) for
                          x in labels]
        # NOTE(review): np.isfinite(probabilities) yields an array here —
        # truth-testing it raises ValueError for len > 1; should be
        # np.all(np.isfinite(...)) as in bokeh_plot_2d.
        if probabilities is not None and np.isfinite(probabilities):
            cluster_member_colors = [sns.desaturate(x, p) for x, p in
                                     zip(cluster_colors, probabilities)]
        else:
            cluster_member_colors = cluster_colors
    else:
        cluster_member_colors = 'b'
    plt.scatter(Y[:, 0], Y[:, 1], c=cluster_member_colors, **plot_kwargs)
    frame = plt.gca()
    frame.get_xaxis().set_visible(False)
    frame.get_yaxis().set_visible(False)
    plt.show()
def bokeh_plot_2d(data, labels=None, probabilities=None, algorithm='tsne', algorithm_kwargs=None, untransformed_data=None):
if data.shape[1] > 2:
if data.shape[1] > 32 and algorithm != 'pca':
data = RandomizedPCA(n_components=32).fit_transform(data)
algorithm_class = algorithm_class_dict[algorithm]
if algorithm_kwargs:
algorithm = algorithm_class(**algorithm_kwargs)
else:
algorithm = algorithm_class(**algorithm_kwargs_dict[algorithm])
Y = algorithm.fit_transform(data)
else:
Y = data
color_palette = sns.color_palette('deep', len(np.unique(labels)))
if labels is not None:
cluster_colors = [color_palette[x] if x >= 0 else
(0.5, 0.5, 0.5) for
x in labels]
if probabilities is not None and np.all(np.isfinite(probabilities)):
cluster_member_colors = [sns.desaturate(x, p) for x, p in
zip(cluster_colors, probabilities)]
else:
cluster_member_colors = cluster_colors
cluster_member_colors = [mpl.colors.rgb2hex(rgb) for rgb in cluster_member_colors]
else:
cluster_member_colors = 'b'
if untransformed_data is not None:
original_columns = untransformed_data.columns.tolist()
df = untransformed_data.copy()
df['proj1'] = Y[:, 0]
df['proj2'] = Y[:, 1]
else:
original_columns = []
data_dict = {}
for column in xrange(data.shape[1]):
colname = 'x%i' % column
original_columns.append(colname)
data_dict[colname] = data[:, column]
data_dict.update({'proj1': Y[:, 0], 'proj2': Y[:, 1]})
df = pd.DataFrame(data_dict)
with bokeh_server(name='comp') as server:
q = scatter_with_hover(df, 'proj1', 'proj2', cols=original_columns, color=cluster_member_colors, alpha=0.5, size=5)
server.show(q)
def project_data(data, algorithm='tsne', algorithm_kwargs=None, n_components=2):
    """Project data down to n_components dimensions (or return it as-is)."""
    if data.shape[1] > n_components:
        algorithm_class = algorithm_class_dict[algorithm]
        if algorithm_kwargs:
            # NOTE(review): mutates the caller's kwargs dict.
            algorithm_kwargs['n_components'] = n_components
            algorithm = algorithm_class(**algorithm_kwargs)
        else:
            # NOTE(review): .copy() is shallow — writing into the nested
            # dict below mutates the module-level algorithm_kwargs_dict.
            kwargs_dict = algorithm_kwargs_dict.copy()
            kwargs_dict[algorithm]['n_components'] = n_components
            algorithm = algorithm_class(**kwargs_dict[algorithm])
        return algorithm.fit_transform(data)
    else:
        return data
def plot_parallel_coordinates(data, labels, n_components=10, algorithm='tsne', algorithm_kwargs=None, show_average=False):
df = data
df['y'] = labels
if show_average:
df = df.groupby('y').mean()
df['y'] = df.index
parallel_coordinates(df[ df['y'] != -1 ], 'y')
plt.show()
def prep_for_d3(data, cluster, filename):
Y = project_data(data.values, algorithm='tsne')
data['name'] = cluster.labels_
data['name'] = data['name'].apply(lambda x: 'group_{}'.format(x))
data['group'] = cluster.labels_
data['y1'] = Y[:, 0]
data['y2'] = Y[:, 1]
data.to_csv(filename, index_label='index') | 0.498291 | 0.564519 |
import argparse
import os
import shutil
import stat
import sys
import tempfile
import pytest
from statick_tool.config import Config
from statick_tool.plugin_context import PluginContext
from statick_tool.resources import Resources
from statick_tool.tool_plugin import ToolPlugin
def test_tool_plugin_load_mapping_valid():
    """Test that we can load the warnings mapping."""
    # NOTE(review): the parser/resources/context setup below is repeated in
    # every test — a pytest fixture would remove the duplication.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--mapping-file-suffix', dest="mapping_file_suffix",
                            type=str)
    resources = Resources([os.path.join(os.path.dirname(__file__), 'good_config')])
    plugin_context = PluginContext(arg_parser.parse_args([]), resources, None)
    tp = ToolPlugin()
    tp.set_plugin_context(plugin_context)
    mapping = tp.load_mapping()
    # The good config defines exactly one warning mapping.
    assert len(mapping) == 1
    assert mapping == {'a': 'TST1-NO'}
def test_tool_plugin_load_mapping_invalid():
    """Test that we correctly skip invalid entries."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--mapping-file-suffix', dest="mapping_file_suffix",
                            type=str)
    resources = Resources([os.path.join(os.path.dirname(__file__), 'bad_config')])
    plugin_context = PluginContext(arg_parser.parse_args([]), resources, None)
    tp = ToolPlugin()
    tp.set_plugin_context(plugin_context)
    mapping = tp.load_mapping()
    # Invalid entries are skipped, leaving an empty mapping.
    assert not mapping
def test_tool_plugin_load_mapping_missing():
    """Test that we return an empty dict for missing files."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--mapping-file-suffix', dest="mapping_file_suffix",
                            type=str)
    resources = Resources([os.path.join(os.path.dirname(__file__), 'missing_config')])
    plugin_context = PluginContext(arg_parser.parse_args([]), resources, None)
    tp = ToolPlugin()
    tp.set_plugin_context(plugin_context)
    mapping = tp.load_mapping()
    assert not mapping
def test_tool_plugin_load_mapping_suffixed():
    """Test that we can load the warnings mapping with a suffix."""
    arg_parser = argparse.ArgumentParser()
    # A suffix for which a dedicated mapping file exists.
    arg_parser.add_argument('--mapping-file-suffix', dest="mapping_file_suffix",
                            type=str, default='experimental')
    resources = Resources([os.path.join(os.path.dirname(__file__), 'good_config')])
    plugin_context = PluginContext(arg_parser.parse_args([]), resources, None)
    tp = ToolPlugin()
    tp.set_plugin_context(plugin_context)
    mapping = tp.load_mapping()
    assert len(mapping) == 1
    # The suffixed file provides a different mapping than the default one.
    assert mapping == {'b': 'TST2-NO'}
def test_tool_plugin_load_mapping_suffixed_fallback():
    """Test that we fall back to the non-suffixed file if we can't find a mapping file with an appropriate suffix."""
    arg_parser = argparse.ArgumentParser()
    # No mapping file exists for this suffix, forcing the fallback.
    arg_parser.add_argument('--mapping-file-suffix', dest="mapping_file_suffix",
                            type=str, default='gibberish')
    resources = Resources([os.path.join(os.path.dirname(__file__), 'good_config')])
    plugin_context = PluginContext(arg_parser.parse_args([]), resources, None)
    tp = ToolPlugin()
    tp.set_plugin_context(plugin_context)
    mapping = tp.load_mapping()
    assert len(mapping) == 1
    # Falls back to the default (non-suffixed) mapping.
    assert mapping == {'a': 'TST1-NO'}
def test_tool_plugin_get_user_flags_invalid_level():
    """Test that we return an empty list for invalid levels."""
    arg_parser = argparse.ArgumentParser()
    resources = Resources([os.path.join(os.path.dirname(__file__), 'user_flags_config')])
    config = Config(resources.get_file("config.yaml"))
    plugin_context = PluginContext(arg_parser.parse_args([]), resources, config)
    tp = ToolPlugin()
    tp.set_plugin_context(plugin_context)
    # 'level2' is not defined in the config.
    flags = tp.get_user_flags('level2', name='test')
    assert flags == []
def test_tool_plugin_get_user_flags_invalid_tool():
    """Test that we return an empty list for undefined tools."""
    arg_parser = argparse.ArgumentParser()
    resources = Resources([os.path.join(os.path.dirname(__file__), 'user_flags_config')])
    config = Config(resources.get_file("config.yaml"))
    plugin_context = PluginContext(arg_parser.parse_args([]), resources, config)
    tp = ToolPlugin()
    tp.set_plugin_context(plugin_context)
    # 'test2' is not a tool defined at this level.
    flags = tp.get_user_flags('level', name='test2')
    assert flags == []
def test_tool_plugin_get_user_flags_no_config():
    """Test that we return an empty list for missing configs."""
    arg_parser = argparse.ArgumentParser()
    resources = Resources([os.path.join(os.path.dirname(__file__), 'user_flags_config_missing')])
    config = Config(resources.get_file("config.yaml"))
    plugin_context = PluginContext(arg_parser.parse_args([]), resources, config)
    tp = ToolPlugin()
    tp.set_plugin_context(plugin_context)
    flags = tp.get_user_flags('level', name='test')
    assert flags == []
def test_tool_plugin_get_user_flags_valid_flags():
    """Test that we return a list of user flags."""
    arg_parser = argparse.ArgumentParser()
    resources = Resources([os.path.join(os.path.dirname(__file__), 'user_flags_config')])
    config = Config(resources.get_file("config.yaml"))
    plugin_context = PluginContext(arg_parser.parse_args([]), resources, config)
    tp = ToolPlugin()
    tp.set_plugin_context(plugin_context)
    flags = tp.get_user_flags('level', name='test')
    # Flags come straight from the fixture config.yaml.
    assert flags == ['look', 'a', 'flag']
def test_tool_plugin_is_valid_executable_valid():
    """Test that is_valid_executable returns True for executable files."""
    # Create an executable file
    tmp_file = tempfile.NamedTemporaryFile()
    st = os.stat(tmp_file.name)
    # Add the owner-execute bit to make the file "executable".
    os.chmod(tmp_file.name, st.st_mode | stat.S_IXUSR)
    assert ToolPlugin.is_valid_executable(tmp_file.name)
def test_tool_plugin_is_valid_executable_no_exe_flag():
    """
    Test that is_valid_executable returns False for a non-executable file.
    NOTE: any platform which doesn't have executable bits should skip
    this test, since the os.stat call will always say that the file is
    executable
    """
    if sys.platform.startswith('win32'):
        pytest.skip("windows doesn't have executable flags")
    # Create a file
    tmp_file = tempfile.NamedTemporaryFile()
    # No execute bit set, so the file must be rejected.
    assert not ToolPlugin.is_valid_executable(tmp_file.name)
def test_tool_plugin_is_valid_executable_nonexistent():
    """Test that is_valid_executable returns False for a nonexistent file."""
    assert not ToolPlugin.is_valid_executable('nonexistent')
def test_tool_plugin_is_valid_executable_extension_nopathext(monkeypatch):
    """
    Test that is_valid_executable works correctly with .exe appended, no PATHEXT
    is_valid_executable should find the file as created.
    """
    # Monkeypatch the environment to clear PATHEXT
    monkeypatch.delenv('PATHEXT', raising=False)
    # Make a temporary executable
    tmp_file = tempfile.NamedTemporaryFile(suffix='.exe')
    st = os.stat(tmp_file.name)
    os.chmod(tmp_file.name, st.st_mode | stat.S_IXUSR)
    assert ToolPlugin.is_valid_executable(tmp_file.name)
def test_tool_plugin_is_valid_executable_noextension_nopathext(monkeypatch):
    """
    Test that is_valid_executable works correctly with no extension and no PATHEXT
    is_valid_executable should find the file as created.
    """
    # Monkeypatch the environment to clear PATHEXT
    monkeypatch.delenv('PATHEXT', raising=False)
    # Make a temporary executable
    tmp_file = tempfile.NamedTemporaryFile()
    st = os.stat(tmp_file.name)
    os.chmod(tmp_file.name, st.st_mode | stat.S_IXUSR)
    assert ToolPlugin.is_valid_executable(tmp_file.name)
def test_tool_plugin_is_valid_executable_extension_pathext(monkeypatch):
    """
    Test that is_valid_executable works correctly with an extension and a set PATHEXT
    is_valid_executable should find the file as created.
    """
    # Monkeypatch the environment to set
    monkeypatch.setenv('PATHEXT', '.exe;.bat')
    # Make a temporary executable whose suffix is listed in PATHEXT.
    tmp_file = tempfile.NamedTemporaryFile(suffix='.exe')
    st = os.stat(tmp_file.name)
    os.chmod(tmp_file.name, st.st_mode | stat.S_IXUSR)
    assert ToolPlugin.is_valid_executable(tmp_file.name)
def test_tool_plugin_is_valid_executable_noextension_pathext(monkeypatch):
    """
    Test that is_valid_executable works correctly with no extension and a set PATHEXT
    is_valid_executable should find the file as created.
    """
    # Monkeypatch the environment to set
    monkeypatch.setenv('PATHEXT', '.exe;.bat')
    # Make a temporary executable
    tmp_file = tempfile.NamedTemporaryFile()
    st = os.stat(tmp_file.name)
    os.chmod(tmp_file.name, st.st_mode | stat.S_IXUSR)
    assert ToolPlugin.is_valid_executable(tmp_file.name)
def test_tool_plugin_is_valid_executable_wrongextension_pathext(monkeypatch):
    """
    Test that is_valid_executable works correctly with a set PATHEXT and a non-PATHEXT extension.
    is_valid_executable should NOT find the file.
    """
    # Monkeypatch the environment to set
    monkeypatch.setenv('PATHEXT', '.exe;.bat')
    # Make a temporary executable with a suffix NOT listed in PATHEXT.
    tmp_file = tempfile.NamedTemporaryFile(suffix='.potato')
    st = os.stat(tmp_file.name)
    os.chmod(tmp_file.name, st.st_mode | stat.S_IXUSR)
    # Get the created file minus the suffix
    no_ext_path, _ = os.path.splitext(tmp_file.name)
    assert not ToolPlugin.is_valid_executable(no_ext_path)
def test_tool_plugin_command_exists_fullpath(monkeypatch):
"""Test that command_exists works correctly (full path given). """
# Monkeypatch the environment to clear PATHEXT
monkeypatch.delenv('PATHEXT', raising=False)
# Make a temporary directory which will be part of the path
tmp_dir = tempfile.mkdtemp()
# Make a temporary executable
tmp_file = tempfile.NamedTemporaryFile(dir=tmp_dir)
st = os.stat(tmp_file.name)
os.chmod(tmp_file.name, st.st_mode | stat.S_IXUSR)
assert ToolPlugin.command_exists(tmp_file.name)
# Cleanup
shutil.rmtree(tmp_dir, ignore_errors=True)
def test_tool_plugin_command_exists_shortpath_valid(monkeypatch):
"""Test that command_exists works correctly (only filename given, command is on PATH). """
# Monkeypatch the environment to clear PATHEXT
monkeypatch.delenv('PATHEXT', raising=False)
# Make a temporary directory which will be part of the path
tmp_dir = tempfile.mkdtemp()
# Make a temporary executable
tmp_file = tempfile.NamedTemporaryFile(dir=tmp_dir)
st = os.stat(tmp_file.name)
os.chmod(tmp_file.name, st.st_mode | stat.S_IXUSR)
monkeypatch.setenv('PATH', tmp_dir)
_, tmp_file_name = os.path.split(tmp_file.name)
assert ToolPlugin.command_exists(tmp_file_name)
# Cleanup
shutil.rmtree(tmp_dir, ignore_errors=True)
def test_tool_plugin_command_exists_shortpath_invalid(monkeypatch):
    """Test that command_exists works correctly (only filename given, command is not on PATH). """
    monkeypatch.delenv('PATHEXT', raising=False)
    exe_dir = tempfile.mkdtemp()
    exe_file = tempfile.NamedTemporaryFile(dir=exe_dir)
    os.chmod(exe_file.name, os.stat(exe_file.name).st_mode | stat.S_IXUSR)
    # The directory was never put on PATH, so the lookup must fail.
    assert not ToolPlugin.command_exists(os.path.basename(exe_file.name))
    shutil.rmtree(exe_dir, ignore_errors=True)
import os
import shutil
import stat
import sys
import tempfile
import pytest
from statick_tool.config import Config
from statick_tool.plugin_context import PluginContext
from statick_tool.resources import Resources
from statick_tool.tool_plugin import ToolPlugin
def test_tool_plugin_load_mapping_valid():
    """Test that we can load the warnings mapping."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--mapping-file-suffix', dest="mapping_file_suffix",
                        type=str)
    res = Resources([os.path.join(os.path.dirname(__file__), 'good_config')])
    plugin = ToolPlugin()
    plugin.set_plugin_context(PluginContext(parser.parse_args([]), res, None))
    mapping = plugin.load_mapping()
    assert len(mapping) == 1
    assert mapping == {'a': 'TST1-NO'}
def test_tool_plugin_load_mapping_invalid():
    """Test that we correctly skip invalid entries."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--mapping-file-suffix', dest="mapping_file_suffix",
                        type=str)
    res = Resources([os.path.join(os.path.dirname(__file__), 'bad_config')])
    plugin = ToolPlugin()
    plugin.set_plugin_context(PluginContext(parser.parse_args([]), res, None))
    assert not plugin.load_mapping()
def test_tool_plugin_load_mapping_missing():
    """Test that we return an empty dict for missing files."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--mapping-file-suffix', dest="mapping_file_suffix",
                        type=str)
    res = Resources([os.path.join(os.path.dirname(__file__), 'missing_config')])
    plugin = ToolPlugin()
    plugin.set_plugin_context(PluginContext(parser.parse_args([]), res, None))
    assert not plugin.load_mapping()
def test_tool_plugin_load_mapping_suffixed():
    """Test that we can load the warnings mapping with a suffix."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--mapping-file-suffix', dest="mapping_file_suffix",
                        type=str, default='experimental')
    res = Resources([os.path.join(os.path.dirname(__file__), 'good_config')])
    plugin = ToolPlugin()
    plugin.set_plugin_context(PluginContext(parser.parse_args([]), res, None))
    mapping = plugin.load_mapping()
    assert len(mapping) == 1
    assert mapping == {'b': 'TST2-NO'}
def test_tool_plugin_load_mapping_suffixed_fallback():
    """Test that we fall back to the non-suffixed file if we can't find a mapping file with an appropriate suffix."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--mapping-file-suffix', dest="mapping_file_suffix",
                        type=str, default='gibberish')
    res = Resources([os.path.join(os.path.dirname(__file__), 'good_config')])
    plugin = ToolPlugin()
    plugin.set_plugin_context(PluginContext(parser.parse_args([]), res, None))
    mapping = plugin.load_mapping()
    assert len(mapping) == 1
    assert mapping == {'a': 'TST1-NO'}
def test_tool_plugin_get_user_flags_invalid_level():
    """Test that we return an empty list for invalid levels."""
    res = Resources([os.path.join(os.path.dirname(__file__), 'user_flags_config')])
    cfg = Config(res.get_file("config.yaml"))
    args = argparse.ArgumentParser().parse_args([])
    plugin = ToolPlugin()
    plugin.set_plugin_context(PluginContext(args, res, cfg))
    assert plugin.get_user_flags('level2', name='test') == []
def test_tool_plugin_get_user_flags_invalid_tool():
    """Test that we return an empty list for undefined tools."""
    res = Resources([os.path.join(os.path.dirname(__file__), 'user_flags_config')])
    cfg = Config(res.get_file("config.yaml"))
    args = argparse.ArgumentParser().parse_args([])
    plugin = ToolPlugin()
    plugin.set_plugin_context(PluginContext(args, res, cfg))
    assert plugin.get_user_flags('level', name='test2') == []
def test_tool_plugin_get_user_flags_no_config():
    """Test that we return an empty list for missing configs."""
    res = Resources([os.path.join(os.path.dirname(__file__), 'user_flags_config_missing')])
    cfg = Config(res.get_file("config.yaml"))
    args = argparse.ArgumentParser().parse_args([])
    plugin = ToolPlugin()
    plugin.set_plugin_context(PluginContext(args, res, cfg))
    assert plugin.get_user_flags('level', name='test') == []
def test_tool_plugin_get_user_flags_valid_flags():
    """Test that we return a list of user flags."""
    res = Resources([os.path.join(os.path.dirname(__file__), 'user_flags_config')])
    cfg = Config(res.get_file("config.yaml"))
    args = argparse.ArgumentParser().parse_args([])
    plugin = ToolPlugin()
    plugin.set_plugin_context(PluginContext(args, res, cfg))
    assert plugin.get_user_flags('level', name='test') == ['look', 'a', 'flag']
def test_tool_plugin_is_valid_executable_valid():
    """Test that is_valid_executable returns True for executable files."""
    # Create a file and flip on its user-executable bit.
    exe_file = tempfile.NamedTemporaryFile()
    os.chmod(exe_file.name, os.stat(exe_file.name).st_mode | stat.S_IXUSR)
    assert ToolPlugin.is_valid_executable(exe_file.name)
def test_tool_plugin_is_valid_executable_no_exe_flag():
    """
    Test that is_valid_executable returns False for a non-executable file.
    NOTE: any platform which doesn't have executable bits should skip
    this test, since the os.stat call will always say that the file is
    executable
    """
    if sys.platform.startswith('win32'):
        pytest.skip("windows doesn't have executable flags")
    plain_file = tempfile.NamedTemporaryFile()
    assert not ToolPlugin.is_valid_executable(plain_file.name)
def test_tool_plugin_is_valid_executable_nonexistent():
    """Test that is_valid_executable returns False for a nonexistent file."""
    bogus_path = 'nonexistent'
    assert not ToolPlugin.is_valid_executable(bogus_path)
def test_tool_plugin_is_valid_executable_extension_nopathext(monkeypatch):
    """
    Test that is_valid_executable works correctly with .exe appended, no PATHEXT
    is_valid_executable should find the file as created.
    """
    monkeypatch.delenv('PATHEXT', raising=False)
    exe_file = tempfile.NamedTemporaryFile(suffix='.exe')
    os.chmod(exe_file.name, os.stat(exe_file.name).st_mode | stat.S_IXUSR)
    assert ToolPlugin.is_valid_executable(exe_file.name)
def test_tool_plugin_is_valid_executable_noextension_nopathext(monkeypatch):
    """
    Test that is_valid_executable works correctly with no extension and no PATHEXT
    is_valid_executable should find the file as created.
    """
    monkeypatch.delenv('PATHEXT', raising=False)
    exe_file = tempfile.NamedTemporaryFile()
    os.chmod(exe_file.name, os.stat(exe_file.name).st_mode | stat.S_IXUSR)
    assert ToolPlugin.is_valid_executable(exe_file.name)
def test_tool_plugin_is_valid_executable_extension_pathext(monkeypatch):
    """
    Test that is_valid_executable works correctly with an extension and a set PATHEXT
    is_valid_executable should find the file as created.
    """
    monkeypatch.setenv('PATHEXT', '.exe;.bat')
    exe_file = tempfile.NamedTemporaryFile(suffix='.exe')
    os.chmod(exe_file.name, os.stat(exe_file.name).st_mode | stat.S_IXUSR)
    assert ToolPlugin.is_valid_executable(exe_file.name)
def test_tool_plugin_is_valid_executable_noextension_pathext(monkeypatch):
    """
    Test that is_valid_executable works correctly with no extension and a set PATHEXT
    is_valid_executable should find the file as created.
    """
    monkeypatch.setenv('PATHEXT', '.exe;.bat')
    exe_file = tempfile.NamedTemporaryFile()
    os.chmod(exe_file.name, os.stat(exe_file.name).st_mode | stat.S_IXUSR)
    assert ToolPlugin.is_valid_executable(exe_file.name)
def test_tool_plugin_is_valid_executable_wrongextension_pathext(monkeypatch):
    """
    Test that is_valid_executable works correctly with a set PATHEXT and a non-PATHEXT extension.
    is_valid_executable should NOT find the file.
    """
    monkeypatch.setenv('PATHEXT', '.exe;.bat')
    exe_file = tempfile.NamedTemporaryFile(suffix='.potato')
    os.chmod(exe_file.name, os.stat(exe_file.name).st_mode | stat.S_IXUSR)
    # Drop the suffix so lookup must go through PATHEXT, which lacks '.potato'.
    assert not ToolPlugin.is_valid_executable(os.path.splitext(exe_file.name)[0])
def test_tool_plugin_command_exists_fullpath(monkeypatch):
    """Test that command_exists works correctly (full path given). """
    monkeypatch.delenv('PATHEXT', raising=False)
    exe_dir = tempfile.mkdtemp()
    exe_file = tempfile.NamedTemporaryFile(dir=exe_dir)
    os.chmod(exe_file.name, os.stat(exe_file.name).st_mode | stat.S_IXUSR)
    assert ToolPlugin.command_exists(exe_file.name)
    shutil.rmtree(exe_dir, ignore_errors=True)
def test_tool_plugin_command_exists_shortpath_valid(monkeypatch):
    """Test that command_exists works correctly (only filename given, command is on PATH). """
    monkeypatch.delenv('PATHEXT', raising=False)
    exe_dir = tempfile.mkdtemp()
    exe_file = tempfile.NamedTemporaryFile(dir=exe_dir)
    os.chmod(exe_file.name, os.stat(exe_file.name).st_mode | stat.S_IXUSR)
    monkeypatch.setenv('PATH', exe_dir)
    assert ToolPlugin.command_exists(os.path.basename(exe_file.name))
    shutil.rmtree(exe_dir, ignore_errors=True)
def test_tool_plugin_command_exists_shortpath_invalid(monkeypatch):
    """Test that command_exists works correctly (only filename given, command is not on PATH). """
    monkeypatch.delenv('PATHEXT', raising=False)
    exe_dir = tempfile.mkdtemp()
    exe_file = tempfile.NamedTemporaryFile(dir=exe_dir)
    os.chmod(exe_file.name, os.stat(exe_file.name).st_mode | stat.S_IXUSR)
    # The directory was never put on PATH, so the lookup must fail.
    assert not ToolPlugin.command_exists(os.path.basename(exe_file.name))
    shutil.rmtree(exe_dir, ignore_errors=True)
import os
from wa import ApkUiautoWorkload, Parameter
from wa.framework.exception import ValidationError, WorkloadError
class Gmail(ApkUiautoWorkload):
    # UI-automation workload: compose an email in Gmail, attach an image,
    # and send it (steps are listed in ``description`` below).

    name = 'gmail'
    package_names = ['com.google.android.gm']
    description = '''
    A workload to perform standard productivity tasks within Gmail. The workload carries out
    various tasks, such as creating new emails, attaching images and sending them.
    Test description:
    1. Open Gmail application
    2. Click to create New mail
    3. Attach an image from the local images folder to the email
    4. Enter recipient details in the To field
    5. Enter text in the Subject field
    6. Enter text in the Compose field
    7. Click the Send mail button
    Known working APK version: 7.11.5.176133587
    '''
    parameters = [
        Parameter('recipient', kind=str, default='<EMAIL>',
                  description='''
                  The email address of the recipient. Setting a void address
                  will stop any mesage failures clogging up your device inbox
                  '''),
        Parameter('test_image', kind=str, default='uxperf_1600x1200.jpg',
                  description='''
                  An image to be copied onto the device that will be attached
                  to the email
                  '''),
    ]
    # This workload relies on the internet so check that there is a working
    # internet connection
    requires_network = True

    def __init__(self, target, **kwargs):
        super(Gmail, self).__init__(target, **kwargs)
        # The test image is pushed to the device so it can be attached to
        # the email; clean_assets removes it again afterwards.
        self.deployable_assets = [self.test_image]
        self.clean_assets = True

    def init_resources(self, context):
        super(Gmail, self).init_resources(context)
        # From Android N (SDK >= 24) the attach flow depends on Google
        # Photos, so refuse to run when it is not installed.
        if self.target.get_sdk_version() >= 24 and 'com.google.android.apps.photos' not in self.target.list_packages():
            raise WorkloadError('gmail workload requires Google Photos to be installed for Android N onwards')
        # Allows for getting working directory regardless if path ends with a '/'
        work_dir = self.target.working_directory
        work_dir = work_dir if work_dir[-1] != os.sep else work_dir[:-1]
        self.gui.uiauto_params['workdir_name'] = self.target.path.basename(work_dir)
        self.gui.uiauto_params['recipient'] = self.recipient
        # Only accept certain image formats
        if os.path.splitext(self.test_image.lower())[1] not in ['.jpg', '.jpeg', '.png']:
            raise ValidationError('{} must be a JPEG or PNG file'.format(self.test_image))
import os
from wa import ApkUiautoWorkload, Parameter
from wa.framework.exception import ValidationError, WorkloadError
class Gmail(ApkUiautoWorkload):
    # UI-automation workload: compose an email in Gmail, attach an image,
    # and send it (steps are listed in ``description`` below).

    name = 'gmail'
    package_names = ['com.google.android.gm']
    description = '''
    A workload to perform standard productivity tasks within Gmail. The workload carries out
    various tasks, such as creating new emails, attaching images and sending them.
    Test description:
    1. Open Gmail application
    2. Click to create New mail
    3. Attach an image from the local images folder to the email
    4. Enter recipient details in the To field
    5. Enter text in the Subject field
    6. Enter text in the Compose field
    7. Click the Send mail button
    Known working APK version: 7.11.5.176133587
    '''
    parameters = [
        Parameter('recipient', kind=str, default='<EMAIL>',
                  description='''
                  The email address of the recipient. Setting a void address
                  will stop any mesage failures clogging up your device inbox
                  '''),
        Parameter('test_image', kind=str, default='uxperf_1600x1200.jpg',
                  description='''
                  An image to be copied onto the device that will be attached
                  to the email
                  '''),
    ]
    # This workload relies on the internet so check that there is a working
    # internet connection
    requires_network = True

    def __init__(self, target, **kwargs):
        super(Gmail, self).__init__(target, **kwargs)
        # The test image is pushed to the device so it can be attached to
        # the email; clean_assets removes it again afterwards.
        self.deployable_assets = [self.test_image]
        self.clean_assets = True

    def init_resources(self, context):
        super(Gmail, self).init_resources(context)
        # From Android N (SDK >= 24) the attach flow depends on Google
        # Photos, so refuse to run when it is not installed.
        if self.target.get_sdk_version() >= 24 and 'com.google.android.apps.photos' not in self.target.list_packages():
            raise WorkloadError('gmail workload requires Google Photos to be installed for Android N onwards')
        # Allows for getting working directory regardless if path ends with a '/'
        work_dir = self.target.working_directory
        work_dir = work_dir if work_dir[-1] != os.sep else work_dir[:-1]
        self.gui.uiauto_params['workdir_name'] = self.target.path.basename(work_dir)
        self.gui.uiauto_params['recipient'] = self.recipient
        # Only accept certain image formats
        if os.path.splitext(self.test_image.lower())[1] not in ['.jpg', '.jpeg', '.png']:
            raise ValidationError('{} must be a JPEG or PNG file'.format(self.test_image))
from collections import defaultdict
import json
from os import environ, getcwd, path
import shutil
import subprocess
import ssh_utils
import utils
# Directory layout: <cwd>/hosts holds one directory per host, seeded from
# the .hosts-template directory on first use (see config() below).
WORKSPACE = getcwd()
HOSTS_PATH = path.join(WORKSPACE, 'hosts')
HOSTS_TEMPLATE_PATH = path.join(WORKSPACE, '.hosts-template')
def host_path(host_dir):
    """Return the absolute path of *host_dir* inside the hosts directory."""
    return path.join(HOSTS_PATH, host_dir)
def config(host_dir):
    """Load and return the parsed JSON config for *host_dir*.

    On first run (no hosts directory yet) the template tree is copied into
    place and the lookup is retried once. Raises HostconfigFileNotFound when
    the host directory exists but has no config.json, and a generic
    Exception when the host directory itself is missing or the JSON is
    malformed.
    """
    _host_path = host_path(host_dir)
    config_file = path.join(_host_path, 'config.json')
    try:
        with open(config_file, 'r') as f:
            _config = json.load(f)
    except IOError:
        if not path.isdir(HOSTS_PATH):
            # First run: seed the hosts directory from the template.
            shutil.copytree(HOSTS_TEMPLATE_PATH, HOSTS_PATH)
            # Try again
            return config(host_dir)
        elif not path.isdir(_host_path):
            # BUG FIX: the original check was inverted (`path.isdir`),
            # reporting "Host not found" precisely when the host directory
            # DID exist, and "config file not found" when it did not.
            raise Exception('Host not found: {}'.format(
                _host_path.replace(environ.get('HOME'), '~')))
        else:
            raise HostconfigFileNotFound('Host config file not found: {}'.format(
                config_file.replace(environ.get('HOME'), '~')))
    except ValueError as e:
        raise Exception('There is a syntax error in {}: {}'.format(config_file, e))
    return _config
class HostDownException(Exception):
    """Raised by BaseHost.ping() when no candidate IP address answers."""
    pass
class HostconfigFileNotFound(Exception):
    """Raised by config() when a host's config.json cannot be found."""
    pass
class BaseHost(object):
    """Base class for a managed host rooted at a directory under hosts/.

    Key/value state is cached in ``_data`` and persisted to
    ``<root>/.state.json``. Subclasses are expected to provide ``config``,
    ``ip_list`` and ``ssh_config`` (referenced but not defined here).
    """

    _data = None   # lazy cache for the persisted state dict
    root = None    # host directory, set in __init__
    config = None  # host config dict -- presumably set by a subclass; verify

    def __init__(self, root):
        self.root = root

    @property
    def name(self):
        # Fall back to the directory name when no explicit host-name is set.
        return self.config.get('host-name', path.basename(self.root))

    def ping(self):
        """Ping each candidate address; record and return the first that answers.

        Raises HostDownException when none respond.
        """
        ip_list = self.ip_list
        utils.log('IP-addresses: ' + ', '.join(ip_list))
        for ip in ip_list:
            utils.log('Pinging {} ({})'.format(self.name, ip))
            if utils.ping(ip):
                utils.log('Ping successful')
                with open('{}/ip-address.txt'.format(self.root), 'w') as f:
                    f.write(ip)
                return ip
        utils.log('Ping unsuccessful')
        raise HostDownException

    @property
    def ip(self):
        return self.ping()

    def command(self, command, stdout=False):
        """Run *command* over SSH after confirming the host is reachable."""
        self.ping()
        return self.ssh(command=command, stdout=stdout)

    @property
    def flat_ssh_config(self):
        return ssh_utils.flat_ssh_config(ssh_config=self.ssh_config)

    def ssh(self, command=None, stdout=False):
        ssh_config = self.ssh_config
        try:
            return ssh_utils.ssh(ssh_config=ssh_config, command=command, stdout=stdout)
        except ssh_utils.SshException:
            # Deliberate best-effort: abort the whole run on SSH failure.
            exit()

    def ssh_command(self, command=None):
        return ssh_utils.ssh_command(ssh_config=self.ssh_config,
                                     command=command)

    def scp_from(self, from_file, to_file):
        return ssh_utils.scp(ssh_config=self.ssh_config, from_file=from_file, to_file=to_file, from_remote=True)

    def scp_to(self, from_file, to_file):
        return ssh_utils.scp(ssh_config=self.ssh_config, from_file=from_file, to_file=to_file, to_remote=True)

    def get(self, key):
        """Return the stored value for *key*, or None when absent.

        Replaces the Python-2-only ``dict.has_key`` with ``in``.
        """
        if key in self.data:
            return self.data.get(key)
        return None

    def set(self, key, value):
        self.data[key] = value
        return self

    def unset(self, key):
        # BUG FIX: the original called self.datahas_key(key) (missing dot),
        # which raised AttributeError on every call.
        if key in self.data:
            del self.data[key]
        return self

    def remove_data(self):
        self._data = {}
        return self

    @property
    def data(self):
        # Lazily populated from the state file on first access.
        if self._data is None:
            self._data = self.state_file_content
        return self._data

    @property
    def state_file(self):
        return '{}/.state.json'.format(self.root)

    @property
    def state_file_content(self):
        utils.log('Reading state from file {}'.format(self.state_file))
        try:
            return json.load(open(self.state_file))
        except IOError:
            # No state yet: start from an empty mapping.
            return defaultdict(dict)
        except ValueError as e:
            utils.log('There is a syntax error in {}: {}'.format(self.state_file, e))
            exit(1)

    def save(self):
        utils.log('Saving state to file {}'.format(self.state_file))
        with open(self.state_file, 'w') as f:
            f.write(json.dumps(self.data, indent=4))
import json
from os import environ, getcwd, path
import shutil
import subprocess
import ssh_utils
import utils
# Directory layout: <cwd>/hosts holds one directory per host, seeded from
# the .hosts-template directory on first use (see config() below).
WORKSPACE = getcwd()
HOSTS_PATH = path.join(WORKSPACE, 'hosts')
HOSTS_TEMPLATE_PATH = path.join(WORKSPACE, '.hosts-template')
def host_path(host_dir):
    """Return the absolute path of *host_dir* inside the hosts directory."""
    return path.join(HOSTS_PATH, host_dir)
def config(host_dir):
    """Load and return the parsed JSON config for *host_dir*.

    On first run (no hosts directory yet) the template tree is copied into
    place and the lookup is retried once. Raises HostconfigFileNotFound when
    the host directory exists but has no config.json, and a generic
    Exception when the host directory itself is missing or the JSON is
    malformed.
    """
    _host_path = host_path(host_dir)
    config_file = path.join(_host_path, 'config.json')
    try:
        with open(config_file, 'r') as f:
            _config = json.load(f)
    except IOError:
        if not path.isdir(HOSTS_PATH):
            # First run: seed the hosts directory from the template.
            shutil.copytree(HOSTS_TEMPLATE_PATH, HOSTS_PATH)
            # Try again
            return config(host_dir)
        elif not path.isdir(_host_path):
            # BUG FIX: the original check was inverted (`path.isdir`),
            # reporting "Host not found" precisely when the host directory
            # DID exist, and "config file not found" when it did not.
            raise Exception('Host not found: {}'.format(
                _host_path.replace(environ.get('HOME'), '~')))
        else:
            raise HostconfigFileNotFound('Host config file not found: {}'.format(
                config_file.replace(environ.get('HOME'), '~')))
    except ValueError as e:
        raise Exception('There is a syntax error in {}: {}'.format(config_file, e))
    return _config
class HostDownException(Exception):
    """Raised by BaseHost.ping() when no candidate IP address answers."""
    pass
class HostconfigFileNotFound(Exception):
    """Raised by config() when a host's config.json cannot be found."""
    pass
class BaseHost(object):
    """Base class for a managed host rooted at a directory under hosts/.

    Key/value state is cached in ``_data`` and persisted to
    ``<root>/.state.json``. Subclasses are expected to provide ``config``,
    ``ip_list`` and ``ssh_config`` (referenced but not defined here).
    """

    _data = None   # lazy cache for the persisted state dict
    root = None    # host directory, set in __init__
    config = None  # host config dict -- presumably set by a subclass; verify

    def __init__(self, root):
        self.root = root

    @property
    def name(self):
        # Fall back to the directory name when no explicit host-name is set.
        return self.config.get('host-name', path.basename(self.root))

    def ping(self):
        """Ping each candidate address; record and return the first that answers.

        Raises HostDownException when none respond.
        """
        ip_list = self.ip_list
        utils.log('IP-addresses: ' + ', '.join(ip_list))
        for ip in ip_list:
            utils.log('Pinging {} ({})'.format(self.name, ip))
            if utils.ping(ip):
                utils.log('Ping successful')
                with open('{}/ip-address.txt'.format(self.root), 'w') as f:
                    f.write(ip)
                return ip
        utils.log('Ping unsuccessful')
        raise HostDownException

    @property
    def ip(self):
        return self.ping()

    def command(self, command, stdout=False):
        """Run *command* over SSH after confirming the host is reachable."""
        self.ping()
        return self.ssh(command=command, stdout=stdout)

    @property
    def flat_ssh_config(self):
        return ssh_utils.flat_ssh_config(ssh_config=self.ssh_config)

    def ssh(self, command=None, stdout=False):
        ssh_config = self.ssh_config
        try:
            return ssh_utils.ssh(ssh_config=ssh_config, command=command, stdout=stdout)
        except ssh_utils.SshException:
            # Deliberate best-effort: abort the whole run on SSH failure.
            exit()

    def ssh_command(self, command=None):
        return ssh_utils.ssh_command(ssh_config=self.ssh_config,
                                     command=command)

    def scp_from(self, from_file, to_file):
        return ssh_utils.scp(ssh_config=self.ssh_config, from_file=from_file, to_file=to_file, from_remote=True)

    def scp_to(self, from_file, to_file):
        return ssh_utils.scp(ssh_config=self.ssh_config, from_file=from_file, to_file=to_file, to_remote=True)

    def get(self, key):
        """Return the stored value for *key*, or None when absent.

        Replaces the Python-2-only ``dict.has_key`` with ``in``.
        """
        if key in self.data:
            return self.data.get(key)
        return None

    def set(self, key, value):
        self.data[key] = value
        return self

    def unset(self, key):
        # BUG FIX: the original called self.datahas_key(key) (missing dot),
        # which raised AttributeError on every call.
        if key in self.data:
            del self.data[key]
        return self

    def remove_data(self):
        self._data = {}
        return self

    @property
    def data(self):
        # Lazily populated from the state file on first access.
        if self._data is None:
            self._data = self.state_file_content
        return self._data

    @property
    def state_file(self):
        return '{}/.state.json'.format(self.root)

    @property
    def state_file_content(self):
        utils.log('Reading state from file {}'.format(self.state_file))
        try:
            return json.load(open(self.state_file))
        except IOError:
            # No state yet: start from an empty mapping.
            return defaultdict(dict)
        except ValueError as e:
            utils.log('There is a syntax error in {}: {}'.format(self.state_file, e))
            exit(1)

    def save(self):
        utils.log('Saving state to file {}'.format(self.state_file))
        with open(self.state_file, 'w') as f:
            f.write(json.dumps(self.data, indent=4))
import datetime
import pytest
import boto3
from mock import patch, Mock
import reports.import_config_rule_status.import_config_rule_status as import_config_rule_status
@pytest.fixture(scope="function")
def _citizen_items_valid():
    """One DynamoDB scan page holding a single citizen-account item."""
    return {
        "Items": [
            {
                "AccountId": {"S": "1"},
                "AccountName": {"S": "Account1"},
                "ExecutionRoleArn": {"S": "arn:etc"}
            }
        ]
    }
@pytest.fixture(scope="function")
def _config_rule_status_items_valid():
    """One DynamoDB scan page holding a single config-rule-status item."""
    return {
        "Items": [
            {
                "AccountId": {"S": "1"},
                "AccountName": {"S": "Account1"},
                "RuleName": {"S": "CheckTags"}
            }
        ]
    }
@pytest.fixture(scope="function")
def _config_rules_comp_valid():
    """A describe_compliance_by_config_rule page with one COMPLIANT rule."""
    return {
        "ComplianceByConfigRules": [
            {
                "ConfigRuleName": "Rule1",
                "Compliance": {"ComplianceType": "COMPLIANT"}
            }
        ]
    }
@pytest.fixture(scope="function")
def _config_rules_valid():
    """A describe_config_rules page whose single rule is backed by a Lambda."""
    return {
        "ConfigRules": [
            {
                "ConfigRuleName": "Rule1",
                "Source": {
                    "SourceIdentifier": \
                    "arn:aws:lambda:ap-southeast-2:1234567890:function:ProxyLambda"
                }
            }
        ]
    }
@pytest.fixture(scope="function")
def _config_rule_invoke_success():
    """Evaluation-status payload for a rule whose last invocation succeeded."""
    return {
        "ConfigRulesEvaluationStatus": [
            {
                "LastSuccessfulInvocationTime": datetime.datetime(2018, 2, 27, 16, 52, 24, 964000, tzinfo=None),
                "FirstEvaluationStarted": True,
                "ConfigRuleName": "CheckConfigRule",
                "ConfigRuleArn": "arn:aws:config:ap-southeast-2:213618447103:config-rule/config-rule-3thzbc",
                "FirstActivatedTime": datetime.datetime(2017, 9, 12, 15, 5, 23, 46000, tzinfo=None),
                "LastSuccessfulEvaluationTime": datetime.datetime(2018, 2, 27, 16, 52, 39, 510000, tzinfo=None),
                "ConfigRuleId": "config-rule-3thzbc"
            }
        ]
    }
@patch("boto3.client")
def test_get_assumed_creds_empty(mock_b3_client):
    """An empty role dict yields no assumed credentials."""
    sts_client = boto3.client("sts")
    assert not import_config_rule_status.get_assumed_creds(sts_client, {})
@patch("boto3.client")
def test_get_assumed_creds(mock_b3_client):
    """A populated role dict yields assumed credentials."""
    sts_client = boto3.client("sts")
    creds = import_config_rule_status.get_assumed_creds(sts_client, {"creds": "TestCreds"})
    assert creds
@patch("boto3.client")
def test_get_table_items(mock_b3_client, _citizen_items_valid):
    """Scanning a table returns its items."""
    paginator = mock_b3_client("dynamodb").get_paginator("scan")
    paginator.paginate.return_value = [_citizen_items_valid]
    assert import_config_rule_status.get_table_items(boto3.client("dynamobd"), "TestTable")
@patch("boto3.client")
def test_get_config_rules_statuses(mock_b3_client, _config_rules_comp_valid):
    """Compliance pages are collected into rule statuses."""
    paginator = mock_b3_client("config").get_paginator("describe_compliance_by_config_rule")
    paginator.paginate.return_value = [_config_rules_comp_valid]
    assert import_config_rule_status.get_config_rules_statuses(boto3.client("config"))
@patch("boto3.client")
def test_import_config_rule_statuses(
        mock_b3_client,
        _citizen_items_valid,
        _config_rules_comp_valid,
        _config_rules_valid
):
    """import_config_rule_statuses completes (returns None) for one account."""
    pages = {
        "describe_compliance_by_config_rule": [_config_rules_comp_valid],
        "describe_config_rules": [_config_rules_valid],
    }

    def fake_get_paginator(api_name):
        paginator = Mock()
        if api_name in pages:
            paginator.paginate.return_value = pages[api_name]
        return paginator

    mock_b3_client("config").get_paginator.side_effect = fake_get_paginator
    assert import_config_rule_status.import_config_rule_statuses(
        "TestTable",
        _citizen_items_valid["Items"][0],
        boto3.client("sts"),
        boto3.client("dynamodb"),
        "",
        ""
    ) is None
@patch("boto3.client")
def test_get_config_rule_invoke_success(mock_b3_client, _config_rule_invoke_success):
    """get_config_rule_invoke_success reports SUCCESS for a successfully invoked rule."""
    # Parameter renamed 'mockb3_client' -> 'mock_b3_client' for consistency
    # with every other test here; @patch injects the mock positionally, so
    # the name is free to change. The unused invocation-time return value is
    # discarded into '_'.
    mock_b3_client("config").describe_config_rule_evaluation_status.return_value = \
        _config_rule_invoke_success
    _, invocation_result = import_config_rule_status.get_config_rule_invoke_success(
        boto3.client("config"), "CheckConfigRule")
    assert invocation_result == "SUCCESS"
@patch("boto3.client")
def test_delete_all_items(mock_b3_client, _config_rule_status_items_valid):
    """delete_all_items completes (returns None) over a scanned page."""
    paginator = mock_b3_client("dynamodb").get_paginator("scan")
    paginator.paginate.return_value = [_config_rule_status_items_valid]
    assert import_config_rule_status.delete_all_items(boto3.client("dynamodb"), None) is None
@patch("boto3.client")
@patch("reports.import_config_rule_status.import_config_rule_status.get_table_items")
@patch("reports.import_config_rule_status.import_config_rule_status.delete_all_items")
@patch("reports.import_config_rule_status.import_config_rule_status.get_assumed_creds")
def test_lambda_handler(mock_get_assumed_creds, mock_delete_all_items, mock_get_table_items, mock_b3_client, _citizen_items_valid):
    """The handler survives per-account failures and returns None."""
    mock_get_table_items.return_value = _citizen_items_valid["Items"]
    mock_delete_all_items.return_value = None
    # Force the per-account path to fail; the handler should swallow it.
    mock_get_assumed_creds.side_effect = Exception("Test error")
    assert import_config_rule_status.lambda_handler({}, None) is None
@patch("boto3.client")
def test_get_config_rules_sources(mock_b3_client, _config_rules_valid):
    """Rule names are mapped to their Lambda source identifiers."""
    paginator = mock_b3_client("config").get_paginator("describe_config_rules")
    paginator.paginate.return_value = [_config_rules_valid]
    sources = import_config_rule_status.get_config_rules_sources(boto3.client("config"))
    assert sources["Rule1"] == "arn:aws:lambda:ap-southeast-2:1234567890:function:ProxyLambda"
import pytest
import boto3
from mock import patch, Mock
import reports.import_config_rule_status.import_config_rule_status as import_config_rule_status
@pytest.fixture(scope="function")
def _citizen_items_valid():
    """One DynamoDB scan page holding a single citizen-account item."""
    return {
        "Items": [
            {
                "AccountId": {"S": "1"},
                "AccountName": {"S": "Account1"},
                "ExecutionRoleArn": {"S": "arn:etc"}
            }
        ]
    }
@pytest.fixture(scope="function")
def _config_rule_status_items_valid():
    """One DynamoDB scan page holding a single config-rule-status item."""
    return {
        "Items": [
            {
                "AccountId": {"S": "1"},
                "AccountName": {"S": "Account1"},
                "RuleName": {"S": "CheckTags"}
            }
        ]
    }
@pytest.fixture(scope="function")
def _config_rules_comp_valid():
    """A describe_compliance_by_config_rule page with one COMPLIANT rule."""
    return {
        "ComplianceByConfigRules": [
            {
                "ConfigRuleName": "Rule1",
                "Compliance": {"ComplianceType": "COMPLIANT"}
            }
        ]
    }
@pytest.fixture(scope="function")
def _config_rules_valid():
    """A describe_config_rules page whose single rule is backed by a Lambda."""
    return {
        "ConfigRules": [
            {
                "ConfigRuleName": "Rule1",
                "Source": {
                    "SourceIdentifier": \
                    "arn:aws:lambda:ap-southeast-2:1234567890:function:ProxyLambda"
                }
            }
        ]
    }
@pytest.fixture(scope="function")
def _config_rule_invoke_success():
    """Evaluation-status payload for a rule whose last invocation succeeded."""
    return {
        "ConfigRulesEvaluationStatus": [
            {
                "LastSuccessfulInvocationTime": datetime.datetime(2018, 2, 27, 16, 52, 24, 964000, tzinfo=None),
                "FirstEvaluationStarted": True,
                "ConfigRuleName": "CheckConfigRule",
                "ConfigRuleArn": "arn:aws:config:ap-southeast-2:213618447103:config-rule/config-rule-3thzbc",
                "FirstActivatedTime": datetime.datetime(2017, 9, 12, 15, 5, 23, 46000, tzinfo=None),
                "LastSuccessfulEvaluationTime": datetime.datetime(2018, 2, 27, 16, 52, 39, 510000, tzinfo=None),
                "ConfigRuleId": "config-rule-3thzbc"
            }
        ]
    }
@patch("boto3.client")
def test_get_assumed_creds_empty(mock_b3_client):
    """An empty role dict yields no assumed credentials."""
    sts_client = boto3.client("sts")
    assert not import_config_rule_status.get_assumed_creds(sts_client, {})
@patch("boto3.client")
def test_get_assumed_creds(mock_b3_client):
    """A populated role dict yields assumed credentials."""
    sts_client = boto3.client("sts")
    creds = import_config_rule_status.get_assumed_creds(sts_client, {"creds": "TestCreds"})
    assert creds
@patch("boto3.client")
def test_get_table_items(mock_b3_client, _citizen_items_valid):
    """Scanning a table returns its items."""
    paginator = mock_b3_client("dynamodb").get_paginator("scan")
    paginator.paginate.return_value = [_citizen_items_valid]
    assert import_config_rule_status.get_table_items(boto3.client("dynamobd"), "TestTable")
@patch("boto3.client")
def test_get_config_rules_statuses(mock_b3_client, _config_rules_comp_valid):
    """Compliance pages are collected into rule statuses."""
    paginator = mock_b3_client("config").get_paginator("describe_compliance_by_config_rule")
    paginator.paginate.return_value = [_config_rules_comp_valid]
    assert import_config_rule_status.get_config_rules_statuses(boto3.client("config"))
@patch("boto3.client")
def test_import_config_rule_statuses(
        mock_b3_client,
        _citizen_items_valid,
        _config_rules_comp_valid,
        _config_rules_valid
):
    """import_config_rule_statuses completes (returns None) for one account."""
    pages = {
        "describe_compliance_by_config_rule": [_config_rules_comp_valid],
        "describe_config_rules": [_config_rules_valid],
    }

    def fake_get_paginator(api_name):
        paginator = Mock()
        if api_name in pages:
            paginator.paginate.return_value = pages[api_name]
        return paginator

    mock_b3_client("config").get_paginator.side_effect = fake_get_paginator
    assert import_config_rule_status.import_config_rule_statuses(
        "TestTable",
        _citizen_items_valid["Items"][0],
        boto3.client("sts"),
        boto3.client("dynamodb"),
        "",
        ""
    ) is None
@patch("boto3.client")
def test_get_config_rule_invoke_success(mock_b3_client, _config_rule_invoke_success):
    """get_config_rule_invoke_success reports SUCCESS for a successfully invoked rule."""
    # Parameter renamed 'mockb3_client' -> 'mock_b3_client' for consistency
    # with every other test here; @patch injects the mock positionally, so
    # the name is free to change. The unused invocation-time return value is
    # discarded into '_'.
    mock_b3_client("config").describe_config_rule_evaluation_status.return_value = \
        _config_rule_invoke_success
    _, invocation_result = import_config_rule_status.get_config_rule_invoke_success(
        boto3.client("config"), "CheckConfigRule")
    assert invocation_result == "SUCCESS"
@patch("boto3.client")
def test_delete_all_items(mock_b3_client, _config_rule_status_items_valid):
    """delete_all_items completes (returns None) over a scanned page."""
    paginator = mock_b3_client("dynamodb").get_paginator("scan")
    paginator.paginate.return_value = [_config_rule_status_items_valid]
    assert import_config_rule_status.delete_all_items(boto3.client("dynamodb"), None) is None
@patch("boto3.client")
@patch("reports.import_config_rule_status.import_config_rule_status.get_table_items")
@patch("reports.import_config_rule_status.import_config_rule_status.delete_all_items")
@patch("reports.import_config_rule_status.import_config_rule_status.get_assumed_creds")
def test_lambda_handler(mock_get_assumed_creds, mock_delete_all_items, mock_get_table_items, mock_b3_client, _citizen_items_valid):
    """The handler survives per-account failures and returns None."""
    mock_get_table_items.return_value = _citizen_items_valid["Items"]
    mock_delete_all_items.return_value = None
    # Force the per-account path to fail; the handler should swallow it.
    mock_get_assumed_creds.side_effect = Exception("Test error")
    assert import_config_rule_status.lambda_handler({}, None) is None
@patch("boto3.client")
def test_get_config_rules_sources(mock_b3_client, _config_rules_valid):
    """Rule names are mapped to their Lambda source identifiers."""
    paginator = mock_b3_client("config").get_paginator("describe_config_rules")
    paginator.paginate.return_value = [_config_rules_valid]
    sources = import_config_rule_status.get_config_rules_sources(boto3.client("config"))
    assert sources["Rule1"] == "arn:aws:lambda:ap-southeast-2:1234567890:function:ProxyLambda"
__title__ = 'Create Lintel'
__author__ = 'htl'
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
import rpw
from rpw.ui.forms import Label, TextBox, Button, ComboBox, FlexForm
from htl import selection
uiapp = __revit__
uidoc = uiapp.ActiveUIDocument
app = uiapp.Application
doc = uidoc.Document
def create_lintel(host, l1, l2, beam_type):
host_height = doc.GetElement(host.GetTypeId()).LookupParameter('Height').AsDouble()
host_width = doc.GetElement(host.GetTypeId()).LookupParameter('Width').AsDouble()
level = doc.GetElement(host.Host.LevelId)
beam_height = beam_type.LookupParameter('h').AsDouble()
lintel_location_point = host.Location.Point + XYZ(0, 0, host_height + beam_height)
host_location_curve = host.Host.Location.Curve
l1 = l1/304.8
l2 = l2/304.8
if isinstance(host_location_curve, Line):
wall_direction = host.Host.Location.Curve.Direction
start = lintel_location_point - (l1 + host_width/2) * wall_direction
end = lintel_location_point + (l2 + host_width/2) * wall_direction
beam_location = Line.CreateBound(start, end)
curve = clr.Reference[Curve](beam_location)
overloads = (Curve, FamilySymbol, Level, Structure.StructuralType)
with rpw.db.Transaction('create lintel'):
beam = doc.Create.NewFamilyInstance.Overloads[overloads](beam_location, beam_type, level, Structure.StructuralType.Beam)
def main():
try:
elements = selection.select_objects_by_category('Windows', 'Doors')
except:
return
all_beam_types = rpw.db.Collector(of_category='Structural Framing', is_type=True).get_elements(wrapped=False)
components = [
Label('Lintel (Beam) Type:'),
ComboBox('beam_type', {b.LookupParameter('Type Name').AsString(): b for b in all_beam_types}),
Label('L1:'),
TextBox('l1'),
Label('L2:'),
TextBox('l2'),
Button('Create Lintels')
]
ff = FlexForm('Create Lintels', components)
ff.show()
if ff.values:
beam_type = ff.values['beam_type']
try:
l1 = float(ff.values['l1'])
l2 = float(ff.values['l2'])
except:
return
if not beam_type.IsActive:
with rpw.db.Transaction('Activate Beam Type'):
beam_type.Activate()
for e in elements:
create_lintel(e, l1, l2, beam_type)
if __name__ == '__main__':
main() | HTL.tab/Architecture.panel/Lintel.pushbutton/script.py | __title__ = 'Create Lintel'
__author__ = 'htl'
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
import rpw
from rpw.ui.forms import Label, TextBox, Button, ComboBox, FlexForm
from htl import selection
uiapp = __revit__
uidoc = uiapp.ActiveUIDocument
app = uiapp.Application
doc = uidoc.Document
def create_lintel(host, l1, l2, beam_type):
host_height = doc.GetElement(host.GetTypeId()).LookupParameter('Height').AsDouble()
host_width = doc.GetElement(host.GetTypeId()).LookupParameter('Width').AsDouble()
level = doc.GetElement(host.Host.LevelId)
beam_height = beam_type.LookupParameter('h').AsDouble()
lintel_location_point = host.Location.Point + XYZ(0, 0, host_height + beam_height)
host_location_curve = host.Host.Location.Curve
l1 = l1/304.8
l2 = l2/304.8
if isinstance(host_location_curve, Line):
wall_direction = host.Host.Location.Curve.Direction
start = lintel_location_point - (l1 + host_width/2) * wall_direction
end = lintel_location_point + (l2 + host_width/2) * wall_direction
beam_location = Line.CreateBound(start, end)
curve = clr.Reference[Curve](beam_location)
overloads = (Curve, FamilySymbol, Level, Structure.StructuralType)
with rpw.db.Transaction('create lintel'):
beam = doc.Create.NewFamilyInstance.Overloads[overloads](beam_location, beam_type, level, Structure.StructuralType.Beam)
def main():
try:
elements = selection.select_objects_by_category('Windows', 'Doors')
except:
return
all_beam_types = rpw.db.Collector(of_category='Structural Framing', is_type=True).get_elements(wrapped=False)
components = [
Label('Lintel (Beam) Type:'),
ComboBox('beam_type', {b.LookupParameter('Type Name').AsString(): b for b in all_beam_types}),
Label('L1:'),
TextBox('l1'),
Label('L2:'),
TextBox('l2'),
Button('Create Lintels')
]
ff = FlexForm('Create Lintels', components)
ff.show()
if ff.values:
beam_type = ff.values['beam_type']
try:
l1 = float(ff.values['l1'])
l2 = float(ff.values['l2'])
except:
return
if not beam_type.IsActive:
with rpw.db.Transaction('Activate Beam Type'):
beam_type.Activate()
for e in elements:
create_lintel(e, l1, l2, beam_type)
if __name__ == '__main__':
main() | 0.45641 | 0.136062 |
import codecs
import json
import random
def shuffle_list(paper_list,number):
random.shuffle(paper_list)
return paper_list[:number]
def expand_by_coop(flag):
with codecs.open("./raw_data/author_press.json","r","utf-8") as fid:
author_press = json.load(fid)
with codecs.open("./raw_data/author_cooperators.json","r","utf-8") as fid:
author_cooperators = json.load(fid)
with codecs.open("./raw_data/p_author_press_final.json","r","utf-8") as fid:
p_author_press = json.load(fid)
with codecs.open("./raw_data/t_author_press.json","r","utf-8") as fid:
t_author_press = json.load(fid)
if flag == 'training':
author_list = t_author_press.keys()
file_name = "./raw_data/t_author_press_ex_coop.json"
f_author_press = t_author_press
if flag == 'test':
author_list = p_author_press.keys()
file_name = "./raw_data/p_author_press_ex_coop.json"
f_author_press = p_author_press
t_author_press_ex = {}
for author in author_list:
press = author_press[author]
if len(f_author_press[author]) < 20:
coop = []
coop.extend(author_cooperators[author])
for v in coop:
press.extend(shuffle_list(author_press[author],20))
t_author_press_ex.setdefault(author,press)
with codecs.open(file_name,"w","utf-8") as fid:
json.dump(t_author_press_ex,fid,ensure_ascii=False)
def expand_by_cite(flag):
with codecs.open("./raw_data/author_indx_citeindx.json","r","utf-8") as fid:
author_indx_citeindx = json.load(fid)
with codecs.open("./raw_data/indx_press.json","r","utf-8") as fid:
indx_press = json.load(fid)
with codecs.open("./raw_data/p_author_press_final.json","r","utf-8") as fid:
p_author_press = json.load(fid)
with codecs.open("./raw_data/t_author_press.json","r","utf-8") as fid:
t_author_press = json.load(fid)
if flag == 'training':
author_list = t_author_press.keys()
file_name = "./raw_data/t_author_press_ex_cite.json"
f_author_press = t_author_press
if flag == 'test':
author_list = p_author_press.keys()
file_name = "./raw_data/p_author_press_ex_cite.json"
f_author_press = p_author_press
t_author_press_ex = {}
for author in author_list:
press = f_author_press[author]
if len(f_author_press[author]) < 20:
cite = []
for indx in author_indx_citeindx[author].keys():
cite.extend(author_indx_citeindx[author][indx])
for v in cite:
press.extend(indx_press[str(v)])
t_author_press_ex.setdefault(author,press)
with codecs.open(file_name,"w","utf-8") as fid:
json.dump(t_author_press_ex,fid,ensure_ascii=False)
def read_paper():
indx_press = {}
with codecs.open("./raw_data/papers.txt","r","utf-8") as fid:
for eachLine in fid:
if eachLine.startswith('#index'):
i = int(eachLine[6:])
indx_press.setdefault(str(i),[])
elif eachLine.startswith("#c"):
press = eachLine[2:-1].strip()
indx_press[str(i)].append(press)
else:
pass
with codecs.open("./raw_data/indx_press.json","w",'utf-8') as fid_json:
json.dump(indx_press,fid_json,ensure_ascii=False)
if __name__ == "__main__":
read_paper()
expand_by_cite('training')
expand_by_coop('training')
expand_by_cite('test')
expand_by_coop('test') | code/expand_author_press.py | import codecs
import json
import random
def shuffle_list(paper_list,number):
random.shuffle(paper_list)
return paper_list[:number]
def expand_by_coop(flag):
with codecs.open("./raw_data/author_press.json","r","utf-8") as fid:
author_press = json.load(fid)
with codecs.open("./raw_data/author_cooperators.json","r","utf-8") as fid:
author_cooperators = json.load(fid)
with codecs.open("./raw_data/p_author_press_final.json","r","utf-8") as fid:
p_author_press = json.load(fid)
with codecs.open("./raw_data/t_author_press.json","r","utf-8") as fid:
t_author_press = json.load(fid)
if flag == 'training':
author_list = t_author_press.keys()
file_name = "./raw_data/t_author_press_ex_coop.json"
f_author_press = t_author_press
if flag == 'test':
author_list = p_author_press.keys()
file_name = "./raw_data/p_author_press_ex_coop.json"
f_author_press = p_author_press
t_author_press_ex = {}
for author in author_list:
press = author_press[author]
if len(f_author_press[author]) < 20:
coop = []
coop.extend(author_cooperators[author])
for v in coop:
press.extend(shuffle_list(author_press[author],20))
t_author_press_ex.setdefault(author,press)
with codecs.open(file_name,"w","utf-8") as fid:
json.dump(t_author_press_ex,fid,ensure_ascii=False)
def expand_by_cite(flag):
with codecs.open("./raw_data/author_indx_citeindx.json","r","utf-8") as fid:
author_indx_citeindx = json.load(fid)
with codecs.open("./raw_data/indx_press.json","r","utf-8") as fid:
indx_press = json.load(fid)
with codecs.open("./raw_data/p_author_press_final.json","r","utf-8") as fid:
p_author_press = json.load(fid)
with codecs.open("./raw_data/t_author_press.json","r","utf-8") as fid:
t_author_press = json.load(fid)
if flag == 'training':
author_list = t_author_press.keys()
file_name = "./raw_data/t_author_press_ex_cite.json"
f_author_press = t_author_press
if flag == 'test':
author_list = p_author_press.keys()
file_name = "./raw_data/p_author_press_ex_cite.json"
f_author_press = p_author_press
t_author_press_ex = {}
for author in author_list:
press = f_author_press[author]
if len(f_author_press[author]) < 20:
cite = []
for indx in author_indx_citeindx[author].keys():
cite.extend(author_indx_citeindx[author][indx])
for v in cite:
press.extend(indx_press[str(v)])
t_author_press_ex.setdefault(author,press)
with codecs.open(file_name,"w","utf-8") as fid:
json.dump(t_author_press_ex,fid,ensure_ascii=False)
def read_paper():
indx_press = {}
with codecs.open("./raw_data/papers.txt","r","utf-8") as fid:
for eachLine in fid:
if eachLine.startswith('#index'):
i = int(eachLine[6:])
indx_press.setdefault(str(i),[])
elif eachLine.startswith("#c"):
press = eachLine[2:-1].strip()
indx_press[str(i)].append(press)
else:
pass
with codecs.open("./raw_data/indx_press.json","w",'utf-8') as fid_json:
json.dump(indx_press,fid_json,ensure_ascii=False)
if __name__ == "__main__":
read_paper()
expand_by_cite('training')
expand_by_coop('training')
expand_by_cite('test')
expand_by_coop('test') | 0.071118 | 0.17637 |
import abc
import os
from plaso.lib import errors
from plaso.parsers import manager
class BaseFileEntryFilter(object):
"""Class that defines the file entry filter interface."""
@abc.abstractmethod
def Match(self, file_entry):
"""Determines if a file entry matches the filter.
Args:
file_entry: a file entry object (instance of dfvfs.FileEntry).
Returns:
A boolean value that indicates a match.
"""
class FileNameFileEntryFilter(BaseFileEntryFilter):
"""Class that defines a file name file entry filter."""
def __init__(self, filename):
"""Initializes a file entry filter object.
Args:
filename: string containing the name of the file.
"""
super(FileNameFileEntryFilter, self).__init__()
self._filename = filename.lower()
def Match(self, file_entry):
"""Determines if a file entry matches the filter.
Args:
file_entry: a file entry object (instance of dfvfs.FileEntry).
Returns:
A boolean value that indicates a match.
"""
if not file_entry:
return False
filename = file_entry.name.lower()
return filename == self._filename
class BaseParser(object):
"""Class that defines the parser object interface."""
NAME = u'base_parser'
DESCRIPTION = u''
# List of filters that should match for the parser to be applied.
FILTERS = frozenset()
# Every derived parser class that implements plugins should define
# its own _plugin_classes dict:
# _plugin_classes = {}
# We deliberately don't define it here to make sure the plugins of
# different parser classes don't end up in the same dict.
_plugin_classes = None
@classmethod
def DeregisterPlugin(cls, plugin_class):
"""Deregisters a plugin class.
The plugin classes are identified based on their lower case name.
Args:
plugin_class: the class object of the plugin.
Raises:
KeyError: if plugin class is not set for the corresponding name.
"""
plugin_name = plugin_class.NAME.lower()
if plugin_name not in cls._plugin_classes:
raise KeyError(
u'Plugin class not set for name: {0:s}.'.format(
plugin_class.NAME))
del cls._plugin_classes[plugin_name]
# TOOD: move this to a filter.
@classmethod
def GetFormatSpecification(cls):
"""Retrieves the format specification.
Returns:
The format specification (instance of FormatSpecification) or
None if not available."""
return
@classmethod
def GetPluginNames(cls, parser_filter_string=None):
"""Retrieves the plugin names.
Args:
parser_filter_string: optional parser filter string.
Returns:
A list of plugin names.
"""
plugin_names = []
for plugin_name, _ in cls.GetPlugins(
parser_filter_string=parser_filter_string):
plugin_names.append(plugin_name)
return sorted(plugin_names)
@classmethod
def GetPluginObjectByName(cls, plugin_name):
"""Retrieves a specific plugin object by its name.
Args:
plugin_name: the name of the plugin.
Returns:
A plugin object (instance of BasePlugin) or None.
"""
plugin_class = cls._plugin_classes.get(plugin_name, None)
if not plugin_class:
return
return plugin_class()
@classmethod
def GetPluginObjects(cls, parser_filter_string=None):
"""Retrieves the plugin objects.
Args:
parser_filter_string: optional parser filter string.
Returns:
A list of plugin objects (instances of BasePlugin).
"""
plugin_objects = []
for _, plugin_class in cls.GetPlugins(
parser_filter_string=parser_filter_string):
plugin_object = plugin_class()
plugin_objects.append(plugin_object)
return plugin_objects
@classmethod
def GetPlugins(cls, parser_filter_string=None):
"""Retrieves the registered plugins.
Args:
parser_filter_string: optional parser filter string.
Yields:
A tuple that contains the uniquely identifying name of the plugin
and the plugin class (subclass of BasePlugin).
"""
if parser_filter_string:
includes, excludes = manager.ParsersManager.GetFilterListsFromString(
parser_filter_string)
else:
includes = None
excludes = None
for plugin_name, plugin_class in cls._plugin_classes.iteritems():
if excludes and plugin_name in excludes:
continue
if includes and plugin_name not in includes:
continue
yield plugin_name, plugin_class
@classmethod
def RegisterPlugin(cls, plugin_class):
"""Registers a plugin class.
The plugin classes are identified based on their lower case name.
Args:
plugin_class: the class object of the plugin.
Raises:
KeyError: if plugin class is already set for the corresponding name.
"""
plugin_name = plugin_class.NAME.lower()
if plugin_name in cls._plugin_classes:
raise KeyError((
u'Plugin class already set for name: {0:s}.').format(
plugin_class.NAME))
cls._plugin_classes[plugin_name] = plugin_class
@classmethod
def RegisterPlugins(cls, plugin_classes):
"""Registers plugin classes.
Args:
plugin_classes: a list of class objects of the plugins.
Raises:
KeyError: if plugin class is already set for the corresponding name.
"""
for plugin_class in plugin_classes:
cls.RegisterPlugin(plugin_class)
@classmethod
def SupportsPlugins(cls):
"""Determines if a parser supports plugins.
Returns:
A boolean value indicating whether the parser supports plugins.
"""
return cls._plugin_classes is not None
class FileEntryParser(BaseParser):
"""Class that defines the file entry parser interface."""
def Parse(self, parser_mediator, **kwargs):
"""Parsers the file entry and extracts event objects.
Args:
parser_mediator: a parser mediator object (instance of ParserMediator).
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
file_entry = parser_mediator.GetFileEntry()
if not file_entry:
raise errors.UnableToParseFile(u'Invalid file entry')
parser_mediator.AppendToParserChain(self)
try:
self.ParseFileEntry(parser_mediator, file_entry, **kwargs)
finally:
parser_mediator.PopFromParserChain()
@abc.abstractmethod
def ParseFileEntry(self, parser_mediator, file_entry, **kwargs):
"""Parses a file entry.
Args:
parser_mediator: a parser mediator object (instance of ParserMediator).
file_entry: a file entry object (instance of dfvfs.FileEntry).
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
class FileObjectParser(BaseParser):
"""Class that defines the file-like object parser interface."""
# The initial file offset. Set this value to None if no initial
# file offset seek needs to be performed.
_INITIAL_FILE_OFFSET = 0
def Parse(self, parser_mediator, file_object, **kwargs):
"""Parses a single file-like object.
Args:
parser_mediator: a parser mediator object (instance of ParserMediator).
file_object: a file-like object to parse.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
if not file_object:
raise errors.UnableToParseFile(u'Invalid file object')
if self._INITIAL_FILE_OFFSET is not None:
file_object.seek(self._INITIAL_FILE_OFFSET, os.SEEK_SET)
parser_mediator.AppendToParserChain(self)
try:
self.ParseFileObject(parser_mediator, file_object, **kwargs)
finally:
parser_mediator.PopFromParserChain()
@abc.abstractmethod
def ParseFileObject(self, parser_mediator, file_object, **kwargs):
"""Parses a file-like object.
Args:
parser_mediator: a parser mediator object (instance of ParserMediator).
file_object: a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
""" | plaso/parsers/interface.py | import abc
import os
from plaso.lib import errors
from plaso.parsers import manager
class BaseFileEntryFilter(object):
"""Class that defines the file entry filter interface."""
@abc.abstractmethod
def Match(self, file_entry):
"""Determines if a file entry matches the filter.
Args:
file_entry: a file entry object (instance of dfvfs.FileEntry).
Returns:
A boolean value that indicates a match.
"""
class FileNameFileEntryFilter(BaseFileEntryFilter):
"""Class that defines a file name file entry filter."""
def __init__(self, filename):
"""Initializes a file entry filter object.
Args:
filename: string containing the name of the file.
"""
super(FileNameFileEntryFilter, self).__init__()
self._filename = filename.lower()
def Match(self, file_entry):
"""Determines if a file entry matches the filter.
Args:
file_entry: a file entry object (instance of dfvfs.FileEntry).
Returns:
A boolean value that indicates a match.
"""
if not file_entry:
return False
filename = file_entry.name.lower()
return filename == self._filename
class BaseParser(object):
"""Class that defines the parser object interface."""
NAME = u'base_parser'
DESCRIPTION = u''
# List of filters that should match for the parser to be applied.
FILTERS = frozenset()
# Every derived parser class that implements plugins should define
# its own _plugin_classes dict:
# _plugin_classes = {}
# We deliberately don't define it here to make sure the plugins of
# different parser classes don't end up in the same dict.
_plugin_classes = None
@classmethod
def DeregisterPlugin(cls, plugin_class):
"""Deregisters a plugin class.
The plugin classes are identified based on their lower case name.
Args:
plugin_class: the class object of the plugin.
Raises:
KeyError: if plugin class is not set for the corresponding name.
"""
plugin_name = plugin_class.NAME.lower()
if plugin_name not in cls._plugin_classes:
raise KeyError(
u'Plugin class not set for name: {0:s}.'.format(
plugin_class.NAME))
del cls._plugin_classes[plugin_name]
# TOOD: move this to a filter.
@classmethod
def GetFormatSpecification(cls):
"""Retrieves the format specification.
Returns:
The format specification (instance of FormatSpecification) or
None if not available."""
return
@classmethod
def GetPluginNames(cls, parser_filter_string=None):
"""Retrieves the plugin names.
Args:
parser_filter_string: optional parser filter string.
Returns:
A list of plugin names.
"""
plugin_names = []
for plugin_name, _ in cls.GetPlugins(
parser_filter_string=parser_filter_string):
plugin_names.append(plugin_name)
return sorted(plugin_names)
@classmethod
def GetPluginObjectByName(cls, plugin_name):
"""Retrieves a specific plugin object by its name.
Args:
plugin_name: the name of the plugin.
Returns:
A plugin object (instance of BasePlugin) or None.
"""
plugin_class = cls._plugin_classes.get(plugin_name, None)
if not plugin_class:
return
return plugin_class()
@classmethod
def GetPluginObjects(cls, parser_filter_string=None):
"""Retrieves the plugin objects.
Args:
parser_filter_string: optional parser filter string.
Returns:
A list of plugin objects (instances of BasePlugin).
"""
plugin_objects = []
for _, plugin_class in cls.GetPlugins(
parser_filter_string=parser_filter_string):
plugin_object = plugin_class()
plugin_objects.append(plugin_object)
return plugin_objects
@classmethod
def GetPlugins(cls, parser_filter_string=None):
"""Retrieves the registered plugins.
Args:
parser_filter_string: optional parser filter string.
Yields:
A tuple that contains the uniquely identifying name of the plugin
and the plugin class (subclass of BasePlugin).
"""
if parser_filter_string:
includes, excludes = manager.ParsersManager.GetFilterListsFromString(
parser_filter_string)
else:
includes = None
excludes = None
for plugin_name, plugin_class in cls._plugin_classes.iteritems():
if excludes and plugin_name in excludes:
continue
if includes and plugin_name not in includes:
continue
yield plugin_name, plugin_class
@classmethod
def RegisterPlugin(cls, plugin_class):
"""Registers a plugin class.
The plugin classes are identified based on their lower case name.
Args:
plugin_class: the class object of the plugin.
Raises:
KeyError: if plugin class is already set for the corresponding name.
"""
plugin_name = plugin_class.NAME.lower()
if plugin_name in cls._plugin_classes:
raise KeyError((
u'Plugin class already set for name: {0:s}.').format(
plugin_class.NAME))
cls._plugin_classes[plugin_name] = plugin_class
@classmethod
def RegisterPlugins(cls, plugin_classes):
"""Registers plugin classes.
Args:
plugin_classes: a list of class objects of the plugins.
Raises:
KeyError: if plugin class is already set for the corresponding name.
"""
for plugin_class in plugin_classes:
cls.RegisterPlugin(plugin_class)
@classmethod
def SupportsPlugins(cls):
"""Determines if a parser supports plugins.
Returns:
A boolean value indicating whether the parser supports plugins.
"""
return cls._plugin_classes is not None
class FileEntryParser(BaseParser):
"""Class that defines the file entry parser interface."""
def Parse(self, parser_mediator, **kwargs):
"""Parsers the file entry and extracts event objects.
Args:
parser_mediator: a parser mediator object (instance of ParserMediator).
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
file_entry = parser_mediator.GetFileEntry()
if not file_entry:
raise errors.UnableToParseFile(u'Invalid file entry')
parser_mediator.AppendToParserChain(self)
try:
self.ParseFileEntry(parser_mediator, file_entry, **kwargs)
finally:
parser_mediator.PopFromParserChain()
@abc.abstractmethod
def ParseFileEntry(self, parser_mediator, file_entry, **kwargs):
"""Parses a file entry.
Args:
parser_mediator: a parser mediator object (instance of ParserMediator).
file_entry: a file entry object (instance of dfvfs.FileEntry).
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
class FileObjectParser(BaseParser):
"""Class that defines the file-like object parser interface."""
# The initial file offset. Set this value to None if no initial
# file offset seek needs to be performed.
_INITIAL_FILE_OFFSET = 0
def Parse(self, parser_mediator, file_object, **kwargs):
"""Parses a single file-like object.
Args:
parser_mediator: a parser mediator object (instance of ParserMediator).
file_object: a file-like object to parse.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
if not file_object:
raise errors.UnableToParseFile(u'Invalid file object')
if self._INITIAL_FILE_OFFSET is not None:
file_object.seek(self._INITIAL_FILE_OFFSET, os.SEEK_SET)
parser_mediator.AppendToParserChain(self)
try:
self.ParseFileObject(parser_mediator, file_object, **kwargs)
finally:
parser_mediator.PopFromParserChain()
@abc.abstractmethod
def ParseFileObject(self, parser_mediator, file_object, **kwargs):
"""Parses a file-like object.
Args:
parser_mediator: a parser mediator object (instance of ParserMediator).
file_object: a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
""" | 0.85744 | 0.305141 |
"""Import of required packages/libraries."""
import datetime
import os
from flask import flash
from flask import Flask
from flask import redirect
from flask import render_template
from flask import request
from flask import Response
from flask import send_file
from flask import url_for
from flask_basicauth import BasicAuth
from flask_bootstrap import Bootstrap
import forms
from forms import BRAND_TRACK
import survey_service
app = Flask(__name__)
app.config['SECRET_KEY'] = 'supersecretkey'
Bootstrap(app)
app.config['BASIC_AUTH_USERNAME'] = os.environ.get('AUTH_USERNAME')
app.config['BASIC_AUTH_PASSWORD'] = os.environ.get('AUTH_PASSWORD')
basic_auth = BasicAuth(app)
app.config['BASIC_AUTH_FORCE'] = True
@app.route('/')
def root():
return redirect(url_for('index'))
@app.route('/index')
def index():
all_surveys = survey_service.get_all()
return render_template('index.html', all_surveys=all_surveys)
@app.route('/survey/create', methods=['GET', 'POST'])
def create():
"""Survey creation."""
form = forms.QuestionForm()
if form.validate_on_submit():
survey_service.create(form)
return redirect(url_for('index'))
return render_template('questions.html', title='Survey Creation', form=form)
@app.route('/survey/preview/<string:survey_id>', methods=['GET'])
def preview(survey_id):
"""Survey preview."""
survey_doc = survey_service.get_doc_by_id(survey_id)
if survey_doc.exists:
survey_info = survey_doc.to_dict()
return render_template(
'creative.html',
survey=survey_info,
survey_id=survey_id,
manual_responses=True,
show_back_button=True,
all_question_json=survey_service.get_question_json(survey_info),
seg='preview',
thankyou_text=survey_service.get_thank_you_text(survey_info),
next_text=survey_service.get_next_text(survey_info),
comment_text=survey_service.get_comment_text(survey_info))
else:
flash('Survey not found')
return redirect(url_for('index'))
@app.route('/survey/delete', methods=['GET', 'DELETE'])
def delete():
"""Delete survey."""
if request.method == 'GET':
docref_id = request.args.get('survey_id')
survey_service.delete_by_id(docref_id)
flash(f'Survey \'{docref_id}\' deleted')
return redirect(url_for('index'))
@app.route('/survey/edit', methods=['POST', 'PUT', 'GET'])
def edit():
"""Edit Survey."""
form = forms.QuestionForm()
docref_id = request.args.get('survey_id')
edit_doc = survey_service.get_doc_by_id(docref_id)
if request.method == 'GET':
survey_service.set_form_data(form, edit_doc)
if form.validate_on_submit():
survey_service.update_by_id(docref_id, form)
return redirect(url_for('index'))
return render_template('questions.html', form=form)
@app.route('/survey/download_zip/<string:survey_id>', methods=['GET'])
def download_zip(survey_id):
"""Download zip of survey creative(s)."""
survey_doc = survey_service.get_doc_by_id(survey_id)
filename, data = survey_service.zip_file(survey_id, survey_doc.to_dict())
return send_file(
data,
mimetype='application/zip',
add_etags=False,
cache_timeout=0,
last_modified=datetime.datetime.now(),
as_attachment=True,
attachment_filename=filename)
@app.route('/survey/download_responses/<string:survey_id>', methods=['GET'])
def download_responses(survey_id):
"""Download survey responses."""
if request.method == 'GET':
csv = survey_service.download_responses(survey_id)
return Response(
csv,
mimetype='text/csv',
headers={'Content-disposition': 'attachment; filename=surveydata.csv'})
@app.route('/survey/reporting/<string:survey_id>', methods=['GET'])
def reporting(survey_id):
"""Survey reporting."""
survey_doc = survey_service.get_doc_by_id(survey_id)
if survey_doc.exists:
survey_info = survey_doc.to_dict()
results = survey_service.get_brand_lift_results(survey_id)
return render_template(
'reporting.html',
results=results,
survey=survey_info,
survey_id=survey_id)
else:
flash('Survey not found')
return redirect(url_for('index'))
@app.context_processor
def inject_receiver_params():
return {
'receiver_url':
os.environ.get(
'RECEIVER_URL',
'https://us-central1-jerraldwee-testing.cloudfunctions.net/receiver'
)
}
@app.template_filter('get_all_question_text')
def get_all_question_text(survey):
return survey_service.get_all_question_text(survey.to_dict())
@app.template_filter('format_percentage')
def format_percentage(num):
return '{:.2%}'.format(num)
@app.template_filter('has_reporting')
def is_brand_track(survey):
return survey.to_dict().get('surveytype', '') != BRAND_TRACK
if __name__ == '__main__':
app.run(host='127.0.0.1', port=5000, debug=True) | creative/app/main.py | """Import of required packages/libraries."""
import datetime
import os
from flask import flash
from flask import Flask
from flask import redirect
from flask import render_template
from flask import request
from flask import Response
from flask import send_file
from flask import url_for
from flask_basicauth import BasicAuth
from flask_bootstrap import Bootstrap
import forms
from forms import BRAND_TRACK
import survey_service
app = Flask(__name__)
app.config['SECRET_KEY'] = 'supersecretkey'
Bootstrap(app)
app.config['BASIC_AUTH_USERNAME'] = os.environ.get('AUTH_USERNAME')
app.config['BASIC_AUTH_PASSWORD'] = os.environ.get('AUTH_PASSWORD')
basic_auth = BasicAuth(app)
app.config['BASIC_AUTH_FORCE'] = True
@app.route('/')
def root():
return redirect(url_for('index'))
@app.route('/index')
def index():
all_surveys = survey_service.get_all()
return render_template('index.html', all_surveys=all_surveys)
@app.route('/survey/create', methods=['GET', 'POST'])
def create():
"""Survey creation."""
form = forms.QuestionForm()
if form.validate_on_submit():
survey_service.create(form)
return redirect(url_for('index'))
return render_template('questions.html', title='Survey Creation', form=form)
@app.route('/survey/preview/<string:survey_id>', methods=['GET'])
def preview(survey_id):
"""Survey preview."""
survey_doc = survey_service.get_doc_by_id(survey_id)
if survey_doc.exists:
survey_info = survey_doc.to_dict()
return render_template(
'creative.html',
survey=survey_info,
survey_id=survey_id,
manual_responses=True,
show_back_button=True,
all_question_json=survey_service.get_question_json(survey_info),
seg='preview',
thankyou_text=survey_service.get_thank_you_text(survey_info),
next_text=survey_service.get_next_text(survey_info),
comment_text=survey_service.get_comment_text(survey_info))
else:
flash('Survey not found')
return redirect(url_for('index'))
@app.route('/survey/delete', methods=['GET', 'DELETE'])
def delete():
"""Delete survey."""
if request.method == 'GET':
docref_id = request.args.get('survey_id')
survey_service.delete_by_id(docref_id)
flash(f'Survey \'{docref_id}\' deleted')
return redirect(url_for('index'))
@app.route('/survey/edit', methods=['POST', 'PUT', 'GET'])
def edit():
"""Edit Survey."""
form = forms.QuestionForm()
docref_id = request.args.get('survey_id')
edit_doc = survey_service.get_doc_by_id(docref_id)
if request.method == 'GET':
survey_service.set_form_data(form, edit_doc)
if form.validate_on_submit():
survey_service.update_by_id(docref_id, form)
return redirect(url_for('index'))
return render_template('questions.html', form=form)
@app.route('/survey/download_zip/<string:survey_id>', methods=['GET'])
def download_zip(survey_id):
"""Download zip of survey creative(s)."""
survey_doc = survey_service.get_doc_by_id(survey_id)
filename, data = survey_service.zip_file(survey_id, survey_doc.to_dict())
return send_file(
data,
mimetype='application/zip',
add_etags=False,
cache_timeout=0,
last_modified=datetime.datetime.now(),
as_attachment=True,
attachment_filename=filename)
@app.route('/survey/download_responses/<string:survey_id>', methods=['GET'])
def download_responses(survey_id):
    """Serve the survey's collected responses as a CSV attachment."""
    if request.method == 'GET':
        csv_payload = survey_service.download_responses(survey_id)
        attachment_headers = {
            'Content-disposition': 'attachment; filename=surveydata.csv'
        }
        return Response(
            csv_payload, mimetype='text/csv', headers=attachment_headers)
@app.route('/survey/reporting/<string:survey_id>', methods=['GET'])
def reporting(survey_id):
    """Show the brand-lift reporting page for a survey, or redirect home."""
    survey_doc = survey_service.get_doc_by_id(survey_id)
    if not survey_doc.exists:
        flash('Survey not found')
        return redirect(url_for('index'))
    lift_results = survey_service.get_brand_lift_results(survey_id)
    return render_template(
        'reporting.html',
        results=lift_results,
        survey=survey_doc.to_dict(),
        survey_id=survey_id)
@app.context_processor
def inject_receiver_params():
    """Expose the response-receiver endpoint URL to every template context."""
    default_receiver = (
        'https://us-central1-jerraldwee-testing.cloudfunctions.net/receiver')
    return {'receiver_url': os.environ.get('RECEIVER_URL', default_receiver)}
@app.template_filter('get_all_question_text')
def get_all_question_text(survey):
    """Template filter delegating to survey_service.get_all_question_text."""
    survey_dict = survey.to_dict()
    return survey_service.get_all_question_text(survey_dict)
@app.template_filter('format_percentage')
def format_percentage(num):
    """Template filter: render a ratio as a percentage with two decimals."""
    return f'{num:.2%}'
@app.template_filter('has_reporting')
def is_brand_track(survey):
    """Template filter 'has_reporting': True when 'surveytype' differs from
    BRAND_TRACK.

    NOTE(review): the function name suggests the opposite of what it
    returns — confirm whether the comparison should be `==` or the
    function should be renamed.
    """
    return survey.to_dict().get('surveytype', '') != BRAND_TRACK
if __name__ == '__main__':
    # Development entry point only — debug=True must not reach production.
    app.run(host='127.0.0.1', port=5000, debug=True) | 0.510496 | 0.08374
"""Module with classes to format results from the Google Cloud Translation API"""
import logging
import pandas as pd
from typing import AnyStr, Dict
from plugin_io_utils import (
API_COLUMN_NAMES_DESCRIPTION_DICT,
ErrorHandlingEnum,
build_unique_column_names,
generate_unique,
safe_json_loads,
move_api_columns_to_end,
)
# Language code (ISO-639-1 style, plus region-tagged codes such as zh-CN)
# mapped to its English display label; used below to build human-readable
# output-column descriptions for the translation formatter.
LANGUAGE_CODE_LABELS = {
    "af": "Afrikaans",
    "sq": "Albanian",
    "am": "Amharic",
    "ar": "Arabic",
    "hy": "Armenian",
    "az": "Azerbaijani",
    "eu": "Basque",
    "be": "Belarusian",
    "bn": "Bengali",
    "bs": "Bosnian",
    "bg": "Bulgarian",
    "ca": "Catalan",
    "ceb": "Cebuano",
    "zh-CN": "Chinese (Simplified)",
    "zh-TW": "Chinese (Traditional)",
    "co": "Corsican",
    "hr": "Croatian",
    "cs": "Czech",
    "da": "Danish",
    "nl": "Dutch",
    "en": "English",
    "eo": "Esperanto",
    "et": "Estonian",
    "fi": "Finnish",
    "fr": "French",
    "fy": "Frisian",
    "gl": "Galician",
    "ka": "Georgian",
    "de": "German",
    "el": "Greek",
    "gu": "Gujarati",
    "ht": "Haitian",
    "ha": "Hausa",
    "haw": "Hawaiian",
    "he": "Hebrew",
    "hi": "Hindi",
    "hmn": "Hmong",
    "hu": "Hungarian",
    "is": "Icelandic",
    "ig": "Igbo",
    "id": "Indonesian",
    "ga": "Irish",
    "it": "Italian",
    "ja": "Japanese",
    "jv": "Javanese",
    "kn": "Kannada",
    "kk": "Kazakh",
    "km": "Khmer",
    "rw": "Kinyarwanda",
    "ko": "Korean",
    "ku": "Kurdish",
    "ky": "Kyrgyz",
    "lo": "Lao",
    "la": "Latin",
    "lv": "Latvian",
    "lt": "Lithuanian",
    "lb": "Luxembourgish",
    "mk": "Macedonian",
    "mg": "Malagasy",
    "ms": "Malay",
    "ml": "Malayalam",
    "mt": "Maltese",
    "mi": "Maori",
    "mr": "Marathi",
    "mn": "Mongolian",
    "my": "Myanmar",
    "ne": "Nepali",
    "no": "Norwegian",
    "ny": "Nyanja",
    "or": "Odia",
    "ps": "Pashto",
    "fa": "Persian",
    "pl": "Polish",
    "pt": "Portuguese",
    "pa": "Punjabi",
    "ro": "Romanian",
    "ru": "Russian",
    "sm": "Samoan",
    "gd": "Scots",
    "sr": "Serbian",
    "st": "Sesotho",
    "sn": "Shona",
    "sd": "Sindhi",
    "si": "Sinhala",
    "sk": "Slovak",
    "sl": "Slovenian",
    "so": "Somali",
    "es": "Spanish",
    "su": "Sundanese",
    "sw": "Swahili",
    "sv": "Swedish",
    "tl": "Tagalog",
    "tg": "Tajik",
    "ta": "Tamil",
    "tt": "Tatar",
    "te": "Telugu",
    "th": "Thai",
    "tr": "Turkish",
    "tk": "Turkmen",
    "uk": "Ukrainian",
    "ur": "Urdu",
    "ug": "Uyghur",
    "uz": "Uzbek",
    "vi": "Vietnamese",
    "cy": "Welsh",
    "xh": "Xhosa",
    "yi": "Yiddish",
    "yo": "Yoruba",
    "zu": "Zulu",
}
# ==============================================================================
# CLASS AND FUNCTION DEFINITION
# ==============================================================================
class GenericAPIFormatter:
    """Generic formatter for API responses.

    Stores the input DataFrame and the generated API column names, builds
    the generic column-description mapping, and applies ``format_row``
    across a DataFrame via ``format_df``.
    """

    def __init__(
        self,
        input_df: pd.DataFrame,
        column_prefix: AnyStr = "api",
        error_handling: ErrorHandlingEnum = ErrorHandlingEnum.LOG,
    ):
        self.input_df = input_df
        self.column_prefix = column_prefix
        self.error_handling = error_handling
        self.api_column_names = build_unique_column_names(input_df, column_prefix)
        # Map each generated API column name to its generic description.
        self.column_description_dict = {
            column_name: API_COLUMN_NAMES_DESCRIPTION_DICT[field_name]
            for field_name, column_name in self.api_column_names._asdict().items()
        }

    def format_row(self, row: Dict) -> Dict:
        """Identity row formatter; subclasses override with real parsing."""
        return row

    def format_df(self, df: pd.DataFrame) -> pd.DataFrame:
        """Apply ``format_row`` to every row, then move API columns to the end."""
        logging.info("Formatting API results...")
        formatted = df.apply(func=self.format_row, axis=1)
        formatted = move_api_columns_to_end(
            formatted, self.api_column_names, self.error_handling)
        logging.info("Formatting API results: Done.")
        return formatted
class TranslationAPIFormatter(GenericAPIFormatter):
    """Formatter for Google Cloud Translation API responses.

    Parses the raw JSON response column into a translated-text column and,
    when no source language was fixed, a detected-language column.
    """

    def __init__(
        self,
        input_df: pd.DataFrame,
        input_column: AnyStr,
        target_language: AnyStr,
        source_language: AnyStr = None,
        column_prefix: AnyStr = "translation_api",
        error_handling: ErrorHandlingEnum = ErrorHandlingEnum.LOG,
    ):
        super().__init__(input_df, column_prefix, error_handling)
        # Output column names are de-duplicated against existing columns.
        self.translated_text_column_name = generate_unique(
            f"{input_column}_{target_language.replace('-', '_')}", input_df.columns, prefix=None
        )
        self.detected_language_column_name = generate_unique(f"{input_column}_language", input_df.columns, prefix=None)
        self.source_language = source_language
        self.input_column = input_column
        self.input_df_columns = input_df.columns
        self.target_language = target_language
        # NOTE(review): raises KeyError for a target_language missing from
        # LANGUAGE_CODE_LABELS — presumably the UI restricts the choices;
        # confirm, or fall back to the raw code.
        self.target_language_label = LANGUAGE_CODE_LABELS[self.target_language]
        self._compute_column_description()

    def _compute_column_description(self):
        """Register human-readable descriptions for the output columns."""
        self.column_description_dict[
            self.translated_text_column_name
        ] = f"{self.target_language_label} translation of the '{self.input_column}' column by Google Cloud Translation"
        # Detected-language column only exists when the source is auto-detected.
        if not self.source_language:
            self.column_description_dict[
                self.detected_language_column_name
            ] = f"Detected language of the '{self.input_column}' column by Google Cloud Translation"

    def format_row(self, row: Dict) -> Dict:
        """Extract translated text (and detected language) from the raw response."""
        raw_response = row[self.api_column_names.response]
        response = safe_json_loads(raw_response, self.error_handling)
        if not self.source_language:
            row[self.detected_language_column_name] = response.get("detectedSourceLanguage", "")
        row[self.translated_text_column_name] = response.get("translatedText", "")
        return row | python-lib/google_translate_api_formatting.py | """Module with classes to format results from the Google Cloud Translation API"""
import logging
import pandas as pd
from typing import AnyStr, Dict
from plugin_io_utils import (
API_COLUMN_NAMES_DESCRIPTION_DICT,
ErrorHandlingEnum,
build_unique_column_names,
generate_unique,
safe_json_loads,
move_api_columns_to_end,
)
LANGUAGE_CODE_LABELS = {
"af": "Afrikaans",
"sq": "Albanian",
"am": "Amharic",
"ar": "Arabic",
"hy": "Armenian",
"az": "Azerbaijani",
"eu": "Basque",
"be": "Belarusian",
"bn": "Bengali",
"bs": "Bosnian",
"bg": "Bulgarian",
"ca": "Catalan",
"ceb": "Cebuano",
"zh-CN": "Chinese (Simplified)",
"zh-TW": "Chinese (Traditional)",
"co": "Corsican",
"hr": "Croatian",
"cs": "Czech",
"da": "Danish",
"nl": "Dutch",
"en": "English",
"eo": "Esperanto",
"et": "Estonian",
"fi": "Finnish",
"fr": "French",
"fy": "Frisian",
"gl": "Galician",
"ka": "Georgian",
"de": "German",
"el": "Greek",
"gu": "Gujarati",
"ht": "Haitian",
"ha": "Hausa",
"haw": "Hawaiian",
"he": "Hebrew",
"hi": "Hindi",
"hmn": "Hmong",
"hu": "Hungarian",
"is": "Icelandic",
"ig": "Igbo",
"id": "Indonesian",
"ga": "Irish",
"it": "Italian",
"ja": "Japanese",
"jv": "Javanese",
"kn": "Kannada",
"kk": "Kazakh",
"km": "Khmer",
"rw": "Kinyarwanda",
"ko": "Korean",
"ku": "Kurdish",
"ky": "Kyrgyz",
"lo": "Lao",
"la": "Latin",
"lv": "Latvian",
"lt": "Lithuanian",
"lb": "Luxembourgish",
"mk": "Macedonian",
"mg": "Malagasy",
"ms": "Malay",
"ml": "Malayalam",
"mt": "Maltese",
"mi": "Maori",
"mr": "Marathi",
"mn": "Mongolian",
"my": "Myanmar",
"ne": "Nepali",
"no": "Norwegian",
"ny": "Nyanja",
"or": "Odia",
"ps": "Pashto",
"fa": "Persian",
"pl": "Polish",
"pt": "Portuguese",
"pa": "Punjabi",
"ro": "Romanian",
"ru": "Russian",
"sm": "Samoan",
"gd": "Scots",
"sr": "Serbian",
"st": "Sesotho",
"sn": "Shona",
"sd": "Sindhi",
"si": "Sinhala",
"sk": "Slovak",
"sl": "Slovenian",
"so": "Somali",
"es": "Spanish",
"su": "Sundanese",
"sw": "Swahili",
"sv": "Swedish",
"tl": "Tagalog",
"tg": "Tajik",
"ta": "Tamil",
"tt": "Tatar",
"te": "Telugu",
"th": "Thai",
"tr": "Turkish",
"tk": "Turkmen",
"uk": "Ukrainian",
"ur": "Urdu",
"ug": "Uyghur",
"uz": "Uzbek",
"vi": "Vietnamese",
"cy": "Welsh",
"xh": "Xhosa",
"yi": "Yiddish",
"yo": "Yoruba",
"zu": "Zulu",
}
# ==============================================================================
# CLASS AND FUNCTION DEFINITION
# ==============================================================================
class GenericAPIFormatter:
"""
Geric Formatter class for API responses:
- initialize with generic parameters
- compute generic column descriptions
- apply format_row to dataframe
"""
def __init__(
self,
input_df: pd.DataFrame,
column_prefix: AnyStr = "api",
error_handling: ErrorHandlingEnum = ErrorHandlingEnum.LOG,
):
self.input_df = input_df
self.column_prefix = column_prefix
self.error_handling = error_handling
self.api_column_names = build_unique_column_names(input_df, column_prefix)
self.column_description_dict = {
v: API_COLUMN_NAMES_DESCRIPTION_DICT[k] for k, v in self.api_column_names._asdict().items()
}
def format_row(self, row: Dict) -> Dict:
return row
def format_df(self, df: pd.DataFrame) -> pd.DataFrame:
logging.info("Formatting API results...")
df = df.apply(func=self.format_row, axis=1)
df = move_api_columns_to_end(df, self.api_column_names, self.error_handling)
logging.info("Formatting API results: Done.")
return df
class TranslationAPIFormatter(GenericAPIFormatter):
"""
Formatter class for translation API responses:
- make sure response is valid JSON
"""
def __init__(
self,
input_df: pd.DataFrame,
input_column: AnyStr,
target_language: AnyStr,
source_language: AnyStr = None,
column_prefix: AnyStr = "translation_api",
error_handling: ErrorHandlingEnum = ErrorHandlingEnum.LOG,
):
super().__init__(input_df, column_prefix, error_handling)
self.translated_text_column_name = generate_unique(
f"{input_column}_{target_language.replace('-', '_')}", input_df.columns, prefix=None
)
self.detected_language_column_name = generate_unique(f"{input_column}_language", input_df.columns, prefix=None)
self.source_language = source_language
self.input_column = input_column
self.input_df_columns = input_df.columns
self.target_language = target_language
self.target_language_label = LANGUAGE_CODE_LABELS[self.target_language]
self._compute_column_description()
def _compute_column_description(self):
self.column_description_dict[
self.translated_text_column_name
] = f"{self.target_language_label} translation of the '{self.input_column}' column by Google Cloud Translation"
if not self.source_language:
self.column_description_dict[
self.detected_language_column_name
] = f"Detected language of the '{self.input_column}' column by Google Cloud Translation"
def format_row(self, row: Dict) -> Dict:
raw_response = row[self.api_column_names.response]
response = safe_json_loads(raw_response, self.error_handling)
if not self.source_language:
row[self.detected_language_column_name] = response.get("detectedSourceLanguage", "")
row[self.translated_text_column_name] = response.get("translatedText", "")
return row | 0.868548 | 0.455441 |
"""
CloudFront Plugin
"""
import logging
import urlparse
import uglwcdriver.lib.abstractlwc as abstractlwc
# Module-level logger writing DEBUG-and-above to a local log file.
# NOTE(review): the file handler is opened as an import side effect.
logger = logging.getLogger('syndicate_cloudfront_cdn')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler('syndicate_cloudfront_cdn.log')
fh.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
class plugin_impl(abstractlwc.awcbase):
    """CloudFront CDN plugin: rewrites origin URLs to their CDN prefix.

    Python 2 code (uses the `urlparse` module and `str.encode('ascii')`).
    Configuration shape (from the "cloudfront" section): a "map" list of
    entries with a "host" (or "*" wildcard) and an optional "cdn_prefix".
    """

    def __init__(self, config):
        logger.info("__init__")
        if not config:
            raise ValueError("wc configuration is not given correctly")
        cloudfront_config = config.get("cloudfront")
        if not cloudfront_config:
            raise ValueError("cloudfront configuration is not given correctly")
        self.cloudfront_config = cloudfront_config
        # parse map
        url_mappings = self.cloudfront_config.get("map")
        if not url_mappings:
            raise ValueError("cloudfront url mapping configuration is not given correctly")
        if not isinstance(url_mappings, list):
            raise ValueError("cloudfront url mapping configuration is not an array")
        self.url_mappings = url_mappings
        # self.mappings: "scheme://host" (or "*") -> (cdn_prefix, scheme, host)
        self.mappings = {}
        for url_mapping in self.url_mappings:
            host = url_mapping.get("host")
            # NOTE(review): if "host" is absent, .encode on None raises
            # AttributeError before the `if not host` check below — the
            # check should probably precede the encode.
            host = host.encode('ascii', 'ignore')
            if not host:
                raise ValueError("cloudfront host is not given correctly")
            cdn_prefix = url_mapping.get("cdn_prefix")
            # NOTE(review): same None-before-check hazard as "host" above.
            cdn_prefix = cdn_prefix.encode('ascii', 'ignore')
            key = None
            if host in ["*"]:
                key = "*"
            else:
                # Normalize the host to "scheme://netloc", defaulting to http
                # when no scheme was given (urlparse then puts it in .path).
                host_parts = urlparse.urlparse(host)
                host_scheme = None
                host_host = None
                if len(host_parts.scheme) > 0:
                    host_scheme = host_parts.scheme
                    host_host = host_parts.netloc
                else:
                    host_scheme = "http"
                    host_host = host_parts.path
                key = "%s://%s" % (host_scheme, host_host)
            if cdn_prefix:
                # Split the CDN prefix the same way as the host above.
                prefix_parts = urlparse.urlparse(cdn_prefix)
                prefix_scheme = None
                prefix_host = None
                if len(prefix_parts.scheme) > 0:
                    prefix_scheme = prefix_parts.scheme
                    prefix_host = prefix_parts.netloc
                else:
                    prefix_scheme = "http"
                    prefix_host = prefix_parts.path
                self.mappings[key] = (cdn_prefix, prefix_scheme, prefix_host)
            else:
                # Host is mapped but has no CDN prefix: translate() passes through.
                self.mappings[key] = (None, None, None)

    def translate(self, url):
        """
        make the URL accessible via the CloudFront CDN prefix
        """
        url_parts = urlparse.urlparse(url)
        url_scheme = None
        url_host = None
        url_scheme = url_parts.scheme
        url_host = url_parts.netloc
        key = "%s://%s" % (url_scheme, url_host)
        if key in self.mappings:
            _, prefix_scheme, prefix_host = self.mappings.get(key)
            if prefix_scheme and prefix_host:
                # NOTE(review): url_parts.path normally starts with '/', so
                # this yields "scheme://host//path" — confirm intended.
                return '{}://{}/{}'.format(prefix_scheme, prefix_host, url_parts.path)
            else:
                return url
        else:
            # wildcard
            if "*" in self.mappings:
                _, prefix_scheme, prefix_host = self.mappings.get("*")
                if prefix_scheme and prefix_host:
                    return '{}://{}/{}'.format(prefix_scheme, prefix_host, url_parts.path)
                else:
                    return url
            # No exact or wildcard mapping: return the URL unchanged.
            return url | src/uglwcdriver/plugins/cloudfront/cloudfront_plugin.py | """
CloudFront Plugin
"""
import logging
import urlparse
import uglwcdriver.lib.abstractlwc as abstractlwc
logger = logging.getLogger('syndicate_cloudfront_cdn')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler('syndicate_cloudfront_cdn.log')
fh.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
class plugin_impl(abstractlwc.awcbase):
def __init__(self, config):
logger.info("__init__")
if not config:
raise ValueError("wc configuration is not given correctly")
cloudfront_config = config.get("cloudfront")
if not cloudfront_config:
raise ValueError("cloudfront configuration is not given correctly")
self.cloudfront_config = cloudfront_config
# parse map
url_mappings = self.cloudfront_config.get("map")
if not url_mappings:
raise ValueError("cloudfront url mapping configuration is not given correctly")
if not isinstance(url_mappings, list):
raise ValueError("cloudfront url mapping configuration is not an array")
self.url_mappings = url_mappings
self.mappings = {}
for url_mapping in self.url_mappings:
host = url_mapping.get("host")
host = host.encode('ascii', 'ignore')
if not host:
raise ValueError("cloudfront host is not given correctly")
cdn_prefix = url_mapping.get("cdn_prefix")
cdn_prefix = cdn_prefix.encode('ascii', 'ignore')
key = None
if host in ["*"]:
key = "*"
else:
host_parts = urlparse.urlparse(host)
host_scheme = None
host_host = None
if len(host_parts.scheme) > 0:
host_scheme = host_parts.scheme
host_host = host_parts.netloc
else:
host_scheme = "http"
host_host = host_parts.path
key = "%s://%s" % (host_scheme, host_host)
if cdn_prefix:
prefix_parts = urlparse.urlparse(cdn_prefix)
prefix_scheme = None
prefix_host = None
if len(prefix_parts.scheme) > 0:
prefix_scheme = prefix_parts.scheme
prefix_host = prefix_parts.netloc
else:
prefix_scheme = "http"
prefix_host = prefix_parts.path
self.mappings[key] = (cdn_prefix, prefix_scheme, prefix_host)
else:
self.mappings[key] = (None, None, None)
def translate(self, url):
"""
make the URL accessible via the CloudFront CDN prefix
"""
url_parts = urlparse.urlparse(url)
url_scheme = None
url_host = None
url_scheme = url_parts.scheme
url_host = url_parts.netloc
key = "%s://%s" % (url_scheme, url_host)
if key in self.mappings:
_, prefix_scheme, prefix_host = self.mappings.get(key)
if prefix_scheme and prefix_host:
return '{}://{}/{}'.format(prefix_scheme, prefix_host, url_parts.path)
else:
return url
else:
# wildcard
if "*" in self.mappings:
_, prefix_scheme, prefix_host = self.mappings.get("*")
if prefix_scheme and prefix_host:
return '{}://{}/{}'.format(prefix_scheme, prefix_host, url_parts.path)
else:
return url
return url | 0.570331 | 0.065755 |
from typing import Optional
from fastapi import FastAPI, Request, Depends, BackgroundTasks
from fastapi.templating import Jinja2Templates
from fastapi.staticfiles import StaticFiles
from sqlalchemy.orm import Session
from pydantic import BaseModel
import yfinance
import models
from database import SessionLocal, engine
from models import Stock
class StockRequest(BaseModel):
    """Request body for POST /stock: the ticker symbol to track."""
    symbol: str
def get_db():
    """FastAPI dependency yielding a DB session, closed after the request.

    Fix: the original created the session *inside* the ``try`` block, so if
    ``SessionLocal()`` itself raised, ``db`` was unbound and the ``finally``
    clause crashed with ``NameError`` instead of the real error. Creating it
    before ``try`` is the canonical SQLAlchemy/FastAPI pattern.
    """
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()
# Create any missing tables at import time, then configure the app,
# static-file mount, and the Jinja2 template directory.
models.Base.metadata.create_all(bind=engine)
app = FastAPI()
app.mount("/static", StaticFiles(directory="static"), name="static")
templates = Jinja2Templates(directory="templates")
@app.get("/")
def home(
request: Request,
db: Session = Depends(get_db),
forward_pe='',
dividend_yield='',
ma50=None,
ma200=None,
):
"""
display the stock screener dashboard / homepage
"""
stocks = db.query(Stock)
if forward_pe:
stocks = stocks.filter(Stock.forward_pe <= forward_pe)
if dividend_yield:
stocks = stocks.filter(Stock.dividend_yield >= dividend_yield)
if ma50:
stocks = stocks.filter(Stock.price >= Stock.ma50)
if ma200:
stocks = stocks.filter(Stock.price >= Stock.ma200)
return templates.TemplateResponse(
"home.html",
{
"request": request,
"stocks": stocks,
"dividend_yield": dividend_yield,
"forward_pe": forward_pe,
"ma50": ma50,
"ma200": ma200,
},
)
def fetch_stock_data(id: int):
    """Background task: pull quote data from Yahoo! Finance into a Stock row.

    Args:
        id: primary key of the Stock row created by `create_stock`.

    Fixes: the session opened here was never closed (connection leak for
    every background task), and `dividend == None` used equality instead
    of the idiomatic identity check.
    """
    db = SessionLocal()
    try:
        stock = db.query(Stock).filter(Stock.id == id).first()
        yahoo_data = yfinance.Ticker(stock.symbol)
        stock.ma200 = yahoo_data.info['twoHundredDayAverage']
        stock.ma50 = yahoo_data.info['fiftyDayAverage']
        stock.price = yahoo_data.info['previousClose']
        stock.forward_pe = yahoo_data.info['forwardPE']
        stock.forward_eps = yahoo_data.info['forwardEps']
        dividend = yahoo_data.info['dividendYield']
        # Scale the yield to percent; treat a missing dividend as 0.
        stock.dividend_yield = 0 if dividend is None else dividend * 100
        db.add(stock)
        db.commit()
        print(' Data fetched from Yahoo!Finance and saved for', stock.symbol)
    finally:
        # This task owns its session; always release it.
        db.close()
@app.post("/stock")
async def create_stock(
stock_request: StockRequest,
background_tasks: BackgroundTasks,
db: Session = Depends(get_db)
):
"""creates a stock and stores it in the database
Returns:
[type]: [description]
"""
stock = Stock()
stock.symbol = stock_request.symbol
db.add(stock)
db.commit()
background_tasks.add_task(fetch_stock_data, stock.id)
return {
"code": "success",
"message": "stock created",
} | main.py | from typing import Optional
from fastapi import FastAPI, Request, Depends, BackgroundTasks
from fastapi.templating import Jinja2Templates
from fastapi.staticfiles import StaticFiles
from sqlalchemy.orm import Session
from pydantic import BaseModel
import yfinance
import models
from database import SessionLocal, engine
from models import Stock
class StockRequest(BaseModel):
symbol: str
def get_db():
try:
db = SessionLocal()
yield db
finally:
db.close()
models.Base.metadata.create_all(bind=engine)
app = FastAPI()
app.mount("/static", StaticFiles(directory="static"), name="static")
templates = Jinja2Templates(directory="templates")
@app.get("/")
def home(
request: Request,
db: Session = Depends(get_db),
forward_pe='',
dividend_yield='',
ma50=None,
ma200=None,
):
"""
display the stock screener dashboard / homepage
"""
stocks = db.query(Stock)
if forward_pe:
stocks = stocks.filter(Stock.forward_pe <= forward_pe)
if dividend_yield:
stocks = stocks.filter(Stock.dividend_yield >= dividend_yield)
if ma50:
stocks = stocks.filter(Stock.price >= Stock.ma50)
if ma200:
stocks = stocks.filter(Stock.price >= Stock.ma200)
return templates.TemplateResponse(
"home.html",
{
"request": request,
"stocks": stocks,
"dividend_yield": dividend_yield,
"forward_pe": forward_pe,
"ma50": ma50,
"ma200": ma200,
},
)
def fetch_stock_data(id: int):
db = SessionLocal()
stock = db.query(Stock).filter(Stock.id == id).first()
yahoo_data = yfinance.Ticker(stock.symbol)
stock.ma200 = yahoo_data.info['twoHundredDayAverage']
stock.ma50 = yahoo_data.info['fiftyDayAverage']
stock.price = yahoo_data.info['previousClose']
stock.forward_pe = yahoo_data.info['forwardPE']
stock.forward_eps = yahoo_data.info['forwardEps']
dividend = yahoo_data.info['dividendYield']
stock.dividend_yield = 0 if dividend == None else dividend * 100
db.add(stock)
db.commit()
print(' Data fetched from Yahoo!Finance and saved for', stock.symbol)
@app.post("/stock")
async def create_stock(
stock_request: StockRequest,
background_tasks: BackgroundTasks,
db: Session = Depends(get_db)
):
"""creates a stock and stores it in the database
Returns:
[type]: [description]
"""
stock = Stock()
stock.symbol = stock_request.symbol
db.add(stock)
db.commit()
background_tasks.add_task(fetch_stock_data, stock.id)
return {
"code": "success",
"message": "stock created",
} | 0.719384 | 0.166134 |
import pygame
# Module-level pygame setup: a window, the two focus colors, and the font.
# NOTE(review): main() calls pygame.init() and set_mode() again, replacing
# this 640x480 window — the module-level display appears redundant.
pygame.init()
screen = pygame.display.set_mode((640, 480))
COLOR_INACTIVE = pygame.Color('lightskyblue3')
COLOR_ACTIVE = pygame.Color('dodgerblue2')
FONT = pygame.font.Font(None, 32)
class InputBox:
    """A clickable, single-line text entry box.

    Clicking inside toggles keyboard focus; while focused, typed characters
    accumulate, Backspace deletes, and Return prints and clears the buffer.
    """

    def __init__(self, x, y, w, h, text=''):
        self.rect = pygame.Rect(x, y, w, h)
        self.color = COLOR_INACTIVE
        self.text = text
        self.txt_surface = FONT.render(text, True, self.color)
        self.active = False

    def handle_event(self, event):
        """Update focus and buffered text from a single pygame event."""
        if event.type == pygame.MOUSEBUTTONDOWN:
            clicked_inside = self.rect.collidepoint(event.pos)
            # A click inside toggles focus; a click elsewhere always drops it.
            self.active = (not self.active) if clicked_inside else False
            self.color = COLOR_ACTIVE if self.active else COLOR_INACTIVE
        if event.type == pygame.KEYDOWN and self.active:
            if event.key == pygame.K_RETURN:
                print(self.text)
                self.text = ''
            elif event.key == pygame.K_BACKSPACE:
                self.text = self.text[:-1]
            else:
                self.text += event.unicode
            # Re-render with the current focus color.
            self.txt_surface = FONT.render(self.text, True, self.color)

    def update(self):
        """Grow the box so long text always fits (minimum width 200 px)."""
        self.rect.w = max(200, self.txt_surface.get_width() + 10)

    def draw(self, screen):
        """Blit the buffered text and the box outline onto *screen*."""
        screen.blit(self.txt_surface, (self.rect.x + 5, self.rect.y + 5))
        pygame.draw.rect(screen, self.color, self.rect, 2)
# Window size and background color used by main() below.
SCREEN_WIDTH = 600
SCREEN_HEIGHT = 300
WHITE = (255, 255, 255)
def main():
    """Run the demo event loop with two input boxes until the window closes."""
    pygame.init()
    screen = pygame.display.set_mode(size=(SCREEN_WIDTH, SCREEN_HEIGHT))
    clock = pygame.time.Clock()
    input_box1 = InputBox(100, 100, 100, 32)
    input_box2 = InputBox(100, 150, 100, 32)
    input_boxes = [input_box1, input_box2]
    done = False
    while not done:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                done = True
            # Every box sees every event so focus switching works.
            for box in input_boxes:
                box.handle_event(event)
        screen.fill(WHITE)
        for box in input_boxes:
            box.update()
            box.draw(screen)
        pygame.display.update()
        # Fix: the Clock was created but never ticked, so the loop spun at
        # 100% CPU; cap the frame rate at 30 FPS.
        clock.tick(30)
if __name__ == "__main__":
main()
pygame.quit() | input_box.py | import pygame
pygame.init()
screen = pygame.display.set_mode((640, 480))
COLOR_INACTIVE = pygame.Color('lightskyblue3')
COLOR_ACTIVE = pygame.Color('dodgerblue2')
FONT = pygame.font.Font(None, 32)
class InputBox:
def __init__(self, x, y, w, h, text=''):
self.rect = pygame.Rect(x, y, w, h)
self.color = COLOR_INACTIVE
self.text = text
self.txt_surface = FONT.render(text, True, self.color)
self.active = False
def handle_event(self, event):
if event.type == pygame.MOUSEBUTTONDOWN:
# If the user clicked on the input_box rect.
if self.rect.collidepoint(event.pos):
# Toggle the active variable.
self.active = not self.active
else:
self.active = False
# Change the current color of the input box.
self.color = COLOR_ACTIVE if self.active else COLOR_INACTIVE
if event.type == pygame.KEYDOWN:
if self.active:
if event.key == pygame.K_RETURN:
print(self.text)
self.text = ''
elif event.key == pygame.K_BACKSPACE:
self.text = self.text[:-1]
else:
self.text += event.unicode
# Re-render the text.
self.txt_surface = FONT.render(self.text, True, self.color)
def update(self):
# Resize the box if the text is too long.
width = max(200, self.txt_surface.get_width()+10)
self.rect.w = width
def draw(self, screen):
# Blit the text.
screen.blit(self.txt_surface, (self.rect.x+5, self.rect.y+5))
# Blit the rect.
pygame.draw.rect(screen, self.color, self.rect, 2)
SCREEN_WIDTH = 600
SCREEN_HEIGHT = 300
WHITE = (255, 255, 255)
def main():
pygame.init()
screen = pygame.display.set_mode(size=(SCREEN_WIDTH, SCREEN_HEIGHT))
clock = pygame.time.Clock()
input_box1 = InputBox(100, 100, 100, 32)
input_box2 = InputBox(100, 150, 100, 32)
input_boxes = [input_box1, input_box2]
done = False
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
for box in input_boxes:
box.handle_event(event)
screen.fill(WHITE)
for box in input_boxes:
box.update()
box.draw(screen)
pygame.display.update()
if __name__ == "__main__":
main()
pygame.quit() | 0.370225 | 0.181608 |
from django.core.exceptions import ValidationError
from nails_project.schedule.models import Schedule
from tests.base.tests import NailsProjectTestCase
class ScheduleModelTests(NailsProjectTestCase):
    """Model-level tests for Schedule: validation and persistence of the
    date / start_time / end_time fields."""

    def test_saveModel_whenValid_shouldBeValid(self):
        # A fully-populated schedule validates, saves, and round-trips.
        data = {
            'date': "2021-08-10",
            'start_time': '09:00',
            'end_time': '19:00',
        }
        obj = Schedule(**data)
        obj.full_clean()
        obj.save()
        self.assertEqual('2021-08-10', obj.date.strftime('%Y-%m-%d'))
        self.assertEqual('09:00', obj.start_time.strftime('%H:%M'))
        self.assertEqual('19:00', obj.end_time.strftime('%H:%M'))
        self.assertTrue(Schedule.objects.filter(pk=obj.id).exists())

    def test_saveModel_whenInvalid_shouldBeInvalid_dateError(self):
        # A missing date must fail full_clean() and leave the table empty.
        data = {
            'date': None,
            'start_time': '09:00',
            'end_time': '19:00',
        }
        with self.assertRaises(ValidationError) as error:
            obj = Schedule(**data)
            obj.full_clean()
            obj.save()
        # NOTE(review): assertIsNotNone(error) is always true — the context
        # object exists regardless; asserting on error.exception would be
        # the meaningful check.
        self.assertIsNotNone(error)
        self.assertFalse(Schedule.objects.all().exists())

    def test_saveModel_whenValid_shouldBeValid_startTimeNone(self):
        # start_time is optional: a null value validates and persists.
        data = {
            'date': '2021-08-10',
            'start_time': None,
            'end_time': '19:00',
        }
        obj = Schedule(**data)
        obj.full_clean()
        obj.save()
        self.assertEqual('2021-08-10', obj.date.strftime('%Y-%m-%d'))
        self.assertIsNone(obj.start_time)
        self.assertEqual('19:00', obj.end_time.strftime('%H:%M'))
        self.assertTrue(Schedule.objects.filter(pk=obj.id).exists())

    def test_saveModel_whenValid_shouldBeValid_endTimeNone(self):
        # end_time is optional: a null value validates and persists.
        data = {
            'date': '2021-08-10',
            'start_time': '19:00',
            'end_time': None,
        }
        obj = Schedule(**data)
        obj.full_clean()
        obj.save()
        self.assertEqual('2021-08-10', obj.date.strftime('%Y-%m-%d'))
        self.assertIsNone(obj.end_time)
        self.assertEqual('19:00', obj.start_time.strftime('%H:%M'))
        self.assertTrue(Schedule.objects.filter(pk=obj.id).exists()) | tests/schedule/models/test_schedule_model.py | from django.core.exceptions import ValidationError
from nails_project.schedule.models import Schedule
from tests.base.tests import NailsProjectTestCase
class ScheduleModelTests(NailsProjectTestCase):
def test_saveModel_whenValid_shouldBeValid(self):
data = {
'date': "2021-08-10",
'start_time': '09:00',
'end_time': '19:00',
}
obj = Schedule(**data)
obj.full_clean()
obj.save()
self.assertEqual('2021-08-10', obj.date.strftime('%Y-%m-%d'))
self.assertEqual('09:00', obj.start_time.strftime('%H:%M'))
self.assertEqual('19:00', obj.end_time.strftime('%H:%M'))
self.assertTrue(Schedule.objects.filter(pk=obj.id).exists())
def test_saveModel_whenInvalid_shouldBeInvalid_dateError(self):
data = {
'date': None,
'start_time': '09:00',
'end_time': '19:00',
}
with self.assertRaises(ValidationError) as error:
obj = Schedule(**data)
obj.full_clean()
obj.save()
self.assertIsNotNone(error)
self.assertFalse(Schedule.objects.all().exists())
def test_saveModel_whenValid_shouldBeValid_startTimeNone(self):
data = {
'date': '2021-08-10',
'start_time': None,
'end_time': '19:00',
}
obj = Schedule(**data)
obj.full_clean()
obj.save()
self.assertEqual('2021-08-10', obj.date.strftime('%Y-%m-%d'))
self.assertIsNone(obj.start_time)
self.assertEqual('19:00', obj.end_time.strftime('%H:%M'))
self.assertTrue(Schedule.objects.filter(pk=obj.id).exists())
def test_saveModel_whenValid_shouldBeValid_endTimeNone(self):
data = {
'date': '2021-08-10',
'start_time': '19:00',
'end_time': None,
}
obj = Schedule(**data)
obj.full_clean()
obj.save()
self.assertEqual('2021-08-10', obj.date.strftime('%Y-%m-%d'))
self.assertIsNone(obj.end_time)
self.assertEqual('19:00', obj.start_time.strftime('%H:%M'))
self.assertTrue(Schedule.objects.filter(pk=obj.id).exists()) | 0.550849 | 0.341308 |