code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('home', views.home, name='home'),
path('login', views.ulogin, name='login'),
path('logout', views.ulogout, name='logout'),
path('password_change', views.password_change, name='password_change'),
# path('users', views.users, name='users'),
path('explorer', views.explorer, name='explorer'),
# path('reports/base_report', views.resources, name='reports'),
# path('docs', views.docs, name='docs'),
] | [
"django.urls.path"
] | [((71, 106), 'django.urls.path', 'path', (['""""""', 'views.index'], {'name': '"""index"""'}), "('', views.index, name='index')\n", (75, 106), False, 'from django.urls import path\n'), ((112, 149), 'django.urls.path', 'path', (['"""home"""', 'views.home'], {'name': '"""home"""'}), "('home', views.home, name='home')\n", (116, 149), False, 'from django.urls import path\n'), ((155, 196), 'django.urls.path', 'path', (['"""login"""', 'views.ulogin'], {'name': '"""login"""'}), "('login', views.ulogin, name='login')\n", (159, 196), False, 'from django.urls import path\n'), ((202, 246), 'django.urls.path', 'path', (['"""logout"""', 'views.ulogout'], {'name': '"""logout"""'}), "('logout', views.ulogout, name='logout')\n", (206, 246), False, 'from django.urls import path\n'), ((252, 322), 'django.urls.path', 'path', (['"""password_change"""', 'views.password_change'], {'name': '"""password_change"""'}), "('password_change', views.password_change, name='password_change')\n", (256, 322), False, 'from django.urls import path\n'), ((378, 427), 'django.urls.path', 'path', (['"""explorer"""', 'views.explorer'], {'name': '"""explorer"""'}), "('explorer', views.explorer, name='explorer')\n", (382, 427), False, 'from django.urls import path\n')] |
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score, confusion_matrix
from keras.callbacks import ModelCheckpoint
import seaborn as sns
from keras.optimizers import Adam
import pickle
import matplotlib.pyplot as plt
import lime
import lime.lime_tabular
from lime.lime_tabular import LimeTabularExplainer
import os
# fix random seed for reproducibility
np.random.seed(7)
# load dataset
# NOTE(review): assumes "covid_filtered_1-5_allMin3.csv" is present in the
# working directory and is fully numeric -- genfromtxt silently turns
# non-numeric cells into NaN; TODO confirm.
dataset = np.genfromtxt("covid_filtered_1-5_allMin3.csv", delimiter=",", encoding="utf8")
# drop the header row, then shuffle rows in place so the split below is not
# order dependent
dataset = dataset[1:, :]
np.random.shuffle(dataset)
# split into input and output variables
# column 23 holds the raw severity label (expected values 1..5)
df_label = dataset[:, 23]
label = []
# collapse the 1..5 severity scale into three classes:
# 1 -> 0 (low), 2/3 -> 1 (middle), 4/5 -> 2 (high)
for lab in df_label:
    if lab == 1:
        label.append([0]) # class 1
    elif lab == 2 or lab == 3:
        label.append([1]) # class 23
    elif lab == 4 or lab == 5:
        label.append([2]) # class 45
    else:
        print("DATA ERROR")
# feature columns fed to the classifier; see `fn` below for their names
inputColumns = [0, 2, 3, 4, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
label = np.array(label)
# stratified 70/30 split keeps the class proportions identical in both sets
xFit, xTest, yFit, yTest = train_test_split(dataset[:, inputColumns], label, test_size=0.3, random_state=42,
                                            stratify=label)
# NOTE(review): the triple-quoted block below is disabled code (per-class
# partitioning of the test set); it is never executed.
'''
# test:
xTest_c1 = []
yTest_c1 = []
xTest_c23 = []
yTest_c23 = []
xTest_c45 = []
yTest_c45 = []
for i in range(len(yTest)):
    if yTest[i][0] == 1: # class 1
        xTest_c1.append(xTest[i])
        yTest_c1.append(yTest[i])
    elif yTest[i][1] == 1: # class 2-3
        xTest_c23.append(xTest[i])
        yTest_c23.append(yTest[i])
    elif yTest[i][2] == 1: # class 4-5
        xTest_c45.append(xTest[i])
        yTest_c45.append(yTest[i])
xTest_c1 = numpy.array(xTest_c1)
yTest_c1 = numpy.array(yTest_c1)
xTest_c23 = numpy.array(xTest_c23)
yTest_c23 = numpy.array(yTest_c23)
xTest_c45 = numpy.array(xTest_c45)
yTest_c45 = numpy.array(yTest_c45)
'''
# random-forest hyperparameters (shallow trees: depth 6, 50 estimators)
parameters = {'bootstrap': True,
              'min_samples_leaf': 3,
              'n_estimators': 50,
              'min_samples_split': 10,
              'max_features': 'sqrt',
              'max_depth': 6,
              'max_leaf_nodes': None}
RF_model = RandomForestClassifier(**parameters)
# flatten (n, 1) label column to the (n,) shape sklearn expects
yFit = np.array(yFit).ravel()
RF_model.fit(xFit, yFit)
RF_predictions = RF_model.predict(xTest)
score = accuracy_score(yTest, RF_predictions)
print(score)
from sklearn import tree
import matplotlib.pyplot as plt
# human-readable names matching inputColumns, used for the tree rendering
fn = ['sex', 'HSD', 'entry_month', 'symptoms_month', 'pneumonia', 'age_group', 'pregnancy', 'diabetes',
      'copd', 'asthma', 'immsupr', 'hypertension', 'other_disease', 'cardiovascular', 'obesity',
      'renal_chronic', 'tobacco', 'contact_other_covid']
cn = ['Low', 'Middle', 'High']
# render the first tree of the forest to a PNG for inspection
fig = plt.figure(figsize=(35, 6), dpi=900)
tree.plot_tree(RF_model.estimators_[0],
               feature_names=fn,
               class_names=cn,
               filled=True,
               rounded=True,
               precision=2,
               fontsize=4)
fig.savefig('rf_individualtree.png')
# NOTE(review): the block below is also a triple-quoted string, i.e. disabled
# code (confusion matrix plot + a Keras MLP).  If it is ever re-enabled, note
# that model.fit/evaluate are given integer class labels while the model is
# compiled with 'categorical_crossentropy', which expects one-hot targets --
# it would need 'sparse_categorical_crossentropy' or one-hot encoded labels.
'''
# Get and reshape confusion matrix data
matrix = confusion_matrix(yTest, RF_predictions)
matrix = matrix.astype('float') / matrix.sum(axis=1)[:, np.newaxis]
# Build the plot
plt.figure(figsize=(16, 7))
sns.set(font_scale=1.4)
sns.heatmap(matrix, annot=True, annot_kws={'size': 10},
            cmap=plt.cm.Greens, linewidths=0.2)
# Add labels to the plot
class_names = ['Low severity', 'Medium severity', 'High severity']
tick_marks = np.arange(len(class_names))
tick_marks2 = tick_marks + 0.5
plt.xticks(tick_marks, class_names, rotation=25)
plt.yticks(tick_marks2, class_names, rotation=0)
plt.xlabel('Predicted label')
plt.ylabel('True label')
plt.title('Confusion Matrix for Random Forest Model')
plt.show()
# create model
model = Sequential()
model.add(Dense(729, input_dim=len(inputColumns), activation='sigmoid'))
model.add(Dense(243, activation='sigmoid'))
model.add(Dense(81, activation='sigmoid'))
model.add(Dense(27, activation='sigmoid'))
model.add(Dense(9, activation='sigmoid'))
model.add(Dense(3, activation='softmax'))
# Compile model
model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.002), metrics=['accuracy'])
# Fit the model (train the model)
model.fit(xFit, yFit, epochs=1000, batch_size=50)
# evaluate the model
print("\n-------------------------------------------------------")
print("\ntotal(%i):" % len(xTest))
scores = model.evaluate(xTest, yTest)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
# test:
print("\nclass1(%i):" % len(xTest_c1))
scores = model.evaluate(xTest_c1, yTest_c1)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
print("\nclass23(%i):" % len(xTest_c23))
scores = model.evaluate(xTest_c23, yTest_c23)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
print("\nclass45(%i):" % len(xTest_c45))
scores = model.evaluate(xTest_c45, yTest_c45)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
'''
| [
"sklearn.model_selection.train_test_split",
"sklearn.ensemble.RandomForestClassifier",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.random.seed",
"sklearn.tree.plot_tree",
"numpy.genfromtxt",
"sklearn.metrics.accuracy_score",
"numpy.random.shuffle"
] | [((613, 630), 'numpy.random.seed', 'np.random.seed', (['(7)'], {}), '(7)\n', (627, 630), True, 'import numpy as np\n'), ((657, 736), 'numpy.genfromtxt', 'np.genfromtxt', (['"""covid_filtered_1-5_allMin3.csv"""'], {'delimiter': '""","""', 'encoding': '"""utf8"""'}), "('covid_filtered_1-5_allMin3.csv', delimiter=',', encoding='utf8')\n", (670, 736), True, 'import numpy as np\n'), ((762, 788), 'numpy.random.shuffle', 'np.random.shuffle', (['dataset'], {}), '(dataset)\n', (779, 788), True, 'import numpy as np\n'), ((1207, 1222), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (1215, 1222), True, 'import numpy as np\n'), ((1250, 1351), 'sklearn.model_selection.train_test_split', 'train_test_split', (['dataset[:, inputColumns]', 'label'], {'test_size': '(0.3)', 'random_state': '(42)', 'stratify': 'label'}), '(dataset[:, inputColumns], label, test_size=0.3,\n random_state=42, stratify=label)\n', (1266, 1351), False, 'from sklearn.model_selection import train_test_split\n'), ((2317, 2353), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '(**parameters)\n', (2339, 2353), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2458, 2495), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['yTest', 'RF_predictions'], {}), '(yTest, RF_predictions)\n', (2472, 2495), False, 'from sklearn.metrics import accuracy_score, confusion_matrix\n'), ((2863, 2899), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(35, 6)', 'dpi': '(900)'}), '(figsize=(35, 6), dpi=900)\n', (2873, 2899), True, 'import matplotlib.pyplot as plt\n'), ((2900, 3029), 'sklearn.tree.plot_tree', 'tree.plot_tree', (['RF_model.estimators_[0]'], {'feature_names': 'fn', 'class_names': 'cn', 'filled': '(True)', 'rounded': '(True)', 'precision': '(2)', 'fontsize': '(4)'}), '(RF_model.estimators_[0], feature_names=fn, class_names=cn,\n filled=True, rounded=True, precision=2, fontsize=4)\n', (2914, 3029), False, 'from sklearn import tree\n'), ((2361, 
2375), 'numpy.array', 'np.array', (['yFit'], {}), '(yFit)\n', (2369, 2375), True, 'import numpy as np\n')] |
'''
Micro Object Detector Net
the author:Luis
date : 11.25
'''
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from layers import *
from models.base_models import vgg, vgg_base
from ptflops import get_model_complexity_info
class BasicConv(nn.Module):
    """Conv2d followed by optional BatchNorm, optional ReLU and optional
    bilinear upsampling to a fixed (up_size, up_size) resolution."""

    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True,
                 bn=False, bias=True, up_size=0):
        super(BasicConv, self).__init__()
        # mirror the channel counts so callers can introspect the module
        self.out_channels = out_planes
        self.in_channels = in_planes
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding,
                              dilation=dilation, groups=groups, bias=bias)
        if bn:
            self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True)
        else:
            self.bn = None
        if relu:
            self.relu = nn.ReLU(inplace=True)
        else:
            self.relu = None
        self.up_size = up_size
        if up_size != 0:
            self.up_sample = nn.Upsample(size=(up_size, up_size), mode='bilinear')
        else:
            self.up_sample = None

    def forward(self, x):
        """Apply conv -> (bn) -> (relu) -> (upsample) in order."""
        out = self.conv(x)
        if self.bn is not None:
            out = self.bn(out)
        if self.relu is not None:
            out = self.relu(out)
        if self.up_size > 0:
            out = self.up_sample(out)
        return out
class SELayer(nn.Module):
    """Squeeze-and-Excitation block: reweights input channels by learned,
    input-dependent gates in (0, 1)."""

    def __init__(self, channel, reduction=16):
        super(SELayer, self).__init__()
        # squeeze: global average pool to one scalar per channel
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # excitation: bottleneck MLP ending in a sigmoid gate
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel),
            nn.Sigmoid(),
        )

    def forward(self, x):
        batch, channels = x.size(0), x.size(1)
        squeezed = self.avg_pool(x).view(batch, channels)
        gates = self.fc(squeezed).view(batch, channels, 1, 1)
        # broadcast the per-channel gates over the spatial dimensions
        return x * gates
class MOD(nn.Module):
    """Micro Object Detector: VGG backbone + extra downsampling layers,
    top-down lateral fusion (``upper``/``upper2``) with SE reweighting, and
    per-level loc/conf prediction heads (SSD-style output)."""

    def __init__(self, base, extras, upper, upper2, head, num_classes, size):
        # base: VGG layer list; extras: extra downsampling convs;
        # upper/upper2: lateral 1x1 projections onto the two fusion scales;
        # head: (loc_layers, conf_layers); size: input resolution (300 or 512)
        super(MOD, self).__init__()
        self.num_classes = num_classes
        self.extras = nn.ModuleList(extras)
        self.size = size
        self.base = nn.ModuleList(base)
        # self.L2Norm = nn.ModuleList(extras)
        self.upper = nn.ModuleList(upper)
        self.upper2 = nn.ModuleList(upper2)
        self.loc = nn.ModuleList(head[0])
        self.conf = nn.ModuleList(head[1])
        # NOTE(review): nn.Softmax() without an explicit dim relies on the
        # deprecated implicit-dimension behavior; confirm intended axis.
        self.softmax = nn.Softmax()
        # downsampling chains that regenerate the pyramid from the fused maps
        self.predict1 = nn.ModuleList(extra_predict1(self.size))
        self.predict2 = nn.ModuleList(extra_predict2(self.size))

    def forward(self, x, test=False):
        """Run detection; returns (loc, conf) tensors.  With test=True the
        conf scores are softmax-normalized for inference."""
        scale_source = []
        upper_source = []
        loc = []
        conf = []
        mid_trans = []
        # get the F.T of conv4
        for k in range(23):
            x = self.base[k](x)
        scale_source.append(x)
        # remaining VGG layers produce the conv7-scale map
        for k in range(23, len(self.base)):
            x = self.base[k](x)
        scale_source.append(x)
        # every second extra layer contributes one more pyramid level
        for k, v in enumerate(self.extras):
            x = F.relu(v(x), inplace=True)
            if k % 2 == 1:
                scale_source.append(x)
        # NOTE(review): `upper_source` aliases (does not copy) scale_source,
        # so the in-place fusion below also mutates scale_source.
        upper_source = scale_source
        lenscale = len(scale_source)
        orgin = x
        # fuse every higher level into level 0 (conv4_3 scale), gated by SE
        for k in range(len(self.upper) - 1):
            # bn = nn.BatchNorm2d(self.upper[lenscale-k-2].in_channels,affine=True)
            # print(self.upper[lenscale-k-2].in_channels)
            # print(self.upper[lenscale-k-1].out_channels)
            # print(scale_source[lenscale-k-2].size())
            # NOTE(review): SELayer is constructed fresh on every forward
            # pass, so its weights are random and never trained -- likely a
            # bug; same for the BatchNorm layers created below.
            se = SELayer(self.upper[lenscale - k - 1].out_channels, 16)
            upper_source[0] = upper_source[0] + se(self.upper[lenscale - k - 1](upper_source[lenscale - k - 1]))
            # upper_source[0] =upper_source[0]+ self.upper[lenscale-k-1](upper_source[lenscale-k-1])
        # fuse levels >= 2 into level 1 (conv7 scale) the same way
        for k in range(len(self.upper) - 2):
            se = SELayer(self.upper2[lenscale - k - 1].out_channels, 16)
            upper_source[1] = upper_source[1] + se(self.upper2[lenscale - k - 1](upper_source[lenscale - k - 1]))
            # upper_source[1] = upper_source[1] + self.upper2[lenscale-k-1](upper_source[lenscale-k-1])
        bn = nn.BatchNorm2d(512, affine=True)
        upper_source[0] = bn(upper_source[0])
        # bn1 = nn.BatchNorm2d(1024,affine = True)
        # upper_source[1] = bn1(upper_source[1])
        # rebuild the prediction pyramid by downsampling the fused level-0 map
        predict_layer1 = []
        predict_layer1.append(upper_source[0])
        origin_fea = upper_source[0]
        # print('origin_fea',origin_fea.size())
        for k, v in enumerate(self.predict1):
            origin_fea = v(origin_fea)
            # print('ori',origin_fea.size())
            predict_layer1.append(origin_fea)
        bn = nn.BatchNorm2d(2048, affine=True)
        # print(predict_layer1[1].size())
        # print(upper_source[1].size())
        # predict_layer1[1] = bn(torch.cat([predict_layer1[1],upper_source[1]],1))
        # merge the fused level-1 map into the rebuilt pyramid by addition
        predict_layer1[1] = predict_layer1[1] + upper_source[1]
        origin_fea2 = upper_source[1]
        for k, v in enumerate(self.predict2):
            origin_fea2 = v(origin_fea2)
            # predict_layer2.append(origin_fea2)
            # bn = nn.BatchNorm2d(v.out_channels*2,affine=True)
            # if not k==len(self.predict2)-1:
            #     predict_layer1[k+2] = bn(torch.cat([predict_layer1[k+2],origin_fea2],1))
            # else:
            #     predict_layer1[k+2] = torch.cat([predict_layer1[k+2],origin_fea2],1)
            predict_layer1[k + 2] = predict_layer1[k + 2] + origin_fea2
        # apply the heads; permute to NHWC before flattening so box entries
        # are contiguous per spatial location
        for (x, l, c) in zip(predict_layer1, self.loc, self.conf):
            loc.append(l(x).permute(0, 2, 3, 1).contiguous())
            conf.append(c(x).permute(0, 2, 3, 1).contiguous())
        # for (x, l, c) in zip(upper_source, self.loc, self.conf):
        #     loc.append(l(x).permute(0, 2, 3, 1).contiguous())
        #     conf.append(c(x).permute(0, 2, 3, 1).contiguous())
        loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
        conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
        # print(loc.size())
        # print(conf.size())
        if test:
            output = (
                loc.view(loc.size(0), -1, 4),  # loc preds
                self.softmax(conf.view(-1, self.num_classes)),  # conf preds
            )
        else:
            output = (
                loc.view(loc.size(0), -1, 4),
                conf.view(conf.size(0), -1, self.num_classes),
            )
        # print(loc.size())
        # print(conf.size())
        return output
def low_pooling(vgg, extracts, size):
    # NOTE(review): this function looks unfinished and is apparently unused:
    # `k` is not defined in this scope (NameError if called), `layer_size` is
    # a dict so calling it like a function would raise TypeError, `up_size`
    # is never used, and the function implicitly returns None.
    if size == 300:
        up_size = layer_size('300')[k]
    elif size == 512:
        up_size = layer_size('512')[k]
    layers = []
def extra_predict1(size):
    """Build the downsampling conv chain applied to the fused conv4_3-scale
    map, one BasicConv per remaining pyramid level.

    size: detector input resolution, 300 or 512.
    Returns: list of BasicConv modules.
    Raises: ValueError for any other size (previously the function fell
            through and crashed with NameError on the undefined `layers`).
    """
    if size == 300:
        layers = [BasicConv(512, 1024, kernel_size=3, stride=2, padding=1),
                  BasicConv(1024, 512, kernel_size=3, stride=2, padding=1), \
                  BasicConv(512, 256, kernel_size=3, stride=2, padding=1),
                  BasicConv(256, 256, kernel_size=3, stride=2, padding=1), \
                  BasicConv(256, 256, kernel_size=3, stride=1, padding=0)]
    elif size == 512:
        layers = [BasicConv(512, 1024, kernel_size=3, stride=2, padding=1),
                  BasicConv(1024, 512, kernel_size=3, stride=2, padding=1), \
                  BasicConv(512, 256, kernel_size=3, stride=2, padding=1),
                  BasicConv(256, 256, kernel_size=3, stride=2, padding=1), \
                  BasicConv(256, 256, kernel_size=3, stride=2, padding=1),
                  BasicConv(256, 256, kernel_size=4, padding=1, stride=1)]
    else:
        raise ValueError("extra_predict1 only supports size 300 or 512, got %r" % (size,))
    return layers
def extra_predict2(size):
    """Build the downsampling conv chain applied to the fused conv7-scale
    map (one level shorter than extra_predict1's chain).

    size: detector input resolution, 300 or 512.
    Returns: list of BasicConv modules.
    Raises: ValueError for any other size (previously the function fell
            through and crashed with NameError on the undefined `layers`).
    """
    if size == 300:
        layers = [BasicConv(1024, 512, kernel_size=3, stride=2, padding=1), \
                  BasicConv(512, 256, kernel_size=3, stride=2, padding=1),
                  BasicConv(256, 256, kernel_size=3, stride=2, padding=1), \
                  BasicConv(256, 256, kernel_size=3, stride=1, padding=0)]
    elif size == 512:
        layers = [BasicConv(1024, 512, kernel_size=3, stride=2, padding=1), \
                  BasicConv(512, 256, kernel_size=3, stride=2, padding=1),
                  BasicConv(256, 256, kernel_size=3, stride=2, padding=1), \
                  BasicConv(256, 256, kernel_size=3, stride=2, padding=1),
                  BasicConv(256, 256, kernel_size=4, padding=1, stride=1)]
    else:
        raise ValueError("extra_predict2 only supports size 300 or 512, got %r" % (size,))
    return layers
def upper_deconv(vgg, extracts, size):
    """Build the lateral 1x1 projection layers that map every pyramid level
    onto the two fusion resolutions (conv4_3 scale -> `layers`, conv7 scale
    -> `layers2`).  Returns (vgg, extracts, layers, layers2); for an unknown
    size both lists come back empty."""
    layers = []
    layers2 = []
    if size == 300:
        fuse1, fuse2 = 38, 19
        extract_idx = (1, 3, 5, 7)
    elif size == 512:
        fuse1, fuse2 = 64, 32
        extract_idx = (1, 3, 5, 7, 9)
    else:
        return vgg, extracts, layers, layers2
    # projections onto the conv4_3-scale fusion map (512 channels)
    layers.append(BasicConv(512, 128 * 4, kernel_size=1, padding=0))
    layers.append(BasicConv(vgg[-2].out_channels, 512, kernel_size=1, padding=0, up_size=fuse1))
    for idx in extract_idx:
        layers.append(BasicConv(extracts[idx].out_channels, 512, kernel_size=1, padding=0, up_size=fuse1))
    # projections onto the conv7-scale fusion map (1024 channels)
    layers2.append(BasicConv(512, 128 * 4, kernel_size=1, padding=0))
    layers2.append(BasicConv(vgg[-2].out_channels, 1024, kernel_size=1, padding=0, up_size=fuse2))
    for idx in extract_idx:
        layers2.append(BasicConv(extracts[idx].out_channels, 1024, kernel_size=1, padding=0, up_size=fuse2))
    return vgg, extracts, layers, layers2
def add_extras(cfg, i, batch_norm=False, size=300):
    """Translate an `extras` channel spec into a flat list of nn.Conv2d.

    An 'S' entry produces a stride-2 conv whose out-channels come from the
    *next* spec entry (which is then skipped); `flag` alternates the kernel
    size between 1 and 3 for the emitted layers.
    """
    layers = []
    in_channels = i
    flag = False
    for idx, spec in enumerate(cfg):
        if in_channels == 'S':
            # this value was already consumed as the previous layer's
            # out-channel count; just carry it forward
            in_channels = spec
            continue
        if spec == 'S':
            layers.append(nn.Conv2d(in_channels, cfg[idx + 1],
                                    kernel_size=(1, 3)[flag], stride=2, padding=1))
        else:
            layers.append(nn.Conv2d(in_channels, spec, kernel_size=(1, 3)[flag]))
        flag = not flag
        in_channels = spec
    if size == 512:
        # two extra layers to reach the final 1x1-ish level of the 512 model
        layers.append(nn.Conv2d(in_channels, 128, kernel_size=1, stride=1))
        layers.append(nn.Conv2d(128, 256, kernel_size=4, stride=1, padding=1))
    return layers
def multibox(vgg, extra_layers, upper, upper2, cfg, num_classes):
    """Attach localization (4 coords per box) and classification
    (num_classes scores per box) heads to each pyramid level; cfg[k] is the
    number of default boxes per cell at level k.

    Returns (vgg, extra_layers, upper, upper2, (loc_layers, conf_layers)).
    """
    loc_layers = []
    conf_layers = []
    vgg_source = [24, -2]
    # level 0 reads the fused map, so its channel count is the lateral's
    # out_channels
    loc_layers.append(nn.Conv2d(upper[0].out_channels, cfg[0] * 4, kernel_size=3, padding=1))
    conf_layers.append(nn.Conv2d(upper[0].out_channels, cfg[0] * num_classes, kernel_size=3, padding=1))
    # remaining levels read the raw pyramid maps, i.e. each lateral's
    # in_channels
    for k, lateral in enumerate(upper):
        if k == 0:
            continue
        loc_layers.append(nn.Conv2d(lateral.in_channels, cfg[k] * 4, kernel_size=3, padding=1))
        conf_layers.append(nn.Conv2d(lateral.in_channels, cfg[k] * num_classes, kernel_size=3, padding=1))
    return vgg, extra_layers, upper, upper2, (loc_layers, conf_layers)
# spatial side length of each feature-map level, keyed by input resolution
layer_size = {
    '300': [38, 19, 10, 5, 3, 1],
    '512': [64, 32, 16, 8, 4, 2, 1],
}
# extra-layer channel spec consumed by add_extras(); 'S' marks a stride-2 conv
extras = {
    '300': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256],
    '512': [256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256],
}
mbox = {
    '300': [6, 6, 6, 6, 4, 4],  # number of boxes per feature map location
    '512': [6, 6, 6, 6, 6, 4, 4],
}
def build_net(size=300, num_classes=21):
    """Assemble the full MOD detector for a 300x300 or 512x512 input;
    prints an error and returns None for any other size."""
    if size not in (300, 512):
        print("Error: Sorry only SSD300 and SSD512 is supported currently!")
        return
    backbone = vgg(vgg_base[str(size)], 3)
    extra_layers = add_extras(extras[str(size)], 1024, size=size)
    parts = upper_deconv(backbone, extra_layers, size)
    return MOD(*multibox(*parts, mbox[str(size)], num_classes),
               num_classes=num_classes, size=size)
if __name__ == '__main__':
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
with torch.no_grad():
model = build_net(size=512,num_classes=2)
print(model)
# x = torch.randn(16, 3, 300, 300)
model.cuda()
macs,params = get_model_complexity_info(model,(3,512,512),as_strings=True,print_per_layer_stat=True,verbose=True)
print('MACs: {0}'.format(macs))
print('Params: {0}'.format(params)) | [
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Sigmoid",
"torch.nn.Softmax",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.Upsample",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.Linear",
"torch.no_grad",
"ptflops.get_model_complexity_info"
] | [((585, 723), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'dilation': 'dilation', 'groups': 'groups', 'bias': 'bias'}), '(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n padding=padding, dilation=dilation, groups=groups, bias=bias)\n', (594, 723), True, 'import torch.nn as nn\n'), ((1438, 1461), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (1458, 1461), True, 'import torch.nn as nn\n'), ((2030, 2051), 'torch.nn.ModuleList', 'nn.ModuleList', (['extras'], {}), '(extras)\n', (2043, 2051), True, 'import torch.nn as nn\n'), ((2097, 2116), 'torch.nn.ModuleList', 'nn.ModuleList', (['base'], {}), '(base)\n', (2110, 2116), True, 'import torch.nn as nn\n'), ((2184, 2204), 'torch.nn.ModuleList', 'nn.ModuleList', (['upper'], {}), '(upper)\n', (2197, 2204), True, 'import torch.nn as nn\n'), ((2227, 2248), 'torch.nn.ModuleList', 'nn.ModuleList', (['upper2'], {}), '(upper2)\n', (2240, 2248), True, 'import torch.nn as nn\n'), ((2268, 2290), 'torch.nn.ModuleList', 'nn.ModuleList', (['head[0]'], {}), '(head[0])\n', (2281, 2290), True, 'import torch.nn as nn\n'), ((2311, 2333), 'torch.nn.ModuleList', 'nn.ModuleList', (['head[1]'], {}), '(head[1])\n', (2324, 2333), True, 'import torch.nn as nn\n'), ((2357, 2369), 'torch.nn.Softmax', 'nn.Softmax', ([], {}), '()\n', (2367, 2369), True, 'import torch.nn as nn\n'), ((4060, 4092), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(512)'], {'affine': '(True)'}), '(512, affine=True)\n', (4074, 4092), True, 'import torch.nn as nn\n'), ((4590, 4623), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(2048)'], {'affine': '(True)'}), '(2048, affine=True)\n', (4604, 4623), True, 'import torch.nn as nn\n'), ((11908, 11978), 'torch.nn.Conv2d', 'nn.Conv2d', (['upper[0].out_channels', '(cfg[0] * 4)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(upper[0].out_channels, cfg[0] * 4, kernel_size=3, padding=1)\n', (11917, 
11978), True, 'import torch.nn as nn\n'), ((12000, 12085), 'torch.nn.Conv2d', 'nn.Conv2d', (['upper[0].out_channels', '(cfg[0] * num_classes)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(upper[0].out_channels, cfg[0] * num_classes, kernel_size=3, padding=1\n )\n', (12009, 12085), True, 'import torch.nn as nn\n'), ((13918, 13933), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13931, 13933), False, 'import torch\n'), ((14092, 14201), 'ptflops.get_model_complexity_info', 'get_model_complexity_info', (['model', '(3, 512, 512)'], {'as_strings': '(True)', 'print_per_layer_stat': '(True)', 'verbose': '(True)'}), '(model, (3, 512, 512), as_strings=True,\n print_per_layer_stat=True, verbose=True)\n', (14117, 14201), False, 'from ptflops import get_model_complexity_info\n'), ((768, 833), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_planes'], {'eps': '(1e-05)', 'momentum': '(0.01)', 'affine': '(True)'}), '(out_planes, eps=1e-05, momentum=0.01, affine=True)\n', (782, 833), True, 'import torch.nn as nn\n'), ((869, 890), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (876, 890), True, 'import torch.nn as nn\n'), ((965, 1018), 'torch.nn.Upsample', 'nn.Upsample', ([], {'size': '(up_size, up_size)', 'mode': '"""bilinear"""'}), "(size=(up_size, up_size), mode='bilinear')\n", (976, 1018), True, 'import torch.nn as nn\n'), ((1507, 1547), 'torch.nn.Linear', 'nn.Linear', (['channel', '(channel // reduction)'], {}), '(channel, channel // reduction)\n', (1516, 1547), True, 'import torch.nn as nn\n'), ((1561, 1582), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1568, 1582), True, 'import torch.nn as nn\n'), ((1596, 1636), 'torch.nn.Linear', 'nn.Linear', (['(channel // reduction)', 'channel'], {}), '(channel // reduction, channel)\n', (1605, 1636), True, 'import torch.nn as nn\n'), ((1650, 1662), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1660, 1662), True, 'import torch.nn as nn\n'), ((11578, 11630), 
'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', '(128)'], {'kernel_size': '(1)', 'stride': '(1)'}), '(in_channels, 128, kernel_size=1, stride=1)\n', (11587, 11630), True, 'import torch.nn as nn\n'), ((11654, 11709), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)'], {'kernel_size': '(4)', 'stride': '(1)', 'padding': '(1)'}), '(128, 256, kernel_size=4, stride=1, padding=1)\n', (11663, 11709), True, 'import torch.nn as nn\n'), ((12180, 12242), 'torch.nn.Conv2d', 'nn.Conv2d', (['v.in_channels', '(cfg[k] * 4)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(v.in_channels, cfg[k] * 4, kernel_size=3, padding=1)\n', (12189, 12242), True, 'import torch.nn as nn\n'), ((12268, 12340), 'torch.nn.Conv2d', 'nn.Conv2d', (['v.in_channels', '(cfg[k] * num_classes)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(v.in_channels, cfg[k] * num_classes, kernel_size=3, padding=1)\n', (12277, 12340), True, 'import torch.nn as nn\n'), ((11266, 11351), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'cfg[k + 1]'], {'kernel_size': '(1, 3)[flag]', 'stride': '(2)', 'padding': '(1)'}), '(in_channels, cfg[k + 1], kernel_size=(1, 3)[flag], stride=2,\n padding=1)\n', (11275, 11351), True, 'import torch.nn as nn\n'), ((11431, 11482), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'v'], {'kernel_size': '(1, 3)[flag]'}), '(in_channels, v, kernel_size=(1, 3)[flag])\n', (11440, 11482), True, 'import torch.nn as nn\n')] |
import binascii
def print_stream(stream, description):
    """Print a seekable stream's full contents under a ***description***
    banner, leaving the stream rewound to position 0 afterwards."""
    stream.seek(0)
    banner = '***' + description + '***'
    print(banner)
    print(stream.read())
    stream.seek(0)
def test_message(encoding='ascii', hex_bitmap=False):
    """Return a sample ISO 8583 '1144' message as bytes.

    encoding: codec for the MTI and data fields (e.g. 'ascii' or 'cp500').
    hex_bitmap: when True the 16-byte binary bitmap is hex-encoded instead
    of sent raw.
    """
    binary_bitmap = b'\xF0\x10\x05\x42\x84\x61\x80\x02\x02\x00\x00\x04\x00\x00\x00\x00'
    bitmap = binascii.hexlify(binary_bitmap) if hex_bitmap else binary_bitmap
    fields = ('164444555544445555111111000000009999150815171500123456789012333123423579957991200000'
              '012306120612345612345657994211111111145BIG BOBS\\80 KERNDALE ST\\DANERLEY\\3103 VIC'
              'AUS0080001001Y99901600000000000000011234567806999999')
    return '1144'.encode(encoding) + bitmap + fields.encode(encoding)
# Pre-built message fixtures: the same sample message in all four
# encoding / bitmap-representation combinations.
message_ascii_raw = test_message()
message_ebcdic_raw = test_message('cp500')
message_ascii_raw_hex = test_message(hex_bitmap=True)
message_ebcdic_raw_hex = test_message('cp500', hex_bitmap=True)
| [
"binascii.hexlify"
] | [((382, 413), 'binascii.hexlify', 'binascii.hexlify', (['binary_bitmap'], {}), '(binary_bitmap)\n', (398, 413), False, 'import binascii\n')] |
# Allow tests/ directory to see faster_than_requests/ package on PYTHONPATH
import sys
from pathlib import Path
# append the repository root (two levels up from this file) to sys.path
sys.path.append(str(Path(__file__).parent.parent))
| [
"pathlib.Path"
] | [((132, 146), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (136, 146), False, 'from pathlib import Path\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.conf import settings
from django.http.response import HttpResponse
from django.views.generic import View
class HealthCheckView(View):
    """Liveness-probe endpoint: GET always answers 200 with the body
    configured in settings.HEALTH_CHECK_BODY."""

    def get(self, request, *args, **kwargs):
        body = settings.HEALTH_CHECK_BODY
        return HttpResponse(body, status=200)
| [
"django.http.response.HttpResponse"
] | [((290, 342), 'django.http.response.HttpResponse', 'HttpResponse', (['settings.HEALTH_CHECK_BODY'], {'status': '(200)'}), '(settings.HEALTH_CHECK_BODY, status=200)\n', (302, 342), False, 'from django.http.response import HttpResponse\n')] |
from assertpy import assert_that
import year2020.day21.reader as reader
def test_example():
    """reader.read_lines turns each '<ingredients> (contains <allergens>)'
    line into an (ingredient_list, allergen_list) pair."""
    lines = ['mxmxvkd kfcds sqjhc nhms (contains dairy, fish)\n',
             'trh fvjkl sbzzf mxmxvkd (contains dairy)\n',
             'sqjhc fvjkl (contains soy)\n',
             'sqjhc mxmxvkd sbzzf (contains fish)\n']
    expected = [(['mxmxvkd', 'kfcds', 'sqjhc', 'nhms'], ['dairy', 'fish']),
                (['trh', 'fvjkl', 'sbzzf', 'mxmxvkd'], ['dairy']),
                (['sqjhc', 'fvjkl'], ['soy']),
                (['sqjhc', 'mxmxvkd', 'sbzzf'], ['fish'])]
    assert_that(reader.read_lines(lines)).is_equal_to(expected)
| [
"assertpy.assert_that",
"year2020.day21.reader.read_lines"
] | [((332, 356), 'year2020.day21.reader.read_lines', 'reader.read_lines', (['lines'], {}), '(lines)\n', (349, 356), True, 'import year2020.day21.reader as reader\n'), ((361, 380), 'assertpy.assert_that', 'assert_that', (['result'], {}), '(result)\n', (372, 380), False, 'from assertpy import assert_that\n')] |
from .test_utils import (
BaseManagerTestCase,
skip_unless_module
)
from pulsar.managers.queued_drmaa import DrmaaQueueManager
class DrmaaManagerTest(BaseManagerTestCase):
    """Integration tests for DrmaaQueueManager; the actual job tests are
    skipped unless the `drmaa` module is importable."""

    def setUp(self):
        super(DrmaaManagerTest, self).setUp()
        self._set_manager()

    def tearDown(self):
        # Bug fix: this previously called super(...).setUp(), re-initializing
        # the base fixture during teardown instead of tearing it down.
        super(DrmaaManagerTest, self).tearDown()
        self.manager.shutdown()

    def _set_manager(self, **kwds):
        # Fresh manager per test; kwds let a test tweak the manager config.
        self.manager = DrmaaQueueManager('_default_', self.app, **kwds)

    @skip_unless_module("drmaa")
    def test_simple_execution(self):
        self._test_simple_execution(self.manager)

    @skip_unless_module("drmaa")
    def test_cancel(self):
        self._test_cancelling(self.manager)
| [
"pulsar.managers.queued_drmaa.DrmaaQueueManager"
] | [((442, 490), 'pulsar.managers.queued_drmaa.DrmaaQueueManager', 'DrmaaQueueManager', (['"""_default_"""', 'self.app'], {}), "('_default_', self.app, **kwds)\n", (459, 490), False, 'from pulsar.managers.queued_drmaa import DrmaaQueueManager\n')] |
import dico_command
class Basic(dico_command.Addon):
    # Minimal addon exposing a single latency-check command.
    @dico_command.command("ping")
    async def ping(self, ctx: dico_command.Context):
        # bot.ping is presumably in seconds -- reported as whole milliseconds
        await ctx.reply(f"Pong! {round(self.bot.ping*1000)}ms")
def load(bot: dico_command.Bot) -> None:
    # Framework entry point: called when this extension module is loaded.
    bot.load_addons(Basic)
def unload(bot: dico_command.Bot) -> None:
    # Framework entry point: called when this extension module is unloaded.
    bot.unload_addons(Basic)
| [
"dico_command.command"
] | [((60, 88), 'dico_command.command', 'dico_command.command', (['"""ping"""'], {}), "('ping')\n", (80, 88), False, 'import dico_command\n')] |
#!/usr/bin/env python3
from datetime import datetime, timezone, date
import os
import sys
import boto3
import logging
import json
# setup global logger
logger = logging.getLogger("SnapTool")
# set log level from the LogLevel env var; strip()/upper() make the value
# tolerant of whitespace and case ("info", " DEBUG ", ...)
LOGLEVEL = os.environ['LogLevel'].strip()
logger.setLevel(LOGLEVEL.upper())
# silence noisy botocore internals; only their errors get through
logging.getLogger("botocore").setLevel(logging.ERROR)
# setup global RDS client
rds = boto3.client("rds")
# rds snapshot tool tag name (tag key used to mark snapshots owned by this tool)
toolTagKey="SnapTool"
def json_serial(obj):
    """json.dumps `default=` hook: render datetime/date values as ISO-8601
    strings; anything else is rejected."""
    if not isinstance(obj, (datetime, date)):
        raise TypeError("Type %s not serializable" % type(obj))
    return obj.isoformat()
def startTool(timeNow):
    """Main orchestration: for every configured DB cluster decide, as of
    `timeNow`, which yearly/monthly/weekly snapshot to create and which old
    snapshots to delete, then act on those decisions."""
    dbClusters=[]
    # DatabaseNames is either the literal "ALL" or a comma-separated list
    if os.environ['DatabaseNames'] == "ALL":
        resp=rds.describe_db_clusters()
        for db in resp['DBClusters']:
            dbClusters.append(db['DBClusterIdentifier'])
    else:
        dbClusters=os.environ['DatabaseNames'].split(",")
        #make all lowercase
        dbClusters=[x.lower() for x in dbClusters]
    # verifyClusters is defined elsewhere in this module; presumably it
    # validates the names against the account -- confirm its failure behavior
    verifyClusters(dbClusters)
    # one retention entry per time period, read from the environment
    backupConfig=[]
    backupConfig.append({
        "timePeriod": "yearly",
        "retention": int(os.environ['YearlyRetention'])
    })
    backupConfig.append({
        "timePeriod": "monthly",
        "retention": int(os.environ['MonthlyRetention'])
    })
    backupConfig.append({
        "timePeriod": "weekly",
        "retention": int(os.environ['WeeklyRetention'])
    })
    for db in dbClusters:
        logger.info("Analyzing snapshot status for DB:" + db)
        newSnapPeriod = []
        snapsToDelete = {}
        for period in backupConfig:
            if(period['retention']> 0):
                if (validBackupTime(timeNow, period['timePeriod'])):
                    newSnapPeriod.append(period['timePeriod'])
                    #check if there are snaps to delete keeping in mind we will be creating a new one soon
                    snapsToDelete[period['timePeriod']] = checkDeleteNeeded(db, period['timePeriod'], period['retention']-1)
                else:
                    #check if there are snaps to delete
                    snapsToDelete[period['timePeriod']] = checkDeleteNeeded(db, period['timePeriod'], period['retention'])
            else:
                logger.info("No " + period['timePeriod'] + " retention specified.")
                # delete any snaps if present
                deleteAllSnaps(db, period['timePeriod'])
        # a single snapshot can satisfy several periods at once, so the
        # snapshot is created once and tagged with every matching period
        if(newSnapPeriod != []):
            createSnap(db, newSnapPeriod)
        else:
            logger.info("No snapshot needed today.")
        #delete snaps if needed
        for timePeriod in snapsToDelete.keys():
            for snap in snapsToDelete[timePeriod]:
                deleteSnap(snap, timePeriod)
def validBackupTime(timeNow, timePeriod):
    """Return True when ``timeNow`` falls on the configured backup day for
    the given ``timePeriod`` ("yearly", "monthly" or "weekly").

    Yearly matches on configured day-of-month AND month name; monthly on
    day-of-month; weekly on weekday name. Any other period is a
    configuration error and terminates the process.
    """
    day_of_month = int(os.environ['BackupDate'])
    month_name = os.environ['BackupMonth']
    weekday_name = os.environ['WeeklyBackupDay']
    logger.debug("Checking if " + timePeriod + " retention policy is satisfied.")
    if timePeriod == "yearly":
        matched = timeNow.day == day_of_month and timeNow.strftime("%B") == month_name
    elif timePeriod == "monthly":
        matched = timeNow.day == day_of_month
    elif timePeriod == "weekly":
        matched = timeNow.strftime("%A") == weekday_name
    else:
        logger.error("Invalid time period. Exiting")
        sys.exit(1)
    if matched:
        logger.debug("Backup date matches specifications")
        return True
    logger.debug("Backup date does not match specifications. Skipping snapshot")
    return False
def checkDeleteNeeded(db, timePeriod, retention):
    """Return the oldest snapshots of ``db`` for ``timePeriod`` that exceed
    the allowed count ``retention`` (snapshots come back oldest-first from
    getSnaps, so the surplus is the leading slice).

    :param db: cluster identifier
    :param timePeriod: "yearly" | "monthly" | "weekly"
    :param retention: number of snapshots allowed to remain
    :return: list of snapshot records to delete (possibly empty)
    """
    snaps = getSnaps(db, timePeriod)
    if snaps is None or len(snaps) < retention:
        return []
    # BUG FIX: the original returned snaps[:-retention], which is wrong for
    # retention == 0 because snaps[:-0] == snaps[:0] == [] — so when a tier's
    # retention is 1 (startTool passes retention-1 == 0 on backup day) old
    # snapshots were never scheduled for deletion. Compute the slice bound
    # explicitly instead.
    return snaps[:len(snaps) - retention]
def deleteAllSnaps(db,timePeriod):
    """Delete every snapshot this tool tracks for ``db`` under the given
    ``timePeriod`` tag (used when that tier's retention is disabled)."""
    existing = getSnaps(db, timePeriod)
    if existing is None:
        return
    logger.info("Removing any old " + timePeriod + " snapshots.")
    for candidate in existing:
        deleteSnap(candidate, timePeriod)
def getSnaps(db, timePeriod):
    """Return this tool's snapshots of ``db`` tagged with ``timePeriod``,
    sorted oldest-first, or None when there are none.

    Two sources: when ``dateSimulationDebugFile`` is set, snapshots are read
    from a local JSON file (test/debug mode); otherwise they are fetched
    from RDS and filtered by the tool's tag.
    """
    validSnaps = []
    if ("dateSimulationDebugFile" in os.environ):
        # snapshot info is stored in local file for debugging
        snapStore = {}
        try:
            with open(os.environ['dateSimulationDebugFile'], 'r') as fp:
                snapStore = json.load(fp)
        except Exception:
            logger.exception("Failed to load snapshot store file. Failing")
            sys.exit(1)
        for snap in snapStore[db]:
            if (timePeriod in snap['Tag']):
                # time period matches
                # convert date strings to datetime objects (stored as UTC ISO strings)
                snap['SnapshotCreateTime'] = datetime.strptime(snap['SnapshotCreateTime'],
                                                              "%Y-%m-%dT%H:%M:%S.%f+00:00").replace(tzinfo=timezone.utc)
                validSnaps.append(snap)
    else:
        # Live mode: list manual cluster snapshots and keep those carrying
        # this tool's tag with a matching time-period value.
        snaps = rds.describe_db_cluster_snapshots(
            DBClusterIdentifier=db,
            SnapshotType="manual"
        )
        for s in snaps['DBClusterSnapshots']:
            tags = rds.list_tags_for_resource(ResourceName=s['DBClusterSnapshotArn'])
            for t in tags['TagList']:
                if t['Key'] == toolTagKey and timePeriod in t['Value']:
                    validSnaps.append(s)
    if (len(validSnaps) > 0):
        # sort snaps by date, oldest first (callers rely on this ordering)
        sortedArray = sorted(
            validSnaps,
            key=lambda x: x['SnapshotCreateTime'],
            reverse=False
        )
        return sortedArray
    else:
        return None
def createSnap(db, tags):
    """Create one snapshot of ``db`` tagged with every period in ``tags``.

    In debug mode (``dateSimulationDebugFile`` set) the snapshot record is
    appended to the local JSON store using the simulated ``debugDate``;
    otherwise a real RDS cluster snapshot named ``<db>-YYYY-MM-DD`` is
    created with the tool's tag.
    """
    logger.info("Creating snapshot on DB:" + db + " with tags:" + str(tags))
    if ("dateSimulationDebugFile" in os.environ):
        # snapshot info is stored in local file for debugging
        # get simulated date from env var
        simDate = datetime.strptime(os.environ['debugDate'],
                                    "%Y-%m-%dT%H:%M:%S.%f+00:00").replace(tzinfo=timezone.utc)
        snap = {
            "Tag": " ".join(tags),
            "SnapshotCreateTime": simDate,
            "DBClusterIdentifier" : db
        }
        try:
            with open(os.environ['dateSimulationDebugFile'], 'r') as json_data:
                snapJson= json.load(json_data)
            snapJson[db].append(snap)
            with open(os.environ['dateSimulationDebugFile'], 'w') as json_data:
                json.dump(snapJson, json_data, default=json_serial)
        except Exception:
            logger.exception("Failed to read or write snapshot store file. Failing")
            sys.exit(1)
    else:
        # NOTE: uses local (naive) datetime.now() for the snapshot name,
        # while scheduling elsewhere uses UTC — confirm this is intended.
        snapshotName=db + "-" + datetime.now().strftime('%Y-%m-%d')
        rds.create_db_cluster_snapshot(
            DBClusterSnapshotIdentifier=snapshotName,
            DBClusterIdentifier=db,
            Tags=[
                {
                    "Key": toolTagKey,
                    "Value": " ".join(tags)
                }
            ])
def deleteSnap(snapToDelete, timePeriod):
    """Remove ``timePeriod`` from a snapshot, deleting the snapshot entirely
    when that was its only remaining period tag.

    A snapshot can serve several tiers at once (e.g. "weekly monthly"); this
    strips one tier and only physically deletes when none remain. Works on
    the local JSON store in debug mode, on RDS tags otherwise.
    """
    logger.debug("Received a delete request for the " + timePeriod + " time period.")
    if ("dateSimulationDebugFile" in os.environ):
        # snapshot info is stored in local file for debugging
        #read local file
        snapJson={}
        try:
            with open(os.environ['dateSimulationDebugFile'], 'r') as json_data:
                snapJson = json.load(json_data)
        except Exception:
            logger.exception("Failed to read snapshot store file. Failing")
            sys.exit(1)
        #check all snaps to see if date matches
        newSnapList=[]
        for snap in snapJson[snapToDelete['DBClusterIdentifier']]:
            # convert date strings to datetime objects
            snap['SnapshotCreateTime'] = datetime.strptime(snap['SnapshotCreateTime'], "%Y-%m-%dT%H:%M:%S.%f+00:00").replace(tzinfo=timezone.utc)
            if (snap['SnapshotCreateTime'].date() == snapToDelete['SnapshotCreateTime'].date()):
                #found snap with correct date
                tags = snap['Tag'].split(" ")
                if(len(tags) ==1 and tags[0]==timePeriod):
                    #we can delete it (skip re-adding it to the new list)
                    logger.info("Deleting " + timePeriod + " snap from test file")
                    continue
                else:
                    #update tag to remove time period
                    tags.remove(timePeriod)
                    snap['Tag']=" ".join(tags)
            #if we are NOT deleting the snap we add its info to a new list
            newSnapList.append(snap)
        snapJson[snapToDelete['DBClusterIdentifier']]=newSnapList
        try:
            #write to file
            with open(os.environ['dateSimulationDebugFile'], 'w') as json_data:
                json.dump(snapJson, json_data, default=json_serial)
        except Exception:
            logger.exception("Failed to write snapshot store file. Failing")
            sys.exit(1)
    else:
        #using RDS information for snapshots
        # check tags on snapshot
        tags = rds.list_tags_for_resource(ResourceName=snapToDelete['DBClusterSnapshotArn'])
        for t in tags['TagList']:
            if t['Key'] == toolTagKey:
                tags = t['Value'].split(" ")
                if (len(tags) == 1 and tags[0] == timePeriod):
                    # if the time period specified is the only remaining timeperiod we can delete it
                    logger.info("Deleting snapshot: " + snapToDelete['DBClusterSnapshotIdentifier'] + " from RDS.")
                    #delete from RDS
                    rds.delete_db_cluster_snapshot(DBClusterSnapshotIdentifier=snapToDelete['DBClusterSnapshotArn'])
                else:
                    # update tag to remove time period
                    logger.info("Removing time period tag:" + timePeriod + " from snapshot:" + snapToDelete['DBClusterSnapshotIdentifier'])
                    tags.remove(timePeriod)
                    #rds update tag on snapshot
                    t['Value']= " ".join(tags)
                    rds.add_tags_to_resource(ResourceName=snapToDelete['DBClusterSnapshotArn'], Tags=[t])
                break
def verifyClusters(dbClusters):
    """Terminate the process unless every identifier in ``dbClusters`` names
    an existing Aurora cluster in this account."""
    known = {cluster['DBClusterIdentifier']
             for cluster in rds.describe_db_clusters()['DBClusters']}
    for db in dbClusters:
        logger.debug("Checking if DB:" + db + " is an existing Aurora Cluster.")
        if db not in known:
            logger.error("DB:" + db + " is NOT a valid cluster. Failing")
            sys.exit(1)
        logger.debug("DB:" + db + " is a valid cluster.")
def lambda_handler(event, context):
    """AWS Lambda entry point: validate required configuration, then run the
    snapshot tool against the current UTC time.

    :param event: Lambda event payload (unused)
    :param context: Lambda runtime context (unused)
    """
    logger.info("Starting Aurora Snapshot Generator tool")
    logger.debug("Environment Variables:")
    for name in os.environ:
        logger.debug("Found {}={}".format(name, os.environ[name]))
    logger.debug("Checking for required env vars.")
    required = ('DatabaseNames', 'WeeklyRetention', 'MonthlyRetention',
                'YearlyRetention', 'WeeklyBackupDay', 'BackupDate', 'BackupMonth')
    for name in required:
        if name not in os.environ:
            logger.error("Required variable:" + name + " not found. Exiting.")
            sys.exit(1)
    now = datetime.now(timezone.utc)
    logger.debug("Month:" + str(now.strftime("%B")) + " Day:" + str(now.day) + " DOW:" + str(now.strftime("%A")))
    startTool(now)
    logger.info("End of Aurora Snapshot Generator tool")
| [
"logging.getLogger",
"os.environ.keys",
"boto3.client",
"datetime.datetime.strptime",
"datetime.datetime.now",
"sys.exit",
"json.load",
"json.dump"
] | [((162, 191), 'logging.getLogger', 'logging.getLogger', (['"""SnapTool"""'], {}), "('SnapTool')\n", (179, 191), False, 'import logging\n'), ((369, 388), 'boto3.client', 'boto3.client', (['"""rds"""'], {}), "('rds')\n", (381, 388), False, 'import boto3\n'), ((11407, 11433), 'datetime.datetime.now', 'datetime.now', (['timezone.utc'], {}), '(timezone.utc)\n', (11419, 11433), False, 'from datetime import datetime, timezone, date\n'), ((283, 312), 'logging.getLogger', 'logging.getLogger', (['"""botocore"""'], {}), "('botocore')\n", (300, 312), False, 'import logging\n'), ((10782, 10793), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (10790, 10793), False, 'import sys\n'), ((11274, 11291), 'os.environ.keys', 'os.environ.keys', ([], {}), '()\n', (11289, 11291), False, 'import os\n'), ((11382, 11393), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (11390, 11393), False, 'import sys\n'), ((3649, 3660), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3657, 3660), False, 'import sys\n'), ((4501, 4514), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (4510, 4514), False, 'import json\n'), ((4630, 4641), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4638, 4641), False, 'import sys\n'), ((6034, 6106), 'datetime.datetime.strptime', 'datetime.strptime', (["os.environ['debugDate']", '"""%Y-%m-%dT%H:%M:%S.%f+00:00"""'], {}), "(os.environ['debugDate'], '%Y-%m-%dT%H:%M:%S.%f+00:00')\n", (6051, 6106), False, 'from datetime import datetime, timezone, date\n'), ((6456, 6476), 'json.load', 'json.load', (['json_data'], {}), '(json_data)\n', (6465, 6476), False, 'import json\n'), ((6613, 6664), 'json.dump', 'json.dump', (['snapJson', 'json_data'], {'default': 'json_serial'}), '(snapJson, json_data, default=json_serial)\n', (6622, 6664), False, 'import json\n'), ((6789, 6800), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6797, 6800), False, 'import sys\n'), ((7570, 7590), 'json.load', 'json.load', (['json_data'], {}), '(json_data)\n', (7579, 7590), False, 'import json\n'), 
((7706, 7717), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7714, 7717), False, 'import sys\n'), ((8941, 8992), 'json.dump', 'json.dump', (['snapJson', 'json_data'], {'default': 'json_serial'}), '(snapJson, json_data, default=json_serial)\n', (8950, 8992), False, 'import json\n'), ((9109, 9120), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9117, 9120), False, 'import sys\n'), ((6843, 6857), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6855, 6857), False, 'from datetime import datetime, timezone, date\n'), ((7953, 8028), 'datetime.datetime.strptime', 'datetime.strptime', (["snap['SnapshotCreateTime']", '"""%Y-%m-%dT%H:%M:%S.%f+00:00"""'], {}), "(snap['SnapshotCreateTime'], '%Y-%m-%dT%H:%M:%S.%f+00:00')\n", (7970, 8028), False, 'from datetime import datetime, timezone, date\n'), ((4865, 4940), 'datetime.datetime.strptime', 'datetime.strptime', (["snap['SnapshotCreateTime']", '"""%Y-%m-%dT%H:%M:%S.%f+00:00"""'], {}), "(snap['SnapshotCreateTime'], '%Y-%m-%dT%H:%M:%S.%f+00:00')\n", (4882, 4940), False, 'from datetime import datetime, timezone, date\n')] |
import torch
from fielder import FieldClass
import yaml
class ModelBase(FieldClass, torch.nn.Module):
    """Base Model Class: combines FieldClass (declarative field configuration;
    presumably supplies __post_init__ — confirm in fielder) with
    torch.nn.Module so subclasses are configurable trainable modules."""
class FCNet(ModelBase):
    """Fully connected ReLU network: input linear, ``n_hidden`` hidden
    linears, and an output linear. ``forward`` expects ``x["input"]`` and
    returns ``{"y": ...}``."""
    d_in: int = 10
    H: int = 100
    n_hidden: int = 1
    D_out: int = 1
    def __post_init__(self):
        super().__post_init__()
        self.input_linear = torch.nn.Linear(self.d_in, self.H)
        hidden = [torch.nn.Linear(self.H, self.H) for _ in range(self.n_hidden)]
        self.middle_linears = torch.nn.ModuleList(hidden)
        self.output_linear = torch.nn.Linear(self.H, self.D_out)
    def layer_n_activation(self, x, n_layer=-1):
        """Return the activations after layer ``n_layer`` (0 = raw input;
        default -1 runs through all hidden layers)."""
        act = x["input"]
        if n_layer == 0:
            return act
        act = self.input_linear(act).clamp(min=0)
        if n_layer == 1:
            return act
        for depth, hidden_layer in enumerate(self.middle_linears, start=2):
            act = hidden_layer(act).clamp(min=0)
            if depth == n_layer:
                return act
        return act
    def forward(self, x):
        hidden = self.layer_n_activation(x)
        return {"y": self.output_linear(hidden)}
class SimpleTransformer(ModelBase):
    """
    DON'T USE THIS AS A REFERENCE IMPLEMENTATION
    It's BiDir, ie it doesn't have any attention masking,
    and it hasn't been debugged carefully
    """
    # Configuration fields consumed by FieldClass/__post_init__.
    d_model: int = 16
    m_mlp: int = 2
    n_head: int = 1
    n_ctx: int = 2
    n_layer: int = 2
    d_out: int = 1
    def __post_init__(self):
        super().__post_init__()
        # d_model must divide evenly across heads.
        assert self.n_head * self.d_head == self.d_model
        self.layers = torch.nn.ModuleList(
            SimpleTransformerLayer(
                d_model=self.d_model,
                m_mlp=self.m_mlp,
                n_head=self.n_head,
                n_ctx=self.n_ctx,
            )
            for _ in range(self.n_layer)
        )
        # Readout: flatten the whole context window into one vector.
        self.output_linear = torch.nn.Linear(self.d_model * self.n_ctx, self.d_out)
    @property
    def d_head(self):
        # Per-head embedding width.
        return self.d_model // self.n_head
    @property
    def D_in(self):
        # Flattened input width: context length times model width.
        return self.n_ctx * self.d_model
    def torso(self, x):
        """Run x (reshaped to (batch, n_ctx, d_model)) through all layers."""
        x = x.reshape(-1, self.n_ctx, self.d_model)
        for layer in self.layers:
            x = layer(x)
        return x
    def forward(self, x):
        # Flatten the torso output before the linear readout.
        x = self.torso(x).reshape(-1, self.D_in)
        return self.output_linear(x)
class SimpleTransformerLayer(ModelBase):
    """One unmasked (bidirectional) transformer block:
    layer-norm -> self-attention (residual) -> layer-norm -> MLP (residual).
    """
    # Configuration fields consumed by FieldClass/__post_init__.
    d_model: int = 32
    m_mlp: int = 4
    n_head: int = 1
    n_ctx: int = 4
    def __post_init__(self):
        super().__post_init__()
        # d_model must divide evenly across heads.
        assert self.n_head * self.d_head == self.d_model
        self.mlp_linear1 = torch.nn.Linear(self.d_model, self.m_mlp * self.d_model)
        self.mlp_linear2 = torch.nn.Linear(self.m_mlp * self.d_model, self.d_model)
        self.query = torch.nn.Linear(self.d_model, self.d_model, bias=False)
        self.key = torch.nn.Linear(self.d_model, self.d_model, bias=False)
        self.value = torch.nn.Linear(self.d_model, self.d_model)
        self.dense = torch.nn.Linear(self.d_model, self.d_model)
    @property
    def d_head(self):
        # Per-head embedding width.
        return self.d_model // self.n_head
    def reshape_as_heads(self, x):
        """Split the last (d_model) axis into (n_head, d_head)."""
        new_shape = x.shape[:-1] + (self.n_head, self.d_head)
        return x.reshape(*new_shape)
    def reshape_as_d_model(self, x):
        """Merge the trailing (n_head, d_head) axes back into d_model."""
        new_shape = x.shape[:-2] + (self.d_model,)
        return x.reshape(*new_shape)
    def attn(self, x):
        """Multi-head self-attention with 1/sqrt(d_head) logit scaling."""
        q = self.reshape_as_heads(self.query(x))
        k = self.reshape_as_heads(self.key(x))
        v = self.reshape_as_heads(self.value(x))
        attn_logits = torch.einsum("bshi,bthi->bhst", q, k) / torch.sqrt(
            torch.tensor(self.d_head, dtype=torch.float)
        )
        # FIX: use the functional softmax instead of constructing a fresh
        # torch.nn.Softmax module on every forward pass — identical result,
        # no throwaway module allocation.
        attn_weights = torch.softmax(attn_logits, dim=-1)
        attention_result = torch.einsum("bhst,bthi->bshi", attn_weights, v)
        result = self.reshape_as_d_model(attention_result)
        return self.dense(result)
    def mlp(self, x):
        """Two-layer position-wise MLP with ReLU (clamp at 0)."""
        m = self.mlp_linear1(x).clamp(0)
        return self.mlp_linear2(m)
    def forward(self, x):
        # NOTE(review): layer_norm over shape[1:] normalizes across the whole
        # (n_ctx, d_model) window, not just the feature axis — kept as-is to
        # preserve behavior (file's docstring disclaims correctness).
        x = torch.layer_norm(x, normalized_shape=x.shape[1:])
        a = self.attn(x)
        x = torch.layer_norm(x + a, normalized_shape=x.shape[1:])
        m = self.mlp(x)
        return x + m
| [
"torch.nn.Softmax",
"torch.tensor",
"torch.einsum",
"torch.nn.Linear",
"torch.layer_norm"
] | [((324, 358), 'torch.nn.Linear', 'torch.nn.Linear', (['self.d_in', 'self.H'], {}), '(self.d_in, self.H)\n', (339, 358), False, 'import torch\n'), ((525, 560), 'torch.nn.Linear', 'torch.nn.Linear', (['self.H', 'self.D_out'], {}), '(self.H, self.D_out)\n', (540, 560), False, 'import torch\n'), ((1796, 1850), 'torch.nn.Linear', 'torch.nn.Linear', (['(self.d_model * self.n_ctx)', 'self.d_out'], {}), '(self.d_model * self.n_ctx, self.d_out)\n', (1811, 1850), False, 'import torch\n'), ((2545, 2601), 'torch.nn.Linear', 'torch.nn.Linear', (['self.d_model', '(self.m_mlp * self.d_model)'], {}), '(self.d_model, self.m_mlp * self.d_model)\n', (2560, 2601), False, 'import torch\n'), ((2629, 2685), 'torch.nn.Linear', 'torch.nn.Linear', (['(self.m_mlp * self.d_model)', 'self.d_model'], {}), '(self.m_mlp * self.d_model, self.d_model)\n', (2644, 2685), False, 'import torch\n'), ((2708, 2763), 'torch.nn.Linear', 'torch.nn.Linear', (['self.d_model', 'self.d_model'], {'bias': '(False)'}), '(self.d_model, self.d_model, bias=False)\n', (2723, 2763), False, 'import torch\n'), ((2783, 2838), 'torch.nn.Linear', 'torch.nn.Linear', (['self.d_model', 'self.d_model'], {'bias': '(False)'}), '(self.d_model, self.d_model, bias=False)\n', (2798, 2838), False, 'import torch\n'), ((2860, 2903), 'torch.nn.Linear', 'torch.nn.Linear', (['self.d_model', 'self.d_model'], {}), '(self.d_model, self.d_model)\n', (2875, 2903), False, 'import torch\n'), ((2925, 2968), 'torch.nn.Linear', 'torch.nn.Linear', (['self.d_model', 'self.d_model'], {}), '(self.d_model, self.d_model)\n', (2940, 2968), False, 'import torch\n'), ((3709, 3757), 'torch.einsum', 'torch.einsum', (['"""bhst,bthi->bshi"""', 'attn_weights', 'v'], {}), "('bhst,bthi->bshi', attn_weights, v)\n", (3721, 3757), False, 'import torch\n'), ((3990, 4039), 'torch.layer_norm', 'torch.layer_norm', (['x'], {'normalized_shape': 'x.shape[1:]'}), '(x, normalized_shape=x.shape[1:])\n', (4006, 4039), False, 'import torch\n'), ((4077, 4130), 
'torch.layer_norm', 'torch.layer_norm', (['(x + a)'], {'normalized_shape': 'x.shape[1:]'}), '(x + a, normalized_shape=x.shape[1:])\n', (4093, 4130), False, 'import torch\n'), ((3502, 3539), 'torch.einsum', 'torch.einsum', (['"""bshi,bthi->bhst"""', 'q', 'k'], {}), "('bshi,bthi->bhst', q, k)\n", (3514, 3539), False, 'import torch\n'), ((3644, 3668), 'torch.nn.Softmax', 'torch.nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (3660, 3668), False, 'import torch\n'), ((423, 454), 'torch.nn.Linear', 'torch.nn.Linear', (['self.H', 'self.H'], {}), '(self.H, self.H)\n', (438, 454), False, 'import torch\n'), ((3566, 3610), 'torch.tensor', 'torch.tensor', (['self.d_head'], {'dtype': 'torch.float'}), '(self.d_head, dtype=torch.float)\n', (3578, 3610), False, 'import torch\n')] |
import numpy as np
from pytope import Polytope
import matplotlib.pyplot as plt
# Demo script exercising the pytope Polytope API: construction from bounds,
# vertices and halfspaces; arithmetic (translate, scale, rotate, Minkowski
# sum, intersection, Pontryagin difference); and matplotlib plotting.
np.random.seed(1)
# Create a polytope in R^2 with -1 <= x1 <= 4, -2 <= x2 <= 3
lower_bound1 = (-1, -2) # [-1, -2]' <= x
upper_bound1 = (4, 3) # x <= [4, 3]'
P1 = Polytope(lb=lower_bound1, ub=upper_bound1)
# Print the halfspace representation A*x <= b and H = [A b]
print('P1: ', repr(P1))
print('A =\n', P1.A)
print('b =\n', P1.b)
print('H =\n', P1.H)
# Create a square polytope in R^2 from specifying the four vertices
V2 = np.array([[1, 0], [0, -1], [-1, 0], [0, 1]])
P2 = Polytope(V2)
# Print the array of vertices:
print('P2: ', repr(P2))
print('V =\n', P2.V)
# Create a triangle in R^2 from specifying three half spaces (inequalities)
A3 = [[1, 0], [0, 1], [-1, -1]]
b3 = (2, 1, -1.5)
P3 = Polytope(A3, b3)
# Print the halfspace representation A*x <= b and H = [A b]
print('P3: ', repr(P3))
print('A =\n', P3.A)
print('b =\n', P3.b)
print('H =\n', P3.H)
# Determine and print the vertices:
print('V =\n', P3.V)
# P4: P3 shifted by a point p4
p4 = (1.4, 0.7)
P4 = P3 + p4
# P5: P4 shifted by a point p5 (in negative direction)
p5 = [0.4, 2]
P5 = P4 - p5
# P6: P2 scaled by s6 and shifted by p6
s6 = 0.2
p6 = -np.array([[0.4], [1.6]])
P6 = s6 * P2 + p6
# P7: P2 rotated 20 degrees (both clockwise and counter-clockwise)
rot7 = np.pi / 9.0
rot_mat7 = np.array([[np.cos(rot7), -np.sin(rot7)],
                     [np.sin(rot7), np.cos(rot7)]])
P7 = rot_mat7 * P2
P7_inv = P2 * rot_mat7
# P8: -P6
P8 = -P6
# P9: The convex hull of a set of 30 random points in [1, 2]' <= x [2, 3]'
V9 = np.random.uniform((1, 2), (2, 3), (30, 2))
P9 = Polytope(V9)
P9.minimize_V_rep()
# P10: the Minkowski sum of two squares (one large and one rotated and smaller)
P10_1 = Polytope(lb=(-0.6, -0.6), ub=(0.6, 0.6))
P10_2 = rot_mat7 * Polytope(lb=(-0.3, -0.3), ub=(0.3, 0.3))
P10 = P10_1 + P10_2
# Plot all of the polytopes.
# See the matplotlib.patches.Polygon documentation for a list of valid kwargs
fig1, ax1 = plt.subplots(num=1)
plt.grid()
plt.axis([-1.5, 4.5, -2.5, 3.5])
P1.plot(ax1, fill=False, edgecolor='r', linewidth=2)
P2.plot(ax1, facecolor='g', edgecolor=(0, 0, 0), linewidth=1)
P3.plot(ax1, facecolor='b', edgecolor='k', linewidth=2, alpha=0.5)
P4.plot(ax1, facecolor='lightsalmon')
plt.scatter(P4.V[:, 0], P4.V[:, 1], c='k', marker='x') # the vertices of P4
# Polytope implements an additional keyword edgealpha:
P5.plot(ax1, fill=False, edgecolor='b', linewidth=8, edgealpha=0.2)
plt.plot(P5.centroid[0], P5.centroid[1], 'o') # the centroid of P5
P6.plot(ax1, facecolor='g', edgecolor=(0, 0, 0), linewidth=1)
P7.plot(ax1, facecolor='g', edgecolor=(0, 0, 0), alpha=0.3,
        linewidth=1, edgealpha=0.3)
P7_inv.plot(ax1, facecolor='g', edgecolor=(0, 0, 0), alpha=0.3,
            linewidth=1, edgealpha=0.3, linestyle='--')
P8.plot(ax1, facecolor='g', edgecolor=(0, 0, 0), alpha=0.3,
        linewidth=1, edgealpha=0.3)
P9.plot(ax1, facecolor='gray', alpha=0.6, edgecolor='k')
plt.plot(V9[:, 0], V9[:, 1], 'or', marker='o', markersize=2) # random points
plt.plot(P9.V[:, 0], P9.V[:, 1], 'og', marker='o', markersize=1) # P9's vertices
plt.title('Demonstration of various polytope operations')
# Plot the Minkowski sum of two squares
fig2, ax2 = plt.subplots(num=2)
plt.grid()
plt.axis([-2.5, 2.5, -2.5, 2.5])
P10_1.plot(ax2, fill=False, edgecolor=(1, 0, 0))
P10_2.plot(ax2, fill=False, edgecolor=(0, 0, 1))
P10.plot(ax2, fill=False,
         edgecolor=(1, 0, 1), linestyle='--', linewidth=2)
for p in P10_1.V: # the smaller square + each of the vertices of the larger one
  (P10_2 + p).plot(ax2, facecolor='grey', alpha=0.4,
                   edgecolor='k', linewidth=0.5)
ax2.legend((r'$P$', r'$Q$', r'$P \oplus Q$'))
plt.title('Minkowski sum of two polytopes')
# Plot two rotated rectangles and their intersection
rot1 = -np.pi / 18.0
rot_mat1 = np.array([[np.cos(rot1), -np.sin(rot1)],
                     [np.sin(rot1), np.cos(rot1)]])
rot2 = np.pi / 18.0
rot_mat2 = np.array([[np.cos(rot2), -np.sin(rot2)],
                     [np.sin(rot2), np.cos(rot2)]])
P_i1 = rot_mat1 * Polytope(lb=(-2, -1), ub=(1, 1))
P_i2 = rot_mat2 * Polytope(lb=(0, 0), ub=(2, 2))
P_i = P_i1 & P_i2 # intersection
fig3, ax3 = plt.subplots(num=3)
plt.grid()
plt.axis([-3.5, 3.5, -3.5, 3.5])
P_i1.plot(fill=False, edgecolor=(1, 0, 0), linestyle='--')
P_i2.plot(fill=False, edgecolor=(0, 0, 1), linestyle='--')
P_i.plot(fill=False,
         edgecolor=(1, 0, 1), linestyle='-', linewidth=2)
ax3.legend((r'$P$', r'$Q$', r'$P \cap Q$'))
plt.title('Intersection of two polytopes')
# Plot two polytopes and their Pontryagin difference
P_m1 = Polytope(lb=(-3, -3), ub=(3, 3))
P_m2 = Polytope([[1, 0], [0, -1], [-1, 0], [0, 1]])
P_diff = P_m1 - P_m2
fig4, ax4 = plt.subplots(num=4)
plt.grid()
plt.axis([-3.5, 3.5, -3.5, 3.5])
P_m1.plot(fill=False, edgecolor=(1, 0, 0))
P_m2.plot(fill=False, edgecolor=(0, 0, 1))
P_diff.plot(fill=False,
            edgecolor=(1, 0, 1), linestyle='--', linewidth=2)
ax4.legend((r'$P$', r'$Q$', r'$P \ominus Q$'))
plt.title('Pontryagin difference of two polytopes')
plt.setp([ax1, ax2, ax3, ax4], xlabel=r'$x_1$', ylabel=r'$x_2$')
| [
"matplotlib.pyplot.setp",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.title",
"numpy.sin",
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.random.seed",
"matplotlib.pyplot.scatter",
"numpy.random.uniform",
"numpy.cos",
"matplotlib.pyplot.axis",
"pytope.Polytope",
"matplotlib.pyplot.subplots... | [((82, 99), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (96, 99), True, 'import numpy as np\n'), ((247, 289), 'pytope.Polytope', 'Polytope', ([], {'lb': 'lower_bound1', 'ub': 'upper_bound1'}), '(lb=lower_bound1, ub=upper_bound1)\n', (255, 289), False, 'from pytope import Polytope\n'), ((511, 555), 'numpy.array', 'np.array', (['[[1, 0], [0, -1], [-1, 0], [0, 1]]'], {}), '([[1, 0], [0, -1], [-1, 0], [0, 1]])\n', (519, 555), True, 'import numpy as np\n'), ((561, 573), 'pytope.Polytope', 'Polytope', (['V2'], {}), '(V2)\n', (569, 573), False, 'from pytope import Polytope\n'), ((782, 798), 'pytope.Polytope', 'Polytope', (['A3', 'b3'], {}), '(A3, b3)\n', (790, 798), False, 'from pytope import Polytope\n'), ((1580, 1622), 'numpy.random.uniform', 'np.random.uniform', (['(1, 2)', '(2, 3)', '(30, 2)'], {}), '((1, 2), (2, 3), (30, 2))\n', (1597, 1622), True, 'import numpy as np\n'), ((1628, 1640), 'pytope.Polytope', 'Polytope', (['V9'], {}), '(V9)\n', (1636, 1640), False, 'from pytope import Polytope\n'), ((1750, 1790), 'pytope.Polytope', 'Polytope', ([], {'lb': '(-0.6, -0.6)', 'ub': '(0.6, 0.6)'}), '(lb=(-0.6, -0.6), ub=(0.6, 0.6))\n', (1758, 1790), False, 'from pytope import Polytope\n'), ((1991, 2010), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'num': '(1)'}), '(num=1)\n', (2003, 2010), True, 'import matplotlib.pyplot as plt\n'), ((2011, 2021), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2019, 2021), True, 'import matplotlib.pyplot as plt\n'), ((2022, 2054), 'matplotlib.pyplot.axis', 'plt.axis', (['[-1.5, 4.5, -2.5, 3.5]'], {}), '([-1.5, 4.5, -2.5, 3.5])\n', (2030, 2054), True, 'import matplotlib.pyplot as plt\n'), ((2275, 2329), 'matplotlib.pyplot.scatter', 'plt.scatter', (['P4.V[:, 0]', 'P4.V[:, 1]'], {'c': '"""k"""', 'marker': '"""x"""'}), "(P4.V[:, 0], P4.V[:, 1], c='k', marker='x')\n", (2286, 2329), True, 'import matplotlib.pyplot as plt\n'), ((2475, 2520), 'matplotlib.pyplot.plot', 'plt.plot', 
(['P5.centroid[0]', 'P5.centroid[1]', '"""o"""'], {}), "(P5.centroid[0], P5.centroid[1], 'o')\n", (2483, 2520), True, 'import matplotlib.pyplot as plt\n'), ((2970, 3030), 'matplotlib.pyplot.plot', 'plt.plot', (['V9[:, 0]', 'V9[:, 1]', '"""or"""'], {'marker': '"""o"""', 'markersize': '(2)'}), "(V9[:, 0], V9[:, 1], 'or', marker='o', markersize=2)\n", (2978, 3030), True, 'import matplotlib.pyplot as plt\n'), ((3048, 3112), 'matplotlib.pyplot.plot', 'plt.plot', (['P9.V[:, 0]', 'P9.V[:, 1]', '"""og"""'], {'marker': '"""o"""', 'markersize': '(1)'}), "(P9.V[:, 0], P9.V[:, 1], 'og', marker='o', markersize=1)\n", (3056, 3112), True, 'import matplotlib.pyplot as plt\n'), ((3129, 3186), 'matplotlib.pyplot.title', 'plt.title', (['"""Demonstration of various polytope operations"""'], {}), "('Demonstration of various polytope operations')\n", (3138, 3186), True, 'import matplotlib.pyplot as plt\n'), ((3240, 3259), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'num': '(2)'}), '(num=2)\n', (3252, 3259), True, 'import matplotlib.pyplot as plt\n'), ((3260, 3270), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3268, 3270), True, 'import matplotlib.pyplot as plt\n'), ((3271, 3303), 'matplotlib.pyplot.axis', 'plt.axis', (['[-2.5, 2.5, -2.5, 2.5]'], {}), '([-2.5, 2.5, -2.5, 2.5])\n', (3279, 3303), True, 'import matplotlib.pyplot as plt\n'), ((3716, 3759), 'matplotlib.pyplot.title', 'plt.title', (['"""Minkowski sum of two polytopes"""'], {}), "('Minkowski sum of two polytopes')\n", (3725, 3759), True, 'import matplotlib.pyplot as plt\n'), ((4209, 4228), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'num': '(3)'}), '(num=3)\n', (4221, 4228), True, 'import matplotlib.pyplot as plt\n'), ((4229, 4239), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4237, 4239), True, 'import matplotlib.pyplot as plt\n'), ((4240, 4272), 'matplotlib.pyplot.axis', 'plt.axis', (['[-3.5, 3.5, -3.5, 3.5]'], {}), '([-3.5, 3.5, -3.5, 3.5])\n', (4248, 4272), True, 'import 
matplotlib.pyplot as plt\n'), ((4514, 4556), 'matplotlib.pyplot.title', 'plt.title', (['"""Intersection of two polytopes"""'], {}), "('Intersection of two polytopes')\n", (4523, 4556), True, 'import matplotlib.pyplot as plt\n'), ((4618, 4650), 'pytope.Polytope', 'Polytope', ([], {'lb': '(-3, -3)', 'ub': '(3, 3)'}), '(lb=(-3, -3), ub=(3, 3))\n', (4626, 4650), False, 'from pytope import Polytope\n'), ((4658, 4702), 'pytope.Polytope', 'Polytope', (['[[1, 0], [0, -1], [-1, 0], [0, 1]]'], {}), '([[1, 0], [0, -1], [-1, 0], [0, 1]])\n', (4666, 4702), False, 'from pytope import Polytope\n'), ((4736, 4755), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'num': '(4)'}), '(num=4)\n', (4748, 4755), True, 'import matplotlib.pyplot as plt\n'), ((4756, 4766), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4764, 4766), True, 'import matplotlib.pyplot as plt\n'), ((4767, 4799), 'matplotlib.pyplot.axis', 'plt.axis', (['[-3.5, 3.5, -3.5, 3.5]'], {}), '([-3.5, 3.5, -3.5, 3.5])\n', (4775, 4799), True, 'import matplotlib.pyplot as plt\n'), ((5019, 5070), 'matplotlib.pyplot.title', 'plt.title', (['"""Pontryagin difference of two polytopes"""'], {}), "('Pontryagin difference of two polytopes')\n", (5028, 5070), True, 'import matplotlib.pyplot as plt\n'), ((5072, 5134), 'matplotlib.pyplot.setp', 'plt.setp', (['[ax1, ax2, ax3, ax4]'], {'xlabel': '"""$x_1$"""', 'ylabel': '"""$x_2$"""'}), "([ax1, ax2, ax3, ax4], xlabel='$x_1$', ylabel='$x_2$')\n", (5080, 5134), True, 'import matplotlib.pyplot as plt\n'), ((1203, 1227), 'numpy.array', 'np.array', (['[[0.4], [1.6]]'], {}), '([[0.4], [1.6]])\n', (1211, 1227), True, 'import numpy as np\n'), ((1810, 1850), 'pytope.Polytope', 'Polytope', ([], {'lb': '(-0.3, -0.3)', 'ub': '(0.3, 0.3)'}), '(lb=(-0.3, -0.3), ub=(0.3, 0.3))\n', (1818, 1850), False, 'from pytope import Polytope\n'), ((4081, 4113), 'pytope.Polytope', 'Polytope', ([], {'lb': '(-2, -1)', 'ub': '(1, 1)'}), '(lb=(-2, -1), ub=(1, 1))\n', (4089, 4113), False, 'from pytope import 
Polytope\n'), ((4132, 4162), 'pytope.Polytope', 'Polytope', ([], {'lb': '(0, 0)', 'ub': '(2, 2)'}), '(lb=(0, 0), ub=(2, 2))\n', (4140, 4162), False, 'from pytope import Polytope\n'), ((1355, 1367), 'numpy.cos', 'np.cos', (['rot7'], {}), '(rot7)\n', (1361, 1367), True, 'import numpy as np\n'), ((1407, 1419), 'numpy.sin', 'np.sin', (['rot7'], {}), '(rot7)\n', (1413, 1419), True, 'import numpy as np\n'), ((1421, 1433), 'numpy.cos', 'np.cos', (['rot7'], {}), '(rot7)\n', (1427, 1433), True, 'import numpy as np\n'), ((3857, 3869), 'numpy.cos', 'np.cos', (['rot1'], {}), '(rot1)\n', (3863, 3869), True, 'import numpy as np\n'), ((3909, 3921), 'numpy.sin', 'np.sin', (['rot1'], {}), '(rot1)\n', (3915, 3921), True, 'import numpy as np\n'), ((3923, 3935), 'numpy.cos', 'np.cos', (['rot1'], {}), '(rot1)\n', (3929, 3935), True, 'import numpy as np\n'), ((3981, 3993), 'numpy.cos', 'np.cos', (['rot2'], {}), '(rot2)\n', (3987, 3993), True, 'import numpy as np\n'), ((4033, 4045), 'numpy.sin', 'np.sin', (['rot2'], {}), '(rot2)\n', (4039, 4045), True, 'import numpy as np\n'), ((4047, 4059), 'numpy.cos', 'np.cos', (['rot2'], {}), '(rot2)\n', (4053, 4059), True, 'import numpy as np\n'), ((1370, 1382), 'numpy.sin', 'np.sin', (['rot7'], {}), '(rot7)\n', (1376, 1382), True, 'import numpy as np\n'), ((3872, 3884), 'numpy.sin', 'np.sin', (['rot1'], {}), '(rot1)\n', (3878, 3884), True, 'import numpy as np\n'), ((3996, 4008), 'numpy.sin', 'np.sin', (['rot2'], {}), '(rot2)\n', (4002, 4008), True, 'import numpy as np\n')] |
#python libraries
import re
import os
# this package
from apetools.baseclass import BaseClass
from apetools.commons import enumerations
from apetools.commons import expressions
from apetools.commons.errors import ConfigurationError
MAC_UNAVAILABLE = "MAC Unavailable (use `netcfg`)"
class IfconfigError(ConfigurationError):
    """
    Raised when a user/configuration error is detected while interpreting
    ifconfig output (a ConfigurationError specialization).
    """
# end class Ifconfig error
class IfconfigCommand(BaseClass):
    """
    The IfconfigCommand interprets ifconfig output for one interface,
    extracting its IP and (on linux) MAC address.
    """
    def __init__(self, connection, interface, operating_system=None):
        """
        :param:

         - `connection`: A connection to the device
         - `interface`: The interface to check
         - `operating_system` : The operating system on the devices.
        """
        super(IfconfigCommand, self).__init__()
        self.connection = connection
        self.interface = interface
        self._operating_system = operating_system
        # Caches; note _ip_address and _output are never populated below and
        # remain for compatibility with earlier versions.
        self._ip_address = None
        self._mac_address = None
        self._output = None
        self._ip_expression = None
        return

    @property
    def operating_system(self):
        """
        :return: the operating system for the device to query
        """
        if self._operating_system is None:
            # Lazily fall back to whatever the connection reports.
            self._operating_system = self.connection.os
        return self._operating_system

    @property
    def ip_address(self):
        """
        :return: The IP Address of the interface (None if no line matches)
        """
        return self._match(self.ip_expression,
                           expressions.IP_ADDRESS_NAME)

    @property
    def ip_expression(self):
        """
        :return: a compiled expression to get the ip address

        :raise: IfconfigError if the operating system is not supported
        """
        if self._ip_expression is None:
            if self.operating_system == enumerations.OperatingSystem.linux:
                expression = expressions.LINUX_IP
            elif self.operating_system == enumerations.OperatingSystem.android:
                expression = expressions.ANDROID_IP
            else:
                # FIX: an unrecognized OS previously fell through and crashed
                # with an opaque UnboundLocalError on `expression`; raise the
                # error type this package defines for user errors instead.
                raise IfconfigError("Unknown operating system: {0}".format(self.operating_system))
            self._ip_expression = re.compile(expression)
        return self._ip_expression

    @property
    def mac_address(self):
        """
        :return: MAC Address of the interface (placeholder string on android)

        :raise: IfconfigError if the operating system is not supported
        """
        if self._mac_address is None:
            if self.operating_system == enumerations.OperatingSystem.linux:
                expression = expressions.LINUX_MAC
            elif self.operating_system == enumerations.OperatingSystem.android:
                # Android's ifconfig does not show the MAC; callers must use netcfg.
                self._mac_address = MAC_UNAVAILABLE
                return self._mac_address
            else:
                # FIX: same UnboundLocalError fall-through as in ip_expression.
                raise IfconfigError("Unknown operating system: {0}".format(self.operating_system))
            self._mac_address = self._match(re.compile(expression),
                                            expressions.MAC_ADDRESS_NAME)
        return self._mac_address

    @property
    def output(self):
        """
        :return: The output of the ifconfig command on the device

        Note: re-runs ifconfig on every access (no caching).
        """
        return self.connection.ifconfig(self.interface)

    def _match(self, expression, name):
        """
        :param:

         - `expression`: The regular expression to match
         - `name`: The group name to pull the match out of the line

        :return: The named-group that matched or None
        """
        for line in self.output.output:
            match = expression.search(line)
            if match:
                return match.group(name)
        # No match: log whatever came out on stderr to aid debugging.
        for line in self.output.error:
            self.logger.error(line)
        return
# end class IfconfigCommand
"re.compile"
] | [((2047, 2069), 're.compile', 're.compile', (['expression'], {}), '(expression)\n', (2057, 2069), False, 'import re\n'), ((2604, 2626), 're.compile', 're.compile', (['expression'], {}), '(expression)\n', (2614, 2626), False, 'import re\n')] |
import retro # pip install gym-retro
import numpy as np # pip install numpy
import cv2 # pip install opencv-python
import neat # pip install neat-python
import pickle # pip install cloudpickle
class Worker(object):
def __init__(self, genome, config):
self.genome = genome
self.config = config
def work(self):
self.env = retro.make('SonicTheHedgehog-Genesis', 'GreenHillZone.Act1')
self.env.reset()
ob, _, _, _ = self.env.step(self.env.action_space.sample())
inx = int(ob.shape[0]/8)
iny = int(ob.shape[1]/8)
done = False
net = neat.nn.FeedForwardNetwork.create(self.genome, self.config)
fitness = 0
xpos = 0
xpos_max = 0
counter = 0
imgarray = []
while not done:
# self.env.render()
ob = cv2.resize(ob, (inx, iny))
ob = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY)
ob = np.reshape(ob, (inx, iny))
imgarray = np.ndarray.flatten(ob)
imgarray = np.interp(imgarray, (0, 254), (-1, +1))
actions = net.activate(imgarray)
ob, rew, done, info = self.env.step(actions)
xpos = info['x']
if xpos > xpos_max:
xpos_max = xpos
counter = 0
fitness += 1
else:
counter += 1
if counter > 250:
done = True
if xpos == info['screen_x_end'] and xpos > 500:
fitness += 100000
done = True
print(fitness)
return fitness
def eval_genomes(genome, config):
worky = Worker(genome, config)
return worky.work()
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
'config-feedforward')
p = neat.Population(config)
p = neat.Checkpointer.restore_checkpoint('neat-checkpoint-13')
p.add_reporter(neat.StdOutReporter(True))
stats = neat.StatisticsReporter()
p.add_reporter(stats)
p.add_reporter(neat.Checkpointer(10))
pe = neat.ParallelEvaluator(10, eval_genomes)
winner = p.run(pe.evaluate)
with open('winner.pkl', 'wb') as output:
pickle.dump(winner, output, 1)
| [
"pickle.dump",
"neat.StdOutReporter",
"neat.Population",
"numpy.reshape",
"neat.Config",
"neat.nn.FeedForwardNetwork.create",
"neat.Checkpointer.restore_checkpoint",
"neat.StatisticsReporter",
"numpy.ndarray.flatten",
"cv2.cvtColor",
"numpy.interp",
"neat.ParallelEvaluator",
"cv2.resize",
... | [((1918, 2050), 'neat.Config', 'neat.Config', (['neat.DefaultGenome', 'neat.DefaultReproduction', 'neat.DefaultSpeciesSet', 'neat.DefaultStagnation', '"""config-feedforward"""'], {}), "(neat.DefaultGenome, neat.DefaultReproduction, neat.\n DefaultSpeciesSet, neat.DefaultStagnation, 'config-feedforward')\n", (1929, 2050), False, 'import neat\n'), ((2094, 2117), 'neat.Population', 'neat.Population', (['config'], {}), '(config)\n', (2109, 2117), False, 'import neat\n'), ((2122, 2180), 'neat.Checkpointer.restore_checkpoint', 'neat.Checkpointer.restore_checkpoint', (['"""neat-checkpoint-13"""'], {}), "('neat-checkpoint-13')\n", (2158, 2180), False, 'import neat\n'), ((2231, 2256), 'neat.StatisticsReporter', 'neat.StatisticsReporter', ([], {}), '()\n', (2254, 2256), False, 'import neat\n'), ((2323, 2363), 'neat.ParallelEvaluator', 'neat.ParallelEvaluator', (['(10)', 'eval_genomes'], {}), '(10, eval_genomes)\n', (2345, 2363), False, 'import neat\n'), ((2196, 2221), 'neat.StdOutReporter', 'neat.StdOutReporter', (['(True)'], {}), '(True)\n', (2215, 2221), False, 'import neat\n'), ((2294, 2315), 'neat.Checkpointer', 'neat.Checkpointer', (['(10)'], {}), '(10)\n', (2311, 2315), False, 'import neat\n'), ((2439, 2469), 'pickle.dump', 'pickle.dump', (['winner', 'output', '(1)'], {}), '(winner, output, 1)\n', (2450, 2469), False, 'import pickle\n'), ((403, 463), 'retro.make', 'retro.make', (['"""SonicTheHedgehog-Genesis"""', '"""GreenHillZone.Act1"""'], {}), "('SonicTheHedgehog-Genesis', 'GreenHillZone.Act1')\n", (413, 463), False, 'import retro\n'), ((694, 753), 'neat.nn.FeedForwardNetwork.create', 'neat.nn.FeedForwardNetwork.create', (['self.genome', 'self.config'], {}), '(self.genome, self.config)\n', (727, 753), False, 'import neat\n'), ((945, 971), 'cv2.resize', 'cv2.resize', (['ob', '(inx, iny)'], {}), '(ob, (inx, iny))\n', (955, 971), False, 'import cv2\n'), ((989, 1025), 'cv2.cvtColor', 'cv2.cvtColor', (['ob', 'cv2.COLOR_BGR2GRAY'], {}), '(ob, cv2.COLOR_BGR2GRAY)\n', 
(1001, 1025), False, 'import cv2\n'), ((1043, 1069), 'numpy.reshape', 'np.reshape', (['ob', '(inx, iny)'], {}), '(ob, (inx, iny))\n', (1053, 1069), True, 'import numpy as np\n'), ((1106, 1128), 'numpy.ndarray.flatten', 'np.ndarray.flatten', (['ob'], {}), '(ob)\n', (1124, 1128), True, 'import numpy as np\n'), ((1152, 1191), 'numpy.interp', 'np.interp', (['imgarray', '(0, 254)', '(-1, +1)'], {}), '(imgarray, (0, 254), (-1, +1))\n', (1161, 1191), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import time
import torch
import numpy as np
import torchvision
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from utils import *
from IPython import embed
class DCGAN(object):
def __init__(self, args):
self.args = args
self.model = dict()
self.data = dict()
self.rescache = dict()
self.device = args.use_gpu and torch.cuda.is_available()
def _report_settings(self):
''' Report the settings '''
str = '-' * 16
print('%sEnvironment Versions%s' % (str, str))
print("- Python : {}".format(sys.version.strip().split('|')[0]))
print("- PyTorch : {}".format(torch.__version__))
print("- TorchVison: {}".format(torchvision.__version__))
print("- USE_GPU : {}".format(self.device))
print('-' * 52)
def _model_loader(self):
self.model['generator'] = Generator(self.args.in_dim, self.args.gchannels)
self.model['discriminator'] = Discriminator(self.args.dchannels)
self.model['criterion'] = nn.BCELoss()
self.model['opti_gene'] = optim.Adam(self.model['generator'].parameters(), \
lr=self.args.base_lr, betas=(self.args.beta, 0.999))
self.model['opti_disc'] = optim.Adam(self.model['discriminator'].parameters(), \
lr=self.args.base_lr, betas=(self.args.beta, 0.999))
# self.model['scheduler'] = torch.optim.lr_scheduler.MultiStepLR(
# self.model['optimizer'], milestones=[12, 20, 30, 45], gamma=self.args.gamma)
if self.device:
self.model['generator'] = self.model['generator'].cuda()
self.model['discriminator'] = self.model['discriminator'].cuda()
if len(self.args.gpu_ids) > 1:
self.model['generator'] = torch.nn.DataParallel(self.model['generator'], device_ids=self.args.gpu_ids)
self.model['discriminator'] = torch.nn.DataParallel(self.model['discriminator'], device_ids=self.args.gpu_ids)
torch.backends.cudnn.benchmark = True
print('Parallel mode was going ...')
else:
print('Single-gpu mode was going ...')
else:
print('CPU mode was going ...')
if len(self.args.resume) > 2:
checkpoint = torch.load(self.args.resume, map_location=lambda storage, loc: storage)
self.args.start = checkpoint['epoch']
self.model['generator'].load_state_dict(checkpoint['generator'])
self.model['discriminator'].load_state_dict(checkpoint['discriminator'])
print('Resuming the train process at %3d epoches ...' % self.args.start)
print('Model loading was finished ...')
def _data_loader(self):
self.data['train_loader'] = DataLoader(
CelebA(args=self.args),
batch_size = self.args.batch_size, \
shuffle = True,\
num_workers= self.args.workers)
self.data['fixed_noise'] = torch.randn(64, self.args.in_dim ,1, 1)
if self.device:
self.data['fixed_noise'] = self.data['fixed_noise'].cuda()
self.rescache['gloss'] = []
self.rescache['dloss'] = []
self.rescache['fake'] = []
print('Data loading was finished ...')
def _model_train(self, epoch = 0):
total_dloss, total_gloss = 0, 0
for idx, imgs in enumerate(self.data['train_loader']):
# update discriminator
self.model['discriminator'].train()
self.model['generator'].eval()
imgs.requires_grad = False
if self.device:
imgs = imgs.cuda()
b_size = imgs.size(0)
self.model['discriminator'].zero_grad()
gty = torch.full((b_size,), 1)
if self.device:
gty = gty.cuda()
predy = self.model['discriminator'](imgs).view(-1)
dloss_real = self.model['criterion'](predy, gty)
dloss_real.backward()
noise = torch.randn(b_size, self.args.in_dim, 1, 1)
if self.device:
noise = noise.cuda()
fake = self.model['generator'](noise)
gty.fill_(0) # TODO
predy = self.model['discriminator'](fake.detach()).view(-1)
dloss_fake = self.model['criterion'](predy, gty)
dloss_fake.backward()
self.model['opti_disc'].step()
d_loss_real = dloss_real.mean().item()
d_loss_fake = dloss_fake.mean().item()
d_loss = d_loss_real + d_loss_fake
self.rescache['dloss'].append(d_loss)
total_dloss += d_loss
# update generator
self.model['generator'].train()
self.model['discriminator'].eval()
self.model['generator'].zero_grad()
gty.fill_(1) # TODO
predy = self.model['discriminator'](fake).view(-1)
gloss = self.model['criterion'](predy, gty)
gloss.backward()
self.model['opti_gene'].step()
g_loss = gloss.mean().item()
self.rescache['gloss'].append(g_loss)
total_gloss += g_loss
if (idx + 1) % self.args.print_freq == 0:
print('epoch : %2d|%2d, iter : %4d|%4d, dloss : %.4f, gloss : %.4f' % \
(epoch, self.args.epoches, idx+1, len(self.data['train_loader']), \
np.mean(self.rescache['dloss']), np.mean(self.rescache['gloss'])))
if (idx + 1) % self.args.monitor_freq == 0:
with torch.no_grad():
self.model['generator'].eval()
fake = self.model['generator'](self.data['fixed_noise']).detach().cpu()
self.rescache['fake'].append(fake)
return total_dloss, total_gloss
def _main_loop(self):
min_loss = 1e3
for epoch in range(self.args.start, self.args.epoches + 1):
start_time = time.time()
dloss, gloss = self._model_train(epoch)
train_loss = dloss + gloss
# self.model['scheduler'].step()
end_time = time.time()
print('Single epoch cost time : %.2f mins' % ((end_time - start_time)/60))
if not os.path.exists(self.args.save_to):
os.mkdir(self.args.save_to)
if (min_loss > train_loss) and (not self.args.is_debug):
print('%snew SOTA was found%s' % ('*'*16, '*'*16))
min_loss = train_loss
filename = os.path.join(self.args.save_to, 'sota.pth.tar')
torch.save({
'epoch' : epoch,
'generator' : self.model['generator'].state_dict(),
'discriminator' : self.model['discriminator'].state_dict(),
'loss' : min_loss,
}, filename)
if (epoch % self.args.save_freq == 0) and (not self.args.is_debug):
filename = os.path.join(self.args.save_to, 'epoch_'+str(epoch)+'.pth.tar')
torch.save({
'epoch' : epoch,
'generator' : self.model['generator'].state_dict(),
'discriminator' : self.model['discriminator'].state_dict(),
'loss' : train_loss,
}, filename)
if self.args.is_debug:
break
def _visual_res(self):
''' Visual the training process '''
# gloss and dloss
plt.figure(figsize=(10,5))
plt.title("Generator and Discriminator Loss During Training")
plt.plot(self.rescache['gloss'], label="gloss")
plt.plot(self.rescache['dloss'], label="dloss")
plt.xlabel("iterations")
plt.ylabel("loss")
plt.legend()
plt.savefig('loss.jpg', dpi=400)
# save the fake-images
np.save('fake.npy', self.rescache['fake'])
def train_runner(self):
self._report_settings()
self._model_loader()
self._data_loader()
self._main_loop()
self._visual_res()
if __name__ == "__main__":
faceu = DCGAN(training_args())
faceu.train_runner()
| [
"matplotlib.pyplot.ylabel",
"torch.cuda.is_available",
"numpy.save",
"os.path.exists",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"os.mkdir",
"torch.randn",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.title",
"time.time",
"matplotlib.pyplot.legend",
"torch.full... | [((1189, 1201), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (1199, 1201), True, 'import torch.nn as nn\n'), ((3336, 3375), 'torch.randn', 'torch.randn', (['(64)', 'self.args.in_dim', '(1)', '(1)'], {}), '(64, self.args.in_dim, 1, 1)\n', (3347, 3375), False, 'import torch\n'), ((7981, 8008), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (7991, 8008), True, 'import matplotlib.pyplot as plt\n'), ((8016, 8077), 'matplotlib.pyplot.title', 'plt.title', (['"""Generator and Discriminator Loss During Training"""'], {}), "('Generator and Discriminator Loss During Training')\n", (8025, 8077), True, 'import matplotlib.pyplot as plt\n'), ((8086, 8133), 'matplotlib.pyplot.plot', 'plt.plot', (["self.rescache['gloss']"], {'label': '"""gloss"""'}), "(self.rescache['gloss'], label='gloss')\n", (8094, 8133), True, 'import matplotlib.pyplot as plt\n'), ((8142, 8189), 'matplotlib.pyplot.plot', 'plt.plot', (["self.rescache['dloss']"], {'label': '"""dloss"""'}), "(self.rescache['dloss'], label='dloss')\n", (8150, 8189), True, 'import matplotlib.pyplot as plt\n'), ((8198, 8222), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iterations"""'], {}), "('iterations')\n", (8208, 8222), True, 'import matplotlib.pyplot as plt\n'), ((8231, 8249), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (8241, 8249), True, 'import matplotlib.pyplot as plt\n'), ((8258, 8270), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8268, 8270), True, 'import matplotlib.pyplot as plt\n'), ((8279, 8311), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""loss.jpg"""'], {'dpi': '(400)'}), "('loss.jpg', dpi=400)\n", (8290, 8311), True, 'import matplotlib.pyplot as plt\n'), ((8360, 8402), 'numpy.save', 'np.save', (['"""fake.npy"""', "self.rescache['fake']"], {}), "('fake.npy', self.rescache['fake'])\n", (8367, 8402), True, 'import numpy as np\n'), ((512, 537), 'torch.cuda.is_available', 
'torch.cuda.is_available', ([], {}), '()\n', (535, 537), False, 'import torch\n'), ((2532, 2603), 'torch.load', 'torch.load', (['self.args.resume'], {'map_location': '(lambda storage, loc: storage)'}), '(self.args.resume, map_location=lambda storage, loc: storage)\n', (2542, 2603), False, 'import torch\n'), ((4116, 4140), 'torch.full', 'torch.full', (['(b_size,)', '(1)'], {}), '((b_size,), 1)\n', (4126, 4140), False, 'import torch\n'), ((4381, 4424), 'torch.randn', 'torch.randn', (['b_size', 'self.args.in_dim', '(1)', '(1)'], {}), '(b_size, self.args.in_dim, 1, 1)\n', (4392, 4424), False, 'import torch\n'), ((6396, 6407), 'time.time', 'time.time', ([], {}), '()\n', (6405, 6407), False, 'import time\n'), ((6567, 6578), 'time.time', 'time.time', ([], {}), '()\n', (6576, 6578), False, 'import time\n'), ((2026, 2102), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (["self.model['generator']"], {'device_ids': 'self.args.gpu_ids'}), "(self.model['generator'], device_ids=self.args.gpu_ids)\n", (2047, 2102), False, 'import torch\n'), ((2149, 2234), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (["self.model['discriminator']"], {'device_ids': 'self.args.gpu_ids'}), "(self.model['discriminator'], device_ids=self.args.gpu_ids\n )\n", (2170, 2234), False, 'import torch\n'), ((6686, 6719), 'os.path.exists', 'os.path.exists', (['self.args.save_to'], {}), '(self.args.save_to)\n', (6700, 6719), False, 'import os\n'), ((6737, 6764), 'os.mkdir', 'os.mkdir', (['self.args.save_to'], {}), '(self.args.save_to)\n', (6745, 6764), False, 'import os\n'), ((6967, 7014), 'os.path.join', 'os.path.join', (['self.args.save_to', '"""sota.pth.tar"""'], {}), "(self.args.save_to, 'sota.pth.tar')\n", (6979, 7014), False, 'import os\n'), ((5989, 6004), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6002, 6004), False, 'import torch\n'), ((727, 746), 'sys.version.strip', 'sys.version.strip', ([], {}), '()\n', (744, 746), False, 'import sys\n'), ((5844, 5875), 'numpy.mean', 'np.mean', 
(["self.rescache['dloss']"], {}), "(self.rescache['dloss'])\n", (5851, 5875), True, 'import numpy as np\n'), ((5877, 5908), 'numpy.mean', 'np.mean', (["self.rescache['gloss']"], {}), "(self.rescache['gloss'])\n", (5884, 5908), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# FSRobo-R Package BSDL
# ---------
# Copyright (C) 2019 FUJISOFT. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ---------
import time
import socket
import json
from struct import pack, unpack
class JSONSocket(object):
BUFFER_SIZE = 4096
def __init__(self, ip_addr, port):
self._ip_addr = ip_addr
self._port = port
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._decoder = json.JSONDecoder(strict=False)
self._recv_buffer = ''
def connect(self):
self._sock.connect((self._ip_addr, self._port))
def close(self):
self._sock.close()
def send(self, data):
self._sock.sendall(json.dumps(data))
def recv(self):
need_recv = len(self._recv_buffer) == 0
#print('len: {}'.format(len(self._recv_buffer)))
while True:
try:
if need_recv:
recv_data = self._sock.recv(self.BUFFER_SIZE)
if (len(recv_data) == 0):
raise socket.error('recv error')
self._recv_buffer += recv_data
need_recv = False
else:
# XXX leading null char causes ValueError. Should fix server?
self._recv_buffer = self._recv_buffer.strip('\0')
data, index = self._decoder.raw_decode(self._recv_buffer)
self._recv_buffer = self._recv_buffer[index:]
#print('OK!:{}:{}:'.format(self._recv_buffer, self._recv_buffer.encode('hex')))
return data
except ValueError as e:
#print(e)
#print(self._recv_buffer)
#print(self._recv_buffer.encode('hex'))
need_recv = True
class InfoCatchClient(object):
ControlPort = 5000
def __init__(self, ip_addr='192.168.0.23'):
self._ip_addr = ip_addr
self._data_port = None
self._data_sock = None
def send_control(self, msg):
sock = JSONSocket(self._ip_addr, self.ControlPort)
sock.connect()
sock.send(msg)
data = sock.recv()
sock.close()
return data
def recv(self):
return self._data_sock.recv()
def connect(self, filters, sampling_time=10):
control_data = {'ST00': 'ON', 'ST01': sampling_time, 'ST02': sampling_time}
for filter in filters:
control_data[filter] = 'ON'
res = self.send_control(control_data)
if (res['RT00'] != 'OK'):
return False
self._data_port = res['RT01']
self._data_sock = JSONSocket(self._ip_addr, self._data_port)
retry_count = 0
while True:
try:
self._data_sock.connect()
break
except socket.error as e:
retry_count += 1
if retry_count > 10:
raise socket.error(e)
time.sleep(0.5)
def close(self):
if self._data_port is not None:
control_data = {'ST00': 'OFF', 'RT01': self._data_port}
res = self.send_control(control_data)
if res['RT00'] != 'OK':
print('warning: send_control returns {}'.format(res))
self._data_port = None
if self._data_sock is not None:
self._data_sock.close()
class Label(object):
I000 = "i000" # 0xFFFF Date/Time
# Header block
H003 = "h003" # 0x0008 update_counter
H004 = "h004" # 0x000C now_updating
# Memory I/O
M000 = "m000" # 0x0100 dio_io
M001 = "m001" # 0x0104 dio_io
M100 = "m100" # 0x0300 mio_si0
M102 = "m102" # 0x0308 mio_si2
M107 = "m107" # 0x031C mio_sl3
M200 = "m200" # 0x0320 dio_io[0]
M201 = "m201" # 0x0324 dio_io[1]
M202 = "m202" # 0x0328 dio_io[2]
M203 = "m203" # 0x032C dio_io[3]
M204 = "m204" # 0x0330 dio_io[4]
M205 = "m205" # 0x0334 dio_io[5]
M206 = "m206" # 0x0338 dio_io[6]
M207 = "m207" # 0x033C dio_io[7]
M208 = "m208" # 0x0340 dio_io[8]
M209 = "m209" # 0x0344 dio_io[9]
M210 = "m210" # 0x0348 dio_io[10]
M211 = "m211" # 0x034C dio_io[11]
M212 = "m212" # 0x0350 dio_io[12]
M213 = "m213" # 0x0354 dio_io[13]
M214 = "m214" # 0x0358 dio_io[14]
M215 = "m215" # 0x035C dio_io[15]
M216 = "m216" # 0x0360 dio_io[16]
M217 = "m217" # 0x0364 dio_io[17]
M218 = "m218" # 0x0368 dio_io[18]
M219 = "m219" # 0x036C dio_io[19]
M220 = "m220" # 0x0370 dio_io[20]
M221 = "m221" # 0x0374 dio_io[21]
M222 = "m222" # 0x0378 dio_io[22]
M223 = "m223" # 0x037C dio_io[23]
M224 = "m224" # 0x0380 dio_io[24]
M225 = "m225" # 0x0384 dio_io[25]
M226 = "m226" # 0x0388 dio_io[26]
M227 = "m227" # 0x038C dio_io[27]
M228 = "m228" # 0x0390 dio_io[28]
M229 = "m229" # 0x0394 dio_io[29]
M230 = "m230" # 0x0398 dio_io[30]
M231 = "m231" # 0x039C dio_io[31]
M232 = "m232" # 0x03A0 dio_io[32]
M233 = "m233" # 0x03A4 dio_io[33]
M234 = "m234" # 0x03A8 dio_io[34]
M235 = "m235" # 0x03AC dio_io[35]
M236 = "m236" # 0x03B0 dio_io[36]
M237 = "m237" # 0x03B4 dio_io[37]
M238 = "m238" # 0x03B8 dio_io[38]
M239 = "m239" # 0x03BC dio_io[39]
M240 = "m240" # 0x03C0 dio_io[40]
M241 = "m241" # 0x03C4 dio_io[41]
M242 = "m242" # 0x03C8 dio_io[42]
M243 = "m243" # 0x03CC dio_io[43]
M244 = "m244" # 0x03D0 dio_io[44]
M245 = "m245" # 0x03D4 dio_io[45]
M246 = "m246" # 0x03D8 dio_io[46]
M247 = "m247" # 0x03DC dio_io[47]
M248 = "m248" # 0x03E0 dio_io[48]
M249 = "m249" # 0x03E4 dio_io[49]
M250 = "m250" # 0x03E8 dio_io[50]
M251 = "m251" # 0x03EC dio_io[51]
M252 = "m252" # 0x03F0 dio_io[52]
M253 = "m253" # 0x03F4 dio_io[53]
M254 = "m254" # 0x03F8 dio_io[54]
M255 = "m255" # 0x03FC dio_io[55]
M256 = "m256" # 0x0400 dio_io[56]
M257 = "m257" # 0x0404 dio_io[57]
M258 = "m258" # 0x0408 dio_io[58]
M259 = "m259" # 0x040C dio_io[59]
M260 = "m260" # 0x0410 dio_io[60]
M261 = "m261" # 0x0414 dio_io[61]
M262 = "m262" # 0x0418 dio_io[62]
M263 = "m263" # 0x041C dio_io[63]
M264 = "m264" # 0x0420 dio_io[64]
M265 = "m265" # 0x0424 dio_io[65]
M266 = "m266" # 0x0428 dio_io[66]
M267 = "m267" # 0x042C dio_io[67]
M268 = "m268" # 0x0430 dio_io[68]
M269 = "m269" # 0x0434 dio_io[69]
M270 = "m270" # 0x0438 dio_io[70]
M271 = "m271" # 0x043C dio_io[71]
M272 = "m272" # 0x0440 dio_io[72]
M273 = "m273" # 0x0444 dio_io[73]
M274 = "m274" # 0x0448 dio_io[74]
M275 = "m275" # 0x044C dio_io[75]
M276 = "m276" # 0x0450 dio_io[76]
M277 = "m277" # 0x0454 dio_io[77]
M278 = "m278" # 0x0458 dio_io[78]
M279 = "m279" # 0x045C dio_io[79]
M280 = "m280" # 0x0460 dio_io[80]
M281 = "m281" # 0x0464 dio_io[81]
M282 = "m282" # 0x0468 dio_io[82]
M283 = "m283" # 0x046C dio_io[83]
M284 = "m284" # 0x0470 dio_io[84]
M285 = "m285" # 0x0474 dio_io[85]
M286 = "m286" # 0x0478 dio_io[86]
M287 = "m287" # 0x047C dio_io[87]
M288 = "m288" # 0x0480 dio_io[88]
M289 = "m289" # 0x0484 dio_io[89]
M290 = "m290" # 0x0488 dio_io[90]
M291 = "m291" # 0x048C dio_io[91]
M292 = "m292" # 0x0490 dio_io[92]
M293 = "m293" # 0x0494 dio_io[93]
M294 = "m294" # 0x0498 dio_io[94]
M295 = "m295" # 0x049C dio_io[95]
M296 = "m296" # 0x04A0 dio_io[96]
M297 = "m297" # 0x04A4 dio_io[97]
M298 = "m298" # 0x04A8 dio_io[98]
M299 = "m299" # 0x04AC dio_io[99]
M300 = "m300" # 0x04B0 dio_io[100]
M301 = "m301" # 0x04B4 dio_io[101]
M302 = "m302" # 0x04B8 dio_io[102]
M303 = "m303" # 0x04BC dio_io[103]
M304 = "m304" # 0x04C0 dio_io[104]
M305 = "m305" # 0x04C4 dio_io[105]
M306 = "m306" # 0x04C8 dio_io[106]
M307 = "m307" # 0x04CC dio_io[107]
M308 = "m308" # 0x04D0 dio_io[108]
M309 = "m309" # 0x04D4 dio_io[109]
M310 = "m310" # 0x04D8 dio_io[110]
M311 = "m311" # 0x04DC dio_io[111]
M312 = "m312" # 0x04E0 dio_io[112]
M313 = "m313" # 0x04E4 dio_io[113]
M314 = "m314" # 0x04E8 dio_io[114]
M315 = "m315" # 0x04EC dio_io[115]
M316 = "m316" # 0x04F0 dio_io[116]
M317 = "m317" # 0x04F4 dio_io[117]
M318 = "m318" # 0x04F8 dio_io[118]
M319 = "m319" # 0x04FC dio_io[119]
M320 = "m320" # 0x0500 dio_io[120]
# Ethercat joiont information
S000 = "s000" # 0x0500 cia402ctrl[0-5]
S001 = "s001" # 0x0502 ctrl[0-5]
S002 = "s002" # 0x0504 cia402targetpls[0-5]
S003 = "s003" # 0x0508 notification[0-5]
S004 = "s004" # 0x050C cia402sts[0-5]
S005 = "s005" # 0x050E sts[0-5]
S006 = "s006" # 0x0510 rtn[0-5]
S007 = "s007" # 0x0512 cia402err[0-5]
S008 = "s008" # 0x0514 alarm[0-5]
S009 = "s009" # 0x0518 targetplsfb[0-5]
S010 = "s010" # 0x051C cia402actualpls[0-5]
S011 = "s011" # 0x0520 cia402followingerr[0-5]
S012 = "s012" # 0x0524 observer_output_value[0-5]
S013 = "s013" # 0x0528 torque[0-5]
S014 = "s014" # 0x052A thermal[0-5]
S015 = "s015" # 0x052C disturbance[0-5]
S016 = "s016" # 0x052E gainrate[0-5]
S017 = "s017" # 0x0530 polerate[0-5]
S018 = "s018" # 0x0532 filtered_torque[0-5]
S019 = "s019" # 0x0534 filtered_velocity[0-5]
S020 = "s020" # 0x0536 filtered_D[0-5]
S020 = "s020" # 0x0538 filtered_Q[0-5]
# Force torque sensor information
F000 = "f000" # 0x0700 sts
F001 = "f001" # 0x0701 gain_sts
F100 = "f100" # 0x0710 zero_point[0-7]
F200 = "f200" # 0x0720 raw_value[0-7]
F300 = "f300" # 0x0730 gain[0-7]
# System management block information
Y000 = "y000" # 0x0800 robtask_name[0-31]
Y001 = "y001" # 0x0820 running_name[0-31]
Y002 = "y002" # 0x0840 running_pid
Y003 = "y003" # 0x0844 assign_port[0]
Y004 = "y004" # 0x0846 assign_port[1]
Y005 = "y005" # 0x0848 assign_port[2]
Y006 = "y006" # 0x084A assign_port[3]
Y007 = "y007" # 0x084C assign_port[4]
Y008 = "y008" # 0x085E assign_port[5]
Y009 = "y009" # 0x0850 assign_port[6]
Y010 = "y010" # 0x0852 assign_port[7]
Y011 = "y011" # 0x0854 assign_port[8]
Y012 = "y012" # 0x0856 assign_port[9]
Y013 = "y013" # 0x0858 assign_port[10]
Y014 = "y014" # 0x085A assign_port[11]
# User block information
U000 = "u000" # 0x1800 intval[0]
U001 = "u001" # 0x1804 intval[1]
U002 = "u002" # 0x1808 intval[2]
U003 = "u003" # 0x180C intval[3]
U004 = "u004" # 0x1810 intval[4]
U005 = "u005" # 0x1814 intval[5]
U006 = "u006" # 0x1818 intval[6]
U007 = "u007" # 0x181C intval[7]
U008 = "u008" # 0x1820 intval[8]
U009 = "u009" # 0x1824 intval[9]
U010 = "u010" # 0x1828 intval[10]
U011 = "u011" # 0x182C intval[11]
U012 = "u012" # 0x1830 intval[12]
U013 = "u013" # 0x1834 intval[13]
U014 = "u014" # 0x1838 intval[14]
U015 = "u015" # 0x183C intval[15]
U016 = "u016" # 0x1840 intval[16]
U017 = "u017" # 0x1844 intval[17]
U018 = "u018" # 0x1848 intval[18]
U019 = "u019" # 0x184C intval[19]
U020 = "u020" # 0x1850 intval[20]
U021 = "u021" # 0x1854 intval[21]
U022 = "u022" # 0x1858 intval[22]
U023 = "u023" # 0x185C intval[23]
U024 = "u024" # 0x1860 intval[24]
U025 = "u025" # 0x1864 intval[25]
U026 = "u026" # 0x1868 intval[26]
U027 = "u027" # 0x186C intval[27]
U028 = "u028" # 0x1870 intval[28]
U029 = "u029" # 0x1874 intval[29]
U030 = "u030" # 0x1878 intval[30]
U031 = "u031" # 0x187C intval[31]
U032 = "u032" # 0x1880 intval[32]
U033 = "u033" # 0x1884 intval[33]
U034 = "u034" # 0x1888 intval[34]
U035 = "u035" # 0x188C intval[35]
U036 = "u036" # 0x1890 intval[36]
U037 = "u037" # 0x1894 intval[37]
U038 = "u038" # 0x1898 intval[38]
U039 = "u039" # 0x189C intval[39]
U040 = "u040" # 0x18A0 intval[40]
U041 = "u041" # 0x18A4 intval[41]
U042 = "u042" # 0x18A8 intval[42]
U043 = "u043" # 0x18AC intval[43]
U044 = "u044" # 0x18B0 intval[44]
U045 = "u045" # 0x18B4 intval[45]
U046 = "u046" # 0x18B8 intval[46]
U047 = "u047" # 0x18BC intval[47]
U048 = "u048" # 0x18C0 intval[48]
U049 = "u049" # 0x18C4 intval[49]
U050 = "u050" # 0x18C8 intval[50]
U051 = "u051" # 0x18CC intval[51]
U052 = "u052" # 0x18D0 intval[52]
U053 = "u053" # 0x18D4 intval[53]
U054 = "u054" # 0x18D8 intval[54]
U055 = "u055" # 0x18DC intval[55]
U056 = "u056" # 0x18E0 intval[56]
U057 = "u057" # 0x18E4 intval[57]
U058 = "u058" # 0x18E8 intval[58]
U059 = "u059" # 0x18EC intval[59]
U060 = "u060" # 0x18F0 intval[60]
U061 = "u061" # 0x18F4 intval[61]
U062 = "u062" # 0x18F8 intval[62]
U063 = "u063" # 0x18FC intval[63]
U064 = "u064" # 0x1900 intval[64]
U065 = "u065" # 0x1904 intval[65]
U066 = "u066" # 0x1908 intval[66]
U067 = "u067" # 0x190C intval[67]
U068 = "u068" # 0x1910 intval[68]
U069 = "u069" # 0x1914 intval[69]
U070 = "u070" # 0x1918 intval[70]
U071 = "u071" # 0x191C intval[71]
U072 = "u072" # 0x1920 intval[72]
U073 = "u073" # 0x1924 intval[73]
U074 = "u074" # 0x1928 intval[74]
U075 = "u075" # 0x192C intval[75]
U076 = "u076" # 0x1930 intval[76]
U077 = "u077" # 0x1934 intval[77]
U078 = "u078" # 0x1938 intval[78]
U079 = "u079" # 0x193C intval[79]
U080 = "u080" # 0x1940 intval[80]
U081 = "u081" # 0x1944 intval[81]
U082 = "u082" # 0x1948 intval[82]
U083 = "u083" # 0x194C intval[83]
U084 = "u084" # 0x1950 intval[84]
U085 = "u085" # 0x1954 intval[85]
U086 = "u086" # 0x1958 intval[86]
U087 = "u087" # 0x195C intval[87]
U088 = "u088" # 0x1960 intval[88]
U089 = "u089" # 0x1964 intval[89]
U090 = "u090" # 0x1968 intval[90]
U091 = "u091" # 0x196C intval[91]
U092 = "u092" # 0x1970 intval[92]
U093 = "u093" # 0x1974 intval[93]
U094 = "u094" # 0x1978 intval[94]
U095 = "u095" # 0x197C intval[95]
U096 = "u096" # 0x1980 intval[96]
U097 = "u097" # 0x1984 intval[97]
U098 = "u098" # 0x1988 intval[98]
U099 = "u099" # 0x198C intval[99]
U100 = "u100" # 0x1990 intval[100]
U101 = "u101" # 0x1994 intval[101]
U102 = "u102" # 0x1998 intval[102]
U103 = "u103" # 0x199C intval[103]
U104 = "u104" # 0x19A0 intval[104]
U105 = "u105" # 0x19A4 intval[105]
U106 = "u106" # 0x19A8 intval[106]
U107 = "u107" # 0x19AC intval[107]
U108 = "u108" # 0x19B0 intval[108]
U109 = "u109" # 0x19B4 intval[109]
U110 = "u110" # 0x19B8 intval[110]
U111 = "u111" # 0x19BC intval[111]
U112 = "u112" # 0x19C0 intval[112]
U113 = "u113" # 0x19C4 intval[113]
U114 = "u114" # 0x19C8 intval[114]
U115 = "u115" # 0x19CC intval[115]
U116 = "u116" # 0x19D0 intval[116]
U117 = "u117" # 0x19D4 intval[117]
U118 = "u118" # 0x19D8 intval[118]
U119 = "u119" # 0x19DC intval[119]
U120 = "u120" # 0x19E0 intval[120]
U121 = "u121" # 0x19E4 intval[121]
U122 = "u122" # 0x19E8 intval[122]
U123 = "u123" # 0x19EC intval[123]
U124 = "u124" # 0x19F0 intval[124]
U125 = "u125" # 0x19F4 intval[125]
U126 = "u126" # 0x19F8 intval[126]
U127 = "u127" # 0x19FC intval[127]
U128 = "u128" # 0x1A00 intval[128]
U129 = "u129" # 0x1A04 intval[129]
U130 = "u130" # 0x1A08 intval[130]
U131 = "u131" # 0x1A0C intval[131]
U132 = "u132" # 0x1A10 intval[132]
U133 = "u133" # 0x1A14 intval[133]
U134 = "u134" # 0x1A18 intval[134]
U135 = "u135" # 0x1A1C intval[135]
U136 = "u136" # 0x1A20 intval[136]
U137 = "u137" # 0x1A24 intval[137]
U138 = "u138" # 0x1A28 intval[138]
U139 = "u139" # 0x1A2C intval[139]
U140 = "u140" # 0x1A30 intval[140]
U141 = "u141" # 0x1A34 intval[141]
U142 = "u142" # 0x1A38 intval[142]
U143 = "u143" # 0x1A3C intval[143]
U144 = "u144" # 0x1A40 intval[144]
U145 = "u145" # 0x1A44 intval[145]
U146 = "u146" # 0x1A48 intval[146]
U147 = "u147" # 0x1A4C intval[147]
U148 = "u148" # 0x1A50 intval[148]
U149 = "u149" # 0x1A54 intval[149]
U150 = "u150" # 0x1A58 intval[150]
U151 = "u151" # 0x1A5C intval[151]
U152 = "u152" # 0x1A60 intval[152]
U153 = "u153" # 0x1A64 intval[153]
U154 = "u154" # 0x1A68 intval[154]
U155 = "u155" # 0x1A6C intval[155]
U156 = "u156" # 0x1A70 intval[156]
U157 = "u157" # 0x1A74 intval[157]
U158 = "u158" # 0x1A78 intval[158]
U159 = "u159" # 0x1A7C intval[159]
U160 = "u160" # 0x1A80 intval[160]
U161 = "u161" # 0x1A84 intval[161]
U162 = "u162" # 0x1A88 intval[162]
U163 = "u163" # 0x1A8C intval[163]
U164 = "u164" # 0x1A90 intval[164]
U165 = "u165" # 0x1A94 intval[165]
U166 = "u166" # 0x1A98 intval[166]
U167 = "u167" # 0x1A9C intval[167]
U168 = "u168" # 0x1AA0 intval[168]
U169 = "u169" # 0x1AA4 intval[169]
U170 = "u170" # 0x1AA8 intval[170]
U171 = "u171" # 0x1AAC intval[171]
U172 = "u172" # 0x1AB0 intval[172]
U173 = "u173" # 0x1AB4 intval[173]
U174 = "u174" # 0x1AB8 intval[174]
U175 = "u175" # 0x1ABC intval[175]
U176 = "u176" # 0x1AC0 intval[176]
U177 = "u177" # 0x1AC4 intval[177]
U178 = "u178" # 0x1AC8 intval[178]
U179 = "u179" # 0x1ACC intval[179]
U180 = "u180" # 0x1AD0 intval[180]
U181 = "u181" # 0x1AD4 intval[181]
U182 = "u182" # 0x1AD8 intval[182]
U183 = "u183" # 0x1ADC intval[183]
U184 = "u184" # 0x1AE0 intval[184]
U185 = "u185" # 0x1AE4 intval[185]
U186 = "u186" # 0x1AE8 intval[186]
U187 = "u187" # 0x1AEC intval[187]
U188 = "u188" # 0x1AF0 intval[188]
U189 = "u189" # 0x1AF4 intval[189]
U190 = "u190" # 0x1AF8 intval[190]
U191 = "u191" # 0x1AFC intval[191]
U192 = "u192" # 0x1B00 intval[192]
U193 = "u193" # 0x1B04 intval[193]
U194 = "u194" # 0x1B08 intval[194]
U195 = "u195" # 0x1B0C intval[195]
U196 = "u196" # 0x1B10 intval[196]
U197 = "u197" # 0x1B14 intval[197]
U198 = "u198" # 0x1B18 intval[198]
U199 = "u199" # 0x1B1C intval[199]
U200 = "u200" # 0x1B20 intval[200]
U201 = "u201" # 0x1B24 intval[201]
U202 = "u202" # 0x1B28 intval[202]
U203 = "u203" # 0x1B2C intval[203]
U204 = "u204" # 0x1B30 intval[204]
U205 = "u205" # 0x1B34 intval[205]
U206 = "u206" # 0x1B38 intval[206]
U207 = "u207" # 0x1B3C intval[207]
U208 = "u208" # 0x1B40 intval[208]
U209 = "u209" # 0x1B44 intval[209]
U210 = "u210" # 0x1B48 intval[210]
U211 = "u211" # 0x1B4C intval[211]
U212 = "u212" # 0x1B50 intval[212]
U213 = "u213" # 0x1B54 intval[213]
U214 = "u214" # 0x1B58 intval[214]
U215 = "u215" # 0x1B5C intval[215]
U216 = "u216" # 0x1B60 intval[216]
U217 = "u217" # 0x1B64 intval[217]
U218 = "u218" # 0x1B68 intval[218]
U219 = "u219" # 0x1B6C intval[219]
U220 = "u220" # 0x1B70 intval[220]
U221 = "u221" # 0x1B74 intval[221]
U222 = "u222" # 0x1B78 intval[222]
U223 = "u223" # 0x1B7C intval[223]
U224 = "u224" # 0x1B80 intval[224]
U225 = "u225" # 0x1B84 intval[225]
U226 = "u226" # 0x1B88 intval[226]
U227 = "u227" # 0x1B8C intval[227]
U228 = "u228" # 0x1B90 intval[228]
U229 = "u229" # 0x1B94 intval[229]
U230 = "u230" # 0x1B98 intval[230]
U231 = "u231" # 0x1B9C intval[231]
U232 = "u232" # 0x1BA0 intval[232]
U233 = "u233" # 0x1BA4 intval[233]
U234 = "u234" # 0x1BA8 intval[234]
U235 = "u235" # 0x1BAC intval[235]
U236 = "u236" # 0x1BB0 intval[236]
U237 = "u237" # 0x1BB4 intval[237]
U238 = "u238" # 0x1BB8 intval[238]
U239 = "u239" # 0x1BBC intval[239]
U240 = "u240" # 0x1BC0 intval[240]
U241 = "u241" # 0x1BC4 intval[241]
U242 = "u242" # 0x1BC8 intval[242]
U243 = "u243" # 0x1BCC intval[243]
U244 = "u244" # 0x1BD0 intval[244]
U245 = "u245" # 0x1BD4 intval[245]
U246 = "u246" # 0x1BD8 intval[246]
U247 = "u247" # 0x1BDC intval[247]
U248 = "u248" # 0x1BE0 intval[248]
U249 = "u249" # 0x1BE4 intval[249]
U250 = "u250" # 0x1BE8 intval[250]
U251 = "u251" # 0x1BEC intval[251]
U252 = "u252" # 0x1BF0 intval[252]
U253 = "u253" # 0x1BF4 intval[253]
U254 = "u254" # 0x1BF8 intval[254]
U255 = "u255" # 0x1BFC intval[255]
M300 = "m300" # 0x1C00 floatval[0]
M301 = "m301" # 0x1C08 floatval[1]
M302 = "m302" # 0x1C10 floatval[2]
M303 = "m303" # 0x1C18 floatval[3]
M304 = "m304" # 0x1C20 floatval[4]
M305 = "m305" # 0x1C28 floatval[5]
M306 = "m306" # 0x1C30 floatval[6]
M307 = "m307" # 0x1C38 floatval[7]
M308 = "m308" # 0x1C40 floatval[8]
M309 = "m309" # 0x1C48 floatval[9]
M310 = "m310" # 0x1C50 floatval[10]
M311 = "m311" # 0x1C58 floatval[11]
M312 = "m312" # 0x1C60 floatval[12]
M313 = "m313" # 0x1C68 floatval[13]
M314 = "m314" # 0x1C70 floatval[14]
M315 = "m315" # 0x1C78 floatval[15]
M316 = "m316" # 0x1C80 floatval[16]
M317 = "m317" # 0x1C88 floatval[17]
M318 = "m318" # 0x1C90 floatval[18]
M319 = "m319" # 0x1C98 floatval[19]
M320 = "m320" # 0x1CA0 floatval[20]
M321 = "m321" # 0x1CA8 floatval[21]
M322 = "m322" # 0x1CB0 floatval[22]
M323 = "m323" # 0x1CB8 floatval[23]
M324 = "m324" # 0x1CC0 floatval[24]
M325 = "m325" # 0x1CC8 floatval[25]
M326 = "m326" # 0x1CD0 floatval[26]
M327 = "m327" # 0x1CD8 floatval[27]
M328 = "m328" # 0x1CE0 floatval[28]
M329 = "m329" # 0x1CE8 floatval[29]
M330 = "m330" # 0x1CF0 floatval[30]
M331 = "m331" # 0x1CF8 floatval[31]
M332 = "m332" # 0x1D00 floatval[32]
M333 = "m333" # 0x1D08 floatval[33]
M334 = "m334" # 0x1D10 floatval[34]
M335 = "m335" # 0x1D18 floatval[35]
M336 = "m336" # 0x1D20 floatval[36]
M337 = "m337" # 0x1D28 floatval[37]
M338 = "m338" # 0x1D30 floatval[38]
M339 = "m339" # 0x1D38 floatval[39]
M340 = "m340" # 0x1D40 floatval[40]
M341 = "m341" # 0x1D48 floatval[41]
M342 = "m342" # 0x1D50 floatval[42]
M343 = "m343" # 0x1D58 floatval[43]
M344 = "m344" # 0x1D60 floatval[44]
M345 = "m345" # 0x1D68 floatval[45]
M346 = "m346" # 0x1D70 floatval[46]
M347 = "m347" # 0x1D78 floatval[47]
M348 = "m348" # 0x1D80 floatval[48]
M349 = "m349" # 0x1D88 floatval[49]
M350 = "m350" # 0x1D90 floatval[50]
M351 = "m351" # 0x1D98 floatval[51]
M352 = "m352" # 0x1DA0 floatval[52]
M353 = "m353" # 0x1DA8 floatval[53]
M354 = "m354" # 0x1DB0 floatval[54]
M355 = "m355" # 0x1DB8 floatval[55]
M356 = "m356" # 0x1DC0 floatval[56]
M357 = "m357" # 0x1DC8 floatval[57]
M358 = "m358" # 0x1DD0 floatval[58]
M359 = "m359" # 0x1DD8 floatval[59]
M360 = "m360" # 0x1DE0 floatval[60]
M361 = "m361" # 0x1DE8 floatval[61]
M362 = "m362" # 0x1DF0 floatval[62]
M363 = "m363" # 0x1DF8 floatval[63]
M364 = "m364" # 0x1E00 floatval[64]
M365 = "m365" # 0x1E08 floatval[65]
M366 = "m366" # 0x1E10 floatval[66]
M367 = "m367" # 0x1E18 floatval[67]
M368 = "m368" # 0x1E20 floatval[68]
M369 = "m369" # 0x1E28 floatval[69]
M370 = "m370" # 0x1E30 floatval[70]
M371 = "m371" # 0x1E38 floatval[71]
M372 = "m372" # 0x1E40 floatval[72]
M373 = "m373" # 0x1E48 floatval[73]
M374 = "m374" # 0x1E50 floatval[74]
M375 = "m375" # 0x1E58 floatval[75]
M376 = "m376" # 0x1E60 floatval[76]
M377 = "m377" # 0x1E68 floatval[77]
M378 = "m378" # 0x1E70 floatval[78]
M379 = "m379" # 0x1E78 floatval[79]
M380 = "m380" # 0x1E80 floatval[80]
M381 = "m381" # 0x1E88 floatval[81]
M382 = "m382" # 0x1E90 floatval[82]
M383 = "m383" # 0x1E98 floatval[83]
M384 = "m384" # 0x1EA0 floatval[84]
M385 = "m385" # 0x1EA8 floatval[85]
M386 = "m386" # 0x1EB0 floatval[86]
M387 = "m387" # 0x1EB8 floatval[87]
M388 = "m388" # 0x1EC0 floatval[88]
M389 = "m389" # 0x1EC8 floatval[89]
M390 = "m390" # 0x1ED0 floatval[90]
M391 = "m391" # 0x1ED8 floatval[91]
M392 = "m392" # 0x1EE0 floatval[92]
M393 = "m393" # 0x1EE8 floatval[93]
M394 = "m394" # 0x1EF0 floatval[94]
M395 = "m395" # 0x1EF8 floatval[95]
M396 = "m396" # 0x1F00 floatval[96]
M397 = "m397" # 0x1F08 floatval[97]
M398 = "m398" # 0x1F10 floatval[98]
M399 = "m399" # 0x1F18 floatval[99]
M400 = "m400" # 0x1F20 floatval[100]
M401 = "m401" # 0x1F28 floatval[101]
M402 = "m402" # 0x1F30 floatval[102]
M403 = "m403" # 0x1F38 floatval[103]
M404 = "m404" # 0x1F40 floatval[104]
M405 = "m405" # 0x1F48 floatval[105]
M406 = "m406" # 0x1F50 floatval[106]
M407 = "m407" # 0x1F58 floatval[107]
M408 = "m408" # 0x1F60 floatval[108]
M409 = "m409" # 0x1F68 floatval[109]
M410 = "m410" # 0x1F70 floatval[110]
M411 = "m411" # 0x1F78 floatval[111]
M412 = "m412" # 0x1F80 floatval[112]
M413 = "m413" # 0x1F88 floatval[113]
M414 = "m414" # 0x1F90 floatval[114]
M415 = "m415" # 0x1F98 floatval[115]
M416 = "m416" # 0x1FA0 floatval[116]
M417 = "m417" # 0x1FA8 floatval[117]
M418 = "m418" # 0x1FB0 floatval[118]
M419 = "m419" # 0x1FB8 floatval[119]
M420 = "m420" # 0x1FC0 floatval[120]
M421 = "m421" # 0x1FC8 floatval[121]
M422 = "m422" # 0x1FD0 floatval[122]
M423 = "m423" # 0x1FD8 floatval[123]
M424 = "m424" # 0x1FE0 floatval[124]
M425 = "m425" # 0x1FE8 floatval[125]
M426 = "m426" # 0x1FF0 floatval[126]
M427 = "m427" # 0x1FF8 floatval[127]
M428 = "m428" # 0x2000 floatval[128]
M429 = "m429" # 0x2008 floatval[129]
M430 = "m430" # 0x2010 floatval[130]
M431 = "m431" # 0x2018 floatval[131]
M432 = "m432" # 0x2020 floatval[132]
M433 = "m433" # 0x2028 floatval[133]
M434 = "m434" # 0x2030 floatval[134]
M435 = "m435" # 0x2038 floatval[135]
M436 = "m436" # 0x2040 floatval[136]
M437 = "m437" # 0x2048 floatval[137]
M438 = "m438" # 0x2050 floatval[138]
M439 = "m439" # 0x2058 floatval[139]
M440 = "m440" # 0x2060 floatval[140]
M441 = "m441" # 0x2068 floatval[141]
M442 = "m442" # 0x2070 floatval[142]
M443 = "m443" # 0x2078 floatval[143]
M444 = "m444" # 0x2080 floatval[144]
M445 = "m445" # 0x2088 floatval[145]
M446 = "m446" # 0x2090 floatval[146]
M447 = "m447" # 0x2098 floatval[147]
M448 = "m448" # 0x20A0 floatval[148]
M449 = "m449" # 0x20A8 floatval[149]
M450 = "m450" # 0x20B0 floatval[150]
M451 = "m451" # 0x20B8 floatval[151]
M452 = "m452" # 0x20C0 floatval[152]
M453 = "m453" # 0x20C8 floatval[153]
M454 = "m454" # 0x20D0 floatval[154]
M455 = "m455" # 0x20D8 floatval[155]
M456 = "m456" # 0x20E0 floatval[156]
M457 = "m457" # 0x20E8 floatval[157]
M458 = "m458" # 0x20F0 floatval[158]
M459 = "m459" # 0x20F8 floatval[159]
M460 = "m460" # 0x2100 floatval[160]
M461 = "m461" # 0x2108 floatval[161]
M462 = "m462" # 0x2110 floatval[162]
M463 = "m463" # 0x2118 floatval[163]
M464 = "m464" # 0x2120 floatval[164]
M465 = "m465" # 0x2128 floatval[165]
M466 = "m466" # 0x2130 floatval[166]
M467 = "m467" # 0x2138 floatval[167]
M468 = "m468" # 0x2140 floatval[168]
M469 = "m469" # 0x2148 floatval[169]
M470 = "m470" # 0x2150 floatval[170]
M471 = "m471" # 0x2158 floatval[171]
M472 = "m472" # 0x2160 floatval[172]
M473 = "m473" # 0x2168 floatval[173]
M474 = "m474" # 0x2170 floatval[174]
M475 = "m475" # 0x2178 floatval[175]
M476 = "m476" # 0x2180 floatval[176]
M477 = "m477" # 0x2188 floatval[177]
M478 = "m478" # 0x2190 floatval[178]
M479 = "m479" # 0x2198 floatval[179]
M480 = "m480" # 0x21A0 floatval[180]
M481 = "m481" # 0x21A8 floatval[181]
M482 = "m482" # 0x21B0 floatval[182]
M483 = "m483" # 0x21B8 floatval[183]
M484 = "m484" # 0x21C0 floatval[184
M485 = "m485" # 0x21C8 floatval[185]
M486 = "m486" # 0x21D0 floatval[186]
M487 = "m487" # 0x21D8 floatval[187]
M488 = "m488" # 0x21E0 floatval[188]
M489 = "m489" # 0x21E8 floatval[189]
M490 = "m490" # 0x21F0 floatval[190]
M491 = "m491" # 0x21F8 floatval[191]
M492 = "m492" # 0x2200 floatval[192]
M493 = "m493" # 0x2208 floatval[193]
M494 = "m494" # 0x2210 floatval[194]
M495 = "m495" # 0x2218 floatval[195]
M496 = "m496" # 0x2220 floatval[196]
M497 = "m497" # 0x2228 floatval[197]
M498 = "m498" # 0x2230 floatval[198]
M499 = "m499" # 0x2238 floatval[199]
M500 = "m500" # 0x2240 floatval[200]
M501 = "m501" # 0x2248 floatval[201]
M502 = "m502" # 0x2250 floatval[202]
M503 = "m503" # 0x2258 floatval[203]
M504 = "m504" # 0x2260 floatval[204]
M505 = "m505" # 0x2268 floatval[205]
M506 = "m506" # 0x2270 floatval[206]
M507 = "m507" # 0x2278 floatval[207]
M508 = "m508" # 0x2280 floatval[208]
M509 = "m509" # 0x2288 floatval[209]
M510 = "m510" # 0x2290 floatval[210]
M511 = "m511" # 0x2298 floatval[211]
M512 = "m512" # 0x22A0 floatval[212]
M513 = "m513" # 0x22A8 floatval[213]
M514 = "m514" # 0x22B0 floatval[214]
M515 = "m515" # 0x22B8 floatval[215]
M516 = "m516" # 0x22C0 floatval[216]
M517 = "m517" # 0x22C8 floatval[217]
M518 = "m518" # 0x22D0 floatval[218]
M519 = "m519" # 0x22D8 floatval[219]
M520 = "m520" # 0x22E0 floatval[220]
M521 = "m521" # 0x22E8 floatval[221]
M522 = "m522" # 0x22F0 floatval[222]
M523 = "m523" # 0x22F8 floatval[223]
M524 = "m524" # 0x2300 floatval[224]
M525 = "m525" # 0x2308 floatval[225]
M526 = "m526" # 0x2310 floatval[226]
M527 = "m527" # 0x2318 floatval[227]
M528 = "m528" # 0x2320 floatval[228]
M529 = "m529" # 0x2328 floatval[229]
M530 = "m530" # 0x2330 floatval[230]
M531 = "m531" # 0x2338 floatval[231]
M532 = "m532" # 0x2340 floatval[232]
M533 = "m533" # 0x2348 floatval[233]
M534 = "m534" # 0x2350 floatval[234]
M535 = "m535" # 0x2358 floatval[235]
M536 = "m536" # 0x2360 floatval[236]
M537 = "m537" # 0x2368 floatval[237]
M538 = "m538" # 0x2370 floatval[238]
M539 = "m539" # 0x2378 floatval[239]
M540 = "m540" # 0x2380 floatval[240]
M541 = "m541" # 0x2388 floatval[241]
M542 = "m542" # 0x2390 floatval[242]
M543 = "m543" # 0x2398 floatval[243]
M544 = "m544" # 0x23A0 floatval[244]
M545 = "m545" # 0x23A8 floatval[245]
M546 = "m546" # 0x23B0 floatval[246]
M547 = "m547" # 0x23B8 floatval[247]
M548 = "m548" # 0x23C0 floatval[248]
M549 = "m549" # 0x23C8 floatval[249]
M550 = "m550" # 0x23D0 floatval[250]
M551 = "m551" # 0x23D8 floatval[251]
M552 = "m552" # 0x23E0 floatval[252]
M553 = "m553" # 0x23E8 floatval[253]
M554 = "m554" # 0x23F0 floatval[254]
M555 = "m555" # 0x23F8 floatval[255]
# Controller state information
C000 = "c000" # 0x2800 errcode
C001 = "c001" # 0x2802 bTeachMode
C002 = "c002" # 0x2804 bSPILargeFrame
# Robot configuration information
G000 = "g000" # 0x2C00 manip_type[0-35]
G001 = "g001" # 0x2C24 manip_serial[0-35]
G002 = "g002" # 0x2C48 format_version[0-2]
G003 = "g003" # 0x2C54 parameter_version[0-2]
# Robot status information
R000 = "r000" # 0x3000 cmdx,cmdy,cmdz,cmdrz,cmdry,cmdrx
R100 = "r100" # 0x3040 posture
R101 = "r101" # 0x3044 coordinate
R102 = "r102" # 0x3048 singular
R103 = "r103" # 0x304C multiturn
R200 = "r200" # 0x3050 joint[0-5]
R300 = "r300" # 0x3090 velocity
R301 = "r301" # 0x3098 vel_error_axes
R302 = "r302" # 0x309C softlimit
R303 = "r303" # 0x30A0 joint_svon_to_svoff[0-5]
R304 = "r304" # 0x30E0 b_saved
R305 = "r305" # 0x30E4 toolno
R306 = "r306" # 0x30E8 hdorgx,hdorgy,hdorgz,hdorgrz,hdorgry,hdorgrx
R400 = "r400" # 0x3128 carte_svon_to_svoff[0-5]
R401 = "r401" # 0x3168 svon_to_svoff_posture
R402 = "r402" # 0x316C svon_to_svoff_coordinate
R403 = "r403" # 0x3170 svon_to_svoff_singular
R404 = "r404" # 0x3174 svon_to_svoff_multiturn
R405 = "r405" # 0x3178 svon_to_svoff_toolno
R406 = "r406" # 0x317C bRequestHold
R407 = "r407" # 0x317E bRequestSuspend
R408 = "r408" # 0x3180 bSuspended
R409 = "r409" # 0x3184 permitted_worker_id
R410 = "r410" # 0x3188 tool_org_params[0-5]
R411 = "r411" # 0x31B8 tool_fwdmatrix[0-11]
R412 = "r412" # 0x3218 last_hold_factor
R413 = "r413" # 0x3219 vdesc0_sts
R414 = "r414" # 0x321A vdesc1_sts
R415 = "r415" # 0x321B n_queued
R416 = "r416" # 0x321C logical_cmd_pulse[0-5]
R417 = "r417" # 0x323C logical_fb_pulse[0-5]
R418 = "r418" # 0x325C holdinfo
R419 = "r419" # 0x3260 svsts
R419 = "r419" # 0x3264 manip_pwr
R420 = "r420" # 0x3266 ems
R421 = "r421" # 0x3268 vdesc0_mvid
R422 = "r422" # 0x326C vdesc1_mvid
if __name__ == '__main__':
c = InfoCatchClient()
def callback(data):
print(data)
c.connect([InfoCatchClient.Label.I000,
InfoCatchClient.Label.R200,
InfoCatchClient.Label.M000,
InfoCatchClient.Label.M001,
InfoCatchClient.Label.M100,
InfoCatchClient.Label.M102,
InfoCatchClient.Label.F000,
InfoCatchClient.Label.F200,
InfoCatchClient.Label.F300,
])
for x in range(10):
data = c.recv()
print(data)
c.close()
| [
"socket.socket",
"json.dumps",
"time.sleep",
"json.JSONDecoder",
"socket.error"
] | [((1857, 1906), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (1870, 1906), False, 'import socket\n'), ((1931, 1961), 'json.JSONDecoder', 'json.JSONDecoder', ([], {'strict': '(False)'}), '(strict=False)\n', (1947, 1961), False, 'import json\n'), ((2176, 2192), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (2186, 2192), False, 'import json\n'), ((4471, 4486), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (4481, 4486), False, 'import time\n'), ((2530, 2556), 'socket.error', 'socket.error', (['"""recv error"""'], {}), "('recv error')\n", (2542, 2556), False, 'import socket\n'), ((4439, 4454), 'socket.error', 'socket.error', (['e'], {}), '(e)\n', (4451, 4454), False, 'import socket\n')] |
import unittest
from random import randint
from model.node import Node
from model.linked_list import LinkedList
SIZE = 5
class TestLinkedList(unittest.TestCase):
def test_copy(self):
nodes = []
for i in range(SIZE):
nodes.append(Node(i))
if i:
nodes[i - 1].next = nodes[i]
for i in range(SIZE):
number = randint(0, SIZE)
if number < SIZE:
nodes[i].random = nodes[number]
if nodes:
linked_list = LinkedList(nodes[0])
linked_list_copy = linked_list.copy()
loop = linked_list.head
loop_copy = linked_list_copy.head
while loop:
self.assertEqual(loop.data, loop_copy.data)
if loop.next:
self.assertEqual(loop.next.data, loop_copy.next.data)
if loop.random:
self.assertEqual(loop.random.data, loop_copy.random.data)
loop = loop.next
loop_copy = loop_copy.next
print('----------------------------------------')
print('Original')
print('----------------------------------------')
print(linked_list)
print('----------------------------------------')
print('Copia')
print('----------------------------------------')
print(linked_list_copy)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"random.randint",
"model.linked_list.LinkedList",
"model.node.Node"
] | [((1467, 1482), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1480, 1482), False, 'import unittest\n'), ((390, 406), 'random.randint', 'randint', (['(0)', 'SIZE'], {}), '(0, SIZE)\n', (397, 406), False, 'from random import randint\n'), ((530, 550), 'model.linked_list.LinkedList', 'LinkedList', (['nodes[0]'], {}), '(nodes[0])\n', (540, 550), False, 'from model.linked_list import LinkedList\n'), ((265, 272), 'model.node.Node', 'Node', (['i'], {}), '(i)\n', (269, 272), False, 'from model.node import Node\n')] |
from pygments.lexer import RegexLexer, include, words
from pygments.token import *
# https://docs.nvidia.com/cuda/parallel-thread-execution/index.html
class CustomLexer(RegexLexer):
string = r'"[^"]*?"'
followsym = r'[a-zA-Z0-9_$]*'
identifier = r'(?:[a-zA-Z]' + followsym + r'| [_$%]' + followsym + r')'
tokens = {
'root': [
include('whitespace'),
(r'%' + identifier, Name.Variable),
include('definition'),
include('statement'),
include('type'),
(identifier, Name.Variable),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
(r'0[0-7]+[LlUu]*', Number.Oct),
(r'\b\d+[LlUu]*\b', Number.Integer),
(r'[&|^+*/%=~-]', Operator),
(r'[()\[\]\{\},.;<>@]', Punctuation),
],
'whitespace': [
(r'(\n|\s)+', Text),
(r'/\*.*?\*/', Comment.Multiline),
(r'//.*?\n', Comment.Single),
],
'definition': [
(words(('func', 'reg'), prefix=r'\.', suffix=r'\b'), Keyword.Reserved),
(r'^' + identifier + r':', Name.Label),
],
'statement': [
# directive
(words((
'address_size', 'file', 'minnctapersm', 'target', 'align', 'func', 'param',
'tex', 'branchtarget', 'global', 'pragma', 'version', 'callprototype',
'loc', 'reg', 'visible', 'calltargets', 'local', 'reqntid', 'weak', 'const',
'maxnctapersm', 'section', 'entry', 'maxnreg', 'shared', 'extern',
'maxntid', 'sreg', ), prefix=r'\.', suffix=r'\b'), Keyword),
# instruction
(words((
'abs', 'div', 'or', 'sin', 'add', 'ex2', 'pmevent', 'slct', 'vmad', 'addc',
'exit', 'popc', 'sqrt', 'vmax', 'and', 'fma', 'prefetch', 'st', 'atom',
'isspacep', 'prefetchu', 'sub', 'vmin', 'bar', 'ld', 'prmt', 'subc', 'bfe',
'ldu', 'rcp', 'suld', 'vote', 'bfi', 'lg2', 'red', 'suq', 'vset', 'bfind',
'mad', 'rem', 'sured', 'bret', 'sust', 'vshl', 'brev', 'madc', 'rsqrt',
'testp', 'vshr', 'brkpt', 'max', 'sad', 'tex', 'vsub', 'call', 'membar',
'selp', 'tld4', 'clz', 'min', 'set', 'trap', 'xor', 'cnot', 'mov', 'setp',
'txq', 'copysign', 'mul', 'shf', 'vabsdiff', 'cos', 'shfl', 'cvta', 'not',
'shr', 'cvt', 'neg', 'shl', 'vadd'), prefix=r'\b', suffix=r'[\.\w]+\b'), Keyword),
(words((
'vavrg', 'vmax', 'vmin', 'vset', 'mad', 'vsub', 'mul', 'vabsdiff',
'vadd'), prefix=r'\b', suffix=r'[24]\b'), Keyword),
],
'type': [
(words((
's8', 's16', 's32', 's64',
'u8', 'u16', 'u32', 'u64',
'f16', 'f16x2', 'f32', 'f64',
'b8', 'b16', 'b32', 'b64',
'pred'), prefix=r'\.', suffix=r'\b'), Keyword.Type),
],
}
| [
"pygments.lexer.words",
"pygments.lexer.include"
] | [((366, 387), 'pygments.lexer.include', 'include', (['"""whitespace"""'], {}), "('whitespace')\n", (373, 387), False, 'from pygments.lexer import RegexLexer, include, words\n'), ((450, 471), 'pygments.lexer.include', 'include', (['"""definition"""'], {}), "('definition')\n", (457, 471), False, 'from pygments.lexer import RegexLexer, include, words\n'), ((485, 505), 'pygments.lexer.include', 'include', (['"""statement"""'], {}), "('statement')\n", (492, 505), False, 'from pygments.lexer import RegexLexer, include, words\n'), ((519, 534), 'pygments.lexer.include', 'include', (['"""type"""'], {}), "('type')\n", (526, 534), False, 'from pygments.lexer import RegexLexer, include, words\n'), ((1156, 1206), 'pygments.lexer.words', 'words', (["('func', 'reg')"], {'prefix': '"""\\\\."""', 'suffix': '"""\\\\b"""'}), "(('func', 'reg'), prefix='\\\\.', suffix='\\\\b')\n", (1161, 1206), False, 'from pygments.lexer import RegexLexer, include, words\n'), ((1350, 1716), 'pygments.lexer.words', 'words', (["('address_size', 'file', 'minnctapersm', 'target', 'align', 'func', 'param',\n 'tex', 'branchtarget', 'global', 'pragma', 'version', 'callprototype',\n 'loc', 'reg', 'visible', 'calltargets', 'local', 'reqntid', 'weak',\n 'const', 'maxnctapersm', 'section', 'entry', 'maxnreg', 'shared',\n 'extern', 'maxntid', 'sreg')"], {'prefix': '"""\\\\."""', 'suffix': '"""\\\\b"""'}), "(('address_size', 'file', 'minnctapersm', 'target', 'align', 'func',\n 'param', 'tex', 'branchtarget', 'global', 'pragma', 'version',\n 'callprototype', 'loc', 'reg', 'visible', 'calltargets', 'local',\n 'reqntid', 'weak', 'const', 'maxnctapersm', 'section', 'entry',\n 'maxnreg', 'shared', 'extern', 'maxntid', 'sreg'), prefix='\\\\.', suffix\n ='\\\\b')\n", (1355, 1716), False, 'from pygments.lexer import RegexLexer, include, words\n'), ((1829, 2539), 'pygments.lexer.words', 'words', (["('abs', 'div', 'or', 'sin', 'add', 'ex2', 'pmevent', 'slct', 'vmad', 'addc',\n 'exit', 'popc', 'sqrt', 'vmax', 'and', 
'fma', 'prefetch', 'st', 'atom',\n 'isspacep', 'prefetchu', 'sub', 'vmin', 'bar', 'ld', 'prmt', 'subc',\n 'bfe', 'ldu', 'rcp', 'suld', 'vote', 'bfi', 'lg2', 'red', 'suq', 'vset',\n 'bfind', 'mad', 'rem', 'sured', 'bret', 'sust', 'vshl', 'brev', 'madc',\n 'rsqrt', 'testp', 'vshr', 'brkpt', 'max', 'sad', 'tex', 'vsub', 'call',\n 'membar', 'selp', 'tld4', 'clz', 'min', 'set', 'trap', 'xor', 'cnot',\n 'mov', 'setp', 'txq', 'copysign', 'mul', 'shf', 'vabsdiff', 'cos',\n 'shfl', 'cvta', 'not', 'shr', 'cvt', 'neg', 'shl', 'vadd')"], {'prefix': '"""\\\\b"""', 'suffix': '"""[\\\\.\\\\w]+\\\\b"""'}), "(('abs', 'div', 'or', 'sin', 'add', 'ex2', 'pmevent', 'slct', 'vmad',\n 'addc', 'exit', 'popc', 'sqrt', 'vmax', 'and', 'fma', 'prefetch', 'st',\n 'atom', 'isspacep', 'prefetchu', 'sub', 'vmin', 'bar', 'ld', 'prmt',\n 'subc', 'bfe', 'ldu', 'rcp', 'suld', 'vote', 'bfi', 'lg2', 'red', 'suq',\n 'vset', 'bfind', 'mad', 'rem', 'sured', 'bret', 'sust', 'vshl', 'brev',\n 'madc', 'rsqrt', 'testp', 'vshr', 'brkpt', 'max', 'sad', 'tex', 'vsub',\n 'call', 'membar', 'selp', 'tld4', 'clz', 'min', 'set', 'trap', 'xor',\n 'cnot', 'mov', 'setp', 'txq', 'copysign', 'mul', 'shf', 'vabsdiff',\n 'cos', 'shfl', 'cvta', 'not', 'shr', 'cvt', 'neg', 'shl', 'vadd'),\n prefix='\\\\b', suffix='[\\\\.\\\\w]+\\\\b')\n", (1834, 2539), False, 'from pygments.lexer import RegexLexer, include, words\n'), ((2671, 2789), 'pygments.lexer.words', 'words', (["('vavrg', 'vmax', 'vmin', 'vset', 'mad', 'vsub', 'mul', 'vabsdiff', 'vadd')"], {'prefix': '"""\\\\b"""', 'suffix': '"""[24]\\\\b"""'}), "(('vavrg', 'vmax', 'vmin', 'vset', 'mad', 'vsub', 'mul', 'vabsdiff',\n 'vadd'), prefix='\\\\b', suffix='[24]\\\\b')\n", (2676, 2789), False, 'from pygments.lexer import RegexLexer, include, words\n'), ((2872, 3035), 'pygments.lexer.words', 'words', (["('s8', 's16', 's32', 's64', 'u8', 'u16', 'u32', 'u64', 'f16', 'f16x2',\n 'f32', 'f64', 'b8', 'b16', 'b32', 'b64', 'pred')"], {'prefix': '"""\\\\."""', 'suffix': '"""\\\\b"""'}), 
"(('s8', 's16', 's32', 's64', 'u8', 'u16', 'u32', 'u64', 'f16', 'f16x2',\n 'f32', 'f64', 'b8', 'b16', 'b32', 'b64', 'pred'), prefix='\\\\.', suffix=\n '\\\\b')\n", (2877, 3035), False, 'from pygments.lexer import RegexLexer, include, words\n')] |
# -*- coding: utf-8 -*-
"""Basic tests for state and entity relationships in dork
"""
import dork.types
from tests.utils import has_many, is_a
def test_items_exist():
"""the dork module should define an Item
"""
assert "Item" in vars(dork.types)
is_a(dork.types.Item, type)
def test_holders_exist():
"""the dork module should define an Holder
"""
assert "Holder" in vars(dork.types)
is_a(dork.types.Holder, type)
def test_players_exist():
"""the dork module should define an Player
"""
assert "Player" in vars(dork.types)
is_a(dork.types.Player, type)
def test_rooms_exist():
"""the dork module should define an Room
"""
assert "Room" in vars(dork.types)
is_a(dork.types.Room, type)
def test_path_exists():
"""the dork module should define an Path
"""
assert "Path" in vars(dork.types)
is_a(dork.types.Path, type)
def test_map_exists():
"""the dork module should define an Map
"""
assert "Map" in vars(dork.types)
is_a(dork.types.Map, type)
def test_holder_has_many_items():
"""A Holder should have many Items
"""
has_many(dork.types.Holder, "holder", dork.types.Item, "items")
def test_player_is_a_holder(player):
"""A Player should be a Holder
"""
is_a(player, dork.types.Holder)
def test_room_is_a_holder(room):
"""A Room should be a Holder
"""
is_a(room, dork.types.Holder)
def test_room_has_many_players():
"""A Room should have many players
"""
has_many(dork.types.Room, "room", dork.types.Player, "players")
def test_room_has_many_paths():
"""A Room should have many Paths through exits and entrances.
"""
has_many(dork.types.Room, "entrance", dork.types.Path, "entrances")
has_many(dork.types.Room, "exit", dork.types.Path, "exits")
def test_map_has_many_rooms():
"""A Map should have many Rooms
"""
has_many(dork.types.Map, "map", dork.types.Room, "rooms")
| [
"tests.utils.is_a",
"tests.utils.has_many"
] | [((264, 291), 'tests.utils.is_a', 'is_a', (['dork.types.Item', 'type'], {}), '(dork.types.Item, type)\n', (268, 291), False, 'from tests.utils import has_many, is_a\n'), ((419, 448), 'tests.utils.is_a', 'is_a', (['dork.types.Holder', 'type'], {}), '(dork.types.Holder, type)\n', (423, 448), False, 'from tests.utils import has_many, is_a\n'), ((576, 605), 'tests.utils.is_a', 'is_a', (['dork.types.Player', 'type'], {}), '(dork.types.Player, type)\n', (580, 605), False, 'from tests.utils import has_many, is_a\n'), ((727, 754), 'tests.utils.is_a', 'is_a', (['dork.types.Room', 'type'], {}), '(dork.types.Room, type)\n', (731, 754), False, 'from tests.utils import has_many, is_a\n'), ((876, 903), 'tests.utils.is_a', 'is_a', (['dork.types.Path', 'type'], {}), '(dork.types.Path, type)\n', (880, 903), False, 'from tests.utils import has_many, is_a\n'), ((1022, 1048), 'tests.utils.is_a', 'is_a', (['dork.types.Map', 'type'], {}), '(dork.types.Map, type)\n', (1026, 1048), False, 'from tests.utils import has_many, is_a\n'), ((1136, 1199), 'tests.utils.has_many', 'has_many', (['dork.types.Holder', '"""holder"""', 'dork.types.Item', '"""items"""'], {}), "(dork.types.Holder, 'holder', dork.types.Item, 'items')\n", (1144, 1199), False, 'from tests.utils import has_many, is_a\n'), ((1286, 1317), 'tests.utils.is_a', 'is_a', (['player', 'dork.types.Holder'], {}), '(player, dork.types.Holder)\n', (1290, 1317), False, 'from tests.utils import has_many, is_a\n'), ((1398, 1427), 'tests.utils.is_a', 'is_a', (['room', 'dork.types.Holder'], {}), '(room, dork.types.Holder)\n', (1402, 1427), False, 'from tests.utils import has_many, is_a\n'), ((1515, 1578), 'tests.utils.has_many', 'has_many', (['dork.types.Room', '"""room"""', 'dork.types.Player', '"""players"""'], {}), "(dork.types.Room, 'room', dork.types.Player, 'players')\n", (1523, 1578), False, 'from tests.utils import has_many, is_a\n'), ((1691, 1758), 'tests.utils.has_many', 'has_many', (['dork.types.Room', '"""entrance"""', 
'dork.types.Path', '"""entrances"""'], {}), "(dork.types.Room, 'entrance', dork.types.Path, 'entrances')\n", (1699, 1758), False, 'from tests.utils import has_many, is_a\n'), ((1763, 1822), 'tests.utils.has_many', 'has_many', (['dork.types.Room', '"""exit"""', 'dork.types.Path', '"""exits"""'], {}), "(dork.types.Room, 'exit', dork.types.Path, 'exits')\n", (1771, 1822), False, 'from tests.utils import has_many, is_a\n'), ((1904, 1961), 'tests.utils.has_many', 'has_many', (['dork.types.Map', '"""map"""', 'dork.types.Room', '"""rooms"""'], {}), "(dork.types.Map, 'map', dork.types.Room, 'rooms')\n", (1912, 1961), False, 'from tests.utils import has_many, is_a\n')] |
#!/usr/bin/env python2
# coding: utf-8
import sys
def main():
s = ' '.join((u'❄ ☃ ❄', sys.version.split()[0], u'❄ ☃ ❄'))
print(type(s))
return {'snowy_version': s}
if __name__ == '__main__':
main()
| [
"sys.version.split"
] | [((98, 117), 'sys.version.split', 'sys.version.split', ([], {}), '()\n', (115, 117), False, 'import sys\n')] |
import pytest
import cue
def test_basic():
cue.compile('')
assert '1' == str(cue.compile('1'))
assert ['1', '2', '3', '{\n\ta: 1\n}'] == [str(v) for v in cue.compile('[1,2,3,{a:1}]')]
assert [('a', '1'), ('b', '2')] == [(str(k), str(v)) for k, v in cue.compile('{a: 1, b: 2}')]
with pytest.raises(cue.CueError):
cue.compile('a')
v1 = cue.compile('{a: 1}')
v2 = cue.compile('{a: 2}')
v3 = cue.compile('{a: <3}')
assert False == v1.unifies_with(v2)
assert True == v1.unifies_with(v3)
assert True == v1.unifies_with(v3)
assert True == v2.unifies_with(v3)
with pytest.raises(ValueError):
iter(cue.compile('1'))
assert True == cue.compile('null').is_null()
assert True == cue.compile('true').is_bool()
assert True == cue.compile('1').is_int()
assert True == cue.compile('1.0').is_float()
assert True == cue.compile(r"'\x03abc'").is_bytes()
assert True == cue.compile('"hi"').is_string()
assert True == cue.compile('{a:1}').is_struct()
assert True == cue.compile('[1,2]').is_list()
assert 1 == cue.compile('1').to_int()
assert 2 == int(cue.compile('2'))
with pytest.raises(ValueError):
assert 1 == cue.compile('"hi"').to_int()
assert 9223372036854775807 == int(cue.compile("9223372036854775807"))
with pytest.raises(cue.CueError):
assert 9223372036854775808 == int(cue.compile("9223372036854775808"))
assert -9223372036854775807 == int(cue.compile('-9223372036854775807 '))
with pytest.raises(cue.CueError):
assert -9223372036854775808 == int(cue.compile('-9223372036854775808 '))
assert 1.0 == cue.compile('1.0').to_float()
assert 2.0 == float(cue.compile('2.0'))
with pytest.raises(ValueError):
assert 1.0 == cue.compile('"hi"').to_int()
assert 4.9 == float(cue.compile('1 + 3.9'))
assert True == cue.compile('true').to_bool()
assert False == bool(cue.compile('false && true'))
with pytest.raises(ValueError):
assert True == cue.compile('"hi"').to_int()
assert "ok" == cue.compile('"ok"').to_string()
assert '"okk"' == str(cue.compile('"ok" + "k"'))
with pytest.raises(ValueError):
assert "ok" == cue.compile('1').to_string()
assert {'a': 1, 'b': [{'c': 1}]} == cue.compile('{a: 1, b: [{c: 1}]}').to_dict()
assert {} == cue.compile('').to_dict()
with pytest.raises(ValueError):
assert {} == cue.compile('1').to_dict()
assert [1,2,{'a':2,'b':{'c':2}}] == cue.compile('[1,2,{a:2,b:{c:2}}]').to_list()
with pytest.raises(ValueError):
assert [] == cue.compile('1').to_list()
assert True == cue.compile('true').to_python()
assert 1 == cue.compile('1').to_python()
assert 1.0 == cue.compile('1.0').to_python()
assert "hi" == cue.compile('"hi"').to_python()
with pytest.raises(ValueError):
cue.compile('a: int').to_python()
with pytest.raises(ValueError):
cue.compile('a: <3').to_python()
def test_dumps():
assert '1.0' == cue.dumps(1.0)
assert '1' == cue.dumps(1)
assert 'true' == cue.dumps(True)
assert '"true"' == cue.dumps("true")
assert '[1,2,3]' == cue.dumps([1,2,3])
assert '[{a:1},{b:2},{c:[2,"hi"]}]' == cue.dumps([{'a':1},{'b':2},{'c':[2,'hi']}])
def test_loads():
assert 1.0 == cue.loads('1.0')
assert 1 == cue.loads('1')
assert True == cue.loads('true')
assert "true" == cue.loads('"true"')
assert [1,2,3] == cue.loads('[1,2,3]')
assert [{'a':1},{'b':2},{'c':[2,'hi']}] == cue.loads('[{a:1},{b:2},{c:[2,"hi"]}]')
def test_dumps_loads():
ps = [
(1.0, '1.0'),
(1, '1'),
(True, 'true'),
("true", '"true"'),
([1,2,3], '[1,2,3]'),
([{'a':1},{'b':2},{'c':[2,'hi']}], '[{a:1},{b:2},{c:[2,"hi"]}]'),
]
for p, s in ps:
assert p == cue.loads(cue.dumps(p))
assert s == cue.dumps(cue.loads(s))
| [
"cue.loads",
"cue.compile",
"pytest.raises",
"cue.dumps"
] | [((48, 63), 'cue.compile', 'cue.compile', (['""""""'], {}), "('')\n", (59, 63), False, 'import cue\n'), ((353, 374), 'cue.compile', 'cue.compile', (['"""{a: 1}"""'], {}), "('{a: 1}')\n", (364, 374), False, 'import cue\n'), ((382, 403), 'cue.compile', 'cue.compile', (['"""{a: 2}"""'], {}), "('{a: 2}')\n", (393, 403), False, 'import cue\n'), ((411, 433), 'cue.compile', 'cue.compile', (['"""{a: <3}"""'], {}), "('{a: <3}')\n", (422, 433), False, 'import cue\n'), ((296, 323), 'pytest.raises', 'pytest.raises', (['cue.CueError'], {}), '(cue.CueError)\n', (309, 323), False, 'import pytest\n'), ((329, 345), 'cue.compile', 'cue.compile', (['"""a"""'], {}), "('a')\n", (340, 345), False, 'import cue\n'), ((590, 615), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (603, 615), False, 'import pytest\n'), ((1114, 1139), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1127, 1139), False, 'import pytest\n'), ((1265, 1292), 'pytest.raises', 'pytest.raises', (['cue.CueError'], {}), '(cue.CueError)\n', (1278, 1292), False, 'import pytest\n'), ((1450, 1477), 'pytest.raises', 'pytest.raises', (['cue.CueError'], {}), '(cue.CueError)\n', (1463, 1477), False, 'import pytest\n'), ((1652, 1677), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1665, 1677), False, 'import pytest\n'), ((1880, 1905), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1893, 1905), False, 'import pytest\n'), ((2063, 2088), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2076, 2088), False, 'import pytest\n'), ((2270, 2295), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2283, 2295), False, 'import pytest\n'), ((2432, 2457), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2445, 2457), False, 'import pytest\n'), ((2700, 2725), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2713, 2725), False, 'import 
pytest\n'), ((2772, 2797), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2785, 2797), False, 'import pytest\n'), ((2873, 2887), 'cue.dumps', 'cue.dumps', (['(1.0)'], {}), '(1.0)\n', (2882, 2887), False, 'import cue\n'), ((2904, 2916), 'cue.dumps', 'cue.dumps', (['(1)'], {}), '(1)\n', (2913, 2916), False, 'import cue\n'), ((2936, 2951), 'cue.dumps', 'cue.dumps', (['(True)'], {}), '(True)\n', (2945, 2951), False, 'import cue\n'), ((2973, 2990), 'cue.dumps', 'cue.dumps', (['"""true"""'], {}), "('true')\n", (2982, 2990), False, 'import cue\n'), ((3013, 3033), 'cue.dumps', 'cue.dumps', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (3022, 3033), False, 'import cue\n'), ((3073, 3122), 'cue.dumps', 'cue.dumps', (["[{'a': 1}, {'b': 2}, {'c': [2, 'hi']}]"], {}), "([{'a': 1}, {'b': 2}, {'c': [2, 'hi']}])\n", (3082, 3122), False, 'import cue\n'), ((3152, 3168), 'cue.loads', 'cue.loads', (['"""1.0"""'], {}), "('1.0')\n", (3161, 3168), False, 'import cue\n'), ((3183, 3197), 'cue.loads', 'cue.loads', (['"""1"""'], {}), "('1')\n", (3192, 3197), False, 'import cue\n'), ((3215, 3232), 'cue.loads', 'cue.loads', (['"""true"""'], {}), "('true')\n", (3224, 3232), False, 'import cue\n'), ((3252, 3271), 'cue.loads', 'cue.loads', (['""""true\\""""'], {}), '(\'"true"\')\n', (3261, 3271), False, 'import cue\n'), ((3292, 3312), 'cue.loads', 'cue.loads', (['"""[1,2,3]"""'], {}), "('[1,2,3]')\n", (3301, 3312), False, 'import cue\n'), ((3358, 3397), 'cue.loads', 'cue.loads', (['"""[{a:1},{b:2},{c:[2,"hi"]}]"""'], {}), '(\'[{a:1},{b:2},{c:[2,"hi"]}]\')\n', (3367, 3397), False, 'import cue\n'), ((84, 100), 'cue.compile', 'cue.compile', (['"""1"""'], {}), "('1')\n", (95, 100), False, 'import cue\n'), ((626, 642), 'cue.compile', 'cue.compile', (['"""1"""'], {}), "('1')\n", (637, 642), False, 'import cue\n'), ((1089, 1105), 'cue.compile', 'cue.compile', (['"""2"""'], {}), "('2')\n", (1100, 1105), False, 'import cue\n'), ((1222, 1256), 'cue.compile', 'cue.compile', 
(['"""9223372036854775807"""'], {}), "('9223372036854775807')\n", (1233, 1256), False, 'import cue\n'), ((1405, 1441), 'cue.compile', 'cue.compile', (['"""-9223372036854775807 """'], {}), "('-9223372036854775807 ')\n", (1416, 1441), False, 'import cue\n'), ((1625, 1643), 'cue.compile', 'cue.compile', (['"""2.0"""'], {}), "('2.0')\n", (1636, 1643), False, 'import cue\n'), ((1748, 1770), 'cue.compile', 'cue.compile', (['"""1 + 3.9"""'], {}), "('1 + 3.9')\n", (1759, 1770), False, 'import cue\n'), ((1843, 1871), 'cue.compile', 'cue.compile', (['"""false && true"""'], {}), "('false && true')\n", (1854, 1871), False, 'import cue\n'), ((2029, 2054), 'cue.compile', 'cue.compile', (['""""ok" + "k\\""""'], {}), '(\'"ok" + "k"\')\n', (2040, 2054), False, 'import cue\n'), ((163, 191), 'cue.compile', 'cue.compile', (['"""[1,2,3,{a:1}]"""'], {}), "('[1,2,3,{a:1}]')\n", (174, 191), False, 'import cue\n'), ((260, 287), 'cue.compile', 'cue.compile', (['"""{a: 1, b: 2}"""'], {}), "('{a: 1, b: 2}')\n", (271, 287), False, 'import cue\n'), ((662, 681), 'cue.compile', 'cue.compile', (['"""null"""'], {}), "('null')\n", (673, 681), False, 'import cue\n'), ((709, 728), 'cue.compile', 'cue.compile', (['"""true"""'], {}), "('true')\n", (720, 728), False, 'import cue\n'), ((756, 772), 'cue.compile', 'cue.compile', (['"""1"""'], {}), "('1')\n", (767, 772), False, 'import cue\n'), ((799, 817), 'cue.compile', 'cue.compile', (['"""1.0"""'], {}), "('1.0')\n", (810, 817), False, 'import cue\n'), ((846, 871), 'cue.compile', 'cue.compile', (['"""\'\\\\x03abc\'"""'], {}), '("\'\\\\x03abc\'")\n', (857, 871), False, 'import cue\n'), ((900, 919), 'cue.compile', 'cue.compile', (['""""hi\\""""'], {}), '(\'"hi"\')\n', (911, 919), False, 'import cue\n'), ((949, 969), 'cue.compile', 'cue.compile', (['"""{a:1}"""'], {}), "('{a:1}')\n", (960, 969), False, 'import cue\n'), ((999, 1019), 'cue.compile', 'cue.compile', (['"""[1,2]"""'], {}), "('[1,2]')\n", (1010, 1019), False, 'import cue\n'), ((1045, 1061), 
'cue.compile', 'cue.compile', (['"""1"""'], {}), "('1')\n", (1056, 1061), False, 'import cue\n'), ((1332, 1366), 'cue.compile', 'cue.compile', (['"""9223372036854775808"""'], {}), "('9223372036854775808')\n", (1343, 1366), False, 'import cue\n'), ((1518, 1554), 'cue.compile', 'cue.compile', (['"""-9223372036854775808 """'], {}), "('-9223372036854775808 ')\n", (1529, 1554), False, 'import cue\n'), ((1573, 1591), 'cue.compile', 'cue.compile', (['"""1.0"""'], {}), "('1.0')\n", (1584, 1591), False, 'import cue\n'), ((1790, 1809), 'cue.compile', 'cue.compile', (['"""true"""'], {}), "('true')\n", (1801, 1809), False, 'import cue\n'), ((1973, 1992), 'cue.compile', 'cue.compile', (['""""ok\\""""'], {}), '(\'"ok"\')\n', (1984, 1992), False, 'import cue\n'), ((2177, 2211), 'cue.compile', 'cue.compile', (['"""{a: 1, b: [{c: 1}]}"""'], {}), "('{a: 1, b: [{c: 1}]}')\n", (2188, 2211), False, 'import cue\n'), ((2237, 2252), 'cue.compile', 'cue.compile', (['""""""'], {}), "('')\n", (2248, 2252), False, 'import cue\n'), ((2380, 2414), 'cue.compile', 'cue.compile', (['"""[1,2,{a:2,b:{c:2}}]"""'], {}), "('[1,2,{a:2,b:{c:2}}]')\n", (2391, 2414), False, 'import cue\n'), ((2521, 2540), 'cue.compile', 'cue.compile', (['"""true"""'], {}), "('true')\n", (2532, 2540), False, 'import cue\n'), ((2567, 2583), 'cue.compile', 'cue.compile', (['"""1"""'], {}), "('1')\n", (2578, 2583), False, 'import cue\n'), ((2612, 2630), 'cue.compile', 'cue.compile', (['"""1.0"""'], {}), "('1.0')\n", (2623, 2630), False, 'import cue\n'), ((2660, 2679), 'cue.compile', 'cue.compile', (['""""hi\\""""'], {}), '(\'"hi"\')\n', (2671, 2679), False, 'import cue\n'), ((2731, 2752), 'cue.compile', 'cue.compile', (['"""a: int"""'], {}), "('a: int')\n", (2742, 2752), False, 'import cue\n'), ((2803, 2823), 'cue.compile', 'cue.compile', (['"""a: <3"""'], {}), "('a: <3')\n", (2814, 2823), False, 'import cue\n'), ((3653, 3665), 'cue.dumps', 'cue.dumps', (['p'], {}), '(p)\n', (3662, 3665), False, 'import cue\n'), ((3693, 3705), 
'cue.loads', 'cue.loads', (['s'], {}), '(s)\n', (3702, 3705), False, 'import cue\n'), ((1157, 1176), 'cue.compile', 'cue.compile', (['""""hi\\""""'], {}), '(\'"hi"\')\n', (1168, 1176), False, 'import cue\n'), ((1697, 1716), 'cue.compile', 'cue.compile', (['""""hi\\""""'], {}), '(\'"hi"\')\n', (1708, 1716), False, 'import cue\n'), ((1926, 1945), 'cue.compile', 'cue.compile', (['""""hi\\""""'], {}), '(\'"hi"\')\n', (1937, 1945), False, 'import cue\n'), ((2109, 2125), 'cue.compile', 'cue.compile', (['"""1"""'], {}), "('1')\n", (2120, 2125), False, 'import cue\n'), ((2314, 2330), 'cue.compile', 'cue.compile', (['"""1"""'], {}), "('1')\n", (2325, 2330), False, 'import cue\n'), ((2476, 2492), 'cue.compile', 'cue.compile', (['"""1"""'], {}), "('1')\n", (2487, 2492), False, 'import cue\n')] |
from mqfactory.tools import Policy, Rule, CATCH_ALL
def test_empty_policy():
p = Policy()
assert p.match({"something": "something"}) == CATCH_ALL
assert p.match({}) == CATCH_ALL
def test_policy():
p = Policy([
Rule({ "a": 1, "b": 1, "c": 1 }, "a=1,b=1,c=1" ),
Rule({ "a": 1, "b": 1, }, "a=1,b=1" ),
Rule({ "a": 1, }, "a=1" ),
Rule({ "b": 1, }, "b=1" ),
])
assert p.match({"a": 1}).value == "a=1"
assert p.match({"b": 1}).value == "b=1"
assert p.match({"a": 2, "b": 1, "c": 1}).value == "b=1"
assert p.match({"a": 1, "b": 1, "c": 1}).value == "a=1,b=1,c=1"
assert p.match({"a": 2, "b": 2, "c": 2}).value is None
assert p.match({"d": 1}).value is None
| [
"mqfactory.tools.Rule",
"mqfactory.tools.Policy"
] | [((84, 92), 'mqfactory.tools.Policy', 'Policy', ([], {}), '()\n', (90, 92), False, 'from mqfactory.tools import Policy, Rule, CATCH_ALL\n'), ((224, 269), 'mqfactory.tools.Rule', 'Rule', (["{'a': 1, 'b': 1, 'c': 1}", '"""a=1,b=1,c=1"""'], {}), "({'a': 1, 'b': 1, 'c': 1}, 'a=1,b=1,c=1')\n", (228, 269), False, 'from mqfactory.tools import Policy, Rule, CATCH_ALL\n'), ((284, 317), 'mqfactory.tools.Rule', 'Rule', (["{'a': 1, 'b': 1}", '"""a=1,b=1"""'], {}), "({'a': 1, 'b': 1}, 'a=1,b=1')\n", (288, 317), False, 'from mqfactory.tools import Policy, Rule, CATCH_ALL\n'), ((340, 361), 'mqfactory.tools.Rule', 'Rule', (["{'a': 1}", '"""a=1"""'], {}), "({'a': 1}, 'a=1')\n", (344, 361), False, 'from mqfactory.tools import Policy, Rule, CATCH_ALL\n'), ((392, 413), 'mqfactory.tools.Rule', 'Rule', (["{'b': 1}", '"""b=1"""'], {}), "({'b': 1}, 'b=1')\n", (396, 413), False, 'from mqfactory.tools import Policy, Rule, CATCH_ALL\n')] |
from threading import Thread
num = 0
def do_sth():
global num
for i in range(1000000):
num += 1
adda()
addb()
def adda():
global num
num += 1
def addb():
global num
num += 1
t1 = Thread(target=do_sth)
t2 = Thread(target=do_sth)
t1.start()
t2.start()
t1.join()
t2.join()
print(num)
# 这个计算数值错误
| [
"threading.Thread"
] | [((228, 249), 'threading.Thread', 'Thread', ([], {'target': 'do_sth'}), '(target=do_sth)\n', (234, 249), False, 'from threading import Thread\n'), ((255, 276), 'threading.Thread', 'Thread', ([], {'target': 'do_sth'}), '(target=do_sth)\n', (261, 276), False, 'from threading import Thread\n')] |
# coding=utf-8
from builtins import input
from optparse import make_option
from django.core import exceptions
from django.utils.encoding import force_str
from django.conf import settings
from django.db.utils import IntegrityError
from django.core.management import call_command
from tenant_schemas.utils import get_tenant_model
from bluebottle.members.models import Member
from bluebottle.common.management.commands.base import Command as BaseCommand
from bluebottle.utils.models import Language
class Command(BaseCommand):
help = 'Create a tenant'
option_list = BaseCommand.options + (
make_option('--full-name',
help='Specifies the full name for the tenant (e.g. "Our New Tenant").'),
make_option('--schema-name',
help='Specifies the schema name for the tenant (e.g. "new_tenant").'),
make_option('--domain-url',
help='Specifies the domain_url for the tenant (e.g. "new-tenant.localhost").'),
make_option('--client-name',
help='Specifies the client name for the tenant (e.g. "new-tenant").'),
make_option('--languages',
default='en',
help='Specifies the client languages (e.g. "en,nl").'),
make_option('--post-command',
help='Calls another management command after the tenant is created.')
)
def handle(self, *args, **options):
name = options.get('full_name', None)
client_name = options.get('client_name', None)
schema_name = options.get('schema_name', None)
domain_url = options.get('domain_url', None)
languages = options.get('languages', 'en')
post_command = options.get('post_command', None)
# If full-name is specified then don't prompt for any values.
if name:
if not client_name:
client_name = ''.join(ch if ch.isalnum() else '-' for ch in name).lower()
if not schema_name:
schema_name = client_name.replace('-', '_')
if not domain_url:
base_domain = getattr(settings, 'TENANT_BASE_DOMAIN', 'localhost')
domain_url = '{0}.{1}'.format(client_name, base_domain)
client_name.replace('_', '-')
client = self.store_client(
name=name,
client_name=client_name,
domain_url=domain_url,
schema_name=schema_name
)
if client is False:
return
if not client:
name = None
while name is None:
if not name:
input_msg = 'Tenant name'
name = eval(input(force_str('%s: ' % input_msg)))
default_client_name = ''.join(ch if ch.isalnum() else '-' for ch in name).lower()
default_schema_name = default_client_name.replace('-', '_')
base_domain = getattr(settings, 'TENANT_BASE_DOMAIN', 'localhost')
default_domain_url = '{0}.{1}'.format(default_client_name, base_domain)
while client_name is None:
if not client_name:
input_msg = 'Client name'
input_msg = "%s (leave blank to use '%s')" % (input_msg, default_client_name)
client_name = eval(input(force_str('%s: ' % input_msg))) or default_client_name
while schema_name is None:
if not schema_name:
input_msg = 'Database schema name'
input_msg = "%s (leave blank to use '%s')" % (input_msg, default_schema_name)
schema_name = eval(input(force_str('%s: ' % input_msg))) or default_schema_name
while domain_url is None:
if not domain_url:
input_msg = 'Domain url'
input_msg = "%s (leave blank to use '%s')" % (input_msg, default_domain_url)
domain_url = eval(input(force_str('%s: ' % input_msg))) or default_domain_url
client_name.replace('_', '-')
client = self.store_client(
name=name,
client_name=client_name,
domain_url=domain_url,
schema_name=schema_name
)
if client is False:
break
if not client:
name = None
continue
if client and client_name:
from django.db import connection
connection.set_tenant(client)
self.create_languages(languages)
self.create_client_superuser()
call_command('loaddata', 'geo_data')
call_command('loaddata', 'geo_data')
call_command('loaddata', 'skills')
call_command('search_index', '--rebuild', '-f')
call_command('loadlinks', '-f', 'links.json')
call_command('loadpages', '-f', 'pages.json')
if client and post_command:
call_command(post_command, *args, **options)
return
def create_languages(self, languages):
for lang in languages.split(","):
if lang == 'nl':
Language.objects.get_or_create(
code='nl',
defaults={
'language_name': 'Dutch',
'native_name': 'Nederlands'
}
)
if lang == 'en':
Language.objects.get_or_create(
code='en',
defaults={
'language_name': 'English',
'native_name': 'English'
}
)
if lang == 'fr':
Language.objects.get_or_create(
code='fr',
defaults={
'language_name': 'French',
'native_name': 'Français'
}
)
def create_client_superuser(self):
password = '<PASSWORD>='
su = Member.objects.create(first_name='admin',
last_name='example',
email='<EMAIL>',
password=password,
is_active=True,
is_staff=True,
is_superuser=True)
su.save()
def store_client(self, name, client_name, domain_url, schema_name):
try:
client = get_tenant_model().objects.create(
name=name,
client_name=client_name,
domain_url=domain_url.split(":", 1)[0], # strip optional port
schema_name=schema_name
)
client.save()
return client
except exceptions.ValidationError as e:
self.stderr.write("Error: %s" % '; '.join(e.messages))
name = None
return False
except IntegrityError:
self.stderr.write("Error: We've already got a tenant with that name or property.")
return False
| [
"django.core.management.call_command",
"django.utils.encoding.force_str",
"bluebottle.members.models.Member.objects.create",
"optparse.make_option",
"bluebottle.utils.models.Language.objects.get_or_create",
"tenant_schemas.utils.get_tenant_model",
"django.db.connection.set_tenant"
] | [((6095, 6252), 'bluebottle.members.models.Member.objects.create', 'Member.objects.create', ([], {'first_name': '"""admin"""', 'last_name': '"""example"""', 'email': '"""<EMAIL>"""', 'password': 'password', 'is_active': '(True)', 'is_staff': '(True)', 'is_superuser': '(True)'}), "(first_name='admin', last_name='example', email=\n '<EMAIL>', password=password, is_active=True, is_staff=True,\n is_superuser=True)\n", (6116, 6252), False, 'from bluebottle.members.models import Member\n'), ((609, 712), 'optparse.make_option', 'make_option', (['"""--full-name"""'], {'help': '"""Specifies the full name for the tenant (e.g. "Our New Tenant")."""'}), '(\'--full-name\', help=\n \'Specifies the full name for the tenant (e.g. "Our New Tenant").\')\n', (620, 712), False, 'from optparse import make_option\n'), ((737, 840), 'optparse.make_option', 'make_option', (['"""--schema-name"""'], {'help': '"""Specifies the schema name for the tenant (e.g. "new_tenant")."""'}), '(\'--schema-name\', help=\n \'Specifies the schema name for the tenant (e.g. "new_tenant").\')\n', (748, 840), False, 'from optparse import make_option\n'), ((865, 976), 'optparse.make_option', 'make_option', (['"""--domain-url"""'], {'help': '"""Specifies the domain_url for the tenant (e.g. "new-tenant.localhost")."""'}), '(\'--domain-url\', help=\n \'Specifies the domain_url for the tenant (e.g. "new-tenant.localhost").\')\n', (876, 976), False, 'from optparse import make_option\n'), ((1001, 1104), 'optparse.make_option', 'make_option', (['"""--client-name"""'], {'help': '"""Specifies the client name for the tenant (e.g. "new-tenant")."""'}), '(\'--client-name\', help=\n \'Specifies the client name for the tenant (e.g. "new-tenant").\')\n', (1012, 1104), False, 'from optparse import make_option\n'), ((1129, 1229), 'optparse.make_option', 'make_option', (['"""--languages"""'], {'default': '"""en"""', 'help': '"""Specifies the client languages (e.g. 
"en,nl")."""'}), '(\'--languages\', default=\'en\', help=\n \'Specifies the client languages (e.g. "en,nl").\')\n', (1140, 1229), False, 'from optparse import make_option\n'), ((1274, 1378), 'optparse.make_option', 'make_option', (['"""--post-command"""'], {'help': '"""Calls another management command after the tenant is created."""'}), "('--post-command', help=\n 'Calls another management command after the tenant is created.')\n", (1285, 1378), False, 'from optparse import make_option\n'), ((4531, 4560), 'django.db.connection.set_tenant', 'connection.set_tenant', (['client'], {}), '(client)\n', (4552, 4560), False, 'from django.db import connection\n'), ((4661, 4697), 'django.core.management.call_command', 'call_command', (['"""loaddata"""', '"""geo_data"""'], {}), "('loaddata', 'geo_data')\n", (4673, 4697), False, 'from django.core.management import call_command\n'), ((4710, 4746), 'django.core.management.call_command', 'call_command', (['"""loaddata"""', '"""geo_data"""'], {}), "('loaddata', 'geo_data')\n", (4722, 4746), False, 'from django.core.management import call_command\n'), ((4759, 4793), 'django.core.management.call_command', 'call_command', (['"""loaddata"""', '"""skills"""'], {}), "('loaddata', 'skills')\n", (4771, 4793), False, 'from django.core.management import call_command\n'), ((4806, 4853), 'django.core.management.call_command', 'call_command', (['"""search_index"""', '"""--rebuild"""', '"""-f"""'], {}), "('search_index', '--rebuild', '-f')\n", (4818, 4853), False, 'from django.core.management import call_command\n'), ((4866, 4911), 'django.core.management.call_command', 'call_command', (['"""loadlinks"""', '"""-f"""', '"""links.json"""'], {}), "('loadlinks', '-f', 'links.json')\n", (4878, 4911), False, 'from django.core.management import call_command\n'), ((4924, 4969), 'django.core.management.call_command', 'call_command', (['"""loadpages"""', '"""-f"""', '"""pages.json"""'], {}), "('loadpages', '-f', 'pages.json')\n", (4936, 4969), False, 
'from django.core.management import call_command\n'), ((5019, 5063), 'django.core.management.call_command', 'call_command', (['post_command', '*args'], {}), '(post_command, *args, **options)\n', (5031, 5063), False, 'from django.core.management import call_command\n'), ((5211, 5322), 'bluebottle.utils.models.Language.objects.get_or_create', 'Language.objects.get_or_create', ([], {'code': '"""nl"""', 'defaults': "{'language_name': 'Dutch', 'native_name': 'Nederlands'}"}), "(code='nl', defaults={'language_name':\n 'Dutch', 'native_name': 'Nederlands'})\n", (5241, 5322), False, 'from bluebottle.utils.models import Language\n'), ((5493, 5603), 'bluebottle.utils.models.Language.objects.get_or_create', 'Language.objects.get_or_create', ([], {'code': '"""en"""', 'defaults': "{'language_name': 'English', 'native_name': 'English'}"}), "(code='en', defaults={'language_name':\n 'English', 'native_name': 'English'})\n", (5523, 5603), False, 'from bluebottle.utils.models import Language\n'), ((5774, 5884), 'bluebottle.utils.models.Language.objects.get_or_create', 'Language.objects.get_or_create', ([], {'code': '"""fr"""', 'defaults': "{'language_name': 'French', 'native_name': 'Français'}"}), "(code='fr', defaults={'language_name':\n 'French', 'native_name': 'Français'})\n", (5804, 5884), False, 'from bluebottle.utils.models import Language\n'), ((2733, 2762), 'django.utils.encoding.force_str', 'force_str', (["('%s: ' % input_msg)"], {}), "('%s: ' % input_msg)\n", (2742, 2762), False, 'from django.utils.encoding import force_str\n'), ((6579, 6597), 'tenant_schemas.utils.get_tenant_model', 'get_tenant_model', ([], {}), '()\n', (6595, 6597), False, 'from tenant_schemas.utils import get_tenant_model\n'), ((3360, 3389), 'django.utils.encoding.force_str', 'force_str', (["('%s: ' % input_msg)"], {}), "('%s: ' % input_msg)\n", (3369, 3389), False, 'from django.utils.encoding import force_str\n'), ((3689, 3718), 'django.utils.encoding.force_str', 'force_str', (["('%s: ' % input_msg)"], 
{}), "('%s: ' % input_msg)\n", (3698, 3718), False, 'from django.utils.encoding import force_str\n'), ((4004, 4033), 'django.utils.encoding.force_str', 'force_str', (["('%s: ' % input_msg)"], {}), "('%s: ' % input_msg)\n", (4013, 4033), False, 'from django.utils.encoding import force_str\n')] |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittests for objax.util.image."""
import io
import unittest
from typing import Tuple
import jax.numpy as jn
import numpy as np
from PIL import Image
import objax
class TestUtilImage(unittest.TestCase):
def ndimarange(self, dims: Tuple[int, ...]):
return np.arange(np.prod(dims), dtype=float).reshape(dims)
def test_nchw(self):
"""Test nchw."""
x = self.ndimarange((2, 3, 4, 5))
self.assertEqual(objax.util.image.nchw(x).tolist(), x.transpose((0, 3, 1, 2)).tolist())
self.assertEqual(objax.util.image.nchw(jn.array(x)).tolist(), x.transpose((0, 3, 1, 2)).tolist())
x = self.ndimarange((2, 3, 4, 5, 6))
self.assertEqual(objax.util.image.nchw(x).tolist(), x.transpose((0, 1, 4, 2, 3)).tolist())
self.assertEqual(objax.util.image.nchw(jn.array(x)).tolist(), x.transpose((0, 1, 4, 2, 3)).tolist())
def test_nhwc(self):
"""Test nhwc."""
x = self.ndimarange((2, 3, 4, 5))
self.assertEqual(objax.util.image.nhwc(x).tolist(), x.transpose((0, 2, 3, 1)).tolist())
self.assertEqual(objax.util.image.nhwc(jn.array(x)).tolist(), x.transpose((0, 2, 3, 1)).tolist())
x = self.ndimarange((2, 3, 4, 5, 6))
self.assertEqual(objax.util.image.nhwc(x).tolist(), x.transpose((0, 1, 3, 4, 2)).tolist())
self.assertEqual(objax.util.image.nhwc(jn.array(x)).tolist(), x.transpose((0, 1, 3, 4, 2)).tolist())
def test_normalize(self):
"""Test normalize methods."""
x = np.arange(256)
y = objax.util.image.normalize_to_unit_float(x)
self.assertEqual((x / 128 - (1 - 1 / 256)).tolist(), y.tolist())
self.assertEqual(y.tolist(), y.clip(-1, 1).tolist())
z = objax.util.image.normalize_to_uint8(y)
self.assertEqual(x.tolist(), z.tolist())
z = objax.util.image.normalize_to_uint8(y + 1 / 128)
self.assertEqual((x + 1).clip(0, 255).tolist(), z.tolist())
z = objax.util.image.normalize_to_uint8(y - 1 / 128)
self.assertEqual((x - 1).clip(0, 255).tolist(), z.tolist())
def test_to_png(self):
"""Test to_png."""
x = np.zeros((3, 32, 32), np.float) + 1 / 255
x[:, :12, :12] = 1
x[:, -12:, -12:] = -1
y = objax.util.image.to_png(x)
self.assertEqual(y, b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00 \x00\x00\x00 \x08\x02\x00\x00\x00\xfc'
b'\x18\xed\xa3\x00\x00\x00FIDATx\x9cc\xfc\xff\xff?\x03!\xd0\xd8\xd8HP\r.\xc0D\xb6\xceQ'
b'\x0bF-\x18\xb5`\x04Y\xc0BI9C\x0c\x18\xfaA4j\xc1\x08\xb0\x80\x85\x12\xcd\r\r\r\x04\xd5'
b'\x0c\xfd \x1a\xb5`\xd4\x82Q\x0b\xe8`\x01\x00\xe3\xf1\x07\xc7\x82\x83p\xa5\x00\x00\x00\x00'
b'IEND\xaeB`\x82')
z = np.array(Image.open(io.BytesIO(y)))
z = (z.transpose((2, 0, 1)) - 127.5) / 127.5
self.assertEqual(x.tolist(), z.tolist())
if __name__ == '__main__':
unittest.main()
| [
"numpy.prod",
"objax.util.image.nhwc",
"objax.util.image.normalize_to_uint8",
"objax.util.image.nchw",
"io.BytesIO",
"jax.numpy.array",
"numpy.zeros",
"objax.util.image.to_png",
"objax.util.image.normalize_to_unit_float",
"unittest.main",
"numpy.arange"
] | [((3549, 3564), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3562, 3564), False, 'import unittest\n'), ((2080, 2094), 'numpy.arange', 'np.arange', (['(256)'], {}), '(256)\n', (2089, 2094), True, 'import numpy as np\n'), ((2107, 2150), 'objax.util.image.normalize_to_unit_float', 'objax.util.image.normalize_to_unit_float', (['x'], {}), '(x)\n', (2147, 2150), False, 'import objax\n'), ((2297, 2335), 'objax.util.image.normalize_to_uint8', 'objax.util.image.normalize_to_uint8', (['y'], {}), '(y)\n', (2332, 2335), False, 'import objax\n'), ((2397, 2445), 'objax.util.image.normalize_to_uint8', 'objax.util.image.normalize_to_uint8', (['(y + 1 / 128)'], {}), '(y + 1 / 128)\n', (2432, 2445), False, 'import objax\n'), ((2526, 2574), 'objax.util.image.normalize_to_uint8', 'objax.util.image.normalize_to_uint8', (['(y - 1 / 128)'], {}), '(y - 1 / 128)\n', (2561, 2574), False, 'import objax\n'), ((2821, 2847), 'objax.util.image.to_png', 'objax.util.image.to_png', (['x'], {}), '(x)\n', (2844, 2847), False, 'import objax\n'), ((2710, 2741), 'numpy.zeros', 'np.zeros', (['(3, 32, 32)', 'np.float'], {}), '((3, 32, 32), np.float)\n', (2718, 2741), True, 'import numpy as np\n'), ((3398, 3411), 'io.BytesIO', 'io.BytesIO', (['y'], {}), '(y)\n', (3408, 3411), False, 'import io\n'), ((861, 874), 'numpy.prod', 'np.prod', (['dims'], {}), '(dims)\n', (868, 874), True, 'import numpy as np\n'), ((1021, 1045), 'objax.util.image.nchw', 'objax.util.image.nchw', (['x'], {}), '(x)\n', (1042, 1045), False, 'import objax\n'), ((1268, 1292), 'objax.util.image.nchw', 'objax.util.image.nchw', (['x'], {}), '(x)\n', (1289, 1292), False, 'import objax\n'), ((1569, 1593), 'objax.util.image.nhwc', 'objax.util.image.nhwc', (['x'], {}), '(x)\n', (1590, 1593), False, 'import objax\n'), ((1816, 1840), 'objax.util.image.nhwc', 'objax.util.image.nhwc', (['x'], {}), '(x)\n', (1837, 1840), False, 'import objax\n'), ((1139, 1150), 'jax.numpy.array', 'jn.array', (['x'], {}), '(x)\n', (1147, 1150), True, 
'import jax.numpy as jn\n'), ((1389, 1400), 'jax.numpy.array', 'jn.array', (['x'], {}), '(x)\n', (1397, 1400), True, 'import jax.numpy as jn\n'), ((1687, 1698), 'jax.numpy.array', 'jn.array', (['x'], {}), '(x)\n', (1695, 1698), True, 'import jax.numpy as jn\n'), ((1937, 1948), 'jax.numpy.array', 'jn.array', (['x'], {}), '(x)\n', (1945, 1948), True, 'import jax.numpy as jn\n')] |
from __future__ import print_function
import sys
sys.path.append('./')
from colorprinter.pycolor import PyColor
from colorprinter.pycolor import cprint
@PyColor('ured')
def printer(string):
a = 1
b = 2
print(str((a + b)**4) + string)
class TestClass(object):
def test_pycolor(self):
printer('edony')
def test_cprint(self):
cprint('ugreen', 'hello edony')
def test_setformat(self):
py_color = PyColor('green')
py_color.format = 'ucyan'
cprint(py_color.format, 'this is test')
def test_disableformat(self):
py_color = PyColor('ured')
cprint(py_color.format, 'this is test')
py_color.disable()
cprint(py_color.format, 'this is disable')
assert py_color.format is ''
def test_colorstr(self):
str1 = 'this is a test'
py_color = PyColor('green')
str2 = py_color.colorstr(str1)
str3 = py_color.colorstr(str1, 'red')
assert((str2 == '\033[0;32;40mthis is a test\033[0m') and
(str3 == '\033[0;31;40mthis is a test\033[0m'))
#if __name__ == "__main__":
# cprint('ugreen', 'hello edony')
# printer('edony')
# py_color = PyColor('green')
# py_color.format = 'ucyan'
# print(py_color.format)
# cprint(py_color.format, 'this is test')
| [
"colorprinter.pycolor.cprint",
"sys.path.append",
"colorprinter.pycolor.PyColor"
] | [((49, 70), 'sys.path.append', 'sys.path.append', (['"""./"""'], {}), "('./')\n", (64, 70), False, 'import sys\n'), ((154, 169), 'colorprinter.pycolor.PyColor', 'PyColor', (['"""ured"""'], {}), "('ured')\n", (161, 169), False, 'from colorprinter.pycolor import PyColor\n'), ((362, 393), 'colorprinter.pycolor.cprint', 'cprint', (['"""ugreen"""', '"""hello edony"""'], {}), "('ugreen', 'hello edony')\n", (368, 393), False, 'from colorprinter.pycolor import cprint\n'), ((444, 460), 'colorprinter.pycolor.PyColor', 'PyColor', (['"""green"""'], {}), "('green')\n", (451, 460), False, 'from colorprinter.pycolor import PyColor\n'), ((503, 542), 'colorprinter.pycolor.cprint', 'cprint', (['py_color.format', '"""this is test"""'], {}), "(py_color.format, 'this is test')\n", (509, 542), False, 'from colorprinter.pycolor import cprint\n'), ((597, 612), 'colorprinter.pycolor.PyColor', 'PyColor', (['"""ured"""'], {}), "('ured')\n", (604, 612), False, 'from colorprinter.pycolor import PyColor\n'), ((621, 660), 'colorprinter.pycolor.cprint', 'cprint', (['py_color.format', '"""this is test"""'], {}), "(py_color.format, 'this is test')\n", (627, 660), False, 'from colorprinter.pycolor import cprint\n'), ((696, 738), 'colorprinter.pycolor.cprint', 'cprint', (['py_color.format', '"""this is disable"""'], {}), "(py_color.format, 'this is disable')\n", (702, 738), False, 'from colorprinter.pycolor import cprint\n'), ((857, 873), 'colorprinter.pycolor.PyColor', 'PyColor', (['"""green"""'], {}), "('green')\n", (864, 873), False, 'from colorprinter.pycolor import PyColor\n')] |
import itertools
from pm4py.objects.petri.petrinet import PetriNet
from da4py.main.objects.pnToFormulas import petri_net_to_SAT
from da4py.main.utils import variablesGenerator as vg, formulas
from da4py.main.utils.formulas import Or, And
from da4py.main.utils.unSat2qbfReader import writeQDimacs, cadetOutputQDimacs, runCadet
BOOLEAN_VAR_MARKING_PN_1="m1_ip"
BOOLEAN_VAR_MARKING_PN_2="m2_ip"
BOOLEAN_VAR_FIRING_TRANSITION_PN_1="tau1_ia"
BOOLEAN_VAR_FIRING_TRANSITION_PN_2="tau2_ia"
BOOLEAN_VAR_DIFF1="diff_1i"
BOOLEAN_VAR_DIFF2="diff_2i"
def apply(net1, m01, mf1, net2, m02, mf2, size_of_run, d, silent_label=None):
    """Build and solve a QBF asking whether every run of net1 stays within
    distance *d* of some run of net2.

    The formula has shape "forall (run of net1) exists (run of net2):
    not(valid net1 run) OR (net2 run with at most d differences)".  It is
    written in QDIMACS, solved with CADET, and the net-1 firing variables of
    the returned certificate are printed.

    :param net1, m01, mf1: first net with its initial and final markings.
    :param net2, m02, mf2: second net with its initial and final markings.
    :param size_of_run: number of real firing instants to compare.
    :param d: maximal number of allowed differences between the runs.
    :param silent_label: label of transitions to treat as silent, if any.
    """
    vars = vg.VariablesGenerator()
    #we=add_wait_net_end(net1,"wf")
    w1=add_wait_net(net1,"wf")
    # Net 1 is unrolled on a longer horizon: each of the size_of_run real
    # instants is followed by size_of_run forced "wait" instants.
    adapted_size_of_run = size_of_run * size_of_run +size_of_run
    pn1_formula, pn1_places, pn1_transitions, pn1_silent_transitions=petri_net_to_SAT(net1, m01, mf1, vars,
                                                                     adapted_size_of_run,
                                                                     reach_final=True,
                                                                     label_m=BOOLEAN_VAR_MARKING_PN_1,
                                                                     label_t=BOOLEAN_VAR_FIRING_TRANSITION_PN_1,
                                                                     silent_transition=silent_label,
                                                                     space_between_fired=1+size_of_run)
    print("etape1")  # progress traces left in for long encodings
    pn1_force_wait_transitions=force_wait_transition(vars,w1,pn1_transitions, adapted_size_of_run,size_of_run+1)
    print("etape2")
    w2=add_wait_net(net2,"wf")
    pn2_formula, pn2_places, pn2_transitions, pn2_silent_transitions=petri_net_to_SAT(net2, m02, mf2, vars,
                                                                     adapted_size_of_run,
                                                                     reach_final=True,
                                                                     label_m=BOOLEAN_VAR_MARKING_PN_2,
                                                                     label_t=BOOLEAN_VAR_FIRING_TRANSITION_PN_2,
                                                                     silent_transition=silent_label)
    # Declare the diff variables and tie them to the firings of both nets.
    dist_formulas = distanceNets(vars,adapted_size_of_run,vars.getFunction(BOOLEAN_VAR_FIRING_TRANSITION_PN_1),
                 vars.getFunction(BOOLEAN_VAR_FIRING_TRANSITION_PN_2),pn1_transitions,pn2_transitions,w1,w2)
    print("etape3")
    maxDist_formulas=maxDistance(vars,vars.getFunction(BOOLEAN_VAR_DIFF1),vars.getFunction(BOOLEAN_VAR_DIFF2),d,adapted_size_of_run)
    #notTooManyW=numberOfWaitInRun(vars,size_of_run,vars.getFunction(BOOLEAN_VAR_FIRING_TRANSITION_PN_1),pn1_transitions,w1,we)
    print("etape4")
    from pm4py.visualization.petrinet import factory as vizu  # only needed by the commented-out view() below
    #vizu.apply(net2,m02,mf2).view()
    # Quantifier prefix: net-1 variables are universal, net-2 and diff
    # variables existential.
    listOfForAll=vars.getAll(BOOLEAN_VAR_MARKING_PN_1)+vars.getAll(BOOLEAN_VAR_FIRING_TRANSITION_PN_1)
    listOfExist=vars.getAll(BOOLEAN_VAR_MARKING_PN_2)+vars.getAll(BOOLEAN_VAR_FIRING_TRANSITION_PN_2)+vars.getAll(BOOLEAN_VAR_DIFF1)+vars.getAll(BOOLEAN_VAR_DIFF2)
    # not(net1 run AND forced waits) OR (distances, bound and net2 run hold)
    full_formula=Or([],[],[And([],[],[pn1_formula,pn1_force_wait_transitions]).negation(),And([],[],[dist_formulas,maxDist_formulas,pn2_formula])])
    print("etape5")
    cnf=full_formula.operatorToCnf(vars.iterator)
    print("etape6")
    # Tseitin variables introduced by CNF conversion are existential as well.
    listOfExist+=list(range(vars.iterator,full_formula.nbVars))
    writeQDimacs(full_formula.nbVars,listOfForAll, listOfExist, cnf)
    print("mais voila")
    runCadet()
    positives,negatives=cadetOutputQDimacs()
    # Print every net-1 firing variable of the certificate with its transition
    # (index is parsed back out of the generated variable name).
    for var in positives:
        if vars.getVarName(var) != None and vars.getVarName(var).startswith("tau1_ia"):
            print(vars.getVarName(var),pn1_transitions[int(vars.getVarName(var).split(", ")[1].split("]")[0])])
    print("....")
    for var in negatives:
        if vars.getVarName(var) != None and vars.getVarName(var).startswith("tau1_ia"):
            print(vars.getVarName(var),pn1_transitions[int(vars.getVarName(var).split(", ")[1].split("]")[0])])
def force_wait_transition(vars, w1, pn1_transitions, adapted_size_of_run, space_between_fired):
    """Constrain net 1 so only the wait transition *w1* fires at padding instants.

    At every instant that is not a multiple of *space_between_fired*, w1's
    firing variable is forced true and every other transition's is forced
    false.

    :return: the resulting And(...) formula.
    """
    forced_true = []
    forced_false = []
    for instant in range(adapted_size_of_run + 1):
        if instant % space_between_fired == 0:
            continue  # real firing instants stay unconstrained
        for transition in pn1_transitions:
            literal = vars.get(BOOLEAN_VAR_FIRING_TRANSITION_PN_1,
                               [instant, pn1_transitions.index(transition)])
            if transition == w1:
                forced_true.append(literal)
            else:
                forced_false.append(literal)
    return And(forced_true, forced_false, [])
def add_wait_net(net,wait_label):
    '''
    Add an isolated "wait" transition to *net* and return it.

    Compared runs don't have the same length; the extra transition gives the
    encoding a no-op to fire so runs can be padded to a common length.  The
    transition is created without any arcs.

    :param net: PetriNet to extend (mutated in place).
    :param wait_label: name and label given to the new transition.
    :return: the newly created PetriNet.Transition.
    '''
    wait_transition = PetriNet.Transition(wait_label, wait_label)
    net.transitions.add(wait_transition)
    return wait_transition
def add_wait_net_end(pn, wait_label):
    '''
    Add a "wait" transition self-looping on every sink place of *pn*.

    Firing the transition consumes and immediately reproduces the token of a
    sink place, so a finished run can "wait" without changing its marking —
    used to pad runs of different lengths to a common length.

    :param pn: PetriNet to extend (mutated in place).
    :param wait_label: name and label given to the new transition.
    :return: the newly created PetriNet.Transition.
    '''
    wait_transition = PetriNet.Transition(wait_label, wait_label)
    for place in pn.places:
        if len(place.out_arcs) == 0:  # sink place: no outgoing arcs
            # Self-loop place -> wait -> place keeps the marking unchanged.
            arcIn = PetriNet.Arc(place, wait_transition)
            arcOut = PetriNet.Arc(wait_transition, place)
            pn.arcs.add(arcIn)
            pn.arcs.add(arcOut)
            wait_transition.in_arcs.add(arcIn)
            wait_transition.out_arcs.add(arcOut)
            place.out_arcs.add(arcIn)
            place.in_arcs.add(arcOut)
    pn.transitions.add(wait_transition)
    return wait_transition
def numberOfWaitInRun(vars, size_of_run, tau1, pn1_transitions, w1, we):
    """Require at least size_of_run/2 "wait" firings in the run of net 1.

    Enumerates every combination of instants that could hold the waits; for
    each, instants up to size_of_run use the wait transition *w1* and later
    ones the end-wait transition *we* (shifted back by size_of_run).

    :return: Or(...) over one And(...) per combination of wait instants.
    """
    half = int(size_of_run)
    min_waits = int(size_of_run / 2)
    w1_index = pn1_transitions.index(w1)
    we_index = pn1_transitions.index(we)
    conjunctions = []
    for instants in itertools.combinations(range(1, size_of_run * 2 + 1), min_waits):
        literals = [
            tau1([i, w1_index]) if i <= half else tau1([i - half, we_index])
            for i in instants
        ]
        conjunctions.append(And(literals, [], []))
    return Or([], [], conjunctions)
def distanceNets(vars,size_of_run, tau1,tau2,pn1_transitions,pn2_transitions,w1,w2):
    """Declare the per-instant diff variables and tie them to both nets' firings.

    For every instant i and every net-1 transition t1, constraints are added
    so the diff variables go up whenever the two runs disagree: t1 fired
    without a same-label (or wait) answer in net 2, or one net waits while the
    other fires.

    :param tau1, tau2: lookup functions for the firing variables of each net.
    :param w1, w2: the "wait" transitions of net 1 and net 2.
    :return: conjunction (And) of all the distance constraints.
    """
    formula=[]
    # One diff variable per instant, for each net.
    vars.add(BOOLEAN_VAR_DIFF1,[(1,size_of_run+1)])
    vars.add(BOOLEAN_VAR_DIFF2,[(1,size_of_run+1)])
    for i in range (1,size_of_run+1):
        for t1 in pn1_transitions:
            '''
            listOfSameLabels=[tau2([i,pn2_transitions.index(t2)]) for t2 in pn2_transitions if t2.label==t1.label]
            listOfSameLabels.append(vars.getFunction(BOOLEAN_VAR_DIFF)([i]))
            formula.append(Or(listOfSameLabels,[tau1([i,pn1_transitions.index(t1)]) ],[]))
            '''
            if t1 != w1:
                # t1 fired at i => net 2 fires a same-label transition, or
                # waits, or both diff variables are raised at instant i.
                listOfSameLabels=[tau2([i,pn2_transitions.index(t2)]) for t2 in pn2_transitions if t2.label==t1.label]
                listOfSameLabels.append(tau2([i,pn2_transitions.index(w2)]))
                formula.append(Or(listOfSameLabels,[tau1([i,pn1_transitions.index(t1)]) ],[And([vars.getFunction(BOOLEAN_VAR_DIFF1)([i]),
                                                                        vars.getFunction(BOOLEAN_VAR_DIFF2)([i])],[],[])]))
                # t1 fired while net 2 waits => diff_1i[i] holds.
                formula.append(Or([vars.getFunction(BOOLEAN_VAR_DIFF1)([i])],[tau2([i,pn2_transitions.index(w2)]),
                                                                        tau1([i,pn1_transitions.index(t1)])],[]))
            else :
                # net 1 waits at i => net 2 waits too, or diff_2i[i] holds.
                formula.append(Or([vars.getFunction(BOOLEAN_VAR_DIFF2)([i]),tau2([i,pn2_transitions.index(w2)])],
                               [tau1([i,pn1_transitions.index(t1)])],[]))
    return And([],[],formula)
def maxDistance(vars, diff1, diff2, max_d, size_of_run):
    """Require the two runs to differ in at most *max_d* positions.

    IDEA: at least (2*size_of_run - max_d) diff variables must be false, so we
    enumerate every combination of instants that could hold those false
    variables.  Instants up to size_of_run address diff1, the rest diff2
    (shifted back by size_of_run).

    Fixes: removed a leftover debug print ("wala") and the pointless
    materialization of the combinations iterator into a list.

    :param diff1, diff2: lookup functions for the two nets' diff variables.
    :param max_d: maximal number of allowed differences.
    :return: Or(...) over one And(...) per combination of false instants.
    """
    number_of_false = size_of_run * 2 - max_d
    dist_false_variables = []
    # Iterate the combinations lazily instead of building a (huge) list first.
    for instants in itertools.combinations(range(1, size_of_run * 2 + 1), number_of_false):
        list_distances = []
        for i in instants:
            if i <= size_of_run:
                list_distances.append(diff1([i]))
            else:
                list_distances.append(diff2([i - size_of_run]))
        dist_false_variables.append(And([], list_distances, []))
    return Or([], [], dist_false_variables)
def maxDistance2(vars, diff1, diff2, max_d, size_of_run):
    """Alternative encoding of the distance bound *max_d*.

    For each choice of max_d instants, either one of the chosen diff variables
    is false or every diff variable outside the choice is false.

    :return: Or(...) over one clause per combination of instants.
    """
    def lookup(i):
        # Map a global instant onto the right net's diff variable.
        return diff1([i]) if i <= size_of_run else diff2([i - size_of_run])

    all_instants = range(1, size_of_run * 2 + 1)
    clauses = []
    for chosen in itertools.combinations(all_instants, max_d):
        inside = [lookup(i) for i in chosen]
        outside = [lookup(i) for i in all_instants if i not in chosen]
        clauses.append(Or([], inside, [And([], outside, [])]))
    return Or([], [], clauses)
| [
"da4py.main.utils.formulas.Or",
"da4py.main.utils.formulas.And",
"da4py.main.objects.pnToFormulas.petri_net_to_SAT",
"da4py.main.utils.unSat2qbfReader.runCadet",
"itertools.combinations",
"da4py.main.utils.variablesGenerator.VariablesGenerator",
"da4py.main.utils.unSat2qbfReader.writeQDimacs",
"pm4py.... | [((633, 656), 'da4py.main.utils.variablesGenerator.VariablesGenerator', 'vg.VariablesGenerator', ([], {}), '()\n', (654, 656), True, 'from da4py.main.utils import variablesGenerator as vg, formulas\n'), ((858, 1096), 'da4py.main.objects.pnToFormulas.petri_net_to_SAT', 'petri_net_to_SAT', (['net1', 'm01', 'mf1', 'vars', 'adapted_size_of_run'], {'reach_final': '(True)', 'label_m': 'BOOLEAN_VAR_MARKING_PN_1', 'label_t': 'BOOLEAN_VAR_FIRING_TRANSITION_PN_1', 'silent_transition': 'silent_label', 'space_between_fired': '(1 + size_of_run)'}), '(net1, m01, mf1, vars, adapted_size_of_run, reach_final=\n True, label_m=BOOLEAN_VAR_MARKING_PN_1, label_t=\n BOOLEAN_VAR_FIRING_TRANSITION_PN_1, silent_transition=silent_label,\n space_between_fired=1 + size_of_run)\n', (874, 1096), False, 'from da4py.main.objects.pnToFormulas import petri_net_to_SAT\n'), ((1851, 2048), 'da4py.main.objects.pnToFormulas.petri_net_to_SAT', 'petri_net_to_SAT', (['net2', 'm02', 'mf2', 'vars', 'adapted_size_of_run'], {'reach_final': '(True)', 'label_m': 'BOOLEAN_VAR_MARKING_PN_2', 'label_t': 'BOOLEAN_VAR_FIRING_TRANSITION_PN_2', 'silent_transition': 'silent_label'}), '(net2, m02, mf2, vars, adapted_size_of_run, reach_final=\n True, label_m=BOOLEAN_VAR_MARKING_PN_2, label_t=\n BOOLEAN_VAR_FIRING_TRANSITION_PN_2, silent_transition=silent_label)\n', (1867, 2048), False, 'from da4py.main.objects.pnToFormulas import petri_net_to_SAT\n'), ((3680, 3745), 'da4py.main.utils.unSat2qbfReader.writeQDimacs', 'writeQDimacs', (['full_formula.nbVars', 'listOfForAll', 'listOfExist', 'cnf'], {}), '(full_formula.nbVars, listOfForAll, listOfExist, cnf)\n', (3692, 3745), False, 'from da4py.main.utils.unSat2qbfReader import writeQDimacs, cadetOutputQDimacs, runCadet\n'), ((3773, 3783), 'da4py.main.utils.unSat2qbfReader.runCadet', 'runCadet', ([], {}), '()\n', (3781, 3783), False, 'from da4py.main.utils.unSat2qbfReader import writeQDimacs, cadetOutputQDimacs, runCadet\n'), ((3808, 3828), 
'da4py.main.utils.unSat2qbfReader.cadetOutputQDimacs', 'cadetOutputQDimacs', ([], {}), '()\n', (3826, 3828), False, 'from da4py.main.utils.unSat2qbfReader import writeQDimacs, cadetOutputQDimacs, runCadet\n'), ((4810, 4827), 'da4py.main.utils.formulas.And', 'And', (['pos', 'neg', '[]'], {}), '(pos, neg, [])\n', (4813, 4827), False, 'from da4py.main.utils.formulas import Or, And\n'), ((5037, 5080), 'pm4py.objects.petri.petrinet.PetriNet.Transition', 'PetriNet.Transition', (['wait_label', 'wait_label'], {}), '(wait_label, wait_label)\n', (5056, 5080), False, 'from pm4py.objects.petri.petrinet import PetriNet\n'), ((5364, 5407), 'pm4py.objects.petri.petrinet.PetriNet.Transition', 'PetriNet.Transition', (['wait_label', 'wait_label'], {}), '(wait_label, wait_label)\n', (5383, 5407), False, 'from pm4py.objects.petri.petrinet import PetriNet\n'), ((6610, 6630), 'da4py.main.utils.formulas.Or', 'Or', (['[]', '[]', 'w1ToTrue'], {}), '([], [], w1ToTrue)\n', (6612, 6630), False, 'from da4py.main.utils.formulas import Or, And\n'), ((8182, 8202), 'da4py.main.utils.formulas.And', 'And', (['[]', '[]', 'formula'], {}), '([], [], formula)\n', (8185, 8202), False, 'from da4py.main.utils.formulas import Or, And\n'), ((8891, 8921), 'da4py.main.utils.formulas.Or', 'Or', (['[]', '[]', 'distFalseVariables'], {}), '([], [], distFalseVariables)\n', (8893, 8921), False, 'from da4py.main.utils.formulas import Or, And\n'), ((9889, 9919), 'da4py.main.utils.formulas.Or', 'Or', (['[]', '[]', 'distFalseVariables'], {}), '([], [], distFalseVariables)\n', (9891, 9919), False, 'from da4py.main.utils.formulas import Or, And\n'), ((6159, 6209), 'itertools.combinations', 'itertools.combinations', (['list_to_size_of_run', 'minw1'], {}), '(list_to_size_of_run, minw1)\n', (6181, 6209), False, 'import itertools\n'), ((8455, 8512), 'itertools.combinations', 'itertools.combinations', (['list_to_size_of_run', 'max_distance'], {}), '(list_to_size_of_run, max_distance)\n', (8477, 8512), False, 'import 
itertools\n'), ((9160, 9217), 'itertools.combinations', 'itertools.combinations', (['list_to_size_of_run', 'max_distance'], {}), '(list_to_size_of_run, max_distance)\n', (9182, 9217), False, 'import itertools\n'), ((3464, 3523), 'da4py.main.utils.formulas.And', 'And', (['[]', '[]', '[dist_formulas, maxDist_formulas, pn2_formula]'], {}), '([], [], [dist_formulas, maxDist_formulas, pn2_formula])\n', (3467, 3523), False, 'from da4py.main.utils.formulas import Or, And\n'), ((5493, 5529), 'pm4py.objects.petri.petrinet.PetriNet.Arc', 'PetriNet.Arc', (['place', 'wait_transition'], {}), '(place, wait_transition)\n', (5505, 5529), False, 'from pm4py.objects.petri.petrinet import PetriNet\n'), ((5551, 5587), 'pm4py.objects.petri.petrinet.PetriNet.Arc', 'PetriNet.Arc', (['wait_transition', 'place'], {}), '(wait_transition, place)\n', (5563, 5587), False, 'from pm4py.objects.petri.petrinet import PetriNet\n'), ((6572, 6599), 'da4py.main.utils.formulas.And', 'And', (['listOfW1ToTrue', '[]', '[]'], {}), '(listOfW1ToTrue, [], [])\n', (6575, 6599), False, 'from da4py.main.utils.formulas import Or, And\n'), ((8853, 8880), 'da4py.main.utils.formulas.And', 'And', (['[]', 'list_distances', '[]'], {}), '([], list_distances, [])\n', (8856, 8880), False, 'from da4py.main.utils.formulas import Or, And\n'), ((3401, 3455), 'da4py.main.utils.formulas.And', 'And', (['[]', '[]', '[pn1_formula, pn1_force_wait_transitions]'], {}), '([], [], [pn1_formula, pn1_force_wait_transitions])\n', (3404, 3455), False, 'from da4py.main.utils.formulas import Or, And\n'), ((9848, 9876), 'da4py.main.utils.formulas.And', 'And', (['[]', 'list_distances2', '[]'], {}), '([], list_distances2, [])\n', (9851, 9876), False, 'from da4py.main.utils.formulas import Or, And\n')] |
#!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Output the differences between two Docker images.
Usage:
python docker_diff.py [--deep=path] <image_1> <image_2>
"""
import argparse
import json
import logging
import os
import shutil
import subprocess
import tarfile
import tempfile
def call(cmd, **kwargs):
    """Log *cmd* at INFO level, run it, and return its exit status."""
    pretty = ' '.join(cmd)
    logging.info('exec %s', pretty)
    return subprocess.call(cmd, **kwargs)
def check_call(cmd):
    """Log *cmd* at INFO level and run it, raising CalledProcessError on failure."""
    pretty = ' '.join(cmd)
    logging.info('exec %s', pretty)
    return subprocess.check_call(cmd)
def dockerfile_layers(tf):
    '''Given a `docker save` tarball, return the layer metadata in order.'''
    # Index each layer's metadata by the id of its parent layer.
    by_parent = {}
    for member in tf.getmembers():
        if not member.name.endswith('/json'):
            continue
        meta = json.load(tf.extractfile(member))
        by_parent[meta.get('parent')] = meta
    # Chase parent pointers starting from the (parentless) base layer.
    ordered = []
    current = by_parent.get(None)
    while current is not None:
        ordered.append(current)
        current = by_parent.get(current['id'])
    return ordered
def is_whiteout(fname):
    """Tell whether *fname* is a docker whiteout entry (a deletion marker)."""
    if fname.startswith('.wh.'):
        return True
    return '/.wh.' in fname
def extract_layers(tf, layers, outdir):
    '''Extract docker layers to a specific directory (fake a union mount).'''
    for l in layers:
        obj = tf.extractfile('%s/layer.tar' % l['id'])
        with tarfile.open(fileobj=obj) as f:
            # Complication: .wh. files indicate deletions.
            # https://github.com/docker/docker/blob/master/image/spec/v1.md
            members = f.getmembers()
            members_good = [m for m in members if not is_whiteout(m.name)]
            # First extract everything that is not a whiteout marker...
            # NOTE(review): extractall trusts member paths; a hostile tarball
            # could write outside outdir -- confirm images are trusted input.
            f.extractall(outdir, members_good)
            # ...then apply the deletions the whiteout markers stand for.
            for m in members:
                name = m.name
                if is_whiteout(name):
                    # "<dir>/.wh.<name>" marks "<dir>/<name>" as deleted.
                    path = os.path.join(outdir, name.replace('.wh.', ''))
                    if os.path.isdir(path):
                        shutil.rmtree(path)
                    elif os.path.exists(path):
                        os.unlink(path)
def docker_diff(image_a, image_b, tmpdir, deep):
    """Diff the filesystems of two docker images, working inside *tmpdir*.

    Dumps both images with `docker save`, reassembles their layers under
    tmpdir/a and tmpdir/b, then reports differences via `diff -qr` (and a full
    `diff -rU5` of the *deep* subdirectory, when given).  Output goes to
    stdout through diff itself.
    """
    # dump images for inspection
    tf_a_path = '%s/a.tar' % tmpdir
    tf_b_path = '%s/b.tar' % tmpdir
    check_call(['docker', 'save', '-o', tf_a_path, image_a])
    check_call(['docker', 'save', '-o', tf_b_path, image_b])

    tf_a = tarfile.open(tf_a_path)
    tf_b = tarfile.open(tf_b_path)

    # find layers in order
    layers_a = dockerfile_layers(tf_a)
    layers_b = dockerfile_layers(tf_b)

    # minor optimization: skip layers shared at the base of both images
    common = len(os.path.commonprefix([layers_a, layers_b]))

    tf_a_out = '%s/a' % tmpdir
    tf_b_out = '%s/b' % tmpdir
    extract_layers(tf_a, layers_a[common:], tf_a_out)
    extract_layers(tf_b, layers_b[common:], tf_b_out)

    # actually compare the resulting directories
    # just show whether something changed (OS upgrades change a lot)
    call(['diff', '-qr', 'a', 'b'], cwd=tmpdir)

    if deep:
        # if requested, do a more in-depth content diff as well.
        call([
            'diff', '-rU5',
            os.path.join('a', deep),
            os.path.join('b', deep)],
            cwd=tmpdir)
def main():
    """CLI entry point: parse arguments, diff the two images, clean up."""
    logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser()
    parser.add_argument('--deep', help='Show full differences for specific directory')
    parser.add_argument('image_a')
    parser.add_argument('image_b')
    args = parser.parse_args()

    scratch = tempfile.mkdtemp(prefix='docker_diff_')
    try:
        docker_diff(args.image_a, args.image_b, scratch, args.deep)
    finally:
        # Always remove the scratch directory, even if the diff blows up.
        shutil.rmtree(scratch)
if __name__ == '__main__':
    main()  # run the CLI when executed as a script
| [
"logging.basicConfig",
"os.path.exists",
"tarfile.open",
"argparse.ArgumentParser",
"subprocess.check_call",
"os.path.join",
"os.path.isdir",
"tempfile.mkdtemp",
"subprocess.call",
"os.path.commonprefix",
"shutil.rmtree",
"os.unlink"
] | [((932, 962), 'subprocess.call', 'subprocess.call', (['cmd'], {}), '(cmd, **kwargs)\n', (947, 962), False, 'import subprocess\n'), ((1040, 1066), 'subprocess.check_call', 'subprocess.check_call', (['cmd'], {}), '(cmd)\n', (1061, 1066), False, 'import subprocess\n'), ((2898, 2921), 'tarfile.open', 'tarfile.open', (['tf_a_path'], {}), '(tf_a_path)\n', (2910, 2921), False, 'import tarfile\n'), ((2933, 2956), 'tarfile.open', 'tarfile.open', (['tf_b_path'], {}), '(tf_b_path)\n', (2945, 2956), False, 'import tarfile\n'), ((3748, 3787), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (3767, 3787), False, 'import logging\n'), ((3801, 3826), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3824, 3826), False, 'import argparse\n'), ((4032, 4071), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'prefix': '"""docker_diff_"""'}), "(prefix='docker_diff_')\n", (4048, 4071), False, 'import tempfile\n'), ((3129, 3171), 'os.path.commonprefix', 'os.path.commonprefix', (['[layers_a, layers_b]'], {}), '([layers_a, layers_b])\n', (3149, 3171), False, 'import os\n'), ((4178, 4199), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {}), '(tmpdir)\n', (4191, 4199), False, 'import shutil\n'), ((1932, 1957), 'tarfile.open', 'tarfile.open', ([], {'fileobj': 'obj'}), '(fileobj=obj)\n', (1944, 1957), False, 'import tarfile\n'), ((3647, 3670), 'os.path.join', 'os.path.join', (['"""a"""', 'deep'], {}), "('a', deep)\n", (3659, 3670), False, 'import os\n'), ((3684, 3707), 'os.path.join', 'os.path.join', (['"""b"""', 'deep'], {}), "('b', deep)\n", (3696, 3707), False, 'import os\n'), ((2455, 2474), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (2468, 2474), False, 'import os\n'), ((2500, 2519), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (2513, 2519), False, 'import shutil\n'), ((2545, 2565), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2559, 2565), False, 'import 
os\n'), ((2591, 2606), 'os.unlink', 'os.unlink', (['path'], {}), '(path)\n', (2600, 2606), False, 'import os\n')] |
from src import mining
from tkinter import filedialog
from PyQt4 import QtGui
# Ask the user to pick the directory to analyse (opens a Tk folder dialog).
directory = filedialog.askdirectory()  # renamed: `dir` shadowed the builtin
direcciones, nomArchivo = mining.path(directory)
cont = mining.coincidencias(direcciones, nomArchivo)
# Print each file name next to its coincidence count (zip instead of
# indexing; also avoids an IndexError if the lists ever differ in length).
for nombre, coincidencias in zip(nomArchivo, cont):
    print(nombre, coincidencias)
"src.mining.coincidencias",
"tkinter.filedialog.askdirectory",
"src.mining.path"
] | [((82, 107), 'tkinter.filedialog.askdirectory', 'filedialog.askdirectory', ([], {}), '()\n', (105, 107), False, 'from tkinter import filedialog\n'), ((135, 151), 'src.mining.path', 'mining.path', (['dir'], {}), '(dir)\n', (146, 151), False, 'from src import mining\n'), ((158, 203), 'src.mining.coincidencias', 'mining.coincidencias', (['direcciones', 'nomArchivo'], {}), '(direcciones, nomArchivo)\n', (178, 203), False, 'from src import mining\n')] |
from django.db import models
from .utils import get_ip_from_request
class Ip(models.Model):
    """One row per distinct client IP address seen in a request."""

    address = models.GenericIPAddressField(unique=True, db_index=True)

    @classmethod
    def get_or_create(cls, request):
        """Resolve the client IP of *request* and return its Ip row, or None
        when no IP could be extracted."""
        raw_ip = get_ip_from_request(request)
        if raw_ip:
            record, _created = cls.objects.get_or_create(address=raw_ip)
            return record
        return None

    def __str__(self):
        return str(self.address)
| [
"django.db.models.GenericIPAddressField"
] | [((109, 165), 'django.db.models.GenericIPAddressField', 'models.GenericIPAddressField', ([], {'unique': '(True)', 'db_index': '(True)'}), '(unique=True, db_index=True)\n', (137, 165), False, 'from django.db import models\n')] |
#! /usr/bin/env python3
# __author__ = "<NAME>"
# __credits__ = []
# __version__ = "0.2.1"
# __maintainer__ = "<NAME>"
# __email__ = "<EMAIL>"
# __status__ = "Prototype"
#
# Responsible for starting the required number of processes and threads
import threading
import time
from qs_backend.workers.worker_fetch_stock import StockWorker
from qs_backend.publisher.publish_stock import PublishStock
from qs_backend.dal.user_stock_pref_dal import UserStockPrefDAL
class Backend:
    """Driver that spawns one price-fetching thread per preferred stock."""

    def __init__(self):
        # No state to initialise yet.
        pass

    def start_stock_tickers(self):
        """Loop forever: every 60 seconds re-read the user stock preferences
        and start one daemon thread per stock to fetch its price.

        NOTE(review): a brand-new thread is created for every stock on every
        iteration -- presumably fetch_stock_price is short-lived; if it loops
        internally this leaks threads.  TODO confirm.  Also `stock_exception`
        returned by the DAL is never checked.
        """
        # Fetch all the stocks that users have chosen.
        while True:
            user_stock_pref_dal_obj = UserStockPrefDAL()
            stock_exception, available_stocks = user_stock_pref_dal_obj.get_all_stock_preferences()
            for stock in available_stocks:
                stock_key = stock
                # Start FetchStock Threads
                stock_worker_obj = StockWorker()
                ft_stock_thread = threading.Thread(target=stock_worker_obj.fetch_stock_price, args=(stock_key,))
                ft_stock_thread.daemon = True
                ft_stock_thread.start()
            time.sleep(60)
if __name__ == '__main__':
    backend_process = Backend()
    # Start all the Stock Worker Threads
    stock_ticker_thread = threading.Thread(target=backend_process.start_stock_tickers)
    stock_ticker_thread.start()
    time.sleep(5)
    # Wait for sometime, before you start the publisher.
    # The below makes a blocking call !
    pub_stock = PublishStock()
    pub_stock.start_publishing()
| [
"qs_backend.dal.user_stock_pref_dal.UserStockPrefDAL",
"qs_backend.publisher.publish_stock.PublishStock",
"time.sleep",
"qs_backend.workers.worker_fetch_stock.StockWorker",
"threading.Thread"
] | [((1326, 1386), 'threading.Thread', 'threading.Thread', ([], {'target': 'backend_process.start_stock_tickers'}), '(target=backend_process.start_stock_tickers)\n', (1342, 1386), False, 'import threading\n'), ((1424, 1437), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1434, 1437), False, 'import time\n'), ((1551, 1565), 'qs_backend.publisher.publish_stock.PublishStock', 'PublishStock', ([], {}), '()\n', (1563, 1565), False, 'from qs_backend.publisher.publish_stock import PublishStock\n'), ((685, 703), 'qs_backend.dal.user_stock_pref_dal.UserStockPrefDAL', 'UserStockPrefDAL', ([], {}), '()\n', (701, 703), False, 'from qs_backend.dal.user_stock_pref_dal import UserStockPrefDAL\n'), ((1184, 1198), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (1194, 1198), False, 'import time\n'), ((959, 972), 'qs_backend.workers.worker_fetch_stock.StockWorker', 'StockWorker', ([], {}), '()\n', (970, 972), False, 'from qs_backend.workers.worker_fetch_stock import StockWorker\n'), ((1007, 1085), 'threading.Thread', 'threading.Thread', ([], {'target': 'stock_worker_obj.fetch_stock_price', 'args': '(stock_key,)'}), '(target=stock_worker_obj.fetch_stock_price, args=(stock_key,))\n', (1023, 1085), False, 'import threading\n')] |
from collections import Counter
from utils import read_bits
def run_part_one():
    """Solve AoC 2021 day 3 part 1: derive gamma/epsilon rates from the
    per-column bit majorities of input.txt and print their product."""
    print("--------------------------------")
    print("Advent of Code 2021 - 3 - Part 1")
    bits = read_bits('input.txt')
    # One (most, least) significant-bit pair per column.
    pairs = [get_relevant_bits_for_row(bits, column) for column in range(len(bits[0]))]
    gamma_rate = "".join(most for most, _ in pairs)
    epsilon_rate = "".join(least for _, least in pairs)
    gamma = int(gamma_rate, 2)
    epsilon = int(epsilon_rate, 2)
    print('Binary - Gamma rate: {0}, Epsilon rate: {1}'.format(gamma_rate, epsilon_rate))
    print('Decimal - Gamma rate: {0}, Epsilon rate: {1}'.format(gamma, epsilon))
    print('Power consumption: {0}'.format(gamma * epsilon))
def get_relevant_bits_for_row(bits: [[str]], row: int):
    """Return (most_common_bit, least_common_bit) for column *row* of *bits*.

    Fixes: the original indexed Counter with `['0']`/`['1']`, raising KeyError
    on a column containing only one bit value; `.get(..., 0)` handles that.
    Ties still resolve to '1' as the most common bit ('0' needs a strict
    majority), matching the original behaviour.

    :param bits: list of equal-length bit-string rows (each a list of '0'/'1').
    :param row: column index to inspect.
    :return: tuple (most_common_bit, least_common_bit) as single-char strings.
    """
    counts = Counter(line[row] for line in bits)
    zeros = counts.get('0', 0)
    ones = counts.get('1', 0)
    most_significant_bit = '0' if zeros > ones else '1'
    least_significant_bit = '0' if most_significant_bit == '1' else '1'
    return most_significant_bit, least_significant_bit
| [
"collections.Counter",
"utils.read_bits"
] | [((186, 208), 'utils.read_bits', 'read_bits', (['"""input.txt"""'], {}), "('input.txt')\n", (195, 208), False, 'from utils import read_bits\n'), ((879, 896), 'collections.Counter', 'Counter', (['row_bits'], {}), '(row_bits)\n', (886, 896), False, 'from collections import Counter\n')] |
import os
import sys
import json
import time
import numpy as np
import tensorflow as tf
from blocks.helpers import Monitor
from blocks.helpers import visualize_samples, get_nonlinearity, int_shape, get_trainable_variables, broadcast_masks_np
from blocks.optimizers import adam_updates
import data.load_data as load_data
from masks import get_generator
from .learner import Learner
class FullyObservedLearner(Learner):
    """Learner trained with fully observed inputs: both x and x_bar are fed
    the same (whole) image, and masks only decide which pixels the sampler
    regenerates."""

    def __init__(self, nr_gpu, save_dir, img_size, exp_name="default"):
        super().__init__(nr_gpu, save_dir, img_size, exp_name)

    def train_epoch(self, mgen, which_set='train'):
        """Run one training pass over *which_set* using mask generator *mgen*."""
        if which_set == 'train':
            data_set = self.train_set
        elif which_set == 'eval':
            data_set = self.eval_set
        elif which_set == 'test':
            data_set = self.test_set

        for data in data_set:
            # RGB batches arrive in [0, 255]; rescale to [-1, 1].
            if self.num_channels == 3:
                data = np.cast[np.float32]((data - 127.5) / 127.5)
            ds = np.split(data, self.nr_gpu)  # one shard per GPU tower
            feed_dict = {}
            feed_dict.update({model.is_training: True for model in self.models})
            feed_dict.update({model.dropout_p: 0.5 for model in self.models})
            # Fully observed: x and x_bar both receive the raw shard.
            feed_dict.update({model.x: ds[i] for i, model in enumerate(self.models)})
            feed_dict.update({model.x_bar: ds[i] for i, model in enumerate(self.models)})
            masks_np = [mgen.gen(self.batch_size//self.nr_gpu) for i in range(self.nr_gpu)]
            feed_dict.update({model.masks: masks_np[i] for i, model in enumerate(self.models)})
            self.sess.run(self.train_step, feed_dict=feed_dict)

    def eval_epoch(self, mgen, which_set='eval'):
        """Run one evaluation pass over *which_set*, feeding the monitor."""
        if which_set == 'train':
            data_set = self.train_set
        elif which_set == 'eval':
            data_set = self.eval_set
        elif which_set == 'test':
            data_set = self.test_set

        for data in data_set:
            if self.num_channels == 3:
                data = np.cast[np.float32]((data - 127.5) / 127.5)
            ds = np.split(data, self.nr_gpu)
            feed_dict = {}
            # Same feeds as training, but deterministic (no dropout).
            feed_dict.update({model.is_training: False for model in self.models})
            feed_dict.update({model.dropout_p: 0.0 for model in self.models})
            feed_dict.update({model.x: ds[i] for i, model in enumerate(self.models)})
            feed_dict.update({model.x_bar: ds[i] for i, model in enumerate(self.models)})
            masks_np = [mgen.gen(self.batch_size//self.nr_gpu) for i in range(self.nr_gpu)]
            feed_dict.update({model.masks: masks_np[i] for i, model in enumerate(self.models)})
            self.monitor.evaluate(self.sess, feed_dict)

    def sample(self, data, mgen, same_inputs=False, use_mask_at=None):
        """Inpaint the masked-out pixels of *data* autoregressively.

        Returns (original, masked, generated) batches.  Pixels where the mask
        is 1 are kept; masked-out pixels are generated one (row, column) at a
        time from the model's x_hat.
        NOTE(review): assumes data is (batch, img_size, img_size, channels)
        -- confirm against the caller.
        """
        if self.num_channels == 3:
            data = np.cast[np.float32]((data - 127.5) / 127.5)
        if same_inputs:
            # Replicate sample 3 across the whole batch.
            for i in range(data.shape[0]):
                data[i] = data[3]
        ori_data = data.copy()
        ds = np.split(data.copy(), self.nr_gpu)
        feed_dict = {}
        feed_dict.update({model.is_training: False for model in self.models})
        feed_dict.update({model.dropout_p: 0.0 for model in self.models})
        feed_dict.update({model.x: ds[i] for i, model in enumerate(self.models)})
        feed_dict.update({model.x_bar: ds[i] for i, model in enumerate(self.models)})
        if use_mask_at is not None:
            # Reuse previously saved masks for reproducible comparisons.
            masks_np = np.load(use_mask_at)['masks']
            masks_np = np.split(masks_np, self.nr_gpu)
        else:
            masks_np = [mgen.gen(self.batch_size//self.nr_gpu) for i in range(self.nr_gpu)]
            np.savez(mgen.name+"_"+self.data_set, masks=np.concatenate(masks_np))
        if same_inputs:
            # One identical mask for every sample on every GPU.
            for g in range(self.nr_gpu):
                for i in range(self.batch_size//self.nr_gpu):
                    masks_np[g][i] = masks_np[0][0]
        feed_dict.update({model.masks: masks_np[i] for i, model in enumerate(self.models)})
        # Zero out the unobserved pixels in each shard.
        for i in range(self.nr_gpu):
            ds[i] *= broadcast_masks_np(masks_np[i], num_channels=self.num_channels)
        masked_data = np.concatenate(ds, axis=0)

        x_gen = [ds[i].copy() for i in range(self.nr_gpu)]
        # Raster-scan generation: visit every pixel; skip positions that are
        # observed in every shard, otherwise fill the masked entries.
        for yi in range(self.img_size):
            for xi in range(self.img_size):
                if np.min(np.array([masks_np[i][:, yi, xi] for i in range(self.nr_gpu)])) > 0:
                    continue
                feed_dict.update({model.x_bar:x_gen[i] for i, model in enumerate(self.models)})
                x_hats = self.sess.run([model.x_hat for model in self.models], feed_dict=feed_dict)
                for i in range(self.nr_gpu):
                    # Keep observed pixels, take generated values elsewhere.
                    bmask = broadcast_masks_np(masks_np[i][:, yi, xi] , num_channels=self.num_channels)
                    x_gen[i][:, yi, xi, :] = x_hats[i][:, yi, xi, :] * (1.-bmask) + x_gen[i][:, yi, xi, :] * bmask
        gen_data = np.concatenate(x_gen, axis=0)

        if self.num_channels == 1:
            # Grayscale: lift masked-out pixels to mid-gray for display.
            masks_np = np.concatenate(masks_np, axis=0)
            masks_np = broadcast_masks_np(masks_np, num_channels=self.num_channels)
            masked_data += (1-masks_np) * 0.5
        return ori_data, masked_data, gen_data
| [
"numpy.split",
"numpy.load",
"blocks.helpers.broadcast_masks_np",
"numpy.concatenate"
] | [((4122, 4148), 'numpy.concatenate', 'np.concatenate', (['ds'], {'axis': '(0)'}), '(ds, axis=0)\n', (4136, 4148), True, 'import numpy as np\n'), ((4895, 4924), 'numpy.concatenate', 'np.concatenate', (['x_gen'], {'axis': '(0)'}), '(x_gen, axis=0)\n', (4909, 4924), True, 'import numpy as np\n'), ((974, 1001), 'numpy.split', 'np.split', (['data', 'self.nr_gpu'], {}), '(data, self.nr_gpu)\n', (982, 1001), True, 'import numpy as np\n'), ((2034, 2061), 'numpy.split', 'np.split', (['data', 'self.nr_gpu'], {}), '(data, self.nr_gpu)\n', (2042, 2061), True, 'import numpy as np\n'), ((3476, 3507), 'numpy.split', 'np.split', (['masks_np', 'self.nr_gpu'], {}), '(masks_np, self.nr_gpu)\n', (3484, 3507), True, 'import numpy as np\n'), ((4036, 4099), 'blocks.helpers.broadcast_masks_np', 'broadcast_masks_np', (['masks_np[i]'], {'num_channels': 'self.num_channels'}), '(masks_np[i], num_channels=self.num_channels)\n', (4054, 4099), False, 'from blocks.helpers import visualize_samples, get_nonlinearity, int_shape, get_trainable_variables, broadcast_masks_np\n'), ((4983, 5015), 'numpy.concatenate', 'np.concatenate', (['masks_np'], {'axis': '(0)'}), '(masks_np, axis=0)\n', (4997, 5015), True, 'import numpy as np\n'), ((5039, 5099), 'blocks.helpers.broadcast_masks_np', 'broadcast_masks_np', (['masks_np'], {'num_channels': 'self.num_channels'}), '(masks_np, num_channels=self.num_channels)\n', (5057, 5099), False, 'from blocks.helpers import visualize_samples, get_nonlinearity, int_shape, get_trainable_variables, broadcast_masks_np\n'), ((3423, 3443), 'numpy.load', 'np.load', (['use_mask_at'], {}), '(use_mask_at)\n', (3430, 3443), True, 'import numpy as np\n'), ((3670, 3694), 'numpy.concatenate', 'np.concatenate', (['masks_np'], {}), '(masks_np)\n', (3684, 3694), True, 'import numpy as np\n'), ((4685, 4759), 'blocks.helpers.broadcast_masks_np', 'broadcast_masks_np', (['masks_np[i][:, yi, xi]'], {'num_channels': 'self.num_channels'}), '(masks_np[i][:, yi, xi], 
num_channels=self.num_channels)\n', (4703, 4759), False, 'from blocks.helpers import visualize_samples, get_nonlinearity, int_shape, get_trainable_variables, broadcast_masks_np\n')] |
from nose.tools import *
from unittest.mock import patch, Mock
from rxaws.source.sourcebase import SourceBase
from botocore.client import BaseClient
# Shadows the botocore BaseClient imported above so the isinstance check in
# the test below passes against this stand-in.
class BaseClient(Mock):
    """Mock boto BaseClient; won't really do anything."""
class TestSourceBase:
    """Tests for SourceBase construction with a mocked boto3 client."""

    # inject the mock BaseClient in place of boto3.client's return value
    @patch('boto3.client', return_value=(BaseClient()))
    # mock the get_source_iterable abstractmethod
    # @patch.multiple(SourceBase, __abstractmethods__=set(), execute=Mock(return_value=[1,2,3]))
    def setup(self, mock_return_value):
        # create instance of class under test
        self.cut_sourcebase = SourceBase()

    def teardown(self):
        pass

    def test_sourcebase_create(self):
        # when a new SourceBase instance is created
        # it should contain an aws client
        assert self.cut_sourcebase.conn is not None
        assert isinstance(self.cut_sourcebase.conn, BaseClient) is True, \
            'expected BaseClient got: %s' % type(self.cut_sourcebase.conn)
| [
"botocore.client.BaseClient",
"rxaws.source.sourcebase.SourceBase"
] | [((607, 619), 'rxaws.source.sourcebase.SourceBase', 'SourceBase', ([], {}), '()\n', (617, 619), False, 'from rxaws.source.sourcebase import SourceBase\n'), ((330, 342), 'botocore.client.BaseClient', 'BaseClient', ([], {}), '()\n', (340, 342), False, 'from botocore.client import BaseClient\n')] |
import pytest
from checkout_sdk.events.events import RetrieveEventsRequest
from checkout_sdk.events.events_client import EventsClient
@pytest.fixture(scope='class')
def client(mock_sdk_configuration, mock_api_client):
    """Class-scoped EventsClient wired to the mocked API client and configuration."""
    events_client = EventsClient(
        api_client=mock_api_client,
        configuration=mock_sdk_configuration,
    )
    return events_client
class TestEventsClient:
    """Checks that EventsClient delegates each call to the underlying ApiClient."""

    # patch targets for the HTTP verbs EventsClient uses
    _GET = 'checkout_sdk.api_client.ApiClient.get'
    _POST = 'checkout_sdk.api_client.ApiClient.post'

    def test_retrieve_all_event_types(self, mocker, client: EventsClient):
        mocker.patch(self._GET, return_value='response')
        result = client.retrieve_all_event_types()
        assert result == 'response'

    def test_retrieve_events(self, mocker, client: EventsClient):
        mocker.patch(self._GET, return_value='response')
        request = RetrieveEventsRequest()
        assert client.retrieve_events(request) == 'response'

    def test_retrieve_event(self, mocker, client: EventsClient):
        mocker.patch(self._GET, return_value='response')
        result = client.retrieve_event('event_id')
        assert result == 'response'

    def test_retrieve_event_notification(self, mocker, client: EventsClient):
        mocker.patch(self._GET, return_value='response')
        result = client.retrieve_event_notification('event_id', 'notification_id')
        assert result == 'response'

    def test_retry_webhook(self, mocker, client: EventsClient):
        mocker.patch(self._POST, return_value='response')
        result = client.retry_webhook('event_id', 'webhook_id')
        assert result == 'response'

    def test_retry_all_webhooks(self, mocker, client: EventsClient):
        mocker.patch(self._POST, return_value='response')
        result = client.retry_all_webhooks('event_id')
        assert result == 'response'
| [
"pytest.fixture",
"checkout_sdk.events.events_client.EventsClient",
"checkout_sdk.events.events.RetrieveEventsRequest"
] | [((138, 167), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""class"""'}), "(scope='class')\n", (152, 167), False, 'import pytest\n'), ((232, 310), 'checkout_sdk.events.events_client.EventsClient', 'EventsClient', ([], {'api_client': 'mock_api_client', 'configuration': 'mock_sdk_configuration'}), '(api_client=mock_api_client, configuration=mock_sdk_configuration)\n', (244, 310), False, 'from checkout_sdk.events.events_client import EventsClient\n'), ((755, 778), 'checkout_sdk.events.events.RetrieveEventsRequest', 'RetrieveEventsRequest', ([], {}), '()\n', (776, 778), False, 'from checkout_sdk.events.events import RetrieveEventsRequest\n')] |
from __future__ import print_function
import os
import numpy as np
from tqdm import trange
from models import *
from utils import save_image
class Trainer(object):
    """Builds and trains a VDSR-style TF1 graph fed by ``batch_manager``.

    Owns the session, the Supervisor-managed checkpoints/summaries, and the
    train/test loops.  All hyper-parameters come from ``config``.
    """
    def __init__(self, config, batch_manager):
        """Wire up placeholders, optimizer state, the graph and the session."""
        tf.compat.v1.set_random_seed(config.random_seed)
        self.config = config
        self.batch_manager = batch_manager
        # Training tensors come straight from the batch queue; test tensors
        # are fed via placeholders of the same shape.
        self.x, self.y = batch_manager.batch()
        self.xt =tf.compat.v1.placeholder(tf.float32, shape=int_shape(self.x))
        self.yt =tf.compat.v1.placeholder(tf.float32, shape=int_shape(self.y))
        self.dataset = config.dataset
        self.beta1 = config.beta1
        self.beta2 = config.beta2
        self.optimizer = config.optimizer
        self.batch_size = config.batch_size
        # Learning rate is a variable so it can be decayed in-graph.
        self.lr = tf.Variable(config.lr, name='lr')
        self.lr_update = tf.assign(self.lr, tf.maximum(self.lr*0.1, config.lr_lower_boundary), name='lr_update')
        self.height = config.height
        self.width = config.width
        self.b_num = config.batch_size
        self.conv_hidden_num = config.conv_hidden_num
        self.repeat_num = config.repeat_num
        self.use_l2 = config.use_l2
        self.use_norm = config.use_norm
        self.model_dir = config.model_dir
        self.use_gpu = config.use_gpu
        self.data_format = config.data_format
        if self.data_format == 'NCHW':
            # GPU-friendly layout: transpose all four tensors once up front.
            self.x = nhwc_to_nchw(self.x)
            self.y = nhwc_to_nchw(self.y)
            self.xt = nhwc_to_nchw(self.xt)
            self.yt = nhwc_to_nchw(self.yt)
        self.start_step = config.start_step
        self.log_step = config.log_step
        self.test_step = config.test_step
        self.max_step = config.max_step
        self.save_sec = config.save_sec
        self.lr_update_step = config.lr_update_step
        self.step = tf.Variable(self.start_step, name='step', trainable=False)
        self.is_train = config.is_train
        self.build_model()
        self.saver = tf.compat.v1.train.Saver()
        self.summary_writer = tf.summary.FileWriter(self.model_dir)
        # Supervisor handles periodic checkpointing and session recovery.
        sv = tf.train.Supervisor(logdir=self.model_dir,
                                is_chief=True,
                                saver=self.saver,
                                summary_op=None,
                                summary_writer=self.summary_writer,
                                save_model_secs=self.save_sec,
                                global_step=self.step,
                                ready_for_local_init_op=None)
        gpu_options = tf.compat.v1.GPUOptions(allow_growth=True)
        sess_config = tf.compat.v1.ConfigProto(allow_soft_placement=True,
                                    gpu_options=gpu_options)
        self.sess = sv.prepare_or_wait_for_session(config=sess_config)
        if self.is_train:
            # Start the background thread that keeps the input queue full.
            self.batch_manager.start_thread(self.sess)
    def build_model(self):
        """Create the VDSR forward passes, losses, optimizer op and summaries."""
        self.y_, self.var = VDSR(
            self.x, self.conv_hidden_num, self.repeat_num, self.data_format, self.use_norm)
        self.y_img = denorm_img(self.y_, self.data_format) # for debug
        # Second, weight-sharing pass used for evaluation (reuse=True).
        self.yt_, _ = VDSR(
            self.xt, self.conv_hidden_num, self.repeat_num, self.data_format, self.use_norm,
            train=False, reuse=True)
        self.yt_ = tf.clip_by_value(self.yt_, 0, 1)
        self.yt_img = denorm_img(self.yt_, self.data_format)
        show_all_variables()
        if self.optimizer == 'adam':
            optimizer = tf.train.AdamOptimizer
        else:
            raise Exception("[!] Caution! Paper didn't use {} opimizer other than Adam".format(self.config.optimizer))
        optimizer = optimizer(self.lr, beta1=self.beta1, beta2=self.beta2)
        # losses
        # l1 and l2
        self.loss_l1 = tf.reduce_mean(tf.abs(self.y_ - self.y))
        self.loss_l2 = tf.reduce_mean(tf.squared_difference(self.y_, self.y))
        # total
        if self.use_l2:
            self.loss = self.loss_l2
        else:
            self.loss = self.loss_l1
        # test loss (reported as 1 - error so larger is better)
        self.tl1 = 1 - tf.reduce_mean(tf.abs(self.yt_ - self.yt))
        self.tl2 = 1 - tf.reduce_mean(tf.squared_difference(self.yt_, self.yt))
        # Placeholders used to inject aggregated test metrics into summaries.
        self.test_acc_l1 =tf.compat.v1.placeholder(tf.float32)
        self.test_acc_l2 =tf.compat.v1.placeholder(tf.float32)
        self.test_acc_iou =tf.compat.v1.placeholder(tf.float32)
        self.optim = optimizer.minimize(self.loss, global_step=self.step, var_list=self.var)
        summary = [
            tf.summary.image("y", self.y_img),
            tf.summary.scalar("loss/loss", self.loss),
            tf.summary.scalar("loss/loss_l1", self.loss_l1),
            tf.summary.scalar("loss/loss_l2", self.loss_l2),
            tf.summary.scalar("misc/lr", self.lr),
            tf.summary.scalar('misc/q', self.batch_manager.q.size())
        ]
        self.summary_op = tf.summary.merge(summary)
        summary = [
            tf.summary.image("x_sample", denorm_img(self.x, self.data_format)),
            tf.summary.image("y_sample", denorm_img(self.y, self.data_format)),
        ]
        self.summary_once = tf.summary.merge(summary) # call just once
        summary = [
            tf.summary.scalar("loss/test_acc_l1", self.test_acc_l1),
            tf.summary.scalar("loss/test_acc_l2", self.test_acc_l2),
            tf.summary.scalar("loss/test_acc_iou", self.test_acc_iou),
        ]
        self.summary_test = tf.summary.merge(summary)
    def train(self):
        """Main optimization loop: train, periodically evaluate, log and checkpoint."""
        x_list, xs, ys, sample_list = self.batch_manager.random_list(self.b_num)
        save_image(xs, '{}/x_gt.png'.format(self.model_dir))
        save_image(ys, '{}/y_gt.png'.format(self.model_dir))
        with open('{}/gt.txt'.format(self.model_dir), 'w') as f:
            for sample in sample_list:
                f.write(sample + '\n')
        # call once
        summary_once = self.sess.run(self.summary_once)
        self.summary_writer.add_summary(summary_once, 0)
        self.summary_writer.flush()
        for step in trange(self.start_step, self.max_step):
            fetch_dict = {
                "optim": self.optim,
                "loss": self.loss,
            }
            if step % self.log_step == 0 or step == self.max_step-1:
                fetch_dict.update({
                    "summary": self.summary_op,
                })
            # if step % self.test_step == self.test_step-1 or step == self.max_step-1:
            # NOTE(review): evaluation runs on EVERY step because the original
            # condition above was replaced by `if True:` — presumably a debug
            # change; confirm before long training runs (it is expensive).
            if True:
                l1, l2, iou, nb = 0, 0, 0, 0
                for x, y in self.batch_manager.test_batch():
                    if self.data_format == 'NCHW':
                        x = to_nchw_numpy(x)
                        y = to_nchw_numpy(y)
                    tl1, tl2, y_ = self.sess.run([self.tl1, self.tl2, self.yt_], {self.xt: x, self.yt: y})
                    l1 += tl1
                    l2 += tl2
                    nb += 1
                    # iou: intersection-over-union of the binarized outputs
                    y_I = np.logical_and(y>0, y_>0)
                    y_I_sum = np.sum(y_I, axis=(1, 2, 3))
                    y_U = np.logical_or(y>0, y_>0)
                    y_U_sum = np.sum(y_U, axis=(1, 2, 3))
                    # print(y_I_sum, y_U_sum)
                    nonzero_id = np.where(y_U_sum != 0)[0]
                    if nonzero_id.shape[0] == 0:
                        # empty union in every sample: count as a perfect match
                        acc = 1.0
                    else:
                        acc = np.average(y_I_sum[nonzero_id] / y_U_sum[nonzero_id])
                    iou += acc
                    if nb > 500:
                        break
                l1 /= float(nb)
                l2 /= float(nb)
                iou /= float(nb)
                summary_test = self.sess.run(self.summary_test,
                    {self.test_acc_l1: l1, self.test_acc_l2: l2, self.test_acc_iou: iou})
                self.summary_writer.add_summary(summary_test, step)
                self.summary_writer.flush()
            result = self.sess.run(fetch_dict)
            if step % self.log_step == 0 or step == self.max_step-1:
                self.summary_writer.add_summary(result['summary'], step)
                self.summary_writer.flush()
                loss = result['loss']
                assert not np.isnan(loss), 'Model diverged with loss = NaN'
                print("\n[{}/{}] Loss: {:.6f}".format(step, self.max_step, loss))
            if step % (self.log_step * 10) == 0 or step == self.max_step-1:
                self.generate(x_list, self.model_dir, idx=step)
            if step % self.lr_update_step == self.lr_update_step - 1:
                self.sess.run(self.lr_update)
        # save last checkpoint..
        save_path = os.path.join(self.model_dir, 'model.ckpt')
        self.saver.save(self.sess, save_path, global_step=self.step)
        self.batch_manager.stop_thread()
    def generate(self, x_samples, root_path=None, idx=None):
        """Run inference on ``x_samples`` and write the result grid to ``root_path``."""
        if self.data_format == 'NCHW':
            x_samples = to_nchw_numpy(x_samples)
        generated = self.sess.run(self.yt_img, {self.xt: x_samples})
        y_path = os.path.join(root_path, 'y_{}.png'.format(idx))
        save_image(generated, y_path, nrow=self.b_num)
        print("[*] Samples saved: {}".format(y_path))
"numpy.logical_and",
"numpy.average",
"numpy.where",
"os.path.join",
"numpy.logical_or",
"utils.save_image",
"numpy.sum",
"numpy.isnan",
"tqdm.trange"
] | [((6028, 6066), 'tqdm.trange', 'trange', (['self.start_step', 'self.max_step'], {}), '(self.start_step, self.max_step)\n', (6034, 6066), False, 'from tqdm import trange\n'), ((8716, 8758), 'os.path.join', 'os.path.join', (['self.model_dir', '"""model.ckpt"""'], {}), "(self.model_dir, 'model.ckpt')\n", (8728, 8758), False, 'import os\n'), ((9161, 9207), 'utils.save_image', 'save_image', (['generated', 'y_path'], {'nrow': 'self.b_num'}), '(generated, y_path, nrow=self.b_num)\n', (9171, 9207), False, 'from utils import save_image\n'), ((6989, 7018), 'numpy.logical_and', 'np.logical_and', (['(y > 0)', '(y_ > 0)'], {}), '(y > 0, y_ > 0)\n', (7003, 7018), True, 'import numpy as np\n'), ((7045, 7072), 'numpy.sum', 'np.sum', (['y_I'], {'axis': '(1, 2, 3)'}), '(y_I, axis=(1, 2, 3))\n', (7051, 7072), True, 'import numpy as np\n'), ((7099, 7127), 'numpy.logical_or', 'np.logical_or', (['(y > 0)', '(y_ > 0)'], {}), '(y > 0, y_ > 0)\n', (7112, 7127), True, 'import numpy as np\n'), ((7154, 7181), 'numpy.sum', 'np.sum', (['y_U'], {'axis': '(1, 2, 3)'}), '(y_U, axis=(1, 2, 3))\n', (7160, 7181), True, 'import numpy as np\n'), ((8272, 8286), 'numpy.isnan', 'np.isnan', (['loss'], {}), '(loss)\n', (8280, 8286), True, 'import numpy as np\n'), ((7261, 7283), 'numpy.where', 'np.where', (['(y_U_sum != 0)'], {}), '(y_U_sum != 0)\n', (7269, 7283), True, 'import numpy as np\n'), ((7426, 7479), 'numpy.average', 'np.average', (['(y_I_sum[nonzero_id] / y_U_sum[nonzero_id])'], {}), '(y_I_sum[nonzero_id] / y_U_sum[nonzero_id])\n', (7436, 7479), True, 'import numpy as np\n')] |
"""Serve a large numpy array to one TCP client using zero-copy send.

Listens on port 25000, accepts a single connection, streams the array
buffer with ``zerocopy.send_from`` and closes both sockets.
"""
import numpy

from socket import AF_INET, SOCK_STREAM, socket

from zerocopy import send_from

server = socket(AF_INET, SOCK_STREAM)
server.bind(('', 25000))
server.listen(1)
# Keep the client address distinct from the payload: the original code
# reused the name `a` for both, clobbering the address tuple.
conn, addr = server.accept()
try:
    payload = numpy.arange(0.0, 50000000.0)
    send_from(payload, conn)  # sends straight from the array's buffer, no copy
finally:
    # Close deterministically even if send_from raises.
    conn.close()
    server.close()
| [
"zerocopy.send_from",
"numpy.arange"
] | [((153, 182), 'numpy.arange', 'numpy.arange', (['(0.0)', '(50000000.0)'], {}), '(0.0, 50000000.0)\n', (165, 182), False, 'import numpy\n'), ((183, 198), 'zerocopy.send_from', 'send_from', (['a', 'c'], {}), '(a, c)\n', (192, 198), False, 'from zerocopy import send_from\n')] |
# Copyright (C) 2014, 2015 University of Vienna
# All rights reserved.
# BSD license.
# Author: <NAME> <<EMAIL>>
# Heap-based minimum-degree ordering with NO lookahead.
#
# See also min_degree.py which uses lookahead, and simple_md.py which is a
# hacked version of min_degree.py that still uses repeated linear scans to find
# the minimum degree nodes but does not do lookahead.
from __future__ import print_function
from py3compat import cPickle_loads, cPickle_dumps, cPickle_HIGHEST_PROTOCOL
from itertools import chain
from networkx import max_weight_matching
from pqueue import PriorityQueue as heapdict
from py3compat import irange
from order_util import colp_to_spiked_form, get_hessenberg_order, check_spiked_form,\
coo_matrix_to_bipartite, partial_relabel, argsort, \
get_inverse_perm, get_row_weights
from plot_ordering import plot_hessenberg, plot_bipartite
def hessenberg(rows, cols, values, n_rows, n_cols, tie_breaking):
    """Order a sparse matrix, given as COO triplets, to Hessenberg form.

    ``tie_breaking`` is one of 'MIN_FIRST', 'MAX_FIRST', 'IGNORE' and
    controls whether rows are pre-ordered by weight before the
    minimum-degree ordering runs.
    Returns ``(rowp, colp)``, the inverse row and column permutations;
    each is a permutation of ``0..n-1`` (checked by the asserts below).
    """
    assert tie_breaking in ('IGNORE', 'MIN_FIRST', 'MAX_FIRST'), tie_breaking
    # The col IDs in cols are shifted by n_rows, must undo later
    g, eqs, _ = coo_matrix_to_bipartite(rows, cols, values, (n_rows, n_cols))
    if tie_breaking != 'IGNORE':
        # Relabel the rows such that they are ordered by weight
        row_weights = get_row_weights(g, n_rows)
        reverse = True if tie_breaking == 'MAX_FIRST' else False
        row_pos = argsort(row_weights, reverse)
        mapping = {n: i for i, n in enumerate(row_pos)}
        #
        eqs = set(mapping[eq] for eq in eqs)
        g = partial_relabel(g, mapping)
    #
    rperm, cperm, _, _, _, _ = to_hessenberg_form(g, eqs)
    # Finally, shift the colp such that it is a permutation of 0 .. n_cols-1
    cperm = [c-n_rows for c in cperm]
    #
    if tie_breaking != 'IGNORE':
        # Undo the weight-based relabeling on the row permutation
        rperm = [row_pos[r] for r in rperm]
    #
    rowp, colp = get_inverse_perm(rperm, cperm)
    assert sorted(rowp) == list(irange(n_rows))
    assert sorted(colp) == list(irange(n_cols))
    return rowp, colp
################################################################################
#
# TODO Hereafter, rowp and colp here seems to be consistently used for
# rperm and cperm, the permuted row and col identifiers.
#
################################################################################
def to_spiked_form(g, eqs, forbidden=None):
    '''Returns the tuple of: bool singular, [row permutation],
    [column permutation], [spike variables], [residual equations]. The spikes
    and the residuals are ordered according to the permutation.'''
    # Check singularity, apparently only the permutation to spiked form needs it
    #assert 2*len(eqs) == len(g), 'Not a square matrix!'
    matches = max_weight_matching(g)
    # NOTE(review): bare debug print left in; also the singularity early-return
    # below is disabled, so the function always reports singular=False.
    print(len(matches))
    #if len(matches) != 2*len(eqs):
    # return (True, [], [], [], [])
    if forbidden is None:
        forbidden = set()
    # min_degree recomputes its own matching; the max-weight matching above
    # is only used for the (disabled) singularity check.
    rowp, colp_hess, matches, tear_set, sink_set = min_degree(g, eqs, forbidden)
    print('ok')
    colp = colp_to_spiked_form(rowp, colp_hess, matches, tear_set, sink_set)
    #check_spiked_form(g, rowp, colp, tear_set)
    # NOTE(review): plotting on every call looks like leftover debugging —
    # confirm before using this in batch runs.
    plot_hessenberg(g, rowp, colp_hess, [], '')
    plot_bipartite(g, forbidden, rowp, colp)
    tears = [c for c in colp if c in tear_set]
    sinks = [r for r in rowp if r in sink_set]
    return (False, rowp, colp, tears, sinks)
def to_hessenberg_form(g, eqs, forbidden=None):
    """Order the bipartite graph *g* to Hessenberg form.

    Returns the tuple (row permutation, column permutation, guessed
    variables, residual equations, row matches, col matches); every list
    is ordered according to the corresponding permutation.
    """
    rowp, colp, matches, tear_set, sink_set = min_degree(g, eqs, forbidden)

    def ordered(perm, members):
        # keep only the nodes present in `members`, preserving `perm` order
        return [n for n in perm if n in members]

    return (rowp,
            colp,
            ordered(colp, tear_set),
            ordered(rowp, sink_set),
            ordered(rowp, matches),
            ordered(colp, matches))
def min_degree(g_orig, eqs, forbidden=None):
    '''Heap-based minimum-degree ordering (no lookahead).

    Returns: tuple([row permutation], [column permutation],
    {eq: var and var: eq matches}, set(tear vars), set(residual equations)).'''
    # Duplicated in bb_tear.initial_solution with none forbidden
    assert eqs
    if forbidden is None:
        forbidden = set()
    if not isinstance(eqs, (set, dict)):
        eqs = set(eqs) # Make sure that `n in eqs` will be O(1).
    # g is a working copy; g_allowed additionally excludes forbidden edges.
    g_allowed, g = setup_graphs(g_orig, forbidden)
    # Priority queue of equations keyed by (cost, degree, eq).
    eq_tot = create_heap(g_allowed, g, eqs)
    rowp, matches = [ ], { }
    while eq_tot:
        # Pop the equation with minimal cost (ties broken by degree, then id).
        (_cost, _tot, _eq), eq = eq_tot.popitem()
        #assert _eq == eq, (_eq, eq)
        #print('Eq:', eq)
        rowp.append(eq)
        if g_allowed[eq]:
            # Match eq with its smallest allowed variable.
            var = sorted(g_allowed[eq])[0] # or [-1] for last
            assert eq not in matches
            assert var not in matches
            matches[eq] = var
            matches[var] = eq
            #print('Var:', var)
        # Remove eq and ALL its variables; re-key the equations that
        # shared a variable with eq, since their degree just changed.
        vrs = sorted(g[eq])
        eqs_update = set(chain.from_iterable(g[v] for v in vrs))
        eqs_update.discard(eq)
        g_allowed.remove_node(eq)
        g.remove_node(eq)
        g_allowed.remove_nodes_from(vrs)
        g.remove_nodes_from(vrs)
        for e in sorted(eqs_update): # keep in sync with create_heap
            tot = len(g[e])
            cost = tot-1 if g_allowed[e] else tot
            eq_tot[e] = (cost, tot, e)
    assert len(rowp) == len(eqs)
    # The row permutation determines the column permutation, let's get it!
    # get_hessenberg_order also asserts non-increasing envelope, among others
    colp = get_hessenberg_order(g_orig, eqs, rowp)
    # Unmatched rows are residuals (sinks); unmatched columns are tears.
    sink_set = { n for n in rowp if n not in matches }
    tear_set = { n for n in colp if n not in matches }
    #
    #print('Number of tears:', len(tear_set))
    #print('Row permutation:', rowp)
    #print('Col permutation:', colp)
    #
    return rowp, colp, matches, tear_set, sink_set
def setup_graphs(g_orig, forbidden):
    """Return ``(g_allowed, g)``: two independent deep copies of *g_orig*.

    ``g`` is an exact copy; ``g_allowed`` is the same copy with every edge
    listed in *forbidden* removed, so it contains only the allowed edges.
    Pickling once and unpickling twice is cheaper than two independent
    deep copies of the graph.
    """
    g_pkl = cPickle_dumps(g_orig, cPickle_HIGHEST_PROTOCOL)
    g = cPickle_loads(g_pkl)
    g_allowed = cPickle_loads(g_pkl)
    # Drop the forbidden edges from the "allowed" copy only.
    # (Removed: an unused `adj` alias and dead commented-out dict surgery.)
    for u, v in forbidden:
        g_allowed.remove_edge(u, v)
    return g_allowed, g
def create_heap(g_allowed, g, eqs):
    """Build the priority queue of equations, keyed by (cost, degree, eq).

    The cost is the degree minus one when the equation still has at least
    one allowed (matchable) variable, and the full degree otherwise.
    """
    heap = heapdict()
    for eq in sorted(eqs):
        degree = len(g[eq])
        cost = degree - 1 if g_allowed[eq] else degree
        heap[eq] = (cost, degree, eq)
    return heap
def run_tests():
    """Smoke test: order every generated test problem and dump the result."""
    from test_tearing import gen_testproblems
    labels = ('Rowp:', 'Colp:', 'Tears:', 'Residuals:', 'mr:', 'mc:')
    for g, eqs, forbidden in gen_testproblems():
        result = to_hessenberg_form(g, eqs, forbidden)
        for label, value in zip(labels, result):
            print(label, value)
if __name__=='__main__':
    # Run the smoke tests when executed as a script.
    run_tests()
| [
"order_util.get_inverse_perm",
"networkx.max_weight_matching",
"order_util.argsort",
"test_tearing.gen_testproblems",
"order_util.get_row_weights",
"py3compat.cPickle_dumps",
"py3compat.cPickle_loads",
"plot_ordering.plot_hessenberg",
"order_util.coo_matrix_to_bipartite",
"pqueue.PriorityQueue",
... | [((1204, 1265), 'order_util.coo_matrix_to_bipartite', 'coo_matrix_to_bipartite', (['rows', 'cols', 'values', '(n_rows, n_cols)'], {}), '(rows, cols, values, (n_rows, n_cols))\n', (1227, 1265), False, 'from order_util import colp_to_spiked_form, get_hessenberg_order, check_spiked_form, coo_matrix_to_bipartite, partial_relabel, argsort, get_inverse_perm, get_row_weights\n'), ((1961, 1991), 'order_util.get_inverse_perm', 'get_inverse_perm', (['rperm', 'cperm'], {}), '(rperm, cperm)\n', (1977, 1991), False, 'from order_util import colp_to_spiked_form, get_hessenberg_order, check_spiked_form, coo_matrix_to_bipartite, partial_relabel, argsort, get_inverse_perm, get_row_weights\n'), ((2816, 2838), 'networkx.max_weight_matching', 'max_weight_matching', (['g'], {}), '(g)\n', (2835, 2838), False, 'from networkx import max_weight_matching\n'), ((3103, 3168), 'order_util.colp_to_spiked_form', 'colp_to_spiked_form', (['rowp', 'colp_hess', 'matches', 'tear_set', 'sink_set'], {}), '(rowp, colp_hess, matches, tear_set, sink_set)\n', (3122, 3168), False, 'from order_util import colp_to_spiked_form, get_hessenberg_order, check_spiked_form, coo_matrix_to_bipartite, partial_relabel, argsort, get_inverse_perm, get_row_weights\n'), ((3226, 3269), 'plot_ordering.plot_hessenberg', 'plot_hessenberg', (['g', 'rowp', 'colp_hess', '[]', '""""""'], {}), "(g, rowp, colp_hess, [], '')\n", (3241, 3269), False, 'from plot_ordering import plot_hessenberg, plot_bipartite\n'), ((3274, 3314), 'plot_ordering.plot_bipartite', 'plot_bipartite', (['g', 'forbidden', 'rowp', 'colp'], {}), '(g, forbidden, rowp, colp)\n', (3288, 3314), False, 'from plot_ordering import plot_hessenberg, plot_bipartite\n'), ((5807, 5846), 'order_util.get_hessenberg_order', 'get_hessenberg_order', (['g_orig', 'eqs', 'rowp'], {}), '(g_orig, eqs, rowp)\n', (5827, 5846), False, 'from order_util import colp_to_spiked_form, get_hessenberg_order, check_spiked_form, coo_matrix_to_bipartite, partial_relabel, argsort, 
get_inverse_perm, get_row_weights\n'), ((6272, 6319), 'py3compat.cPickle_dumps', 'cPickle_dumps', (['g_orig', 'cPickle_HIGHEST_PROTOCOL'], {}), '(g_orig, cPickle_HIGHEST_PROTOCOL)\n', (6285, 6319), False, 'from py3compat import cPickle_loads, cPickle_dumps, cPickle_HIGHEST_PROTOCOL\n'), ((6328, 6348), 'py3compat.cPickle_loads', 'cPickle_loads', (['g_pkl'], {}), '(g_pkl)\n', (6341, 6348), False, 'from py3compat import cPickle_loads, cPickle_dumps, cPickle_HIGHEST_PROTOCOL\n'), ((6365, 6385), 'py3compat.cPickle_loads', 'cPickle_loads', (['g_pkl'], {}), '(g_pkl)\n', (6378, 6385), False, 'from py3compat import cPickle_loads, cPickle_dumps, cPickle_HIGHEST_PROTOCOL\n'), ((6623, 6633), 'pqueue.PriorityQueue', 'heapdict', ([], {}), '()\n', (6631, 6633), True, 'from pqueue import PriorityQueue as heapdict\n'), ((6878, 6896), 'test_tearing.gen_testproblems', 'gen_testproblems', ([], {}), '()\n', (6894, 6896), False, 'from test_tearing import gen_testproblems\n'), ((1385, 1411), 'order_util.get_row_weights', 'get_row_weights', (['g', 'n_rows'], {}), '(g, n_rows)\n', (1400, 1411), False, 'from order_util import colp_to_spiked_form, get_hessenberg_order, check_spiked_form, coo_matrix_to_bipartite, partial_relabel, argsort, get_inverse_perm, get_row_weights\n'), ((1495, 1524), 'order_util.argsort', 'argsort', (['row_weights', 'reverse'], {}), '(row_weights, reverse)\n', (1502, 1524), False, 'from order_util import colp_to_spiked_form, get_hessenberg_order, check_spiked_form, coo_matrix_to_bipartite, partial_relabel, argsort, get_inverse_perm, get_row_weights\n'), ((1648, 1675), 'order_util.partial_relabel', 'partial_relabel', (['g', 'mapping'], {}), '(g, mapping)\n', (1663, 1675), False, 'from order_util import colp_to_spiked_form, get_hessenberg_order, check_spiked_form, coo_matrix_to_bipartite, partial_relabel, argsort, get_inverse_perm, get_row_weights\n'), ((2024, 2038), 'py3compat.irange', 'irange', (['n_rows'], {}), '(n_rows)\n', (2030, 2038), False, 'from py3compat 
import irange\n'), ((2072, 2086), 'py3compat.irange', 'irange', (['n_cols'], {}), '(n_cols)\n', (2078, 2086), False, 'from py3compat import irange\n'), ((5194, 5232), 'itertools.chain.from_iterable', 'chain.from_iterable', (['(g[v] for v in vrs)'], {}), '(g[v] for v in vrs)\n', (5213, 5232), False, 'from itertools import chain\n')] |
#
# Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending
#
import unittest
from deephaven import read_csv, DHError
from deephaven.plot import Color, Colors
from deephaven.plot import Figure
from deephaven.plot import LineEndStyle, LineStyle
from tests.testbase import BaseTestCase
class ColorTestCase(BaseTestCase):
    """Exercises plot colors: named constants, HSL values, and Color factory validation."""

    def setUp(self):
        self.test_table = read_csv("tests/data/test_table.csv")

    def tearDown(self) -> None:
        self.test_table = None

    def _line_with_color(self, color):
        # Draw a simple x/y plot over the fixture table and return a line
        # styled with the given color.
        fig = Figure()
        plot = fig.plot_xy("plot1", self.test_table, x="a", y="b")
        return plot.line(color=color,
                         style=LineStyle(width=1.0, end_style=LineEndStyle.ROUND))

    def test_color(self):
        self.assertIsNotNone(self._line_with_color(Colors.RED))

    def test_color_hsl(self):
        custom_color = Color.of_hsl(h=128, s=58, l=68, alpha=0.6)
        self.assertIsNotNone(self._line_with_color(custom_color))

    def test_color_factory(self):
        # Valid constructions must not raise.
        Color.of_name("RED")
        Color.of_rgb(12, 16, 188, 200)
        Color.of_rgb_f(0.2, 0.6, 0.88, alpha=0.2)
        Color.of_hsl(h=128, s=58, l=68, alpha=0.6)
        # Unknown names and out-of-range components must raise DHError.
        invalid_calls = (
            lambda: Color.of_name("REDDER"),
            lambda: Color.of_rgb(12, 16, 288),
            lambda: Color.of_rgb_f(1.2, 0.6, 0.88, alpha=0.2),
            lambda: Color.of_hsl(h=377, s=58, l=168, alpha=10),
        )
        for invalid in invalid_calls:
            with self.assertRaises(DHError):
                invalid()
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| [
"deephaven.plot.Color.of_hsl",
"deephaven.plot.Color.of_name",
"deephaven.plot.LineStyle",
"deephaven.plot.Color.of_rgb_f",
"deephaven.plot.Color.of_rgb",
"deephaven.plot.Figure",
"unittest.main",
"deephaven.read_csv"
] | [((1658, 1673), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1671, 1673), False, 'import unittest\n'), ((378, 415), 'deephaven.read_csv', 'read_csv', (['"""tests/data/test_table.csv"""'], {}), "('tests/data/test_table.csv')\n", (386, 415), False, 'from deephaven import read_csv, DHError\n'), ((524, 532), 'deephaven.plot.Figure', 'Figure', ([], {}), '()\n', (530, 532), False, 'from deephaven.plot import Figure\n'), ((789, 797), 'deephaven.plot.Figure', 'Figure', ([], {}), '()\n', (795, 797), False, 'from deephaven.plot import Figure\n'), ((821, 863), 'deephaven.plot.Color.of_hsl', 'Color.of_hsl', ([], {'h': '(128)', 's': '(58)', 'l': '(68)', 'alpha': '(0.6)'}), '(h=128, s=58, l=68, alpha=0.6)\n', (833, 863), False, 'from deephaven.plot import Color, Colors\n'), ((1117, 1137), 'deephaven.plot.Color.of_name', 'Color.of_name', (['"""RED"""'], {}), "('RED')\n", (1130, 1137), False, 'from deephaven.plot import Color, Colors\n'), ((1146, 1176), 'deephaven.plot.Color.of_rgb', 'Color.of_rgb', (['(12)', '(16)', '(188)', '(200)'], {}), '(12, 16, 188, 200)\n', (1158, 1176), False, 'from deephaven.plot import Color, Colors\n'), ((1185, 1226), 'deephaven.plot.Color.of_rgb_f', 'Color.of_rgb_f', (['(0.2)', '(0.6)', '(0.88)'], {'alpha': '(0.2)'}), '(0.2, 0.6, 0.88, alpha=0.2)\n', (1199, 1226), False, 'from deephaven.plot import Color, Colors\n'), ((1235, 1277), 'deephaven.plot.Color.of_hsl', 'Color.of_hsl', ([], {'h': '(128)', 's': '(58)', 'l': '(68)', 'alpha': '(0.6)'}), '(h=128, s=58, l=68, alpha=0.6)\n', (1247, 1277), False, 'from deephaven.plot import Color, Colors\n'), ((1331, 1354), 'deephaven.plot.Color.of_name', 'Color.of_name', (['"""REDDER"""'], {}), "('REDDER')\n", (1344, 1354), False, 'from deephaven.plot import Color, Colors\n'), ((1408, 1433), 'deephaven.plot.Color.of_rgb', 'Color.of_rgb', (['(12)', '(16)', '(288)'], {}), '(12, 16, 288)\n', (1420, 1433), False, 'from deephaven.plot import Color, Colors\n'), ((1487, 1528), 'deephaven.plot.Color.of_rgb_f', 
'Color.of_rgb_f', (['(1.2)', '(0.6)', '(0.88)'], {'alpha': '(0.2)'}), '(1.2, 0.6, 0.88, alpha=0.2)\n', (1501, 1528), False, 'from deephaven.plot import Color, Colors\n'), ((1582, 1624), 'deephaven.plot.Color.of_hsl', 'Color.of_hsl', ([], {'h': '(377)', 's': '(58)', 'l': '(168)', 'alpha': '(10)'}), '(h=377, s=58, l=168, alpha=10)\n', (1594, 1624), False, 'from deephaven.plot import Color, Colors\n'), ((654, 704), 'deephaven.plot.LineStyle', 'LineStyle', ([], {'width': '(1.0)', 'end_style': 'LineEndStyle.ROUND'}), '(width=1.0, end_style=LineEndStyle.ROUND)\n', (663, 704), False, 'from deephaven.plot import LineEndStyle, LineStyle\n'), ((987, 1037), 'deephaven.plot.LineStyle', 'LineStyle', ([], {'width': '(1.0)', 'end_style': 'LineEndStyle.ROUND'}), '(width=1.0, end_style=LineEndStyle.ROUND)\n', (996, 1037), False, 'from deephaven.plot import LineEndStyle, LineStyle\n')] |
import uiza
from uiza import Connection
from uiza.api_resources.base.base import UizaBase
from uiza.settings.config import settings
from uiza.utility.utility import set_url
class Entity(UizaBase):
    """Client for the Uiza Entity (media) API endpoints.

    NOTE(review): every method below rewrites ``self.connection.url`` by
    appending a suffix to its current value, so calling two methods (or the
    same method twice) on one instance stacks suffixes — presumably each
    instance is meant to be used for a single call; confirm with callers.
    """
    def __init__(self):
        """Open an authenticated connection aimed at the entity endpoint."""
        self.connection = Connection(workspace_api_domain=uiza.workspace_api_domain, api_key=uiza.authorization)
        self.connection.url = set_url(
            workspace_api_domain=self.connection.workspace_api_domain,
            api_type=settings.uiza_api.entity.type,
            api_version=settings.uiza_api.entity.version,
            api_sub_url=settings.uiza_api.entity.sub_url
        )
    def search(self, keyword):
        """
        Search entities matching the given keyword.
        :param keyword: keyword to search entities by
        :return: decoded API response
        """
        self.connection.url = '{}/search'.format(self.connection.url)
        params = dict(keyword=keyword, appId=uiza.app_id)
        query = self.url_encode(params=params)
        data = self.connection.get(query=query)
        return data
    def generate_iframe(self, entityId, api):
        """
        Generate an embeddable iframe for an entity.
        :param entityId: id of entity
        :param api: api iframe
        :return: decoded API response
        """
        self.connection.url = '{}/iframe'.format(self.connection.url)
        params = dict(entityId=entityId, api=api, appId=uiza.app_id)
        query = self.url_encode(params=params)
        data = self.connection.get(query=query)
        return data
    def publish(self, id):
        """
        Publish entity to CDN, used for streaming.
        :param id: identifier of entity
        :return: decoded API response
        """
        self.connection.url = '{}/publish'.format(self.connection.url)
        data = self.connection.post(data={'id': id, 'appId': uiza.app_id})
        return data
    def get_status_publish(self, id):
        """
        Get the publish status of an entity.
        :param id: identifier of entity
        :return: decoded API response
        """
        self.connection.url = '{}/publish/status'.format(self.connection.url)
        query = self.url_encode(params={'id': id, 'appId': uiza.app_id})
        data = self.connection.get(query=query)
        return data
    def get_media_tracking(self, **kwargs):
        """
        Get media tracking information.
        :param kwargs: optional query filters (e.g. progress) merged into the request
        :return: decoded API response
        """
        self.connection.url = '{}/tracking'.format(self.connection.url)
        params = dict(appId=uiza.app_id)
        if kwargs:
            params.update(kwargs)
        query = self.url_encode(params=params)
        data = self.connection.get(query=query)
        return data
    def get_media_upload_detail(self, id):
        """
        Get the upload detail of a media entity.
        :param id: identifier of entity
        """
        # NOTE(review): this hits the same '/tracking' sub-url as
        # get_media_tracking — confirm this is the intended endpoint.
        self.connection.url = '{}/tracking'.format(self.connection.url)
        query = self.url_encode(params={'id': id, 'appId': uiza.app_id})
        data = self.connection.get(query=query)
        return data
    def get_aws_upload_key(self):
        """
        Return the temporary AWS upload bucket and key for this app.
        The app id is taken from the module-level ``uiza.app_id``.
        :return: decoded API response
        """
        aws_sub_url = 'admin/app/config/aws'
        # This endpoint lives under a different sub-url, so rebuild the URL
        # from scratch instead of appending to the entity endpoint.
        self.connection.url = set_url(
            workspace_api_domain=self.connection.workspace_api_domain,
            api_type=settings.uiza_api.entity.type,
            api_version=settings.uiza_api.entity.version,
            api_sub_url=aws_sub_url
        )
        query = self.url_encode(params={'appId': uiza.app_id})
        data = self.connection.get(query=query)
        return data
| [
"uiza.utility.utility.set_url",
"uiza.Connection"
] | [((250, 341), 'uiza.Connection', 'Connection', ([], {'workspace_api_domain': 'uiza.workspace_api_domain', 'api_key': 'uiza.authorization'}), '(workspace_api_domain=uiza.workspace_api_domain, api_key=uiza.\n authorization)\n', (260, 341), False, 'from uiza import Connection\n'), ((367, 575), 'uiza.utility.utility.set_url', 'set_url', ([], {'workspace_api_domain': 'self.connection.workspace_api_domain', 'api_type': 'settings.uiza_api.entity.type', 'api_version': 'settings.uiza_api.entity.version', 'api_sub_url': 'settings.uiza_api.entity.sub_url'}), '(workspace_api_domain=self.connection.workspace_api_domain, api_type\n =settings.uiza_api.entity.type, api_version=settings.uiza_api.entity.\n version, api_sub_url=settings.uiza_api.entity.sub_url)\n', (374, 575), False, 'from uiza.utility.utility import set_url\n'), ((3160, 3347), 'uiza.utility.utility.set_url', 'set_url', ([], {'workspace_api_domain': 'self.connection.workspace_api_domain', 'api_type': 'settings.uiza_api.entity.type', 'api_version': 'settings.uiza_api.entity.version', 'api_sub_url': 'aws_sub_url'}), '(workspace_api_domain=self.connection.workspace_api_domain, api_type\n =settings.uiza_api.entity.type, api_version=settings.uiza_api.entity.\n version, api_sub_url=aws_sub_url)\n', (3167, 3347), False, 'from uiza.utility.utility import set_url\n')] |
# Copyright 2020 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from microtc.utils import load_model, Counter
from b4msa.textmodel import TextModel
from microtc.weighting import TFIDF
from microtc.utils import SparseMatrix
from scipy.sparse import csr_matrix
from typing import List, Iterable, OrderedDict, Union, Dict, Any, Tuple
from .utils import download_tokens, handle_day
# Default keyword arguments for :py:class:`b4msa.textmodel.TextModel`:
# user mentions and URLs are deleted, numbers and emojis use option "none",
# punctuation is removed and duplicate characters are kept (del_dup=False).
# See the b4msa TextModel documentation for the exact semantics of each option.
TM_ARGS=dict(usr_option="delete", num_option="none",
             url_option="delete", emo_option="none",
             del_dup=False, del_punc=True)
class Vocabulary(object):
    """
    Vocabulary class is used to transform the tokens and their
    respective frequencies in a Text Model, as well as, to analyze
    the tokens obtained from tweets collected.
    This class can be used to replicate some of the Text Models
    developed for :py:class:`EvoMSA.base.EvoMSA`.
    :param data: Tokens and their frequencies; a dict with more than three
        keys is taken verbatim as the vocabulary, anything else (a day
        specification or a list of them) is downloaded
    :type data: dict or list
    :param lang: Language (Ar, En, or Es)
    :type lang: str
    :param country: Two letter country code
    :type country: str
    :param states: Whether to keep the state or accumulate the information on the country
    :type states: bool
    >>> from text_models.vocabulary import Vocabulary
    >>> day = dict(year=2020, month=2, day=14)
    >>> voc = Vocabulary(day, lang="En", country="US")
    """
    def __init__(self, data, lang: str="Es",
                 country: str=None, states: bool=False) -> None:
        self._lang = lang
        self._country = country
        self._states = states
        # A dict with more than three keys is already a vocabulary; a smaller
        # dict is a day specification, e.g., dict(year=2020, month=2, day=14).
        if isinstance(data, dict) and len(data) > 3:
            self._data = data
        else:
            self.date = data
            self._init(data)
        if not states:
            # Total mass of words (keys without "~") and of bigrams
            # (keys containing "~"); used by :py:meth:`probability`.
            self._n_words = sum([v for k, v in self.voc.items() if k.count("~") == 0])
            self._n_bigrams = sum([v for k, v in self.voc.items() if k.count("~")])
    def probability(self):
        """Transform, in place, each frequency into a probability; words and
        bigrams are normalized independently (only meaningful when the
        instance was created with states=False)."""
        voc = self.voc
        for k in voc:
            num = voc[k]
            if k.count("~"):
                den = self._n_bigrams
            else:
                den = self._n_words
            voc[k] = num / den
    def _init(self, data):
        """
        Process the :py:attr:`data` to create a :py:class:`microtc.utils.Counter`
        """
        def sum_vocs(vocs):
            # Accumulate a list of counters with their own __add__.
            voc = vocs[0]
            for v in vocs[1:]:
                voc = voc + v
            return voc
        if isinstance(data, list):
            vocs = [download_tokens(day, lang=self._lang, country=self._country)
                    for day in data]
            vocs = [load_model(x) for x in vocs]
            if isinstance(vocs[0], Counter):
                voc = sum_vocs(vocs)
            elif not self._states:
                # Each element is a list of (state, Counter) pairs; collapse
                # the states before accumulating across days.
                vocs = [sum_vocs([v for _, v in i]) for i in vocs]
                voc = sum_vocs(vocs)
            else:
                # Keep one counter per state, accumulating across days.
                voc = {k: v for k, v in vocs[0]}
                for v in vocs[1:]:
                    for k, d in v:
                        try:
                            voc[k] = voc[k] + d
                        except KeyError:
                            voc[k] = d
            self._data = voc
        else:
            self.voc = load_model(download_tokens(data, lang=self._lang, country=self._country))
    @property
    def date(self):
        """
        Date obtained from the filename, on multiple files, this is not available.
        """
        return self._date
    @date.setter
    def date(self, d):
        if isinstance(d, list):
            self._date = None
            return
        self._date = handle_day(d)
    @property
    def weekday(self):
        """
        Weekday
        """
        return str(self.date.weekday())
    @property
    def voc(self):
        """Vocabulary, i.e., tokens and their frequencies"""
        return self._data
    @voc.setter
    def voc(self, d):
        if not isinstance(d, list):
            self._data = d
            return
        if self._states:
            # Keep the per-state counters.
            self._data = {k: v for k, v in d}
            return
        # Accumulate the states into a single counter.
        aggr = d[0][1]
        for _, v in d[1:]:
            aggr = aggr + v
        self._data = aggr
    def common_words(self, quantile: float=None, bigrams=True):
        """Words used frequently; these correspond to py:attr:`EvoMSA.base.EvoMSA(B4MSA=True)`
        In the case quantile is given then these words and bigrams correspond to
        the most frequent ones up to that cumulative probability mass.
        """
        if quantile is None:
            from EvoMSA.utils import download
            return load_model(download("b4msa_%s.tm" % self._lang)).model.word2id
        words_N = sum([v for k, v in self.voc.items() if k.count("~") == 0])
        score = [[k, v / words_N] for k, v in self.voc.items() if k.count("~") == 0]
        score.sort(key=lambda x: x[1], reverse=True)
        cum, k = 0, 0
        while cum <= quantile:
            cum += score[k][1]
            k += 1
        output = [k for k, _ in score[:k]]
        if bigrams:
            bigrams_N = sum([v for k, v in self.voc.items() if k.count("~")])
            score_bi = [[k, v / bigrams_N] for k, v in self.voc.items() if k.count("~")]
            score_bi.sort(key=lambda x: x[1], reverse=True)
            cum, k = 0, 0
            while cum <= quantile:
                cum += score_bi[k][1]
                k += 1
            output += [k for k, _ in score_bi[:k]]
        return output
    @staticmethod
    def _co_occurrence(word: str, voc: dict) -> dict:
        """Frequencies of the bigrams, in voc, that contain word;
        the keys are the other element of each bigram."""
        D = dict()
        for k, v in voc.items():
            if k.count("~") == 0:
                continue
            a, b = k.split("~")
            if a != word and b != word:
                continue
            key = a if a != word else b
            D[key] = v
        return D
    def co_occurrence(self, word: str) -> dict:
        """Tokens co-occurring with word (one dict per state when states=True)"""
        if self._states:
            return {k: self._co_occurrence(word, v) for k, v in self.voc.items()}
        return self._co_occurrence(word, self.voc)
    def day_words(self) -> "Vocabulary":
        """Words used on the same day of different years"""
        from datetime import date, datetime
        hoy = date.today()
        # Bug fix: day must come from hoy.day (it used to be hoy.month).
        hoy = datetime(year=hoy.year, month=hoy.month, day=hoy.day)
        L = []
        for year in range(2015, hoy.year + 1):
            try:
                curr = datetime(year=year, month=self.date.month, day=self.date.day)
            except ValueError:
                # e.g., Feb-29 on a non-leap year
                continue
            if (curr - self.date).days == 0:
                continue
            try:
                download_tokens(curr, lang=self._lang, country=self._country)
            except Exception:
                # That day is not available for download; skip it.
                continue
            L.append(curr)
        if len(L) == 0:
            return None
        return self.__class__(L if len(L) > 1 else L[0],
                              lang=self._lang,
                              country=self._country,
                              states=self._states)
    def __iter__(self):
        for x in self.voc:
            yield x
    def remove_emojis(self):
        """Remove emojis"""
        from .dataset import Dataset
        data = Dataset()
        data.add(data.load_emojis())
        keys = [(k, [x for x in data.klass(k) if not x.isnumeric()]) for k in self]
        keys = [(k, v) for k, v in keys if len(v) and v[0] != "#"]
        for k, _ in keys:
            del self.voc[k]
    def previous_day(self):
        """Previous day"""
        import datetime
        one_day = datetime.timedelta(days=1)
        r = self.date - one_day
        _ = self.__class__(r, lang=self._lang,
                           country=self._country,
                           states=self._states)
        return _
    def __len__(self):
        return len(self.voc)
    def __getitem__(self, key):
        return self.voc[key]
    def __contains__(self, key):
        return key in self.voc
    def get(self, data, defaultvalue=0):
        """Frequency of data"""
        return self.voc.get(data, defaultvalue)
    def items(self):
        """Items of :py:attr:`self.voc`"""
        return self.voc.items()
    def remove(self, words: dict, bigrams=True) -> None:
        """
        Remove the words from the current vocabulary
        :param words: Tokens
        :param bigrams: Remove also the bigrams containing any of the words
        """
        if not bigrams:
            voc = self.voc
            for w in words:
                try:
                    del voc[w]
                except Exception:
                    continue
            return
        K = []
        for k in self.voc:
            if k.count("~"):
                a, b = k.split("~")
                if a in words or b in words:
                    K.append(k)
            if k in words:
                K.append(k)
        for k in K:
            del self.voc[k]
    def remove_qgrams(self):
        """Placeholder: q-grams are not stored in this vocabulary."""
        pass
    def histogram(self, min_elements: int=30, words: bool=False):
        """Group the tokens by frequency, merging consecutive frequencies
        until each bucket has at least min_elements tokens; by default only
        bigrams are considered.
        :param min_elements: Minimum number of tokens per bucket
        :param words: Include also the words (tokens without "~")
        """
        # Use the concrete class; instantiating the typing alias is deprecated.
        from collections import OrderedDict
        group = defaultdict(list)
        for k, v in self.voc.items():
            if words or k.count("~"):
                group[v].append(k)
        keys = sorted(group.keys())
        hist = OrderedDict()
        pending = list()
        for freq in keys:
            tokens = group[freq]
            if len(pending) + len(tokens) >= min_elements:
                hist[freq] = pending + tokens
                pending = list()
                continue
            pending += tokens
        if len(pending):
            # Leftovers are attached to the last frequency seen.
            hist[freq] = pending
        return hist
class Tokenize(object):
    """Turn a text into a sequence of integers, one per known token;
    tokens and q-grams that do not occur in the text are skipped.
    Internally the vocabulary is stored as a character trie.
    >>> from text_models import Tokenize
    >>> tok = Tokenize().fit(["hi~mario", "mario"])
    >>> tok.transform("good morning mario")
    [1]
    """
    def __init__(self, tm_args: Dict[str, Any]=TM_ARGS):
        self._head = {}
        self._vocabulary = {}
        self._tag = "__end__"
        self._textmodel = TextModel(**tm_args)
    @property
    def vocabulary(self) -> Dict[str, int]:
        """Vocabulary used"""
        return self._vocabulary
    @property
    def textModel(self):
        """Text model, i.e., :py:class::`b4msa.text_model.TextModel`
        """
        return self._textmodel
    def fit(self, tokens: List[str]) -> 'Tokenize':
        """Train the tokenizer.
        :param tokens: Vocabulary as a list of tokens
        :type tokens: List[str]
        """
        voc = self._vocabulary
        trie = self._head
        end_tag = self._tag
        for token in tokens:
            if token in voc:
                continue
            # Walk/extend the trie one character at a time.
            node = trie
            for ch in token:
                node = node.setdefault(ch, {})
            # Identifiers are assigned in insertion order.
            identifier = len(voc)
            voc[token] = identifier
            node[end_tag] = identifier
        return self
    def transform(self, texts: Union[Iterable[str], str]) -> List[Union[List[int], int]]:
        """Transform the input into a sequence where each element represents
        a token in the vocabulary (i.e., :py:attr:`text_models.vocabulary.Tokenize.vocabulary`)"""
        normalize = self.textModel.text_transformations
        if isinstance(texts, str):
            return self._transform(normalize(texts))
        return [self._transform(normalize(t)) for t in texts]
    def _transform(self, text: str) -> List[int]:
        """Greedy left-to-right longest-match scan of text."""
        sequence = []
        pos = 0
        size = len(text)
        while pos < size:
            wordid, nxt = self.find(text, i=pos)
            if wordid == -1:
                # No token starts here; advance one character.
                pos += 1
            else:
                pos = nxt
                sequence.append(wordid)
        return sequence
    def find(self, text: str, i: int=0) -> Tuple[int, int]:
        """Longest token of the vocabulary starting at position i;
        returns (identifier, position after the match) or (-1, i)."""
        node = self._head
        end_tag = self._tag
        wordid, end = -1, i
        while i < len(text):
            nxt = node.get(text[i])
            if nxt is None:
                break
            node = nxt
            i += 1
            if end_tag in node:
                # Remember the longest complete token seen so far.
                wordid = node[end_tag]
                end = i
        return wordid, end
    def id2word(self, id: int) -> str:
        """Token associated with id
        :param id: Identifier
        :type id: int
        """
        try:
            mapping = self._id2w
        except AttributeError:
            # Build the inverse map lazily and cache it.
            mapping = {num: tok for tok, num in self.vocabulary.items()}
            self._id2w = mapping
        return mapping[id]
class BagOfWords(SparseMatrix):
    """Bag-of-words model backed by TFIDF weighting and
    :py:class:`text_models.vocabulary.Tokenize`
    :param tokens: Language (Ar|En|Es) or list of tokens
    :type tokens: str|List
    """
    def __init__(self, tokens: Union[str, List[str]]="Es"):
        from microtc.utils import load_model
        from EvoMSA.utils import download
        if isinstance(tokens, list):
            vocabulary = tokens
        else:
            # Use the vocabulary of the pre-trained b4msa model.
            tm = load_model(download("b4msa_%s.tm" % tokens))
            vocabulary = list(tm.model.word2id.keys())
        def clean(cdn):
            # Drop empty pieces, e.g., produced by a leading/trailing "~".
            return "~".join([piece for piece in cdn.split("~") if len(piece)])
        tok = Tokenize()
        # Insertion order fixes the identifiers: bigrams first, then words,
        # finally the q-grams (prefix "q:").
        tok.fit([clean(k) for k in vocabulary
                 if k.count("~") and k[:2] != "q:"])
        tok.fit([clean(k) for k in vocabulary
                 if k.count("~") == 0 and k[:2] != "q:"])
        qgrams = [clean(k[2:]) for k in vocabulary if k[:2] == "q:"]
        tok.fit([q for q in qgrams if q.count("~") == 0 and len(q) >= 2])
        self._tokenize = tok
        self._text = "text"
    @property
    def tokenize(self) -> Tokenize:
        """
        :py:class:`text_models.vocabulary.Tokenize` instance
        """
        return self._tokenize
    def get_text(self, data: Union[dict, str]) -> str:
        """Get text keywords from dict"""
        return data if isinstance(data, str) else data[self._text]
    def fit(self, X: List[Union[str, dict]]) -> 'BagOfWords':
        """ Train the Bag of words model"""
        from microtc.utils import Counter
        freq = Counter()
        for sequence in self.tokenize.transform([self.get_text(d) for d in X]):
            freq.update(sequence)
        self._tfidf = TFIDF.counter(freq)
        return self
    @property
    def tfidf(self)->TFIDF:
        return self._tfidf
    def id2word(self, id: int) -> str:
        """Token associated with id
        :param id: Identifier
        :type id: int
        """
        try:
            inverse = self._w_id2w
        except AttributeError:
            # Build the inverse TFIDF map lazily and cache it.
            inverse = {v: k for k, v in self.tfidf.word2id.items()}
            self._w_id2w = inverse
        return self.tokenize.id2word(inverse[id])
    @property
    def num_terms(self):
        return len(self.tokenize.vocabulary)
    def _transform(self, data: List[str]) -> List[Tuple[int, float]]:
        """Transform a list of text to a Bag of Words using TFIDF"""
        weights = self.tfidf
        return [weights[sequence] for sequence in self.tokenize.transform(data)]
    def transform(self, data: List[str]) -> csr_matrix:
        """Transform a list of text to a Bag of Words using TFIDF"""
        return self.tonp(self._transform(data))
#!/usr/bin/env python
"""
@package mi.dataset.parser.test.test_nutnrb
@file marine-integrations/mi/dataset/parser/test/test_nutnrb.py
@author <NAME>
@brief Test code for a Nutnrb data parser
"""
import unittest
import gevent
from StringIO import StringIO
from nose.plugins.attrib import attr
from mi.core.log import get_logger ; log = get_logger()
from mi.core.exceptions import SampleException
from mi.dataset.test.test_parser import ParserUnitTestCase
from mi.dataset.dataset_driver import DataSetDriverConfigKeys
from mi.dataset.parser.nutnrb import NutnrbParser, NutnrbDataParticle, StateKey
# Add a mixin here if needed
@unittest.skip('Nutnr parser is broken, timestamp needs to be fixed')
@attr('UNIT', group='mi')
class NutnrbParserUnitTestCase(ParserUnitTestCase):
"""
WFP Parser unit test suite
"""
TEST_DATA = """
2012/12/13 15:29:20.362 [nutnr:DLOGP1]:Idle state, without initialize
2012/12/13 15:30:06.455 [nutnr:DLOGP1]:S
2012/12/13 15:30:06.676 [nutnr:DLOGP1]:O
2012/12/13 15:30:06.905 [nutnr:DLOGP1]:S
2012/12/13 15:30:07.130 [nutnr:DLOGP1]:Y
2012/12/13 15:30:07.355 [nutnr:DLOGP1]:1
2012/12/13 15:30:07.590 [nutnr:DLOGP1]:T
2012/12/13 15:30:07.829 [nutnr:DLOGP1]:Y
2012/12/13 15:30:08.052 [nutnr:DLOGP1]:3
2012/12/13 15:30:08.283 [nutnr:DLOGP1]:L
2012/12/13 15:30:08.524 [nutnr:DLOGP1]:Y
2012/12/13 15:30:08.743 [nutnr:DLOGP1]:1
2012/12/13 15:30:08.969 [nutnr:DLOGP1]:D
2012/12/13 15:30:09.194 [nutnr:DLOGP1]:Y
2012/12/13 15:30:09.413 [nutnr:DLOGP1]:0
2012/12/13 15:30:09.623 [nutnr:DLOGP1]:Q
2012/12/13 15:30:09.844 [nutnr:DLOGP1]:D
2012/12/13 15:30:10.096 [nutnr:DLOGP1]:O
2012/12/13 15:30:10.349 [nutnr:DLOGP1]:Y
2012/12/13 15:30:10.570 [nutnr:DLOGP1]:5
2012/12/13 15:30:10.779 [nutnr:DLOGP1]:Q
2012/12/13 15:30:10.990 [nutnr:DLOGP1]:Q
2012/12/13 15:30:11.223 [nutnr:DLOGP1]:Y
2012/12/13 15:30:11.703 [nutnr:DLOGP1]:Y
2012/12/13 15:30:12.841 [nutnr:DLOGP1]:2012/12/13 15:30:11
2012/12/13 15:30:13.261 [nutnr:DLOGP1]:Instrument started with initialize
2012/12/13 15:30:19.270 [nutnr:DLOGP1]:onds.
2012/12/13 15:30:20.271 [nutnr:DLOGP1]:ISUS will start in 7 seconds.
2012/12/13 15:30:21.272 [nutnr:DLOGP1]:ISUS will start in 6 seconds.
2012/12/13 15:30:22.272 [nutnr:DLOGP1]:ISUS will start in 5 seconds.
2012/12/13 15:30:23.273 [nutnr:DLOGP1]:ISUS will start in 4 seconds.
2012/12/13 15:30:24.273 [nutnr:DLOGP1]:ISUS will start in 3 seconds.
2012/12/13 15:30:25.274 [nutnr:DLOGP1]:ISUS will start in 2 seconds.
2012/12/13 15:30:26.275 [nutnr:DLOGP1]:ISUS will start in 1 seconds.
2012/12/13 15:30:27.275 [nutnr:DLOGP1]:ISUS will start in 0 seconds.
2012/12/13 15:30:28.309 [nutnr:DLOGP1]:12/13/2012 15:30:26: Message: Entering low power suspension, waiting for trigger.
2012/12/13 15:30:59.889 [nutnr:DLOGP1]: ++++++++++ charged
2012/12/13 15:31:00.584 [nutnr:DLOGP1]: ON Spectrometer.
2012/12/13 15:31:01.366 [nutnr:DLOGP1]:12/13/2012 15:30:59: Message: Spectrometer powered up.
2012/12/13 15:31:01.435 [nutnr:DLOGP1]:12/13/2012 15:30:59: Message: Turning ON UV light source.
2012/12/13 15:31:06.917 [nutnr:DLOGP1]:12/13/2012 15:31:04: Message: UV light source powered up.
2012/12/13 15:31:07.053 [nutnr:DLOGP1]:12/13/2012 15:31:04: Message: Data log file is 'DATA\SCH12348.DAT'.
2012/12/13 15:31:08.726 SATNDC0239,2012348,15.518322,0.00,0.00,0.00,0.00,0.000000
2012/12/13 15:31:10.065 SATNLC0239,2012348,15.518666,-5.48,20.38,-31.12,0.59,0.000231
2012/12/13 15:31:11.405 SATNLC0239,2012348,15.519024,-6.38,24.24,-37.41,0.61,0.000191
2012/12/13 15:31:12.720 SATNLC0239,2012348,15.519397,-6.77,24.80,-38.00,0.62,0.000203
2012/12/13 15:42:25.429 [nutnr:DLOGP1]:ISUS will start in 15 seconds.
2012/12/13 15:42:26.430 [nutnr:DLOGP1]:ISUS will start in 14 seconds.
2012/12/13 15:42:27.431 [nutnr:DLOGP1]:ISUS will start in 13 seconds.
2012/12/13 15:42:28.431 [nutnr:DLOGP1]:ISUS will start in 12 seconds.
2012/12/13 15:42:29.432 [nutnr:DLOGP1]:ISUS will start in 11 seconds.
2012/12/13 15:42:30.433 [nutnr:DLOGP1]:ISUS will start in 10 seconds.
2012/12/13 15:42:31.434 [nutnr:DLOGP1]:ISUS will start in 9 seconds.
2012/12/13 15:42:32.435 [nutnr:DLOGP1]:ISUS will start in 8 seconds.
2012/12/13 15:42:33.436 [nutnr:DLOGP1]:ISUS will start in 7 seconds.
2012/12/13 15:42:34.436 [nutnr:DLOGP1]:ISUS will start in 6 seconds.
2012/12/13 15:42:35.437 [nutnr:DLOGP1]:ISUS will start in 5 seconds.
2012/12/13 15:42:36.438 [nutnr:DLOGP1]:ISUS will start in 4 seconds.
2012/12/13 15:42:37.438 [nutnr:DLOGP1]:ISUS will start in 3 seconds.
2012/12/13 15:42:38.439 [nutnr:DLOGP1]:ISUS will start in 2 seconds.
2012/12/13 15:42:39.440 [nutnr:DLOGP1]:ISUS will start in 1 seconds.
2012/12/13 15:42:40.440 [nutnr:DLOGP1]:ISUS will start in 0 seconds.
2012/12/13 15:42:41.474 [nutnr:DLOGP1]:12/13/2012 15:42:38: Message: Entering low power suspension, waiting for trigger.
2012/12/13 15:45:26.795 [nutnr:DLOGP1]:Idle state, without initialize
2012/12/13 15:45:46.793 [nutnr:DLOGP1]:Instrument started
2012/12/13 17:51:53.412 [nutnr:DLOGP1]:S
2012/12/13 17:51:53.633 [nutnr:DLOGP1]:O
2012/12/13 17:51:53.862 [nutnr:DLOGP1]:S
2012/12/13 17:51:54.088 [nutnr:DLOGP1]:Y
2012/12/13 17:51:54.312 [nutnr:DLOGP1]:1
2012/12/13 17:51:54.548 [nutnr:DLOGP1]:T
2012/12/13 17:51:54.788 [nutnr:DLOGP1]:Y
2012/12/13 17:51:55.011 [nutnr:DLOGP1]:3
2012/12/13 17:51:55.243 [nutnr:DLOGP1]:L
2012/12/13 17:51:55.483 [nutnr:DLOGP1]:Y
2012/12/13 17:51:55.702 [nutnr:DLOGP1]:1
2012/12/13 17:51:55.928 [nutnr:DLOGP1]:D
2012/12/13 17:51:56.154 [nutnr:DLOGP1]:Y
2012/12/13 17:51:56.373 [nutnr:DLOGP1]:0
2012/12/13 17:51:56.582 [nutnr:DLOGP1]:Q
2012/12/13 17:51:56.803 [nutnr:DLOGP1]:D
2012/12/13 17:51:57.055 [nutnr:DLOGP1]:O
2012/12/13 17:51:57.308 [nutnr:DLOGP1]:Y
2012/12/13 17:51:57.529 [nutnr:DLOGP1]:5
2012/12/13 17:51:57.738 [nutnr:DLOGP1]:Q
2012/12/13 17:51:57.948 [nutnr:DLOGP1]:Q
2012/12/13 17:51:58.181 [nutnr:DLOGP1]:Y
2012/12/13 17:51:58.659 [nutnr:DLOGP1]:Y
2012/12/13 17:51:59.747 [nutnr:DLOGP1]:2012/12/13 17:51:58
2012/12/13 17:52:00.166 [nutnr:DLOGP1]:Instrument started with initialize
"""
LONG_DATA = """
2012/12/13 15:29:20.362 [nutnr:DLOGP1]:Idle state, without initialize
2012/12/13 15:30:06.455 [nutnr:DLOGP1]:S
2012/12/13 15:30:06.676 [nutnr:DLOGP1]:O
2012/12/13 15:30:06.905 [nutnr:DLOGP1]:S
2012/12/13 15:30:07.130 [nutnr:DLOGP1]:Y
2012/12/13 15:30:07.355 [nutnr:DLOGP1]:1
2012/12/13 15:30:07.590 [nutnr:DLOGP1]:T
2012/12/13 15:30:07.829 [nutnr:DLOGP1]:Y
2012/12/13 15:30:08.052 [nutnr:DLOGP1]:3
2012/12/13 15:30:08.283 [nutnr:DLOGP1]:L
2012/12/13 15:30:08.524 [nutnr:DLOGP1]:Y
2012/12/13 15:30:08.743 [nutnr:DLOGP1]:1
2012/12/13 15:30:08.969 [nutnr:DLOGP1]:D
2012/12/13 15:30:09.194 [nutnr:DLOGP1]:Y
2012/12/13 15:30:09.413 [nutnr:DLOGP1]:0
2012/12/13 15:30:09.623 [nutnr:DLOGP1]:Q
2012/12/13 15:30:09.844 [nutnr:DLOGP1]:D
2012/12/13 15:30:10.096 [nutnr:DLOGP1]:O
2012/12/13 15:30:10.349 [nutnr:DLOGP1]:Y
2012/12/13 15:30:10.570 [nutnr:DLOGP1]:5
2012/12/13 15:30:10.779 [nutnr:DLOGP1]:Q
2012/12/13 15:30:10.990 [nutnr:DLOGP1]:Q
2012/12/13 15:30:11.223 [nutnr:DLOGP1]:Y
2012/12/13 15:30:11.703 [nutnr:DLOGP1]:Y
2012/12/13 15:30:12.841 [nutnr:DLOGP1]:2012/12/13 15:30:11
2012/12/13 15:30:13.261 [nutnr:DLOGP1]:Instrument started with initialize
2012/12/13 15:30:19.270 [nutnr:DLOGP1]:onds.
2012/12/13 15:30:20.271 [nutnr:DLOGP1]:ISUS will start in 7 seconds.
2012/12/13 15:30:21.272 [nutnr:DLOGP1]:ISUS will start in 6 seconds.
2012/12/13 15:30:22.272 [nutnr:DLOGP1]:ISUS will start in 5 seconds.
2012/12/13 15:30:23.273 [nutnr:DLOGP1]:ISUS will start in 4 seconds.
2012/12/13 15:30:24.273 [nutnr:DLOGP1]:ISUS will start in 3 seconds.
2012/12/13 15:30:25.274 [nutnr:DLOGP1]:ISUS will start in 2 seconds.
2012/12/13 15:30:26.275 [nutnr:DLOGP1]:ISUS will start in 1 seconds.
2012/12/13 15:30:27.275 [nutnr:DLOGP1]:ISUS will start in 0 seconds.
2012/12/13 15:30:28.309 [nutnr:DLOGP1]:12/13/2012 15:30:26: Message: Entering low power suspension, waiting for trigger.
2012/12/13 15:30:59.889 [nutnr:DLOGP1]: ++++++++++ charged
2012/12/13 15:31:00.584 [nutnr:DLOGP1]: ON Spectrometer.
2012/12/13 15:31:01.366 [nutnr:DLOGP1]:12/13/2012 15:30:59: Message: Spectrometer powered up.
2012/12/13 15:31:01.435 [nutnr:DLOGP1]:12/13/2012 15:30:59: Message: Turning ON UV light source.
2012/12/13 15:31:06.917 [nutnr:DLOGP1]:12/13/2012 15:31:04: Message: UV light source powered up.
2012/12/13 15:31:07.053 [nutnr:DLOGP1]:12/13/2012 15:31:04: Message: Data log file is 'DATA\SCH12348.DAT'.
2012/12/13 15:31:08.726 SATNDC0239,2012348,15.518322,0.00,0.00,0.00,0.00,0.000000
2012/12/13 15:31:10.065 SATNLC0239,2012348,15.518666,-5.48,20.38,-31.12,0.59,0.000231
2012/12/13 15:31:11.405 SATNLC0239,2012348,15.519024,-6.38,24.24,-37.41,0.61,0.000191
2012/12/13 15:31:12.720 SATNLC0239,2012348,15.519397,-6.77,24.80,-38.00,0.62,0.000203
2012/12/13 15:31:14.041 SATNLC0239,2012348,15.519770,-5.28,18.39,-27.76,0.59,0.000212
2012/12/13 15:31:15.350 SATNLC0239,2012348,15.520128,-7.57,32.65,-51.28,0.62,0.000186
2012/12/13 15:31:16.695 SATNLC0239,2012348,15.520501,-6.17,24.43,-37.71,0.60,0.000218
2012/12/13 15:31:18.015 SATNLC0239,2012348,15.520875,-5.59,18.68,-28.01,0.60,0.000166
2012/12/13 15:31:19.342 SATNLC0239,2012348,15.521232,-7.30,30.87,-48.21,0.62,0.000235
2012/12/13 15:31:20.704 SATNLC0239,2012348,15.521605,-7.52,31.35,-49.03,0.63,0.000240
2012/12/13 15:42:25.429 [nutnr:DLOGP1]:ISUS will start in 15 seconds.
2012/12/13 15:42:26.430 [nutnr:DLOGP1]:ISUS will start in 14 seconds.
2012/12/13 15:42:27.431 [nutnr:DLOGP1]:ISUS will start in 13 seconds.
2012/12/13 15:42:28.431 [nutnr:DLOGP1]:ISUS will start in 12 seconds.
2012/12/13 15:42:29.432 [nutnr:DLOGP1]:ISUS will start in 11 seconds.
2012/12/13 15:42:30.433 [nutnr:DLOGP1]:ISUS will start in 10 seconds.
2012/12/13 15:42:31.434 [nutnr:DLOGP1]:ISUS will start in 9 seconds.
2012/12/13 15:42:32.435 [nutnr:DLOGP1]:ISUS will start in 8 seconds.
2012/12/13 15:42:33.436 [nutnr:DLOGP1]:ISUS will start in 7 seconds.
2012/12/13 15:42:34.436 [nutnr:DLOGP1]:ISUS will start in 6 seconds.
2012/12/13 15:42:35.437 [nutnr:DLOGP1]:ISUS will start in 5 seconds.
2012/12/13 15:42:36.438 [nutnr:DLOGP1]:ISUS will start in 4 seconds.
2012/12/13 15:42:37.438 [nutnr:DLOGP1]:ISUS will start in 3 seconds.
2012/12/13 15:42:38.439 [nutnr:DLOGP1]:ISUS will start in 2 seconds.
2012/12/13 15:42:39.440 [nutnr:DLOGP1]:ISUS will start in 1 seconds.
2012/12/13 15:42:40.440 [nutnr:DLOGP1]:ISUS will start in 0 seconds.
2012/12/13 15:42:41.474 [nutnr:DLOGP1]:12/13/2012 15:42:38: Message: Entering low power suspension, waiting for trigger.
2012/12/13 15:45:26.795 [nutnr:DLOGP1]:Idle state, without initialize
2012/12/13 15:45:46.793 [nutnr:DLOGP1]:Instrument started
2012/12/13 17:51:53.412 [nutnr:DLOGP1]:S
2012/12/13 17:51:53.633 [nutnr:DLOGP1]:O
2012/12/13 17:51:53.862 [nutnr:DLOGP1]:S
2012/12/13 17:51:54.088 [nutnr:DLOGP1]:Y
2012/12/13 17:51:54.312 [nutnr:DLOGP1]:1
2012/12/13 17:51:54.548 [nutnr:DLOGP1]:T
2012/12/13 17:51:54.788 [nutnr:DLOGP1]:Y
2012/12/13 17:51:55.011 [nutnr:DLOGP1]:3
2012/12/13 17:51:55.243 [nutnr:DLOGP1]:L
2012/12/13 17:51:55.483 [nutnr:DLOGP1]:Y
2012/12/13 17:51:55.702 [nutnr:DLOGP1]:1
2012/12/13 17:51:55.928 [nutnr:DLOGP1]:D
2012/12/13 17:51:56.154 [nutnr:DLOGP1]:Y
2012/12/13 17:51:56.373 [nutnr:DLOGP1]:0
2012/12/13 17:51:56.582 [nutnr:DLOGP1]:Q
2012/12/13 17:51:56.803 [nutnr:DLOGP1]:D
2012/12/13 17:51:57.055 [nutnr:DLOGP1]:O
2012/12/13 17:51:57.308 [nutnr:DLOGP1]:Y
2012/12/13 17:51:57.529 [nutnr:DLOGP1]:5
2012/12/13 17:51:57.738 [nutnr:DLOGP1]:Q
2012/12/13 17:51:57.948 [nutnr:DLOGP1]:Q
2012/12/13 17:51:58.181 [nutnr:DLOGP1]:Y
2012/12/13 17:51:58.659 [nutnr:DLOGP1]:Y
2012/12/13 17:51:59.747 [nutnr:DLOGP1]:2012/12/13 17:51:58
2012/12/13 17:52:00.166 [nutnr:DLOGP1]:Instrument started with initialize
"""
BAD_TEST_DATA = """
2012/12/13 15:29:20.362 [nutnr:DLOGP1]:Idle state, without initialize
2012/12/13 15:30:06.455 [nutnr:DLOGP1]:S
2012/12/13 15:30:06.676 [nutnr:DLOGP1]:O
2012/12/13 15:30:06.905 [nutnr:DLOGP1]:S
2012/12/13 15:30:07.130 [nutnr:DLOGP1]:Y
2012/12/13 15:30:07.355 [nutnr:DLOGP1]:1
2012/12/13 15:30:07.590 [nutnr:DLOGP1]:T
2012/12/13 15:30:07.829 [nutnr:DLOGP1]:Y
2012/12/13 15:30:08.052 [nutnr:DLOGP1]:3
2012/12/13 15:30:08.283 [nutnr:DLOGP1]:L
2012/12/13 15:30:08.524 [nutnr:DLOGP1]:Y
2012/12/13 15:30:08.743 [nutnr:DLOGP1]:1
2012/12/13 15:30:08.969 [nutnr:DLOGP1]:D
2012/12/13 15:30:09.194 [nutnr:DLOGP1]:Y
2012/12/13 15:30:09.413 [nutnr:DLOGP1]:0
2012/12/13 15:30:09.623 [nutnr:DLOGP1]:Q
2012/12/13 15:30:09.844 [nutnr:DLOGP1]:D
2012/12/13 15:30:10.096 [nutnr:DLOGP1]:O
2012/12/13 15:30:10.349 [nutnr:DLOGP1]:Y
2012/12/13 15:30:10.570 [nutnr:DLOGP1]:5
2012/12/13 15:30:10.779 [nutnr:DLOGP1]:Q
2012/12/13 15:30:10.990 [nutnr:DLOGP1]:Q
2012/12/13 15:30:11.223 [nutnr:DLOGP1]:Y
2012/12/13 15:30:11.703 [nutnr:DLOGP1]:Y
2012/12/13 15:30:12.841 [nutnr:DLOGP1]:2012/12/13 15:30:11
2012/12/13 15:30:13.261 [nutnr:DLOGP1]:Instrument started with initialize
2012/12/13 15:30:19.270 [nutnr:DLOGP1]:onds.
2012/12/13 15:30:20.271 [nutnr:DLOGP1]:ISUS will start in 7 seconds.
2012/12/13 15:30:21.272 [nutnr:DLOGP1]:ISUS will start in 6 seconds.
2012/12/13 15:30:22.272 [nutnr:DLOGP1]:ISUS will start in 5 seconds.
2012/12/13 15:30:23.273 [nutnr:DLOGP1]:ISUS will start in 4 seconds.
2012/12/13 15:30:24.273 [nutnr:DLOGP1]:ISUS will start in 3 seconds.
2012/12/13 15:30:25.274 [nutnr:DLOGP1]:ISUS will start in 2 seconds.
2012/12/13 15:30:26.275 [nutnr:DLOGP1]:ISUS will start in 1 seconds.
2012/12/13 15:30:27.275 [nutnr:DLOGP1]:ISUS will start in 0 seconds.
2012/12/13 15:30:28.309 [nutnr:DLOGP1]:12/13/2012 15:30:26: Message: Entering low power suspension, waiting for trigger.
2012/12/13 15:30:59.889 [nutnr:DLOGP1]: ++++++++++ charged
2012/12/13 15:31:00.584 [nutnr:DLOGP1]: ON Spectrometer.
2012/12/13 15:31:01.366 [nutnr:DLOGP1]:12/13/2012 15:30:59: Message: Spectrometer powered up.
2012/12/13 15:31:01.435 [nutnr:DLOGP1]:12/13/2012 15:30:59: Message: Turning ON UV light source.
2012/12/13 15:31:06.917 [nutnr:DLOGP1]:12/13/2012 15:31:04: Message: UV light source powered up.
2012/12/13 15:31:07.053 [nutnr:DLOGP1]:12/13/2012 15:31:04: Message: Data log file is 'DATA\SCH12348.DAT'.
2012\12\13 15:31:08.726 SATNDC0239,2012348,15.518322,0.00,0.00,0.00,0.00,0.000000
SATNLC0239,2012348,15.518666,-5.48,20.38,-31.12,0.59,0.000231
2012/12/13 15:31:11.405 SATNLC0239,2012348,15.519024,-6.38,24.24,-37.41,0.61,0.000191
2012/12/13 15:31:12.720 SATNLC0239,2012348,15.519397,-6.77,24.80,-38.00,0.62,0.000203
2012/12/13 15:42:25.429 [nutnr:DLOGP1]:ISUS will start in 15 seconds.
2012/12/13 15:42:26.430 [nutnr:DLOGP1]:ISUS will start in 14 seconds.
2012/12/13 15:42:27.431 [nutnr:DLOGP1]:ISUS will start in 13 seconds.
2012/12/13 15:42:28.431 [nutnr:DLOGP1]:ISUS will start in 12 seconds.
2012/12/13 15:42:29.432 [nutnr:DLOGP1]:ISUS will start in 11 seconds.
2012/12/13 15:42:30.433 [nutnr:DLOGP1]:ISUS will start in 10 seconds.
2012/12/13 15:42:31.434 [nutnr:DLOGP1]:ISUS will start in 9 seconds.
2012/12/13 15:42:32.435 [nutnr:DLOGP1]:ISUS will start in 8 seconds.
2012/12/13 15:42:33.436 [nutnr:DLOGP1]:ISUS will start in 7 seconds.
2012/12/13 15:42:34.436 [nutnr:DLOGP1]:ISUS will start in 6 seconds.
2012/12/13 15:42:35.437 [nutnr:DLOGP1]:ISUS will start in 5 seconds.
2012/12/13 15:42:36.438 [nutnr:DLOGP1]:ISUS will start in 4 seconds.
2012/12/13 15:42:37.438 [nutnr:DLOGP1]:ISUS will start in 3 seconds.
2012/12/13 15:42:38.439 [nutnr:DLOGP1]:ISUS will start in 2 seconds.
2012/12/13 15:42:39.440 [nutnr:DLOGP1]:ISUS will start in 1 seconds.
2012/12/13 15:42:40.440 [nutnr:DLOGP1]:ISUS will start in 0 seconds.
2012/12/13 15:42:41.474 [nutnr:DLOGP1]:12/13/2012 15:42:38: Message: Entering low power suspension, waiting for trigger.
2012/12/13 15:45:26.795 [nutnr:DLOGP1]:Idle state, without initialize
2012/12/13 15:45:46.793 [nutnr:DLOGP1]:Instrument started
2012/12/13 17:51:53.412 [nutnr:DLOGP1]:S
2012/12/13 17:51:53.633 [nutnr:DLOGP1]:O
2012/12/13 17:51:53.862 [nutnr:DLOGP1]:S
2012/12/13 17:51:54.088 [nutnr:DLOGP1]:Y
2012/12/13 17:51:54.312 [nutnr:DLOGP1]:1
2012/12/13 17:51:54.548 [nutnr:DLOGP1]:T
2012/12/13 17:51:54.788 [nutnr:DLOGP1]:Y
2012/12/13 17:51:55.011 [nutnr:DLOGP1]:3
2012/12/13 17:51:55.243 [nutnr:DLOGP1]:L
2012/12/13 17:51:55.483 [nutnr:DLOGP1]:Y
2012/12/13 17:51:55.702 [nutnr:DLOGP1]:1
2012/12/13 17:51:55.928 [nutnr:DLOGP1]:D
2012/12/13 17:51:56.154 [nutnr:DLOGP1]:Y
2012/12/13 17:51:56.373 [nutnr:DLOGP1]:0
2012/12/13 17:51:56.582 [nutnr:DLOGP1]:Q
2012/12/13 17:51:56.803 [nutnr:DLOGP1]:D
2012/12/13 17:51:57.055 [nutnr:DLOGP1]:O
2012/12/13 17:51:57.308 [nutnr:DLOGP1]:Y
2012/12/13 17:51:57.529 [nutnr:DLOGP1]:5
2012/12/13 17:51:57.738 [nutnr:DLOGP1]:Q
2012/12/13 17:51:57.948 [nutnr:DLOGP1]:Q
2012/12/13 17:51:58.181 [nutnr:DLOGP1]:Y
2012/12/13 17:51:58.659 [nutnr:DLOGP1]:Y
2012/12/13 17:51:59.747 [nutnr:DLOGP1]:2012/12/13 17:51:58
2012/12/13 17:52:00.166 [nutnr:DLOGP1]:Instrument started with initialize
"""
    def state_callback(self, pos, file_ingested):
        """ Call back method to watch what comes in via the position callback
        :param pos: parser state (position) forwarded by the parser
        :param file_ingested: flag forwarded by the parser; presumably True
            once the whole file has been read -- confirm against the parser
        """
        log.trace("SETTING state_callback_value to " + str(pos))
        # Capture the latest values so the tests can assert on them.
        self.position_callback_value = pos
        self.file_ingested = file_ingested
    def pub_callback(self, pub):
        """ Call back method to watch what comes in via the publish callback
        :param pub: list of particles published by the parser
        """
        log.trace("SETTING publish_callback_value to " + str(pub))
        # Capture the latest published value so the tests can assert on it.
        self.publish_callback_value = pub
    def setUp(self):
        """Build the parser configuration and the expected data particles
        shared by every test, and reset the callback-capture attributes."""
        ParserUnitTestCase.setUp(self)
        self.config = {
            DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.nutnrb',
            DataSetDriverConfigKeys.PARTICLE_CLASS: 'NutnrbDataParticle'
        }
        # not a DataSourceLocation...its just the parser
        self.position = {StateKey.POSITION: 0}
        # Expected particles; the lines below appear verbatim in TEST_DATA
        # (and LONG_DATA for particle_e and particle_z).
        self.particle_a = NutnrbDataParticle("2012/12/13 15:31:08.726 SATNDC0239,2012348,15.518322,0.00,0.00,0.00,0.00,0.000000\n")
        self.particle_b = NutnrbDataParticle("2012/12/13 15:31:10.065 SATNLC0239,2012348,15.518666,-5.48,20.38,-31.12,0.59,0.000231\n")
        self.particle_c = NutnrbDataParticle("2012/12/13 15:31:11.405 SATNLC0239,2012348,15.519024,-6.38,24.24,-37.41,0.61,0.000191\n")
        self.particle_d = NutnrbDataParticle("2012/12/13 15:31:12.720 SATNLC0239,2012348,15.519397,-6.77,24.80,-38.00,0.62,0.000203\n")
        self.particle_e = NutnrbDataParticle("2012/12/13 15:31:14.041 SATNLC0239,2012348,15.519770,-5.28,18.39,-27.76,0.59,0.000212\n")
        self.particle_z = NutnrbDataParticle("2012/12/13 15:31:20.704 SATNLC0239,2012348,15.521605,-7.52,31.35,-49.03,0.63,0.000240\n")
        # Filled in by state_callback / pub_callback during parsing.
        self.position_callback_value = None
        self.publish_callback_value = None
    def assert_result(self, result, position, particle):
        """Verify a single get_records() call: the particle returned, the
        parser state position, and the values pushed to both callbacks."""
        self.assertEqual(result, [particle])
        self.assertEqual(self.parser._state[StateKey.POSITION], position)
        self.assertEqual(self.position_callback_value[StateKey.POSITION], position)
        self.assert_(isinstance(self.publish_callback_value, list))
        self.assertEqual(self.publish_callback_value[0], particle)
def test_happy_path(self):
"""
Test the happy path of operations where the parser takes the input
and spits out a valid data particle given the stream.
"""
new_state = {}
self.stream_handle = StringIO(NutnrbParserUnitTestCase.TEST_DATA)
self.parser = NutnrbParser(self.config, new_state, self.stream_handle,
self.state_callback, self.pub_callback)
result = self.parser.get_records(1)
self.assert_result(result, 2458, self.particle_a)
result = self.parser.get_records(1)
self.assert_result(result, 2544, self.particle_b)
result = self.parser.get_records(1)
self.assert_result(result, 2630, self.particle_c)
result = self.parser.get_records(1)
self.assert_result(result, 2716, self.particle_d)
# no data left, dont move the position
result = self.parser.get_records(1)
self.assertEqual(result, [])
self.assertEqual(self.parser._state[StateKey.POSITION], 2716)
self.assertEqual(self.position_callback_value[StateKey.POSITION], 2716)
self.assert_(isinstance(self.publish_callback_value, list))
self.assertEqual(self.publish_callback_value[0], self.particle_d)
def test_get_many(self):
new_state = {}
self.stream_handle = StringIO(NutnrbParserUnitTestCase.TEST_DATA)
self.parser = NutnrbParser(self.config, new_state, self.stream_handle,
self.state_callback, self.pub_callback)
result = self.parser.get_records(2)
self.assertEqual(result, [self.particle_a, self.particle_b])
self.assertEqual(self.parser._state[StateKey.POSITION], 2544)
self.assertEqual(self.position_callback_value[StateKey.POSITION], 2544)
self.assertEqual(self.publish_callback_value[0], self.particle_a)
self.assertEqual(self.publish_callback_value[1], self.particle_b)
def test_bad_data(self):
# There's a bad sample in the data! Ack! Skip it!
new_state = {}
self.stream_handle = StringIO(NutnrbParserUnitTestCase.BAD_TEST_DATA)
self.parser = NutnrbParser(self.config, new_state, self.stream_handle,
self.state_callback, self.pub_callback)
result = self.parser.get_records(1)
self.assert_result(result, 2603, self.particle_c)
def test_long_stream(self):
new_state = {}
self.stream_handle = StringIO(NutnrbParserUnitTestCase.LONG_DATA)
self.parser = NutnrbParser(self.config, new_state, self.stream_handle,
self.state_callback, self.pub_callback)
result = self.parser.get_records(11)
self.assertEqual(result[-1], self.particle_z)
self.assertEqual(self.parser._state[StateKey.POSITION], 3232)
self.assertEqual(self.position_callback_value[StateKey.POSITION], 3232)
self.assertEqual(self.publish_callback_value[-1], self.particle_z)
def test_mid_state_start(self):
new_state = {StateKey.POSITION:2628}
self.stream_handle = StringIO(NutnrbParserUnitTestCase.TEST_DATA)
self.parser = NutnrbParser(self.config, new_state, self.stream_handle,
self.state_callback, self.pub_callback)
result = self.parser.get_records(1)
self.assert_result(result, 2716, self.particle_d)
def reset_parser(self, state = {}):
self.state_callback_values = []
self.publish_callback_values = []
self.stream_handle = StringIO(NutnrbParserUnitTestCase.TEST_DATA)
self.parser = NutnrbParser(self.config, state, self.stream_handle,
self.state_callback, self.pub_callback)
def test_set_state(self):
new_state = {StateKey.POSITION: 2544}
self.stream_handle = StringIO(NutnrbParserUnitTestCase.TEST_DATA)
self.parser = NutnrbParser(self.config, self.position, self.stream_handle,
self.state_callback, self.pub_callback)
result = self.parser.get_records(1)
self.assert_result(result, 2458, self.particle_a)
self.reset_parser(new_state)
self.parser.set_state(new_state) # seek to after particle_b
result = self.parser.get_records(1)
#
# If particles C and D appear, but the position is off
# it is because you are not consuming newlines in your
# DATA_REGEX pattern
#
self.assert_result(result, 2630, self.particle_c)
result = self.parser.get_records(1)
self.assert_result(result, 2716, self.particle_d)
| [
"StringIO.StringIO",
"mi.dataset.parser.nutnrb.NutnrbDataParticle",
"nose.plugins.attrib.attr",
"mi.core.log.get_logger",
"mi.dataset.parser.nutnrb.NutnrbParser",
"mi.dataset.test.test_parser.ParserUnitTestCase.setUp",
"unittest.skip"
] | [((338, 350), 'mi.core.log.get_logger', 'get_logger', ([], {}), '()\n', (348, 350), False, 'from mi.core.log import get_logger\n'), ((631, 699), 'unittest.skip', 'unittest.skip', (['"""Nutnr parser is broken, timestamp needs to be fixed"""'], {}), "('Nutnr parser is broken, timestamp needs to be fixed')\n", (644, 699), False, 'import unittest\n'), ((701, 725), 'nose.plugins.attrib.attr', 'attr', (['"""UNIT"""'], {'group': '"""mi"""'}), "('UNIT', group='mi')\n", (705, 725), False, 'from nose.plugins.attrib import attr\n'), ((17385, 17415), 'mi.dataset.test.test_parser.ParserUnitTestCase.setUp', 'ParserUnitTestCase.setUp', (['self'], {}), '(self)\n', (17409, 17415), False, 'from mi.dataset.test.test_parser import ParserUnitTestCase\n'), ((17740, 17858), 'mi.dataset.parser.nutnrb.NutnrbDataParticle', 'NutnrbDataParticle', (['"""2012/12/13 15:31:08.726 SATNDC0239,2012348,15.518322,0.00,0.00,0.00,0.00,0.000000\n"""'], {}), '(\n """2012/12/13 15:31:08.726 SATNDC0239,2012348,15.518322,0.00,0.00,0.00,0.00,0.000000\n"""\n )\n', (17758, 17858), False, 'from mi.dataset.parser.nutnrb import NutnrbParser, NutnrbDataParticle, StateKey\n'), ((17872, 17994), 'mi.dataset.parser.nutnrb.NutnrbDataParticle', 'NutnrbDataParticle', (['"""2012/12/13 15:31:10.065 SATNLC0239,2012348,15.518666,-5.48,20.38,-31.12,0.59,0.000231\n"""'], {}), '(\n """2012/12/13 15:31:10.065 SATNLC0239,2012348,15.518666,-5.48,20.38,-31.12,0.59,0.000231\n"""\n )\n', (17890, 17994), False, 'from mi.dataset.parser.nutnrb import NutnrbParser, NutnrbDataParticle, StateKey\n'), ((18008, 18130), 'mi.dataset.parser.nutnrb.NutnrbDataParticle', 'NutnrbDataParticle', (['"""2012/12/13 15:31:11.405 SATNLC0239,2012348,15.519024,-6.38,24.24,-37.41,0.61,0.000191\n"""'], {}), '(\n """2012/12/13 15:31:11.405 SATNLC0239,2012348,15.519024,-6.38,24.24,-37.41,0.61,0.000191\n"""\n )\n', (18026, 18130), False, 'from mi.dataset.parser.nutnrb import NutnrbParser, NutnrbDataParticle, StateKey\n'), ((18144, 18266), 
'mi.dataset.parser.nutnrb.NutnrbDataParticle', 'NutnrbDataParticle', (['"""2012/12/13 15:31:12.720 SATNLC0239,2012348,15.519397,-6.77,24.80,-38.00,0.62,0.000203\n"""'], {}), '(\n """2012/12/13 15:31:12.720 SATNLC0239,2012348,15.519397,-6.77,24.80,-38.00,0.62,0.000203\n"""\n )\n', (18162, 18266), False, 'from mi.dataset.parser.nutnrb import NutnrbParser, NutnrbDataParticle, StateKey\n'), ((18280, 18402), 'mi.dataset.parser.nutnrb.NutnrbDataParticle', 'NutnrbDataParticle', (['"""2012/12/13 15:31:14.041 SATNLC0239,2012348,15.519770,-5.28,18.39,-27.76,0.59,0.000212\n"""'], {}), '(\n """2012/12/13 15:31:14.041 SATNLC0239,2012348,15.519770,-5.28,18.39,-27.76,0.59,0.000212\n"""\n )\n', (18298, 18402), False, 'from mi.dataset.parser.nutnrb import NutnrbParser, NutnrbDataParticle, StateKey\n'), ((18416, 18538), 'mi.dataset.parser.nutnrb.NutnrbDataParticle', 'NutnrbDataParticle', (['"""2012/12/13 15:31:20.704 SATNLC0239,2012348,15.521605,-7.52,31.35,-49.03,0.63,0.000240\n"""'], {}), '(\n """2012/12/13 15:31:20.704 SATNLC0239,2012348,15.521605,-7.52,31.35,-49.03,0.63,0.000240\n"""\n )\n', (18434, 18538), False, 'from mi.dataset.parser.nutnrb import NutnrbParser, NutnrbDataParticle, StateKey\n'), ((19257, 19301), 'StringIO.StringIO', 'StringIO', (['NutnrbParserUnitTestCase.TEST_DATA'], {}), '(NutnrbParserUnitTestCase.TEST_DATA)\n', (19265, 19301), False, 'from StringIO import StringIO\n'), ((19324, 19425), 'mi.dataset.parser.nutnrb.NutnrbParser', 'NutnrbParser', (['self.config', 'new_state', 'self.stream_handle', 'self.state_callback', 'self.pub_callback'], {}), '(self.config, new_state, self.stream_handle, self.\n state_callback, self.pub_callback)\n', (19336, 19425), False, 'from mi.dataset.parser.nutnrb import NutnrbParser, NutnrbDataParticle, StateKey\n'), ((20370, 20414), 'StringIO.StringIO', 'StringIO', (['NutnrbParserUnitTestCase.TEST_DATA'], {}), '(NutnrbParserUnitTestCase.TEST_DATA)\n', (20378, 20414), False, 'from StringIO import StringIO\n'), ((20437, 20538), 
'mi.dataset.parser.nutnrb.NutnrbParser', 'NutnrbParser', (['self.config', 'new_state', 'self.stream_handle', 'self.state_callback', 'self.pub_callback'], {}), '(self.config, new_state, self.stream_handle, self.\n state_callback, self.pub_callback)\n', (20449, 20538), False, 'from mi.dataset.parser.nutnrb import NutnrbParser, NutnrbDataParticle, StateKey\n'), ((21122, 21170), 'StringIO.StringIO', 'StringIO', (['NutnrbParserUnitTestCase.BAD_TEST_DATA'], {}), '(NutnrbParserUnitTestCase.BAD_TEST_DATA)\n', (21130, 21170), False, 'from StringIO import StringIO\n'), ((21193, 21294), 'mi.dataset.parser.nutnrb.NutnrbParser', 'NutnrbParser', (['self.config', 'new_state', 'self.stream_handle', 'self.state_callback', 'self.pub_callback'], {}), '(self.config, new_state, self.stream_handle, self.\n state_callback, self.pub_callback)\n', (21205, 21294), False, 'from mi.dataset.parser.nutnrb import NutnrbParser, NutnrbDataParticle, StateKey\n'), ((21513, 21557), 'StringIO.StringIO', 'StringIO', (['NutnrbParserUnitTestCase.LONG_DATA'], {}), '(NutnrbParserUnitTestCase.LONG_DATA)\n', (21521, 21557), False, 'from StringIO import StringIO\n'), ((21580, 21681), 'mi.dataset.parser.nutnrb.NutnrbParser', 'NutnrbParser', (['self.config', 'new_state', 'self.stream_handle', 'self.state_callback', 'self.pub_callback'], {}), '(self.config, new_state, self.stream_handle, self.\n state_callback, self.pub_callback)\n', (21592, 21681), False, 'from mi.dataset.parser.nutnrb import NutnrbParser, NutnrbDataParticle, StateKey\n'), ((22148, 22192), 'StringIO.StringIO', 'StringIO', (['NutnrbParserUnitTestCase.TEST_DATA'], {}), '(NutnrbParserUnitTestCase.TEST_DATA)\n', (22156, 22192), False, 'from StringIO import StringIO\n'), ((22215, 22316), 'mi.dataset.parser.nutnrb.NutnrbParser', 'NutnrbParser', (['self.config', 'new_state', 'self.stream_handle', 'self.state_callback', 'self.pub_callback'], {}), '(self.config, new_state, self.stream_handle, self.\n state_callback, self.pub_callback)\n', (22227, 
22316), False, 'from mi.dataset.parser.nutnrb import NutnrbParser, NutnrbDataParticle, StateKey\n'), ((22601, 22645), 'StringIO.StringIO', 'StringIO', (['NutnrbParserUnitTestCase.TEST_DATA'], {}), '(NutnrbParserUnitTestCase.TEST_DATA)\n', (22609, 22645), False, 'from StringIO import StringIO\n'), ((22668, 22764), 'mi.dataset.parser.nutnrb.NutnrbParser', 'NutnrbParser', (['self.config', 'state', 'self.stream_handle', 'self.state_callback', 'self.pub_callback'], {}), '(self.config, state, self.stream_handle, self.state_callback,\n self.pub_callback)\n', (22680, 22764), False, 'from mi.dataset.parser.nutnrb import NutnrbParser, NutnrbDataParticle, StateKey\n'), ((22902, 22946), 'StringIO.StringIO', 'StringIO', (['NutnrbParserUnitTestCase.TEST_DATA'], {}), '(NutnrbParserUnitTestCase.TEST_DATA)\n', (22910, 22946), False, 'from StringIO import StringIO\n'), ((22969, 23074), 'mi.dataset.parser.nutnrb.NutnrbParser', 'NutnrbParser', (['self.config', 'self.position', 'self.stream_handle', 'self.state_callback', 'self.pub_callback'], {}), '(self.config, self.position, self.stream_handle, self.\n state_callback, self.pub_callback)\n', (22981, 23074), False, 'from mi.dataset.parser.nutnrb import NutnrbParser, NutnrbDataParticle, StateKey\n')] |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Name: test_component
# Purpose: Test driver for module component
#
# Author: <NAME> (<EMAIL>)
#
# Copyright: (c) 2014 <NAME>
# ----------------------------------------------------------------------------
# $Source$
# $Revision$
"""Test driver for module component"""
from abc import ABC
from numbers import Number
from typing import Tuple
import unittest
from camd3.infrastructure.component import (
Attribute, Component, ComponentLookupError, Immutable, implementer)
from camd3.infrastructure.component.component import _ABCSet, ComponentMeta
DFLT_NAMESPACE = ('__module__', '__qualname__', '__doc__')
class TestComp1(Component):
"""TestComp1"""
class TestComp2(Component):
"""TestComp2"""
attr1 = Attribute()
attr2 = Attribute()
def meth(self):
pass
attr3 = Attribute()
@property
def prop(self):
pass
@implementer(TestComp1, TestComp2)
class TestImpl(Component):
def __init__(self):
pass
class TestComp1Factory:
def __call__(self, i: Number) -> TestComp1:
return TestImpl()
def Number2TestComp1(i: Number) -> TestComp1: # noqa: D103
return TestImpl()
def Str2TestComp2(s: str) -> TestComp2: # noqa: D103
return TestImpl()
@implementer(TestComp1)
class TestABC(ABC):
pass
class TestComp3(Component):
"""TestComp3"""
def __init_subclass__(subcls, **kwds): # noqa: D105
try:
param = kwds.pop('param')
except KeyError:
pass
else:
subcls.param = param
class TestImpl3(TestComp3, param='P'):
pass
class TestComp4(Component, Immutable):
"""TestComp4"""
class TestComp5(TestComp4):
"""TestComp5"""
a = Attribute()
b = Attribute()
class TestComp6(TestComp5):
"""TestComp6"""
c = Attribute()
def __init__(self, a, b, c):
self.a = a
self.b = b
self.c = c
def Obj2TestComp6(obj: object) -> TestComp6: # noqa: D103
return TestComp6(obj, None, None)
def Tuple2TestComp6(tpl: Tuple[int, int, str]) -> TestComp6: # noqa: D103
return TestComp6(*tpl)
class TestComp7(TestComp6):
"""TestComp7"""
class TestComp8(TestComp1):
"""TestComp8"""
def __init__(self, a):
self.a = a
self.i = self.initialized
class ABCSetTest(unittest.TestCase):
def testAdd(self):
cls_list = (ABC, Immutable, TestComp4, Component)
self.assertEqual(_ABCSet(cls_list), _ABCSet({ABC, TestComp4}))
cls_list = (int, object, Number, float)
self.assertEqual(_ABCSet(cls_list), _ABCSet({int, float}))
class ComponentMetaTest(unittest.TestCase):
def test_constructor(self):
# name of descriptors
for name in ('attr1', 'attr2', 'attr3'):
self.assertEqual(getattr(getattr(TestComp2, name, None),
'name', None), name)
# __slots__ forced?
self.assertEqual(getattr(TestComp4, '__slots__', None), ())
self.assertEqual(getattr(TestComp5, '__slots__', None), ('_a', '_b'))
self.assertEqual(getattr(TestComp6, '__slots__', None), ('_c',))
self.assertRaises(TypeError, ComponentMeta, 'Test',
(TestImpl, TestComp4), {})
def test_init_subclass(self):
# init_subclass turned into a class method?
meth = TestComp3.__init_subclass__
self.assertTrue(getattr(meth, '__self__', None) is TestComp3)
# __init_subclass called?
self.assertEqual(getattr(TestImpl3, 'param', None), 'P')
def test_attr_names(self):
self.assertEqual(TestComp2.attr_names, ('attr1', 'attr2', 'attr3'))
self.assertEqual(TestComp2.all_attr_names,
('attr1', 'attr2', 'attr3'))
self.assertEqual(TestImpl.attr_names, ())
self.assertEqual(TestImpl.all_attr_names, ())
self.assertEqual(TestComp6.attr_names, ('c',))
self.assertEqual(TestComp6.all_attr_names, ('a', 'b', 'c'))
def test_implementer(self):
self.assertTrue(issubclass(TestImpl, TestComp1))
self.assertTrue(issubclass(TestImpl, TestComp2))
self.assertEqual(TestImpl.__virtual_bases__, {TestComp1, TestComp2})
self.assertTrue(issubclass(TestABC, TestComp1))
def test_adaptation(self):
# wrong component
self.assertRaises(AssertionError, TestComp2.add_adapter,
TestComp1Factory())
self.assertRaises(AssertionError, TestComp2.add_adapter,
Tuple2TestComp6)
# wrong number of args
func = lambda x, y: TestComp2()
func.__annotations__ = {'return': TestComp2, 'x': int, 'y': int}
self.assertRaises(AssertionError, TestComp2.add_adapter, func)
# variable number of args
func = lambda *args: TestComp2()
func.__annotations__ = {'return': TestComp2, 'args': int}
self.assertRaises(AssertionError, TestComp2.add_adapter, func)
# register some adapters
fct = TestComp1Factory()
TestComp1.add_adapter(fct)
self.assertIn(Number, TestComp1.__adapters__)
self.assertIn(fct, TestComp1.__adapters__[Number])
TestComp1.add_adapter(Number2TestComp1)
self.assertIn(Number2TestComp1, TestComp1.__adapters__[Number])
TestComp1.add_adapter(Number2TestComp1)
self.assertEqual(len(TestComp1.__adapters__[Number]), 2)
TestComp2.add_adapter(Str2TestComp2)
self.assertIn(str, TestComp2.__adapters__)
self.assertIn(Str2TestComp2, TestComp2.__adapters__[str])
TestComp6.add_adapter(Tuple2TestComp6)
adapter = TestComp6.add_adapter(Obj2TestComp6)
self.assertEqual(adapter, Obj2TestComp6)
# retrieve adapters
self.assertEqual(TestComp1.get_adapter(5), Number2TestComp1)
self.assertEqual(TestComp1.get_adapter(5.0), Number2TestComp1)
self.assertRaises(ComponentLookupError, TestComp1.get_adapter, 'x')
self.assertEqual(TestComp2.get_adapter('abc'), Str2TestComp2)
self.assertRaises(ComponentLookupError, TestComp2.get_adapter, 3)
self.assertEqual(TestComp6.get_adapter((3, 1, 'x')), Tuple2TestComp6)
self.assertEqual(TestComp6.get_adapter([3, 1, 'x']), Obj2TestComp6)
self.assertEqual(TestComp6.get_adapter(TestComp6(3, 1, 'x')),
Obj2TestComp6)
self.assertEqual(TestComp4.get_adapter((3, 1, 'x')), Tuple2TestComp6)
self.assertEqual(TestComp4.get_adapter([3, 1, 'x']), Obj2TestComp6)
# adapt objects
self.assertIsInstance(TestComp1.adapt(5), TestComp1)
self.assertIsInstance(TestComp1[5.0], TestComp1)
self.assertIsInstance(TestComp2.adapt('x'), TestComp2)
t1 = TestComp6.adapt((5, 17, 'abc'))
self.assertIsInstance(t1, TestComp6)
self.assertEqual((t1.a, t1.b, t1.c), (5, 17, 'abc'))
t2 = TestComp6.adapt(fct)
self.assertIsInstance(t2, TestComp6)
self.assertIs(t2.a, fct)
self.assertRaises(TypeError, TestComp7.adapt, t2)
t3 = TestComp6(4, 9, 'y')
for ct in (TestComp6, TestComp5, TestComp4):
self.assertIs(ct.adapt(t3), t3)
def test_repr(self):
self.assertEqual(repr(TestComp3),
'.'.join((__name__, TestComp3.__qualname__)))
class ComponentTest(unittest.TestCase):
def test_constructor(self):
comp = TestComp8(19)
self.assertFalse(comp.i)
self.assertTrue(comp.initialized)
if __name__ == '__main__':
unittest.main()
| [
"camd3.infrastructure.component.implementer",
"camd3.infrastructure.component.component._ABCSet",
"unittest.main",
"camd3.infrastructure.component.Attribute"
] | [((994, 1027), 'camd3.infrastructure.component.implementer', 'implementer', (['TestComp1', 'TestComp2'], {}), '(TestComp1, TestComp2)\n', (1005, 1027), False, 'from camd3.infrastructure.component import Attribute, Component, ComponentLookupError, Immutable, implementer\n'), ((1399, 1421), 'camd3.infrastructure.component.implementer', 'implementer', (['TestComp1'], {}), '(TestComp1)\n', (1410, 1421), False, 'from camd3.infrastructure.component import Attribute, Component, ComponentLookupError, Immutable, implementer\n'), ((848, 859), 'camd3.infrastructure.component.Attribute', 'Attribute', ([], {}), '()\n', (857, 859), False, 'from camd3.infrastructure.component import Attribute, Component, ComponentLookupError, Immutable, implementer\n'), ((872, 883), 'camd3.infrastructure.component.Attribute', 'Attribute', ([], {}), '()\n', (881, 883), False, 'from camd3.infrastructure.component import Attribute, Component, ComponentLookupError, Immutable, implementer\n'), ((931, 942), 'camd3.infrastructure.component.Attribute', 'Attribute', ([], {}), '()\n', (940, 942), False, 'from camd3.infrastructure.component import Attribute, Component, ComponentLookupError, Immutable, implementer\n'), ((1889, 1900), 'camd3.infrastructure.component.Attribute', 'Attribute', ([], {}), '()\n', (1898, 1900), False, 'from camd3.infrastructure.component import Attribute, Component, ComponentLookupError, Immutable, implementer\n'), ((1909, 1920), 'camd3.infrastructure.component.Attribute', 'Attribute', ([], {}), '()\n', (1918, 1920), False, 'from camd3.infrastructure.component import Attribute, Component, ComponentLookupError, Immutable, implementer\n'), ((1980, 1991), 'camd3.infrastructure.component.Attribute', 'Attribute', ([], {}), '()\n', (1989, 1991), False, 'from camd3.infrastructure.component import Attribute, Component, ComponentLookupError, Immutable, implementer\n'), ((7734, 7749), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7747, 7749), False, 'import unittest\n'), ((2631, 
2648), 'camd3.infrastructure.component.component._ABCSet', '_ABCSet', (['cls_list'], {}), '(cls_list)\n', (2638, 2648), False, 'from camd3.infrastructure.component.component import _ABCSet, ComponentMeta\n'), ((2650, 2675), 'camd3.infrastructure.component.component._ABCSet', '_ABCSet', (['{ABC, TestComp4}'], {}), '({ABC, TestComp4})\n', (2657, 2675), False, 'from camd3.infrastructure.component.component import _ABCSet, ComponentMeta\n'), ((2750, 2767), 'camd3.infrastructure.component.component._ABCSet', '_ABCSet', (['cls_list'], {}), '(cls_list)\n', (2757, 2767), False, 'from camd3.infrastructure.component.component import _ABCSet, ComponentMeta\n'), ((2769, 2790), 'camd3.infrastructure.component.component._ABCSet', '_ABCSet', (['{int, float}'], {}), '({int, float})\n', (2776, 2790), False, 'from camd3.infrastructure.component.component import _ABCSet, ComponentMeta\n')] |
import os
from json import JSONDecodeError
from json import dump
from json import load
import numpy as np
from core.net_errors import JsonFileStructureIncorrect, JsonFileNotFound
def upload(net_object, path):
if not os.path.isfile(path):
raise JsonFileNotFound()
try:
with open(path, 'r') as file:
deserialized_file = load(file)
net_object.config = deserialized_file['config']
net_object.tags = deserialized_file.get('tags')
net_object.net = deserialized_file.get('net')
net_object.deviation = deserialized_file.get('normalization')
if net_object.net:
for l in range(1, len(net_object.config)):
net_object.net[l - 1]['w'] = np.array(net_object.net[l - 1]['w'])
net_object.net[l - 1]['o'] = np.zeros((net_object.config[l]))
except KeyError:
raise JsonFileStructureIncorrect()
except JSONDecodeError:
raise
def unload(net_object, path):
try:
net_copy = []
for l in range(len(net_object.net)):
net_copy.append({'w': net_object.net[l]['w'].tolist()})
with open(path, 'w') as file:
file_dictionary = {
'config': net_object.config,
'tags': net_object.tags,
'net': net_copy,
'normalization': net_object.normalization
}
dump(file_dictionary, file, sort_keys=True, indent=4)
except JSONDecodeError:
raise
| [
"core.net_errors.JsonFileStructureIncorrect",
"core.net_errors.JsonFileNotFound",
"os.path.isfile",
"numpy.array",
"numpy.zeros",
"json.load",
"json.dump"
] | [((224, 244), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (238, 244), False, 'import os\n'), ((260, 278), 'core.net_errors.JsonFileNotFound', 'JsonFileNotFound', ([], {}), '()\n', (276, 278), False, 'from core.net_errors import JsonFileStructureIncorrect, JsonFileNotFound\n'), ((359, 369), 'json.load', 'load', (['file'], {}), '(file)\n', (363, 369), False, 'from json import load\n'), ((917, 945), 'core.net_errors.JsonFileStructureIncorrect', 'JsonFileStructureIncorrect', ([], {}), '()\n', (943, 945), False, 'from core.net_errors import JsonFileStructureIncorrect, JsonFileNotFound\n'), ((1438, 1491), 'json.dump', 'dump', (['file_dictionary', 'file'], {'sort_keys': '(True)', 'indent': '(4)'}), '(file_dictionary, file, sort_keys=True, indent=4)\n', (1442, 1491), False, 'from json import dump\n'), ((762, 798), 'numpy.array', 'np.array', (["net_object.net[l - 1]['w']"], {}), "(net_object.net[l - 1]['w'])\n", (770, 798), True, 'import numpy as np\n'), ((848, 878), 'numpy.zeros', 'np.zeros', (['net_object.config[l]'], {}), '(net_object.config[l])\n', (856, 878), True, 'import numpy as np\n')] |
from env_wrapper import SubprocVecEnv, DummyVecEnv
import numpy as np
import multiagent.scenarios as scenarios
from multiagent.environment import MultiAgentEnv
def make_parallel_env(n_rollout_threads, seed=1):
def get_env_fn(rank):
def init_env():
env = make_env("simple_adversary")
env.seed(seed + rank * 1000)
np.random.seed(seed + rank * 1000)
return env
return init_env
# if n_rollout_threads == 1:
# return DummyVecEnv([get_env_fn(0)])
# else:
return SubprocVecEnv([get_env_fn(i) for i in range(n_rollout_threads)])
def make_env(scenario_name, benchmark=False):
scenario = scenarios.load(scenario_name + ".py").Scenario()
world = scenario.make_world()
env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation)
return env
| [
"multiagent.scenarios.load",
"multiagent.environment.MultiAgentEnv",
"numpy.random.seed"
] | [((765, 851), 'multiagent.environment.MultiAgentEnv', 'MultiAgentEnv', (['world', 'scenario.reset_world', 'scenario.reward', 'scenario.observation'], {}), '(world, scenario.reset_world, scenario.reward, scenario.\n observation)\n', (778, 851), False, 'from multiagent.environment import MultiAgentEnv\n'), ((362, 396), 'numpy.random.seed', 'np.random.seed', (['(seed + rank * 1000)'], {}), '(seed + rank * 1000)\n', (376, 396), True, 'import numpy as np\n'), ((672, 709), 'multiagent.scenarios.load', 'scenarios.load', (["(scenario_name + '.py')"], {}), "(scenario_name + '.py')\n", (686, 709), True, 'import multiagent.scenarios as scenarios\n')] |
#!/usr/bin/env python
import os
import sys
import anyconfig
import re
import time
import importlib
import pdb
from select import poll, POLLIN
from statsd import StatsClient
def import_class(klass):
(module, klass) = klass.rsplit('.', 1)
module = importlib.import_module(module)
return getattr(module, klass)
sample_cfg = """
verbose=false
[statsd]
host="127.0.0.1"
port=8127
prefix="amdtemp.gpu"
[metrics.card0.temp]
path="/sys/class/drm/card0/device/hwmon/hwmon2/temp1_input"
[metrics.card0.pwm]
path="/sys/class/drm/card0/device/hwmon/hwmon2/pwm1"
[metrics.card0.memory]
path="/sys/class/drm/card0/device/pp_dpm_mclk"
parser="amdtemp.parsers.RegexParser"
[metrics.card0.memory.parser_options]
regex='(?P<power_state>\d):\s(?P<current_clck>\d+)[KMG]?hz\s\*'
[metrics.card0.core]
path="/sys/class/drm/card0/device/pp_dpm_sclk"
parser="amdtemp.parsers.RegexParser"
[metrics.card0.core.parser_options]
regex='(?P<power_state>\d):\s(?P<current_clck>\d+)[KMG]?hz\s\*'
"""
VERBOSE=False
class Metric:
def __init__(self, name, path, parser="amdtemp.parsers.IntParser", parser_options={}):
self.name = name
self.path = path
self.parser = import_class(parser)(name, **parser_options)
def poll(self):
with open(self.path, "r") as f:
raw = f.read()
if VERBOSE: print("Raw input (%s): %s" % (self.name, raw))
return self.parser.parse(raw)
def __str__(self):
return "Metric (%s), located at %s." % (name, path)
def record(client, fields):
if (VERBOSE):
[print("%s: %d" % (metric, value)) for metric, value in fields.items()]
for metric, value in fields.items():
client.gauge(metric, value)
def monitor(config):
statsd_client = StatsClient(**config['statsd'])
sources = get_sources_list(config)
while(True):
with statsd_client.pipeline() as batch:
for metrics in sources.values():
if VERBOSE: print("Recording...")
for m in metrics:
fields = m.poll()
if VERBOSE: print(fields)
record(batch, fields)
time.sleep(1)
def get_sources_list(config):
sources = dict()
for source, metric in config['metrics'].items():
prefix = lambda name: '.'.join((source, name))
sources[source] = [Metric(prefix(metric), **config) for metric, config in metric.items()]
return sources
usage = """
AMDTemp
This program uses the amdgpu sysfs interface to collect gpu information and send
over a statsd server.
Usage: amdtemp <configfile>
A sample config will be output when called without arguments.
"""
def main():
if len(sys.argv) == 1:
print(sample_cfg)
exit(0)
if re.match(r"(-h|--help|--usage)", sys.argv[1]):
print(usage)
exit(1)
config = anyconfig.load(sys.argv[1])
VERBOSE = config.get('verbose', False)
if (VERBOSE): print(config)
# import pdb; pdb.set_trace()
monitor(config)
| [
"anyconfig.load",
"importlib.import_module",
"re.match",
"time.sleep",
"statsd.StatsClient"
] | [((256, 287), 'importlib.import_module', 'importlib.import_module', (['module'], {}), '(module)\n', (279, 287), False, 'import importlib\n'), ((1762, 1793), 'statsd.StatsClient', 'StatsClient', ([], {}), "(**config['statsd'])\n", (1773, 1793), False, 'from statsd import StatsClient\n'), ((2763, 2807), 're.match', 're.match', (['"""(-h|--help|--usage)"""', 'sys.argv[1]'], {}), "('(-h|--help|--usage)', sys.argv[1])\n", (2771, 2807), False, 'import re\n'), ((2861, 2888), 'anyconfig.load', 'anyconfig.load', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (2875, 2888), False, 'import anyconfig\n'), ((2161, 2174), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2171, 2174), False, 'import time\n')] |
#!/usr/bin/env python3
import os
import time
import binascii
import codecs
def swap_order(d, wsz=16, gsz=2 ):
return "".join(["".join([m[i:i+gsz] for i in range(wsz-gsz,-gsz,-gsz)]) for m in [d[i:i+wsz] for i in range(0,len(d),wsz)]])
expected_genesis_hash = "00000009c4e61bee0e8d6236f847bb1dd23f4c61ca5240b74852184c9bf98c30"
blockheader1 = "020000005bf0e2f283edac06ea087a9324dc9bd865c79b175658849bd83900000000000085246da7e6e530d276d5f8e0d4222cb8938f7af0e9d6678ec08ff133812f4b7251e8e35cec16471af51dd61c"
expected_hash1 = "00000000623c9e9d39c1fb7ab7290b3014b6348d10c54aa6ab6fc408385dfaa6"
def read_fpga_temprature():
#XADC IP is connected to xdma pcie IP via axi4-lite interface on base-addr 0x40000000
# open port and then read from dedicated register
fd = os.open("/dev/xdma0_user", os.O_RDWR)
temp = os.pread(fd,32,0x0000 + 0x200)[::-1] # read temerature out from temperature register
temp_reg = int.from_bytes(temp, "big")
t = ((int(temp_reg)/65536.0)/0.00198421639) - 273.15
# print("--------------------------------------")
print ("Temperature : {} Celsius".format(t))
# print("--------------------------------------")
os.close(fd)
# Read FPGA MAX Temprature
def read_fpga_maxtemprature():
#XADC IP is connected to xdma pcie IP via axi4-lite interface on base-addr 0x40000000
# open port and then read from dedicated register
fd = os.open("/dev/xdma0_user", os.O_RDWR)
temp = os.pread(fd,32,0x0000 + 0x280)[::-1] # read maxtemerature out from temperature register
temp_reg = int.from_bytes(temp, "big")
t = ((int(temp_reg)/65536.0)/0.00198421639) - 273.15
# print("--------------------------------------")
print ("Temperature Max: {} Celsius".format(t))
# print("--------------------------------------")
os.close(fd)
# Read vccint voltage
def read_fpga_VCCINT():
#XADC IP is connected to xdma pcie IP via axi4-lite interface on base-addr 0x40000000
# open port and then read from dedicated register
fd = os.open("/dev/xdma0_user", os.O_RDWR)
vint_temp = os.pread(fd,32,0x0000 + 0x204)[::-1] # read voltage out from vccint register
volt_int = int.from_bytes(vint_temp, "big")
vint = ((volt_int) * 3.0)/65536.0
# print("--------------------------------------")
print ("VCCINT : {0:.04f} V".format(vint))
# print("--------------------------------------")
os.close(fd)
# # Read max vccint voltage
# def read_fpga_maxVCCINT():
# #XADC IP is connected to xdma pcie IP via axi4-lite interface on base-addr 0x40000000
# # open port and then read from dedicated register
# fd = os.open("/dev/xdma0_user", os.O_RDWR)
# vint_temp = os.pread(fd,32,0x0000 + 0x284)[::-1] # read max voltage out from vccint register
# volt_int = int.from_bytes(vint_temp, "big")
# print(volt_int)
# vint = ((volt_int) * 3.0)/65536.0
# print("--------------------------------------")
# print ("VCCINT max: {0:.04f} V".format(vint))
# print("--------------------------------------")
# os.close(fd)
# Read vccaux , read fpga auxillary volatges
def read_fpga_VCCAUX():
#XADC IP is connected to xdma pcie IP via axi4-lite interface on base-addr 0x40000000
# open port and then read from dedicated register
fd = os.open("/dev/xdma0_user", os.O_RDWR)
vaux_temp = os.pread(fd,32,0x0000 + 0x208)[::-1] # read voltage out from vccaux register
volt_int = int.from_bytes(vaux_temp, "big")
vint = ((volt_int) * 3.0)/65536.0
# print("--------------------------------------")
print ("VCCAUX : {0:.04f} V".format(vint))
# print("--------------------------------------")
os.close(fd)
def read_fpga_VCCBRAM():
    """Read the FPGA block-RAM supply voltage (VCCBRAM) via the XADC and print it.

    The XADC IP is reached through the XDMA PCIe AXI4-Lite window
    (/dev/xdma0_user); the VCCBRAM status register sits at offset 0x218.
    """
    fd = os.open("/dev/xdma0_user", os.O_RDWR)
    try:
        # Register bytes are reversed so they decode as big-endian.
        vbram_temp = os.pread(fd, 32, 0x0000 + 0x218)[::-1]
        volt_int = int.from_bytes(vbram_temp, "big")
        # 16-bit ADC code scaled to the 3.0 V full-scale range.
        vint = (volt_int * 3.0) / 65536.0
        print("VCCBRAM : {0:.04f} V".format(vint))
    finally:
        # Close the device even if the read fails (the original leaked fd here).
        os.close(fd)
def hash_genesis_block():
    """Stream a hard-coded block header to the FPGA hasher and print the digest.

    The hex header (field widths match an 80-byte block header: version,
    prev-hash, merkle root, timestamp, bits, nonce — TODO confirm) is
    byte-swapped with swap_order() (defined elsewhere in this file), written
    over the XDMA host-to-card channel, and the 32-byte result is read back
    over the card-to-host channel together with the elapsed time.
    """
    blockheader = ("02000000" +
                   "a4051e368bfa0191e6c747507dd0fdb03da1a0a54ed14829810b97c6ac070000" +
                   "e932b0f6b8da85ccc464d9d5066d01d904fb05ae8d1ddad7095b9148e3f08ba6" +
                   "bcfb6459" +
                   "f0ff0f1e" +
                   "3682bb08")
    print("txdata:%s" % blockheader)
    blockheader_bin = binascii.unhexlify(swap_order(blockheader))
    tx_data = blockheader_bin
    # Open the host-to-card and card-to-host DMA channels.
    fd_h2c = os.open("/dev/xdma/card0/h2c0", os.O_WRONLY)
    fd_c2h = os.open("/dev/xdma/card0/c2h0", os.O_RDONLY)
    try:
        start_time = time.time()
        # Send to FPGA
        os.pwrite(fd_h2c, tx_data, 0)
        # Receive from FPGA
        rx_data = os.pread(fd_c2h, 32, 0)
        end_time = time.time()
        delay = end_time - start_time
        blockheder_rx = codecs.encode(rx_data, 'hex').decode('ascii')
        print("rxdata:%s" % swap_order(blockheder_rx)[0:64])
        print("Time elapsed:%f microsec" % (delay * 1000000))
    finally:
        # Close both channels even on a failed transfer (the original leaked
        # the fds on any exception between open and close).
        os.close(fd_h2c)
        os.close(fd_c2h)
##############################################
def main():
    """Exercise the hasher once, then dump every on-chip sensor reading."""
    hash_genesis_block()
    read_fpga_temprature()
    read_fpga_maxtemprature()
    read_fpga_VCCINT()
    read_fpga_VCCAUX()
    read_fpga_VCCBRAM()


##############################################
if __name__ == "__main__":
    main()
| [
"os.pread",
"os.close",
"os.open",
"os.pwrite",
"codecs.encode",
"time.time"
] | [((778, 815), 'os.open', 'os.open', (['"""/dev/xdma0_user"""', 'os.O_RDWR'], {}), "('/dev/xdma0_user', os.O_RDWR)\n", (785, 815), False, 'import os\n'), ((1175, 1187), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (1183, 1187), False, 'import os\n'), ((1401, 1438), 'os.open', 'os.open', (['"""/dev/xdma0_user"""', 'os.O_RDWR'], {}), "('/dev/xdma0_user', os.O_RDWR)\n", (1408, 1438), False, 'import os\n'), ((1804, 1816), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (1812, 1816), False, 'import os\n'), ((2017, 2054), 'os.open', 'os.open', (['"""/dev/xdma0_user"""', 'os.O_RDWR'], {}), "('/dev/xdma0_user', os.O_RDWR)\n", (2024, 2054), False, 'import os\n'), ((2395, 2407), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (2403, 2407), False, 'import os\n'), ((3278, 3315), 'os.open', 'os.open', (['"""/dev/xdma0_user"""', 'os.O_RDWR'], {}), "('/dev/xdma0_user', os.O_RDWR)\n", (3285, 3315), False, 'import os\n'), ((3656, 3668), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (3664, 3668), False, 'import os\n'), ((3848, 3885), 'os.open', 'os.open', (['"""/dev/xdma0_user"""', 'os.O_RDWR'], {}), "('/dev/xdma0_user', os.O_RDWR)\n", (3855, 3885), False, 'import os\n'), ((4230, 4242), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (4238, 4242), False, 'import os\n'), ((4683, 4727), 'os.open', 'os.open', (['"""/dev/xdma/card0/h2c0"""', 'os.O_WRONLY'], {}), "('/dev/xdma/card0/h2c0', os.O_WRONLY)\n", (4690, 4727), False, 'import os\n'), ((4741, 4785), 'os.open', 'os.open', (['"""/dev/xdma/card0/c2h0"""', 'os.O_RDONLY'], {}), "('/dev/xdma/card0/c2h0', os.O_RDONLY)\n", (4748, 4785), False, 'import os\n'), ((4804, 4815), 'time.time', 'time.time', ([], {}), '()\n', (4813, 4815), False, 'import time\n'), ((4840, 4869), 'os.pwrite', 'os.pwrite', (['fd_h2c', 'tx_data', '(0)'], {}), '(fd_h2c, tx_data, 0)\n', (4849, 4869), False, 'import os\n'), ((4910, 4933), 'os.pread', 'os.pread', (['fd_c2h', '(32)', '(0)'], {}), '(fd_c2h, 32, 0)\n', (4918, 4933), False, 'import os\n'), ((4949, 
4960), 'time.time', 'time.time', ([], {}), '()\n', (4958, 4960), False, 'import time\n'), ((5174, 5190), 'os.close', 'os.close', (['fd_h2c'], {}), '(fd_h2c)\n', (5182, 5190), False, 'import os\n'), ((5195, 5211), 'os.close', 'os.close', (['fd_c2h'], {}), '(fd_c2h)\n', (5203, 5211), False, 'import os\n'), ((827, 852), 'os.pread', 'os.pread', (['fd', '(32)', '(0 + 512)'], {}), '(fd, 32, 0 + 512)\n', (835, 852), False, 'import os\n'), ((1450, 1475), 'os.pread', 'os.pread', (['fd', '(32)', '(0 + 640)'], {}), '(fd, 32, 0 + 640)\n', (1458, 1475), False, 'import os\n'), ((2071, 2096), 'os.pread', 'os.pread', (['fd', '(32)', '(0 + 516)'], {}), '(fd, 32, 0 + 516)\n', (2079, 2096), False, 'import os\n'), ((3332, 3357), 'os.pread', 'os.pread', (['fd', '(32)', '(0 + 520)'], {}), '(fd, 32, 0 + 520)\n', (3340, 3357), False, 'import os\n'), ((3903, 3928), 'os.pread', 'os.pread', (['fd', '(32)', '(0 + 536)'], {}), '(fd, 32, 0 + 536)\n', (3911, 3928), False, 'import os\n'), ((5014, 5043), 'codecs.encode', 'codecs.encode', (['rx_data', '"""hex"""'], {}), "(rx_data, 'hex')\n", (5027, 5043), False, 'import codecs\n')] |
from AlphaGo.models.policy import CNNPolicy
from AlphaGo import go
from AlphaGo.go import GameState
from AlphaGo.ai import GreedyPolicyPlayer, ProbabilisticPolicyPlayer
import numpy as np
import unittest
import os
class TestCNNPolicy(unittest.TestCase):
    """Smoke tests for CNNPolicy construction, evaluation and (de)serialization."""

    def test_default_policy(self):
        net = CNNPolicy(["board", "liberties", "sensibleness", "capture_size"])
        net.eval_state(GameState())
        # success == no exception raised

    def test_batch_eval_state(self):
        net = CNNPolicy(["board", "liberties", "sensibleness", "capture_size"])
        outputs = net.batch_eval_state([GameState(), GameState()])
        self.assertEqual(len(outputs), 2)  # one result per GameState
        self.assertEqual(len(outputs[0]), 361)  # 361 (move, prob) pairs each

    def test_output_size(self):
        # The flattened output must match the board area for every board size.
        for size in (19, 13):
            net = CNNPolicy(["board", "liberties", "sensibleness", "capture_size"], board=size)
            out = net.forward(net.preprocessor.state_to_tensor(GameState(size)))
            self.assertEqual(out.shape, (1, size * size))

    def test_save_load(self):
        net = CNNPolicy(["board", "liberties", "sensibleness", "capture_size"])
        paths = ('TESTPOLICY.json', 'TESTWEIGHTS.h5', 'TESTPOLICY2.json', 'TESTWEIGHTS2.h5')
        model_file, weights_file, model_file2, weights_file2 = paths
        # Save model/weights separately, then together in one call.
        net.save_model(model_file)
        net.model.save_weights(weights_file)
        net.save_model(model_file2, weights_file2)
        copy_a = CNNPolicy.load_model(model_file)
        copy_a.model.load_weights(weights_file)
        copy_b = CNNPolicy.load_model(model_file2)
        # Both round-trips must yield identical weights.
        for w_a, w_b in zip(copy_a.model.get_weights(), copy_b.model.get_weights()):
            self.assertTrue(np.all(w_a == w_b))
        for path in paths:
            os.remove(path)
class TestPlayers(unittest.TestCase):
    """Behavioural checks for the greedy and probabilistic policy players."""

    def _play_some_moves(self, player_cls):
        # The player must always produce a legal move on an open board.
        state = GameState()
        player = player_cls(CNNPolicy(["board", "ones", "turns_since"]))
        for _ in range(20):
            move = player.get_move(state)
            self.assertIsNotNone(move)
            state.do_move(move)

    def _almost_full_board(self):
        # Fill every point except (10, 10) with black stones; black to move.
        state = GameState()
        for x in range(19):
            for y in range(19):
                if (x, y) != (10, 10):
                    state.do_move((x, y), go.BLACK)
        state.current_player = go.BLACK
        return state

    def test_greedy_player(self):
        self._play_some_moves(GreedyPolicyPlayer)

    def test_probabilistic_player(self):
        self._play_some_moves(ProbabilisticPolicyPlayer)

    def test_sensible_probabilistic(self):
        # With no sensible move left, the player must pass (return None).
        player = ProbabilisticPolicyPlayer(CNNPolicy(["board", "ones", "turns_since"]))
        self.assertIsNone(player.get_move(self._almost_full_board()))

    def test_sensible_greedy(self):
        # With no sensible move left, the player must pass (return None).
        player = GreedyPolicyPlayer(CNNPolicy(["board", "ones", "turns_since"]))
        self.assertIsNone(player.get_move(self._almost_full_board()))
# Allow running this module directly as a test script.
if __name__ == "__main__":
    unittest.main()
| [
"AlphaGo.models.policy.CNNPolicy.load_model",
"AlphaGo.models.policy.CNNPolicy",
"AlphaGo.ai.GreedyPolicyPlayer",
"AlphaGo.go.GameState",
"unittest.main",
"AlphaGo.ai.ProbabilisticPolicyPlayer",
"numpy.all",
"os.remove"
] | [((3295, 3310), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3308, 3310), False, 'import unittest\n'), ((300, 365), 'AlphaGo.models.policy.CNNPolicy', 'CNNPolicy', (["['board', 'liberties', 'sensibleness', 'capture_size']"], {}), "(['board', 'liberties', 'sensibleness', 'capture_size'])\n", (309, 365), False, 'from AlphaGo.models.policy import CNNPolicy\n'), ((474, 539), 'AlphaGo.models.policy.CNNPolicy', 'CNNPolicy', (["['board', 'liberties', 'sensibleness', 'capture_size']"], {}), "(['board', 'liberties', 'sensibleness', 'capture_size'])\n", (483, 539), False, 'from AlphaGo.models.policy import CNNPolicy\n'), ((790, 865), 'AlphaGo.models.policy.CNNPolicy', 'CNNPolicy', (["['board', 'liberties', 'sensibleness', 'capture_size']"], {'board': '(19)'}), "(['board', 'liberties', 'sensibleness', 'capture_size'], board=19)\n", (799, 865), False, 'from AlphaGo.models.policy import CNNPolicy\n'), ((1009, 1084), 'AlphaGo.models.policy.CNNPolicy', 'CNNPolicy', (["['board', 'liberties', 'sensibleness', 'capture_size']"], {'board': '(13)'}), "(['board', 'liberties', 'sensibleness', 'capture_size'], board=13)\n", (1018, 1084), False, 'from AlphaGo.models.policy import CNNPolicy\n'), ((1253, 1318), 'AlphaGo.models.policy.CNNPolicy', 'CNNPolicy', (["['board', 'liberties', 'sensibleness', 'capture_size']"], {}), "(['board', 'liberties', 'sensibleness', 'capture_size'])\n", (1262, 1318), False, 'from AlphaGo.models.policy import CNNPolicy\n'), ((1668, 1700), 'AlphaGo.models.policy.CNNPolicy.load_model', 'CNNPolicy.load_model', (['model_file'], {}), '(model_file)\n', (1688, 1700), False, 'from AlphaGo.models.policy import CNNPolicy\n'), ((1764, 1797), 'AlphaGo.models.policy.CNNPolicy.load_model', 'CNNPolicy.load_model', (['model_file2'], {}), '(model_file2)\n', (1784, 1797), False, 'from AlphaGo.models.policy import CNNPolicy\n'), ((1925, 1946), 'os.remove', 'os.remove', (['model_file'], {}), '(model_file)\n', (1934, 1946), False, 'import os\n'), ((1949, 1972), 
'os.remove', 'os.remove', (['weights_file'], {}), '(weights_file)\n', (1958, 1972), False, 'import os\n'), ((1975, 1997), 'os.remove', 'os.remove', (['model_file2'], {}), '(model_file2)\n', (1984, 1997), False, 'import os\n'), ((2000, 2024), 'os.remove', 'os.remove', (['weights_file2'], {}), '(weights_file2)\n', (2009, 2024), False, 'import os\n'), ((2104, 2115), 'AlphaGo.go.GameState', 'GameState', ([], {}), '()\n', (2113, 2115), False, 'from AlphaGo.go import GameState\n'), ((2127, 2170), 'AlphaGo.models.policy.CNNPolicy', 'CNNPolicy', (["['board', 'ones', 'turns_since']"], {}), "(['board', 'ones', 'turns_since'])\n", (2136, 2170), False, 'from AlphaGo.models.policy import CNNPolicy\n'), ((2182, 2208), 'AlphaGo.ai.GreedyPolicyPlayer', 'GreedyPolicyPlayer', (['policy'], {}), '(policy)\n', (2200, 2208), False, 'from AlphaGo.ai import GreedyPolicyPlayer, ProbabilisticPolicyPlayer\n'), ((2357, 2368), 'AlphaGo.go.GameState', 'GameState', ([], {}), '()\n', (2366, 2368), False, 'from AlphaGo.go import GameState\n'), ((2380, 2423), 'AlphaGo.models.policy.CNNPolicy', 'CNNPolicy', (["['board', 'ones', 'turns_since']"], {}), "(['board', 'ones', 'turns_since'])\n", (2389, 2423), False, 'from AlphaGo.models.policy import CNNPolicy\n'), ((2435, 2468), 'AlphaGo.ai.ProbabilisticPolicyPlayer', 'ProbabilisticPolicyPlayer', (['policy'], {}), '(policy)\n', (2460, 2468), False, 'from AlphaGo.ai import GreedyPolicyPlayer, ProbabilisticPolicyPlayer\n'), ((2619, 2630), 'AlphaGo.go.GameState', 'GameState', ([], {}), '()\n', (2628, 2630), False, 'from AlphaGo.go import GameState\n'), ((2642, 2685), 'AlphaGo.models.policy.CNNPolicy', 'CNNPolicy', (["['board', 'ones', 'turns_since']"], {}), "(['board', 'ones', 'turns_since'])\n", (2651, 2685), False, 'from AlphaGo.models.policy import CNNPolicy\n'), ((2697, 2730), 'AlphaGo.ai.ProbabilisticPolicyPlayer', 'ProbabilisticPolicyPlayer', (['policy'], {}), '(policy)\n', (2722, 2730), False, 'from AlphaGo.ai import GreedyPolicyPlayer, 
ProbabilisticPolicyPlayer\n'), ((2966, 2977), 'AlphaGo.go.GameState', 'GameState', ([], {}), '()\n', (2975, 2977), False, 'from AlphaGo.go import GameState\n'), ((2989, 3032), 'AlphaGo.models.policy.CNNPolicy', 'CNNPolicy', (["['board', 'ones', 'turns_since']"], {}), "(['board', 'ones', 'turns_since'])\n", (2998, 3032), False, 'from AlphaGo.models.policy import CNNPolicy\n'), ((3044, 3070), 'AlphaGo.ai.GreedyPolicyPlayer', 'GreedyPolicyPlayer', (['policy'], {}), '(policy)\n', (3062, 3070), False, 'from AlphaGo.ai import GreedyPolicyPlayer, ProbabilisticPolicyPlayer\n'), ((386, 397), 'AlphaGo.go.GameState', 'GameState', ([], {}), '()\n', (395, 397), False, 'from AlphaGo.go import GameState\n'), ((577, 588), 'AlphaGo.go.GameState', 'GameState', ([], {}), '()\n', (586, 588), False, 'from AlphaGo.go import GameState\n'), ((590, 601), 'AlphaGo.go.GameState', 'GameState', ([], {}), '()\n', (599, 601), False, 'from AlphaGo.go import GameState\n'), ((932, 945), 'AlphaGo.go.GameState', 'GameState', (['(19)'], {}), '(19)\n', (941, 945), False, 'from AlphaGo.go import GameState\n'), ((1151, 1164), 'AlphaGo.go.GameState', 'GameState', (['(13)'], {}), '(13)\n', (1160, 1164), False, 'from AlphaGo.go import GameState\n'), ((1904, 1920), 'numpy.all', 'np.all', (['(w1 == w2)'], {}), '(w1 == w2)\n', (1910, 1920), True, 'import numpy as np\n')] |
import time
from anchore_engine import db
from anchore_engine.db import User
def add(userId, password, inobj, session=None):
    """Create the user row for *userId*, or update it in place if it exists.

    Note: mutates *inobj* (may add a 'created_at' or 'password' key).
    Always returns True.
    """
    session = session or db.Session()
    user = session.query(User).filter_by(userId=userId).first()
    if user:
        # Existing row: refresh its fields, forcing the supplied password.
        inobj['password'] = password
        user.update(inobj)
    else:
        # New row: stamp a creation time unless the caller provided one.
        user = User(userId=userId, password=password)
        inobj.setdefault('created_at', int(time.time()))
        user.update(inobj)
        session.add(user)
    return True
def get_all(session=None):
    """Return every user row as a plain dict.

    SQLAlchemy's private (underscore-prefixed) instance state is stripped,
    leaving only the mapped column attributes.

    :param session: optional existing DB session; a new one is opened if None
    :return: list of dicts, one per user row
    """
    if not session:
        session = db.Session()
    # Dict comprehension replaces the original empty-dict + update() dance.
    return [
        {key: value for key, value in vars(result).items() if not key.startswith('_')}
        for result in session.query(User).filter_by()
    ]
def get(userId, session=None):
    """Fetch one user row as a plain dict; empty dict if *userId* is unknown.

    :param userId: id of the user to look up
    :param session: optional existing DB session; a new one is opened if None
    :return: dict of the row's public attributes, or {} when not found
    """
    if not session:
        session = db.Session()
    result = session.query(User).filter_by(userId=userId).first()
    if not result:
        return {}
    # Strip SQLAlchemy's private (underscore-prefixed) instance state.
    return {key: value for key, value in vars(result).items()
            if not key.startswith('_')}
def update(userId, password, inobj, session=None):
    """Alias for add(): add() already updates the row in place when it exists."""
    return add(userId, password, inobj, session=session)
def delete(userId, session=None):
    """Stage deletion of the user row; return True if a row was found.

    The delete is only registered on the session — committing is the
    caller's responsibility (matching the other helpers in this module).

    :param userId: id of the user to delete
    :param session: optional existing DB session; a new one is opened if None
    :return: True when a row was found and staged for deletion, else False
    """
    if not session:
        session = db.Session()
    result = session.query(User).filter_by(userId=userId).first()
    if not result:
        return False
    session.delete(result)
    return True
| [
"time.time",
"anchore_engine.db.User",
"anchore_engine.db.Session"
] | [((166, 178), 'anchore_engine.db.Session', 'db.Session', ([], {}), '()\n', (176, 178), False, 'from anchore_engine import db\n'), ((388, 426), 'anchore_engine.db.User', 'User', ([], {'userId': 'userId', 'password': 'password'}), '(userId=userId, password=password)\n', (392, 426), False, 'from anchore_engine.db import User\n'), ((748, 760), 'anchore_engine.db.Session', 'db.Session', ([], {}), '()\n', (758, 760), False, 'from anchore_engine import db\n'), ((1089, 1101), 'anchore_engine.db.Session', 'db.Session', ([], {}), '()\n', (1099, 1101), False, 'from anchore_engine import db\n'), ((1515, 1527), 'anchore_engine.db.Session', 'db.Session', ([], {}), '()\n', (1525, 1527), False, 'from anchore_engine import db\n'), ((504, 515), 'time.time', 'time.time', ([], {}), '()\n', (513, 515), False, 'import time\n')] |
#!/usr/bin/env python
import sys
from fitparse.records import Crc
if sys.version_info >= (2, 7):
import unittest
else:
import unittest2 as unittest
class RecordsTestCase(unittest.TestCase):
    """Unit tests for the incremental CRC helper."""

    def test_crc(self):
        checksum = Crc()
        self.assertEqual(0, checksum.value)
        checksum.update(b'\x0e\x10\x98\x00(\x00\x00\x00.FIT')
        self.assertEqual(0xace7, checksum.value)
        # Feeding 0 must leave the running CRC untouched.
        checksum.update(0)
        self.assertEqual(0xace7, checksum.value)

    def test_crc_format(self):
        self.assertEqual('0x0000', Crc.format(0))
        self.assertEqual('0x12AB', Crc.format(0x12AB))
# Allow running this module directly as a test script.
if __name__ == "__main__":
    unittest.main()
| [
"fitparse.records.Crc.format",
"unittest2.main",
"fitparse.records.Crc"
] | [((659, 674), 'unittest2.main', 'unittest.main', ([], {}), '()\n', (672, 674), True, 'import unittest2 as unittest\n'), ((241, 246), 'fitparse.records.Crc', 'Crc', ([], {}), '()\n', (244, 246), False, 'from fitparse.records import Crc\n'), ((556, 569), 'fitparse.records.Crc.format', 'Crc.format', (['(0)'], {}), '(0)\n', (566, 569), False, 'from fitparse.records import Crc\n'), ((606, 622), 'fitparse.records.Crc.format', 'Crc.format', (['(4779)'], {}), '(4779)\n', (616, 622), False, 'from fitparse.records import Crc\n')] |
import tkinter as tk
import gui
wd = tk.Tk()
# Renamed from `gui`: the original rebound the imported `gui` module name,
# shadowing it for any later module-level code.
app = gui.GUI(wd)
wd.mainloop()
"tkinter.Tk",
"gui.GUI"
] | [((43, 50), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (48, 50), True, 'import tkinter as tk\n'), ((58, 69), 'gui.GUI', 'gui.GUI', (['wd'], {}), '(wd)\n', (65, 69), False, 'import gui\n')] |
import datetime
from flask_ldap3_login import LDAP3LoginManager, AuthenticationResponseStatus
from lost.settings import LOST_CONFIG, FLASK_DEBUG
from flask_jwt_extended import create_access_token, create_refresh_token
from lost.db.model import User as DBUser, Group
from lost.db import roles
class LoginManager():
    """Authenticate a user and mint JWT access/refresh tokens.

    If LDAP is enabled in the LOST config, credentials are checked against
    LDAP first (falling back to the local DB); otherwise only the local DB
    is consulted. LDAP users are mirrored into the DB on first login and
    re-synchronised on every subsequent login.
    """

    def __init__(self, dbm, user_name, password):
        self.dbm = dbm  # database manager used for user lookup/persistence
        self.user_name = user_name
        self.password = password

    def login(self):
        """Return (payload, http_status): tokens + 200 on success, message + 401 otherwise."""
        if LOST_CONFIG.ldap_config['LDAP_ACTIVE']:
            access_token, refresh_token = self.__authenticate_ldap()
        else:
            access_token, refresh_token = self.__authenticate_flask()
        if access_token and refresh_token:
            return {
                'token': access_token,
                'refresh_token': refresh_token
            }, 200
        return {'message': 'Invalid credentials'}, 401

    def __get_token(self, user_id):
        # Session lifetime comes from config; the refresh token outlives the
        # access token by 2 minutes. In debug mode both last about a year.
        expires = datetime.timedelta(minutes=LOST_CONFIG.session_timeout)
        expires_refresh = datetime.timedelta(minutes=LOST_CONFIG.session_timeout + 2)
        if FLASK_DEBUG:
            expires = datetime.timedelta(days=365)
            expires_refresh = datetime.timedelta(days=366)
        access_token = create_access_token(identity=user_id, fresh=True, expires_delta=expires)
        refresh_token = create_refresh_token(user_id, expires_delta=expires_refresh)
        return access_token, refresh_token

    def __authenticate_flask(self):
        # Local-DB check; returns (None, None) when the user is unknown or
        # the password does not match.
        if self.user_name:
            user = self.dbm.find_user_by_user_name(self.user_name)
            if user and user.check_password(self.password):
                return self.__get_token(user.idx)
        return None, None

    def __authenticate_ldap(self):
        # Authenticate against the LDAP server configured in LOST_CONFIG.
        ldap_manager = LDAP3LoginManager()
        ldap_manager.init_config(LOST_CONFIG.ldap_config)
        # Check if the credentials are correct.
        response = ldap_manager.authenticate(self.user_name, self.password)
        if response.status != AuthenticationResponseStatus.success:
            # No user found in LDAP — fall back to the local DB user.
            return self.__authenticate_flask()
        user_info = response.user_info
        user = self.dbm.find_user_by_user_name(self.user_name)
        # User not yet mirrored in the DB: create it from the LDAP entry.
        if not user:
            user = self.__create_db_user(user_info)
        else:
            # User exists in the DB -> sync its fields with LDAP.
            user = self.__update_db_user(user_info, user)
        return self.__get_token(user.idx)

    def __create_db_user(self, user_info):
        # Mirror a fresh LDAP identity into the local DB: annotator role plus
        # a personal default group; is_external marks it as LDAP-managed.
        user = DBUser(user_name=user_info['uid'], email=user_info['mail'],
                      email_confirmed_at=datetime.datetime.now(), first_name=user_info['givenName'],
                      last_name=user_info['sn'], is_external=True)
        anno_role = self.dbm.get_role_by_name(roles.ANNOTATOR)
        user.roles.append(anno_role)
        user.groups.append(Group(name=user.user_name, is_user_default=True))
        self.dbm.save_obj(user)
        return user

    def __update_db_user(self, user_info, user):
        # Keep the DB copy in sync with the LDAP directory on every login.
        user.email = user_info['mail']
        user.first_name = user_info['givenName']
        user.last_name = user_info['sn']
        self.dbm.save_obj(user)
        return user
"flask_jwt_extended.create_access_token",
"flask_jwt_extended.create_refresh_token",
"datetime.datetime.now",
"lost.db.model.Group",
"flask_ldap3_login.LDAP3LoginManager",
"datetime.timedelta"
] | [((970, 1025), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': 'LOST_CONFIG.session_timeout'}), '(minutes=LOST_CONFIG.session_timeout)\n', (988, 1025), False, 'import datetime\n'), ((1052, 1111), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(LOST_CONFIG.session_timeout + 2)'}), '(minutes=LOST_CONFIG.session_timeout + 2)\n', (1070, 1111), False, 'import datetime\n'), ((1269, 1341), 'flask_jwt_extended.create_access_token', 'create_access_token', ([], {'identity': 'user_id', 'fresh': '(True)', 'expires_delta': 'expires'}), '(identity=user_id, fresh=True, expires_delta=expires)\n', (1288, 1341), False, 'from flask_jwt_extended import create_access_token, create_refresh_token\n'), ((1366, 1426), 'flask_jwt_extended.create_refresh_token', 'create_refresh_token', (['user_id'], {'expires_delta': 'expires_refresh'}), '(user_id, expires_delta=expires_refresh)\n', (1386, 1426), False, 'from flask_jwt_extended import create_access_token, create_refresh_token\n'), ((1825, 1844), 'flask_ldap3_login.LDAP3LoginManager', 'LDAP3LoginManager', ([], {}), '()\n', (1842, 1844), False, 'from flask_ldap3_login import LDAP3LoginManager, AuthenticationResponseStatus\n'), ((1158, 1186), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(365)'}), '(days=365)\n', (1176, 1186), False, 'import datetime\n'), ((1217, 1245), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(366)'}), '(days=366)\n', (1235, 1245), False, 'import datetime\n'), ((2974, 3022), 'lost.db.model.Group', 'Group', ([], {'name': 'user.user_name', 'is_user_default': '(True)'}), '(name=user.user_name, is_user_default=True)\n', (2979, 3022), False, 'from lost.db.model import User as DBUser, Group\n'), ((2722, 2745), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2743, 2745), False, 'import datetime\n')] |
import os
import torch
import numpy as np
import torch.nn as nn
import matplotlib.pyplot as plt
def get_param_matrix(model_prefix, model_dir):
    """Collect the flattened parameters of every saved model in *model_dir*.

    Checkpoints are matched by filename prefix and visited in sorted
    filename order, so the row order of the result is deterministic
    (plain os.listdir() order is arbitrary and filesystem-dependent).

    :param model_prefix: filename prefix identifying the checkpoint files
    :param model_dir: directory containing the checkpoint files
    :return: numpy array of shape (n_checkpoints, n_parameters)
    """
    param_matrix = []
    for file in sorted(os.listdir(model_dir)):
        if file.startswith(model_prefix):
            model_path = os.path.join(model_dir, file)
            state_dict = torch.load(model_path)
            # Grab all tensors in the state dict and flatten them into one
            # long parameter vector.
            params = [state_dict[param].data.float() for param in state_dict]
            params = nn.utils.parameters_to_vector(params)
            param_matrix.append(params.cpu().numpy())
    return np.array(param_matrix)
def plot_trajectory(projected_params):
    """Scatter-plot a 3-D projected learning trajectory.

    :param projected_params: array-like of shape (n_steps, 3) — one projected
        parameter point per training checkpoint
    """
    xs = projected_params[:, 0]
    ys = projected_params[:, 1]
    zs = projected_params[:, 2]
    # 3-D scatter of the trajectory points.
    fig = plt.figure(figsize=(10, 7))
    axes = plt.axes(projection="3d")
    axes.scatter3D(xs, ys, zs, color="green")
    plt.title("Projected Learning Trajectory")
| [
"os.listdir",
"torch.load",
"os.path.join",
"torch.nn.utils.parameters_to_vector",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.title"
] | [((309, 330), 'os.listdir', 'os.listdir', (['model_dir'], {}), '(model_dir)\n', (319, 330), False, 'import os\n'), ((785, 807), 'numpy.array', 'np.array', (['param_matrix'], {}), '(param_matrix)\n', (793, 807), True, 'import numpy as np\n'), ((1024, 1051), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (1034, 1051), True, 'import matplotlib.pyplot as plt\n'), ((1063, 1088), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (1071, 1088), True, 'import matplotlib.pyplot as plt\n'), ((1155, 1197), 'matplotlib.pyplot.title', 'plt.title', (['"""Projected Learning Trajectory"""'], {}), "('Projected Learning Trajectory')\n", (1164, 1197), True, 'import matplotlib.pyplot as plt\n'), ((399, 428), 'os.path.join', 'os.path.join', (['model_dir', 'file'], {}), '(model_dir, file)\n', (411, 428), False, 'import os\n'), ((454, 476), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (464, 476), False, 'import torch\n'), ((673, 710), 'torch.nn.utils.parameters_to_vector', 'nn.utils.parameters_to_vector', (['params'], {}), '(params)\n', (702, 710), True, 'import torch.nn as nn\n')] |
import numpy as np
import tor4
import tor4.nn as nn
def test_softmax():
    # Uniform logits without grad tracking -> uniform probs, no grad flag.
    logits = tor4.tensor(data=[0, 0, 0.0])
    probs = nn.functional.softmax(logits, dim=0)
    assert not probs.requires_grad
    assert probs.tolist() == [1 / 3, 1 / 3, 1 / 3]
def test_softmax2():
    # Uniform 2x3 logits: softmax is uniform along either axis.
    logits = tor4.tensor(data=[[0, 0, 0], [0, 0, 0.0]])
    along_rows = nn.functional.softmax(logits, dim=0)
    along_cols = nn.functional.softmax(logits, dim=1)
    assert not along_rows.requires_grad
    assert along_rows.tolist() == [[1 / 2, 1 / 2, 1 / 2], [1 / 2, 1 / 2, 1 / 2]]
    assert not along_cols.requires_grad
    assert along_cols.tolist() == [[1 / 3, 1 / 3, 1 / 3], [1 / 3, 1 / 3, 1 / 3]]
def test_softmax_backward():
    # Constant upstream gradient through softmax must vanish.
    logits = tor4.tensor(data=[0, 0, 0.0], requires_grad=True)
    probs = nn.functional.softmax(logits, dim=-1)
    probs.backward(tor4.tensor([1, 1, 1.0]))
    assert probs.requires_grad
    assert probs.tolist() == [1 / 3, 1 / 3, 1 / 3]
    assert logits.grad.tolist() == [0, 0, 0]
def test_softmax_backward2():
    # Non-constant upstream gradient through a uniform softmax.
    logits = tor4.tensor(data=[0, 0, 0.0], requires_grad=True)
    probs = nn.functional.softmax(logits, dim=-1)
    probs.backward(tor4.tensor([0, 1, -1.0]))
    assert probs.requires_grad
    assert probs.tolist() == [1 / 3, 1 / 3, 1 / 3]
    assert logits.grad.tolist() == [0, 1 / 3, -1 / 3]
def test_softmax2d_backward():
    # 2-D softmax along the last axis; all-ones upstream grad -> zero grads.
    logits = tor4.tensor(data=[[0, 1, -1.0], [1, -2, 3]], requires_grad=True)
    probs = nn.functional.softmax(logits, dim=-1)
    probs.backward(tor4.tensor([[1, 1, 1.0], [1, 1, 1]]))
    assert probs.requires_grad
    expected = [[0.2447, 0.6652, 0.09], [0.1185, 0.0059, 0.8756]]
    assert np.allclose(probs.tolist(), expected, atol=1e-4, rtol=1e-4)
    assert np.allclose(logits.grad.tolist(), [[0, 0, 0], [0, 0, 0]])
def test_softmax2d_backward2():
    # 2-D softmax along axis 0; all-ones upstream grad -> zero grads.
    logits = tor4.tensor(data=[[0, 1, -1.0], [1, -2, 3]], requires_grad=True)
    probs = nn.functional.softmax(logits, dim=0)
    probs.backward(tor4.tensor([[1, 1, 1.0], [1, 1, 1]]))
    assert probs.requires_grad
    expected = [[0.2689, 0.9526, 0.018], [0.7311, 0.0474, 0.982]]
    assert np.allclose(probs.tolist(), expected, atol=1e-4, rtol=1e-4)
    assert np.allclose(logits.grad.tolist(), [[0, 0, 0], [0, 0, 0]])
def test_softmax2d_backward3():
    # 2-D softmax along the last axis with a mixed upstream gradient.
    logits = tor4.tensor(data=[[0, 1, -1.0], [1, -2, 3]], requires_grad=True)
    probs = nn.functional.softmax(logits, dim=-1)
    probs.backward(tor4.tensor([[0, -1, 1.0], [2, 0, -1]]))
    assert probs.requires_grad
    expected_probs = [[0.2447, 0.6652, 0.09], [0.1185, 0.0059, 0.8756]]
    expected_grad = [[0.1408, -0.2826, 0.1418], [0.3127, 0.0038, -0.3164]]
    assert np.allclose(probs.tolist(), expected_probs, atol=1e-4, rtol=1e-4)
    assert np.allclose(logits.grad.tolist(), expected_grad, atol=1e-4, rtol=1e-4)
def test_softmax2d_backward4():
    # 2-D softmax along axis 0 with a mixed upstream gradient.
    logits = tor4.tensor(data=[[0, 1, -1.0], [1, -2, 3]], requires_grad=True)
    probs = nn.functional.softmax(logits, dim=0)
    probs.backward(tor4.tensor([[-5, 3, 0.0], [0, 0, 1]]))
    assert probs.requires_grad
    expected_probs = [[0.2689, 0.9526, 0.018], [0.7311, 0.0474, 0.982]]
    expected_grad = [[-0.9831, 0.1355, -0.0177], [0.9831, -0.1355, 0.0177]]
    assert np.allclose(probs.tolist(), expected_probs, atol=1e-4, rtol=1e-4)
    assert np.allclose(logits.grad.tolist(), expected_grad, atol=1e-4, rtol=1e-4)
def test_softmax3d_backward():
    # 3-D softmax along the middle axis with a mixed upstream gradient.
    logits = tor4.tensor(
        data=[[[0, 1, -1.0], [1, -2, 3]], [[1, 4, -2], [0, 0, -3]]], requires_grad=True,
    )
    probs = nn.functional.softmax(logits, dim=1)
    probs.backward(tor4.tensor([[[-5, 3, 0.0], [0, 0, 1]], [[3, 0, -3], [1, 2, 3]]]))
    assert probs.requires_grad
    expected_probs = [
        [[0.2689, 0.9526, 0.018], [0.7311, 0.0474, 0.982]],
        [[0.7311, 0.982, 0.7311], [0.2689, 0.018, 0.2689]],
    ]
    expected_grad = [
        [[-0.9831, 0.1355, -0.0177], [0.9831, -0.1355, 0.0177]],
        [[0.3932, -0.0353, -1.1797], [-0.3932, 0.0353, 1.1797]],
    ]
    assert np.allclose(probs.tolist(), expected_probs, atol=1e-4, rtol=1e-4)
    assert np.allclose(logits.grad.tolist(), expected_grad, atol=1e-4, rtol=1e-4)
| [
"tor4.tensor",
"tor4.nn.functional.softmax"
] | [((83, 112), 'tor4.tensor', 'tor4.tensor', ([], {'data': '[0, 0, 0.0]'}), '(data=[0, 0, 0.0])\n', (94, 112), False, 'import tor4\n'), ((124, 155), 'tor4.nn.functional.softmax', 'nn.functional.softmax', (['a'], {'dim': '(0)'}), '(a, dim=0)\n', (145, 155), True, 'import tor4.nn as nn\n'), ((272, 314), 'tor4.tensor', 'tor4.tensor', ([], {'data': '[[0, 0, 0], [0, 0, 0.0]]'}), '(data=[[0, 0, 0], [0, 0, 0.0]])\n', (283, 314), False, 'import tor4\n'), ((327, 358), 'tor4.nn.functional.softmax', 'nn.functional.softmax', (['a'], {'dim': '(0)'}), '(a, dim=0)\n', (348, 358), True, 'import tor4.nn as nn\n'), ((371, 402), 'tor4.nn.functional.softmax', 'nn.functional.softmax', (['a'], {'dim': '(1)'}), '(a, dim=1)\n', (392, 402), True, 'import tor4.nn as nn\n'), ((665, 714), 'tor4.tensor', 'tor4.tensor', ([], {'data': '[0, 0, 0.0]', 'requires_grad': '(True)'}), '(data=[0, 0, 0.0], requires_grad=True)\n', (676, 714), False, 'import tor4\n'), ((726, 758), 'tor4.nn.functional.softmax', 'nn.functional.softmax', (['a'], {'dim': '(-1)'}), '(a, dim=-1)\n', (747, 758), True, 'import tor4.nn as nn\n'), ((964, 1013), 'tor4.tensor', 'tor4.tensor', ([], {'data': '[0, 0, 0.0]', 'requires_grad': '(True)'}), '(data=[0, 0, 0.0], requires_grad=True)\n', (975, 1013), False, 'import tor4\n'), ((1025, 1057), 'tor4.nn.functional.softmax', 'nn.functional.softmax', (['a'], {'dim': '(-1)'}), '(a, dim=-1)\n', (1046, 1057), True, 'import tor4.nn as nn\n'), ((1274, 1338), 'tor4.tensor', 'tor4.tensor', ([], {'data': '[[0, 1, -1.0], [1, -2, 3]]', 'requires_grad': '(True)'}), '(data=[[0, 1, -1.0], [1, -2, 3]], requires_grad=True)\n', (1285, 1338), False, 'import tor4\n'), ((1350, 1382), 'tor4.nn.functional.softmax', 'nn.functional.softmax', (['a'], {'dim': '(-1)'}), '(a, dim=-1)\n', (1371, 1382), True, 'import tor4.nn as nn\n'), ((1728, 1792), 'tor4.tensor', 'tor4.tensor', ([], {'data': '[[0, 1, -1.0], [1, -2, 3]]', 'requires_grad': '(True)'}), '(data=[[0, 1, -1.0], [1, -2, 3]], requires_grad=True)\n', 
(1739, 1792), False, 'import tor4\n'), ((1804, 1835), 'tor4.nn.functional.softmax', 'nn.functional.softmax', (['a'], {'dim': '(0)'}), '(a, dim=0)\n', (1825, 1835), True, 'import tor4.nn as nn\n'), ((2181, 2245), 'tor4.tensor', 'tor4.tensor', ([], {'data': '[[0, 1, -1.0], [1, -2, 3]]', 'requires_grad': '(True)'}), '(data=[[0, 1, -1.0], [1, -2, 3]], requires_grad=True)\n', (2192, 2245), False, 'import tor4\n'), ((2257, 2289), 'tor4.nn.functional.softmax', 'nn.functional.softmax', (['a'], {'dim': '(-1)'}), '(a, dim=-1)\n', (2278, 2289), True, 'import tor4.nn as nn\n'), ((2730, 2794), 'tor4.tensor', 'tor4.tensor', ([], {'data': '[[0, 1, -1.0], [1, -2, 3]]', 'requires_grad': '(True)'}), '(data=[[0, 1, -1.0], [1, -2, 3]], requires_grad=True)\n', (2741, 2794), False, 'import tor4\n'), ((2806, 2837), 'tor4.nn.functional.softmax', 'nn.functional.softmax', (['a'], {'dim': '(0)'}), '(a, dim=0)\n', (2827, 2837), True, 'import tor4.nn as nn\n'), ((3277, 3373), 'tor4.tensor', 'tor4.tensor', ([], {'data': '[[[0, 1, -1.0], [1, -2, 3]], [[1, 4, -2], [0, 0, -3]]]', 'requires_grad': '(True)'}), '(data=[[[0, 1, -1.0], [1, -2, 3]], [[1, 4, -2], [0, 0, -3]]],\n requires_grad=True)\n', (3288, 3373), False, 'import tor4\n'), ((3396, 3427), 'tor4.nn.functional.softmax', 'nn.functional.softmax', (['a'], {'dim': '(1)'}), '(a, dim=1)\n', (3417, 3427), True, 'import tor4.nn as nn\n'), ((777, 801), 'tor4.tensor', 'tor4.tensor', (['[1, 1, 1.0]'], {}), '([1, 1, 1.0])\n', (788, 801), False, 'import tor4\n'), ((1076, 1101), 'tor4.tensor', 'tor4.tensor', (['[0, 1, -1.0]'], {}), '([0, 1, -1.0])\n', (1087, 1101), False, 'import tor4\n'), ((1401, 1438), 'tor4.tensor', 'tor4.tensor', (['[[1, 1, 1.0], [1, 1, 1]]'], {}), '([[1, 1, 1.0], [1, 1, 1]])\n', (1412, 1438), False, 'import tor4\n'), ((1854, 1891), 'tor4.tensor', 'tor4.tensor', (['[[1, 1, 1.0], [1, 1, 1]]'], {}), '([[1, 1, 1.0], [1, 1, 1]])\n', (1865, 1891), False, 'import tor4\n'), ((2308, 2347), 'tor4.tensor', 'tor4.tensor', (['[[0, -1, 1.0], [2, 
0, -1]]'], {}), '([[0, -1, 1.0], [2, 0, -1]])\n', (2319, 2347), False, 'import tor4\n'), ((2856, 2894), 'tor4.tensor', 'tor4.tensor', (['[[-5, 3, 0.0], [0, 0, 1]]'], {}), '([[-5, 3, 0.0], [0, 0, 1]])\n', (2867, 2894), False, 'import tor4\n'), ((3446, 3511), 'tor4.tensor', 'tor4.tensor', (['[[[-5, 3, 0.0], [0, 0, 1]], [[3, 0, -3], [1, 2, 3]]]'], {}), '([[[-5, 3, 0.0], [0, 0, 1]], [[3, 0, -3], [1, 2, 3]]])\n', (3457, 3511), False, 'import tor4\n')] |
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
class KerasBow(object):
    """Bag-of-words featurizer built on the Keras text utilities.

    Builds a vocabulary over a corpus, maps every document to a sequence of
    word indices, and right-pads the sequences to a common length so each
    document becomes a fixed-size row of vocabulary indices.
    """

    def __init__(self, num_words=20000, maxlen=None):
        """
        :param num_words: keep only the ``num_words - 1`` most frequent words
            (frequency-descending, Keras convention).
        :param maxlen: target sequence length; when ``None`` it is inferred
            from the first transformed batch and remembered.
        """
        self.num_words = num_words
        self.maxlen = maxlen
        self.tokenizer = None

    def fit(self, docs):
        """Learn the vocabulary from ``docs`` (an iterable of strings,
        e.g. ``['some thing to do', 'some thing to drink']``)."""
        print('Create Bag Of Words ...')
        # Case is assumed to be pre-normalized upstream, hence lower=False.
        tok = Tokenizer(self.num_words, lower=False)
        tok.fit_on_texts(docs)
        self.tokenizer = tok
        print("Get Unique Words In Corpus: %s" % len(tok.word_index))
        return self

    def transform(self, docs):
        """Map ``docs`` to index sequences padded on the right."""
        print('Docs To Sequences ...')
        encoded = self.tokenizer.texts_to_sequences(docs)
        padded = pad_sequences(encoded, self.maxlen, padding='post')
        if self.maxlen is None:
            # Remember the inferred length so later batches line up.
            self.maxlen = padded.shape[1]
        return padded

    def fit_transform(self, docs):
        """Convenience: ``fit`` then ``transform`` on the same corpus."""
        return self.fit(docs).transform(docs)
| [
"keras.preprocessing.sequence.pad_sequences",
"keras.preprocessing.text.Tokenizer"
] | [((739, 777), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', (['self.num_words'], {'lower': '(False)'}), '(self.num_words, lower=False)\n', (748, 777), False, 'from keras.preprocessing.text import Tokenizer\n'), ((1088, 1141), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['sequences', 'self.maxlen'], {'padding': '"""post"""'}), "(sequences, self.maxlen, padding='post')\n", (1101, 1141), False, 'from keras.preprocessing.sequence import pad_sequences\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-02-18 16:24
from django.db import migrations
# Mapping from the free-text country names found in existing Address rows
# (several spellings/encodings per country) to ISO 3166-1 alpha-2 codes.
countries = {
    "Ma\u010farsko": "HU",
    "Czech Republic": "CZ",
    "\u010cesk\xe1 republika": "CZ",
    "United Kingdom": "GB",
    "Austria": "AT",
    "Madarsko": "HU",
    "Australia": "AU",
    "Srbsko": "RS",
    "Kraj Vyso\u010dina, \u010cesk\xe1 republika": "CZ",
    "\u010desk\xe1 republika": "CZ",
    "\u010cR": "CZ",
    "Plze\u0148sk\xfd kraj, \u010cesk\xe1 republika": "CZ",
    "Sverige": "SE",
    "serbia": "RS",
    "\xd6sterreich": "AT",
    "Switzerland": "CH",
    "ma\u010farsk\xe1 \u013eudovo demokratick\xe1 ": "HU",
    "Kosovo": "RS",
    "India": "IN",
    "Uzbekistan": "UZ",
    "Uganda": "UG",
    "litva": "LT",
    "Velka Britania": "GB",
}


def fix_country_names(apps, schema_editor):
    """Normalize every Address.country to an ISO code ("SK" when unmapped)."""
    # Use the historical model state, not a direct import: the live Address
    # model may be newer than what this migration expects.
    Address = apps.get_model("people", "Address")
    for record in Address.objects.all():
        code = countries.get(record.country, "SK")
        record.country = code
        record.save()
class Migration(migrations.Migration):
    # Data migration: rewrites the free-text Address.country values into
    # ISO codes via fix_country_names (defined above in this module).
    # NOTE(review): RunPython is given no reverse_code, so this migration is
    # irreversible -- confirm that is intended.
    dependencies = [("people", "0010_merge")]
    operations = [migrations.RunPython(fix_country_names)]
| [
"django.db.migrations.RunPython"
] | [((1259, 1298), 'django.db.migrations.RunPython', 'migrations.RunPython', (['fix_country_names'], {}), '(fix_country_names)\n', (1279, 1298), False, 'from django.db import migrations\n')] |
# -*- coding: utf-8 -*-
'''
常量
'''
import base64
from django.utils.translation import ugettext_lazy as _
DOMAIN_BASIC_PARAMS = (
(u"cf_limit_mailbox_cnt", _(u"限定邮箱数量")),
#(u"cf_limit_alias_cnt", u"限定别名数量"), #这个开关没人用
(u"cf_limit_mailbox_size", _(u"限定邮箱空间总容量")),
(u"cf_limit_netdisk_size", _(u"限定网络硬盘总容量")),
(u"cf_limit_email_size", _(u"发送邮件限制大小")),
#(u"cf_limit_attach_size", u"WebMail单附件大小"), #新版webmail不需要这个按钮
(u"cf_def_mailbox_size", _(u"用户邮箱默认容量")),
(u"cf_def_netdisk_size", _(u"网络硬盘默认容量")),
)
DOMAIN_BASIC_PARAMS_VALUE = (
(u"cf_limit_mailbox_cnt", "8000"),
#(u"cf_limit_alias_cnt", "0"), #这个开关没人用
(u"cf_limit_mailbox_size", "0"),
(u"cf_limit_netdisk_size", "500"),
(u"cf_limit_email_size", "0"),
#(u"cf_limit_attach_size", "10"),
(u"cf_def_mailbox_size", "100"),
(u"cf_def_netdisk_size", "100"),
)
DOMAIN_BASIC_PARAMS_TYPE = (
(u"cf_limit_mailbox_cnt", "system"),
#(u"cf_limit_alias_cnt", "system"), #这个开关没人用
(u"cf_limit_mailbox_size", "system"),
(u"cf_limit_netdisk_size", "system"),
(u"cf_limit_email_size", "system"),
#(u"cf_limit_attach_size", "system"),
(u"cf_def_mailbox_size", "system"),
(u"cf_def_netdisk_size", "system"),
)
DOMAIN_BASIC_STATUS = (
(u"mailboxUsed", _(u"已分配邮箱")),
(u"aliasUsed", _(u"已分配别名")),
(u"spaceUsed", _(u"已分配邮箱空间")),
(u"netdiskUsed", _(u"已分配网盘空间")),
)
DOMAIN_REG_LOGIN_PARAMS = (
(u"sw_user_reg", _(u"用户申请邮箱功能")),
(u"sw_reg_ratify", _(u"管理员审核开通")),
#(u"sw_link_admin", u"管理员登陆链接显示在邮件系统登陆页"), #无语加无用的开关
(u"sw_welcome_letter", _(u"新用户欢迎信功能")),
(u"sw_agreement", _(u"新用户欢迎信功能")),
)
DOMAIN_REG_LOGIN_VALUE = (
(u"sw_user_reg", "-1"),
(u"sw_reg_ratify", "-1"),
#(u"sw_link_admin", "1"),
(u"sw_welcome_letter", "1"),
(u"sw_agreement", "1"),
)
DOMAIN_REG_LOGIN_TYPE = (
(u"sw_user_reg", "webmail"),
(u"sw_reg_ratify", "webmail"),
#(u"sw_link_admin", "webmail"),
(u"sw_welcome_letter", "system"),
(u"sw_agreement", "webmail"),
)
DOMAIN_REG_LOGIN_WELCOME_PARAMS = (
(u"cf_welcome_letter", _(u"欢迎信内容")),
)
DOMAIN_REG_LOGIN_WELCOME_VALUE = (
(u"cf_welcome_letter", ""),
)
DOMAIN_REG_LOGIN_WELCOME_TYPE = (
(u"cf_welcome_letter", "system"),
)
DOMAIN_REG_LOGIN_AGREE_PARAMS = (
(u"cf_agreement", _(u"用户注册协议")),
)
DOMAIN_REG_LOGIN_AGREE_VALUE = (
(u"cf_agreement", ""),
)
DOMAIN_REG_LOGIN_AGREE_TYPE = (
(u"cf_agreement", "webmail"),
)
DOMAIN_SYS_RECV_PARAMS = (
(u"limit_send", _(u"发信功能限制")),
(u"limit_recv", _(u"收信功能限制")),
(u"limit_pop", _(u"POP/POPS邮箱收取功能")),
(u"limit_imap", _(u"IMAP/IMAPS客户端邮件收发功能")),
(u"limit_smtp", _(u"SMTP/SMTPS客户端邮件发送功能")),
)
DOMAIN_SYS_RECV_VALUE = (
(u"limit_send", u"-1"),
(u"limit_recv", u"-1"),
(u"limit_pop", u"-1"),
(u"limit_imap", u"-1"),
(u"limit_smtp", u"-1"),
)
DOMAIN_SYS_RECV_TYPE = (
(u"limit_send", u"system"),
(u"limit_recv", u"system"),
(u"limit_pop", u"system"),
(u"limit_imap", u"system"),
(u"limit_smtp", u"system"),
)
DOMAIN_SYS_SECURITY_PARAMS = (
(u"sw_def_login_limit_mail", _(u"开启修改密码通知信")),
(u"cf_def_safe_login", _(u"安全登录限制")),
(u"cf_ip_limit", _(u"登陆IP限制")),
)
DOMAIN_SYS_SECURITY_VALUE = (
(u"sw_def_login_limit_mail", u"1"),
(u"cf_def_safe_login", u""),
(u"cf_ip_limit", u""),
)
DOMAIN_SYS_SECURITY_TYPE = (
(u"sw_def_login_limit_mail", u"system"),
(u"cf_def_safe_login", u"webmail"),
(u"cf_ip_limit", u"webmail"),
)
DOMAIN_SYS_SECURITY_PWD_PARAMS = (
(u"cf_def_login_limit_mail", _(u"修改密码通知信")),
)
DOMAIN_SYS_SECURITY_PWD_VALUES = (
(u"cf_def_login_limit_mail", u""),
)
DOMAIN_SYS_SECURITY_PWD_TYPE = (
(u"cf_def_login_limit_mail", u"system"),
)
# Password-policy switches: (config key, translated display label).
DOMAIN_SYS_PASSWORD_PARAMS = (
    # Key fixed from "sw_pwdtimeout": the matching DOMAIN_SYS_PASSWORD_VALUE
    # and DOMAIN_SYS_PASSWORD_TYPE tables both use "sw_pwd_timeout", so the
    # old spelling never lined up with its default value and scope rows.
    (u"sw_pwd_timeout", _(u"定期密码修改设置")),
    (u"cf_pwd_days", _(u"密码有效期间")),
    #(u"cf_first_change_pwd", u"首次登录修改密码"),
    (u"cf_pwd_type", _(u"密码组成字符种类")),
    (u"cf_pwd_rule", _(u"其他密码规则设置")),
    (u"cf_pwd_forbid", _(u"用户密码强度低于规则设置")),
)
DOMAIN_SYS_PASSWORD_VALUE = (
(u"sw_pwd_timeout", u"1"),
(u"cf_pwd_days", u"365"),
#(u"cf_first_change_pwd", u"-1"),
(u"cf_pwd_type", u"-1"),
(u"cf_pwd_rule", u""),
(u"cf_pwd_forbid", u""),
)
DOMAIN_SYS_PASSWORD_TYPE = (
(u"sw_pwd_timeout", u"system"),
(u"cf_pwd_days", u"system"),
#(u"cf_first_change_pwd", u"system"),
(u"cf_pwd_type", u"system"),
(u"cf_pwd_rule", u"system"),
(u"cf_pwd_forbid", u"system"),
)
#密码组成字符种类
DOMAIN_SYS_PASSWORD_TYPE_LIMIT = (
(u"-1", _(u"必须包含两种字符")),
(u"1", _(u"必须包含三种字符")),
(u"2", _(u"必须包含四种字符")),
)
#其他密码规则设置
DOMAIN_SYS_PASSWORD_RULE_VALUE = (
#(u"pwdLen", u"passwd_size"), >= 2.2.59 后强制开启
(u"pwdLenValue", u"passwd_size2"),
(u"pwdNoAcct", u"passwd_name"),
(u"pwdNumLimit", u"passwd_digital"),
(u"pwdWordLimit", u"passwd_letter"),
(u"pwdFlagLimit", u"passwd_letter2"),
(u"pwdNoName", u"passwd_name2"),
)
#密码低于规则强度时操作
DOMAIN_SYS_PASSWORD_FORBID_RULE = (
(u"pwdLimitForbidSend", u"forbid_send"),
(u"pwdLimitForceChange", u"force_change"),
(u"pwdLimitForbidSendInWeak", u"forbid_send_in_weak"),
(u"pwdLimitForceChangeInWeak", u"force_change_in_weak"),
)
DOMAIN_SYS_PASSWORD_FORBID_RULE_DEFAULT = (
(u"forbid_send", u"-1"),
(u"force_change", u"-1"),
(u"forbid_send_in_weak", u"1"),
(u"force_change_in_weak", u"1"),
)
DOMAIN_SYS_PASSWORD_LEN_LIMIT = tuple([u"{}".format(v) for v in range(8,17)])
DOMAIN_SYS_PASSWORD_RULE_LIMIT = (
#是否限制密码长度
#(u"passwd_size", u"1"),
#密码长度的值
(u"passwd_size2", u"8"),
#密码不能包含账号
(u"passwd_name", u"1"),
#连续3位及以上数字不能连号
(u"passwd_digital", u"1"),
#连续3位及以上字母不能连号
(u"passwd_letter", u"1"),
#密码不能包含连续3个及以上相同字符
(u"passwd_letter2", u"1"),
#密码不能包含用户姓名大小写全拼
(u"passwd_name2", u"1"),
)
DOMAIN_SYS_INTERFACE_PARAMS = (
(u"sw_auth_api", _(u"第三方登录验证")),
(u"sw_api_pwd_encry", _(u"接口修改密码是否加密")),
(u"sw_impush", _(u"即时通讯软件集成")),
(u"sw_xss_token", _(u"登录防止xss启用token验证")),
)
DOMAIN_SYS_INTERFACE_VALUE = (
(u"sw_auth_api", u"-1"),
(u"sw_api_pwd_encry", u"-1"),
(u"sw_impush", u"-1"),
(u"sw_xss_token", u"-1"),
)
DOMAIN_SYS_INTERFACE_TYPE = (
(u"sw_auth_api", u"webmail"),
(u"sw_api_pwd_encry", u"webmail"),
(u"sw_impush", u"webmail"),
(u"sw_xss_token", u"webmail"),
)
DOMAIN_SYS_INTERFACE_AUTH_API_PARAMS = (
(u"cf_auth_api", _(u"第三方登录验证")),
)
DOMAIN_SYS_INTERFACE_AUTH_API_VALUE = (
(u"cf_auth_api", u""),
)
DOMAIN_SYS_INTERFACE_AUTH_API_TYPE = (
(u"cf_auth_api", u"webmail"),
)
DOMAIN_SYS_INTERFACE_IM_API_PARAMS = (
(u"cf_impush_api", _(u"即时通讯软件集成")),
)
DOMAIN_SYS_INTERFACE_IM_API_VALUE = (
(u"cf_impush_api", u""),
)
DOMAIN_SYS_INTERFACE_IM_API_TYPE = (
(u"cf_impush_api", u"webmail"),
)
DOMAIN_SYS_OTHERS_PARAMS = (
#(u"sw_size_limit_recv", u"邮箱容量满后拒绝接收邮件"), 这个开关没意义,去掉了
(u"sw_auto_clean", _(u"邮箱空间定时清理功能")),
(u"sw_online_attach_switch", _(u"客户端网络附件开关")),
#(u"sw_auto_inbox", u"登录默认打开收件箱"),
(u"sw_filter_duplicate_mail", _(u"收件时是否过滤重复邮件")),
#这个开关没有意义,应该作为通用设置存在
(u"sw_display_list", _(u"邮件列表发来邮件显示邮件列表名称")),
(u"sw_user_reg", _(u"用户申请邮箱功能")),
#(u"sw_reg_ratify", _(u"管理员审核开通")),
(u"sw_welcome_letter", _(u"新用户欢迎信功能")),
(u"sw_agreement", _(u"新用户欢迎信功能")),
(u"sw_recvsms", _(u"短信通知接收邮件")),
(u"sw_sendsms", _(u"短信通知发送邮件")),
(u"cf_sms_conf", _(u"短信模块设置")),
)
DOMAIN_SYS_OTHERS_VALUE = (
#(u"sw_size_limit_recv", u"1"),
(u"sw_auto_clean", u"1"),
(u"sw_online_attach_switch", u"-1"),
#(u"sw_auto_inbox", u"1"),
(u"sw_filter_duplicate_mail", u"1"),
(u"sw_display_list", u"1"),
(u"sw_user_reg", "-1"),
#(u"sw_reg_ratify", "-1"),
(u"sw_welcome_letter", "1"),
(u"sw_agreement", "1"),
(u"sw_recvsms", u"-1"),
(u"sw_sendsms", u"-1"),
(u"cf_sms_conf", u""),
)
DOMAIN_SYS_OTHERS_TYPE = (
#(u"sw_size_limit_recv", u"system"),
(u"sw_auto_clean", u"webmail"),
(u"sw_online_attach_switch", u"system"),
#(u"sw_auto_inbox", u"webmail"),
(u"sw_filter_duplicate_mail", u"webmail"),
(u"sw_display_list", u"webmail"),
(u"sw_user_reg", "webmail"),
#(u"sw_reg_ratify", "webmail"),
(u"sw_welcome_letter", "system"),
(u"sw_agreement", "webmail"),
(u"sw_recvsms", u"webmail"),
(u"sw_sendsms", u"webmail"),
(u"cf_sms_conf", u"system"),
)
DOMAIN_SYS_OTHERS_SPACE_PARAMS = (
(u"cf_spaceclean", _(u"邮箱空间清理")),
(u"cf_spacemail", _(u"邮箱空间清理")),
)
DOMAIN_SYS_OTHERS_SPACE_VALUE = (
(u"cf_spaceclean", u""),
(u"cf_spacemail", u""),
)
DOMAIN_SYS_OTHERS_SPACE_TYPE = (
(u"cf_spaceclean", u"system"),
(u"cf_spacemail", u"system"),
)
DOMAIN_SYS_OTHERS_ATTACH_PARAMS = (
(u"cf_online_attach", _(u"客户端网络附件")),
)
DOMAIN_SYS_OTHERS_ATTACH_VALUE = (
(u"cf_online_attach", u""),
)
DOMAIN_SYS_OTHERS_ATTACH_TYPE = (
(u"cf_online_attach", u"system"),
)
DOMAIN_SIGN_PARAMS = (
(u'cf_domain_signature',_(u'域签名')),
(u'sw_domain_signature',_(u'域签名开关')),
)
DOMAIN_SIGN_VALUE = (
(u'cf_domain_signature',u''),
(u'sw_domain_signature',u'-1'),
)
DOMAIN_SIGN_TYPE = (
(u'cf_domain_signature',u'system'),
(u'sw_domain_signature',u'system'),
)
DOMAIN_SIGN_PERSONAL_PARAMS = (
(u'cf_personal_sign',_(u'个人签名模板')),
)
DOMAIN_SIGN_PERSONAL_VALUE = (
(u'cf_personal_sign',u''),
)
DOMAIN_SIGN_PERSONAL_TYPE = (
(u'cf_personal_sign',u'webmail'),
)
# ------个人签名 的输入参数 --------
# Default personal-signature template: HTML with {NAME}/{POSITION}/{DEPARTMENT}/
# {TELEPHONE}/{WORKPHONE}/{EMAIL} placeholders, stored base64-encoded.
# NOTE(review): the literal contains stray `_(u"` fragments around the Chinese
# text -- looks like corrupted markup from an earlier edit; left byte-for-byte
# as-is so the stored template value does not change.
DOMAIN_PERSONAL_DEFAULT_CODE = """<p><span style="font-size:16px;"><strong>{NAME} [<span style="font-size:14px;">{POSITION}</span>]{DEPARTMENT}<br /></strong></span></p><p><span style="white-space:normal;font-size:16px;"><strong>{TELEPHONE}</strong></span></p><p><br /><strong></strong></p><p><span style="font-size:14px;_(u"><strong>这里填公司名称<br /></strong></span></p><p>地址:这里填公司地址</p><p>电话:<span style=")white-space:normal;_(u">{WORKPHONE} 传真:这里填传真号码 邮箱:{EMAIL}<br /></span></p><br /><p><span style=")white-space:normal;"><br /></span></p>"""
# base64.encodestring was deprecated in Python 3.1 and removed in 3.9;
# encodebytes is the same function under its modern name (identical output,
# including the trailing newline), so prefer it when available and keep the
# legacy byte-string path for Python 2.
if hasattr(base64, "encodebytes"):  # Python 3
    DOMAIN_PERSONAL_DEFAULT_CODE = base64.encodebytes(
        DOMAIN_PERSONAL_DEFAULT_CODE.encode("utf-8")
    ).decode("ascii")
else:  # Python 2
    DOMAIN_PERSONAL_DEFAULT_CODE = base64.encodestring(DOMAIN_PERSONAL_DEFAULT_CODE)
DOMAIN_PERSONAL_DEFAULT_CODE = u"{}".format(DOMAIN_PERSONAL_DEFAULT_CODE)
DOMAIN_SIGN_PERSONAL_VALUE_DEFAULT = (
(u'personal_sign_new',u'-1'),
(u'personal_sign_forward',u'-1'),
(u'personal_sign_auto',u'1'),
(u'personal_sign_templ',DOMAIN_PERSONAL_DEFAULT_CODE),
)
DOMAIN_MODULE_HOME_PARAMS = (
#(u'sw_business_tools', u'商务小工具栏目'), 新版本webmail去掉
#(u'sw_wgt_cale', u'万年历'),
#(u'sw_wgt_calc', u'万用计算器'),
#(u'sw_wgt_maps', u'城市地图'),
#(u'sw_email_used_see', u'用户已用邮箱容量查看功能'),
#(u'sw_weather', u'天气预报功能'),
#(u'sw_oab', u'企业通讯录'),
#(u'sw_department_openall', u'企业通讯录域组合'),
#(u'sw_dept_showall', u'父部门中是否显示子部门邮件账号'),
#(u'sw_netdisk', u'网络硬盘功能'),
#(u'sw_calendar', u'日程功能'),
#(u'sw_notes', u'便签功能'),
)
DOMAIN_MODULE_HOME_VALUE = (
#(u'sw_business_tools', u'1'),
#(u'sw_wgt_cale', u'1'),
#(u'sw_wgt_calc', u'1'),
#(u'sw_wgt_maps', u'1'),
#(u'sw_email_used_see', u'1'), #邮箱容量查看功能,这开关去掉
#(u'sw_weather', u'1'),
#(u'sw_oab', u'1'),
#(u'sw_department_openall', u'1'),
#(u'sw_dept_showall', u'1'),
#(u'sw_netdisk', u'1'),
#(u'sw_calendar', u'1'),
#(u'sw_notes', u'1'),
)
DOMAIN_MODULE_HOME_TYPE = (
#(u'sw_business_tools', u'webmail'),
#(u'sw_wgt_cale', u'webmail'),
#(u'sw_wgt_calc', u'webmail'),
#(u'sw_wgt_maps', u'webmail'),
#(u'sw_email_used_see', u'webmail'),
#(u'sw_weather', u'webmail'),
#(u'sw_oab', u'webmail'),
#(u'sw_department_openall', u'webmail'),
#(u'sw_dept_showall', u'webmail'),
#(u'sw_netdisk', u'webmail'),
#(u'sw_calendar', u'webmail'),
#(u'sw_notes', u'webmail'),
)
DOMAIN_MODULE_MAIL_PARAMS = (
#(u'sw_drafts', u'保存草稿功能'),
#(u'sw_mail_encryption', u'发送邮件显示加密选项'),
#(u'sw_show_add_paper', u'显示地址簿和信纸模块'),
#(u'sw_mailpaper', u'去掉信纸模块'),
#(u'sw_auto_receipt', u'自动发送回执功能'), 这个开关在新版没什么意义
(u'sw_mail_in_reply_to', _(u'添加Reply-To到邮件头')),
(u'sw_mail_recall_notify', _(u'邮件召回成功后提示收件人')),
(u'sw_save_client_sent_email', _(u'保存客户端已发送邮件')),
(u'sw_oab_dumpbutton', _(u'通讯录导出按钮开关')),
(u'oab_show_mod', _(u'企业通讯录设置')), #新版webmail使用
(u'sw_oab_share', _(u'其他域通讯录共享')),
(u'sw_cab', _(u'公共通讯录')),
)
DOMAIN_MODULE_MAIL_VALUE = (
#(u'sw_drafts', u'1'),
#(u'sw_mail_encryption', u'-1'),
#(u'sw_show_add_paper', u'-1'),
#(u'sw_mailpaper', u'-1'),
#(u'sw_auto_receipt', u'1'),
(u'sw_mail_in_reply_to', u'1'),
(u'sw_mail_recall_notify', u'1'),
(u'sw_save_client_sent_email', u'-1'),
(u'sw_oab_dumpbutton', u'1'),
(u'oab_show_mod', u'1'),
(u'sw_oab_share', u'1'),
(u'sw_cab', u'1'),
)
DOMAIN_MODULE_MAIL_TYPE = (
#(u'sw_drafts', u'webmail'),
#(u'sw_mail_encryption', u'webmail'),
#(u'sw_show_add_paper', u'webmail'),
#(u'sw_mailpaper', u'webmail'),
#(u'sw_auto_receipt', u'webmail'),
(u'sw_mail_in_reply_to', u'webmail'),
(u'sw_mail_recall_notify', u'webmail'),
(u'sw_save_client_sent_email', u'webmail'),
(u'sw_oab_dumpbutton', u'webmail'),#是否显示通讯录导出按钮
(u'oab_show_mod', u'webmail'), # JSON, 显示所有部门 等按钮设置
(u'sw_oab_share', u'webmail'),
(u'sw_cab', u'webmail'),
)
# Per-domain "settings" module switches: (config key, translated display label).
DOMAIN_MODULE_SET_PARAMS = (
    #(u'sw_change_userinfo', u'个人资料功能'),
    # NOTE(review): the label below is the literal placeholder "<PASSWORD>" --
    # almost certainly anonymization/redaction residue; the original Chinese
    # label for the change-password switch cannot be recovered from this file.
    (u'sw_change_pass', _(u'<PASSWORD>')),
    #(u'sw_options', u'参数设置功能'),
    #(u'sw_signature', u'邮件签名功能'),
    #(u'sw_auto_reply', u'自动回复功能'),
    #(u'sw_auto_forward', u'自动转发功能'),
    #(u'sys_userbwlist', u'黑白名单功能'),
    #(u'sw_autoforward_visible', u'设置自动转发默认值'),
    (u'sw_mailboxmove', _(u'邮箱搬家功能')),
    (u'sw_feedback', _(u'邮箱意见反馈功能')),
    (u'sw_zhaohui', _(u'邮件召回记录查看')),
    (u'sw_cfilter', _(u'邮件过滤功能')),
    (u'sw_smtptransfer_visible', _(u'SMTP外发邮件代理')),
    (u'sw_realaddress_alert', _(u'代发邮件地址提醒')),
    (u'sw_time_mode', _(u'邮件内容中时间显示')),
)
DOMAIN_MODULE_SET_VALUE = (
#(u'sw_change_userinfo', u'1'),
(u'sw_change_pass', u'1'),
#(u'sw_options', u'1'),
#(u'sw_signature', u'1'),
#(u'sw_auto_reply', u'1'),
#(u'sw_auto_forward', u'1'),
#(u'userbwlist', u'黑白名单功能'),
#(u'sw_autoforward_visible', u'1'),
(u'sw_mailboxmove', u'1'),
(u'sw_feedback', u'1'),
(u'sw_zhaohui', u'1'),
(u'sw_cfilter', u'1'),
(u'sw_smtptransfer_visible', u'-1'),
(u'sw_realaddress_alert', u'1'),
(u'sw_time_mode', u'-1'),
)
# Config-scope table for the "settings" module switches; every entry in this
# table maps its key to the u'webmail' scope.
DOMAIN_MODULE_SET_TYPE = (
    #(u'sw_change_userinfo', u'webmail'),
    # Restored from the corrupted placeholder u'<PASSWORD>mail' (anonymization
    # residue): all sibling entries use u'webmail'.
    (u'sw_change_pass', u'webmail'),
    #(u'sw_options', u'webmail'),
    #(u'sw_signature', u'webmail'),
    #(u'sw_auto_reply', u'webmail'),
    #(u'sw_auto_forward', u'webmail'),
    #(u'userbwlist', u'-1'),
    #(u'sw_autoforward_visible', u'webmail'),
    (u'sw_mailboxmove', u'webmail'),
    (u'sw_feedback', u'webmail'),
    (u'sw_zhaohui', u'webmail'),
    (u'sw_cfilter', u'webmail'),
    (u'sw_smtptransfer_visible', u'webmail'),
    (u'sw_realaddress_alert', u'webmail'),
    (u'sw_time_mode', u'webmail'),
)
DOMAIN_MODULE_OTHER_PARAMS = (
#(u'sw_folder_clean', u'清空文件夹功能'),
#(u'sw_user_score', u'用户积分功能'),
#部门邮件列表 这个开关毫无存在意义
#(u'sw_dept_maillist', u'部门邮件列表'),
)
DOMAIN_MODULE_OTHER_VALUE = (
#(u'sw_folder_clean', u'-1'),
#(u'sw_user_score', u'1'),
#(u'sw_dept_maillist', u'-1'),
)
DOMAIN_MODULE_OTHER_TYPE = (
#(u'sw_folder_clean', u'webmail'),
#(u'sw_user_score', u'webmail'),
#(u'sw_dept_maillist', u'webmail'),
)
DOMAIN_SECRET_GRADE_1 = u'0' #秘密
DOMAIN_SECRET_GRADE_2 = u'1' #机密
DOMAIN_SECRET_GRADE_3 = u'2' #绝密
DOMAIN_SECRET_GRADE_ALL = (
(DOMAIN_SECRET_GRADE_1, _(u"秘密")),
(DOMAIN_SECRET_GRADE_2, _(u"机密")),
(DOMAIN_SECRET_GRADE_3, _(u"绝密")),
)
DOMAIN_PUBLIC_GENDER_CHOICES = (
(u'M',_(u'男')),
(u'F',_(u'女')),
)
DOMAIN_LIST_PARAMS = (
(u"cf_limit_mailbox_cnt", _(u"限定邮箱数量")),
(u"cf_limit_mailbox_size", _(u"限定邮箱空间总容量")),
(u"cf_limit_netdisk_size", _(u"限定网络硬盘总容量")),
(u"cf_limit_email_size", _(u"发送邮件限制大小")),
#(u"cf_limit_attach_size", u"WebMail单附件大小"),
(u"cf_def_mailbox_size", _(u"用户邮箱默认容量")),
(u"cf_def_netdisk_size", _(u"网络硬盘默认容量")),
(u"limit_send", _(u"发信功能限制")),
(u"limit_recv", _(u"收信功能限制")),
)
DOMAIN_LIST_PARAMS_VALUE = (
(u"cf_limit_mailbox_cnt", u"8000"),
(u"cf_limit_mailbox_size", u"0"),
(u"cf_limit_netdisk_size", u"500"),
(u"cf_limit_email_size", u"0"),
#(u"cf_limit_attach_size", u"10"),
(u"cf_def_mailbox_size", u"100"),
(u"cf_def_netdisk_size", u"100"),
(u"limit_send", u"-1"),
(u"limit_recv", u"-1"),
)
DOMAIN_LIST_PARAMS_TYPE = (
(u"cf_limit_mailbox_cnt", u"system"),
(u"cf_limit_mailbox_size", u"system"),
(u"cf_limit_netdisk_size", u"system"),
(u"cf_limit_email_size", u"system"),
#(u"cf_limit_attach_size", u"system"),
(u"cf_def_mailbox_size", u"system"),
(u"cf_def_netdisk_size", u"system"),
(u"limit_send", u"system"),
(u"limit_recv", u"system"),
)
DOMAIN_WEB_BASIC_PARAMS = (
(u"cf_title", _(u"页面标题")),
(u"cf_login_page", _(u"登录页面自动输入域名")),
(u"sw_icp_show", _(u"ICP 备案是否显示")),
(u"cf_icp_number", _(u"ICP 备案号")),
(u"cf_icp_url", _(u"ICP 备案链接地址")),
(u"cf_faq_url", _(u"帮助文件地址")),
(u"sw_unique_login", _(u"登录系统地点限制")),
#(u"sw_login_captcha_error_num", u"启用验证码功能"),
(u"cf_logout_url", _(u"登出跳转地址")),
(u"sw_login_ssl", _(u"SSL访问")),
)
DOMAIN_WEB_BASIC_VALUE = (
(u"cf_title", u""),
(u"cf_login_page", u"default"),
(u"sw_icp_show", u"-1"),
(u"cf_icp_number", u""),
(u"cf_icp_url", u""),
(u"cf_faq_url", u"http://www.comingchina.com/html/faq/"),
(u"sw_unique_login", u"-1"),
#(u"sw_login_captcha_error_num", u"-1"),
(u"cf_logout_url", u""),
(u"sw_login_ssl", u"-1"),
)
DOMAIN_WEB_BASIC_TYPE = (
(u"cf_title", u"webmail"),
(u"cf_login_page", u"webmail"),
(u"sw_icp_show", u"webmail"),
(u"cf_icp_number", u"webmail"),
(u"cf_icp_url", u"webmail"),
(u"cf_faq_url", u"webmail"),
(u"sw_unique_login", u"webmail"),
(u"sw_login_ssl", u"webmail"),
#(u"sw_login_captcha_error_num", u"webmail"),
(u"cf_logout_url", u"webmail"),
)
DOMAIN_WEB_ANOUNCE_PARAMS = (
(u"cf_announce_set", _(u"设置系统公告")),
(u"cf_announce", _(u"系统公告")),
)
DOMAIN_WEB_ANOUNCE_VALUE = (
(u"cf_announce_set", u""),
(u"cf_announce", u""),
)
DOMAIN_WEB_ANOUNCE_YPE = (
(u"cf_announce_set", u"webmail"),
(u"cf_announce", u"webmail"),
)
DOMAIN_LOGO_PARAMS = (
(u"cf_webmail_logo", _(u"Webmail Logo 设置")),
(u"cf_login_logo", _(u"登录页面 Logo 设置")),
)
DOMAIN_LOGO_VALUE = (
(u"cf_webmail_logo", u""),
(u"cf_login_logo", u""),
)
DOMAIN_LOGO_TYPE = (
(u"cf_webmail_logo", u"webmail"),
(u"cf_login_logo", u"webmail"),
)
DOMAIN_LOGIN_TEMP_LIST = (
(u"default", _(u"默认")),
(u"manual", _(u"手动域名")),
(u"adlogin", _(u"广告风格")),
(u"gao", _(u"大气管理员")),
(u"test", _(u"轮播图")),
(u"center", _(u"登录框居中")),
(u"sanya", _(u"背景图风格")),
)
DOMAIN_WEB_AD_PARAMS = (
#(u"cf_adsetting", u"页面广告设置"), #老版本webmail
(u"cf_adsetting2", _(u"页面广告设置")), #新版本webmail
)
DOMAIN_WEB_AD_VALUE = (
(u"cf_adsetting2", u""),
)
DOMAIN_WEB_AD_TYPE = (
(u"cf_adsetting2", u"webmail"),
)
DOMAIN_WEB_LINK_PARAMS = (
#(u"cf_webmail_link", u"首页链接设置"), #老版本webmail
(u"cf_webmail_link2", _(u"首页链接设置")), #新版本webmail
)
DOMAIN_WEB_LINK_VALUE = (
(u"cf_webmail_link2", u""),
)
DOMAIN_WEB_LINK_TYPE = (
(u"cf_webmail_link2", u"webmail"),
)
| [
"django.utils.translation.ugettext_lazy",
"base64.encodestring"
] | [((10718, 10767), 'base64.encodestring', 'base64.encodestring', (['DOMAIN_PERSONAL_DEFAULT_CODE'], {}), '(DOMAIN_PERSONAL_DEFAULT_CODE)\n', (10737, 10767), False, 'import base64\n'), ((162, 174), 'django.utils.translation.ugettext_lazy', '_', (['u"""限定邮箱数量"""'], {}), "(u'限定邮箱数量')\n", (163, 174), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((265, 280), 'django.utils.translation.ugettext_lazy', '_', (['u"""限定邮箱空间总容量"""'], {}), "(u'限定邮箱空间总容量')\n", (266, 280), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((314, 329), 'django.utils.translation.ugettext_lazy', '_', (['u"""限定网络硬盘总容量"""'], {}), "(u'限定网络硬盘总容量')\n", (315, 329), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((361, 375), 'django.utils.translation.ugettext_lazy', '_', (['u"""发送邮件限制大小"""'], {}), "(u'发送邮件限制大小')\n", (362, 375), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((477, 491), 'django.utils.translation.ugettext_lazy', '_', (['u"""用户邮箱默认容量"""'], {}), "(u'用户邮箱默认容量')\n", (478, 491), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((523, 537), 'django.utils.translation.ugettext_lazy', '_', (['u"""网络硬盘默认容量"""'], {}), "(u'网络硬盘默认容量')\n", (524, 537), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1366, 1377), 'django.utils.translation.ugettext_lazy', '_', (['u"""已分配邮箱"""'], {}), "(u'已分配邮箱')\n", (1367, 1377), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1399, 1410), 'django.utils.translation.ugettext_lazy', '_', (['u"""已分配别名"""'], {}), "(u'已分配别名')\n", (1400, 1410), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1432, 1445), 'django.utils.translation.ugettext_lazy', '_', (['u"""已分配邮箱空间"""'], {}), "(u'已分配邮箱空间')\n", (1433, 1445), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1469, 1482), 'django.utils.translation.ugettext_lazy', '_', (['u"""已分配网盘空间"""'], {}), "(u'已分配网盘空间')\n", (1470, 1482), True, 'from 
django.utils.translation import ugettext_lazy as _\n'), ((1537, 1551), 'django.utils.translation.ugettext_lazy', '_', (['u"""用户申请邮箱功能"""'], {}), "(u'用户申请邮箱功能')\n", (1538, 1551), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1577, 1590), 'django.utils.translation.ugettext_lazy', '_', (['u"""管理员审核开通"""'], {}), "(u'管理员审核开通')\n", (1578, 1590), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1682, 1696), 'django.utils.translation.ugettext_lazy', '_', (['u"""新用户欢迎信功能"""'], {}), "(u'新用户欢迎信功能')\n", (1683, 1696), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1721, 1735), 'django.utils.translation.ugettext_lazy', '_', (['u"""新用户欢迎信功能"""'], {}), "(u'新用户欢迎信功能')\n", (1722, 1735), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2263, 2274), 'django.utils.translation.ugettext_lazy', '_', (['u"""欢迎信内容"""'], {}), "(u'欢迎信内容')\n", (2264, 2274), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2487, 2499), 'django.utils.translation.ugettext_lazy', '_', (['u"""用户注册协议"""'], {}), "(u'用户注册协议')\n", (2488, 2499), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2690, 2702), 'django.utils.translation.ugettext_lazy', '_', (['u"""发信功能限制"""'], {}), "(u'发信功能限制')\n", (2691, 2702), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2725, 2737), 'django.utils.translation.ugettext_lazy', '_', (['u"""收信功能限制"""'], {}), "(u'收信功能限制')\n", (2726, 2737), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2759, 2779), 'django.utils.translation.ugettext_lazy', '_', (['u"""POP/POPS邮箱收取功能"""'], {}), "(u'POP/POPS邮箱收取功能')\n", (2760, 2779), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2802, 2827), 'django.utils.translation.ugettext_lazy', '_', (['u"""IMAP/IMAPS客户端邮件收发功能"""'], {}), "(u'IMAP/IMAPS客户端邮件收发功能')\n", (2803, 2827), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2850, 2875), 
'django.utils.translation.ugettext_lazy', '_', (['u"""SMTP/SMTPS客户端邮件发送功能"""'], {}), "(u'SMTP/SMTPS客户端邮件发送功能')\n", (2851, 2875), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3300, 3315), 'django.utils.translation.ugettext_lazy', '_', (['u"""开启修改密码通知信"""'], {}), "(u'开启修改密码通知信')\n", (3301, 3315), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3345, 3357), 'django.utils.translation.ugettext_lazy', '_', (['u"""安全登录限制"""'], {}), "(u'安全登录限制')\n", (3346, 3357), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3381, 3393), 'django.utils.translation.ugettext_lazy', '_', (['u"""登陆IP限制"""'], {}), "(u'登陆IP限制')\n", (3382, 3393), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3751, 3764), 'django.utils.translation.ugettext_lazy', '_', (['u"""修改密码通知信"""'], {}), "(u'修改密码通知信')\n", (3752, 3764), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3982, 3996), 'django.utils.translation.ugettext_lazy', '_', (['u"""定期密码修改设置"""'], {}), "(u'定期密码修改设置')\n", (3983, 3996), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4020, 4032), 'django.utils.translation.ugettext_lazy', '_', (['u"""密码有效期间"""'], {}), "(u'密码有效期间')\n", (4021, 4032), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4100, 4114), 'django.utils.translation.ugettext_lazy', '_', (['u"""密码组成字符种类"""'], {}), "(u'密码组成字符种类')\n", (4101, 4114), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4138, 4152), 'django.utils.translation.ugettext_lazy', '_', (['u"""其他密码规则设置"""'], {}), "(u'其他密码规则设置')\n", (4139, 4152), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4178, 4196), 'django.utils.translation.ugettext_lazy', '_', (['u"""用户密码强度低于规则设置"""'], {}), "(u'用户密码强度低于规则设置')\n", (4179, 4196), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4720, 4734), 'django.utils.translation.ugettext_lazy', '_', (['u"""必须包含两种字符"""'], {}), 
"(u'必须包含两种字符')\n", (4721, 4734), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4748, 4762), 'django.utils.translation.ugettext_lazy', '_', (['u"""必须包含三种字符"""'], {}), "(u'必须包含三种字符')\n", (4749, 4762), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4776, 4790), 'django.utils.translation.ugettext_lazy', '_', (['u"""必须包含四种字符"""'], {}), "(u'必须包含四种字符')\n", (4777, 4790), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6095, 6108), 'django.utils.translation.ugettext_lazy', '_', (['u"""第三方登录验证"""'], {}), "(u'第三方登录验证')\n", (6096, 6108), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6137, 6153), 'django.utils.translation.ugettext_lazy', '_', (['u"""接口修改密码是否加密"""'], {}), "(u'接口修改密码是否加密')\n", (6138, 6153), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6175, 6189), 'django.utils.translation.ugettext_lazy', '_', (['u"""即时通讯软件集成"""'], {}), "(u'即时通讯软件集成')\n", (6176, 6189), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6214, 6236), 'django.utils.translation.ugettext_lazy', '_', (['u"""登录防止xss启用token验证"""'], {}), "(u'登录防止xss启用token验证')\n", (6215, 6236), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6677, 6690), 'django.utils.translation.ugettext_lazy', '_', (['u"""第三方登录验证"""'], {}), "(u'第三方登录验证')\n", (6678, 6690), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6918, 6932), 'django.utils.translation.ugettext_lazy', '_', (['u"""即时通讯软件集成"""'], {}), "(u'即时通讯软件集成')\n", (6919, 6932), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7225, 7241), 'django.utils.translation.ugettext_lazy', '_', (['u"""邮箱空间定时清理功能"""'], {}), "(u'邮箱空间定时清理功能')\n", (7226, 7241), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7282, 7297), 'django.utils.translation.ugettext_lazy', '_', (['u"""客户端网络附件开关"""'], {}), "(u'客户端网络附件开关')\n", (7283, 7297), True, 'from django.utils.translation import 
ugettext_lazy as _\n'), ((7385, 7402), 'django.utils.translation.ugettext_lazy', '_', (['u"""收件时是否过滤重复邮件"""'], {}), "(u'收件时是否过滤重复邮件')\n", (7386, 7402), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7461, 7483), 'django.utils.translation.ugettext_lazy', '_', (['u"""邮件列表发来邮件显示邮件列表名称"""'], {}), "(u'邮件列表发来邮件显示邮件列表名称')\n", (7462, 7483), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7507, 7521), 'django.utils.translation.ugettext_lazy', '_', (['u"""用户申请邮箱功能"""'], {}), "(u'用户申请邮箱功能')\n", (7508, 7521), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7591, 7605), 'django.utils.translation.ugettext_lazy', '_', (['u"""新用户欢迎信功能"""'], {}), "(u'新用户欢迎信功能')\n", (7592, 7605), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7630, 7644), 'django.utils.translation.ugettext_lazy', '_', (['u"""新用户欢迎信功能"""'], {}), "(u'新用户欢迎信功能')\n", (7631, 7644), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7681, 7695), 'django.utils.translation.ugettext_lazy', '_', (['u"""短信通知接收邮件"""'], {}), "(u'短信通知接收邮件')\n", (7682, 7695), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7732, 7746), 'django.utils.translation.ugettext_lazy', '_', (['u"""短信通知发送邮件"""'], {}), "(u'短信通知发送邮件')\n", (7733, 7746), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7783, 7795), 'django.utils.translation.ugettext_lazy', '_', (['u"""短信模块设置"""'], {}), "(u'短信模块设置')\n", (7784, 7795), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((9037, 9049), 'django.utils.translation.ugettext_lazy', '_', (['u"""邮箱空间清理"""'], {}), "(u'邮箱空间清理')\n", (9038, 9049), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((9082, 9094), 'django.utils.translation.ugettext_lazy', '_', (['u"""邮箱空间清理"""'], {}), "(u'邮箱空间清理')\n", (9083, 9094), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((9398, 9411), 'django.utils.translation.ugettext_lazy', '_', 
(['u"""客户端网络附件"""'], {}), "(u'客户端网络附件')\n", (9399, 9411), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((9627, 9636), 'django.utils.translation.ugettext_lazy', '_', (['u"""域签名"""'], {}), "(u'域签名')\n", (9628, 9636), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((9667, 9678), 'django.utils.translation.ugettext_lazy', '_', (['u"""域签名开关"""'], {}), "(u'域签名开关')\n", (9668, 9678), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((9940, 9952), 'django.utils.translation.ugettext_lazy', '_', (['u"""个人签名模板"""'], {}), "(u'个人签名模板')\n", (9941, 9952), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((12681, 12701), 'django.utils.translation.ugettext_lazy', '_', (['u"""添加Reply-To到邮件头"""'], {}), "(u'添加Reply-To到邮件头')\n", (12682, 12701), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((12735, 12753), 'django.utils.translation.ugettext_lazy', '_', (['u"""邮件召回成功后提示收件人"""'], {}), "(u'邮件召回成功后提示收件人')\n", (12736, 12753), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((12791, 12807), 'django.utils.translation.ugettext_lazy', '_', (['u"""保存客户端已发送邮件"""'], {}), "(u'保存客户端已发送邮件')\n", (12792, 12807), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((12837, 12852), 'django.utils.translation.ugettext_lazy', '_', (['u"""通讯录导出按钮开关"""'], {}), "(u'通讯录导出按钮开关')\n", (12838, 12852), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((12877, 12890), 'django.utils.translation.ugettext_lazy', '_', (['u"""企业通讯录设置"""'], {}), "(u'企业通讯录设置')\n", (12878, 12890), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((12931, 12945), 'django.utils.translation.ugettext_lazy', '_', (['u"""其他域通讯录共享"""'], {}), "(u'其他域通讯录共享')\n", (12932, 12945), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((12964, 12975), 'django.utils.translation.ugettext_lazy', '_', (['u"""公共通讯录"""'], {}), "(u'公共通讯录')\n", (12965, 12975), True, 'from 
django.utils.translation import ugettext_lazy as _\n'), ((14038, 14054), 'django.utils.translation.ugettext_lazy', '_', (['u"""<PASSWORD>"""'], {}), "(u'<PASSWORD>')\n", (14039, 14054), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((14309, 14321), 'django.utils.translation.ugettext_lazy', '_', (['u"""邮箱搬家功能"""'], {}), "(u'邮箱搬家功能')\n", (14310, 14321), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((14345, 14359), 'django.utils.translation.ugettext_lazy', '_', (['u"""邮箱意见反馈功能"""'], {}), "(u'邮箱意见反馈功能')\n", (14346, 14359), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((14383, 14397), 'django.utils.translation.ugettext_lazy', '_', (['u"""邮件召回记录查看"""'], {}), "(u'邮件召回记录查看')\n", (14384, 14397), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((14420, 14432), 'django.utils.translation.ugettext_lazy', '_', (['u"""邮件过滤功能"""'], {}), "(u'邮件过滤功能')\n", (14421, 14432), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((14468, 14484), 'django.utils.translation.ugettext_lazy', '_', (['u"""SMTP外发邮件代理"""'], {}), "(u'SMTP外发邮件代理')\n", (14469, 14484), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((14517, 14531), 'django.utils.translation.ugettext_lazy', '_', (['u"""代发邮件地址提醒"""'], {}), "(u'代发邮件地址提醒')\n", (14518, 14531), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((14556, 14571), 'django.utils.translation.ugettext_lazy', '_', (['u"""邮件内容中时间显示"""'], {}), "(u'邮件内容中时间显示')\n", (14557, 14571), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((16307, 16315), 'django.utils.translation.ugettext_lazy', '_', (['u"""秘密"""'], {}), "(u'秘密')\n", (16308, 16315), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((16346, 16354), 'django.utils.translation.ugettext_lazy', '_', (['u"""机密"""'], {}), "(u'机密')\n", (16347, 16354), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((16385, 16393), 
'django.utils.translation.ugettext_lazy', '_', (['u"""绝密"""'], {}), "(u'绝密')\n", (16386, 16393), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((16442, 16449), 'django.utils.translation.ugettext_lazy', '_', (['u"""男"""'], {}), "(u'男')\n", (16443, 16449), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((16462, 16469), 'django.utils.translation.ugettext_lazy', '_', (['u"""女"""'], {}), "(u'女')\n", (16463, 16469), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((16528, 16540), 'django.utils.translation.ugettext_lazy', '_', (['u"""限定邮箱数量"""'], {}), "(u'限定邮箱数量')\n", (16529, 16540), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((16574, 16589), 'django.utils.translation.ugettext_lazy', '_', (['u"""限定邮箱空间总容量"""'], {}), "(u'限定邮箱空间总容量')\n", (16575, 16589), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((16623, 16638), 'django.utils.translation.ugettext_lazy', '_', (['u"""限定网络硬盘总容量"""'], {}), "(u'限定网络硬盘总容量')\n", (16624, 16638), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((16670, 16684), 'django.utils.translation.ugettext_lazy', '_', (['u"""发送邮件限制大小"""'], {}), "(u'发送邮件限制大小')\n", (16671, 16684), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((16765, 16779), 'django.utils.translation.ugettext_lazy', '_', (['u"""用户邮箱默认容量"""'], {}), "(u'用户邮箱默认容量')\n", (16766, 16779), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((16811, 16825), 'django.utils.translation.ugettext_lazy', '_', (['u"""网络硬盘默认容量"""'], {}), "(u'网络硬盘默认容量')\n", (16812, 16825), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((16848, 16860), 'django.utils.translation.ugettext_lazy', '_', (['u"""发信功能限制"""'], {}), "(u'发信功能限制')\n", (16849, 16860), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((16883, 16895), 'django.utils.translation.ugettext_lazy', '_', (['u"""收信功能限制"""'], {}), "(u'收信功能限制')\n", (16884, 
16895), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((17783, 17793), 'django.utils.translation.ugettext_lazy', '_', (['u"""页面标题"""'], {}), "(u'页面标题')\n", (17784, 17793), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((17819, 17835), 'django.utils.translation.ugettext_lazy', '_', (['u"""登录页面自动输入域名"""'], {}), "(u'登录页面自动输入域名')\n", (17820, 17835), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((17859, 17875), 'django.utils.translation.ugettext_lazy', '_', (['u"""ICP 备案是否显示"""'], {}), "(u'ICP 备案是否显示')\n", (17860, 17875), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((17901, 17914), 'django.utils.translation.ugettext_lazy', '_', (['u"""ICP 备案号"""'], {}), "(u'ICP 备案号')\n", (17902, 17914), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((17937, 17953), 'django.utils.translation.ugettext_lazy', '_', (['u"""ICP 备案链接地址"""'], {}), "(u'ICP 备案链接地址')\n", (17938, 17953), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((17976, 17988), 'django.utils.translation.ugettext_lazy', '_', (['u"""帮助文件地址"""'], {}), "(u'帮助文件地址')\n", (17977, 17988), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((18016, 18030), 'django.utils.translation.ugettext_lazy', '_', (['u"""登录系统地点限制"""'], {}), "(u'登录系统地点限制')\n", (18017, 18030), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((18106, 18118), 'django.utils.translation.ugettext_lazy', '_', (['u"""登出跳转地址"""'], {}), "(u'登出跳转地址')\n", (18107, 18118), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((18143, 18154), 'django.utils.translation.ugettext_lazy', '_', (['u"""SSL访问"""'], {}), "(u'SSL访问')\n", (18144, 18154), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((19035, 19047), 'django.utils.translation.ugettext_lazy', '_', (['u"""设置系统公告"""'], {}), "(u'设置系统公告')\n", (19036, 19047), True, 'from django.utils.translation import ugettext_lazy as _\n'), 
((19071, 19081), 'django.utils.translation.ugettext_lazy', '_', (['u"""系统公告"""'], {}), "(u'系统公告')\n", (19072, 19081), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((19333, 19354), 'django.utils.translation.ugettext_lazy', '_', (['u"""Webmail Logo 设置"""'], {}), "(u'Webmail Logo 设置')\n", (19334, 19354), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((19380, 19398), 'django.utils.translation.ugettext_lazy', '_', (['u"""登录页面 Logo 设置"""'], {}), "(u'登录页面 Logo 设置')\n", (19381, 19398), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((19631, 19639), 'django.utils.translation.ugettext_lazy', '_', (['u"""默认"""'], {}), "(u'默认')\n", (19632, 19639), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((19658, 19668), 'django.utils.translation.ugettext_lazy', '_', (['u"""手动域名"""'], {}), "(u'手动域名')\n", (19659, 19668), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((19688, 19698), 'django.utils.translation.ugettext_lazy', '_', (['u"""广告风格"""'], {}), "(u'广告风格')\n", (19689, 19698), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((19714, 19725), 'django.utils.translation.ugettext_lazy', '_', (['u"""大气管理员"""'], {}), "(u'大气管理员')\n", (19715, 19725), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((19742, 19751), 'django.utils.translation.ugettext_lazy', '_', (['u"""轮播图"""'], {}), "(u'轮播图')\n", (19743, 19751), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((19770, 19781), 'django.utils.translation.ugettext_lazy', '_', (['u"""登录框居中"""'], {}), "(u'登录框居中')\n", (19771, 19781), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((19799, 19810), 'django.utils.translation.ugettext_lazy', '_', (['u"""背景图风格"""'], {}), "(u'背景图风格')\n", (19800, 19810), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((19913, 19925), 'django.utils.translation.ugettext_lazy', '_', (['u"""页面广告设置"""'], {}), 
"(u'页面广告设置')\n", (19914, 19925), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((20171, 20183), 'django.utils.translation.ugettext_lazy', '_', (['u"""首页链接设置"""'], {}), "(u'首页链接设置')\n", (20172, 20183), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
from django.shortcuts import render, redirect
from .forms import UserRegisterForm
def register(request):
    """Handle user sign-up.

    GET renders an empty registration form; a valid POST creates the user
    and redirects to the login page. An invalid POST re-renders the bound
    form so its validation errors are shown.
    """
    is_post = request.method == 'POST'
    form = UserRegisterForm(request.POST) if is_post else UserRegisterForm()
    if is_post and form.is_valid():
        form.save()
        return redirect('login')
    return render(request, 'accounts/register.html', {'form': form})
| [
"django.shortcuts.render",
"django.shortcuts.redirect"
] | [((330, 387), 'django.shortcuts.render', 'render', (['request', '"""accounts/register.html"""', "{'form': form}"], {}), "(request, 'accounts/register.html', {'form': form})\n", (336, 387), False, 'from django.shortcuts import render, redirect\n'), ((256, 273), 'django.shortcuts.redirect', 'redirect', (['"""login"""'], {}), "('login')\n", (264, 273), False, 'from django.shortcuts import render, redirect\n')] |
import logging
from sockjs.tornado import SockJSConnection
from thunderpush.sortingstation import SortingStation
# Prefer the faster simplejson when available; fall back to the stdlib json.
try:
    import simplejson as json
except ImportError:
    import json
# NOTE(review): getLogger() with no name returns the *root* logger;
# getLogger(__name__) is the usual convention -- confirm before changing.
logger = logging.getLogger()
class ThunderSocketHandler(SockJSConnection):
    """SockJS handler speaking the ThunderPush text protocol.

    Clients send commands of the form ``COMMAND arg1[:arg2[:argN]]``;
    supported commands are CONNECT, SUBSCRIBE and UNSUBSCRIBE.
    """

    def on_open(self, info):
        logger.debug("New connection opened.")
        # The messenger is attached only after a successful CONNECT command.
        self.messenger = None

    def on_message(self, msg):
        logger.debug("Got message: %s" % msg)
        self.process_message(msg)

    def on_close(self):
        if self.connected:
            self.messenger.unregister_user(self)
            self.messenger = None
        logger.debug("User %s has disconnected." % getattr(self, "userid", None))

    def force_disconnect(self):
        self.close(9002, "Server closed the connection (intentionally).")

    def process_message(self, msg):
        """Dispatch one raw client message.

        Every client message is expected to look like::

            COMMAND argument1[:argument2[:argumentX]]
        """
        parts = msg.split(" ")
        handlers = {
            'CONNECT': self.handle_connect,
            'SUBSCRIBE': self.handle_subscribe,
            'UNSUBSCRIBE': self.handle_unsubscribe
        }
        try:
            handlers[parts[0]](parts[1])
        except (KeyError, IndexError):
            # Unknown command or missing argument token.
            logger.warning("Received invalid message: %s." % msg)

    def handle_connect(self, args):
        """CONNECT userid:apikey -- bind this socket to a messenger."""
        if self.connected:
            logger.warning("User already connected.")
            return
        try:
            self.userid, self.apikey = args.split(":")
        except ValueError:
            logger.warning("Invalid message syntax.")
            return
        # Look up the messenger registered for this API key.
        station = SortingStation.instance()
        self.messenger = station.get_messenger_by_apikey(self.apikey)
        if self.messenger:
            self.messenger.register_user(self)
        else:
            self.close(9000, "Invalid API key.")

    def handle_subscribe(self, args):
        """SUBSCRIBE chan1[:chan2...] -- join the given channels."""
        if not self.connected:
            logger.warning("User not connected.")
            # Commands issued in the wrong order: drop the connection.
            self.close(9001, "Subscribing before connecting.")
            return
        for channel in (name for name in args.split(":") if name):
            self.messenger.subscribe_user_to_channel(self, channel)

    def handle_unsubscribe(self, args):
        """UNSUBSCRIBE chan1[:chan2...] -- leave the given channels."""
        if not self.connected:
            logger.warning("User not connected.")
            # Commands issued in the wrong order: drop the connection.
            self.close(9001, "Subscribing before connecting.")
            return
        for channel in (name for name in args.split(":") if name):
            self.messenger.unsubscribe_user_from_channel(self, channel)

    def close(self, code=3000, message="Go away!"):
        self.session.close(code, message)

    @property
    def connected(self):
        """True once a CONNECT has bound a messenger to this socket."""
        return bool(self.messenger)
| [
"logging.getLogger",
"thunderpush.sortingstation.SortingStation.instance"
] | [((196, 215), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (213, 215), False, 'import logging\n'), ((1820, 1845), 'thunderpush.sortingstation.SortingStation.instance', 'SortingStation.instance', ([], {}), '()\n', (1843, 1845), False, 'from thunderpush.sortingstation import SortingStation\n')] |
import argparse
class appOptions:
show_devices = '--show-devices'
clean_report = '--clean-report'
device_config = '--device-config'
global_config = '--global-config'
test_case = '--test-case'
tests_dir = '--tests-dir'
device = '--device'
test = '--test'
service_address = '--service-address'
bp = '--bp'
disable_screenshot = '--disable-screenshot'
output_dir = '--output-dir'
separate = '--separate'
allure_report = '--allure-report'
clean = '--clean'
log_level = '--pyauto-log-level'
# log_file = '--pyauto-log-file'
class Parser(object):
    """Thin wrapper around :class:`argparse.ArgumentParser` for the app runner.

    Registers every option named in :class:`appOptions` and exposes small
    helpers for parsing the command line.
    """

    def __init__(self, parser=None, attach=True):
        self.options = None
        self.parser = parser or argparse.ArgumentParser()
        if attach:
            self.addoption()

    def addoption(self):
        """Attach every known option group to the underlying parser."""
        self.add_help_option()
        self.add_config_option()    # configuration files
        self.add_device_option()    # target device
        self.add_tests_option()     # test modules
        self.add_log_option()       # logging
        self.add_output_option()    # report output
        self.add_appium_option()    # appium server
        self.add_testing_option()   # misc testing switches

    def parse_arg(self, op=None):
        """Parse *op* (or ``sys.argv``), cache and return the namespace."""
        self.options = self.parser.parse_args(op)
        return self.options

    def parse_known_args(self, op):
        """Parse only the options this parser knows; return (namespace, rest)."""
        return self.parser.parse_known_args(op)

    def add_config_option(self):
        """Configuration-file options."""
        add = self.parser.add_argument
        add(appOptions.device_config, type=str, help='device configuration file')
        add(appOptions.global_config, type=str, help='global configuration file')
        add(appOptions.test_case, type=str, help='Test case file')

    def add_device_option(self):
        """Target-device selection (falls back to the default device)."""
        self.parser.add_argument(
            appOptions.device, type=str,
            help='device to test on, such as ios, android, <device>')

    def add_tests_option(self):
        """Test case / test directory selection."""
        add = self.parser.add_argument
        add(appOptions.test, nargs='*',
            help='Test case to run, such as: ios, android, <dir>/<test_case.py>')
        add(appOptions.tests_dir, type=str,
            help='Test case to run, such as: ios, android, <dir>/<test_case.py>')

    def add_testing_option(self):
        """Miscellaneous testing switches."""
        self.parser.add_argument(
            appOptions.disable_screenshot, action='store_true',
            help='Disable device screenshot')

    def add_log_option(self):
        """Logging configuration."""
        self.parser.add_argument(
            appOptions.log_level, type=str,
            help='pyautotest log level')

    def add_output_option(self):
        """Report/output configuration."""
        add = self.parser.add_argument
        add(appOptions.output_dir, type=str, help='test report directory')
        add(appOptions.separate, action='store_true',
            help='separate report directory each run')
        add(appOptions.allure_report, action='store_true',
            help='generate allure report')
        add(appOptions.clean, action='store_true',
            help='--clean for allure report command')

    def add_appium_option(self):
        """Appium server configuration."""
        add = self.parser.add_argument
        add(appOptions.service_address, type=str, help='Appium service address')
        add(appOptions.bp, type=str, help='WebDriverAgent port or Bootstrap port')

    def add_help_option(self):
        """Informational / maintenance switches."""
        add = self.parser.add_argument
        add(appOptions.show_devices, action='store_true',
            help='show available devices in device.yml')
        add(appOptions.clean_report, action='store_true',
            help='clean reports, excluding logs')
class pytestOption(object):
    """Registers pyautotest command-line options on a pytest parser object."""

    def __init__(self, parser):
        self.parser = parser

    def add_config_option(self):
        """Configuration / data file options."""
        opt = self.parser.addoption
        opt('--device-config', type=str, help='device configuration file')
        opt('--global-config', type=str, help='global configuration file')
        opt('--test-case', type=str, help='Test case file')
        opt('--data', type=str, help='Data file')

    def add_device_option(self):
        """Target-device options (falls back to the default device)."""
        opt = self.parser.addoption
        opt('--device', type=str,
            help='device to test on, such as ios, android, <device>')
        opt('--system-port', type=str,
            help='android desired capabilities - systemPort')
        opt('--platform', type=str,
            help='testing device platform, such as ios/android')

    def add_case_option(self):
        """Test case selection."""
        self.parser.addoption(
            '--test', type=str,
            help='Test case to run, such as: ios, android, <test_case.py>')

    def add_log_option(self):
        """Logging configuration."""
        self.parser.addoption('--pyauto-log-file', type=str,
                              help='pyautotest log level')

    def add_output_option(self):
        """Report/output configuration."""
        self.parser.addoption('--output-dir', type=str, help='output directory')

    def add_appium_option(self):
        """Appium server configuration."""
        opt = self.parser.addoption
        opt('--service-address', type=str, help='Appium server host')
        opt('--port', type=str, help='Appium server host')
        opt('--bp', type=str, help='WebDriverAgent Port or Bootstrap Port')

    def add_attachment_option(self):
        """Screenshot attachment switch."""
        self.parser.addoption('--disable-screenshot', action='store_true',
                              help='Disable screenshot')
"argparse.ArgumentParser"
] | [((728, 753), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (751, 753), False, 'import argparse\n')] |
import requests
from teste_app import settigns
def google(q: str):
    """Run a Google search for *q* and return the raw HTTP response."""
    query_params = {"q": q}
    return requests.get(settigns.GOOGLE, params=query_params)
| [
"requests.get"
] | [((119, 165), 'requests.get', 'requests.get', (['settigns.GOOGLE'], {'params': "{'q': q}"}), "(settigns.GOOGLE, params={'q': q})\n", (131, 165), False, 'import requests\n')] |
# github link: https://github.com/ds-praveenkumar/kaggle
# Author: ds-praveenkumar
# file: forcasting/prepare_train_data.py/
# Created by ds-praveenkumar at 13-06-2020 15 34
# feature:
import os
import pandas as pd
import numpy as np
import click
from src.utility.timeit import timeit
# Project root is taken as the parent of the current working directory;
# data paths are resolved relative to it.
# NOTE(review): assumes the script is launched from a sub-directory of the
# repo -- confirm before relying on these paths.
root = os.path.dirname(os.getcwd())
train_data_path = os.path.join(root, 'data', 'training')
preprocess_data_path = os.path.join(root, 'data', 'preprocess')
@timeit
def prepare_train_data(prep_path, train_path, filename='sales_mat.npy',
                       end_date='2016-06-19', periods=1913,
                       n_items=30489, window=365):
    """Split a preprocessed sales matrix into one training CSV per item.

    Loads ``<prep_path>/<filename>`` (items x days), transposes it to
    days x items, attaches a daily date column ``ds`` ending at *end_date*,
    and writes the last *window* days of each of the first *n_items* item
    columns to ``<train_path>/<column>.csv`` with columns ``ds`` and ``y``
    (Prophet-style input).

    The defaults reproduce the original hard-coded M5 setup (30489 items,
    1913 days ending 2016-06-19, 365-day window), so existing callers are
    unaffected.

    :param prep_path: directory containing the preprocessed ``.npy`` matrix
    :param train_path: output directory for the per-item training CSVs
    :param filename: name of the ``.npy`` file inside *prep_path*
    :param end_date: last calendar day covered by the matrix
    :param periods: number of days (rows after transpose) in the matrix
    :param n_items: number of item columns to export
    :param window: number of trailing days kept per item
    """
    sales = np.load(os.path.join(prep_path, filename))
    prep_df = pd.DataFrame(sales).T  # -> one row per day, one column per item
    prep_df['ds'] = pd.date_range(end=end_date, periods=periods).values
    for column in prep_df.iloc[:, :n_items]:
        train_items = prep_df[['ds', column]][-window:]
        train_items.rename(columns={column: 'y'}, inplace=True)
        save_at = os.path.join(train_path, f"{column}.csv")
        train_items.to_csv(save_at, index=False)
        print(f"file saved at {save_at}")
@click.command()
@click.argument('preprocess_data_path', type=click.Path(exists=True))
@click.argument('train_data_path', type=click.Path())
def main(preprocess_data_path, train_data_path):
    """CLI entry point: build per-item training CSVs from the preprocessed matrix."""
    prepare_train_data(preprocess_data_path, train_data_path)


if __name__ == '__main__':
    main()
"os.path.join",
"os.getcwd",
"click.Path",
"click.command",
"pandas.date_range"
] | [((341, 379), 'os.path.join', 'os.path.join', (['root', '"""data"""', '"""training"""'], {}), "(root, 'data', 'training')\n", (353, 379), False, 'import os\n'), ((403, 443), 'os.path.join', 'os.path.join', (['root', '"""data"""', '"""preprocess"""'], {}), "(root, 'data', 'preprocess')\n", (415, 443), False, 'import os\n'), ((987, 1002), 'click.command', 'click.command', ([], {}), '()\n', (1000, 1002), False, 'import click\n'), ((310, 321), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (319, 321), False, 'import os\n'), ((626, 671), 'pandas.date_range', 'pd.date_range', ([], {'end': '"""2016-06-19"""', 'periods': '(1913)'}), "(end='2016-06-19', periods=1913)\n", (639, 671), True, 'import pandas as pd\n'), ((852, 893), 'os.path.join', 'os.path.join', (['train_path', 'f"""{column}.csv"""'], {}), "(train_path, f'{column}.csv')\n", (864, 893), False, 'import os\n'), ((1048, 1071), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (1058, 1071), False, 'import click\n'), ((1113, 1125), 'click.Path', 'click.Path', ([], {}), '()\n', (1123, 1125), False, 'import click\n'), ((538, 578), 'os.path.join', 'os.path.join', (['prep_path', '"""sales_mat.npy"""'], {}), "(prep_path, 'sales_mat.npy')\n", (550, 578), False, 'import os\n')] |
import psycopg2
import psycopg2.extras
import sys
from sqlalchemy import create_engine
import pandas as pd
def get_cursor():
    """Open a connection to the local PostgreSQL database and return a cursor.

    The cursor is built with ``NamedTupleCursor`` so result rows expose
    columns as attributes. The connection object is not returned; it stays
    alive through the cursor's back-reference.

    SECURITY NOTE(review): credentials are hard-coded in the source -- they
    should come from the environment or a config file.
    """
    conn_string = "host='localhost' dbname='HintereggerA' user='HintereggerA' password='<PASSWORD>'"
    # print the connection string we will use to connect
    print("Connecting to database: {}".format(conn_string), file=sys.stderr)
    # get a connection, if a connect cannot be made an exception will be raised here
    conn = psycopg2.connect(conn_string)
    # conn.cursor will return a cursor object, you can use this cursor to perform queries
    cursor = conn.cursor(cursor_factory=psycopg2.extras.NamedTupleCursor)
    # cursor = conn.cursor()
    return cursor
def query(query_string):
    """Execute *query_string* on the module-level cursor and return all rows."""
    cursor.execute(query_string)
    return cursor.fetchall()
# Module-level SQLAlchemy engine used by pandaquery(); created at import time.
# SECURITY NOTE(review): credentials are hard-coded -- see get_cursor().
engine = create_engine('postgresql://HintereggerA:root@localhost/HintereggerA')
def pandaquery(query_string):
    """Run *query_string* against the module engine; return a pandas DataFrame."""
    print("SQL: " + query_string)
    return pd.read_sql_query(query_string, engine)
# Shared cursor created at import time so callers can use query() directly.
# NOTE(review): importing this module therefore opens a DB connection.
cursor = get_cursor()
# print("DB Cursor is available as cursor")
# print("query with query(str)")
"psycopg2.connect",
"sqlalchemy.create_engine",
"pandas.read_sql_query"
] | [((766, 836), 'sqlalchemy.create_engine', 'create_engine', (['"""postgresql://HintereggerA:root@localhost/HintereggerA"""'], {}), "('postgresql://HintereggerA:root@localhost/HintereggerA')\n", (779, 836), False, 'from sqlalchemy import create_engine\n'), ((444, 473), 'psycopg2.connect', 'psycopg2.connect', (['conn_string'], {}), '(conn_string)\n', (460, 473), False, 'import psycopg2\n'), ((907, 946), 'pandas.read_sql_query', 'pd.read_sql_query', (['query_string', 'engine'], {}), '(query_string, engine)\n', (924, 946), True, 'import pandas as pd\n')] |
# Author: <NAME> <<EMAIL>>
"""Module implementing the FASTA algorithm"""
import numpy as np
from math import sqrt
from scipy import linalg
import time
import logging
def _next_stepsize(deltax, deltaF, t=0):
"""A variation of spectral descent step-size selection: 'adaptive' BB method.
Reference:
---------
<NAME>, <NAME>, and <NAME>, 'Gradient methods with adaptive step-sizes,'
Comput. Optim. Appl., vol. 35, pp. 69-86, Sept. 2006
parameters
----------
deltax: ndarray
difference between coefs_current and coefs_next
deltaF: ndarray
difference between grad operator evaluated at coefs_current and coefs_next
returns
-------
float
adaptive step-size
"""
n_deltax = (deltax ** 2).sum() # linalg.norm(deltax, 'fro') ** 2
n_deltaF = (deltaF ** 2).sum() # linalg.norm(deltaF, 'fro') ** 2
innerproduct_xF = np.real((deltax * deltaF).sum())
if n_deltax == 0:
return 0
elif (n_deltaF == 0) | (innerproduct_xF == 0):
return -1
else:
tau_s = n_deltax / innerproduct_xF # steepest descent
tau_m = innerproduct_xF / n_deltaF # minimum residual
# adaptive BB method
if 2 * tau_m > tau_s:
return tau_m
else:
return tau_s - 0.5 * tau_m
def _compute_residual(deltaf, sg):
"""Computes residuals"""
res = sqrt(((deltaf + sg) ** 2).sum())
a = sqrt((deltaf ** 2).sum())
b = sqrt((sg ** 2).sum())
res_r = res / (max(a, b) + 1e-15)
return res, res_r
def _update_coefs(x, tau, gradfx, prox, f, g, beta, fk, linesearch=True):
"""Non-monotone line search
parameters
----------
x: ndarray
current coefficients
tau: float
step size
gradfx: ndarry
gradient operator evaluated at current coefficients
prox: function handle
proximal operator of :math:`g(x)`
f: callable
smooth differentiable function, :math:`f(x)`
g: callable
non-smooth function, :math:`g(x)`
beta: float
backtracking parameter
fk: float
maximum of previous function values
returns
-------
z: ndarray
next coefficients
"""
x_hat = x - tau * gradfx
z = prox(x_hat, tau)
fz = f(z)
count = 0
if linesearch:
while fz > fk + (gradfx * (z - x)).sum() + ((z - x) ** 2).sum() / (2 * tau):
# np.square(linalg.norm(z - x, 'fro')) / (2 * tau):
count += 1
tau *= beta
x_hat = x - tau * gradfx
z = prox(x_hat, tau)
fz = f(z)
sg = (x_hat - z) / tau
return z, fz, sg, tau, count
class Fasta:
    r"""Fast adaptive shrinkage/threshold Algorithm (FASTA).

    Minimizes composite objectives :math:`f(x) + g(x)` (smooth *f*,
    proximable *g*) via forward-backward splitting with adaptive spectral
    step sizes and an optional non-monotone backtracking line search.

    Reference
    ---------
    Goldstein, Tom, Christoph Studer, and Richard Baraniuk. "A field guide to forward-backward
    splitting with a FASTA implementation." arXiv preprint arXiv:1411.3406 (2014).

    Parameters
    ----------
    f: function handle
        smooth differentiable function, :math:`f(x)`
    g: function handle
        non-smooth convex function, :math:`g(x)`
    gradf: function handle
        gradient of smooth differentiable function, :math:`\\nabla f(x)`
    proxg: function handle
        proximal operator of non-smooth convex function
        :math:`proxg(v, \\lambda) = argmin g(x) + \\frac{1}{2*\\lambda}\|x-v\|^2`
    beta: float, optional
        backtracking (step shrink) parameter
        default is 0.5
    n_iter: int, optional
        maximum number of iterations
        default is 1000

    Attributes
    ----------
    coefs_: ndarray
        learned coefficients (populated by :meth:`learn`)
    objective_value: float
        objective value at the learned coefficients
    residuals: list
        residual values at each iteration
    initial_stepsize: float, optional
        created only with verbose=True option
    objective: list, optional
        objective values at each iteration
        created only with verbose=True option
    stepsizes: list, optional
        stepsizes at each iteration
        created only with verbose=True option
    backtracks: list, optional
        number of backtracking steps
        created only with verbose=True option

    Notes
    -----
    Make sure that outputs of gradf and proxg are of the same size as x.
    The implementation does not check for any such discrepancies.

    Use
    ---
    Solve the following least-squares problem with FASTA:
    :math:`\\min .5||Ax-b||^2 + \\mu*\|x\|_1`

    Create function handles

    >>> def f(x): return 0.5 * linalg.norm(np.dot(A, x) - b, 2)**2  # f(x) = .5||Ax-b||^2
    >>> def gradf(x): return np.dot(A.T, np.dot(A, x) - b)  # gradient of f(x)
    >>> def g(x): return mu * linalg.norm(x, 1)  # mu|x|
    >>> def proxg(x, t): return shrink(x, mu*t)
    >>> def shrink(x, mu): return np.multiply(np.sign(x), np.maximum(np.abs(x) - mu, 0))  # proxg(z,t) = sign(x)*max(|x|-mu,0)

    Create a FASTA instance

    >>> lsq = Fasta(f, g, gradf, proxg)

    Call the solver

    >>> lsq.learn(x0, verbose=True)
    """
    def __init__(self, f, g, gradf, proxg, beta=0.5, n_iter=1000):
        self.f = f
        self.g = g
        self.grad = gradf
        self.prox = proxg
        self.beta = beta
        self.n_iter = n_iter
        # NOTE(review): these histories are never reset, so calling learn()
        # more than once on the same instance mixes runs -- confirm intended.
        self.residuals = []
        self._funcValues = []
        self.coefs_ = None
    def __str__(self):
        return "Fast adaptive shrinkage/thresholding Algorithm instance"
    def learn(self, coefs_init, tol=1e-4, verbose=True, linesearch=True, next_stepsize=_next_stepsize):
        r"""Fits the model using the FASTA algorithm.

        parameters
        ----------
        coefs_init: ndarray
            initial guess; the initialization below indexes ``shape[0]`` and
            ``shape[1]``, so a 2-D array is expected -- TODO confirm
        tol: float, optional
            tolerance on the (relative) residual used for early stopping
            default is 1e-4
        verbose: bool
            if True, records per-iteration diagnostics on the instance and
            logs them; if False, only residuals are kept
            default is True
        linesearch: bool
            if True (Default) uses line-search to find the step-size
        next_stepsize: callable
            a callable with arguments (\deltax, \deltaGradf, iteration) which
            provides the next step-size. Default is a non-monotone step-size
            selection ('adaptive' BB) method.

        returns
        -------
        self
        """
        logger = logging.getLogger("FASTA")
        coefs_current = np.copy(coefs_init)
        grad_current = self.grad(coefs_current)
        # Small random perturbation used only to estimate the initial step size.
        # NOTE(review): np.random is unseeded here, so runs are not reproducible
        # unless the caller seeds the global RNG.
        coefs_next = coefs_current + 0.01 * np.random.randn(coefs_current.shape[0], coefs_current.shape[1])
        grad_next = self.grad(coefs_next)
        tau_current = next_stepsize(coefs_next - coefs_current, grad_next - grad_current, 0)
        self._funcValues.append(self.f(coefs_current))
        if verbose:
            self.objective = []
            self.objective.append(self._funcValues[-1] + self.g(coefs_current))
            self.initial_stepsize = np.copy(tau_current)
            self.stepsizes = []
            self.backtracks = []
        start = time.time()
        logger.debug(f"Iteration \t objective value \t step-size \t backtracking steps taken \t residual")
        for i in range(self.n_iter):
            # Forward-backward step with non-monotone backtracking (fk = max of
            # all previous objective values).
            coefs_next, objective_next, sub_grad, tau, n_backtracks \
                = _update_coefs(coefs_current, tau_current, grad_current,
                                self.prox, self.f, self.g, self.beta, max(self._funcValues), linesearch)
            self._funcValues.append(objective_next)
            grad_next = self.grad(coefs_next)

            # Residuals: absolute, relative, and normalized by the first one.
            delta_coef = coefs_current - coefs_next
            delta_grad = grad_current - grad_next
            residual, residual_r = _compute_residual(grad_next, sub_grad)
            self.residuals.append(residual)
            residual_n = residual / (self.residuals[0] + 1e-15)

            # Step size for the next iteration (adaptive/spectral rule).
            tau_next = next_stepsize(delta_coef, delta_grad, i)

            if verbose:
                self.stepsizes.append(tau)
                self.backtracks.append(n_backtracks)
                self.objective.append(objective_next + self.g(coefs_next))
                # NOTE(review): self.objective carries an extra initial entry, so
                # self.objective[i] lags the current iterate by one -- confirm.
                logger.debug(
                    f"{i} \t {self.objective[i]} \t {self.stepsizes[i]} \t {self.backtracks[i]} \t {self.residuals[i]}")

            # Prepare for next iteration
            coefs_current = coefs_next
            grad_current = grad_next

            if tau_next == 0.0 or min(residual_n, residual_r) < tol:  # convergence reached
                break
            elif tau_next < 0.0:  # non-convex problems -> negative step size -> reuse the previous value
                tau_current = tau
            else:
                tau_current = tau_next
        end = time.time()
        self.coefs_ = coefs_current
        self.objective_value = objective_next + self.g(coefs_current)
        if verbose:
            logger.debug(f"total time elapsed : {end - start}s")
        return self
| [
"logging.getLogger",
"numpy.copy",
"numpy.random.randn",
"time.time"
] | [((6219, 6245), 'logging.getLogger', 'logging.getLogger', (['"""FASTA"""'], {}), "('FASTA')\n", (6236, 6245), False, 'import logging\n'), ((6270, 6289), 'numpy.copy', 'np.copy', (['coefs_init'], {}), '(coefs_init)\n', (6277, 6289), True, 'import numpy as np\n'), ((6908, 6919), 'time.time', 'time.time', ([], {}), '()\n', (6917, 6919), False, 'import time\n'), ((8629, 8640), 'time.time', 'time.time', ([], {}), '()\n', (8638, 8640), False, 'import time\n'), ((6805, 6825), 'numpy.copy', 'np.copy', (['tau_current'], {}), '(tau_current)\n', (6812, 6825), True, 'import numpy as np\n'), ((6382, 6445), 'numpy.random.randn', 'np.random.randn', (['coefs_current.shape[0]', 'coefs_current.shape[1]'], {}), '(coefs_current.shape[0], coefs_current.shape[1])\n', (6397, 6445), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Author: XuMing <<EMAIL>>
# Brief:
import os
import pickle
def load_pkl(pkl_path):
    """Load and return the object pickled at *pkl_path*.

    :param pkl_path: path to a pickle file
    :return: the unpickled object
    """
    with open(pkl_path, 'rb') as f:
        return pickle.load(f)
def dump_pkl(vocab, pkl_path, overwrite=True):
    """Pickle *vocab* to *pkl_path*.

    :param vocab: object to serialize
    :param pkl_path: destination file path
    :param overwrite: when False, an existing file is left untouched
    :return: None
    """
    if not overwrite and os.path.exists(pkl_path):
        return
    with open(pkl_path, 'wb') as f:
        # Protocol 0 (ASCII) kept deliberately; pickle.HIGHEST_PROTOCOL
        # would be smaller/faster but changes the on-disk format.
        pickle.dump(vocab, f, protocol=0)
| [
"pickle.dump",
"os.path.exists",
"pickle.load"
] | [((225, 239), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (236, 239), False, 'import pickle\n'), ((395, 419), 'os.path.exists', 'os.path.exists', (['pkl_path'], {}), '(pkl_path)\n', (409, 419), False, 'import os\n'), ((564, 597), 'pickle.dump', 'pickle.dump', (['vocab', 'f'], {'protocol': '(0)'}), '(vocab, f, protocol=0)\n', (575, 597), False, 'import pickle\n')] |
import copy
import _mecab
from collections import namedtuple
from typing import Generator
from mecab import MeCabError
from domain.mecab_domain import MecabWordFeature
def delete_pattern_from_string(string, pattern, index, nofail=False):
    """Mask the occurrence of *pattern* at *index* in *string* with ``*``.

    The ``len(pattern)`` characters starting at *index* are replaced by the
    same number of asterisks. With ``nofail=False`` an out-of-range *index*
    raises ``ValueError``; with ``nofail=True`` a negative index prepends
    the pattern and an index past the end appends it.

    NOTE(review): the nofail branches insert the *pattern itself* rather
    than asterisks -- confirm this asymmetry is intentional.
    """
    if not nofail and index not in range(len(string)):
        raise ValueError("index outside given string")

    if index < 0:
        # Out of range on the left: prepend.
        return pattern + string
    if index > len(string):
        # Out of range on the right: append.
        return string + pattern

    masked = "*" * len(pattern)
    tail_start = index + len(pattern)
    return string[:index] + masked + string[tail_start:]
# Sentinel returned by str.find() when the substring is absent.
STRING_NOT_FOUND = -1
# Field layout of a mecab-ko-dic feature string (8 comma-separated values).
Feature = namedtuple('Feature', [
    'pos',
    'semantic',
    'has_jongseong',
    'reading',
    'type',
    'start_pos',
    'end_pos',
    'expression',
])
def _create_lattice(sentence):
    """Build a MeCab lattice for *sentence*, ready to be parsed by a Tagger."""
    lattice = _mecab.Lattice()
    lattice.add_request_type(_mecab.MECAB_ALLOCATE_SENTENCE)  # Required
    lattice.set_sentence(sentence)
    return lattice
def _get_mecab_feature(node) -> MecabWordFeature:
    """Convert a raw MeCab node into a :class:`MecabWordFeature`.

    The node's feature string is a comma-separated 8-tuple:
    ``<pos>,<semantic>,<has_jongseong>,<reading>,<type>,<start_pos>,<end_pos>,<expression>``
    where ``*`` marks a missing value (mapped to ``None``) and the
    has_jongseong flag is encoded as ``T``/``F``.

    References:
    - http://taku910.github.io/mecab/learn.html
    - https://docs.google.com/spreadsheets/d/1-9blXKjtjeKZqsf4NzHeYJCrr49-nXeRF6D80udfcwY
    - https://bitbucket.org/eunjeon/mecab-ko-dic/src/master/utils/dictionary/lexicon.py
    """
    raw_values = node.feature.split(',')
    assert len(raw_values) == 8

    cleaned = [None if item == '*' else item for item in raw_values]
    attrs = dict(zip(Feature._fields, cleaned))
    attrs['has_jongseong'] = {'T': True, 'F': False}.get(attrs['has_jongseong'])

    return MecabWordFeature(node.surface, **attrs)
class MecabParser:
    """
    Morphologically analyse a sentence with MeCab.
    Alongside each morpheme token, the index of the morpheme token and the
    index of the whitespace-split token it belongs to are stored.
    """
    FIRST_WORD = 0
    # MeCab token types whose 'expression' field holds sub-morphemes to expand.
    type_list = ["Compound", "Inflect"]
    def __init__(self, sentence: str, dicpath=''):
        argument = ''
        if dicpath != '':
            argument = '-d %s' % dicpath
        self.tagger = _mecab.Tagger(argument)
        self.sentence = sentence
        # Whitespace-split tokens; matched spans get masked with '*' so that
        # repeated words map to successive tokens.
        self.sentence_token = self.sentence.split()
    def _get_space_token_idx(self, mecab_word_feature: MecabWordFeature) -> int:
        """
        Return the index of the whitespace token that contains the word.
        :param mecab_word_feature: MeCab word feature data
        :return: index into self.sentence_token, or False when not found
        """
        for idx_token, sentence_token_item in enumerate(self.sentence_token):
            index_string = sentence_token_item.find(mecab_word_feature.word)
            if index_string != STRING_NOT_FOUND:
                # Mask the matched span so a repeated word maps to a later token.
                self.sentence_token[idx_token] = delete_pattern_from_string(sentence_token_item, mecab_word_feature.word, index_string)
                return idx_token
        return False
    def gen_mecab_token_feature(self) -> Generator:
        """
        Yield MeCab morpheme tokens as a generator.
        Each token carries both its morpheme-token index and, when found,
        the index of the whitespace token it belongs to.
        """
        lattice = _create_lattice(self.sentence)
        if not self.tagger.parse(lattice):
            raise MeCabError(self.tagger.what())
        for mecab_token_idx, mecab_token in enumerate(lattice):
            mecab_token_feature = _get_mecab_feature(mecab_token)
            mecab_token_feature.mecab_token_idx = mecab_token_idx
            space_token_idx = self._get_space_token_idx(mecab_token_feature)
            if space_token_idx is not False:
                mecab_token_feature.space_token_idx = space_token_idx
            mecab_token_feature.word = mecab_token_feature.word.lower()
            yield mecab_token_feature
    def tokenize_mecab_compound(self) -> Generator:
        """
        Yield (word, feature) pairs; tokens whose type is Compound or
        Inflect are split into their component morphemes.
        """
        for compound_include_item in self.gen_mecab_token_feature():
            if compound_include_item.type in self.type_list:
                # Each component of 'expression' looks like "word/pos/extra",
                # components joined by '+'.
                compound_item_list = compound_include_item.expression.split("+")
                for compound_item in compound_item_list:
                    word, pos_tag, _ = compound_item.split("/")
                    copy_compound_include_item = copy.deepcopy(compound_include_item)
                    copy_compound_include_item.word = word
                    yield word, copy_compound_include_item
            else:
                yield compound_include_item.word, compound_include_item
    def gen_mecab_compound_token_feature(self) -> Generator:
        """
        :return: (word, feature) pairs whose feature carries the sequential
            token index after compound decomposition (mecab_token_compound_idx)
        """
        for idx, x in enumerate(list(self.tokenize_mecab_compound())):
            copy_x = copy.deepcopy(x)
            copy_x[1].mecab_token_compound_idx = idx
            yield copy_x
    def get_word_from_mecab_compound(self, is_list=False):
        """
        Extract only the words from the MeCab-decomposed sentence.
        :param is_list: when True return a list of words instead of a string
        :return: the decomposed sentence's words, space-joined or as a list
        """
        if is_list:
            return [x[self.FIRST_WORD] for x in list(self.gen_mecab_compound_token_feature())]
        return " ".join([x[self.FIRST_WORD] for x in list(self.gen_mecab_compound_token_feature())])
if __name__ == "__main__":
test_sentence = "나는 서울대병원에 갔어"
mecab_parse_results = list(
MecabParser(test_sentence).gen_mecab_token_feature())
for idx, mecab_parse_item in enumerate(mecab_parse_results):
print(mecab_parse_item)
mecab_parse_results = list(
MecabParser(test_sentence).gen_mecab_compound_token_feature())
for idx, mecab_parse_item in enumerate(mecab_parse_results):
print(mecab_parse_item) | [
"collections.namedtuple",
"_mecab.Lattice",
"domain.mecab_domain.MecabWordFeature",
"_mecab.Tagger",
"copy.deepcopy"
] | [((917, 1037), 'collections.namedtuple', 'namedtuple', (['"""Feature"""', "['pos', 'semantic', 'has_jongseong', 'reading', 'type', 'start_pos',\n 'end_pos', 'expression']"], {}), "('Feature', ['pos', 'semantic', 'has_jongseong', 'reading',\n 'type', 'start_pos', 'end_pos', 'expression'])\n", (927, 1037), False, 'from collections import namedtuple\n'), ((1116, 1132), '_mecab.Lattice', '_mecab.Lattice', ([], {}), '()\n', (1130, 1132), False, 'import _mecab\n'), ((1943, 1984), 'domain.mecab_domain.MecabWordFeature', 'MecabWordFeature', (['node.surface'], {}), '(node.surface, **feature)\n', (1959, 1984), False, 'from domain.mecab_domain import MecabWordFeature\n'), ((2319, 2342), '_mecab.Tagger', '_mecab.Tagger', (['argument'], {}), '(argument)\n', (2332, 2342), False, 'import _mecab\n'), ((4840, 4856), 'copy.deepcopy', 'copy.deepcopy', (['x'], {}), '(x)\n', (4853, 4856), False, 'import copy\n'), ((4370, 4406), 'copy.deepcopy', 'copy.deepcopy', (['compound_include_item'], {}), '(compound_include_item)\n', (4383, 4406), False, 'import copy\n')] |
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import model_from_json
import numpy as np
import tensorflow.keras.models as models
def predict(temp_file):
    """Classify the image at *temp_file*; return the predicted class index.

    Note: the Keras model architecture and weights are re-loaded from disk
    on every call.
    """
    # Image file -> batched array with a leading batch axis of 1.
    img = image.load_img(temp_file, target_size = (224, 224))
    batch = np.expand_dims(image.img_to_array(img), axis = 0)
    # Rebuild the model from its serialised architecture, then load weights.
    with open('Model Weights _ Json/model.json','r') as json_file:
        architecture = json_file.read()
    model = model_from_json(architecture)
    model.load_weights('Model Weights _ Json/model_weights.h5')
    return np.argmax(model.predict(batch))
| [
"tensorflow.keras.preprocessing.image.load_img",
"tensorflow.keras.models.model_from_json",
"numpy.argmax",
"numpy.expand_dims",
"tensorflow.keras.preprocessing.image.img_to_array"
] | [((203, 252), 'tensorflow.keras.preprocessing.image.load_img', 'image.load_img', (['temp_file'], {'target_size': '(224, 224)'}), '(temp_file, target_size=(224, 224))\n', (217, 252), False, 'from tensorflow.keras.preprocessing import image\n'), ((272, 302), 'tensorflow.keras.preprocessing.image.img_to_array', 'image.img_to_array', (['test_image'], {}), '(test_image)\n', (290, 302), False, 'from tensorflow.keras.preprocessing import image\n'), ((320, 354), 'numpy.expand_dims', 'np.expand_dims', (['test_image'], {'axis': '(0)'}), '(test_image, axis=0)\n', (334, 354), True, 'import numpy as np\n'), ((474, 501), 'tensorflow.keras.models.model_from_json', 'model_from_json', (['json_model'], {}), '(json_model)\n', (489, 501), False, 'from tensorflow.keras.models import model_from_json\n'), ((616, 633), 'numpy.argmax', 'np.argmax', (['result'], {}), '(result)\n', (625, 633), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
##############################################################################
## This file is part of 'PGP PCIe APP DEV'.
## It is subject to the license terms in the LICENSE.txt file found in the
## top-level directory of this distribution and at:
## https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
## No part of 'PGP PCIe APP DEV', including this file,
## may be copied, modified, propagated, or distributed except according to
## the terms contained in the LICENSE.txt file.
##############################################################################
import sys
import argparse
import rogue
import rogue.hardware.axi
import rogue.interfaces.stream
import rogue.interfaces.memory
import pyrogue as pr
import pyrogue.pydm
import pyrogue.utilities.prbs
import pyrogue.interfaces.simulation
import axipcie as pcie
import surf.protocols.ssi as ssi
# rogue.Logging.setLevel(rogue.Logging.Warning)
# rogue.Logging.setLevel(rogue.Logging.Debug)
#################################################################
# Set the argument parser
parser = argparse.ArgumentParser()
# Convert str to bool
# (argparse's builtin type=bool treats any non-empty string as True,
# hence this small custom parser)
argBool = lambda s: s.lower() in ['true', 't', 'yes', '1']
# Add arguments
parser.add_argument(
    "--type",
    type     = str,
    required = False,
    default  = 'pcie',
    help     = "define the type of interface",
)
parser.add_argument(
    "--dev",
    type     = str,
    required = False,
    default  = '/dev/datadev_0',
    help     = "path to device",
)
parser.add_argument(
    "--numLane",
    type     = int,
    required = False,
    default  = 1,
    help     = "# of DMA Lanes",
)
parser.add_argument(
    "--numVc",
    type     = int,
    required = False,
    default  = 1,
    help     = "# of VC (virtual channels)",
)
parser.add_argument(
    "--pollEn",
    type     = argBool,
    required = False,
    default  = True,
    help     = "Enable auto-polling",
)
parser.add_argument(
    "--initRead",
    type     = argBool,
    required = False,
    default  = True,
    help     = "Enable read all variables at start",
)
# Get the arguments
args = parser.parse_args()
#################################################################
class MyRoot(pr.Root):
    """Root device tree: DMA streams with software PRBS TX/RX pairs, attached
    to either a real PCIe card or a VCS simulation, selected by --type."""
    def __init__(   self,
            name        = "pciServer",
            description = "DMA Loopback Testing",
            **kwargs):
        super().__init__(name=name, description=description, **kwargs)
        #################################################################
        # One stream / PRBS pair per (lane, vc) combination.
        self.dmaStream = [[None for x in range(args.numVc)] for y in range(args.numLane)]
        self.prbsRx    = [[None for x in range(args.numVc)] for y in range(args.numLane)]
        self.prbTx     = [[None for x in range(args.numVc)] for y in range(args.numLane)]
        # DataDev PCIe Card
        if ( args.type == 'pcie' ):
            # Create PCIE memory mapped interface
            self.memMap = rogue.hardware.axi.AxiMemMap(args.dev)
            # Create the DMA loopback channel
            # (DMA destination = 0x100*lane + vc)
            for lane in range(args.numLane):
                for vc in range(args.numVc):
                    self.dmaStream[lane][vc] = rogue.hardware.axi.AxiStreamDma(args.dev,(0x100*lane)+vc,1)
        # VCS simulation
        elif ( args.type == 'sim' ):
            self.memMap = rogue.interfaces.memory.TcpClient('localhost',8000)
            # Create the DMA loopback channel
            # (simulation TCP port = 8002 + 512*lane + 2*vc)
            for lane in range(args.numLane):
                for vc in range(args.numVc):
                    self.dmaStream[lane][vc] = rogue.interfaces.stream.TcpClient('localhost',8002+(512*lane)+2*vc)
        # Undefined device type
        else:
            raise ValueError("Invalid type (%s)" % (args.type) )
        # Add the PCIe core device to base
        self.add(pcie.AxiPcieCore(
            memBase     = self.memMap,
            offset      = 0x00000000,
            numDmaLanes = args.numLane,
            expand      = True,
        ))
        for lane in range(args.numLane):
            for vc in range(args.numVc):
                # Connect the SW PRBS Receiver module
                self.prbsRx[lane][vc] = pr.utilities.prbs.PrbsRx(name=('SwPrbsRx[%d][%d]'%(lane,vc)),expand=True)
                self.dmaStream[lane][vc] >> self.prbsRx[lane][vc]
                self.add(self.prbsRx[lane][vc])
                # Connect the SW PRBS Transmitter module
                self.prbTx[lane][vc] = pr.utilities.prbs.PrbsTx(name=('SwPrbsTx[%d][%d]'%(lane,vc)),expand=True)
                self.prbTx[lane][vc] >> self.dmaStream[lane][vc]
                self.add(self.prbTx[lane][vc])
#################################################################
# Instantiate the device tree and launch the PyDM GUI attached to it.
with MyRoot(pollEn=args.pollEn, initRead=args.initRead) as root:
    pyrogue.pydm.runPyDM(root=root)
#################################################################
| [
"rogue.interfaces.stream.TcpClient",
"pyrogue.utilities.prbs.PrbsRx",
"axipcie.AxiPcieCore",
"rogue.interfaces.memory.TcpClient",
"argparse.ArgumentParser",
"rogue.hardware.axi.AxiStreamDma",
"pyrogue.utilities.prbs.PrbsTx",
"rogue.hardware.axi.AxiMemMap"
] | [((1107, 1132), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1130, 1132), False, 'import argparse\n'), ((2947, 2985), 'rogue.hardware.axi.AxiMemMap', 'rogue.hardware.axi.AxiMemMap', (['args.dev'], {}), '(args.dev)\n', (2975, 2985), False, 'import rogue\n'), ((3797, 3887), 'axipcie.AxiPcieCore', 'pcie.AxiPcieCore', ([], {'memBase': 'self.memMap', 'offset': '(0)', 'numDmaLanes': 'args.numLane', 'expand': '(True)'}), '(memBase=self.memMap, offset=0, numDmaLanes=args.numLane,\n expand=True)\n', (3813, 3887), True, 'import axipcie as pcie\n'), ((3320, 3372), 'rogue.interfaces.memory.TcpClient', 'rogue.interfaces.memory.TcpClient', (['"""localhost"""', '(8000)'], {}), "('localhost', 8000)\n", (3353, 3372), False, 'import rogue\n'), ((4157, 4232), 'pyrogue.utilities.prbs.PrbsRx', 'pr.utilities.prbs.PrbsRx', ([], {'name': "('SwPrbsRx[%d][%d]' % (lane, vc))", 'expand': '(True)'}), "(name='SwPrbsRx[%d][%d]' % (lane, vc), expand=True)\n", (4181, 4232), True, 'import pyrogue as pr\n'), ((4442, 4517), 'pyrogue.utilities.prbs.PrbsTx', 'pr.utilities.prbs.PrbsTx', ([], {'name': "('SwPrbsTx[%d][%d]' % (lane, vc))", 'expand': '(True)'}), "(name='SwPrbsTx[%d][%d]' % (lane, vc), expand=True)\n", (4466, 4517), True, 'import pyrogue as pr\n'), ((3170, 3231), 'rogue.hardware.axi.AxiStreamDma', 'rogue.hardware.axi.AxiStreamDma', (['args.dev', '(256 * lane + vc)', '(1)'], {}), '(args.dev, 256 * lane + vc, 1)\n', (3201, 3231), False, 'import rogue\n'), ((3556, 3630), 'rogue.interfaces.stream.TcpClient', 'rogue.interfaces.stream.TcpClient', (['"""localhost"""', '(8002 + 512 * lane + 2 * vc)'], {}), "('localhost', 8002 + 512 * lane + 2 * vc)\n", (3589, 3630), False, 'import rogue\n')] |
import inspect
import json
from .serialisation import JsonObjHelper, JsonTypeError
class RpcMethod:
    """Encapsulate argument/result (de)serialisation for a function
    based on a type-annotated function signature and name.
    """

    def __init__(self, name, signature, doc=None):
        # Normalise "no return annotation" / "returns None" to NoneType so
        # (de)serialise_result can always type-check against a real type.
        if (
            signature.return_annotation is None
            or signature.return_annotation is signature.empty
        ):
            signature = signature.replace(return_annotation=type(None))
        self.name = name
        self.sig = signature
        self.doc = doc

    @classmethod
    def from_function(cls, func):
        """Factory function from a function"""
        return cls(func.__name__, inspect.signature(func), func.__doc__)

    def call_with(self, obj, arg_dict):
        """Apply a dictionary of arguments, probably from
        deserialise_args, to an object assuming that it has a method
        with our name and signature (less type annotations).
        """
        bound = self.sig.bind(**arg_dict)
        method = getattr(obj, self.name)
        return method(*bound.args, **bound.kwargs)

    def serialise_args(self, *args, **kwargs):
        """Turn this method's args+kwargs into bytes (UTF-8).

        Raises TypeError when an argument does not match its annotation.
        """
        bound = self.sig.bind(*args, **kwargs)
        json_obj = {}
        for name, param in self.sig.parameters.items():
            try:
                py_obj = bound.arguments[name]
            except KeyError:
                # Argument omitted by the caller; its default applies remotely.
                continue
            if not isinstance(py_obj, param.annotation):
                raise TypeError(
                    'Argument "{}" not of type "{}"'.format(name, param.annotation)
                )
            json_obj[name] = JsonObjHelper.py2j(py_obj)
        return json.dumps(json_obj).encode()

    def deserialise_args(self, buf):
        """Turn raw bytes from the wire to a dictionary of arguments
        matching our function signature, that can be applied to the real
        method.

        Raises ValueError on a missing required argument or on unknown
        arguments.
        """
        dct = json.loads(buf)
        arg_dict = {}
        for name, param in self.sig.parameters.items():
            try:
                json_obj = dct.pop(name)
            except KeyError:
                py_obj = param.default
                if py_obj is param.empty:
                    raise ValueError("Missing required argument: %s" % name)
            else:
                py_obj = JsonObjHelper.j2py(param.annotation, json_obj)
            arg_dict[name] = py_obj
        if dct:
            # BUG FIX: this used '"%"' (a lone '%' before '"' is an invalid
            # printf conversion), so reaching this branch raised
            # "unsupported format character" instead of naming the arguments.
            raise ValueError(
                "Unknown argument(s): " + ", ".join('"%s"' % k for k in dct)
            )
        return arg_dict

    def serialise_result(self, pyobj):
        """Turn an actual result object into bytes (UTF-8)"""
        rt = self.sig.return_annotation
        if not isinstance(pyobj, rt):
            raise TypeError('Return value not of type "{}"'.format(rt))
        jobj = JsonObjHelper.py2j(pyobj)
        return json.dumps(jobj).encode()

    def deserialise_result(self, buf):
        """Turn raw bytes from the wire to an object of return
        annotation type.
        """
        json_obj = json.loads(buf)
        try:
            return JsonObjHelper.j2py(self.sig.return_annotation, json_obj)
        except JsonTypeError as e:
            # TypeErrors from j2py should be ValueError really
            raise ValueError("reconstructed types do not match") from e


rpcmethod = RpcMethod.from_function
| [
"json.loads",
"json.dumps",
"inspect.signature"
] | [((1992, 2007), 'json.loads', 'json.loads', (['buf'], {}), '(buf)\n', (2002, 2007), False, 'import json\n'), ((3123, 3138), 'json.loads', 'json.loads', (['buf'], {}), '(buf)\n', (3133, 3138), False, 'import json\n'), ((705, 728), 'inspect.signature', 'inspect.signature', (['func'], {}), '(func)\n', (722, 728), False, 'import inspect\n'), ((1740, 1760), 'json.dumps', 'json.dumps', (['json_obj'], {}), '(json_obj)\n', (1750, 1760), False, 'import json\n'), ((2938, 2954), 'json.dumps', 'json.dumps', (['jobj'], {}), '(jobj)\n', (2948, 2954), False, 'import json\n')] |
import logging
from cinp import client
MCP_API_VERSIONS = ( '0.10', '0.11', )
class MCP( object ):
  """Thin client for the MCP API over CInP.

  Wraps the build-job / instance endpoints a running job needs: status
  signalling, result and score reporting, and value-map exchange.
  """
  def __init__( self, host, proxy, job_id, instance_id, cookie, stop_event ):
    self.cinp = client.CInP( host, '/api/v1/', proxy, retry_event=stop_event )
    self.job_id = job_id
    self.instance_id = instance_id
    self.cookie = cookie
    # Very tolerant for the initial describe, let things settle.
    root, _ = self.cinp.describe( '/api/v1/', retry_count=30 )
    if root[ 'api-version' ] not in MCP_API_VERSIONS:
      raise Exception( 'Expected API version (one of) "{0}" found "{1}"'.format( MCP_API_VERSIONS, root[ 'api-version' ] ) )

  @staticmethod
  def _int_key_map( mapping ):
    """Return *mapping* with its keys converted from str to int.

    JSON encoding turns numeric dict keys into strings; this undoes that.
    Shared by getInstanceState and getInstanceStructureId, which previously
    duplicated this conversion loop.
    """
    return { int( index ): value for index, value in mapping.items() }

  def contractorInfo( self ):
    """Return Contractor connection info from the MCP config."""
    logging.info( 'MCP: Get Contractor Info' )
    return self.cinp.call( '/api/v1/config(getContractorInfo)', {}, retry_count=10 )

  def packratInfo( self ):
    """Return Packrat connection info from the MCP config."""
    logging.info( 'MCP: Get Packrat Info' )
    return self.cinp.call( '/api/v1/config(getPackratInfo)', {}, retry_count=10 )

  def confluenceInfo( self ):
    """Return Confluence connection info from the MCP config."""
    logging.info( 'MCP: Get Confluence Info' )
    return self.cinp.call( '/api/v1/config(getConfluenceInfo)', {}, retry_count=10 )

  def signalJobRan( self ):
    """Tell MCP that this instance's job has run."""
    logging.info( 'MCP: Signal Job Ran' )
    self.cinp.call( '/api/v1/Processor/BuildJobResourceInstance:{0}:(jobRan)'.format( self.instance_id ), { 'cookie': self.cookie }, retry_count=20 )

  def sendMessage( self, message ):
    """Set this instance's status message."""
    logging.info( 'MCP: Message "{0}"'.format( message ) )
    self.cinp.call( '/api/v1/Processor/BuildJobResourceInstance:{0}:(setMessage)'.format( self.instance_id ), { 'cookie': self.cookie, 'message': message }, retry_count=20 )

  def setSuccess( self, success ):
    """Report this instance's success flag."""
    logging.info( 'MCP: Success "{0}"'.format( success ) )
    self.cinp.call( '/api/v1/Processor/BuildJobResourceInstance:{0}:(setSuccess)'.format( self.instance_id ), { 'cookie': self.cookie, 'success': success }, retry_count=20 )

  def setResults( self, target, results ):
    """Attach results text for *target* (only the tail is logged locally)."""
    if results is not None:
      logging.info( 'MCP: Results "{0}"'.format( results[ -100: ].strip() ) )
    else:
      logging.info( 'MCP: Results <empty>' )
    self.cinp.call( '/api/v1/Processor/BuildJobResourceInstance:{0}:(setResults)'.format( self.instance_id ), { 'cookie': self.cookie, 'target': target, 'results': results }, retry_count=20 )

  def setScore( self, target, score ):
    """Attach a score for *target* (may be None)."""
    if score is not None:
      logging.info( 'MCP: Score "{0}"'.format( score ) )
    else:
      logging.info( 'MCP: Score <undefined>' )
    self.cinp.call( '/api/v1/Processor/BuildJobResourceInstance:{0}:(setScore)'.format( self.instance_id ), { 'cookie': self.cookie, 'target': target, 'score': score }, retry_count=20 )

  def uploadedPackages( self, package_file_map ):
    """Register uploaded package files; no-op for an empty map."""
    if not package_file_map:
      return
    self.cinp.call( '/api/v1/Processor/BuildJobResourceInstance:{0}:(addPackageFiles)'.format( self.instance_id ), { 'cookie': self.cookie, 'package_file_map': package_file_map }, retry_count=20 )

  def getInstanceState( self, name=None ):
    """Return instance state(s) for this job, keyed by int index.

    With *name* the result is { index: state }; without it the result is
    { name: { index: state } }.
    """
    logging.info( 'MCP: Instance State for "{0}"'.format( name ) )
    args = {}
    if name is not None:
      args[ 'name' ] = name
    state_map = self.cinp.call( '/api/v1/Processor/BuildJob:{0}:(getInstanceState)'.format( self.job_id ), args, retry_count=10 )
    if name is None:
      return { item: self._int_key_map( item_map ) for item, item_map in state_map.items() }
    return self._int_key_map( state_map )

  def getInstanceStructureId( self, name=None ):
    """Return instance structure id(s) for this job, keyed by int index.

    Same shape as getInstanceState: nested by name when *name* is None.
    """
    logging.info( 'MCP: Instance Structure Id(s) for "{0}"'.format( name ) )
    args = {}
    if name is not None:
      args[ 'name' ] = name
    detail_map = self.cinp.call( '/api/v1/Processor/BuildJob:{0}:(getInstanceStructureId)'.format( self.job_id ), args, retry_count=10 )
    if name is None:
      return { item: self._int_key_map( item_map ) for item, item_map in detail_map.items() }
    return self._int_key_map( detail_map )

  def updateValueMap( self, value_map ):
    """Merge *value_map* into this instance's value map; returns True."""
    logging.info( 'MCP: Setting Value "{0}"'.format( value_map ) )
    self.cinp.call( '/api/v1/Processor/BuildJobResourceInstance:{0}:(updateValueMap)'.format( self.instance_id ), { 'cookie': self.cookie, 'value_map': value_map }, retry_count=20 )
    return True

  def getValueMap( self, name=None ):
    """Return this instance's value map."""
    logging.info( 'MCP: Getting Value Map' )
    return self.cinp.call( '/api/v1/Processor/BuildJobResourceInstance:{0}:(getValueMap)'.format( self.instance_id ), { 'cookie': self.cookie }, retry_count=10 )
| [
"logging.info",
"cinp.client.CInP"
] | [((197, 257), 'cinp.client.CInP', 'client.CInP', (['host', '"""/api/v1/"""', 'proxy'], {'retry_event': 'stop_event'}), "(host, '/api/v1/', proxy, retry_event=stop_event)\n", (208, 257), False, 'from cinp import client\n'), ((686, 726), 'logging.info', 'logging.info', (['"""MCP: Get Contractor Info"""'], {}), "('MCP: Get Contractor Info')\n", (698, 726), False, 'import logging\n'), ((846, 883), 'logging.info', 'logging.info', (['"""MCP: Get Packrat Info"""'], {}), "('MCP: Get Packrat Info')\n", (858, 883), False, 'import logging\n'), ((1003, 1043), 'logging.info', 'logging.info', (['"""MCP: Get Confluence Info"""'], {}), "('MCP: Get Confluence Info')\n", (1015, 1043), False, 'import logging\n'), ((1164, 1199), 'logging.info', 'logging.info', (['"""MCP: Signal Job Ran"""'], {}), "('MCP: Signal Job Ran')\n", (1176, 1199), False, 'import logging\n'), ((4774, 4812), 'logging.info', 'logging.info', (['"""MCP: Getting Value Map"""'], {}), "('MCP: Getting Value Map')\n", (4786, 4812), False, 'import logging\n'), ((2057, 2093), 'logging.info', 'logging.info', (['"""MCP: Results <empty>"""'], {}), "('MCP: Results <empty>')\n", (2069, 2093), False, 'import logging\n'), ((2428, 2466), 'logging.info', 'logging.info', (['"""MCP: Score <undefined>"""'], {}), "('MCP: Score <undefined>')\n", (2440, 2466), False, 'import logging\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from sklearn.metrics import r2_score
import datetime
def func(x, a, b):
    """Linear model a + b*x, the fit target in log-space."""
    return b * x + a


def exp_regression(x, y):
    """Fit y ~ A * exp(B*x) by least squares on log(y).

    Returns curve_fit's parameter vector with the intercept exponentiated
    back out of log-space, i.e. [A, B].
    """
    params, _cov = curve_fit(func, x, np.log(y))
    params[0] = np.exp(params[0])
    return params
def r2(coeffs, x, y):
    """Coefficient of determination of the exponential fit, in log-space.

    *coeffs* is the [A, B] vector from exp_regression (y ~ A * exp(B*x)).

    BUG FIX: the original ignored *coeffs* and read the module-level
    variable ``out`` instead, which made the function wrong for any other
    fit and a NameError before ``out`` existed.  The call site below,
    ``r2(out, x, y)``, is unaffected.
    """
    return r2_score(np.log(y), np.log(coeffs[0]*np.exp(coeffs[1]*x)))
# calculate exponential fit for error rate extrapolation
# report as annual decay (i.e. error rate decreases by fixed factor every year)
errors = pd.read_csv('error_rates.csv')
# Dates -> integer timestamps (assumed nanoseconds, pandas' native unit —
# confirm for the installed pandas version), so out[1] is a per-tick rate.
x = pd.to_datetime(errors.iloc[:, 0]).astype(int)
y = errors.iloc[:, 1]
out = exp_regression(x, y)
# exp(B * ticks-per-year) converts the per-tick rate to an annual factor.
# NOTE(review): Timedelta.delta is deprecated in newer pandas — verify.
print('annual error rate decay', np.exp(out[1]*pd.Timedelta(datetime.timedelta(days=365.2422)).delta))
print('R^2', r2(out, x, y)) | [
"pandas.read_csv",
"numpy.log",
"numpy.exp",
"datetime.timedelta",
"pandas.to_datetime"
] | [((543, 573), 'pandas.read_csv', 'pd.read_csv', (['"""error_rates.csv"""'], {}), "('error_rates.csv')\n", (554, 573), True, 'import pandas as pd\n'), ((279, 291), 'numpy.exp', 'np.exp', (['p[0]'], {}), '(p[0])\n', (285, 291), True, 'import numpy as np\n'), ((257, 266), 'numpy.log', 'np.log', (['y'], {}), '(y)\n', (263, 266), True, 'import numpy as np\n'), ((352, 361), 'numpy.log', 'np.log', (['y'], {}), '(y)\n', (358, 361), True, 'import numpy as np\n'), ((578, 611), 'pandas.to_datetime', 'pd.to_datetime', (['errors.iloc[:, 0]'], {}), '(errors.iloc[:, 0])\n', (592, 611), True, 'import pandas as pd\n'), ((377, 395), 'numpy.exp', 'np.exp', (['(out[1] * x)'], {}), '(out[1] * x)\n', (383, 395), True, 'import numpy as np\n'), ((733, 766), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(365.2422)'}), '(days=365.2422)\n', (751, 766), False, 'import datetime\n')] |
"""Tests for IPython.utils.path.py"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from contextlib import contextmanager
from unittest.mock import patch
import pytest
from IPython.lib import latextools
from IPython.testing.decorators import (
onlyif_cmds_exist,
skipif_not_matplotlib,
)
from IPython.utils.process import FindCmdError
# When the required external command is missing, latex_to_png_dvipng must
# return None rather than raise.
@pytest.mark.parametrize('command', ['latex', 'dvipng'])
def test_check_latex_to_png_dvipng_fails_when_no_cmd(command):
    def mock_find_cmd(arg):
        if arg == command:
            raise FindCmdError

    with patch.object(latextools, "find_cmd", mock_find_cmd):
        assert latextools.latex_to_png_dvipng("whatever", True) is None


# Context manager that does nothing; used as a "no patching" test context.
@contextmanager
def no_op(*args, **kwargs):
    yield


@onlyif_cmds_exist("latex", "dvipng")
@pytest.mark.parametrize("s, wrap", [(u"$$x^2$$", False), (u"x^2", True)])
def test_latex_to_png_dvipng_runs(s, wrap):
    """
    Test that latex_to_png_dvipng just runs without error.
    """
    def mock_kpsewhich(filename):
        assert filename == "breqn.sty"
        return None

    latextools.latex_to_png_dvipng(s, wrap)

    with patch_latextool(mock_kpsewhich):
        latextools.latex_to_png_dvipng(s, wrap)


# Default kpsewhich stub: reports breqn.sty as not installed.
def mock_kpsewhich(filename):
    assert filename == "breqn.sty"
    return None


# Patch latextools.kpsewhich with *mock* for the duration of the block.
@contextmanager
def patch_latextool(mock=mock_kpsewhich):
    with patch.object(latextools, "kpsewhich", mock):
        yield
# Run both with and without the kpsewhich patch (matplotlib backend does not
# consult kpsewhich, so both contexts must work).
@pytest.mark.parametrize('context', [no_op, patch_latextool])
@pytest.mark.parametrize('s_wrap', [("$x^2$", False), ("x^2", True)])
def test_latex_to_png_mpl_runs(s_wrap, context):
    """
    Test that latex_to_png_mpl just runs without error.
    """
    try:
        import matplotlib
    except ImportError:
        pytest.skip("This needs matplotlib to be available")
        return
    s, wrap = s_wrap
    with context():
        latextools.latex_to_png_mpl(s, wrap)


@skipif_not_matplotlib
def test_latex_to_html():
    # The HTML output embeds the PNG as a base64 data URI.
    img = latextools.latex_to_html("$x^2$")
    assert "data:image/png;base64,iVBOR" in img
def test_genelatex_no_wrap():
    """
    Test genelatex with wrap=False.
    """
    def mock_kpsewhich(filename):
        assert False, ("kpsewhich should not be called "
                       "(called with {0})".format(filename))

    # Without wrapping, the body is emitted verbatim inside the document.
    with patch_latextool(mock_kpsewhich):
        assert '\n'.join(latextools.genelatex("body text", False)) == r'''\documentclass{article}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{bm}
\pagestyle{empty}
\begin{document}
body text
\end{document}'''


def test_genelatex_wrap_with_breqn():
    """
    Test genelatex with wrap=True for the case breqn.sty is installed.
    """
    def mock_kpsewhich(filename):
        assert filename == "breqn.sty"
        return "path/to/breqn.sty"

    # With breqn available, the body is wrapped in a dmath* environment.
    with patch_latextool(mock_kpsewhich):
        assert '\n'.join(latextools.genelatex("x^2", True)) == r'''\documentclass{article}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{bm}
\usepackage{breqn}
\pagestyle{empty}
\begin{document}
\begin{dmath*}
x^2
\end{dmath*}
\end{document}'''


def test_genelatex_wrap_without_breqn():
    """
    Test genelatex with wrap=True for the case breqn.sty is not installed.
    """
    def mock_kpsewhich(filename):
        assert filename == "breqn.sty"
        return None

    # Without breqn, plain $$ ... $$ display math is used instead.
    with patch_latextool(mock_kpsewhich):
        assert '\n'.join(latextools.genelatex("x^2", True)) == r'''\documentclass{article}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{bm}
\pagestyle{empty}
\begin{document}
$$x^2$$
\end{document}'''
@skipif_not_matplotlib
@onlyif_cmds_exist('latex', 'dvipng')
def test_latex_to_png_color():
    """
    Test color settings for latex_to_png.
    """
    latex_string = "$x^2$"
    # Default color must match explicit black, for both backends.
    default_value = latextools.latex_to_png(latex_string, wrap=False)
    default_hexblack = latextools.latex_to_png(latex_string, wrap=False,
                                               color='#000000')
    dvipng_default = latextools.latex_to_png_dvipng(latex_string, False)
    dvipng_black = latextools.latex_to_png_dvipng(latex_string, False, 'Black')
    assert dvipng_default == dvipng_black
    mpl_default = latextools.latex_to_png_mpl(latex_string, False)
    mpl_black = latextools.latex_to_png_mpl(latex_string, False, 'Black')
    assert mpl_default == mpl_black
    assert default_value in [dvipng_black, mpl_black]
    assert default_hexblack in [dvipng_black, mpl_black]

    # Test that dvips name colors can be used without error
    dvipng_maroon = latextools.latex_to_png_dvipng(latex_string, False,
                                                   'Maroon')
    # And that it doesn't return the black one
    assert dvipng_black != dvipng_maroon
    mpl_maroon = latextools.latex_to_png_mpl(latex_string, False, 'Maroon')
    assert mpl_black != mpl_maroon
    # Named and hex forms of the same color must render identically.
    mpl_white = latextools.latex_to_png_mpl(latex_string, False, 'White')
    mpl_hexwhite = latextools.latex_to_png_mpl(latex_string, False, '#FFFFFF')
    assert mpl_white == mpl_hexwhite
    # A different scale must change the output.
    mpl_white_scale = latextools.latex_to_png_mpl(latex_string, False,
                                                 'White', 1.2)
    assert mpl_white != mpl_white_scale


def test_latex_to_png_invalid_hex_colors():
    """
    Test that invalid hex colors provided to dvipng gives an exception.
    """
    latex_string = "$x^2$"
    # Wrong length (#f00bar) and short form (#f00) are both rejected.
    pytest.raises(
        ValueError,
        lambda: latextools.latex_to_png(
            latex_string, backend="dvipng", color="#f00bar"
        ),
    )
    pytest.raises(
        ValueError,
        lambda: latextools.latex_to_png(latex_string, backend="dvipng", color="#f00"),
    )
| [
"IPython.testing.decorators.onlyif_cmds_exist",
"IPython.lib.latextools.latex_to_png_dvipng",
"IPython.lib.latextools.latex_to_png",
"pytest.mark.parametrize",
"IPython.lib.latextools.latex_to_png_mpl",
"unittest.mock.patch.object",
"IPython.lib.latextools.latex_to_html",
"pytest.skip",
"IPython.lib... | [((404, 459), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""command"""', "['latex', 'dvipng']"], {}), "('command', ['latex', 'dvipng'])\n", (427, 459), False, 'import pytest\n'), ((803, 839), 'IPython.testing.decorators.onlyif_cmds_exist', 'onlyif_cmds_exist', (['"""latex"""', '"""dvipng"""'], {}), "('latex', 'dvipng')\n", (820, 839), False, 'from IPython.testing.decorators import onlyif_cmds_exist, skipif_not_matplotlib\n'), ((841, 914), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""s, wrap"""', "[(u'$$x^2$$', False), (u'x^2', True)]"], {}), "('s, wrap', [(u'$$x^2$$', False), (u'x^2', True)])\n", (864, 914), False, 'import pytest\n'), ((1475, 1535), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""context"""', '[no_op, patch_latextool]'], {}), "('context', [no_op, patch_latextool])\n", (1498, 1535), False, 'import pytest\n'), ((1537, 1605), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""s_wrap"""', "[('$x^2$', False), ('x^2', True)]"], {}), "('s_wrap', [('$x^2$', False), ('x^2', True)])\n", (1560, 1605), False, 'import pytest\n'), ((3685, 3721), 'IPython.testing.decorators.onlyif_cmds_exist', 'onlyif_cmds_exist', (['"""latex"""', '"""dvipng"""'], {}), "('latex', 'dvipng')\n", (3702, 3721), False, 'from IPython.testing.decorators import onlyif_cmds_exist, skipif_not_matplotlib\n'), ((1132, 1171), 'IPython.lib.latextools.latex_to_png_dvipng', 'latextools.latex_to_png_dvipng', (['s', 'wrap'], {}), '(s, wrap)\n', (1162, 1171), False, 'from IPython.lib import latextools\n'), ((2008, 2041), 'IPython.lib.latextools.latex_to_html', 'latextools.latex_to_html', (['"""$x^2$"""'], {}), "('$x^2$')\n", (2032, 2041), False, 'from IPython.lib import latextools\n'), ((3858, 3907), 'IPython.lib.latextools.latex_to_png', 'latextools.latex_to_png', (['latex_string'], {'wrap': '(False)'}), '(latex_string, wrap=False)\n', (3881, 3907), False, 'from IPython.lib import latextools\n'), ((3931, 3997), 
'IPython.lib.latextools.latex_to_png', 'latextools.latex_to_png', (['latex_string'], {'wrap': '(False)', 'color': '"""#000000"""'}), "(latex_string, wrap=False, color='#000000')\n", (3954, 3997), False, 'from IPython.lib import latextools\n'), ((4066, 4117), 'IPython.lib.latextools.latex_to_png_dvipng', 'latextools.latex_to_png_dvipng', (['latex_string', '(False)'], {}), '(latex_string, False)\n', (4096, 4117), False, 'from IPython.lib import latextools\n'), ((4137, 4197), 'IPython.lib.latextools.latex_to_png_dvipng', 'latextools.latex_to_png_dvipng', (['latex_string', '(False)', '"""Black"""'], {}), "(latex_string, False, 'Black')\n", (4167, 4197), False, 'from IPython.lib import latextools\n'), ((4258, 4306), 'IPython.lib.latextools.latex_to_png_mpl', 'latextools.latex_to_png_mpl', (['latex_string', '(False)'], {}), '(latex_string, False)\n', (4285, 4306), False, 'from IPython.lib import latextools\n'), ((4323, 4380), 'IPython.lib.latextools.latex_to_png_mpl', 'latextools.latex_to_png_mpl', (['latex_string', '(False)', '"""Black"""'], {}), "(latex_string, False, 'Black')\n", (4350, 4380), False, 'from IPython.lib import latextools\n'), ((4609, 4670), 'IPython.lib.latextools.latex_to_png_dvipng', 'latextools.latex_to_png_dvipng', (['latex_string', '(False)', '"""Maroon"""'], {}), "(latex_string, False, 'Maroon')\n", (4639, 4670), False, 'from IPython.lib import latextools\n'), ((4828, 4886), 'IPython.lib.latextools.latex_to_png_mpl', 'latextools.latex_to_png_mpl', (['latex_string', '(False)', '"""Maroon"""'], {}), "(latex_string, False, 'Maroon')\n", (4855, 4886), False, 'from IPython.lib import latextools\n'), ((4938, 4995), 'IPython.lib.latextools.latex_to_png_mpl', 'latextools.latex_to_png_mpl', (['latex_string', '(False)', '"""White"""'], {}), "(latex_string, False, 'White')\n", (4965, 4995), False, 'from IPython.lib import latextools\n'), ((5015, 5074), 'IPython.lib.latextools.latex_to_png_mpl', 'latextools.latex_to_png_mpl', (['latex_string', '(False)', 
'"""#FFFFFF"""'], {}), "(latex_string, False, '#FFFFFF')\n", (5042, 5074), False, 'from IPython.lib import latextools\n'), ((5135, 5197), 'IPython.lib.latextools.latex_to_png_mpl', 'latextools.latex_to_png_mpl', (['latex_string', '(False)', '"""White"""', '(1.2)'], {}), "(latex_string, False, 'White', 1.2)\n", (5162, 5197), False, 'from IPython.lib import latextools\n'), ((619, 670), 'unittest.mock.patch.object', 'patch.object', (['latextools', '"""find_cmd"""', 'mock_find_cmd'], {}), "(latextools, 'find_cmd', mock_find_cmd)\n", (631, 670), False, 'from unittest.mock import patch\n'), ((1223, 1262), 'IPython.lib.latextools.latex_to_png_dvipng', 'latextools.latex_to_png_dvipng', (['s', 'wrap'], {}), '(s, wrap)\n', (1253, 1262), False, 'from IPython.lib import latextools\n'), ((1414, 1457), 'unittest.mock.patch.object', 'patch.object', (['latextools', '"""kpsewhich"""', 'mock'], {}), "(latextools, 'kpsewhich', mock)\n", (1426, 1457), False, 'from unittest.mock import patch\n'), ((1911, 1947), 'IPython.lib.latextools.latex_to_png_mpl', 'latextools.latex_to_png_mpl', (['s', 'wrap'], {}), '(s, wrap)\n', (1938, 1947), False, 'from IPython.lib import latextools\n'), ((687, 735), 'IPython.lib.latextools.latex_to_png_dvipng', 'latextools.latex_to_png_dvipng', (['"""whatever"""', '(True)'], {}), "('whatever', True)\n", (717, 735), False, 'from IPython.lib import latextools\n'), ((1794, 1846), 'pytest.skip', 'pytest.skip', (['"""This needs matplotlib to be available"""'], {}), "('This needs matplotlib to be available')\n", (1805, 1846), False, 'import pytest\n'), ((5504, 5576), 'IPython.lib.latextools.latex_to_png', 'latextools.latex_to_png', (['latex_string'], {'backend': '"""dvipng"""', 'color': '"""#f00bar"""'}), "(latex_string, backend='dvipng', color='#f00bar')\n", (5527, 5576), False, 'from IPython.lib import latextools\n'), ((5661, 5730), 'IPython.lib.latextools.latex_to_png', 'latextools.latex_to_png', (['latex_string'], {'backend': '"""dvipng"""', 'color': 
'"""#f00"""'}), "(latex_string, backend='dvipng', color='#f00')\n", (5684, 5730), False, 'from IPython.lib import latextools\n'), ((2394, 2434), 'IPython.lib.latextools.genelatex', 'latextools.genelatex', (['"""body text"""', '(False)'], {}), "('body text', False)\n", (2414, 2434), False, 'from IPython.lib import latextools\n'), ((2911, 2944), 'IPython.lib.latextools.genelatex', 'latextools.genelatex', (['"""x^2"""', '(True)'], {}), "('x^2', True)\n", (2931, 2944), False, 'from IPython.lib import latextools\n'), ((3454, 3487), 'IPython.lib.latextools.genelatex', 'latextools.genelatex', (['"""x^2"""', '(True)'], {}), "('x^2', True)\n", (3474, 3487), False, 'from IPython.lib import latextools\n')] |
#!/usr/bin/python3
#
# This is a Hello World example of BPF.
from bcc import BPF
# define BPF program
prog = """
int kprobe__sys_clone(void *ctx)
{
bpf_trace_printk("Hello, World!\\n");
return 0;
}
"""
# load BPF program
b = BPF(text=prog)
b.trace_print()
| [
"bcc.BPF"
] | [((235, 249), 'bcc.BPF', 'BPF', ([], {'text': 'prog'}), '(text=prog)\n', (238, 249), False, 'from bcc import BPF\n')] |
from django.contrib import admin
from accounts.models import *
admin.site.register(UserProfile)
admin.site.register(ProjectPage)
admin.site.register(Comment)
| [
"django.contrib.admin.site.register"
] | [((64, 96), 'django.contrib.admin.site.register', 'admin.site.register', (['UserProfile'], {}), '(UserProfile)\n', (83, 96), False, 'from django.contrib import admin\n'), ((97, 129), 'django.contrib.admin.site.register', 'admin.site.register', (['ProjectPage'], {}), '(ProjectPage)\n', (116, 129), False, 'from django.contrib import admin\n'), ((130, 158), 'django.contrib.admin.site.register', 'admin.site.register', (['Comment'], {}), '(Comment)\n', (149, 158), False, 'from django.contrib import admin\n')] |
import imaplib
import email
from email import message
import time
username = 'gmail_id'
password = '<PASSWORD>'
new_message = email.message.Message()
new_message.set_unixfrom('satheesh')
new_message['Subject'] = 'Sample Message'
# from gmail id
new_message['From'] = '<EMAIL>'
# to gmail id
new_message['To'] = '<EMAIL>'
# message data
new_message.set_payload('This is the body of the message.\n')
# print(new_message)
# you want to connect to a server; specify which server and port
# server = imaplib.IMAP4('server', 'port')
server = imaplib.IMAP4_SSL('imap.googlemail.com')
# after connecting, tell the server who you are to login to gmail
# server.login('user', 'password')
server.login(username, password)
# this will show you a list of available folders
# possibly your Inbox is called INBOX, but check the list of mailboxes
response, mailboxes = server.list()
if response == 'OK':
response, data = server.select("Inbox")
response = server.append('INBOX', '', imaplib.Time2Internaldate(time.time()), str(new_message).encode('utf-8'))
# print(response)
if response[0] == 'OK':
print("Gmail Appended Successfully")
else:
print("Not Appended")
server.close()
server.logout()
| [
"imaplib.IMAP4_SSL",
"time.time",
"email.message.Message"
] | [((129, 152), 'email.message.Message', 'email.message.Message', ([], {}), '()\n', (150, 152), False, 'import email\n'), ((540, 580), 'imaplib.IMAP4_SSL', 'imaplib.IMAP4_SSL', (['"""imap.googlemail.com"""'], {}), "('imap.googlemail.com')\n", (557, 580), False, 'import imaplib\n'), ((1004, 1015), 'time.time', 'time.time', ([], {}), '()\n', (1013, 1015), False, 'import time\n')] |
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras.applications.xception import Xception
import h5py
import json
import cv2
import math
import logging
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.xception import preprocess_input, decode_predictions
logging.basicConfig(level = logging.INFO)
sampling_rate = 5
sampled_frames = frame_stamps = []
top1_labels = top1_scores = []
def sampling_time_stamps(_sample_path):
cap = cv2.VideoCapture(_sample_path)
total_frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
logging.info('Total no. of frames in video:', total_frame_count)
for i in range(sampling_rate):
val = round(total_frame_count/sampling_rate)*(i+1)
frame_stamps.append(val)
def sampling_frames():
frameId , frame_count = 5, 0
success,frame = cap.read()
while success:
frame_count+=1
if frame_count in frame_stamps and frameId >= 1:
frame = cv2.resize(frame, (299,299))
sampled_frames.append(frame)
success,frame = cap.read()
frameId-=1
else:
success,frame = cap.read()
pass
def generate_and_average_predictions():
base_model = keras.applications.Xception(
weights='imagenet') # Load weights pre-trained on ImageNet.
for i in range(len(sampled_frames)):
img = sampled_frames[i]
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
preds = base_model.predict(x)
print('Prediction level:', (i+1), decode_predictions(preds, top=5)[0])
top1_labels.append(decode_predictions(preds, top=1)[0][0][1])
top1_scores.append(decode_predictions(preds, top=1)[0][0][2])
return top1_labels, top1_scores
def run():
sampling_time_stamps(_sample_path)
sampling_frames()
labels, scores = generate_and_average_predictions()
return labels, scores
| [
"logging.basicConfig",
"cv2.resize",
"tensorflow.keras.applications.xception.preprocess_input",
"tensorflow.keras.applications.xception.decode_predictions",
"tensorflow.keras.applications.Xception",
"cv2.VideoCapture",
"numpy.expand_dims",
"tensorflow.keras.preprocessing.image.img_to_array",
"loggin... | [((333, 372), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (352, 372), False, 'import logging\n'), ((519, 549), 'cv2.VideoCapture', 'cv2.VideoCapture', (['_sample_path'], {}), '(_sample_path)\n', (535, 549), False, 'import cv2\n'), ((613, 677), 'logging.info', 'logging.info', (['"""Total no. of frames in video:"""', 'total_frame_count'], {}), "('Total no. of frames in video:', total_frame_count)\n", (625, 677), False, 'import logging\n'), ((1271, 1318), 'tensorflow.keras.applications.Xception', 'keras.applications.Xception', ([], {'weights': '"""imagenet"""'}), "(weights='imagenet')\n", (1298, 1318), False, 'from tensorflow import keras\n'), ((1448, 1471), 'tensorflow.keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (1466, 1471), False, 'from tensorflow.keras.preprocessing import image\n'), ((1482, 1507), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1496, 1507), True, 'import numpy as np\n'), ((1518, 1537), 'tensorflow.keras.applications.xception.preprocess_input', 'preprocess_input', (['x'], {}), '(x)\n', (1534, 1537), False, 'from tensorflow.keras.applications.xception import preprocess_input, decode_predictions\n'), ((1006, 1035), 'cv2.resize', 'cv2.resize', (['frame', '(299, 299)'], {}), '(frame, (299, 299))\n', (1016, 1035), False, 'import cv2\n'), ((1614, 1646), 'tensorflow.keras.applications.xception.decode_predictions', 'decode_predictions', (['preds'], {'top': '(5)'}), '(preds, top=5)\n', (1632, 1646), False, 'from tensorflow.keras.applications.xception import preprocess_input, decode_predictions\n'), ((1678, 1710), 'tensorflow.keras.applications.xception.decode_predictions', 'decode_predictions', (['preds'], {'top': '(1)'}), '(preds, top=1)\n', (1696, 1710), False, 'from tensorflow.keras.applications.xception import preprocess_input, decode_predictions\n'), ((1746, 1778), 
'tensorflow.keras.applications.xception.decode_predictions', 'decode_predictions', (['preds'], {'top': '(1)'}), '(preds, top=1)\n', (1764, 1778), False, 'from tensorflow.keras.applications.xception import preprocess_input, decode_predictions\n')] |
from django.contrib.auth.models import User
from django.core import serializers
from django.core.exceptions import ObjectDoesNotExist
from django.db import DataError
from django.http import JsonResponse, HttpResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.decorators import api_view
from clinicmodels.models import ConsultType, Visit
from consult.forms import ConsultForm
@api_view(['GET'])
def get_all_consult_types(request):
try:
consulttypes = ConsultType.objects.all()
if consulttypes.count() == 0:
return JsonResponse({"message": "ConsultType matching query does not exist"}, status=404)
response = serializers.serialize("json", consulttypes)
return HttpResponse(response, content_type='application/json')
except ObjectDoesNotExist as e:
return JsonResponse({"message": str(e)}, status=404)
except DataError as e:
return JsonResponse({"message": str(e)}, status=400)
@api_view(['POST'])
def create_new_consult_type(request):
try:
if 'consult_type' not in request.POST:
return JsonResponse({"message": "POST: parameter 'consult_type' not found"}, status=400)
consult_type_field = request.POST['consult_type']
consulttype = ConsultType(type=consult_type_field)
consulttype.save()
response = serializers.serialize("json", [consulttype, ])
return HttpResponse(response, content_type='application/json')
except ObjectDoesNotExist as e:
return JsonResponse({"message": str(e)}, status=404)
except DataError as e:
return JsonResponse({"message": str(e)}, status=400)
@api_view(['POST'])
@csrf_exempt
def create_new_consult(request):
try:
if 'visit' not in request.POST:
return JsonResponse({"message": "POST: parameter 'visit' not found"}, status=400)
if 'doctor' not in request.POST:
return JsonResponse({"message": "POST: parameter 'doctor' not found"}, status=400)
if 'consult_type' not in request.POST:
return JsonResponse({"message": "POST: parameter 'consult_type' not found"}, status=400)
visit_id = request.POST['visit']
doctor_id = request.POST['doctor']
consult_type_name = request.POST['consult_type']
Visit.objects.get(pk=visit_id)
User.objects.get(pk=doctor_id)
consult_type = ConsultType.objects.get(type=consult_type_name)
consult_form = ConsultForm(request.POST)
consult_form.consult_type = consult_type
if consult_form.is_valid():
consult = consult_form.save()
response = serializers.serialize("json", [consult, ])
return HttpResponse(response, content_type='application/json')
else:
return JsonResponse({"message": consult_form.errors}, status=400)
except ObjectDoesNotExist as e:
return JsonResponse({"message": str(e)}, status=404)
except DataError as e:
return JsonResponse({"message": str(e)}, status=400)
| [
"clinicmodels.models.ConsultType.objects.all",
"clinicmodels.models.ConsultType",
"clinicmodels.models.ConsultType.objects.get",
"consult.forms.ConsultForm",
"django.http.HttpResponse",
"django.http.JsonResponse",
"clinicmodels.models.Visit.objects.get",
"django.core.serializers.serialize",
"django.... | [((410, 427), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (418, 427), False, 'from rest_framework.decorators import api_view\n'), ((984, 1002), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (992, 1002), False, 'from rest_framework.decorators import api_view\n'), ((1667, 1685), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (1675, 1685), False, 'from rest_framework.decorators import api_view\n'), ((496, 521), 'clinicmodels.models.ConsultType.objects.all', 'ConsultType.objects.all', ([], {}), '()\n', (519, 521), False, 'from clinicmodels.models import ConsultType, Visit\n'), ((681, 724), 'django.core.serializers.serialize', 'serializers.serialize', (['"""json"""', 'consulttypes'], {}), "('json', consulttypes)\n", (702, 724), False, 'from django.core import serializers\n'), ((740, 795), 'django.http.HttpResponse', 'HttpResponse', (['response'], {'content_type': '"""application/json"""'}), "(response, content_type='application/json')\n", (752, 795), False, 'from django.http import JsonResponse, HttpResponse\n'), ((1278, 1314), 'clinicmodels.models.ConsultType', 'ConsultType', ([], {'type': 'consult_type_field'}), '(type=consult_type_field)\n', (1289, 1314), False, 'from clinicmodels.models import ConsultType, Visit\n'), ((1361, 1405), 'django.core.serializers.serialize', 'serializers.serialize', (['"""json"""', '[consulttype]'], {}), "('json', [consulttype])\n", (1382, 1405), False, 'from django.core import serializers\n'), ((1423, 1478), 'django.http.HttpResponse', 'HttpResponse', (['response'], {'content_type': '"""application/json"""'}), "(response, content_type='application/json')\n", (1435, 1478), False, 'from django.http import JsonResponse, HttpResponse\n'), ((2308, 2338), 'clinicmodels.models.Visit.objects.get', 'Visit.objects.get', ([], {'pk': 'visit_id'}), '(pk=visit_id)\n', (2325, 2338), False, 'from clinicmodels.models import 
ConsultType, Visit\n'), ((2347, 2377), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'pk': 'doctor_id'}), '(pk=doctor_id)\n', (2363, 2377), False, 'from django.contrib.auth.models import User\n'), ((2401, 2448), 'clinicmodels.models.ConsultType.objects.get', 'ConsultType.objects.get', ([], {'type': 'consult_type_name'}), '(type=consult_type_name)\n', (2424, 2448), False, 'from clinicmodels.models import ConsultType, Visit\n'), ((2473, 2498), 'consult.forms.ConsultForm', 'ConsultForm', (['request.POST'], {}), '(request.POST)\n', (2484, 2498), False, 'from consult.forms import ConsultForm\n'), ((579, 665), 'django.http.JsonResponse', 'JsonResponse', (["{'message': 'ConsultType matching query does not exist'}"], {'status': '(404)'}), "({'message': 'ConsultType matching query does not exist'},\n status=404)\n", (591, 665), False, 'from django.http import JsonResponse, HttpResponse\n'), ((1116, 1201), 'django.http.JsonResponse', 'JsonResponse', (['{\'message\': "POST: parameter \'consult_type\' not found"}'], {'status': '(400)'}), '({\'message\': "POST: parameter \'consult_type\' not found"},\n status=400)\n', (1128, 1201), False, 'from django.http import JsonResponse, HttpResponse\n'), ((1800, 1874), 'django.http.JsonResponse', 'JsonResponse', (['{\'message\': "POST: parameter \'visit\' not found"}'], {'status': '(400)'}), '({\'message\': "POST: parameter \'visit\' not found"}, status=400)\n', (1812, 1874), False, 'from django.http import JsonResponse, HttpResponse\n'), ((1935, 2010), 'django.http.JsonResponse', 'JsonResponse', (['{\'message\': "POST: parameter \'doctor\' not found"}'], {'status': '(400)'}), '({\'message\': "POST: parameter \'doctor\' not found"}, status=400)\n', (1947, 2010), False, 'from django.http import JsonResponse, HttpResponse\n'), ((2077, 2162), 'django.http.JsonResponse', 'JsonResponse', (['{\'message\': "POST: parameter \'consult_type\' not found"}'], {'status': '(400)'}), '({\'message\': "POST: parameter 
\'consult_type\' not found"},\n status=400)\n', (2089, 2162), False, 'from django.http import JsonResponse, HttpResponse\n'), ((2649, 2689), 'django.core.serializers.serialize', 'serializers.serialize', (['"""json"""', '[consult]'], {}), "('json', [consult])\n", (2670, 2689), False, 'from django.core import serializers\n'), ((2711, 2766), 'django.http.HttpResponse', 'HttpResponse', (['response'], {'content_type': '"""application/json"""'}), "(response, content_type='application/json')\n", (2723, 2766), False, 'from django.http import JsonResponse, HttpResponse\n'), ((2800, 2858), 'django.http.JsonResponse', 'JsonResponse', (["{'message': consult_form.errors}"], {'status': '(400)'}), "({'message': consult_form.errors}, status=400)\n", (2812, 2858), False, 'from django.http import JsonResponse, HttpResponse\n')] |
import json
import requests
import os
import subprocess
import platform
HTTP = requests.session()
class ClientException(Exception):
message = 'unhandled error'
def __init__(self, message=None):
if message is not None:
self.message = message
def getURL(address):
url = "https://api.etherscan.io/api"
url += "?module=contract"
url += "&action=getsourcecode"
url += "&address=" + address
url += "&apikey=<KEY>"
return url
def connect(url):
try:
req = HTTP.get(url)
except requests.exceptions.ConnectionError:
raise ClientException
if req.status_code == 200:
# Check for empty response
if req.text:
data = req.json()
status = data.get('status')
if status == '1' or status == '0':
return data
else:
raise ClientException
raise ClientException
def getCode(jsonCode, fileName):
code = jsonCode[0]['SourceCode']
contractName = jsonCode[0]['ContractName']
if (code == ''):
print(fileName + ': not verified yet!')
return code
if (code.find('"content": "') == -1):
return code
# removing unnecessary braces
code = code[1:-1]
code = code.replace("\r", "")
code = code.replace("\n", "")
# Etherscan API send bad JSON
index = code.find('"content": "')
clearCode = ''
while index != -1:
clearCode += code[:index+12]
code = code[index+12:]
index2 = code.find('" },')
if (index2 == -1):
index2 = code.find('" }')
tmpString = code[:index2]
tmpString = tmpString.replace('\\"', "'")
clearCode += tmpString
code = code[index2:]
index = code.find('"content": "')
clearCode += code
code = json.loads(clearCode)
contractCode = ''
for src in code['sources']:
if (src.find(contractName) != -1):
contractCode = code['sources'][src]['content']
break
return contractCode
if __name__ == "__main__":
with open("Config.json") as jsonFile:
jsonObject = json.load(jsonFile)
jsonFile.close()
dir = jsonObject['directory']
addresses = jsonObject['addresses']
isExist = os.path.exists(dir)
if not isExist:
os.makedirs(dir)
for address in addresses:
url = getURL(address[1])
req = connect(url)
code = getCode(req['result'], address[0])
if (code == ''):
continue
file = open(dir + address[0] + '.sol', "w+")
file.write(code)
file.close()
# File comparison
print('Open: ' + address[0])
etherscanCode = dir + address[0] + '.sol'
githubCode = address[2]
if (platform.system() == 'Windows'):
subprocess.call(['C:\\Program Files (x86)\\Meld\\meld.exe', etherscanCode, githubCode])
else:
os.system('meld ' + etherscanCode + ' ' + githubCode) | [
"os.path.exists",
"requests.session",
"json.loads",
"os.makedirs",
"platform.system",
"subprocess.call",
"json.load",
"os.system"
] | [((80, 98), 'requests.session', 'requests.session', ([], {}), '()\n', (96, 98), False, 'import requests\n'), ((1830, 1851), 'json.loads', 'json.loads', (['clearCode'], {}), '(clearCode)\n', (1840, 1851), False, 'import json\n'), ((2281, 2300), 'os.path.exists', 'os.path.exists', (['dir'], {}), '(dir)\n', (2295, 2300), False, 'import os\n'), ((2142, 2161), 'json.load', 'json.load', (['jsonFile'], {}), '(jsonFile)\n', (2151, 2161), False, 'import json\n'), ((2329, 2345), 'os.makedirs', 'os.makedirs', (['dir'], {}), '(dir)\n', (2340, 2345), False, 'import os\n'), ((2789, 2806), 'platform.system', 'platform.system', ([], {}), '()\n', (2804, 2806), False, 'import platform\n'), ((2834, 2925), 'subprocess.call', 'subprocess.call', (["['C:\\\\Program Files (x86)\\\\Meld\\\\meld.exe', etherscanCode, githubCode]"], {}), "(['C:\\\\Program Files (x86)\\\\Meld\\\\meld.exe', etherscanCode,\n githubCode])\n", (2849, 2925), False, 'import subprocess\n'), ((2948, 3001), 'os.system', 'os.system', (["('meld ' + etherscanCode + ' ' + githubCode)"], {}), "('meld ' + etherscanCode + ' ' + githubCode)\n", (2957, 3001), False, 'import os\n')] |
from molecules.dna_molecule import DNAMolecule
from molecules.dna_sequence import DNASequence
class GelElectrophoresis:
"""
Produce the Gel Electrophoresis procedure to sort DNA molecules by their size.
"""
@staticmethod
def run_gel(dna_molecules):
"""
Runs the Gel Electrophoresis procedure to sort DNA molecules by their size.
:param dna_molecules: DNA molecules.
:return: Sorted list of the DNA molecules given.
"""
molecules = list(dna_molecules)
molecules.sort(key=lambda mol: mol.length)
return molecules
if __name__ == '__main__':
dna_sequences = [
DNASequence.create_random_sequence(size=20),
DNASequence.create_random_sequence(size=10),
DNASequence.create_random_sequence(size=30),
DNASequence.create_random_sequence(size=5),
DNASequence.create_random_sequence(size=15)
]
ex_dna_molecules = [
DNAMolecule(dna_sequences[index], dna_sequences[index].get_complement())
for index in range(len(dna_sequences))
]
print('DNA molecules:')
for molecule in ex_dna_molecules:
print(f'{molecule}\n')
print('\nRun Gel Electrophoresis on DNA molecules:')
gel_dna_molecules = GelElectrophoresis.run_gel(ex_dna_molecules)
print('Results DNA molecules:')
for molecule in gel_dna_molecules:
print(f'{molecule}\n')
| [
"molecules.dna_sequence.DNASequence.create_random_sequence"
] | [((658, 701), 'molecules.dna_sequence.DNASequence.create_random_sequence', 'DNASequence.create_random_sequence', ([], {'size': '(20)'}), '(size=20)\n', (692, 701), False, 'from molecules.dna_sequence import DNASequence\n'), ((711, 754), 'molecules.dna_sequence.DNASequence.create_random_sequence', 'DNASequence.create_random_sequence', ([], {'size': '(10)'}), '(size=10)\n', (745, 754), False, 'from molecules.dna_sequence import DNASequence\n'), ((764, 807), 'molecules.dna_sequence.DNASequence.create_random_sequence', 'DNASequence.create_random_sequence', ([], {'size': '(30)'}), '(size=30)\n', (798, 807), False, 'from molecules.dna_sequence import DNASequence\n'), ((817, 859), 'molecules.dna_sequence.DNASequence.create_random_sequence', 'DNASequence.create_random_sequence', ([], {'size': '(5)'}), '(size=5)\n', (851, 859), False, 'from molecules.dna_sequence import DNASequence\n'), ((869, 912), 'molecules.dna_sequence.DNASequence.create_random_sequence', 'DNASequence.create_random_sequence', ([], {'size': '(15)'}), '(size=15)\n', (903, 912), False, 'from molecules.dna_sequence import DNASequence\n')] |
import time
import os
from pykafka.test.kafka_instance import KafkaInstance, KafkaConnection
def get_cluster():
"""Gets a Kafka cluster for testing, using one already running is possible.
An already-running cluster is determined by environment variables:
BROKERS, ZOOKEEPER, KAFKA_BIN. This is used primarily to speed up tests
in our Travis-CI environment.
"""
if os.environ.get('BROKERS', None) and \
os.environ.get('ZOOKEEPER', None) and \
os.environ.get('KAFKA_BIN', None):
# Broker is already running. Use that.
return KafkaConnection(os.environ['KAFKA_BIN'],
os.environ['BROKERS'],
os.environ['ZOOKEEPER'],
os.environ.get('BROKERS_SSL', None))
else:
return KafkaInstance(num_instances=3)
def stop_cluster(cluster):
"""Stop a created cluster, or merely flush a pre-existing one."""
if isinstance(cluster, KafkaInstance):
cluster.terminate()
else:
cluster.flush()
def retry(assertion_callable, retry_time=10, wait_between_tries=0.1, exception_to_retry=AssertionError):
"""Retry assertion callable in a loop"""
start = time.time()
while True:
try:
return assertion_callable()
except exception_to_retry as e:
if time.time() - start >= retry_time:
raise e
time.sleep(wait_between_tries)
| [
"time.time",
"os.environ.get",
"time.sleep",
"pykafka.test.kafka_instance.KafkaInstance"
] | [((1225, 1236), 'time.time', 'time.time', ([], {}), '()\n', (1234, 1236), False, 'import time\n'), ((393, 424), 'os.environ.get', 'os.environ.get', (['"""BROKERS"""', 'None'], {}), "('BROKERS', None)\n", (407, 424), False, 'import os\n'), ((438, 471), 'os.environ.get', 'os.environ.get', (['"""ZOOKEEPER"""', 'None'], {}), "('ZOOKEEPER', None)\n", (452, 471), False, 'import os\n'), ((485, 518), 'os.environ.get', 'os.environ.get', (['"""KAFKA_BIN"""', 'None'], {}), "('KAFKA_BIN', None)\n", (499, 518), False, 'import os\n'), ((826, 856), 'pykafka.test.kafka_instance.KafkaInstance', 'KafkaInstance', ([], {'num_instances': '(3)'}), '(num_instances=3)\n', (839, 856), False, 'from pykafka.test.kafka_instance import KafkaInstance, KafkaConnection\n'), ((764, 799), 'os.environ.get', 'os.environ.get', (['"""BROKERS_SSL"""', 'None'], {}), "('BROKERS_SSL', None)\n", (778, 799), False, 'import os\n'), ((1432, 1462), 'time.sleep', 'time.sleep', (['wait_between_tries'], {}), '(wait_between_tries)\n', (1442, 1462), False, 'import time\n'), ((1361, 1372), 'time.time', 'time.time', ([], {}), '()\n', (1370, 1372), False, 'import time\n')] |
import base64
from scapy.layers.inet import *
from scapy.layers.dns import *
import dissector
class SIPStartField(StrField):
"""
field class for handling sip start field
@attention: it inherets StrField from Scapy library
"""
holds_packets = 1
name = "SIPStartField"
def getfield(self, pkt, s):
"""
this method will get the packet, takes what does need to be
taken and let the remaining go, so it returns two values.
first value which belongs to this field and the second is
the remaining which does need to be dissected with
other "field classes".
@param pkt: holds the whole packet
@param s: holds only the remaining data which is not dissected yet.
"""
cstream = -1
if pkt.underlayer.name == "TCP":
cstream = dissector.check_stream(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"],\
pkt.underlayer.fields["seq"], s)
if not cstream == -1:
s = cstream
remain = ""
value = ""
ls = s.splitlines(True)
f = ls[0].split()
if "SIP" in f[0]:
ls = s.splitlines(True)
f = ls[0].split()
length = len(f)
value = ""
if length == 3:
value = "SIP-Version:" + f[0] + ", Status-Code:" +\
f[1] + ", Reason-Phrase:" + f[2]
ls.remove(ls[0])
for element in ls:
remain = remain + element
else:
value = ls[0]
ls.remove(ls[0])
for element in ls:
remain = remain + element
return remain, value
elif "SIP" in f[2]:
ls = s.splitlines(True)
f = ls[0].split()
length = len(f)
value = []
if length == 3:
value = "Method:" + f[0] + ", Request-URI:" +\
f[1] + ", SIP-Version:" + f[2]
ls.remove(ls[0])
for element in ls:
remain = remain + element
else:
value = ls[0]
ls.remove(ls[0])
for element in ls:
remain = remain + element
return remain, value
else:
return s, ""
class SIPMsgField(StrField):
"""
field class for handling the body of sip packets
@attention: it inherets StrField from Scapy library
"""
holds_packets = 1
name = "SIPMsgField"
myresult = ""
def __init__(self, name, default):
"""
class constructor, for initializing instance variables
@param name: name of the field
@param default: Scapy has many formats to represent the data
internal, human and machine. anyways you may sit this param to None.
"""
self.name = name
self.fmt = "!B"
Field.__init__(self, name, default, "!B")
def getfield(self, pkt, s):
"""
this method will get the packet, takes what does need to be
taken and let the remaining go, so it returns two values.
first value which belongs to this field and the second is
the remaining which does need to be dissected with
other "field classes".
@param pkt: holds the whole packet
@param s: holds only the remaining data which is not dissected yet.
"""
if s.startswith("\r\n"):
s = s.lstrip("\r\n")
if s == "":
return "", ""
self.myresult = ""
for c in s:
self.myresult = self.myresult + base64.standard_b64encode(c)
return "", self.myresult
class SIPField(StrField):
"""
field class for handling the body of sip fields
@attention: it inherets StrField from Scapy library
"""
holds_packets = 1
name = "SIPField"
def getfield(self, pkt, s):
"""
this method will get the packet, takes what does need to be
taken and let the remaining go, so it returns two values.
first value which belongs to this field and the second is
the remaining which does need to be dissected with
other "field classes".
@param pkt: holds the whole packet
@param s: holds only the remaining data which is not dissected yet.
"""
if self.name == "unknown-header(s): ":
remain = ""
value = []
ls = s.splitlines(True)
i = -1
for element in ls:
i = i + 1
if element == "\r\n":
return s, []
elif element != "\r\n" and (": " in element[:10])\
and (element[-2:] == "\r\n"):
value.append(element)
ls.remove(ls[i])
remain = ""
unknown = True
for element in ls:
if element != "\r\n" and (": " in element[:15])\
and (element[-2:] == "\r\n") and unknown:
value.append(element)
else:
unknow = False
remain = remain + element
return remain, value
return s, []
remain = ""
value = ""
ls = s.splitlines(True)
i = -1
for element in ls:
i = i + 1
if element.upper().startswith(self.name.upper()):
value = element
value = value.strip(self.name)
ls.remove(ls[i])
remain = ""
for element in ls:
remain = remain + element
return remain, value[len(self.name) + 1:]
return s, ""
def __init__(self, name, default, fmt, remain=0):
"""
class constructor for initializing the instance variables
@param name: name of the field
@param default: Scapy has many formats to represent the data
internal, human and machine. anyways you may sit this param to None.
@param fmt: specifying the format, this has been set to "H"
@param remain: this parameter specifies the size of the remaining
data so make it 0 to handle all of the data.
"""
self.name = name
StrField.__init__(self, name, default, fmt, remain)
class SIP(Packet):
"""
class for handling the body of sip packets
@attention: it inherets Packet from Scapy library
"""
name = "sip"
fields_desc = [SIPStartField("start-line: ", "", "H"),
SIPField("accept: ", "", "H"),
SIPField("accept-contact: ", "", "H"),
SIPField("accept-encoding: ", "", "H"),
SIPField("accept-language: ", "", "H"),
SIPField("accept-resource-priority: ", "", "H"),
SIPField("alert-info: ", "", "H"),
SIPField("allow: ", "", "H"),
SIPField("allow-events: ", "", "H"),
SIPField("authentication-info: ", "", "H"),
SIPField("authorization: ", "", "H"),
SIPField("call-id: ", "", "H"),
SIPField("call-info: ", "", "H"),
SIPField("contact: ", "", "H"),
SIPField("content-disposition: ", "", "H"),
SIPField("content-encoding: ", "", "H"),
SIPField("content-language: ", "", "H"),
SIPField("content-length: ", "", "H"),
SIPField("content-type: ", "", "H"),
SIPField("cseq: ", "", "H"),
SIPField("date: ", "", "H"),
SIPField("error-info: ", "", "H"),
SIPField("event: ", "", "H"),
SIPField("expires: ", "", "H"),
SIPField("from: ", "", "H"),
SIPField("in-reply-to: ", "", "H"),
SIPField("join: ", "", "H"),
SIPField("max-forwards: ", "", "H"),
SIPField("mime-version: ", "", "H"),
SIPField("min-expires: ", "", "H"),
SIPField("min-se: ", "", "H"),
SIPField("organization: ", "", "H"),
SIPField("p-access-network-info: ", "", "H"),
SIPField("p-asserted-identity: ", "", "H"),
SIPField("p-associated-uri: ", "", "H"),
SIPField("p-called-party-id: ", "", "H"),
SIPField("p-charging-function-addresses: ", "", "H"),
SIPField("p-charging-vector: ", "", "H"),
SIPField("p-dcs-trace-party-id: ", "", "H"),
SIPField("p-dcs-osps: ", "", "H"),
SIPField("p-dcs-billing-info: ", "", "H"),
SIPField("p-dcs-laes: ", "", "H"),
SIPField("p-dcs-redirect: ", "", "H"),
SIPField("p-media-authorization: ", "", "H"),
SIPField("p-preferred-identity: ", "", "H"),
SIPField("p-visited-network-id: ", "", "H"),
SIPField("path: ", "", "H"),
SIPField("priority: ", "", "H"),
SIPField("privacy: ", "", "H"),
SIPField("proxy-authenticate: ", "", "H"),
SIPField("proxy-authorization: ", "", "H"),
SIPField("proxy-require: ", "", "H"),
SIPField("rack: ", "", "H"),
SIPField("reason: ", "", "H"),
SIPField("record-route: ", "", "H"),
SIPField("referred-by: ", "", "H"),
SIPField("reject-contact: ", "", "H"),
SIPField("replaces: ", "", "H"),
SIPField("reply-to: ", "", "H"),
SIPField("request-disposition: ", "", "H"),
SIPField("require: ", "", "H"),
SIPField("resource-priority: ", "", "H"),
SIPField("retry-after: ", "", "H"),
SIPField("route: ", "", "H"),
SIPField("rseq: ", "", "H"),
SIPField("security-client: ", "", "H"),
SIPField("security-server: ", "", "H"),
SIPField("security-verify: ", "", "H"),
SIPField("server: ", "", "H"),
SIPField("service-route: ", "", "H"),
SIPField("session-expires: ", "", "H"),
SIPField("sip-etag: ", "", "H"),
SIPField("sip-if-match: ", "", "H"),
SIPField("subject: ", "", "H"),
SIPField("subscription-state: ", "", "H"),
SIPField("supported: ", "", "H"),
SIPField("timestamp: ", "", "H"),
SIPField("to: ", "", "H"),
SIPField("unsupported: ", "", "H"),
SIPField("user-agent: ", "", "H"),
SIPField("via: ", "", "H"),
SIPField("warning: ", "", "H"),
SIPField("www-authenticate: ", "", "H"),
SIPField("refer-to: ", "", "H"),
SIPField("history-info: ", "", "H"),
SIPField("unknown-header(s): ", "", "H"),
SIPMsgField("message-body: ", "")]
bind_layers(TCP, SIP, sport=5060)
bind_layers(TCP, SIP, dport=5060)
bind_layers(UDP, SIP, sport=5060)
bind_layers(UDP, SIP, dport=5060)
| [
"dissector.check_stream",
"base64.standard_b64encode"
] | [((844, 1054), 'dissector.check_stream', 'dissector.check_stream', (["pkt.underlayer.underlayer.fields['src']", "pkt.underlayer.underlayer.fields['dst']", "pkt.underlayer.fields['sport']", "pkt.underlayer.fields['dport']", "pkt.underlayer.fields['seq']", 's'], {}), "(pkt.underlayer.underlayer.fields['src'], pkt.\n underlayer.underlayer.fields['dst'], pkt.underlayer.fields['sport'],\n pkt.underlayer.fields['dport'], pkt.underlayer.fields['seq'], s)\n", (866, 1054), False, 'import dissector\n'), ((3810, 3838), 'base64.standard_b64encode', 'base64.standard_b64encode', (['c'], {}), '(c)\n', (3835, 3838), False, 'import base64\n')] |
from collections import namedtuple
import numpy as np
import scipy as sp
from scipy.sparse.csgraph import minimum_spanning_tree
from .. import logging as logg
from ..neighbors import Neighbors
from .. import utils
from .. import settings
def paga(
adata,
groups='louvain',
use_rna_velocity=False,
copy=False):
"""\
Generate cellular maps of differentiation manifolds with complex
topologies [Wolf17i]_.
Partition-based graph abstraction (PAGA) quantifies the connectivities of
partitions of a neighborhood graph of single cells, thereby generating a
much simpler abstracted graph whose nodes label the partitions. Together
with a random walk-based distance measure, this generates a partial
coordinatization of data useful for exploring and explaining its variation.
Parameters
----------
adata : :class:`~scanpy.api.AnnData`
Annotated data matrix.
groups : categorical annotation of observations or 'louvain_groups', optional (default: 'louvain_groups')
Criterion to determine the resulting partitions of the single-cell
graph. 'louvain_groups' uses the Louvain algorithm and optimizes
modularity of the graph. You can also pass your predefined groups by
choosing any categorical annotation of observations (`adata.obs`).
use_rna_velocity : `bool` (default: `False`)
Use RNA velocity to orient edges in the abstracted graph and estimate transitions.
copy : `bool`, optional (default: `False`)
Copy `adata` before computation and return a copy. Otherwise, perform
computation inplace and return `None`.
Returns
-------
Returns or updates `adata` depending on `copy` with
connectivities : np.ndarray (adata.uns['connectivities'])
The full adjacency matrix of the abstracted graph, weights
correspond to connectivities.
confidence : np.ndarray (adata.uns['confidence'])
The full adjacency matrix of the abstracted graph, weights
correspond to confidence in the presence of an edge.
confidence_tree : sc.sparse csr matrix (adata.uns['confidence_tree'])
The adjacency matrix of the tree-like subgraph that best explains
the topology.
"""
if 'neighbors' not in adata.uns:
raise ValueError(
'You need to run `pp.neighbors` first to compute a neighborhood graph.')
adata = adata.copy() if copy else adata
utils.sanitize_anndata(adata)
logg.info('running partition-based graph abstraction (PAGA)', reset=True)
paga = PAGA(adata, groups, use_rna_velocity=use_rna_velocity)
paga.compute()
# only add if not present
if 'paga' not in adata.uns:
adata.uns['paga'] = {}
if not use_rna_velocity:
adata.uns['paga']['connectivities'] = paga.connectivities_coarse
adata.uns['paga']['confidence'] = paga.confidence
adata.uns['paga']['confidence_tree'] = paga.confidence_tree
adata.uns[groups + '_sizes'] = np.array(paga.vc.sizes())
else:
adata.uns['paga']['transitions_confidence'] = paga.transitions_confidence
adata.uns['paga']['transitions_ttest'] = paga.transitions_ttest
adata.uns['paga']['groups'] = groups
logg.info(' finished', time=True, end=' ' if settings.verbosity > 2 else '\n')
if use_rna_velocity:
logg.hint(
'added\n'
' \'paga/transitions_confidence\', confidence adjacency (adata.uns)\n'
' \'paga/transitions_ttest\', confidence subtree (adata.uns)')
else:
logg.hint(
'added\n'
' \'paga/connectivities\', connectivities adjacency (adata.uns)\n'
' \'paga/confidence\', confidence adjacency (adata.uns)\n'
' \'paga/confidence_tree\', confidence subtree (adata.uns)')
return adata if copy else None
class PAGA(Neighbors):
def __init__(self, adata, groups, use_rna_velocity=False,
tree_based_confidence=False):
super(PAGA, self).__init__(adata)
self._groups = groups
self._tree_based_confidence = tree_based_confidence
self._use_rna_velocity = use_rna_velocity
def compute(self):
if self._use_rna_velocity:
self.compute_transitions_coarse()
else:
self.compute_connectivities_coarse()
self.compute_confidence()
def compute_connectivities_coarse(self):
import igraph
ones = self.connectivities.copy()
# graph where edges carry weight 1
ones.data = np.ones(len(ones.data))
g = utils.get_igraph_from_adjacency(ones)
self.vc = igraph.VertexClustering(
g, membership=self._adata.obs[self._groups].cat.codes.values)
cg = self.vc.cluster_graph(combine_edges='sum')
self.connectivities_coarse = utils.get_sparse_from_igraph(cg, weight_attr='weight')/2
def compute_confidence(self):
"""Translates the connectivities_coarse measure into a confidence measure.
"""
pseudo_distance = self.connectivities_coarse.copy()
pseudo_distance.data = 1./pseudo_distance.data
connectivities_coarse_tree = minimum_spanning_tree(pseudo_distance)
connectivities_coarse_tree.data = 1./connectivities_coarse_tree.data
connectivities_coarse_tree_indices = [
connectivities_coarse_tree[i].nonzero()[1]
for i in range(connectivities_coarse_tree.shape[0])]
# inter- and intra-cluster based confidence
if not self._tree_based_confidence:
total_n = self.n_neighbors * np.array(self.vc.sizes())
maximum = self.connectivities_coarse.max()
confidence = self.connectivities_coarse.copy() # initializing
for i in range(self.connectivities_coarse.shape[0]):
for j in range(i+1, self.connectivities_coarse.shape[1]):
if self.connectivities_coarse[i, j] > 0:
geom_mean = np.sqrt(total_n[i] * total_n[j])
confidence[i, j] = self.connectivities_coarse[i, j] / geom_mean
confidence[j, i] = confidence[i, j]
# tree-based confidence
else:
median_connectivities_coarse_tree = np.median(connectivities_coarse_tree.data)
confidence = self.connectivities_coarse.copy()
confidence.data[self.connectivities_coarse.data >= median_connectivities_coarse_tree] = 1
connectivities_coarse_adjusted = self.connectivities_coarse.copy()
connectivities_coarse_adjusted.data -= median_connectivities_coarse_tree
connectivities_coarse_adjusted.data = np.exp(connectivities_coarse_adjusted.data)
index = self.connectivities_coarse.data < median_connectivities_coarse_tree
confidence.data[index] = connectivities_coarse_adjusted.data[index]
confidence_tree = self.compute_confidence_tree(
confidence, connectivities_coarse_tree_indices)
self.confidence = confidence
self.confidence_tree = confidence_tree
def compute_confidence_tree(
self, confidence, connectivities_coarse_tree_indices):
confidence_tree = sp.sparse.lil_matrix(confidence.shape, dtype=float)
for i, neighbors in enumerate(connectivities_coarse_tree_indices):
if len(neighbors) > 0:
confidence_tree[i, neighbors] = confidence[i, neighbors]
return confidence_tree.tocsr()
def compute_transitions_coarse(self):
# analogous code using networkx
# membership = adata.obs['clusters'].cat.codes.tolist()
# partition = defaultdict(list)
# for n, p in zip(list(range(len(G))), membership):
# partition[p].append(n)
# partition = partition.values()
# g_abstracted = nx.quotient_graph(g, partition, relabel=True)
# for some reason, though, edges aren't oriented in the quotient
# graph...
import igraph
g = utils.get_igraph_from_adjacency(
self._adata.uns['velocyto_transitions'], directed=True)
vc = igraph.VertexClustering(
g, membership=self._adata.obs[self._groups].cat.codes.values)
cg_full = vc.cluster_graph(combine_edges=False)
g_bool = utils.get_igraph_from_adjacency(
self._adata.uns['velocyto_transitions'].astype('bool'), directed=True)
vc_bool = igraph.VertexClustering(
g_bool, membership=self._adata.obs[self._groups].cat.codes.values)
cg_bool = vc_bool.cluster_graph(combine_edges='sum') # collapsed version
transitions_coarse = utils.get_sparse_from_igraph(cg_bool, weight_attr='weight')
# translate this into a confidence measure
# the number of outgoing edges
# total_n = np.zeros(len(vc.sizes()))
# # (this is not the convention of standard stochastic matrices)
# total_outgoing = transitions_coarse.sum(axis=1)
# for i in range(len(total_n)):
# total_n[i] = vc.subgraph(i).ecount()
# total_n[i] += total_outgoing[i, 0]
# use the topology based reference, the velocity one might have very small numbers
total_n = self.n_neighbors * np.array(vc_bool.sizes())
transitions_ttest = transitions_coarse.copy()
transitions_confidence = transitions_coarse.copy()
from scipy.stats import ttest_1samp
for i in range(transitions_coarse.shape[0]):
# no symmetry in transitions_coarse, hence we should not restrict to
# upper triangle
neighbors = transitions_coarse[i].nonzero()[1]
for j in neighbors:
forward = cg_full.es.select(_source=i, _target=j)['weight']
backward = cg_full.es.select(_source=j, _target=i)['weight']
# backward direction: add minus sign
values = np.array(list(forward) + list(-np.array(backward)))
# require some minimal number of observations
if len(values) < 5:
transitions_ttest[i, j] = 0
transitions_ttest[j, i] = 0
transitions_confidence[i, j] = 0
transitions_confidence[j, i] = 0
continue
t, prob = ttest_1samp(values, 0.0)
if t > 0:
# number of outgoing edges greater than number of ingoing edges
# i.e., transition from i to j
transitions_ttest[i, j] = -np.log10(max(prob, 1e-10))
transitions_ttest[j, i] = 0
else:
transitions_ttest[j, i] = -np.log10(max(prob, 1e-10))
transitions_ttest[i, j] = 0
# geom_mean
geom_mean = np.sqrt(total_n[i] * total_n[j])
diff = (len(forward) - len(backward)) / geom_mean
if diff > 0:
transitions_confidence[i, j] = diff
transitions_confidence[j, i] = 0
else:
transitions_confidence[j, i] = -diff
transitions_confidence[i, j] = 0
transitions_ttest.eliminate_zeros()
transitions_confidence.eliminate_zeros()
# transpose in order to match convention of stochastic matrices
# entry ij means transition from j to i
self.transitions_ttest = transitions_ttest.T
self.transitions_confidence = transitions_confidence.T
def paga_degrees(adata):
"""Compute the degree of each node in the abstracted graph.
Parameters
----------
adata : AnnData
Annotated data matrix.
Returns
-------
degrees : list
List of degrees for each node.
"""
import networkx as nx
g = nx.Graph(adata.uns['paga']['confidence'])
degrees = [d for _, d in g.degree(weight='weight')]
return degrees
def paga_expression_entropies(adata):
"""Compute the median expression entropy for each node-group.
Parameters
----------
adata : AnnData
Annotated data matrix.
Returns
-------
entropies : list
Entropies of median expressions for each node.
"""
from scipy.stats import entropy
groups_order, groups_masks = utils.select_groups(
adata, key=adata.uns['paga']['groups'])
entropies = []
for mask in groups_masks:
X_mask = adata.X[mask]
x_median = np.median(X_mask, axis=0)
x_probs = (x_median - np.min(x_median)) / (np.max(x_median) - np.min(x_median))
entropies.append(entropy(x_probs))
return entropies
def paga_compare_paths(adata1, adata2,
adjacency_key='confidence', adjacency_key2=None):
"""Compare paths in abstracted graphs in two datasets.
Compute the fraction of consistent paths between leafs, a measure for the
topological similarity between graphs.
By increasing the verbosity to level 4 and 5, the paths that do not agree
and the paths that agree are written to the output, respectively.
The PAGA "groups key" needs to be the same in both objects.
Parameters
----------
adata1, adata2 : AnnData
Annotated data matrices to compare.
adjacency_key : str
Key for indexing the adjacency matrices in `.uns['paga']` to be used in
adata1 and adata2.
adjacency_key2 : str, None
If provided, used for adata2.
Returns
-------
OrderedTuple with attributes ``n_steps`` (total number of steps in paths)
and ``frac_steps`` (fraction of consistent steps), ``n_paths`` and
``frac_paths``.
"""
import networkx as nx
g1 = nx.Graph(adata1.uns['paga'][adjacency_key])
g2 = nx.Graph(adata2.uns['paga'][adjacency_key2 if adjacency_key2 is not None else adjacency_key])
leaf_nodes1 = [str(x) for x in g1.nodes() if g1.degree(x) == 1]
logg.msg('leaf nodes in graph 1: {}'.format(leaf_nodes1), v=5, no_indent=True)
paga_groups = adata1.uns['paga']['groups']
asso_groups1 = utils.identify_groups(adata1.obs[paga_groups].values,
adata2.obs[paga_groups].values)
asso_groups2 = utils.identify_groups(adata2.obs[paga_groups].values,
adata1.obs[paga_groups].values)
orig_names1 = adata1.obs[paga_groups].cat.categories
orig_names2 = adata2.obs[paga_groups].cat.categories
import itertools
n_steps = 0
n_agreeing_steps = 0
n_paths = 0
n_agreeing_paths = 0
# loop over all pairs of leaf nodes in the reference adata1
for (r, s) in itertools.combinations(leaf_nodes1, r=2):
r2, s2 = asso_groups1[r][0], asso_groups1[s][0]
orig_names = [orig_names1[int(i)] for i in [r, s]]
orig_names += [orig_names2[int(i)] for i in [r2, s2]]
logg.msg('compare shortest paths between leafs ({}, {}) in graph1 and ({}, {}) in graph2:'
.format(*orig_names), v=4, no_indent=True)
no_path1 = False
try:
path1 = [str(x) for x in nx.shortest_path(g1, int(r), int(s))]
except nx.NetworkXNoPath:
no_path1 = True
no_path2 = False
try:
path2 = [str(x) for x in nx.shortest_path(g2, int(r2), int(s2))]
except nx.NetworkXNoPath:
no_path2 = True
if no_path1 and no_path2:
# consistent behavior
n_paths += 1
n_agreeing_paths += 1
n_steps += 1
n_agreeing_steps += 1
logg.msg('there are no connecting paths in both graphs', v=5, no_indent=True)
continue
elif no_path1 or no_path2:
# non-consistent result
n_paths += 1
n_steps += 1
continue
if len(path1) >= len(path2):
path_mapped = [asso_groups1[l] for l in path1]
path_compare = path2
path_compare_id = 2
path_compare_orig_names = [[orig_names2[int(s)] for s in l] for l in path_compare]
path_mapped_orig_names = [[orig_names2[int(s)] for s in l] for l in path_mapped]
else:
path_mapped = [asso_groups2[l] for l in path2]
path_compare = path1
path_compare_id = 1
path_compare_orig_names = [[orig_names1[int(s)] for s in l] for l in path_compare]
path_mapped_orig_names = [[orig_names1[int(s)] for s in l] for l in path_mapped]
n_agreeing_steps_path = 0
ip_progress = 0
for il, l in enumerate(path_compare[:-1]):
for ip, p in enumerate(path_mapped):
if ip >= ip_progress and l in p:
# check whether we can find the step forward of path_compare in path_mapped
if (ip + 1 < len(path_mapped)
and
path_compare[il + 1] in path_mapped[ip + 1]):
# make sure that a step backward leads us to the same value of l
# in case we "jumped"
logg.msg('found matching step ({} -> {}) at position {} in path{} and position {} in path_mapped'
.format(l, path_compare_orig_names[il + 1], il, path_compare_id, ip), v=6)
consistent_history = True
for iip in range(ip, ip_progress, -1):
if l not in path_mapped[iip - 1]:
consistent_history = False
if consistent_history:
# here, we take one step further back (ip_progress - 1); it's implied that this
# was ok in the previous step
logg.msg(' step(s) backward to position(s) {} in path_mapped are fine, too: valid step'
.format(list(range(ip - 1, ip_progress - 2, -1))), v=6)
n_agreeing_steps_path += 1
ip_progress = ip + 1
break
n_steps_path = len(path_compare) - 1
n_agreeing_steps += n_agreeing_steps_path
n_steps += n_steps_path
n_paths += 1
if n_agreeing_steps_path == n_steps_path: n_agreeing_paths += 1
# only for the output, use original names
path1_orig_names = [orig_names1[int(s)] for s in path1]
path2_orig_names = [orig_names2[int(s)] for s in path2]
logg.msg(' path1 = {},\n'
'path_mapped = {},\n'
' path2 = {},\n'
'-> n_agreeing_steps = {} / n_steps = {}.'
.format(path1_orig_names,
[list(p) for p in path_mapped_orig_names],
path2_orig_names,
n_agreeing_steps_path, n_steps_path), v=5, no_indent=True)
Result = namedtuple('paga_compare_paths_result',
['frac_steps', 'n_steps', 'frac_paths', 'n_paths'])
return Result(frac_steps=n_agreeing_steps/n_steps if n_steps > 0 else np.nan,
n_steps=n_steps if n_steps > 0 else np.nan,
frac_paths=n_agreeing_paths/n_paths if n_steps > 0 else np.nan,
n_paths=n_paths if n_steps > 0 else np.nan)
| [
"collections.namedtuple",
"scipy.sparse.lil_matrix",
"numpy.median",
"scipy.stats.entropy",
"numpy.sqrt",
"networkx.Graph",
"numpy.max",
"itertools.combinations",
"numpy.exp",
"numpy.array",
"scipy.sparse.csgraph.minimum_spanning_tree",
"igraph.VertexClustering",
"scipy.stats.ttest_1samp",
... | [((11850, 11891), 'networkx.Graph', 'nx.Graph', (["adata.uns['paga']['confidence']"], {}), "(adata.uns['paga']['confidence'])\n", (11858, 11891), True, 'import networkx as nx\n'), ((13730, 13773), 'networkx.Graph', 'nx.Graph', (["adata1.uns['paga'][adjacency_key]"], {}), "(adata1.uns['paga'][adjacency_key])\n", (13738, 13773), True, 'import networkx as nx\n'), ((13783, 13880), 'networkx.Graph', 'nx.Graph', (["adata2.uns['paga'][adjacency_key2 if adjacency_key2 is not None else\n adjacency_key]"], {}), "(adata2.uns['paga'][adjacency_key2 if adjacency_key2 is not None else\n adjacency_key])\n", (13791, 13880), True, 'import networkx as nx\n'), ((14667, 14707), 'itertools.combinations', 'itertools.combinations', (['leaf_nodes1'], {'r': '(2)'}), '(leaf_nodes1, r=2)\n', (14689, 14707), False, 'import itertools\n'), ((18931, 19026), 'collections.namedtuple', 'namedtuple', (['"""paga_compare_paths_result"""', "['frac_steps', 'n_steps', 'frac_paths', 'n_paths']"], {}), "('paga_compare_paths_result', ['frac_steps', 'n_steps',\n 'frac_paths', 'n_paths'])\n", (18941, 19026), False, 'from collections import namedtuple\n'), ((4669, 4759), 'igraph.VertexClustering', 'igraph.VertexClustering', (['g'], {'membership': 'self._adata.obs[self._groups].cat.codes.values'}), '(g, membership=self._adata.obs[self._groups].cat.\n codes.values)\n', (4692, 4759), False, 'import igraph\n'), ((5200, 5238), 'scipy.sparse.csgraph.minimum_spanning_tree', 'minimum_spanning_tree', (['pseudo_distance'], {}), '(pseudo_distance)\n', (5221, 5238), False, 'from scipy.sparse.csgraph import minimum_spanning_tree\n'), ((7244, 7295), 'scipy.sparse.lil_matrix', 'sp.sparse.lil_matrix', (['confidence.shape'], {'dtype': 'float'}), '(confidence.shape, dtype=float)\n', (7264, 7295), True, 'import scipy as sp\n'), ((8154, 8244), 'igraph.VertexClustering', 'igraph.VertexClustering', (['g'], {'membership': 'self._adata.obs[self._groups].cat.codes.values'}), '(g, membership=self._adata.obs[self._groups].cat.\n 
codes.values)\n', (8177, 8244), False, 'import igraph\n'), ((8461, 8556), 'igraph.VertexClustering', 'igraph.VertexClustering', (['g_bool'], {'membership': 'self._adata.obs[self._groups].cat.codes.values'}), '(g_bool, membership=self._adata.obs[self._groups].\n cat.codes.values)\n', (8484, 8556), False, 'import igraph\n'), ((12501, 12526), 'numpy.median', 'np.median', (['X_mask'], {'axis': '(0)'}), '(X_mask, axis=0)\n', (12510, 12526), True, 'import numpy as np\n'), ((6287, 6329), 'numpy.median', 'np.median', (['connectivities_coarse_tree.data'], {}), '(connectivities_coarse_tree.data)\n', (6296, 6329), True, 'import numpy as np\n'), ((6705, 6748), 'numpy.exp', 'np.exp', (['connectivities_coarse_adjusted.data'], {}), '(connectivities_coarse_adjusted.data)\n', (6711, 6748), True, 'import numpy as np\n'), ((12640, 12656), 'scipy.stats.entropy', 'entropy', (['x_probs'], {}), '(x_probs)\n', (12647, 12656), False, 'from scipy.stats import entropy\n'), ((10346, 10370), 'scipy.stats.ttest_1samp', 'ttest_1samp', (['values', '(0.0)'], {}), '(values, 0.0)\n', (10357, 10370), False, 'from scipy.stats import ttest_1samp\n'), ((10854, 10886), 'numpy.sqrt', 'np.sqrt', (['(total_n[i] * total_n[j])'], {}), '(total_n[i] * total_n[j])\n', (10861, 10886), True, 'import numpy as np\n'), ((12557, 12573), 'numpy.min', 'np.min', (['x_median'], {}), '(x_median)\n', (12563, 12573), True, 'import numpy as np\n'), ((12578, 12594), 'numpy.max', 'np.max', (['x_median'], {}), '(x_median)\n', (12584, 12594), True, 'import numpy as np\n'), ((12597, 12613), 'numpy.min', 'np.min', (['x_median'], {}), '(x_median)\n', (12603, 12613), True, 'import numpy as np\n'), ((6012, 6044), 'numpy.sqrt', 'np.sqrt', (['(total_n[i] * total_n[j])'], {}), '(total_n[i] * total_n[j])\n', (6019, 6044), True, 'import numpy as np\n'), ((9970, 9988), 'numpy.array', 'np.array', (['backward'], {}), '(backward)\n', (9978, 9988), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import os
import pytest
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../lib"))
import dartsense.player
player_list = None
def test_player_list_init(setup_db):
player_list = dartsense.player.PlayerList()
assert isinstance(player_list, dartsense.player.PlayerList)
assert len(player_list) == 5
for player in player_list:
assert isinstance(player, dartsense.player.Player)
def test_player_list_filter(setup_db):
player_list = dartsense.player.PlayerList(
filters={'competition': pytest.setup_vars['testleague1_id']}
)
assert len(player_list) == 4
def test_player_list_search(setup_db):
player_list = dartsense.player.PlayerList(
search='player 3'
)
assert len(player_list) == 1
player_list = dartsense.player.PlayerList(
filters={'competition': pytest.setup_vars['testleague2_id']},
search='player 3'
)
assert len(player_list) == 1
| [
"os.path.dirname"
] | [((90, 115), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (105, 115), False, 'import os\n')] |
from models.contact import Contacts
import random
def test_edit_some_contact(app, db, check_ui):
if len(db.get_contact_list()) == 0:
app.contacts.create_contact(Contacts(lastname="LastNameUser", firstname="<NAME>"))
old_contacts = db.get_contact_list()
randomcontact = random.choice(old_contacts)
index = old_contacts.index(randomcontact)
contact = Contacts(id=randomcontact.id, lastname="Lastname", firstname="ModifyFirstname")
app.contacts.test_edit_contact_by_id(randomcontact.id, contact)
new_contacts = db.get_contact_list()
assert len(old_contacts) == len(new_contacts)
old_contacts[index] = contact
# assert sorted(old_contacts, key=Contacts.contact_id_or_max) == sorted(new_contacts, key=Contacts.contact_id_or_max)
if check_ui:
assert sorted(new_contacts, key=Contacts.contact_id_or_max) == sorted(app.contacts.get_contact_list(), key=Contacts.contact_id_or_max) | [
"random.choice",
"models.contact.Contacts"
] | [((290, 317), 'random.choice', 'random.choice', (['old_contacts'], {}), '(old_contacts)\n', (303, 317), False, 'import random\n'), ((378, 457), 'models.contact.Contacts', 'Contacts', ([], {'id': 'randomcontact.id', 'lastname': '"""Lastname"""', 'firstname': '"""ModifyFirstname"""'}), "(id=randomcontact.id, lastname='Lastname', firstname='ModifyFirstname')\n", (386, 457), False, 'from models.contact import Contacts\n'), ((174, 227), 'models.contact.Contacts', 'Contacts', ([], {'lastname': '"""LastNameUser"""', 'firstname': '"""<NAME>"""'}), "(lastname='LastNameUser', firstname='<NAME>')\n", (182, 227), False, 'from models.contact import Contacts\n')] |
"""
This demo will fill the screen with white, draw a black box on top
and then print Hello World! in the center of the display
This example is for use on (Linux) computers that are using CPython with
Adafruit Blinka to support CircuitPython libraries. CircuitPython does
not support PIL/pillow (python imaging library)!
"""
import board
import busio
import digitalio
from PIL import Image, ImageDraw, ImageFont
import adafruit_pcd8544
# Parameters to Change
BORDER = 5
FONTSIZE = 10
spi = busio.SPI(board.SCK, MOSI=board.MOSI)
dc = digitalio.DigitalInOut(board.D6) # data/command
cs = digitalio.DigitalInOut(board.CE0) # Chip select
reset = digitalio.DigitalInOut(board.D5) # reset
display = adafruit_pcd8544.PCD8544(spi, dc, cs, reset)
# Contrast and Brightness Settings
display.bias = 4
display.contrast = 60
# Turn on the Backlight LED
backlight = digitalio.DigitalInOut(board.D13) # backlight
backlight.switch_to_output()
backlight.value = True
# Clear display.
display.fill(0)
display.show()
# Create blank image for drawing.
# Make sure to create image with mode '1' for 1-bit color.
image = Image.new("1", (display.width, display.height))
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
# Draw a black background
draw.rectangle((0, 0, display.width, display.height), outline=255, fill=255)
# Draw a smaller inner rectangle
draw.rectangle(
(BORDER, BORDER, display.width - BORDER - 1, display.height - BORDER - 1),
outline=0,
fill=0,
)
# Load a TTF font.
font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", FONTSIZE)
# Draw Some Text
text = "Hello World!"
(font_width, font_height) = font.getsize(text)
draw.text(
(display.width // 2 - font_width // 2, display.height // 2 - font_height // 2),
text,
font=font,
fill=255,
)
# Display image
display.image(image)
display.show()
| [
"adafruit_pcd8544.PCD8544",
"PIL.Image.new",
"busio.SPI",
"PIL.ImageFont.truetype",
"PIL.ImageDraw.Draw",
"digitalio.DigitalInOut"
] | [((494, 531), 'busio.SPI', 'busio.SPI', (['board.SCK'], {'MOSI': 'board.MOSI'}), '(board.SCK, MOSI=board.MOSI)\n', (503, 531), False, 'import busio\n'), ((537, 569), 'digitalio.DigitalInOut', 'digitalio.DigitalInOut', (['board.D6'], {}), '(board.D6)\n', (559, 569), False, 'import digitalio\n'), ((591, 624), 'digitalio.DigitalInOut', 'digitalio.DigitalInOut', (['board.CE0'], {}), '(board.CE0)\n', (613, 624), False, 'import digitalio\n'), ((648, 680), 'digitalio.DigitalInOut', 'digitalio.DigitalInOut', (['board.D5'], {}), '(board.D5)\n', (670, 680), False, 'import digitalio\n'), ((701, 745), 'adafruit_pcd8544.PCD8544', 'adafruit_pcd8544.PCD8544', (['spi', 'dc', 'cs', 'reset'], {}), '(spi, dc, cs, reset)\n', (725, 745), False, 'import adafruit_pcd8544\n'), ((862, 895), 'digitalio.DigitalInOut', 'digitalio.DigitalInOut', (['board.D13'], {}), '(board.D13)\n', (884, 895), False, 'import digitalio\n'), ((1112, 1159), 'PIL.Image.new', 'Image.new', (['"""1"""', '(display.width, display.height)'], {}), "('1', (display.width, display.height))\n", (1121, 1159), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1207, 1228), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (1221, 1228), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1519, 1598), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf"""', 'FONTSIZE'], {}), "('/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf', FONTSIZE)\n", (1537, 1598), False, 'from PIL import Image, ImageDraw, ImageFont\n')] |
from pygame import mixer
import speech_recognition as sr
import pyttsx3
import pyjokes
import boto3
import pyglet
import winsound
import datetime
import pywhatkit
import datetime
import time
import os
from PIL import Image
import random
import wikipedia
import smtplib, ssl
from mutagen.mp3 import MP3
import requests, json
from bs4 import BeautifulSoup
import geocoder
from geopy.geocoders import Nominatim
import webbrowser
import pymongo
from getmac import get_mac_address as gma
import cv2
import face_recognition
import numpy as np
import smtplib
import datetime
import re, requests, subprocess, urllib.parse, urllib.request
# Shared speech recognizer used by every listening section in this script.
r = sr.Recognizer()
# Scratch state: scheduled tasks and the mp3 files generated by speak()
# (speak() prunes old entries from filename1 so clips don't pile up).
task={}
filename1=[]
# Browser-like User-Agent so Google serves the full weather card to requests.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
# SECURITY NOTE(review): credentials are hard-coded in this connection URI;
# they should be moved to an environment variable or config file.
client = pymongo.MongoClient("mongodb+srv://karan:123@cluster0.gfuxd.mongodb.net/myFirstDatabase?retryWrites=true&w=majority")
database = client["LocationDatabase"]
table = database["Location"]
def location():
    """Resolve this machine's position from its public IP and store the
    human-readable address in MongoDB, keyed by a per-machine MAC-derived id.

    Side effects only (network lookups plus a MongoDB insert-or-update);
    returns None.
    """
    g = geocoder.ip('me')
    latitude = str(g.latlng[0])
    longitude = str(g.latlng[1])
    geolocator = Nominatim(user_agent="geoapiExercises")
    place = geolocator.reverse(latitude + "," + longitude)
    # Stable per-machine key: MAC address with digits and colons stripped.
    # Kept exactly as before so documents written by earlier runs still match.
    device_id = ''.join(c for c in gma() if not c.isdigit()).replace(":", "")
    record = {"_id": device_id, "location": str(place)}
    try:
        table.insert_one(record)
    except pymongo.errors.DuplicateKeyError:
        # Machine already registered: refresh its stored location.  The
        # filter must use the same transformed id that was inserted --
        # the old code queried on the raw gma() value, which never matched,
        # so the location was never actually updated.
        table.update_one({"_id": device_id},
                         {"$set": {"location": str(place)}})
def weather(city):
    """Speak a short weather summary for *city*: a greeting, the location
    name, the current time, and the temperature scraped from Google's
    weather card.

    Parameters
    ----------
    city : str
        Search phrase such as "London weather"; spaces are turned into '+'.

    Side effects only (one HTTP GET plus audio playback); returns None.
    """
    def _pause(seconds):
        # Several sleeps below subtract a small offset from the audio
        # length returned by speak(); clamp at zero because time.sleep
        # raises ValueError for negative arguments.
        time.sleep(max(0.0, seconds))

    city = city.replace(" ", "+")
    res = requests.get(
        f'https://www.google.com/search?q={city}&oq={city}&aqs=chrome.0.35i39l2j0l4j46j69i60.6128j1j7&sourceid=chrome&ie=UTF-8', headers=headers)
    soup = BeautifulSoup(res.text, 'html.parser')
    # The ids below belong to Google's weather widget; scraping is brittle
    # and will break if the page layout changes.
    location = soup.select('#wob_loc')[0].getText().strip()
    weather = soup.select('#wob_tm')[0].getText().strip()
    greetings = ["Hello", "Hey", "Hi", "Greetings", "Namaste"]
    _pause(speak(random.choice(greetings)))
    _pause(speak("In " + location))
    now = datetime.datetime.now()
    d = datetime.datetime.strptime(now.strftime("%H:%M"), "%H:%M")
    _pause(speak("It is " + str(d.strftime("%I"))) - 0.1)
    # Distinct 'tp' suffixes keep mp3 filenames unique when several clips
    # are generated within the same second.
    _pause(speak(str(d.strftime("%M")), "op"))
    if now.hour > 12:
        _pause(speak("P M", "ui") - 0.1)
    else:
        _pause(speak("A M"))
    _pause(speak("The Temperature is " + weather + "Degree Celcius", "oi"))
def weather_main():
    """Work out the current city from the machine's public IP, then speak
    the weather: temperature via weather() (Google card) plus humidity and
    a description from OpenWeatherMap.

    Requires a valid OpenWeatherMap API key in place of the placeholder.
    Side effects only (network plus audio); returns None.
    """
    def _pause(seconds):
        # Clamp so time.sleep never receives a negative duration.
        time.sleep(max(0.0, seconds))

    g = geocoder.ip('me')
    latitude = str(g.latlng[0])
    longitude = str(g.latlng[1])
    geolocator = Nominatim(user_agent="geoapiExercises")
    place = geolocator.reverse(latitude + "," + longitude)
    address = place.raw['address']
    # Nominatim labels settlements differently depending on size; fall back
    # so towns/villages don't raise KeyError like the old address['city'].
    city_name = (address.get('city') or address.get('town')
                 or address.get('village') or address.get('state'))
    api_key = "Your_OWN_KEY"  # placeholder: supply your OpenWeatherMap key
    base_url = "http://api.openweathermap.org/data/2.5/weather?"
    complete_url = base_url + "appid=" + api_key + "&q=" + city_name
    response = requests.get(complete_url)
    x = response.json()
    weather(city_name + " weather")
    if x["cod"] != "404":
        y = x["main"]
        current_humidity = y["humidity"]
        weather_description = x["weather"][0]["description"]
        _pause(speak("Humidity is " + str(current_humidity) + "percentage", "op"))
        _pause(speak("It's " + str(weather_description) + " Today", "ooP"))
        description = str(weather_description)
        if any(w in description for w in ("thunderstorm", "rain", "shower")):
            _pause(speak("You Might Need An Umbrella!"))
        elif any(w in description for w in ("clear", "sunny")):
            _pause(speak("We Have A Clear Sky!"))
        elif "cloudy" in description:
            _pause(speak("The Sky Might Be Cloudy!"))
        _pause(speak("Have a Nice Day") + 1)
name="User"
def speak(text, tp="1", voice="Salli"):
    """Synthesize *text* with AWS Polly, play it, and return the clip length.

    Parameters
    ----------
    text : str
        Text to synthesize.
    tp : str
        Suffix appended to the generated file name so that several clips
        created within the same second do not collide.
    voice : str
        Polly voice id.

    Returns
    -------
    float
        Duration of the generated audio in seconds, so callers can
        ``time.sleep`` until playback finishes.
    """
    response = polly_client.synthesize_speech(VoiceId=voice,
                                             OutputFormat='mp3',
                                             Text=text)
    # Timestamped name keeps each utterance in its own mp3 file.
    date_string = datetime.datetime.now().strftime("%d%m%Y%H%M%S")
    out_name = 'speech' + date_string + tp + '.mp3'
    # ``with`` guarantees the handle is closed even if the write fails
    # (the original open/write/close leaked the handle on error).
    with open(out_name, 'wb') as out_file:
        out_file.write(response['AudioStream'].read())
    filename1.append(out_name)
    # Housekeeping: once more than 10 clips have piled up, delete the 5
    # oldest. BUGFIX: the original loop did ``os.remove(filename1[i])`` then
    # ``filename1.pop(i)`` for i in 0..4 -- popping shifts the list, so it
    # deleted every *other* entry and orphaned files on disk. Popping from
    # the front removes exactly the five oldest.
    if len(filename1) > 10:
        for _ in range(5):
            os.remove(filename1.pop(0))
    audio = MP3(out_name)
    mixer.init()
    mixer.music.load(out_name)
    mixer.music.play()
    return audio.info.length
polly_client = boto3.Session(
aws_access_key_id="Your_OWN_KEY",
aws_secret_access_key="Your_OWN_KEY",
region_name='us-west-2').client('polly')
try:
for file in os.listdir("./"):
filename = os.fsdecode(file)
if filename.endswith(".jpg"):
imgloaded=face_recognition.load_image_file(filename)
imgloaded=cv2.cvtColor(imgloaded,cv2.COLOR_BGR2RGB)
camera = cv2.VideoCapture(0)
return_value, image = camera.read()
cv2.imwrite(os.path.join('./' , 'testimage.jpg'), image)
imgtest=face_recognition.load_image_file('./testimage.jpg')
imgtest=cv2.cvtColor(imgtest,cv2.COLOR_BGR2RGB)
faceloc=face_recognition.face_locations(imgloaded)[0]
encodeloaded=face_recognition.face_encodings(imgloaded)[0]
cv2.rectangle(imgloaded,(faceloc[3],faceloc[0]),(faceloc[1],faceloc[2]),(255,0,255),2)
faceloctest=face_recognition.face_locations(imgtest)[0]
encodetest=face_recognition.face_encodings(imgtest)[0]
cv2.rectangle(imgtest,(faceloc[3],faceloc[0]),(faceloc[1],faceloc[2]),(255,0,255),2)
results=face_recognition.compare_faces([encodeloaded],encodetest)
if(results[0]):
name=filename.replace(".jpg","")
break
except:
timewait=speak("What's Your Name? ")
time.sleep(timewait)
print("Listening")
with sr.Microphone() as source2:
r.adjust_for_ambient_noise(source2, duration=0.1)
audio2 = r.listen(source2)
name = r.recognize_google(audio2)
camera = cv2.VideoCapture(0)
return_value, image = camera.read()
date_string = datetime.datetime.now().strftime("%d%m%Y%H%M%S")
cv2.imwrite(os.path.join('./' , name+'.jpg'), image)
onlyonce=0
while(1):
try:
d = datetime.datetime.strptime(str(datetime.datetime.now().strftime("%H:%M")), "%H:%M")
if(str(d.strftime("%M"))=='14' and onlyonce==0):
onlyonce+=1
location()
if(onlyonce>0):
if(str(d.strftime("%M"))!='14'):
onlyonce=0
with sr.Microphone() as source2:
print("Listening")
r.adjust_for_ambient_noise(source2, duration=1)
audio2 = r.listen(source2)
MyText = r.recognize_google(audio2)
MyText = MyText.lower()
print(MyText.title())
if("joke" in MyText):
My_joke = pyjokes.get_joke(language="en", category="all")
print(My_joke)
time1 = speak(My_joke,"joke")
time.sleep(int(time1))
elif(("hello" in MyText) or ("update" in MyText) or ("hi" in MyText) or ("hey" in MyText)):
speak(name,"iu")
time.sleep(1)
weather_main()
elif("time" in MyText):
speak(name,"iu")
time.sleep(1)
speak("The Time Is","O")
time.sleep(0.7)
speak(str(datetime.datetime.strptime(str(datetime.datetime.now().strftime("%H:%M")), "%H:%M").strftime("%I")))
time.sleep(0.5)
speak(str(datetime.datetime.strptime(str(datetime.datetime.now().strftime("%H:%M")), "%H:%M").strftime("%M")),"o")
time.sleep(0.8)
if(datetime.datetime.now().hour>12):
timewait=speak("P M","ui")
time.sleep(timewait-0.1)
else:
timewait=speak("A M")
time.sleep(timewait)
elif("date" in MyText):
speak(name,"iu")
time.sleep(1)
x = datetime.datetime.now()
speak("It's "+str(x.strftime("%A")))
time.sleep(0.85)
speak(str(x.strftime("%d")).replace("0",""),"i")
time.sleep(0.8)
speak(x.strftime("%B"),"P")
time.sleep(0.8)
speak(str(x.year),"OP")
time.sleep(0.8)
elif("mail" in MyText):
port = 587
smtp_server = "smtp.gmail.com"
sender_email = "<EMAIL>"
speak("What's The Receiver's Mail I D")
receiver_email = input("Receiver's Mail ID:")
password = input("Receiver's Your Password: ")
speak("What's The Subject?")
time.sleep(2)
print("Speak Now")
r.adjust_for_ambient_noise(source2, duration=0.1)
audio2 = r.listen(source2)
SUBJECT = r.recognize_google(audio2)
speak("What Should The Message Say")
time.sleep(2)
print("Speak Now")
r.adjust_for_ambient_noise(source2, duration=0.1)
audio2 = r.listen(source2)
message = r.recognize_google(audio2)
context = ssl.create_default_context()
with smtplib.SMTP(smtp_server, port) as server:
server.ehlo()
server.starttls(context=context)
server.ehlo()
server.login(sender_email, password)
message = 'Subject: {}\n\n{}'.format(SUBJECT, message)
server.sendmail(sender_email, receiver_email, message)
speak("Message On Its Way!")
print("Message Sent!")
elif("whatsapp" and "message" in MyText):
if("to" in MyText):
split_sentence = MyText.split(' ')
name=split_sentence[-1]
speak("What's "+name+"'s Phone Number? ")
else:
speak("What's Their Phone Number?")
time.sleep(2)
print("Speak Now")
r.adjust_for_ambient_noise(source2, duration=0.1)
audio2 = r.listen(source2)
MyText = r.recognize_google(audio2)
number = MyText.lower().replace(" ", "")
speak("What's The Message? ")
time.sleep(2)
print("Speak Now")
r.adjust_for_ambient_noise(source2, duration=0.1)
audio2 = r.listen(source2)
MyText = r.recognize_google(audio2)
msg = MyText.lower()
try:
pywhatkit.sendwhatmsg("+91"+number,msg,datetime.datetime.now().hour,datetime.datetime.now().minute+1)
except:
pywhatkit.sendwhatmsg("+91"+number,msg,datetime.datetime.now().hour,datetime.datetime.now().minute+2)
speak("Message On Its Way!")
print("Message Sent!")
elif("random" and "number" in MyText):
speak(name,"iu")
time.sleep(1)
if("from" and "to" in MyText):
split_sentence = MyText.split(' ')
fromIndex=split_sentence.index('from')
toIndex=split_sentence.index('to')
speak("Here's Your Random Number "+str(random.randint(int(split_sentence[int(fromIndex)+1]),int(split_sentence[int(toIndex)+1]))))
else:
speak("Here's Your Random Number "+str(random.randint(0,100)))
time.sleep(3)
elif(("note" in MyText) or( "write" in MyText) or( "homework" in MyText)):
speak("What's The Content? ")
time.sleep(2)
print("Speak Now")
r.adjust_for_ambient_noise(source2, duration=0.1)
audio2 = r.listen(source2)
MyText = r.recognize_google(audio2)
msg = MyText.lower()
pywhatkit.text_to_handwriting(msg)
img_path = "pywhatkit.png"
image1 = Image.open(r'pywhatkit.png')
im1 = image1.convert('RGB')
im1.save(r'HandWritten.pdf')
speak("Your HomeWork Is Generated As Handwritten dot p n g")
time.sleep(3)
elif(("do" in MyText) or( "what" in MyText) or ("where" in MyText) or ("who" in MyText)):
split_sentence = MyText.split(' ')
if((split_sentence[-2]!="know") or (split_sentence[-2]!="is") or (split_sentence[-2]!="are") or (split_sentence[-2]!="an") or (split_sentence[-2]!="a") or (split_sentence[-2]!="the")):
print(wikipedia.summary(split_sentence[-2]+" "+split_sentence[-1],sentences=2))
time1=speak(wikipedia.summary(split_sentence[-2]+" "+split_sentence[-1],sentences=2))
else:
print(wikipedia.summary(split_sentence[-1],sentences=2))
time1=speak(wikipedia.summary(split_sentence[-1],sentences=2))
time.sleep(time1)
elif(("create" in MyText) and ("list" in MyText)):
speak(name,"iu")
time.sleep(1)
split_sentence = MyText.split(' ')
dict["new key"]=[]
task[split_sentence[split_sentence.index("list")-1]]=[]
nameoflist=split_sentence[split_sentence.index("list")-1]
speak("What Items Do You Want Me To Add?")
time.sleep(2)
speak("Please! Add One Item At a time!","p")
time.sleep(4)
while ("end" not in MyText):
print("Say Task")
time.sleep(1)
r.adjust_for_ambient_noise(source2, duration=0.1)
audio2 = r.listen(source2)
MyText = r.recognize_google(audio2)
if("end" in MyText):
speak("List Updated")
else:
task[nameoflist].append(MyText)
speak("Next Item?")
time.sleep(2)
print(task)
elif(("show" in MyText) and ("list" in MyText)):
speak(name,"iu")
time.sleep(1)
if(task=={}):
speak("You Currently Have No Items In The List")
else:
speak("You Have"+str(len(task))+" Items In List")
time.sleep(2)
for key in task:
speak("In "+key+" You Have","o")
time.sleep(2)
for keys in task[key]:
speak(keys,"oo")
time.sleep(1)
elif("weather" in MyText):
speak(name,"iu")
time.sleep(1)
weather_main()
elif(("open" in MyText)):
split_sentence = MyText.split(' ')
url=""
for i in split_sentence:
if(i=="open"):
continue
url+=i
webbrowser.open_new(url)
elif("search" in MyText):
split_sentence = MyText.split(' ')
url=""
for i in split_sentence:
if(i=="search"):
continue
url+=i+"+"
webbrowser.open("https://www.google.com/search?q={query}".format(query=url))
webbrowser.open("https://www.youtube.com/results?search_query={query}".format(query=url))
elif(("siri" in MyText) or ("siri" in MyText) or ("siri" in MyText)):
comment=["She Seems Clever!","Full Respect, Being An Assistant Is Hardwork","I Know Her, She Is Amazing","You Know Her? That's Great!"]
timewait=speak(comment[random.randint(0,3)])
time.sleep(timewait)
elif("id" in MyText):
speak(name,"iu")
time.sleep(1)
timewait=speak("Please Note Down Your ID ")
time.sleep(timewait)
time.sleep(0.5)
timewait=speak(''.join(i for i in gma() if not i.isdigit()).replace(":",""),"io")
print(''.join(i for i in gma() if not i.isdigit()).replace(":",""))
time.sleep(timewait)
elif("location" in MyText):
MyText=MyText.lower()
split_sentence = MyText.split(' ')
idd=''.join([str(elem) for elem in split_sentence[split_sentence.index("of")+1:]]).lower()
for x in table.find({"_id":idd},{ "_id": 0, "location": 1}):
timewait=speak("Last Updated Location Is "+x["location"])
time.sleep(timewait)
elif(("youtube" in MyText)):
split_sentence = MyText.split(' ')
url=""
for i in split_sentence:
if(i=="youtube"):
continue
url+=i+"+"
webbrowser.open("https://www.youtube.com/results?search_query={query}".format(query=url))
elif("play" in MyText):
split_sentence = MyText.split(' ')
url=""
for i in split_sentence:
if(i=="play"):
continue
url+=i+" "
music_name = url
query_string = urllib.parse.urlencode({"search_query": music_name})
formatUrl = urllib.request.urlopen("https://www.youtube.com/results?" + query_string)
search_results = re.findall(r"watch\?v=(\S{11})", formatUrl.read().decode())
clip = requests.get("https://www.youtube.com/watch?v=" + "{}".format(search_results[0]))
clip2 = "https://www.youtube.com/watch?v=" + "{}".format(search_results[0])
#os.system("start \"\" {url}".format(url=clip2))
webbrowser.open(clip2)
except sr.RequestError as e:
print("Could not request results; {0}".format(e))
except sr.UnknownValueError:
print("Could You Repeat That?")
| [
"geocoder.ip",
"cv2.rectangle",
"webbrowser.open_new",
"webbrowser.open",
"time.sleep",
"face_recognition.load_image_file",
"os.fsdecode",
"pymongo.MongoClient",
"os.remove",
"os.listdir",
"pygame.mixer.music.load",
"random.randint",
"face_recognition.face_locations",
"pywhatkit.text_to_ha... | [((637, 652), 'speech_recognition.Recognizer', 'sr.Recognizer', ([], {}), '()\n', (650, 652), True, 'import speech_recognition as sr\n'), ((826, 953), 'pymongo.MongoClient', 'pymongo.MongoClient', (['"""mongodb+srv://karan:123@cluster0.gfuxd.mongodb.net/myFirstDatabase?retryWrites=true&w=majority"""'], {}), "(\n 'mongodb+srv://karan:123@cluster0.gfuxd.mongodb.net/myFirstDatabase?retryWrites=true&w=majority'\n )\n", (845, 953), False, 'import pymongo\n'), ((1038, 1055), 'geocoder.ip', 'geocoder.ip', (['"""me"""'], {}), "('me')\n", (1049, 1055), False, 'import geocoder\n'), ((1150, 1189), 'geopy.geocoders.Nominatim', 'Nominatim', ([], {'user_agent': '"""geoapiExercises"""'}), "(user_agent='geoapiExercises')\n", (1159, 1189), False, 'from geopy.geocoders import Nominatim\n'), ((1630, 1790), 'requests.get', 'requests.get', (['f"""https://www.google.com/search?q={city}&oq={city}&aqs=chrome.0.35i39l2j0l4j46j69i60.6128j1j7&sourceid=chrome&ie=UTF-8"""'], {'headers': 'headers'}), "(\n f'https://www.google.com/search?q={city}&oq={city}&aqs=chrome.0.35i39l2j0l4j46j69i60.6128j1j7&sourceid=chrome&ie=UTF-8'\n , headers=headers)\n", (1642, 1790), False, 'import re, requests, subprocess, urllib.parse, urllib.request\n'), ((1801, 1839), 'bs4.BeautifulSoup', 'BeautifulSoup', (['res.text', '"""html.parser"""'], {}), "(res.text, 'html.parser')\n", (1814, 1839), False, 'from bs4 import BeautifulSoup\n'), ((2130, 2150), 'time.sleep', 'time.sleep', (['timewait'], {}), '(timewait)\n', (2140, 2150), False, 'import time\n'), ((2190, 2210), 'time.sleep', 'time.sleep', (['timewait'], {}), '(timewait)\n', (2200, 2210), False, 'import time\n'), ((2358, 2384), 'time.sleep', 'time.sleep', (['(timewait - 0.1)'], {}), '(timewait - 0.1)\n', (2368, 2384), False, 'import time\n'), ((2434, 2454), 'time.sleep', 'time.sleep', (['timewait'], {}), '(timewait)\n', (2444, 2454), False, 'import time\n'), ((2709, 2729), 'time.sleep', 'time.sleep', (['timewait'], {}), '(timewait)\n', 
(2719, 2729), False, 'import time\n'), ((2763, 2780), 'geocoder.ip', 'geocoder.ip', (['"""me"""'], {}), "('me')\n", (2774, 2780), False, 'import geocoder\n'), ((2875, 2914), 'geopy.geocoders.Nominatim', 'Nominatim', ([], {'user_agent': '"""geoapiExercises"""'}), "(user_agent='geoapiExercises')\n", (2884, 2914), False, 'from geopy.geocoders import Nominatim\n'), ((3235, 3261), 'requests.get', 'requests.get', (['complete_url'], {}), '(complete_url)\n', (3247, 3261), False, 'import re, requests, subprocess, urllib.parse, urllib.request\n'), ((5014, 5055), 'mutagen.mp3.MP3', 'MP3', (["('speech' + date_string + tp + '.mp3')"], {}), "('speech' + date_string + tp + '.mp3')\n", (5017, 5055), False, 'from mutagen.mp3 import MP3\n'), ((5059, 5071), 'pygame.mixer.init', 'mixer.init', ([], {}), '()\n', (5069, 5071), False, 'from pygame import mixer\n'), ((5076, 5130), 'pygame.mixer.music.load', 'mixer.music.load', (["('speech' + date_string + tp + '.mp3')"], {}), "('speech' + date_string + tp + '.mp3')\n", (5092, 5130), False, 'from pygame import mixer\n'), ((5129, 5147), 'pygame.mixer.music.play', 'mixer.music.play', ([], {}), '()\n', (5145, 5147), False, 'from pygame import mixer\n'), ((5356, 5372), 'os.listdir', 'os.listdir', (['"""./"""'], {}), "('./')\n", (5366, 5372), False, 'import os\n'), ((2539, 2565), 'time.sleep', 'time.sleep', (['(timewait - 0.1)'], {}), '(timewait - 0.1)\n', (2549, 2565), False, 'import time\n'), ((2612, 2632), 'time.sleep', 'time.sleep', (['timewait'], {}), '(timewait)\n', (2622, 2632), False, 'import time\n'), ((3650, 3670), 'time.sleep', 'time.sleep', (['timewait'], {}), '(timewait)\n', (3660, 3670), False, 'import time\n'), ((3751, 3771), 'time.sleep', 'time.sleep', (['timewait'], {}), '(timewait)\n', (3761, 3771), False, 'import time\n'), ((4384, 4408), 'time.sleep', 'time.sleep', (['(timewait + 1)'], {}), '(timewait + 1)\n', (4394, 4408), False, 'import time\n'), ((5193, 5308), 'boto3.Session', 'boto3.Session', ([], {'aws_access_key_id': 
'"""Your_OWN_KEY"""', 'aws_secret_access_key': '"""Your_OWN_KEY"""', 'region_name': '"""us-west-2"""'}), "(aws_access_key_id='Your_OWN_KEY', aws_secret_access_key=\n 'Your_OWN_KEY', region_name='us-west-2')\n", (5206, 5308), False, 'import boto3\n'), ((5393, 5410), 'os.fsdecode', 'os.fsdecode', (['file'], {}), '(file)\n', (5404, 5410), False, 'import os\n'), ((6566, 6586), 'time.sleep', 'time.sleep', (['timewait'], {}), '(timewait)\n', (6576, 6586), False, 'import time\n'), ((6796, 6815), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (6812, 6815), False, 'import cv2\n'), ((2104, 2124), 'random.randint', 'random.randint', (['(0)', '(4)'], {}), '(0, 4)\n', (2118, 2124), False, 'import random\n'), ((2462, 2485), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2483, 2485), False, 'import datetime\n'), ((3982, 4002), 'time.sleep', 'time.sleep', (['timewait'], {}), '(timewait)\n', (3992, 4002), False, 'import time\n'), ((4654, 4677), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4675, 4677), False, 'import datetime\n'), ((4949, 4972), 'os.remove', 'os.remove', (['filename1[i]'], {}), '(filename1[i])\n', (4958, 4972), False, 'import os\n'), ((5471, 5513), 'face_recognition.load_image_file', 'face_recognition.load_image_file', (['filename'], {}), '(filename)\n', (5503, 5513), False, 'import face_recognition\n'), ((5536, 5578), 'cv2.cvtColor', 'cv2.cvtColor', (['imgloaded', 'cv2.COLOR_BGR2RGB'], {}), '(imgloaded, cv2.COLOR_BGR2RGB)\n', (5548, 5578), False, 'import cv2\n'), ((5599, 5618), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (5615, 5618), False, 'import cv2\n'), ((5756, 5807), 'face_recognition.load_image_file', 'face_recognition.load_image_file', (['"""./testimage.jpg"""'], {}), "('./testimage.jpg')\n", (5788, 5807), False, 'import face_recognition\n'), ((5828, 5868), 'cv2.cvtColor', 'cv2.cvtColor', (['imgtest', 'cv2.COLOR_BGR2RGB'], {}), '(imgtest, cv2.COLOR_BGR2RGB)\n', (5840, 5868), 
False, 'import cv2\n'), ((6017, 6115), 'cv2.rectangle', 'cv2.rectangle', (['imgloaded', '(faceloc[3], faceloc[0])', '(faceloc[1], faceloc[2])', '(255, 0, 255)', '(2)'], {}), '(imgloaded, (faceloc[3], faceloc[0]), (faceloc[1], faceloc[2]),\n (255, 0, 255), 2)\n', (6030, 6115), False, 'import cv2\n'), ((6251, 6347), 'cv2.rectangle', 'cv2.rectangle', (['imgtest', '(faceloc[3], faceloc[0])', '(faceloc[1], faceloc[2])', '(255, 0, 255)', '(2)'], {}), '(imgtest, (faceloc[3], faceloc[0]), (faceloc[1], faceloc[2]),\n (255, 0, 255), 2)\n', (6264, 6347), False, 'import cv2\n'), ((6356, 6414), 'face_recognition.compare_faces', 'face_recognition.compare_faces', (['[encodeloaded]', 'encodetest'], {}), '([encodeloaded], encodetest)\n', (6386, 6414), False, 'import face_recognition\n'), ((6619, 6634), 'speech_recognition.Microphone', 'sr.Microphone', ([], {}), '()\n', (6632, 6634), True, 'import speech_recognition as sr\n'), ((6939, 6972), 'os.path.join', 'os.path.join', (['"""./"""', "(name + '.jpg')"], {}), "('./', name + '.jpg')\n", (6951, 6972), False, 'import os\n'), ((7320, 7335), 'speech_recognition.Microphone', 'sr.Microphone', ([], {}), '()\n', (7333, 7335), True, 'import speech_recognition as sr\n'), ((1444, 1449), 'getmac.get_mac_address', 'gma', ([], {}), '()\n', (1447, 1449), True, 'from getmac import get_mac_address as gma\n'), ((4160, 4180), 'time.sleep', 'time.sleep', (['timewait'], {}), '(timewait)\n', (4170, 4180), False, 'import time\n'), ((5691, 5726), 'os.path.join', 'os.path.join', (['"""./"""', '"""testimage.jpg"""'], {}), "('./', 'testimage.jpg')\n", (5703, 5726), False, 'import os\n'), ((5888, 5930), 'face_recognition.face_locations', 'face_recognition.face_locations', (['imgloaded'], {}), '(imgloaded)\n', (5919, 5930), False, 'import face_recognition\n'), ((5959, 6001), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['imgloaded'], {}), '(imgloaded)\n', (5990, 6001), False, 'import face_recognition\n'), ((6128, 6168), 
'face_recognition.face_locations', 'face_recognition.face_locations', (['imgtest'], {}), '(imgtest)\n', (6159, 6168), False, 'import face_recognition\n'), ((6195, 6235), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['imgtest'], {}), '(imgtest)\n', (6226, 6235), False, 'import face_recognition\n'), ((6874, 6897), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6895, 6897), False, 'import datetime\n'), ((7695, 7742), 'pyjokes.get_joke', 'pyjokes.get_joke', ([], {'language': '"""en"""', 'category': '"""all"""'}), "(language='en', category='all')\n", (7711, 7742), False, 'import pyjokes\n'), ((2250, 2273), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2271, 2273), False, 'import datetime\n'), ((4300, 4320), 'time.sleep', 'time.sleep', (['timewait'], {}), '(timewait)\n', (4310, 4320), False, 'import time\n'), ((8012, 8025), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (8022, 8025), False, 'import time\n'), ((7054, 7077), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7075, 7077), False, 'import datetime\n'), ((8142, 8155), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (8152, 8155), False, 'import time\n'), ((8213, 8228), 'time.sleep', 'time.sleep', (['(0.7)'], {}), '(0.7)\n', (8223, 8228), False, 'import time\n'), ((8372, 8387), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (8382, 8387), False, 'import time\n'), ((8535, 8550), 'time.sleep', 'time.sleep', (['(0.8)'], {}), '(0.8)\n', (8545, 8550), False, 'import time\n'), ((1291, 1296), 'getmac.get_mac_address', 'gma', ([], {}), '()\n', (1294, 1296), True, 'from getmac import get_mac_address as gma\n'), ((8671, 8697), 'time.sleep', 'time.sleep', (['(timewait - 0.1)'], {}), '(timewait - 0.1)\n', (8681, 8697), False, 'import time\n'), ((8780, 8800), 'time.sleep', 'time.sleep', (['timewait'], {}), '(timewait)\n', (8790, 8800), False, 'import time\n'), ((8886, 8899), 'time.sleep', 'time.sleep', (['(1)'], {}), 
'(1)\n', (8896, 8899), False, 'import time\n'), ((8920, 8943), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8941, 8943), False, 'import datetime\n'), ((9013, 9029), 'time.sleep', 'time.sleep', (['(0.85)'], {}), '(0.85)\n', (9023, 9029), False, 'import time\n'), ((9111, 9126), 'time.sleep', 'time.sleep', (['(0.8)'], {}), '(0.8)\n', (9121, 9126), False, 'import time\n'), ((9187, 9202), 'time.sleep', 'time.sleep', (['(0.8)'], {}), '(0.8)\n', (9197, 9202), False, 'import time\n'), ((9259, 9274), 'time.sleep', 'time.sleep', (['(0.8)'], {}), '(0.8)\n', (9269, 9274), False, 'import time\n'), ((8570, 8593), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8591, 8593), False, 'import datetime\n'), ((9700, 9713), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (9710, 9713), False, 'import time\n'), ((9980, 9993), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (9990, 9993), False, 'import time\n'), ((10234, 10262), 'ssl.create_default_context', 'ssl.create_default_context', ([], {}), '()\n', (10260, 10262), False, 'import smtplib, ssl\n'), ((10284, 10315), 'smtplib.SMTP', 'smtplib.SMTP', (['smtp_server', 'port'], {}), '(smtp_server, port)\n', (10296, 10315), False, 'import smtplib\n'), ((11112, 11125), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (11122, 11125), False, 'import time\n'), ((11442, 11455), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (11452, 11455), False, 'import time\n'), ((12193, 12206), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (12203, 12206), False, 'import time\n'), ((12696, 12709), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (12706, 12709), False, 'import time\n'), ((12862, 12875), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (12872, 12875), False, 'import time\n'), ((13159, 13193), 'pywhatkit.text_to_handwriting', 'pywhatkit.text_to_handwriting', (['msg'], {}), '(msg)\n', (13188, 13193), False, 'import pywhatkit\n'), ((13279, 13306), 'PIL.Image.open', 
'Image.open', (['"""pywhatkit.png"""'], {}), "('pywhatkit.png')\n", (13289, 13306), False, 'from PIL import Image\n'), ((13491, 13504), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (13501, 13504), False, 'import time\n'), ((11787, 11810), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11808, 11810), False, 'import datetime\n'), ((14299, 14316), 'time.sleep', 'time.sleep', (['time1'], {}), '(time1)\n', (14309, 14316), False, 'import time\n'), ((11816, 11839), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11837, 11839), False, 'import datetime\n'), ((11933, 11956), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11954, 11956), False, 'import datetime\n'), ((14447, 14460), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (14457, 14460), False, 'import time\n'), ((14768, 14781), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (14778, 14781), False, 'import time\n'), ((14859, 14872), 'time.sleep', 'time.sleep', (['(4)'], {}), '(4)\n', (14869, 14872), False, 'import time\n'), ((8286, 8309), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8307, 8309), False, 'import datetime\n'), ((8445, 8468), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8466, 8468), False, 'import datetime\n'), ((11962, 11985), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11983, 11985), False, 'import datetime\n'), ((12656, 12678), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (12670, 12678), False, 'import random\n'), ((13921, 13998), 'wikipedia.summary', 'wikipedia.summary', (["(split_sentence[-2] + ' ' + split_sentence[-1])"], {'sentences': '(2)'}), "(split_sentence[-2] + ' ' + split_sentence[-1], sentences=2)\n", (13938, 13998), False, 'import wikipedia\n'), ((14027, 14104), 'wikipedia.summary', 'wikipedia.summary', (["(split_sentence[-2] + ' ' + split_sentence[-1])"], {'sentences': '(2)'}), "(split_sentence[-2] + ' ' + 
split_sentence[-1], sentences=2)\n", (14044, 14104), False, 'import wikipedia\n'), ((14149, 14199), 'wikipedia.summary', 'wikipedia.summary', (['split_sentence[-1]'], {'sentences': '(2)'}), '(split_sentence[-1], sentences=2)\n', (14166, 14199), False, 'import wikipedia\n'), ((14232, 14282), 'wikipedia.summary', 'wikipedia.summary', (['split_sentence[-1]'], {'sentences': '(2)'}), '(split_sentence[-1], sentences=2)\n', (14249, 14282), False, 'import wikipedia\n'), ((14993, 15006), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (15003, 15006), False, 'import time\n'), ((15570, 15583), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (15580, 15583), False, 'import time\n'), ((15417, 15430), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (15427, 15430), False, 'import time\n'), ((15795, 15808), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (15805, 15808), False, 'import time\n'), ((16192, 16205), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (16202, 16205), False, 'import time\n'), ((15927, 15940), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (15937, 15940), False, 'import time\n'), ((16501, 16525), 'webbrowser.open_new', 'webbrowser.open_new', (['url'], {}), '(url)\n', (16520, 16525), False, 'import webbrowser\n'), ((16090, 16103), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (16100, 16103), False, 'import time\n'), ((17310, 17330), 'time.sleep', 'time.sleep', (['timewait'], {}), '(timewait)\n', (17320, 17330), False, 'import time\n'), ((17414, 17427), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (17424, 17427), False, 'import time\n'), ((17504, 17524), 'time.sleep', 'time.sleep', (['timewait'], {}), '(timewait)\n', (17514, 17524), False, 'import time\n'), ((17541, 17556), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (17551, 17556), False, 'import time\n'), ((17755, 17775), 'time.sleep', 'time.sleep', (['timewait'], {}), '(timewait)\n', (17765, 17775), False, 'import time\n'), ((17272, 17292), 'random.randint', 
'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (17286, 17292), False, 'import random\n'), ((18187, 18207), 'time.sleep', 'time.sleep', (['timewait'], {}), '(timewait)\n', (18197, 18207), False, 'import time\n'), ((19417, 19439), 'webbrowser.open', 'webbrowser.open', (['clip2'], {}), '(clip2)\n', (19432, 19439), False, 'import webbrowser\n'), ((17607, 17612), 'getmac.get_mac_address', 'gma', ([], {}), '()\n', (17610, 17612), True, 'from getmac import get_mac_address as gma\n'), ((17696, 17701), 'getmac.get_mac_address', 'gma', ([], {}), '()\n', (17699, 17701), True, 'from getmac import get_mac_address as gma\n')] |
"""Test fixture files, using the ``DocutilsRenderer``.
Note, the output AST is before any transforms are applied.
"""
import shlex
from io import StringIO
from pathlib import Path
import pytest
from docutils.core import Publisher, publish_doctree
from myst_parser.parsers.docutils_ import Parser
FIXTURE_PATH = Path(__file__).parent.joinpath("fixtures")
@pytest.mark.param_file(FIXTURE_PATH / "docutil_syntax_elements.md")
def test_syntax_elements(file_params, monkeypatch):
    """Check Markdown -> docutils AST conversion, with transforms disabled."""
    # Neutralise ``Publisher.apply_transforms`` so the raw parse tree is kept.
    monkeypatch.setattr(Publisher, "apply_transforms", lambda self: None)

    overrides = {"myst_highlight_code_blocks": False}
    doctree = publish_doctree(
        file_params.content,
        source_path="notset",
        parser=Parser(),
        settings_overrides=overrides,
    )

    # docutils 0.18 renamed auto-generated footnote ids; normalise for fixtures
    rendered = doctree.pformat().replace('"footnote-reference-1"', '"id1"')
    file_params.assert_expected(rendered, rstrip_lines=True)
@pytest.mark.param_file(FIXTURE_PATH / "docutil_roles.md")
def test_docutils_roles(file_params, monkeypatch):
    """Render role fixtures to a docutils AST, with transforms disabled."""

    def _no_transforms(self):
        # Replacement for ``Publisher.apply_transforms``: keep the raw tree.
        return None

    monkeypatch.setattr(Publisher, "apply_transforms", _no_transforms)

    doctree = publish_doctree(
        file_params.content, source_path="notset", parser=Parser()
    )
    file_params.assert_expected(doctree.pformat(), rstrip_lines=True)
@pytest.mark.param_file(FIXTURE_PATH / "docutil_directives.md")
def test_docutils_directives(file_params, monkeypatch):
    """Render directive fixtures to a docutils AST, with transforms disabled."""
    # line-block directive not yet supported
    if "SKIP" in file_params.description:
        pytest.skip(file_params.description)

    monkeypatch.setattr(Publisher, "apply_transforms", lambda self: None)

    result = publish_doctree(
        file_params.content,
        source_path="notset",
        parser=Parser(),
    )
    file_params.assert_expected(result.pformat(), rstrip_lines=True)
@pytest.mark.param_file(FIXTURE_PATH / "docutil_syntax_extensions.txt")
def test_syntax_extensions(file_params):
    """The description is parsed as a docutils commandline"""
    publisher = Publisher(parser=Parser())
    cli_parser = publisher.setup_option_parser()
    try:
        values = cli_parser.parse_args(shlex.split(file_params.description))
    except Exception as err:
        raise AssertionError(
            f"Failed to parse commandline: {file_params.description}\n{err}"
        )
    settings = values.__dict__
    # Swallow parser warnings so they do not pollute the fixture output.
    settings["warning_stream"] = StringIO()
    doctree = publish_doctree(
        file_params.content,
        parser=Parser(),
        settings_overrides=settings,
    )
    file_params.assert_expected(doctree.pformat(), rstrip_lines=True)
| [
"pytest.skip",
"pytest.mark.param_file",
"pathlib.Path",
"shlex.split",
"io.StringIO",
"myst_parser.parsers.docutils_.Parser"
] | [((361, 428), 'pytest.mark.param_file', 'pytest.mark.param_file', (["(FIXTURE_PATH / 'docutil_syntax_elements.md')"], {}), "(FIXTURE_PATH / 'docutil_syntax_elements.md')\n", (383, 428), False, 'import pytest\n'), ((1066, 1123), 'pytest.mark.param_file', 'pytest.mark.param_file', (["(FIXTURE_PATH / 'docutil_roles.md')"], {}), "(FIXTURE_PATH / 'docutil_roles.md')\n", (1088, 1123), False, 'import pytest\n'), ((1580, 1642), 'pytest.mark.param_file', 'pytest.mark.param_file', (["(FIXTURE_PATH / 'docutil_directives.md')"], {}), "(FIXTURE_PATH / 'docutil_directives.md')\n", (1602, 1642), False, 'import pytest\n'), ((2192, 2262), 'pytest.mark.param_file', 'pytest.mark.param_file', (["(FIXTURE_PATH / 'docutil_syntax_extensions.txt')"], {}), "(FIXTURE_PATH / 'docutil_syntax_extensions.txt')\n", (2214, 2262), False, 'import pytest\n'), ((2737, 2747), 'io.StringIO', 'StringIO', ([], {}), '()\n', (2745, 2747), False, 'from io import StringIO\n'), ((1837, 1873), 'pytest.skip', 'pytest.skip', (['file_params.description'], {}), '(file_params.description)\n', (1848, 1873), False, 'import pytest\n'), ((315, 329), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (319, 329), False, 'from pathlib import Path\n'), ((796, 804), 'myst_parser.parsers.docutils_.Parser', 'Parser', ([], {}), '()\n', (802, 804), False, 'from myst_parser.parsers.docutils_ import Parser\n'), ((1490, 1498), 'myst_parser.parsers.docutils_.Parser', 'Parser', ([], {}), '()\n', (1496, 1498), False, 'from myst_parser.parsers.docutils_ import Parser\n'), ((2102, 2110), 'myst_parser.parsers.docutils_.Parser', 'Parser', ([], {}), '()\n', (2108, 2110), False, 'from myst_parser.parsers.docutils_ import Parser\n'), ((2393, 2401), 'myst_parser.parsers.docutils_.Parser', 'Parser', ([], {}), '()\n', (2399, 2401), False, 'from myst_parser.parsers.docutils_ import Parser\n'), ((2870, 2878), 'myst_parser.parsers.docutils_.Parser', 'Parser', ([], {}), '()\n', (2876, 2878), False, 'from myst_parser.parsers.docutils_ 
import Parser\n'), ((2515, 2551), 'shlex.split', 'shlex.split', (['file_params.description'], {}), '(file_params.description)\n', (2526, 2551), False, 'import shlex\n')] |
import os
import json
from .base import CommunityBaseSettings
class EnvironmentSettings(CommunityBaseSettings):
    """Settings for local development"""

    # 12-factor style configuration: every value is resolved from environment
    # variables at import time. Keys accessed with os.environ[...] are
    # required and raise KeyError if missing; os.environ.get(...) keys are
    # optional feature toggles.
    DEBUG = os.environ.get('DEBUG') == 'true'
    ALLOW_PRIVATE_REPOS = os.environ['ALLOW_PRIVATE_REPOS'] == 'true'
    PRODUCTION_DOMAIN = os.environ['PROD_HOST']
    WEBHOOK_DOMAIN = os.environ['WEBHOOK_HOST']
    WEBSOCKET_HOST = os.environ['WEBSOCKET_HOST']
    DEFAULT_PRIVACY_LEVEL = os.environ['DEFAULT_PRIVACY_LEVEL']
    PUBLIC_API_URL = PRODUCTION_DOMAIN
    CSRF_TRUSTED_ORIGINS = [PRODUCTION_DOMAIN]
    @property
    def DATABASES(self): # noqa
        # PostgreSQL connection built lazily so env vars are read per access.
        return {
            'default': {
                'ENGINE': 'django.db.backends.postgresql_psycopg2',
                'NAME': os.environ['DB_NAME'],
                'USER': os.environ['DB_USER'],
                'PASSWORD': os.environ['DB_PASS'],
                'HOST': os.environ['DB_HOST'],
                'PORT': os.environ['DB_PORT']
            }
        }
    DONT_HIT_DB = False
    ACCOUNT_EMAIL_VERIFICATION = 'none'
    SESSION_COOKIE_DOMAIN = None
    CACHE_BACKEND = 'dummy://'
    SLUMBER_USERNAME = os.environ['SLUMBER_USER']
    SLUMBER_PASSWORD = os.environ['SLUMBER_PASS'] # noqa: ignore dodgy check
    SLUMBER_API_HOST = os.environ['SLUMBER_HOST']
    # Redis setup.
    REDIS_HOST = os.environ['REDIS_HOST']
    REDIS_PORT = os.environ['REDIS_PORT']
    REDIS_ADDRESS = '{}:{}'.format(REDIS_HOST, REDIS_PORT)
    BROKER_URL = 'redis://{}/0'.format(REDIS_ADDRESS)
    CELERY_RESULT_BACKEND = BROKER_URL
    # Tasks run synchronously (eagerly) unless ASYNC_TASKS is enabled.
    CELERY_ALWAYS_EAGER = os.environ.get('ASYNC_TASKS') != 'true'
    CELERY_RESULT_SERIALIZER = 'json'
    CELERY_TASK_IGNORE_RESULT = False
    # Elastic Search setup.
    # ES_HOSTS is expected to be a JSON-encoded list of host strings.
    ES_HOSTS = json.loads(os.environ['ES_HOSTS'])
    ES_DEFAULT_NUM_REPLICAS = 0
    ES_DEFAULT_NUM_SHARDS = 5
    HAYSTACK_CONNECTIONS = {
        'default': {
            'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
        },
    }
    # Mail settings
    # Whether or not to actually use the default email backend.
    if os.environ.get('ENABLE_EMAILS') != 'true':
        # Console backend: emails are printed to stdout instead of sent.
        EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
    DEFAULT_FROM_EMAIL = os.environ.get('FROM_EMAIL')
    EMAIL_HOST = os.environ.get('EMAIL_HOST')
    EMAIL_HOST_USER = os.environ.get('EMAIL_USER')
    EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_PASS')
    EMAIL_PORT = 587
    EMAIL_USE_TLS = True
    # File Sync -- NOTE: Must be local for single-app hosts.
    FILE_SYNCER = os.environ['FILE_SYNCER']
    # Cors origins.
    # CORS_HOSTS is expected to be a JSON-encoded list of origins.
    CORS_ORIGIN_WHITELIST = json.loads(os.environ['CORS_HOSTS'])
    # Social Auth config.
    @property
    def SOCIALACCOUNT_PROVIDERS(self):
        providers = super(EnvironmentSettings, self).SOCIALACCOUNT_PROVIDERS
        # This enables private repositories.
        providers['github']['SCOPE'].append('repo')
        return providers
    ACCOUNT_DEFAULT_HTTP_PROTOCOL = os.environ.get(
        'ACCOUNT_DEFAULT_HTTP_PROTOCOL'
    ) or 'http'
    # Cache backend.
    CACHES = {
        'default': {
            'BACKEND': 'redis_cache.RedisCache',
            'LOCATION': REDIS_ADDRESS,
            'PREFIX': 'docs',
            'OPTIONS': {
                'DB': 1,
                'PARSER_CLASS': 'redis.connection.HiredisParser',
                'CONNECTION_POOL_CLASS': 'redis.BlockingConnectionPool',
                'CONNECTION_POOL_CLASS_KWARGS': {
                    'max_connections': 5,
                    'timeout': 3,
                },
                'MAX_CONNECTIONS': 10,
                'PICKLE_VERSION': -1,
            },
        },
    }
    LOG_FORMAT = "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s"
    LOGGING = {
        'version': 1,
        'disable_existing_loggers': True,
        'formatters': {
            'standard': {
                'format': LOG_FORMAT,
                'datefmt': "%d/%b/%Y %H:%M:%S"
            },
        },
        'filters': {
            'require_debug_false': {
                '()': 'django.utils.log.RequireDebugFalse'
            }
        },
        'handlers': {
            'null': {
                'level': 'DEBUG',
                'class': 'logging.NullHandler',
            },
            'console': {
                # Console level is DEBUG when DEBUG is on, otherwise INFO
                # (tuple indexed by the boolean DEBUG flag).
                'level': ('INFO', 'DEBUG')[DEBUG],
                'class': 'logging.StreamHandler',
                'formatter': 'standard'
            },
        },
        'loggers': {
            'django.db.backends': {
                'handlers': ['console'],
                'level': 'DEBUG',
                'propagate': False,
            },
            'readthedocs.core.views.post_commit': {
                'handlers': ['console'],
                'level': 'DEBUG',
                'propagate': False,
            },
            'core.middleware': {
                'handlers': ['console'],
                'level': 'DEBUG',
                'propagate': False,
            },
            'restapi': {
                'handlers': ['console'],
                'level': 'DEBUG',
                'propagate': False,
            },
            'django.request': {
                'handlers': ['console'],
                'level': 'ERROR',
                'propagate': False,
            },
            'readthedocs.projects.views.public.search': {
                'handlers': ['console'],
                'level': 'DEBUG',
                'propagate': False,
            },
            'search': {
                'handlers': ['console'],
                'level': 'DEBUG',
                'propagate': False,
            },
            'elasticsearch.trace': {
                'level': 'DEBUG',
                'handlers': ['console'],
            },
            '': {
                'handlers': ['console'],
                'level': 'INFO',
            }
        }
    }
# Install the settings above as this module's attributes.
EnvironmentSettings.load_settings(__name__)
# Optionally layer developer-specific overrides on top; missing
# local_settings is tolerated, other import errors propagate.
if not os.environ.get('DJANGO_SETTINGS_SKIP_LOCAL', False):
    try:
        # pylint: disable=unused-wildcard-import
        from .local_settings import * # noqa
    except ImportError:
        pass
| [
"json.loads",
"os.environ.get"
] | [((1741, 1775), 'json.loads', 'json.loads', (["os.environ['ES_HOSTS']"], {}), "(os.environ['ES_HOSTS'])\n", (1751, 1775), False, 'import json\n'), ((2210, 2238), 'os.environ.get', 'os.environ.get', (['"""FROM_EMAIL"""'], {}), "('FROM_EMAIL')\n", (2224, 2238), False, 'import os\n'), ((2256, 2284), 'os.environ.get', 'os.environ.get', (['"""EMAIL_HOST"""'], {}), "('EMAIL_HOST')\n", (2270, 2284), False, 'import os\n'), ((2307, 2335), 'os.environ.get', 'os.environ.get', (['"""EMAIL_USER"""'], {}), "('EMAIL_USER')\n", (2321, 2335), False, 'import os\n'), ((2362, 2390), 'os.environ.get', 'os.environ.get', (['"""EMAIL_PASS"""'], {}), "('EMAIL_PASS')\n", (2376, 2390), False, 'import os\n'), ((2592, 2628), 'json.loads', 'json.loads', (["os.environ['CORS_HOSTS']"], {}), "(os.environ['CORS_HOSTS'])\n", (2602, 2628), False, 'import json\n'), ((5901, 5952), 'os.environ.get', 'os.environ.get', (['"""DJANGO_SETTINGS_SKIP_LOCAL"""', '(False)'], {}), "('DJANGO_SETTINGS_SKIP_LOCAL', False)\n", (5915, 5952), False, 'import os\n'), ((170, 193), 'os.environ.get', 'os.environ.get', (['"""DEBUG"""'], {}), "('DEBUG')\n", (184, 193), False, 'import os\n'), ((1581, 1610), 'os.environ.get', 'os.environ.get', (['"""ASYNC_TASKS"""'], {}), "('ASYNC_TASKS')\n", (1595, 1610), False, 'import os\n'), ((2068, 2099), 'os.environ.get', 'os.environ.get', (['"""ENABLE_EMAILS"""'], {}), "('ENABLE_EMAILS')\n", (2082, 2099), False, 'import os\n'), ((2945, 2992), 'os.environ.get', 'os.environ.get', (['"""ACCOUNT_DEFAULT_HTTP_PROTOCOL"""'], {}), "('ACCOUNT_DEFAULT_HTTP_PROTOCOL')\n", (2959, 2992), False, 'import os\n')] |
import unittest
from datetime import datetime, timezone
from src.entity.count_entity import CountEntity
from src.interface_adapter.in_memory_count_repository import \
InMemoryCountRepository
from src.use_case.record_count_input_data import RecordCountInputData
from src.use_case.record_count_use_case_interactor import \
RecordCountUseCaseInteractor
class TestRecordCountUseCaseInteractor(unittest.TestCase):
    """Behavioral tests for ``RecordCountUseCaseInteractor`` backed by an
    in-memory repository."""

    def setUp(self) -> None:
        # Fresh repository per test so cases cannot interfere.
        self.repository = InMemoryCountRepository()
        return super().setUp()

    def test_create(self):
        # Constructing the interactor with a repository must not raise.
        RecordCountUseCaseInteractor(self.repository)

    def test_handle_empty(self):
        # Handling an input that carries no counts stores nothing.
        use_case = RecordCountUseCaseInteractor(self.repository)
        recorded_at = datetime(2020, 1, 1, tzinfo=timezone.utc)
        empty_input = RecordCountInputData(recorded_at, [])
        use_case.handle(empty_input)
        stored = self.repository.find_all()
        self.assertEqual(len(stored), 0)

    def test_handle_multi_input(self):
        # Every entity in a multi-count input is persisted exactly once.
        use_case = RecordCountUseCaseInteractor(self.repository)
        recorded_at = datetime(2020, 1, 1, tzinfo=timezone.utc)
        entities = [
            CountEntity(recorded_at, command, tally)
            for command, tally in (
                ("/bin/bash", 3),
                ("/bin/sash", 4),
                ("/bin/cash", 5),
            )
        ]
        use_case.handle(RecordCountInputData(recorded_at, entities))
        stored = self.repository.find_all()
        self.assertEqual(len(stored), 3)
        for entity in entities:
            self.assertIn(entity, stored)
| [
"datetime.datetime",
"src.entity.count_entity.CountEntity",
"src.interface_adapter.in_memory_count_repository.InMemoryCountRepository",
"src.use_case.record_count_input_data.RecordCountInputData",
"src.use_case.record_count_use_case_interactor.RecordCountUseCaseInteractor"
] | [((475, 500), 'src.interface_adapter.in_memory_count_repository.InMemoryCountRepository', 'InMemoryCountRepository', ([], {}), '()\n', (498, 500), False, 'from src.interface_adapter.in_memory_count_repository import InMemoryCountRepository\n'), ((586, 631), 'src.use_case.record_count_use_case_interactor.RecordCountUseCaseInteractor', 'RecordCountUseCaseInteractor', (['self.repository'], {}), '(self.repository)\n', (614, 631), False, 'from src.use_case.record_count_use_case_interactor import RecordCountUseCaseInteractor\n'), ((703, 748), 'src.use_case.record_count_use_case_interactor.RecordCountUseCaseInteractor', 'RecordCountUseCaseInteractor', (['self.repository'], {}), '(self.repository)\n', (731, 748), False, 'from src.use_case.record_count_use_case_interactor import RecordCountUseCaseInteractor\n'), ((764, 805), 'datetime.datetime', 'datetime', (['(2020)', '(1)', '(1)'], {'tzinfo': 'timezone.utc'}), '(2020, 1, 1, tzinfo=timezone.utc)\n', (772, 805), False, 'from datetime import datetime, timezone\n'), ((822, 852), 'src.use_case.record_count_input_data.RecordCountInputData', 'RecordCountInputData', (['date', '[]'], {}), '(date, [])\n', (842, 852), False, 'from src.use_case.record_count_input_data import RecordCountInputData\n'), ((1085, 1130), 'src.use_case.record_count_use_case_interactor.RecordCountUseCaseInteractor', 'RecordCountUseCaseInteractor', (['self.repository'], {}), '(self.repository)\n', (1113, 1130), False, 'from src.use_case.record_count_use_case_interactor import RecordCountUseCaseInteractor\n'), ((1146, 1187), 'datetime.datetime', 'datetime', (['(2020)', '(1)', '(1)'], {'tzinfo': 'timezone.utc'}), '(2020, 1, 1, tzinfo=timezone.utc)\n', (1154, 1187), False, 'from datetime import datetime, timezone\n'), ((1203, 1236), 'src.entity.count_entity.CountEntity', 'CountEntity', (['date', '"""/bin/bash"""', '(3)'], {}), "(date, '/bin/bash', 3)\n", (1214, 1236), False, 'from src.entity.count_entity import CountEntity\n'), ((1252, 1285), 
'src.entity.count_entity.CountEntity', 'CountEntity', (['date', '"""/bin/sash"""', '(4)'], {}), "(date, '/bin/sash', 4)\n", (1263, 1285), False, 'from src.entity.count_entity import CountEntity\n'), ((1301, 1334), 'src.entity.count_entity.CountEntity', 'CountEntity', (['date', '"""/bin/cash"""', '(5)'], {}), "(date, '/bin/cash', 5)\n", (1312, 1334), False, 'from src.entity.count_entity import CountEntity\n'), ((1387, 1421), 'src.use_case.record_count_input_data.RecordCountInputData', 'RecordCountInputData', (['date', 'counts'], {}), '(date, counts)\n', (1407, 1421), False, 'from src.use_case.record_count_input_data import RecordCountInputData\n')] |
from setuptools import setup

setup(
    name='nd',
    py_modules=['nd'],
    version='1.0.0',
    description='user friendly emulation game selection',
    license="MIT",
    author='<NAME>',
    author_email='<EMAIL>',
    # BUGFIX: 'tkinter' was listed here, but tkinter ships with CPython and
    # has no installable PyPI distribution, so `pip install` of this package
    # always failed while resolving dependencies. Only real PyPI packages
    # belong in install_requires.
    install_requires=['nltk', 'pymongo'],
    scripts=[]
)
| [
"setuptools.setup"
] | [((30, 267), 'setuptools.setup', 'setup', ([], {'name': '"""nd"""', 'py_modules': "['nd']", 'version': '"""1.0.0"""', 'description': '"""user friendly emulation game selection"""', 'license': '"""MIT"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'install_requires': "['tkinter', 'nltk', 'pymongo']", 'scripts': '[]'}), "(name='nd', py_modules=['nd'], version='1.0.0', description=\n 'user friendly emulation game selection', license='MIT', author=\n '<NAME>', author_email='<EMAIL>', install_requires=['tkinter', 'nltk',\n 'pymongo'], scripts=[])\n", (35, 267), False, 'from setuptools import setup\n')] |
"""add class of delete
Revision ID: <PASSWORD>
Revises: <PASSWORD>
Create Date: 2019-03-04 17:50:54.573744
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<PASSWORD>'  # this migration's id (redacted placeholder)
down_revision = '<PASSWORD>'  # parent revision (redacted placeholder)
branch_labels = None  # no named branch
depends_on = None  # no cross-branch dependency
def upgrade():
    """Drop the ``username`` column from the ``subscribers`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('subscribers', 'username')
    # ### end Alembic commands ###
def downgrade():
    """Re-create ``subscribers.username`` as nullable VARCHAR(255).

    Column data removed by :func:`upgrade` is not restored.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('subscribers', sa.Column('username', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
    # ### end Alembic commands ###
| [
"sqlalchemy.VARCHAR",
"alembic.op.drop_column"
] | [((382, 423), 'alembic.op.drop_column', 'op.drop_column', (['"""subscribers"""', '"""username"""'], {}), "('subscribers', 'username')\n", (396, 423), False, 'from alembic import op\n'), ((599, 621), 'sqlalchemy.VARCHAR', 'sa.VARCHAR', ([], {'length': '(255)'}), '(length=255)\n', (609, 621), True, 'import sqlalchemy as sa\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 5 01:34:00 2021
@author: yrc2
"""
import biosteam as bst
import biorefineries.oilcane as oc
from biosteam.utils import CABBI_colors, colors
from thermosteam.utils import set_figure_size, set_font, roundsigfigs
from thermosteam.units_of_measure import format_units
from colorpalette import Palette
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from warnings import warn
import numpy as np
import pandas as pd
from matplotlib.gridspec import GridSpec
from . import _variable_mockups as variables
from ._variable_mockups import (
tea_monte_carlo_metric_mockups,
tea_monte_carlo_derivative_metric_mockups,
lca_monte_carlo_metric_mockups,
lca_monte_carlo_derivative_metric_mockups,
MFPP, TCI, electricity_production, natural_gas_consumption,
ethanol_production, biodiesel_production,
GWP_ethanol, GWP_biodiesel, GWP_electricity,
GWP_ethanol_allocation, GWP_biodiesel_allocation,
GWP_economic, MFPP_derivative,
TCI_derivative,
ethanol_production_derivative,
biodiesel_production_derivative,
electricity_production_derivative,
natural_gas_consumption_derivative,
GWP_ethanol_derivative,
)
from ._load_data import (
images_folder,
get_monte_carlo,
spearman_file,
)
import os
from._parse_configuration import format_name
__all__ = (
'plot_all',
'plot_montecarlo_main_manuscript',
'plot_breakdowns',
'plot_montecarlo_feedstock_comparison',
'plot_montecarlo_configuration_comparison',
'plot_montecarlo_agile_comparison',
'plot_montecarlo_derivative',
'plot_montecarlo_absolute',
'plot_spearman_tea',
'plot_spearman_lca',
'plot_spearman_tea_short',
'plot_spearman_lca_short',
'plot_monte_carlo_across_coordinate',
'monte_carlo_box_plot',
'plot_monte_carlo',
'plot_spearman',
'plot_configuration_breakdown',
'plot_TCI_areas_across_oil_content',
'plot_heatmap_comparison',
'plot_feedstock_conventional_comparison_kde',
'plot_feedstock_cellulosic_comparison_kde',
'plot_configuration_comparison_kde',
'plot_open_comparison_kde',
'plot_feedstock_comparison_kde',
'plot_crude_configuration_comparison_kde',
'plot_agile_comparison_kde',
'plot_separated_configuration_comparison_kde',
'area_colors',
'area_hatches',
)
# Fill colors for process areas in breakdown figures. Both the abbreviated
# and the full area names map to the same color so either label variant
# renders consistently.
area_colors = {
    'Feedstock handling': CABBI_colors.teal,
    'Juicing': CABBI_colors.green_dirty,
    'EtOH prod.': CABBI_colors.blue,
    'Ethanol production': CABBI_colors.blue,
    'Oil ext.': CABBI_colors.brown,
    'Oil extraction': CABBI_colors.brown,
    'Biod. prod.': CABBI_colors.orange,
    'Biodiesel production': CABBI_colors.orange,
    'Pretreatment': CABBI_colors.green,
    'Wastewater treatment': colors.purple,
    'CH&P': CABBI_colors.yellow,
    'Co-Heat and Power': CABBI_colors.yellow,
    'Utilities': colors.red,
    'Storage': CABBI_colors.grey,
    'HXN': colors.orange,
    'Heat exchanger network': colors.orange,
}

# Matching hatch patterns (matplotlib hatch strings) per process area.
area_hatches = {
    'Feedstock handling': 'x',
    'Juicing': '-',
    'EtOH prod.': '/',
    'Ethanol production': '/',
    'Oil ext.': '\\',
    'Oil extraction': '\\',
    'Biod. prod.': '/|',
    'Biodiesel production': '/|',
    'Pretreatment': '//',
    'Wastewater treatment': r'\\',
    'CH&P': '',
    'Co-Heat and Power': '',
    'Utilities': '\\|',
    'Storage': '',
    'HXN': '+',
    'Heat exchanger network': '+',
}

# Lighten every area color by 20% for plotting.
for i in area_colors: area_colors[i] = area_colors[i].tint(20)
palette = Palette(**area_colors)
letter_color = colors.neutral.shade(25).RGBn
# LaTeX axis-label units for GWP per liter (and a g-based small variant).
GWP_units_L = '$\\mathrm{kg} \\cdot \\mathrm{CO}_{2}\\mathrm{eq} \\cdot \\mathrm{L}^{-1}$'
GWP_units_L_small = GWP_units_L.replace('kg', 'g')
CABBI_colors.orange_hatch = CABBI_colors.orange.copy(hatch='////')

ethanol_over_biodiesel = bst.MockVariable('Ethanol over biodiesel', 'L/MT', 'Biorefinery')
GWP_ethanol_displacement = variables.GWP_ethanol_displacement
production = (ethanol_production, biodiesel_production)

# Each settings dict maps a metric key to a tuple of
# (metric mockup(s), axis label, optional scaling factor).
# Absolute Monte Carlo metrics:
mc_metric_settings = {
    'MFPP': (MFPP, f"MFPP\n[{format_units('USD/MT')}]", None),
    'TCI': (TCI, f"TCI\n[{format_units('10^6*USD')}]", None),
    'production': (production, f"Production\n[{format_units('L/MT')}]", None),
    'electricity_production': (electricity_production, f"Elec. prod.\n[{format_units('kWhr/MT')}]", None),
    'natural_gas_consumption': (natural_gas_consumption, f"NG cons.\n[{format_units('m^3/MT')}]", None),
    'GWP_ethanol_displacement': (GWP_ethanol_displacement, "GWP$_{\\mathrm{displacement}}$" f"\n[{GWP_units_L}]", None),
    'GWP_economic': ((GWP_ethanol, GWP_biodiesel), "GWP$_{\\mathrm{economic}}$" f"\n[{GWP_units_L}]", None),
    'GWP_energy': ((GWP_ethanol_allocation, GWP_biodiesel_allocation), "GWP$_{\\mathrm{energy}}$" f"\n[{GWP_units_L}]", None),
}
# Pairwise configuration-comparison (delta) metrics:
mc_comparison_settings = {
    'MFPP': (MFPP, r"$\Delta$" + f"MFPP\n[{format_units('USD/MT')}]", None),
    'TCI': (TCI, r"$\Delta$" + f"TCI\n[{format_units('10^6*USD')}]", None),
    'production': (production, r"$\Delta$" + f"Production\n[{format_units('L/MT')}]", None),
    'electricity_production': (electricity_production, r"$\Delta$" + f"Elec. prod.\n[{format_units('kWhr/MT')}]", None),
    'natural_gas_consumption': (natural_gas_consumption, r"$\Delta$" + f"NG cons.\n[{format_units('m^3/MT')}]", None),
    'GWP_ethanol_displacement': (GWP_ethanol_displacement, r"$\Delta$" + "GWP$_{\\mathrm{displacement}}$" f"\n[{GWP_units_L}]", None),
    'GWP_economic': (GWP_ethanol, r"$\Delta$" + "GWP$_{\\mathrm{economic}}$" f"\n[{GWP_units_L}]", None),
    'GWP_energy': (GWP_ethanol_allocation, r"$\Delta$" + "GWP$_{\\mathrm{energy}}$" f"\n[{GWP_units_L}]", None),
    'GWP_property_allocation': ((GWP_ethanol, GWP_ethanol_allocation), r"$\Delta$" + f"GWP\n[{GWP_units_L}]", None),
}
# Derivatives of metrics with respect to feedstock oil content (OC):
mc_derivative_metric_settings = {
    'MFPP': (MFPP_derivative, r"$\Delta$" + format_units(r"MFPP/OC").replace('cdot', r'cdot \Delta') + f"\n[{format_units('USD/MT')}]", None),
    'TCI': (TCI_derivative, r"$\Delta$" + format_units(r"TCI/OC").replace('cdot', r'cdot \Delta') + f"\n[{format_units('10^6*USD')}]", None),
    'production': ((ethanol_production_derivative, biodiesel_production_derivative), r"$\Delta$" + format_units(r"Prod./OC").replace('cdot', r'cdot \Delta') + f"\n[{format_units('L/MT')}]", None),
    'electricity_production': (electricity_production_derivative, r"$\Delta$" + format_units(r"EP/OC").replace('cdot', r'cdot \Delta') + f"\n[{format_units('kWhr/MT')}]", None),
    'natural_gas_consumption': (natural_gas_consumption_derivative, r"$\Delta$" + format_units(r"NGC/OC").replace('cdot', r'cdot \Delta') + f"\n[{format_units('m^3/MT')}]", None),
    'GWP_economic': (GWP_ethanol_derivative, r"$\Delta$" + r"GWP $\cdot \Delta \mathrm{OC}^{-1}$" f"\n[{GWP_units_L_small}]", 1000),
}
# Re-key the same settings by metric mockup (first tuple element) for the
# kernel-density (KDE) plots.
kde_metric_settings = {j[0]: j for j in mc_metric_settings.values()}
kde_comparison_settings = {j[0]: j for j in mc_comparison_settings.values()}
kde_derivative_settings = {j[0]: j for j in mc_derivative_metric_settings.values()}
# %% Plots for publication
def plot_all():
    """Generate and save every publication figure in sequence."""
    # plot_montecarlo_main_manuscript()
    plot_montecarlo_absolute()
    plot_spearman_tea()
    plot_spearman_lca()
    plot_breakdowns()
def plot_montecarlo_main_manuscript():
    """Compose the three Monte Carlo comparison panels (feedstock,
    configuration, and agile/oilsorghum) into the single main-manuscript
    figure and save it as SVG and PNG under ``images_folder``."""
    set_font(size=8)
    set_figure_size(aspect_ratio=0.85)
    fig = plt.figure()
    everything = GridSpec(4, 3, fig, hspace=1.5, wspace=0.7,
                          top=0.90, bottom=0.05,
                          left=0.11, right=0.97)
    def spec2axes(spec, x, y, hspace=0, wspace=0.7, **kwargs):
        # Split a gridspec slot into an x-by-y array of axes.
        subspec = spec.subgridspec(x, y, hspace=hspace, wspace=wspace, **kwargs)
        return np.array([[fig.add_subplot(subspec[i, j]) for j in range(y)] for i in range(x)], object)
    gs_feedstock_comparison = everything[:2, :]
    gs_configuration_comparison = everything[2:, :2]
    gs_agile_comparison = everything[2:, 2]
    axes_feedstock_comparison = spec2axes(gs_feedstock_comparison, 2, 3)
    axes_configuration_comparison = spec2axes(gs_configuration_comparison, 2, 2)
    axes_agile_comparison = spec2axes(gs_agile_comparison, 2, 1)
    plot_montecarlo_feedstock_comparison(axes_feedstock_comparison, letters='ABCDEFG')
    plot_montecarlo_configuration_comparison(axes_configuration_comparison, letters='ABCDEFG')
    plot_montecarlo_agile_comparison(axes_agile_comparison, letters='ABCDEFG')
    def add_title(gs, title):
        # Invisible axes spanning the gridspec used only to carry a title.
        ax = fig.add_subplot(gs)
        ax._frameon = False
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
        ax.set_title(
            title, color=letter_color,
            horizontalalignment='center',verticalalignment='center',
            fontsize=12, fontweight='bold', y=1.1
        )
    add_title(gs_feedstock_comparison, '(I) Impact of opting to process oilcane over sugarcane')
    add_title(gs_configuration_comparison, '(II) Impact of cellulosic ethanol integration')
    add_title(gs_agile_comparison, '(III) Impact of\noilsorghum\nintegration')
    plt.show()
    for i in ('svg', 'png'):
        file = os.path.join(images_folder, f'montecarlo_main_manuscript.{i}')
        plt.savefig(file, transparent=True)
def plot_montecarlo_feedstock_comparison(axes_box=None, letters=None,
                                         single_column=True):
    """Plot Monte Carlo metric differences for oilcane vs. sugarcane
    (comparisons 'O1 - S1' and 'O2 - S2').

    Parameters
    ----------
    axes_box : array of Axes, optional
        Pre-made axes to draw into (used by the composite manuscript
        figure). When None, a stand-alone figure is created and saved.
    letters : str, optional
        Panel letters; defaults to 'ABCDEFGH'.
    single_column : bool
        Single-column (1-wide) vs. 3-column layout geometry.
    """
    if single_column:
        width = 'half'
        aspect_ratio = 2.25
        ncols = 1
        left = 0.255
        bottom = 0.05
    else:
        width = None
        aspect_ratio = 0.75
        left = 0.105
        bottom = 0.12
        ncols = 3
    if axes_box is None:
        set_font(size=8)
        set_figure_size(width=width, aspect_ratio=aspect_ratio)
    fig, axes = plot_monte_carlo(
        derivative=False, absolute=False, comparison=True,
        tickmarks=None, agile=False, ncols=ncols, axes_box=axes_box,
        labels=[
            'Direct Cogeneration',
            'Integrated Co-Fermentation',
            # 'Direct Cogeneration',
            # 'Integrated Co-Fermentation',
        ],
        comparison_names=['O1 - S1', 'O2 - S2'],
        metrics = ['MFPP', 'TCI', 'production', 'GWP_property_allocation',
                   'natural_gas_consumption', 'electricity_production'],
        color_wheel = CABBI_colors.wheel([
            'blue_light', 'green_dirty', 'orange', 'green',
            'orange', 'orange_hatch', 'grey', 'brown',
        ])
    )
    # Label each panel with its letter in the upper region.
    for ax, letter in zip(axes, 'ABCDEFGH' if letters is None else letters):
        plt.sca(ax)
        ylb, yub = plt.ylim()
        plt.text(1.65, ylb + (yub - ylb) * 0.90, letter, color=letter_color,
                 horizontalalignment='center',verticalalignment='center',
                 fontsize=12, fontweight='bold')
        # if axes_box is None and letter in 'DH':
        #     x = 0.5
        #     plt.text(x, ylb - (yub - ylb) * 0.3,
        #               'Impact of processing\noilcane over sugarcane',
        #               horizontalalignment='center',verticalalignment='center',
        #               fontsize=8)
    if axes_box is None:
        plt.subplots_adjust(right=0.96, left=left, wspace=0.38, top=0.98, bottom=bottom)
        for i in ('svg', 'png'):
            file = os.path.join(images_folder, f'montecarlo_feedstock_comparison.{i}')
            plt.savefig(file, transparent=True)
def plot_montecarlo_configuration_comparison(axes_box=None, letters=None,
                                         single_column=True):
    """Plot Monte Carlo metric differences for cellulosic ethanol
    integration at an oilcane biorefinery (comparison 'O2 - O1').

    Parameters mirror :func:`plot_montecarlo_feedstock_comparison`; the
    single-column layout additionally includes the natural gas and
    electricity metrics.
    """
    if single_column:
        width = 'half'
        aspect_ratio = 2.25
        ncols = 1
        left = 0.255
        bottom = 0.05
        x = 1.65
        metrics= ['MFPP', 'TCI', 'production', 'GWP_property_allocation',
                  'natural_gas_consumption', 'electricity_production']
    else:
        width = None
        aspect_ratio = 0.75
        left = 0.105
        bottom = 0.12
        ncols = 2
        x = 0.58
        metrics= ['MFPP', 'TCI', 'production', 'GWP_property_allocation']
    if axes_box is None:
        set_font(size=8)
        set_figure_size(width=width, aspect_ratio=aspect_ratio)
    fig, axes = plot_monte_carlo(
        derivative=False, absolute=False, comparison=True,
        tickmarks=None, agile=False, ncols=ncols, axes_box=axes_box,
        labels=[
            'Oilcane',
            # 'Sugarcane',
        ],
        comparison_names=[
            'O2 - O1',
            # 'S2 - S1'
        ],
        metrics=metrics,
        color_wheel = CABBI_colors.wheel([
            'blue_light', 'green_dirty', 'orange', 'green',
            'orange', 'orange_hatch',
        ])
    )
    # Letter each panel; `x` position depends on the layout chosen above.
    for ax, letter in zip(axes, 'ABCDEF' if letters is None else letters):
        plt.sca(ax)
        ylb, yub = plt.ylim()
        plt.text(x, ylb + (yub - ylb) * 0.90, letter, color=letter_color,
                 horizontalalignment='center',verticalalignment='center',
                 fontsize=12, fontweight='bold')
    if axes_box is None:
        plt.subplots_adjust(right=0.96, left=left, wspace=0.38, top=0.98, bottom=bottom)
        for i in ('svg', 'png'):
            file = os.path.join(images_folder, f'montecarlo_configuration_comparison.{i}')
            plt.savefig(file, transparent=True)
def plot_montecarlo_agile_comparison(axes_box=None, letters=None):
    """Plot Monte Carlo MFPP and TCI differences for integrating
    oil-sorghum at an agile oilcane biorefinery. Saves SVG/PNG when drawn
    stand-alone (``axes_box is None``)."""
    if axes_box is None:
        set_font(size=8)
        set_figure_size(width=3.3071, aspect_ratio=1.0)
    fig, axes = plot_monte_carlo(
        derivative=False, absolute=False, comparison=True,
        tickmarks=None, agile_only=True, ncols=1,
        labels=[
            'Direct Cogeneration',
            'Integrated Co-Fermentation'
        ],
        metrics=['MFPP', 'TCI'],
        axes_box=axes_box,
    )
    for ax, letter in zip(axes, 'AB' if letters is None else letters):
        plt.sca(ax)
        ylb, yub = plt.ylim()
        plt.text(1.65, ylb + (yub - ylb) * 0.90, letter, color=letter_color,
                 horizontalalignment='center',verticalalignment='center',
                 fontsize=12, fontweight='bold')
        # Stand-alone figures get an explanatory caption under the last panel.
        if axes_box is None and letter == 'B':
            plt.text(0.5, ylb - (yub - ylb) * 0.25,
                     'Impact of integrating oilsorghum\nat an agile oilcane biorefinery',
                     horizontalalignment='center',verticalalignment='center',
                     fontsize=8)
    if axes_box is None:
        plt.subplots_adjust(right=0.9, left=0.2, wspace=0.5, top=0.98, bottom=0.15)
        for i in ('svg', 'png'):
            file = os.path.join(images_folder, f'montecarlo_agile_comparison.{i}')
            plt.savefig(file, transparent=True)
def plot_montecarlo_derivative():
    """Plot Monte Carlo distributions of metric derivatives with respect
    to feedstock oil content and save the figure as SVG and PNG."""
    set_font(size=8)
    set_figure_size(
        aspect_ratio=0.5,
        # width=3.3071, aspect_ratio=1.85
    )
    fig, axes = plot_monte_carlo(
        derivative=True, absolute=True,
        comparison=False, agile=False,
        ncols=3,
        # tickmarks=np.array([
        #     [-3, -2, -1, 0, 1, 2, 3, 4, 5],
        #     [-9, -6, -3, 0, 3, 6, 9, 12, 15],
        #     [-2.0, -1.5, -1.0, -0.5, 0, 0.5, 1.0, 1.5, 2],
        #     [-16, -8, 0, 8, 16, 24, 32, 40, 48],
        #     [-400, -300, -200, -100, 0, 100, 200, 300, 400],
        #     [-300, -225, -150, -75, 0, 75, 150, 225, 300]
        # ], dtype=object),
        labels=['DC', 'ICF'],
        color_wheel = CABBI_colors.wheel([
            'blue_light', 'green_dirty', 'orange', 'green', 'grey', 'brown',
            'orange',
        ])
    )
    for ax, letter in zip(axes, 'ABCDEFGH'):
        plt.sca(ax)
        ylb, yub = plt.ylim()
        plt.text(1.65, ylb + (yub - ylb) * 0.90, letter, color=letter_color,
                 horizontalalignment='center',verticalalignment='center',
                 fontsize=12, fontweight='bold')
    plt.subplots_adjust(
        hspace=0, wspace=0.7,
        top=0.95, bottom=0.1,
        left=0.12, right=0.96
    )
    for i in ('svg', 'png'):
        file = os.path.join(images_folder, f'montecarlo_derivative.{i}')
        plt.savefig(file, transparent=True)
def plot_montecarlo_absolute():
    """Plot absolute Monte Carlo metric distributions across all eight
    feedstock/configuration scenarios and save the figure as SVG/PNG."""
    set_font(size=8)
    set_figure_size(aspect_ratio=1.05)
    fig, axes = plot_monte_carlo(
        absolute=True, comparison=False, ncols=2,
        expand=0.1,
        labels=['Sugarcane\nDC', 'Oilcane\nDC',
                'Sugarcane\nICF', 'Oilcane\nICF',
                'Sugarcane &\nSorghum DC', 'Oilcane &\nOil-sorghum DC',
                'Sugarcane &\nSorghum ICF', 'Oilcane &\nOil-sorghum ICF'],
        xrot=90,
        color_wheel = CABBI_colors.wheel([
            'blue_light', 'green_dirty', 'orange', 'green', 'grey', 'brown',
            'orange', 'orange', 'green', 'orange', 'green',
        ])
    )
    for ax, letter in zip(axes, 'ABCDEFGHIJ'):
        plt.sca(ax)
        ylb, yub = plt.ylim()
        plt.text(7.8, ylb + (yub - ylb) * 0.92, letter, color=letter_color,
                 horizontalalignment='center',verticalalignment='center',
                 fontsize=12, fontweight='bold')
    plt.subplots_adjust(left=0.12, right=0.95, wspace=0.40, top=0.98, bottom=0.2)
    for i in ('svg', 'png'):
        file = os.path.join(images_folder, f'montecarlo_absolute.{i}')
        plt.savefig(file, transparent=True)
def plot_spearman_tea(with_units=None, aspect_ratio=0.8, **kwargs):
    """Plot Spearman rank correlations for TEA metrics across the four
    oilcane configurations (DC/ICF, with and without oil-sorghum
    integration) and save as SVG/PNG."""
    set_font(size=8)
    set_figure_size(aspect_ratio=aspect_ratio)
    plot_spearman(
        configurations=[
            'O1', 'O1*',
            'O2', 'O2*',
        ],
        labels=[
            'DC', 'Oil-sorghum int., DC',
            'ICF', 'Oil-sorghum int., ICF',
        ],
        kind='TEA',
        with_units=with_units,
        cutoff=0.03,
        **kwargs
    )
    plt.subplots_adjust(left=0.45, right=0.975, top=0.98, bottom=0.08)
    for i in ('svg', 'png'):
        file = os.path.join(images_folder, f'spearman_tea.{i}')
        plt.savefig(file, transparent=True)
def plot_spearman_tea_short(**kwargs):
    """Plot an abridged (top-5 parameters, unitless) Spearman TEA figure
    for the DC and ICF configurations only."""
    set_font(size=8)
    set_figure_size(aspect_ratio=0.65, width=6.6142 * 2/3)
    plot_spearman(
        configurations=[
            'O1',
            'O2',
        ],
        labels=[
            'DC',
            'ICF',
        ],
        kind='TEA',
        with_units=False,
        cutoff=0.03,
        top=5,
        legend=True,
        legend_kwargs={'loc': 'upper left'},
        **kwargs
    )
    plt.subplots_adjust(left=0.35, right=0.975, top=0.98, bottom=0.15)
    # NOTE(review): saves to 'spearman_tea.*', the same filenames used by
    # plot_spearman_tea, so whichever runs last overwrites the other —
    # confirm this clobbering is intended.
    for i in ('svg', 'png'):
        file = os.path.join(images_folder, f'spearman_tea.{i}')
        plt.savefig(file, transparent=True)
def plot_spearman_lca_short(with_units=False, aspect_ratio=0.65, **kwargs):
    """Plot an abridged (top-5 parameters) Spearman LCA figure for the DC
    and ICF configurations only."""
    set_font(size=8)
    set_figure_size(aspect_ratio=aspect_ratio, width=6.6142 * 2/3)
    plot_spearman(
        configurations=[
            'O1',
            'O2',
        ],
        labels=[
            'DC',
            'ICF',
        ],
        kind='LCA',
        with_units=with_units,
        cutoff=0.03,
        top=5,
        legend=False,
        **kwargs
    )
    plt.subplots_adjust(left=0.35, right=0.975, top=0.98, bottom=0.15)
    # NOTE(review): saves to 'spearman_lca.*', the same filenames used by
    # plot_spearman_lca — whichever runs last overwrites the other; confirm
    # this is intended.
    for i in ('svg', 'png'):
        file = os.path.join(images_folder, f'spearman_lca.{i}')
        plt.savefig(file, transparent=True)
def plot_spearman_lca(with_units=None, aspect_ratio=0.65, **kwargs):
    """Plot Spearman rank correlations for LCA metrics across the four
    oilcane configurations and save as SVG/PNG."""
    set_font(size=8)
    set_figure_size(aspect_ratio=aspect_ratio)
    plot_spearman(
        configurations=[
            'O1', 'O1*',
            'O2', 'O2*',
        ],
        labels=[
            'DC', 'Oil-sorghum int., DC',
            'ICF', 'Oil-sorghum int., ICF',
        ],
        kind='LCA',
        with_units=with_units,
        cutoff=0.03,
        **kwargs
    )
    plt.subplots_adjust(left=0.45, right=0.975, top=0.98, bottom=0.10)
    for i in ('svg', 'png'):
        file = os.path.join(images_folder, f'spearman_lca.{i}')
        plt.savefig(file, transparent=True)
def plot_breakdowns():
    """Plot side-by-side cost/impact breakdowns for the O1 (Direct
    Cogeneration) and O2 (Integrated Co-Fermentation) configurations and
    save the figure as SVG/PNG."""
    set_font(size=8)
    set_figure_size(aspect_ratio=0.68)
    fig, axes = plt.subplots(nrows=1, ncols=2)
    plt.sca(axes[0])
    plot_configuration_breakdown('O1', ax=axes[0], legend=False)
    plt.sca(axes[1])
    plot_configuration_breakdown('O2', ax=axes[1], legend=True)
    # Blank out the right panel's y tick labels; the left panel carries them.
    yticks = axes[1].get_yticks()
    plt.yticks(yticks, ['']*len(yticks))
    plt.ylabel('')
    plt.subplots_adjust(left=0.09, right=0.96, wspace=0., top=0.84, bottom=0.31)
    for ax, letter in zip(axes, ['(A) Direct Cogeneration', '(B) Integrated Co-Fermentation']):
        plt.sca(ax)
        ylb, yub = plt.ylim()
        xlb, xub = plt.xlim()
        plt.text((xlb + xub) * 0.5, ylb + (yub - ylb) * 1.2, letter, color=letter_color,
                 horizontalalignment='center',verticalalignment='center',
                 fontsize=12, fontweight='bold')
    for i in ('svg', 'png'):
        file = os.path.join(images_folder, f'breakdowns.{i}')
        plt.savefig(file, transparent=True)
# %% Heatmap
def get_fraction_in_same_direction(data, direction):
    """Return the fraction of samples in ``data`` whose sign matches
    ``direction`` (+1 or -1); zeros count as matching either sign."""
    matches_direction = direction * data >= 0.
    return matches_direction.sum(axis=0) / data.size
def get_median(data):
    """Return the median of ``data`` along axis 0, rounded to significant
    figures via ``roundsigfigs``."""
    median = np.median(data, axis=0)
    return roundsigfigs(median)
def plot_heatmap_comparison(comparison_names=None, xlabels=None):
    """Plot a heatmap of Monte Carlo scenario comparisons: cell color gives
    the fraction of simulations agreeing in sign with the median, and each
    cell is annotated with the median difference.

    Parameters
    ----------
    comparison_names : list of str, optional
        Comparison scenario names; defaults to ``oc.comparison_names``.
    xlabels : list of str, optional
        Column labels; defaults to formatted comparison names.
    """
    if comparison_names is None: comparison_names = oc.comparison_names
    columns = comparison_names
    if xlabels is None: xlabels = [format_name(i).replace(' ', '') for i in comparison_names]
    def get_data(metric, name):
        # Monte Carlo samples for one metric/comparison as a numpy array.
        df = get_monte_carlo(name, metric)
        values = df.values
        return values
    # Shadow the module-level mockups with locals unpacked from the
    # metric-mockup tuples.
    GWP_economic, GWP_ethanol, GWP_biodiesel, GWP_electricity, GWP_crude_glycerol, = lca_monte_carlo_metric_mockups
    MFPP, TCI, ethanol_production, biodiesel_production, electricity_production, natural_gas_consumption = tea_monte_carlo_metric_mockups
    GWP_ethanol_displacement = variables.GWP_ethanol_displacement
    GWP_ethanol_allocation = variables.GWP_ethanol_allocation
    rows = [
        MFPP,
        TCI,
        ethanol_production,
        biodiesel_production,
        electricity_production,
        natural_gas_consumption,
        GWP_ethanol_displacement,
        GWP_ethanol_allocation,
        GWP_ethanol, # economic
    ]
    ylabels = [
        f"MFPP\n[{format_units('USD/MT')}]",
        f"TCI\n[{format_units('10^6*USD')}]",
        f"Ethanol production\n[{format_units('L/MT')}]",
        f"Biodiesel production\n[{format_units('L/MT')}]",
        f"Elec. prod.\n[{format_units('kWhr/MT')}]",
        f"NG cons.\n[{format_units('m^3/MT')}]",
        "GWP$_{\\mathrm{displacement}}$" f"\n[{GWP_units_L}]",
        "GWP$_{\\mathrm{energy}}$" f"\n[{GWP_units_L}]",
        "GWP$_{\\mathrm{economic}}$" f"\n[{GWP_units_L}]",
    ]
    N_rows = len(rows)
    N_cols = len(comparison_names)
    # Object array of per-cell Monte Carlo sample arrays.
    data = np.zeros([N_rows, N_cols], dtype=object)
    data[:] = [[get_data(i, j) for j in columns] for i in rows]
    medians = np.zeros_like(data, dtype=float)
    fractions = medians.copy()
    for i in range(N_rows):
        for j in range(N_cols):
            medians[i, j] = x = get_median(data[i, j])
            # Fraction of samples sharing the median's sign.
            fractions[i, j] = get_fraction_in_same_direction(data[i, j], 1 if x > 0 else -1)
    fig, ax = plt.subplots()
    mbar = bst.plots.MetricBar(
        'Fraction in the same direction [%]', ticks=[-100, -75, -50, -25, 0, 25, 50, 75, 100],
        cmap=plt.cm.get_cmap('RdYlGn')
    )
    im, cbar = bst.plots.plot_heatmap(
        100 * fractions, vmin=0, vmax=100, ax=ax, cell_labels=medians,
        metric_bar=mbar, xlabels=xlabels, ylabels=ylabels,
    )
    cbar.ax.set_ylabel(mbar.title, rotation=-90, va="bottom")
    plt.sca(ax)
    ax.spines[:].set_visible(False)
    # White gridlines visually separate the heatmap cells.
    plt.grid(True, 'major', 'both', lw=1, color='w', ls='-')
# %% KDE
def plot_kde(name, metrics=(GWP_ethanol, MFPP), xticks=None, yticks=None,
             xbox_kwargs=None, ybox_kwargs=None, top_left='',
             top_right='Tradeoff', bottom_left='Tradeoff',
             bottom_right='', fx=None, fy=None):
    """
    Plot a 2D kernel density estimate of two Monte Carlo metrics for a
    configuration (or configuration comparison) and annotate the visible
    quadrants.

    Parameters
    ----------
    name : str
        Configuration or comparison name passed to ``oc.get_monte_carlo``.
    metrics : tuple
        Two metrics; the first goes on the x-axis, the second on the y-axis.
        Axis labels and default scaling factors come from
        ``kde_comparison_settings``.
    xticks, yticks : list, optional
        Tick positions for each axis.
    xbox_kwargs, ybox_kwargs : dict, optional
        Keyword arguments for the marginal box plots.
    top_left, top_right, bottom_left, bottom_right : str
        Quadrant labels. A label ending in '()' is prefixed with the fraction
        of Monte Carlo samples that fall in that quadrant (e.g. '74% Tradeoff').
    fx, fy : float, optional
        Scaling factors applied to the x and y values. When None, the factor
        from ``kde_comparison_settings`` is used. Explicit values let callers
        override the default unit conversion (e.g. ``fx=1000.``).
    """
    set_font(size=8)
    set_figure_size(width='half', aspect_ratio=1.20)
    Xi, Yi = [i.index for i in metrics]
    df = oc.get_monte_carlo(name, metrics)
    y = df[Yi].values
    x = df[Xi].values
    sX, sY = [kde_comparison_settings[i] for i in metrics]
    _, xlabel, default_fx = sX
    _, ylabel, default_fy = sY
    # Fall back to the settings-defined unit-conversion factors.
    if fx is None: fx = default_fx
    if fy is None: fy = default_fy
    if fx: x *= fx
    if fy: y *= fy
    ax = bst.plots.plot_kde(
        y=y, x=x, xticks=xticks, yticks=yticks,
        xticklabels=True, yticklabels=True,
        xbox_kwargs=xbox_kwargs or dict(light=CABBI_colors.orange.RGBn, dark=CABBI_colors.orange.shade(60).RGBn),
        ybox_kwargs=ybox_kwargs or dict(light=CABBI_colors.blue.RGBn, dark=CABBI_colors.blue.shade(60).RGBn),
    )
    plt.sca(ax)
    plt.xlabel(xlabel.replace('\n', ' '))
    plt.ylabel(ylabel.replace('\n', ' '))
    bst.plots.plot_quadrants()
    xlb, xub = plt.xlim()
    ylb, yub = plt.ylim()
    # Fractional positions within the current axis limits.
    xpos = lambda x: xlb + (xub - xlb) * x
    ypos = lambda y: ylb + (yub - ylb) * y
    y_mt_0 = y > 0
    y_lt_0 = y < 0
    x_mt_0 = x > 0
    x_lt_0 = x < 0
    xleft = 0.02
    xright = 0.98
    ytop = 0.94
    ybottom = 0.02
    # Annotate each quadrant only when it is visible within the axis limits;
    # labels ending in '()' get the sample fraction prefixed.
    if yub > 0. and xlb < 0.:
        if top_left.endswith('()'):
            p = (y_mt_0 & x_lt_0).sum() / y.size
            top_left = f"{p:.0%} {top_left.strip('()')}"
        plt.text(xpos(xleft), ypos(ytop), top_left, color=CABBI_colors.teal.shade(50).RGBn,
                 horizontalalignment='left', verticalalignment='top',
                 fontsize=10, fontweight='bold', zorder=10)
    if ylb < 0. and xlb < 0.:
        if bottom_left.endswith('()'):
            p = (y_lt_0 & x_lt_0).sum() / y.size
            bottom_left = f"{p:.0%} {bottom_left.strip('()')}"
        plt.text(xpos(xleft), ypos(ybottom), bottom_left, color=CABBI_colors.grey.shade(75).RGBn,
                 horizontalalignment='left', verticalalignment='bottom',
                 fontsize=10, fontweight='bold', zorder=10)
    if yub > 0. and xub > 0.:
        if top_right.endswith('()'):
            p = (y_mt_0 & x_mt_0).sum() / y.size
            top_right = f"{p:.0%} {top_right.strip('()')}"
        plt.text(xpos(xright), ypos(ytop), top_right, color=CABBI_colors.grey.shade(75).RGBn,
                 horizontalalignment='right', verticalalignment='top',
                 fontsize=10, fontweight='bold', zorder=10)
    if ylb < 0. and xub > 0.:
        if bottom_right.endswith('()'):
            p = (y_lt_0 & x_mt_0).sum() / y.size
            bottom_right = f"{p:.0%} {bottom_right.strip('()')}"
        plt.text(xpos(xright), ypos(ybottom), bottom_right, color=colors.red.shade(50).RGBn,
                 horizontalalignment='right', verticalalignment='bottom',
                 fontsize=10, fontweight='bold', zorder=10)
    plt.subplots_adjust(
        hspace=0.05, wspace=0.05,
        top=0.98, bottom=0.15,
        left=0.15, right=0.98,
    )
def plot_kde_2d(name, metrics=(GWP_ethanol, MFPP), xticks=None, yticks=None,
                top_left='', top_right='Tradeoff', bottom_left='Tradeoff',
                bottom_right='', xbox_kwargs=None, ybox_kwargs=None, titles=None):
    """
    Plot side-by-side 2D kernel density estimates of two Monte Carlo metrics
    for one or more configurations/comparisons and annotate the quadrants of
    each panel.

    Parameters
    ----------
    name : str or tuple of str
        Configuration or comparison name(s) passed to ``oc.get_monte_carlo``;
        one panel (column) is drawn per name.
    metrics : tuple
        Two metrics; the first goes on the x-axis, the second on the y-axis.
        Axis labels and scaling factors come from ``kde_comparison_settings``.
    xticks, yticks : list of lists, optional
        Tick positions per panel.
    top_left, top_right, bottom_left, bottom_right : str
        Quadrant labels. A label ending in '()' is prefixed with the fraction
        of samples in that quadrant (recomputed per panel); other labels are
        drawn on the first panel only.
    xbox_kwargs, ybox_kwargs : dict, optional
        Keyword arguments for the marginal box plots.
    titles : list of str, optional
        Panel titles drawn above the top row.
    """
    set_font(size=8)
    set_figure_size(aspect_ratio=0.65)
    # Accept a single name for convenience.
    if isinstance(name, str): name = (name,)
    Xi, Yi = [i.index for i in metrics]
    dfs = [oc.get_monte_carlo(i, metrics) for i in name]
    sX, sY = [kde_comparison_settings[i] for i in metrics]
    _, xlabel, fx = sX
    _, ylabel, fy = sY
    # Object arrays of shape (1, N_panels) of metric columns.
    xs = np.array([[df[Xi] for df in dfs]])
    ys = np.array([[df[Yi] for df in dfs]])
    if fx: xs *= fx
    if fy: ys *= fy
    axes = bst.plots.plot_kde_2d(
        xs=xs, ys=ys,
        xticks=xticks, yticks=yticks,
        xticklabels=[True, True], yticklabels=[True, True],
        xbox_kwargs=2*[xbox_kwargs or dict(light=CABBI_colors.orange.RGBn, dark=CABBI_colors.orange.shade(60).RGBn)],
        ybox_kwargs=[ybox_kwargs or dict(light=CABBI_colors.blue.RGBn, dark=CABBI_colors.blue.shade(60).RGBn)],
    )
    M, N = axes.shape
    # Fractional label positions within each panel.
    xleft = 0.02
    xright = 0.98
    ytop = 0.94
    ybottom = 0.02
    for i in range(M):
        for j in range(N):
            ax = axes[i, j]
            plt.sca(ax)
            # Only label the outer edges of the panel grid.
            if i == M - 1: plt.xlabel(xlabel.replace('\n', ' '))
            if j == 0: plt.ylabel(ylabel.replace('\n', ' '))
            bst.plots.plot_quadrants()
            xlb, xub = plt.xlim()
            ylb, yub = plt.ylim()
            xpos = lambda x: xlb + (xub - xlb) * x
            # xlpos = lambda x: xlb * (1 - x)
            ypos = lambda y: ylb + (yub - ylb) * y
            df = dfs[j]
            # NOTE: quadrant fractions use the unscaled metric values; signs
            # (and hence fractions) match the scaled data for positive fx/fy.
            x = df[Xi]
            y = df[Yi]
            y_mt_0 = y > 0
            y_lt_0 = y < 0
            x_mt_0 = x > 0
            x_lt_0 = x < 0
            # For each quadrant: '()'-suffixed labels are reset to '()' so the
            # fraction is recomputed for the next panel; plain labels are reset
            # to None so they are drawn once only.
            if yub > 0. and xlb < 0. and top_left:
                if top_left.endswith('()'):
                    p = (y_mt_0 & x_lt_0).sum() / y.size
                    top_left = f"{p:.0%} {top_left.strip('()')}"
                    replacement = '()'
                else:
                    replacement = None
                plt.text(xpos(xleft), ypos(ytop), top_left, color=CABBI_colors.teal.shade(50).RGBn,
                         horizontalalignment='left', verticalalignment='top',
                         fontsize=10, fontweight='bold', zorder=10)
                top_left = replacement
            if ylb < 0. and xlb < 0. and bottom_left:
                if bottom_left.endswith('()'):
                    p = (y_lt_0 & x_lt_0).sum() / y.size
                    bottom_left = f"{p:.0%} {bottom_left.strip('()')}"
                    replacement = '()'
                else:
                    replacement = None
                plt.text(xpos(xleft), ypos(ybottom), bottom_left, color=CABBI_colors.grey.shade(75).RGBn,
                         horizontalalignment='left', verticalalignment='bottom',
                         fontsize=10, fontweight='bold', zorder=10)
                bottom_left = replacement
            if yub > 0. and xub > 0. and top_right:
                if top_right.endswith('()'):
                    p = (y_mt_0 & x_mt_0).sum() / y.size
                    top_right = f"{p:.0%} {top_right.strip('()')}"
                    replacement = '()'
                else:
                    replacement = None
                plt.text(xpos(xright), ypos(ytop), top_right, color=CABBI_colors.grey.shade(75).RGBn,
                         horizontalalignment='right', verticalalignment='top',
                         fontsize=10, fontweight='bold', zorder=10)
                top_right = replacement
            if ylb < 0. and xub > 0. and bottom_right:
                if bottom_right.endswith('()'):
                    p = (y_lt_0 & x_mt_0).sum() / y.size
                    bottom_right = f"{p:.0%} {bottom_right.strip('()')}"
                    replacement = '()'
                else:
                    replacement = None
                plt.text(xpos(xright), ypos(ybottom), bottom_right, color=colors.red.shade(50).RGBn,
                         horizontalalignment='right', verticalalignment='bottom',
                         fontsize=10, fontweight='bold', zorder=10)
                bottom_right = replacement
    plt.subplots_adjust(
        hspace=0, wspace=0,
        top=0.98, bottom=0.15,
        left=0.1, right=0.98,
    )
    if titles:
        # Leave head-room for the panel titles.
        plt.subplots_adjust(
            top=0.90,
        )
        for ax, letter in zip(axes[0, :], titles):
            plt.sca(ax)
            ylb, yub = plt.ylim()
            xlb, xub = plt.xlim()
            plt.text((xlb + xub) * 0.5, ylb + (yub - ylb) * 1.17, letter, color=letter_color,
                     horizontalalignment='center', verticalalignment='center',
                     fontsize=12, fontweight='bold')
def plot_feedstock_conventional_comparison_kde():
    """Plot the oilcane vs. sugarcane direct-cogeneration (O1 - S1) GWP/MFPP
    KDE comparison and save it as SVG and PNG in ``images_folder``."""
    quadrant_labels = dict(
        top_left='Oilcane Favored',
        bottom_right='Sugarcane\nFavored',
        top_right='GWP\nTradeoff()',
        bottom_left='MFPP\nTradeoff()',
    )
    plot_kde(
        'O1 - S1',
        yticks=[-20, -10, 0, 10, 20, 30, 40],
        xticks=[-0.12, -0.09, -0.06, -0.03, 0, 0.03, 0.06],
        **quadrant_labels,
    )
    for extension in ('svg', 'png'):
        target = os.path.join(
            images_folder, f'feedstock_conventional_comparison_kde.{extension}'
        )
        plt.savefig(target, transparent=True)
def plot_feedstock_cellulosic_comparison_kde():
    """Plot the oilcane vs. sugarcane integrated co-fermentation (O2 - S2)
    GWP/MFPP KDE comparison and save it as SVG and PNG in ``images_folder``."""
    plot_kde(
        'O2 - S2',
        yticks=[-40, -20, 0, 20, 40, 60, 80],
        xticks=[-5, -4, -3, -2, -1, 0],
        top_left='Oilcane Favored',
        bottom_right='Sugarcane Favored',
        top_right='GWP\nTradeoff()',
        bottom_left='MFPP\nTradeoff()',
        # NOTE(review): `plot_kde` as defined above takes no `fx` keyword, so
        # this call raises TypeError as written — confirm whether `plot_kde`
        # should accept an x-scaling factor or this argument should be removed.
        fx=1000.,
    )
    for i in ('svg', 'png'):
        file = os.path.join(images_folder, f'feedstock_cellulosic_comparison_kde.{i}')
        plt.savefig(file, transparent=True)
def plot_feedstock_comparison_kde():
    """Plot the oilcane vs. sugarcane KDE comparisons for the DC (O1 - S1)
    and ICF (O2 - S2) configurations side by side and save the figure as
    SVG and PNG in ``images_folder``."""
    quadrant_labels = dict(
        top_right='GWP\nTradeoff()',
        bottom_left='MFPP\nTradeoff()',
        top_left='Oilcane\nFavored()',
        bottom_right='\nSugarcane\nFavored()',
    )
    plot_kde_2d(
        ('O1 - S1', 'O2 - S2'),
        yticks=[[-10, 0, 10, 20, 30, 40, 50, 60]],
        xticks=[[-0.12, -0.09, -0.06, -0.03, 0, 0.03, 0.06],
                [-2.0, -1.5, -1, -0.5, 0., 0.5, 1.0]],
        titles=['(A) Direct Cogeneration', '(B) Integrated Co-Fermentation'],
        **quadrant_labels,
    )
    # Remove horizontal space between the two panels.
    plt.subplots_adjust(wspace=0)
    for extension in ('svg', 'png'):
        target = os.path.join(images_folder, f'feedstock_comparison_kde.{extension}')
        plt.savefig(target, transparent=True)
def plot_configuration_comparison_kde():
    """Plot the DC vs. ICF (O1 - O2) GWP/MFPP KDE comparison and save it as
    SVG and PNG in ``images_folder``."""
    quadrant_labels = dict(
        top_right='GWP\nTradeoff()',
        bottom_left='MFPP\nTradeoff()',
        top_left='DC Favored()',
        bottom_right='ICF\nFavored()',
    )
    plot_kde(
        'O1 - O2',
        yticks=[-20, 0, 20, 40, 60],
        xticks=[-2, -1.5, -1, -0.5, 0, 0.5, 1],
        **quadrant_labels,
    )
    for extension in ('svg', 'png'):
        target = os.path.join(images_folder, f'configuration_comparison_kde.{extension}')
        plt.savefig(target, transparent=True)
def plot_separated_configuration_comparison_kde():
    """Plot separate GWP/MFPP KDE panels for the DC (O1) and ICF (O2)
    configurations and save the figure as SVG and PNG in ``images_folder``."""
    quadrant_labels = dict(
        top_right='GWP\nTradeoff()',
        bottom_left='MFPP\nTradeoff()',
        top_left='DC Favored()',
        bottom_right='ICF\nFavored()',
    )
    plot_kde_2d(
        ('O1', 'O2'),
        yticks=[[-20, 0, 20, 40, 60]],
        xticks=[
            [0, 0.5, 1, 1.5],
            [0, 2, 4, 6, 8, 10]
        ],
        **quadrant_labels,
    )
    for extension in ('svg', 'png'):
        target = os.path.join(
            images_folder, f'separated_configuration_comparison_kde.{extension}'
        )
        plt.savefig(target, transparent=True)
def plot_crude_configuration_comparison_kde():
    """Plot biodiesel-production vs. crude-oil-production KDE comparisons
    (O1 - O3 and O2 - O4) side by side and save the figure as SVG and PNG
    in ``images_folder``."""
    quadrant_labels = dict(
        top_right='GWP\nTradeoff()',
        bottom_left='MFPP\nTradeoff()',
        top_left='Biodiesel\nProduction Favored()',
        bottom_right='Crude Oil\nProduction Favored()',
    )
    plot_kde_2d(
        ('O1 - O3', 'O2 - O4'),
        yticks=[[-12, 0, 12, 24, 36, 48]],
        xticks=[
            [-0.5, -0.4, -0.3, -0.2, -0.1, 0],
            [-1, -0.8, -0.6, -0.4, -0.2, 0]
        ],
        titles=['(A) Direct Cogeneration', '(B) Integrated Co-Fermentation'],
        **quadrant_labels,
    )
    for extension in ('svg', 'png'):
        target = os.path.join(
            images_folder, f'crude_configuration_comparison_kde.{extension}'
        )
        plt.savefig(target, transparent=True)
def plot_agile_comparison_kde():
    """Plot TCI/MFPP KDE comparisons of agile (sorghum-integrated) vs.
    cane-only operation (O1* - O1 and O2* - O2) and save the figure as SVG
    and PNG in ``images_folder``."""
    quadrant_labels = dict(
        top_right='TCI-Tradeoff()',
        bottom_left='MFPP\nTradeoff()',
        top_left='Sorghum\nIntegration Favored()',
        bottom_right='Cane-only\nFavored()',
    )
    plot_kde_2d(
        ('O1* - O1', 'O2* - O2'),
        metrics=[TCI, MFPP],
        yticks=[[0, 3, 6, 9, 12, 15]],
        xticks=2*[[-150, -125, -100, -75, -50, -25, 0]],
        xbox_kwargs=dict(light=CABBI_colors.green_dirty.RGBn,
                         dark=CABBI_colors.green_dirty.shade(60).RGBn),
        titles=['(A) Direct Cogeneration', '(B) Integrated Co-Fermentation'],
        **quadrant_labels,
    )
    for extension in ('svg', 'png'):
        target = os.path.join(images_folder, f'agile_conventional_comparison_kde.{extension}')
        plt.savefig(target, transparent=True)
def plot_open_comparison_kde(overlap=False):
    """
    Plot MFPP vs. TCI KDEs for the four base configurations (O1, O2, S1, S2).

    Parameters
    ----------
    overlap : bool
        If True, oilcane and sugarcane results share panels (one row, two
        columns, two distributions per panel); if False, each configuration
        gets its own panel in a 2x2 grid.
    """
    metrics = [MFPP, TCI, GWP_ethanol, biodiesel_production]
    df_conventional_oc = oc.get_monte_carlo('O1', metrics)
    df_cellulosic_oc = oc.get_monte_carlo('O2', metrics)
    df_conventional_sc = oc.get_monte_carlo('S1', metrics)
    df_cellulosic_sc = oc.get_monte_carlo('S2', metrics)
    MFPPi = MFPP.index
    TCIi = TCI.index
    if overlap:
        # Object arrays: each cell holds a tuple of (conventional, cellulosic)
        # series so both distributions are drawn in the same panel.
        ys = np.zeros([1, 2], dtype=object)
        xs = np.zeros([1, 2], dtype=object)
        ys[0, 0] = (df_conventional_oc[MFPPi], df_cellulosic_oc[MFPPi])
        ys[0, 1] = (df_conventional_sc[MFPPi], df_cellulosic_sc[MFPPi])
        xs[0, 0] = (df_conventional_oc[TCIi], df_cellulosic_oc[TCIi])
        xs[0, 1] = (df_conventional_sc[TCIi], df_cellulosic_sc[TCIi])
        yticks = [[-30, -15, 0, 15, 30, 45, 60, 75]]
        xticks = 2*[[200, 300, 400, 500, 600]]
    else:
        # 2x2 grid: rows are conventional/cellulosic, columns oilcane/sugarcane.
        ys = np.array([
            [df_conventional_oc[MFPPi], df_conventional_sc[MFPPi]],
            [df_cellulosic_oc[MFPPi], df_cellulosic_sc[MFPPi]]
        ])
        xs = np.array([
            [df_conventional_oc[TCIi], df_conventional_sc[TCIi]],
            [df_cellulosic_oc[TCIi], df_cellulosic_sc[TCIi]]
        ])
        yticks = 2*[[-30, -15, 0, 15, 30, 45, 60, 75]]
        xticks = 2*[[200, 300, 400, 500, 600]]
    bst.plots.plot_kde_2d(
        ys=ys, xs=xs, xticks=xticks, yticks=yticks,
        xbox_kwargs=[dict(position=1), dict(position=1)],
        ybox_kwargs=[dict(position=0), dict(position=0)],
    )
#%% General Monte Carlo box plots
def plot_monte_carlo_across_coordinate(coordinate, data, color_wheel):
    """Plot Monte Carlo percentiles across a coordinate, recursing into lists
    of data arrays and drawing each with the next color from *color_wheel*."""
    if not isinstance(data, list):
        shade = color_wheel.next()
        return bst.plots.plot_montecarlo_across_coordinate(
            coordinate, data,
            light_color=shade.tint(50).RGBn,
            dark_color=shade.shade(50).RGBn,
        )
    return [
        plot_monte_carlo_across_coordinate(coordinate, item, color_wheel)
        for item in data
    ]
def monte_carlo_box_plot(data, positions, light_color, dark_color, width=None,
                         hatch=None, outliers=False, **kwargs):
    """Draw a styled box plot of Monte Carlo results at the given positions
    with 5th/95th-percentile whiskers, optionally showing outliers as small
    diamonds and hatching the boxes."""
    if outliers:
        flierprops = dict(
            marker='D',
            markerfacecolor=light_color,
            markeredgecolor=dark_color,
            markersize=3,
        )
    else:
        # Suppress outlier markers entirely.
        flierprops = dict(marker='')
    bp = plt.boxplot(
        x=data, positions=positions, patch_artist=True,
        widths=0.8 if width is None else width,
        whis=[5, 95],
        boxprops=dict(facecolor=light_color, edgecolor=dark_color),
        medianprops=dict(color=dark_color, linewidth=1.5),
        flierprops=flierprops,
        **kwargs
    )
    if hatch:
        for patch in bp['boxes']:
            patch.set(hatch=hatch)
def plot_monte_carlo(derivative=False, absolute=True, comparison=True,
                     configuration_names=None, comparison_names=None,
                     metrics=None, labels=None, tickmarks=None, agile=True,
                     ncols=1, expand=None, step_min=None,
                     agile_only=False, xrot=None,
                     color_wheel=None, axes_box=None):
    """
    Plot Monte Carlo box plots of one or more metrics (one subplot row per
    metric) across configurations and/or configuration comparisons
    (one x-position per column).

    Parameters
    ----------
    derivative : bool
        If True, use derivative metric settings and the O1/O2 configurations.
    absolute, comparison : bool
        Select absolute configurations, comparisons, or both (combined).
    configuration_names, comparison_names, metrics : list, optional
        Override the defaults derived from the flags above.
    labels : list of str, optional
        X tick labels; defaults to formatted configuration names.
    tickmarks : list, optional
        Y tickmarks per metric row; computed from the 5th-95th percentiles
        when None.
    agile, agile_only : bool
        Include or restrict to agile ('*') configurations.
    ncols : int
        Number of subplot columns to spread the metric rows over.
    expand, step_min : optional
        Passed to ``bst.plots.rounded_tickmarks_from_data``.
    xrot : optional
        X tick label rotation.
    color_wheel : optional
        Colors cycled per configuration; defaults to ``CABBI_colors.wheel()``.
    axes_box : array of Axes, optional
        Existing axes to draw into; a new figure is created when None.

    Returns
    -------
    fig, axes
    """
    # Resolve default configuration/comparison/metric selections.
    if derivative:
        default_configuration_names = ['O1', 'O2']
        default_comparison_names = ['O2 - O1']
        metric_info = mc_derivative_metric_settings
        default_metrics = list(metric_info)
    else:
        default_configuration_names = oc.configuration_names[:-2]
        default_comparison_names = oc.comparison_names
        if comparison:
            metric_info = mc_comparison_settings
        else:
            metric_info = mc_metric_settings
        if agile_only:
            # Agile configurations are marked with a '*' suffix.
            default_configuration_names = [i for i in default_configuration_names if '*' in i]
            default_comparison_names = [i for i in default_comparison_names if '*' in i]
            default_metrics = ['MFPP', 'TCI', 'production']
        else:
            default_metrics = list(metric_info)
    if configuration_names is None: configuration_names = default_configuration_names
    if comparison_names is None: comparison_names = default_comparison_names
    if metrics is None: metrics = default_metrics
    combined = absolute and comparison
    if agile_only:
        configuration_names = [i for i in configuration_names if '*' in i]
        comparison_names = [i for i in comparison_names if '*' in i]
    elif not agile:
        configuration_names = [i for i in configuration_names if '*' not in i]
        comparison_names = [i for i in comparison_names if '*' not in i]
    # Columns: configurations, comparisons, or both concatenated.
    if combined:
        columns = configurations = configuration_names + comparison_names
    elif absolute:
        columns = configurations = configuration_names
    elif comparison:
        columns = configurations = comparison_names
    else:
        columns = configurations = []
    rows, ylabels, factors = zip(*[metric_info[i] for i in metrics])
    # Keep only (row-index, factor) pairs that actually scale the data.
    factors = [(i, j) for i, j in enumerate(factors) if j is not None]
    if color_wheel is None: color_wheel = CABBI_colors.wheel()
    N_rows = len(rows)
    if axes_box is None:
        fig, axes_box = plt.subplots(ncols=ncols, nrows=int(round(N_rows / ncols)))
        plt.subplots_adjust(wspace=0.45)
    else:
        fig = None
    axes = axes_box.transpose()
    axes = axes.flatten()
    N_cols = len(columns)
    xtext = labels or [format_name(i).replace(' ', '') for i in configurations]
    N_marks = len(xtext)
    xticks = tuple(range(N_marks))
    def get_data(metric, name):
        # Missing Monte Carlo results fall back to a zero placeholder.
        try:
            df = get_monte_carlo(name, metric)
        except:
            return np.zeros([1, 1])
        else:
            values = df.values
            return values
    def plot(arr, position):
        # Draw one box (1D array) or a group of boxes (2D array) at a column.
        if arr.ndim == 2:
            N = arr.shape[1]
            width = 0.618 / N
            boxwidth = 0.618 / (N + 1/N)
            plots = []
            for i in range(N):
                color = color_wheel.next()
                boxplot = monte_carlo_box_plot(
                    data=arr[:, i], positions=[position + (i-(N-1)/2)*width],
                    light_color=color.RGBn,
                    dark_color=color.shade(60).RGBn,
                    width=boxwidth,
                    hatch=getattr(color, 'hatch', None),
                )
                plots.append(boxplot)
            return plots
        else:
            color = color_wheel.next()
            return monte_carlo_box_plot(
                data=arr, positions=[position],
                light_color=color.RGBn,
                dark_color=color.shade(60).RGBn,
                width=0.618,
            )
    # data[row, col] holds the Monte Carlo samples for (metric, column).
    data = np.zeros([N_rows, N_cols], dtype=object)
    data[:] = [[get_data(i, j) for j in columns] for i in rows]
    for i, j in factors: data[i, :] *= j
    if tickmarks is None:
        # Round tickmarks to the 5th-95th percentile range of each metric row.
        tickmarks = [
            bst.plots.rounded_tickmarks_from_data(
                i, step_min=step_min, N_ticks=8, lb_max=0, center=0,
                f=roundsigfigs, expand=expand,
                f_min=lambda x: np.percentile(x, 5),
                f_max=lambda x: np.percentile(x, 95),
            )
            for i in data
        ]
    # Shade the comparison region when configurations and comparisons share axes.
    x0 = len(configuration_names) - 0.5
    xf = len(columns) - 0.5
    for i in range(N_rows):
        ax = axes[i]
        plt.sca(ax)
        if combined:
            bst.plots.plot_vertical_line(x0)
            ax.axvspan(x0, xf, color=colors.purple_tint.tint(60).RGBn)
        plt.xlim(-0.5, xf)
    for j in range(N_cols):
        color_wheel.restart()
        for i in range(N_rows):
            ax = axes[i]
            plt.sca(ax)
            plot(data[i, j], j)
            plt.ylabel(ylabels[i])
    for i in range(N_rows):
        ax = axes[i]
        plt.sca(ax)
        yticks = tickmarks[i]
        plt.ylim([yticks[0], yticks[1]])
        if yticks[0] < 0.:
            bst.plots.plot_horizontal_line(0, color=CABBI_colors.black.RGBn, lw=0.8, linestyle='--')
        # Only the bottom row of the axes grid gets x tick labels.
        try:
            xticklabels = xtext if ax in axes_box[-1] else []
        except:
            xticklabels = xtext if i == N_rows - 1 else []
        bst.plots.style_axis(ax,
            xticks = xticks,
            yticks = yticks,
            xticklabels= xticklabels,
            ytick0=False,
            ytickf=False,
            offset_xticks=True,
            xrot=xrot,
        )
    if fig is None:
        fig = plt.gcf()
    else:
        plt.subplots_adjust(hspace=0)
    fig.align_ylabels(axes)
    return fig, axes
#%% Spearman
def plot_spearman(configurations, labels=None, metric=None,
                  kind=None, with_units=None, legend=None, legend_kwargs=None, **kwargs):
    """
    Plot a 2D Spearman rank-correlation chart of model parameters against a
    metric across the given configurations.

    Parameters
    ----------
    configurations : list of str
        Configuration names; Spearman results are read from each
        configuration's spreadsheet via ``spearman_file``.
    labels : list of str, optional
        Legend labels; defaults to formatted configuration names.
    metric : optional
        Metric object or the shorthand strings 'MFPP'/'GWP'; defaults by *kind*.
    kind : str
        'TEA' (default) or 'LCA'; also controls which parameters are zeroed
        out as irrelevant (e.g. prices for LCA, GWP factors for TEA).
    with_units : bool
        Include the bracketed unit/range text in parameter labels (default True).
    legend : bool
        Draw a legend (default True).
    legend_kwargs : dict, optional
        Passed to ``plt.legend``; defaults to ``{'loc': 'lower left'}``.
    **kwargs
        Passed to ``bst.plots.plot_spearman_2d``.

    Returns
    -------
    fig, ax
    """
    if kind is None: kind = 'TEA'
    if with_units is None: with_units = True
    if legend is None: legend = True
    # Resolve the metric object and its display name.
    if metric is None:
        if kind == 'TEA':
            metric = MFPP
            metric_name = metric.name
        elif kind == 'LCA':
            metric = GWP_economic
            metric_name = r'GWP$_{\mathrm{economic}}$'
        else:
            raise ValueError(f"invalid kind '{kind}'")
    else:
        if metric == 'MFPP':
            metric = MFPP
        elif metric == 'GWP':
            metric = GWP_economic
        metric_name = metric.name
    # Pre-formatted unit strings used in the parameter labels below.
    stream_price = format_units('USD/L')
    USD_MT = format_units('USD/MT')
    ng_price = format_units('USD/m^3')
    electricity_price = format_units('USD/kWhr')
    operating_days = format_units('day/yr')
    capacity = format_units('10^6 MT/yr')
    titer = format_units('g/L')
    productivity = format_units('g/L/hr')
    material_GWP = '$\\mathrm{kg} \\cdot \\mathrm{CO}_{2}\\mathrm{eq} \\cdot \\mathrm{kg}^{-1}$'
    feedstock_GWP = '$\\mathrm{g} \\cdot \\mathrm{CO}_{2}\\mathrm{eq} \\cdot \\mathrm{kg}^{-1}$'
    # Each entry pairs a parameter label with the configurations for which the
    # parameter is irrelevant (its correlation is zeroed out there).
    index, ignored_list = zip(*[
        ('Crushing mill oil recovery [60 $-$ 95 %]', ['S2', 'S1', 'S2*', 'S1*']),
        ('Saccharification oil recovery [70 $-$ 95 %]', ['S2', 'S1', 'S2*', 'S1*', 'O1', 'O1*']),
        (f'Cane operating days [120 $-$ 180 {operating_days}]', []),
        (f'Sorghum operating days [30 $-$ 60 {operating_days}]', ['S2', 'S1', 'O1', 'O2']),
        (f'Crushing capacity [1.2 $-$ 2.0 {capacity}]', []),
        (f'Ethanol price [0.269, 0.476, 0.758 {stream_price}]', []),
        (f'Relative biodiesel price [0.0819, 0.786, 1.09 {stream_price}]', []),
        (f'Natural gas price [0.105, 0.122, 0.175 {ng_price}]', ['S1', 'O1', 'S1*', 'O1*']),
        (f'Electricity price [0.0583, 0.065, 0.069 {electricity_price}]', ['S2', 'O2', 'S2*', 'O2*']),
        ('IRR [10 $-$ 15 %]', []),
        (f'Crude glycerol price [100 $-$ 220 {USD_MT}]', ['S2', 'S1', 'S2*', 'S1*']),
        (f'Pure glycerol price [488 $-$ 812 {USD_MT}]', ['S2', 'S1', 'S2*', 'S1*']),
        ('Saccharification reaction time [54 $-$ 90 hr]', ['S1', 'O1', 'S1*', 'O1*']),
        (f'Cellulase price [159 $-$ 265 {USD_MT}]', ['S1', 'O1', 'S1*', 'O1*']),
        ('Cellulase loading [1.5 $-$ 2.5 wt. % cellulose]', ['S1', 'O1', 'S1*', 'O1*']),
        ('PTRS base cost [14.9 $-$ 24.7 MMUSD]', ['S1', 'O1', 'S1*', 'O1*']),
        # ('Pretreatment reactor system base cost [14.9 $-$ 24.7 MMUSD]', ['S1', 'O1', 'S1*', 'O1*']),
        ('Cane glucose yield [85 $-$ 97.5 %]', ['S1', 'O1', 'S1*', 'O1*']),
        ('Sorghum glucose yield [85 $-$ 97.5 %]', ['S1', 'O1', 'S1*', 'O1*']),
        ('Cane xylose yield [65 $-$ 97.5 %]', ['S1', 'O1', 'S1*', 'O1*']),
        ('Sorghum xylose yield [65 $-$ 97.5 %]', ['S1', 'O1', 'S1*', 'O1*']),
        ('Glucose to ethanol yield [90 $-$ 95 %]', ['S1', 'O1', 'S1*', 'O1*']),
        ('Xylose to ethanol yield [50 $-$ 95 %]', ['S1', 'O1', 'S1*', 'O1*']),
        (f'Titer [65 $-$ 130 {titer}]', ['S1', 'O1', 'S1*', 'O1*']),
        (f'Productivity [1.0 $-$ 2.0 {productivity}]', ['S1', 'O1', 'S1*', 'O1*']),
        ('Cane PL content [7.5 $-$ 12.5 %]', ['S2', 'S1', 'S2*', 'S1*']),
        ('Sorghum PL content [7.5 $-$ 12.5 %]', ['S2', 'S1', 'S2*', 'S1*']),
        ('Cane FFA content [7.5 $-$ 12.5 %]', ['S2', 'S1', 'S2*', 'S1*']),
        ('Sorghum FFA content [7.5 $-$ 12.5 %]', ['S2', 'S1', 'S2*', 'S1*']),
        ('Cane oil content [5 $-$ 15 dry wt. %]', ['S2', 'S1', 'S2*', 'S1*']),
        ('Relative sorghum oil content [-3 $-$ 0 dry wt. %]', ['S2', 'S1', 'S2*', 'S1*', 'O2', 'O1']),
        ('TAG to FFA conversion [17.25 $-$ 28.75 % theoretical]', ['S1', 'O1', 'S1*', 'O1*']),
        # TODO: change lower upper values to baseline +- 10%
        (f'Feedstock GWPCF [26.3 $-$ 44.0 {feedstock_GWP}]', ['S1', 'S2', 'S1*', 'S2*']),
        (f'Methanol GWPCF [0.338 $-$ 0.563 {material_GWP}]', ['S1', 'S2', 'S1*', 'S2*']),
        (f'Pure glycerine GWPCF [1.25 $-$ 2.08 {material_GWP}]', ['S1', 'S2', 'S1*', 'S2*']),
        (f'Cellulase GWPCF [6.05 $-$ 10.1 {material_GWP}]', ['S1', 'O1', 'S1*', 'O1*']),
        (f'Natural gas GWPCF [0.297 $-$ 0.363 {material_GWP}]', ['S1', 'O1', 'S1*', 'O1*']),
    ])
    if not with_units: index = [i.split(' [')[0] for i in index]
    # Per-configuration lists of parameter positions to zero out.
    ignored_dct = {
        'S1': [],
        'O1': [],
        'S2': [],
        'O2': [],
        'S1*': [],
        'O1*': [],
        'S2*': [],
        'O2*': [],
    }
    for i, ignored in enumerate(ignored_list):
        for name in ignored: ignored_dct[name].append(i)
        index_name = index[i]
        # Additionally drop parameters that cannot affect the chosen analysis:
        # economic inputs for LCA, GWP characterization factors for TEA.
        if kind == 'LCA':
            for term in ('cost', 'price', 'IRR', 'time', 'capacity'):
                if term in index_name:
                    for name in ignored_dct: ignored_dct[name].append(i)
                    break
        elif kind == 'TEA':
            if 'GWP' in index_name:
                for name in ignored_dct: ignored_dct[name].append(i)
        else:
            raise ValueError(f"invalid kind '{kind}'")
    rhos = []
    for name in configurations:
        file = spearman_file(name)
        try:
            df = pd.read_excel(file, header=[0, 1], index_col=[0, 1])
        except:
            # Skip configurations without saved Spearman results.
            warning = RuntimeWarning(f"file '{file}' not found")
            warn(warning)
            continue
        s = df[metric.index]
        # Zero out correlations for parameters irrelevant to this configuration.
        s.iloc[ignored_dct[name]] = 0.
        rhos.append(s)
    color_wheel = [CABBI_colors.orange, CABBI_colors.green_soft, CABBI_colors.blue, CABBI_colors.brown]
    fig, ax = bst.plots.plot_spearman_2d(rhos, index=index,
                                   color_wheel=color_wheel,
                                   name=metric_name,
                                   **kwargs)
    if legend:
        if legend_kwargs is None:
            legend_kwargs = {'loc': 'lower left'}
        plt.legend(
            handles=[
                mpatches.Patch(
                    color=color_wheel[i].RGBn,
                    label=labels[i] if labels else format_name(configurations[i])
                )
                for i in range(len(configurations))
            ],
            **legend_kwargs,
        )
    return fig, ax
# %% Other
def plot_configuration_breakdown(name, across_coordinate=False, **kwargs):
    """
    Plot a breakdown of unit-group metrics for a configuration.

    Parameters
    ----------
    name : str
        Configuration name passed to ``oc.load``.
    across_coordinate : bool
        If True, plot the breakdown across feedstock oil content
        (5-12.5 dry wt. %); otherwise plot a single stacked breakdown.
    **kwargs
        Passed through to the underlying ``bst.plots`` function.
    """
    oc.load(name)
    if across_coordinate:
        return bst.plots.plot_unit_groups_across_coordinate(
            oc.set_cane_oil_content,
            [5, 7.5, 10, 12.5],
            'Feedstock oil content [dry wt. %]',
            oc.unit_groups,
            colors=[area_colors[i.name].RGBn for i in oc.unit_groups],
            hatches=[area_hatches[i.name] for i in oc.unit_groups],
            **kwargs,
        )
    else:
        def format_total(x):
            # Format totals to ~3 significant figures; large values are
            # rounded and shown with thousands separators.
            if x < 1e3:
                return format(x, '.3g')
            else:
                x = int(x)
                n = 10 ** (len(str(x)) - 3)
                value = int(round(x / n) * n)
                return format(value, ',')
        # Rename unit groups and metrics in place for nicer display labels.
        for i in oc.unit_groups:
            if i.name == 'EtOH prod.':
                i.name = 'Ethanol production'
            elif i.name == 'Oil ext.':
                i.name = 'Oil extraction'
            elif i.name == 'Biod. prod.':
                i.name = 'Biodiesel production'
            i.metrics[0].name = 'Inst. eq.\ncost'
            i.metrics[3].name = 'Elec.\ncons.'
            i.metrics[4].name = 'Mat.\ncost'
        return bst.plots.plot_unit_groups(
            oc.unit_groups,
            colors=[area_colors[i.name].RGBn for i in oc.unit_groups],
            hatches=[area_hatches[i.name] for i in oc.unit_groups],
            format_total=format_total,
            fraction=True,
            legend_kwargs=dict(
                loc='lower center',
                ncol=4,
                bbox_to_anchor=(0, -0.52),
                labelspacing=1.5, handlelength=2.8,
                handleheight=1, scale=0.8,
            ),
            **kwargs,
        )
def plot_TCI_areas_across_oil_content(configuration='O2'):
    """
    Plot how total installed equipment cost changes with feedstock oil
    content (5-15 dry wt. %), split into areas whose cost increases with oil
    content ('Oil & fiber areas') and areas whose cost decreases
    ('Sugar areas'). Each series is offset so it starts (or ends) at zero.

    Parameters
    ----------
    configuration : str
        Configuration name passed to ``oc.load``.
    """
    oc.load(configuration)
    # Installed cost per unit group, one entry per simulated oil content.
    installed_costs = {i.name: [] for i in oc.unit_groups}
    oil_contents = np.linspace(5, 15, 10)
    # NOTE: the original code reused `i` for both the oil-content loop and the
    # inner unit-group loop; distinct names avoid that shadowing.
    for oil_content in oil_contents:
        oc.set_cane_oil_content(oil_content)
        oc.sys.simulate()
        for group in oc.unit_groups:
            installed_costs[group.name].append(group.get_installed_cost())
    increasing_areas = []
    decreasing_areas = []
    for name, group_data in installed_costs.items():
        lb, *_, ub = group_data
        # Classify each area by whether its cost grows with oil content.
        if ub > lb:
            increasing_areas.append(group_data)
        else:
            decreasing_areas.append(group_data)
    increasing_values = np.sum(increasing_areas, axis=0)
    increasing_values -= increasing_values[0]
    decreasing_values = np.sum(decreasing_areas, axis=0)
    decreasing_values -= decreasing_values[-1]
    plt.plot(oil_contents, increasing_values, label='Oil & fiber areas')
    plt.plot(oil_contents, decreasing_values, label='Sugar areas')
# def plot_monte_carlo_across_oil_content(kind=0, derivative=False):
# MFPP, TCI, *production, electricity_production, natural_gas_consumption = tea_monte_carlo_metric_mockups
# rows = [MFPP, TCI, production]
# if kind == 0:
# columns = across_oil_content_names
# elif kind == 1:
# columns = across_oil_content_agile_names
# elif kind == 2:
# columns = across_oil_content_comparison_names
# elif kind == 3:
# columns = across_oil_content_agile_comparison_names
# elif kind == 4:
# columns = across_oil_content_agile_direct_comparison_names
# else:
# raise NotImplementedError(str(kind))
# if derivative:
# x = 100 * (oil_content[:-1] + np.diff(oil_content) / 2.)
# ylabels = [
# f"MFPP der. [{format_units('USD/MT')}]",
# f"TCI der. [{format_units('10^6*USD')}]",
# f"Production der. [{format_units('L/MT')}]"
# ]
# else:
# x = 100 * oil_content
# ylabels = [
# f"MFPP$\backprime$ [{format_units('USD/MT')}]",
# f"TCI [{format_units('10^6*USD')}]",
# f"Production [{format_units('L/MT')}]"
# ]
# N_cols = len(columns)
# N_rows = len(rows)
# fig, axes = plt.subplots(ncols=N_cols, nrows=N_rows)
# data = np.zeros([N_rows, N_cols], dtype=object)
# def get_data(metric, name):
# if isinstance(metric, bst.Variable):
# return get_monte_carlo_across_oil_content(name, metric, derivative)
# else:
# return [get_data(i, name) for i in metric]
# data = np.array([[get_data(i, j) for j in columns] for i in rows])
# tickmarks = [None] * N_rows
# get_max = lambda x: max([i.max() for i in x]) if isinstance(x, list) else x.max()
# get_min = lambda x: min([i.min() for i in x]) if isinstance(x, list) else x.min()
# N_ticks = 5
# for r in range(N_rows):
# lb = min(min([get_min(i) for i in data[r, :]]), 0)
# ub = max([get_max(i) for i in data[r, :]])
# diff = 0.1 * (ub - lb)
# ub += diff
# if derivative:
# lb = floor(lb)
# ub = ceil(ub)
# step = (ub - lb) / (N_ticks - 1)
# tickmarks[r] = [0, 1] if step == 0 else [int(lb + step * i) for i in range(N_ticks)]
# else:
# if rows[r] is MFPP:
# if kind == 0 or kind == 1:
# tickmarks[r] = [-20, 0, 20, 40, 60]
# elif kind == 2:
# tickmarks[r] = [-20, -10, 0, 10, 20]
# elif kind == 3:
# tickmarks[r] = [-10, 0, 10, 20, 30]
# elif kind == 4:
# tickmarks[r] = [-5, 0, 5, 10, 15]
# continue
# lb = floor(lb / 15) * 15
# ub = ceil(ub / 15) * 15
# step = (ub - lb) / (N_ticks - 1)
# tickmarks[r] = [0, 1] if step == 0 else [int(lb + step * i) for i in range(N_ticks)]
# color_wheel = CABBI_colors.wheel()
# for j in range(N_cols):
# color_wheel.restart()
# for i in range(N_rows):
# arr = data[i, j]
# ax = axes[i, j]
# plt.sca(ax)
# percentiles = plot_monte_carlo_across_coordinate(x, arr, color_wheel)
# if i == 0: ax.set_title(format_name(columns[j]))
# xticklabels = i == N_rows - 1
# yticklabels = j == 0
# if xticklabels: plt.xlabel('Oil content [dry wt. %]')
# if yticklabels: plt.ylabel(ylabels[i])
# bst.plots.style_axis(ax,
# xticks = [5, 10, 15],
# yticks = tickmarks[i],
# xticklabels= xticklabels,
# yticklabels= yticklabels,
# ytick0=False)
# for i in range(N_cols): fig.align_ylabels(axes[:, i])
# plt.subplots_adjust(hspace=0.1, wspace=0.1) | [
"matplotlib.pyplot.boxplot",
"biosteam.utils.colors.red.shade",
"matplotlib.pyplot.grid",
"biosteam.utils.CABBI_colors.orange.shade",
"matplotlib.pyplot.ylabel",
"biosteam.plots.plot_quadrants",
"biosteam.plots.style_axis",
"biosteam.MockVariable",
"numpy.array",
"biosteam.utils.CABBI_colors.green... | [((3524, 3546), 'colorpalette.Palette', 'Palette', ([], {}), '(**area_colors)\n', (3531, 3546), False, 'from colorpalette import Palette\n'), ((3762, 3800), 'biosteam.utils.CABBI_colors.orange.copy', 'CABBI_colors.orange.copy', ([], {'hatch': '"""////"""'}), "(hatch='////')\n", (3786, 3800), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((3831, 3896), 'biosteam.MockVariable', 'bst.MockVariable', (['"""Ethanol over biodiesel"""', '"""L/MT"""', '"""Biorefinery"""'], {}), "('Ethanol over biodiesel', 'L/MT', 'Biorefinery')\n", (3847, 3896), True, 'import biosteam as bst\n'), ((3562, 3586), 'biosteam.utils.colors.neutral.shade', 'colors.neutral.shade', (['(25)'], {}), '(25)\n', (3582, 3586), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((7273, 7289), 'thermosteam.utils.set_font', 'set_font', ([], {'size': '(8)'}), '(size=8)\n', (7281, 7289), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((7294, 7328), 'thermosteam.utils.set_figure_size', 'set_figure_size', ([], {'aspect_ratio': '(0.85)'}), '(aspect_ratio=0.85)\n', (7309, 7328), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((7339, 7351), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7349, 7351), True, 'import matplotlib.pyplot as plt\n'), ((7369, 7461), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['(4)', '(3)', 'fig'], {'hspace': '(1.5)', 'wspace': '(0.7)', 'top': '(0.9)', 'bottom': '(0.05)', 'left': '(0.11)', 'right': '(0.97)'}), '(4, 3, fig, hspace=1.5, wspace=0.7, top=0.9, bottom=0.05, left=0.11,\n right=0.97)\n', (7377, 7461), False, 'from matplotlib.gridspec import GridSpec\n'), ((9025, 9035), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9033, 9035), True, 'import matplotlib.pyplot as plt\n'), ((14658, 14674), 'thermosteam.utils.set_font', 'set_font', ([], {'size': '(8)'}), '(size=8)\n', (14666, 14674), False, 'from thermosteam.utils import 
set_figure_size, set_font, roundsigfigs\n'), ((14679, 14712), 'thermosteam.utils.set_figure_size', 'set_figure_size', ([], {'aspect_ratio': '(0.5)'}), '(aspect_ratio=0.5)\n', (14694, 14712), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((15779, 15869), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0)', 'wspace': '(0.7)', 'top': '(0.95)', 'bottom': '(0.1)', 'left': '(0.12)', 'right': '(0.96)'}), '(hspace=0, wspace=0.7, top=0.95, bottom=0.1, left=0.12,\n right=0.96)\n', (15798, 15869), True, 'import matplotlib.pyplot as plt\n'), ((16079, 16095), 'thermosteam.utils.set_font', 'set_font', ([], {'size': '(8)'}), '(size=8)\n', (16087, 16095), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((16100, 16134), 'thermosteam.utils.set_figure_size', 'set_figure_size', ([], {'aspect_ratio': '(1.05)'}), '(aspect_ratio=1.05)\n', (16115, 16134), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((16999, 17075), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.12)', 'right': '(0.95)', 'wspace': '(0.4)', 'top': '(0.98)', 'bottom': '(0.2)'}), '(left=0.12, right=0.95, wspace=0.4, top=0.98, bottom=0.2)\n', (17018, 17075), True, 'import matplotlib.pyplot as plt\n'), ((17298, 17314), 'thermosteam.utils.set_font', 'set_font', ([], {'size': '(8)'}), '(size=8)\n', (17306, 17314), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((17319, 17361), 'thermosteam.utils.set_figure_size', 'set_figure_size', ([], {'aspect_ratio': 'aspect_ratio'}), '(aspect_ratio=aspect_ratio)\n', (17334, 17361), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((17680, 17746), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.45)', 'right': '(0.975)', 'top': '(0.98)', 'bottom': '(0.08)'}), '(left=0.45, right=0.975, top=0.98, bottom=0.08)\n', (17699, 17746), 
True, 'import matplotlib.pyplot as plt\n'), ((17928, 17944), 'thermosteam.utils.set_font', 'set_font', ([], {'size': '(8)'}), '(size=8)\n', (17936, 17944), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((17949, 18005), 'thermosteam.utils.set_figure_size', 'set_figure_size', ([], {'aspect_ratio': '(0.65)', 'width': '(6.6142 * 2 / 3)'}), '(aspect_ratio=0.65, width=6.6142 * 2 / 3)\n', (17964, 18005), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((18338, 18404), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.35)', 'right': '(0.975)', 'top': '(0.98)', 'bottom': '(0.15)'}), '(left=0.35, right=0.975, top=0.98, bottom=0.15)\n', (18357, 18404), True, 'import matplotlib.pyplot as plt\n'), ((18623, 18639), 'thermosteam.utils.set_font', 'set_font', ([], {'size': '(8)'}), '(size=8)\n', (18631, 18639), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((18644, 18708), 'thermosteam.utils.set_figure_size', 'set_figure_size', ([], {'aspect_ratio': 'aspect_ratio', 'width': '(6.6142 * 2 / 3)'}), '(aspect_ratio=aspect_ratio, width=6.6142 * 2 / 3)\n', (18659, 18708), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((19002, 19068), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.35)', 'right': '(0.975)', 'top': '(0.98)', 'bottom': '(0.15)'}), '(left=0.35, right=0.975, top=0.98, bottom=0.15)\n', (19021, 19068), True, 'import matplotlib.pyplot as plt\n'), ((19280, 19296), 'thermosteam.utils.set_font', 'set_font', ([], {'size': '(8)'}), '(size=8)\n', (19288, 19296), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((19301, 19343), 'thermosteam.utils.set_figure_size', 'set_figure_size', ([], {'aspect_ratio': 'aspect_ratio'}), '(aspect_ratio=aspect_ratio)\n', (19316, 19343), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), 
((19662, 19727), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.45)', 'right': '(0.975)', 'top': '(0.98)', 'bottom': '(0.1)'}), '(left=0.45, right=0.975, top=0.98, bottom=0.1)\n', (19681, 19727), True, 'import matplotlib.pyplot as plt\n'), ((19894, 19910), 'thermosteam.utils.set_font', 'set_font', ([], {'size': '(8)'}), '(size=8)\n', (19902, 19910), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((19915, 19949), 'thermosteam.utils.set_figure_size', 'set_figure_size', ([], {'aspect_ratio': '(0.68)'}), '(aspect_ratio=0.68)\n', (19930, 19949), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((19966, 19996), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)'}), '(nrows=1, ncols=2)\n', (19978, 19996), True, 'import matplotlib.pyplot as plt\n'), ((20001, 20017), 'matplotlib.pyplot.sca', 'plt.sca', (['axes[0]'], {}), '(axes[0])\n', (20008, 20017), True, 'import matplotlib.pyplot as plt\n'), ((20087, 20103), 'matplotlib.pyplot.sca', 'plt.sca', (['axes[1]'], {}), '(axes[1])\n', (20094, 20103), True, 'import matplotlib.pyplot as plt\n'), ((20247, 20261), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['""""""'], {}), "('')\n", (20257, 20261), True, 'import matplotlib.pyplot as plt\n'), ((20266, 20343), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.09)', 'right': '(0.96)', 'wspace': '(0.0)', 'top': '(0.84)', 'bottom': '(0.31)'}), '(left=0.09, right=0.96, wspace=0.0, top=0.84, bottom=0.31)\n', (20285, 20343), True, 'import matplotlib.pyplot as plt\n'), ((22700, 22740), 'numpy.zeros', 'np.zeros', (['[N_rows, N_cols]'], {'dtype': 'object'}), '([N_rows, N_cols], dtype=object)\n', (22708, 22740), True, 'import numpy as np\n'), ((22819, 22851), 'numpy.zeros_like', 'np.zeros_like', (['data'], {'dtype': 'float'}), '(data, dtype=float)\n', (22832, 22851), True, 'import numpy as np\n'), ((23118, 23132), 
'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (23130, 23132), True, 'import matplotlib.pyplot as plt\n'), ((23320, 23460), 'biosteam.plots.plot_heatmap', 'bst.plots.plot_heatmap', (['(100 * fractions)'], {'vmin': '(0)', 'vmax': '(100)', 'ax': 'ax', 'cell_labels': 'medians', 'metric_bar': 'mbar', 'xlabels': 'xlabels', 'ylabels': 'ylabels'}), '(100 * fractions, vmin=0, vmax=100, ax=ax,\n cell_labels=medians, metric_bar=mbar, xlabels=xlabels, ylabels=ylabels)\n', (23342, 23460), True, 'import biosteam as bst\n'), ((23546, 23557), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (23553, 23557), True, 'import matplotlib.pyplot as plt\n'), ((23598, 23654), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)', '"""major"""', '"""both"""'], {'lw': '(1)', 'color': '"""w"""', 'ls': '"""-"""'}), "(True, 'major', 'both', lw=1, color='w', ls='-')\n", (23606, 23654), True, 'import matplotlib.pyplot as plt\n'), ((23897, 23913), 'thermosteam.utils.set_font', 'set_font', ([], {'size': '(8)'}), '(size=8)\n', (23905, 23913), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((23918, 23965), 'thermosteam.utils.set_figure_size', 'set_figure_size', ([], {'width': '"""half"""', 'aspect_ratio': '(1.2)'}), "(width='half', aspect_ratio=1.2)\n", (23933, 23965), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((24016, 24049), 'biorefineries.oilcane.get_monte_carlo', 'oc.get_monte_carlo', (['name', 'metrics'], {}), '(name, metrics)\n', (24034, 24049), True, 'import biorefineries.oilcane as oc\n'), ((24592, 24603), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (24599, 24603), True, 'import matplotlib.pyplot as plt\n'), ((24692, 24718), 'biosteam.plots.plot_quadrants', 'bst.plots.plot_quadrants', ([], {}), '()\n', (24716, 24718), True, 'import biosteam as bst\n'), ((24734, 24744), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (24742, 24744), True, 'import matplotlib.pyplot as 
plt\n'), ((24760, 24770), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (24768, 24770), True, 'import matplotlib.pyplot as plt\n'), ((26662, 26758), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.05)', 'wspace': '(0.05)', 'top': '(0.98)', 'bottom': '(0.15)', 'left': '(0.15)', 'right': '(0.98)'}), '(hspace=0.05, wspace=0.05, top=0.98, bottom=0.15, left=\n 0.15, right=0.98)\n', (26681, 26758), True, 'import matplotlib.pyplot as plt\n'), ((27025, 27041), 'thermosteam.utils.set_font', 'set_font', ([], {'size': '(8)'}), '(size=8)\n', (27033, 27041), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((27046, 27080), 'thermosteam.utils.set_figure_size', 'set_figure_size', ([], {'aspect_ratio': '(0.65)'}), '(aspect_ratio=0.65)\n', (27061, 27080), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((27337, 27371), 'numpy.array', 'np.array', (['[[df[Xi] for df in dfs]]'], {}), '([[df[Xi] for df in dfs]])\n', (27345, 27371), True, 'import numpy as np\n'), ((27381, 27415), 'numpy.array', 'np.array', (['[[df[Yi] for df in dfs]]'], {}), '([[df[Yi] for df in dfs]])\n', (27389, 27415), True, 'import numpy as np\n'), ((31052, 31140), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0)', 'wspace': '(0)', 'top': '(0.98)', 'bottom': '(0.15)', 'left': '(0.1)', 'right': '(0.98)'}), '(hspace=0, wspace=0, top=0.98, bottom=0.15, left=0.1,\n right=0.98)\n', (31071, 31140), True, 'import matplotlib.pyplot as plt\n'), ((33141, 33170), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0)'}), '(wspace=0)\n', (33160, 33170), True, 'import matplotlib.pyplot as plt\n'), ((35929, 35962), 'biorefineries.oilcane.get_monte_carlo', 'oc.get_monte_carlo', (['"""O1"""', 'metrics'], {}), "('O1', metrics)\n", (35947, 35962), True, 'import biorefineries.oilcane as oc\n'), ((35986, 36019), 'biorefineries.oilcane.get_monte_carlo', 
'oc.get_monte_carlo', (['"""O2"""', 'metrics'], {}), "('O2', metrics)\n", (36004, 36019), True, 'import biorefineries.oilcane as oc\n'), ((36045, 36078), 'biorefineries.oilcane.get_monte_carlo', 'oc.get_monte_carlo', (['"""S1"""', 'metrics'], {}), "('S1', metrics)\n", (36063, 36078), True, 'import biorefineries.oilcane as oc\n'), ((36102, 36135), 'biorefineries.oilcane.get_monte_carlo', 'oc.get_monte_carlo', (['"""S2"""', 'metrics'], {}), "('S2', metrics)\n", (36120, 36135), True, 'import biorefineries.oilcane as oc\n'), ((38200, 38447), 'matplotlib.pyplot.boxplot', 'plt.boxplot', ([], {'x': 'data', 'positions': 'positions', 'patch_artist': '(True)', 'widths': 'width', 'whis': '[5, 95]', 'boxprops': "{'facecolor': light_color, 'edgecolor': dark_color}", 'medianprops': "{'color': dark_color, 'linewidth': 1.5}", 'flierprops': 'flierprops'}), "(x=data, positions=positions, patch_artist=True, widths=width,\n whis=[5, 95], boxprops={'facecolor': light_color, 'edgecolor':\n dark_color}, medianprops={'color': dark_color, 'linewidth': 1.5},\n flierprops=flierprops, **kwargs)\n", (38211, 38447), True, 'import matplotlib.pyplot as plt\n'), ((42466, 42506), 'numpy.zeros', 'np.zeros', (['[N_rows, N_cols]'], {'dtype': 'object'}), '([N_rows, N_cols], dtype=object)\n', (42474, 42506), True, 'import numpy as np\n'), ((45080, 45101), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""USD/L"""'], {}), "('USD/L')\n", (45092, 45101), False, 'from thermosteam.units_of_measure import format_units\n'), ((45115, 45137), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""USD/MT"""'], {}), "('USD/MT')\n", (45127, 45137), False, 'from thermosteam.units_of_measure import format_units\n'), ((45153, 45176), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""USD/m^3"""'], {}), "('USD/m^3')\n", (45165, 45176), False, 'from thermosteam.units_of_measure import format_units\n'), ((45201, 45225), 'thermosteam.units_of_measure.format_units', 
'format_units', (['"""USD/kWhr"""'], {}), "('USD/kWhr')\n", (45213, 45225), False, 'from thermosteam.units_of_measure import format_units\n'), ((45247, 45269), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""day/yr"""'], {}), "('day/yr')\n", (45259, 45269), False, 'from thermosteam.units_of_measure import format_units\n'), ((45285, 45311), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""10^6 MT/yr"""'], {}), "('10^6 MT/yr')\n", (45297, 45311), False, 'from thermosteam.units_of_measure import format_units\n'), ((45324, 45343), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""g/L"""'], {}), "('g/L')\n", (45336, 45343), False, 'from thermosteam.units_of_measure import format_units\n'), ((45363, 45385), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""g/L/hr"""'], {}), "('g/L/hr')\n", (45375, 45385), False, 'from thermosteam.units_of_measure import format_units\n'), ((50066, 50169), 'biosteam.plots.plot_spearman_2d', 'bst.plots.plot_spearman_2d', (['rhos'], {'index': 'index', 'color_wheel': 'color_wheel', 'name': 'metric_name'}), '(rhos, index=index, color_wheel=color_wheel, name\n =metric_name, **kwargs)\n', (50092, 50169), True, 'import biosteam as bst\n'), ((50827, 50840), 'biorefineries.oilcane.load', 'oc.load', (['name'], {}), '(name)\n', (50834, 50840), True, 'import biorefineries.oilcane as oc\n'), ((52583, 52605), 'biorefineries.oilcane.load', 'oc.load', (['configuration'], {}), '(configuration)\n', (52590, 52605), True, 'import biorefineries.oilcane as oc\n'), ((52725, 52747), 'numpy.linspace', 'np.linspace', (['(5)', '(15)', '(10)'], {}), '(5, 15, 10)\n', (52736, 52747), True, 'import numpy as np\n'), ((53142, 53174), 'numpy.sum', 'np.sum', (['increasing_areas'], {'axis': '(0)'}), '(increasing_areas, axis=0)\n', (53148, 53174), True, 'import numpy as np\n'), ((53245, 53277), 'numpy.sum', 'np.sum', (['decreasing_areas'], {'axis': '(0)'}), '(decreasing_areas, axis=0)\n', (53251, 53277), 
True, 'import numpy as np\n'), ((53329, 53397), 'matplotlib.pyplot.plot', 'plt.plot', (['oil_contents', 'increasing_values'], {'label': '"""Oil & fiber areas"""'}), "(oil_contents, increasing_values, label='Oil & fiber areas')\n", (53337, 53397), True, 'import matplotlib.pyplot as plt\n'), ((53402, 53464), 'matplotlib.pyplot.plot', 'plt.plot', (['oil_contents', 'decreasing_values'], {'label': '"""Sugar areas"""'}), "(oil_contents, decreasing_values, label='Sugar areas')\n", (53410, 53464), True, 'import matplotlib.pyplot as plt\n'), ((9080, 9142), 'os.path.join', 'os.path.join', (['images_folder', 'f"""montecarlo_main_manuscript.{i}"""'], {}), "(images_folder, f'montecarlo_main_manuscript.{i}')\n", (9092, 9142), False, 'import os\n'), ((9151, 9186), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (9162, 9186), True, 'import matplotlib.pyplot as plt\n'), ((9608, 9624), 'thermosteam.utils.set_font', 'set_font', ([], {'size': '(8)'}), '(size=8)\n', (9616, 9624), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((9633, 9688), 'thermosteam.utils.set_figure_size', 'set_figure_size', ([], {'width': 'width', 'aspect_ratio': 'aspect_ratio'}), '(width=width, aspect_ratio=aspect_ratio)\n', (9648, 9688), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((10503, 10514), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (10510, 10514), True, 'import matplotlib.pyplot as plt\n'), ((10534, 10544), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (10542, 10544), True, 'import matplotlib.pyplot as plt\n'), ((10553, 10718), 'matplotlib.pyplot.text', 'plt.text', (['(1.65)', '(ylb + (yub - ylb) * 0.9)', 'letter'], {'color': 'letter_color', 'horizontalalignment': '"""center"""', 'verticalalignment': '"""center"""', 'fontsize': '(12)', 'fontweight': '"""bold"""'}), "(1.65, ylb + (yub - ylb) * 0.9, letter, color=letter_color,\n 
horizontalalignment='center', verticalalignment='center', fontsize=12,\n fontweight='bold')\n", (10561, 10718), True, 'import matplotlib.pyplot as plt\n'), ((11089, 11174), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'right': '(0.96)', 'left': 'left', 'wspace': '(0.38)', 'top': '(0.98)', 'bottom': 'bottom'}), '(right=0.96, left=left, wspace=0.38, top=0.98, bottom=bottom\n )\n', (11108, 11174), True, 'import matplotlib.pyplot as plt\n'), ((12024, 12040), 'thermosteam.utils.set_font', 'set_font', ([], {'size': '(8)'}), '(size=8)\n', (12032, 12040), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((12049, 12104), 'thermosteam.utils.set_figure_size', 'set_figure_size', ([], {'width': 'width', 'aspect_ratio': 'aspect_ratio'}), '(width=width, aspect_ratio=aspect_ratio)\n', (12064, 12104), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((12699, 12710), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (12706, 12710), True, 'import matplotlib.pyplot as plt\n'), ((12730, 12740), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (12738, 12740), True, 'import matplotlib.pyplot as plt\n'), ((12749, 12911), 'matplotlib.pyplot.text', 'plt.text', (['x', '(ylb + (yub - ylb) * 0.9)', 'letter'], {'color': 'letter_color', 'horizontalalignment': '"""center"""', 'verticalalignment': '"""center"""', 'fontsize': '(12)', 'fontweight': '"""bold"""'}), "(x, ylb + (yub - ylb) * 0.9, letter, color=letter_color,\n horizontalalignment='center', verticalalignment='center', fontsize=12,\n fontweight='bold')\n", (12757, 12911), True, 'import matplotlib.pyplot as plt\n'), ((12971, 13056), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'right': '(0.96)', 'left': 'left', 'wspace': '(0.38)', 'top': '(0.98)', 'bottom': 'bottom'}), '(right=0.96, left=left, wspace=0.38, top=0.98, bottom=bottom\n )\n', (12990, 13056), True, 'import matplotlib.pyplot as plt\n'), ((13329, 13345), 
'thermosteam.utils.set_font', 'set_font', ([], {'size': '(8)'}), '(size=8)\n', (13337, 13345), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((13354, 13401), 'thermosteam.utils.set_figure_size', 'set_figure_size', ([], {'width': '(3.3071)', 'aspect_ratio': '(1.0)'}), '(width=3.3071, aspect_ratio=1.0)\n', (13369, 13401), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((13795, 13806), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (13802, 13806), True, 'import matplotlib.pyplot as plt\n'), ((13826, 13836), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (13834, 13836), True, 'import matplotlib.pyplot as plt\n'), ((13845, 14010), 'matplotlib.pyplot.text', 'plt.text', (['(1.65)', '(ylb + (yub - ylb) * 0.9)', 'letter'], {'color': 'letter_color', 'horizontalalignment': '"""center"""', 'verticalalignment': '"""center"""', 'fontsize': '(12)', 'fontweight': '"""bold"""'}), "(1.65, ylb + (yub - ylb) * 0.9, letter, color=letter_color,\n horizontalalignment='center', verticalalignment='center', fontsize=12,\n fontweight='bold')\n", (13853, 14010), True, 'import matplotlib.pyplot as plt\n'), ((14375, 14450), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'right': '(0.9)', 'left': '(0.2)', 'wspace': '(0.5)', 'top': '(0.98)', 'bottom': '(0.15)'}), '(right=0.9, left=0.2, wspace=0.5, top=0.98, bottom=0.15)\n', (14394, 14450), True, 'import matplotlib.pyplot as plt\n'), ((15533, 15544), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (15540, 15544), True, 'import matplotlib.pyplot as plt\n'), ((15564, 15574), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (15572, 15574), True, 'import matplotlib.pyplot as plt\n'), ((15583, 15748), 'matplotlib.pyplot.text', 'plt.text', (['(1.65)', '(ylb + (yub - ylb) * 0.9)', 'letter'], {'color': 'letter_color', 'horizontalalignment': '"""center"""', 'verticalalignment': '"""center"""', 'fontsize': '(12)', 
'fontweight': '"""bold"""'}), "(1.65, ylb + (yub - ylb) * 0.9, letter, color=letter_color,\n horizontalalignment='center', verticalalignment='center', fontsize=12,\n fontweight='bold')\n", (15591, 15748), True, 'import matplotlib.pyplot as plt\n'), ((15940, 15997), 'os.path.join', 'os.path.join', (['images_folder', 'f"""montecarlo_derivative.{i}"""'], {}), "(images_folder, f'montecarlo_derivative.{i}')\n", (15952, 15997), False, 'import os\n'), ((16006, 16041), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (16017, 16041), True, 'import matplotlib.pyplot as plt\n'), ((16754, 16765), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (16761, 16765), True, 'import matplotlib.pyplot as plt\n'), ((16785, 16795), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (16793, 16795), True, 'import matplotlib.pyplot as plt\n'), ((16804, 16969), 'matplotlib.pyplot.text', 'plt.text', (['(7.8)', '(ylb + (yub - ylb) * 0.92)', 'letter'], {'color': 'letter_color', 'horizontalalignment': '"""center"""', 'verticalalignment': '"""center"""', 'fontsize': '(12)', 'fontweight': '"""bold"""'}), "(7.8, ylb + (yub - ylb) * 0.92, letter, color=letter_color,\n horizontalalignment='center', verticalalignment='center', fontsize=12,\n fontweight='bold')\n", (16812, 16969), True, 'import matplotlib.pyplot as plt\n'), ((17121, 17176), 'os.path.join', 'os.path.join', (['images_folder', 'f"""montecarlo_absolute.{i}"""'], {}), "(images_folder, f'montecarlo_absolute.{i}')\n", (17133, 17176), False, 'import os\n'), ((17185, 17220), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (17196, 17220), True, 'import matplotlib.pyplot as plt\n'), ((17791, 17839), 'os.path.join', 'os.path.join', (['images_folder', 'f"""spearman_tea.{i}"""'], {}), "(images_folder, f'spearman_tea.{i}')\n", (17803, 17839), False, 'import os\n'), ((17848, 17883), 
'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (17859, 17883), True, 'import matplotlib.pyplot as plt\n'), ((18449, 18497), 'os.path.join', 'os.path.join', (['images_folder', 'f"""spearman_tea.{i}"""'], {}), "(images_folder, f'spearman_tea.{i}')\n", (18461, 18497), False, 'import os\n'), ((18506, 18541), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (18517, 18541), True, 'import matplotlib.pyplot as plt\n'), ((19113, 19161), 'os.path.join', 'os.path.join', (['images_folder', 'f"""spearman_lca.{i}"""'], {}), "(images_folder, f'spearman_lca.{i}')\n", (19125, 19161), False, 'import os\n'), ((19170, 19205), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (19181, 19205), True, 'import matplotlib.pyplot as plt\n'), ((19773, 19821), 'os.path.join', 'os.path.join', (['images_folder', 'f"""spearman_lca.{i}"""'], {}), "(images_folder, f'spearman_lca.{i}')\n", (19785, 19821), False, 'import os\n'), ((19830, 19865), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (19841, 19865), True, 'import matplotlib.pyplot as plt\n'), ((20447, 20458), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (20454, 20458), True, 'import matplotlib.pyplot as plt\n'), ((20478, 20488), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (20486, 20488), True, 'import matplotlib.pyplot as plt\n'), ((20508, 20518), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (20516, 20518), True, 'import matplotlib.pyplot as plt\n'), ((20527, 20706), 'matplotlib.pyplot.text', 'plt.text', (['((xlb + xub) * 0.5)', '(ylb + (yub - ylb) * 1.2)', 'letter'], {'color': 'letter_color', 'horizontalalignment': '"""center"""', 'verticalalignment': '"""center"""', 'fontsize': '(12)', 'fontweight': '"""bold"""'}), "((xlb + xub) * 0.5, ylb + (yub - 
ylb) * 1.2, letter, color=\n letter_color, horizontalalignment='center', verticalalignment='center',\n fontsize=12, fontweight='bold')\n", (20535, 20706), True, 'import matplotlib.pyplot as plt\n'), ((20777, 20823), 'os.path.join', 'os.path.join', (['images_folder', 'f"""breakdowns.{i}"""'], {}), "(images_folder, f'breakdowns.{i}')\n", (20789, 20823), False, 'import os\n'), ((20832, 20867), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (20843, 20867), True, 'import matplotlib.pyplot as plt\n'), ((21043, 21074), 'numpy.percentile', 'np.percentile', (['data', '(50)'], {'axis': '(0)'}), '(data, 50, axis=0)\n', (21056, 21074), True, 'import numpy as np\n'), ((27177, 27207), 'biorefineries.oilcane.get_monte_carlo', 'oc.get_monte_carlo', (['i', 'metrics'], {}), '(i, metrics)\n', (27195, 27207), True, 'import biorefineries.oilcane as oc\n'), ((31191, 31219), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.9)'}), '(top=0.9)\n', (31210, 31219), True, 'import matplotlib.pyplot as plt\n'), ((32011, 32084), 'os.path.join', 'os.path.join', (['images_folder', 'f"""feedstock_conventional_comparison_kde.{i}"""'], {}), "(images_folder, f'feedstock_conventional_comparison_kde.{i}')\n", (32023, 32084), False, 'import os\n'), ((32093, 32128), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (32104, 32128), True, 'import matplotlib.pyplot as plt\n'), ((32520, 32591), 'os.path.join', 'os.path.join', (['images_folder', 'f"""feedstock_cellulosic_comparison_kde.{i}"""'], {}), "(images_folder, f'feedstock_cellulosic_comparison_kde.{i}')\n", (32532, 32591), False, 'import os\n'), ((32600, 32635), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (32611, 32635), True, 'import matplotlib.pyplot as plt\n'), ((33239, 33299), 'os.path.join', 'os.path.join', (['images_folder', 
'f"""feedstock_comparison_kde.{i}"""'], {}), "(images_folder, f'feedstock_comparison_kde.{i}')\n", (33251, 33299), False, 'import os\n'), ((33308, 33343), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (33319, 33343), True, 'import matplotlib.pyplot as plt\n'), ((33703, 33767), 'os.path.join', 'os.path.join', (['images_folder', 'f"""configuration_comparison_kde.{i}"""'], {}), "(images_folder, f'configuration_comparison_kde.{i}')\n", (33715, 33767), False, 'import os\n'), ((33776, 33811), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (33787, 33811), True, 'import matplotlib.pyplot as plt\n'), ((34231, 34305), 'os.path.join', 'os.path.join', (['images_folder', 'f"""separated_configuration_comparison_kde.{i}"""'], {}), "(images_folder, f'separated_configuration_comparison_kde.{i}')\n", (34243, 34305), False, 'import os\n'), ((34314, 34349), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (34325, 34349), True, 'import matplotlib.pyplot as plt\n'), ((34922, 34992), 'os.path.join', 'os.path.join', (['images_folder', 'f"""crude_configuration_comparison_kde.{i}"""'], {}), "(images_folder, f'crude_configuration_comparison_kde.{i}')\n", (34934, 34992), False, 'import os\n'), ((35001, 35036), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (35012, 35036), True, 'import matplotlib.pyplot as plt\n'), ((35683, 35752), 'os.path.join', 'os.path.join', (['images_folder', 'f"""agile_conventional_comparison_kde.{i}"""'], {}), "(images_folder, f'agile_conventional_comparison_kde.{i}')\n", (35695, 35752), False, 'import os\n'), ((35761, 35796), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (35772, 35796), True, 'import matplotlib.pyplot as plt\n'), ((36209, 
36239), 'numpy.zeros', 'np.zeros', (['[1, 2]'], {'dtype': 'object'}), '([1, 2], dtype=object)\n', (36217, 36239), True, 'import numpy as np\n'), ((36253, 36283), 'numpy.zeros', 'np.zeros', (['[1, 2]'], {'dtype': 'object'}), '([1, 2], dtype=object)\n', (36261, 36283), True, 'import numpy as np\n'), ((36695, 36818), 'numpy.array', 'np.array', (['[[df_conventional_oc[MFPPi], df_conventional_sc[MFPPi]], [df_cellulosic_oc[\n MFPPi], df_cellulosic_sc[MFPPi]]]'], {}), '([[df_conventional_oc[MFPPi], df_conventional_sc[MFPPi]], [\n df_cellulosic_oc[MFPPi], df_cellulosic_sc[MFPPi]]])\n', (36703, 36818), True, 'import numpy as np\n'), ((36861, 36980), 'numpy.array', 'np.array', (['[[df_conventional_oc[TCIi], df_conventional_sc[TCIi]], [df_cellulosic_oc[\n TCIi], df_cellulosic_sc[TCIi]]]'], {}), '([[df_conventional_oc[TCIi], df_conventional_sc[TCIi]], [\n df_cellulosic_oc[TCIi], df_cellulosic_sc[TCIi]]])\n', (36869, 36980), True, 'import numpy as np\n'), ((40847, 40867), 'biosteam.utils.CABBI_colors.wheel', 'CABBI_colors.wheel', ([], {}), '()\n', (40865, 40867), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((41008, 41040), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.45)'}), '(wspace=0.45)\n', (41027, 41040), True, 'import matplotlib.pyplot as plt\n'), ((43121, 43132), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (43128, 43132), True, 'import matplotlib.pyplot as plt\n'), ((43278, 43296), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.5)', 'xf'], {}), '(-0.5, xf)\n', (43286, 43296), True, 'import matplotlib.pyplot as plt\n'), ((43566, 43577), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (43573, 43577), True, 'import matplotlib.pyplot as plt\n'), ((43616, 43648), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[yticks[0], yticks[1]]'], {}), '([yticks[0], yticks[1]])\n', (43624, 43648), True, 'import matplotlib.pyplot as plt\n'), ((43935, 44078), 'biosteam.plots.style_axis', 'bst.plots.style_axis', (['ax'], 
{'xticks': 'xticks', 'yticks': 'yticks', 'xticklabels': 'xticklabels', 'ytick0': '(False)', 'ytickf': '(False)', 'offset_xticks': '(True)', 'xrot': 'xrot'}), '(ax, xticks=xticks, yticks=yticks, xticklabels=\n xticklabels, ytick0=False, ytickf=False, offset_xticks=True, xrot=xrot)\n', (43955, 44078), True, 'import biosteam as bst\n'), ((44210, 44219), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (44217, 44219), True, 'import matplotlib.pyplot as plt\n'), ((44238, 44267), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0)'}), '(hspace=0)\n', (44257, 44267), True, 'import matplotlib.pyplot as plt\n'), ((50882, 51163), 'biosteam.plots.plot_unit_groups_across_coordinate', 'bst.plots.plot_unit_groups_across_coordinate', (['oc.set_cane_oil_content', '[5, 7.5, 10, 12.5]', '"""Feedstock oil content [dry wt. %]"""', 'oc.unit_groups'], {'colors': '[area_colors[i.name].RGBn for i in oc.unit_groups]', 'hatches': '[area_hatches[i.name] for i in oc.unit_groups]'}), "(oc.set_cane_oil_content, [5, \n 7.5, 10, 12.5], 'Feedstock oil content [dry wt. 
%]', oc.unit_groups,\n colors=[area_colors[i.name].RGBn for i in oc.unit_groups], hatches=[\n area_hatches[i.name] for i in oc.unit_groups], **kwargs)\n", (50926, 51163), True, 'import biosteam as bst\n'), ((52783, 52809), 'biorefineries.oilcane.set_cane_oil_content', 'oc.set_cane_oil_content', (['i'], {}), '(i)\n', (52806, 52809), True, 'import biorefineries.oilcane as oc\n'), ((52818, 52835), 'biorefineries.oilcane.sys.simulate', 'oc.sys.simulate', ([], {}), '()\n', (52833, 52835), True, 'import biorefineries.oilcane as oc\n'), ((10264, 10379), 'biosteam.utils.CABBI_colors.wheel', 'CABBI_colors.wheel', (["['blue_light', 'green_dirty', 'orange', 'green', 'orange', 'orange_hatch',\n 'grey', 'brown']"], {}), "(['blue_light', 'green_dirty', 'orange', 'green',\n 'orange', 'orange_hatch', 'grey', 'brown'])\n", (10282, 10379), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((11222, 11289), 'os.path.join', 'os.path.join', (['images_folder', 'f"""montecarlo_feedstock_comparison.{i}"""'], {}), "(images_folder, f'montecarlo_feedstock_comparison.{i}')\n", (11234, 11289), False, 'import os\n'), ((11302, 11337), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (11313, 11337), True, 'import matplotlib.pyplot as plt\n'), ((12478, 12576), 'biosteam.utils.CABBI_colors.wheel', 'CABBI_colors.wheel', (["['blue_light', 'green_dirty', 'orange', 'green', 'orange', 'orange_hatch']"], {}), "(['blue_light', 'green_dirty', 'orange', 'green',\n 'orange', 'orange_hatch'])\n", (12496, 12576), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((13104, 13175), 'os.path.join', 'os.path.join', (['images_folder', 'f"""montecarlo_configuration_comparison.{i}"""'], {}), "(images_folder, f'montecarlo_configuration_comparison.{i}')\n", (13116, 13175), False, 'import os\n'), ((13188, 13223), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (13199, 
13223), True, 'import matplotlib.pyplot as plt\n'), ((14096, 14285), 'matplotlib.pyplot.text', 'plt.text', (['(0.5)', '(ylb - (yub - ylb) * 0.25)', '"""Impact of integrating oilsorghum\nat an agile oilcane biorefinery"""'], {'horizontalalignment': '"""center"""', 'verticalalignment': '"""center"""', 'fontsize': '(8)'}), '(0.5, ylb - (yub - ylb) * 0.25,\n """Impact of integrating oilsorghum\nat an agile oilcane biorefinery""",\n horizontalalignment=\'center\', verticalalignment=\'center\', fontsize=8)\n', (14104, 14285), True, 'import matplotlib.pyplot as plt\n'), ((14503, 14566), 'os.path.join', 'os.path.join', (['images_folder', 'f"""montecarlo_agile_comparison.{i}"""'], {}), "(images_folder, f'montecarlo_agile_comparison.{i}')\n", (14515, 14566), False, 'import os\n'), ((14579, 14614), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (14590, 14614), True, 'import matplotlib.pyplot as plt\n'), ((15342, 15441), 'biosteam.utils.CABBI_colors.wheel', 'CABBI_colors.wheel', (["['blue_light', 'green_dirty', 'orange', 'green', 'grey', 'brown', 'orange']"], {}), "(['blue_light', 'green_dirty', 'orange', 'green', 'grey',\n 'brown', 'orange'])\n", (15360, 15441), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((16524, 16661), 'biosteam.utils.CABBI_colors.wheel', 'CABBI_colors.wheel', (["['blue_light', 'green_dirty', 'orange', 'green', 'grey', 'brown', 'orange',\n 'orange', 'green', 'orange', 'green']"], {}), "(['blue_light', 'green_dirty', 'orange', 'green', 'grey',\n 'brown', 'orange', 'orange', 'green', 'orange', 'green'])\n", (16542, 16661), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((23273, 23298), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""RdYlGn"""'], {}), "('RdYlGn')\n", (23288, 23298), True, 'import matplotlib.pyplot as plt\n'), ((28028, 28039), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (28035, 28039), True, 'import matplotlib.pyplot as 
plt\n'), ((28178, 28204), 'biosteam.plots.plot_quadrants', 'bst.plots.plot_quadrants', ([], {}), '()\n', (28202, 28204), True, 'import biosteam as bst\n'), ((28228, 28238), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (28236, 28238), True, 'import matplotlib.pyplot as plt\n'), ((28262, 28272), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (28270, 28272), True, 'import matplotlib.pyplot as plt\n'), ((31307, 31318), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (31314, 31318), True, 'import matplotlib.pyplot as plt\n'), ((31342, 31352), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (31350, 31352), True, 'import matplotlib.pyplot as plt\n'), ((31376, 31386), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (31384, 31386), True, 'import matplotlib.pyplot as plt\n'), ((31399, 31579), 'matplotlib.pyplot.text', 'plt.text', (['((xlb + xub) * 0.5)', '(ylb + (yub - ylb) * 1.17)', 'letter'], {'color': 'letter_color', 'horizontalalignment': '"""center"""', 'verticalalignment': '"""center"""', 'fontsize': '(12)', 'fontweight': '"""bold"""'}), "((xlb + xub) * 0.5, ylb + (yub - ylb) * 1.17, letter, color=\n letter_color, horizontalalignment='center', verticalalignment='center',\n fontsize=12, fontweight='bold')\n", (31407, 31579), True, 'import matplotlib.pyplot as plt\n'), ((43166, 43198), 'biosteam.plots.plot_vertical_line', 'bst.plots.plot_vertical_line', (['x0'], {}), '(x0)\n', (43194, 43198), True, 'import biosteam as bst\n'), ((43425, 43436), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (43432, 43436), True, 'import matplotlib.pyplot as plt\n'), ((43481, 43503), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabels[i]'], {}), '(ylabels[i])\n', (43491, 43503), True, 'import matplotlib.pyplot as plt\n'), ((43688, 43780), 'biosteam.plots.plot_horizontal_line', 'bst.plots.plot_horizontal_line', (['(0)'], {'color': 'CABBI_colors.black.RGBn', 'lw': '(0.8)', 'linestyle': '"""--"""'}), "(0, 
color=CABBI_colors.black.RGBn, lw=0.8,\n linestyle='--')\n", (43718, 43780), True, 'import biosteam as bst\n'), ((49675, 49727), 'pandas.read_excel', 'pd.read_excel', (['file'], {'header': '[0, 1]', 'index_col': '[0, 1]'}), '(file, header=[0, 1], index_col=[0, 1])\n', (49688, 49727), True, 'import pandas as pd\n'), ((4068, 4090), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""USD/MT"""'], {}), "('USD/MT')\n", (4080, 4090), False, 'from thermosteam.units_of_measure import format_units\n'), ((4128, 4152), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""10^6*USD"""'], {}), "('10^6*USD')\n", (4140, 4152), False, 'from thermosteam.units_of_measure import format_units\n'), ((4211, 4231), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""L/MT"""'], {}), "('L/MT')\n", (4223, 4231), False, 'from thermosteam.units_of_measure import format_units\n'), ((4315, 4338), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""kWhr/MT"""'], {}), "('kWhr/MT')\n", (4327, 4338), False, 'from thermosteam.units_of_measure import format_units\n'), ((4421, 4443), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""m^3/MT"""'], {}), "('m^3/MT')\n", (4433, 4443), False, 'from thermosteam.units_of_measure import format_units\n'), ((22155, 22177), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""USD/MT"""'], {}), "('USD/MT')\n", (22167, 22177), False, 'from thermosteam.units_of_measure import format_units\n'), ((22199, 22223), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""10^6*USD"""'], {}), "('10^6*USD')\n", (22211, 22223), False, 'from thermosteam.units_of_measure import format_units\n'), ((22260, 22280), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""L/MT"""'], {}), "('L/MT')\n", (22272, 22280), False, 'from thermosteam.units_of_measure import format_units\n'), ((22319, 22339), 'thermosteam.units_of_measure.format_units', 'format_units', 
(['"""L/MT"""'], {}), "('L/MT')\n", (22331, 22339), False, 'from thermosteam.units_of_measure import format_units\n'), ((22369, 22392), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""kWhr/MT"""'], {}), "('kWhr/MT')\n", (22381, 22392), False, 'from thermosteam.units_of_measure import format_units\n'), ((22419, 22441), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""m^3/MT"""'], {}), "('m^3/MT')\n", (22431, 22441), False, 'from thermosteam.units_of_measure import format_units\n'), ((41430, 41446), 'numpy.zeros', 'np.zeros', (['[1, 1]'], {}), '([1, 1])\n', (41438, 41446), True, 'import numpy as np\n'), ((49822, 49835), 'warnings.warn', 'warn', (['warning'], {}), '(warning)\n', (49826, 49835), False, 'from warnings import warn\n'), ((4885, 4907), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""USD/MT"""'], {}), "('USD/MT')\n", (4897, 4907), False, 'from thermosteam.units_of_measure import format_units\n'), ((4959, 4983), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""10^6*USD"""'], {}), "('10^6*USD')\n", (4971, 4983), False, 'from thermosteam.units_of_measure import format_units\n'), ((5056, 5076), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""L/MT"""'], {}), "('L/MT')\n", (5068, 5076), False, 'from thermosteam.units_of_measure import format_units\n'), ((5174, 5197), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""kWhr/MT"""'], {}), "('kWhr/MT')\n", (5186, 5197), False, 'from thermosteam.units_of_measure import format_units\n'), ((5294, 5316), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""m^3/MT"""'], {}), "('m^3/MT')\n", (5306, 5316), False, 'from thermosteam.units_of_measure import format_units\n'), ((5945, 5967), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""USD/MT"""'], {}), "('USD/MT')\n", (5957, 5967), False, 'from thermosteam.units_of_measure import format_units\n'), ((6086, 6110), 
'thermosteam.units_of_measure.format_units', 'format_units', (['"""10^6*USD"""'], {}), "('10^6*USD')\n", (6098, 6110), False, 'from thermosteam.units_of_measure import format_units\n'), ((6287, 6307), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""L/MT"""'], {}), "('L/MT')\n", (6299, 6307), False, 'from thermosteam.units_of_measure import format_units\n'), ((6462, 6485), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""kWhr/MT"""'], {}), "('kWhr/MT')\n", (6474, 6485), False, 'from thermosteam.units_of_measure import format_units\n'), ((6643, 6665), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""m^3/MT"""'], {}), "('m^3/MT')\n", (6655, 6665), False, 'from thermosteam.units_of_measure import format_units\n'), ((25271, 25298), 'biosteam.utils.CABBI_colors.teal.shade', 'CABBI_colors.teal.shade', (['(50)'], {}), '(50)\n', (25294, 25298), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((25680, 25707), 'biosteam.utils.CABBI_colors.grey.shade', 'CABBI_colors.grey.shade', (['(75)'], {}), '(75)\n', (25703, 25707), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((26082, 26109), 'biosteam.utils.CABBI_colors.grey.shade', 'CABBI_colors.grey.shade', (['(75)'], {}), '(75)\n', (26105, 26109), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((26497, 26517), 'biosteam.utils.colors.red.shade', 'colors.red.shade', (['(50)'], {}), '(50)\n', (26513, 26517), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((5880, 5903), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""MFPP/OC"""'], {}), "('MFPP/OC')\n", (5892, 5903), False, 'from thermosteam.units_of_measure import format_units\n'), ((6022, 6044), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""TCI/OC"""'], {}), "('TCI/OC')\n", (6034, 6044), False, 'from thermosteam.units_of_measure import format_units\n'), ((6221, 6245), 'thermosteam.units_of_measure.format_units', 'format_units', 
(['"""Prod./OC"""'], {}), "('Prod./OC')\n", (6233, 6245), False, 'from thermosteam.units_of_measure import format_units\n'), ((6399, 6420), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""EP/OC"""'], {}), "('EP/OC')\n", (6411, 6420), False, 'from thermosteam.units_of_measure import format_units\n'), ((6579, 6601), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""NGC/OC"""'], {}), "('NGC/OC')\n", (6591, 6601), False, 'from thermosteam.units_of_measure import format_units\n'), ((35513, 35547), 'biosteam.utils.CABBI_colors.green_dirty.shade', 'CABBI_colors.green_dirty.shade', (['(60)'], {}), '(60)\n', (35543, 35547), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((42869, 42888), 'numpy.percentile', 'np.percentile', (['x', '(5)'], {}), '(x, 5)\n', (42882, 42888), True, 'import numpy as np\n'), ((42922, 42942), 'numpy.percentile', 'np.percentile', (['x', '(95)'], {}), '(x, 95)\n', (42935, 42942), True, 'import numpy as np\n'), ((43236, 43263), 'biosteam.utils.colors.purple_tint.tint', 'colors.purple_tint.tint', (['(60)'], {}), '(60)\n', (43259, 43263), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((24435, 24464), 'biosteam.utils.CABBI_colors.orange.shade', 'CABBI_colors.orange.shade', (['(60)'], {}), '(60)\n', (24460, 24464), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((24547, 24574), 'biosteam.utils.CABBI_colors.blue.shade', 'CABBI_colors.blue.shade', (['(60)'], {}), '(60)\n', (24570, 24574), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((28982, 29009), 'biosteam.utils.CABBI_colors.teal.shade', 'CABBI_colors.teal.shade', (['(50)'], {}), '(50)\n', (29005, 29009), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((29602, 29629), 'biosteam.utils.CABBI_colors.grey.shade', 'CABBI_colors.grey.shade', (['(75)'], {}), '(75)\n', (29625, 29629), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((30216, 30243), 'biosteam.utils.CABBI_colors.grey.shade', 
'CABBI_colors.grey.shade', (['(75)'], {}), '(75)\n', (30239, 30243), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((30836, 30856), 'biosteam.utils.colors.red.shade', 'colors.red.shade', (['(50)'], {}), '(50)\n', (30852, 30856), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((27804, 27831), 'biosteam.utils.CABBI_colors.blue.shade', 'CABBI_colors.blue.shade', (['(60)'], {}), '(60)\n', (27827, 27831), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((27690, 27719), 'biosteam.utils.CABBI_colors.orange.shade', 'CABBI_colors.orange.shade', (['(60)'], {}), '(60)\n', (27715, 27719), False, 'from biosteam.utils import CABBI_colors, colors\n')] |
"""
TODO description.
Author: <NAME>
Autonomous Systems Lab (ASL), Stanford
(GitHub: spenrich)
"""
if __name__ == "__main__":
import pickle
import jax
import jax.numpy as jnp
from jax.experimental.ode import odeint
from utils import spline, random_ragged_spline
from dynamics import prior, plant, disturbance
# Seed random numbers
seed = 0
key = jax.random.PRNGKey(seed)
# Generate smooth trajectories
num_traj = 500
T = 30
num_knots = 6
poly_orders = (9, 9, 6)
deriv_orders = (4, 4, 2)
min_step = jnp.array([-2., -2., -jnp.pi/6])
max_step = jnp.array([2., 2., jnp.pi/6])
min_knot = jnp.array([-jnp.inf, -jnp.inf, -jnp.pi/3])
max_knot = jnp.array([jnp.inf, jnp.inf, jnp.pi/3])
key, *subkeys = jax.random.split(key, 1 + num_traj)
subkeys = jnp.vstack(subkeys)
in_axes = (0, None, None, None, None, None, None, None, None)
t_knots, knots, coefs = jax.vmap(random_ragged_spline, in_axes)(
subkeys, T, num_knots, poly_orders, deriv_orders,
min_step, max_step, min_knot, max_knot
)
# x_coefs, y_coefs, ϕ_coefs = coefs
r_knots = jnp.dstack(knots)
# Sampled-time simulator
@jax.partial(jax.vmap, in_axes=(None, 0, 0, 0))
def simulate(ts, w, t_knots, coefs,
plant=plant, prior=prior, disturbance=disturbance):
"""TODO: docstring."""
# Construct spline reference trajectory
def reference(t):
x_coefs, y_coefs, ϕ_coefs = coefs
x = spline(t, t_knots, x_coefs)
y = spline(t, t_knots, y_coefs)
ϕ = spline(t, t_knots, ϕ_coefs)
ϕ = jnp.clip(ϕ, -jnp.pi/3, jnp.pi/3)
r = jnp.array([x, y, ϕ])
return r
# Required derivatives of the reference trajectory
def ref_derivatives(t):
ref_vel = jax.jacfwd(reference)
ref_acc = jax.jacfwd(ref_vel)
r = reference(t)
dr = ref_vel(t)
ddr = ref_acc(t)
return r, dr, ddr
# Feedback linearizing PD controller
def controller(q, dq, r, dr, ddr):
kp, kd = 10., 0.1
e, de = q - r, dq - dr
dv = ddr - kp*e - kd*de
H, C, g, B = prior(q, dq)
τ = H@dv + C@dq + g
u = jnp.linalg.solve(B, τ)
return u, τ
# Closed-loop ODE for `x = (q, dq)`, with a zero-order hold on
# the controller
def ode(x, t, u, w=w):
q, dq = x
f_ext = disturbance(q, dq, w)
ddq = plant(q, dq, u, f_ext)
dx = (dq, ddq)
return dx
# Simulation loop
def loop(carry, input_slice):
t_prev, q_prev, dq_prev, u_prev = carry
t = input_slice
qs, dqs = odeint(ode, (q_prev, dq_prev), jnp.array([t_prev, t]),
u_prev)
q, dq = qs[-1], dqs[-1]
r, dr, ddr = ref_derivatives(t)
u, τ = controller(q, dq, r, dr, ddr)
carry = (t, q, dq, u)
output_slice = (q, dq, u, τ, r, dr)
return carry, output_slice
# Initial conditions
t0 = ts[0]
r0, dr0, ddr0 = ref_derivatives(t0)
q0, dq0 = r0, dr0
u0, τ0 = controller(q0, dq0, r0, dr0, ddr0)
# Run simulation loop
carry = (t0, q0, dq0, u0)
carry, output = jax.lax.scan(loop, carry, ts[1:])
q, dq, u, τ, r, dr = output
# Prepend initial conditions
q = jnp.vstack((q0, q))
dq = jnp.vstack((dq0, dq))
u = jnp.vstack((u0, u))
τ = jnp.vstack((τ0, τ))
r = jnp.vstack((r0, r))
dr = jnp.vstack((dr0, dr))
return q, dq, u, τ, r, dr
# Sample wind velocities from the training distribution
w_min = 0. # minimum wind velocity in inertial `x`-direction
w_max = 6. # maximum wind velocity in inertial `x`-direction
a = 5. # shape parameter `a` for beta distribution
b = 9. # shape parameter `b` for beta distribution
key, subkey = jax.random.split(key, 2)
w = w_min + (w_max - w_min)*jax.random.beta(subkey, a, b, (num_traj,))
# Simulate tracking for each `w`
dt = 0.01
t = jnp.arange(0, T + dt, dt) # same times for each trajectory
q, dq, u, τ, r, dr = simulate(t, w, t_knots, coefs)
data = {
'seed': seed, 'prng_key': key,
't': t, 'q': q, 'dq': dq,
'u': u, 'r': r, 'dr': dr,
't_knots': t_knots, 'r_knots': r_knots,
'w': w, 'w_min': w_min, 'w_max': w_max,
'beta_params': (a, b),
}
with open('training_data.pkl', 'wb') as file:
pickle.dump(data, file)
| [
"jax.random.PRNGKey",
"jax.numpy.dstack",
"pickle.dump",
"utils.spline",
"jax.jacfwd",
"dynamics.prior",
"jax.partial",
"jax.numpy.arange",
"dynamics.disturbance",
"dynamics.plant",
"jax.random.beta",
"jax.numpy.array",
"jax.numpy.vstack",
"jax.lax.scan",
"jax.numpy.clip",
"jax.vmap",
... | [((401, 425), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['seed'], {}), '(seed)\n', (419, 425), False, 'import jax\n'), ((582, 618), 'jax.numpy.array', 'jnp.array', (['[-2.0, -2.0, -jnp.pi / 6]'], {}), '([-2.0, -2.0, -jnp.pi / 6])\n', (591, 618), True, 'import jax.numpy as jnp\n'), ((630, 663), 'jax.numpy.array', 'jnp.array', (['[2.0, 2.0, jnp.pi / 6]'], {}), '([2.0, 2.0, jnp.pi / 6])\n', (639, 663), True, 'import jax.numpy as jnp\n'), ((675, 719), 'jax.numpy.array', 'jnp.array', (['[-jnp.inf, -jnp.inf, -jnp.pi / 3]'], {}), '([-jnp.inf, -jnp.inf, -jnp.pi / 3])\n', (684, 719), True, 'import jax.numpy as jnp\n'), ((733, 774), 'jax.numpy.array', 'jnp.array', (['[jnp.inf, jnp.inf, jnp.pi / 3]'], {}), '([jnp.inf, jnp.inf, jnp.pi / 3])\n', (742, 774), True, 'import jax.numpy as jnp\n'), ((794, 829), 'jax.random.split', 'jax.random.split', (['key', '(1 + num_traj)'], {}), '(key, 1 + num_traj)\n', (810, 829), False, 'import jax\n'), ((844, 863), 'jax.numpy.vstack', 'jnp.vstack', (['subkeys'], {}), '(subkeys)\n', (854, 863), True, 'import jax.numpy as jnp\n'), ((1164, 1181), 'jax.numpy.dstack', 'jnp.dstack', (['knots'], {}), '(knots)\n', (1174, 1181), True, 'import jax.numpy as jnp\n'), ((1217, 1263), 'jax.partial', 'jax.partial', (['jax.vmap'], {'in_axes': '(None, 0, 0, 0)'}), '(jax.vmap, in_axes=(None, 0, 0, 0))\n', (1228, 1263), False, 'import jax\n'), ((4103, 4127), 'jax.random.split', 'jax.random.split', (['key', '(2)'], {}), '(key, 2)\n', (4119, 4127), False, 'import jax\n'), ((4263, 4288), 'jax.numpy.arange', 'jnp.arange', (['(0)', '(T + dt)', 'dt'], {}), '(0, T + dt, dt)\n', (4273, 4288), True, 'import jax.numpy as jnp\n'), ((958, 997), 'jax.vmap', 'jax.vmap', (['random_ragged_spline', 'in_axes'], {}), '(random_ragged_spline, in_axes)\n', (966, 997), False, 'import jax\n'), ((3431, 3464), 'jax.lax.scan', 'jax.lax.scan', (['loop', 'carry', 'ts[1:]'], {}), '(loop, carry, ts[1:])\n', (3443, 3464), False, 'import jax\n'), ((3551, 3570), 'jax.numpy.vstack', 
'jnp.vstack', (['(q0, q)'], {}), '((q0, q))\n', (3561, 3570), True, 'import jax.numpy as jnp\n'), ((3584, 3605), 'jax.numpy.vstack', 'jnp.vstack', (['(dq0, dq)'], {}), '((dq0, dq))\n', (3594, 3605), True, 'import jax.numpy as jnp\n'), ((3618, 3637), 'jax.numpy.vstack', 'jnp.vstack', (['(u0, u)'], {}), '((u0, u))\n', (3628, 3637), True, 'import jax.numpy as jnp\n'), ((3651, 3670), 'jax.numpy.vstack', 'jnp.vstack', (['(τ0, τ)'], {}), '((τ0, τ))\n', (3661, 3670), True, 'import jax.numpy as jnp\n'), ((3682, 3701), 'jax.numpy.vstack', 'jnp.vstack', (['(r0, r)'], {}), '((r0, r))\n', (3692, 3701), True, 'import jax.numpy as jnp\n'), ((3715, 3736), 'jax.numpy.vstack', 'jnp.vstack', (['(dr0, dr)'], {}), '((dr0, dr))\n', (3725, 3736), True, 'import jax.numpy as jnp\n'), ((4692, 4715), 'pickle.dump', 'pickle.dump', (['data', 'file'], {}), '(data, file)\n', (4703, 4715), False, 'import pickle\n'), ((1540, 1567), 'utils.spline', 'spline', (['t', 't_knots', 'x_coefs'], {}), '(t, t_knots, x_coefs)\n', (1546, 1567), False, 'from utils import spline, random_ragged_spline\n'), ((1584, 1611), 'utils.spline', 'spline', (['t', 't_knots', 'y_coefs'], {}), '(t, t_knots, y_coefs)\n', (1590, 1611), False, 'from utils import spline, random_ragged_spline\n'), ((1629, 1656), 'utils.spline', 'spline', (['t', 't_knots', 'φ_coefs'], {}), '(t, t_knots, φ_coefs)\n', (1635, 1656), False, 'from utils import spline, random_ragged_spline\n'), ((1673, 1709), 'jax.numpy.clip', 'jnp.clip', (['φ', '(-jnp.pi / 3)', '(jnp.pi / 3)'], {}), '(φ, -jnp.pi / 3, jnp.pi / 3)\n', (1681, 1709), True, 'import jax.numpy as jnp\n'), ((1721, 1741), 'jax.numpy.array', 'jnp.array', (['[x, y, φ]'], {}), '([x, y, φ])\n', (1730, 1741), True, 'import jax.numpy as jnp\n'), ((1877, 1898), 'jax.jacfwd', 'jax.jacfwd', (['reference'], {}), '(reference)\n', (1887, 1898), False, 'import jax\n'), ((1921, 1940), 'jax.jacfwd', 'jax.jacfwd', (['ref_vel'], {}), '(ref_vel)\n', (1931, 1940), False, 'import jax\n'), ((2272, 2284), 
'dynamics.prior', 'prior', (['q', 'dq'], {}), '(q, dq)\n', (2277, 2284), False, 'from dynamics import prior, plant, disturbance\n'), ((2333, 2355), 'jax.numpy.linalg.solve', 'jnp.linalg.solve', (['B', 'τ'], {}), '(B, τ)\n', (2349, 2355), True, 'import jax.numpy as jnp\n'), ((2550, 2571), 'dynamics.disturbance', 'disturbance', (['q', 'dq', 'w'], {}), '(q, dq, w)\n', (2561, 2571), False, 'from dynamics import prior, plant, disturbance\n'), ((2590, 2612), 'dynamics.plant', 'plant', (['q', 'dq', 'u', 'f_ext'], {}), '(q, dq, u, f_ext)\n', (2595, 2612), False, 'from dynamics import prior, plant, disturbance\n'), ((4160, 4202), 'jax.random.beta', 'jax.random.beta', (['subkey', 'a', 'b', '(num_traj,)'], {}), '(subkey, a, b, (num_traj,))\n', (4175, 4202), False, 'import jax\n'), ((2860, 2882), 'jax.numpy.array', 'jnp.array', (['[t_prev, t]'], {}), '([t_prev, t])\n', (2869, 2882), True, 'import jax.numpy as jnp\n')] |
# Generated by Django 3.1.12 on 2021-07-28 21:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add database indexes on the `report` model.

    Creates `reports_user_index` on the `user` field and the composite
    `reports_project_index` on (`user`, `project`).
    """

    dependencies = [
        ('reports', '0016_auto_20210727_2156'),
    ]

    operations = [
        # Index on Report.user.
        migrations.AddIndex(
            model_name='report',
            index=models.Index(fields=['user'], name='reports_user_index'),
        ),
        # Composite index on Report.(user, project).
        migrations.AddIndex(
            model_name='report',
            index=models.Index(fields=['user', 'project'], name='reports_project_index'),
        ),
    ]
| [
"django.db.models.Index"
] | [((308, 364), 'django.db.models.Index', 'models.Index', ([], {'fields': "['user']", 'name': '"""reports_user_index"""'}), "(fields=['user'], name='reports_user_index')\n", (320, 364), False, 'from django.db import migrations, models\n'), ((457, 527), 'django.db.models.Index', 'models.Index', ([], {'fields': "['user', 'project']", 'name': '"""reports_project_index"""'}), "(fields=['user', 'project'], name='reports_project_index')\n", (469, 527), False, 'from django.db import migrations, models\n')] |
#--------------------------------------------------
# mqtt_control.py
# MQTT_Control is a database model to control subscriptions
# and publications
# introduced in u8
# ToraNova
#--------------------------------------------------
from pkg.resrc import res_import as r
from pkg.system.database import dbms
Base = dbms.msgapi.base
class MQTT_Msg(Base):
    """Database model for one received MQTT message on the message stack.

    Each row stores the topic and payload of a message together with
    bookkeeping flags describing whether (and how successfully) the
    message has been processed, plus insertion/deletion timestamps.
    """

    # PERMA : DO NOT CHANGE ANYTHING HERE UNLESS NECESSARY
    __tablename__ = "MQTT_Msgs"  # try to use plurals here (e.g. cars)
    id = r.Column(r.Integer, primary_key=True)

    def __repr__(self):
        return '<%r %r>' % (self.__tablename__, self.id)

    # ---------------------------------------------------------
    # EDITABLE ZONE
    # ---------------------------------------------------------
    # Columns
    # the string topic the message was received on
    topic = r.Column(r.String(r.lim.MAX_MQTT_TOPIC_SIZE), nullable=False)
    tlink = r.Column(r.Integer, nullable=True)   # links to one of our subscribed topics
    msg = r.Column(r.String(r.lim.MAX_MQTT_MSGCT_SIZE), nullable=False)
    timev0 = r.Column(r.DateTime, nullable=False)  # insertion time
    timed0 = r.Column(r.DateTime, nullable=True)   # deletion time (msg to be kept until)
    pflag0 = r.Column(r.Boolean, nullable=False)   # has the msg been processed?
    pflag1 = r.Column(r.Boolean, nullable=False)   # was it processed successfully?
    delonproc = r.Column(r.Boolean, nullable=False)  # delete this message on process?

    # r-listing / r-linking configuration: header -> row data.
    # A "__link__/<local fk>/<foreign table>/<foreign pk>:<foreign field>"
    # value resolves the display through a foreign table; e.g. `tlink`
    # is resolved to MQTT_Subs.description via MQTT_Subs.id.
    # A "__time__/<strftime format>/<field>" value formats a timestamp.
    rlist = r.OrderedDict([
        ("Topic", "topic"),
        ("Linked (description)", "__link__/tlink/MQTT_Subs/id:description"),
        ("Content", "msg"),
        ("Received", "__time__/%b-%d-%Y %H:%M:%S/timev0"),
        ("Delete on", "__time__/%b-%d-%Y %H:%M:%S/timed0"),
        ("Processed?", "pflag0"),
        ("Process OK?", "pflag1")
    ])  # header, row data

    # primary key used for r-listing/adding and mod, plus display text
    rlist_priKey = "id"
    rlist_dis = "MQTT Message Stack"  # display for r routes

    def get_onrecv(self):
        """Return the `onrecv` handler of the linked subscription, or None."""
        from pkg.msgapi.mqtt.models import MQTT_Sub
        t = MQTT_Sub.query.filter(MQTT_Sub.id == self.tlink).first()
        if t is not None:
            return t.onrecv

    def __init__(self, insert_list):
        """Create a message record.

        Required keys in `insert_list`:
            tlink  -- id of the MQTT subscription record (may be null)
            topic  -- the topic string (in case linking failed)
            msg    -- the message content
            pflag0 -- processed flag
            pflag1 -- processed-successfully flag
        """
        from pkg.msgapi.mqtt.models import MQTT_Sub
        from pkg.system.servlog import srvlog
        import datetime
        from datetime import timedelta

        # find links
        self.tlink = r.checkNull(insert_list, "tlink")
        self.topic = insert_list["topic"]
        self.msg = insert_list["msg"]
        self.timev0 = datetime.datetime.now()
        self.pflag0 = insert_list["pflag0"]
        self.pflag1 = insert_list["pflag1"]

        submaster = MQTT_Sub.query.filter(MQTT_Sub.id == self.tlink).first()
        if submaster is not None:
            if submaster.stordur is None:
                self.timed0 = None  # store forever
            else:
                self.timed0 = self.timev0 + timedelta(seconds=submaster.stordur)
            self.delonproc = submaster.delonproc  # inherits from the topic master
        else:
            # BUGFIX: `" id=" + int(self.tlink)` raised TypeError
            # (str + int); convert with str() instead.
            srvlog["oper"].warning("MQTT message added to unknown link topic:" +
                                   self.topic + " id=" + str(self.tlink))
            # NOTE(review): DEF_MQTT_MSGST_DURA is assigned directly to a
            # DateTime column here -- it looks like it may need to be added
            # to `self.timev0` as a duration instead; confirm its type.
            self.timed0 = r.lim.DEF_MQTT_MSGST_DURA
            self.delonproc = True

    def default_add_action(self):
        """Run when the record is added via r-add."""
        # TODO add a MQTT restart function here
        pass

    def default_mod_action(self):
        """Run when the record is modified via r-mod."""
        pass

    def default_del_action(self):
        """Run when the record is deleted."""
        pass
######################################################################################################
| [
"pkg.resrc.res_import.checkNull",
"pkg.resrc.res_import.Column",
"pkg.resrc.res_import.OrderedDict",
"pkg.resrc.res_import.String",
"datetime.datetime.now",
"pkg.msgapi.mqtt.models.MQTT_Sub.query.filter",
"datetime.timedelta"
] | [((492, 529), 'pkg.resrc.res_import.Column', 'r.Column', (['r.Integer'], {'primary_key': '(True)'}), '(r.Integer, primary_key=True)\n', (500, 529), True, 'from pkg.resrc import res_import as r\n'), ((1078, 1112), 'pkg.resrc.res_import.Column', 'r.Column', (['r.Integer'], {'nullable': '(True)'}), '(r.Integer, nullable=True)\n', (1086, 1112), True, 'from pkg.resrc import res_import as r\n'), ((1236, 1272), 'pkg.resrc.res_import.Column', 'r.Column', (['r.DateTime'], {'nullable': '(False)'}), '(r.DateTime, nullable=False)\n', (1244, 1272), True, 'from pkg.resrc import res_import as r\n'), ((1302, 1337), 'pkg.resrc.res_import.Column', 'r.Column', (['r.DateTime'], {'nullable': '(True)'}), '(r.DateTime, nullable=True)\n', (1310, 1337), True, 'from pkg.resrc import res_import as r\n'), ((1389, 1424), 'pkg.resrc.res_import.Column', 'r.Column', (['r.Boolean'], {'nullable': '(False)'}), '(r.Boolean, nullable=False)\n', (1397, 1424), True, 'from pkg.resrc import res_import as r\n'), ((1484, 1519), 'pkg.resrc.res_import.Column', 'r.Column', (['r.Boolean'], {'nullable': '(False)'}), '(r.Boolean, nullable=False)\n', (1492, 1519), True, 'from pkg.resrc import res_import as r\n'), ((1595, 1630), 'pkg.resrc.res_import.Column', 'r.Column', (['r.Boolean'], {'nullable': '(False)'}), '(r.Boolean, nullable=False)\n', (1603, 1630), True, 'from pkg.resrc import res_import as r\n'), ((1974, 2272), 'pkg.resrc.res_import.OrderedDict', 'r.OrderedDict', (["[('Topic', 'topic'), ('Linked (description)',\n '__link__/tlink/MQTT_Subs/id:description'), ('Content', 'msg'), (\n 'Received', '__time__/%b-%d-%Y %H:%M:%S/timev0'), ('Delete on',\n '__time__/%b-%d-%Y %H:%M:%S/timed0'), ('Processed?', 'pflag0'), (\n 'Process OK?', 'pflag1')]"], {}), "([('Topic', 'topic'), ('Linked (description)',\n '__link__/tlink/MQTT_Subs/id:description'), ('Content', 'msg'), (\n 'Received', '__time__/%b-%d-%Y %H:%M:%S/timev0'), ('Delete on',\n '__time__/%b-%d-%Y %H:%M:%S/timed0'), ('Processed?', 'pflag0'), (\n 'Process 
OK?', 'pflag1')])\n", (1987, 2272), True, 'from pkg.resrc import res_import as r\n'), ((1013, 1048), 'pkg.resrc.res_import.String', 'r.String', (['r.lim.MAX_MQTT_TOPIC_SIZE'], {}), '(r.lim.MAX_MQTT_TOPIC_SIZE)\n', (1021, 1048), True, 'from pkg.resrc import res_import as r\n'), ((1170, 1205), 'pkg.resrc.res_import.String', 'r.String', (['r.lim.MAX_MQTT_MSGCT_SIZE'], {}), '(r.lim.MAX_MQTT_MSGCT_SIZE)\n', (1178, 1205), True, 'from pkg.resrc import res_import as r\n'), ((4020, 4053), 'pkg.resrc.res_import.checkNull', 'r.checkNull', (['insert_list', '"""tlink"""'], {}), "(insert_list, 'tlink')\n", (4031, 4053), True, 'from pkg.resrc import res_import as r\n'), ((4157, 4180), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4178, 4180), False, 'import datetime\n'), ((3329, 3377), 'pkg.msgapi.mqtt.models.MQTT_Sub.query.filter', 'MQTT_Sub.query.filter', (['(MQTT_Sub.id == self.tlink)'], {}), '(MQTT_Sub.id == self.tlink)\n', (3350, 3377), False, 'from pkg.msgapi.mqtt.models import MQTT_Sub\n'), ((4289, 4337), 'pkg.msgapi.mqtt.models.MQTT_Sub.query.filter', 'MQTT_Sub.query.filter', (['(MQTT_Sub.id == self.tlink)'], {}), '(MQTT_Sub.id == self.tlink)\n', (4310, 4337), False, 'from pkg.msgapi.mqtt.models import MQTT_Sub\n'), ((4539, 4575), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'submaster.stordur'}), '(seconds=submaster.stordur)\n', (4548, 4575), False, 'from datetime import timedelta\n')] |
# SPDX-FileCopyrightText: 2021 <NAME> for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import usb_cdc
import rotaryio
import board
import digitalio
# Stream rotary-encoder positions and button clicks over the USB CDC
# data serial port as comma-separated tokens.
serial = usb_cdc.data

encoder = rotaryio.IncrementalEncoder(board.ROTA, board.ROTB)
button = digitalio.DigitalInOut(board.SWITCH)
button.switch_to_input(pull=digitalio.Pull.UP)

prev_position = None   # last position we reported (None until first loop)
pressed = False        # True while the switch is held down

while True:
    position = encoder.position
    # Report the position whenever it changes (and once at startup).
    if prev_position is None or position != prev_position:
        serial.write(bytes(str(position) + ",", "utf-8"))
        prev_position = position
    print(button.value)
    # Switch is active-low: value False means pressed.
    if not button.value and not pressed:
        pressed = True
    # Emit one "click" token when the switch is released.
    if button.value and pressed:
        serial.write(bytes("click,", "utf-8"))
        pressed = False
| [
"rotaryio.IncrementalEncoder",
"digitalio.DigitalInOut"
] | [((190, 241), 'rotaryio.IncrementalEncoder', 'rotaryio.IncrementalEncoder', (['board.ROTA', 'board.ROTB'], {}), '(board.ROTA, board.ROTB)\n', (217, 241), False, 'import rotaryio\n'), ((251, 287), 'digitalio.DigitalInOut', 'digitalio.DigitalInOut', (['board.SWITCH'], {}), '(board.SWITCH)\n', (273, 287), False, 'import digitalio\n')] |
import numpy as np
from ..layers.Layer import LayerTrainable
class LayeredModel(object):
def __init__(self, layers):
"""
layers : a list of layers. Treated as a feed-forward model
"""
assert len(layers) > 0, "Model layers must be non-empty"
# check that the output of each layer is the same size as the input of
# the next layer
#for l1, l2 in zip(layers[:-1], layers[1:]):
# print(l1.output_size, l2.input_size)
for l1, l2 in zip(layers[:-1], layers[1:]):
#print(l1,l2)
#print(l1.output_size,l2.input_size)
assert l1.output_size == l2.input_size, "layers do not match input to output in the model"
self.layers = layers
def reset(self):
for l in self.layers:
l.reset()
def forward(self, x, end_layer=None):
"""
x : data to push through the network
end_layer : the layer to stop the forward movement of the data. Used for training. (default=None)
"""
x = x.squeeze()
assert (self.layers[0].input_size == 1 and x.shape == ()) or len(x) == self.layers[0].input_size, "unexpected input dimensionality (check bias)"
# if an end layer has not been named, feedforward the entire model
if end_layer is None:
f_layers = self.layers
else:
f_layers = self.layers[:end_layer]
# for l in f_layers:
# x = np.array(l.forward(x))
for l in f_layers:
#print(l.info())
x = l.forward(x)
return x
def train(self, X, y, warmup_timesteps=100, data_repeats=1):
"""
x : input data to train on
y : output data to train on
warmup_timesteps : number of timesteps to run the data before training (default=100)
"""
assert isinstance(self.layers[-1], LayerTrainable), "This model cannot be trained because the final layer of type {} is not trainable".format(type(self.layers[-1]))
# TODO: for now we assume ONLY the last layer can be trained
# warmup stage
# for x in X[:warmup_timesteps]:
# # some function that allows us to display
# self.display()
# _ = self.forward(x, len(self.layers)-1)
# # training stage
# y_forward = np.zeros((np.shape(X[warmup_timesteps:])[0],
# self.layers[-1].input_size))
# for idx, x in enumerate(X[warmup_timesteps:]):
# # some function that allows us to display
# self.display()
# y_p = self.forward(x, len(self.layers)-1)
# y_forward[idx, :] = y_p
# y_nonwarmup = y[warmup_timesteps:]
y_forward = np.zeros((np.shape(X)[0] - data_repeats*warmup_timesteps,
self.layers[-1].input_size))
y_nonwarmup = np.zeros((np.shape(y)[0] - data_repeats*warmup_timesteps,
np.shape(y)[1]))
y_idx = 0
data_rate = np.shape(X)[0] / data_repeats
# print(data_rate)
# print(X[:10])
# print(X[data_rate:(data_rate+10)])
for idx,x in enumerate(X):
# some function that allows us to display
self.display()
# if idx % data_rate == 0:
# print(x)
# self.reset()
if idx % data_rate < warmup_timesteps:
_ = self.forward(x, len(self.layers)-1)
else:
y_p = self.forward(x, len(self.layers)-1)
y_forward[y_idx, :] = y_p
y_nonwarmup[y_idx, :] = y[idx, :]
y_idx += 1
# training stage
# y_forward = np.zeros((np.shape(X[warmup_timesteps:])[0],
# self.layers[-1].input_size))
# for idx, x in enumerate(X[warmup_timesteps:]):
# # some function that allows us to display
# self.display()
# y_p = self.forward(x, len(self.layers)-1)
# y_forward[idx, :] = y_p
# y_nonwarmup = y[warmup_timesteps:]
self.layers[-1].train(y_forward, y_nonwarmup)
    def generate(self, x_data, count, reset_increment=-1, warmup_timesteps=0):
        """
        Given a single datapoint, the model will feed this back into itself
        to produce generative output data.
        x_data : data to generate from (the first data point will be used unless reset_increment != -1)
        count : number of times to run the generative process
        reset_increment : how often to feed the generator the 'real' data value (default=-1 <= no reset)
        warmup_timesteps : number of leading x_data points run through the model before generation starts (default=0)
        """
        # NOTE(review): y_outputs is a flat array of length `count`, so this
        # assumes the model produces scalar outputs — confirm for multi-dim.
        # y_outputs = []
        y_outputs = np.zeros(count)
        # x = np.array(x_data[0])
        x = x_data[0]
        # e runs from -warmup_timesteps so the first `warmup_timesteps`
        # iterations only prime the model state; outputs start at e == 0.
        for e in range(-warmup_timesteps, count, 1):
            # some function that allows us to display
            self.display()
            # if we enable reseting, feed the 'real' data in (e == 0) is for warm-up swap
            if e == 0 or (reset_increment != -1 and e % reset_increment == 0):
                assert e < len(x_data), "generating data is less than the specified count"
                x = x_data[e + warmup_timesteps]
            # forward generating without 'warmup'
            if e >= 0:
                x = self.forward(x)
                y_outputs[e] = x
                # Append a constant bias term before feeding the model's
                # own output back in as the next input.
                x = np.hstack((x, 1))
            # forward generating with 'warmup'
            else:
                _ = self.forward(x_data[e + warmup_timesteps])
        # return np.array(y_outputs).squeeze()
        return y_outputs.squeeze()
def get_output_size(self):
return self.layers[-1].output_size
def get_input_size(self):
return self.layers[0].input_size
    def display(self):
        """Visualization hook invoked once per timestep by train()/generate().

        Intentionally a no-op here; presumably meant to be overridden to
        render model state — confirm against subclasses.
        """
        pass
| [
"numpy.shape",
"numpy.zeros",
"numpy.hstack"
] | [((4802, 4817), 'numpy.zeros', 'np.zeros', (['count'], {}), '(count)\n', (4810, 4817), True, 'import numpy as np\n'), ((3096, 3107), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (3104, 3107), True, 'import numpy as np\n'), ((5482, 5499), 'numpy.hstack', 'np.hstack', (['(x, 1)'], {}), '((x, 1))\n', (5491, 5499), True, 'import numpy as np\n'), ((3041, 3052), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (3049, 3052), True, 'import numpy as np\n'), ((2821, 2832), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (2829, 2832), True, 'import numpy as np\n'), ((2961, 2972), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (2969, 2972), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
from flask import Flask, send_file, make_response, Response, g, request, stream_with_context
from io import BytesIO
import atexit
import errno
import os
import subprocess
import threading
# Video capture device handed to ffmpeg as its input.
INPUT = '/dev/video0'
# Path to an ffmpeg build with NVENC hardware-encoding support.
FFMPEG = "/home/test/ffmpeg-nvenc/ffmpeg"
app = Flask(__name__)
@app.route('/pic')
def pic():
    """Capture a single frame from INPUT and return it as a PNG image.

    On ffmpeg failure, returns a 500 response containing ffmpeg's stderr.
    """
    cmd = [FFMPEG, '-s', 'uhd2160', '-i', INPUT,
           '-vframes', '1', '-vcodec', 'png', '-f', 'image2pipe', '-']
    app.logger.debug('exec: {}'.format(' '.join(cmd)))
    p = subprocess.Popen(cmd, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
    stdout, stderr = p.communicate()
    # communicate() already waits for and reaps the process, so the extra
    # p.wait() the original called was redundant; use returncode directly.
    if p.returncode == 0:
        return send_file(BytesIO(stdout), mimetype="image/png")
    else:
        return make_response("<pre>{}</pre>".format(stderr.decode('utf-8', 'replace')), 500)
@app.route('/mpjpeg')
def mpjpeg():
    """Stream the capture device as a multipart motion-JPEG feed."""
    ffmpeg_args = [FFMPEG, '-s', 'uhd2160', '-i', INPUT]
    ffmpeg_args += ['-f', 'mpjpeg', '-s', 'hd720']
    ffmpeg_args += ['-qmin', '1', '-qmax', '6', '-r', '15', '-']
    return Response(_stream(ffmpeg_args), mimetype="multipart/x-mixed-replace;boundary=ffserver")
@app.route('/ts')
def ts():
    """Stream the capture device as an h264 (NVENC) MPEG transport stream."""
    encoder_opts = ['-vcodec', 'h264_nvenc', '-qp', '23',
                    '-g', '30', '-bf', '0', '-zerolatency', '1',
                    '-strict_gop', '1', '-sc_threshold', '0']
    cmd = [FFMPEG, '-s', 'uhd2160', '-i', INPUT,
           '-f', 'mpegts', '-s', 'hd720'] + encoder_opts + ['-']
    return Response(_stream(cmd), mimetype="video/ts")
@atexit.register
def teardown(*args):
    """Process-exit hook: shut down every ffmpeg feed held by global_ctx."""
    app.logger.debug('teardown')
    app.logger.debug(global_ctx)
    global_ctx.close()
def _stream(cmd):
    """Build a streaming response body relaying ffmpeg output for *cmd*.

    Each concurrent client gets its own reader pipe from the shared feed;
    the feed context manager reference-counts clients and tears the ffmpeg
    process down when the last one leaves.
    """
    app.logger.debug('stream: {}'.format(' '.join(cmd)))
    def generate():
        with global_ctx.feed(cmd) as feed:
            reader_fd = feed.new_reader()
            try:
                # Relay chunks until the writer side closes (empty read).
                for chunk in iter(lambda: os.read(reader_fd, 10240), b''):
                    yield chunk
            finally:
                os.close(reader_fd)
    return stream_with_context(generate())
class _GlobalContext:
    """Process-wide registry of shared ffmpeg feeds, keyed by command line."""

    def __init__(self):
        app.logger.debug('_GlobalContext')
        self._feeds = {}
        self._feed_lock = threading.Lock()

    def feed(self, cmd):
        """Return the existing feed for *cmd*, creating it on first use."""
        key = ' '.join(cmd)
        with self._feed_lock:
            existing = self._feeds.get(key)
            if existing is not None:
                return existing
            created = _Feed(cmd)
            self._feeds[key] = created
            return created

    def close(self):
        """Tear down every registered feed and forget them all."""
        with self._feed_lock:
            for each in self._feeds.values():
                each._close()
            self._feeds = {}
class _Feed:
    """A reference-counted ffmpeg process whose stdout fans out to readers.

    The first client to __enter__ starts ffmpeg plus a pump thread; the
    last client to __exit__ tears both down again.
    """
    def __init__(self, cmd):
        self._acquired = 0   # number of clients currently inside the context
        self._lock = threading.Lock()
        self._process = None  # ffmpeg subprocess while running
        self._rpipe = None    # read end of the pipe fed by ffmpeg stdout
        self._cmd = cmd
        self._buffer = None   # _MultiClientBuffer fan-out for readers
        self._thread = None   # pump thread moving pipe data into the buffer
        self._closed = False
    def new_reader(self):
        # Returns a fresh read fd backed by the shared fan-out buffer.
        app.logger.debug("feed new reader")
        return self._buffer.new_reader()
    def _open(self):
        # Start ffmpeg with its stdout attached to our pipe and launch the
        # pump thread. On any failure, roll back to the closed state.
        app.logger.debug("feed open")
        self._closed = False
        self._buffer = _MultiClientBuffer()
        self._rpipe, wpipe = os.pipe()
        try:
            try:
                self._process = subprocess.Popen(self._cmd, stdin=None, stdout=wpipe, stderr=subprocess.DEVNULL, close_fds=True)
            finally:
                # Close our copy of the write end regardless of Popen's
                # outcome; the child holds its own duplicate when it started.
                os.close(wpipe)
            thread = threading.Thread(target=self._buffer_loop)
            thread.daemon = True
            thread.start()
            self._thread = thread
        except:
            # NOTE(review): bare except also catches KeyboardInterrupt /
            # SystemExit; the exception is re-raised below so nothing is lost.
            if self._rpipe is not None:
                os.close(self._rpipe)
                self._rpipe = None
            self._closed = True
            raise
    def _close(self):
        # Stop the readers, the ffmpeg process, and the pump thread, in
        # that order (closing the buffer first unblocks any reader EOF).
        app.logger.debug("feed close")
        self._buffer.close()
        self._closed = True
        p = self._process
        if p:
            p.terminate()
            try:
                p.wait(1.0)
            except subprocess.TimeoutExpired:
                # ffmpeg ignored SIGTERM; escalate to SIGKILL.
                p.kill()
                p.wait()
            self._process = None
        if self._rpipe:
            # NOTE(review): an fd of 0 would be treated as "no pipe" here.
            os.close(self._rpipe)
            self._rpipe = None
        thread = self._thread
        self._thread = None
        if thread:
            thread.join()
    def _buffer_loop(self):
        # Pump thread body: copy ffmpeg output into the fan-out buffer
        # until the feed is closed or the pipe hits EOF.
        while not self._closed:
            chunk = os.read(self._rpipe, 10240)
            if not chunk:
                break
            self._buffer.write(chunk)
    def __enter__(self):
        # First client in starts the feed; subsequent clients share it.
        with self._lock:
            if self._acquired == 0:
                self._open()
            self._acquired += 1
            app.logger.debug("feed enter {}".format(self._acquired))
        return self
    def __exit__(self, *args):
        # Last client out tears the feed down.
        with self._lock:
            app.logger.debug("feed exit {}".format(self._acquired))
            self._acquired -= 1
            if self._acquired <= 0:
                self._close()
class _MultiClientBuffer:
def __init__(self):
self._pipes = []
self._pipes_lock = threading.Lock()
self._closed = False
def new_reader(self):
with self._pipes_lock:
if self._closed:
raise IOError(errno.EIO, "already closed")
rpipe, wpipe = os.pipe()
self._pipes.append((rpipe, wpipe))
return rpipe
def write(self, chunk):
if self._closed:
return
pipes_to_del = []
try:
with self._pipes_lock:
pipes = list(self._pipes)
for idx, (_, wpipe) in enumerate(pipes):
try:
os.write(wpipe, chunk)
except BrokenPipeError:
pipes_to_del.append(idx)
os.close(wpipe)
except Exception:
pipes_to_del = range(len(pipes))
raise
finally:
with self._pipes_lock:
for pipe_idx in reversed(pipes_to_del):
del self._pipes[pipe_idx]
def close(self):
with self._pipes_lock:
self._closed = True
for _, wpipe in self._pipes:
os.close(wpipe)
self._pipes = []
global_ctx = _GlobalContext()
| [
"flask.Flask",
"os.close",
"subprocess.Popen",
"threading.Lock",
"os.write",
"io.BytesIO",
"os.read",
"threading.Thread",
"os.pipe"
] | [((283, 298), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (288, 298), False, 'from flask import Flask, send_file, make_response, Response, g, request, stream_with_context\n'), ((514, 616), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdin': 'None', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'close_fds': '(True)'}), '(cmd, stdin=None, stdout=subprocess.PIPE, stderr=subprocess\n .PIPE, close_fds=True)\n', (530, 616), False, 'import subprocess\n'), ((2201, 2217), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (2215, 2217), False, 'import threading\n'), ((2734, 2750), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (2748, 2750), False, 'import threading\n'), ((3190, 3199), 'os.pipe', 'os.pipe', ([], {}), '()\n', (3197, 3199), False, 'import os\n'), ((5055, 5071), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (5069, 5071), False, 'import threading\n'), ((708, 723), 'io.BytesIO', 'BytesIO', (['stdout'], {}), '(stdout)\n', (715, 723), False, 'from io import BytesIO\n'), ((3433, 3475), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._buffer_loop'}), '(target=self._buffer_loop)\n', (3449, 3475), False, 'import threading\n'), ((4140, 4161), 'os.close', 'os.close', (['self._rpipe'], {}), '(self._rpipe)\n', (4148, 4161), False, 'import os\n'), ((4377, 4404), 'os.read', 'os.read', (['self._rpipe', '(10240)'], {}), '(self._rpipe, 10240)\n', (4384, 4404), False, 'import os\n'), ((5274, 5283), 'os.pipe', 'os.pipe', ([], {}), '()\n', (5281, 5283), False, 'import os\n'), ((2000, 2015), 'os.close', 'os.close', (['rpipe'], {}), '(rpipe)\n', (2008, 2015), False, 'import os\n'), ((3262, 3363), 'subprocess.Popen', 'subprocess.Popen', (['self._cmd'], {'stdin': 'None', 'stdout': 'wpipe', 'stderr': 'subprocess.DEVNULL', 'close_fds': '(True)'}), '(self._cmd, stdin=None, stdout=wpipe, stderr=subprocess.\n DEVNULL, close_fds=True)\n', (3278, 3363), False, 'import subprocess\n'), ((3396, 3411), 'os.close', 'os.close', 
(['wpipe'], {}), '(wpipe)\n', (3404, 3411), False, 'import os\n'), ((6192, 6207), 'os.close', 'os.close', (['wpipe'], {}), '(wpipe)\n', (6200, 6207), False, 'import os\n'), ((1845, 1866), 'os.read', 'os.read', (['rpipe', '(10240)'], {}), '(rpipe, 10240)\n', (1852, 1866), False, 'import os\n'), ((3642, 3663), 'os.close', 'os.close', (['self._rpipe'], {}), '(self._rpipe)\n', (3650, 3663), False, 'import os\n'), ((5639, 5661), 'os.write', 'os.write', (['wpipe', 'chunk'], {}), '(wpipe, chunk)\n', (5647, 5661), False, 'import os\n'), ((5767, 5782), 'os.close', 'os.close', (['wpipe'], {}), '(wpipe)\n', (5775, 5782), False, 'import os\n')] |
from unittest import TestCase
from eccCh02 import Point
class PointTest(TestCase):
    def test_ne(self):
        """Inequality holds for distinct points and fails for equal ones."""
        first = Point(x=3, y=-7, a=5, b=7)
        second = Point(x=18, y=77, a=5, b=7)
        self.assertTrue(first != second)
        self.assertFalse(first != first)
| [
"eccCh02.Point"
] | [((120, 146), 'eccCh02.Point', 'Point', ([], {'x': '(3)', 'y': '(-7)', 'a': '(5)', 'b': '(7)'}), '(x=3, y=-7, a=5, b=7)\n', (125, 146), False, 'from eccCh02 import Point\n'), ((159, 186), 'eccCh02.Point', 'Point', ([], {'x': '(18)', 'y': '(77)', 'a': '(5)', 'b': '(7)'}), '(x=18, y=77, a=5, b=7)\n', (164, 186), False, 'from eccCh02 import Point\n')] |
#%%
import numpy as np
from sapai.data import data
from sapai.rand import MockRandomState
#%%
class Food():
    """A food item that can be bought from the shop and fed to a pet."""

    def __init__(self,
                 name="food-none",
                 shop=None,
                 team=(),
                 seed_state=None):
        """
        Food class definition the types of interactions that food undergoes

        name : food name, with or without the "food-" prefix
        shop : shop instance the food belongs to (used by canned food)
        team : unused; kept for signature compatibility (default changed
            from a shared mutable [] to an immutable tuple)
        seed_state : optional numpy RandomState state tuple for determinism
        """
        if len(name) != 0:
            if not name.startswith("food-"):
                name = "food-{}".format(name)
        self.eaten = False
        self.shop = shop
        self.seed_state = seed_state
        if self.seed_state is not None:
            self.rs = np.random.RandomState()
            self.rs.set_state(self.seed_state)
        else:
            ### Otherwise, fall back to a mock RandomState stand-in
            self.rs = MockRandomState()
        self.attack = 0
        self.health = 0
        self.base_attack = 0
        self.base_health = 0
        self.status = "none"
        self.effect = "none"
        self.fd = {}
        self.name = name
        if name not in data["foods"]:
            raise Exception("Food {} not found".format(name))
        fd = data["foods"][name]["ability"]
        self.fd = fd
        self.attack = 0
        self.health = 0
        self.effect = fd["effect"]["kind"]
        if "attackAmount" in fd["effect"]:
            self.attack = fd["effect"]["attackAmount"]
            self.base_attack = fd["effect"]["attackAmount"]
        if "healthAmount" in fd["effect"]:
            self.health = fd["effect"]["healthAmount"]
            self.base_health = fd["effect"]["healthAmount"]
        if "status" in fd["effect"]:
            self.status = fd["effect"]["status"]

    def apply(self, pet=None):
        """
        Serve the food object to the input pet (or to the shop for canned
        food). Returns the pet for "ModifyStats" foods, otherwise None.
        """
        if self.eaten:
            raise Exception("This should not be possible")
        if self.name == "food-canned-food":
            # Canned food buffs the shop itself rather than a single pet
            self.shop.can += self.attack
            return
        pet.attack += self.attack
        pet.health += self.health
        if self.effect == "ModifyStats":
            ### Done
            return pet
        elif self.effect == "ApplyStatus":
            pet.status = self.status

    def copy(self):
        """Return a copy of this food; internal dicts are shared by reference."""
        copy_food = Food(self.name, self.shop)
        for key, value in self.__dict__.items():
            ### Although this approach will copy the internal dictionaries by
            ### reference rather than copy by value, these dictionaries will
            ### never be modified anyways.
            ### All integers and strings are copied by value automatically with
            ### Python, therefore, this achieves the correct behavior
            copy_food.__dict__[key] = value
        return copy_food

    @property
    def state(self):
        """Return a JSON-serializable dict describing this food."""
        #### Ensure that state can be JSON serialized
        if getattr(self, "rs", False):
            if type(self.rs).__name__ == "MockRandomState":
                seed_state = None
            else:
                seed_state = list(self.rs.get_state())
                # ndarray entry of the state tuple is not JSON serializable
                seed_state[1] = seed_state[1].tolist()
        else:
            seed_state = None
        state_dict = {
            "type": "Food",
            "name": self.name,
            "eaten": self.eaten,
            "attack": self.attack,
            "health": self.health,
            "seed_state": seed_state
        }
        return state_dict

    @classmethod
    def from_state(cls, state):
        """Reconstruct a Food from a dict produced by the `state` property."""
        food = cls(name=state["name"])
        food.attack = state["attack"]
        food.health = state["health"]
        # BUG FIX: the original line ended with a trailing comma, which
        # stored the one-element tuple (state["eaten"],) instead of the
        # boolean itself.
        food.eaten = state["eaten"]
        ### Supplying seed_state in the state dict is optional
        if state.get("seed_state") is not None:
            food.seed_state = state["seed_state"]
            food.rs = np.random.RandomState()
            food.rs.set_state(state["seed_state"])
        return food

    def __repr__(self):
        return "< {} {}-{} {} >".format(
            self.name, self.attack, self.health, self.status)
# %%
| [
"sapai.rand.MockRandomState",
"numpy.random.RandomState"
] | [((661, 684), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (682, 684), True, 'import numpy as np\n'), ((804, 821), 'sapai.rand.MockRandomState', 'MockRandomState', ([], {}), '()\n', (819, 821), False, 'from sapai.rand import MockRandomState\n'), ((3925, 3948), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (3946, 3948), True, 'import numpy as np\n')] |