text = """
ala ma kota
a kot ma ale
"""
# ------------------------------------------------------------------------------
# TODO as class
chars = list(sorted(set(text))) # stable indices
len_chars = len(chars) + 1  # index 0 is reserved for unknown characters
c_to_i = {c: i+1 for i, c in enumerate(chars)}
i_to_c = {i+1: c for i, c in enumerate(chars)}
def text_to_i(text):
return [c_to_i.get(c,0) for c in text]
def text_to_hot(text):
out = [[0]*len_chars for _ in text]
i_list = text_to_i(text)
for n,i in enumerate(i_list):
out[n][i] = 1
return out
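# One possible answer to the "TODO as class" above -- a minimal sketch, not the
# author's implementation: the same helpers bundled into a single codec class.
# Index 0 stays reserved for unknown characters, as in the functions above.
class CharCodec:
    def __init__(self, text):
        chars = sorted(set(text))
        self.size = len(chars) + 1  # +1 for the unknown slot at index 0
        self.c_to_i = {c: i + 1 for i, c in enumerate(chars)}
        self.i_to_c = {i + 1: c for i, c in enumerate(chars)}
    def encode(self, text):
        return [self.c_to_i.get(c, 0) for c in text]
    def one_hot(self, text):
        out = [[0] * self.size for _ in text]
        for n, i in enumerate(self.encode(text)):
            out[n][i] = 1
        return out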
# ------------------------------------------------------------------------------
import numpy as np
INPUT = 4
sentences = [text[i:i+INPUT+1] for i in range(len(text)-INPUT-1)]
x = np.zeros((len(sentences), INPUT, len_chars), dtype='b')
y = np.zeros((len(sentences), len_chars), dtype='b')
for i, sentence in enumerate(sentences):  # a separate name avoids shadowing the module-level `text`
    x[i] = text_to_hot(sentence[:-1])
    y[i] = text_to_hot(sentence[-1:])[0]
print(x)
print(y)
# ------------------------------------------------------------------------------
from keras.models import Sequential,load_model
from keras.layers import Dense,LSTM
from keras.optimizers import RMSprop
if 0:  # flip to 1 to (re)train and save; 0 loads the previously saved model
    model = Sequential()
    model.add(LSTM(4, input_shape=(INPUT, len_chars)))
    #model.add(LSTM(4, batch_input_shape=(3,INPUT,len_chars), stateful=True))
    model.add(Dense(len_chars, activation='softmax'))
    optimizer = RMSprop(learning_rate=0.1)
    model.compile(optimizer, loss='categorical_crossentropy')
    model.fit(x, y, batch_size=3, epochs=20)
model.save('lstm_my.h5')
else:
model = load_model('lstm_my.h5')
# ------------------------------------------------------------------------------
def sample(p_list, t=1.0):
if not t: return np.argmax(p_list)
    p_list = p_list.astype('float64') # without this, norm_p can sum to slightly more than 1
log_p = np.log(p_list) / t
exp_p = np.exp(log_p)
norm_p = exp_p / np.sum(exp_p)
results = np.random.multinomial(1, norm_p, 1)
return np.argmax(results)
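# Quick illustration of sample()'s temperature (a sketch, not in the original
# script): low t sharpens the distribution toward the argmax, high t flattens
# it toward uniform.
if 0:
    demo_p = np.array([0.1, 0.2, 0.7])
    for t in (0.2, 1.0, 2.0):
        picks = [sample(demo_p, t) for _ in range(1000)]
        print(t, [picks.count(k) for k in range(3)])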
# ------------------------------------------------------------------------------
if 0:
px = np.array([text_to_hot('kot')])
    py = model.predict(px)[0]  # predict returns a batch; take the single row
    pi = sample(py, 0)
    pc = i_to_c[pi]
print(px)
print(py)
print(i_to_c)
print(pi)
print(pc)
# ------------------------------------------------------------------------------
# TODO function
text = 'ala '
out = text[:]
for j in range(20):
x = np.array([text_to_hot(text)])
py = model.predict(x)[0]
    pc = i_to_c[sample(py, 1.0)]
out = out + pc
text = out[-INPUT:]
print(out)
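# One way to address the "TODO function" above -- a sketch, not the author's
# final code: wrap the generation loop so seed, length and temperature become
# parameters. Assumes len(seed) >= INPUT so the one-hot window has a full shape.
def generate(seed, n=20, temperature=1.0):
    out = seed[:]
    window = out[-INPUT:]
    for _ in range(n):
        probs = model.predict(np.array([text_to_hot(window)]))[0]
        out += i_to_c[sample(probs, temperature)]
        window = out[-INPUT:]
    return out
# print(generate('ala ', n=20, temperature=1.0))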
# ------------------------------------------------------------------------------
import tensorflow as tf
from lime import lime_image
from skimage.segmentation import mark_boundaries
from xplainer.backend.tools.abstract_tool import AbstractTool, GeneralSettings
from xplainer.backend.utils.image import prepare_for_prediction, get_base64png
class Lime(AbstractTool):
def name(self):
return "LIME"
def category(self):
return "Local"
def description(self):
return "Local Interpretable Model-Agnostic Explanations."
def source_name(self) -> str:
return "lime"
def source_url(self) -> str:
return "https://github.com/marcotcr/lime"
def tool_parameters(self) -> dict:
return {
"list": [
{
"param": "batch_size",
"name": "Batch size",
"type": "int",
"default": 10,
"min": 1,
"step": 1,
},
{
"param": "num_features",
"name": "Features",
"type": "int",
"default": 100000,
"min": 1,
"step": 10000,
},
{
"param": "num_samples",
"name": "Samples",
"type": "int",
"default": 100,
"min": 1,
"step": 100,
},
{
"param": "positive_only",
"name": "Positive only",
"type": "bool",
"default": True,
},
{
"param": "superpixels",
"name": "Superpixels",
"type": "int",
"default": 5,
"min": 1,
"step": 1,
},
{
"param": "min_weight",
"name": "Min weight",
"type": "float",
"default": 0.0,
"min": 0.0,
"step": 0.1,
},
{"param": "hide_rest", "name": "Hide rest", "description": None, "type": "bool", "default": True,},
],
"layout": [
["batch_size", "num_features", "num_samples"],
["superpixels", "min_weight", "positive_only", "hide_rest"],
],
}
def explain(
self, model: tf.keras.Model, image_path: str, general_settings: GeneralSettings, tool_settings: dict = None
) -> list:
image = prepare_for_prediction(model, image_path)
onehot, labels = self._get_labels(model, image, general_settings)
image = tf.cast(image, dtype=tf.float64)
explainer = lime_image.LimeImageExplainer()
explanation = explainer.explain_instance(
image.numpy()[0],
model.predict,
labels=labels,
batch_size=tool_settings["batch_size"],
num_features=tool_settings["num_features"],
num_samples=tool_settings["num_samples"],
)
results = []
for label in labels:
temp, mask = explanation.get_image_and_mask(
label,
positive_only=tool_settings["positive_only"],
num_features=tool_settings["superpixels"],
min_weight=tool_settings["min_weight"],
hide_rest=tool_settings["hide_rest"],
)
result_image = mark_boundaries(temp / 2 + 0.5, mask)
result_image = tf.image.convert_image_dtype(result_image, dtype=tf.uint8, saturate=True)
            image_base64 = get_base64png(result_image)
            results.append(
                {"label_id": int(label), "probability": float(onehot[label]), "image": image_base64}
)
return results
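# Hedged usage sketch (not part of the original module): the model path and
# image path below are made up, and constructing a GeneralSettings instance is
# project-specific, so the snippet is left commented out.
#
#     tool = Lime()
#     model = tf.keras.models.load_model("my_model.h5")  # hypothetical path
#     tool_settings = {"batch_size": 10, "num_features": 100000,
#                      "num_samples": 100, "positive_only": True,
#                      "superpixels": 5, "min_weight": 0.0, "hide_rest": True}
#     results = tool.explain(model, "cat.png", general_settings, tool_settings)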
# ------------------------------------------------------------------------------
#!/usr/bin/env python
# encoding: utf-8
"""
@Author: yangwenhao
@Contact: <EMAIL>
@Software: PyCharm
@File: model.py
@Overview: The deep speaker model is not entirely the same as ResNet, as there are convolutional layers between blocks.
"""
import math
import torch
import torch.nn as nn
from torch.autograd import Function
from torch.autograd import Variable
from torch.nn import CosineSimilarity
from Define_Model.Loss.SoftmaxLoss import AngleLinear
def get_layer_param(model):
return sum([torch.numel(param) for param in model.parameters()])
class PairwiseDistance(Function):
def __init__(self, p):
super(PairwiseDistance, self).__init__()
self.norm = p
def forward(self, x1, x2):
assert x1.size() == x2.size()
eps = 1e-4 / x1.size(1)
diff = torch.abs(x1 - x2)
# The distance will be (Sum(|x1-x2|**p)+eps)**1/p
out = torch.pow(diff, self.norm).sum(dim=1)
return torch.pow(out + eps, 1. / self.norm)
class TripletMarginLoss(Function):
"""Triplet loss function.
"""
def __init__(self, margin):
super(TripletMarginLoss, self).__init__()
self.margin = margin
self.pdist = PairwiseDistance(2) # norm 2
def forward(self, anchor, positive, negative):
d_p = self.pdist.forward(anchor, positive)
d_n = self.pdist.forward(anchor, negative)
dist_hinge = torch.clamp(self.margin + d_p - d_n, min=0.0)
loss = torch.mean(dist_hinge)
return loss
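# Hedged usage sketch (not part of the original file): random embeddings only
# to illustrate the call signature of the triplet loss defined above.
def _triplet_margin_demo():
    anchor, positive, negative = torch.randn(8, 512), torch.randn(8, 512), torch.randn(8, 512)
    return TripletMarginLoss(margin=0.1).forward(anchor, positive, negative)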
class TripletMarginCosLoss(Function):
"""Triplet loss function.
"""
def __init__(self, margin):
super(TripletMarginCosLoss, self).__init__()
self.margin = margin
self.pdist = CosineSimilarity(dim=1, eps=1e-6) # norm 2
def forward(self, anchor, positive, negative):
d_p = self.pdist.forward(anchor, positive)
d_n = self.pdist.forward(anchor, negative)
dist_hinge = torch.clamp(self.margin - d_p + d_n, min=0.0)
# loss = torch.sum(dist_hinge)
loss = torch.mean(dist_hinge)
return loss
class ReLU20(nn.Hardtanh):
def __init__(self, inplace=False):
super(ReLU20, self).__init__(0, 20, inplace)
def __repr__(self):
inplace_str = 'inplace' if self.inplace else ''
return self.__class__.__name__ + ' (' \
+ inplace_str + ')'
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = ReLU20(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class myResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
super(myResNet, self).__init__()
self.relu = ReLU20(inplace=True)
self.inplanes = 64
self.conv1 = nn.Conv2d(1, 64, kernel_size=5, stride=2, padding=2,bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, layers[0])
self.inplanes = 128
self.conv2 = nn.Conv2d(64, 128, kernel_size=5, stride=2, padding=2,bias=False)
self.bn2 = nn.BatchNorm2d(128)
self.layer2 = self._make_layer(block, 128, layers[1])
self.inplanes = 256
self.conv3 = nn.Conv2d(128, 256, kernel_size=5, stride=2, padding=2,bias=False)
self.bn3 = nn.BatchNorm2d(256)
self.layer3 = self._make_layer(block, 256, layers[2])
self.inplanes = 512
self.conv4 = nn.Conv2d(256, 512, kernel_size=5, stride=2, padding=2,bias=False)
self.bn4 = nn.BatchNorm2d(512)
self.layer4 = self._make_layer(block, 512, layers[3])
self.avgpool = nn.AdaptiveAvgPool2d((4, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
layers = []
layers.append(block(self.inplanes, planes, stride))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
    def forward(self, x):
        # strided convolutions between the residual stages handle downsampling,
        # mirroring the order used in DeepSpeakerModel.forward below
        x = self.relu(self.bn1(self.conv1(x)))
        x = self.layer1(x)
        x = self.relu(self.bn2(self.conv2(x)))
        x = self.layer2(x)
        x = self.relu(self.bn3(self.conv3(x)))
        x = self.layer3(x)
        x = self.relu(self.bn4(self.conv4(x)))
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
class DeepSpeakerModel(nn.Module):
def __init__(self, resnet_size, embedding_size, num_classes, feature_dim=64):
super(DeepSpeakerModel, self).__init__()
resnet_type = {10:[1, 1, 1, 1],
18:[2, 2, 2, 2],
34:[3, 4, 6, 3],
50:[3, 4, 6, 3],
101:[3, 4, 23, 3]}
self.embedding_size = embedding_size
self.resnet_size = resnet_size
self.num_classes = num_classes
self.model = myResNet(BasicBlock, resnet_type[resnet_size])
if feature_dim == 64:
self.model.fc = nn.Linear(512 * 4, self.embedding_size)
elif feature_dim == 40:
self.model.fc = nn.Linear(256 * 5, self.embedding_size)
#self.model.classifier = nn.Linear(self.embedding_size, num_classes)
#self.norm = nn.BatchNorm1d(512)
self.model.classifier = nn.Linear(self.embedding_size, num_classes)
def l2_norm(self,input):
input_size = input.size()
buffer = torch.pow(input, 2)
normp = torch.sum(buffer, 1).add_(1e-10)
norm = torch.sqrt(normp)
_output = torch.div(input, norm.view(-1, 1).expand_as(input))
output = _output.view(input_size)
return output
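    # Note: this is essentially torch.nn.functional.normalize(input, p=2, dim=1),
    # with 1e-10 added to the squared norm to guard against zero vectors.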
def forward(self, x):
x = self.model.conv1(x)
x = self.model.bn1(x)
x = self.model.relu(x)
x = self.model.layer1(x)
x = self.model.conv2(x)
x = self.model.bn2(x)
x = self.model.relu(x)
x = self.model.layer2(x)
x = self.model.conv3(x)
x = self.model.bn3(x)
x = self.model.relu(x)
x = self.model.layer3(x)
x = self.model.conv4(x)
x = self.model.bn4(x)
x = self.model.relu(x)
x = self.model.layer4(x)
# print(x.shape)
x = self.model.avgpool(x)
x = x.view(x.size(0), -1)
x = self.model.fc(x)
x = self.l2_norm(x)
# Multiply by alpha = 10 as suggested in https://arxiv.org/pdf/1703.09507.pdf
alpha = 10
features = x * alpha
# x = x.resize(int(x.size(0) / 17),17 , 512)
# self.features =torch.mean(x,dim=1)
# x = self.model.classifier(features)
return features
def forward_classifier(self, x):
res = self.model.classifier(x)
return res
class ResSpeakerModel(nn.Module):
"""
Define the ResNet model with A-softmax and AM-softmax loss.
"""
def __init__(self, resnet_size, embedding_size, num_classes, feature_dim=64):
super(ResSpeakerModel, self).__init__()
resnet_type = {10:[1, 1, 1, 1],
18:[2, 2, 2, 2],
34:[3, 4, 6, 3],
50:[3, 4, 6, 3],
101:[3, 4, 23, 3]}
self.embedding_size = embedding_size
self.resnet_size = resnet_size
self.num_classes = num_classes
self.model = myResNet(BasicBlock, resnet_type[resnet_size])
if feature_dim == 64:
self.model.fc = nn.Linear(512 * 4, self.embedding_size)
elif feature_dim == 40:
self.model.fc = nn.Linear(256 * 5, self.embedding_size)
elif feature_dim == 257:
self.model.fc = nn.Linear(256 * 5, self.embedding_size)
self.model.classifier = nn.Linear(self.embedding_size, self.num_classes)
# TAP Encoding Layer
self.model.encodinglayer = nn.AdaptiveAvgPool2d((1, 512))
# TODO: SAP, LDE Encoding Layer after the embedding layers
# SAP Encoding Layer
self.model.W = torch.nn.Parameter(torch.randn(self.embedding_size, num_classes))
nn.init.xavier_normal(self.model.W, gain=1)
# self.model.classifier = nn.Softmax(self.embedding_size, num_classes)
# self.model.classifier = AngleLinear(self.embedding_size, num_classes)
# Parameters for a-softmax
self.gamma = 0
self.it = 0
self.LambdaMin = 5.0
self.LambdaMax = 1500.0
self.lamb = 1500.0
# default 4, based on the voxceleb 1, set to 3
self.m = 4
self.ce = nn.CrossEntropyLoss()
        # Chebyshev multiple-angle identities:
        # cos(2*theta) = 2*cos(theta)**2 - 1
        # cos(3*theta) = 4*cos(theta)**3 - 3*cos(theta)
        # cos(4*theta) = 8*cos(theta)**4 - 8*cos(theta)**2 + 1
self.cos_function = [
lambda x: x ** 0,
lambda x: x ** 1,
lambda x: 2 * x ** 2 - 1,
lambda x: 4 * x ** 3 - 3 * x,
lambda x: 8 * x ** 4 - 8 * x ** 2 + 1,
lambda x: 16 * x ** 5 - 20 * x ** 3 + 5 * x,
]
# Parameters for am-softmax
# default 0.4, based on the voxceleb1, set to 0.3
self.margin = 0.4
self.s = 30
def l2_norm(self,input):
input_size = input.size()
buffer = torch.pow(input, 2)
normp = torch.sum(buffer, 1).add_(1e-10)
norm = torch.sqrt(normp)
_output = torch.div(input, norm.view(-1, 1).expand_as(input))
output = _output.view(input_size)
return output
def forward(self, x):
# pdb.set_trace()
x = self.model.conv1(x)
x = self.model.bn1(x)
x = self.model.relu(x)
x = self.model.layer1(x)
x = self.model.conv2(x)
x = self.model.bn2(x)
x = self.model.relu(x)
x = self.model.layer2(x)
x = self.model.conv3(x)
x = self.model.bn3(x)
x = self.model.relu(x)
x = self.model.layer3(x)
x = self.model.conv4(x)
x = self.model.bn4(x)
x = self.model.relu(x)
x = self.model.layer4(x)
# print(x.s)
x = self.model.avgpool(x)
x = x.view(x.size(0), -1)
x = self.model.fc(x)
self.features = self.l2_norm(x)
# Multiply by alpha = 10 as suggested in https://arxiv.org/pdf/1703.09507.pdf
# alpha=10
# self.features = self.features * alpha
return self.features
def forward_classifier(self, x):
# x = self.forward(x)
# x = self.encodinglayer(x)
res = x.mm(self.model.W)
# res = self.model.classifier(features)
return res
def encodinglayer(self, x):
features = self.model.encodinglayer(x)
return features
def AngularSoftmaxLoss(self, x, label):
self.it += 1
assert x.size()[0] == label.size()[0]
# assert features.size()[1] == self.in_feats
        w = self.model.W.renorm(2, 1, 1e-5).mul(1e5)  # [embedding_size, num_classes]
x_modulus = x.pow(2).sum(1).pow(0.5) # [batch]
w_modulus = w.pow(2).sum(0).pow(0.5) # [out_planes]
# get w@x=||w||*||x||*cos(theta)
# w = w.cuda()
inner_wx = x.mm(w) # [batch,out_planes]
cos_theta = (inner_wx / x_modulus.view(-1, 1)) / w_modulus.view(1, -1)
cos_theta = cos_theta.clamp(-1, 1)
# get cos(m*theta)
# TODO: cos_m_theta isn't correct.
cos_m_theta = self.cos_function[self.m](cos_theta)
theta = Variable(cos_theta.data.acos())
# get k, theta is in [ k*pi/m , (k+1)*pi/m ]
k = (self.m * theta / math.pi).floor()
minus_one = k * 0 - 1
# get phi_theta = -1^k*cos(m*theta)-2*k
phi_theta = (minus_one ** k) * cos_m_theta - 2 * k
# get cos_x and phi_x
# cos_x = cos(theta)*||x||
# phi_x = phi(theta)*||x||
cos_x = cos_theta * x_modulus.view(-1, 1)
phi_x = phi_theta * x_modulus.view(-1, 1)
target = label.view(-1, 1)
# get one_hot mat
index = cos_x.data * 0.0 # size=(B,Classnum)
index.scatter_(1, target.data.view(-1, 1), 1)
index = index.byte()
index = Variable(index)
# set lamb, change the rate of softmax and A-softmax
self.lamb = max(self.LambdaMin, self.LambdaMax / (1 + 0.1 * self.it))
# get a-softmax and softmax mat
output = cos_x * 1
# output[index] -= cos_x[index]
# output[index] += phi_x[index]
output[index] -= (cos_x[index] * 1.0 / (+self.lamb))
output[index] += (phi_x[index] * 1.0 / (self.lamb))
# pdb.set_trace()
# get loss, which is equal to Cross Entropy.
# logpt = F.log_softmax(output, dim=1) # [batch,classes_num]
# logpt = logpt.gather(1, target) # [batch]
# pt = logpt.data.exp()
# torch.mm()
# loss = -1 * logpt * (1 - pt) ** self.gamma
# loss = loss.mean()
loss = self.ce(output, label)
return loss
def AMSoftmaxLoss(self, x, label):
assert x.size()[0] == label.size()[0]
#assert x.size()[1] == self.in_feats
# pdb.set_trace()
x_norm = torch.norm(x, p=2, dim=1, keepdim=True).clamp(min=1e-12)
x_norm = torch.div(x, x_norm)
w_norm = torch.norm(self.model.W, p=2, dim=0, keepdim=True).clamp(min=1e-12)
w_norm = torch.div(self.model.W, w_norm)
costh = torch.mm(x_norm, w_norm)
lb_view = label.view(-1, 1)
if lb_view.is_cuda:
lb_view = lb_view.cpu()
delt_costh = torch.zeros(costh.size()).scatter_(1, lb_view.data, self.margin)
if x.is_cuda:
delt_costh = Variable(delt_costh.cuda())
costh_m = costh - delt_costh
costh_m_s = self.s * costh_m
loss = self.ce(costh_m_s, label)
return loss
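# Hedged sanity-check sketch (not part of the original file): verifies the
# multiple-angle lambdas and exercises the AM-softmax head on random tensors.
def _res_speaker_demo(num_classes=40):
    net = ResSpeakerModel(resnet_size=10, embedding_size=128, num_classes=num_classes)
    theta = torch.rand(8)
    assert torch.allclose(net.cos_function[3](torch.cos(theta)), torch.cos(3 * theta), atol=1e-5)
    feats = torch.randn(4, 128)
    labels = torch.randint(0, num_classes, (4,))
    return net.AMSoftmaxLoss(feats, labels)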
class ResCNNSpeaker(nn.Module):
"""
Define the ResNet model with A-softmax and AM-softmax loss.
    Added dropout, as in https://github.com/nagadomi/kaggle-cifar10-torch7, after the average pooling and fc layers (the 'dropout' flag is accepted but currently unused).
"""
def __init__(self, resnet_size, embedding_size, num_classes, block=BasicBlock, feature_dim=64, dropout=False):
super(ResCNNSpeaker, self).__init__()
resnet_type = {10:[1, 1, 1, 1],
18:[2, 2, 2, 2],
34:[3, 4, 6, 3],
50:[3, 4, 6, 3],
101:[3, 4, 23, 3]}
layers = resnet_type[resnet_size]
self.embedding_size = embedding_size
self.relu = ReLU20(inplace=True)
self.in_planes = 64
self.conv1 = nn.Conv2d(1, 64, kernel_size=5, stride=2, padding=2, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, layers[0])
self.in_planes = 128
self.conv2 = nn.Conv2d(64, 128, kernel_size=5, stride=2, padding=2, bias=False)
self.bn2 = nn.BatchNorm2d(128)
self.layer2 = self._make_layer(block, 128, layers[1])
self.in_planes = 256
self.conv3 = nn.Conv2d(128, 256, kernel_size=5, stride=2, padding=2, bias=False)
self.bn3 = nn.BatchNorm2d(256)
self.layer3 = self._make_layer(block, 256, layers[2])
self.in_planes = 512
self.conv4 = nn.Conv2d(256, 512, kernel_size=5, stride=2, padding=2, bias=False)
self.bn4 = nn.BatchNorm2d(512)
self.layer4 = self._make_layer(block, 512, layers[3])
# self.avg_pool = nn.AdaptiveAvgPool2d([4, 1])
self.avg_pool = nn.AdaptiveAvgPool2d((4, 1))
self.fc = nn.Sequential(
nn.Linear(self.in_planes * 4, embedding_size),
nn.BatchNorm1d(embedding_size)
)
self.classifier = nn.Linear(self.embedding_size, num_classes)
        for m in self.modules():  # initialize the parameters of each layer
            if isinstance(m, nn.Conv2d):  # zero-mean normal init with std sqrt(2/n)
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):  # weight set to 1, bias to 0
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):  # weight set to 1, bias to 0
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        # self.weight = nn.Parameter(torch.Tensor(embedding_size, num_classes))  # this layer's weights
        # self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)  # init weights, normalized along dim 1
def l2_norm(self,input):
input_size = input.size()
buffer = torch.pow(input, 2)
normp = torch.sum(buffer, 1).add_(1e-10)
norm = torch.sqrt(normp)
_output = torch.div(input, norm.view(-1, 1).expand_as(input))
output = _output.view(input_size)
return output
def _make_layer(self, block, planes, blocks, stride=1):
layers = [block(self.in_planes, planes, stride)]
self.in_planes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.in_planes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.layer2(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu(x)
x = self.layer3(x)
x = self.conv4(x)
x = self.bn4(x)
x = self.relu(x)
x = self.layer4(x)
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
x = self.l2_norm(x)
# Multiply by alpha = 10 as suggested in https://arxiv.org/pdf/1703.09507.pdf
alpha = 10
feature = x * alpha
        # ww = self.weight.renorm(2, 1, 1e-5).mul(1e5)  # normalize along dim 0
# x_len = feature.pow(2).sum(1).pow(0.5)
# w_len = ww.pow(2).sum(0).pow(0.5)
# cos_theta = feature.mm(ww) / x_len.view(-1, 1) / w_len.view(1, -1)
logits = self.classifier(feature)
# res = self.model.classifier(features)
return logits, feature
class SuperficialResCNN(nn.Module):  # a shallow ResNet variant
    def __init__(self, embedding_size, layers=[1, 1, 1, 0], block=BasicBlock, n_classes=1000,
                 m=3):  # block type, embedding size, number of classes, margin
        super(SuperficialResCNN, self).__init__()
        self.embedding_size = embedding_size
        self.relu = ReLU20(inplace=True)
self.in_planes = 64
self.conv1 = nn.Conv2d(1, 64, kernel_size=5, stride=2, padding=2, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, layers[0])
self.in_planes = 128
self.conv2 = nn.Conv2d(64, 128, kernel_size=5, stride=2, padding=2, bias=False)
self.bn2 = nn.BatchNorm2d(128)
self.layer2 = self._make_layer(block, 128, layers[1])
self.in_planes = 256
self.conv3 = nn.Conv2d(128, 256, kernel_size=5, stride=2, padding=2, bias=False)
self.bn3 = nn.BatchNorm2d(256)
self.layer3 = self._make_layer(block, 256, layers[2])
# self.in_planes = 512
# self.conv4 = nn.Conv2d(256, 512, kernel_size=5, stride=2, padding=2, bias=False)
# self.bn4 = nn.BatchNorm2d(512)
# self.layer4 = self._make_layer(block, 512, layers[3])
# self.avg_pool = nn.AdaptiveAvgPool2d([4, 1])
self.avg_pool = nn.AdaptiveAvgPool2d((4, 1))
self.fc = nn.Sequential(
nn.Linear(self.in_planes * 4, embedding_size),
nn.BatchNorm1d(embedding_size)
)
# self.W = torch.nn.Parameter(torch.randn(self.embedding_size, n_classes))
# self.W.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
# nn.init.xavier_normal(self.W, gain=1)
self.angle_linear = AngleLinear(in_features=embedding_size, out_features=n_classes, m=m)
        for m in self.modules():  # initialize the parameters of each layer
            if isinstance(m, nn.Conv2d):  # zero-mean normal init with std sqrt(2/n)
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):  # weight set to 1, bias to 0
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):  # weight set to 1, bias to 0
                m.weight.data.fill_(1)
                m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
layers = [block(self.in_planes, planes, stride)]
self.in_planes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.in_planes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.layer2(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu(x)
x = self.layer3(x)
# x = self.conv4(x)
# x = self.bn4(x)
# x = self.relu(x)
# x = self.layer4(x)
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
# x = x * self.alpha
logit = self.angle_linear(x)
        return logit, x  # also return the penultimate-layer features
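# Hedged usage sketch (not part of the original file): the 4x1x64x300 input
# shape is an assumption standing in for a batch of spectrogram-like features.
def _rescnn_speaker_demo():
    net = ResCNNSpeaker(resnet_size=10, embedding_size=128, num_classes=40)
    logits, feature = net(torch.randn(4, 1, 64, 300))
    return logits.shape, feature.shape  # torch.Size([4, 40]), torch.Size([4, 128])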
# ------------------------------------------------------------------------------
import requests
from datetime import datetime
CLIENT_ID = '2OWPT1X5RNXEE0DGOM5VWO2FBM5R5TNTVPPLH50NCSZAX3QD'
CLIENT_SECRET = '<KEY>'
DATE = datetime.today().strftime('%Y%m%d')
BASE_URL = 'https://api.foursquare.com/v2'
AUTH_URL_PART = 'client_id={}&client_secret={}&v={}'.format(CLIENT_ID, CLIENT_SECRET, DATE)
# Foursquare has an annoying 50-result limit on searches, so
# we need some query magic to get more than 50 results:
# https://stackoverflow.com/questions/14211120/maximum-number-of-results-in-foursquare-api
def venuSearch(search_param):
'''
url = "{}/venues/search?near={}&limit=50&{}&intent=browse&radius=9000".format(BASE_URL, search_param, AUTH_URL_PART)
request = requests.get(url)
# We want get lat,lng of search param from response geocode
res = request.json()
geocode = res['response']['geocode']
centerCoords = geocode['feature']['geometry']['center']
lat = centerCoords['lat']
lng = centerCoords['lng']
# Make request, alter latlng then make another request
venues = []
count = 1
finished = False
while finished == False:
req = requests.get("{}/venues/search?ll={},{}&limit=50&{}&intent=browse&radius=9000".format(BASE_URL, lat, lng, AUTH_URL_PART))
res = req.json()
ven = res['response']['venues']
# Check to see if each new venue is already in list
for v in ven:
newVID = v['id']
if len(venues) == 0:
venues.append(v)
else:
if v in venues:
break
else:
venues.append(v)
count += 1
if (count > 10):
finished = True
# Now adjust lat,lng
lat += 0.01
lng -= 0.01
finalJSON = {
'venues': venues
}
return finalJSON '''
url = "{}/venues/search?near={}&limit=50&{}&intent=browse&radius=100000".format(BASE_URL, search_param, AUTH_URL_PART)
request = requests.get(url).json()
print(request)
venues = request['response']['venues']
return {
'venues': venues
}
def venuPhotos(venuID):
url = "{}/venues/{}/photos?client_id={}&client_secret={}&v={}".format(BASE_URL, venuID, CLIENT_ID, CLIENT_SECRET, DATE)
# Now perform request
request = requests.get(url)
return request.json()
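# Hedged usage sketch (not part of the original module): the search term is
# made up, and it assumes valid API credentials and network access. Foursquare
# v2 venue objects carry 'id' and 'name' fields.
if __name__ == '__main__':
    found = venuSearch('Chicago, IL')
    for venue in found['venues'][:3]:
        photos = venuPhotos(venue['id'])
        print(venue['name'], photos['response']['photos']['count'])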
# ------------------------------------------------------------------------------
from django.conf.urls import include, url
from django.contrib import admin
from . import views
urlpatterns = [
url(r'^$', views.ViewView.as_view(), name='profile_own_view'),
url(r'^edit/', views.EditView.as_view(), name='profile_edit'),
url(r'^view/$', views.ViewView.as_view(), name='profile_own_view'),
url(r'^view/(?P<username>[a-zA-Z0-9_-]+)/$', views.ViewView.as_view(), name='profile_view'),
url(r'^delete/$', views.delete_account, name="delete_account"),
url(r'^follow/(?P<username>[a-zA-Z0-9_-]+)/$', views.follow_user, name="follow_user"),
url(r'^unfollow/(?P<username>[a-zA-Z0-9_-]+)/$', views.unfollow_user, name="unfollow_user"),
]
# ------------------------------------------------------------------------------
import os
import sys
import logging
import requests
class ApiException(Exception):
""" API Exception class. """
pass
logger = logging.getLogger()
logger.setLevel(logging.INFO)
try:
api_url = os.environ['BUS_API_URL']
if api_url[-1] == '/':
api_url = api_url[0:-1]
except KeyError:
logger.error('Missing required environment variables')
sys.exit()
def get_lines():
""" Get a list of bus lines. """
resp = requests.get(api_url + '/lines')
if resp.status_code == 200:
return resp.json()
else:
logger.error(resp.text)
raise ApiException(resp.text)
def get_locations():
""" Get a list of bus lines. """
resp = requests.get(api_url + '/locations')
if resp.status_code == 200:
return resp.json()
else:
logger.error(resp.text)
raise ApiException(resp.text)
def post_line(title, color, direction_1, direction_2):
""" Create a bus line. """
body = {
'title': title,
'color': color,
'direction_1': direction_1,
'direction_2': direction_2
}
resp = requests.post(api_url + '/lines', json=body)
if resp.status_code == 200:
return resp.json()
else:
logger.error(resp.text)
raise ApiException(resp.text)
def post_bus(line_id):
""" Create a bus. """
resp = requests.post(api_url + '/lines/' + line_id + '/bus')
if resp.status_code == 200:
return resp.json()
else:
logger.error(resp.text)
raise ApiException(resp.text)
def post_location(bus_id, latitude, longitude, direction):
""" Update the bus's location. """
body = {
'latitude': latitude,
'longitude': longitude,
'direction': direction
}
resp = requests.post(api_url + '/bus/' + bus_id + '/location', json=body)
if resp.status_code == 200:
return resp.json()
else:
logger.error(resp.text)
raise ApiException(resp.text)
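# Hedged usage sketch (not part of the original module): the line and location
# values are made up, it requires BUS_API_URL to be set, and it assumes the API
# returns objects carrying an 'id' field.
if __name__ == '__main__':
    line = post_line('42', '#ff0000', 'Northbound', 'Southbound')
    bus = post_bus(str(line['id']))
    post_location(str(bus['id']), 52.2297, 21.0122, 'direction_1')
    print(get_lines(), get_locations())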
# ------------------------------------------------------------------------------
import os
import os.path as osp
import sys
import numpy as np
from sklearn.svm import LinearSVC
from tqdm import tqdm
sys.path.append(osp.dirname(osp.dirname(osp.abspath(__file__))))
import torch.utils.data as data
from dataset.modelnet40 import LatentCapsulesModelNet40, LatentVectorsModelNet40
from utils.utils import create_save_folder, initialize_main
def main():
args, logdir = initialize_main()
# save_results folder
save_folder = create_save_folder(logdir, args["save_folder"])["save_folder"]
# datasets
args["train_root"] = os.path.join(logdir, args["train_root"])
args["test_root"] = os.path.join(logdir, args["test_root"])
# train loader
if args["train_root"].endswith(".npz"):
train_set = LatentVectorsModelNet40(args["train_root"]) # root is a npz file
elif args["train_root"].endswith(".h5"):
train_set = LatentCapsulesModelNet40(args["train_root"])
else:
raise Exception("Unknown dataset.")
train_loader = data.DataLoader(
train_set,
batch_size=args["batch_size"],
pin_memory=args["pin_memory"],
num_workers=args["num_workers"],
shuffle=args["shuffle"],
)
# test loader
if args["test_root"].endswith(".npz"):
test_set = LatentVectorsModelNet40(args["test_root"]) # root is a npz file
elif args["test_root"].endswith(".h5"):
test_set = LatentCapsulesModelNet40(args["test_root"])
else:
raise Exception("Unknown dataset.")
test_loader = data.DataLoader(
test_set,
batch_size=args["batch_size"],
pin_memory=args["pin_memory"],
num_workers=args["num_workers"],
shuffle=False,
)
# classifier
clf = LinearSVC()
# main
train_feature = np.zeros((1, args["input_size"]))
train_label = np.zeros((1, 1))
test_feature = np.zeros((1, args["input_size"]))
test_label = np.zeros((1, 1))
for batch_id, (latents, labels) in tqdm(enumerate(train_loader)):
train_label = np.concatenate((train_label, labels.numpy()), axis=None)
train_label = train_label.astype(int)
train_feature = np.concatenate((train_feature, latents.numpy()), axis=0)
if batch_id % 10 == 0:
print("add train batch: ", batch_id)
for batch_id, (latents, labels) in tqdm(enumerate(test_loader)):
test_label = np.concatenate((test_label, labels.numpy()), axis=None)
test_label = test_label.astype(int)
test_feature = np.concatenate((test_feature, latents.numpy()), axis=0)
if batch_id % 10 == 0:
print("add test batch: ", batch_id)
train_feature = train_feature[1:, :]
train_label = train_label[1:]
test_feature = test_feature[1:, :]
test_label = test_label[1:]
print("training the linear SVM.......")
clf.fit(train_feature, train_label)
confidence = clf.score(test_feature, test_label)
print("Accuracy: {} %".format(confidence * 100))
with open(os.path.join(save_folder, "accuracy.txt"), "a") as fp:
fp.write(str(confidence) + "\n")
if __name__ == "__main__":
main()
# ------------------------------------------------------------------------------
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('libretto', '0009_auto_20150423_2042'),
]
operations = [
migrations.AlterModelOptions(
name='pupitre',
options={'ordering': ('-soliste', 'partie'), 'verbose_name': 'pupitre', 'verbose_name_plural': 'pupitres'},
),
migrations.AlterField(
model_name='elementdeprogramme',
name='autre',
field=models.CharField(max_length=500, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='engagement',
name='individus',
field=models.ManyToManyField(related_name='engagements', to='libretto.Individu'),
preserve_default=True,
),
migrations.AlterField(
model_name='evenement',
name='circonstance',
field=models.CharField(max_length=500, verbose_name='circonstance', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evenement',
name='debut_date_approx',
field=models.CharField(help_text='Ne remplir que si la date est impr\xe9cise.', max_length=60, verbose_name='date (approximative)', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evenement',
name='debut_heure_approx',
field=models.CharField(help_text='Ne remplir que si l\u2019heure est impr\xe9cise.', max_length=30, verbose_name='heure (approximative)', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evenement',
name='debut_lieu_approx',
field=models.CharField(help_text='Ne remplir que si le lieu (ou institution) est impr\xe9cis(e).', max_length=50, verbose_name='lieu (approximatif)', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evenement',
name='fin_date_approx',
field=models.CharField(help_text='Ne remplir que si la date est impr\xe9cise.', max_length=60, verbose_name='date (approximative)', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evenement',
name='fin_heure_approx',
field=models.CharField(help_text='Ne remplir que si l\u2019heure est impr\xe9cise.', max_length=30, verbose_name='heure (approximative)', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evenement',
name='fin_lieu_approx',
field=models.CharField(help_text='Ne remplir que si le lieu (ou institution) est impr\xe9cis(e).', max_length=50, verbose_name='lieu (approximatif)', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='individu',
name='deces_date_approx',
field=models.CharField(help_text='Ne remplir que si la date est impr\xe9cise.', max_length=60, verbose_name='date (approximative)', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='individu',
name='deces_lieu_approx',
field=models.CharField(help_text='Ne remplir que si le lieu (ou institution) est impr\xe9cis(e).', max_length=50, verbose_name='lieu (approximatif)', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='individu',
name='naissance_date_approx',
field=models.CharField(help_text='Ne remplir que si la date est impr\xe9cise.', max_length=60, verbose_name='date (approximative)', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='individu',
name='naissance_lieu_approx',
field=models.CharField(help_text='Ne remplir que si le lieu (ou institution) est impr\xe9cis(e).', max_length=50, verbose_name='lieu (approximatif)', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='oeuvre',
name='creation_date_approx',
field=models.CharField(help_text='Ne remplir que si la date est impr\xe9cise.', max_length=60, verbose_name='date (approximative)', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='oeuvre',
name='creation_heure_approx',
field=models.CharField(help_text='Ne remplir que si l\u2019heure est impr\xe9cise.', max_length=30, verbose_name='heure (approximative)', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='oeuvre',
name='creation_lieu_approx',
field=models.CharField(help_text='Ne remplir que si le lieu (ou institution) est impr\xe9cis(e).', max_length=50, verbose_name='lieu (approximatif)', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='partie',
name='professions',
field=models.ManyToManyField(related_name='parties', to='libretto.Profession', blank=True, help_text='La ou les profession(s) capable(s) de jouer ce r\xf4le ou cet instrument.', null=True, verbose_name='occupations'),
preserve_default=True,
),
migrations.AlterField(
model_name='personnel',
name='engagements',
field=models.ManyToManyField(related_name='personnels', to='libretto.Engagement'),
preserve_default=True,
),
migrations.AlterField(
model_name='pupitre',
name='quantite_max',
field=models.IntegerField(default=1, verbose_name='quantit\xe9 maximale'),
preserve_default=True,
),
migrations.AlterField(
model_name='pupitre',
name='quantite_min',
field=models.IntegerField(default=1, verbose_name='quantit\xe9 minimale'),
preserve_default=True,
),
migrations.AlterField(
model_name='source',
name='date_approx',
field=models.CharField(help_text='Ne remplir que si la date est impr\xe9cise.', max_length=60, verbose_name='date (approximative)', blank=True),
preserve_default=True,
),
]
| [
"django.db.migrations.AlterModelOptions",
"django.db.models.ManyToManyField",
"django.db.models.CharField",
"django.db.models.IntegerField"
] | [((187, 348), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""pupitre"""', 'options': "{'ordering': ('-soliste', 'partie'), 'verbose_name': 'pupitre', 'verbose_name_plural': 'pupitres'}"}), "(name='pupitre', options={'ordering': (\n '-soliste', 'partie'), 'verbose_name': 'pupitre', 'verbose_name_plural':\n 'pupitres'})\n", (215, 348), False, 'from django.db import models, migrations\n'), ((496, 540), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)', 'blank': '(True)'}), '(max_length=500, blank=True)\n', (512, 540), False, 'from django.db import models, migrations\n'), ((704, 778), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'related_name': '"""engagements"""', 'to': '"""libretto.Individu"""'}), "(related_name='engagements', to='libretto.Individu')\n", (726, 778), False, 'from django.db import models, migrations\n'), ((944, 1017), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)', 'verbose_name': '"""circonstance"""', 'blank': '(True)'}), "(max_length=500, verbose_name='circonstance', blank=True)\n", (960, 1017), False, 'from django.db import models, migrations\n'), ((1188, 1326), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Ne remplir que si la date est imprécise."""', 'max_length': '(60)', 'verbose_name': '"""date (approximative)"""', 'blank': '(True)'}), "(help_text='Ne remplir que si la date est imprécise.',\n max_length=60, verbose_name='date (approximative)', blank=True)\n", (1204, 1326), False, 'from django.db import models, migrations\n'), ((1497, 1636), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Ne remplir que si l’heure est imprécise."""', 'max_length': '(30)', 'verbose_name': '"""heure (approximative)"""', 'blank': '(True)'}), "(help_text='Ne remplir que si l’heure est imprécise.',\n max_length=30, verbose_name='heure (approximative)', blank=True)\n", (1513, 1636), False, 'from django.db import models, migrations\n'), ((1811, 1972), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Ne remplir que si le lieu (ou institution) est imprécis(e)."""', 'max_length': '(50)', 'verbose_name': '"""lieu (approximatif)"""', 'blank': '(True)'}), "(help_text=\n 'Ne remplir que si le lieu (ou institution) est imprécis(e).',\n max_length=50, verbose_name='lieu (approximatif)', blank=True)\n", (1827, 1972), False, 'from django.db import models, migrations\n'), ((2135, 2273), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Ne remplir que si la date est imprécise."""', 'max_length': '(60)', 'verbose_name': '"""date (approximative)"""', 'blank': '(True)'}), "(help_text='Ne remplir que si la date est imprécise.',\n max_length=60, verbose_name='date (approximative)', blank=True)\n", (2151, 2273), False, 'from django.db import models, migrations\n'), ((2442, 2581), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Ne remplir que si l’heure est imprécise."""', 'max_length': '(30)', 'verbose_name': '"""heure (approximative)"""', 'blank': '(True)'}), "(help_text='Ne remplir que si l’heure est imprécise.',\n max_length=30, verbose_name='heure (approximative)', blank=True)\n", (2458, 2581), False, 'from django.db import models, migrations\n'), ((2754, 2915), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Ne remplir que si le lieu (ou institution) est imprécis(e)."""', 'max_length': '(50)', 'verbose_name': '"""lieu (approximatif)"""', 'blank': '(True)'}), "(help_text=\n 'Ne remplir que si le lieu (ou institution) est imprécis(e).',\n max_length=50, verbose_name='lieu (approximatif)', blank=True)\n", (2770, 2915), False, 'from django.db import models, migrations\n'), ((3079, 3217), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Ne remplir que si la date est imprécise."""', 'max_length': '(60)', 'verbose_name': '"""date (approximative)"""', 'blank': '(True)'}), "(help_text='Ne remplir que si la date est imprécise.',\n max_length=60, verbose_name='date (approximative)', blank=True)\n", (3095, 3217), False, 'from django.db import models, migrations\n'), ((3386, 3547), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Ne remplir que si le lieu (ou institution) est imprécis(e)."""', 'max_length': '(50)', 'verbose_name': '"""lieu (approximatif)"""', 'blank': '(True)'}), "(help_text=\n 'Ne remplir que si le lieu (ou institution) est imprécis(e).',\n max_length=50, verbose_name='lieu (approximatif)', blank=True)\n", (3402, 3547), False, 'from django.db import models, migrations\n'), ((3715, 3853), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Ne remplir que si la date est imprécise."""', 'max_length': '(60)', 'verbose_name': '"""date (approximative)"""', 'blank': '(True)'}), "(help_text='Ne remplir que si la date est imprécise.',\n max_length=60, verbose_name='date (approximative)', blank=True)\n", (3731, 3853), False, 'from django.db import models, migrations\n'), ((4026, 4187), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Ne remplir que si le lieu (ou institution) est imprécis(e)."""', 'max_length': '(50)', 'verbose_name': '"""lieu (approximatif)"""', 'blank': '(True)'}), "(help_text=\n 'Ne remplir que si le lieu (ou institution) est imprécis(e).',\n max_length=50, verbose_name='lieu (approximatif)', blank=True)\n", (4042, 4187), False, 'from django.db import models, migrations\n'), ((4352, 4490), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Ne remplir que si la date est imprécise."""', 'max_length': '(60)', 'verbose_name': '"""date (approximative)"""', 'blank': '(True)'}), "(help_text='Ne remplir que si la date est imprécise.',\n max_length=60, verbose_name='date (approximative)', blank=True)\n", (4368, 4490), False, 'from django.db import models, migrations\n'), ((4661, 4800), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Ne remplir que si l’heure est imprécise."""', 'max_length': '(30)', 'verbose_name': '"""heure (approximative)"""', 'blank': '(True)'}), "(help_text='Ne remplir que si l’heure est imprécise.',\n max_length=30, verbose_name='heure (approximative)', blank=True)\n", (4677, 4800), False, 'from django.db import models, migrations\n'), ((4975, 5136), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Ne remplir que si le lieu (ou institution) est imprécis(e)."""', 'max_length': '(50)', 'verbose_name': '"""lieu (approximatif)"""', 'blank': '(True)'}), "(help_text=\n 'Ne remplir que si le lieu (ou institution) est imprécis(e).',\n max_length=50, verbose_name='lieu (approximatif)', blank=True)\n", (4991, 5136), False, 'from django.db import models, migrations\n'), ((5292, 5512), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'related_name': '"""parties"""', 'to': '"""libretto.Profession"""', 'blank': '(True)', 'help_text': '"""La ou les profession(s) capable(s) de jouer ce rôle ou cet instrument."""', 'null': '(True)', 'verbose_name': '"""occupations"""'}), "(related_name='parties', to='libretto.Profession',\n blank=True, help_text=\n 'La ou les profession(s) capable(s) de jouer ce rôle ou cet instrument.',\n null=True, verbose_name='occupations')\n", (5314, 5512), False, 'from django.db import models, migrations\n'), ((5667, 5742), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'related_name': '"""personnels"""', 'to': '"""libretto.Engagement"""'}), "(related_name='personnels', to='libretto.Engagement')\n", (5689, 5742), False, 'from django.db import models, migrations\n'), ((5906, 5970), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)', 'verbose_name': '"""quantité maximale"""'}), "(default=1, verbose_name='quantité maximale')\n", (5925, 5970), False, 'from django.db import models, migrations\n'), ((6137, 6201), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)', 'verbose_name': '"""quantité minimale"""'}), "(default=1, verbose_name='quantité minimale')\n", (6156, 6201), False, 'from django.db import models, migrations\n'), ((6366, 6504), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Ne remplir que si la date est imprécise."""', 'max_length': '(60)', 'verbose_name': '"""date (approximative)"""', 'blank': '(True)'}), "(help_text='Ne remplir que si la date est imprécise.',\n max_length=60, verbose_name='date (approximative)', blank=True)\n", (6382, 6504), False, 'from django.db import models, migrations\n')]
from unittest.mock import Mock
from pelican.tests.support import unittest
class Test_abbr_role(unittest.TestCase):
def call_it(self, text):
from pelican.rstdirectives import abbr_role
rawtext = text
lineno = 42
inliner = Mock(name='inliner')
nodes, system_messages = abbr_role(
'abbr', rawtext, text, lineno, inliner)
self.assertEqual(system_messages, [])
self.assertEqual(len(nodes), 1)
return nodes[0]
def test(self):
node = self.call_it("Abbr (Abbreviation)")
self.assertEqual(node.astext(), "Abbr")
self.assertEqual(node['explanation'], "Abbreviation")
def test_newlines_in_explanation(self):
node = self.call_it("CUL (See you\nlater)")
self.assertEqual(node.astext(), "CUL")
self.assertEqual(node['explanation'], "See you\nlater")
def test_newlines_in_abbr(self):
node = self.call_it("US of\nA \n (USA)")
self.assertEqual(node.astext(), "US of\nA")
self.assertEqual(node['explanation'], "USA")
| [
"pelican.rstdirectives.abbr_role",
"unittest.mock.Mock"
] | [((260, 280), 'unittest.mock.Mock', 'Mock', ([], {'name': '"""inliner"""'}), "(name='inliner')\n", (264, 280), False, 'from unittest.mock import Mock\n'), ((314, 363), 'pelican.rstdirectives.abbr_role', 'abbr_role', (['"""abbr"""', 'rawtext', 'text', 'lineno', 'inliner'], {}), "('abbr', rawtext, text, lineno, inliner)\n", (323, 363), False, 'from pelican.rstdirectives import abbr_role\n')] |
# Copyright 2011 <NAME> (<EMAIL>)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.db import models
from tmapi.constants import XSD_ANY_URI, XSD_STRING
from tmapi.exceptions import ModelConstraintException
from construct_fields import ConstructFields
from locator import Locator
from reifiable import Reifiable
from scoped import Scoped
from typed import Typed
from variant import Variant
class Name (ConstructFields, Reifiable, Scoped, Typed):
"""Represents a topic name item."""
topic = models.ForeignKey('Topic', related_name='names')
value = models.TextField()
class Meta:
app_label = 'tmapi'
def create_variant (self, value, scope, datatype=None):
"""Creates a `Variant` of this topic name with the specified
string `value` and `scope`.
If `datatype` is None, the newly created `Variant` will have
the datatype xsd:string.
The newly created `Variant` will contain all themes from the
parent name and the themes specified in `scope`.
:param value: the string value or locator which represents an IRI
:type value: string or `Locator`
:param scope: list of themes
:type scope: list of `Topic`s
:rtype: `Variant`
"""
if value is None:
raise ModelConstraintException(self, 'The value may not be None')
if not scope:
raise ModelConstraintException(self, 'The scope may not be None')
if type(scope) not in (type([]), type(())):
scope = [scope]
if scope == list(self.get_scope()):
raise ModelConstraintException(
self, 'The variant would be in the same scope as the parent')
if datatype is None:
if isinstance(value, Locator):
datatype = Locator(XSD_ANY_URI)
elif isinstance(value, str):
datatype = Locator(XSD_STRING)
if isinstance(value, Locator):
value = value.to_external_form()
variant = Variant(name=self, datatype=datatype.to_external_form(),
value=value, topic_map=self.topic_map)
variant.save()
for theme in scope:
variant.scope.add(theme)
return variant
def get_parent (self, proxy=None):
"""Returns the `Topic` to which this name belongs.
:param proxy: Django proxy model class
:type proxy: class
:rtype: `Topic` or `proxy`
"""
parent = self.topic
if proxy is not None:
parent = proxy.objects.get(pk=parent.id)
return parent
def get_value (self):
"""Returns the value of this name."""
return self.value
def get_variants (self):
"""Returns the variants defined for this name."""
return self.variants.all()
def set_value (self, value):
"""Sets the value of this name. The previous value is overridden."""
if value is None:
raise ModelConstraintException(self, 'The value may not be None')
self.value = value
self.save()
def __unicode__ (self):
return self.value
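# Illustrative use of create_variant (the Name instance 'name' and the theme
# topic 'abbrev_theme' are hypothetical, not defined in this module):
#   variant = name.create_variant('Bthvn.', scope=[abbrev_theme])
# Because no datatype is passed, the variant defaults to xsd:string.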
| [
"tmapi.exceptions.ModelConstraintException",
"django.db.models.TextField",
"locator.Locator",
"django.db.models.ForeignKey"
] | [((1017, 1065), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Topic"""'], {'related_name': '"""names"""'}), "('Topic', related_name='names')\n", (1034, 1065), False, 'from django.db import models\n'), ((1078, 1096), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1094, 1096), False, 'from django.db import models\n'), ((1812, 1871), 'tmapi.exceptions.ModelConstraintException', 'ModelConstraintException', (['self', '"""The value may not be None"""'], {}), "(self, 'The value may not be None')\n", (1836, 1871), False, 'from tmapi.exceptions import ModelConstraintException\n'), ((1912, 1971), 'tmapi.exceptions.ModelConstraintException', 'ModelConstraintException', (['self', '"""The scope may not be None"""'], {}), "(self, 'The scope may not be None')\n", (1936, 1971), False, 'from tmapi.exceptions import ModelConstraintException\n'), ((2114, 2204), 'tmapi.exceptions.ModelConstraintException', 'ModelConstraintException', (['self', '"""The variant would be in the same scope as the parent"""'], {}), "(self,\n 'The variant would be in the same scope as the parent')\n", (2138, 2204), False, 'from tmapi.exceptions import ModelConstraintException\n'), ((3517, 3576), 'tmapi.exceptions.ModelConstraintException', 'ModelConstraintException', (['self', '"""The value may not be None"""'], {}), "(self, 'The value may not be None')\n", (3541, 3576), False, 'from tmapi.exceptions import ModelConstraintException\n'), ((2317, 2337), 'locator.Locator', 'Locator', (['XSD_ANY_URI'], {}), '(XSD_ANY_URI)\n', (2324, 2337), False, 'from locator import Locator\n'), ((2406, 2425), 'locator.Locator', 'Locator', (['XSD_STRING'], {}), '(XSD_STRING)\n', (2413, 2425), False, 'from locator import Locator\n')] |
#!/usr/bin/env python
from __future__ import print_function
from platform import node
from sys import argv
from os import popen
from re import *
def matlabRange(threads):
limits = threads.split(":")
if len(limits)==1:
return range(int(limits[0]),int(limits[0])+1)
if len(limits)==2:
return range(int(limits[0]),int(limits[1])+1)
elif len(limits)==3:
return range(int(limits[0]),int(limits[2])+1,int(limits[1]))
else:
print("Badly formed range expression!")
exit()
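# Illustrative expansions of the MATLAB-style range syntax parsed above:
#   matlabRange("4")     -> range(4, 5)     i.e. [4]
#   matlabRange("1:4")   -> range(1, 5)     i.e. [1, 2, 3, 4]
#   matlabRange("1:2:7") -> range(1, 8, 2)  i.e. [1, 3, 5, 7]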
if (len(argv)!=5):
print("Wrong number of arguments!")
exit()
host = argv[1]
port = int(argv[2])
threadRange = matlabRange(argv[3])
duration = int(argv[4])
numberOfRequestsPattern = compile(r"Number of requests: (\d+)")
completedRequestsPattern = compile(r"Completed requests: (\d+)")
failedRequestsPattern = compile(r"Failed requests: (\d+)")
responseTimePattern = compile(r"Average response time for all requests: (\d+)")
completedTimePattern = compile(r"Average response time for completed requests: (\d+)")
failedTimePattern = compile(r"Average response time for failed requests: (\d+)")
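# The patterns above expect perftest to print lines shaped like the following
# (inferred from the regexes themselves, not from perftest documentation):
#   Number of requests: 1234
#   Completed requests: 1200
#   Average response time for completed requests: 57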
print("""%% Output from prefengine.py running perftest
%% Client host: %s
%% Server host: %s
%% Server port: %s
%% Duration: %s s
%% Column 1: Number of threads
%% Column 2: Number of requests
%% Column 3: Number of completed requests
%% Column 4: Number of failed requests
%% Column 5: Average response time for all requests (ms)
%% Column 6: Average response time for completed requests (ms)
%% Column 7: Average response time for failed requests (ms) """ % (node(), host, port, duration))
for numberOfThreads in threadRange:
command = "perftest %s %i %i %i" % (host, port, numberOfThreads, duration)
stdout = popen(command)
output = stdout.read()
numberOfRequests = numberOfRequestsPattern.search(output).group(1)
completedRequests = completedRequestsPattern.search(output).group(1)
failedRequests = failedRequestsPattern.search(output).group(1)
responseTime = responseTimePattern.search(output).group(1)
completedTimeMatch = completedTimePattern.search(output)
if (completedTimeMatch):
completedTime = completedTimeMatch.group(1)
else:
completedTime = "NaN"
failedTimeMatch = failedTimePattern.search(output)
if (failedTimeMatch):
failedTime = failedTimeMatch.group(1)
else:
failedTime = "NaN"
print("%i\t%s\t%s\t%s\t%s\t%s\t%s" % (numberOfThreads,
numberOfRequests,
completedRequests,
failedRequests,
responseTime,
completedTime,
failedTime))
| [
"os.popen",
"platform.node"
] | [((1777, 1791), 'os.popen', 'popen', (['command'], {}), '(command)\n', (1782, 1791), False, 'from os import popen\n'), ((1617, 1623), 'platform.node', 'node', ([], {}), '()\n', (1621, 1623), False, 'from platform import node\n')] |
import os
from typing import Dict, Optional
import tomlkit
def _get_project_meta(pyproj_path: str = "./pyproject.toml") -> Dict[str, str]:
if os.path.exists(pyproj_path):
with open(pyproj_path, "r") as pyproject:
file_contents = pyproject.read()
return tomlkit.parse(file_contents)["tool"]["poetry"]
else:
return {}
pkg_meta: Dict[str, str] = _get_project_meta()
project: Optional[str] = pkg_meta.get("name")
version: Optional[str] = pkg_meta.get("version")
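# Minimal sketch of the pyproject.toml layout this expects (an assumed
# example, not shipped with this file):
#   [tool.poetry]
#   name = "example-project"
#   version = "0.1.0"
# With that file present, project == "example-project" and version == "0.1.0".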
| [
"tomlkit.parse",
"os.path.exists"
] | [((149, 176), 'os.path.exists', 'os.path.exists', (['pyproj_path'], {}), '(pyproj_path)\n', (163, 176), False, 'import os\n'), ((288, 316), 'tomlkit.parse', 'tomlkit.parse', (['file_contents'], {}), '(file_contents)\n', (301, 316), False, 'import tomlkit\n')] |
import datetime as dt
import logging
from collections import Counter, OrderedDict, defaultdict
from dataclasses import dataclass
from io import StringIO
from operator import itemgetter
input = """8
2017-01-03,16:18:50,AAPL,142.64
2017-01-03,16:25:22,AMD,13.86
2017-01-03,16:25:25,AAPL,141.64
2017-01-03,16:25:28,AMZN,845.61
2017-01-03,16:28:50,AAPL,140.64
2017-01-03,16:29:59,FB,140.34
2017-01-04,16:29:32,AAPL,143.64
2017-01-04,16:30:50,AAPL,141.64
"""
@dataclass
class Feed:
date: dt.date = None
time: dt.time = None
symbol: str = None
price: float = None
class DailyFeed:
"""The exchange starts trading daily at 09:30:00 hrsand closes at 16:30:00 hrsevery day
Any quotes outside this time window are invalid and will be ignored.
Input:
StringIO
"""
def __init__(self, input):
self._input_data = input
self._feeds = defaultdict(list)
self.input_total_rows = next(self._input_data).strip()
@staticmethod
def _is_valid_trading(time):
return dt.time(hour=9, minute=30) < time < dt.time(hour=16, minute=30)
@staticmethod
def _extract_feed_from_line(line):
splited = line.strip().split(",")
date = dt.date.fromisoformat(splited[0])
time = dt.time.fromisoformat(splited[1])
symbol = splited[2].upper()
price = float(splited[3])
return Feed(date, time, symbol, price)
def get_trading_day_feeds(self):
trading_day_feeds = None
for _ in range(int(self.input_total_rows)):
feed = self._extract_feed_from_line(self._input_data.readline())
if not self._is_valid_trading(feed.time):
continue
self._feeds[feed.date].append(feed)
if trading_day_feeds and trading_day_feeds[0].date != feed.date:
yield trading_day_feeds
trading_day_feeds = self._feeds[feed.date]
else:
# last trading day
yield trading_day_feeds
def _get_most_active_hour(trading_day_feed):
cnt = Counter(feed.time.hour for feed in trading_day_feed)
    # Highest count first; ties are broken by the earliest hour of the day.
    sorted_most_common = sorted(cnt.most_common(), key=lambda hc: (-hc[1], hc[0]))
    # [0][0] picks the hour out of e.g. [(12, 3), (16, 3)]
return sorted_most_common[0][0]
def _get_most_active_symbol(trading_day_feed):
cnt = Counter(feed.symbol for feed in trading_day_feed)
    # Highest count first; ties are broken alphabetically by symbol.
    sorted_most_common = sorted(cnt.most_common(), key=lambda sc: (-sc[1], sc[0]))
return sorted_most_common[0][0]
def _get_last_quote_time(trading_day_feed):
return trading_day_feed[-1].time
def _get_valid_quote_count(trading_day_feed):
return len(trading_day_feed)
def _get_price_statistics(trading_day_feed):
"""Calculate and print the following data for each Symbol as a comma-delimiter string.
Rows should be printed in alphabetical order based on Symbol
i. Time: Most recent timestamp for that Symbol in YYYY-mm-dd HH:MM:SS format
ii. Symbol
iii. High: Maximum Price that occurred for that Symbol during the trading day.
iv. Low: Minimum Price that occurred for that Symbol during the trading day
"""
stat = dict()
for feed in trading_day_feed:
symbol = feed.symbol
if stat.get(symbol):
prev_feed = stat[symbol]
prev_feed["time"] = max(prev_feed["time"], feed.time)
prev_feed["high"] = max(prev_feed["high"], feed.price)
prev_feed["low"] = min(prev_feed["low"], feed.price)
else:
stat[symbol] = {
"date": feed.date,
"time": feed.time,
"high": feed.price,
"low": feed.price,
}
# alphabetical order based on Symbol
price_statistics = OrderedDict.fromkeys(sorted(stat.keys()))
for symbol, symbol_info in stat.items():
price_statistics[symbol] = symbol_info
return price_statistics
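# Shape of the returned mapping for the sample feed above (illustrative):
#   OrderedDict([('AAPL', {'date': date(2017, 1, 3), 'time': time(16, 28, 50),
#                          'high': 142.64, 'low': 140.64}), ...])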
#
# Public API
#
def print_trading_summary(feed):
"""After exchange closes at 16:30:00 for each trading day, print
1. Trading Day = <Date>
2. Last Quote Time = <Time of the last quote received before 16:30:00>
3. Number of valid quotes received for the day
4. Most active hour (maximum valid quotes per hour received during the
trading day). If the maximum number of valid quotes per hour occurs for
more than one hour, pick the earliest hour of the day.
5. Most active symbol (maximum valid quotes per symbol received during
the trading day).If the maximum number of valid quotes per symbol
occurs for more than one symbol, pick the first symbol (sorted
alphabetically).
"""
for trading_day_feed in feed.get_trading_day_feeds():
print(
f"\n===Trading Day: {trading_day_feed[0].date}===\n"
f"Last Quote Time: {_get_last_quote_time(trading_day_feed)}\n"
f"Number of valid quotes: {_get_valid_quote_count(trading_day_feed)}\n"
f"Most active hour: {_get_most_active_hour(trading_day_feed)}\n"
f"Most active symbol: {_get_most_active_symbol(trading_day_feed)}\n"
)
print("Price Statistics:")
for symbol, price_statistics in _get_price_statistics(
trading_day_feed
).items():
print(
f"{price_statistics['date']} {price_statistics['time']},{symbol},{price_statistics['high']},{price_statistics['low']}"
)
if __name__ == "__main__":
logging.getLogger().setLevel(logging.DEBUG)
print_trading_summary(DailyFeed(StringIO(input)))
| [
"logging.getLogger",
"datetime.time",
"collections.Counter",
"collections.defaultdict",
"operator.itemgetter",
"io.StringIO",
"datetime.time.fromisoformat",
"datetime.date.fromisoformat"
] | [((2052, 2104), 'collections.Counter', 'Counter', (['(feed.time.hour for feed in trading_day_feed)'], {}), '(feed.time.hour for feed in trading_day_feed)\n', (2059, 2104), False, 'from collections import Counter, OrderedDict, defaultdict\n'), ((2326, 2375), 'collections.Counter', 'Counter', (['(feed.symbol for feed in trading_day_feed)'], {}), '(feed.symbol for feed in trading_day_feed)\n', (2333, 2375), False, 'from collections import Counter, OrderedDict, defaultdict\n'), ((884, 901), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (895, 901), False, 'from collections import Counter, OrderedDict, defaultdict\n'), ((1211, 1244), 'datetime.date.fromisoformat', 'dt.date.fromisoformat', (['splited[0]'], {}), '(splited[0])\n', (1232, 1244), True, 'import datetime as dt\n'), ((1260, 1293), 'datetime.time.fromisoformat', 'dt.time.fromisoformat', (['splited[1]'], {}), '(splited[1])\n', (1281, 1293), True, 'import datetime as dt\n'), ((1032, 1058), 'datetime.time', 'dt.time', ([], {'hour': '(9)', 'minute': '(30)'}), '(hour=9, minute=30)\n', (1039, 1058), True, 'import datetime as dt\n'), ((1068, 1095), 'datetime.time', 'dt.time', ([], {'hour': '(16)', 'minute': '(30)'}), '(hour=16, minute=30)\n', (1075, 1095), True, 'import datetime as dt\n'), ((2160, 2173), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (2170, 2173), False, 'from operator import itemgetter\n'), ((2431, 2444), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (2441, 2444), False, 'from operator import itemgetter\n'), ((5445, 5464), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (5462, 5464), False, 'import logging\n'), ((5525, 5540), 'io.StringIO', 'StringIO', (['input'], {}), '(input)\n', (5533, 5540), False, 'from io import StringIO\n')] |
import csv
from itertools import chain

from wiki_scrape_db import get_headlines
headlines = []
for year in list(range(1995, 2020)):
try:
headlines.append(get_headlines(year))
except Exception as e:
print(e)
headlines.append(get_headlines(1994, start_month='July'))
headlines.append(get_headlines(2020, end_month='November'))
with open('headlines.csv', 'w') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',')
csvwriter.writerow(['day', 'month', 'year', 'subject', 'event', 'text'])
csvwriter.writerows(list(chain(*headlines)))
| [
"wiki_scrape_db.get_headlines"
] | [((212, 251), 'wiki_scrape_db.get_headlines', 'get_headlines', (['(1994)'], {'start_month': '"""July"""'}), "(1994, start_month='July')\n", (225, 251), False, 'from wiki_scrape_db import get_headlines\n'), ((270, 311), 'wiki_scrape_db.get_headlines', 'get_headlines', (['(2020)'], {'end_month': '"""November"""'}), "(2020, end_month='November')\n", (283, 311), False, 'from wiki_scrape_db import get_headlines\n'), ((129, 148), 'wiki_scrape_db.get_headlines', 'get_headlines', (['year'], {}), '(year)\n', (142, 148), False, 'from wiki_scrape_db import get_headlines\n')] |
#!/usr/bin/env python
"""Utility and General purpose functions."""
import inspect
import warnings
import os
import re
import shatter.constants as cts
__author__ = '<NAME>'
def read_file(absolute_path):
"""
:param absolute_path: string path.
:return: list with lines of the file.
"""
return [line.rstrip('\n') for line in open(absolute_path)]
def delete_file(filename):
"""
:param filename: relative path to file.
"""
if os.path.exists(filename):
os.remove(filename)
return True
return False
def write_file(filename, the_list):
"""
:param filename: relative path to file.
:param the_list: new file information.
:return: void
"""
new_file = open(filename, 'a')
for item in the_list:
new_file.write("%s\n" % item)
def rewrite_file(filename, the_list):
"""
Delete and write again
:param filename: relative path to file.
:param the_list: new file information.
:return: void
"""
delete_file(filename)
write_file(filename, the_list)
def bit_in_string(string):
"""
    Returns whether the string contains a bit ('0' or '1').
:param string: arbitrary string
:return: boolean
"""
return ('0' in string) or ('1' in string)
def string_has_bits_for_and(str_bits, index):
"""
Returns true if finds a bit, before and after index.
:param index: int
:param str_bits: string
:return: boolean
"""
str_start = str_bits[:index]
str_end = str_bits[index:]
return index > 0 and bit_in_string(str_start) and bit_in_string(str_end)
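# Quick sanity checks (illustrative):
#   string_has_bits_for_and('1 and 0', 2) -> True   (a bit on each side of index 2)
#   string_has_bits_for_and('and 1', 0)   -> False  (index must be positive)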
def from_bool_to_bit(boolean):
"""
Conversion from boolean to bit
:param boolean: True or False
:return: '1' or '0'
"""
if boolean:
return "1"
else:
return "0"
def get_function_path(f):
"""
    Passes the internal func_code to an attribute called internal_code on the wrapper.
Then we call the wrapper attribute which throws metadata of the internal function, and gets the path.
:param f: function
:return: path
"""
    # Does the wrapper define the new attribute exposing the internal func_code? Otherwise fall back to the standard __code__ (no decorator).
code = f.internal_code if hasattr(f, cts.INTERNAL_CODE) else f.__code__
return code.co_filename
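# In other words: for an undecorated function this is simply
# f.__code__.co_filename; for a decorated one it reads the wrapped function's
# code object that the decorator exposed under the cts.INTERNAL_CODE attribute.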
def valid_function(f):
"""
Validates function. Returns warning if it is not a function or it doesn't have a decorator.
:param f: function
:return: passes, raises warning or raises TypeError
"""
if not hasattr(f, '__call__'):
raise TypeError('{} is not a valid function.'.format(f))
if not hasattr(f, cts.INTERNAL_CODE):
warnings.warn('Function {} has no decorator, reading can be harder!!!'.format(f.__name__), UserWarning)
return True
def get_function_line_number(f, file_code):
"""
Returns first line number for decorated and un-decorated methods. -1 if not found.
:param f: function.
:param file_code: the code as a list where each element is a line.
    :return: the line of the file (starting at zero), -1 if not found.
"""
for index, line in enumerate(file_code):
pattern = re.compile(cts.PARTICULAR_DEFINITION.pattern.format(name=f.__name__))
definition = re.search(pattern, line)
if definition:
return index
return -1
def get_function_inputs(f):
"""
Given function signatures gets the name of the function.
:param f: a callable function
:return: input names on a tuple.
"""
if hasattr(f, cts.INTERNAL_PARAMETERS):
# 'internal_parameters' is defined inside the solver() annotation, see solver.py for details.
return f.internal_parameters
else:
return f.__code__.co_varnames
def get_function_code(start, file_code):
"""
    Gets the source code of a function. We opt not to use the
    inspect package because it doesn't work with decorators.
:param start: the starting line number, of the function
:param file_code: the source file lines
:return: code.
"""
def not_space_nor_comment(line):
return len(line.strip()) > 0 and line.strip()[0] != '#'
def inside_function(line_indent, f_indent):
return len(line_indent) > len(f_indent) + 3
base_indent = re.search(cts.INDENT, file_code[start]).group()
end = start
for index, l in enumerate(file_code[start + 1:]):
l_indent = re.search(cts.INDENT, l).group()
# decides if adding to function is required: no black space or comment
if not_space_nor_comment(l):
if inside_function(l_indent, base_indent):
end = index + start + 2 # only add code if non-comment or empty spaces are inside function
else:
# end of function if found lower indent that is not a blank space and not a comment
break
return file_code[start:end]
def var_is_true(var):
"""
    Returns True if var is True, else False. Remember that 1 is an almost-True
    value but in this case it should return False.
:param var: any variable.
:return: boolean
"""
return var and isinstance(var, bool)
def var_is_false(var):
"""
    Returns True if var is False, else False. Remember that 0 is an almost-False
    value but in this case it should return False.
:param var: any variable.
:return: boolean
"""
return not var and isinstance(var, bool)
def has_true_key(d):
"""
Returns True only if it has a True value as key.
Has to be done this way because Python confuses '0' and '1' with False and True.
:param d: dict()
:return: boolean
"""
for key in d:
if var_is_true(key):
return True
return False
def has_return(implementation, definition):
"""
Finds if the implementation already has a return.
:param implementation: array with code implementation
:param definition: function definition
:return: Boolean
"""
last_line = implementation[-1]
indent = get_indent_from_definition(definition)
pattern = r"^{indent} return".format(indent=indent)
return re.search(pattern, last_line) is not None
def has_false_key(d):
"""
Returns True only if it has a False value as key.
Has to be done this way because Python confuses '0' and '1' with False and True.
:param d: dict()
:return: boolean
"""
for key in d:
if var_is_false(key):
return True
return False
def var_is_1(var):
"""
Boolean if var is equal to 1 and not True.
:param var: variable
:return: boolean
"""
if var and not isinstance(var, bool):
return True
return False
def var_is_0(var):
"""
Boolean if var is equal to 0 and not False.
:param var: variable
:return: boolean
"""
if not var and not isinstance(var, bool):
return True
return False
def get_indent_from_definition(definition):
"""
Uses regex to get the indent
:param definition: of a function
:return: indent as string
"""
return re.search(cts.INDENT, definition).group()
def is_function(f):
"""
Is it a function?
:param f: function
:return: boolean
"""
return hasattr(f, '__call__')
def remove_list_from_list(all_list, list_to_remove):
"""
:param all_list: original list
:param list_to_remove: elements that will be removed from the original list.
:return: subtracted list
"""
return [value for value in all_list if value not in list_to_remove]
def is_private_call():
"""
Searches in the stack for places where the package is. If there is something then the
function is being called privately from inside the package, otherwise it is called from outside the package.
:return: boolean
"""
p_name = '/{}/'.format(cts.PACKAGE_NAME)
p = re.match(r'^.*' + p_name, inspect.stack()[0].filename).group()
    # the number 2 in 'inspect.stack()[2:]' is because we are not looking inside the is_private_call() function nor one
    # level above it, where it's supposed to tell us whether that function is being called privately or publicly.
return any(re.match(p, frame.filename) is not None for frame in inspect.stack()[2:])
def name_in_frame(var, frame):
"""
Looks at the locals of the frame and searches in it for var.
:param var: variable to get name from.
:param frame: a inspect frame
:return: list with strings.
"""
callers_local_vars = frame.f_locals.items()
return [var_name for var_name, var_val in callers_local_vars if var_val is var]
def retrieve_name(var):
"""
Gets the name of var. Does it from the out most frame inner-wards.
:param var: variable to get name from.
:return: string
"""
for fi in reversed(inspect.stack()):
names = name_in_frame(var, fi.frame)
if len(names) > 0:
return names[0]
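# Illustrative use of retrieve_name (module-level variable assumed):
#   my_table = [1, 2, 3]
#   retrieve_name(my_table)  # -> 'my_table'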
| [
"os.path.exists",
"inspect.stack",
"shatter.constants.PARTICULAR_DEFINITION.pattern.format",
"re.match",
"os.remove",
"re.search"
] | [((463, 487), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (477, 487), False, 'import os\n'), ((497, 516), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (506, 516), False, 'import os\n'), ((3241, 3265), 're.search', 're.search', (['pattern', 'line'], {}), '(pattern, line)\n', (3250, 3265), False, 'import re\n'), ((6112, 6141), 're.search', 're.search', (['pattern', 'last_line'], {}), '(pattern, last_line)\n', (6121, 6141), False, 'import re\n'), ((8777, 8792), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (8790, 8792), False, 'import inspect\n'), ((3161, 3218), 'shatter.constants.PARTICULAR_DEFINITION.pattern.format', 'cts.PARTICULAR_DEFINITION.pattern.format', ([], {'name': 'f.__name__'}), '(name=f.__name__)\n', (3201, 3218), True, 'import shatter.constants as cts\n'), ((4257, 4296), 're.search', 're.search', (['cts.INDENT', 'file_code[start]'], {}), '(cts.INDENT, file_code[start])\n', (4266, 4296), False, 'import re\n'), ((7060, 7093), 're.search', 're.search', (['cts.INDENT', 'definition'], {}), '(cts.INDENT, definition)\n', (7069, 7093), False, 'import re\n'), ((4395, 4419), 're.search', 're.search', (['cts.INDENT', 'l'], {}), '(cts.INDENT, l)\n', (4404, 4419), False, 'import re\n'), ((8149, 8176), 're.match', 're.match', (['p', 'frame.filename'], {}), '(p, frame.filename)\n', (8157, 8176), False, 'import re\n'), ((8202, 8217), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (8215, 8217), False, 'import inspect\n'), ((7873, 7888), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (7886, 7888), False, 'import inspect\n')] |
#!/usr/bin/python
#
import argparse
import os
import pandas as pd
# NOTE: this first correction table is dead code; it is immediately
# overridden by the second 'corr' definition below.
corr = {'pH': [1, 1.02, 1.04, 1.06, 1.08, 1.1, 1.12],
'cl': [1.000000, 1.017857, 1.035714, 1.053571, 1.071429, 1.089286,
1.107143, 1.160714, 1.196429]}
corr = {'pH': [1, 1.02, 1.04, 1.06, 1.08, 1.1],
'cl': [1.000000, 1.017857, 1.035714, 1.053571, 1.071429, 1.089286,
1.107143, 1.160714, 1.196429]}
def main():
description = "make correction for dilution"
parser = argparse.ArgumentParser(description=description)
parser.add_argument('ffile',
help="a file x y1 y2 --> y1*corr y2*corr")
parser.add_argument('out',
help="destination directory")
parser.add_argument('buffer', nargs=2,
help="buffer values for y1 y2")
parser.add_argument("-t", "--titration-of", action="store", default="pH",
choices=["pH", "cl"], dest='titration_type')
args = parser.parse_args()
df = pd.read_csv(args.ffile)
df.y1 -= float(args.buffer[0])
df.y2 -= float(args.buffer[1])
df.y1 *= corr[args.titration_type]
df.y2 *= corr[args.titration_type]
if not os.path.isdir(args.out):
os.makedirs(args.out)
df.to_csv(os.path.join(args.out, args.ffile), index=False)
main()
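# Example invocation (hypothetical file names):
#   python correct.py titration.csv out_dir 0.012 0.015 -t pH
# Note: the CSV must have exactly as many rows as the chosen correction list,
# since pandas multiplies the y columns element-wise by that list.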
| [
"os.makedirs",
"argparse.ArgumentParser",
"pandas.read_csv",
"os.path.join",
"os.path.isdir"
] | [((490, 538), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'description'}), '(description=description)\n', (513, 538), False, 'import argparse\n'), ((1010, 1033), 'pandas.read_csv', 'pd.read_csv', (['args.ffile'], {}), '(args.ffile)\n', (1021, 1033), True, 'import pandas as pd\n'), ((1193, 1216), 'os.path.isdir', 'os.path.isdir', (['args.out'], {}), '(args.out)\n', (1206, 1216), False, 'import os\n'), ((1226, 1247), 'os.makedirs', 'os.makedirs', (['args.out'], {}), '(args.out)\n', (1237, 1247), False, 'import os\n'), ((1262, 1296), 'os.path.join', 'os.path.join', (['args.out', 'args.ffile'], {}), '(args.out, args.ffile)\n', (1274, 1296), False, 'import os\n')] |
from system import System
from src.basic.sessions.cmd_session import CmdSession
class CmdSystem(System):
def __init__(self):
super(CmdSystem, self).__init__()
@classmethod
def name(cls):
return 'cmd'
def new_session(self, agent, kb):
return CmdSession(agent, kb)
| [
"src.basic.sessions.cmd_session.CmdSession"
] | [((284, 305), 'src.basic.sessions.cmd_session.CmdSession', 'CmdSession', (['agent', 'kb'], {}), '(agent, kb)\n', (294, 305), False, 'from src.basic.sessions.cmd_session import CmdSession\n')] |
# Import necessary modules
from flask import render_template, request, redirect, url_for, flash, Blueprint
from flask_app import app, db
from flask_app.decorators import check_confirmed
from flask_app.models import *
from flask_login import login_required, current_user
from sqlalchemy import or_, and_, func
import random
import requests
# This file includes all main routes (no login related routes)
main = Blueprint('main', __name__)
# Define global variables
orgs_search_results_data = Organizations.query.filter(
Organizations.name.contains(""))
projects_search_results_data = Projects.query.filter(
Projects.description.contains(""))
proj_search_term = None
org_search_term = None
projectsPerPage = 10
orgsPerPage = 10
# Route for home page
@app.route('/')
@app.route('/index')
def index():
# Calculate amount of projects
projsLength = Projects.query.count()
# Calculate amount of organizations
orgsLength = Organizations.query.count()
# Return the static homepage
return render_template(
'index.html', title='Open Source Platform', numProjs=projsLength, numOrgs=orgsLength)
# Route for projects page, includes searching and favoriting projects
@app.route('/projects', methods=['GET', 'POST'])
def projects():
# Pull in the necessary global variables
global projects_search_results_data
global proj_search_term
global projectsPerPage
# Get the page number from request arguments
page = request.args.get('page', 1, type=int)
# Logic for processing project searches
if request.method == 'POST' and 'search_term' in request.form:
# Pull in search term from search form
proj_search_term=request.form['search_term']
# Basic search logic: Look in project parameters for search term
projects_search_results_data = Projects.query.filter(
or_(Projects.description.contains(proj_search_term),
Projects.name.contains(proj_search_term),
Projects.language.contains(proj_search_term),
Projects.owner.contains(proj_search_term),
Projects.source.contains(proj_search_term)))
# Pull in the item per page count, if necessary
if 'per_page' in request.form:
projectsPerPage = int(request.form['per_page'])
# Logic for sorting items by name
if 'sort_by' in request.form and request.form['sort_by'] == 'name':
# Logic for reverse alphabetic search
if 'reverse' in request.form:
projects_search_results_data = projects_search_results_data \
.order_by(Projects.name.desc())
# Logic for standard alphabetic search
else:
projects_search_results_data = projects_search_results_data \
.order_by(Projects.name)
# Logic for sorting items by creation date
elif 'sort_by' in request.form and request.form['sort_by'] == 'created':
# Logic for standard recent search
if 'reverse' in request.form:
projects_search_results_data = projects_search_results_data \
.filter(Projects.source == 'github') \
.order_by(func.date(
Projects.created_time))
# Logic for reverse recent search
else:
projects_search_results_data = projects_search_results_data \
.filter(Projects.source == 'github') \
.order_by(func.date(
Projects \
.created_time).desc())
# Logic for sorting items by number of forks
elif 'sort_by' in request.form and request.form['sort_by'] == 'forks':
# Logic for sorting items by fork count, least first
if 'reverse' in request.form:
projects_search_results_data = projects_search_results_data \
.filter(Projects.source == 'github') \
.order_by(Projects.forks)
# Logic for sorting items by fork count, greatest first
else:
projects_search_results_data = projects_search_results_data \
.filter(Projects.source == 'github') \
.order_by(Projects.forks.desc())
# Logic for sorting items by number of watchers
elif 'sort_by' in request.form \
and request.form['sort_by'] == 'watchers':
# Logic for sorting items by watcher count, least first
if 'reverse' in request.form:
projects_search_results_data = projects_search_results_data \
.filter(Projects.source == 'github') \
.order_by(Projects.watchers)
# Logic for sorting items by watcher count, greatest first
else:
projects_search_results_data = projects_search_results_data \
.filter(Projects.source == 'github') \
.order_by(
Projects.watchers.desc())
# Logic for sorting items by number of issues
elif 'sort_by' in request.form \
and request.form['sort_by'] == 'issues':
# Logic for sorting items by issues count, least first
if 'reverse' in request.form:
projects_search_results_data = projects_search_results_data \
.filter(Projects.source == 'github') \
.order_by(
Projects.open_issues)
# Logic for sorting items by issues count, greatest first
else:
projects_search_results_data = projects_search_results_data \
.filter(Projects.source == 'github') \
.order_by(
Projects \
.open_issues.desc())
# Reset the page after search
page = 1
# Refresh the page after searching
try:
# Render the page for logged-in users
return render_template(
'projects_search.html',
search_results=projects_search_results_data.paginate(
page=page,
per_page=projectsPerPage),
search_term=proj_search_term,
title='OSP | Projects',
favorites=Favorites.query.filter(
and_(Favorites.user_id==current_user.id,
Favorites.fav_type=='project')) \
.with_entities(
Favorites.fav_name))
# If the previous rendering fails, default to anonymous rendering
except:
# Render the page for anonymous users
return render_template(
'projects_search.html',
search_results=projects_search_results_data.paginate(
page=page,
per_page=projectsPerPage),
search_term=proj_search_term,
title='OSP | Projects',
favorites=[])
# Logic for favoriting a project
elif request.method == 'POST' and 'fav_name' in request.form:
# Look for whether this project is already favorited
favorite = Favorites.query.filter(
and_(Favorites.user_id==current_user.id,
Favorites.fav_name==request.form['fav_name'],
Favorites.fav_type=='project')).first()
# If the favorite does not already exist, continue adding the favorite
if favorite is None:
# Form the Favorites object
new_fav = Favorites(
id=random.randint(-2147483648, 2147483647),
user_id=current_user.id,
fav_name=request.form['fav_name'],
fav_type='project')
# Add and commit the Favorites object
db.session.add(new_fav)
db.session.commit()
# Flash add message
flash('Added ' + request.form['fav_name'] + ' to favorites')
# Refresh the page after favoriting
return redirect(request.path)
# Logic for unfavoriting a project
elif request.method == 'POST' and 'unfav_name' in request.form:
# Look for the Favorite going to be deleted
fav = db.session.query(Favorites).filter(
and_(Favorites.user_id==current_user.id,
Favorites.fav_name==request.form['unfav_name'],
Favorites.fav_type=='project')).first()
# Remove the Favorites record and commit
db.session.delete(fav)
db.session.commit()
# Flash delete message
flash('Removed ' + request.form['unfav_name'] + ' from favorites')
# Refresh the page after unfavoriting
return redirect(request.path)
# Render the page for logged-in users and anonymous users
else:
try:
# Render the page for logged-in users
return render_template(
'projects_search.html',
search_results=projects_search_results_data.paginate(
page=page,
per_page=projectsPerPage),
search_term=proj_search_term,
title='OSP | Projects',
favorites=Favorites.query.filter(
and_(Favorites.user_id==current_user.id,
Favorites.fav_type=='project')) \
.with_entities(
Favorites.fav_name))
# If the previous rendering fails, default to anonymous rendering
except:
# Render the page for anonymous users
return render_template(
'projects_search.html',
search_results=projects_search_results_data.paginate(
page=page,
per_page=projectsPerPage),
search_term=proj_search_term,
title='OSP | Projects',
favorites=[])
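# The favorite/unfavorite handling above is repeated on every page below; a
# minimal sketch of the shared toggle it implements (same models and id
# strategy as this file, the helper name itself is hypothetical):
#
#   def toggle_favorite(user_id, name, fav_type):
#       fav = Favorites.query.filter(
#           and_(Favorites.user_id == user_id,
#                Favorites.fav_name == name,
#                Favorites.fav_type == fav_type)).first()
#       if fav is None:
#           db.session.add(Favorites(
#               id=random.randint(-2147483648, 2147483647),
#               user_id=user_id, fav_name=name, fav_type=fav_type))
#       else:
#           db.session.delete(fav)
#       db.session.commit()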
# Route for the organizations page
@app.route('/organizations', methods=['GET', 'POST'])
def organizations():
    # Bring in the necessary global variables
global orgs_search_results_data
global org_search_term
global orgsPerPage
# Bring in the page number from the arguments
page = request.args.get('page', 1, type=int)
# Logic for searching orgs
if request.method == 'POST' and 'search_term' in request.form:
# Bring in the search term from the form
org_search_term=request.form['search_term']
# Perform search by looking for search term in org names
orgs_search_results_data = Organizations.query.filter(
Organizations.name.contains(org_search_term))
# Pull in the item count per page if necessary
if 'per_page' in request.form:
orgsPerPage = int(request.form['per_page'])
# Logic for search by name
if 'name' in request.form:
# Logic for reverse alphabetic search
if 'reverse' in request.form:
orgs_search_results_data = orgs_search_results_data.order_by(
Organizations.name.desc())
# Logic for standard alphabetic search
else:
orgs_search_results_data = orgs_search_results_data.order_by(
Organizations.name)
# Reset the page after search
page = 1
# Calculate search results and org names
search_results=orgs_search_results_data.paginate(
page=page,
per_page=orgsPerPage)
orgNames = [item.name for item in search_results.items]
# Find the projects whose owners are in the search results
projects=Projects.query.filter(Projects.owner.in_(orgNames))
# Refresh the page after search
try:
# Render page for logged-in users
return render_template(
'organizations.html',
search_results=search_results,
search_term=org_search_term,
projects=projects,
title='OSP | Organizations',
favorites=Favorites.query.filter(
and_(Favorites.user_id==current_user.id,
Favorites.fav_type=='org')) \
.with_entities(Favorites.fav_name))
# If the above rendering fails, render the anonymous page
except:
            # Render page for anonymous users
return render_template(
'organizations.html',
search_results=search_results,
search_term=org_search_term,
projects=projects,
title='OSP | Organizations',
favorites=[])
# Logic for favoriting an org
if request.method == 'POST' and 'fav_name' in request.form:
# Check if the favorites object already exists
favorite=Favorites.query.filter(
and_(Favorites.user_id==current_user.id,
Favorites.fav_name==request.form['fav_name'],
Favorites.fav_type=='org')).first()
# If the favorite does not already exist, form the Favorites record
if favorite is None:
# Form the favorites record
new_fav = Favorites(
id=random.randint(-2147483648, 2147483647),
user_id=current_user.id,
fav_name=request.form['fav_name'],
fav_type='org')
# Add the favorite and commit it
db.session.add(new_fav)
db.session.commit()
# Flash add message
flash('Added ' + request.form['fav_name'] + ' to favorites')
# Refresh the page after searching
return redirect(request.path)
# Logic for unfavoriting an org
elif request.method == 'POST' and 'unfav_name' in request.form:
# Look for the existing favorites record
fav = db.session.query(Favorites).filter(
and_(Favorites.user_id==current_user.id,
Favorites.fav_name==request.form['unfav_name'],
Favorites.fav_type=='org')).first()
# delete the favorite and commit
db.session.delete(fav)
db.session.commit()
# Flash delete message
flash('Removed ' + request.form['unfav_name'] + ' from favorites')
# Refresh the page after unfavoriting
return redirect(request.path)
# Render the page for logged-in users and anonymous users
else:
# Calculate search results and org names
search_results=orgs_search_results_data.paginate(
page=page,
per_page=orgsPerPage)
orgNames = [item.name for item in search_results.items]
# Find the projects whose owners are in the search results
projects=Projects.query.filter(Projects.owner.in_(orgNames))
try:
# Render page for logged-in users
return render_template(
'organizations.html',
search_results=search_results,
search_term=org_search_term,
projects=projects,
title='OSP | Organizations',
favorites=Favorites.query.filter(
and_(Favorites.user_id==current_user.id,
Favorites.fav_type=='org')) \
.with_entities(Favorites.fav_name))
# If the above rendering fails, render the anonymous page
except:
            # Render page for anonymous users
return render_template(
'organizations.html',
search_results=search_results,
search_term=org_search_term,
projects=projects,
title='OSP | Organizations',
favorites=[])
# Route for individual project pages
@app.route('/project/<projectName>', methods=['GET', 'POST'])
def project(projectName):
# Logic for favoriting the project
if request.method == 'POST' and 'fav_name' in request.form:
# Look for whether the favorite record already exists
favorite=Favorites.query.filter(
and_(Favorites.user_id==current_user.id,
Favorites.fav_name==request.form['fav_name'],
Favorites.fav_type=='project')).first()
# If the Favorite record does not yet exist, create it
if favorite is None:
# Create the Favorites record
new_fav = Favorites(
id=random.randint(-2147483648, 2147483647),
user_id=current_user.id,
fav_name=request.form['fav_name'],
fav_type='project')
# Flash add message
flash('Added ' + request.form['fav_name'] + ' to favorites')
# Add and commit the Favorites record
db.session.add(new_fav)
db.session.commit()
# Refresh the page after favoriting
return redirect(request.path)
# Logic for unfavoriting the project
elif request.method == 'POST' and 'unfav_name' in request.form:
# Look for the existing Favorites record
fav = db.session.query(Favorites).filter(
and_(Favorites.user_id==current_user.id,
Favorites.fav_name==request.form['unfav_name'],
Favorites.fav_type=='project')).first()
# Delete the Favorite and commit
db.session.delete(fav)
db.session.commit()
# Flash delete message
flash('Removed ' + request.form['unfav_name'] + ' from favorites')
# Refresh the page after unfavoriting
return redirect(request.path)
# Try to render the page for this project
try:
# Retrieve the data for the project page
proj = Projects.query.filter(Projects.name==projectName).one()
orgName = Organizations.query.filter(
Organizations.name==proj.owner).one()
projs = Projects.query.filter(Projects.owner==orgName.name)
count = projs.count()
# Render the page for logged-in users
try:
return render_template(
'project.html',
project=proj,
projects=projs,
count=count,
title='OSP | ' + projectName,
favorites=Favorites.query.filter(
and_(Favorites.user_id==current_user.id,
Favorites.fav_type=='project')) \
.with_entities(Favorites.fav_name),
favCount=Favorites.query.filter(
Favorites.fav_name==projectName).count())
# Render the page for anonymous users
except:
return render_template(
'project.html',
project=proj,
projects=projs,
count=count,
title='OSP | ' + projectName,
favorites=[],
favCount=Favorites.query.filter(
Favorites.fav_name==projectName).count())
# If the project does not exist, render the 404 page
except:
return render_template('404.html', title='OSP | 404'), 404
# Route for individual organization pages
@app.route('/org/<orgName>', methods=['GET', 'POST'])
def organization(orgName):
# Logic for favoriting an org
if request.method == 'POST' and 'fav_name' in request.form:
        # Look for whether the org favorite already exists
favorite=Favorites.query.filter(
and_(Favorites.user_id==current_user.id,
Favorites.fav_name==request.form['fav_name'],
Favorites.fav_type=='org')).first()
# If the favorite does not exist, create it
if favorite is None:
# Create the Favorites record
new_fav = Favorites(
id=random.randint(-2147483648, 2147483647),
user_id=current_user.id,
fav_name=request.form['fav_name'],
fav_type='org')
# Add the favorite and commit it
db.session.add(new_fav)
db.session.commit()
# Flash add message
flash('Added ' + request.form['fav_name'] + ' to favorites')
# Refresh the page after favoriting
return redirect(request.path)
# Logic for unfavoriting the org
elif request.method == 'POST' and 'unfav_name' in request.form:
# Look for the existing Favorites record
fav = db.session.query(Favorites).filter(
and_(Favorites.user_id==current_user.id,
Favorites.fav_name==request.form['unfav_name'],
Favorites.fav_type=='org')).first()
# Delete the favorites record and commit
db.session.delete(fav)
db.session.commit()
# Flash delete message
flash('Removed ' + request.form['unfav_name'] + ' from favorites')
# Refresh the page after unfavoriting
return redirect(request.path)
# Logic for favoriting project on org page
elif request.method == 'POST' and 'proj_fav_name' in request.form:
        # Look for whether the project favorite already exists
favorite=Favorites.query.filter(
and_(Favorites.user_id==current_user.id,
Favorites.fav_name==request.form['proj_fav_name'],
Favorites.fav_type=='project')).first()
# If the favorite does not exist, create it
if favorite is None:
# Create the Favorites record
new_fav = Favorites(
id=random.randint(-2147483648, 2147483647),
user_id=current_user.id,
fav_name=request.form['proj_fav_name'],
fav_type='project')
# Add the favorite and commit it
db.session.add(new_fav)
db.session.commit()
# Flash add message
flash('Added ' + request.form['proj_fav_name'] + ' to favorites')
# Refresh the page after favoriting
return redirect(request.path)
# Logic for unfavoriting proj on org page
elif request.method == 'POST' and 'proj_unfav_name' in request.form:
# Look for the existing Favorites record
fav = db.session.query(Favorites).filter(
and_(Favorites.user_id==current_user.id,
Favorites.fav_name==request.form['proj_unfav_name'],
Favorites.fav_type=='project')).first()
# Delete the favorites record and commit
db.session.delete(fav)
db.session.commit()
# Flash delete message
flash('Removed ' + request.form['proj_unfav_name'] + ' from favorites')
# Refresh the page after unfavoriting
return redirect(request.path)
# Try to render the page for the org
try:
# Retrieve the necessary data
org = Organizations.query.filter(Organizations.name==orgName).one()
projs = Projects.query.filter(Projects.owner==orgName)
# Render the page for logged-in users
try:
return render_template(
'organization.html',
organization=org,
projects=projs,
title='OSP | ' + orgName,
favorites=Favorites.query.filter(
and_(Favorites.user_id==current_user.id,
Favorites.fav_type=='org')) \
.with_entities(Favorites.fav_name),
proj_favs=Favorites.query.filter(
and_(Favorites.user_id==current_user.id,
Favorites.fav_type=='project')) \
.with_entities(Favorites.fav_name),
favCount=Favorites.query.filter(
Favorites.fav_name==orgName).count())
# Render the page for anonymous users
        except Exception:
return render_template(
'organization.html',
organization=org,
projects=projs,
title='OSP | ' + orgName,
favorites=[],
proj_favs=[],
favCount=Favorites.query.filter(
Favorites.fav_name==orgName).count())
# If page rendering fails, render the 404 page
    except Exception:
return render_template('404.html', title='OSP | 404'), 404
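# Hedged illustration (added): the favorite/unfavorite handlers above expect
# a POST whose field name selects the action; the names below match the
# request.form keys used in the routes, everything else is illustrative:
#   <form method="POST">
#     <button name="fav_name" value="some-org">Favorite</button>
#   </form>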
# Route for profile page
@main.route('/profile', methods=['GET', 'POST'])
@login_required
@check_confirmed
def profile():
# Logic for unfavoriting the org
if request.method == 'POST' and 'unfav_name' in request.form:
# Look for the existing Favorites record
fav = db.session.query(Favorites).filter(
and_(Favorites.user_id==current_user.id,
Favorites.fav_name==request.form['unfav_name'],
Favorites.fav_type=='org')).first()
# Delete the favorites record and commit
db.session.delete(fav)
db.session.commit()
# Flash delete message
flash('Removed ' + request.form['unfav_name'] + ' from favorites')
# Refresh the page after unfavoriting
return redirect(request.path)
# Logic for unfavoriting proj on org page
elif request.method == 'POST' and 'proj_unfav_name' in request.form:
# Look for the existing Favorites record
fav = db.session.query(Favorites).filter(
and_(Favorites.user_id==current_user.id,
Favorites.fav_name==request.form['proj_unfav_name'],
Favorites.fav_type=='project')).first()
# Delete the favorites record and commit
db.session.delete(fav)
db.session.commit()
# Flash delete message
flash('Removed ' + request.form['proj_unfav_name'] + ' from favorites')
# Refresh the page after unfavoriting
return redirect(request.path)
# Find project favorites for current user
favProjs=Favorites.query.filter(
and_(Favorites.user_id==current_user.id,
Favorites.fav_type=='project')) \
.with_entities(
Favorites.fav_name)
# Find favorite projects
projects=db.session.query(Projects).filter(Projects.name.in_(favProjs))
# Find organization favorites for current user
favOrgs=Favorites.query.filter(
and_(Favorites.user_id==current_user.id,
Favorites.fav_type=='org')) \
.with_entities(
Favorites.fav_name)
# Find favorite organizations
organizations=db.session.query(
Organizations).filter(Organizations.name.in_(favOrgs))
# Render the profile for a logged in user
return render_template('profile.html',
name=current_user.name,
title='OSP | Profile',
projects=projects,
organizations=organizations,
favProjsCount=Favorites.query.filter(
and_(Favorites.user_id==current_user.id,
Favorites.fav_type=='project')).count(),
favOrgsCount=Favorites.query.filter(
and_(Favorites.user_id==current_user.id,
Favorites.fav_type=='org')).count(),
favorites=Favorites.query.filter(
Favorites.user_id==current_user.id),
subProjects=Projects.query.all())
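# Note (added): passing the Favorites query objects to Projects.name.in_(...)
# and Organizations.name.in_(...) lets SQLAlchemy render them as subqueries,
# so each favorites lookup resolves in a single database round trip.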
# Route for submitting projects
@main.route('/project-submit', methods=['GET', 'POST'])
@login_required
@check_confirmed
def project_submit():
# Logic for submitting github project
if request.method == 'POST':
# Pull in submit term from form
submit_term = request.form['submit_term']
# Use github api to access user's project
item = requests.get(
"https://api.github.com/repos/" + submit_term).json()
# Check if project was found
if "name" not in item:
flash('Project not found on github')
return redirect(url_for('main.project_submit'))
# Check if project is already in database
q = db.session.query(Projects.name).filter(Projects.name==item["name"])
# If the project exists, do not add it again
if db.session.query(q.exists()).scalar():
flash('Project already exists in database')
return redirect(url_for('project', projectName=item["name"]))
# Create the new Project record
new_proj = Projects(
name = item["name"],
url = item["html_url"],
description = item["description"],
source = "github",
owner = item["owner"]["login"],
owner_avatar = item["owner"]["avatar_url"],
language = item["language"],
created_time = item["created_at"],
last_updated = item["updated_at"],
forks = item["forks"],
watchers = item["watchers"],
open_issues = item["open_issues"],
owner_type = item["owner"]["type"])
# Add the project and commit
db.session.add(new_proj)
db.session.commit()
# Logic for adding favorite project
if 'favProj' in request.form:
# Create the Favorites record
new_fav = Favorites(
id=random.randint(-2147483648, 2147483647),
user_id=current_user.id,
fav_name=item["name"],
fav_type='project')
# Add the favorite and commit it
db.session.add(new_fav)
db.session.commit()
# Check if org is already in database
q = db.session.query(Organizations.name).filter(
Organizations.name==item["owner"]["login"])
# If the organization exists, do not add it again
if not db.session.query(q.exists()).scalar():
# Create the new Project record
new_org = Organizations(
name = item["owner"]["login"],
avatar = item["owner"]["avatar_url"],
owner_type = item["owner"]["type"],
url = "https://github.com/" + item["owner"]["login"])
# Add the org and commit
db.session.add(new_org)
db.session.commit()
# flash project and org added message
flash('Project and Organization Added!')
else:
# flash project added message
flash('Project Added!')
# Logic for adding favorite org
if 'favOrg' in request.form:
# Create the Favorites record
new_fav = Favorites(
id=random.randint(-2147483648, 2147483647),
user_id=current_user.id,
fav_name=item["owner"]["login"],
fav_type='org')
# Add the favorite and commit it
db.session.add(new_fav)
db.session.commit()
# Load the manage page
return redirect(url_for('main.project_submit'))
# Render project submit page
return render_template('project-submit.html', title='OSP | Project Submit')
# Route for 404 page
@app.errorhandler(404)
def page_not_found(e):
# Render the 404 page
return render_template('404.html', title='OSP | 404'), 404
| [
"flask.render_template",
"flask.request.args.get",
"random.randint",
"flask.flash",
"flask_app.app.route",
"flask_app.db.session.query",
"requests.get",
"flask.url_for",
"flask.redirect",
"flask_app.app.errorhandler",
"flask_app.db.session.commit",
"flask_app.db.session.add",
"sqlalchemy.fun... | [((410, 437), 'flask.Blueprint', 'Blueprint', (['"""main"""', '__name__'], {}), "('main', __name__)\n", (419, 437), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((759, 773), 'flask_app.app.route', 'app.route', (['"""/"""'], {}), "('/')\n", (768, 773), False, 'from flask_app import app, db\n'), ((775, 794), 'flask_app.app.route', 'app.route', (['"""/index"""'], {}), "('/index')\n", (784, 794), False, 'from flask_app import app, db\n'), ((1203, 1250), 'flask_app.app.route', 'app.route', (['"""/projects"""'], {'methods': "['GET', 'POST']"}), "('/projects', methods=['GET', 'POST'])\n", (1212, 1250), False, 'from flask_app import app, db\n'), ((10804, 10856), 'flask_app.app.route', 'app.route', (['"""/organizations"""'], {'methods': "['GET', 'POST']"}), "('/organizations', methods=['GET', 'POST'])\n", (10813, 10856), False, 'from flask_app import app, db\n'), ((16746, 16806), 'flask_app.app.route', 'app.route', (['"""/project/<projectName>"""'], {'methods': "['GET', 'POST']"}), "('/project/<projectName>', methods=['GET', 'POST'])\n", (16755, 16806), False, 'from flask_app import app, db\n'), ((20156, 20208), 'flask_app.app.route', 'app.route', (['"""/org/<orgName>"""'], {'methods': "['GET', 'POST']"}), "('/org/<orgName>', methods=['GET', 'POST'])\n", (20165, 20208), False, 'from flask_app import app, db\n'), ((32350, 32371), 'flask_app.app.errorhandler', 'app.errorhandler', (['(404)'], {}), '(404)\n', (32366, 32371), False, 'from flask_app import app, db\n'), ((1020, 1126), 'flask.render_template', 'render_template', (['"""index.html"""'], {'title': '"""Open Source Platform"""', 'numProjs': 'projsLength', 'numOrgs': 'orgsLength'}), "('index.html', title='Open Source Platform', numProjs=\n projsLength, numOrgs=orgsLength)\n", (1035, 1126), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((1469, 1506), 'flask.request.args.get', 'request.args.get', (['"""page"""', '(1)'], {'type': 'int'}), "('page', 1, type=int)\n", (1485, 1506), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((11072, 11109), 'flask.request.args.get', 'request.args.get', (['"""page"""', '(1)'], {'type': 'int'}), "('page', 1, type=int)\n", (11088, 11109), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((32258, 32326), 'flask.render_template', 'render_template', (['"""project-submit.html"""'], {'title': '"""OSP | Project Submit"""'}), "('project-submit.html', title='OSP | Project Submit')\n", (32273, 32326), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((14606, 14628), 'flask.redirect', 'redirect', (['request.path'], {}), '(request.path)\n', (14614, 14628), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((17880, 17902), 'flask.redirect', 'redirect', (['request.path'], {}), '(request.path)\n', (17888, 17902), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((21222, 21244), 'flask.redirect', 'redirect', (['request.path'], {}), '(request.path)\n', (21230, 21244), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((25841, 25863), 'flask_app.db.session.delete', 'db.session.delete', (['fav'], {}), '(fav)\n', (25858, 25863), False, 'from flask_app import app, db\n'), ((25872, 25891), 'flask_app.db.session.commit', 'db.session.commit', ([], {}), '()\n', 
(25889, 25891), False, 'from flask_app import app, db\n'), ((25932, 25998), 'flask.flash', 'flash', (["('Removed ' + request.form['unfav_name'] + ' from favorites')"], {}), "('Removed ' + request.form['unfav_name'] + ' from favorites')\n", (25937, 25998), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((26069, 26091), 'flask.redirect', 'redirect', (['request.path'], {}), '(request.path)\n', (26077, 26091), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((30216, 30240), 'flask_app.db.session.add', 'db.session.add', (['new_proj'], {}), '(new_proj)\n', (30230, 30240), False, 'from flask_app import app, db\n'), ((30249, 30268), 'flask_app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (30266, 30268), False, 'from flask_app import app, db\n'), ((32433, 32479), 'flask.render_template', 'render_template', (['"""404.html"""'], {'title': '"""OSP | 404"""'}), "('404.html', title='OSP | 404')\n", (32448, 32479), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((8865, 8887), 'flask.redirect', 'redirect', (['request.path'], {}), '(request.path)\n', (8873, 8887), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((14373, 14396), 'flask_app.db.session.add', 'db.session.add', (['new_fav'], {}), '(new_fav)\n', (14387, 14396), False, 'from flask_app import app, db\n'), ((14409, 14428), 'flask_app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (14426, 14428), False, 'from flask_app import app, db\n'), ((14474, 14534), 'flask.flash', 'flash', (["('Added ' + request.form['fav_name'] + ' to favorites')"], {}), "('Added ' + request.form['fav_name'] + ' to favorites')\n", (14479, 14534), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((15055, 15077), 'flask_app.db.session.delete', 'db.session.delete', (['fav'], {}), '(fav)\n', (15072, 15077), False, 'from flask_app import app, db\n'), ((15086, 15105), 'flask_app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (15103, 15105), False, 'from flask_app import app, db\n'), ((15146, 15212), 'flask.flash', 'flash', (["('Removed ' + request.form['unfav_name'] + ' from favorites')"], {}), "('Removed ' + request.form['unfav_name'] + ' from favorites')\n", (15151, 15212), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((15283, 15305), 'flask.redirect', 'redirect', (['request.path'], {}), '(request.path)\n', (15291, 15305), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((17628, 17688), 'flask.flash', 'flash', (["('Added ' + request.form['fav_name'] + ' to favorites')"], {}), "('Added ' + request.form['fav_name'] + ' to favorites')\n", (17633, 17688), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((17764, 17787), 'flask_app.db.session.add', 'db.session.add', (['new_fav'], {}), '(new_fav)\n', (17778, 17787), False, 'from flask_app import app, db\n'), ((17800, 17819), 'flask_app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (17817, 17819), False, 'from flask_app import app, db\n'), ((18338, 18360), 'flask_app.db.session.delete', 'db.session.delete', (['fav'], {}), '(fav)\n', (18355, 18360), False, 'from flask_app import app, db\n'), ((18369, 18388), 'flask_app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (18386, 18388), False, 'from flask_app import app, 
db\n'), ((18429, 18495), 'flask.flash', 'flash', (["('Removed ' + request.form['unfav_name'] + ' from favorites')"], {}), "('Removed ' + request.form['unfav_name'] + ' from favorites')\n", (18434, 18495), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((18566, 18588), 'flask.redirect', 'redirect', (['request.path'], {}), '(request.path)\n', (18574, 18588), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((20988, 21011), 'flask_app.db.session.add', 'db.session.add', (['new_fav'], {}), '(new_fav)\n', (21002, 21011), False, 'from flask_app import app, db\n'), ((21024, 21043), 'flask_app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (21041, 21043), False, 'from flask_app import app, db\n'), ((21089, 21149), 'flask.flash', 'flash', (["('Added ' + request.form['fav_name'] + ' to favorites')"], {}), "('Added ' + request.form['fav_name'] + ' to favorites')\n", (21094, 21149), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((21680, 21702), 'flask_app.db.session.delete', 'db.session.delete', (['fav'], {}), '(fav)\n', (21697, 21702), False, 'from flask_app import app, db\n'), ((21711, 21730), 'flask_app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (21728, 21730), False, 'from flask_app import app, db\n'), ((21771, 21837), 'flask.flash', 'flash', (["('Removed ' + request.form['unfav_name'] + ' from favorites')"], {}), "('Removed ' + request.form['unfav_name'] + ' from favorites')\n", (21776, 21837), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((21908, 21930), 'flask.redirect', 'redirect', (['request.path'], {}), '(request.path)\n', (21916, 21930), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((26550, 26572), 'flask_app.db.session.delete', 'db.session.delete', (['fav'], {}), '(fav)\n', (26567, 26572), False, 'from flask_app import app, db\n'), ((26581, 26600), 'flask_app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (26598, 26600), False, 'from flask_app import app, db\n'), ((26641, 26712), 'flask.flash', 'flash', (["('Removed ' + request.form['proj_unfav_name'] + ' from favorites')"], {}), "('Removed ' + request.form['proj_unfav_name'] + ' from favorites')\n", (26646, 26712), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((26783, 26805), 'flask.redirect', 'redirect', (['request.path'], {}), '(request.path)\n', (26791, 26805), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((27125, 27151), 'flask_app.db.session.query', 'db.session.query', (['Projects'], {}), '(Projects)\n', (27141, 27151), False, 'from flask_app import app, db\n'), ((27515, 27546), 'flask_app.db.session.query', 'db.session.query', (['Organizations'], {}), '(Organizations)\n', (27531, 27546), False, 'from flask_app import app, db\n'), ((29057, 29093), 'flask.flash', 'flash', (['"""Project not found on github"""'], {}), "('Project not found on github')\n", (29062, 29093), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((29409, 29452), 'flask.flash', 'flash', (['"""Project already exists in database"""'], {}), "('Project already exists in database')\n", (29414, 29452), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((30674, 30697), 'flask_app.db.session.add', 'db.session.add', 
(['new_fav'], {}), '(new_fav)\n', (30688, 30697), False, 'from flask_app import app, db\n'), ((30710, 30729), 'flask_app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (30727, 30729), False, 'from flask_app import app, db\n'), ((31378, 31401), 'flask_app.db.session.add', 'db.session.add', (['new_org'], {}), '(new_org)\n', (31392, 31401), False, 'from flask_app import app, db\n'), ((31414, 31433), 'flask_app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (31431, 31433), False, 'from flask_app import app, db\n'), ((31497, 31537), 'flask.flash', 'flash', (['"""Project and Organization Added!"""'], {}), "('Project and Organization Added!')\n", (31502, 31537), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((31619, 31642), 'flask.flash', 'flash', (['"""Project Added!"""'], {}), "('Project Added!')\n", (31624, 31642), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((32049, 32072), 'flask_app.db.session.add', 'db.session.add', (['new_fav'], {}), '(new_fav)\n', (32063, 32072), False, 'from flask_app import app, db\n'), ((32085, 32104), 'flask_app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (32102, 32104), False, 'from flask_app import app, db\n'), ((32173, 32203), 'flask.url_for', 'url_for', (['"""main.project_submit"""'], {}), "('main.project_submit')\n", (32180, 32203), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((8631, 8654), 'flask_app.db.session.add', 'db.session.add', (['new_fav'], {}), '(new_fav)\n', (8645, 8654), False, 'from flask_app import app, db\n'), ((8667, 8686), 'flask_app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (8684, 8686), False, 'from flask_app import app, db\n'), ((8732, 8792), 'flask.flash', 'flash', (["('Added ' + request.form['fav_name'] + ' to favorites')"], {}), "('Added ' + request.form['fav_name'] + ' to favorites')\n", (8737, 8792), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((9332, 9354), 'flask_app.db.session.delete', 'db.session.delete', (['fav'], {}), '(fav)\n', (9349, 9354), False, 'from flask_app import app, db\n'), ((9363, 9382), 'flask_app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (9380, 9382), False, 'from flask_app import app, db\n'), ((9423, 9489), 'flask.flash', 'flash', (["('Removed ' + request.form['unfav_name'] + ' from favorites')"], {}), "('Removed ' + request.form['unfav_name'] + ' from favorites')\n", (9428, 9489), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((9560, 9582), 'flask.redirect', 'redirect', (['request.path'], {}), '(request.path)\n', (9568, 9582), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((13321, 13489), 'flask.render_template', 'render_template', (['"""organizations.html"""'], {'search_results': 'search_results', 'search_term': 'org_search_term', 'projects': 'projects', 'title': '"""OSP | Organizations"""', 'favorites': '[]'}), "('organizations.html', search_results=search_results,\n search_term=org_search_term, projects=projects, title=\n 'OSP | Organizations', favorites=[])\n", (13336, 13489), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((20060, 20106), 'flask.render_template', 'render_template', (['"""404.html"""'], {'title': '"""OSP | 404"""'}), "('404.html', title='OSP | 404')\n", (20075, 20106), False, 'from flask import 
render_template, request, redirect, url_for, flash, Blueprint\n'), ((22973, 22995), 'flask.redirect', 'redirect', (['request.path'], {}), '(request.path)\n', (22981, 22995), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((25233, 25279), 'flask.render_template', 'render_template', (['"""404.html"""'], {'title': '"""OSP | 404"""'}), "('404.html', title='OSP | 404')\n", (25248, 25279), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((26898, 26973), 'sqlalchemy.and_', 'and_', (['(Favorites.user_id == current_user.id)', "(Favorites.fav_type == 'project')"], {}), "(Favorites.user_id == current_user.id, Favorites.fav_type == 'project')\n", (26902, 26973), False, 'from sqlalchemy import or_, and_, func\n'), ((27284, 27355), 'sqlalchemy.and_', 'and_', (['(Favorites.user_id == current_user.id)', "(Favorites.fav_type == 'org')"], {}), "(Favorites.user_id == current_user.id, Favorites.fav_type == 'org')\n", (27288, 27355), False, 'from sqlalchemy import or_, and_, func\n'), ((28896, 28955), 'requests.get', 'requests.get', (["('https://api.github.com/repos/' + submit_term)"], {}), "('https://api.github.com/repos/' + submit_term)\n", (28908, 28955), False, 'import requests\n'), ((29122, 29152), 'flask.url_for', 'url_for', (['"""main.project_submit"""'], {}), "('main.project_submit')\n", (29129, 29152), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((29225, 29256), 'flask_app.db.session.query', 'db.session.query', (['Projects.name'], {}), '(Projects.name)\n', (29241, 29256), False, 'from flask_app import app, db\n'), ((29481, 29525), 'flask.url_for', 'url_for', (['"""project"""'], {'projectName': "item['name']"}), "('project', projectName=item['name'])\n", (29488, 29525), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((30801, 30837), 'flask_app.db.session.query', 'db.session.query', (['Organizations.name'], {}), '(Organizations.name)\n', (30817, 30837), False, 'from flask_app import app, db\n'), ((13794, 13918), 'sqlalchemy.and_', 'and_', (['(Favorites.user_id == current_user.id)', "(Favorites.fav_name == request.form['fav_name'])", "(Favorites.fav_type == 'org')"], {}), "(Favorites.user_id == current_user.id, Favorites.fav_name == request.\n form['fav_name'], Favorites.fav_type == 'org')\n", (13798, 13918), False, 'from sqlalchemy import or_, and_, func\n'), ((14150, 14189), 'random.randint', 'random.randint', (['(-2147483648)', '(2147483647)'], {}), '(-2147483648, 2147483647)\n', (14164, 14189), False, 'import random\n'), ((16450, 16618), 'flask.render_template', 'render_template', (['"""organizations.html"""'], {'search_results': 'search_results', 'search_term': 'org_search_term', 'projects': 'projects', 'title': '"""OSP | Organizations"""', 'favorites': '[]'}), "('organizations.html', search_results=search_results,\n search_term=org_search_term, projects=projects, title=\n 'OSP | Organizations', favorites=[])\n", (16465, 16618), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((17053, 17181), 'sqlalchemy.and_', 'and_', (['(Favorites.user_id == current_user.id)', "(Favorites.fav_name == request.form['fav_name'])", "(Favorites.fav_type == 'project')"], {}), "(Favorites.user_id == current_user.id, Favorites.fav_name == request.\n form['fav_name'], Favorites.fav_type == 'project')\n", (17057, 17181), False, 'from sqlalchemy import or_, and_, func\n'), ((17414, 17453), 
'random.randint', 'random.randint', (['(-2147483648)', '(2147483647)'], {}), '(-2147483648, 2147483647)\n', (17428, 17453), False, 'import random\n'), ((20431, 20555), 'sqlalchemy.and_', 'and_', (['(Favorites.user_id == current_user.id)', "(Favorites.fav_name == request.form['fav_name'])", "(Favorites.fav_type == 'org')"], {}), "(Favorites.user_id == current_user.id, Favorites.fav_name == request.\n form['fav_name'], Favorites.fav_type == 'org')\n", (20435, 20555), False, 'from sqlalchemy import or_, and_, func\n'), ((20765, 20804), 'random.randint', 'random.randint', (['(-2147483648)', '(2147483647)'], {}), '(-2147483648, 2147483647)\n', (20779, 20804), False, 'import random\n'), ((22734, 22757), 'flask_app.db.session.add', 'db.session.add', (['new_fav'], {}), '(new_fav)\n', (22748, 22757), False, 'from flask_app import app, db\n'), ((22770, 22789), 'flask_app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (22787, 22789), False, 'from flask_app import app, db\n'), ((22835, 22900), 'flask.flash', 'flash', (["('Added ' + request.form['proj_fav_name'] + ' to favorites')"], {}), "('Added ' + request.form['proj_fav_name'] + ' to favorites')\n", (22840, 22900), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((23462, 23484), 'flask_app.db.session.delete', 'db.session.delete', (['fav'], {}), '(fav)\n', (23479, 23484), False, 'from flask_app import app, db\n'), ((23493, 23512), 'flask_app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (23510, 23512), False, 'from flask_app import app, db\n'), ((23561, 23632), 'flask.flash', 'flash', (["('Removed ' + request.form['proj_unfav_name'] + ' from favorites')"], {}), "('Removed ' + request.form['proj_unfav_name'] + ' from favorites')\n", (23566, 23632), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((23703, 23725), 'flask.redirect', 'redirect', (['request.path'], {}), '(request.path)\n', (23711, 23725), False, 'from flask import render_template, request, redirect, url_for, flash, Blueprint\n'), ((25624, 25750), 'sqlalchemy.and_', 'and_', (['(Favorites.user_id == current_user.id)', "(Favorites.fav_name == request.form['unfav_name'])", "(Favorites.fav_type == 'org')"], {}), "(Favorites.user_id == current_user.id, Favorites.fav_name == request.\n form['unfav_name'], Favorites.fav_type == 'org')\n", (25628, 25750), False, 'from sqlalchemy import or_, and_, func\n'), ((30447, 30486), 'random.randint', 'random.randint', (['(-2147483648)', '(2147483647)'], {}), '(-2147483648, 2147483647)\n', (30461, 30486), False, 'import random\n'), ((31816, 31855), 'random.randint', 'random.randint', (['(-2147483648)', '(2147483647)'], {}), '(-2147483648, 2147483647)\n', (31830, 31855), False, 'import random\n'), ((3366, 3398), 'sqlalchemy.func.date', 'func.date', (['Projects.created_time'], {}), '(Projects.created_time)\n', (3375, 3398), False, 'from sqlalchemy import or_, and_, func\n'), ((8036, 8164), 'sqlalchemy.and_', 'and_', (['(Favorites.user_id == current_user.id)', "(Favorites.fav_name == request.form['fav_name'])", "(Favorites.fav_type == 'project')"], {}), "(Favorites.user_id == current_user.id, Favorites.fav_name == request.\n form['fav_name'], Favorites.fav_type == 'project')\n", (8040, 8164), False, 'from sqlalchemy import or_, and_, func\n'), ((8399, 8438), 'random.randint', 'random.randint', (['(-2147483648)', '(2147483647)'], {}), '(-2147483648, 2147483647)\n', (8413, 8438), False, 'import random\n'), ((14846, 14972), 'sqlalchemy.and_', 'and_', 
(['(Favorites.user_id == current_user.id)', "(Favorites.fav_name == request.form['unfav_name'])", "(Favorites.fav_type == 'org')"], {}), "(Favorites.user_id == current_user.id, Favorites.fav_name == request.\n form['unfav_name'], Favorites.fav_type == 'org')\n", (14850, 14972), False, 'from sqlalchemy import or_, and_, func\n'), ((18125, 18255), 'sqlalchemy.and_', 'and_', (['(Favorites.user_id == current_user.id)', "(Favorites.fav_name == request.form['unfav_name'])", "(Favorites.fav_type == 'project')"], {}), "(Favorites.user_id == current_user.id, Favorites.fav_name == request.\n form['unfav_name'], Favorites.fav_type == 'project')\n", (18129, 18255), False, 'from sqlalchemy import or_, and_, func\n'), ((21463, 21589), 'sqlalchemy.and_', 'and_', (['(Favorites.user_id == current_user.id)', "(Favorites.fav_name == request.form['unfav_name'])", "(Favorites.fav_type == 'org')"], {}), "(Favorites.user_id == current_user.id, Favorites.fav_name == request.\n form['unfav_name'], Favorites.fav_type == 'org')\n", (21467, 21589), False, 'from sqlalchemy import or_, and_, func\n'), ((25576, 25603), 'flask_app.db.session.query', 'db.session.query', (['Favorites'], {}), '(Favorites)\n', (25592, 25603), False, 'from flask_app import app, db\n'), ((26324, 26459), 'sqlalchemy.and_', 'and_', (['(Favorites.user_id == current_user.id)', "(Favorites.fav_name == request.form['proj_unfav_name'])", "(Favorites.fav_type == 'project')"], {}), "(Favorites.user_id == current_user.id, Favorites.fav_name == request.\n form['proj_unfav_name'], Favorites.fav_type == 'project')\n", (26328, 26459), False, 'from sqlalchemy import or_, and_, func\n'), ((27985, 28060), 'sqlalchemy.and_', 'and_', (['(Favorites.user_id == current_user.id)', "(Favorites.fav_type == 'project')"], {}), "(Favorites.user_id == current_user.id, Favorites.fav_type == 'project')\n", (27989, 28060), False, 'from sqlalchemy import or_, and_, func\n'), ((28198, 28269), 'sqlalchemy.and_', 'and_', (['(Favorites.user_id == current_user.id)', "(Favorites.fav_type == 'org')"], {}), "(Favorites.user_id == current_user.id, Favorites.fav_type == 'org')\n", (28202, 28269), False, 'from sqlalchemy import or_, and_, func\n'), ((9111, 9241), 'sqlalchemy.and_', 'and_', (['(Favorites.user_id == current_user.id)', "(Favorites.fav_name == request.form['unfav_name'])", "(Favorites.fav_type == 'project')"], {}), "(Favorites.user_id == current_user.id, Favorites.fav_name == request.\n form['unfav_name'], Favorites.fav_type == 'project')\n", (9115, 9241), False, 'from sqlalchemy import or_, and_, func\n'), ((14798, 14825), 'flask_app.db.session.query', 'db.session.query', (['Favorites'], {}), '(Favorites)\n', (14814, 14825), False, 'from flask_app import app, db\n'), ((18077, 18104), 'flask_app.db.session.query', 'db.session.query', (['Favorites'], {}), '(Favorites)\n', (18093, 18104), False, 'from flask_app import app, db\n'), ((21415, 21442), 'flask_app.db.session.query', 'db.session.query', (['Favorites'], {}), '(Favorites)\n', (21431, 21442), False, 'from flask_app import app, db\n'), ((22147, 22280), 'sqlalchemy.and_', 'and_', (['(Favorites.user_id == current_user.id)', "(Favorites.fav_name == request.form['proj_fav_name'])", "(Favorites.fav_type == 'project')"], {}), "(Favorites.user_id == current_user.id, Favorites.fav_name == request.\n form['proj_fav_name'], Favorites.fav_type == 'project')\n", (22151, 22280), False, 'from sqlalchemy import or_, and_, func\n'), ((22490, 22529), 'random.randint', 'random.randint', (['(-2147483648)', '(2147483647)'], {}), 
'(-2147483648, 2147483647)\n', (22504, 22529), False, 'import random\n'), ((26276, 26303), 'flask_app.db.session.query', 'db.session.query', (['Favorites'], {}), '(Favorites)\n', (26292, 26303), False, 'from flask_app import app, db\n'), ((3738, 3770), 'sqlalchemy.func.date', 'func.date', (['Projects.created_time'], {}), '(Projects.created_time)\n', (3747, 3770), False, 'from sqlalchemy import or_, and_, func\n'), ((7160, 7235), 'sqlalchemy.and_', 'and_', (['(Favorites.user_id == current_user.id)', "(Favorites.fav_type == 'project')"], {}), "(Favorites.user_id == current_user.id, Favorites.fav_type == 'project')\n", (7164, 7235), False, 'from sqlalchemy import or_, and_, func\n'), ((9063, 9090), 'flask_app.db.session.query', 'db.session.query', (['Favorites'], {}), '(Favorites)\n', (9079, 9090), False, 'from flask_app import app, db\n'), ((13025, 13096), 'sqlalchemy.and_', 'and_', (['(Favorites.user_id == current_user.id)', "(Favorites.fav_type == 'org')"], {}), "(Favorites.user_id == current_user.id, Favorites.fav_type == 'org')\n", (13029, 13096), False, 'from sqlalchemy import or_, and_, func\n'), ((19295, 19370), 'sqlalchemy.and_', 'and_', (['(Favorites.user_id == current_user.id)', "(Favorites.fav_type == 'project')"], {}), "(Favorites.user_id == current_user.id, Favorites.fav_type == 'project')\n", (19299, 19370), False, 'from sqlalchemy import or_, and_, func\n'), ((23228, 23363), 'sqlalchemy.and_', 'and_', (['(Favorites.user_id == current_user.id)', "(Favorites.fav_name == request.form['proj_unfav_name'])", "(Favorites.fav_type == 'project')"], {}), "(Favorites.user_id == current_user.id, Favorites.fav_name == request.\n form['proj_unfav_name'], Favorites.fav_type == 'project')\n", (23232, 23363), False, 'from sqlalchemy import or_, and_, func\n'), ((24266, 24337), 'sqlalchemy.and_', 'and_', (['(Favorites.user_id == current_user.id)', "(Favorites.fav_type == 'org')"], {}), "(Favorites.user_id == current_user.id, Favorites.fav_type == 'org')\n", (24270, 24337), False, 'from sqlalchemy import or_, and_, func\n'), ((24484, 24559), 'sqlalchemy.and_', 'and_', (['(Favorites.user_id == current_user.id)', "(Favorites.fav_type == 'project')"], {}), "(Favorites.user_id == current_user.id, Favorites.fav_type == 'project')\n", (24488, 24559), False, 'from sqlalchemy import or_, and_, func\n'), ((16154, 16225), 'sqlalchemy.and_', 'and_', (['(Favorites.user_id == current_user.id)', "(Favorites.fav_type == 'org')"], {}), "(Favorites.user_id == current_user.id, Favorites.fav_type == 'org')\n", (16158, 16225), False, 'from sqlalchemy import or_, and_, func\n'), ((23180, 23207), 'flask_app.db.session.query', 'db.session.query', (['Favorites'], {}), '(Favorites)\n', (23196, 23207), False, 'from flask_app import app, db\n'), ((10112, 10187), 'sqlalchemy.and_', 'and_', (['(Favorites.user_id == current_user.id)', "(Favorites.fav_type == 'project')"], {}), "(Favorites.user_id == current_user.id, Favorites.fav_type == 'project')\n", (10116, 10187), False, 'from sqlalchemy import or_, and_, func\n')] |
from datetime import timedelta
import logging
from typing import Union
from pyschism.enums import (
IofHydroVariables,
IofDvdVariables,
IofWwmVariables,
IofGenVariables,
IofAgeVariables,
IofSedVariables,
IofEcoVariables,
IofIcmVariables,
IofCosVariables,
IofFibVariables,
IofSed2dVariables,
IofMarshVariables,
IofIceVariables,
IofAnaVariables,
SchoutType
)
_logger = logging.getLogger(__name__)
class OutputVariableDescriptor:
def __init__(self, iof_type, name, index):
self._iof_type = iof_type
self._name = name
self._index = index
def __get__(self, obj, val):
return bool(getattr(obj, f'_{self._iof_type}')[self._index])
def __set__(self, obj, val: bool):
if not isinstance(val, bool):
raise TypeError(f'Argument to {self._name} must be boolean, not '
f'type {type(val)}.')
iof = getattr(obj, f'_{self._iof_type}')
iof[self._index] = int(val)
class SurfaceOutputVars:
def __init__(self):
self._surface_output_vars = {
'iof_hydro': [(var.value, i) for i, var
in enumerate(IofHydroVariables)],
'iof_dvd': [(var.value, i) for i, var
in enumerate(IofDvdVariables)],
'iof_wwm': [(var.value, i) for i, var
in enumerate(IofWwmVariables)],
'iof_gen': [(var.value, i) for i, var
in enumerate(IofGenVariables)],
'iof_age': [(var.value, i) for i, var
in enumerate(IofAgeVariables)],
'iof_sed': [(var.value, i) for i, var
in enumerate(IofSedVariables)],
'iof_eco': [(var.value, i) for i, var
in enumerate(IofEcoVariables)],
'iof_icm': [(var.value, i) for i, var
in enumerate(IofIcmVariables)],
'iof_cos': [(var.value, i) for i, var
in enumerate(IofCosVariables)],
'iof_fib': [(var.value, i) for i, var
in enumerate(IofFibVariables)],
'iof_sed2d': [(var.value, i) for i, var
in enumerate(IofSed2dVariables)],
'iof_marsh': [(var.value, i) for i, var
in enumerate(IofMarshVariables)],
'iof_ice': [(var.value, i) for i, var
in enumerate(IofIceVariables)],
'iof_ana': [(var.value, i) for i, var
in enumerate(IofAnaVariables)],
}
def __get__(self, obj, val):
return self._surface_output_vars
class SchoutMeta(type):
surface_output_vars = SurfaceOutputVars()
def __new__(meta, name, bases, attrs):
for iof_type in meta.surface_output_vars.keys():
attrs[f'_{iof_type}'] = len(SchoutType[iof_type].value)*[0]
for iof_type, vardata in meta.surface_output_vars.items():
for name, index in vardata:
attrs[name] = OutputVariableDescriptor(iof_type, name, index)
attrs['surface_output_vars'] = meta.surface_output_vars
return type(name, bases, attrs)
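# Note (added): the metaclass above materialises one boolean descriptor per
# output variable, so setting any of those generated attributes to True
# flips the matching slot in the corresponding _iof_* list.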
class Nhot:
def __set__(self, obj, nhot: int):
if nhot not in [0, 1]:
raise ValueError(f"nhot must be 0 or 1, not {nhot}")
obj.__dict__['nhot'] = nhot
def __get__(self, obj, val):
return obj.__dict__.get('nhot')
class NhotWrite:
def __set__(self, obj, nhot_write: int):
obj.__dict__['nhot_write'] = nhot_write
obj.__dict__['nhot'] = 1
def __get__(self, obj, val):
return obj.__dict__.get('nhot_write')
class IoutSta:
def __set__(self, obj, iout_sta: int):
if iout_sta not in [0, 1]:
raise ValueError(f"iout_sta must be 0 or 1, not {iout_sta}")
obj.__dict__['iout_sta'] = iout_sta
def __get__(self, obj, val):
return obj.__dict__.get('iout_sta')
class NspoolSta:
def __set__(self, obj, nspool_sta: Union[int, timedelta]):
obj.__dict__['nspool_sta'] = nspool_sta
obj.__dict__['iout_sta'] = 1
def __get__(self, obj, val):
return obj.__dict__.get('nspool_sta')
class SCHOUT(metaclass=SchoutMeta):
""" Provides error checking implementation for SCHOUT group """
_iout_sta = IoutSta()
_nhot = Nhot()
nhot_write = NhotWrite()
nspool_sta = NspoolSta()
def __init__(self, dt, rnday, **outputs):
_logger.info('Initializing SCHOUT.')
for key, val in outputs.items():
setattr(self, key, val)
self._dt = dt.total_seconds() if isinstance(dt, timedelta) \
else float(dt)
self._rnday = rnday.total_seconds() / 3600. if isinstance(
rnday, timedelta) else float(rnday)
def __iter__(self):
for outvar in self._surface_output_vars:
yield outvar, getattr(self, outvar)
def __str__(self):
schout = ["&SCHOUT"]
if self.nhot_write is not None:
schout.append(f" nhot={self._nhot}")
schout.append(f" nhot_write={self.nhot_write}")
if self.nspool_sta is not None:
nspool_sta = self.nspool_sta
if isinstance(nspool_sta, timedelta):
nspool_sta = int(round(nspool_sta.total_seconds() / self._dt))
            if isinstance(nspool_sta, float):
                # A float is interpreted as hours; convert to time steps.
                # (round() on a timedelta raises TypeError, so convert to
                # seconds first.)
                nspool_sta = int(round(
                    timedelta(hours=nspool_sta).total_seconds() / self._dt))
if isinstance(nspool_sta, (int, float)):
if nspool_sta <= 0:
raise ValueError("nspool_sta must be positive.")
schout.append(f" iout_sta={self._iout_sta}")
schout.append(f" nspool_sta={nspool_sta}")
for var in dir(self):
if var.startswith('_iof'):
for i, state in enumerate(getattr(self, var)):
if state == 1:
schout.append(f' {var[1:]}({i+1})={state}')
schout.append('/')
return '\n'.join(schout)
def to_dict(self):
data = {}
if self.nhot_write is not None:
data['nhot'] = self._nhot
data['nhot_write'] = self.nhot_write
if self.nspool_sta is not None:
nspool_sta = self.nspool_sta
if isinstance(nspool_sta, timedelta):
nspool_sta = int(round(nspool_sta.total_seconds() / self._dt))
            if isinstance(nspool_sta, float):
                # A float is interpreted as hours; convert to time steps.
                nspool_sta = int(round(
                    timedelta(hours=nspool_sta).total_seconds() / self._dt))
if isinstance(nspool_sta, (int, float)):
if nspool_sta <= 0:
raise ValueError("nspool_sta must be positive.")
data['iout_sta'] = self._iout_sta
data['nspool_sta'] = nspool_sta
for var in dir(self):
if var.startswith('_iof'):
_var = var[1:]
data[_var] = len(getattr(self, var)) * [0]
for i, state in enumerate(getattr(self, var)):
if state == 1:
data[_var][i] = state
return data
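# --- Hedged usage sketch (added; not part of the original module) ---
# Assumes a 150 s timestep and a two-day run; only attributes defined in
# this file are used, so no enum variable names are assumed:
#
#     schout = SCHOUT(dt=timedelta(seconds=150), rnday=timedelta(days=2),
#                     nhot_write=288, nspool_sta=timedelta(minutes=30))
#     print(schout)   # renders the "&SCHOUT ... /" namelist block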
| [
"logging.getLogger",
"datetime.timedelta"
] | [((430, 457), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (447, 457), False, 'import logging\n'), ((5495, 5522), 'datetime.timedelta', 'timedelta', ([], {'hours': 'nspool_sta'}), '(hours=nspool_sta)\n', (5504, 5522), False, 'from datetime import timedelta\n'), ((6589, 6616), 'datetime.timedelta', 'timedelta', ([], {'hours': 'nspool_sta'}), '(hours=nspool_sta)\n', (6598, 6616), False, 'from datetime import timedelta\n')] |
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod, abstractproperty
from collections import Sequence
import numpy as np
class MatrixBase(object):
__metaclass__ = ABCMeta
_base_tags = set()
@abstractmethod
def __init__(self, backend, ioshape, iopacking, tags):
self.backend = backend
self.ioshape = ioshape
self.iopacking = iopacking
self.tags = self._base_tags | tags
@abstractmethod
def get(self):
return self._unpack(self._get())
@abstractproperty
def nbytes(self):
"""Size in bytes"""
pass
@property
def aos_shape(self):
return self.backend.aos_shape(self.ioshape, self.iopacking)
@property
def soa_shape(self):
return self.backend.soa_shape(self.ioshape, self.iopacking)
class Matrix(MatrixBase):
"""Matrix abstract base class
"""
_base_tags = {'dense'}
@abstractmethod
def set(self, buf):
pass
def rslice(self, p, q):
return self.backend.matrix_rslice(self, p, q)
class MatrixRSlice(object):
"""Slice of a matrix abstract base class"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, backend, mat, p, q):
self.backend = backend
self.parent = mat
if p < 0 or q > mat.nrow or q < p:
raise ValueError('Invalid row slice')
self.nrow = q - p
self.ncol = mat.ncol
self.tags = mat.tags | {'slice'}
@property
def nbytes(self):
return 0
class ConstMatrix(MatrixBase):
"""Constant matrix abstract base class"""
_base_tags = {'const', 'dense'}
class BlockDiagMatrix(MatrixBase):
_base_tags = {'const', 'blockdiag'}
def __init__(self, backend, initval, brange, iopacking, tags):
super(BlockDiagMatrix, self).__init__(backend, initval.shape,
iopacking, tags)
self.initval = initval
# Compact down to a Matrix and extract the blocks
mat = backend.compact_arr(initval, iopacking)
self.blocks = [mat[ri:rj,ci:cj] for ri, rj, ci, cj in brange]
self.ranges = brange
def get(self):
return self.initval
@property
def nbytes(self):
return 0
class MPIMatrix(Matrix):
"""MPI matrix abstract base class"""
pass
class MatrixBank(Sequence):
"""Matrix bank abstract base class"""
@abstractmethod
def __init__(self, backend, mats, initbank, tags):
self.backend = backend
self._mats = mats
self._curr_idx = initbank
self._curr_mat = self._mats[initbank]
# Process tags
if any(mats[0].tags != m.tags for m in mats[1:]):
raise ValueError('Banked matrices must share tags')
self.tags = tags | mats[0].tags
def __len__(self):
return len(self._mats)
def __getitem__(self, idx):
return self._mats[idx]
def __getattr__(self, attr):
return getattr(self._curr_mat, attr)
def rslice(self, p, q):
raise RuntimeError('Matrix banks can not be sliced')
@property
def active(self):
return self._curr_idx
@active.setter
def active(self, idx):
self._curr_idx = idx
self._curr_mat = self._mats[idx]
@property
def nbytes(self):
return sum(m.nbytes for m in self)
class View(object):
"""View abstract base class"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, backend, matmap, rcmap, stridemap, vlen, tags):
self.nrow = nrow = matmap.shape[0]
self.ncol = ncol = matmap.shape[1]
self.vlen = vlen
# Get the different matrices which we map onto
self._mats = list(np.unique(matmap))
# Extract the data type and item size from the first matrix
self.refdtype = self._mats[0].dtype
self.refitemsize = self._mats[0].itemsize
# For vector views a stridemap is required
if vlen != 1 and np.any(stridemap == 0):
raise ValueError('Vector views require a non-zero stride map')
# Check all of the shapes match up
if matmap.shape != rcmap.shape[:2] or\
matmap.shape != stridemap.shape:
raise TypeError('Invalid matrix shapes')
# Validate the matrices
for m in self._mats:
if not isinstance(m, backend.matrix_cls):
raise TypeError('Incompatible matrix type for view')
if m.dtype != self.refdtype:
raise TypeError('Mixed data types are not supported')
@abstractproperty
def nbytes(self):
pass
class MPIView(object):
@abstractmethod
def __init__(self, backend, matmap, rcmap, stridemap, vlen, tags):
self.nrow = nrow = matmap.shape[0]
self.ncol = ncol = matmap.shape[1]
self.vlen = vlen
# Create a normal view
self.view = backend.view(matmap, rcmap, stridemap, vlen, tags)
# Now create an MPI matrix so that the view contents may be packed
self.mpimat = backend.mpi_matrix((nrow, ncol, vlen), None, 'AoS',
tags=tags)
@property
def nbytes(self):
return self.view.nbytes + self.mpimat.nbytes
class Queue(object):
"""Kernel execution queue"""
__metaclass__ = ABCMeta
@abstractmethod
def __lshift__(self, iterable):
"""Appends the kernels in *iterable* to the queue
.. note::
This method does **not** execute the kernels, but rather just
schedules them. Queued kernels should be executed by
calling :meth:`pyfr.backends.base.Backend.runall`
"""
pass
@abstractmethod
def __mod__(self, iterable):
"""Synchronously executes the kernels in *iterable*
.. note::
In the (unusual) instance that the queue already has one or
more kernels queued these will run first.
"""
pass
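# Hedged usage sketch (added): per the docstrings above, kernels are
# scheduled with << and executed by Backend.runall.  The queue factory
# name below is an assumption for illustration only:
#
#     q = backend.queue()          # assumed factory on a concrete backend
#     q << (kernel_a, kernel_b)    # schedule
#     backend.runall([q])          # execute everything queued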
| [
"numpy.unique",
"numpy.any"
] | [((3740, 3757), 'numpy.unique', 'np.unique', (['matmap'], {}), '(matmap)\n', (3749, 3757), True, 'import numpy as np\n'), ((3999, 4021), 'numpy.any', 'np.any', (['(stridemap == 0)'], {}), '(stridemap == 0)\n', (4005, 4021), True, 'import numpy as np\n')] |
import re
from typing import List
from .consts import *
# =================== #
# INTERNALS FUNCTIONS #
# =================== #
def my_re_escape(text):
escape_char = r"[]"
returned_text = ""
for c in text:
if c in escape_char:
returned_text += "\\"
returned_text += c
return returned_text
ESCAPED_CSI = my_re_escape(CSI)
def remove_attributes_from_sgr(sgr_string: str, attributes: List[str]) -> str:
"""
Remove unwanted attributes in an SRG sequence.
SRG sequence start always with '\033[' and end with 'm'.
Not all attributes have the same number of parameters.
If all the attributes are removed return an empty string (without CSI and 'm')
Args:
sgr_string: SRG string
attributes: attributes to remove
Returns:
SRG string without unwanted attributes
"""
    # TODO: use a set for attributes to optimize membership lookups
# Remove the CSI in the beginning and the 'm' at the end
params = sgr_string[2:-1].split(";")
keep: List[str] = [] # list of params to keep, to return in the end
# Since we are going to jump some iterations we can't use
# for loop, but we will use while loop.
i = 0
while True:
if i >= len(params):
break
param = params[i]
# 38 48 58 are the only attributes that take more than one parameter
# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
if param not in ["38", "48", "58"]:
if param not in attributes:
keep.append(param)
else:
second_param = params[i + 1]
# if second param is 2 => the attribute take 5 param
if second_param == "2":
params_to_keep = params[i : i + 5]
i = i + 4
# if second param is 5 => the attribute take 3 param
elif second_param == "5":
params_to_keep = params[i : i + 3]
i = i + 2
else:
                # FIXME: how should a malformed attribute be handled?
params_to_keep = []
if param not in attributes:
keep.extend(params_to_keep)
i += 1
# If keep is empty return an empty string without CSI and 'm'
if keep:
return f"{CSI}{';'.join(keep)}m"
else:
return ""
def repl_remove_attributes_from_sgr(matchobj, remove: List[str]) -> str:
"""Addapted remove_sequence_from_text function to be used with regex"""
return remove_attributes_from_sgr(matchobj.group(0), remove)
re_sgr = re.compile(
fr"""
{ESCAPED_CSI}
\d*
(?:
;\d*
)*
m
""",
re.VERBOSE,
)
def remove_attributes_from_string(string, remove: List[str]) -> str:
"""Remove unwanted attributes form a string"""
return re_sgr.sub(lambda x: repl_remove_attributes_from_sgr(x, remove), string)
# ======================== #
# REMOVE GRAPHIC FUNCTIONS #
# ======================== #
def rmgraphics(string: str) -> str:
"""Remove all graphics attributes (all SGR)"""
return re_sgr.sub("", string)
def rmcolor(text: str) -> str:
"""Remove all color attributes from a string"""
# remove all attributes from 30 to 39
# 30-37 => 8colors
# 38 => 24 bits colors
# 39 => reset colors
remove = [str(i) for i in range(30, 40)]
return remove_attributes_from_string(text, remove)
def rmbackground(text: str) -> str:
"""Remove all color attributes from a string"""
# remove all attributes from 40 to 49
# 40-47 => 8colors background
# 48 => 24 bits colors background
# 49 => reset background colors
attributes = [str(i) for i in range(40, 50)]
return remove_attributes_from_string(text, attributes)
def rmstyle(text: str) -> str:
# TODO: change list to set
# TODO: test rmstyle
# TODO: make the list outside of the function to optimize (not calculate the list each time)
attributes = [
N_UNDERLINE,
N_DOUBLE_UNDERLINE,
N_RESET_UNDERLINE,
N_ITALIC,
N_RESET_ITALIC,
N_CROSS,
N_RESET_CROSS,
N_BLINK,
N_RESET_BLINK,
N_BOLD,
N_DIM,
N_RESET_BOLD_AND_DIM,
]
return remove_attributes_from_string(text, attributes)
def rmunderline(text):
attributes = [N_UNDERLINE, N_DOUBLE_UNDERLINE, N_RESET_UNDERLINE]
return remove_attributes_from_string(text, attributes)
def rmitalic(text):
attributes = [N_ITALIC, N_RESET_ITALIC]
return remove_attributes_from_string(text, attributes)
def rmcross(text):
attributes = [N_CROSS, N_RESET_CROSS]
return remove_attributes_from_string(text, attributes)
def rmblink(text):
attributes = [N_BLINK, N_RESET_BLINK]
return remove_attributes_from_string(text, attributes)
def rmbold_and_dim(text):
attributes = [N_BOLD, N_DIM, N_RESET_BOLD_AND_DIM]
return remove_attributes_from_string(text, attributes)
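# Minimal sanity-check sketch (added; assumes CSI == "\x1b[" and that the
# N_* constants in .consts hold the standard SGR parameter strings, e.g.
# N_BOLD == "1"):
#
#     styled = f"{CSI}1;31mhi{CSI}0m"                 # bold red
#     assert rmcolor(styled) == f"{CSI}1mhi{CSI}0m"   # color gone, bold kept
#     assert rmgraphics(styled) == "hi"               # every SGR stripped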
| [
"re.compile"
] | [((2584, 2687), 're.compile', 're.compile', (['f"""\n {ESCAPED_CSI}\n \\\\d*\n (?:\n ;\\\\d*\n )*\n m\n"""', 're.VERBOSE'], {}), '(\n f"""\n {ESCAPED_CSI}\n \\\\d*\n (?:\n ;\\\\d*\n )*\n m\n""",\n re.VERBOSE)\n', (2594, 2687), False, 'import re\n')] |
from flask import Flask,render_template as render, request
import json
from models import *
app = Flask(__name__,template_folder='./templates')
@app.route('/GETPage',methods = ['GET'])
def Gpage():
return f"You have landed on the page which allows the {request.method} method."
@app.route('/POSTPage', methods = ['GET','POST'])
def Ppage():
if request.method == "POST":
        # dict(request.form) maps each field to its (first) value -- identical
        # to the former zip(keys, values) reconstruction
        WriteModelJson(dict(request.form))
return render('SuccessPage.html')
else:
return render('form.html')
@app.route('/MultiMethodPage', methods = ['GET','PUT','PATCH', 'DELETE'])
def MethodsPage():
if request.method == "DELETE":
data = json.loads(request.data)
message = DeleteJsonModel(data)
return f"The Method used was: {request.method}\n {message}"
elif request.method == 'PUT':
data = json.loads(request.data)
message = PutModelJson(data)
return f"The Method used was: {request.method}\n {message}"
elif request.method == 'PATCH':
message = PatchModelJson(json.loads(request.data))
return f"The Method used was: {request.method}\n {message}"
return "This page/URL supports PUT,PATCH and DELETE Methods. Use Postman to send in requests. "
if __name__ == "__main__":
app.run()
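# Hedged example requests (added; Flask's default port 5000 is assumed and
# the JSON field names depend on models.py):
#   curl -X PUT    http://127.0.0.1:5000/MultiMethodPage -d '{"name": "a"}'
#   curl -X DELETE http://127.0.0.1:5000/MultiMethodPage -d '{"name": "a"}'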
| [
"flask.render_template",
"json.loads",
"flask.Flask"
] | [((90, 136), 'flask.Flask', 'Flask', (['__name__'], {'template_folder': '"""./templates"""'}), "(__name__, template_folder='./templates')\n", (95, 136), False, 'from flask import Flask, render_template as render, request\n'), ((478, 504), 'flask.render_template', 'render', (['"""SuccessPage.html"""'], {}), "('SuccessPage.html')\n", (484, 504), True, 'from flask import Flask, render_template as render, request\n'), ((530, 549), 'flask.render_template', 'render', (['"""form.html"""'], {}), "('form.html')\n", (536, 549), True, 'from flask import Flask, render_template as render, request\n'), ((710, 734), 'json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (720, 734), False, 'import json\n'), ((904, 928), 'json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (914, 928), False, 'import json\n'), ((1105, 1129), 'json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (1115, 1129), False, 'import json\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=too-few-public-methods
"""
Web application endpoint
========================
Starts an http endpoint to serve requests
"""
import logging
import mimetypes
import os
import site
import sys
import falcon
from gunicorn.app.base import BaseApplication
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
DESCRIPTION = "Starts Oríon Dashboard"
def get_dashboard_build_path():
"""Find dashboard build folder.
If package is installed, dashboard build should be in installation prefix
https://docs.python.org/3/distutils/setupscript.html#installing-additional-files
Otherwise, dashboard build should be in dashboard folder near src
in orion repository.
"""
current_file_path = __file__
if current_file_path.startswith(sys.prefix):
dashboard_build_path = os.path.join(sys.prefix, "orion-dashboard", "build")
elif current_file_path.startswith(site.USER_BASE):
dashboard_build_path = os.path.join(site.USER_BASE, "orion-dashboard", "build")
else:
dashboard_build_path = os.path.abspath(
os.path.join(
os.path.dirname(current_file_path),
"..",
"..",
"..",
"..",
"dashboard",
"build",
)
)
if not os.path.isdir(dashboard_build_path):
raise RuntimeError(
f"Cannot find dashboard static files to run frontend. "
f"Expected to be located at: {dashboard_build_path}"
)
return dashboard_build_path
class StaticResource:
"""Resource class to serve frontend files."""
STATIC_DIR = get_dashboard_build_path()
PLACEHOLDER = "window.__ORION_BACKEND__"
TEXT_TYPES = ("text/html", "application/javascript")
def __init__(self, args):
self.backend = args.get("backend", None)
logger.info(f"Dashboard build located at: {self.STATIC_DIR}")
def on_get(self, req, resp):
"""Hack HTML and Javascript files to setup backend if necessary."""
path = req.relative_uri.strip("/") or "index.html"
file_path = os.path.join(self.STATIC_DIR, path)
if os.path.isfile(file_path):
content_type, _ = mimetypes.guess_type(file_path)
resp.status = falcon.HTTP_200
resp.content_type = content_type
with open(file_path, "rb") as file:
content = file.read()
if content_type in self.TEXT_TYPES:
content = content.decode()
if self.backend is not None and self.PLACEHOLDER in content:
content = content.replace(self.PLACEHOLDER, repr(self.backend))
resp.body = content
else:
resp.status = falcon.HTTP_404
def add_subparser(parser):
"""Add the subparser that needs to be used for this command"""
serve_parser = parser.add_parser(
"frontend", help=DESCRIPTION, description=DESCRIPTION
)
serve_parser.add_argument(
"-p",
"--port",
type=int,
default=3000,
help="port to run frontend (default 3000)",
)
serve_parser.add_argument(
"-b",
"--backend",
type=str,
default="http://127.0.0.1:8000",
help="backend address (default: http://127.0.0.1:8000)",
)
serve_parser.set_defaults(func=main)
return serve_parser
def main(args):
"""Starts an application server to serve http requests"""
app = falcon.API()
resource = StaticResource(args)
app.add_sink(resource.on_get)
gunicorn_app = GunicornApp(app, args)
gunicorn_app.run()
class GunicornApp(BaseApplication):
"""Custom Gunicorn application, required when integrating gunicorn as an API."""
def __init__(self, app, args=None):
options = {}
if args:
options["bind"] = f"localhost:{args['port']}"
self.options = options
self.application = app
super(GunicornApp, self).__init__()
def init(self, parser, opts, args):
"""Pre-run initialization"""
pass
def load_config(self):
"""Load the gunicorn config"""
for key, value in self.options.items():
self.cfg.set(key.lower(), value)
def load(self):
"""Load the WSGI application"""
return self.application
| [
"logging.getLogger",
"falcon.API",
"os.path.join",
"os.path.isfile",
"os.path.dirname",
"os.path.isdir",
"mimetypes.guess_type"
] | [((325, 352), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (342, 352), False, 'import logging\n'), ((3569, 3581), 'falcon.API', 'falcon.API', ([], {}), '()\n', (3579, 3581), False, 'import falcon\n'), ((873, 925), 'os.path.join', 'os.path.join', (['sys.prefix', '"""orion-dashboard"""', '"""build"""'], {}), "(sys.prefix, 'orion-dashboard', 'build')\n", (885, 925), False, 'import os\n'), ((1382, 1417), 'os.path.isdir', 'os.path.isdir', (['dashboard_build_path'], {}), '(dashboard_build_path)\n', (1395, 1417), False, 'import os\n'), ((2182, 2217), 'os.path.join', 'os.path.join', (['self.STATIC_DIR', 'path'], {}), '(self.STATIC_DIR, path)\n', (2194, 2217), False, 'import os\n'), ((2229, 2254), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (2243, 2254), False, 'import os\n'), ((1012, 1068), 'os.path.join', 'os.path.join', (['site.USER_BASE', '"""orion-dashboard"""', '"""build"""'], {}), "(site.USER_BASE, 'orion-dashboard', 'build')\n", (1024, 1068), False, 'import os\n'), ((2286, 2317), 'mimetypes.guess_type', 'mimetypes.guess_type', (['file_path'], {}), '(file_path)\n', (2306, 2317), False, 'import mimetypes\n'), ((1169, 1203), 'os.path.dirname', 'os.path.dirname', (['current_file_path'], {}), '(current_file_path)\n', (1184, 1203), False, 'import os\n')] |
from __future__ import division
import argparse, logging, os, math, tqdm
import numpy as np
import mxnet as mx
from mxnet import gluon, nd, image
from mxnet.gluon.data.vision import transforms
import matplotlib.pyplot as plt
import gluoncv as gcv
from gluoncv import data
from gluoncv.data import mscoco
from gluoncv.model_zoo import get_model
from gluoncv.data.transforms.pose import get_final_preds
parser = argparse.ArgumentParser(description='Estimate human pose keypoints in a given image')
parser.add_argument('--detector', type=str, default='yolo3_mobilenet1.0_coco',
help='name of the detection model to use')
parser.add_argument('--pose-model', type=str, default='simple_pose_resnet50_v1b',
help='name of the pose estimation model to use')
parser.add_argument('--input-pic', type=str, required=True,
help='path to the input picture')
opt = parser.parse_args()
def upscale_bbox_fn(bbox, img, scale=1.25):
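    # Grow the detected box around its center by `scale`, clipped to the image bounds.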
new_bbox = []
x0 = bbox[0]
y0 = bbox[1]
x1 = bbox[2]
y1 = bbox[3]
w = (x1 - x0) / 2
h = (y1 - y0) / 2
center = [x0 + w, y0 + h]
new_x0 = max(center[0] - w * scale, 0)
new_y0 = max(center[1] - h * scale, 0)
new_x1 = min(center[0] + w * scale, img.shape[1])
new_y1 = min(center[1] + h * scale, img.shape[0])
new_bbox = [new_x0, new_y0, new_x1, new_y1]
return new_bbox
def crop_resize_normalize(img, bbox_list, output_size):
output_list = []
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
for bbox in bbox_list:
x0 = max(int(bbox[0]), 0)
y0 = max(int(bbox[1]), 0)
x1 = min(int(bbox[2]), int(img.shape[1]))
y1 = min(int(bbox[3]), int(img.shape[0]))
w = x1 - x0
h = y1 - y0
res_img = image.fixed_crop(nd.array(img), x0, y0, w, h, (output_size[1], output_size[0]))
res_img = transform_test(res_img)
output_list.append(res_img)
output_array = nd.stack(*output_list)
return output_array
def get_final_preds(batch_heatmaps, center, scale):
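    # Local re-implementation that shadows the gluoncv import above.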
from gluoncv.data.transforms.pose import get_max_pred
coords, maxvals = get_max_pred(batch_heatmaps)
heatmap_height = batch_heatmaps.shape[2]
heatmap_width = batch_heatmaps.shape[3]
# post-processing
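    # Nudge each argmax a quarter pixel toward the stronger neighboring bin.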
for n in range(coords.shape[0]):
for p in range(coords.shape[1]):
hm = batch_heatmaps[n][p]
px = int(nd.floor(coords[n][p][0] + 0.5).asscalar())
py = int(nd.floor(coords[n][p][1] + 0.5).asscalar())
if 1 < px < heatmap_width-1 and 1 < py < heatmap_height-1:
diff = nd.concat(hm[py][px+1] - hm[py][px-1],
hm[py+1][px] - hm[py-1][px],
dim=0)
coords[n][p] += nd.sign(diff) * .25
preds = nd.zeros_like(coords)
# Transform back
for i in range(coords.shape[0]):
w_ratio = coords[i][:, 0] / heatmap_width
h_ratio = coords[i][:, 1] / heatmap_height
preds[i][:, 0] = scale[i][0] * 2 * w_ratio + center[i][0] - scale[i][0]
preds[i][:, 1] = scale[i][1] * 2 * h_ratio + center[i][1] - scale[i][1]
return preds, maxvals
def heatmap_to_coord(heatmaps, bbox_list):
center_list = []
scale_list = []
for i, bbox in enumerate(bbox_list):
x0 = bbox[0]
y0 = bbox[1]
x1 = bbox[2]
y1 = bbox[3]
w = (x1 - x0) / 2
h = (y1 - y0) / 2
center_list.append(np.array([x0 + w, y0 + h]))
scale_list.append(np.array([w, h]))
coords, maxvals = get_final_preds(heatmaps, center_list, scale_list)
return coords, maxvals
def keypoint_detection(img_path, detector, pose_net):
# detector
x, img = data.transforms.presets.yolo.load_test(img_path, short=512)
class_IDs, scores, bounding_boxs = detector(x)
# input processing
L = class_IDs.shape[1]
thr = 0.5
upscale_bbox = []
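    # Keep only "person" detections (class 0) scoring above the threshold.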
for i in range(L):
if class_IDs[0][i].asscalar() != 0:
continue
if scores[0][i].asscalar() < thr:
continue
bbox = bounding_boxs[0][i]
upscale_bbox.append(upscale_bbox_fn(bbox.asnumpy().tolist(), img, scale=1.25))
pose_input = crop_resize_normalize(img, upscale_bbox, (256, 192))
# pose estimation
predicted_heatmap = pose_net(pose_input)
pred_coords, confidence = heatmap_to_coord(predicted_heatmap, upscale_bbox)
# Plot
    confidence_thresh = 0.2
    joint_visible = confidence[:, :, 0].asnumpy() > confidence_thresh
joint_pairs = [[0,1], [1,3], [0,2], [2,4],
[5,6], [5,7], [7,9], [6,8], [8,10],
[5,11], [6,12], [11,12],
[11,13], [12,14], [13,15], [14,16]]
person_ind = class_IDs[0].asnumpy() == 0
ax = gcv.utils.viz.plot_bbox(img, bounding_boxs[0].asnumpy()[person_ind[:,0]],
scores[0].asnumpy()[person_ind[:,0]], thresh=0.5)
plt.xlim([0, img.shape[1]-1])
plt.ylim([0, img.shape[0]-1])
plt.gca().invert_yaxis()
for i in range(pred_coords.shape[0]):
pts = pred_coords[i].asnumpy()
colormap_index = np.linspace(0, 1, len(joint_pairs))
for cm_ind, jp in zip(colormap_index, joint_pairs):
if joint_visible[i, jp[0]] and joint_visible[i, jp[1]]:
plt.plot(pts[jp, 0], pts[jp, 1],
linewidth=5.0, alpha=0.7, color=plt.cm.cool(cm_ind))
plt.scatter(pts[jp, 0], pts[jp, 1], s=20)
plt.show()
if __name__ == '__main__':
detector = get_model(opt.detector, ctx=mx.cpu(), pretrained=True)
if opt.detector.startswith('ssd') or opt.detector.startswith('yolo'):
detector.reset_class(["person"], reuse_weights=['person'])
net = get_model(opt.pose_model, pretrained=True, ctx=mx.cpu())
keypoint_detection(opt.input_pic, detector, net)
| [
"gluoncv.data.transforms.presets.yolo.load_test",
"mxnet.nd.sign",
"gluoncv.data.transforms.pose.get_final_preds",
"numpy.array",
"mxnet.nd.zeros_like",
"gluoncv.data.transforms.pose.get_max_pred",
"argparse.ArgumentParser",
"matplotlib.pyplot.scatter",
"mxnet.nd.array",
"matplotlib.pyplot.ylim",
... | [((415, 502), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Predict ImageNet classes from a given image"""'}), "(description=\n 'Predict ImageNet classes from a given image')\n", (438, 502), False, 'import argparse, logging, os, math, tqdm\n'), ((2061, 2083), 'mxnet.nd.stack', 'nd.stack', (['*output_list'], {}), '(*output_list)\n', (2069, 2083), False, 'from mxnet import gluon, nd, image\n'), ((2241, 2269), 'gluoncv.data.transforms.pose.get_max_pred', 'get_max_pred', (['batch_heatmaps'], {}), '(batch_heatmaps)\n', (2253, 2269), False, 'from gluoncv.data.transforms.pose import get_max_pred\n'), ((2929, 2950), 'mxnet.nd.zeros_like', 'nd.zeros_like', (['coords'], {}), '(coords)\n', (2942, 2950), False, 'from mxnet import gluon, nd, image\n'), ((3682, 3732), 'gluoncv.data.transforms.pose.get_final_preds', 'get_final_preds', (['heatmaps', 'center_list', 'scale_list'], {}), '(heatmaps, center_list, scale_list)\n', (3697, 3732), False, 'from gluoncv.data.transforms.pose import get_final_preds\n'), ((3843, 3902), 'gluoncv.data.transforms.presets.yolo.load_test', 'data.transforms.presets.yolo.load_test', (['img_path'], {'short': '(512)'}), '(img_path, short=512)\n', (3881, 3902), False, 'from gluoncv import data\n'), ((5055, 5086), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, img.shape[1] - 1]'], {}), '([0, img.shape[1] - 1])\n', (5063, 5086), True, 'import matplotlib.pyplot as plt\n'), ((5089, 5120), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, img.shape[0] - 1]'], {}), '([0, img.shape[0] - 1])\n', (5097, 5120), True, 'import matplotlib.pyplot as plt\n'), ((5608, 5618), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5616, 5618), True, 'import matplotlib.pyplot as plt\n'), ((1526, 1547), 'mxnet.gluon.data.vision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1545, 1547), False, 'from mxnet.gluon.data.vision import transforms\n'), ((1557, 1623), 'mxnet.gluon.data.vision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (1577, 1623), False, 'from mxnet.gluon.data.vision import transforms\n'), ((1901, 1914), 'mxnet.nd.array', 'nd.array', (['img'], {}), '(img)\n', (1909, 1914), False, 'from mxnet import gluon, nd, image\n'), ((3587, 3613), 'numpy.array', 'np.array', (['[x0 + w, y0 + h]'], {}), '([x0 + w, y0 + h])\n', (3595, 3613), True, 'import numpy as np\n'), ((3641, 3657), 'numpy.array', 'np.array', (['[w, h]'], {}), '([w, h])\n', (3649, 3657), True, 'import numpy as np\n'), ((5123, 5132), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5130, 5132), True, 'import matplotlib.pyplot as plt\n'), ((5690, 5698), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (5696, 5698), True, 'import mxnet as mx\n'), ((5915, 5923), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (5921, 5923), True, 'import mxnet as mx\n'), ((2723, 2809), 'mxnet.nd.concat', 'nd.concat', (['(hm[py][px + 1] - hm[py][px - 1])', '(hm[py + 1][px] - hm[py - 1][px])'], {'dim': '(0)'}), '(hm[py][px + 1] - hm[py][px - 1], hm[py + 1][px] - hm[py - 1][px],\n dim=0)\n', (2732, 2809), False, 'from mxnet import gluon, nd, image\n'), ((5561, 5602), 'matplotlib.pyplot.scatter', 'plt.scatter', (['pts[jp, 0]', 'pts[jp, 1]'], {'s': '(20)'}), '(pts[jp, 0], pts[jp, 1], s=20)\n', (5572, 5602), True, 'import matplotlib.pyplot as plt\n'), ((2896, 2909), 'mxnet.nd.sign', 'nd.sign', (['diff'], {}), '(diff)\n', (2903, 2909), False, 'from mxnet import gluon, nd, image\n'), ((2520, 2551), 
'mxnet.nd.floor', 'nd.floor', (['(coords[n][p][0] + 0.5)'], {}), '(coords[n][p][0] + 0.5)\n', (2528, 2551), False, 'from mxnet import gluon, nd, image\n'), ((2585, 2616), 'mxnet.nd.floor', 'nd.floor', (['(coords[n][p][1] + 0.5)'], {}), '(coords[n][p][1] + 0.5)\n', (2593, 2616), False, 'from mxnet import gluon, nd, image\n'), ((5524, 5543), 'matplotlib.pyplot.cm.cool', 'plt.cm.cool', (['cm_ind'], {}), '(cm_ind)\n', (5535, 5543), True, 'import matplotlib.pyplot as plt\n')] |
#! /usr/bin/env python
import sys
import os
import extargsparse
import re
import time
##importdebugstart
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
from strparser import *
from filehdl import *
from fmthdl import *
from extract_ob import *
from obmaklib import *
##importdebugend
REPLACE_IMPORT_LIB=1
REPLACE_STR_PARSER=1
REPLACE_FILE_HDL=1
REPLACE_FMT_HDL=1
REPLACE_EXTRACT_OB=1
REPLACE_OBMAK_LIB=1
def main():
commandline='''
{
"verbose|v" : "+",
"version|V" : false,
"dump|D" : "obcode.json",
"benchmark|B" : false,
"makob<makob_handler>##srcfile to give the other code file ,this need environment variable MAKOB_FILE to get the default (makob.json)##" : {
"namemin" : 5,
"namemax" : 20,
"$" : "+"
},
"unmakob<unmakob_handler>##dstfile to give the origin ,this need environment variable MAKOB_FILE to get the default (makob.json)##" : {
"short" : false,
"$" : "+"
},
"basename<basename_handler>##to make basename##" : {
"$" : "+"
},
"obtrans<obtrans_handler>##translate the srcdir to dstdir in makob file##" : {
"srcdir" : "",
"dstdir" : "",
"$" : "+"
},
"oblist<oblist_handler>##to list files ob files##" : {
"$" : "*"
},
"obuntrans<obuntrans_handler>##inputfile [outputfile] to trans file from MAKOB_FILE##" : {
"$" : "+"
},
"obunfunc<obunfunc_handler>##inputfile;outfile;funcs... to set obfuncs##" : {
"$" : "+"
}
}
'''
d = dict()
d['version'] = "VERSION_RELACE_STRING"
options = extargsparse.ExtArgsOptions(d)
stime = time.time()
parser = extargsparse.ExtArgsParse(options)
parser.load_command_line_string(commandline)
args = parser.parse_command_line(None,parser)
if args.version:
sys.stdout.write('%s\n'%(options.version))
sys.exit(0)
if args.benchmark:
etime = time.time()
sys.stderr.write('run %s time %s second\n'%(sys.argv[1:],etime - stime))
return
##importdebugstart
from obrelease import *
import re
def debug_release():
if '-v' in sys.argv[1:]:
#sys.stderr.write('will make verbose\n')
loglvl = logging.DEBUG
if logging.root is not None and len(logging.root.handlers) > 0:
logging.root.handlers = []
logging.basicConfig(level=loglvl,format='%(asctime)s:%(filename)s:%(funcName)s:%(lineno)d\t%(message)s')
topdir = os.path.abspath(os.path.join(os.path.dirname(__file__),'..'))
tofile= os.path.abspath(os.path.join(topdir,'obmak.py'))
curdir = os.path.abspath(os.path.dirname(__file__))
rlfiles = ReleaseFiles(__file__)
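    # Register each helper module together with the inline marker it replaces.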
rlfiles.add_python_file(os.path.abspath(os.path.join(curdir,'strparser.py')),r'REPLACE_STR_PARSER=1')
rlfiles.add_python_file(os.path.abspath(os.path.join(curdir,'fmthdl.py')),r'REPLACE_FMT_HDL=1')
rlfiles.add_python_file(os.path.abspath(os.path.join(curdir,'filehdl.py')),r'REPLACE_FILE_HDL=1')
rlfiles.add_python_file(os.path.abspath(os.path.join(curdir,'obmaklib.py')),r'REPLACE_OBMAK_LIB=1')
rlfiles.add_python_file(os.path.abspath(os.path.join(curdir,'extract_ob.py')),r'REPLACE_EXTRACT_OB=1')
if len(sys.argv) > 2:
for k in sys.argv[1:]:
if not k.startswith('-'):
tofile = k
break
versionfile = os.path.abspath(os.path.join(topdir,'VERSION'))
if not os.path.exists(versionfile):
raise Exception('can not find VERSION file')
with open(versionfile,'r') as f:
for l in f:
l = l.rstrip('\r\n')
vernum = l
break
#logging.info('str_c\n%s'%(strparser_c))
    sarr = re.split(r'\.', vernum)
if len(sarr) != 3:
raise Exception('version (%s) not format x.x.x'%(vernum))
VERSIONNUMBER = vernum
import_rets = fromat_ext_import_files(__file__,rlfiles.get_includes())
logging.info('import_rets\n%s'%(import_rets))
rlfiles.add_repls(r'VERSION_RELACE_STRING',VERSIONNUMBER)
rlfiles.add_repls(r'debug_main','main')
rlfiles.add_repls(r'REPLACE_IMPORT_LIB=1',make_string_slash_ok(import_rets))
#logging.info('repls %s'%(repls.keys()))
disttools.release_file('__main__',tofile,[],[[r'##importdebugstart.*',r'##importdebugend.*']],[],rlfiles.get_repls())
return
def debug_main():
if '--release' in sys.argv[1:]:
debug_release()
return
main()
return
##importdebugend
if __name__ == '__main__':
debug_main()
| [
"re.split",
"os.path.exists",
"extargsparse.ExtArgsParse",
"os.path.join",
"os.path.dirname",
"sys.stderr.write",
"sys.exit",
"extargsparse.ExtArgsOptions",
"time.time",
"sys.stdout.write"
] | [((1726, 1756), 'extargsparse.ExtArgsOptions', 'extargsparse.ExtArgsOptions', (['d'], {}), '(d)\n', (1753, 1756), False, 'import extargsparse\n'), ((1769, 1780), 'time.time', 'time.time', ([], {}), '()\n', (1778, 1780), False, 'import time\n'), ((1794, 1828), 'extargsparse.ExtArgsParse', 'extargsparse.ExtArgsParse', (['options'], {}), '(options)\n', (1819, 1828), False, 'import extargsparse\n'), ((3811, 3834), 're.split', 're.split', (['"""\\\\."""', 'vernum'], {}), "('\\\\.', vernum)\n", (3819, 3834), False, 'import re\n'), ((139, 164), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (154, 164), False, 'import os\n'), ((1957, 1999), 'sys.stdout.write', 'sys.stdout.write', (["('%s\\n' % options.version)"], {}), "('%s\\n' % options.version)\n", (1973, 1999), False, 'import sys\n'), ((2008, 2019), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2016, 2019), False, 'import sys\n'), ((2059, 2070), 'time.time', 'time.time', ([], {}), '()\n', (2068, 2070), False, 'import time\n'), ((2079, 2154), 'sys.stderr.write', 'sys.stderr.write', (["('run %s time %s second\\n' % (sys.argv[1:], etime - stime))"], {}), "('run %s time %s second\\n' % (sys.argv[1:], etime - stime))\n", (2095, 2154), False, 'import sys\n'), ((2676, 2708), 'os.path.join', 'os.path.join', (['topdir', '"""obmak.py"""'], {}), "(topdir, 'obmak.py')\n", (2688, 2708), False, 'import os\n'), ((2738, 2763), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2753, 2763), False, 'import os\n'), ((3499, 3530), 'os.path.join', 'os.path.join', (['topdir', '"""VERSION"""'], {}), "(topdir, 'VERSION')\n", (3511, 3530), False, 'import os\n'), ((3542, 3569), 'os.path.exists', 'os.path.exists', (['versionfile'], {}), '(versionfile)\n', (3556, 3569), False, 'import os\n'), ((2615, 2640), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2630, 2640), False, 'import os\n'), ((2846, 2882), 'os.path.join', 'os.path.join', (['curdir', '"""strparser.py"""'], {}), "(curdir, 'strparser.py')\n", (2858, 2882), False, 'import os\n'), ((2952, 2985), 'os.path.join', 'os.path.join', (['curdir', '"""fmthdl.py"""'], {}), "(curdir, 'fmthdl.py')\n", (2964, 2985), False, 'import os\n'), ((3052, 3086), 'os.path.join', 'os.path.join', (['curdir', '"""filehdl.py"""'], {}), "(curdir, 'filehdl.py')\n", (3064, 3086), False, 'import os\n'), ((3154, 3189), 'os.path.join', 'os.path.join', (['curdir', '"""obmaklib.py"""'], {}), "(curdir, 'obmaklib.py')\n", (3166, 3189), False, 'import os\n'), ((3258, 3295), 'os.path.join', 'os.path.join', (['curdir', '"""extract_ob.py"""'], {}), "(curdir, 'extract_ob.py')\n", (3270, 3295), False, 'import os\n')] |
from ..cross_box import CrossBox
from unittest.mock import Mock, patch
from pyglet.gl import GL_LINES
import unittest
class TestCrossBox(unittest.TestCase):
"""Test rendering of cross box graphics."""
def setUp(self):
"""Provides the following to all tests:
* ``self.rectangle``: Mock rectangle.
* ``self.vertex_count``: Number of vertices for the rectangle.
* ``self.expected_vertices``: Expected verticies for the rectangle.
* ``self.default_color``: Expected default color for the cross box.
* ``self.new_coordinates``: New coordinates to reposition to.
* ``self.new_expected_vertices``: Expected verticies after reposition.
"""
self.rectangle = Mock(x=1, y=2, width=3, height=4)
self.expected_vertices = (
1, 2, 4, 2,
1, 2, 1, 6,
4, 2, 1, 6,
1, 6, 4, 6,
4, 6, 1, 2,
4, 6, 4, 2)
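        # Each vertex is an (x, y) pair, so half the coordinate count.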
self.vertex_count = (len(self.expected_vertices) // 2)
self.default_color = (0, 0, 0) * self.vertex_count
self.new_coordinates = (2, 4)
self.new_expected_vertices = [
2, 4, 5, 4,
2, 4, 2, 8,
5, 4, 2, 8,
2, 8, 5, 8,
5, 8, 2, 4,
5, 8, 5, 4]
@patch('engine.graphics.cross_box.vertex_list')
def test_creates_vertex_list_without_batch(self, mock_vertex_list):
"""Cross box uses a vertex list if no batch is given."""
CrossBox(self.rectangle)
mock_vertex_list.assert_called_once_with(
self.vertex_count,
('v2i', self.expected_vertices),
('c3B', self.default_color))
@patch('engine.graphics.cross_box.vertex_list')
def test_sets_color_of_vertex_list(self, mock_vertex_list):
"""Cross box sets custom colors for a vertex list."""
color = (1, 2, 3)
expected_colors = color * self.vertex_count
CrossBox(self.rectangle, color)
mock_vertex_list.assert_called_once_with(
self.vertex_count,
('v2i', self.expected_vertices),
('c3B', expected_colors))
@patch('engine.graphics.cross_box.vertex_list')
def test_repositions_vertex_list(self, mock_vertex_list):
"""Repositioning a cross box updates the vertex list."""
mock_vertex_list.return_value.vertices = self.expected_vertices
cross_box = CrossBox(self.rectangle)
cross_box.set_position(self.new_coordinates)
self.assertEqual(
self.new_expected_vertices, mock_vertex_list.return_value.vertices)
def test_uses_batch(self):
"""Cross box uses a batch if given."""
batch = Mock()
CrossBox(self.rectangle, batch=batch)
batch.add.assert_called_once_with(
self.vertex_count,
GL_LINES,
None,
('v2i', self.expected_vertices),
('c3B', self.default_color))
def test_sets_color_with_batch(self):
"""Cross box sets custom colors when using a batch."""
color = (1, 2, 3)
expected_colors = color * self.vertex_count
batch = Mock()
CrossBox(self.rectangle, color, batch)
batch.add.assert_called_once_with(
self.vertex_count,
GL_LINES,
None,
('v2i', self.expected_vertices),
('c3B', expected_colors))
def test_repositions_batch(self):
"""Repositioning a cross box updates the batched vertices."""
batch = Mock()
batch.add.return_value.vertices = self.expected_vertices
cross_box = CrossBox(self.rectangle, batch=batch)
cross_box.set_position(self.new_coordinates)
self.assertEqual(
self.new_expected_vertices, batch.add.return_value.vertices)
| [
"unittest.mock.patch",
"unittest.mock.Mock"
] | [((1298, 1344), 'unittest.mock.patch', 'patch', (['"""engine.graphics.cross_box.vertex_list"""'], {}), "('engine.graphics.cross_box.vertex_list')\n", (1303, 1344), False, 'from unittest.mock import Mock, patch\n'), ((1689, 1735), 'unittest.mock.patch', 'patch', (['"""engine.graphics.cross_box.vertex_list"""'], {}), "('engine.graphics.cross_box.vertex_list')\n", (1694, 1735), False, 'from unittest.mock import Mock, patch\n'), ((2152, 2198), 'unittest.mock.patch', 'patch', (['"""engine.graphics.cross_box.vertex_list"""'], {}), "('engine.graphics.cross_box.vertex_list')\n", (2157, 2198), False, 'from unittest.mock import Mock, patch\n'), ((733, 766), 'unittest.mock.Mock', 'Mock', ([], {'x': '(1)', 'y': '(2)', 'width': '(3)', 'height': '(4)'}), '(x=1, y=2, width=3, height=4)\n', (737, 766), False, 'from unittest.mock import Mock, patch\n'), ((2698, 2704), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (2702, 2704), False, 'from unittest.mock import Mock, patch\n'), ((3153, 3159), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (3157, 3159), False, 'from unittest.mock import Mock, patch\n'), ((3530, 3536), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (3534, 3536), False, 'from unittest.mock import Mock, patch\n')] |
import pandas as pd
from tqdm import tqdm
from ..binarize import to_binary
from cana.boolean_node import BooleanNode
# set up variables
n_inputs = 2**2
n_rules = 2**(2**2)
df_dict = []
for rule in tqdm(range(n_rules)):
canal = {} # becomes row of dataframe
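    # 4-entry truth table of the 2-input Boolean rule (2**4 = 16 rules in total).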
arr = to_binary(rule, digits=4)
print(arr)
    # use cana's BooleanNode to compute the canalization measures
bn = BooleanNode.from_output_list(outputs=arr, name=rule)
ks = bn.input_symmetry()
kr = bn.input_redundancy()
sym0, sym1 = bn.input_symmetry(mode='input')
red0, red1 = bn.input_redundancy(mode='input')
# update the dictionary with the PI values
canal['rule'] = rule
canal['kr*'] = kr
canal['ks*'] = ks
canal['r(0)'] = red0
canal['r(1)'] = red1
canal['s(0)'] = sym0
canal['s(1)'] = sym1
df_dict.append(canal)
# write out the dataframe
df = pd.DataFrame(df_dict)
df_fout = open(snakemake.output[0], 'w')
df.to_csv(df_fout, index=False)
df_fout.close() | [
"pandas.DataFrame",
"cana.boolean_node.BooleanNode.from_output_list"
] | [((857, 878), 'pandas.DataFrame', 'pd.DataFrame', (['df_dict'], {}), '(df_dict)\n', (869, 878), True, 'import pandas as pd\n'), ((367, 419), 'cana.boolean_node.BooleanNode.from_output_list', 'BooleanNode.from_output_list', ([], {'outputs': 'arr', 'name': 'rule'}), '(outputs=arr, name=rule)\n', (395, 419), False, 'from cana.boolean_node import BooleanNode\n')] |
from django.shortcuts import render
from django.views.generic import ListView, CreateView, DetailView
from django.contrib.auth.mixins import LoginRequiredMixin
from posts.models import Post
from posts.forms import PostForm
from django.urls import reverse_lazy
# Create your views here.
class PostListView(LoginRequiredMixin, ListView):
model = Post
ordering = ('-created_at',)
paginate_by = 30
class PostDetailView(LoginRequiredMixin, DetailView):
model = Post
class PostCreateView(LoginRequiredMixin, CreateView):
model = Post
form_class = PostForm
success_url = reverse_lazy('posts:list')
    def form_valid(self, form):
        # Attach the requesting user and profile; the parent form_valid
        # saves the instance and assigns self.object.
        form.instance.user = self.request.user
        form.instance.profile = self.request.user.profile
        return super().form_valid(form)
| [
"django.urls.reverse_lazy"
] | [((598, 624), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""posts:list"""'], {}), "('posts:list')\n", (610, 624), False, 'from django.urls import reverse_lazy\n')] |
import cv2
import numpy as np
import random as rd
import os
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Input
from tensorflow.keras.optimizers import Adadelta
from tensorflow.keras.callbacks import EarlyStopping,ModelCheckpoint
from tensorflow.keras.backend import image_data_format
from tensorflow.keras.layers import BatchNormalization
# ===========================
# SETTINGS
# ===========================
VALIDATION_SPLIT=0.2
BATCH_SIZE = 16
# ===========================
def get_input_shape(height, width, channels = 3):
if image_data_format() == 'channels_first':
return (channels, height, width)
else:
return (height, width, channels)
def get_convnet(height, width, labels):
img_input = Input(shape=get_input_shape(height,width))
x = img_input
for layer in range(1,5):
x = Conv2D(filters=32*layer, kernel_size=(3, 3), padding='same')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Flatten()(x)
x = Dense(labels,activation='softmax')(x)
return Model(img_input, x, name='calvonet')
def getTrain(input_image, gt, hspan, vspan, num_labels, max_samples_per_class):
X_train = []
Y_train = []
# Speed-up factor
factor = 10.
# Calculate the ratio per label
count = [0] * num_labels
for page in range(len(input_image)):
for i in range(num_labels):
count[i] += (gt[page] == i).sum()
samples_per_class = min(np.min(count), max_samples_per_class)
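    # Per-label sampling ratios so every class contributes roughly equally.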
ratio = [0] * num_labels
for i in range(num_labels):
ratio[i] = factor * (samples_per_class/float(count[i]))
# Just for checking !
count_per_class = [0] * num_labels
# Get samples according to the ratio per label
for page in range(len(input_image)):
page_x = input_image[page]
page_y = gt[page]
[height, width] = page_y.shape
for row in range(vspan,height-vspan-1):
for col in range(hspan,width-hspan-1):
if rd.random() < 1./factor:
label = page_y[row][col]
if 0 <= label < num_labels: # Avoid possible noise in the GT or -1 (unknown pixel)
if rd.random() < ratio[label]: # Take samples according to its
sample = page_x[row-vspan:row+vspan+1,col-hspan:col+hspan+1]
# Categorical vector
y_label = [0]*num_labels
y_label[label] = 1
X_train.append(sample)
Y_train.append(y_label)
count_per_class[label] += 1
# Manage different ordering
if image_data_format() == 'channels_first':
X_train = np.asarray(X_train).reshape(len(X_train), 3, vspan*2 + 1, hspan*2 + 1)
else:
X_train = np.asarray(X_train).reshape(len(X_train), vspan*2 + 1, hspan*2 + 1, 3)
Y_train = np.asarray(Y_train).reshape(len(Y_train), num_labels)
print('Distribution of data per class: ' + str(count_per_class))
return [X_train, Y_train]
def train_model(input_image, gt, hspan, vspan, output_model_path, max_samples_per_class, epochs, num_labels = 4):
# -------------------------------------------------------------------------------------------------------------------
# Create training set
[X_train, Y_train] = getTrain([input_image], [gt],
hspan, vspan,
num_labels,
max_samples_per_class=max_samples_per_class)
print('Training created with ' + str(len(X_train)) + ' samples.')
# Training configuration
print('Training a new model')
model = get_convnet(
        height=vspan * 2 + 1,  # sample patches have vspan*2+1 rows
        width=hspan * 2 + 1,   # and hspan*2+1 columns
labels=num_labels
)
#model.summary()
# In Tensorflow 2, it is necessary to add '.h5' to the end of the filename to force saving
# in hdf5 format with a ModelCheckpoint. Rodan will not accept anything but the file's
# original filename, however, so we must rename it back after training.
new_output_path = os.path.join(output_model_path + '.h5')
callbacks_list = [
        # tf.keras logs the metric as 'val_accuracy', not 'val_acc'.
        ModelCheckpoint(new_output_path, save_best_only=True, monitor='val_accuracy', verbose=1, mode='max'),
        EarlyStopping(monitor='val_accuracy', patience=3, verbose=0, mode='max')
]
model.compile(loss='categorical_crossentropy',
optimizer=Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0),
metrics=["accuracy"])
# Training stage
model.fit(X_train, Y_train,
verbose=2,
batch_size=BATCH_SIZE,
validation_split=VALIDATION_SPLIT,
callbacks=callbacks_list,
epochs=epochs
)
# Rename the file back to what Rodan expects.
os.rename(new_output_path, output_model_path)
return 0
| [
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPooling2D",
"os.rename",
"os.path.join",
"numpy.asarray",
"tensorflow.keras.optimizers.Adadelta",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.callbacks.EarlyStopping",
"random.random",
"tensorflow.keras.layers.Dense"... | [((1236, 1272), 'tensorflow.keras.models.Model', 'Model', (['img_input', 'x'], {'name': '"""calvonet"""'}), "(img_input, x, name='calvonet')\n", (1241, 1272), False, 'from tensorflow.keras.models import Sequential, Model\n'), ((4349, 4388), 'os.path.join', 'os.path.join', (["(output_model_path + '.h5')"], {}), "(output_model_path + '.h5')\n", (4361, 4388), False, 'import os\n'), ((5094, 5139), 'os.rename', 'os.rename', (['new_output_path', 'output_model_path'], {}), '(new_output_path, output_model_path)\n', (5103, 5139), False, 'import os\n'), ((683, 702), 'tensorflow.keras.backend.image_data_format', 'image_data_format', ([], {}), '()\n', (700, 702), False, 'from tensorflow.keras.backend import image_data_format\n'), ((1166, 1175), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1173, 1175), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((1187, 1222), 'tensorflow.keras.layers.Dense', 'Dense', (['labels'], {'activation': '"""softmax"""'}), "(labels, activation='softmax')\n", (1192, 1222), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((1649, 1662), 'numpy.min', 'np.min', (['count'], {}), '(count)\n', (1655, 1662), True, 'import numpy as np\n'), ((2899, 2918), 'tensorflow.keras.backend.image_data_format', 'image_data_format', ([], {}), '()\n', (2916, 2918), False, 'from tensorflow.keras.backend import image_data_format\n'), ((4425, 4524), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['new_output_path'], {'save_best_only': '(True)', 'monitor': '"""val_acc"""', 'verbose': '(1)', 'mode': '"""max"""'}), "(new_output_path, save_best_only=True, monitor='val_acc',\n verbose=1, mode='max')\n", (4440, 4524), False, 'from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\n'), ((4534, 4601), 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_acc"""', 'patience': '(3)', 'verbose': '(0)', 'mode': '"""max"""'}), "(monitor='val_acc', patience=3, verbose=0, mode='max')\n", (4547, 4601), False, 'from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\n'), ((977, 1039), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32 * layer)', 'kernel_size': '(3, 3)', 'padding': '"""same"""'}), "(filters=32 * layer, kernel_size=(3, 3), padding='same')\n", (983, 1039), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, Input\n'), ((1053, 1073), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1071, 1073), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((1089, 1107), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1099, 1107), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((1123, 1153), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1135, 1153), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, Input\n'), ((3143, 3162), 'numpy.asarray', 'np.asarray', (['Y_train'], {}), '(Y_train)\n', (3153, 3162), True, 'import numpy as np\n'), ((4696, 4748), 'tensorflow.keras.optimizers.Adadelta', 'Adadelta', ([], {'lr': '(1.0)', 'rho': '(0.95)', 'epsilon': '(1e-08)', 'decay': '(0.0)'}), '(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0)\n', (4704, 4748), False, 'from tensorflow.keras.optimizers import Adadelta\n'), ((2958, 2977), 'numpy.asarray', 'np.asarray', (['X_train'], {}), 
'(X_train)\n', (2968, 2977), True, 'import numpy as np\n'), ((3057, 3076), 'numpy.asarray', 'np.asarray', (['X_train'], {}), '(X_train)\n', (3067, 3076), True, 'import numpy as np\n'), ((2195, 2206), 'random.random', 'rd.random', ([], {}), '()\n', (2204, 2206), True, 'import random as rd\n'), ((2398, 2409), 'random.random', 'rd.random', ([], {}), '()\n', (2407, 2409), True, 'import random as rd\n')] |
from django.views.generic import ListView, DetailView, TemplateView, CreateView, UpdateView, DeleteView
from board.models import Post
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from mysite.views import OwnerOnlyMixin
from django.conf import settings
#--- ListView
class PostLV(ListView):
model = Post
template_name = 'board/post_all.html'
context_object_name = 'posts'
paginate_by = 2
#--- DetailView
class PostDV(DetailView):
model = Post
class PostCreateView(LoginRequiredMixin, CreateView):
model = Post
fields = ['title', 'slug', 'content']
initial = {'slug': 'auto-filling-do-not-input'}
success_url = reverse_lazy('board:index')
def form_valid(self, form):
form.instance.owner = self.request.user
return super().form_valid(form)
class PostChangeLV(LoginRequiredMixin, ListView):
template_name = 'board/post_change_list.html'
def get_queryset(self):
return Post.objects.filter(owner=self.request.user)
class PostUpdateView(OwnerOnlyMixin, UpdateView):
model = Post
fields = ['title', 'slug', 'content']
success_url = reverse_lazy('board:index')
class PostDeleteView(OwnerOnlyMixin, DeleteView):
model = Post
success_url = reverse_lazy('board:index')
| [
"board.models.Post.objects.filter",
"django.urls.reverse_lazy"
] | [((702, 729), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""board:index"""'], {}), "('board:index')\n", (714, 729), False, 'from django.urls import reverse_lazy\n'), ((1171, 1198), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""board:index"""'], {}), "('board:index')\n", (1183, 1198), False, 'from django.urls import reverse_lazy\n'), ((1286, 1313), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""board:index"""'], {}), "('board:index')\n", (1298, 1313), False, 'from django.urls import reverse_lazy\n'), ((997, 1041), 'board.models.Post.objects.filter', 'Post.objects.filter', ([], {'owner': 'self.request.user'}), '(owner=self.request.user)\n', (1016, 1041), False, 'from board.models import Post\n')] |
from django.conf import settings
from django.db import models
from django.dispatch import Signal
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from froide.foirequest.models import FoiMessage
from .utils import inform_user_problem_resolved
class ProblemChoices(models.TextChoices):
MESSAGE_NOT_DELIVERED = "message_not_delivered", _(
"Your message was not delivered."
)
ATTACHMENT_BROKEN = "attachment_broken", _("The attachments don't seem to work.")
REDACTION_NEEDED = "redaction_needed", _("More redactions are needed.")
FOI_HELP_NEEDED = "foi_help_needed", _(
"You need help to understand or reply to this message."
)
OTHER = "other", _("Something else...")
NOT_FOI = "not_foi", _("This is not a proper FOI request.")
NOT_NICE = "not_nice", _("Content is against netiquette.")
INFO_OUTDATED = "info_outdated", _("Published information is outdated.")
INFO_WRONG = "info_wrong", _("Published information is wrong.")
BOUNCE_PUBLICBODY = "bounce_publicbody", _(
"You received a bounce mail from the public body."
)
MAIL_INAUTHENTIC = "mail_inauthentic", _(
"Received mail does not pass authenticity checks."
)
def make_choices(value_list):
return [(k, k.label) for k in value_list]
USER_PROBLEM_CHOICES = make_choices(
[
ProblemChoices.MESSAGE_NOT_DELIVERED,
ProblemChoices.ATTACHMENT_BROKEN,
ProblemChoices.REDACTION_NEEDED,
ProblemChoices.FOI_HELP_NEEDED,
ProblemChoices.OTHER,
]
)
EXTERNAL_PROBLEM_CHOICES = make_choices(
[
ProblemChoices.NOT_FOI,
ProblemChoices.REDACTION_NEEDED,
ProblemChoices.NOT_NICE,
ProblemChoices.INFO_OUTDATED,
ProblemChoices.INFO_WRONG,
ProblemChoices.OTHER,
]
)
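# Signals emitted at each step of the moderation lifecycle.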
reported = Signal()
claimed = Signal()
resolved = Signal()
unclaimed = Signal()
escalated = Signal()
class ProblemReportManager(models.Manager):
def report(self, **kwargs):
report = ProblemReport.objects.create(**kwargs)
reported.send(sender=report)
return report
def find_and_resolve(
self, message=None, foirequest=None, kind=None, user=None, resolution=""
):
if not message and not foirequest:
return
if not kind:
return
qs = self.get_queryset().filter(resolved=False)
if message:
qs = qs.filter(message=message)
if foirequest:
qs = qs.filter(message__request=foirequest)
qs = qs.filter(kind=kind)
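        # Resolve every remaining open report of this kind in one pass.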
for report in qs:
report.resolve(user, resolution=resolution)
class ProblemReport(models.Model):
PROBLEM = ProblemChoices
message = models.ForeignKey(FoiMessage, on_delete=models.CASCADE)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
null=True,
on_delete=models.SET_NULL,
blank=True,
related_name="problems_reported",
)
kind = models.CharField(max_length=50, choices=ProblemChoices.choices)
timestamp = models.DateTimeField(default=timezone.now)
auto_submitted = models.BooleanField(default=False)
resolved = models.BooleanField(default=False)
description = models.TextField(blank=True)
resolution = models.TextField(blank=True)
resolution_timestamp = models.DateTimeField(null=True, blank=True)
claimed = models.DateTimeField(null=True, blank=True)
escalation = models.TextField(blank=True)
escalated = models.DateTimeField(null=True, blank=True)
moderator = models.ForeignKey(
settings.AUTH_USER_MODEL,
null=True,
on_delete=models.SET_NULL,
blank=True,
related_name="problems_moderated",
)
objects = ProblemReportManager()
class Meta:
ordering = ("-timestamp",)
verbose_name = _("problem report")
verbose_name_plural = _("problem reports")
def __str__(self):
return self.kind
def get_absolute_url(self):
return self.message.get_absolute_short_url()
def get_absolute_domain_url(self):
return self.message.get_absolute_domain_short_url()
@property
def is_requester(self):
return self.user_id == self.message.request.user_id
def related_publicbody_id(self):
if self.message.is_response:
return self.message.sender_public_body_id
return self.message.recipient_public_body_id
def claim(self, user):
self.claimed = timezone.now()
self.moderator = user
self.save()
claimed.send(sender=self)
def unclaim(self, user):
self.moderator = None
self.claimed = None
self.save()
unclaimed.send(sender=self)
def resolve(self, user, resolution=""):
self.resolved = True
self.resolution = resolution
self.resolution_timestamp = timezone.now()
self.moderator = user
self.save()
resolved.send(sender=self)
self.resolve_identical(user, resolution=resolution)
return inform_user_problem_resolved(self)
def resolve_identical(self, user, resolution=""):
ProblemReport.objects.find_and_resolve(
message=self.message, kind=self.kind, user=user, resolution=resolution
)
def escalate(self, user, escalation=""):
self.moderator = user
self.escalation = escalation
self.escalated = timezone.now()
self.save()
escalated.send(sender=self)
| [
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.utils.translation.gettext_lazy",
"django.dispatch.Signal",
"django.db.models.BooleanField",
"django.utils.timezone.now",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((1854, 1862), 'django.dispatch.Signal', 'Signal', ([], {}), '()\n', (1860, 1862), False, 'from django.dispatch import Signal\n'), ((1873, 1881), 'django.dispatch.Signal', 'Signal', ([], {}), '()\n', (1879, 1881), False, 'from django.dispatch import Signal\n'), ((1893, 1901), 'django.dispatch.Signal', 'Signal', ([], {}), '()\n', (1899, 1901), False, 'from django.dispatch import Signal\n'), ((1914, 1922), 'django.dispatch.Signal', 'Signal', ([], {}), '()\n', (1920, 1922), False, 'from django.dispatch import Signal\n'), ((1935, 1943), 'django.dispatch.Signal', 'Signal', ([], {}), '()\n', (1941, 1943), False, 'from django.dispatch import Signal\n'), ((2750, 2805), 'django.db.models.ForeignKey', 'models.ForeignKey', (['FoiMessage'], {'on_delete': 'models.CASCADE'}), '(FoiMessage, on_delete=models.CASCADE)\n', (2767, 2805), False, 'from django.db import models\n'), ((2817, 2949), 'django.db.models.ForeignKey', 'models.ForeignKey', (['settings.AUTH_USER_MODEL'], {'null': '(True)', 'on_delete': 'models.SET_NULL', 'blank': '(True)', 'related_name': '"""problems_reported"""'}), "(settings.AUTH_USER_MODEL, null=True, on_delete=models.\n SET_NULL, blank=True, related_name='problems_reported')\n", (2834, 2949), False, 'from django.db import models\n'), ((3003, 3066), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'choices': 'ProblemChoices.choices'}), '(max_length=50, choices=ProblemChoices.choices)\n', (3019, 3066), False, 'from django.db import models\n'), ((3083, 3125), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (3103, 3125), False, 'from django.db import models\n'), ((3147, 3181), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (3166, 3181), False, 'from django.db import models\n'), ((3197, 3231), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (3216, 3231), False, 'from django.db import models\n'), ((3250, 3278), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (3266, 3278), False, 'from django.db import models\n'), ((3296, 3324), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (3312, 3324), False, 'from django.db import models\n'), ((3352, 3395), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (3372, 3395), False, 'from django.db import models\n'), ((3410, 3453), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (3430, 3453), False, 'from django.db import models\n'), ((3471, 3499), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (3487, 3499), False, 'from django.db import models\n'), ((3516, 3559), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (3536, 3559), False, 'from django.db import models\n'), ((3576, 3709), 'django.db.models.ForeignKey', 'models.ForeignKey', (['settings.AUTH_USER_MODEL'], {'null': '(True)', 'on_delete': 'models.SET_NULL', 'blank': '(True)', 'related_name': '"""problems_moderated"""'}), "(settings.AUTH_USER_MODEL, null=True, on_delete=models.\n SET_NULL, blank=True, related_name='problems_moderated')\n", (3593, 3709), False, 'from django.db import models\n'), 
((381, 417), 'django.utils.translation.gettext_lazy', '_', (['"""Your message was not delivered."""'], {}), "('Your message was not delivered.')\n", (382, 417), True, 'from django.utils.translation import gettext_lazy as _\n'), ((477, 517), 'django.utils.translation.gettext_lazy', '_', (['"""The attachments don\'t seem to work."""'], {}), '("The attachments don\'t seem to work.")\n', (478, 517), True, 'from django.utils.translation import gettext_lazy as _\n'), ((561, 593), 'django.utils.translation.gettext_lazy', '_', (['"""More redactions are needed."""'], {}), "('More redactions are needed.')\n", (562, 593), True, 'from django.utils.translation import gettext_lazy as _\n'), ((635, 693), 'django.utils.translation.gettext_lazy', '_', (['"""You need help to understand or reply to this message."""'], {}), "('You need help to understand or reply to this message.')\n", (636, 693), True, 'from django.utils.translation import gettext_lazy as _\n'), ((729, 751), 'django.utils.translation.gettext_lazy', '_', (['"""Something else..."""'], {}), "('Something else...')\n", (730, 751), True, 'from django.utils.translation import gettext_lazy as _\n'), ((777, 815), 'django.utils.translation.gettext_lazy', '_', (['"""This is not a proper FOI request."""'], {}), "('This is not a proper FOI request.')\n", (778, 815), True, 'from django.utils.translation import gettext_lazy as _\n'), ((843, 878), 'django.utils.translation.gettext_lazy', '_', (['"""Content is against netiquette."""'], {}), "('Content is against netiquette.')\n", (844, 878), True, 'from django.utils.translation import gettext_lazy as _\n'), ((916, 955), 'django.utils.translation.gettext_lazy', '_', (['"""Published information is outdated."""'], {}), "('Published information is outdated.')\n", (917, 955), True, 'from django.utils.translation import gettext_lazy as _\n'), ((987, 1023), 'django.utils.translation.gettext_lazy', '_', (['"""Published information is wrong."""'], {}), "('Published information is wrong.')\n", (988, 1023), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1069, 1122), 'django.utils.translation.gettext_lazy', '_', (['"""You received a bounce mail from the public body."""'], {}), "('You received a bounce mail from the public body.')\n", (1070, 1122), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1180, 1233), 'django.utils.translation.gettext_lazy', '_', (['"""Received mail does not pass authenticity checks."""'], {}), "('Received mail does not pass authenticity checks.')\n", (1181, 1233), True, 'from django.utils.translation import gettext_lazy as _\n'), ((3865, 3884), 'django.utils.translation.gettext_lazy', '_', (['"""problem report"""'], {}), "('problem report')\n", (3866, 3884), True, 'from django.utils.translation import gettext_lazy as _\n'), ((3915, 3935), 'django.utils.translation.gettext_lazy', '_', (['"""problem reports"""'], {}), "('problem reports')\n", (3916, 3935), True, 'from django.utils.translation import gettext_lazy as _\n'), ((4507, 4521), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (4519, 4521), False, 'from django.utils import timezone\n'), ((4897, 4911), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (4909, 4911), False, 'from django.utils import timezone\n'), ((5441, 5455), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (5453, 5455), False, 'from django.utils import timezone\n')] |
# In The Name of God
# =======================================
# [] File Name : pipe.py
#
# [] Creation Date : 27-11-2019
#
# [] Created By : <NAME> <<EMAIL>>
# =======================================
import os
def child(n, w):
print('I am Child')
f = os.fdopen(w, 'w')
# the old way
f.write('hello %d\n' % (n) )
# the new way
f.write(f'hello {n}\n')
def parent():
r, w = os.pipe()
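    # r is the read end (parent side); w is the write end (child side).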
n = 10
if os.fork() == 0:
child(n, w)
else:
print('I am Parent')
f = os.fdopen(r, 'r')
print(f.readline())
if __name__ == '__main__':
parent()
| [
"os.fdopen",
"os.fork",
"os.pipe"
] | [((263, 280), 'os.fdopen', 'os.fdopen', (['w', '"""w"""'], {}), "(w, 'w')\n", (272, 280), False, 'import os\n'), ((405, 414), 'os.pipe', 'os.pipe', ([], {}), '()\n', (412, 414), False, 'import os\n'), ((433, 442), 'os.fork', 'os.fork', ([], {}), '()\n', (440, 442), False, 'import os\n'), ((520, 537), 'os.fdopen', 'os.fdopen', (['r', '"""r"""'], {}), "(r, 'r')\n", (529, 537), False, 'import os\n')] |
# -*- coding: utf-8 -*-
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
import numpy as np
import matplotlib.pyplot as plt
import logictensornetworks_wrapper as ltnw
nr_samples=500
data=np.random.uniform([0,0],[1.,1.],(nr_samples,2)).astype(np.float32)
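# Class A: points inside the disk of radius 0.3 around (0.5, 0.5); class B: the rest.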
data_A=data[np.where(np.sum(np.square(data-[.5,.5]),axis=1)<.09)]
data_B=data[np.where(np.sum(np.square(data-[.5,.5]),axis=1)>=.09)]
ltnw.variable("?data",data)
ltnw.variable("?data_A",data_A)
ltnw.variable("?data_B",data_B)
ltnw.predicate("A",2)
ltnw.predicate("B",2)
ltnw.axiom("forall ?data_A: A(?data_A)")
ltnw.axiom("forall ?data_B: ~A(?data_B)")
ltnw.axiom("forall ?data_B: B(?data_B)")
ltnw.axiom("forall ?data_A: ~B(?data_A)")
ltnw.axiom("forall ?data: A(?data) -> ~B(?data)")
ltnw.axiom("forall ?data: B(?data) -> ~A(?data)")
ltnw.initialize_knowledgebase(initial_sat_level_threshold=.1)
sat_level=ltnw.train(track_sat_levels=1000,sat_level_epsilon=.99,max_epochs=20000)
result=ltnw.ask("A(?data)")
plt.figure(figsize=(10,8))
plt.subplot(2,2,1)
plt.title("A(x) - training")
plt.scatter(data[:,0],data[:,1],c=result.squeeze())
plt.colorbar()
plt.subplot(2,2,2)
result=ltnw.ask("B(?data)")
plt.title("B(x) - training")
plt.scatter(data[:,0],data[:,1],c=result.squeeze())
plt.colorbar()
data_test=np.random.uniform([0,0],[1.,1.],(nr_samples,2)).astype(np.float32)
ltnw.variable("?data_test",data_test)
result=ltnw.ask("A(?data_test)")
plt.subplot(2,2,3)
plt.title("A(x) - test")
plt.scatter(data_test[:,0],data_test[:,1],c=result.squeeze())
plt.colorbar()
result=ltnw.ask("B(?data_test)")
plt.subplot(2,2,4)
plt.title("B(x) -test")
plt.scatter(data_test[:,0],data_test[:,1],c=result.squeeze())
plt.colorbar()
plt.show()
ltnw.constant("a",[0.5,.5])
ltnw.constant("b",[0.75,.75])
print("a is in A: %s" % ltnw.ask("A(a)"))
print("b is in A: %s" % ltnw.ask("A(b)"))
print("a is in B: %s" % ltnw.ask("B(a)"))
print("b is in B: %s" % ltnw.ask("B(b)"))
| [
"logging.getLogger",
"logging.basicConfig",
"logictensornetworks_wrapper.constant",
"logictensornetworks_wrapper.variable",
"matplotlib.pyplot.colorbar",
"logictensornetworks_wrapper.train",
"numpy.square",
"logictensornetworks_wrapper.predicate",
"logictensornetworks_wrapper.axiom",
"logictensorn... | [((48, 67), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (65, 67), False, 'import logging\n'), ((89, 129), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (108, 129), False, 'import logging\n'), ((448, 476), 'logictensornetworks_wrapper.variable', 'ltnw.variable', (['"""?data"""', 'data'], {}), "('?data', data)\n", (461, 476), True, 'import logictensornetworks_wrapper as ltnw\n'), ((476, 508), 'logictensornetworks_wrapper.variable', 'ltnw.variable', (['"""?data_A"""', 'data_A'], {}), "('?data_A', data_A)\n", (489, 508), True, 'import logictensornetworks_wrapper as ltnw\n'), ((508, 540), 'logictensornetworks_wrapper.variable', 'ltnw.variable', (['"""?data_B"""', 'data_B'], {}), "('?data_B', data_B)\n", (521, 540), True, 'import logictensornetworks_wrapper as ltnw\n'), ((541, 563), 'logictensornetworks_wrapper.predicate', 'ltnw.predicate', (['"""A"""', '(2)'], {}), "('A', 2)\n", (555, 563), True, 'import logictensornetworks_wrapper as ltnw\n'), ((563, 585), 'logictensornetworks_wrapper.predicate', 'ltnw.predicate', (['"""B"""', '(2)'], {}), "('B', 2)\n", (577, 585), True, 'import logictensornetworks_wrapper as ltnw\n'), ((586, 626), 'logictensornetworks_wrapper.axiom', 'ltnw.axiom', (['"""forall ?data_A: A(?data_A)"""'], {}), "('forall ?data_A: A(?data_A)')\n", (596, 626), True, 'import logictensornetworks_wrapper as ltnw\n'), ((627, 668), 'logictensornetworks_wrapper.axiom', 'ltnw.axiom', (['"""forall ?data_B: ~A(?data_B)"""'], {}), "('forall ?data_B: ~A(?data_B)')\n", (637, 668), True, 'import logictensornetworks_wrapper as ltnw\n'), ((670, 710), 'logictensornetworks_wrapper.axiom', 'ltnw.axiom', (['"""forall ?data_B: B(?data_B)"""'], {}), "('forall ?data_B: B(?data_B)')\n", (680, 710), True, 'import logictensornetworks_wrapper as ltnw\n'), ((711, 752), 'logictensornetworks_wrapper.axiom', 'ltnw.axiom', (['"""forall ?data_A: ~B(?data_A)"""'], {}), "('forall ?data_A: ~B(?data_A)')\n", (721, 752), True, 'import logictensornetworks_wrapper as ltnw\n'), ((754, 803), 'logictensornetworks_wrapper.axiom', 'ltnw.axiom', (['"""forall ?data: A(?data) -> ~B(?data)"""'], {}), "('forall ?data: A(?data) -> ~B(?data)')\n", (764, 803), True, 'import logictensornetworks_wrapper as ltnw\n'), ((804, 853), 'logictensornetworks_wrapper.axiom', 'ltnw.axiom', (['"""forall ?data: B(?data) -> ~A(?data)"""'], {}), "('forall ?data: B(?data) -> ~A(?data)')\n", (814, 853), True, 'import logictensornetworks_wrapper as ltnw\n'), ((855, 917), 'logictensornetworks_wrapper.initialize_knowledgebase', 'ltnw.initialize_knowledgebase', ([], {'initial_sat_level_threshold': '(0.1)'}), '(initial_sat_level_threshold=0.1)\n', (884, 917), True, 'import logictensornetworks_wrapper as ltnw\n'), ((927, 1002), 'logictensornetworks_wrapper.train', 'ltnw.train', ([], {'track_sat_levels': '(1000)', 'sat_level_epsilon': '(0.99)', 'max_epochs': '(20000)'}), '(track_sat_levels=1000, sat_level_epsilon=0.99, max_epochs=20000)\n', (937, 1002), True, 'import logictensornetworks_wrapper as ltnw\n'), ((1008, 1028), 'logictensornetworks_wrapper.ask', 'ltnw.ask', (['"""A(?data)"""'], {}), "('A(?data)')\n", (1016, 1028), True, 'import logictensornetworks_wrapper as ltnw\n'), ((1029, 1056), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (1039, 1056), True, 'import matplotlib.pyplot as plt\n'), ((1056, 1076), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (1067, 1076), True, 
'import matplotlib.pyplot as plt\n'), ((1075, 1103), 'matplotlib.pyplot.title', 'plt.title', (['"""A(x) - training"""'], {}), "('A(x) - training')\n", (1084, 1103), True, 'import matplotlib.pyplot as plt\n'), ((1156, 1170), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1168, 1170), True, 'import matplotlib.pyplot as plt\n'), ((1172, 1192), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (1183, 1192), True, 'import matplotlib.pyplot as plt\n'), ((1198, 1218), 'logictensornetworks_wrapper.ask', 'ltnw.ask', (['"""B(?data)"""'], {}), "('B(?data)')\n", (1206, 1218), True, 'import logictensornetworks_wrapper as ltnw\n'), ((1219, 1247), 'matplotlib.pyplot.title', 'plt.title', (['"""B(x) - training"""'], {}), "('B(x) - training')\n", (1228, 1247), True, 'import matplotlib.pyplot as plt\n'), ((1300, 1314), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1312, 1314), True, 'import matplotlib.pyplot as plt\n'), ((1393, 1431), 'logictensornetworks_wrapper.variable', 'ltnw.variable', (['"""?data_test"""', 'data_test'], {}), "('?data_test', data_test)\n", (1406, 1431), True, 'import logictensornetworks_wrapper as ltnw\n'), ((1438, 1463), 'logictensornetworks_wrapper.ask', 'ltnw.ask', (['"""A(?data_test)"""'], {}), "('A(?data_test)')\n", (1446, 1463), True, 'import logictensornetworks_wrapper as ltnw\n'), ((1464, 1484), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (1475, 1484), True, 'import matplotlib.pyplot as plt\n'), ((1483, 1507), 'matplotlib.pyplot.title', 'plt.title', (['"""A(x) - test"""'], {}), "('A(x) - test')\n", (1492, 1507), True, 'import matplotlib.pyplot as plt\n'), ((1570, 1584), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1582, 1584), True, 'import matplotlib.pyplot as plt\n'), ((1593, 1618), 'logictensornetworks_wrapper.ask', 'ltnw.ask', (['"""B(?data_test)"""'], {}), "('B(?data_test)')\n", (1601, 1618), True, 'import logictensornetworks_wrapper as ltnw\n'), ((1619, 1639), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (1630, 1639), True, 'import matplotlib.pyplot as plt\n'), ((1638, 1661), 'matplotlib.pyplot.title', 'plt.title', (['"""B(x) -test"""'], {}), "('B(x) -test')\n", (1647, 1661), True, 'import matplotlib.pyplot as plt\n'), ((1724, 1738), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1736, 1738), True, 'import matplotlib.pyplot as plt\n'), ((1740, 1750), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1748, 1750), True, 'import matplotlib.pyplot as plt\n'), ((1752, 1782), 'logictensornetworks_wrapper.constant', 'ltnw.constant', (['"""a"""', '[0.5, 0.5]'], {}), "('a', [0.5, 0.5])\n", (1765, 1782), True, 'import logictensornetworks_wrapper as ltnw\n'), ((1780, 1812), 'logictensornetworks_wrapper.constant', 'ltnw.constant', (['"""b"""', '[0.75, 0.75]'], {}), "('b', [0.75, 0.75])\n", (1793, 1812), True, 'import logictensornetworks_wrapper as ltnw\n'), ((247, 301), 'numpy.random.uniform', 'np.random.uniform', (['[0, 0]', '[1.0, 1.0]', '(nr_samples, 2)'], {}), '([0, 0], [1.0, 1.0], (nr_samples, 2))\n', (264, 301), True, 'import numpy as np\n'), ((1326, 1380), 'numpy.random.uniform', 'np.random.uniform', (['[0, 0]', '[1.0, 1.0]', '(nr_samples, 2)'], {}), '([0, 0], [1.0, 1.0], (nr_samples, 2))\n', (1343, 1380), True, 'import numpy as np\n'), ((1834, 1850), 'logictensornetworks_wrapper.ask', 'ltnw.ask', (['"""A(a)"""'], {}), "('A(a)')\n", (1842, 1850), True, 'import 
logictensornetworks_wrapper as ltnw\n'), ((1876, 1892), 'logictensornetworks_wrapper.ask', 'ltnw.ask', (['"""A(b)"""'], {}), "('A(b)')\n", (1884, 1892), True, 'import logictensornetworks_wrapper as ltnw\n'), ((1918, 1934), 'logictensornetworks_wrapper.ask', 'ltnw.ask', (['"""B(a)"""'], {}), "('B(a)')\n", (1926, 1934), True, 'import logictensornetworks_wrapper as ltnw\n'), ((1960, 1976), 'logictensornetworks_wrapper.ask', 'ltnw.ask', (['"""B(b)"""'], {}), "('B(b)')\n", (1968, 1976), True, 'import logictensornetworks_wrapper as ltnw\n'), ((342, 370), 'numpy.square', 'np.square', (['(data - [0.5, 0.5])'], {}), '(data - [0.5, 0.5])\n', (351, 370), True, 'import numpy as np\n'), ((408, 436), 'numpy.square', 'np.square', (['(data - [0.5, 0.5])'], {}), '(data - [0.5, 0.5])\n', (417, 436), True, 'import numpy as np\n')] |
from api.subgroups_names import ClassicalSubgroups
from graph_constructor import get_graph
from plotter.graph_plotter import GraphPlotter
from plotter.geodesic_plotter import GeodesicPlotter
from plotter.marker_plotter import MarkerPlotter
from special_polygon import SpecialPolygon
from fimath import Matrix, Field
from reduction import Decomposer
from .error import ApiError, FormatError, ValueRangeError
class Api(object):
def __init__(self):
self._subgroup_name = None
self._subgroup = None
self._n = None
self._graph = None
self._domain = None
self._tree = None
self._involutions = None
self._generators = None
self._white_markers = None
self._black_markers = None
self._cut_markers = None
self._decomposition = None
self._marker_plotter = None
def set_subgroup(self, subgroup: ClassicalSubgroups, n=2):
if type(n) is not int:
if n == '':
raise FormatError('Field N should be filled')
try:
n = int(n)
except ValueError:
raise TypeError('N should be int')
if n <= 1:
raise ValueRangeError('N should be greater than 1')
self._n = n
self._subgroup_name = subgroup
self._subgroup = subgroup.to_class()(self._n)
def calc_graph(self):
if self._subgroup:
self._graph = get_graph(self._subgroup)
else:
raise Exception('subgroup is not set.')
def plot_graph_on_axes(self, axes):
if not self._graph:
raise ApiError('graph is not calculated')
gd = GraphPlotter(axes)
gd.plot(self._graph)
def plot_graph_on_bokeh(self, fig):
if not self._graph:
raise ApiError('graph is not calculated')
gd = GraphPlotter(bokeh_fig=fig)
gd.plot(self._graph)
def calc_domain(self):
if self._graph:
sp = SpecialPolygon(self._graph)
try:
sp.construct_polygon()
except Exception as e:
raise ApiError(f'unexpected error during constructing polygon: {e}')
self._domain = sp.edges
self._tree = sp.tree
self._involutions = sp.involutions
self._white_markers = sp.white_vertices
self._black_markers = sp.black_vertices
self._cut_markers = sp.cut_vertices
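            # each involution triple appears to be (edge, partner, matrix); zip(*...)[2] collects the pairing matrices as generators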
self._generators = list(zip(*self._involutions))[2]
else:
raise ApiError('graph is not set.')
@property
def markers(self):
return [self._white_markers, self._black_markers, self._cut_markers]
def plot_domain_on_axes(self, axes, markers=False):
gp = GeodesicPlotter(ax=axes)
gp.plot(self._domain)
gp.plot(self._tree, color='r', alpha=0.8, line_width=0.75, dashed=True)
if markers:
_marker_plotter = MarkerPlotter(axes)
try:
_marker_plotter.plot(self._white_markers, self._black_markers, self._cut_markers)
except Exception:
raise ApiError('unexpected error.')
def plot_domain_on_bokeh(self, fig, markers=True):
gp = GeodesicPlotter(bokeh_fig=fig)
gp.plot(self._domain)
gp.plot(self._tree, color='red', alpha=0.8, line_width=0.75, dashed=True)
if markers:
_marker_plotter = MarkerPlotter(bokeh_fig=fig)
_marker_plotter.plot(self._white_markers, self._black_markers, self._cut_markers)
# try:
# _marker_plotter.plot(self._white_markers, self._black_markers, self._cut_markers)
# except Exception:
# raise ApiError('unexpected error.')
def change_markers_state(self):
if self._marker_plotter:
self._marker_plotter.change_visible()
def get_generators_str(self):
return Matrix.beautify(self._generators)
def is_in_subgroup(self, matrix: Matrix):
if matrix.det() != 1:
return False
a, b, c, d = matrix.a, matrix.b, matrix.c, matrix.d
n = self._n
if self._subgroup_name is ClassicalSubgroups.GammaBotZero:
return c % n == 0
elif self._subgroup_name is ClassicalSubgroups.GammaTopZero:
return b % n == 0
elif self._subgroup_name is ClassicalSubgroups.GammaBotOne:
return a % n == 1 and d % n == 1 and c % n == 0
elif self._subgroup_name is ClassicalSubgroups.GammaTopOne:
return a % n == 1 and d % n == 1 and b % n == 0
elif self._subgroup_name is ClassicalSubgroups.Gamma:
return a % n == 1 and d % n == 1 and b % n == 0 and c % n == 0
return False
def decompose_matrix(self, matrix_str):
try:
matrix = Matrix.from_str(matrix_str)
except Exception:
raise ApiError('Matrix should be in following format: a, b, c, d')
if not self.is_in_subgroup(matrix):
raise ApiError('Matrix does not belong to subgroup')
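        # generic base point in the upper half-plane; moe() presumably evaluates the Möbius action of the matrix at z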
z = Field(0.5+1.5j)
w = matrix.moe(z)
decomposer = Decomposer(polygon=self._domain, involutions=self._involutions, z=z, w=w)
try:
self._decomposition = decomposer.decompose()
except Exception:
raise ApiError('unexpected algorithm error occurred.')
def get_decomposition(self):
return Matrix.beautify(self._decomposition)
| [
"graph_constructor.get_graph",
"fimath.Matrix.beautify",
"plotter.geodesic_plotter.GeodesicPlotter",
"fimath.Matrix.from_str",
"reduction.Decomposer",
"plotter.marker_plotter.MarkerPlotter",
"special_polygon.SpecialPolygon",
"fimath.Field",
"plotter.graph_plotter.GraphPlotter"
] | [((1674, 1692), 'plotter.graph_plotter.GraphPlotter', 'GraphPlotter', (['axes'], {}), '(axes)\n', (1686, 1692), False, 'from plotter.graph_plotter import GraphPlotter\n'), ((1858, 1885), 'plotter.graph_plotter.GraphPlotter', 'GraphPlotter', ([], {'bokeh_fig': 'fig'}), '(bokeh_fig=fig)\n', (1870, 1885), False, 'from plotter.graph_plotter import GraphPlotter\n'), ((2767, 2791), 'plotter.geodesic_plotter.GeodesicPlotter', 'GeodesicPlotter', ([], {'ax': 'axes'}), '(ax=axes)\n', (2782, 2791), False, 'from plotter.geodesic_plotter import GeodesicPlotter\n'), ((3238, 3268), 'plotter.geodesic_plotter.GeodesicPlotter', 'GeodesicPlotter', ([], {'bokeh_fig': 'fig'}), '(bokeh_fig=fig)\n', (3253, 3268), False, 'from plotter.geodesic_plotter import GeodesicPlotter\n'), ((3929, 3962), 'fimath.Matrix.beautify', 'Matrix.beautify', (['self._generators'], {}), '(self._generators)\n', (3944, 3962), False, 'from fimath import Matrix, Field\n'), ((5093, 5110), 'fimath.Field', 'Field', (['(0.5 + 1.5j)'], {}), '(0.5 + 1.5j)\n', (5098, 5110), False, 'from fimath import Matrix, Field\n'), ((5156, 5229), 'reduction.Decomposer', 'Decomposer', ([], {'polygon': 'self._domain', 'involutions': 'self._involutions', 'z': 'z', 'w': 'w'}), '(polygon=self._domain, involutions=self._involutions, z=z, w=w)\n', (5166, 5229), False, 'from reduction import Decomposer\n'), ((5443, 5479), 'fimath.Matrix.beautify', 'Matrix.beautify', (['self._decomposition'], {}), '(self._decomposition)\n', (5458, 5479), False, 'from fimath import Matrix, Field\n'), ((1446, 1471), 'graph_constructor.get_graph', 'get_graph', (['self._subgroup'], {}), '(self._subgroup)\n', (1455, 1471), False, 'from graph_constructor import get_graph\n'), ((1984, 2011), 'special_polygon.SpecialPolygon', 'SpecialPolygon', (['self._graph'], {}), '(self._graph)\n', (1998, 2011), False, 'from special_polygon import SpecialPolygon\n'), ((2952, 2971), 'plotter.marker_plotter.MarkerPlotter', 'MarkerPlotter', (['axes'], {}), '(axes)\n', (2965, 2971), False, 'from plotter.marker_plotter import MarkerPlotter\n'), ((3431, 3459), 'plotter.marker_plotter.MarkerPlotter', 'MarkerPlotter', ([], {'bokeh_fig': 'fig'}), '(bokeh_fig=fig)\n', (3444, 3459), False, 'from plotter.marker_plotter import MarkerPlotter\n'), ((4837, 4864), 'fimath.Matrix.from_str', 'Matrix.from_str', (['matrix_str'], {}), '(matrix_str)\n', (4852, 4864), False, 'from fimath import Matrix, Field\n')] |
"""
Module description:
"""
__version__ = '0.3.1'
__author__ = '<NAME>, <NAME>'
__email__ = '<EMAIL>, <EMAIL>'
import tensorflow as tf
import numpy as np
import random
class Sampler():
def __init__(self, indexed_ratings=None, m=None, num_users=None, num_items=None, transactions=None, batch_size=512, random_seed=42):
np.random.seed(random_seed)
random.seed(random_seed)
self._UIDICT = {u: list(set(indexed_ratings[u])) for u in indexed_ratings}
self._POS = list({(u, i, 1) for u, items in self._UIDICT.items() for i in items})
self._POS = random.sample(self._POS, len(self._POS))
self._M = m
self._NUM_USERS = num_users
self._NUM_ITEMS = num_items
self._transactions = transactions
self._batch_size = batch_size
def _full_generator(self):
r_int = np.random.randint
n_items = self._NUM_ITEMS
ui_dict = self._UIDICT
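        # for every positive (user, item) pair, draw self._M negative items the user never interacted with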
neg = set()
for u, i, _ in self._POS:
ui = ui_dict[u]
for _ in range(self._M):
j = r_int(n_items)
while j in ui:
j = r_int(n_items)
neg.add((u, j, 0))
samples = self._POS[:]
samples.extend(list(neg))
samples = random.sample(samples, len(samples))
# u, i, b = map(np.array, zip(*samples))
# yield u,i,b
for start in range(0, len(samples), self._batch_size):
u, i, b = map(np.array, zip(*samples[start:min(start + self._batch_size, len(samples))]))
yield u, i, b
def step(self, batch_size: int):
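        # same negative-sampling scheme as _full_generator, but the positive set is rebuilt on every call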
r_int = np.random.randint
n_items = self._NUM_ITEMS
ui_dict = self._UIDICT
pos = {(u, i, 1) for u, items in ui_dict.items() for i in items}
neg = set()
for u, i, _ in pos:
ui = ui_dict[u]
for _ in range(self._M):
j = r_int(n_items)
while j in ui:
j = r_int(n_items)
neg.add((u, j, 0))
samples = list(pos)
samples.extend(list(neg))
samples = random.sample(samples, len(samples))
for start in range(0, len(samples), batch_size):
u, i, b = map(np.array, zip(*samples[start:min(start + batch_size, len(samples))]))
yield u, i, b
def create_tf_dataset(self):
data = tf.data.Dataset.from_generator(generator=self._full_generator,
output_types=(np.int64, np.int64, np.int64),
)
# data = data.unbatch().batch(batch_size=self._batch_size)
data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return data | [
"tensorflow.data.Dataset.from_generator",
"numpy.random.seed",
"random.seed"
] | [((333, 360), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (347, 360), True, 'import numpy as np\n'), ((369, 393), 'random.seed', 'random.seed', (['random_seed'], {}), '(random_seed)\n', (380, 393), False, 'import random\n'), ((2386, 2498), 'tensorflow.data.Dataset.from_generator', 'tf.data.Dataset.from_generator', ([], {'generator': 'self._full_generator', 'output_types': '(np.int64, np.int64, np.int64)'}), '(generator=self._full_generator, output_types\n =(np.int64, np.int64, np.int64))\n', (2416, 2498), True, 'import tensorflow as tf\n')] |
#! /usr/bin/env python
import socket
def send_ping_data(HOST, PORT):
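    # minimal TCP client: assumes some server is already listening on (HOST, PORT) and sends back a reply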
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((HOST,PORT))
s.sendall(b'Hello, world')
data=s.recv(1024)
return data
if __name__== "__main__" :
print('Received',repr(send_ping_data('127.0.0.1','65432')))
| [
"socket.socket"
] | [((80, 129), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (93, 129), False, 'import socket\n')] |
# bot.py
import os
import random
import discord
from dotenv import load_dotenv
import commands as cm
import qrys
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
GUILD = os.getenv('DISCORD_GUILD')
client = discord.Client()
prefix = '$'
@client.event
async def on_ready():
for guild in client.guilds:
if guild.name == GUILD:
break
await qrys.connect_db()
print(
f'{client.user} is connected to the following guild:\n'
f'{guild.name}(id: {guild.id})'
)
@client.event
async def on_message(message):
if message.author == client.user:
return
if not message.content.startswith(prefix):
return
switcher = {
'test': cm.m_test,
'facts': cm.m_facts,
'user': cm.m_user,
'reset': cm.m_reset,
'bet': cm.m_bet,
}
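    # dispatch on the command word (prefix stripped); unknown commands fall back to the usage handler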
args = message.content.split(' ')
response = await switcher.get(args[0][1:], cm.m_usage)(message, args[1:])
if response == 'Invalid':
print('Invalid Command: ' + message.content + '\nwith details: ' + str(message))
response = await cm.m_usage(0, 0)
await message.channel.send(response)
client.run(TOKEN)
| [
"qrys.connect_db",
"os.getenv",
"commands.m_usage",
"dotenv.load_dotenv",
"discord.Client"
] | [((115, 128), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (126, 128), False, 'from dotenv import load_dotenv\n'), ((137, 163), 'os.getenv', 'os.getenv', (['"""DISCORD_TOKEN"""'], {}), "('DISCORD_TOKEN')\n", (146, 163), False, 'import os\n'), ((172, 198), 'os.getenv', 'os.getenv', (['"""DISCORD_GUILD"""'], {}), "('DISCORD_GUILD')\n", (181, 198), False, 'import os\n'), ((209, 225), 'discord.Client', 'discord.Client', ([], {}), '()\n', (223, 225), False, 'import discord\n'), ((373, 390), 'qrys.connect_db', 'qrys.connect_db', ([], {}), '()\n', (388, 390), False, 'import qrys\n'), ((1099, 1115), 'commands.m_usage', 'cm.m_usage', (['(0)', '(0)'], {}), '(0, 0)\n', (1109, 1115), True, 'import commands as cm\n')] |
# flake8: noqa
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add kubernetes scheduler uniqueness
Revision ID: 8<PASSWORD>
Revises: <PASSWORD>
Create Date: 2018-04-03 15:31:20.814328
"""
# revision identifiers, used by Alembic.
revision = '<KEY>0'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
RESOURCE_TABLE = "kube_worker_uuid"
def upgrade():
table = op.create_table(
RESOURCE_TABLE,
sa.Column("one_row_id", sa.Boolean, server_default=sa.true(), primary_key=True),
sa.Column("worker_uuid", sa.String(255)),
sa.CheckConstraint("one_row_id", name="kube_worker_one_row_id")
)
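    # the boolean primary key plus the check constraint pin the column to a single True value,
    # so the table can never hold more than one row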
op.bulk_insert(table, [
{"worker_uuid": ""}
])
def downgrade():
op.drop_table(RESOURCE_TABLE)
| [
"sqlalchemy.true",
"alembic.op.drop_table",
"alembic.op.bulk_insert",
"sqlalchemy.String",
"sqlalchemy.CheckConstraint"
] | [((1195, 1239), 'alembic.op.bulk_insert', 'op.bulk_insert', (['table', "[{'worker_uuid': ''}]"], {}), "(table, [{'worker_uuid': ''}])\n", (1209, 1239), False, 'from alembic import op\n'), ((1277, 1306), 'alembic.op.drop_table', 'op.drop_table', (['RESOURCE_TABLE'], {}), '(RESOURCE_TABLE)\n', (1290, 1306), False, 'from alembic import op\n'), ((1121, 1184), 'sqlalchemy.CheckConstraint', 'sa.CheckConstraint', (['"""one_row_id"""'], {'name': '"""kube_worker_one_row_id"""'}), "('one_row_id', name='kube_worker_one_row_id')\n", (1139, 1184), True, 'import sqlalchemy as sa\n'), ((1096, 1110), 'sqlalchemy.String', 'sa.String', (['(255)'], {}), '(255)\n', (1105, 1110), True, 'import sqlalchemy as sa\n'), ((1033, 1042), 'sqlalchemy.true', 'sa.true', ([], {}), '()\n', (1040, 1042), True, 'import sqlalchemy as sa\n')] |
import torch
import torch.nn.functional as F
from ..models.progressive import ProGANGenerator, ProGANDiscriminator
from ..modules.gan_loss import ImprovedWGANLoss
from ..modules.instance_refiner import InstanceRefiner
from tools.utils import to_cuda
from models import load_network, save_network, print_network
class SegModel(torch.nn.Module):
def __init__(self, opt, is_train=True, is_main=True, logger=None):
super().__init__()
self.opt = opt
self.is_main = is_main
self.netG, self.netD = self.initialize_networks(is_train)
if is_train:
self.opt_g, self.opt_d = self.create_optimizers(self.opt)
self.gan_loss = ImprovedWGANLoss(self.netD)
self.logger = logger if self.is_main else None
self.ins_refiner = InstanceRefiner(self.opt)
def forward(self, data, fake_data={}, interpolate=False, alpha=None, mode='', log=False, hard=True,
global_iteration=None):
z, real_seg, real_cond = self.preprocess_input(data)
_, fake_seg, _ = self.preprocess_input(fake_data, is_fake=True)
if mode == 'generator':
g_loss, fake_seg = self.compute_generator_loss(real_cond, z, interpolate, alpha, hard, log, global_iteration)
fake_seg = self.postprocess_output(fake_seg)
return g_loss, fake_seg
elif mode == 'discriminator':
d_loss = self.compute_discriminator_loss(real_cond, real_seg, fake_seg, interpolate, alpha, log, global_iteration)
return d_loss
elif mode == 'inference':
fake_seg = self.generate_fake(real_cond, z, interpolate, alpha, hard, log, global_iteration)
fake_seg = self.postprocess_output(fake_seg)
return fake_seg
else:
raise ValueError(f"mode '{mode}' is invalid")
def postprocess_output(self, seg):
if self.opt.dim != self.opt.seg_dim:
size = (int(self.opt.dim), int(self.opt.aspect_ratio * self.opt.dim))
mode = 'bilinear' if self.opt.discretization == "none" or self.opt.bilimax else 'nearest'
seg = {k: self.resize(v, size, mode=mode) for k, v in seg.items()}
if self.opt.bilimax:
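                # bilinear upsampling blurs the one-hot channels; re-discretize by scattering 1.0 at the argmax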
index = seg["sem_seg"].max(1, keepdim=True)[1]
seg["sem_seg"] = torch.zeros_like(seg["sem_seg"]).scatter_(1, index, 1.0)
return seg
def resize(self, t, size, mode='nearest'):
        if size is not None and 0 not in t.size():
return torch.nn.functional.interpolate(t, size=size, mode=mode)
else:
return t
def preprocess_input(self, data, is_fake=False):
size = (int(self.opt.seg_dim), int(self.opt.aspect_ratio * self.opt.seg_dim)) if self.opt.dim != self.opt.seg_dim else None
data["z_seg"] = to_cuda(data, "z_seg")
data["sem_seg"] = to_cuda(data, "sem_seg")
data["ins_center"] = to_cuda(data, "ins_center")
data["ins_offset"] = to_cuda(data, "ins_offset")
data["ins_edge"] = to_cuda(data, "ins_edge")
data["ins_density"] = to_cuda(data, "ins_density")
data["sem_cond"] = to_cuda(data, "sem_cond")
data["ins_cond"] = to_cuda(data, "ins_cond")
if is_fake:
data["sem_seg"] = data["sem_seg"].detach()
data["ins_center"] = data["ins_center"].detach()
data["ins_offset"] = data["ins_offset"].detach()
data["ins_edge"] = data["ins_edge"].detach()
data["ins_density"] = data["ins_density"].detach()
z = data["z_seg"]
seg = {'sem_seg': self.resize(data["sem_seg"], size),
'ins_center': self.resize(data["ins_center"], size),
'ins_offset': self.resize(data["ins_offset"], size),
'ins_edge': self.resize(data["ins_edge"], size),
'ins_density': self.resize(data["ins_density"], size)}
cond = {'sem_cond': data["sem_cond"],
'ins_cond': data["ins_cond"]}
return z, seg, cond
def initialize_networks(self, is_train):
if self.opt.model == 'progressive':
netG = ProGANGenerator(self.opt).cuda()
netD = ProGANDiscriminator(self.opt).cuda() if is_train else None
else:
raise ValueError
if self.is_main:
netG = load_network(netG, "seg_g", self.opt)
print_network(netG)
if is_train:
netD = load_network(netD, "seg_d", self.opt)
print_network(netD)
netG.res = self.opt.seg_dim
if netD:
netD.res = self.opt.seg_dim
return netG, netD
def save_model(self, global_iteration, latest):
save_network(self.netG, "seg_g", global_iteration, self.opt, latest=latest)
save_network(self.netD, "seg_d", global_iteration, self.opt, latest=latest)
def create_optimizers(self, opt):
if opt.optimizer == "adam":
opt_g = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
opt_d = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
else:
raise NotImplementedError
return opt_g, opt_d
def compute_generator_loss(self, real_cond, z, interpolate, alpha, hard, log, global_iteration):
if interpolate:
fake_segs = self.netG.interpolate(z, alpha, cond=real_cond)
else:
fake_segs = self.netG(z, cond=real_cond)
if not "inter" in self.opt.cond_mode:
fake_segs = [fake_segs[-1]]
x_fake_segs = [self.to_discrete(fake_seg) for fake_seg in fake_segs]
sem_for_dis = real_cond["sem_cond"] if "original_cgan" in self.opt.cond_mode else None
if interpolate:
score = self.netD.interpolate(x_fake_segs[-1], alpha, sem_cond=sem_for_dis)
else:
score = self.netD(x_fake_segs[-1], sem_cond=sem_for_dis)
loss_gen = self.gan_loss.generator_loss_logits(score).sum()
loss = loss_gen
spread = torch.tensor([])
fake_sem_mask = torch.tensor([])
fake_ins_cond = torch.tensor([])
pseudo_center_mask = torch.tensor([])
fake_raw_filtered_sem_seg = torch.tensor([])
real_sem_cond = torch.tensor([])
real_ins_cond = torch.tensor([])
pseudo_ins_center = torch.tensor([])
pseudo_ins_offset = torch.tensor([])
entropy = torch.tensor([])
fake_sem_cond = torch.tensor([])
fake_center_mask = torch.tensor([])
loss_sem_entropy = []
loss_sem_recover = []
loss_sem_d_recover = []
loss_ins_recover = []
loss_pseudo_center = []
loss_pseudo_offset = []
loss_sem_spread = []
loss_ova = []
for fake_seg, x_fake_seg in zip(fake_segs, x_fake_segs):
logprob = torch.log(fake_seg["sem_seg"] + 0.00001)
entropy = -torch.sum(torch.mul(fake_seg["sem_seg"], logprob), dim=1, keepdim=True)
loss_sem_entropy.append(torch.mean(entropy))
if self.opt.cond_seg:
cond_loss = 0
if self.opt.cond_seg in ["semantic", "panoptic"]:
real_sem_cond = real_cond["sem_cond"]
fake_sem_cond = torch.mean(fake_seg["sem_seg"], dim=(2, 3))
index = fake_seg["sem_seg"].max(1, keepdim=True)[1]
d_fake_sem_seg = torch.zeros_like(fake_seg["sem_seg"]).scatter_(1, index, 1.0)
d_fake_sem_cond = torch.mean(d_fake_sem_seg, dim=(2, 3))
logprob_cond = torch.log(fake_sem_cond + 0.00001)
d_logprob_cond = torch.log(d_fake_sem_cond + 0.00001)
loss_sem_recover.append(F.kl_div(logprob_cond, real_sem_cond, reduction='batchmean'))
loss_sem_d_recover.append(F.kl_div(d_logprob_cond, real_sem_cond, reduction='batchmean'))
if 'sem_recover' in self.opt.cond_mode:
cond_loss += loss_sem_recover[-1]
if self.opt.cond_seg in ["instance", "panoptic"] and "density" in self.opt.instance_type:
real_ins_cond = real_cond["ins_cond"]
fake_ins_cond = torch.sum(fake_seg["ins_density"], dim=(2, 3))
loss_ins_recover.append(F.l1_loss(fake_ins_cond, real_ins_cond))
if 'ins_recover' in self.opt.cond_mode:
cond_loss += loss_ins_recover[-1]
if 'sem_assisted' in self.opt.cond_mode:
fake_sem_mask = fake_seg["sem_mask"]
spread = torch.sum(fake_sem_mask, dim=1)
loss_sem_spread.append(torch.mean((spread - 1) ** 2))
if len(self.opt.ova_idx) > 0:
ova = 0
for idx in self.opt.ova_idx:
other_idx = [i for i in range(self.opt.num_semantics) if i != idx]
ova += torch.mean(torch.sum(fake_sem_mask[:, other_idx], dim=1) * fake_sem_mask[:, idx])
loss_ova.append(ova)
cond_loss += ova * self.opt.lambda_ova
if 'spread' in self.opt.cond_mode:
cond_loss += loss_sem_spread[-1]
if 'entropy' in self.opt.cond_mode:
cond_loss += loss_sem_entropy[-1]
loss += cond_loss
if self.opt.pseudo_supervision:
with torch.no_grad():
pseudo = self.ins_refiner.batch_transform(fake_seg["ins_center"], x_fake_seg["ins_offset"], x_fake_seg["sem_seg"])
pseudo_ins_center, pseudo_ins_offset = pseudo
loss_pseudo_center.append(F.mse_loss(fake_seg["ins_center"], pseudo_ins_center))
loss_pseudo_offset.append(F.mse_loss(x_fake_seg["ins_offset"], pseudo_ins_offset))
loss_pseudo = loss_pseudo_center[-1] + loss_pseudo_offset[-1]
loss += loss_pseudo
if self.logger:
# log scalars every step
self.logger.log_scalar("seg_generator/sem_entropy", loss_sem_entropy, global_iteration)
self.logger.log_scalar("seg_generator/gen", loss_gen, global_iteration)
self.logger.log_scalar("seg_generator/sem_cond_recover", loss_sem_recover, global_iteration)
self.logger.log_scalar("seg_generator/sem_cond_true_recover", loss_sem_d_recover, global_iteration)
self.logger.log_scalar("seg_generator/sem_ins_recover", loss_ins_recover, global_iteration)
self.logger.log_scalar("seg_generator/sem_cond_spread", loss_sem_spread, global_iteration)
self.logger.log_scalar("seg_generator/ins_pseudo_center", loss_pseudo_center, global_iteration)
self.logger.log_scalar("seg_generator/ins_pseudo_offset", loss_pseudo_offset, global_iteration)
self.logger.log_scalar("seg_generator/one_versus_all", loss_ova, global_iteration)
# log images every few steps
if log:
fake_seg = fake_segs[-1]
x_fake_seg = x_fake_segs[-1]
with torch.no_grad():
fake_raw_sem_seg = fake_seg["raw_sem_seg"]
if fake_raw_sem_seg.size(0) > 0:
fake_raw_filtered_sem_seg = torch.zeros(fake_raw_sem_seg[:16].cpu().shape)
fake_raw_filtered_sem_seg[real_sem_cond[:16].cpu()>0] = fake_raw_sem_seg[:16].cpu()[real_sem_cond[:16].cpu()>0]
if fake_seg["ins_center"].size(0) > 0:
fake_center_mask = self.ins_refiner.get_peak_mask(fake_seg["ins_center"][:16])
if pseudo_ins_center.size(0) > 0 and pseudo_center_mask.size(0) == 0:
pseudo_center_mask = self.ins_refiner.get_peak_mask(pseudo_ins_center[:16])
self.logger.log_semantic_seg("seg_generator/fake", fake_seg["sem_seg"][:16].cpu(), 4, global_iteration)
self.logger.log_semantic_seg("seg_generator/fake_gumbel", x_fake_seg["sem_seg"][:16].cpu(), 4, global_iteration)
self.logger.log_cond_distrib("seg_generator/semantic_distrib", real_sem_cond[:16].cpu(), fake_sem_cond[:16].cpu(), 4, 4, global_iteration)
self.logger.log_img("seg_generator/entropy", entropy[:16].cpu(), 4, global_iteration)
self.logger.log_spread("seg_generator/spread", spread[:16].cpu(), 4, global_iteration)
self.logger.log_semantic_mask("seg_generator/semantic_mask", fake_sem_mask[:1].cpu(), real_sem_cond[:1].cpu(), 16, 4, global_iteration)
self.logger.log_semantic_seg("seg_generator/fake_raw", fake_raw_sem_seg[:16].cpu(), 4, global_iteration)
self.logger.log_semantic_seg("seg_generator/fake_raw_filtered", fake_raw_filtered_sem_seg[:16].cpu(), 4, global_iteration)
self.logger.log_ins_center("seg_generator/fake_ins_center", fake_seg["ins_center"][:16].cpu(), 4, global_iteration)
self.logger.log_ins_center("seg_generator/pseudo_ins_center_gumbel", pseudo_ins_center[:16].cpu(), 4, global_iteration)
self.logger.log_img("seg_generator/fake_center_mask", fake_center_mask[:16].cpu(), 4, global_iteration)
self.logger.log_img("seg_generator/pseudo_center_mask_gumbel", pseudo_center_mask[:16].cpu(), 4, global_iteration)
self.logger.log_instance("seg_generator/fake_instance_gumbel", x_fake_seg["sem_seg"][:16].cpu(), fake_center_mask[:16].cpu(), x_fake_seg["ins_offset"][:16].cpu(), 4, global_iteration)
self.logger.log_instance("seg_generator/pseudo_instance_gumbel", x_fake_seg["sem_seg"][:16].cpu(), pseudo_center_mask[:16].cpu(), pseudo_ins_offset[:16].cpu(), 4, global_iteration)
self.logger.log_ins_offset("seg_generator/fake_ins_offset_gumbel", x_fake_seg["sem_seg"][:16].cpu(), x_fake_seg["ins_offset"][:16].cpu(), 4, global_iteration)
self.logger.log_ins_offset("seg_generator/pseudo_ins_offset_gumbel", x_fake_seg["sem_seg"][:16].cpu(), pseudo_ins_offset[:16].cpu(), 4, global_iteration)
self.logger.log_img("seg_generator/fake_ins_edge", fake_seg["ins_edge"][:16].cpu(), 4, global_iteration)
self.logger.log_ins_density("seg_generator/fake_ins_density", fake_seg["ins_density"][:16].cpu(), 4, global_iteration)
self.logger.log_cond_distrib("seg_generator/instance_distrib", real_ins_cond[:16].cpu(), fake_ins_cond[:16].cpu(), 4, 4, global_iteration)
if hard:
return loss, x_fake_segs[-1]
else:
return loss, fake_segs[-1]
def compute_discriminator_loss(self, real_cond, real_seg, fake_seg, interpolate, alpha, log, global_iteration):
sem_for_dis_real = torch.mean(real_seg["sem_seg"], dim=(2, 3)) if "original_cgan" in self.opt.cond_mode else None
sem_for_dis_fake = real_cond["sem_cond"] if "original_cgan" in self.opt.cond_mode else None
if interpolate:
real_score = self.netD.interpolate(real_seg, alpha, sem_cond=sem_for_dis_real)
fake_score = self.netD.interpolate(fake_seg, alpha, sem_cond=sem_for_dis_fake)
forward = lambda x: self.netD.interpolate(x, alpha, sem_cond=sem_for_dis_real)
else:
real_score = self.netD(real_seg, sem_cond=sem_for_dis_real)
fake_score = self.netD(fake_seg, sem_cond=sem_for_dis_fake)
forward = lambda x: self.netD(x, sem_cond=sem_for_dis_real)
if self.opt.panoptic:
real = torch.cat([real_seg["sem_seg"], real_seg["ins_center"], real_seg["ins_offset"], real_seg["ins_edge"],
real_seg["ins_density"]], dim=1)
fake = torch.cat([fake_seg["sem_seg"], fake_seg["ins_center"], fake_seg["ins_offset"], fake_seg["ins_edge"],
fake_seg["ins_density"]], dim=1)
else:
real = real_seg["sem_seg"]
fake = fake_seg["sem_seg"]
loss = self.gan_loss.discriminator_loss_logits(real, fake, real_score, fake_score, forward=forward)
if self.logger:
# log scalars every step
self.logger.log_scalar("seg_generator/dis", loss, global_iteration)
# log images every few step
if log:
real_center_mask = torch.Tensor([])
if self.opt.panoptic:
with torch.no_grad():
real_center_mask = self.ins_refiner.get_peak_mask(real_seg["ins_center"])
self.logger.log_semantic_seg("seg_generator/real", real_seg["sem_seg"][:16].cpu(), 4, global_iteration)
self.logger.log_ins_center("seg_generator/real_ins_center", real_seg["ins_center"][:16].cpu(), 4, global_iteration)
self.logger.log_ins_offset("seg_generator/real_ins_offset", real_seg["sem_seg"][:16].cpu(), real_seg["ins_offset"][:16].cpu(), 4, global_iteration)
self.logger.log_instance("seg_generator/real_instance", real_seg["sem_seg"][:16].cpu(), real_center_mask[:16].cpu(), real_seg["ins_offset"][:16].cpu(), 4, global_iteration)
self.logger.log_img("seg_generator/real_ins_edge", real_seg["ins_edge"][:16].cpu(), 4, global_iteration)
self.logger.log_ins_density("seg_generator/real_ins_density", real_seg["ins_density"][:16].cpu(), 4, global_iteration)
return loss
def generate_fake(self, real_cond, z, interpolate, alpha, hard, log, global_iteration):
with torch.no_grad():
if interpolate:
fake_seg = self.netG.interpolate(z, alpha, cond=real_cond)[-1]
else:
fake_seg = self.netG(z, cond=real_cond)[-1]
x_fake_seg = self.to_discrete(fake_seg)
fake_sem_cond = torch.mean(x_fake_seg["sem_seg"], dim=(2, 3))
if self.opt.cond_seg in ["semantic", "panoptic"]:
real_sem_cond = real_cond["sem_cond"]
else:
real_sem_cond = torch.Tensor([])
if log and self.logger:
self.logger.log_semantic_seg("seg_generator/fake", fake_seg["sem_seg"][:16].cpu(), 4, global_iteration)
self.logger.log_cond_distrib("seg_generator/semantic_distrib", real_sem_cond[:16].cpu(), fake_sem_cond[:16].cpu(), 4, 4, global_iteration)
if hard:
return x_fake_seg
else:
return fake_seg
def to_discrete(self, fake_seg):
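        # discretize the semantic map, then mask instance offsets/densities so they only cover the chosen classes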
fake_sem_seg = fake_seg["sem_seg"]
if self.opt.discretization == "gumbel":
x_fake_sem_seg = self.gumbel_sampler(fake_sem_seg)
elif self.opt.discretization == "max":
x_fake_sem_seg = self.max_sampler(fake_sem_seg)
elif self.opt.discretization == "none":
x_fake_sem_seg = self.none_sampler(fake_sem_seg)
else:
raise ValueError
fake_ins_center, fake_ins_offset = fake_seg["ins_center"], fake_seg["ins_offset"]
fake_ins_edge = fake_seg["ins_edge"]
fake_ins_density = fake_seg["ins_density"]
x_fake_ins_offset = self.ins_refiner.filter_offset(fake_ins_offset, x_fake_sem_seg)
x_fake_ins_density = self.ins_refiner.filter_density(fake_ins_density, x_fake_sem_seg)
x_fake_seg = {"sem_seg": x_fake_sem_seg, "ins_center": fake_ins_center, "ins_offset": x_fake_ins_offset,
"ins_edge": fake_ins_edge, "ins_density": x_fake_ins_density}
if self.opt.store_masks:
x_fake_seg["sem_mask"] = fake_seg["sem_mask"]
return x_fake_seg
def max_sampler(self, fake, hard=True, dim=1):
y_soft = fake
if hard:
# straight through.
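            # forward pass emits the one-hot y_hard while gradients flow through the soft probabilities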
index = y_soft.max(dim, keepdim=True)[1]
y_hard = torch.zeros_like(fake).scatter_(dim, index, 1.0)
return (y_hard - y_soft).detach() + y_soft
else:
# reparametrization trick.
return y_soft
def gumbel_sampler(self, fake, hard=True, dim=1):
logits = torch.log(fake + 0.00001)
if torch.isnan(logits.max()).data:
print(fake.min(), fake.max())
gumbels = -(torch.empty_like(logits).exponential_()).log() # ~Gumbel(0, 1)
gumbels = (logits + gumbels) / self.opt.t # ~Gumbel(logits, tau)
y_soft = gumbels.softmax(dim)
if hard:
# straight through.
index = y_soft.max(dim, keepdim=True)[1]
y_hard = torch.zeros_like(logits).scatter_(dim, index, 1.0)
return (y_hard - y_soft).detach() + y_soft
else:
# reparametrization trick.
return y_soft
def none_sampler(self, fake, hard=True, dim=1):
return fake | [
"torch.mul",
"torch.nn.functional.kl_div",
"torch.nn.functional.mse_loss",
"torch.log",
"torch.nn.functional.l1_loss",
"models.save_network",
"torch.mean",
"torch.Tensor",
"torch.empty_like",
"torch.tensor",
"torch.sum",
"models.load_network",
"torch.nn.functional.interpolate",
"tools.util... | [((2887, 2909), 'tools.utils.to_cuda', 'to_cuda', (['data', '"""z_seg"""'], {}), "(data, 'z_seg')\n", (2894, 2909), False, 'from tools.utils import to_cuda\n'), ((2937, 2961), 'tools.utils.to_cuda', 'to_cuda', (['data', '"""sem_seg"""'], {}), "(data, 'sem_seg')\n", (2944, 2961), False, 'from tools.utils import to_cuda\n'), ((2992, 3019), 'tools.utils.to_cuda', 'to_cuda', (['data', '"""ins_center"""'], {}), "(data, 'ins_center')\n", (2999, 3019), False, 'from tools.utils import to_cuda\n'), ((3050, 3077), 'tools.utils.to_cuda', 'to_cuda', (['data', '"""ins_offset"""'], {}), "(data, 'ins_offset')\n", (3057, 3077), False, 'from tools.utils import to_cuda\n'), ((3106, 3131), 'tools.utils.to_cuda', 'to_cuda', (['data', '"""ins_edge"""'], {}), "(data, 'ins_edge')\n", (3113, 3131), False, 'from tools.utils import to_cuda\n'), ((3163, 3191), 'tools.utils.to_cuda', 'to_cuda', (['data', '"""ins_density"""'], {}), "(data, 'ins_density')\n", (3170, 3191), False, 'from tools.utils import to_cuda\n'), ((3220, 3245), 'tools.utils.to_cuda', 'to_cuda', (['data', '"""sem_cond"""'], {}), "(data, 'sem_cond')\n", (3227, 3245), False, 'from tools.utils import to_cuda\n'), ((3274, 3299), 'tools.utils.to_cuda', 'to_cuda', (['data', '"""ins_cond"""'], {}), "(data, 'ins_cond')\n", (3281, 3299), False, 'from tools.utils import to_cuda\n'), ((4825, 4900), 'models.save_network', 'save_network', (['self.netG', '"""seg_g"""', 'global_iteration', 'self.opt'], {'latest': 'latest'}), "(self.netG, 'seg_g', global_iteration, self.opt, latest=latest)\n", (4837, 4900), False, 'from models import load_network, save_network, print_network\n'), ((4910, 4985), 'models.save_network', 'save_network', (['self.netD', '"""seg_d"""', 'global_iteration', 'self.opt'], {'latest': 'latest'}), "(self.netD, 'seg_d', global_iteration, self.opt, latest=latest)\n", (4922, 4985), False, 'from models import load_network, save_network, print_network\n'), ((6216, 6232), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (6228, 6232), False, 'import torch\n'), ((6258, 6274), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (6270, 6274), False, 'import torch\n'), ((6300, 6316), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (6312, 6316), False, 'import torch\n'), ((6347, 6363), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (6359, 6363), False, 'import torch\n'), ((6401, 6417), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (6413, 6417), False, 'import torch\n'), ((6443, 6459), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (6455, 6459), False, 'import torch\n'), ((6485, 6501), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (6497, 6501), False, 'import torch\n'), ((6531, 6547), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (6543, 6547), False, 'import torch\n'), ((6577, 6593), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (6589, 6593), False, 'import torch\n'), ((6613, 6629), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (6625, 6629), False, 'import torch\n'), ((6655, 6671), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (6667, 6671), False, 'import torch\n'), ((6700, 6716), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (6712, 6716), False, 'import torch\n'), ((20527, 20550), 'torch.log', 'torch.log', (['(fake + 1e-05)'], {}), '(fake + 1e-05)\n', (20536, 20550), False, 'import torch\n'), ((2577, 2633), 'torch.nn.functional.interpolate', 'torch.nn.functional.interpolate', (['t'], {'size': 'size', 'mode': 'mode'}), '(t, size=size, 
mode=mode)\n', (2608, 2633), False, 'import torch\n'), ((4436, 4473), 'models.load_network', 'load_network', (['netG', '"""seg_g"""', 'self.opt'], {}), "(netG, 'seg_g', self.opt)\n", (4448, 4473), False, 'from models import load_network, save_network, print_network\n'), ((4487, 4506), 'models.print_network', 'print_network', (['netG'], {}), '(netG)\n', (4500, 4506), False, 'from models import load_network, save_network, print_network\n'), ((7055, 7093), 'torch.log', 'torch.log', (["(fake_seg['sem_seg'] + 1e-05)"], {}), "(fake_seg['sem_seg'] + 1e-05)\n", (7064, 7093), False, 'import torch\n'), ((15185, 15228), 'torch.mean', 'torch.mean', (["real_seg['sem_seg']"], {'dim': '(2, 3)'}), "(real_seg['sem_seg'], dim=(2, 3))\n", (15195, 15228), False, 'import torch\n'), ((15973, 16112), 'torch.cat', 'torch.cat', (["[real_seg['sem_seg'], real_seg['ins_center'], real_seg['ins_offset'],\n real_seg['ins_edge'], real_seg['ins_density']]"], {'dim': '(1)'}), "([real_seg['sem_seg'], real_seg['ins_center'], real_seg[\n 'ins_offset'], real_seg['ins_edge'], real_seg['ins_density']], dim=1)\n", (15982, 16112), False, 'import torch\n'), ((16159, 16298), 'torch.cat', 'torch.cat', (["[fake_seg['sem_seg'], fake_seg['ins_center'], fake_seg['ins_offset'],\n fake_seg['ins_edge'], fake_seg['ins_density']]"], {'dim': '(1)'}), "([fake_seg['sem_seg'], fake_seg['ins_center'], fake_seg[\n 'ins_offset'], fake_seg['ins_edge'], fake_seg['ins_density']], dim=1)\n", (16168, 16298), False, 'import torch\n'), ((17972, 17987), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (17985, 17987), False, 'import torch\n'), ((18262, 18307), 'torch.mean', 'torch.mean', (["x_fake_seg['sem_seg']"], {'dim': '(2, 3)'}), "(x_fake_seg['sem_seg'], dim=(2, 3))\n", (18272, 18307), False, 'import torch\n'), ((4557, 4594), 'models.load_network', 'load_network', (['netD', '"""seg_d"""', 'self.opt'], {}), "(netD, 'seg_d', self.opt)\n", (4569, 4594), False, 'from models import load_network, save_network, print_network\n'), ((4612, 4631), 'models.print_network', 'print_network', (['netD'], {}), '(netD)\n', (4625, 4631), False, 'from models import load_network, save_network, print_network\n'), ((7229, 7248), 'torch.mean', 'torch.mean', (['entropy'], {}), '(entropy)\n', (7239, 7248), False, 'import torch\n'), ((16775, 16791), 'torch.Tensor', 'torch.Tensor', (['[]'], {}), '([])\n', (16787, 16791), False, 'import torch\n'), ((18480, 18496), 'torch.Tensor', 'torch.Tensor', (['[]'], {}), '([])\n', (18492, 18496), False, 'import torch\n'), ((7130, 7169), 'torch.mul', 'torch.mul', (["fake_seg['sem_seg']", 'logprob'], {}), "(fake_seg['sem_seg'], logprob)\n", (7139, 7169), False, 'import torch\n'), ((7487, 7530), 'torch.mean', 'torch.mean', (["fake_seg['sem_seg']"], {'dim': '(2, 3)'}), "(fake_seg['sem_seg'], dim=(2, 3))\n", (7497, 7530), False, 'import torch\n'), ((7743, 7781), 'torch.mean', 'torch.mean', (['d_fake_sem_seg'], {'dim': '(2, 3)'}), '(d_fake_sem_seg, dim=(2, 3))\n', (7753, 7781), False, 'import torch\n'), ((7818, 7850), 'torch.log', 'torch.log', (['(fake_sem_cond + 1e-05)'], {}), '(fake_sem_cond + 1e-05)\n', (7827, 7850), False, 'import torch\n'), ((7891, 7925), 'torch.log', 'torch.log', (['(d_fake_sem_cond + 1e-05)'], {}), '(d_fake_sem_cond + 1e-05)\n', (7900, 7925), False, 'import torch\n'), ((8473, 8519), 'torch.sum', 'torch.sum', (["fake_seg['ins_density']"], {'dim': '(2, 3)'}), "(fake_seg['ins_density'], dim=(2, 3))\n", (8482, 8519), False, 'import torch\n'), ((8878, 8909), 'torch.sum', 'torch.sum', (['fake_sem_mask'], {'dim': '(1)'}), 
'(fake_sem_mask, dim=1)\n', (8887, 8909), False, 'import torch\n'), ((9779, 9794), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9792, 9794), False, 'import torch\n'), ((10042, 10095), 'torch.nn.functional.mse_loss', 'F.mse_loss', (["fake_seg['ins_center']", 'pseudo_ins_center'], {}), "(fake_seg['ins_center'], pseudo_ins_center)\n", (10052, 10095), True, 'import torch.nn.functional as F\n'), ((10140, 10195), 'torch.nn.functional.mse_loss', 'F.mse_loss', (["x_fake_seg['ins_offset']", 'pseudo_ins_offset'], {}), "(x_fake_seg['ins_offset'], pseudo_ins_offset)\n", (10150, 10195), True, 'import torch.nn.functional as F\n'), ((11479, 11494), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11492, 11494), False, 'import torch\n'), ((20263, 20285), 'torch.zeros_like', 'torch.zeros_like', (['fake'], {}), '(fake)\n', (20279, 20285), False, 'import torch\n'), ((20970, 20994), 'torch.zeros_like', 'torch.zeros_like', (['logits'], {}), '(logits)\n', (20986, 20994), False, 'import torch\n'), ((2376, 2408), 'torch.zeros_like', 'torch.zeros_like', (["seg['sem_seg']"], {}), "(seg['sem_seg'])\n", (2392, 2408), False, 'import torch\n'), ((7973, 8033), 'torch.nn.functional.kl_div', 'F.kl_div', (['logprob_cond', 'real_sem_cond'], {'reduction': '"""batchmean"""'}), "(logprob_cond, real_sem_cond, reduction='batchmean')\n", (7981, 8033), True, 'import torch.nn.functional as F\n'), ((8082, 8144), 'torch.nn.functional.kl_div', 'F.kl_div', (['d_logprob_cond', 'real_sem_cond'], {'reduction': '"""batchmean"""'}), "(d_logprob_cond, real_sem_cond, reduction='batchmean')\n", (8090, 8144), True, 'import torch.nn.functional as F\n'), ((8565, 8604), 'torch.nn.functional.l1_loss', 'F.l1_loss', (['fake_ins_cond', 'real_ins_cond'], {}), '(fake_ins_cond, real_ins_cond)\n', (8574, 8604), True, 'import torch.nn.functional as F\n'), ((8954, 8983), 'torch.mean', 'torch.mean', (['((spread - 1) ** 2)'], {}), '((spread - 1) ** 2)\n', (8964, 8983), False, 'import torch\n'), ((16857, 16872), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (16870, 16872), False, 'import torch\n'), ((7642, 7679), 'torch.zeros_like', 'torch.zeros_like', (["fake_seg['sem_seg']"], {}), "(fake_seg['sem_seg'])\n", (7658, 7679), False, 'import torch\n'), ((20663, 20687), 'torch.empty_like', 'torch.empty_like', (['logits'], {}), '(logits)\n', (20679, 20687), False, 'import torch\n'), ((9266, 9311), 'torch.sum', 'torch.sum', (['fake_sem_mask[:, other_idx]'], {'dim': '(1)'}), '(fake_sem_mask[:, other_idx], dim=1)\n', (9275, 9311), False, 'import torch\n')] |
import os
import sys
TOPIC = 'studio.schoolpower.SchoolPower'
PS_API = 'https://powerschool.mapleleaf.cn'
CACHE_DB_LOCATION = os.environ.get("CACHE_DB_LOCATION", None)
DB_LOCATION = os.environ.get('DB_LOCATION', 'users.db')
PEM_FILE_PATH = os.environ.get("APNS_CERT_FILE", None)
SECRET = os.environ.get("SECRET", "test")
TIME_OUT = float(os.environ.get("TIME_OUT", 6))
if SECRET == "test":
print("SECRET not set. Make sure you set it when it's deployed.", file=sys.stderr)
| [
"os.environ.get"
] | [((127, 168), 'os.environ.get', 'os.environ.get', (['"""CACHE_DB_LOCATION"""', 'None'], {}), "('CACHE_DB_LOCATION', None)\n", (141, 168), False, 'import os\n'), ((183, 224), 'os.environ.get', 'os.environ.get', (['"""DB_LOCATION"""', '"""users.db"""'], {}), "('DB_LOCATION', 'users.db')\n", (197, 224), False, 'import os\n'), ((241, 279), 'os.environ.get', 'os.environ.get', (['"""APNS_CERT_FILE"""', 'None'], {}), "('APNS_CERT_FILE', None)\n", (255, 279), False, 'import os\n'), ((289, 321), 'os.environ.get', 'os.environ.get', (['"""SECRET"""', '"""test"""'], {}), "('SECRET', 'test')\n", (303, 321), False, 'import os\n'), ((339, 368), 'os.environ.get', 'os.environ.get', (['"""TIME_OUT"""', '(6)'], {}), "('TIME_OUT', 6)\n", (353, 368), False, 'import os\n')] |
# Generated by Django 2.1.1 on 2018-10-01 17:34
import django.contrib.postgres.fields
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('custom', '0002_riskfield_risk_type'),
]
operations = [
migrations.AlterModelOptions(
name='riskfield',
options={'ordering': ('id',), 'verbose_name': 'Risk Field', 'verbose_name_plural': 'Risk Fields'},
),
migrations.AlterModelOptions(
name='risktype',
options={'ordering': ('-id',), 'verbose_name': 'Risk Type', 'verbose_name_plural': 'Risk Types'},
),
migrations.AddField(
model_name='risktype',
name='table_name',
field=models.UUIDField(default=uuid.uuid4, unique=True),
),
migrations.AlterField(
model_name='riskfield',
name='options',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=50), default=list, size=None),
),
migrations.AlterField(
model_name='riskfield',
name='type',
field=models.CharField(choices=[('text', 'Text'), ('select', 'Select'), ('date', 'Date'), ('number', 'Number'), ('currency', 'Currency'), ('option', 'Option'), ('color', 'Color'), ('bool', 'Boolean')], default='text', max_length=20),
),
]
| [
"django.db.migrations.AlterModelOptions",
"django.db.models.UUIDField",
"django.db.models.CharField"
] | [((285, 434), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""riskfield"""', 'options': "{'ordering': ('id',), 'verbose_name': 'Risk Field', 'verbose_name_plural':\n 'Risk Fields'}"}), "(name='riskfield', options={'ordering': ('id',),\n 'verbose_name': 'Risk Field', 'verbose_name_plural': 'Risk Fields'})\n", (313, 434), False, 'from django.db import migrations, models\n'), ((475, 622), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""risktype"""', 'options': "{'ordering': ('-id',), 'verbose_name': 'Risk Type', 'verbose_name_plural':\n 'Risk Types'}"}), "(name='risktype', options={'ordering': ('-id',),\n 'verbose_name': 'Risk Type', 'verbose_name_plural': 'Risk Types'})\n", (503, 622), False, 'from django.db import migrations, models\n'), ((768, 817), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'default': 'uuid.uuid4', 'unique': '(True)'}), '(default=uuid.uuid4, unique=True)\n', (784, 817), False, 'from django.db import migrations, models\n'), ((1176, 1414), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('text', 'Text'), ('select', 'Select'), ('date', 'Date'), ('number',\n 'Number'), ('currency', 'Currency'), ('option', 'Option'), ('color',\n 'Color'), ('bool', 'Boolean')]", 'default': '"""text"""', 'max_length': '(20)'}), "(choices=[('text', 'Text'), ('select', 'Select'), ('date',\n 'Date'), ('number', 'Number'), ('currency', 'Currency'), ('option',\n 'Option'), ('color', 'Color'), ('bool', 'Boolean')], default='text',\n max_length=20)\n", (1192, 1414), False, 'from django.db import migrations, models\n'), ((996, 1027), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1012, 1027), False, 'from django.db import migrations, models\n')] |
import os
import sys
import requests
import logging
import time
import json
import pandas as pd
import numpy as np
from concurrent.futures import ProcessPoolExecutor
from git import Git
class FPL_Gameweek:
""" Get the Gameweek state """
def __init__(self, logger, season_data):
"""
Args:
            logger (logging.Logger): logger instance
            season_data (dict): season metadata; 'season' holds the starting year
"""
self.season = season_data['season']
self.root = f'data/fpl_official/{self.season}-{self.season % 2000 + 1}/gameweek/'
if not os.path.exists(self.root):
os.makedirs(self.root)
self.current_gw, self.players = self.get_fpl_metadata()
self.players.to_csv(os.path.join(self.root, 'player_ids.csv'))
self.logger = logger
def get_fpl_metadata(self):
""" Request the FPL API
Returns:
            (tuple): Current GW and player ids
"""
url = 'https://fantasy.premierleague.com/api/bootstrap-static/'
res = requests.get(url).json()
# Get current gameweek
current_gw = self.get_current_gw(res['events'])
if not os.path.exists(os.path.join(self.root, f'{current_gw}')):
os.mkdir(os.path.join(self.root, f'{current_gw}'))
# Get player ids
cols = ["id", "first_name", "second_name", "team"]
players = pd.DataFrame(res['elements'])[cols]
players = players.set_index("id")
return current_gw, players
def get_current_gw(self, events):
""" Get the next gameweek to be played in the EPL
Args:
events (json): FPL API response
Returns:
(int): Next gameweek
"""
for idx, gw in enumerate(events):
if gw['is_current']:
return idx + 1
def sample_ranks(self):
"""Sample every rank to get metadata"""
# Data management
transfer_strategy = self.players.copy()
transfer_strategy.loc[:, [
'Top_100_in', 'Top_1K_in', 'Top_10K_in', 'Top_50K_in',
'Top_100K_in', 'Top_250K_in', 'Top_500K_in', 'Top_100_out',
'Top_1K_out', 'Top_10K_out', 'Top_50K_out', 'Top_100K_out',
'Top_250K_out', 'Top_500K_out']] = 0
self.players.loc[:, [
'Top_100', 'Top_1K', 'Top_10K', 'Top_50K', 'Top_100K',
'Top_250K', 'Top_500K']] = 0
captain = self.players.copy()
chip_strategy = pd.DataFrame(index=[
'wildcard', 'freehit', 'bboost', '3xc'])
chip_strategy.loc[:, [
'Top_100', 'Top_1K', 'Top_10K', 'Top_50K',
'Top_100K', 'Top_250K', 'Top_500K']] = 0
hit_strategy = pd.DataFrame(index=['transfers'])
hit_strategy.loc[:, [
'Top_100', 'Top_1K', 'Top_10K', 'Top_50K',
'Top_100K', 'Top_250K', 'Top_500K']] = 0
        # sample a slice of managers from each rank band (larger fractions near the top)
range_limits = [
('Top_100', 0, 100, 75),
('Top_1K', 100, 1000, 500),
('Top_10K', 1000, 10000, 2000),
('Top_50K', 10000, 50000, 4000),
('Top_100K', 50000, 100000, 5000),
('Top_250K', 100000, 250000, 15000),
('Top_500K', 250000, 500000, 25000)
]
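        # each tuple: (output column, inclusive min rank, exclusive max rank, number of managers to sample)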
for col, min_rank, max_rank, n_samples in range_limits:
self.logger.info(f"Starting to scrape {col} ranks")
fpl_ranks = np.random.randint(min_rank, max_rank, n_samples)
# Concurrent API Requests
with ProcessPoolExecutor(max_workers=8) as executor:
team_data = list(
executor.map(self.get_fpl_strategy, fpl_ranks))
for team, cap, chip, transfer in team_data:
# Ownership
for p in team:
self.players.loc[p, col] += 1
if cap is not None and len(cap):
captain.loc[cap[0], col] += 1
# Chip strategy
if chip is not None:
chip_strategy.loc[chip, col] += 1
# Transfer strategy
if transfer is not None and len(transfer):
transfer_in, transfer_out = transfer
for p_in, p_out in zip(transfer_in, transfer_out):
transfer_strategy.loc[p_in, col+'_in'] += 1
transfer_strategy.loc[p_out, col+'_out'] += 1
hit_strategy.loc['transfers', col] += len(transfer_in)
self.players.loc[:, col] = (
self.players.loc[:, col] / n_samples * 100)
captain.loc[:, col] = captain.loc[:, col] / n_samples * 100
chip_strategy.loc[:, col] = (
chip_strategy.loc[:, col] / n_samples * 100)
hit_strategy.loc[:, col] = hit_strategy.loc[:, col] / n_samples
transfer_strategy.loc[:, col+'_in'] = (
transfer_strategy.loc[:, col + '_in'] / n_samples * 100)
transfer_strategy.loc[:, col+'_out'] = (
transfer_strategy.loc[:, col + '_out'] / n_samples * 100)
self.players.to_csv(
os.path.join(self.root, f"{self.current_gw}/player_ownership.csv"))
captain.to_csv(
os.path.join(self.root, f"{self.current_gw}/captain.csv"))
chip_strategy.to_csv(
os.path.join(self.root, f"{self.current_gw}/chip_strategy.csv"))
hit_strategy.to_csv(
os.path.join(self.root, f"{self.current_gw}/hit_strategy.csv"))
transfer_strategy.to_csv(
os.path.join(self.root, f"{self.current_gw}/transfer_strategy.csv"))
def get_fpl_teamid(self, rank):
""" Get the FPL Team ID based on the rank
Args:
rank (int): Manager rank
Returns:
int: FPL Team ID
"""
# Scrape the correct page
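        # the overall-league standings endpoint serves 50 managers per page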
page = rank // 50 + 1
place = rank % 50
url = f'https://fantasy.premierleague.com/api/leagues-classic/314/standings/?page_standings={page}'
res = requests.get(url)
return res.json()['standings']['results'][place]['entry']
def get_fpl_team(self, team_id):
""" Get the ids of player in a team
Args:
team_id (int): FPL Team ID
Returns:
(list): FPL Player IDs of the players selected
"""
res = requests.get(
f'https://fantasy.premierleague.com/api/entry/{team_id}/event/{self.current_gw}/picks/'
).json()
return (
[i['element'] for i in res['picks']],
[i['element'] for i in res['picks'] if i['multiplier'] > 1])
def get_fpl_strategy(self, rank):
""" Scrape FPL manager metadata
Args:
rank (int): FPL rank
Returns:
(tuple): strategy
"""
attempts = 3
while attempts:
try:
team_id = self.get_fpl_teamid(rank)
fpl_team, fpl_cap = self.get_fpl_team(team_id)
fpl_chips = self.get_fpl_chips(team_id)
fpl_transfers = self.get_fpl_transfers(team_id)
return fpl_team, fpl_cap, fpl_chips, fpl_transfers
            except Exception:  # don't swallow KeyboardInterrupt/SystemExit
attempts -= 1
if not attempts:
self.logger.warning(
f"API Call to rank {rank} failed after 3 attempts.")
return [], [], [], []
self.logger.warning(
f'API Call failed, retrying in 3 seconds! Rank: {rank}')
time.sleep(3)
def get_fpl_chips(self, team_id):
""" Get the GW when a manager used a chip
Args:
team_id (int): Manager id
Returns:
(int): Gameweek
"""
res = requests.get(
f'https://fantasy.premierleague.com/api/entry/{team_id}/history/'
).json()['chips']
if res == []:
return None
if res[-1]['event'] == self.current_gw:
return res[-1]['name']
else:
return None
def get_fpl_transfers(self, team_id):
""" Get the transfer managers did
Args:
team_id (int): Manager id
Returns:
(tuple): FPL player ids
"""
res = requests.get(
f'https://fantasy.premierleague.com/api/entry/{team_id}/transfers'
).json()
transfer_in = []
transfer_out = []
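        # the transfers endpoint appears to list moves newest first, so stop at the first entry from an earlier gameweek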
        for transfers in res:
            if transfers['event'] == self.current_gw:
                transfer_in.append(transfers['element_in'])
                transfer_out.append(transfers['element_out'])
            else:
                return transfer_in, transfer_out
        # also return when every listed transfer belongs to the current gameweek
        return transfer_in, transfer_out
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(message)s")
logger: logging.Logger = logging.getLogger(__name__)
with open('info.json') as f:
season_data = json.load(f)
fplg = FPL_Gameweek(logger, season_data)
fplg.sample_ranks()
if len(sys.argv) > 1:
logger.info("Saving data ...")
Git()
else:
print("Local")
| [
"logging.basicConfig",
"logging.getLogger",
"os.path.exists",
"pandas.DataFrame",
"os.makedirs",
"git.Git",
"os.path.join",
"requests.get",
"time.sleep",
"numpy.random.randint",
"concurrent.futures.ProcessPoolExecutor",
"json.load"
] | [((8774, 8849), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s - %(message)s"""'}), "(level=logging.INFO, format='%(asctime)s - %(message)s')\n", (8793, 8849), False, 'import logging\n'), ((8879, 8906), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (8896, 8906), False, 'import logging\n'), ((2466, 2526), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': "['wildcard', 'freehit', 'bboost', '3xc']"}), "(index=['wildcard', 'freehit', 'bboost', '3xc'])\n", (2478, 2526), True, 'import pandas as pd\n'), ((2702, 2735), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': "['transfers']"}), "(index=['transfers'])\n", (2714, 2735), True, 'import pandas as pd\n'), ((6042, 6059), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (6054, 6059), False, 'import requests\n'), ((8963, 8975), 'json.load', 'json.load', (['f'], {}), '(f)\n', (8972, 8975), False, 'import json\n'), ((9120, 9125), 'git.Git', 'Git', ([], {}), '()\n', (9123, 9125), False, 'from git import Git\n'), ((576, 601), 'os.path.exists', 'os.path.exists', (['self.root'], {}), '(self.root)\n', (590, 601), False, 'import os\n'), ((615, 637), 'os.makedirs', 'os.makedirs', (['self.root'], {}), '(self.root)\n', (626, 637), False, 'import os\n'), ((731, 772), 'os.path.join', 'os.path.join', (['self.root', '"""player_ids.csv"""'], {}), "(self.root, 'player_ids.csv')\n", (743, 772), False, 'import os\n'), ((1381, 1410), 'pandas.DataFrame', 'pd.DataFrame', (["res['elements']"], {}), "(res['elements'])\n", (1393, 1410), True, 'import pandas as pd\n'), ((3416, 3464), 'numpy.random.randint', 'np.random.randint', (['min_rank', 'max_rank', 'n_samples'], {}), '(min_rank, max_rank, n_samples)\n', (3433, 3464), True, 'import numpy as np\n'), ((5142, 5208), 'os.path.join', 'os.path.join', (['self.root', 'f"""{self.current_gw}/player_ownership.csv"""'], {}), "(self.root, f'{self.current_gw}/player_ownership.csv')\n", (5154, 5208), False, 'import os\n'), ((5246, 5303), 'os.path.join', 'os.path.join', (['self.root', 'f"""{self.current_gw}/captain.csv"""'], {}), "(self.root, f'{self.current_gw}/captain.csv')\n", (5258, 5303), False, 'import os\n'), ((5347, 5410), 'os.path.join', 'os.path.join', (['self.root', 'f"""{self.current_gw}/chip_strategy.csv"""'], {}), "(self.root, f'{self.current_gw}/chip_strategy.csv')\n", (5359, 5410), False, 'import os\n'), ((5453, 5515), 'os.path.join', 'os.path.join', (['self.root', 'f"""{self.current_gw}/hit_strategy.csv"""'], {}), "(self.root, f'{self.current_gw}/hit_strategy.csv')\n", (5465, 5515), False, 'import os\n'), ((5563, 5630), 'os.path.join', 'os.path.join', (['self.root', 'f"""{self.current_gw}/transfer_strategy.csv"""'], {}), "(self.root, f'{self.current_gw}/transfer_strategy.csv')\n", (5575, 5630), False, 'import os\n'), ((1029, 1046), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1041, 1046), False, 'import requests\n'), ((1172, 1212), 'os.path.join', 'os.path.join', (['self.root', 'f"""{current_gw}"""'], {}), "(self.root, f'{current_gw}')\n", (1184, 1212), False, 'import os\n'), ((1236, 1276), 'os.path.join', 'os.path.join', (['self.root', 'f"""{current_gw}"""'], {}), "(self.root, f'{current_gw}')\n", (1248, 1276), False, 'import os\n'), ((3521, 3555), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {'max_workers': '(8)'}), '(max_workers=8)\n', (3540, 3555), False, 'from concurrent.futures import ProcessPoolExecutor\n'), ((6365, 6476), 'requests.get', 'requests.get', 
(['f"""https://fantasy.premierleague.com/api/entry/{team_id}/event/{self.current_gw}/picks/"""'], {}), "(\n f'https://fantasy.premierleague.com/api/entry/{team_id}/event/{self.current_gw}/picks/'\n )\n", (6377, 6476), False, 'import requests\n'), ((8301, 8386), 'requests.get', 'requests.get', (['f"""https://fantasy.premierleague.com/api/entry/{team_id}/transfers"""'], {}), "(f'https://fantasy.premierleague.com/api/entry/{team_id}/transfers'\n )\n", (8313, 8386), False, 'import requests\n'), ((7565, 7578), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (7575, 7578), False, 'import time\n'), ((7793, 7872), 'requests.get', 'requests.get', (['f"""https://fantasy.premierleague.com/api/entry/{team_id}/history/"""'], {}), "(f'https://fantasy.premierleague.com/api/entry/{team_id}/history/')\n", (7805, 7872), False, 'import requests\n')] |
from chroma_core.lib.storage_plugin.api import attributes
from chroma_core.lib.storage_plugin.api.identifiers import GlobalId, ScopedId
from chroma_core.lib.storage_plugin.api.plugin import Plugin
from chroma_core.lib.storage_plugin.api import resources
from chroma_core.lib.storage_plugin.api import relations
version = 1
class Controller(resources.ScannableResource):
class Meta:
identifier = GlobalId("address")
address = attributes.String()
class Lun(resources.LogicalDrive):
class Meta:
identifier = ScopedId("lun_id")
lun_id = attributes.String()
class Presentation(resources.Resource):
lun_id = attributes.String()
path = attributes.String()
host_id = attributes.Integer()
class Meta:
identifier = ScopedId("lun_id", "host_id")
relations = [
relations.Provide(provide_to=resources.DeviceNode, attributes=["host_id", "path"]),
relations.Subscribe(subscribe_to=Lun, attributes=["lun_id"]),
]
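        # Provide advertises host_id/path so DeviceNode resources can be matched
        # to a Presentation; Subscribe binds each Presentation to the Lun that
        # shares its lun_id.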
class TestPlugin(Plugin):
pass
| [
"chroma_core.lib.storage_plugin.api.relations.Provide",
"chroma_core.lib.storage_plugin.api.identifiers.ScopedId",
"chroma_core.lib.storage_plugin.api.attributes.String",
"chroma_core.lib.storage_plugin.api.relations.Subscribe",
"chroma_core.lib.storage_plugin.api.attributes.Integer",
"chroma_core.lib.sto... | [((445, 464), 'chroma_core.lib.storage_plugin.api.attributes.String', 'attributes.String', ([], {}), '()\n', (462, 464), False, 'from chroma_core.lib.storage_plugin.api import attributes\n'), ((572, 591), 'chroma_core.lib.storage_plugin.api.attributes.String', 'attributes.String', ([], {}), '()\n', (589, 591), False, 'from chroma_core.lib.storage_plugin.api import attributes\n'), ((647, 666), 'chroma_core.lib.storage_plugin.api.attributes.String', 'attributes.String', ([], {}), '()\n', (664, 666), False, 'from chroma_core.lib.storage_plugin.api import attributes\n'), ((678, 697), 'chroma_core.lib.storage_plugin.api.attributes.String', 'attributes.String', ([], {}), '()\n', (695, 697), False, 'from chroma_core.lib.storage_plugin.api import attributes\n'), ((712, 732), 'chroma_core.lib.storage_plugin.api.attributes.Integer', 'attributes.Integer', ([], {}), '()\n', (730, 732), False, 'from chroma_core.lib.storage_plugin.api import attributes\n'), ((410, 429), 'chroma_core.lib.storage_plugin.api.identifiers.GlobalId', 'GlobalId', (['"""address"""'], {}), "('address')\n", (418, 429), False, 'from chroma_core.lib.storage_plugin.api.identifiers import GlobalId, ScopedId\n'), ((539, 557), 'chroma_core.lib.storage_plugin.api.identifiers.ScopedId', 'ScopedId', (['"""lun_id"""'], {}), "('lun_id')\n", (547, 557), False, 'from chroma_core.lib.storage_plugin.api.identifiers import GlobalId, ScopedId\n'), ((771, 800), 'chroma_core.lib.storage_plugin.api.identifiers.ScopedId', 'ScopedId', (['"""lun_id"""', '"""host_id"""'], {}), "('lun_id', 'host_id')\n", (779, 800), False, 'from chroma_core.lib.storage_plugin.api.identifiers import GlobalId, ScopedId\n'), ((835, 921), 'chroma_core.lib.storage_plugin.api.relations.Provide', 'relations.Provide', ([], {'provide_to': 'resources.DeviceNode', 'attributes': "['host_id', 'path']"}), "(provide_to=resources.DeviceNode, attributes=['host_id',\n 'path'])\n", (852, 921), False, 'from chroma_core.lib.storage_plugin.api import relations\n'), ((931, 991), 'chroma_core.lib.storage_plugin.api.relations.Subscribe', 'relations.Subscribe', ([], {'subscribe_to': 'Lun', 'attributes': "['lun_id']"}), "(subscribe_to=Lun, attributes=['lun_id'])\n", (950, 991), False, 'from chroma_core.lib.storage_plugin.api import relations\n')] |
import json
import time
import logging
import requests
import websocket
from . import abc
from .. import events, sansio, methods, exceptions
LOG = logging.getLogger(__name__)
class SlackAPI(abc.SlackAPI):
"""
`requests` implementation of :class:`slack.io.abc.SlackAPI`
Args:
session: HTTP session
"""
def __init__(self, *, session=None, **kwargs):
self._session = session or requests.session()
super().__init__(**kwargs)
def _request(self, method, url, headers, body):
response = self._session.request(method, url, headers=headers, data=body)
return response.status_code, response.content, response.headers
def _rtm(self, url):
ws = websocket.create_connection(url)
while True:
event = ws.recv()
if event:
yield event
else:
self.sleep(0.5)
def sleep(self, seconds):
time.sleep(seconds)
def _make_query(self, url, body, headers):
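        # Sleep until any previously recorded rate-limit window has expired
        # before issuing the request.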
while self.rate_limited and self.rate_limited > int(time.time()):
self.sleep(self.rate_limited - int(time.time()))
status, rep_body, rep_headers = self._request("POST", url, headers, body)
try:
response_data = sansio.decode_response(status, rep_headers, rep_body)
except exceptions.RateLimited as rate_limited:
if self._retry_when_rate_limit:
LOG.warning(
"Rate limited ! Waiting for %s seconds", rate_limited.retry_after
)
self.rate_limited = int(time.time()) + rate_limited.retry_after
return self._make_query(url, body, headers)
else:
raise
else:
self.rate_limited = False
return response_data
def query(self, url, data=None, headers=None, as_json=None):
"""
Query the slack API
When using :class:`slack.methods` the request is made `as_json` if available
Args:
url: :class:`slack.methods` or url string
data: JSON encodable MutableMapping
headers: Custom headers
as_json: Post JSON to the slack API
Returns:
dictionary of slack API response data
"""
url, body, headers = sansio.prepare_request(
url=url,
data=data,
headers=headers,
global_headers=self._headers,
token=self._token,
)
return self._make_query(url, body, headers)
def iter(
self,
url,
data=None,
headers=None,
*,
limit=200,
iterkey=None,
itermode=None,
minimum_time=None,
as_json=None
):
"""
Iterate over a slack API method supporting pagination
When using :class:`slack.methods` the request is made `as_json` if available
Args:
url: :class:`slack.methods` or url string
data: JSON encodable MutableMapping
headers:
limit: Maximum number of results to return per call.
iterkey: Key in response data to iterate over (required for url string).
itermode: Iteration mode (required for url string) (one of `cursor`, `page` or `timeline`)
minimum_time: Minimum elapsed time (in seconds) between two calls to the Slack API (default to 0).
If not reached the client will sleep for the remaining time.
as_json: Post JSON to the slack API
Returns:
Async iterator over `response_data[key]`
"""
itervalue = None
if not data:
data = {}
last_request_time = None
while True:
current_time = time.time()
if (
minimum_time
and last_request_time
and last_request_time + minimum_time > current_time
):
self.sleep(last_request_time + minimum_time - current_time)
data, iterkey, itermode = sansio.prepare_iter_request(
url,
data,
iterkey=iterkey,
itermode=itermode,
limit=limit,
itervalue=itervalue,
)
last_request_time = time.time()
response_data = self.query(url, data, headers, as_json)
itervalue = sansio.decode_iter_request(response_data)
for item in response_data[iterkey]:
yield item
if not itervalue:
break
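    # Illustrative pagination usage (the exact slack.methods member used here
    # is an assumption, not taken from this module):
    #     client = SlackAPI(token="xoxb-...")
    #     for user in client.iter(methods.USERS_LIST):
    #         ...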
def rtm(self, url=None, bot_id=None):
"""
Iterate over event from the RTM API
Args:
url: Websocket connection url
bot_id: Connecting bot ID
Returns:
:class:`slack.events.Event` or :class:`slack.events.Message`
"""
while True:
bot_id = bot_id or self._find_bot_id()
url = url or self._find_rtm_url()
for event in self._incoming_from_rtm(url, bot_id):
yield event
url = None
def _find_bot_id(self):
auth = self.query(methods.AUTH_TEST)
user_info = self.query(methods.USERS_INFO, {"user": auth["user_id"]})
bot_id = user_info["user"]["profile"]["bot_id"]
LOG.info("BOT_ID is %s", bot_id)
return bot_id
def _find_rtm_url(self):
response = self.query(methods.RTM_CONNECT)
return response["url"]
def _incoming_from_rtm(self, url, bot_id):
for data in self._rtm(url):
event = events.Event.from_rtm(json.loads(data))
if sansio.need_reconnect(event):
break
elif sansio.discard_event(event, bot_id):
continue
else:
yield event
| [
"logging.getLogger",
"requests.session",
"json.loads",
"time.sleep",
"websocket.create_connection",
"time.time"
] | [((150, 177), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (167, 177), False, 'import logging\n'), ((720, 752), 'websocket.create_connection', 'websocket.create_connection', (['url'], {}), '(url)\n', (747, 752), False, 'import websocket\n'), ((942, 961), 'time.sleep', 'time.sleep', (['seconds'], {}), '(seconds)\n', (952, 961), False, 'import time\n'), ((418, 436), 'requests.session', 'requests.session', ([], {}), '()\n', (434, 436), False, 'import requests\n'), ((3787, 3798), 'time.time', 'time.time', ([], {}), '()\n', (3796, 3798), False, 'import time\n'), ((4333, 4344), 'time.time', 'time.time', ([], {}), '()\n', (4342, 4344), False, 'import time\n'), ((5645, 5661), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (5655, 5661), False, 'import json\n'), ((1071, 1082), 'time.time', 'time.time', ([], {}), '()\n', (1080, 1082), False, 'import time\n'), ((1132, 1143), 'time.time', 'time.time', ([], {}), '()\n', (1141, 1143), False, 'import time\n'), ((1597, 1608), 'time.time', 'time.time', ([], {}), '()\n', (1606, 1608), False, 'import time\n')] |
from django import forms
from student.models import Major, UserProfile
from django.contrib.auth.models import User
class MajorForm(forms.ModelForm):
code = forms.CharField(max_length=20)
viName = forms.CharField(max_length=128)
enName = forms.CharField(max_length=128)
class Meta:
model = Major
fields = ('code', 'viName', 'enName')
class UserProfileForm(forms.ModelForm):
GENDER_CHOICES = (
("Nam", "Nam"),
("Nữ", "Nữ"),
)
GRADE_CHOICES = (
("Отлично", "Отлично"),
("Хорошо", "Хорошо"),
("Удовлетворительно", "Удовлетворительно"),
("Зачтено", "Зачтено")
)
major = forms.ModelChoiceField(queryset=Major.objects.all())
fullName = forms.CharField(max_length=128)
gender = forms.ChoiceField(choices=GENDER_CHOICES)
birthday = forms.DateField()
ethnic = forms.CharField(max_length=20)
religion = forms.CharField(max_length=20)
studyYear = forms.IntegerField()
addressVN = forms.CharField(max_length=128)
addressRu = forms.CharField(max_length=128)
phone = forms.CharField(max_length=128)
workPlace = forms.CharField(max_length=128, required=False)
dateOfAdmission = forms.DateField() # Ngay nhap hoc
dateOfStudy = forms.DateField() # Ngay bat dau khoa hoc
timeOfStudy = forms.CharField(max_length=128) # Thoi gian dao tao
infoOfStudy = forms.CharField(max_length=128) # Dang hoc hoc ki ?
# Result of study
ruSubject1 = forms.CharField(max_length=128, required=False)
viSubject1 = forms.CharField(max_length=128, required=False)
resultSubject1 = forms.ChoiceField(choices=GRADE_CHOICES, required=False)
ruSubject2 = forms.CharField(max_length=128, required=False)
viSubject2 = forms.CharField(max_length=128, required=False)
resultSubject2 = forms.ChoiceField(choices=GRADE_CHOICES, required=False)
ruSubject3 = forms.CharField(max_length=128, required=False)
viSubject3 = forms.CharField(max_length=128, required=False)
resultSubject3 = forms.ChoiceField(choices=GRADE_CHOICES, required=False)
ruSubject4 = forms.CharField(max_length=128, required=False)
viSubject4 = forms.CharField(max_length=128, required=False)
resultSubject4 = forms.ChoiceField(choices=GRADE_CHOICES, required=False)
ruSubject5 = forms.CharField(max_length=128, required=False)
viSubject5 = forms.CharField(max_length=128, required=False)
resultSubject5 = forms.ChoiceField(choices=GRADE_CHOICES, required=False)
ruSubject6 = forms.CharField(max_length=128, required=False)
viSubject6 = forms.CharField(max_length=128, required=False)
resultSubject6 = forms.ChoiceField(choices=GRADE_CHOICES, required=False)
ruSubject7 = forms.CharField(max_length=128, required=False)
viSubject7 = forms.CharField(max_length=128, required=False)
resultSubject7 = forms.ChoiceField(choices=GRADE_CHOICES, required=False)
ruSubject8 = forms.CharField(max_length=128, required=False)
viSubject8 = forms.CharField(max_length=128, required=False)
resultSubject8 = forms.ChoiceField(choices=GRADE_CHOICES, required=False)
ruSubject9 = forms.CharField(max_length=128, required=False)
viSubject9 = forms.CharField(max_length=128, required=False)
resultSubject9 = forms.ChoiceField(choices=GRADE_CHOICES, required=False)
ruSubject10 = forms.CharField(max_length=128, required=False)
viSubject10 = forms.CharField(max_length=128, required=False)
resultSubject10 = forms.ChoiceField(choices=GRADE_CHOICES, required=False)
ruSubject11 = forms.CharField(max_length=128, required=False)
viSubject11 = forms.CharField(max_length=128, required=False)
resultSubject11 = forms.ChoiceField(choices=GRADE_CHOICES, required=False)
ruSubject12 = forms.CharField(max_length=128, required=False)
viSubject12 = forms.CharField(max_length=128, required=False)
resultSubject12 = forms.ChoiceField(choices=GRADE_CHOICES, required=False)
# Information of Bank
nameBank = forms.CharField(max_length=128, required=False)
nameAccount = forms.CharField(max_length=128, required=False) # Ten tieng nga
class Meta:
model = UserProfile
exclude = ('user',) | [
"django.forms.CharField",
"django.forms.DateField",
"django.forms.ChoiceField",
"student.models.Major.objects.all",
"django.forms.IntegerField"
] | [((159, 189), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (174, 189), False, 'from django import forms\n'), ((200, 231), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (215, 231), False, 'from django import forms\n'), ((242, 273), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (257, 273), False, 'from django import forms\n'), ((671, 702), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (686, 702), False, 'from django import forms\n'), ((713, 754), 'django.forms.ChoiceField', 'forms.ChoiceField', ([], {'choices': 'GENDER_CHOICES'}), '(choices=GENDER_CHOICES)\n', (730, 754), False, 'from django import forms\n'), ((767, 784), 'django.forms.DateField', 'forms.DateField', ([], {}), '()\n', (782, 784), False, 'from django import forms\n'), ((795, 825), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (810, 825), False, 'from django import forms\n'), ((838, 868), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (853, 868), False, 'from django import forms\n'), ((882, 902), 'django.forms.IntegerField', 'forms.IntegerField', ([], {}), '()\n', (900, 902), False, 'from django import forms\n'), ((916, 947), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (931, 947), False, 'from django import forms\n'), ((961, 992), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (976, 992), False, 'from django import forms\n'), ((1002, 1033), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (1017, 1033), False, 'from django import forms\n'), ((1047, 1094), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)', 'required': '(False)'}), '(max_length=128, required=False)\n', (1062, 1094), False, 'from django import forms\n'), ((1114, 1131), 'django.forms.DateField', 'forms.DateField', ([], {}), '()\n', (1129, 1131), False, 'from django import forms\n'), ((1164, 1181), 'django.forms.DateField', 'forms.DateField', ([], {}), '()\n', (1179, 1181), False, 'from django import forms\n'), ((1222, 1253), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (1237, 1253), False, 'from django import forms\n'), ((1290, 1321), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (1305, 1321), False, 'from django import forms\n'), ((1377, 1424), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)', 'required': '(False)'}), '(max_length=128, required=False)\n', (1392, 1424), False, 'from django import forms\n'), ((1439, 1486), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)', 'required': '(False)'}), '(max_length=128, required=False)\n', (1454, 1486), False, 'from django import forms\n'), ((1505, 1561), 'django.forms.ChoiceField', 'forms.ChoiceField', ([], {'choices': 'GRADE_CHOICES', 'required': '(False)'}), '(choices=GRADE_CHOICES, required=False)\n', (1522, 1561), False, 'from django import forms\n'), ((1577, 1624), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)', 'required': '(False)'}), '(max_length=128, required=False)\n', (1592, 1624), False, 'from django import forms\n'), ((1639, 1686), 
'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)', 'required': '(False)'}), '(max_length=128, required=False)\n', (1654, 1686), False, 'from django import forms\n'), ((1705, 1761), 'django.forms.ChoiceField', 'forms.ChoiceField', ([], {'choices': 'GRADE_CHOICES', 'required': '(False)'}), '(choices=GRADE_CHOICES, required=False)\n', (1722, 1761), False, 'from django import forms\n'), ((1777, 1824), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)', 'required': '(False)'}), '(max_length=128, required=False)\n', (1792, 1824), False, 'from django import forms\n'), ((1839, 1886), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)', 'required': '(False)'}), '(max_length=128, required=False)\n', (1854, 1886), False, 'from django import forms\n'), ((1905, 1961), 'django.forms.ChoiceField', 'forms.ChoiceField', ([], {'choices': 'GRADE_CHOICES', 'required': '(False)'}), '(choices=GRADE_CHOICES, required=False)\n', (1922, 1961), False, 'from django import forms\n'), ((1977, 2024), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)', 'required': '(False)'}), '(max_length=128, required=False)\n', (1992, 2024), False, 'from django import forms\n'), ((2039, 2086), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)', 'required': '(False)'}), '(max_length=128, required=False)\n', (2054, 2086), False, 'from django import forms\n'), ((2105, 2161), 'django.forms.ChoiceField', 'forms.ChoiceField', ([], {'choices': 'GRADE_CHOICES', 'required': '(False)'}), '(choices=GRADE_CHOICES, required=False)\n', (2122, 2161), False, 'from django import forms\n'), ((2177, 2224), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)', 'required': '(False)'}), '(max_length=128, required=False)\n', (2192, 2224), False, 'from django import forms\n'), ((2239, 2286), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)', 'required': '(False)'}), '(max_length=128, required=False)\n', (2254, 2286), False, 'from django import forms\n'), ((2305, 2361), 'django.forms.ChoiceField', 'forms.ChoiceField', ([], {'choices': 'GRADE_CHOICES', 'required': '(False)'}), '(choices=GRADE_CHOICES, required=False)\n', (2322, 2361), False, 'from django import forms\n'), ((2377, 2424), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)', 'required': '(False)'}), '(max_length=128, required=False)\n', (2392, 2424), False, 'from django import forms\n'), ((2439, 2486), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)', 'required': '(False)'}), '(max_length=128, required=False)\n', (2454, 2486), False, 'from django import forms\n'), ((2505, 2561), 'django.forms.ChoiceField', 'forms.ChoiceField', ([], {'choices': 'GRADE_CHOICES', 'required': '(False)'}), '(choices=GRADE_CHOICES, required=False)\n', (2522, 2561), False, 'from django import forms\n'), ((2577, 2624), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)', 'required': '(False)'}), '(max_length=128, required=False)\n', (2592, 2624), False, 'from django import forms\n'), ((2639, 2686), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)', 'required': '(False)'}), '(max_length=128, required=False)\n', (2654, 2686), False, 'from django import forms\n'), ((2705, 2761), 'django.forms.ChoiceField', 'forms.ChoiceField', ([], {'choices': 'GRADE_CHOICES', 'required': '(False)'}), '(choices=GRADE_CHOICES, required=False)\n', (2722, 2761), False, 'from django import forms\n'), 
((2777, 2824), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)', 'required': '(False)'}), '(max_length=128, required=False)\n', (2792, 2824), False, 'from django import forms\n'), ((2839, 2886), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)', 'required': '(False)'}), '(max_length=128, required=False)\n', (2854, 2886), False, 'from django import forms\n'), ((2905, 2961), 'django.forms.ChoiceField', 'forms.ChoiceField', ([], {'choices': 'GRADE_CHOICES', 'required': '(False)'}), '(choices=GRADE_CHOICES, required=False)\n', (2922, 2961), False, 'from django import forms\n'), ((2977, 3024), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)', 'required': '(False)'}), '(max_length=128, required=False)\n', (2992, 3024), False, 'from django import forms\n'), ((3039, 3086), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)', 'required': '(False)'}), '(max_length=128, required=False)\n', (3054, 3086), False, 'from django import forms\n'), ((3105, 3161), 'django.forms.ChoiceField', 'forms.ChoiceField', ([], {'choices': 'GRADE_CHOICES', 'required': '(False)'}), '(choices=GRADE_CHOICES, required=False)\n', (3122, 3161), False, 'from django import forms\n'), ((3178, 3225), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)', 'required': '(False)'}), '(max_length=128, required=False)\n', (3193, 3225), False, 'from django import forms\n'), ((3241, 3288), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)', 'required': '(False)'}), '(max_length=128, required=False)\n', (3256, 3288), False, 'from django import forms\n'), ((3308, 3364), 'django.forms.ChoiceField', 'forms.ChoiceField', ([], {'choices': 'GRADE_CHOICES', 'required': '(False)'}), '(choices=GRADE_CHOICES, required=False)\n', (3325, 3364), False, 'from django import forms\n'), ((3381, 3428), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)', 'required': '(False)'}), '(max_length=128, required=False)\n', (3396, 3428), False, 'from django import forms\n'), ((3444, 3491), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)', 'required': '(False)'}), '(max_length=128, required=False)\n', (3459, 3491), False, 'from django import forms\n'), ((3511, 3567), 'django.forms.ChoiceField', 'forms.ChoiceField', ([], {'choices': 'GRADE_CHOICES', 'required': '(False)'}), '(choices=GRADE_CHOICES, required=False)\n', (3528, 3567), False, 'from django import forms\n'), ((3584, 3631), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)', 'required': '(False)'}), '(max_length=128, required=False)\n', (3599, 3631), False, 'from django import forms\n'), ((3647, 3694), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)', 'required': '(False)'}), '(max_length=128, required=False)\n', (3662, 3694), False, 'from django import forms\n'), ((3714, 3770), 'django.forms.ChoiceField', 'forms.ChoiceField', ([], {'choices': 'GRADE_CHOICES', 'required': '(False)'}), '(choices=GRADE_CHOICES, required=False)\n', (3731, 3770), False, 'from django import forms\n'), ((3807, 3854), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)', 'required': '(False)'}), '(max_length=128, required=False)\n', (3822, 3854), False, 'from django import forms\n'), ((3870, 3917), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(128)', 'required': '(False)'}), '(max_length=128, required=False)\n', (3885, 3917), False, 'from django import forms\n'), ((638, 
657), 'student.models.Major.objects.all', 'Major.objects.all', ([], {}), '()\n', (655, 657), False, 'from student.models import Major, UserProfile\n')] |
# -*- coding: utf-8 -*-
# Standard library imports
import sys
# Third party imports
from Qt import QtWidgets
# Local imports
from .ui import Dialog
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
d = Dialog()
sys.exit(d.exec_())
| [
"Qt.QtWidgets.QApplication"
] | [((190, 222), 'Qt.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (212, 222), False, 'from Qt import QtWidgets\n')] |
from flask import url_for
from tests.conftest import normalize_spaces
def test_set_inbound_sms_sets_a_number_for_service(
logged_in_client,
mock_add_sms_sender,
multiple_available_inbound_numbers,
service_one,
fake_uuid,
mock_no_inbound_number_for_service,
mocker
):
mocker.patch('app.service_api_client.update_service_with_properties')
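    # Patch the service update so the POST below never reaches a real API.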
data = {
"inbound_number": "781d9c60-7a7e-46b7-9896-7b045b992fa5",
}
response = logged_in_client.post(
url_for('main.service_set_inbound_number', service_id=service_one['id']),
data=data
)
assert response.status_code == 302
mock_add_sms_sender.assert_called_once_with(
service_one['id'],
sms_sender="781d9c60-7a7e-46b7-9896-7b045b992fa5",
is_default=True,
inbound_number_id="781d9c60-7a7e-46b7-9896-7b045b992fa5"
)
def test_set_inbound_sms_when_no_available_inbound_numbers(
client_request,
service_one,
no_available_inbound_numbers,
mock_no_inbound_number_for_service,
mocker
):
page = client_request.get(
'main.service_set_inbound_number',
service_id=service_one['id']
)
assert normalize_spaces(page.select_one('main p').text) == "No available inbound numbers"
def test_set_inbound_sms_when_service_already_has_sms(
client_request,
service_one,
multiple_available_inbound_numbers,
mock_get_inbound_number_for_service,
):
page = client_request.get(
'main.service_set_inbound_number',
service_id=service_one['id']
)
assert normalize_spaces(page.select_one('main p').text) == "This service already has an inbound number"
| [
"flask.url_for"
] | [((504, 576), 'flask.url_for', 'url_for', (['"""main.service_set_inbound_number"""'], {'service_id': "service_one['id']"}), "('main.service_set_inbound_number', service_id=service_one['id'])\n", (511, 576), False, 'from flask import url_for\n')] |
from ..function.node import *
from ..function.tree import *
import numpy as np
def generate_tree(name='test'):
p_branch = .2
p_infertile = .1
p_channel = 1 - p_branch - p_infertile
decay = .25
branch_nodes = [Max, Sum, Mean, Min, Product, Median]
infertile_nodes = [Constant, Input, Uniform, Normal]
channel_node = [Square, Reciprocal, Sqrt, Exp, Sin, Cos, ArcTan, Abs, Sign]
max_node_count = 10
max_num_branches = 3
max_size_branch = 3
branch_disribution = np.random.randint
tree = FunctionTree(name=name)
stack = [tree.Output.name]
c_node = tree.Input.name
branch_count = 0
nodes_count = 0
while len(stack) > 0:
p_node = stack.pop(0)
        if branch_count > max_num_branches:
            # Branch budget exhausted: fold the branch probability mass into
            # the other two node types.
            p_infertile += p_branch/2
            p_channel += p_branch/2
            p_branch = 0
            print(p_infertile, p_channel, p_branch)
        elif nodes_count > 2*max_node_count:
            # Hard cap, checked before the soft decay below (whose threshold is
            # always lower, so it would otherwise shadow this branch): allow only
            # leaf nodes from here on, keeping the probabilities a valid distribution.
            p_branch = 0
            p_channel = 0
            p_infertile = 1
        elif nodes_count > max_node_count - branch_count*(max_size_branch/2 - 1):
            # Soft cap: gradually shift probability mass from channel to leaf nodes.
            decay_amount = p_channel*decay
            p_channel -= decay_amount
            p_infertile += decay_amount
new_node_type = np.random.choice(['branch', 'infertile', 'channel'], p=[p_branch, p_infertile, p_channel])
if new_node_type == 'branch':
new_node = np.random.choice(branch_nodes)
num_nodes = branch_disribution(2, max_size_branch+1)
branch_count += 1
elif new_node_type == 'infertile':
new_node = np.random.choice(infertile_nodes)
num_nodes = 1
else:
new_node = np.random.choice(channel_node)
num_nodes = 1
new_node = new_node(name=str(nodes_count))
print('Node to be added: ', new_node.latex, new_node.name)
try:
if new_node_type == 'infertile':
tree.insert(new_node, parent_item=p_node)
else:
tree.insert(new_node, parent_item=p_node, child_item=c_node)
print('Tree Map')
print(tree.tree_map)
nodes_count += num_nodes
if new_node_type != 'infertile':
for _ in range(num_nodes):
stack.append(new_node.name)
        except Exception:  # insertion failed; requeue the parent and retry
stack.insert(0, p_node)
return tree
class FunctionGenerator:
"""Generator of FunctionTrees
# Arguments:
base_cohert_name: A String, used as the base of naming new Trees
Ex: A = FunctionGenerator(base_cohert_name = 'test')
next(A) --> FunctionTree Object with name 'test_1'
next(A) --> FunctionTree Object with name 'test_2'
...
max_nodes: An Integer, maximum number of nodes in a graph
max_branches: An Integer, maximum number of branch type function
nodes in a graph
node_type_probability: A dictionary, representing a discrete node distribution
Ex: {'channel': 1/3, 'infertile': 1/3, 'branch': 1/3}
node_probability: A numpy distribution,
channel_nodes: A dictionary with FunctionNodes, used as the key,
and "probability" of selecting that node when a channel node is needed
infertile_nodes: A dictionary with FunctionNodes, used as the key,
and "probability" of selecting that node when a infertile node is needed
branch_nodes: A dictionary with FunctionNodes, used as the key,
and "probability" of selecting that node when a branch node is needed
# Returns (When called)
FunctionTree,
Logic:
Breadth first creation!
        Determine number of nodes
Determine how many branch and infertile nodes
"""
    # A minimal sketch of the interface the docstring promises: the node
    # distribution arguments are accepted but not consumed yet, and each call
    # to next() delegates to generate_tree() with an auto-numbered name.
    def __init__(self, base_cohert_name='tree', **kwargs):
        self.base_cohert_name = base_cohert_name
        self.count = 0
    def __iter__(self):
        return self
    def __next__(self):
        self.count += 1
        return generate_tree(name='{}_{}'.format(self.base_cohert_name, self.count))
"numpy.random.choice"
] | [((1244, 1338), 'numpy.random.choice', 'np.random.choice', (["['branch', 'infertile', 'channel']"], {'p': '[p_branch, p_infertile, p_channel]'}), "(['branch', 'infertile', 'channel'], p=[p_branch,\n p_infertile, p_channel])\n", (1260, 1338), True, 'import numpy as np\n'), ((1396, 1426), 'numpy.random.choice', 'np.random.choice', (['branch_nodes'], {}), '(branch_nodes)\n', (1412, 1426), True, 'import numpy as np\n'), ((1589, 1622), 'numpy.random.choice', 'np.random.choice', (['infertile_nodes'], {}), '(infertile_nodes)\n', (1605, 1622), True, 'import numpy as np\n'), ((1686, 1716), 'numpy.random.choice', 'np.random.choice', (['channel_node'], {}), '(channel_node)\n', (1702, 1716), True, 'import numpy as np\n')] |
from grbl import *
from pyb import I2C, delay, millis
from pyb_i2c_lcd import I2cLcd
from lcd_v_minus import *
import time
"""
X
01234567890123456789
X=-xxx.xx F=xxxx* W
Y=-xxx.xx S=xxxx* FM
Z=-xxx.xx Idle. XYZP
message
---------------------
* override
F - feed
S - spindle
W - coordinate W M O
FM - flood mist
xyzp - pins
"""
"""
<Idle| #Idle, Run, Hold, Jog, Alarm, Door, Check, Home, Sleep
MPos:0.000,0.000,0.000| #machine position !WPos = MPos - WCO
WPos:0.000,0.000,0.000| #work position !MPos = WPos + WCO
WCO:0.000,0.000,0.000| #Work Coordinate Offset
Bf:15,128| #Buffer
Ln:99999| #line number
FS:0,1000| #Current Feed and Speed
Ov:100,100,100| #Override % feed, rapids, and spindle
A:SFM| # SC - spindle, F flood, M mist ON
Pn:XYZPDHRS | #Input Pin State
ALARM:5
MSG:
error:
"""
class grblcontrol:
""" """
def __init__(self, debug=False, uart_port=1, baudrate=115200, i2c_port=1, i2c_addr=0x27):
self.ver='20211206'
self.debug=debug
self.g=grbl(uart_port,baudrate,debug)
startV()
i2c = I2C(i2c_port, I2C.MASTER)
if not i2c.is_ready(i2c_addr):
print("i2c port=%d device 0x%x not found. Scan.." % (i2c_port, i2c_addr))
i2c.scan()
print ("end scan")
raise "lcd not found"
self.lcd = I2cLcd(i2c, i2c_addr, 4, 20)
self.lcd.backlight_on()
def __del__(self):
self.lcd.backlight_off()
def tstart(self):
if(self.debug):
self.t0=time.time_ns()
def tstop(self):
if(self.debug):
print("delay: %dms" % ((time.time_ns()-self.t0)/1000000))
def xy(self, x=0, y=0):
self.lcd.move_to(x,y)
def lcdprint(self, str=""):
self.lcd.putstr(str)
def connect(self):
self.lcd.clear()
self.xy(1,0)
self.lcdprint("panel ver.%s" % self.ver)
self.xy(1,1)
s=self.g.getstat()
self.lcdprint("grbl %s" % s['ver'])
# print(s['ver'])
self.xy(1,2)
if(self.g.is_connect()):
self.lcdprint("connected")
else:
self.lcdprint("error")
time.sleep_ms(1000)
self.lcd.clear()
self.lcdstatic()
def lcdstatic(self):
self.xy()
self.lcdprint("X=")
self.xy(0,1)
self.lcdprint("Y=")
self.xy(0,2)
self.lcdprint("Z=")
self.xy(10,0)
self.lcdprint("F=")
self.xy(10,1)
self.lcdprint("S=")
def send(self,c):
self.g.send_read(c+"\r\n")
@micropython.native
def upd_stat(self):
self.tstart()
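        # '?' is the grbl realtime status query; the response fields are listed
        # in the module-level comment block above.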
s=self.g.req_parse(b'?')
if('WPos' in s):
self.xy(19,0)
self.lcdprint("W")
for i in range(0,3): ## pos
self.xy(2,i)
self.lcdprint("{: 7.2f}".format(s['WPos'][i]))
self.xy(12, 0) # feed
self.lcdprint("{:>4}".format(s['FS'][0]))
try:
# if(s['Ov'][0] != 100.0): #feed
self.lcdprint("*")
except KeyError:
self.lcdprint(" ")
self.xy(12, 1) # spindle
self.lcdprint("{:>4}".format(s['FS'][1]))
try:
# if(s['Ov'][2] != 100.0): #speed
self.lcdprint("*")
except KeyError:
self.lcdprint(" ")
self.xy(10,2) # mode
self.lcdprint("{:<5}".format(s['mode']))
self.xy(16,2)
self.lcdprint("{:$>}".format(''.join(s['Pn'][0:4])))
self.xy(0,3) # messages
str=""
if(s['error']):
str+="err:%d " % s['error']
if(s['alarm']):
str+="alarm:%d " % s['alarm']
if(s['msg']):
str+=s['msg']
self.lcdprint("{:<20}".format(str))
self.tstop()
| [
"time.time_ns",
"pyb_i2c_lcd.I2cLcd",
"pyb.I2C",
"time.sleep_ms"
] | [((1057, 1082), 'pyb.I2C', 'I2C', (['i2c_port', 'I2C.MASTER'], {}), '(i2c_port, I2C.MASTER)\n', (1060, 1082), False, 'from pyb import I2C, delay, millis\n'), ((1315, 1343), 'pyb_i2c_lcd.I2cLcd', 'I2cLcd', (['i2c', 'i2c_addr', '(4)', '(20)'], {}), '(i2c, i2c_addr, 4, 20)\n', (1321, 1343), False, 'from pyb_i2c_lcd import I2cLcd\n'), ((2147, 2166), 'time.sleep_ms', 'time.sleep_ms', (['(1000)'], {}), '(1000)\n', (2160, 2166), False, 'import time\n'), ((1500, 1514), 'time.time_ns', 'time.time_ns', ([], {}), '()\n', (1512, 1514), False, 'import time\n'), ((1597, 1611), 'time.time_ns', 'time.time_ns', ([], {}), '()\n', (1609, 1611), False, 'import time\n')] |
import random
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.core.cache import caches
class Command(BaseCommand):
help = "Loads cache with test objects"
def add_arguments(self, parser):
parser.add_argument("-c", "--cache", nargs="+", type=str)
parser.add_argument("-k", "--keys", type=int, default=50)
def handle(self, *args, **options):
caches_to_populate = []
test_data_file = settings.BASE_DIR / "dragon" / "management" / "commands" / "random_words.txt"
with open(test_data_file, "r", encoding='utf-8') as fh:
self.stdout.write(f"Reading test data from file")
test_data = fh.read().split("\n")
if "caches" in options:
caches_to_populate = [(c, caches[c]) for c in options["caches"] if c in settings.CACHES.keys()]
else:
caches_to_populate = [(c, caches[c]) for c in settings.CACHES.keys()]
self.stdout.write(
f"Populating {len(caches_to_populate)} caches with {len(test_data)} random words of test data: " +
", ".join([c[0] for c in caches_to_populate])
)
for cache_name, cache in caches_to_populate:
self.stdout.write(f"Populating cache '{cache_name}' with {options['keys']} items")
try:
for _ in range(options["keys"]):
cache.set(
random.choice(test_data),
random.choice(test_data)
)
except Exception as e:
self.stderr.write(f"Unable to populate {cache_name} due to error: {e}")
| [
"random.choice",
"django.conf.settings.CACHES.keys"
] | [((955, 977), 'django.conf.settings.CACHES.keys', 'settings.CACHES.keys', ([], {}), '()\n', (975, 977), False, 'from django.conf import settings\n'), ((859, 881), 'django.conf.settings.CACHES.keys', 'settings.CACHES.keys', ([], {}), '()\n', (879, 881), False, 'from django.conf import settings\n'), ((1465, 1489), 'random.choice', 'random.choice', (['test_data'], {}), '(test_data)\n', (1478, 1489), False, 'import random\n'), ((1515, 1539), 'random.choice', 'random.choice', (['test_data'], {}), '(test_data)\n', (1528, 1539), False, 'import random\n')] |
import os
import tensorflow as tf
from .. import preprocessing
def tf_parse_line(line, data_dir):
line_split = tf.strings.split(line, '\t')
audio_fn = line_split[1]
transcription = line_split[2]
audio_filepath = tf.strings.join([data_dir, 'clips', audio_fn], '/')
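    # Drop the clip's 4-character extension (e.g. ".mp3") and append ".wav",
    # assuming a pre-converted wav file sits next to the original clip.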
wav_filepath = tf.strings.substr(audio_filepath, 0, tf.strings.length(audio_filepath) - 4) + '.wav'
# if os.path.exists(wav_filepath):
# audio, sr = preprocessing.tf_load_audio(wav_filepath)
# else:
# print(wav_filepath, " don't exist")
print(wav_filepath)
audio, sr = preprocessing.tf_load_audio(wav_filepath)
return audio, sr, transcription
def load_dataset(base_path, name):
filepath = os.path.join(base_path, '{}.tsv'.format(name))
with open(filepath, 'r') as f:
dataset_size = sum(1 for _ in f) - 1
dataset = tf.data.TextLineDataset([filepath])
dataset = dataset.skip(1)
dataset = dataset.map(lambda line: tf_parse_line(line, base_path), num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset, dataset_size
def texts_generator(base_path):
split_names = ['dev', 'train', 'test']
for split_name in split_names:
with open(os.path.join(base_path, '{}.tsv'.format(split_name)), 'r') as f:
for line in f:
transcription = line.split('\t')[2]
yield transcription | [
"tensorflow.strings.length",
"tensorflow.strings.split",
"tensorflow.data.TextLineDataset",
"tensorflow.strings.join"
] | [((118, 146), 'tensorflow.strings.split', 'tf.strings.split', (['line', '"""\t"""'], {}), "(line, '\\t')\n", (134, 146), True, 'import tensorflow as tf\n'), ((233, 284), 'tensorflow.strings.join', 'tf.strings.join', (["[data_dir, 'clips', audio_fn]", '"""/"""'], {}), "([data_dir, 'clips', audio_fn], '/')\n", (248, 284), True, 'import tensorflow as tf\n'), ((863, 898), 'tensorflow.data.TextLineDataset', 'tf.data.TextLineDataset', (['[filepath]'], {}), '([filepath])\n', (886, 898), True, 'import tensorflow as tf\n'), ((341, 374), 'tensorflow.strings.length', 'tf.strings.length', (['audio_filepath'], {}), '(audio_filepath)\n', (358, 374), True, 'import tensorflow as tf\n')] |
import numpy as np
import math
class virtual_factory(object):
def __init__(self, blade_specs , operation, gating_ct, non_gating_ct, options):
self.options = options
# Blade inputs
self.n_webs = blade_specs['n_webs']
# Financial parameters
        self.wage = 20. # [$/hr] Hourly wage of an unskilled worker (applied per labor hour below)
self.beni = 30.4 # [%] Benefits on wage and salary
self.overhead = 30. # [%] Labor overhead
self.crr = 10. # [%] Capital recovery rate
self.wcp = 3. # [month] Working capital period - amount of time it takes to turn the net current assets and current liabilities into cash
self.p_life = 1 # [yr] Length of production run
self.rejr = 0.25 # [%] Part reject rate per process
# Productive lives
self.building_life = 30. # [yr] Building recovery life
self.eq_life = 10. # [yr] Equipment recovery life
self.tool_life = 4. # [yr] Productive tool life
# Factory parameters
self.n_blades = 1000 # [-] Number of blades that the factory aims at manufacturing
self.install_cost = 10. # [%] Installation costs
self.price_space = 800. # [$/m2] Price of building space
self.maintenance_cost = 4. # [%] Maintenance costs
self.electr = 0.08 # [$/kWh] Price of electricity
self.hours = 24. # [hr] Working hours per day
self.days = 250. # [day] Working days per year
self.avg_dt = 20. # [%] Average downtime for workers and equipment
# Compute cumulative rejection rate
self.cum_rejr = np.zeros(len(operation)) # [%]
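        # Compound the per-process reject rate backwards: cum_rejr[i] is the
        # probability that a part started at operation i is eventually scrapped.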
self.cum_rejr[-1] = 1. - (1. - (self.rejr / 100.))
for i_op in range(1, len(operation)):
self.cum_rejr[-i_op-1] = 1. - (1. - (self.rejr / 100)) * (1. - self.cum_rejr[-i_op])
# Calculate the number of sets of lp and hp skin molds needed
if self.options['discrete']:
self.n_set_molds_skins = np.ceil(self.n_blades * sum(gating_ct) / (1 - self.cum_rejr[5 + self.n_webs]) / (self.hours * self.days)) # [-] Number of skin mold sets (low and high pressure)
else:
self.n_set_molds_skins = self.n_blades * sum(gating_ct) / (1 - self.cum_rejr[5 + self.n_webs]) / (self.hours * self.days) # [-] Number of skin mold sets (low and high pressure)
# Number of parallel processes
self.parallel_proc = np.ones(len(operation)) # [-]
if self.options['discrete']:
for i_op in range(0, len(operation)):
self.parallel_proc[i_op] = np.ceil(self.n_set_molds_skins * non_gating_ct[i_op] / sum(gating_ct) / (1 - self.cum_rejr[i_op]))
n_molds_root = 2 * self.n_set_molds_skins * non_gating_ct[1] / sum(gating_ct) / (1 - self.cum_rejr[1])
if n_molds_root < 1:
self.parallel_proc[2] = 0
else:
self.parallel_proc[1] = np.ceil(self.n_set_molds_skins * non_gating_ct[ 1] / sum(gating_ct) / (1 - self.cum_rejr[1]))
self.parallel_proc[2] = np.ceil(self.n_set_molds_skins * non_gating_ct[ 2] / sum(gating_ct) / (1 - self.cum_rejr[2]))
for i_web in range(self.n_webs):
self.parallel_proc[3 + i_web] = np.ceil(2 * self.n_set_molds_skins * non_gating_ct[3 + i_web] / sum(gating_ct) / (1 - self.cum_rejr[3 + i_web]))
else:
for i_op in range(0, len(operation)):
self.parallel_proc[i_op] = self.n_set_molds_skins * non_gating_ct[i_op] / sum(gating_ct) / (1 - self.cum_rejr[i_op])
n_molds_root = 2 * self.n_set_molds_skins * non_gating_ct[1] / sum(gating_ct) / (1 - self.cum_rejr[1])
if n_molds_root < 1:
self.parallel_proc[2] = 0
else:
self.parallel_proc[1] = self.n_set_molds_skins * non_gating_ct[ 1] / sum(gating_ct) / (1 - self.cum_rejr[1])
self.parallel_proc[2] = self.n_set_molds_skins * non_gating_ct[ 2] / sum(gating_ct) / (1 - self.cum_rejr[2])
for i_web in range(self.n_webs):
self.parallel_proc[3 + i_web] = 2 * self.n_set_molds_skins * non_gating_ct[3 + i_web] / sum(gating_ct) / (1 - self.cum_rejr[3 + i_web])
self.parallel_proc[5 + self.n_webs] = self.n_set_molds_skins
self.parallel_proc[6 + self.n_webs] = self.n_set_molds_skins
self.parallel_proc[7 + self.n_webs] = self.n_set_molds_skins
self.parallel_proc[8 + self.n_webs] = self.n_set_molds_skins
# Building space per operation
delta = 2. #[m] Distance between blades
self.floor_space = np.zeros(len(operation)) # [m2]
self.floor_space[0] = 3. * blade_specs['blade_length'] # [m2] Material cutting
self.floor_space[1] = self.parallel_proc[ 1] * (delta + blade_specs['root_D']) * (delta + blade_specs['root_preform_length']) # [m2] Infusion root preform lp
self.floor_space[2] = self.parallel_proc[ 2] * (delta + blade_specs['root_D']) * (delta + blade_specs['root_preform_length']) # [m2] Infusion root preform hp
for i_web in range(self.n_webs):
self.floor_space[3 + i_web] = self.parallel_proc[ 3 + i_web] * (delta + blade_specs['length_webs'][i_web]) * (delta + blade_specs['height_webs_start'][i_web]) # [m2] Infusion webs
self.floor_space[3 + self.n_webs] = self.parallel_proc[ 3 + self.n_webs] * (delta + blade_specs['length_sc_lp']) * (delta + blade_specs['width_sc_start_lp']) # [m2] Infusion spar caps
self.floor_space[4 + self.n_webs] = self.parallel_proc[ 4 + self.n_webs] * (delta + blade_specs['length_sc_hp']) * (delta + blade_specs['width_sc_start_hp']) # [m2] Infusion spar caps
self.floor_space[5 + self.n_webs] = self.parallel_proc[ 5 + self.n_webs] * (blade_specs['max_chord'] + delta) * (blade_specs['blade_length'] + delta) # [m2] Infusion skin shell lp
self.floor_space[6 + self.n_webs] = self.parallel_proc[ 6 + self.n_webs] * (blade_specs['max_chord'] + delta) * (blade_specs['blade_length'] + delta) # [m2] Infusion skin shell hp
self.floor_space[9 + self.n_webs] = self.parallel_proc[ 9 + self.n_webs] * (blade_specs['max_chord'] + delta) * (blade_specs['blade_length'] + delta) # [m2] Trim
self.floor_space[10 + self.n_webs] = self.parallel_proc[10 + self.n_webs] * (blade_specs['root_D'] + delta) * (blade_specs['blade_length'] + delta) # [m2] Overlay
self.floor_space[11 + self.n_webs] = self.parallel_proc[11 + self.n_webs] * (blade_specs['root_D'] + delta) * (blade_specs['blade_length'] + delta) # [m2] Post cure
self.floor_space[12 + self.n_webs] = self.parallel_proc[12 + self.n_webs] * (blade_specs['max_chord'] + delta) * (blade_specs['blade_length'] + delta) # [m2] Root cut and drill
self.floor_space[13 + self.n_webs] = self.parallel_proc[13 + self.n_webs] * (blade_specs['root_D'] + delta) * (blade_specs['blade_length'] + delta) # [m2] Root hardware install
self.floor_space[14 + self.n_webs] = self.parallel_proc[14 + self.n_webs] * (blade_specs['root_D'] + delta) * (blade_specs['blade_length'] + delta) # [m2] Surface preparation
self.floor_space[15 + self.n_webs] = self.parallel_proc[15 + self.n_webs] * (blade_specs['root_D'] + delta) * (blade_specs['blade_length'] + delta) # [m2] Paint
self.floor_space[16 + self.n_webs] = self.parallel_proc[16 + self.n_webs] * (blade_specs['root_D'] + delta) * (blade_specs['blade_length'] + delta) # [m2] Surface inspection and finish
self.floor_space[17 + self.n_webs] = self.parallel_proc[17 + self.n_webs] * (blade_specs['root_D'] + delta) * (blade_specs['blade_length'] + delta) # [m2] Weight and balance
self.floor_space[18 + self.n_webs] = self.parallel_proc[18 + self.n_webs] * (blade_specs['root_D'] + delta) * (blade_specs['blade_length'] + delta) # [m2] Inspection
self.floor_space[19 + self.n_webs] = self.parallel_proc[19 + self.n_webs] * (blade_specs['root_D'] + delta) * (blade_specs['blade_length'] + delta) # [m2] Shipping preparation
# Average power consumption during each operation
        Cp = 1.01812 # [kJ/kg/K] Kalogiannakis et al. 2003
Tcure = 70 # [C]
Tamb = 22 # [C]
OvenCycle = 7 # [hr]
EtaOven = 0.5 # [-]
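        # Power per kg of part mass: energy to raise the laminate from ambient
        # to cure temperature, spread over the cure cycle and divided by the
        # oven efficiency.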
kJ_per_kg = Cp * (Tcure-Tamb) / (OvenCycle * 3600) / EtaOven
self.power_consumpt = self.floor_space * 250 / self.hours / self.days # [kW] 80000 btu / sq ft
self.power_consumpt[1] = self.power_consumpt[1] + self.parallel_proc[ 1] * blade_specs['mass_root_preform_lp'] * kJ_per_kg # [kW] Root preform lp
self.power_consumpt[2] = self.power_consumpt[2] + self.parallel_proc[ 2] * blade_specs['mass_root_preform_hp'] * kJ_per_kg # [kW] Root preform hp
for i_web in range(self.n_webs):
self.power_consumpt[3 + i_web] = self.power_consumpt[ 3 + i_web] + self.parallel_proc[3 + i_web] * blade_specs['mass_webs'][i_web] * kJ_per_kg # [kW] Root preform hp
self.power_consumpt[3 + self.n_webs] = self.power_consumpt[ 3 + self.n_webs] + self.parallel_proc[ 3 + self.n_webs] * blade_specs['mass_sc_lp'] * kJ_per_kg # [kW] Spar cap lp
self.power_consumpt[4 + self.n_webs] = self.power_consumpt[ 4 + self.n_webs] + self.parallel_proc[ 4 + self.n_webs] * blade_specs['mass_sc_hp'] * kJ_per_kg # [kW] Spar cap hp
self.power_consumpt[5 + self.n_webs] = self.power_consumpt[ 5 + self.n_webs] + self.parallel_proc[ 5 + self.n_webs] * (blade_specs['mass_shell_lp']) * kJ_per_kg # [kW] Shell lp
self.power_consumpt[6 + self.n_webs] = self.power_consumpt[ 6 + self.n_webs] + self.parallel_proc[ 6 + self.n_webs] * (blade_specs['mass_shell_hp']) * kJ_per_kg # [kW] Shell hp
self.power_consumpt[11 + self.n_webs] = self.power_consumpt[11 + self.n_webs] + self.parallel_proc[11 + self.n_webs] * blade_specs['blade_mass'] * kJ_per_kg # [kW] Post cure
# Tooling investment per station per operation (molds)
self.tooling_investment = np.zeros(len(operation)) # [$]
price_mold_sqm = 5000.
self.tooling_investment[1] = price_mold_sqm * self.parallel_proc[1] * blade_specs['area_lp_root'] # [$] Mold of the root preform - lp, cost assumed equal to 50000 $ per meter square of surface
self.tooling_investment[2] = price_mold_sqm * self.parallel_proc[2] * blade_specs['area_hp_root'] # [$] Mold of the root preform - hp, cost assumed equal to 50000 $ per meter square of surface
for i_web in range(self.n_webs):
self.tooling_investment[3 + i_web] = price_mold_sqm * self.parallel_proc[3 + i_web] * blade_specs['area_webs_w_flanges'][i_web] # [$] Mold of the webs, cost assumed equal to 10800 $ per meter square of surface
self.tooling_investment[3 + self.n_webs] = price_mold_sqm * self.parallel_proc[3 + self.n_webs] * blade_specs['area_sc_lp'] # [$] Mold of the low pressure spar cap, cost assumed equal to 10800 $ per meter square of surface
self.tooling_investment[4 + self.n_webs] = price_mold_sqm * self.parallel_proc[4 + self.n_webs] * blade_specs['area_sc_hp'] # [$] Mold of the high pressure spar cap, cost assumed equal to 10800 $ per meter square of surface
self.tooling_investment[5 + self.n_webs] = price_mold_sqm * self.parallel_proc[5 + self.n_webs] * blade_specs['area_lpskin_w_flanges'] # [$] Mold of the low pressure skin shell, assumed equal to 9400 $ per meter square of surface
self.tooling_investment[6 + self.n_webs] = price_mold_sqm * self.parallel_proc[6 + self.n_webs] * blade_specs['area_hpskin_w_flanges'] # [$] Mold of the low pressure skin shell, assumed equal to 9400 $ per meter square of surface
# Equipment investment per station per operation
self.equipm_investment = np.zeros(len(operation)) # [$]
self.equipm_investment[0] = 5000. * self.parallel_proc[ 0] * blade_specs['blade_length'] # [$] Equipment for material cutting is assumed at 5000 $ per meter of blade length
self.equipm_investment[1] = 15000. * self.parallel_proc[ 1] * blade_specs['root_D'] # [$] Equipment for root preform infusion is assumed at 15000 $ per meter of blade root diameter
self.equipm_investment[2] = 15000. * self.parallel_proc[ 2] * blade_specs['root_D'] # [$] Equipment for root preform infusion is assumed at 15000 $ per meter of blade root diameter
for i_web in range(self.n_webs):
self.equipm_investment[3 + i_web] = 1700. * self.parallel_proc[ 3 + i_web] * blade_specs['length_webs'][i_web] # [$] Equipment for webs infusion is assumed at 1700 $ per meter of web length
self.equipm_investment[3 + self.n_webs] = 1700. * self.parallel_proc[ 3 + self.n_webs] * blade_specs['length_sc_lp'] # [$] Equipment for spar caps infusion is assumed at 1700 $ per meter of spar cap length
self.equipm_investment[4 + self.n_webs] = 1700. * self.parallel_proc[ 4 + self.n_webs] * blade_specs['length_sc_hp'] # [$] Equipment for spar caps infusion is assumed at 1700 $ per meter of spar cap length
self.equipm_investment[5 + self.n_webs] = 1600. * self.parallel_proc[ 5 + self.n_webs] * blade_specs['skin_perimeter_wo_root']# [$] Equipment for skins infusion is assumed at 1600 $ per meter of skin perimeter
self.equipm_investment[6 + self.n_webs] = 1600. * self.parallel_proc[ 6 + self.n_webs] * blade_specs['skin_perimeter_wo_root']# [$] Equipment for skins infusion is assumed at 1600 $ per meter of skin perimeter
self.equipm_investment[7 + self.n_webs] = 6600. * self.parallel_proc[ 7 + self.n_webs] * sum(blade_specs['length_webs'])# [$] Equipment for assembly is assumed equal to 6600 $ per meter of total webs length
self.equipm_investment[9 + self.n_webs] = 25000. * self.parallel_proc[ 9 + self.n_webs] * blade_specs['blade_length'] # [$] Equipment for trim booth is assumed at 25000 $ per meter of blade length
self.equipm_investment[10 + self.n_webs] = 250. * self.parallel_proc[10 + self.n_webs] * blade_specs['blade_length'] # [$] Equipment for overlay is assumed at 250 $ per meter of blade length
self.equipm_investment[11 + self.n_webs] = 28500. * self.parallel_proc[11 + self.n_webs] * blade_specs['blade_length'] # [$] Equipment for post-cure is assumed at 28500 $ per meter of blade length
self.equipm_investment[12 + self.n_webs] = 390000. * self.parallel_proc[12 + self.n_webs] * blade_specs['root_D'] # [$] Equipment for root cut and drill is assumed at 390000 $ per meter of root diameter
self.equipm_investment[13 + self.n_webs] = 15500. * self.parallel_proc[13 + self.n_webs] * blade_specs['root_D'] # [$] Equipment for root hardware install is assumed at 15500 $ per meter of root diameter
self.equipm_investment[14 + self.n_webs] = 160. * self.parallel_proc[14 + self.n_webs] * (blade_specs['area_lpskin_wo_flanges'] + blade_specs['area_hpskin_wo_flanges']) # [$] Equipment for surface preparation is assumed at 160 $ per meter square of blade outer surface
self.equipm_investment[15 + self.n_webs] = 57000. * self.parallel_proc[15 + self.n_webs] * blade_specs['blade_length'] # [$] Equipment for paint booth is assumed at 57000 $ per meter of blade length
self.equipm_investment[16 + self.n_webs] = 800. * self.parallel_proc[16 + self.n_webs] * blade_specs['blade_length'] # [$] Equipment for surface inspection and finish is assumed at 800 $ per meter of blade length
self.equipm_investment[17 + self.n_webs] = 200000. * self.parallel_proc[17 + self.n_webs] # [$] Weight and Balance, assumed constant
self.equipm_investment[18 + self.n_webs] = 400. * self.parallel_proc[18 + self.n_webs] * blade_specs['blade_length'] # [$] Equipment for final inspection is assumed at 400 $ per meter of blade length
self.equipm_investment[19 + self.n_webs] = 8000. * self.parallel_proc[19 + self.n_webs] * blade_specs['root_D'] # [$] Equipment for shipping preparation is assumed at 8000 $ per meter of root diameter
def execute_direct_labor_cost(self ,operation, labor_hours):
if self.options['verbosity']:
verbosity = 1
else:
verbosity = 0
direct_labor_cost_per_blade = np.zeros(len(operation)) # [$]
direct_labor_cost_per_year = np.zeros(len(operation)) # [$]
if verbosity:
print('\n#################################\nDirect labor cost')
for i_op in range(0, len(operation)):
direct_labor_cost_per_blade[i_op] , direct_labor_cost_per_year[i_op] = compute_direct_labor_cost(self, labor_hours[i_op], operation[i_op], self.cum_rejr[i_op], verbosity)
total_direct_labor_cost_per_blade = sum(direct_labor_cost_per_blade)
total_direct_labor_cost_per_year = sum(direct_labor_cost_per_year)
total_labor_overhead_per_blade = total_direct_labor_cost_per_blade * (self.overhead / 100.)
return total_direct_labor_cost_per_blade , total_labor_overhead_per_blade
def execute_utility_cost(self, operation, ct):
if self.options['verbosity']:
verbosity = 1
else:
verbosity = 0
utility_cost_per_blade = np.zeros(len(operation)) # [$]
utility_cost_per_year = np.zeros(len(operation)) # [$]
if verbosity:
print('\n#################################\nUtility cost')
for i_op in range(0, len(operation)):
utility_cost_per_blade[i_op] , utility_cost_per_year[i_op] = compute_utility_cost(self, ct[i_op], self.power_consumpt[i_op], operation[i_op], self.cum_rejr[i_op], verbosity)
total_utility_cost_per_blade = sum(utility_cost_per_blade)
total_utility_labor_cost_per_year = sum(utility_cost_per_year)
return total_utility_cost_per_blade
def execute_fixed_cost(self, operation, ct, blade_variable_cost_w_overhead):
if self.options['verbosity']:
verbosity = 1
else:
verbosity = 0
building_cost_per_blade = np.zeros(len(operation)) # [$]
building_cost_per_year = np.zeros(len(operation)) # [$]
building_annuity = np.zeros(len(operation)) # [$]
tooling_cost_per_blade = np.zeros(len(operation)) # [$]
tooling_cost_per_year = np.zeros(len(operation)) # [$]
tooling_annuity = np.zeros(len(operation)) # [$]
equipment_cost_per_blade = np.zeros(len(operation)) # [$]
equipment_cost_per_year = np.zeros(len(operation)) # [$]
equipment_annuity = np.zeros(len(operation)) # [$]
maintenance_cost_per_blade = np.zeros(len(operation)) # [$]
maintenance_cost_per_year = np.zeros(len(operation)) # [$]
if self.options['verbosity']:
print('\n#################################\nFixed cost')
for i_op in range(0, len(operation)):
if verbosity:
print('\nBuilding:')
building_investment = self.floor_space[i_op] * self.price_space
investment_bu = building_investment * self.parallel_proc[i_op]
building_cost_per_blade[i_op], building_cost_per_year[i_op], building_annuity[i_op] = compute_cost_annuity(self, operation[i_op], investment_bu, self.building_life, verbosity)
if verbosity:
print('\nTooling:')
investment_to = self.tooling_investment[i_op] * self.parallel_proc[i_op]
tooling_cost_per_blade[i_op], tooling_cost_per_year[i_op], tooling_annuity[i_op] = compute_cost_annuity(self, operation[i_op], investment_to, self.tool_life, verbosity)
if verbosity:
print('\nEquipment:')
investment_eq = self.equipm_investment[i_op] * self.parallel_proc[i_op]
equipment_cost_per_blade[i_op], equipment_cost_per_year[i_op], equipment_annuity[i_op] = compute_cost_annuity(self, operation[i_op], investment_eq, self.eq_life, verbosity)
if verbosity:
print('\nMaintenance:')
maintenance_cost_per_blade[i_op], maintenance_cost_per_year[i_op] = compute_maintenance_cost(self, operation[i_op], investment_eq, investment_to, investment_bu, verbosity)
# Sums across operations
total_building_labor_cost_per_year = sum(building_cost_per_year)
total_building_cost_per_blade = sum(building_cost_per_blade)
total_tooling_labor_cost_per_year = sum(tooling_cost_per_year)
total_tooling_cost_per_blade = sum(tooling_cost_per_blade)
total_equipment_labor_cost_per_year = sum(equipment_cost_per_year)
total_equipment_cost_per_blade = sum(equipment_cost_per_blade)
total_maintenance_labor_cost_per_year = sum(maintenance_cost_per_year)
total_maintenance_cost_per_blade = sum(maintenance_cost_per_blade)
# Annuity
equipment_annuity_tot = sum(equipment_annuity)
tooling_annuity_tot = sum(tooling_annuity)
building_annuity_tot = sum(building_annuity)
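        # np.pmt(rate, nper, pv) returns the fixed payment of an annuity at a
        # monthly rate; note that the financial helpers were removed from NumPy
        # 1.20 and now live in the separate numpy_financial package.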
working_annuity = np.pmt(self.crr /100. / 12. , self.wcp, -(self.wcp / 12. * (total_maintenance_labor_cost_per_year + blade_variable_cost_w_overhead * self.n_blades))) * 12.
annuity_tot_per_year = equipment_annuity_tot + tooling_annuity_tot + building_annuity_tot + working_annuity
cost_of_capital_per_year = annuity_tot_per_year - (blade_variable_cost_w_overhead * self.n_blades + total_equipment_labor_cost_per_year + total_tooling_labor_cost_per_year + total_building_labor_cost_per_year + total_maintenance_labor_cost_per_year)
cost_of_capital_per_blade = cost_of_capital_per_year / self.n_blades
return total_equipment_cost_per_blade, total_tooling_cost_per_blade, total_building_cost_per_blade, total_maintenance_cost_per_blade, cost_of_capital_per_blade
def compute_direct_labor_cost(self, labor_hours, operation, cum_rejr, verbosity):
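    # Wage plus benefits over the labor hours, grossed up for average downtime
    # and for blades scrapped at or after this operation.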
cost_per_blade = (self.wage * (1. + self.beni / 100.) * labor_hours) / (1. - self.avg_dt / 100.)/(1. - cum_rejr)
cost_per_year = cost_per_blade * self.n_blades
if verbosity == 1:
print('Activity: ' + operation)
print('per blade: {:8.2f} $ \t \t --- \t \t per year: {:8.2f} $'.format(float(cost_per_blade),float(cost_per_year)))
return cost_per_blade , cost_per_year
def compute_utility_cost(self, ct, power_consumpt, operation, cum_rejr, verbosity):
cost_per_blade = (self.electr * power_consumpt * ct) / (1. - self.avg_dt / 100.)/(1. - cum_rejr)
cost_per_year = cost_per_blade * self.n_blades
if verbosity == 1:
print('Activity: ' + operation)
print('per blade: {:8.2f} $ \t \t --- \t \t per year: {:8.2f} $'.format(float(cost_per_blade),float(cost_per_year)))
return cost_per_blade , cost_per_year
def compute_cost_annuity(self, operation, investment, life, verbosity):
cost_per_year = investment / life
cost_per_blade = cost_per_year / self.n_blades
    annuity = np.pmt(self.crr / 100. / 12., life * 12., -investment) * 12.
if verbosity == 1:
print('Activity: ' + operation)
print('per blade: {:8.2f} $ \t \t --- \t \t per year: {:8.2f} $ \t \t --- \t \t annuity: {:8.2f} $'.format(float(cost_per_blade),float(cost_per_year),float(annuity)))
return cost_per_blade , cost_per_year, annuity
def compute_maintenance_cost(self, operation, investment_eq, investment_to, investment_bu, verbosity):
cost_per_year = self.maintenance_cost / 100. * (investment_eq + investment_to + investment_bu)
cost_per_blade = cost_per_year / self.n_blades
if verbosity == 1:
print('Activity: ' + operation)
print('per blade: {:8.2f} $ \t \t --- \t \t per year: {:8.2f} $'.format(float(cost_per_blade),float(cost_per_year)))
return cost_per_blade , cost_per_year
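# Illustrative note (not part of the original model): np.pmt(rate, nper, pv)
# returns the fixed periodic payment of an annuity, so each *_annuity above is
# the monthly payment on the corresponding investment, annualized via "* 12.".
# Hypothetical standalone example, assuming an 8% yearly rate over 10 years:
# >>> import numpy as np
# >>> np.pmt(0.08 / 12., 10 * 12., -100000.) * 12.  # yearly annuity on $100k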
| [
"numpy.pmt"
] | [((26268, 26325), 'numpy.pmt', 'np.pmt', (['(self.crr / 100.0 / 12.0)', '(life * 12.0)', '(-investment)'], {}), '(self.crr / 100.0 / 12.0, life * 12.0, -investment)\n', (26274, 26325), True, 'import numpy as np\n'), ((24092, 24253), 'numpy.pmt', 'np.pmt', (['(self.crr / 100.0 / 12.0)', 'self.wcp', '(-(self.wcp / 12.0 * (total_maintenance_labor_cost_per_year + \n blade_variable_cost_w_overhead * self.n_blades)))'], {}), '(self.crr / 100.0 / 12.0, self.wcp, -(self.wcp / 12.0 * (\n total_maintenance_labor_cost_per_year + blade_variable_cost_w_overhead *\n self.n_blades)))\n', (24098, 24253), True, 'import numpy as np\n')] |
from hikari import Permissions
from lightbulb import Context, Check, errors
from datetime import datetime, timedelta
from unicodedata import normalize
from aiohttp import ClientSession
from typing import Union
from os import environ
async def api_call(link: str, headers: dict = None, post: bool = False, json: bool = True) -> Union[dict, str]:
async with ClientSession() as s:
if post:
async with s.post(link, headers=headers) as resp:
return await resp.json() if json else await resp.text()
async with s.get(link, headers=headers) as resp:
return await resp.json() if json else await resp.text()
async def get_oauth():
client, secret = environ["TWITCH_CLIENT"], environ["TWITCH_TOKEN"]
data = await api_call(f"https://id.twitch.tv/oauth2/token?client_id={client}&client_secret={secret}&grant_type=client_credentials", post=True)
return {
"token": data["access_token"],
"expire": datetime.now() + timedelta(seconds=data["expires_in"])
}
def normalize_string(s: str) -> str:
return normalize(u"NFKD", s).encode("ascii", "ignore").decode("utf8")
def now(utc: bool = False) -> datetime:
if utc:
return datetime.utcnow()
return datetime.utcnow() + timedelta(hours=2)
def _is_higher(ctx: Context) -> Union[bool, Exception]:
args = ctx.message.content.split()
guild = ctx.get_guild()
member = guild.get_member(int(args[1].strip("<@!>")))
if not member:
raise errors.ConverterFailure("member")
author_top = ctx.member.get_top_role()
member_top = member.get_top_role()
if author_top.position > member_top.position:
return True
raise errors.MissingRequiredPermission(Permissions.ADMINISTRATOR)
is_higher = Check(_is_higher)
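# Hypothetical usage sketch (not part of the original module; requires the
# TWITCH_CLIENT and TWITCH_TOKEN environment variables to be set):
# import asyncio
# token_info = asyncio.run(get_oauth())
# print(token_info["token"], token_info["expire"])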
| [
"aiohttp.ClientSession",
"lightbulb.errors.ConverterFailure",
"datetime.datetime.utcnow",
"datetime.datetime.now",
"lightbulb.errors.MissingRequiredPermission",
"unicodedata.normalize",
"datetime.timedelta",
"lightbulb.Check"
] | [((1776, 1793), 'lightbulb.Check', 'Check', (['_is_higher'], {}), '(_is_higher)\n', (1781, 1793), False, 'from lightbulb import Context, Check, errors\n'), ((1702, 1761), 'lightbulb.errors.MissingRequiredPermission', 'errors.MissingRequiredPermission', (['Permissions.ADMINISTRATOR'], {}), '(Permissions.ADMINISTRATOR)\n', (1734, 1761), False, 'from lightbulb import Context, Check, errors\n'), ((363, 378), 'aiohttp.ClientSession', 'ClientSession', ([], {}), '()\n', (376, 378), False, 'from aiohttp import ClientSession\n'), ((1219, 1236), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1234, 1236), False, 'from datetime import datetime, timedelta\n'), ((1248, 1265), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1263, 1265), False, 'from datetime import datetime, timedelta\n'), ((1268, 1286), 'datetime.timedelta', 'timedelta', ([], {'hours': '(2)'}), '(hours=2)\n', (1277, 1286), False, 'from datetime import datetime, timedelta\n'), ((1504, 1537), 'lightbulb.errors.ConverterFailure', 'errors.ConverterFailure', (['"""member"""'], {}), "('member')\n", (1527, 1537), False, 'from lightbulb import Context, Check, errors\n'), ((976, 990), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (988, 990), False, 'from datetime import datetime, timedelta\n'), ((993, 1030), 'datetime.timedelta', 'timedelta', ([], {'seconds': "data['expires_in']"}), "(seconds=data['expires_in'])\n", (1002, 1030), False, 'from datetime import datetime, timedelta\n'), ((1087, 1108), 'unicodedata.normalize', 'normalize', (['u"""NFKD"""', 's'], {}), "(u'NFKD', s)\n", (1096, 1108), False, 'from unicodedata import normalize\n')] |
import torchvision
__all__ = ["plot_compare"]
def plot_compare(sr, hr, baseline, filename):
"""
Plot Super-Resolution and High-Resolution image comparison
"""
sr, hr, baseline = sr.squeeze(), hr.squeeze(), baseline.squeeze()
grid = torchvision.utils.make_grid([hr, baseline, sr])
torchvision.utils.save_image(grid, filename)
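# Minimal usage sketch (assumed shapes; any 1xCxHxW tensors work, filename is illustrative):
# import torch
# sr = hr = baseline = torch.rand(1, 3, 64, 64)
# plot_compare(sr, hr, baseline, "compare.png")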
| [
"torchvision.utils.make_grid",
"torchvision.utils.save_image"
] | [((256, 303), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['[hr, baseline, sr]'], {}), '([hr, baseline, sr])\n', (283, 303), False, 'import torchvision\n'), ((308, 352), 'torchvision.utils.save_image', 'torchvision.utils.save_image', (['grid', 'filename'], {}), '(grid, filename)\n', (336, 352), False, 'import torchvision\n')] |
import torch
from sklearn.model_selection import train_test_split
from torch.utils.data import TensorDataset, DataLoader
def data_loader(targets, labels):
batch_size = 10
train_samples, test_samples, train_labels, test_labels = train_test_split(targets, labels, test_size=0.2)
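    # 80/20 train/validation split; features become float tensors and labels
    # long tensors, as expected by CrossEntropy-style losses.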
train_samples = torch.FloatTensor(train_samples)
train_labels = torch.LongTensor(train_labels)
test_samples = torch.FloatTensor(test_samples)
test_labels = torch.LongTensor(test_labels)
trainD = TensorDataset(train_samples, train_labels)
testD = TensorDataset(test_samples, test_labels)
train_loader = DataLoader(trainD, batch_size, shuffle=True)
test_loader = DataLoader(testD, batch_size, shuffle=False)
return train_loader, test_loader | [
"sklearn.model_selection.train_test_split",
"torch.LongTensor",
"torch.utils.data.TensorDataset",
"torch.utils.data.DataLoader",
"torch.FloatTensor"
] | [((238, 286), 'sklearn.model_selection.train_test_split', 'train_test_split', (['targets', 'labels'], {'test_size': '(0.2)'}), '(targets, labels, test_size=0.2)\n', (254, 286), False, 'from sklearn.model_selection import train_test_split\n'), ((307, 339), 'torch.FloatTensor', 'torch.FloatTensor', (['train_samples'], {}), '(train_samples)\n', (324, 339), False, 'import torch\n'), ((359, 389), 'torch.LongTensor', 'torch.LongTensor', (['train_labels'], {}), '(train_labels)\n', (375, 389), False, 'import torch\n'), ((410, 441), 'torch.FloatTensor', 'torch.FloatTensor', (['test_samples'], {}), '(test_samples)\n', (427, 441), False, 'import torch\n'), ((460, 489), 'torch.LongTensor', 'torch.LongTensor', (['test_labels'], {}), '(test_labels)\n', (476, 489), False, 'import torch\n'), ((503, 545), 'torch.utils.data.TensorDataset', 'TensorDataset', (['train_samples', 'train_labels'], {}), '(train_samples, train_labels)\n', (516, 545), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((558, 598), 'torch.utils.data.TensorDataset', 'TensorDataset', (['test_samples', 'test_labels'], {}), '(test_samples, test_labels)\n', (571, 598), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((618, 662), 'torch.utils.data.DataLoader', 'DataLoader', (['trainD', 'batch_size'], {'shuffle': '(True)'}), '(trainD, batch_size, shuffle=True)\n', (628, 662), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((681, 725), 'torch.utils.data.DataLoader', 'DataLoader', (['testD', 'batch_size'], {'shuffle': '(False)'}), '(testD, batch_size, shuffle=False)\n', (691, 725), False, 'from torch.utils.data import TensorDataset, DataLoader\n')] |
import numbers
import xnmt.tensor_tools as tt
import xnmt.modelparts.decoders as decoders
import xnmt.transducers.recurrent as recurrent
import xnmt.transducers.base as transducers_base
import xnmt.expression_seqs as expr_seq
import xnmt.vocabs as vocabs
class SimultaneousState(decoders.AutoRegressiveDecoderState):
"""
The read/write state used to determine the state of the SimultaneousTranslator.
"""
def __init__(self,
model,
encoder_state: recurrent.UniLSTMState,
context_state: decoders.AutoRegressiveDecoderState,
output_embed: tt.Tensor,
to_read:int = 0,
to_write:int = 0,
prev_written_word: numbers.Integral = None,
reset_attender:bool = True):
super().__init__(None, None)
self.model = model
self.encoder_state = encoder_state
self.context_state = context_state
self.output_embed = output_embed
self.has_been_read = to_read
self.has_been_written = to_write
self.prev_written_word = prev_written_word
self.reset_attender = reset_attender
def read(self, src):
src_embed = self.model.src_embedder.embed(src[self.has_been_read])
next_encoder_state = self.encoder_state.add_input(src_embed)
return SimultaneousState(self.model, next_encoder_state, self.context_state,
self.output_embed, self.has_been_read+1, self.has_been_written,
self.prev_written_word, True)
def calc_context(self, src_encoding):
# Generating h_t based on RNN(h_{t-1}, embed(e_{t-1}))
if self.prev_written_word is None:
final_transducer_state = [transducers_base.FinalTransducerState(h, c) \
for h, c in zip(self.encoder_state.h(), self.encoder_state.c())]
context_state = self.model.decoder.initial_state(final_transducer_state,
vocabs.Vocab.SS)
else:
context_state = self.model.decoder.add_input(self.context_state, self.prev_written_word)
# Reset attender if there is a read action
reset_attender = self.reset_attender
if reset_attender:
self.model.attender.init_sent(expr_seq.ExpressionSequence(expr_list=src_encoding))
reset_attender = False
# Calc context for decoding
context_state.context = self.model.attender.calc_context(context_state.rnn_state.output())
return SimultaneousState(self.model, self.encoder_state, context_state,
self.output_embed, self.has_been_read, self.has_been_written,
self.prev_written_word,
reset_attender)
def write(self, next_word):
return SimultaneousState(self.model, self.encoder_state, self.context_state,
self.model.decoder.embedder.embed(next_word), self.has_been_read,
self.has_been_written+1,
next_word,
self.reset_attender)
# These states are used for decoding
def as_vector(self):
return self.context_state.as_vector()
@property
def rnn_state(self):
return self.context_state.rnn_state
@property
def context(self):
return self.context_state.context
@context.setter
def context(self, value):
self.context_state.context = value
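# Illustrative (hypothetical) simultaneous-decoding loop, not part of xnmt:
# a READ action consumes the next source token via state.read(src), while a
# WRITE action attends over the encodings read so far and emits a target word:
#     state = state.calc_context(src_encoding).write(next_word)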
| [
"xnmt.expression_seqs.ExpressionSequence",
"xnmt.transducers.base.FinalTransducerState"
] | [((1684, 1727), 'xnmt.transducers.base.FinalTransducerState', 'transducers_base.FinalTransducerState', (['h', 'c'], {}), '(h, c)\n', (1721, 1727), True, 'import xnmt.transducers.base as transducers_base\n'), ((2230, 2281), 'xnmt.expression_seqs.ExpressionSequence', 'expr_seq.ExpressionSequence', ([], {'expr_list': 'src_encoding'}), '(expr_list=src_encoding)\n', (2257, 2281), True, 'import xnmt.expression_seqs as expr_seq\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2019/12/27 6:04 PM
# @Title   : 83. Remove Duplicates from Sorted List
# @Link : https://leetcode-cn.com/problems/remove-duplicates-from-sorted-list/
QUESTION = """
给定一个排序链表,删除所有重复的元素,使得每个元素只出现一次。
示例 1:
输入: 1->1->2
输出: 1->2
示例 2:
输入: 1->1->2->3->3
输出: 1->2->3
"""
THINKING = """
引入变量duplicate_nodes记录新的ListNode的头部,previous生成这个ListNode
从头开始遍历,在previous和head的val属性不相等的时候
previous.next = ListNode(head.val) # 将这个节点实例化
previous = previous.next # previous挪到下一个节点
head也往后挪一位head = head.next
最后返回previous的头部duplicate_nodes
"""
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def deleteDuplicates(self, head: ListNode) -> ListNode:
if head:
duplicate_nodes = previous = ListNode(head.val)
while head:
if head.val != previous.val:
previous.next = ListNode(head.val)
previous = previous.next
head = head.next
return duplicate_nodes
if __name__ == '__main__':
from utils.linked_list import LinkedListGen
s = Solution()
head = LinkedListGen.list_to_nodes([1, 1, 2, 3, 3])
duplicate_nodes = s.deleteDuplicates(head)
print(LinkedListGen.nodes_to_list(duplicate_nodes))
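    # Expected output for the sample input above: [1, 2, 3]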
| [
"utils.linked_list.LinkedListGen.nodes_to_list",
"utils.linked_list.LinkedListGen.list_to_nodes"
] | [((1210, 1254), 'utils.linked_list.LinkedListGen.list_to_nodes', 'LinkedListGen.list_to_nodes', (['[1, 1, 2, 3, 3]'], {}), '([1, 1, 2, 3, 3])\n', (1237, 1254), False, 'from utils.linked_list import LinkedListGen\n'), ((1312, 1356), 'utils.linked_list.LinkedListGen.nodes_to_list', 'LinkedListGen.nodes_to_list', (['duplicate_nodes'], {}), '(duplicate_nodes)\n', (1339, 1356), False, 'from utils.linked_list import LinkedListGen\n')] |
import numpy as np
import pickle as pkl
from envs.babyai.oracle.teacher import Teacher
class XYCorrections(Teacher):
def __init__(self, *args, **kwargs):
super(XYCorrections, self).__init__(*args, **kwargs)
self.next_state_coords = self.empty_feedback()
def empty_feedback(self):
"""
Return a tensor corresponding to no feedback.
"""
return np.zeros(8) - 1
def random_feedback(self):
"""
        Return a tensor of random feedback.
"""
return np.random.uniform(0, 1, size=8)
def compute_feedback(self, oracle, last_action=-1):
"""
        Return the normalized coordinates of a state a few steps ahead of the oracle, concatenated with the current state's coordinates.
"""
# Copy so we don't mess up the state of the real oracle
oracle_copy = pkl.loads(pkl.dumps(oracle))
self.step_ahead(oracle_copy, last_action=last_action)
return np.concatenate([self.next_state_coords])
    # TODO: THIS IS NOT IMPLEMENTED FOR THIS TEACHER! IF WE END UP USING THIS METRIC, WE SHOULD MAKE IT CORRECT!
def success_check(self, state, action, oracle):
return True
def step_ahead(self, oracle, last_action=-1):
env = oracle.mission
# Remove teacher so we don't end up with a recursion error
env.teacher = None
try:
curr_coords = np.concatenate([env.agent_pos, [env.agent_dir, int(env.carrying is not None)]]).astype(
np.float32)
self.next_state, next_state_coords, _, _ = self.step_away_state(oracle, self.cartesian_steps,
last_action=last_action)
# Coords are quite large, so normalize them to between [-1, 1]
self.next_state_coords = next_state_coords.astype(np.float32)
self.next_state_coords[:2] = (self.next_state_coords[:2].astype(np.float32) - 12) / 12
curr_coords[:2] = (curr_coords[:2] - 12) / 6
self.next_state_coords = np.concatenate([self.next_state_coords, curr_coords])
# Also normalize direction
self.next_state_coords[2] = self.next_state_coords[2] - 2
self.next_state_coords[6] = self.next_state_coords[6] - 2
except Exception as e:
print("STEP AWAY FAILED XY!", e)
self.next_state = self.next_state * 0
self.next_state_coords = self.empty_feedback()
self.last_step_error = True
return oracle
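# Note (illustrative, based on the scaling above): grid positions are shifted
# and rescaled so the network sees inputs roughly in [-1, 1], and the direction
# channel (integer 0-3) is shifted by -2 into [-2, 1].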
| [
"pickle.dumps",
"numpy.zeros",
"numpy.concatenate",
"numpy.random.uniform"
] | [((541, 572), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': '(8)'}), '(0, 1, size=8)\n', (558, 572), True, 'import numpy as np\n'), ((907, 947), 'numpy.concatenate', 'np.concatenate', (['[self.next_state_coords]'], {}), '([self.next_state_coords])\n', (921, 947), True, 'import numpy as np\n'), ((400, 411), 'numpy.zeros', 'np.zeros', (['(8)'], {}), '(8)\n', (408, 411), True, 'import numpy as np\n'), ((811, 828), 'pickle.dumps', 'pkl.dumps', (['oracle'], {}), '(oracle)\n', (820, 828), True, 'import pickle as pkl\n'), ((2011, 2064), 'numpy.concatenate', 'np.concatenate', (['[self.next_state_coords, curr_coords]'], {}), '([self.next_state_coords, curr_coords])\n', (2025, 2064), True, 'import numpy as np\n')] |
TEST_TEMP_RAW = 529191
TEST_TEMP_CMP = 24.7894877676
TEST_PRES_RAW = 326816
TEST_PRES_CMP = 1006.61517564
TEST_ALT_CMP = 57.3174
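# The BMP280 stores each 20-bit raw reading across three registers: MSB (bits
# 19-12), LSB (bits 11-4) and XLSB (bits 3-0, left-aligned in the upper nibble).
# The mask/shift lines in the tests below pack the fake raw values into that layout.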
def test_temperature():
from tools import SMBusFakeDevice
from bmp280 import BMP280
from calibration import BMP280Calibration
dev = SMBusFakeDevice(1)
# Load the fake temperature into the virtual registers
dev.regs[0xfc] = (TEST_TEMP_RAW & 0x0000F) << 4
dev.regs[0xfb] = (TEST_TEMP_RAW & 0x00FF0) >> 4
dev.regs[0xfa] = (TEST_TEMP_RAW & 0xFF000) >> 12
bmp280 = BMP280(i2c_dev=dev)
bmp280.setup()
# Replace the loaded calibration with our known values
bmp280.calibration = BMP280Calibration()
assert round(bmp280.get_temperature(), 4) == round(TEST_TEMP_CMP, 4)
def test_temperature_forced():
from tools import SMBusFakeDevice
from bmp280 import BMP280
from calibration import BMP280Calibration
dev = SMBusFakeDevice(1)
# Load the fake temperature into the virtual registers
dev.regs[0xfc] = (TEST_TEMP_RAW & 0x0000F) << 4
dev.regs[0xfb] = (TEST_TEMP_RAW & 0x00FF0) >> 4
dev.regs[0xfa] = (TEST_TEMP_RAW & 0xFF000) >> 12
bmp280 = BMP280(i2c_dev=dev)
bmp280.setup(mode="forced")
# Replace the loaded calibration with our known values
bmp280.calibration = BMP280Calibration()
assert round(bmp280.get_temperature(), 4) == round(TEST_TEMP_CMP, 4)
def test_pressure():
from tools import SMBusFakeDevice
from bmp280 import BMP280
from calibration import BMP280Calibration
dev = SMBusFakeDevice(1)
# Load the fake temperature values into the virtual registers
# Pressure is temperature compensated!!!
dev.regs[0xfc] = (TEST_TEMP_RAW & 0x0000F) << 4
dev.regs[0xfb] = (TEST_TEMP_RAW & 0x00FF0) >> 4
dev.regs[0xfa] = (TEST_TEMP_RAW & 0xFF000) >> 12
# Load the fake pressure values
dev.regs[0xf9] = (TEST_PRES_RAW & 0x0000F) << 4
dev.regs[0xf8] = (TEST_PRES_RAW & 0x00FF0) >> 4
dev.regs[0xf7] = (TEST_PRES_RAW & 0xFF000) >> 12
bmp280 = BMP280(i2c_dev=dev)
bmp280.setup()
# Replace the loaded calibration with our known values
bmp280.calibration = BMP280Calibration()
assert round(bmp280.get_pressure(), 4) == round(TEST_PRES_CMP, 4)
def test_altitude():
from tools import SMBusFakeDevice
from bmp280 import BMP280
from calibration import BMP280Calibration
dev = SMBusFakeDevice(1)
# Load the fake temperature values into the virtual registers
# Pressure is temperature compensated!!!
dev.regs[0xfc] = (TEST_TEMP_RAW & 0x0000F) << 4
dev.regs[0xfb] = (TEST_TEMP_RAW & 0x00FF0) >> 4
dev.regs[0xfa] = (TEST_TEMP_RAW & 0xFF000) >> 12
# Load the fake pressure values
dev.regs[0xf9] = (TEST_PRES_RAW & 0x0000F) << 4
dev.regs[0xf8] = (TEST_PRES_RAW & 0x00FF0) >> 4
dev.regs[0xf7] = (TEST_PRES_RAW & 0xFF000) >> 12
bmp280 = BMP280(i2c_dev=dev)
bmp280.setup()
# Replace the loaded calibration with our known values
bmp280.calibration = BMP280Calibration()
assert round(bmp280.get_altitude(), 4) == round(TEST_ALT_CMP, 4)
| [
"calibration.BMP280Calibration",
"bmp280.BMP280",
"tools.SMBusFakeDevice"
] | [((280, 298), 'tools.SMBusFakeDevice', 'SMBusFakeDevice', (['(1)'], {}), '(1)\n', (295, 298), False, 'from tools import SMBusFakeDevice\n'), ((530, 549), 'bmp280.BMP280', 'BMP280', ([], {'i2c_dev': 'dev'}), '(i2c_dev=dev)\n', (536, 549), False, 'from bmp280 import BMP280\n'), ((654, 673), 'calibration.BMP280Calibration', 'BMP280Calibration', ([], {}), '()\n', (671, 673), False, 'from calibration import BMP280Calibration\n'), ((905, 923), 'tools.SMBusFakeDevice', 'SMBusFakeDevice', (['(1)'], {}), '(1)\n', (920, 923), False, 'from tools import SMBusFakeDevice\n'), ((1155, 1174), 'bmp280.BMP280', 'BMP280', ([], {'i2c_dev': 'dev'}), '(i2c_dev=dev)\n', (1161, 1174), False, 'from bmp280 import BMP280\n'), ((1292, 1311), 'calibration.BMP280Calibration', 'BMP280Calibration', ([], {}), '()\n', (1309, 1311), False, 'from calibration import BMP280Calibration\n'), ((1533, 1551), 'tools.SMBusFakeDevice', 'SMBusFakeDevice', (['(1)'], {}), '(1)\n', (1548, 1551), False, 'from tools import SMBusFakeDevice\n'), ((2029, 2048), 'bmp280.BMP280', 'BMP280', ([], {'i2c_dev': 'dev'}), '(i2c_dev=dev)\n', (2035, 2048), False, 'from bmp280 import BMP280\n'), ((2153, 2172), 'calibration.BMP280Calibration', 'BMP280Calibration', ([], {}), '()\n', (2170, 2172), False, 'from calibration import BMP280Calibration\n'), ((2391, 2409), 'tools.SMBusFakeDevice', 'SMBusFakeDevice', (['(1)'], {}), '(1)\n', (2406, 2409), False, 'from tools import SMBusFakeDevice\n'), ((2887, 2906), 'bmp280.BMP280', 'BMP280', ([], {'i2c_dev': 'dev'}), '(i2c_dev=dev)\n', (2893, 2906), False, 'from bmp280 import BMP280\n'), ((3011, 3030), 'calibration.BMP280Calibration', 'BMP280Calibration', ([], {}), '()\n', (3028, 3030), False, 'from calibration import BMP280Calibration\n')] |
# MNIST example, downloaded from the pytorch-metric-learning (PML) GitHub
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
### MNIST code originally from https://github.com/pytorch/examples/blob/master/mnist/main.py ###
from torchvision import datasets, transforms
from pytorch_metric_learning import distances, losses, miners, reducers, testers
from pytorch_metric_learning.utils.accuracy_calculator import AccuracyCalculator
from loss import SmoothAPLoss
### MNIST code originally from https://github.com/pytorch/examples/blob/master/mnist/main.py ###
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = nn.Linear(9216, 128)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
return x
### MNIST code originally from https://github.com/pytorch/examples/blob/master/mnist/main.py ###
def train(model, loss_func, mining_func, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, labels) in enumerate(train_loader):
data, labels = data.to(device), labels.to(device)
optimizer.zero_grad()
embeddings = model(data)
indices_tuple = None
if mining_func is not None:
indices_tuple = mining_func(embeddings, labels)
loss = loss_func(embeddings, labels, indices_tuple)
loss.backward()
optimizer.step()
if batch_idx % 20 == 0:
print(
"Epoch {} Iteration {}: Loss = {}, Number of mined triplets = {}".format(
epoch, batch_idx, loss, mining_func.num_triplets if mining_func is not None else None
)
)
### convenient function from pytorch-metric-learning ###
def get_all_embeddings(dataset, model):
tester = testers.BaseTester()
return tester.get_all_embeddings(dataset, model)
### compute accuracy using AccuracyCalculator from pytorch-metric-learning ###
def test(train_set, test_set, model, accuracy_calculator):
train_embeddings, train_labels = get_all_embeddings(train_set, model)
test_embeddings, test_labels = get_all_embeddings(test_set, model)
train_labels = train_labels.squeeze(1)
test_labels = test_labels.squeeze(1)
print("Computing accuracy")
accuracies = accuracy_calculator.get_accuracy(
test_embeddings, train_embeddings, test_labels, train_labels, False
)
print("Test set accuracy (Precision@1) = {}".format(accuracies["precision_at_1"]))
device = torch.device("cuda")
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
)
batch_size = 256
dataset1 = datasets.MNIST(".", train=True, download=True, transform=transform)
dataset2 = datasets.MNIST(".", train=False, transform=transform)
train_loader = torch.utils.data.DataLoader(dataset1, batch_size=256, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset2, batch_size=256)
model = Net().to(device)
optimizer = optim.Adam(model.parameters(), lr=0.01)
num_epochs = 1
### pytorch-metric-learning stuff ###
# distance = distances.CosineSimilarity()
# reducer = reducers.ThresholdReducer(low=0)
# loss_func = losses.TripletMarginLoss(margin=0.2, distance=distance, reducer=reducer)
# mining_func = miners.TripletMarginMiner(
# margin=0.2, distance=distance, type_of_triplets="semihard"
# )
loss_func = losses.CrossBatchMemory(SmoothAPLoss(), 128, memory_size=512)
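# Note: 128 is the embedding size and must match Net.fc1's output dimension;
# CrossBatchMemory keeps the last 512 embeddings from previous batches as
# additional positives/negatives for the wrapped loss.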
accuracy_calculator = AccuracyCalculator(include=("precision_at_1",), k=1)
### pytorch-metric-learning stuff ###
for epoch in range(1, num_epochs + 1):
train(model, loss_func, None, device, train_loader, optimizer, epoch)
test(dataset1, dataset2, model, accuracy_calculator)
| [
"torch.device",
"torch.nn.Dropout2d",
"torch.nn.Conv2d",
"pytorch_metric_learning.utils.accuracy_calculator.AccuracyCalculator",
"torchvision.datasets.MNIST",
"torch.utils.data.DataLoader",
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.nn.functional.max_pool2d",
"torchvision.transforms.Nor... | [((2839, 2859), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2851, 2859), False, 'import torch\n'), ((2997, 3064), 'torchvision.datasets.MNIST', 'datasets.MNIST', (['"""."""'], {'train': '(True)', 'download': '(True)', 'transform': 'transform'}), "('.', train=True, download=True, transform=transform)\n", (3011, 3064), False, 'from torchvision import datasets, transforms\n'), ((3076, 3129), 'torchvision.datasets.MNIST', 'datasets.MNIST', (['"""."""'], {'train': '(False)', 'transform': 'transform'}), "('.', train=False, transform=transform)\n", (3090, 3129), False, 'from torchvision import datasets, transforms\n'), ((3145, 3212), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset1'], {'batch_size': '(256)', 'shuffle': '(True)'}), '(dataset1, batch_size=256, shuffle=True)\n', (3172, 3212), False, 'import torch\n'), ((3227, 3280), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset2'], {'batch_size': '(256)'}), '(dataset2, batch_size=256)\n', (3254, 3280), False, 'import torch\n'), ((3796, 3848), 'pytorch_metric_learning.utils.accuracy_calculator.AccuracyCalculator', 'AccuracyCalculator', ([], {'include': "('precision_at_1',)", 'k': '(1)'}), "(include=('precision_at_1',), k=1)\n", (3814, 3848), False, 'from pytorch_metric_learning.utils.accuracy_calculator import AccuracyCalculator\n'), ((2133, 2153), 'pytorch_metric_learning.testers.BaseTester', 'testers.BaseTester', ([], {}), '()\n', (2151, 2153), False, 'from pytorch_metric_learning import distances, losses, miners, reducers, testers\n'), ((3736, 3750), 'loss.SmoothAPLoss', 'SmoothAPLoss', ([], {}), '()\n', (3748, 3750), False, 'from loss import SmoothAPLoss\n'), ((677, 699), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(32)', '(3)', '(1)'], {}), '(1, 32, 3, 1)\n', (686, 699), True, 'import torch.nn as nn\n'), ((721, 744), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(64)', '(3)', '(1)'], {}), '(32, 64, 3, 1)\n', (730, 744), True, 'import torch.nn as nn\n'), ((769, 787), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['(0.25)'], {}), '(0.25)\n', (781, 787), True, 'import torch.nn as nn\n'), ((812, 829), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['(0.5)'], {}), '(0.5)\n', (824, 829), True, 'import torch.nn as nn\n'), ((849, 869), 'torch.nn.Linear', 'nn.Linear', (['(9216)', '(128)'], {}), '(9216, 128)\n', (858, 869), True, 'import torch.nn as nn\n'), ((935, 944), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (941, 944), True, 'import torch.nn.functional as F\n'), ((983, 992), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (989, 992), True, 'import torch.nn.functional as F\n'), ((1005, 1023), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x', '(2)'], {}), '(x, 2)\n', (1017, 1023), True, 'import torch.nn.functional as F\n'), ((1065, 1084), 'torch.flatten', 'torch.flatten', (['x', '(1)'], {}), '(x, 1)\n', (1078, 1084), False, 'import torch\n'), ((2898, 2919), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2917, 2919), False, 'from torchvision import datasets, transforms\n'), ((2921, 2963), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (2941, 2963), False, 'from torchvision import datasets, transforms\n')] |
import time
while True:
print("Test program")
time.sleep(10)
pass | [
"time.sleep"
] | [((54, 68), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (64, 68), False, 'import time\n')] |
#!/usr/bin/env python3.6
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2020 of Qualcomm Innovation Center, Inc. All rights reserved.
#
# @@-COPYRIGHT-END-@@
# =============================================================================
# ==============================================================================
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import json
import argparse
import logging
import tensorflow as tf
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from tensorflow.contrib.slim import tfexample_decoder as slim_example_decoder
from tensorflow.contrib.quantize.python import quantize
from tensorflow.contrib.quantize.python import fold_batch_norms
from object_detection.core import standard_fields as fields
from object_detection.data_decoders.tf_example_decoder import TfExampleDecoder
from aimet_tensorflow import quantizer as q
from aimet_tensorflow import quantsim
from aimet_tensorflow.batch_norm_fold import fold_all_batch_norms
logger = logging.getLogger(__file__)
def load_graph(graph, meta_graph, checkpoint=None):
"""
Load a TF graph given the meta and checkpoint files
:param graph: Graph to load into
:param meta_graph: Meta file
:param checkpoint: Checkpoint file
:return: Newly created TF session
"""
gpu_options = tf.GPUOptions(allow_growth=True)
config = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)
sess = tf.Session(config=config, graph=graph)
# Open the graph and restore the parameters
saver = tf.train.import_meta_graph(meta_graph, clear_devices=True)
if checkpoint is None:
checkpoint = meta_graph.split('.meta')[0]
saver.restore(sess, checkpoint)
return sess, saver
def initialize_uninitialized_vars(sess):
"""
Some graphs have variables created after training that need to be initialized.
However, in pre-trained graphs we don't want to reinitialize variables that are already
which would overwrite the values obtained during training. Therefore search for all
uninitialized variables and initialize ONLY those variables.
:param sess: TF session
:return:
"""
from itertools import compress
global_vars = tf.global_variables()
is_not_initialized = sess.run([~(tf.is_variable_initialized(var)) for var in global_vars])
uninitialized_vars = list(compress(global_vars, is_not_initialized))
if uninitialized_vars:
sess.run(tf.variables_initializer(uninitialized_vars))
class CocoParser:
def __init__(self, data_inputs=None, validation_inputs=None, batch_size=1):
"""
Constructor
:param data_inputs: List of input ops for the model
:param validation_inputs: List of validation ops for the model
:param batch_size: Batch size for the data
"""
self._validation_inputs = validation_inputs
self._data_inputs = data_inputs
self._batch_size = batch_size
if data_inputs is None:
self._data_inputs = ['image_tensor']
else:
self._data_inputs = data_inputs
self.keys_to_features = TfExampleDecoder().keys_to_features
self.items_to_handlers = {
fields.InputDataFields.image: (
slim_example_decoder.Image(image_key='image/encoded', format_key='image/format', channels=3)),
fields.InputDataFields.source_id: (slim_example_decoder.Tensor('image/source_id')),
}
def get_data_inputs(self):
return self._data_inputs
def get_validation_inputs(self):
return self._validation_inputs
def get_batch_size(self):
return self._batch_size
def parse(self, serialized_example, is_trainning):
"""
Parse one example
:param serialized_example:
:param is_trainning:
:return: tensor_dict
"""
decoder = slim_example_decoder.TFExampleDecoder(self.keys_to_features,
self.items_to_handlers)
keys = decoder.list_items()
tensors = decoder.decode(serialized_example, items=keys)
tensor_dict = dict(zip(keys, tensors))
tensor_dict[fields.InputDataFields.image].set_shape([None, None, 3])
tensor_dict[fields.InputDataFields.original_image_spatial_shape] = tf.shape(
tensor_dict[fields.InputDataFields.image])[:2]
tensor_dict[fields.InputDataFields.image] = tf.image.resize_images(
tensor_dict[fields.InputDataFields.image], tf.stack([300, 300]),
method=0)
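        # 300x300 matches the SSD MobileNet v2 input resolution; method=0 is
        # tf.image.ResizeMethod.BILINEAR.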
if fields.InputDataFields.image_additional_channels in tensor_dict:
channels = tensor_dict[fields.InputDataFields.image_additional_channels]
channels = tf.squeeze(channels, axis=3)
channels = tf.transpose(channels, perm=[1, 2, 0])
tensor_dict[fields.InputDataFields.image_additional_channels] = channels
if fields.InputDataFields.groundtruth_boxes in tensor_dict:
is_crowd = fields.InputDataFields.groundtruth_is_crowd
tensor_dict[is_crowd] = tf.cast(tensor_dict[is_crowd], dtype=tf.bool)
def default_groundtruth_weights():
shape = tf.shape(tensor_dict[fields.InputDataFields.groundtruth_boxes])[0]
            return tf.ones([shape], dtype=tf.float32)
shape = tf.shape(tensor_dict[fields.InputDataFields.groundtruth_weights])[0]
tensor_dict[fields.InputDataFields.groundtruth_weights] = tf.cond(
            tf.greater(shape, 0),
lambda: tensor_dict[fields.InputDataFields.groundtruth_weights],
default_groundtruth_weights)
return tensor_dict
def get_batch(self, iterator, next_element, sess):
"""
Get the next batch of data
:param next_element:
:param iterator: Data iterator
:return: Inputs in feed_dict form
"""
try:
keys = next_element.keys()
tensors = []
for key in keys:
tensors.append(next_element[key])
tensors_np = sess.run(tensors)
except tf.errors.OutOfRangeError:
tf.logging.error('tf.errors.OutOfRangeError')
raise
return dict(zip(keys, tensors_np))
class TfRecordGenerator:
""" Dataset generator for TfRecords"""
def __init__(self, dataset_dir, parser, file_pattern=None, is_trainning=False, num_gpus=1, num_epochs=None):
"""
Constructor
:param dataset_dir: The directory where the dataset files are stored.
:param file_pattern: The file pattern to use for matching the dataset source files.
:param parser: parser function to read tfrecords.
:param num_gpus: The number of GPUs being used. Data batches must be generated for each GPU device
:param num_epochs: How many times to repeat the dataset. Default is forever. Then the
amount of data generated is determined by the number of iterations the model is run and the batch
size. If set to a specific number the dataset will only provide the amount of the total dataset
'num_epochs' times.
:return: A new TfRecord generator used to generate data for model analysis
"""
self._parser = parser
self._num_gpus = num_gpus
# Setup the Dataset reader
if not file_pattern:
if not is_trainning:
file_pattern = 'validation-*-of-*'
else:
file_pattern = 'train-*-of-*'
file_pattern = os.path.join(dataset_dir, file_pattern)
tfrecords = tf.data.Dataset.list_files(file_pattern, shuffle=False)
self._dataset = tf.data.TFRecordDataset(tfrecords).repeat(num_epochs)
batch_size = self._parser.get_batch_size()
parse_fn = lambda x: self._parser.parse(x, is_trainning)
self._dataset = self._dataset.map(parse_fn)
self._dataset = self._dataset.batch(batch_size)
# Initialize the iterator. This must be allocated during init when the
# generator is to be used manually. Otherwise the generator will generate a
# new iterator each time it's used as an iterator
with self._dataset._graph.as_default():
self._iterator = self._dataset.make_one_shot_iterator()
self._next_element = self._iterator.get_next()
self.sess = tf.Session()
def __iter__(self):
"""
Iter method for the generator
:return:
"""
with self._dataset._graph.as_default():
self._iterator = self._dataset.make_one_shot_iterator()
self._next_element = self._iterator.get_next()
self.sess = tf.Session()
return self
def __next__(self):
"""
Return the next set of batched data
**NOTE** This function will not return new batches until the previous batches have
actually been used by a call to tensorflow. Eg used in a graph with a call to
'run' etc. If it's unused the same tensors will be returned over and over again.
:return:
"""
return self._parser.get_batch(self._iterator, self._next_element, self.sess)
# Map next for python27 compatibility
next = __next__
def get_data_inputs(self):
return self._parser.get_data_inputs()
def get_validation_inputs(self):
return self._parser.get_validation_inputs()
def get_batch_size(self):
return self._parser.get_batch_size()
@property
def dataset(self):
return self._dataset
class MobileNetV2SSDRunner:
def __init__(self, generator, checkpoint, annotation_file, graph=None, network=None,
is_train=False,
fold_bn=False, quantize=False):
self._generator = generator
self._checkpoint = checkpoint
self._annotation_file = annotation_file
self._graph = graph
self._network = network
self._is_train = is_train
self._fold_bn = fold_bn
self._quantize = quantize
if is_train is False:
self._eval_session, self._eval_saver = self.build_eval_graph()
@staticmethod
def post_func(tensors_dict, annotation_file):
json_list = []
# t_bbox [ymin,xmin,ymax,xmax]
# gt [xmin,ymin,width,height]
for i in range(len(tensors_dict)):
result_dict = tensors_dict[i]
for j in range(len(result_dict[fields.DetectionResultFields.detection_scores])):
t_score = result_dict[fields.DetectionResultFields.detection_scores][j]
t_bbox = result_dict[fields.DetectionResultFields.detection_boxes][j]
t_class = result_dict[fields.DetectionResultFields.detection_classes][j]
image_id = int(result_dict[fields.InputDataFields.source_id][j])
Height = result_dict[fields.InputDataFields.original_image_spatial_shape][j][0]
Width = result_dict[fields.InputDataFields.original_image_spatial_shape][j][1]
for index, conf in enumerate(t_score):
top_conf = float(t_score[index])
top_ymin = t_bbox[index][0] * Height
top_xmin = t_bbox[index][1] * Width
                    top_w = (t_bbox[index][3] - t_bbox[index][1]) * Width   # box width
                    top_h = (t_bbox[index][2] - t_bbox[index][0]) * Height  # box height
                    top_cat = int(t_class[index])
                    json_dict = {'image_id': image_id, 'category_id': top_cat,
                                 'bbox': [top_xmin, top_ymin, top_w, top_h], 'score': top_conf}
json_list.append(json_dict)
cocoGt = COCO(annotation_file)
cocoDt = cocoGt.loadRes(json_list)
cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
dict_map_result = {'IoU[0.50:0.95]': cocoEval.stats[0], 'IoU[0.50]': cocoEval.stats[1],
'IoU[0.75]': cocoEval.stats[2]}
return dict_map_result
@property
def eval_session(self):
return self._eval_session
def evaluate(self, session, iterations, loginfo=None, generator=None, post_func=None, eval_names=None):
generator = generator if generator is not None else self._generator
post_func = post_func if post_func is not None else self.post_func
eval_names = eval_names if eval_names is not None else self.eval_names
if loginfo is not None:
logger.info(loginfo)
return self.run_graph(session, generator, eval_names, post_func, iterations)
def build_eval_graph(self):
g = tf.Graph()
with g.as_default():
sess, saver = load_graph(g, self._graph, self._checkpoint)
if self._fold_bn:
fold_batch_norms.FoldBatchNorms(graph=sess.graph, freeze_batch_norm_delay=None,
is_training=False)
if self._quantize:
quantize.Quantize(
graph=sess.graph,
is_training=False,
quant_delay=0,
weight_bits=8,
activation_bits=8,
scope=None)
return sess, saver
def run_graph(self, session, generator, eval_names, post_func, iterations):
"""
Evaluates the graph's performance by running data through the network
and calling an evaluation function to generate the performance metric.
:param session: The tensorflow session that contains the graph
:param generator: The data generator providing the network with batch data
:param eval_names: The names providing the nodes on which the network's performance should be judged
:param post_func: The customized post processing function to evaluate the network performance
:param iterations: The number of iterations (batches) to run through the network
:return:
"""
initialize_uninitialized_vars(session)
image_tensor = session.graph.get_tensor_by_name('image_tensor:0')
eval_outputs = []
for name in eval_names:
op = session.graph.get_operation_by_name(name)
eval_outputs.append(op.outputs[0])
counters = {'skipped': 0, 'success': 0}
result_list = []
try:
for _, input_dict in zip(range(iterations), generator):
# Setup the feed dictionary
feed_dict = {image_tensor: input_dict[fields.InputDataFields.image]}
try:
output_data = session.run(eval_outputs, feed_dict=feed_dict)
counters['success'] += 1
export_dict = {
fields.InputDataFields.source_id:
input_dict[fields.InputDataFields.source_id],
fields.InputDataFields.original_image_spatial_shape:
input_dict[fields.InputDataFields.original_image_spatial_shape]
}
export_dict.update(dict(zip(eval_names, output_data)))
result_list.append(export_dict)
except tf.errors.InvalidArgumentError:
counters['skipped'] += 1
except tf.errors.OutOfRangeError:
logger.info("Completed evaluation iterations: %i, success: %i, skipped: %i",
iterations, counters['success'], counters['skipped'])
finally:
if post_func is not None:
perf = post_func(result_list, self._annotation_file)
logger.info("%s", perf)
else:
perf = result_list
return perf
def forward_func(self, sess, iterations):
return self.run_graph(sess, self._generator, self.eval_names, None, iterations)
@property
def eval_names(self):
return [fields.DetectionResultFields.detection_scores, fields.DetectionResultFields.detection_boxes,
fields.DetectionResultFields.detection_classes]
def parse_args():
""" Parse the arguments.
"""
parser = argparse.ArgumentParser(description='Evaluation script for SSD MobileNet v2.')
parser.add_argument('--model-checkpoint', help='Path to model checkpoint', required=True)
parser.add_argument('--dataset-dir', help='Dir path to dataset (TFRecord format)', required=True)
parser.add_argument('--TFRecord-file-pattern', help='Dataset file pattern, e.g. coco_val.record-*-of-00010',
required=True)
parser.add_argument('--annotation-json-file', help='Path to ground truth annotation json file', required=True)
parser.add_argument('--eval-batch-size', help='Batch size to evaluate', default=1, type=int)
parser.add_argument('--eval-num-examples', help='Number of examples to evaluate, total 5000', default=5000,
type=int)
parser.add_argument('--quantsim-output-dir', help='Use this flag if want to save the quantized graph')
return parser.parse_args()
def ssd_mobilenet_v2_quanteval(args):
parser = CocoParser(batch_size=args.eval_batch_size)
generator = TfRecordGenerator(dataset_dir=args.dataset_dir, file_pattern=args.TFRecord_file_pattern,
parser=parser, is_trainning=False)
# Allocate the runner related to model session run
runner = MobileNetV2SSDRunner(generator=generator, checkpoint=args.model_checkpoint,
annotation_file=args.annotation_json_file, graph=args.model_checkpoint + '.meta',
fold_bn=False, quantize=False, is_train=False)
float_sess = runner.eval_session
iterations = int(args.eval_num_examples / args.eval_batch_size)
runner.evaluate(float_sess, iterations, 'original model evaluating')
# Fold BN
after_fold_sess, _ = fold_all_batch_norms(float_sess, generator.get_data_inputs(), ['concat', 'concat_1'])
#
# Allocate the quantizer and quantize the network using the default 8 bit params/activations
sim = quantsim.QuantizationSimModel(after_fold_sess, ['FeatureExtractor/MobilenetV2/MobilenetV2/input'],
output_op_names=['concat', 'concat_1'],
quant_scheme='tf',
default_output_bw=8, default_param_bw=8,
use_cuda=False)
# Compute encodings
sim.compute_encodings(runner.forward_func, forward_pass_callback_args=50)
# Export model for target inference
if args.quantsim_output_dir:
sim.export(os.path.join(args.quantsim_output_dir, 'export'), 'model.ckpt')
# Evaluate simulated quantization performance
runner.evaluate(sim.session, iterations, 'quantized model evaluating')
if __name__ == '__main__':
args = parse_args()
ssd_mobilenet_v2_quanteval(args)
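# Example invocation (script name and paths are placeholders):
# python ssd_mobilenet_v2_quanteval.py \
#     --model-checkpoint /path/to/model.ckpt \
#     --dataset-dir /path/to/coco_tfrecords \
#     --TFRecord-file-pattern 'coco_val.record-*-of-00010' \
#     --annotation-json-file /path/to/instances_val2017.json \
#     --eval-num-examples 500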
| [
"logging.getLogger",
"tensorflow.shape",
"pycocotools.cocoeval.COCOeval",
"tensorflow.transpose",
"tensorflow.data.Dataset.list_files",
"tensorflow.contrib.slim.tfexample_decoder.TFExampleDecoder",
"tensorflow.cast",
"aimet_tensorflow.quantsim.QuantizationSimModel",
"tensorflow.variables_initializer... | [((1760, 1787), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (1777, 1787), False, 'import logging\n'), ((2079, 2111), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'allow_growth': '(True)'}), '(allow_growth=True)\n', (2092, 2111), True, 'import tensorflow as tf\n'), ((2125, 2191), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'gpu_options': 'gpu_options'}), '(allow_soft_placement=True, gpu_options=gpu_options)\n', (2139, 2191), True, 'import tensorflow as tf\n'), ((2203, 2241), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config', 'graph': 'graph'}), '(config=config, graph=graph)\n', (2213, 2241), True, 'import tensorflow as tf\n'), ((2302, 2360), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['meta_graph'], {'clear_devices': '(True)'}), '(meta_graph, clear_devices=True)\n', (2328, 2360), True, 'import tensorflow as tf\n'), ((2978, 2999), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (2997, 2999), True, 'import tensorflow as tf\n'), ((17032, 17110), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Evaluation script for SSD MobileNet v2."""'}), "(description='Evaluation script for SSD MobileNet v2.')\n", (17055, 17110), False, 'import argparse\n'), ((18988, 19216), 'aimet_tensorflow.quantsim.QuantizationSimModel', 'quantsim.QuantizationSimModel', (['after_fold_sess', "['FeatureExtractor/MobilenetV2/MobilenetV2/input']"], {'output_op_names': "['concat', 'concat_1']", 'quant_scheme': '"""tf"""', 'default_output_bw': '(8)', 'default_param_bw': '(8)', 'use_cuda': '(False)'}), "(after_fold_sess, [\n 'FeatureExtractor/MobilenetV2/MobilenetV2/input'], output_op_names=[\n 'concat', 'concat_1'], quant_scheme='tf', default_output_bw=8,\n default_param_bw=8, use_cuda=False)\n", (19017, 19216), False, 'from aimet_tensorflow import quantsim\n'), ((3125, 3166), 'itertools.compress', 'compress', (['global_vars', 'is_not_initialized'], {}), '(global_vars, is_not_initialized)\n', (3133, 3166), False, 'from itertools import compress\n'), ((4640, 4729), 'tensorflow.contrib.slim.tfexample_decoder.TFExampleDecoder', 'slim_example_decoder.TFExampleDecoder', (['self.keys_to_features', 'self.items_to_handlers'], {}), '(self.keys_to_features, self.\n items_to_handlers)\n', (4677, 4729), True, 'from tensorflow.contrib.slim import tfexample_decoder as slim_example_decoder\n'), ((8358, 8397), 'os.path.join', 'os.path.join', (['dataset_dir', 'file_pattern'], {}), '(dataset_dir, file_pattern)\n', (8370, 8397), False, 'import os\n'), ((8418, 8473), 'tensorflow.data.Dataset.list_files', 'tf.data.Dataset.list_files', (['file_pattern'], {'shuffle': '(False)'}), '(file_pattern, shuffle=False)\n', (8444, 8473), True, 'import tensorflow as tf\n'), ((12515, 12536), 'pycocotools.coco.COCO', 'COCO', (['annotation_file'], {}), '(annotation_file)\n', (12519, 12536), False, 'from pycocotools.coco import COCO\n'), ((12599, 12631), 'pycocotools.cocoeval.COCOeval', 'COCOeval', (['cocoGt', 'cocoDt', '"""bbox"""'], {}), "(cocoGt, cocoDt, 'bbox')\n", (12607, 12631), False, 'from pycocotools.cocoeval import COCOeval\n'), ((13516, 13526), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (13524, 13526), True, 'import tensorflow as tf\n'), ((3212, 3256), 'tensorflow.variables_initializer', 'tf.variables_initializer', (['uninitialized_vars'], {}), '(uninitialized_vars)\n', (3236, 3256), True, 'import tensorflow as tf\n'), ((3885, 3903), 
'object_detection.data_decoders.tf_example_decoder.TfExampleDecoder', 'TfExampleDecoder', ([], {}), '()\n', (3901, 3903), False, 'from object_detection.data_decoders.tf_example_decoder import TfExampleDecoder\n'), ((4017, 4114), 'tensorflow.contrib.slim.tfexample_decoder.Image', 'slim_example_decoder.Image', ([], {'image_key': '"""image/encoded"""', 'format_key': '"""image/format"""', 'channels': '(3)'}), "(image_key='image/encoded', format_key=\n 'image/format', channels=3)\n", (4043, 4114), True, 'from tensorflow.contrib.slim import tfexample_decoder as slim_example_decoder\n'), ((4159, 4205), 'tensorflow.contrib.slim.tfexample_decoder.Tensor', 'slim_example_decoder.Tensor', (['"""image/source_id"""'], {}), "('image/source_id')\n", (4186, 4205), True, 'from tensorflow.contrib.slim import tfexample_decoder as slim_example_decoder\n'), ((5082, 5133), 'tensorflow.shape', 'tf.shape', (['tensor_dict[fields.InputDataFields.image]'], {}), '(tensor_dict[fields.InputDataFields.image])\n', (5090, 5133), True, 'import tensorflow as tf\n'), ((5283, 5303), 'tensorflow.stack', 'tf.stack', (['[300, 300]'], {}), '([300, 300])\n', (5291, 5303), True, 'import tensorflow as tf\n'), ((5512, 5540), 'tensorflow.squeeze', 'tf.squeeze', (['channels'], {'axis': '(3)'}), '(channels, axis=3)\n', (5522, 5540), True, 'import tensorflow as tf\n'), ((5564, 5602), 'tensorflow.transpose', 'tf.transpose', (['channels'], {'perm': '[1, 2, 0]'}), '(channels, perm=[1, 2, 0])\n', (5576, 5602), True, 'import tensorflow as tf\n'), ((5860, 5905), 'tensorflow.cast', 'tf.cast', (['tensor_dict[is_crowd]'], {'dtype': 'tf.bool'}), '(tensor_dict[is_crowd], dtype=tf.bool)\n', (5867, 5905), True, 'import tensorflow as tf\n'), ((9197, 9209), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (9207, 9209), True, 'import tensorflow as tf\n'), ((9513, 9525), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (9523, 9525), True, 'import tensorflow as tf\n'), ((19557, 19605), 'os.path.join', 'os.path.join', (['args.quantsim_output_dir', '"""export"""'], {}), "(args.quantsim_output_dir, 'export')\n", (19569, 19605), False, 'import os\n'), ((3037, 3068), 'tensorflow.is_variable_initialized', 'tf.is_variable_initialized', (['var'], {}), '(var)\n', (3063, 3068), True, 'import tensorflow as tf\n'), ((6068, 6102), 'tensorflow.ones', 'tf.ones', (['[shpae]'], {'dtype': 'tf.float32'}), '([shpae], dtype=tf.float32)\n', (6075, 6102), True, 'import tensorflow as tf\n'), ((6124, 6189), 'tensorflow.shape', 'tf.shape', (['tensor_dict[fields.InputDataFields.groundtruth_weights]'], {}), '(tensor_dict[fields.InputDataFields.groundtruth_weights])\n', (6132, 6189), True, 'import tensorflow as tf\n'), ((6288, 6308), 'tensorflow.greater', 'tf.greater', (['shape', '(0)'], {}), '(shape, 0)\n', (6298, 6308), True, 'import tensorflow as tf\n'), ((6949, 6994), 'tensorflow.logging.error', 'tf.logging.error', (['"""tf.errors.OutOfRangeError"""'], {}), "('tf.errors.OutOfRangeError')\n", (6965, 6994), True, 'import tensorflow as tf\n'), ((8498, 8532), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['tfrecords'], {}), '(tfrecords)\n', (8521, 8532), True, 'import tensorflow as tf\n'), ((13673, 13776), 'tensorflow.contrib.quantize.python.fold_batch_norms.FoldBatchNorms', 'fold_batch_norms.FoldBatchNorms', ([], {'graph': 'sess.graph', 'freeze_batch_norm_delay': 'None', 'is_training': '(False)'}), '(graph=sess.graph, freeze_batch_norm_delay=\n None, is_training=False)\n', (13704, 13776), False, 'from tensorflow.contrib.quantize.python import 
fold_batch_norms\n'), ((13867, 13986), 'tensorflow.contrib.quantize.python.quantize.Quantize', 'quantize.Quantize', ([], {'graph': 'sess.graph', 'is_training': '(False)', 'quant_delay': '(0)', 'weight_bits': '(8)', 'activation_bits': '(8)', 'scope': 'None'}), '(graph=sess.graph, is_training=False, quant_delay=0,\n weight_bits=8, activation_bits=8, scope=None)\n', (13884, 13986), False, 'from tensorflow.contrib.quantize.python import quantize\n'), ((5978, 6041), 'tensorflow.shape', 'tf.shape', (['tensor_dict[fields.InputDataFields.groundtruth_boxes]'], {}), '(tensor_dict[fields.InputDataFields.groundtruth_boxes])\n', (5986, 6041), True, 'import tensorflow as tf\n')] |
import torch
from torchvision import datasets
import shutil
import argparse
import os
import numpy as np
from tqdm import tqdm
########### Help ###########
'''
#size = (h,w)
python split_train_val.py \
--data_dir /Users/aman.gupta/Documents/self/datasets/blank_page_detection/letterbox_training_data/ \
--val_ratio 0.10 \
--output_dir /Users/aman.gupta/Documents/self/datasets/blank_page_detection/splitted_letterbox_training_data
'''
###########################
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="this script splits classification data into train and val based on ratio provided by user")
parser.add_argument("--data_dir",required = True,help="training data path")
parser.add_argument("--val_ratio",default = 0.2,type = float, help="ratio of val in total data")
parser.add_argument("--output_dir",required=False,default="./data/",type=str,help="dir to save images")
args = parser.parse_args()
os.makedirs(args.output_dir,exist_ok=True)
data = datasets.ImageFolder(args.data_dir)
imgs_info = data.imgs
classes = data.classes
output_folders = ['train','val']
for o_folder in output_folders:
for folder in classes:
os.makedirs(os.path.join(args.output_dir,o_folder,folder),exist_ok=True)
print(f"Total classes:{len(classes)}")
np.random.shuffle(imgs_info)
num_samples = len(imgs_info)
split = int(np.floor(args.val_ratio * num_samples))
train_info, test_info = imgs_info[split:], imgs_info[:split]
print(f"processing train...")
for info in tqdm(train_info):
folder = classes[info[1]]
source = info[0]
file_name = os.path.basename(source)
destination = os.path.join(args.output_dir,output_folders[0],folder,file_name)
shutil.copy(source, destination)
print(f"processing val...")
for info in tqdm(test_info):
folder = classes[info[1]]
source = info[0]
file_name = os.path.basename(source)
destination = os.path.join(args.output_dir,output_folders[1],folder,file_name)
shutil.copy(source, destination)
| [
"os.makedirs",
"argparse.ArgumentParser",
"tqdm.tqdm",
"numpy.floor",
"os.path.join",
"torchvision.datasets.ImageFolder",
"os.path.basename",
"shutil.copy",
"numpy.random.shuffle"
] | [((524, 662), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""this script splits classification data into train and val based on ratio provided by user"""'}), "(description=\n 'this script splits classification data into train and val based on ratio provided by user'\n )\n", (547, 662), False, 'import argparse\n'), ((982, 1025), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {'exist_ok': '(True)'}), '(args.output_dir, exist_ok=True)\n', (993, 1025), False, 'import os\n'), ((1038, 1073), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['args.data_dir'], {}), '(args.data_dir)\n', (1058, 1073), False, 'from torchvision import datasets\n'), ((1370, 1398), 'numpy.random.shuffle', 'np.random.shuffle', (['imgs_info'], {}), '(imgs_info)\n', (1387, 1398), True, 'import numpy as np\n'), ((1610, 1626), 'tqdm.tqdm', 'tqdm', (['train_info'], {}), '(train_info)\n', (1614, 1626), False, 'from tqdm import tqdm\n'), ((1914, 1929), 'tqdm.tqdm', 'tqdm', (['test_info'], {}), '(test_info)\n', (1918, 1929), False, 'from tqdm import tqdm\n'), ((1453, 1491), 'numpy.floor', 'np.floor', (['(args.val_ratio * num_samples)'], {}), '(args.val_ratio * num_samples)\n', (1461, 1491), True, 'import numpy as np\n'), ((1707, 1731), 'os.path.basename', 'os.path.basename', (['source'], {}), '(source)\n', (1723, 1731), False, 'import os\n'), ((1754, 1821), 'os.path.join', 'os.path.join', (['args.output_dir', 'output_folders[0]', 'folder', 'file_name'], {}), '(args.output_dir, output_folders[0], folder, file_name)\n', (1766, 1821), False, 'import os\n'), ((1827, 1859), 'shutil.copy', 'shutil.copy', (['source', 'destination'], {}), '(source, destination)\n', (1838, 1859), False, 'import shutil\n'), ((2010, 2034), 'os.path.basename', 'os.path.basename', (['source'], {}), '(source)\n', (2026, 2034), False, 'import os\n'), ((2057, 2124), 'os.path.join', 'os.path.join', (['args.output_dir', 'output_folders[1]', 'folder', 'file_name'], {}), '(args.output_dir, output_folders[1], folder, file_name)\n', (2069, 2124), False, 'import os\n'), ((2130, 2162), 'shutil.copy', 'shutil.copy', (['source', 'destination'], {}), '(source, destination)\n', (2141, 2162), False, 'import shutil\n'), ((1256, 1303), 'os.path.join', 'os.path.join', (['args.output_dir', 'o_folder', 'folder'], {}), '(args.output_dir, o_folder, folder)\n', (1268, 1303), False, 'import os\n')] |
""" Yahoo Finance Model """
__docformat__ = "numpy"
import logging
import pandas as pd
import yfinance as yf
from gamestonk_terminal.decorators import log_start_end
from gamestonk_terminal.rich_config import console
logger = logging.getLogger(__name__)
INDICES = {
"sp500": {"name": "S&P 500", "ticker": "^GSPC"},
"nasdaq": {"name": "Nasdaq Composite", "ticker": "^IXIC"},
"dowjones": {"name": "Dow Jones Industrial Average", "ticker": "^DJI"},
"vix": {"name": "CBOE Volatility Index", "ticker": "^VIX"},
"russel": {"name": "Russel 2000 Index", "ticker": "^RUT"},
"tsx": {"name": "TSX Composite", "ticker": "^GSPTSE"},
"nikkei": {"name": "Nikkei 255 Stock Average", "ticker": "^N225"},
"shanghai": {"name": "Shanghai Composite Index", "ticker": "000001.SS"},
"ftse100": {"name": "FTSE 100 Index ('footsie')", "ticker": "^FTSE"},
"stoxx50": {"name": "Euro STOXX 50", "ticker": "^STOXX50E"},
"dax": {"name": "DAX Performance Index", "ticker": "^GDAXI"},
"cac40": {"name": "CAC 40 Index", "ticker": "^FCHI"},
}
@log_start_end(log=logger)
def get_index(
index: str,
interval: str = "1d",
start_date: int = None,
end_date: int = None,
column: str = "Adj Close",
) -> pd.Series:
"""Obtain data on any index [Source: Yahoo Finance]
Parameters
----------
index: str
The index you wish to collect data for.
    start_date : int
        The start date from which to collect the data.
    end_date : int
        The end date up to which to collect the data.
    interval : str
        Valid intervals: 1m, 2m, 5m, 15m, 30m, 60m, 90m, 1h, 1d, 5d, 1wk, 1mo or 3mo.
        Intraday data cannot extend past the last 60 days.
column : str
The column you wish to select, by default this is Adjusted Close.
    Returns
    -------
pd.Series
A series with the requested index
"""
if index.lower() in INDICES:
ticker = INDICES[index.lower()]["ticker"]
else:
ticker = index
index_data = yf.download(
ticker,
start=start_date,
end=end_date,
interval=interval,
progress=False,
show_errors=False,
)
if column not in index_data.columns:
console.print(
f"The chosen column is not available for {ticker}. Please choose "
f"between: {', '.join(index_data.columns)}\n"
)
return pd.Series()
if index_data.empty or len(index_data) < 2:
console.print(
f"The chosen index {ticker}, returns no data. Please check if "
f"there is any data available.\n"
)
return pd.Series()
return index_data[column]
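# Usage sketch (illustrative, not part of the module): fetch daily adjusted
# closes for the S&P 500. yfinance also accepts ISO date strings here, even
# though the annotations above say int.
#   sp500 = get_index("sp500", interval="1d", start_date="2021-01-01", end_date="2021-12-31")
#   print(sp500.tail())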
| [
"logging.getLogger",
"pandas.Series",
"gamestonk_terminal.decorators.log_start_end",
"gamestonk_terminal.rich_config.console.print",
"yfinance.download"
] | [((229, 256), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (246, 256), False, 'import logging\n'), ((1064, 1089), 'gamestonk_terminal.decorators.log_start_end', 'log_start_end', ([], {'log': 'logger'}), '(log=logger)\n', (1077, 1089), False, 'from gamestonk_terminal.decorators import log_start_end\n'), ((1991, 2100), 'yfinance.download', 'yf.download', (['ticker'], {'start': 'start_date', 'end': 'end_date', 'interval': 'interval', 'progress': '(False)', 'show_errors': '(False)'}), '(ticker, start=start_date, end=end_date, interval=interval,\n progress=False, show_errors=False)\n', (2002, 2100), True, 'import yfinance as yf\n'), ((2379, 2390), 'pandas.Series', 'pd.Series', ([], {}), '()\n', (2388, 2390), True, 'import pandas as pd\n'), ((2447, 2568), 'gamestonk_terminal.rich_config.console.print', 'console.print', (['f"""The chosen index {ticker}, returns no data. Please check if there is any data available.\n"""'], {}), '(\n f"""The chosen index {ticker}, returns no data. Please check if there is any data available.\n"""\n )\n', (2460, 2568), False, 'from gamestonk_terminal.rich_config import console\n'), ((2609, 2620), 'pandas.Series', 'pd.Series', ([], {}), '()\n', (2618, 2620), True, 'import pandas as pd\n')] |
# Simple XML against XSD Validator for Python 2.7 - 3.2
# to run this script you need additionally: lxml (http://lxml.de)
# author: <NAME>, 2013
import sys
from lxml import etree
xsd_files = []
xml_files = []
def usage():
print("Usage: ")
print("python XSDValidator.py <list of xml files> <list of xsd files>")
print("At least one .xml and one .xsd file is required.")
def validate_files():
""" validates every xml file against every schema file"""
for schema in xsd_files:
xmlschema = etree.XMLSchema(file=schema)
for file in xml_files:
xml_file = etree.parse(file)
if xmlschema.validate(xml_file):
print(file + " is valid against " + schema)
else:
log = xmlschema.error_log
print(file + " is not valid against " + schema)
for error in iter(log):
print("\tReason: " + error.message)
def main():
    if len(sys.argv) < 3:
usage()
sys.exit()
for arg in sys.argv[1:]:
if arg.lower().endswith(".xml"):
xml_files.append(arg)
elif arg.lower().endswith(".xsd"):
xsd_files.append(arg)
if len(xsd_files) < 1 or len(xml_files) < 1:
usage()
sys.exit()
validate_files()
if __name__ == '__main__':
main()
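# Example run (file names are placeholders): every .xml argument is validated
# against every .xsd argument in turn.
#   python XSDValidator.py order.xml invoice.xml order.xsd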
| [
"lxml.etree.XMLSchema",
"lxml.etree.parse",
"sys.exit"
] | [((525, 553), 'lxml.etree.XMLSchema', 'etree.XMLSchema', ([], {'file': 'schema'}), '(file=schema)\n', (540, 553), False, 'from lxml import etree\n'), ((1019, 1029), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1027, 1029), False, 'import sys\n'), ((1284, 1294), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1292, 1294), False, 'import sys\n'), ((608, 625), 'lxml.etree.parse', 'etree.parse', (['file'], {}), '(file)\n', (619, 625), False, 'from lxml import etree\n')] |
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from zshoes.stores.models import Store
@python_2_unicode_compatible
class Article(models.Model):
"""
Entity that represents the articles of the store
"""
#: Name of the Article
name = models.CharField(max_length=45)
#: Description of the Article
description = models.TextField(null=True, blank=True)
#: Price of the Article
price = models.FloatField()
#: Available articles in shelf
total_in_shelf = models.PositiveIntegerField(default=0)
#: Available articles in vault
total_in_vault = models.PositiveIntegerField(default=0)
#: Store of the article
store = models.ForeignKey(Store)
def __str__(self):
return self.name
| [
"django.db.models.FloatField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.PositiveIntegerField",
"django.db.models.CharField"
] | [((339, 370), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(45)'}), '(max_length=45)\n', (355, 370), False, 'from django.db import models\n'), ((423, 462), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (439, 462), False, 'from django.db import models\n'), ((503, 522), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (520, 522), False, 'from django.db import models\n'), ((579, 617), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (606, 617), False, 'from django.db import models\n'), ((674, 712), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (701, 712), False, 'from django.db import models\n'), ((753, 777), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Store'], {}), '(Store)\n', (770, 777), False, 'from django.db import models\n')] |
#!/usr/bin/env python3
import sys, os, json
import random
# Check to make sure we are running the correct version of Python
assert sys.version_info >= (3,7), "This script requires at least Python 3.7"
# The game description file (in the same folder as this script)
game_file = 'game.json'
# Load the contents of the file into the game dictionary. You can largely ignore this
# Sorry it's messy, I'm trying to account for any potential craziness with the file location
def load_files():
try:
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
with open(os.path.join(__location__, game_file)) as json_file: game = json.load(json_file)
return game
except:
print("There was a problem reading either the game or item file.")
os._exit(1)
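# Shape that game.json is expected to have, inferred from how it is read
# below (illustrative — the real file may contain more fields):
# {
#   "INTRO": {"name": "...", "desc": "...",
#             "exits": [{"exit": "...", "target": "END", "happiness": 0,
#                        "unrest": 0, "economy": 0, "corruption": 0}]},
#   "END":   {"name": "...", "desc": "...", "exits": []}
# }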
score = {
"Happiness": 50,
"Unrest": 50,
"Economy": 50,
"Corruption": 50
}
def render(game,current):
c = game[current]
print("\n\nHappiness:", score["Happiness"])
print("Unrest:", score["Unrest"])
print("Economy:", score["Economy"])
print("Corruption:",score["Corruption"])
print(c["name"])
print(c["desc"])
if len(c["exits"]):
print("\nChoose: ")
for p in range(len(c["exits"])):
print("{}. {}".format(p+1, c["exits"][p]["exit"]))
def get_input():
response = input("\nMake a choice: ")
response = response.upper().strip()
return response
def update(game,current,response):
c = game[current]
if response.isdigit():
try:
p = int(response) - 1
score["Happiness"] += c["exits"][p]["happiness"]
score["Unrest"] += c["exits"][p]["unrest"]
score["Economy"] += c["exits"][p]["economy"]
score["Corruption"] += c["exits"][p]["corruption"]
return c["exits"][p]["target"]
except:
return current
return current
# The main function for the game
def main():
current = "INTRO" # The starting location
end_game = ['END'] # Any of the end-game locations
game = load_files()
while True:
if score["Happiness"] <= 0:
print("Your people are unhappy. They won't rise up but instead leave peacefully in search of a new happier life.")
print("Your final scores were:")
print("Happiness:", score["Happiness"])
print("Unrest:", score["Unrest"])
print("Economy:", score["Economy"])
print("Corruption:",score["Corruption"])
break
elif score["Unrest"] >= 100:
print("Your people have had enough. You get thrown out of power and the nation goes back into chaos.")
print("Your final scores were:")
print("Happiness:", score["Happiness"])
print("Unrest:", score["Unrest"])
print("Economy:", score["Economy"])
print("Corruption:",score["Corruption"])
break
elif score["Economy"] <= 0:
print("Your people are to poor. While they might like living in your country, they leave in mass in search of a place were they can make a living and not starve.")
print("Your final scores were:")
print("Happiness:", score["Happiness"])
print("Unrest:", score["Unrest"])
print("Economy:", score["Economy"])
print("Corruption:",score["Corruption"])
break
elif score["Corruption"] >= 100:
print("You let corruption grow right under your nose, with so much corruption you are thrown out of power and a new, worse goverment takes power.")
print("Your final scores were:")
print("Happiness:", score["Happiness"])
print("Unrest:", score["Unrest"])
print("Economy:", score["Economy"])
print("Corruption:",score["Corruption"])
break
else:
render(game,current)
if current in end_game:
print("You've made your decisions!")
print("Your stats ended up looking like this:")
print("Happiness:", score["Happiness"])
print("Unrest:", score["Unrest"])
print("Economy:", score["Economy"])
print("Corruption:",score["Corruption"])
total_score = score["Happiness"] + score["Unrest"] + score["Economy"] + score["Corruption"]
if total_score > 350:
print("Through your efforts you have made a near perfect nation and your people have high hopes for the future")
print("AMAZING VICTORY")
elif total_score > 300:
print("Through your efforts you have made a great nation and your people believe things will only get better from here")
print("GREAT VICTORY")
elif total_score > 250:
print("Through your efforts you have made an good nation but, your people don't fully believe that the nation will become the best")
print("GOOD VICTORY")
elif total_score > 200:
print("Through your efforts you have barely squeezed out a nation and your people don't believe you will make it better, but it's better than what was happening before.")
print("MINOR VICTORY")
else:
print("Despite your efforts your people aren't happy and eventually your are removed from power.")
print("YOU LOSE")
break #break out of the while loop
response = get_input()
if response == "QUIT" or response == "Q":
break #break out of the while loop
current = update(game,current,response)
print("\nThanks for playing!")
# run the main function
if __name__ == '__main__':
    main()
| [
"os.path.join",
"os.getcwd",
"os.path.dirname",
"os._exit",
"json.load"
] | [((693, 713), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (702, 713), False, 'import sys, os, json\n'), ((829, 840), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (837, 840), False, 'import sys, os, json\n'), ((574, 585), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (583, 585), False, 'import sys, os, json\n'), ((587, 612), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (602, 612), False, 'import sys, os, json\n'), ((633, 670), 'os.path.join', 'os.path.join', (['__location__', 'game_file'], {}), '(__location__, game_file)\n', (645, 670), False, 'import sys, os, json\n')] |
"""
This module provides a sequence class which can be used for cyclic values
"""
from typing import Generic, TypeVar
T = TypeVar('T')
class Cycle(list, Generic[T]):
    """This class can be used to store cyclic values"""
    def __getitem__(self, key: int) -> T:
        return super().__getitem__(key % len(self))
    def __setitem__(self, key: int, value: T):
        super().__setitem__(key % len(self), value)
    def remove(self, index: int):
        """Remove the value at location "index" (interpreted modulo the length)"""
        del self[index % len(self)]
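# Minimal usage sketch (illustrative, not part of the module): indexing wraps
# modulo the length, so out-of-range keys are legal.
#   c = Cycle([10, 20, 30])
#   c[3]        # -> 10, because 3 % 3 == 0
#   c[-4]       # -> 30, because -4 % 3 == 2
#   c[4] = 99   # writes to index 4 % 3 == 1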
| [
"typing.TypeVar"
] | [((124, 136), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (131, 136), False, 'from typing import Generic, TypeVar\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-17 12:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='orders',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order_id', models.PositiveIntegerField()),
('order_amount', models.DecimalField(decimal_places=2, max_digits=8)),
('customer', models.CharField(max_length=200, null=True, unique=True, verbose_name=b'Company Name')),
('ship_date', models.DateField(help_text=b'Please use the following format: YYYY/MM/DD.', null=True)),
],
),
]
| [
"django.db.models.DateField",
"django.db.models.AutoField",
"django.db.models.PositiveIntegerField",
"django.db.models.DecimalField",
"django.db.models.CharField"
] | [((366, 459), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (382, 459), False, 'from django.db import migrations, models\n'), ((487, 516), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {}), '()\n', (514, 516), False, 'from django.db import migrations, models\n'), ((552, 603), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(2)', 'max_digits': '(8)'}), '(decimal_places=2, max_digits=8)\n', (571, 603), False, 'from django.db import migrations, models\n'), ((635, 726), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'null': '(True)', 'unique': '(True)', 'verbose_name': "b'Company Name'"}), "(max_length=200, null=True, unique=True, verbose_name=\n b'Company Name')\n", (651, 726), False, 'from django.db import migrations, models\n'), ((754, 844), 'django.db.models.DateField', 'models.DateField', ([], {'help_text': "b'Please use the following format: YYYY/MM/DD.'", 'null': '(True)'}), "(help_text=b'Please use the following format: YYYY/MM/DD.',\n null=True)\n", (770, 844), False, 'from django.db import migrations, models\n')] |
###############################################
# <NAME> - PG Applied AI - Programming
# Unit tests, for the graph algorithms
###############################################
import unittest # unit testing ftw
from board import Board
import numpy as np
import play
class TestMethods(unittest.TestCase):
def test_horizontal_win(self):
game_play = Board(4)
game_play.set_state([
[' ',' ',' ',' '],
[' ','O','O','O'],
[' ',' ',' ',' '],
[' ',' ',' ',' ']
])
self.assertEqual(True, game_play.player_won())
def test_diagonal_down_win(self):
game_play = Board(4)
game_play.set_state([
['O',' ',' ',' '],
[' ','O',' ',' '],
[' ',' ','O',' '],
[' ',' ',' ',' ']
])
self.assertEqual(True, game_play.player_won())
def test_diagonal_up_win(self):
game_play = Board(4)
game_play.set_state([
[' ',' ',' ',' '],
[' ',' ','O',' '],
[' ','O',' ',' '],
['O',' ',' ',' ']
])
self.assertEqual(True, game_play.player_won())
def test_vertical_win(self):
game_play = Board(4)
game_play.set_state([
[' ',' ',' ',' '],
[' ',' ','O',' '],
[' ',' ','O',' '],
[' ',' ','O',' ']
])
self.assertEqual(True, game_play.player_won())
def test_empty_cells(self):
game_play = Board(2)
game_play.set_state([
[' ','1'],
['1',' ']
])
        self.assertEqual([[0,0],[1,1]], game_play.get_empty_cells())
def test_game_finished_score(self):
game_play = Board(4)
game_play.set_state([
[' ',' ',' ',' '],
[' ',' ','O',' '],
[' ','O',' ',' '],
['O',' ',' ',' ']
])
game_over, score = game_play.check_game_finished(True)
self.assertEqual(True, game_over)
self.assertLess(score, 0)
def test_game_finished_score_case01(self):
game_play = Board(3)
game_play.set_state([
['X','O','X'],
['O','O','X'],
['O','X','X']
])
game_over, score = game_play.check_game_finished(True)
self.assertEqual(True, game_over)
self.assertGreater(score, 0)
def test_diagonals(self):
game_play = Board(4)
game_state = [
[' ',' ',' ',' '],
['O','O',' ','O'],
[' ',' ','O',' '],
[' ',' ','O','O']
]
game_play.set_state(game_state)
        self.assertEqual([' ', 'O', 'O', 'O'], game_play.get_diagonal_range(0, 0, True))
        self.assertEqual([' ',' ',' '], game_play.get_diagonal_range(0, 1, True))
        self.assertEqual([' ',' ',' ', ' '], game_play.get_diagonal_range(3, 0, False))
        self.assertEqual([' ','O',' '], game_play.get_diagonal_range(2, 0, False))
| [
"board.Board"
] | [((379, 387), 'board.Board', 'Board', (['(4)'], {}), '(4)\n', (384, 387), False, 'from board import Board\n'), ((686, 694), 'board.Board', 'Board', (['(4)'], {}), '(4)\n', (691, 694), False, 'from board import Board\n'), ((991, 999), 'board.Board', 'Board', (['(4)'], {}), '(4)\n', (996, 999), False, 'from board import Board\n'), ((1293, 1301), 'board.Board', 'Board', (['(4)'], {}), '(4)\n', (1298, 1301), False, 'from board import Board\n'), ((1594, 1602), 'board.Board', 'Board', (['(2)'], {}), '(2)\n', (1599, 1602), False, 'from board import Board\n'), ((1832, 1840), 'board.Board', 'Board', (['(4)'], {}), '(4)\n', (1837, 1840), False, 'from board import Board\n'), ((2234, 2242), 'board.Board', 'Board', (['(3)'], {}), '(3)\n', (2239, 2242), False, 'from board import Board\n'), ((2580, 2588), 'board.Board', 'Board', (['(4)'], {}), '(4)\n', (2585, 2588), False, 'from board import Board\n')] |
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from . import dash_managers, dash_queue, dash_service
from .app import app
from .navbar import navbar
body = dbc.Container(
[
dbc.Row([
dbc.Col(
[
html.H2("Overview"),
html.P("""\
Welcome to the QCFractal Dashboard which will give a high
level overview of the current state of the database.
"""),
dbc.Button("View details", color="secondary"),
],
md=4,
),
dbc.Col([
html.H2("Graph"),
dcc.Graph(figure={"data": [{
"x": [1, 2, 3],
"y": [1, 4, 9]
}]}),
]),
])
],
className="mt-4",
)
app.layout = html.Div([dcc.Location(id='url', refresh=False), html.Div(id='page-content')])
@app.callback(Output('page-content', 'children'), [Input('url', 'pathname')])
def display_page(pathname):
if pathname == '/manager':
return dash_managers.layout()
elif pathname == '/queue':
return dash_queue.layout()
elif pathname == '/service':
return dash_service.layout()
else:
return html.Div([navbar, body])
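# Routing note: dcc.Location feeds the current pathname into the callback above,
# which swaps the matching sub-app layout into the 'page-content' Div; any
# unknown path falls back to the landing page (navbar + body).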
| [
"dash_bootstrap_components.Button",
"dash.dependencies.Output",
"dash_core_components.Location",
"dash.dependencies.Input",
"dash_html_components.H2",
"dash_core_components.Graph",
"dash_html_components.P",
"dash_html_components.Div"
] | [((1018, 1052), 'dash.dependencies.Output', 'Output', (['"""page-content"""', '"""children"""'], {}), "('page-content', 'children')\n", (1024, 1052), False, 'from dash.dependencies import Input, Output\n'), ((933, 970), 'dash_core_components.Location', 'dcc.Location', ([], {'id': '"""url"""', 'refresh': '(False)'}), "(id='url', refresh=False)\n", (945, 970), True, 'import dash_core_components as dcc\n'), ((972, 999), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""page-content"""'}), "(id='page-content')\n", (980, 999), True, 'import dash_html_components as html\n'), ((1055, 1079), 'dash.dependencies.Input', 'Input', (['"""url"""', '"""pathname"""'], {}), "('url', 'pathname')\n", (1060, 1079), False, 'from dash.dependencies import Input, Output\n'), ((1340, 1364), 'dash_html_components.Div', 'html.Div', (['[navbar, body]'], {}), '([navbar, body])\n', (1348, 1364), True, 'import dash_html_components as html\n'), ((364, 383), 'dash_html_components.H2', 'html.H2', (['"""Overview"""'], {}), "('Overview')\n", (371, 383), True, 'import dash_html_components as html\n'), ((405, 540), 'dash_html_components.P', 'html.P', (['"""Welcome to the QCFractal Dashboard which will give a high\nlevel overview of the current state of the database.\n"""'], {}), '(\n """Welcome to the QCFractal Dashboard which will give a high\nlevel overview of the current state of the database.\n"""\n )\n', (411, 540), True, 'import dash_html_components as html\n'), ((554, 599), 'dash_bootstrap_components.Button', 'dbc.Button', (['"""View details"""'], {'color': '"""secondary"""'}), "('View details', color='secondary')\n", (564, 599), True, 'import dash_bootstrap_components as dbc\n'), ((695, 711), 'dash_html_components.H2', 'html.H2', (['"""Graph"""'], {}), "('Graph')\n", (702, 711), True, 'import dash_html_components as html\n'), ((729, 791), 'dash_core_components.Graph', 'dcc.Graph', ([], {'figure': "{'data': [{'x': [1, 2, 3], 'y': [1, 4, 9]}]}"}), "(figure={'data': [{'x': [1, 2, 3], 'y': [1, 4, 9]}]})\n", (738, 791), True, 'import dash_core_components as dcc\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 8 11:09:33 2019
@author: 10365
"""
#CreateDataSet
import numpy as np
import sys
sys.path.append('../subway_system')
sys.path.append('../ato_agent')
import TrainAndRoadCharacter as trc
import trainRunningModel as trm
import pandas as pds
import matplotlib.pyplot as plt
import atoController
def readSwitchPointSet(file):
    # returns SwitchPointMat
with open(file,mode='r',encoding='UTF-8-sig') as file_obj:
contents=file_obj.readlines()
        row=len(contents) # number of rows
        col=len(contents[0].split(',')) # number of columns
SwitchPointMat=np.zeros((row,col))
rIndex=0
for line in contents:
            line = line.strip() # remove the trailing '\n'; strip() returns a new string
listFormline=line.split(',')
cIndex=0
for ele in listFormline:
SwitchPointMat[rIndex,cIndex]=float(ele)
cIndex+=1
rIndex+=1
return SwitchPointMat
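# Note: for a plain comma-separated file, the hand-rolled parser above should be
# equivalent to this one-liner (illustrative alternative; the encoding keyword
# requires a reasonably recent NumPy):
#   SwitchPointMat = np.loadtxt(file, delimiter=',', encoding='UTF-8-sig')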
def SaveDataSet(num,data,filedir):
    data = data.reset_index()  # keep the result; reset_index does not modify in place
data.to_csv(filedir+str(num)+'_dataSet.csv',index=False)
return True
def ReadDataSet(num,filedir):
with open(filedir+str(num)+'_dataSet.csv',mode='r',encoding='UTF-8-sig') as file_obj:
contents=file_obj.readlines()
line=contents[0]
    line = line.strip()
strElems=line.split(',')
row=len(contents)
col=len(strElems)
dataMat=np.zeros((row,col))
for i in range(0,row):
line=contents[i]
        line = line.strip()
strElems=line.split(',')
for j in range(0,col):
dataMat[i,j]=float(strElems[j])
return dataMat
def TranslationBySimulation(switchPoint,index):
    # simulate a train run and convert the regime switch points into (s, v, t, u) samples
    dt=0.2 # time step (s)
startPoint=trc.SLStartPoint[0]
endPoint=trc.SLStartPoint[-1]
sl=trc.getRoadspeedLimit(startPoint)
gd=trc.getRoadGradinet(startPoint)
nsl=trc.getNextSpeedLimit(startPoint)
nsld=trc.getSpeedLimitEndPoint(startPoint)
    vList=[0] # speed
    sList=[startPoint] # position
    tList=[0] # time
    uList=[1] # acceleration command
    gdList=[gd] # gradient (per mille)
    slList=[sl] # section speed limit (m/s)
    nslList=[nsl] # next speed limit value (m/s)
    nsldList=[nsld] # distance to the next speed limit (m)
train=trm.Train_model(startPoint,0,0.6,dt)
PIDC=atoController.PidController(dt,8,10,1)
trd=trc.TrainAndRoadData()
t=0
accList=[0]
stateList=[2]
state=2
acc=0.1
while True:
t=t+dt
laststate=state
state=trc.getRunState(sList[-1],switchPoint)
if state==1 and laststate!=1:
vbar=vList[-1]
        if state==2:
            # traction
            acc=1
        if state==1:
            # cruising: hold the speed recorded when cruise mode was entered
            acc=PIDC.Step(vbar,vList[-1])
        elif state==0:
            # coasting
            acc=0
        elif state==-1:
            # braking
            acc=-1
if vList[-1]>trd.getEmerencyBrakeSpeed(sList[-1]):
acc=-1
out=train.Step(acc)
trueAcc=out['acc']
stateList.append(state)
accList.append(acc)
sl=trc.getRoadspeedLimit(out['S'])
gd=trc.getRoadGradinet(out['S'])
nsl=trc.getNextSpeedLimit(out['S'])
nsld=trc.getSpeedLimitEndPoint(out['S'])
vList.append(out['v'])
sList.append(out['S'])
tList.append(t)
uList.append(acc)
gdList.append(gd) #坡度 千分
slList.append(sl) #路段限速(m/s)
nslList.append(nsl) #下一限速的值(m/s)
nsldList.append(nsld) #下一限速的距离(m)
if out['S']>endPoint or out['v']<0:
break
    # plot the run (the data itself is assembled below)
plt.plot(sList,accList)
plt.plot(sList,stateList)
plt.show()
trc.plotSpeedLimitRoadGrad('abstract')
plt.plot(sList,vList)
plt.show()
plt.plot(tList,vList)
plt.show()
print('-------------------%d---------------------' %index)
dataList=[]
for i in range(0,len(vList)):
s=sList[i]
# t=tList[i]
sr=endPoint-sList[i]
t=tList[i]
tr=tList[-1]-t
vn=vList[i]
un=uList[i]
sr=round(sr,2)
tr=round(tr,2)
vn=round(vn,2)
sl=slList[i]
gd=gdList[i]
nsl=nslList[i]
nsld=nsldList[i]
line=[s,sr,tList[-1],tr,sl,gd,nsl,nsld,un]
        # if the list is 1-D it is appended in column form; if it is 2-D it is appended row by row
dataList.append(line)
tC=np.mat([sList,vList])
targetCurve=pds.DataFrame(data=tC.T,columns=['s','v'])
pData=pds.DataFrame(data=dataList,columns=['s','sr','t','tr','sl','gd','nsl','nsld','un'])
return pData, targetCurve, tList[-1]
def produceData(style):
if style=='train':
        # training data
swfile='TrainSwitchPointSet.csv'
outputdir='./TrainningDataSet/'
    # test data
elif style=='test':
swfile='TestSwitchPointSet.csv'
outputdir='./TestingdataSet/'
sps=readSwitchPointSet(swfile)
    print('Start generating data')
row=sps.shape[0]
maxLevel=0
for i in range(0,row):
        dataList,targetCurve,T=TranslationBySimulation(sps[i,:].tolist(),i)
SaveDataSet(i+1,dataList,outputdir)
targetCurve.to_csv('./targetCurveDataSet/'+str(round(T,2))+'_Curve.csv',index=False)
print(str(i))
print(str(T))
return maxLevel
if __name__ == '__main__':
ml=produceData('train')
    ml=produceData('test')
| [
"numpy.mat",
"TrainAndRoadCharacter.getRoadGradinet",
"trainRunningModel.Train_model",
"TrainAndRoadCharacter.TrainAndRoadData",
"pandas.DataFrame",
"atoController.PidController",
"TrainAndRoadCharacter.getNextSpeedLimit",
"TrainAndRoadCharacter.plotSpeedLimitRoadGrad",
"matplotlib.pyplot.plot",
"... | [((133, 168), 'sys.path.append', 'sys.path.append', (['"""../subway_system"""'], {}), "('../subway_system')\n", (148, 168), False, 'import sys\n'), ((169, 200), 'sys.path.append', 'sys.path.append', (['"""../ato_agent"""'], {}), "('../ato_agent')\n", (184, 200), False, 'import sys\n'), ((1810, 1843), 'TrainAndRoadCharacter.getRoadspeedLimit', 'trc.getRoadspeedLimit', (['startPoint'], {}), '(startPoint)\n', (1831, 1843), True, 'import TrainAndRoadCharacter as trc\n'), ((1851, 1882), 'TrainAndRoadCharacter.getRoadGradinet', 'trc.getRoadGradinet', (['startPoint'], {}), '(startPoint)\n', (1870, 1882), True, 'import TrainAndRoadCharacter as trc\n'), ((1891, 1924), 'TrainAndRoadCharacter.getNextSpeedLimit', 'trc.getNextSpeedLimit', (['startPoint'], {}), '(startPoint)\n', (1912, 1924), True, 'import TrainAndRoadCharacter as trc\n'), ((1934, 1971), 'TrainAndRoadCharacter.getSpeedLimitEndPoint', 'trc.getSpeedLimitEndPoint', (['startPoint'], {}), '(startPoint)\n', (1959, 1971), True, 'import TrainAndRoadCharacter as trc\n'), ((2307, 2346), 'trainRunningModel.Train_model', 'trm.Train_model', (['startPoint', '(0)', '(0.6)', 'dt'], {}), '(startPoint, 0, 0.6, dt)\n', (2322, 2346), True, 'import trainRunningModel as trm\n'), ((2353, 2394), 'atoController.PidController', 'atoController.PidController', (['dt', '(8)', '(10)', '(1)'], {}), '(dt, 8, 10, 1)\n', (2380, 2394), False, 'import atoController\n'), ((2400, 2422), 'TrainAndRoadCharacter.TrainAndRoadData', 'trc.TrainAndRoadData', ([], {}), '()\n', (2420, 2422), True, 'import TrainAndRoadCharacter as trc\n'), ((3707, 3731), 'matplotlib.pyplot.plot', 'plt.plot', (['sList', 'accList'], {}), '(sList, accList)\n', (3715, 3731), True, 'import matplotlib.pyplot as plt\n'), ((3735, 3761), 'matplotlib.pyplot.plot', 'plt.plot', (['sList', 'stateList'], {}), '(sList, stateList)\n', (3743, 3761), True, 'import matplotlib.pyplot as plt\n'), ((3765, 3775), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3773, 3775), True, 'import matplotlib.pyplot as plt\n'), ((3780, 3818), 'TrainAndRoadCharacter.plotSpeedLimitRoadGrad', 'trc.plotSpeedLimitRoadGrad', (['"""abstract"""'], {}), "('abstract')\n", (3806, 3818), True, 'import TrainAndRoadCharacter as trc\n'), ((3823, 3845), 'matplotlib.pyplot.plot', 'plt.plot', (['sList', 'vList'], {}), '(sList, vList)\n', (3831, 3845), True, 'import matplotlib.pyplot as plt\n'), ((3849, 3859), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3857, 3859), True, 'import matplotlib.pyplot as plt\n'), ((3864, 3886), 'matplotlib.pyplot.plot', 'plt.plot', (['tList', 'vList'], {}), '(tList, vList)\n', (3872, 3886), True, 'import matplotlib.pyplot as plt\n'), ((3890, 3900), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3898, 3900), True, 'import matplotlib.pyplot as plt\n'), ((4467, 4489), 'numpy.mat', 'np.mat', (['[sList, vList]'], {}), '([sList, vList])\n', (4473, 4489), True, 'import numpy as np\n'), ((4505, 4549), 'pandas.DataFrame', 'pds.DataFrame', ([], {'data': 'tC.T', 'columns': "['s', 'v']"}), "(data=tC.T, columns=['s', 'v'])\n", (4518, 4549), True, 'import pandas as pds\n'), ((4558, 4655), 'pandas.DataFrame', 'pds.DataFrame', ([], {'data': 'dataList', 'columns': "['s', 'sr', 't', 'tr', 'sl', 'gd', 'nsl', 'nsld', 'un']"}), "(data=dataList, columns=['s', 'sr', 't', 'tr', 'sl', 'gd',\n 'nsl', 'nsld', 'un'])\n", (4571, 4655), True, 'import pandas as pds\n'), ((603, 623), 'numpy.zeros', 'np.zeros', (['(row, col)'], {}), '((row, col))\n', (611, 623), True, 'import numpy as np\n'), ((1377, 1397), 
'numpy.zeros', 'np.zeros', (['(row, col)'], {}), '((row, col))\n', (1385, 1397), True, 'import numpy as np\n'), ((2566, 2605), 'TrainAndRoadCharacter.getRunState', 'trc.getRunState', (['sList[-1]', 'switchPoint'], {}), '(sList[-1], switchPoint)\n', (2581, 2605), True, 'import TrainAndRoadCharacter as trc\n'), ((3132, 3163), 'TrainAndRoadCharacter.getRoadspeedLimit', 'trc.getRoadspeedLimit', (["out['S']"], {}), "(out['S'])\n", (3153, 3163), True, 'import TrainAndRoadCharacter as trc\n'), ((3175, 3204), 'TrainAndRoadCharacter.getRoadGradinet', 'trc.getRoadGradinet', (["out['S']"], {}), "(out['S'])\n", (3194, 3204), True, 'import TrainAndRoadCharacter as trc\n'), ((3217, 3248), 'TrainAndRoadCharacter.getNextSpeedLimit', 'trc.getNextSpeedLimit', (["out['S']"], {}), "(out['S'])\n", (3238, 3248), True, 'import TrainAndRoadCharacter as trc\n'), ((3262, 3297), 'TrainAndRoadCharacter.getSpeedLimitEndPoint', 'trc.getSpeedLimitEndPoint', (["out['S']"], {}), "(out['S'])\n", (3287, 3297), True, 'import TrainAndRoadCharacter as trc\n')] |
#!/usr/bin/python3
import boto3
import json
if __name__ == '__main__':
ec2_client = boto3.client('ec2')
ec2_filter = [{'Name': 'tag:role', 'Values': ['ecs-cluster']}]
instances=ec2_client.describe_tags(Filters=ec2_filter)
#get only the instance_ids
instance_ids = []
for i in instances['Tags']:
instance_ids.append(i['ResourceId'])
    #fetch the full descriptions for those instance IDs
result = ec2_client.describe_instances(InstanceIds=instance_ids)
#turn them into DNS names
dns_names = []
for instance_list in result['Reservations']:
for instance in instance_list['Instances']:
if instance['State']['Name'] == 'running': #exclude terminated/stopped instances
dns_names.append(instance['PublicDnsName'])
#output as formatted json string for Ansible
output_dict = {"ecs-hosts": {"hosts": dns_names}}
print(json.dumps(output_dict))
| [
"json.dumps",
"boto3.client"
] | [((91, 110), 'boto3.client', 'boto3.client', (['"""ec2"""'], {}), "('ec2')\n", (103, 110), False, 'import boto3\n'), ((882, 905), 'json.dumps', 'json.dumps', (['output_dict'], {}), '(output_dict)\n', (892, 905), False, 'import json\n')] |
from cstream import stdwar
from svgen import Point, Vector, SVG, Figure, Camera, Domain, Map, Surface, Animation
from math import radians, sin, cos, pi, hypot, sqrt
import sys
from tqdm import tqdm
from svgen.svglib.math import Transform
COLOR = "#3703b3"
if len(sys.argv) > 1 and sys.argv[1] == "-o":
proj = Camera.ORTHOGRAPHIC
else:
proj = Camera.PERSPECTIVE
def f(t = 0.0):
def g(x, y):
a = 25.0
x = x / a
y = y / a
return -(a * sin(x + t) - a * cos(y + t))
return g
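# g is the height field z(x, y) = a*cos(y/a + t) - a*sin(x/a + t) with a = 25;
# advancing the phase t frame by frame (below) turns it into a travelling wave.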
w = 500
h = 500
n = 30
ani = Animation(delay=1000)
D = Domain((-250, 250), (250, -250))
TX = Transform.rotation(radians( 25), Vector.X)
TY = Transform.rotation(radians(-25), Vector.Y)
TZ = Transform.rotation(radians(-45), Vector.Z)
TS = Transform.scale(0.5)
kw = {"stroke": COLOR, "stroke-width": 2.5, "fill": "none"}
for i in tqdm(range(n)):
M = Map(f((i / n) * 2.0 * pi), D)
S = Surface(M)
S.transform(TY)
S.transform(TX)
S.transform(TS)
svg = SVG(w, h, proj=proj)
svg.surf(S, m = 20, n = 20, **kw)
ani.add_frame(svg)
else:
    ani.main(sys.argv)
| [
"svgen.Domain",
"svgen.Surface",
"math.radians",
"svgen.Animation",
"math.cos",
"svgen.svglib.math.Transform.scale",
"math.sin",
"svgen.SVG"
] | [((557, 578), 'svgen.Animation', 'Animation', ([], {'delay': '(1000)'}), '(delay=1000)\n', (566, 578), False, 'from svgen import Point, Vector, SVG, Figure, Camera, Domain, Map, Surface, Animation\n'), ((584, 616), 'svgen.Domain', 'Domain', (['(-250, 250)', '(250, -250)'], {}), '((-250, 250), (250, -250))\n', (590, 616), False, 'from svgen import Point, Vector, SVG, Figure, Camera, Domain, Map, Surface, Animation\n'), ((768, 788), 'svgen.svglib.math.Transform.scale', 'Transform.scale', (['(0.5)'], {}), '(0.5)\n', (783, 788), False, 'from svgen.svglib.math import Transform\n'), ((642, 653), 'math.radians', 'radians', (['(25)'], {}), '(25)\n', (649, 653), False, 'from math import radians, sin, cos, pi, hypot, sqrt\n'), ((690, 702), 'math.radians', 'radians', (['(-25)'], {}), '(-25)\n', (697, 702), False, 'from math import radians, sin, cos, pi, hypot, sqrt\n'), ((738, 750), 'math.radians', 'radians', (['(-45)'], {}), '(-45)\n', (745, 750), False, 'from math import radians, sin, cos, pi, hypot, sqrt\n'), ((922, 932), 'svgen.Surface', 'Surface', (['M'], {}), '(M)\n', (929, 932), False, 'from svgen import Point, Vector, SVG, Figure, Camera, Domain, Map, Surface, Animation\n'), ((1004, 1024), 'svgen.SVG', 'SVG', (['w', 'h'], {'proj': 'proj'}), '(w, h, proj=proj)\n', (1007, 1024), False, 'from svgen import Point, Vector, SVG, Figure, Camera, Domain, Map, Surface, Animation\n'), ((483, 493), 'math.sin', 'sin', (['(x + t)'], {}), '(x + t)\n', (486, 493), False, 'from math import radians, sin, cos, pi, hypot, sqrt\n'), ((500, 510), 'math.cos', 'cos', (['(y + t)'], {}), '(y + t)\n', (503, 510), False, 'from math import radians, sin, cos, pi, hypot, sqrt\n')] |
import sys
sys.path.insert(0, '../')
from mocap.settings import get_amass_validation_files, get_amass_test_files
from mocap.math.amass_fk import rotmat2euclidean, exp2euclidean
from mocap.visualization.sequence import SequenceVisualizer
from mocap.math.mirror_smpl import mirror_p3d
from mocap.datasets.dataset import Limb
from mocap.datasets.combined import Combined
from mocap.datasets.framerate import AdaptFramerate
import mocap.datasets.h36m as H36M
import numpy as np
import numpy.linalg as la
from mocap.datasets.amass import AMASS_SMPL3d, AMASS_QUAT, AMASS_EXP
data_loc = '/mnt/Data/datasets/amass'
val = get_amass_validation_files()
test = get_amass_test_files()
ds = AMASS_SMPL3d(val, data_loc=data_loc)
print(ds.get_joints_for_limb(Limb.LEFT_LEG))
ds = AdaptFramerate(Combined(ds), target_framerate=50)
print(ds.get_joints_for_limb(Limb.LEFT_LEG))
ds_h36m = Combined(H36M.H36M_FixedSkeleton(actors=['S5'], actions=['walking'], remove_global_Rt=True))
seq3d = ds[0]
seq3d_h36m = ds_h36m[0]
seq3d = seq3d[0:200].reshape((200, 14, 3))
seq3d_h36m = seq3d_h36m[0:200].reshape((200, 14, 3))
a = np.array([[[0.4, 0, 0]]])
b = np.array([[[-0.4, 0, 0]]])
seq3d += a
seq3d_h36m += b
vis_dir = '../output/'
vis = SequenceVisualizer(vis_dir, 'vis_amass_vs_h36m',
to_file=True,
mark_origin=False)
vis.plot(seq1=seq3d, seq2=seq3d_h36m, parallel=True,
create_video=True,
noaxis=False,
plot_jid=False,
) | [
"mocap.datasets.combined.Combined",
"sys.path.insert",
"mocap.settings.get_amass_validation_files",
"mocap.datasets.h36m.H36M_FixedSkeleton",
"mocap.datasets.amass.AMASS_SMPL3d",
"numpy.array",
"mocap.settings.get_amass_test_files",
"mocap.visualization.sequence.SequenceVisualizer"
] | [((11, 36), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../"""'], {}), "(0, '../')\n", (26, 36), False, 'import sys\n'), ((617, 645), 'mocap.settings.get_amass_validation_files', 'get_amass_validation_files', ([], {}), '()\n', (643, 645), False, 'from mocap.settings import get_amass_validation_files, get_amass_test_files\n'), ((653, 675), 'mocap.settings.get_amass_test_files', 'get_amass_test_files', ([], {}), '()\n', (673, 675), False, 'from mocap.settings import get_amass_validation_files, get_amass_test_files\n'), ((682, 718), 'mocap.datasets.amass.AMASS_SMPL3d', 'AMASS_SMPL3d', (['val'], {'data_loc': 'data_loc'}), '(val, data_loc=data_loc)\n', (694, 718), False, 'from mocap.datasets.amass import AMASS_SMPL3d, AMASS_QUAT, AMASS_EXP\n'), ((1112, 1137), 'numpy.array', 'np.array', (['[[[0.4, 0, 0]]]'], {}), '([[[0.4, 0, 0]]])\n', (1120, 1137), True, 'import numpy as np\n'), ((1142, 1168), 'numpy.array', 'np.array', (['[[[-0.4, 0, 0]]]'], {}), '([[[-0.4, 0, 0]]])\n', (1150, 1168), True, 'import numpy as np\n'), ((1229, 1315), 'mocap.visualization.sequence.SequenceVisualizer', 'SequenceVisualizer', (['vis_dir', '"""vis_amass_vs_h36m"""'], {'to_file': '(True)', 'mark_origin': '(False)'}), "(vis_dir, 'vis_amass_vs_h36m', to_file=True, mark_origin=\n False)\n", (1247, 1315), False, 'from mocap.visualization.sequence import SequenceVisualizer\n'), ((785, 797), 'mocap.datasets.combined.Combined', 'Combined', (['ds'], {}), '(ds)\n', (793, 797), False, 'from mocap.datasets.combined import Combined\n'), ((886, 972), 'mocap.datasets.h36m.H36M_FixedSkeleton', 'H36M.H36M_FixedSkeleton', ([], {'actors': "['S5']", 'actions': "['walking']", 'remove_global_Rt': '(True)'}), "(actors=['S5'], actions=['walking'],\n remove_global_Rt=True)\n", (909, 972), True, 'import mocap.datasets.h36m as H36M\n')] |
from django.test import TestCase
from review.models import Review
class TestReviewModel(TestCase):
'''
    Test suite for the Review model.
'''
def setUp(self):
'''
Set up test data for the review model.
'''
Review.objects.create(
feedback='Test review',
riderReview='Test review content',
)
def tearDown(self):
'''
Clean up test data for the review model.
'''
Review.objects.all().delete()
def test_review_feedback(self):
'''
Test review model for feedback.
'''
review = Review.objects.get(feedback='Test review')
self.assertEqual(review.feedback, 'Test review')
def test_review_rider_review(self):
'''
Test review model for rider review.
'''
review = Review.objects.get(riderReview='Test review content')
self.assertEqual(review.riderReview, 'Test review content')
def test_review_verbose_name_plural(self):
'''
Test review model for verbose name plural.
'''
self.assertEqual(str(Review._meta.verbose_name_plural), 'Customer feedback')
| [
"review.models.Review.objects.all",
"review.models.Review.objects.create",
"review.models.Review.objects.get"
] | [((266, 351), 'review.models.Review.objects.create', 'Review.objects.create', ([], {'feedback': '"""Test review"""', 'riderReview': '"""Test review content"""'}), "(feedback='Test review', riderReview='Test review content'\n )\n", (287, 351), False, 'from review.models import Review\n'), ((651, 693), 'review.models.Review.objects.get', 'Review.objects.get', ([], {'feedback': '"""Test review"""'}), "(feedback='Test review')\n", (669, 693), False, 'from review.models import Review\n'), ((884, 937), 'review.models.Review.objects.get', 'Review.objects.get', ([], {'riderReview': '"""Test review content"""'}), "(riderReview='Test review content')\n", (902, 937), False, 'from review.models import Review\n'), ((497, 517), 'review.models.Review.objects.all', 'Review.objects.all', ([], {}), '()\n', (515, 517), False, 'from review.models import Review\n')] |
"""Script to start webserving."""
from wembedder.app import create_app
app = create_app()
if __name__ == '__main__':
app.run(debug=True)
| [
"wembedder.app.create_app"
] | [((81, 93), 'wembedder.app.create_app', 'create_app', ([], {}), '()\n', (91, 93), False, 'from wembedder.app import create_app\n')] |
from openprocurement.tender.core.procedure.serializers.base import ListSerializer
from openprocurement.tender.core.procedure.serializers.document import ConfidentialDocumentSerializer
from openprocurement.tender.core.procedure.serializers.parameter import ParameterSerializer
from openprocurement.tender.esco.procedure.serializers.lot_value import LotValueSerializer
from openprocurement.tender.esco.procedure.serializers.value import ValueSerializer
from openprocurement.tender.openeu.procedure.serializers import BidSerializer as BaseBidSerializer
class BidSerializer(BaseBidSerializer):
serializers = {
"value": ValueSerializer,
"lotValues": ListSerializer(LotValueSerializer),
"documents": ListSerializer(ConfidentialDocumentSerializer),
"parameters": ListSerializer(ParameterSerializer),
}
| [
"openprocurement.tender.core.procedure.serializers.base.ListSerializer"
] | [((667, 701), 'openprocurement.tender.core.procedure.serializers.base.ListSerializer', 'ListSerializer', (['LotValueSerializer'], {}), '(LotValueSerializer)\n', (681, 701), False, 'from openprocurement.tender.core.procedure.serializers.base import ListSerializer\n'), ((724, 770), 'openprocurement.tender.core.procedure.serializers.base.ListSerializer', 'ListSerializer', (['ConfidentialDocumentSerializer'], {}), '(ConfidentialDocumentSerializer)\n', (738, 770), False, 'from openprocurement.tender.core.procedure.serializers.base import ListSerializer\n'), ((794, 829), 'openprocurement.tender.core.procedure.serializers.base.ListSerializer', 'ListSerializer', (['ParameterSerializer'], {}), '(ParameterSerializer)\n', (808, 829), False, 'from openprocurement.tender.core.procedure.serializers.base import ListSerializer\n')] |
from typing import List
import argparse
from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.config import get_cfg
from detectron2 import model_zoo
from detectron2.data.datasets import register_coco_instances
from detectron2.data import build_detection_test_loader
from trainers import MyTrainer
def test(args):
register_coco_instances(args.ds_test, {}, args.ds_test_json, args.ds_test_imgs)
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(args.yaml_config_file))
# set parameters
cfg.DATASETS.TEST = (args.ds_test,)
cfg.DATASETS.TRAIN = (args.ds_test,)
cfg.OUTPUT_DIR = args.out_folder_val
cfg.MODEL.WEIGHTS = args.weights
cfg.MODEL.RETINANET.NUM_CLASSES = args.num_classes
# perform inference
trainer = MyTrainer(cfg)
trainer.resume_or_load(resume=False)
evaluator = COCOEvaluator(args.ds_test, cfg, False, output_dir=args.out_folder_val)
val_loader = build_detection_test_loader(cfg, args.ds_test)
inference_on_dataset(trainer.model, val_loader, evaluator)
def parse_arguments():
"""
    Parse the command-line arguments for evaluation.
"""
parser = argparse.ArgumentParser(description="Training / Evaluating RetinaNet")
parser.add_argument("--ds_test", type=str, required=True)
parser.add_argument("--ds_test_json", type=str, required=True)
parser.add_argument("--ds_test_imgs", type=str, required=True)
parser.add_argument("--out_folder_val", type=str, required=True, help="output folder for val results")
parser.add_argument("--yaml_config_file", type=str, required=True, help="config file of the model to use")
parser.add_argument("--weights", type=str, default=None, help="load weights")
parser.add_argument("--num_classes", type=int, required=True, help="number of classes in the dataset")
return parser.parse_args()
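# Illustrative invocation (script name, paths and dataset name are placeholders;
# the model zoo config is one that detectron2 ships for RetinaNet):
#   python evaluate.py --ds_test my_test --ds_test_json annotations.json \
#     --ds_test_imgs images/ --out_folder_val output/ \
#     --yaml_config_file COCO-Detection/retinanet_R_50_FPN_1x.yaml \
#     --weights model_final.pth --num_classes 3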
if __name__ == "__main__":
args = parse_arguments()
    test(args)
| [
"trainers.MyTrainer",
"detectron2.data.datasets.register_coco_instances",
"detectron2.config.get_cfg",
"argparse.ArgumentParser",
"detectron2.model_zoo.get_config_file",
"detectron2.evaluation.inference_on_dataset",
"detectron2.data.build_detection_test_loader",
"detectron2.evaluation.COCOEvaluator"
] | [((356, 435), 'detectron2.data.datasets.register_coco_instances', 'register_coco_instances', (['args.ds_test', '{}', 'args.ds_test_json', 'args.ds_test_imgs'], {}), '(args.ds_test, {}, args.ds_test_json, args.ds_test_imgs)\n', (379, 435), False, 'from detectron2.data.datasets import register_coco_instances\n'), ((446, 455), 'detectron2.config.get_cfg', 'get_cfg', ([], {}), '()\n', (453, 455), False, 'from detectron2.config import get_cfg\n'), ((809, 823), 'trainers.MyTrainer', 'MyTrainer', (['cfg'], {}), '(cfg)\n', (818, 823), False, 'from trainers import MyTrainer\n'), ((881, 952), 'detectron2.evaluation.COCOEvaluator', 'COCOEvaluator', (['args.ds_test', 'cfg', '(False)'], {'output_dir': 'args.out_folder_val'}), '(args.ds_test, cfg, False, output_dir=args.out_folder_val)\n', (894, 952), False, 'from detectron2.evaluation import COCOEvaluator, inference_on_dataset\n'), ((970, 1016), 'detectron2.data.build_detection_test_loader', 'build_detection_test_loader', (['cfg', 'args.ds_test'], {}), '(cfg, args.ds_test)\n', (997, 1016), False, 'from detectron2.data import build_detection_test_loader\n'), ((1021, 1079), 'detectron2.evaluation.inference_on_dataset', 'inference_on_dataset', (['trainer.model', 'val_loader', 'evaluator'], {}), '(trainer.model, val_loader, evaluator)\n', (1041, 1079), False, 'from detectron2.evaluation import COCOEvaluator, inference_on_dataset\n'), ((1166, 1236), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Training / Evaluating RetinaNet"""'}), "(description='Training / Evaluating RetinaNet')\n", (1189, 1236), False, 'import argparse\n'), ((480, 528), 'detectron2.model_zoo.get_config_file', 'model_zoo.get_config_file', (['args.yaml_config_file'], {}), '(args.yaml_config_file)\n', (505, 528), False, 'from detectron2 import model_zoo\n')] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'npc/gui/uis/new_character.ui'
#
# Created by: PyQt5 UI code generator 5.7.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_NewCharacterDialog(object):
def setupUi(self, NewCharacterDialog):
NewCharacterDialog.setObjectName("NewCharacterDialog")
NewCharacterDialog.resize(450, 432)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(NewCharacterDialog.sizePolicy().hasHeightForWidth())
NewCharacterDialog.setSizePolicy(sizePolicy)
NewCharacterDialog.setMinimumSize(QtCore.QSize(450, 382))
NewCharacterDialog.setModal(True)
self.verticalLayout = QtWidgets.QVBoxLayout(NewCharacterDialog)
self.verticalLayout.setObjectName("verticalLayout")
self.infoForm = QtWidgets.QFormLayout()
self.infoForm.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint)
self.infoForm.setObjectName("infoForm")
self.typeLabel = QtWidgets.QLabel(NewCharacterDialog)
self.typeLabel.setObjectName("typeLabel")
self.infoForm.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.typeLabel)
self.typeSelect = QtWidgets.QComboBox(NewCharacterDialog)
self.typeSelect.setObjectName("typeSelect")
self.infoForm.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.typeSelect)
self.nameLine = QtWidgets.QLabel(NewCharacterDialog)
self.nameLine.setObjectName("nameLine")
self.infoForm.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.nameLine)
self.characterName = QtWidgets.QLineEdit(NewCharacterDialog)
self.characterName.setObjectName("characterName")
self.infoForm.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.characterName)
self.groupLabel = QtWidgets.QLabel(NewCharacterDialog)
self.groupLabel.setObjectName("groupLabel")
self.infoForm.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.groupLabel)
self.groupName = QtWidgets.QLineEdit(NewCharacterDialog)
self.groupName.setObjectName("groupName")
self.infoForm.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.groupName)
self.locLabel = QtWidgets.QLabel(NewCharacterDialog)
self.locLabel.setObjectName("locLabel")
self.infoForm.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.locLabel)
self.locName = QtWidgets.QLineEdit(NewCharacterDialog)
self.locName.setObjectName("locName")
self.infoForm.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.locName)
self.verticalLayout.addLayout(self.infoForm)
self.foreignBox = QtWidgets.QGroupBox(NewCharacterDialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.foreignBox.sizePolicy().hasHeightForWidth())
self.foreignBox.setSizePolicy(sizePolicy)
self.foreignBox.setMinimumSize(QtCore.QSize(0, 71))
self.foreignBox.setCheckable(True)
self.foreignBox.setChecked(False)
self.foreignBox.setObjectName("foreignBox")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.foreignBox)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.foreignText = QtWidgets.QLineEdit(self.foreignBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.foreignText.sizePolicy().hasHeightForWidth())
self.foreignText.setSizePolicy(sizePolicy)
self.foreignText.setClearButtonEnabled(True)
self.foreignText.setObjectName("foreignText")
self.verticalLayout_2.addWidget(self.foreignText)
self.verticalLayout.addWidget(self.foreignBox)
self.deceasedBox = QtWidgets.QGroupBox(NewCharacterDialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.deceasedBox.sizePolicy().hasHeightForWidth())
self.deceasedBox.setSizePolicy(sizePolicy)
self.deceasedBox.setMinimumSize(QtCore.QSize(0, 116))
self.deceasedBox.setCheckable(True)
self.deceasedBox.setChecked(False)
self.deceasedBox.setObjectName("deceasedBox")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.deceasedBox)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.deceasedText = QtWidgets.QPlainTextEdit(self.deceasedBox)
self.deceasedText.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.deceasedText.setObjectName("deceasedText")
self.verticalLayout_3.addWidget(self.deceasedText)
self.verticalLayout.addWidget(self.deceasedBox)
self.buttonBox = QtWidgets.QDialogButtonBox(NewCharacterDialog)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayout.addWidget(self.buttonBox)
self.typeLabel.setBuddy(self.typeSelect)
self.nameLine.setBuddy(self.characterName)
self.groupLabel.setBuddy(self.groupName)
self.retranslateUi(NewCharacterDialog)
self.buttonBox.accepted.connect(NewCharacterDialog.accept)
self.buttonBox.rejected.connect(NewCharacterDialog.reject)
QtCore.QMetaObject.connectSlotsByName(NewCharacterDialog)
NewCharacterDialog.setTabOrder(self.typeSelect, self.characterName)
NewCharacterDialog.setTabOrder(self.characterName, self.groupName)
NewCharacterDialog.setTabOrder(self.groupName, self.locName)
NewCharacterDialog.setTabOrder(self.locName, self.foreignBox)
NewCharacterDialog.setTabOrder(self.foreignBox, self.foreignText)
NewCharacterDialog.setTabOrder(self.foreignText, self.deceasedBox)
NewCharacterDialog.setTabOrder(self.deceasedBox, self.deceasedText)
def retranslateUi(self, NewCharacterDialog):
_translate = QtCore.QCoreApplication.translate
NewCharacterDialog.setWindowTitle(_translate("NewCharacterDialog", "New Character"))
self.typeLabel.setText(_translate("NewCharacterDialog", "T&ype"))
self.typeSelect.setToolTip(_translate("NewCharacterDialog", "Type of character. Determines which fields are available."))
self.nameLine.setText(_translate("NewCharacterDialog", "&Name"))
self.characterName.setToolTip(_translate("NewCharacterDialog", "The character\'s name. Use \' - \' to add a brief note."))
self.groupLabel.setText(_translate("NewCharacterDialog", "&Group"))
self.groupName.setToolTip(_translate("NewCharacterDialog", "Main group that the character belongs to"))
self.locLabel.setText(_translate("NewCharacterDialog", "Location"))
self.locName.setToolTip(_translate("NewCharacterDialog", "Place where the character lives within the main setting"))
self.foreignBox.setTitle(_translate("NewCharacterDialog", "Fore&ign"))
self.foreignText.setPlaceholderText(_translate("NewCharacterDialog", "Where do they live?"))
self.deceasedBox.setTitle(_translate("NewCharacterDialog", "&Deceased"))
self.deceasedText.setPlaceholderText(_translate("NewCharacterDialog", "How did they die?"))
| [
"PyQt5.QtWidgets.QDialogButtonBox",
"PyQt5.QtWidgets.QComboBox",
"PyQt5.QtWidgets.QPlainTextEdit",
"PyQt5.QtCore.QMetaObject.connectSlotsByName",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QGroupBox",
"PyQt5.QtWidgets.QSizePolicy",
"PyQt5.QtWidgets.QFormLayout",
"PyQ... | [((465, 571), 'PyQt5.QtWidgets.QSizePolicy', 'QtWidgets.QSizePolicy', (['QtWidgets.QSizePolicy.MinimumExpanding', 'QtWidgets.QSizePolicy.MinimumExpanding'], {}), '(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.\n QSizePolicy.MinimumExpanding)\n', (486, 571), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((932, 973), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', (['NewCharacterDialog'], {}), '(NewCharacterDialog)\n', (953, 973), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1058, 1081), 'PyQt5.QtWidgets.QFormLayout', 'QtWidgets.QFormLayout', ([], {}), '()\n', (1079, 1081), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1230, 1266), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['NewCharacterDialog'], {}), '(NewCharacterDialog)\n', (1246, 1266), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1427, 1466), 'PyQt5.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['NewCharacterDialog'], {}), '(NewCharacterDialog)\n', (1446, 1466), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1628, 1664), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['NewCharacterDialog'], {}), '(NewCharacterDialog)\n', (1644, 1664), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1825, 1864), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['NewCharacterDialog'], {}), '(NewCharacterDialog)\n', (1844, 1864), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2037, 2073), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['NewCharacterDialog'], {}), '(NewCharacterDialog)\n', (2053, 2073), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2236, 2275), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['NewCharacterDialog'], {}), '(NewCharacterDialog)\n', (2255, 2275), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2434, 2470), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['NewCharacterDialog'], {}), '(NewCharacterDialog)\n', (2450, 2470), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2625, 2664), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['NewCharacterDialog'], {}), '(NewCharacterDialog)\n', (2644, 2664), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2872, 2911), 'PyQt5.QtWidgets.QGroupBox', 'QtWidgets.QGroupBox', (['NewCharacterDialog'], {}), '(NewCharacterDialog)\n', (2891, 2911), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2933, 3021), 'PyQt5.QtWidgets.QSizePolicy', 'QtWidgets.QSizePolicy', (['QtWidgets.QSizePolicy.Preferred', 'QtWidgets.QSizePolicy.Fixed'], {}), '(QtWidgets.QSizePolicy.Preferred, QtWidgets.\n QSizePolicy.Fixed)\n', (2954, 3021), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3467, 3505), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', (['self.foreignBox'], {}), '(self.foreignBox)\n', (3488, 3505), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3597, 3633), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.foreignBox'], {}), '(self.foreignBox)\n', (3616, 3633), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3655, 3754), 'PyQt5.QtWidgets.QSizePolicy', 'QtWidgets.QSizePolicy', (['QtWidgets.QSizePolicy.MinimumExpanding', 'QtWidgets.QSizePolicy.Preferred'], {}), '(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.\n QSizePolicy.Preferred)\n', (3676, 3754), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4220, 4259), 'PyQt5.QtWidgets.QGroupBox', 'QtWidgets.QGroupBox', (['NewCharacterDialog'], {}), '(NewCharacterDialog)\n', (4239, 4259), False, 'from PyQt5 import QtCore, QtGui, 
QtWidgets\n'), ((4281, 4373), 'PyQt5.QtWidgets.QSizePolicy', 'QtWidgets.QSizePolicy', (['QtWidgets.QSizePolicy.Expanding', 'QtWidgets.QSizePolicy.Expanding'], {}), '(QtWidgets.QSizePolicy.Expanding, QtWidgets.\n QSizePolicy.Expanding)\n', (4302, 4373), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4827, 4866), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', (['self.deceasedBox'], {}), '(self.deceasedBox)\n', (4848, 4866), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4959, 5001), 'PyQt5.QtWidgets.QPlainTextEdit', 'QtWidgets.QPlainTextEdit', (['self.deceasedBox'], {}), '(self.deceasedBox)\n', (4983, 5001), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5268, 5314), 'PyQt5.QtWidgets.QDialogButtonBox', 'QtWidgets.QDialogButtonBox', (['NewCharacterDialog'], {}), '(NewCharacterDialog)\n', (5294, 5314), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5865, 5922), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['NewCharacterDialog'], {}), '(NewCharacterDialog)\n', (5902, 5922), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((836, 858), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(450)', '(382)'], {}), '(450, 382)\n', (848, 858), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3277, 3296), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(0)', '(71)'], {}), '(0, 71)\n', (3289, 3296), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4632, 4652), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(0)', '(116)'], {}), '(0, 116)\n', (4644, 4652), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Contains a context manager for temporarily introducing an environment var.
import os
import contextlib
@contextlib.contextmanager
def use_environment_variable(key, value):
""" Used to temporarily introduce a new environment variable as if it
was set by the execution environment.
:param str key: key of environment variable
:param str value: value of environment variable
"""
assert type(value) == str
assert key not in os.environ
os.environ[key] = value
assert key in os.environ
yield
assert key in os.environ
os.environ.pop(key)
assert key not in os.environ
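# A minimal usage sketch (hypothetical key/value). Note that the variable is
# only removed on a clean exit, since the pop is not wrapped in try/finally:
#
#     with use_environment_variable("MY_FLAG", "1"):
#         assert os.environ["MY_FLAG"] == "1"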
| [
"os.environ.pop"
] | [((617, 636), 'os.environ.pop', 'os.environ.pop', (['key'], {}), '(key)\n', (631, 636), False, 'import os\n')] |
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from PySide2.QtWidgets import QVBoxLayout, QWidget
from traitlets import HasTraits, Instance, Bool, directional_link
from regexport.model import AppState
from regexport.views.utils import HasWidget
matplotlib.use('Qt5Agg')
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, NavigationToolbar2QT
from matplotlib.figure import Figure
class PlotModel(HasTraits):
selected_data = Instance(np.ndarray, allow_none=True)
data = Instance(np.ndarray, allow_none=True)
show = Bool(default_value=True)
def register(self, model: AppState):
self.model = model
model.observe(self.update, ['cells', 'selected_cells', 'column_to_plot', 'show_plots'])
directional_link((model, 'show_plots'), (self, 'show'))
def update(self, change):
model = self.model
if model.selected_cells is None or model.selected_cells[model.column_to_plot].dtype.name == 'category':
self.selected_data = None
else:
self.data = model.cells[model.column_to_plot].values
self.selected_data = model.selected_cells[model.column_to_plot].values
class PlotView(HasWidget):
# Code from https://www.pythonguis.com/tutorials/plotting-matplotlib/
def __init__(self, model: PlotModel, width=5, height=4, dpi=100):
# Make a figure, turn it into a canvas widget
widget = QWidget()
layout = QVBoxLayout()
widget.setLayout(layout)
HasWidget.__init__(self, widget=widget)
self.fig, self.axes = plt.subplots(ncols=2, figsize=(width, height), dpi=dpi)
self.canvas = FigureCanvasQTAgg(figure=self.fig)
layout.addWidget(self.canvas)
self.toolbar = NavigationToolbar2QT(self.canvas, widget)
layout.addWidget(self.toolbar)
self.model = model
self.model.observe(self.render)
def render(self, change):
if self.model.show:
for ax in self.axes:
ax.cla()
if change.new is None:
return
else:
selected_data = self.model.selected_data
if selected_data is not None:
data = selected_data
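                    # Bin edges are derived from the positive values only;
                    # the prepended [0, 1] edge pair gives zero-valued cells
                    # their own leftmost bin in the histogram below.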
_, edges = np.histogram(data[data > 0], bins='auto')
all_edges = np.concatenate([[0, 1], edges])
self.axes[0].hist(
data,
bins=all_edges,
cumulative=False,
# density=True,
)
data = self.model.data
ax: plt.Axes = self.axes[1]
ax.hist(
data,
bins=50,
cumulative=True,
density=True,
)
if selected_data is not None:
ax.vlines(selected_data.max(), 0, 1, colors='black', linestyles='dotted')
# self.axes[1].set_ylim(0, 1)
self.canvas.draw()
| [
"traitlets.directional_link",
"matplotlib.backends.backend_qt5agg.NavigationToolbar2QT",
"numpy.histogram",
"regexport.views.utils.HasWidget.__init__",
"matplotlib.use",
"traitlets.Instance",
"PySide2.QtWidgets.QWidget",
"numpy.concatenate",
"matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg",
... | [((269, 293), 'matplotlib.use', 'matplotlib.use', (['"""Qt5Agg"""'], {}), "('Qt5Agg')\n", (283, 293), False, 'import matplotlib\n'), ((470, 507), 'traitlets.Instance', 'Instance', (['np.ndarray'], {'allow_none': '(True)'}), '(np.ndarray, allow_none=True)\n', (478, 507), False, 'from traitlets import HasTraits, Instance, Bool, directional_link\n'), ((519, 556), 'traitlets.Instance', 'Instance', (['np.ndarray'], {'allow_none': '(True)'}), '(np.ndarray, allow_none=True)\n', (527, 556), False, 'from traitlets import HasTraits, Instance, Bool, directional_link\n'), ((568, 592), 'traitlets.Bool', 'Bool', ([], {'default_value': '(True)'}), '(default_value=True)\n', (572, 592), False, 'from traitlets import HasTraits, Instance, Bool, directional_link\n'), ((766, 821), 'traitlets.directional_link', 'directional_link', (["(model, 'show_plots')", "(self, 'show')"], {}), "((model, 'show_plots'), (self, 'show'))\n", (782, 821), False, 'from traitlets import HasTraits, Instance, Bool, directional_link\n'), ((1437, 1446), 'PySide2.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (1444, 1446), False, 'from PySide2.QtWidgets import QVBoxLayout, QWidget\n'), ((1465, 1478), 'PySide2.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (1476, 1478), False, 'from PySide2.QtWidgets import QVBoxLayout, QWidget\n'), ((1520, 1559), 'regexport.views.utils.HasWidget.__init__', 'HasWidget.__init__', (['self'], {'widget': 'widget'}), '(self, widget=widget)\n', (1538, 1559), False, 'from regexport.views.utils import HasWidget\n'), ((1591, 1646), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(2)', 'figsize': '(width, height)', 'dpi': 'dpi'}), '(ncols=2, figsize=(width, height), dpi=dpi)\n', (1603, 1646), True, 'import matplotlib.pyplot as plt\n'), ((1669, 1703), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg', 'FigureCanvasQTAgg', ([], {'figure': 'self.fig'}), '(figure=self.fig)\n', (1686, 1703), False, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, NavigationToolbar2QT\n'), ((1766, 1807), 'matplotlib.backends.backend_qt5agg.NavigationToolbar2QT', 'NavigationToolbar2QT', (['self.canvas', 'widget'], {}), '(self.canvas, widget)\n', (1786, 1807), False, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, NavigationToolbar2QT\n'), ((2283, 2324), 'numpy.histogram', 'np.histogram', (['data[data > 0]'], {'bins': '"""auto"""'}), "(data[data > 0], bins='auto')\n", (2295, 2324), True, 'import numpy as np\n'), ((2357, 2388), 'numpy.concatenate', 'np.concatenate', (['[[0, 1], edges]'], {}), '([[0, 1], edges])\n', (2371, 2388), True, 'import numpy as np\n')] |
"""
Run all the example files and convert them to markdown files containing the output.
Uses `pweave`. It is not installed by default. To install:
pip install pweave
"""
import pweave, datetime, glob, os
def publish_to_markdown(python_file: str, output_file: str):
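    # Pweave pipeline as used here: parse the source, execute its code chunks,
    # format the result as markdown, then write it to output_file.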
doc = pweave.Pweb(python_file, kernel="python3", doctype="markdown", output=output_file)
doc.theme = "skeleton" # The default option is skeleton , other options are pweave (the old theme), bootstrap , cerulean and journal. All look the same to me.
doc.read()
doc.run()
doc.format()
doc.formatted += f"\n---\nMarkdown generated automatically from [{python_file}]({python_file}) using [Pweave](http://mpastell.com/pweave) {pweave.__version__} on {datetime.date.today()}.\n"
doc.write()
if __name__ == "__main__":
for python_file in glob.glob("*.py"):
print(python_file)
if python_file != os.path.basename(__file__):
output_file = python_file.replace(".py", ".md")
publish_to_markdown(python_file, output_file)
| [
"datetime.date.today",
"os.path.basename",
"glob.glob",
"pweave.Pweb"
] | [((284, 371), 'pweave.Pweb', 'pweave.Pweb', (['python_file'], {'kernel': '"""python3"""', 'doctype': '"""markdown"""', 'output': 'output_file'}), "(python_file, kernel='python3', doctype='markdown', output=\n output_file)\n", (295, 371), False, 'import pweave, datetime, glob, os\n'), ((841, 858), 'glob.glob', 'glob.glob', (['"""*.py"""'], {}), "('*.py')\n", (850, 858), False, 'import pweave, datetime, glob, os\n'), ((746, 767), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (765, 767), False, 'import pweave, datetime, glob, os\n'), ((913, 939), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (929, 939), False, 'import pweave, datetime, glob, os\n')] |
import binascii
import uuid
from collections import UserDict
from functools import cmp_to_key, wraps
from nanolib import Block as RawBlock
from nanolib import nbase32_to_bytes, get_account_id
__all__ = (
"RawBlock", "BlockProxy", "Callbacks", "CallbackSlot", "AccountIDDict"
)
class BlockProxy(object):
"""
Allow an object containing 'block' Block instance to access underlying
RawBlock fields directly.
Eg.
instead of `block.block.account`, you can access `block.account`
"""
# Writable fields
block_type = property(
lambda x: x.block.block_type,
lambda x, v: x.block.set_block_type(v))
account = property(
lambda x: x.block.account,
lambda x, v: x.block.set_account(v))
source = property(
lambda x: x.block.source,
lambda x, v: x.block.set_source(v))
previous = property(
lambda x: x.block.previous,
lambda x, v: x.block.set_previous(v))
destination = property(
lambda x: x.block.destination,
lambda x, v: x.block.set_destination(v))
representative = property(
lambda x: x.block.representative,
lambda x, v: x.block.set_representative(v))
balance = property(
lambda x: x.block.balance,
lambda x, v: x.block.set_balance(v))
link = property(
lambda x: x.block.link,
lambda x, v: x.block.set_link(v))
link_as_account = property(
lambda x: x.block.link_as_account,
lambda x, v: x.block.set_link_as_account(v))
signature = property(
lambda x: x.block.signature,
lambda x, v: x.block.set_signature(v))
work = property(
lambda x: x.block.work,
lambda x, v: x.block.set_work(v))
difficulty = property(
lambda x: x.block.difficulty,
lambda x, v: x.block.set_difficulty(v))
# Read-only fields
tx_type = property(lambda x: x.block.tx_type)
block_hash = property(lambda x: x.block.block_hash)
work_block_hash = property(lambda x: x.block.work_block_hash)
complete = property(lambda x: x.block.complete)
has_valid_work = property(lambda x: x.block.has_valid_work)
has_valid_signature = property(lambda x: x.block.has_valid_signature)
work_value = property(lambda x: x.block.work_value)
# Methods
verify_work = property(lambda x: x.block.verify_work)
verify_signature = property(lambda x: x.block.verify_signature)
sign = property(lambda x: x.block.sign)
solve_work = property(lambda x: x.block.solve_work)
json = property(lambda x: x.block.json)
# 'account' alias
account_id = property(
lambda x: x.block.account,
lambda x, v: x.block.set_account(v))
class CallbackSlot:
"""
Callback container with an identifier and arbitrary amount of
callback functions
"""
def __init__(self, name):
"""
:param str name: Name for the action triggering a callback
"""
self.name = name
self.funcs = []
def add(self, func, func_id=None):
"""
Add a callback function. If `func_id` is not provided, an
identifier for the callback function is created automatically
:param func: Callback function to add
        :param str func_id: Unique identifier for the callback function,
                            used to remove it later
"""
if not func_id:
func_id = uuid.uuid4().hex
self.funcs.append((func_id, func))
return func_id
def remove(self, func_id):
"""
Remove a callback function using its identifier.
:param str func_id: Unique identifier for a callback function
"""
func_id_to_remove = func_id
self.funcs = [
(func_id, func) for func_id, func in self.funcs
if func_id_to_remove != func_id
]
def invoke(self, *args, **kwargs):
"""
Run all callback functions with the given arguments
"""
for _, func in self.funcs:
func(*args, **kwargs)
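    # Hypothetical usage sketch:
    #   slot = CallbackSlot("on_confirmed")
    #   func_id = slot.add(lambda block: print(block))
    #   slot.invoke(some_block)   # runs every registered callback
    #   slot.remove(func_id)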
class Callbacks:
"""
Collection of CallbackSlot instances
Allows a collection of callbacks to be more easily passed to
methods that execute callbacks
"""
def __init__(self, names):
"""
:param list names: List of action names
"""
for name in names:
setattr(self, name, CallbackSlot(name))
def account_id_to_bytes(account_id):
"""
Convert an account ID to bytes, ignoring the checksum and the prefix.
Used for data structures that accept account IDs.
:param str account_id: Account ID
:return: Account ID as bytes
:rtype: bytes
"""
has_valid_prefix = (
account_id.startswith("xrb_") or account_id.startswith("nano_")
)
if not has_valid_prefix:
raise ValueError("Account ID has invalid prefix")
# Get the public key portion of the account ID and decode it into
# raw bytes
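    # e.g. an "xrb_"-prefixed ID is the prefix + 52 chars of encoded public key
    # + 8 chars of checksum, so [-60:-8] selects just the public key portion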
return nbase32_to_bytes(account_id[-60:-8])
def account_ids_equal(account_id_a, account_id_b):
"""
Compare two account IDs while discarding the account prefix
:return: Whether the account IDs are equal
:rtype: bool
"""
return account_id_a[-60:-8] == account_id_b[-60:-8]
def normalize_account_id(account_id):
"""
Normalize account ID to make it possible to use normal string
comparisons between different account IDs
:return: Normalized account ID
:rtype: str
"""
return account_id.replace("nano_", "xrb_")
class AccountIDDict(UserDict):
"""
A dictionary that accepts NANO account IDs and uses the public key as
the underlying key.
    The account prefix is ignored, meaning that account IDs with different
    prefixes but the same public key are considered identical.
The account checksum (last 8 chars) is also ignored to improve performance.
"""
def __getitem__(self, key):
key = account_id_to_bytes(key)
return self.data[key]
def __setitem__(self, key, val):
key = account_id_to_bytes(key)
self.data[key] = val
def __delitem__(self, key):
key = account_id_to_bytes(key)
del self.data[key]
def __contains__(self, key):
key = account_id_to_bytes(key)
return key in self.data
def items(self):
return [
(
get_account_id(
public_key=binascii.hexlify(key).decode(),
prefix="xrb_"
),
value
) for key, value in self.data.items()
]
def values(self):
return self.data.values()
def keys(self):
return [
get_account_id(
public_key=binascii.hexlify(key).decode(),
prefix="xrb_"
) for key in self.data.keys()
]
def to_dict(self):
return {
get_account_id(
public_key=binascii.hexlify(key).decode(),
prefix="xrb_"
): value
for key, value in self.data.items()
}
| [
"binascii.hexlify",
"nanolib.nbase32_to_bytes",
"uuid.uuid4"
] | [((4989, 5025), 'nanolib.nbase32_to_bytes', 'nbase32_to_bytes', (['account_id[-60:-8]'], {}), '(account_id[-60:-8])\n', (5005, 5025), False, 'from nanolib import nbase32_to_bytes, get_account_id\n'), ((3431, 3443), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3441, 3443), False, 'import uuid\n'), ((6761, 6782), 'binascii.hexlify', 'binascii.hexlify', (['key'], {}), '(key)\n', (6777, 6782), False, 'import binascii\n'), ((6971, 6992), 'binascii.hexlify', 'binascii.hexlify', (['key'], {}), '(key)\n', (6987, 6992), False, 'import binascii\n'), ((6444, 6465), 'binascii.hexlify', 'binascii.hexlify', (['key'], {}), '(key)\n', (6460, 6465), False, 'import binascii\n')] |
import json
from copy import deepcopy
from functools import total_ordering
from typing import List, Any, Union
from canvasxpress.config.type import CXConfig, CXString, CXInt, CXFloat, CXBool, \
CXList, CXDict, CXRGBColor, CXRGBAColor
from canvasxpress.data.convert import CXDictConvertable, CXListConvertable
@total_ordering
class CXConfigs(
CXDictConvertable,
CXListConvertable
):
"""
CXConfigs provides support for addressing a collection of `CXConfig` values.
"""
__configs: List[CXConfig] = None
"""
The `CXConfig` objects associated with this collection.
"""
def __init__(self, *configs: Union[CXConfig, tuple, dict, list]):
"""
Initializes a new `CXConfigs` object with zero or more `CXConfig`
objects.
Example:
```python
configs = CXConfigs(
CXString("colorScheme", "ColorSpectrum"),
("lineType", "spline"),
{ "objectColorTransparency": 0.3 }
)
```
:param configs: `Union[CXConfig, tuple, dict], ...`
A list of zero or more `CXConfig` objects to associate.
"""
self.__configs: List[CXConfig] = list()
for config in configs:
self.add(config)
def remove(self, label: str) -> Union[CXConfig, None]:
"""
        Removes the CXConfig with the given label; if found, the removed
        config is returned.
        :param label: `str`
The label of the CXConfig to remove.
:returns: `Union[CXConfig, None]`
If a CXConfig is removed then it is returned, otherwise None.
"""
candidate = None
for config in self.configs:
if str(label) == config.label:
candidate = config
self.configs.remove(config)
break
return candidate
def add(self, config: Union[CXConfig, tuple, dict, list]) -> 'CXConfigs':
"""
Adds the specified configuration to the collection. This method
supports chaining for efficient additions of `CXConfig` objects.
Example:
```python
configs = CXConfigs()
configs \
            .add(CXString("colorScheme", "ColorSpectrum")) \
.add(("lineType", "spline")) \
.add({ "objectColorTransparency": 0.3 })
```
:param config: `Union[CXConfig, tuple, dict, list]`
            The `CXConfig` to associate. Cannot be `None`. `tuple` and `list`
config values are expected to be two elements in length, with the
first representing the label and the second representing the value.
The label portion will be converted to a string using `str`.
"""
if config is None:
raise ValueError("configs cannot be None.")
elif isinstance(config, dict):
for param in config.keys():
self.set_param(
str(param),
config.get(param)
)
elif isinstance(config, (tuple, list)):
if len(config) > 0:
if isinstance(config[0], (tuple, list)):
for item in config:
self.add(item)
else:
if len(config) < 2:
raise ValueError(
"list and tuple config representations of "
"configuration parameters must have two elements: "
"the first is the label, and the second is the "
"value."
)
self.set_param(
str(config[0]),
config[1]
)
elif isinstance(config, CXConfig):
if config not in self.__configs:
self.__configs.append(config)
else:
raise TypeError("configs must be a type of CXConfig.")
return self
def get_param(
self,
label: str
) -> Union[CXConfig, None]:
"""
Provides the CXConfig with the indicated label.
:param label: `str`
            The name of the config to find.
:returns: `Union[CXConfig, None]`
The CXConfig or None if such a labelled item is not associated.
"""
candidate = None
for config in self.configs:
if config.label == label:
candidate = config
break
return candidate
def set_param(
self,
label: str,
value: Any
) -> 'CXConfigs':
"""
Adds a parameter to the configs. Attempts to infer the kind of param to
add, and if a type can be deduced then an appropriate CXConfig is used.
If a type cannot be inferred the a text type is assumed. This method
supports chaining for efficient additions of `CXConfig` objects.
Example:
```python
configs = CXConfigs()
configs \
.set_param("1", "rgb(3, 172, 198)") \
.set_param("2", 2) \
.set_param("3", True)
```
:param value: `Any`
The parameter to infer and associate. Cannot be `None`. Defaults
to `str` if the type cannot otherwise be deduced.
"""
if (label is None) or (value is None):
raise ValueError("Neither label nor value can be None.")
else:
existing_config_used = False
for config in self.configs:
if config.label == label:
if not isinstance(config.value, type(value)):
raise ValueError(
f"CXConfig {repr(config)} is already a member and"
f" has a different type than what is provided."
f" Remove the existing CXConfig object first or use"
f" the same type."
)
else:
config.value = value
existing_config_used = True
break
if not existing_config_used:
value_type = type(value)
if value_type is int:
candidate = CXInt(
label,
value
)
elif value_type is float:
candidate = CXFloat(
label,
value
)
elif value_type is bool:
candidate = CXBool(
label,
value
)
elif value_type is dict:
if CXRGBAColor.is_color_dict(value):
candidate = CXRGBAColor(
label,
value
)
elif CXRGBColor.is_color_dict(value):
candidate = CXRGBColor(
label,
value
)
else:
candidate = CXDict(
label,
value
)
elif value_type is list:
if CXRGBAColor.is_color_list(value):
candidate = CXRGBAColor(
label,
value
)
elif CXRGBColor.is_color_list(value):
candidate = CXRGBColor(
label,
value
)
else:
candidate = CXList(
label,
value
)
elif value_type is set:
set_persona: set = value
candidate = CXList(
label,
list(set_persona)
)
else:
if CXRGBAColor.is_color_str(value):
candidate = CXRGBAColor(
label,
value
)
elif CXRGBColor.is_color_str(value):
candidate = CXRGBColor(
label,
value
)
else:
candidate = CXString(
label,
value
)
self.add(candidate)
return self
@property
def configs(self) -> List[CXConfig]:
"""
Provides access to the list of associated `CXConfig` objects.
:returns: `List[CXConfig]`
"""
return self.__configs
def render_to_dict(self) -> dict:
"""
Provides a `dict` representation of the configuration values.
:returns: `dict`
A `dict` representing the configuration values arranged as a map
of keys and values.
Given:
```python
configs = CXConfigs()
configs \
.set_param("1", "rgb(3, 172, 198)") \
.set_param("2", 2) \
.set_param("3", True)
```
Then `render_to_dict()` results in:
```python
{
"1": "rgb(3, 172, 198)",
"2": 2,
"3": True,
}
```
"""
return CXConfigs.merge_configs(self.configs)
def render_to_list(
self,
**kwargs
) -> list:
"""
Provides a `list` representation of the configuration values.
:returns: `list`
A `list` representing the configuration values arranged as a map
of keys and values.
Given:
```python
configs = CXConfigs()
configs \
.set_param("1", "rgb(3, 172, 198)") \
.set_param("2", 2) \
.set_param("3", True)
```
Then `render_to_list()` results in:
```python
[
["1", "rgb(3, 172, 198)"],
["2": 2],
["3": True],
]
```
"""
configs = self.render_to_dict()
return [
[
                str(key),  # config label
                configs[key]  # config value
]
for key in configs.keys()
]
@classmethod
def merge_configs(
cls,
configs: List[CXConfig]
) -> dict:
"""
Given a list of CXConfig objects, a dictionary of unique attributes is
generated and provided.
:returns: `dict`
A dict of zero or more keys representing the CXConfigs.
"""
unique_configs = list()
        if configs is not None:
            for config in configs:
                if config not in unique_configs:
                    unique_configs.append(config)
dict_configs = dict()
for config in unique_configs:
dict_configs = {
**dict_configs,
**(config.render())
}
return dict_configs
def __copy__(self) -> 'CXConfigs':
"""
*copy* constructor. Returns the `CXConfig` objects within a new
`CXConfigs` object.
"""
return CXConfigs(
*self.configs
)
def __deepcopy__(
self,
memo
) -> 'CXConfigs':
"""
*deepcopy* constructor. Returns a deepcopy of the `CXConfig` objects
within a new `CXConfigs` object.
"""
return CXConfigs(
*([deepcopy(config) for config in self.configs])
)
def __lt__(
self,
other: 'CXConfigs'
) -> bool:
"""
*less than* comparison. Also see `@total_ordering` in `functools`.
:param other:
`CXConfigs` The object to compare.
:returns: `bool`
<ul>
<li> If `other` is `None` then `False`
<li> If `other` is not a `CXConfigs` object then False
<li> If `other` is a `CXConfigs` object then True of all `CXConfig`
objects are also less than the events tracked by `self`.
</ul>
"""
if other is None:
return False
if not isinstance(other, CXConfigs):
return False
else:
if (len(self.configs) + len(other.configs)) == 0:
return False
if len(self.configs) == len(other.configs):
for config in self.configs:
for oconfig in other.configs:
if not config < oconfig:
return False
return True
else:
return len(self.configs) < len(other.configs)
def __eq__(
self,
other: 'CXConfigs'
) -> bool:
"""
*equals* comparison. Also see `@total_ordering` in `functools`.
:param other:
`CXConfigs` The object to compare.
:returns: `bool`
<ul>
<li> If `other` is `None` then `False`
<li> If `other` is not a `CXConfigs` object then False
<li> If `other` is a `CXConfigs` object then True of all `CXConfig`
objects are also equal to the events tracked by `self`.
</ul>
"""
if other is None:
return False
if not isinstance(other, CXConfigs):
return False
else:
if len(self.configs) == len(other.configs):
for config in self.configs:
alt_config = other.get_param(config.label)
if alt_config is None:
return False
if alt_config.value != config.value:
return False
return True
else:
return len(self.configs) == len(other.configs)
def __str__(self) -> str:
"""
*str* function. Converts the `CXConfigs` object into a JSON
representation.
:returns" `str`
JSON form of the collection.
"""
return json.dumps(
self.render_to_dict()
)
def __repr__(self) -> str:
"""
        *repr* function. Converts the `CXConfigs` object into a string
        that can be used with `eval` to establish a copy of the object.
:returns: `str` An evaluatable representation of the object.
"""
config_rep_list = ", ".join([repr(config) for config in self.configs])
rep_candidate = f'CXConfigs(' \
f'{config_rep_list}' \
f')'
return rep_candidate
| [
"canvasxpress.config.type.CXRGBColor",
"copy.deepcopy",
"canvasxpress.config.type.CXRGBAColor.is_color_list",
"canvasxpress.config.type.CXString",
"canvasxpress.config.type.CXRGBAColor.is_color_dict",
"canvasxpress.config.type.CXList",
"canvasxpress.config.type.CXInt",
"canvasxpress.config.type.CXBool... | [((6342, 6361), 'canvasxpress.config.type.CXInt', 'CXInt', (['label', 'value'], {}), '(label, value)\n', (6347, 6361), False, 'from canvasxpress.config.type import CXConfig, CXString, CXInt, CXFloat, CXBool, CXList, CXDict, CXRGBColor, CXRGBAColor\n'), ((12090, 12106), 'copy.deepcopy', 'deepcopy', (['config'], {}), '(config)\n', (12098, 12106), False, 'from copy import deepcopy\n'), ((6506, 6527), 'canvasxpress.config.type.CXFloat', 'CXFloat', (['label', 'value'], {}), '(label, value)\n', (6513, 6527), False, 'from canvasxpress.config.type import CXConfig, CXString, CXInt, CXFloat, CXBool, CXList, CXDict, CXRGBColor, CXRGBAColor\n'), ((6671, 6691), 'canvasxpress.config.type.CXBool', 'CXBool', (['label', 'value'], {}), '(label, value)\n', (6677, 6691), False, 'from canvasxpress.config.type import CXConfig, CXString, CXInt, CXFloat, CXBool, CXList, CXDict, CXRGBColor, CXRGBAColor\n'), ((6826, 6858), 'canvasxpress.config.type.CXRGBAColor.is_color_dict', 'CXRGBAColor.is_color_dict', (['value'], {}), '(value)\n', (6851, 6858), False, 'from canvasxpress.config.type import CXConfig, CXString, CXInt, CXFloat, CXBool, CXList, CXDict, CXRGBColor, CXRGBAColor\n'), ((6896, 6921), 'canvasxpress.config.type.CXRGBAColor', 'CXRGBAColor', (['label', 'value'], {}), '(label, value)\n', (6907, 6921), False, 'from canvasxpress.config.type import CXConfig, CXString, CXInt, CXFloat, CXBool, CXList, CXDict, CXRGBColor, CXRGBAColor\n'), ((7029, 7060), 'canvasxpress.config.type.CXRGBColor.is_color_dict', 'CXRGBColor.is_color_dict', (['value'], {}), '(value)\n', (7053, 7060), False, 'from canvasxpress.config.type import CXConfig, CXString, CXInt, CXFloat, CXBool, CXList, CXDict, CXRGBColor, CXRGBAColor\n'), ((7434, 7466), 'canvasxpress.config.type.CXRGBAColor.is_color_list', 'CXRGBAColor.is_color_list', (['value'], {}), '(value)\n', (7459, 7466), False, 'from canvasxpress.config.type import CXConfig, CXString, CXInt, CXFloat, CXBool, CXList, CXDict, CXRGBColor, CXRGBAColor\n'), ((7098, 7122), 'canvasxpress.config.type.CXRGBColor', 'CXRGBColor', (['label', 'value'], {}), '(label, value)\n', (7108, 7122), False, 'from canvasxpress.config.type import CXConfig, CXString, CXInt, CXFloat, CXBool, CXList, CXDict, CXRGBColor, CXRGBAColor\n'), ((7267, 7287), 'canvasxpress.config.type.CXDict', 'CXDict', (['label', 'value'], {}), '(label, value)\n', (7273, 7287), False, 'from canvasxpress.config.type import CXConfig, CXString, CXInt, CXFloat, CXBool, CXList, CXDict, CXRGBColor, CXRGBAColor\n'), ((7504, 7529), 'canvasxpress.config.type.CXRGBAColor', 'CXRGBAColor', (['label', 'value'], {}), '(label, value)\n', (7515, 7529), False, 'from canvasxpress.config.type import CXConfig, CXString, CXInt, CXFloat, CXBool, CXList, CXDict, CXRGBColor, CXRGBAColor\n'), ((7637, 7668), 'canvasxpress.config.type.CXRGBColor.is_color_list', 'CXRGBColor.is_color_list', (['value'], {}), '(value)\n', (7661, 7668), False, 'from canvasxpress.config.type import CXConfig, CXString, CXInt, CXFloat, CXBool, CXList, CXDict, CXRGBColor, CXRGBAColor\n'), ((8243, 8274), 'canvasxpress.config.type.CXRGBAColor.is_color_str', 'CXRGBAColor.is_color_str', (['value'], {}), '(value)\n', (8267, 8274), False, 'from canvasxpress.config.type import CXConfig, CXString, CXInt, CXFloat, CXBool, CXList, CXDict, CXRGBColor, CXRGBAColor\n'), ((7706, 7730), 'canvasxpress.config.type.CXRGBColor', 'CXRGBColor', (['label', 'value'], {}), '(label, value)\n', (7716, 7730), False, 'from canvasxpress.config.type import CXConfig, CXString, CXInt, 
CXFloat, CXBool, CXList, CXDict, CXRGBColor, CXRGBAColor\n'), ((7875, 7895), 'canvasxpress.config.type.CXList', 'CXList', (['label', 'value'], {}), '(label, value)\n', (7881, 7895), False, 'from canvasxpress.config.type import CXConfig, CXString, CXInt, CXFloat, CXBool, CXList, CXDict, CXRGBColor, CXRGBAColor\n'), ((8312, 8337), 'canvasxpress.config.type.CXRGBAColor', 'CXRGBAColor', (['label', 'value'], {}), '(label, value)\n', (8323, 8337), False, 'from canvasxpress.config.type import CXConfig, CXString, CXInt, CXFloat, CXBool, CXList, CXDict, CXRGBColor, CXRGBAColor\n'), ((8445, 8475), 'canvasxpress.config.type.CXRGBColor.is_color_str', 'CXRGBColor.is_color_str', (['value'], {}), '(value)\n', (8468, 8475), False, 'from canvasxpress.config.type import CXConfig, CXString, CXInt, CXFloat, CXBool, CXList, CXDict, CXRGBColor, CXRGBAColor\n'), ((8513, 8537), 'canvasxpress.config.type.CXRGBColor', 'CXRGBColor', (['label', 'value'], {}), '(label, value)\n', (8523, 8537), False, 'from canvasxpress.config.type import CXConfig, CXString, CXInt, CXFloat, CXBool, CXList, CXDict, CXRGBColor, CXRGBAColor\n'), ((8682, 8704), 'canvasxpress.config.type.CXString', 'CXString', (['label', 'value'], {}), '(label, value)\n', (8690, 8704), False, 'from canvasxpress.config.type import CXConfig, CXString, CXInt, CXFloat, CXBool, CXList, CXDict, CXRGBColor, CXRGBAColor\n')] |
# Generated by Django 3.1.5 on 2021-01-17 15:24
import cloudinary.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment', models.CharField(max_length=300)),
],
),
migrations.CreateModel(
name='Following',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='InstaPhotos',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
('image', cloudinary.models.CloudinaryField(blank=True, max_length=255, null=True, verbose_name='image')),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bio', models.TextField(blank=True, null=True)),
('dp', cloudinary.models.CloudinaryField(blank=True, max_length=255, null=True, verbose_name='image')),
('following', models.ManyToManyField(to='people.Following')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', cloudinary.models.CloudinaryField(max_length=255, null=True, verbose_name='image')),
('name', models.CharField(max_length=30)),
('caption', models.TextField()),
('likes', models.IntegerField(blank=True, null=True)),
('pub_date', models.DateTimeField(auto_now_add=True, null=True)),
('comments', models.ManyToManyField(to='people.Comment')),
('profile', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='people.profile')),
],
),
]
| [
"django.db.models.OneToOneField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.IntegerField",
"django.db.models.ManyToManyField",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField... | [((272, 329), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (303, 329), False, 'from django.db import migrations, models\n'), ((461, 554), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (477, 554), False, 'from django.db import migrations, models\n'), ((581, 613), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(300)'}), '(max_length=300)\n', (597, 613), False, 'from django.db import migrations, models\n'), ((748, 841), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (764, 841), False, 'from django.db import migrations, models\n'), ((865, 984), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to=settings.AUTH_USER_MODEL)\n', (882, 984), False, 'from django.db import migrations, models\n'), ((1116, 1209), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1132, 1209), False, 'from django.db import migrations, models\n'), ((1233, 1264), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (1249, 1264), False, 'from django.db import migrations, models\n'), ((1520, 1613), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1536, 1613), False, 'from django.db import migrations, models\n'), ((1636, 1675), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1652, 1675), False, 'from django.db import migrations, models\n'), ((1828, 1873), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""people.Following"""'}), "(to='people.Following')\n", (1850, 1873), False, 'from django.db import migrations, models\n'), ((1901, 2000), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=\n settings.AUTH_USER_MODEL)\n', (1921, 2000), False, 'from django.db import migrations, models\n'), ((2126, 2219), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2142, 2219), False, 'from django.db import migrations, models\n'), ((2354, 2385), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (2370, 2385), False, 'from django.db import migrations, models\n'), ((2416, 2434), 
'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (2432, 2434), False, 'from django.db import migrations, models\n'), ((2463, 2505), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2482, 2505), False, 'from django.db import migrations, models\n'), ((2537, 2587), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'null': '(True)'}), '(auto_now_add=True, null=True)\n', (2557, 2587), False, 'from django.db import migrations, models\n'), ((2619, 2662), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""people.Comment"""'}), "(to='people.Comment')\n", (2641, 2662), False, 'from django.db import migrations, models\n'), ((2693, 2807), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.DO_NOTHING', 'to': '"""people.profile"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.DO_NOTHING, to='people.profile')\n", (2710, 2807), False, 'from django.db import migrations, models\n')] |
############################################################################
#
# Copyright (C) 2016 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
import __builtin__
# appends to line, by typing <typeWhat> after <insertAfterLine> text into <codeArea> widget
def appendToLine(codeArea, insertAfterLine, typeWhat):
if not placeCursorToLine(codeArea, insertAfterLine):
return False
type(codeArea, typeWhat)
return True
# checks if error is properly reported, returns True if succeeded and False if not.
# Current implementation is focused on allowing different compilers, and it is enough if one of the expected messages
# is found in issues view. warnIfMoreIssues should warn if there is more than one issue, no matter how many
# expected texts are in the array (because they are alternatives).
def checkSyntaxError(issuesView, expectedTextsArray, warnIfMoreIssues = True):
issuesModel = issuesView.model()
# wait for issues
waitFor("issuesModel.rowCount() > 0", 5000)
# warn if more issues reported
if(warnIfMoreIssues and issuesModel.rowCount() > 1):
test.warning("More than one expected issues reported")
# iterate issues and check if there exists "Unexpected token" message
for description, type in zip(dumpItems(issuesModel, role=Qt.UserRole + 3),
dumpItems(issuesModel, role=Qt.UserRole + 5)):
# enum Roles { File = Qt::UserRole, Line, MovedLine, Description, FileNotFound, Type, Category, Icon, Task_t };
# check if at least one of expected texts found in issue text
for expectedText in expectedTextsArray:
if expectedText in description:
# check if it is error and warn if not - returns False which leads to fail
if type is not "1":
test.warning("Expected error text found, but is not of type: 'error'")
return False
else:
return True
return False
# change autocomplete options to manual
def changeAutocompleteToManual(toManual=True):
invokeMenuItem("Tools", "Options...")
mouseClick(waitForObjectItem(":Options_QListView", "Text Editor"), 5, 5, 0, Qt.LeftButton)
clickOnTab(":Options.qt_tabwidget_tabbar_QTabBar", "Completion")
ensureChecked(waitForObject(":Behavior.Autocomplete common prefix_QCheckBox"), not toManual)
activateCompletion = "Always"
if toManual:
activateCompletion = "Manually"
selectFromCombo(":Behavior.completionTrigger_QComboBox", activateCompletion)
verifyEnabled(":Options.OK_QPushButton")
clickButton(waitForObject(":Options.OK_QPushButton"))
# wait and verify if object item exists/not exists
def checkIfObjectItemExists(object, item, timeout = 3000):
try:
waitForObjectItem(object, item, timeout)
return True
except:
return False
# this function creates a string holding the real name of a Qml Item
# param type defines the Qml type (support is limited)
# param container defines the container of the Qml item - can be a real or symbolic name
# param clip defines the state of the clip property (true/false)
# param text a string holding the complete text property (e.g. "text='example'", "text~='ex.*'")
def getQmlItem(type, container, clip, text=""):
if (container.startswith(":")):
container = "'%s'" % container
    if clip is not None:
clip = ("%s" % __builtin__.bool(clip)).lower()
return ("{clip='%s' container=%s enabled='true' %s type='%s' unnamed='1' visible='true'}"
% (clip, container, text, type))
else:
return ("{container=%s enabled='true' %s type='%s' unnamed='1' visible='true'}"
% (container, text, type))
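# Hypothetical example of the real-name string this builds:
#   getQmlItem("Text", ":Popup_Window", True, "text='example'") returns
#   "{clip='true' container=':Popup_Window' enabled='true' text='example' type='Text' unnamed='1' visible='true'}"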
| [
"__builtin__.bool"
] | [((4444, 4466), '__builtin__.bool', '__builtin__.bool', (['clip'], {}), '(clip)\n', (4460, 4466), False, 'import __builtin__\n')] |
# MIT License
# Copyright (c) 2018-2019 <NAME>, <NAME>, <NAME>, <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import rhino3dm as r3d
from . import utils
from mathutils import Vector
CONVERT = {}
def import_null(rcurve, bcurve, scale):
print("Failed to convert type", type(rcurve))
return None
def import_line(rcurve, bcurve, scale):
fr = rcurve.Line.From
to = rcurve.Line.To
line = bcurve.splines.new('POLY')
line.points.add(1)
line.points[0].co = (fr.X * scale, fr.Y * scale, fr.Z * scale, 1)
line.points[1].co = (to.X * scale, to.Y * scale, to.Z * scale, 1)
return line
CONVERT[r3d.LineCurve] = import_line
def import_polyline(rcurve, bcurve, scale):
N = rcurve.PointCount
polyline = bcurve.splines.new('POLY')
polyline.use_cyclic_u = rcurve.IsClosed
if rcurve.IsClosed:
N -= 1
polyline.points.add(N - 1)
for i in range(0, N):
rpt = rcurve.Point(i)
polyline.points[i].co = (rpt.X * scale, rpt.Y * scale, rpt.Z * scale, 1)
return polyline
CONVERT[r3d.PolylineCurve] = import_polyline
def import_nurbs_curve(rcurve, bcurve, scale):
N = len(rcurve.Points)
nurbs = bcurve.splines.new('NURBS')
nurbs.use_cyclic_u = rcurve.IsClosed
nurbs.points.add(N - 1)
for i in range(0, N):
rpt = rcurve.Points[i]
nurbs.points[i].co = (rpt.X * scale, rpt.Y * scale, rpt.Z * scale, rpt.W * scale)
#nurbs.use_bezier_u = True
nurbs.use_endpoint_u = True
nurbs.order_u = rcurve.Order
return nurbs
CONVERT[r3d.NurbsCurve] = import_nurbs_curve
def import_arc(rcurve, bcurve, scale):
spt = Vector((rcurve.Arc.StartPoint.X, rcurve.Arc.StartPoint.Y, rcurve.Arc.StartPoint.Z)) * scale
ept = Vector((rcurve.Arc.EndPoint.X, rcurve.Arc.EndPoint.Y, rcurve.Arc.EndPoint.Z)) * scale
cpt = Vector((rcurve.Arc.Center.X, rcurve.Arc.Center.Y, rcurve.Arc.Center.Z)) * scale
r1 = spt - cpt
r2 = ept - cpt
r1.normalize()
r2.normalize()
d = rcurve.Arc.Length * scale
normal = r1.cross(r2)
t1 = normal.cross(r1)
t2 = normal.cross(r2)
'''
Temporary arc
'''
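    # Approximation sketch: the arc is rebuilt as a 4-point NURBS (order_u = 3)
    # whose two interior control points sit about a third of the arc length
    # along the start/end tangents t1 and t2, rather than as an exact
    # rational arc.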
arc = bcurve.splines.new('NURBS')
arc.use_cyclic_u = False
arc.points.add(3)
arc.points[0].co = (spt.x, spt.y, spt.z, 1)
sspt = spt + t1 * d * 0.33
arc.points[1].co = (sspt.x, sspt.y, sspt.z, 1)
eept = ept - t2 * d * 0.33
arc.points[2].co = (eept.x, eept.y, eept.z, 1)
arc.points[3].co = (ept.x, ept.y, ept.z, 1)
'''
print("ARC")
print(" StartPoint:", rcurve.Arc.StartPoint)
print(" EndPoint:", rcurve.Arc.EndPoint)
print(" Center:", rcurve.Arc.Center)
print(" Radius:", rcurve.Radius)
'''
arc.use_endpoint_u = True
arc.order_u = 3
return arc
CONVERT[r3d.ArcCurve] = import_arc
def import_polycurve(rcurve, bcurve, scale):
for seg in range(rcurve.SegmentCount):
segcurve = rcurve.SegmentCurve(seg)
if type(segcurve) in CONVERT.keys():
CONVERT[type(segcurve)](segcurve, bcurve, scale)
CONVERT[r3d.PolyCurve] = import_polycurve
def import_curve(context, ob, name, scale, options):
og = ob.Geometry
oa = ob.Attributes
curve_data = context.blend_data.curves.new(name, type="CURVE")
if type(og) in CONVERT.keys():
curve_data.dimensions = '3D'
curve_data.resolution_u = 2
CONVERT[type(og)](og, curve_data, scale)
return curve_data
| [
"mathutils.Vector"
] | [((2668, 2756), 'mathutils.Vector', 'Vector', (['(rcurve.Arc.StartPoint.X, rcurve.Arc.StartPoint.Y, rcurve.Arc.StartPoint.Z)'], {}), '((rcurve.Arc.StartPoint.X, rcurve.Arc.StartPoint.Y, rcurve.Arc.\n StartPoint.Z))\n', (2674, 2756), False, 'from mathutils import Vector\n'), ((2770, 2847), 'mathutils.Vector', 'Vector', (['(rcurve.Arc.EndPoint.X, rcurve.Arc.EndPoint.Y, rcurve.Arc.EndPoint.Z)'], {}), '((rcurve.Arc.EndPoint.X, rcurve.Arc.EndPoint.Y, rcurve.Arc.EndPoint.Z))\n', (2776, 2847), False, 'from mathutils import Vector\n'), ((2866, 2937), 'mathutils.Vector', 'Vector', (['(rcurve.Arc.Center.X, rcurve.Arc.Center.Y, rcurve.Arc.Center.Z)'], {}), '((rcurve.Arc.Center.X, rcurve.Arc.Center.Y, rcurve.Arc.Center.Z))\n', (2872, 2937), False, 'from mathutils import Vector\n')] |
#!/usr/bin/env python3
import sys
import subprocess
from align_videos_by_soundtrack.align import SyncDetector, cli_common
remove = []
remove_specified = False
remove_all = False
add = []
add_specified = False
copy_subtitles = False
files = []
print_offset_only = False
for argument in sys.argv[1:]:
if argument == "-copy-subtitles":
copy_subtitles = True
elif argument == "-print-offset-only":
print_offset_only = True
elif argument.startswith("-"):
if argument == "-all":
remove_all = True
else:
remove_specified = True
remove = argument[1:].split(",")
elif argument.startswith("+"):
add_specified = True
add = argument[1:].split(",")
else:
files.append(argument)
if print_offset_only:
main_src, audio_src = files
destination = None
else:
main_src, audio_src, destination = files
cli_common.logger_config()
with SyncDetector() as det:
result = det.align([audio_src, main_src])
offset = 0
if result[0]["trim"] > 0:
offset = int(-result[0]["trim"]*1000)
else:
offset = int(result[0]["pad"]*1000)
arguments = ["mkvmerge","-o",destination,"-D","-B","-T"]
if not copy_subtitles:
arguments.extend(["-S","-M"])
if add_specified:
arguments.extend(["-a",",".join(add)])
arguments.extend(["-y", "-1:" + str(offset), audio_src])
if remove_specified:
arguments.extend(["-a","!"+(",".join(remove))])
elif remove_all:
arguments.append("-A")
arguments.append(main_src)
if print_offset_only:
print(offset)
else:
print("Sync: {0}".format(offset))
subprocess.check_call(arguments)
| [
"align_videos_by_soundtrack.align.cli_common.logger_config",
"align_videos_by_soundtrack.align.SyncDetector",
"subprocess.check_call"
] | [((909, 935), 'align_videos_by_soundtrack.align.cli_common.logger_config', 'cli_common.logger_config', ([], {}), '()\n', (933, 935), False, 'from align_videos_by_soundtrack.align import SyncDetector, cli_common\n'), ((942, 956), 'align_videos_by_soundtrack.align.SyncDetector', 'SyncDetector', ([], {}), '()\n', (954, 956), False, 'from align_videos_by_soundtrack.align import SyncDetector, cli_common\n'), ((1603, 1635), 'subprocess.check_call', 'subprocess.check_call', (['arguments'], {}), '(arguments)\n', (1624, 1635), False, 'import subprocess\n')] |
import numpy as np
import pandas as pd
import sys
import tensorflow as tf
from keras.layers import Input, Dense, Lambda, Flatten, Reshape, Activation, Dropout, Add, TimeDistributed, Multiply, Conv1D, Conv2D, MaxPooling1D, AveragePooling1D
from keras.models import Model, Sequential, load_model
from keras import backend as K
from keras import metrics
from keras import optimizers
from keras.callbacks import History, ModelCheckpoint
class WaveNetClassifier():
def __init__(self, input_shape, output_shape, kernel_size = 2, dilation_depth = 9, n_filters = 40, task = 'classification', regression_range = None, load=False, load_dir='./'):
"""
Parameters:
input_shape: (tuple) tuple of input shape. (e.g. If input is 6s raw waveform with sampling rate = 16kHz, (96000,) is the input_shape)
output_shape: (tuple)tuple of output shape. (e.g. If we want classify the signal into 100 classes, (100,) is the output_shape)
kernel_size: (integer) kernel size of convolution operations in residual blocks
      dilation_depth: (integer) total depth of residual blocks
n_filters: (integer) # of filters of convolution operations in residual blocks
task: (string) 'classification' or 'regression'
regression_range: (list or tuple) target range of regression task
load: (bool) load previous WaveNetClassifier or not
load_dir: (string) the directory where the previous model exists
"""
# save task info
if task == 'regression':
      # Check the None default first; indexing a None regression_range below
      # would raise a TypeError before this case could ever be reached.
      if regression_range is None:
        self.activation = 'linear'
        self.scale_ratio = 1
      elif regression_range[0] == 0:
        self.activation = 'sigmoid'
        self.scale_ratio = regression_range[1]
      elif regression_range[0] == - regression_range[1]:
        self.activation = 'tanh'
        self.scale_ratio = regression_range[1]
else:
print('ERROR: wrong regression range')
sys.exit()
elif task == 'classification':
self.activation = 'softmax'
self.scale_ratio = 1
else:
print('ERROR: wrong task')
sys.exit()
# save input info
if len(input_shape) == 1:
self.expand_dims = True
elif len(input_shape) == 2:
self.expand_dims = False
else:
print('ERROR: wrong input shape')
sys.exit()
self.input_shape = input_shape
# save output info
if len(output_shape) == 1:
self.time_distributed = False
elif len(output_shape) == 2:
self.time_distributed = True
else:
print('ERROR: wrong output shape')
sys.exit()
self.output_shape = output_shape
# save hyperparameters of WaveNet
self.kernel_size = kernel_size
self.dilation_depth = dilation_depth
self.n_filters = n_filters
self.manual_loss = None
self.task = task
if load is True:
self.model = load_model(load_dir+"saved_wavenet_clasifier.h5", custom_objects={'tf':tf})
self.prev_history = pd.read_csv(load_dir+'wavenet_classifier_training_history.csv')
self.start_idx = len(self.prev_history)
self.history = None
else:
self.model = self.construct_model()
self.start_idx = 0
self.history = None
self.prev_history = None
def residual_block(self, x, i):
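    # WaveNet-style gated activation: z = tanh(conv(x)) * sigmoid(conv(x)),
    # followed by a 1x1 conv skip branch and a residual Add back onto x.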
tanh_out = Conv1D(self.n_filters,
self.kernel_size,
dilation_rate = self.kernel_size**i,
padding='causal',
name='dilated_conv_%d_tanh' % (self.kernel_size ** i),
activation='tanh'
)(x)
sigm_out = Conv1D(self.n_filters,
self.kernel_size,
dilation_rate = self.kernel_size**i,
padding='causal',
name='dilated_conv_%d_sigm' % (self.kernel_size ** i),
activation='sigmoid'
)(x)
z = Multiply(name='gated_activation_%d' % (i))([tanh_out, sigm_out])
skip = Conv1D(self.n_filters, 1, name='skip_%d'%(i))(z)
res = Add(name='residual_block_%d' % (i))([skip, x])
return res, skip
def construct_model(self):
x = Input(shape=self.input_shape, name='original_input')
if self.expand_dims == True:
x_reshaped = Reshape(self.input_shape + (1,), name='reshaped_input')(x)
else:
x_reshaped = x
skip_connections = []
out = Conv1D(self.n_filters, 2, dilation_rate=1, padding='causal', name='dilated_conv_1')(x_reshaped)
for i in range(1, self.dilation_depth + 1):
out, skip = self.residual_block(out,i)
skip_connections.append(skip)
out = Add(name='skip_connections')(skip_connections)
out = Activation('relu')(out)
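    # Assuming the 16 kHz example rate, the 80-sample conv/pool below spans
    # about 5 ms and downsamples the sequence to 200 Hz (hence the layer names).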
out = Conv1D(self.n_filters, 80, strides = 1, padding='same', name='conv_5ms', activation = 'relu')(out)
out = AveragePooling1D(80, padding='same', name='downsample_to_200Hz')(out)
if self.time_distributed:
target_kernel_size = (int) (self.input_shape[0] / 80 / self.output_shape[0]) # prev_len / x = target_len => x = prev_len / target_len
out = Conv1D(self.n_filters, target_kernel_size, padding='same', name = 'conv_fit_to_target', activation='relu')(out)
out = Conv1D(self.output_shape[1], target_kernel_size, padding='same', name='conv_final')(out)
out = AveragePooling1D(target_kernel_size, padding='same')(out)
out = TimeDistributed(Activation(self.activation))(out)
else:
out = Conv1D(self.n_filters, 100, padding='same', activation='relu', name='conv_500ms')(out)
out = Conv1D(self.output_shape[0], 100, padding='same', activation='relu', name='conv_500ms_target_shape')(out)
out = AveragePooling1D(100, padding='same',name = 'downsample_to_2Hz')(out)
out = Conv1D(self.output_shape[0], (int) (self.input_shape[0] / 8000), padding='same', name='final_conv')(out)
out = AveragePooling1D((int) (self.input_shape[0] / 8000), name='final_pooling')(out)
out = Reshape(self.output_shape)(out)
out = Activation(self.activation)(out)
if self.scale_ratio != 1:
out = Lambda(lambda x: x * self.scale_ratio, name='output_reshaped')(out)
model = Model(x, out)
model.summary()
return model
def get_model(self):
return self.model
def add_loss(self, loss):
self.manual_loss = loss
def fit(self, X, Y, validation_data = None, epochs = 100, batch_size = 32, optimizer='adam', save=False, save_dir='./'):
# set default losses if not defined
if self.manual_loss is not None:
loss = self.manual_loss
metrics = None
else:
if self.task == 'classification':
loss = 'categorical_crossentropy'
metrics = ['accuracy']
else:
loss = 'mean_squared_error'
metrics = None
# set callback functions
if save:
saved = save_dir + "saved_wavenet_clasifier.h5"
hist = save_dir + 'wavenet_classifier_training_history.csv'
if validation_data is None:
checkpointer = ModelCheckpoint(filepath=saved, monitor='loss', verbose=1, save_best_only=True)
else:
checkpointer = ModelCheckpoint(filepath=saved, monitor='val_loss', verbose=1, save_best_only=True)
history = History()
callbacks = [history, checkpointer]
else:
callbacks = None
# compile the model
self.model.compile(optimizer, loss, metrics)
try:
self.history = self.model.fit(X, Y, shuffle = True, batch_size=batch_size, epochs = epochs, validation_data = validation_data, callbacks=callbacks, initial_epoch=self.start_idx)
except:
if save:
df = pd.DataFrame.from_dict(history.history)
df.to_csv(hist, encoding='utf-8', index=False)
      raise
return self.history
def predict(self, x):
    return self.model.predict(x)
| [
"keras.models.load_model",
"keras.layers.AveragePooling1D",
"pandas.read_csv",
"keras.callbacks.ModelCheckpoint",
"keras.callbacks.History",
"keras.layers.Lambda",
"pandas.DataFrame.from_dict",
"keras.layers.Add",
"keras.layers.Input",
"keras.layers.Multiply",
"keras.models.Model",
"keras.laye... | [((4182, 4234), 'keras.layers.Input', 'Input', ([], {'shape': 'self.input_shape', 'name': '"""original_input"""'}), "(shape=self.input_shape, name='original_input')\n", (4187, 4234), False, 'from keras.layers import Input, Dense, Lambda, Flatten, Reshape, Activation, Dropout, Add, TimeDistributed, Multiply, Conv1D, Conv2D, MaxPooling1D, AveragePooling1D\n'), ((6174, 6187), 'keras.models.Model', 'Model', (['x', 'out'], {}), '(x, out)\n', (6179, 6187), False, 'from keras.models import Model, Sequential, load_model\n'), ((2860, 2938), 'keras.models.load_model', 'load_model', (["(load_dir + 'saved_wavenet_clasifier.h5')"], {'custom_objects': "{'tf': tf}"}), "(load_dir + 'saved_wavenet_clasifier.h5', custom_objects={'tf': tf})\n", (2870, 2938), False, 'from keras.models import Model, Sequential, load_model\n'), ((2962, 3027), 'pandas.read_csv', 'pd.read_csv', (["(load_dir + 'wavenet_classifier_training_history.csv')"], {}), "(load_dir + 'wavenet_classifier_training_history.csv')\n", (2973, 3027), True, 'import pandas as pd\n'), ((3287, 3462), 'keras.layers.Conv1D', 'Conv1D', (['self.n_filters', 'self.kernel_size'], {'dilation_rate': '(self.kernel_size ** i)', 'padding': '"""causal"""', 'name': "('dilated_conv_%d_tanh' % self.kernel_size ** i)", 'activation': '"""tanh"""'}), "(self.n_filters, self.kernel_size, dilation_rate=self.kernel_size **\n i, padding='causal', name='dilated_conv_%d_tanh' % self.kernel_size **\n i, activation='tanh')\n", (3293, 3462), False, 'from keras.layers import Input, Dense, Lambda, Flatten, Reshape, Activation, Dropout, Add, TimeDistributed, Multiply, Conv1D, Conv2D, MaxPooling1D, AveragePooling1D\n'), ((3613, 3791), 'keras.layers.Conv1D', 'Conv1D', (['self.n_filters', 'self.kernel_size'], {'dilation_rate': '(self.kernel_size ** i)', 'padding': '"""causal"""', 'name': "('dilated_conv_%d_sigm' % self.kernel_size ** i)", 'activation': '"""sigmoid"""'}), "(self.n_filters, self.kernel_size, dilation_rate=self.kernel_size **\n i, padding='causal', name='dilated_conv_%d_sigm' % self.kernel_size **\n i, activation='sigmoid')\n", (3619, 3791), False, 'from keras.layers import Input, Dense, Lambda, Flatten, Reshape, Activation, Dropout, Add, TimeDistributed, Multiply, Conv1D, Conv2D, MaxPooling1D, AveragePooling1D\n'), ((3935, 3975), 'keras.layers.Multiply', 'Multiply', ([], {'name': "('gated_activation_%d' % i)"}), "(name='gated_activation_%d' % i)\n", (3943, 3975), False, 'from keras.layers import Input, Dense, Lambda, Flatten, Reshape, Activation, Dropout, Add, TimeDistributed, Multiply, Conv1D, Conv2D, MaxPooling1D, AveragePooling1D\n'), ((4011, 4056), 'keras.layers.Conv1D', 'Conv1D', (['self.n_filters', '(1)'], {'name': "('skip_%d' % i)"}), "(self.n_filters, 1, name='skip_%d' % i)\n", (4017, 4056), False, 'from keras.layers import Input, Dense, Lambda, Flatten, Reshape, Activation, Dropout, Add, TimeDistributed, Multiply, Conv1D, Conv2D, MaxPooling1D, AveragePooling1D\n'), ((4070, 4103), 'keras.layers.Add', 'Add', ([], {'name': "('residual_block_%d' % i)"}), "(name='residual_block_%d' % i)\n", (4073, 4103), False, 'from keras.layers import Input, Dense, Lambda, Flatten, Reshape, Activation, Dropout, Add, TimeDistributed, Multiply, Conv1D, Conv2D, MaxPooling1D, AveragePooling1D\n'), ((4413, 4501), 'keras.layers.Conv1D', 'Conv1D', (['self.n_filters', '(2)'], {'dilation_rate': '(1)', 'padding': '"""causal"""', 'name': '"""dilated_conv_1"""'}), "(self.n_filters, 2, dilation_rate=1, padding='causal', name=\n 'dilated_conv_1')\n", (4419, 4501), False, 'from 
keras.layers import Input, Dense, Lambda, Flatten, Reshape, Activation, Dropout, Add, TimeDistributed, Multiply, Conv1D, Conv2D, MaxPooling1D, AveragePooling1D\n'), ((4648, 4676), 'keras.layers.Add', 'Add', ([], {'name': '"""skip_connections"""'}), "(name='skip_connections')\n", (4651, 4676), False, 'from keras.layers import Input, Dense, Lambda, Flatten, Reshape, Activation, Dropout, Add, TimeDistributed, Multiply, Conv1D, Conv2D, MaxPooling1D, AveragePooling1D\n'), ((4705, 4723), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4715, 4723), False, 'from keras.layers import Input, Dense, Lambda, Flatten, Reshape, Activation, Dropout, Add, TimeDistributed, Multiply, Conv1D, Conv2D, MaxPooling1D, AveragePooling1D\n'), ((4739, 4832), 'keras.layers.Conv1D', 'Conv1D', (['self.n_filters', '(80)'], {'strides': '(1)', 'padding': '"""same"""', 'name': '"""conv_5ms"""', 'activation': '"""relu"""'}), "(self.n_filters, 80, strides=1, padding='same', name='conv_5ms',\n activation='relu')\n", (4745, 4832), False, 'from keras.layers import Input, Dense, Lambda, Flatten, Reshape, Activation, Dropout, Add, TimeDistributed, Multiply, Conv1D, Conv2D, MaxPooling1D, AveragePooling1D\n'), ((4848, 4912), 'keras.layers.AveragePooling1D', 'AveragePooling1D', (['(80)'], {'padding': '"""same"""', 'name': '"""downsample_to_200Hz"""'}), "(80, padding='same', name='downsample_to_200Hz')\n", (4864, 4912), False, 'from keras.layers import Input, Dense, Lambda, Flatten, Reshape, Activation, Dropout, Add, TimeDistributed, Multiply, Conv1D, Conv2D, MaxPooling1D, AveragePooling1D\n'), ((7229, 7238), 'keras.callbacks.History', 'History', ([], {}), '()\n', (7236, 7238), False, 'from keras.callbacks import History, ModelCheckpoint\n'), ((2084, 2094), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2092, 2094), False, 'import sys\n'), ((2301, 2311), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2309, 2311), False, 'import sys\n'), ((2567, 2577), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2575, 2577), False, 'import sys\n'), ((4287, 4342), 'keras.layers.Reshape', 'Reshape', (['(self.input_shape + (1,))'], {'name': '"""reshaped_input"""'}), "(self.input_shape + (1,), name='reshaped_input')\n", (4294, 4342), False, 'from keras.layers import Input, Dense, Lambda, Flatten, Reshape, Activation, Dropout, Add, TimeDistributed, Multiply, Conv1D, Conv2D, MaxPooling1D, AveragePooling1D\n'), ((5100, 5209), 'keras.layers.Conv1D', 'Conv1D', (['self.n_filters', 'target_kernel_size'], {'padding': '"""same"""', 'name': '"""conv_fit_to_target"""', 'activation': '"""relu"""'}), "(self.n_filters, target_kernel_size, padding='same', name=\n 'conv_fit_to_target', activation='relu')\n", (5106, 5209), False, 'from keras.layers import Input, Dense, Lambda, Flatten, Reshape, Activation, Dropout, Add, TimeDistributed, Multiply, Conv1D, Conv2D, MaxPooling1D, AveragePooling1D\n'), ((5224, 5312), 'keras.layers.Conv1D', 'Conv1D', (['self.output_shape[1]', 'target_kernel_size'], {'padding': '"""same"""', 'name': '"""conv_final"""'}), "(self.output_shape[1], target_kernel_size, padding='same', name=\n 'conv_final')\n", (5230, 5312), False, 'from keras.layers import Input, Dense, Lambda, Flatten, Reshape, Activation, Dropout, Add, TimeDistributed, Multiply, Conv1D, Conv2D, MaxPooling1D, AveragePooling1D\n'), ((5325, 5377), 'keras.layers.AveragePooling1D', 'AveragePooling1D', (['target_kernel_size'], {'padding': '"""same"""'}), "(target_kernel_size, padding='same')\n", (5341, 5377), False, 'from keras.layers import Input, Dense, Lambda, 
Flatten, Reshape, Activation, Dropout, Add, TimeDistributed, Multiply, Conv1D, Conv2D, MaxPooling1D, AveragePooling1D\n'), ((5467, 5553), 'keras.layers.Conv1D', 'Conv1D', (['self.n_filters', '(100)'], {'padding': '"""same"""', 'activation': '"""relu"""', 'name': '"""conv_500ms"""'}), "(self.n_filters, 100, padding='same', activation='relu', name=\n 'conv_500ms')\n", (5473, 5553), False, 'from keras.layers import Input, Dense, Lambda, Flatten, Reshape, Activation, Dropout, Add, TimeDistributed, Multiply, Conv1D, Conv2D, MaxPooling1D, AveragePooling1D\n'), ((5566, 5671), 'keras.layers.Conv1D', 'Conv1D', (['self.output_shape[0]', '(100)'], {'padding': '"""same"""', 'activation': '"""relu"""', 'name': '"""conv_500ms_target_shape"""'}), "(self.output_shape[0], 100, padding='same', activation='relu', name=\n 'conv_500ms_target_shape')\n", (5572, 5671), False, 'from keras.layers import Input, Dense, Lambda, Flatten, Reshape, Activation, Dropout, Add, TimeDistributed, Multiply, Conv1D, Conv2D, MaxPooling1D, AveragePooling1D\n'), ((5684, 5747), 'keras.layers.AveragePooling1D', 'AveragePooling1D', (['(100)'], {'padding': '"""same"""', 'name': '"""downsample_to_2Hz"""'}), "(100, padding='same', name='downsample_to_2Hz')\n", (5700, 5747), False, 'from keras.layers import Input, Dense, Lambda, Flatten, Reshape, Activation, Dropout, Add, TimeDistributed, Multiply, Conv1D, Conv2D, MaxPooling1D, AveragePooling1D\n'), ((5975, 6001), 'keras.layers.Reshape', 'Reshape', (['self.output_shape'], {}), '(self.output_shape)\n', (5982, 6001), False, 'from keras.layers import Input, Dense, Lambda, Flatten, Reshape, Activation, Dropout, Add, TimeDistributed, Multiply, Conv1D, Conv2D, MaxPooling1D, AveragePooling1D\n'), ((6019, 6046), 'keras.layers.Activation', 'Activation', (['self.activation'], {}), '(self.activation)\n', (6029, 6046), False, 'from keras.layers import Input, Dense, Lambda, Flatten, Reshape, Activation, Dropout, Add, TimeDistributed, Multiply, Conv1D, Conv2D, MaxPooling1D, AveragePooling1D\n'), ((6094, 6156), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x * self.scale_ratio)'], {'name': '"""output_reshaped"""'}), "(lambda x: x * self.scale_ratio, name='output_reshaped')\n", (6100, 6156), False, 'from keras.layers import Input, Dense, Lambda, Flatten, Reshape, Activation, Dropout, Add, TimeDistributed, Multiply, Conv1D, Conv2D, MaxPooling1D, AveragePooling1D\n'), ((7014, 7093), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'saved', 'monitor': '"""loss"""', 'verbose': '(1)', 'save_best_only': '(True)'}), "(filepath=saved, monitor='loss', verbose=1, save_best_only=True)\n", (7029, 7093), False, 'from keras.callbacks import History, ModelCheckpoint\n'), ((7129, 7216), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'saved', 'monitor': '"""val_loss"""', 'verbose': '(1)', 'save_best_only': '(True)'}), "(filepath=saved, monitor='val_loss', verbose=1,\n save_best_only=True)\n", (7144, 7216), False, 'from keras.callbacks import History, ModelCheckpoint\n'), ((7738, 7748), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7746, 7748), False, 'import sys\n'), ((5411, 5438), 'keras.layers.Activation', 'Activation', (['self.activation'], {}), '(self.activation)\n', (5421, 5438), False, 'from keras.layers import Input, Dense, Lambda, Flatten, Reshape, Activation, Dropout, Add, TimeDistributed, Multiply, Conv1D, Conv2D, MaxPooling1D, AveragePooling1D\n'), ((7626, 7665), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['history.history'], {}), 
'(history.history)\n', (7648, 7665), True, 'import pandas as pd\n'), ((1920, 1930), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1928, 1930), False, 'import sys\n')] |
# Generated by Django 3.1.6 on 2021-03-08 10:44
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Album',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('idd', models.CharField(max_length=50)),
('name', models.CharField(max_length=50)),
('artist', models.CharField(max_length=50)),
('release_date', models.DateTimeField()),
],
),
migrations.CreateModel(
name='Track',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('track_id', models.CharField(max_length=50)),
('tags', models.CharField(max_length=200)),
],
),
]
| [
"django.db.models.DateTimeField",
"django.db.models.AutoField",
"django.db.models.CharField"
] | [((301, 394), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (317, 394), False, 'from django.db import migrations, models\n'), ((417, 448), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (433, 448), False, 'from django.db import migrations, models\n'), ((476, 507), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (492, 507), False, 'from django.db import migrations, models\n'), ((537, 568), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (553, 568), False, 'from django.db import migrations, models\n'), ((604, 626), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (624, 626), False, 'from django.db import migrations, models\n'), ((757, 850), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (773, 850), False, 'from django.db import migrations, models\n'), ((878, 909), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (894, 909), False, 'from django.db import migrations, models\n'), ((937, 969), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (953, 969), False, 'from django.db import migrations, models\n')] |
#!/usr/bin/env python3
import platform
import serial
import sys
from config import Settings
dev = serial.Serial(Settings.SERIAL_DEVICE, Settings.BAUD_RATE)
print("> Returned data:", file=sys.stderr)
while True:
x = dev.read()
sys.stdout.buffer.write(x)
sys.stdout.flush()
| [
"sys.stdout.flush",
"sys.stdout.buffer.write",
"serial.Serial"
] | [((99, 156), 'serial.Serial', 'serial.Serial', (['Settings.SERIAL_DEVICE', 'Settings.BAUD_RATE'], {}), '(Settings.SERIAL_DEVICE, Settings.BAUD_RATE)\n', (112, 156), False, 'import serial\n'), ((237, 263), 'sys.stdout.buffer.write', 'sys.stdout.buffer.write', (['x'], {}), '(x)\n', (260, 263), False, 'import sys\n'), ((268, 286), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (284, 286), False, 'import sys\n')] |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import brbn
import email.utils as _email
import json as _json
import logging as _logging
import os as _os
import quopri as _quopri
import re as _re
import sqlite3 as _sqlite
import time as _time
import textwrap as _textwrap
from datetime import datetime as _datetime
from pencil import *
_log = _logging.getLogger("haystack")
_strings = StringCatalog(__file__)
_topics = _json.loads(_strings["topics"])
class Haystack(brbn.Application):
def __init__(self, home_dir):
super().__init__(home_dir)
path = _os.path.join(self.home, "data", "data.sqlite")
self.database = Database(path)
self.root_resource = _IndexPage(self)
self.search_page = _SearchPage(self)
self.thread_page = _ThreadPage(self)
self.message_page = _MessagePage(self)
def receive_request(self, request):
request.database_connection = self.database.connect()
try:
return super().receive_request(request)
finally:
request.database_connection.close()
class _IndexPage(brbn.Page):
def __init__(self, app):
super().__init__(app, "/", _strings["index_page_body"])
def get_title(self, request):
return "Haystack"
@brbn.xml
def render_topics(self, request):
items = list()
for topic in _topics:
href = self.app.search_page.get_href(request, query=topic)
text = xml_escape(topic)
items.append(html_a(text, href))
return html_ul(items, class_="four-column")
class _SearchPage(brbn.Page):
def __init__(self, app):
super().__init__(app, "/search", _strings["search_page_body"])
def get_title(self, request):
query = request.get("query")
return "Search '{}'".format(query)
def render_query(self, request):
return request.get("query")
@brbn.xml
def render_threads(self, request):
query = request.get("query")
sql = ("select * from messages where id in "
"(select distinct thread_id from messages_fts "
" where messages_fts match ? limit 1000) "
"order by date desc")
escaped_query = query.replace("\"", "\"\"")
records = self.app.database.query(request, sql, escaped_query)
thread = Thread()
rows = list()
for record in records:
thread.load_from_record(record)
thread_link = thread.get_link(request)
row = [
thread_link,
xml_escape(thread.from_address),
thread.authored_words,
xml_escape(str(_email.formatdate(thread.date)[:-6])),
]
rows.append(row)
return html_table(rows, False, class_="messages four")
class _ThreadPage(brbn.Page):
def __init__(self, app):
super().__init__(app, "/thread", _strings["thread_page_body"])
def get_title(self, request):
return "Thread '{}'".format(request.thread.subject)
def process(self, request):
id = request.get("id")
request.thread = self.app.database.get(request, Message, id)
sql = ("select * from messages "
"where thread_id = ? "
"order by thread_position, date asc "
"limit 1000")
records = self.app.database.query(request, sql, request.thread.id)
request.messages = list()
request.messages_by_id = dict()
for record in records:
message = Message()
message.load_from_record(record)
request.messages.append(message)
request.messages_by_id[message.id] = message
def render_title(self, request):
return request.thread.subject
@brbn.xml
def render_index(self, request):
rows = list()
for i, message in enumerate(request.messages):
date = _time.strftime("%d %b %Y", _time.gmtime(message.date))
number = i + 1
title = self.get_message_title(request, message, number)
row = [
html_a(xml_escape(title), "#{}".format(number)),
xml_escape(date),
message.authored_words,
]
rows.append(row)
return html_table(rows, False, class_="messages")
@brbn.xml
def render_messages(self, request):
out = list()
for i, message in enumerate(request.messages):
number = i + 1
title = self.get_message_title(request, message, number)
out.append(html_elem("h2", title, id=str(number)))
out.append(html_elem("pre", xml_escape(message.content)))
return "\n".join(out)
def get_message_title(self, request, message, number):
title = "{}. {}".format(number, message.from_name)
if message.in_reply_to_id is not None:
rmessage = request.messages_by_id.get(message.in_reply_to_id)
if rmessage is not None:
rperson = rmessage.from_name
title = "{} replying to {}".format(title, rperson)
return title
class _MessagePage(brbn.Page):
def __init__(self, app):
super().__init__(app, "/message", _strings["message_page_body"])
def get_title(self, request):
return "Message '{}'".format(request.message.subject)
def process(self, request):
id = request.get("id")
request.message = self.app.database.get(request, Message, id)
def render_title(self, request):
return request.message.subject
@brbn.xml
def render_thread_link(self, request):
thread = None
thread_id = request.message.thread_id
thread_link = xml_escape(thread_id)
if thread_id is not None:
try:
thread = self.app.database.get(request, Message, thread_id)
except ObjectNotFound:
pass
if thread is not None:
thread_link = thread.get_link(request)
return thread_link
@brbn.xml
def render_in_reply_to_link(self, request):
rmessage = None
rmessage_id = request.message.in_reply_to_id
rmessage_link = nvl(xml_escape(rmessage_id), "[None]")
if rmessage_id is not None:
try:
                rmessage = self.app.database.get(request, Message, rmessage_id)
except ObjectNotFound:
pass
if rmessage is not None:
rmessage_link = rmessage.get_link(request)
return rmessage_link
@brbn.xml
def render_headers(self, request):
message = request.message
from_field = "{} <{}>".format(message.from_name, message.from_address)
items = (
("ID", xml_escape(message.id)),
("List", xml_escape(message.list_id)),
("From", xml_escape(from_field)),
("Date", xml_escape(_email.formatdate(message.date))),
("Subject", xml_escape(message.subject)),
)
return html_table(items, False, True, class_="headers")
@brbn.xml
def render_content(self, request):
message = request.message
content = ""
if message.content is not None:
lines = list()
for line in message.content.splitlines():
line = line.strip()
if line.startswith(">"):
m = _re.match("^[> ]+", line)
prefix = "\n{}".format(m.group(0))
line = prefix.join(_textwrap.wrap(line, 80))
line = html_span(xml_escape(line), class_="quoted")
else:
line = "\n".join(_textwrap.wrap(line, 80))
line = xml_escape(line)
lines.append(line)
content = "\n".join(lines)
return content
class Database:
def __init__(self, path):
self.path = path
_log.info("Using database at {}".format(self.path))
def connect(self):
# XXX thread local connections
return _sqlite.connect(self.path)
def create_schema(self):
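        # messages table plus an index on id and an FTS4 virtual table for full-text search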
columns = list()
for name in Message.fields:
field_type = Message.field_types.get(name, str)
column_type = "text"
if field_type == int:
column_type = "integer"
column = "{} {}".format(name, column_type)
columns.append(column)
statements = list()
columns = ", ".join(columns)
ddl = "create table messages ({});".format(columns)
statements.append(ddl)
ddl = "create index messages_id_idx on messages (id);"
statements.append(ddl)
columns = ", ".join(Message.fts_fields)
ddl = ("create virtual table messages_fts using fts4 "
"({}, notindexed=id, notindexed=thread_id, tokenize=porter)"
"".format(columns))
statements.append(ddl)
conn = self.connect()
cursor = conn.cursor()
try:
for statement in statements:
cursor.execute(statement)
finally:
conn.close()
def optimize(self):
conn = self.connect()
cursor = conn.cursor()
ddl = "insert into messages_fts (messages_fts) values ('optimize')"
try:
cursor.execute(ddl)
finally:
conn.close()
def cursor(self, request):
return request.database_connection.cursor()
def query(self, request, sql, *args):
cursor = self.cursor(request)
try:
cursor.execute(sql, args)
return cursor.fetchall()
finally:
cursor.close()
def get(self, request, cls, id):
_log.debug("Getting {} with ID {}".format(cls.__name__, id))
assert issubclass(cls, _DatabaseObject), cls
assert id is not None
sql = "select * from {} where id = ?".format(cls.table)
cursor = self.cursor(request)
try:
cursor.execute(sql, [id])
record = cursor.fetchone()
finally:
cursor.close()
if record is None:
raise ObjectNotFound()
obj = cls()
obj.load_from_record(record)
return obj
class ObjectNotFound(Exception):
pass
class _DatabaseObject:
table = None
def __init__(self, id, name, parent=None):
self.id = id
self._name = name
self.parent = parent
def __repr__(self):
return format_repr(self, self.id)
@property
def name(self):
return self._name
def get_link_href(self, request):
raise NotImplementedError()
def get_link_text(self, request):
return self.name
def get_link(self, request, text=None):
href = self.get_link_href(request)
if text is None:
text = self.get_link_text(request)
return "<a href=\"{}\">{}</a>".format(href, xml_escape(text))
class Message(_DatabaseObject):
table = "messages"
fields = [
"id",
"in_reply_to_id",
"from_name",
"from_address",
"list_id",
"date",
"subject",
"content_type",
"content",
"authored_content",
"authored_words",
"thread_id",
"thread_position",
]
field_types = {
"date": int,
"authored_words": int,
"thread_position": int,
}
field_mbox_keys = {
"id": "Message-ID",
"in_reply_to_id": "In-Reply-To",
"list_id": "List-Id",
"subject": "Subject",
"content_type": "Content-Type",
}
fts_fields = [
"id",
"thread_id",
"subject",
"authored_content",
]
def __init__(self):
super().__init__(None, None)
for name in self.fields:
setattr(self, name, None)
@property
def name(self):
return self.subject
def load_from_mbox_message(self, mbox_message):
for name in self.field_mbox_keys:
mbox_key = self.field_mbox_keys[name]
value = mbox_message.get(mbox_key)
field_type = self.field_types.get(name, str)
if value is not None:
value = field_type(value)
setattr(self, name, value)
name, address = _email.parseaddr(mbox_message["From"])
self.from_name = name
self.from_address = address
tup = _email.parsedate(mbox_message["Date"])
self.date = _time.mktime(tup)
content = _get_mbox_content(mbox_message)
assert content is not None
self.content = content
self.authored_content = _get_authored_content(self.content)
self.authored_words = len(self.authored_content.split())
def load_from_record(self, record):
for i, name in enumerate(self.fields):
value = record[i]
field_type = self.field_types.get(name, str)
if value is not None:
value = field_type(value)
setattr(self, name, value)
def save(self, cursor):
columns = ", ".join(self.fields)
values = ", ".join("?" * len(self.fields))
args = [getattr(self, x) for x in self.fields]
dml = "insert into messages ({}) values ({})".format(columns, values)
cursor.execute(dml, args)
columns = ", ".join(self.fts_fields)
values = ", ".join("?" * len(self.fts_fields))
args = [getattr(self, x) for x in self.fts_fields]
dml = "insert into messages_fts ({}) values ({})".format(columns, values)
cursor.execute(dml, args)
def get_link_href(self, request):
return request.app.message_page.get_href(request, id=self.id)
def get_link_title(self, request):
return self.subject
class Thread(Message):
def get_link_href(self, request):
return request.app.thread_page.get_href(request, id=self.id)
def _get_mbox_content(mbox_message):
content_type = None
content_encoding = None
content = None
if mbox_message.is_multipart():
for part in mbox_message.walk():
if part.get_content_type() == "text/plain":
content_type = "text/plain"
content_encoding = part["Content-Transfer-Encoding"]
content = part.get_payload()
if content_type is None:
content_type = mbox_message.get_content_type()
content_encoding = mbox_message["Content-Transfer-Encoding"]
content = mbox_message.get_payload()
assert content_type is not None
assert content is not None
if content_encoding == "quoted-printable":
content = _quopri.decodestring(content)
content = content.decode("utf-8", errors="replace")
if content_type == "text/html":
content = strip_tags(content)
return content
def _get_authored_content(content):
lines = list()
for line in content.splitlines():
line = line.strip()
if line.startswith(">"):
continue
lines.append(line)
return "\n".join(lines)
| [
"logging.getLogger",
"json.loads",
"sqlite3.connect",
"email.utils.parseaddr",
"time.mktime",
"os.path.join",
"re.match",
"email.utils.parsedate",
"textwrap.wrap",
"email.utils.formatdate",
"quopri.decodestring",
"time.gmtime"
] | [((1087, 1117), 'logging.getLogger', '_logging.getLogger', (['"""haystack"""'], {}), "('haystack')\n", (1105, 1117), True, 'import logging as _logging\n'), ((1163, 1194), 'json.loads', '_json.loads', (["_strings['topics']"], {}), "(_strings['topics'])\n", (1174, 1194), True, 'import json as _json\n'), ((1315, 1362), 'os.path.join', '_os.path.join', (['self.home', '"""data"""', '"""data.sqlite"""'], {}), "(self.home, 'data', 'data.sqlite')\n", (1328, 1362), True, 'import os as _os\n'), ((8838, 8864), 'sqlite3.connect', '_sqlite.connect', (['self.path'], {}), '(self.path)\n', (8853, 8864), True, 'import sqlite3 as _sqlite\n'), ((13121, 13159), 'email.utils.parseaddr', '_email.parseaddr', (["mbox_message['From']"], {}), "(mbox_message['From'])\n", (13137, 13159), True, 'import email.utils as _email\n'), ((13242, 13280), 'email.utils.parsedate', '_email.parsedate', (["mbox_message['Date']"], {}), "(mbox_message['Date'])\n", (13258, 13280), True, 'import email.utils as _email\n'), ((13301, 13318), 'time.mktime', '_time.mktime', (['tup'], {}), '(tup)\n', (13313, 13318), True, 'import time as _time\n'), ((15473, 15502), 'quopri.decodestring', '_quopri.decodestring', (['content'], {}), '(content)\n', (15493, 15502), True, 'import quopri as _quopri\n'), ((4694, 4720), 'time.gmtime', '_time.gmtime', (['message.date'], {}), '(message.date)\n', (4706, 4720), True, 'import time as _time\n'), ((7681, 7712), 'email.utils.formatdate', '_email.formatdate', (['message.date'], {}), '(message.date)\n', (7698, 7712), True, 'import email.utils as _email\n'), ((8179, 8204), 're.match', '_re.match', (['"""^[> ]+"""', 'line'], {}), "('^[> ]+', line)\n", (8188, 8204), True, 'import re as _re\n'), ((8300, 8324), 'textwrap.wrap', '_textwrap.wrap', (['line', '(80)'], {}), '(line, 80)\n', (8314, 8324), True, 'import textwrap as _textwrap\n'), ((8457, 8481), 'textwrap.wrap', '_textwrap.wrap', (['line', '(80)'], {}), '(line, 80)\n', (8471, 8481), True, 'import textwrap as _textwrap\n'), ((3411, 3441), 'email.utils.formatdate', '_email.formatdate', (['thread.date'], {}), '(thread.date)\n', (3428, 3441), True, 'import email.utils as _email\n')] |
""" Initialization file for a GDC API client.
"""
from copy import copy
import types
from .client import BaseClient
# Aliases
COMMON_ALIASES = {
"_get_cases": 'get_cases',
"_get_mappings": 'get_mappings',
"_get_genes": 'get_genes',
"_get_ssm_occurrences": 'get_ssm_occurrences',
}
# API specific aliases
GDCAPI_ALIASES = copy(COMMON_ALIASES)
# Kwargs
COMMON_KWARGS = {
"_default_url": 'https://api.gdc.cancer.gov',
"_projects_endpoint": '/projects',
"_files_endpoint": '/files',
"_cases_endpoint": '/cases',
"_genes_endpoint": '/genes',
"_ssm_occurrences_endpoint": '/ssm_occurrences',
"_app_id": None,
"_app_key": None,
}
# API specific kwargs
GDCAPI_KWARGS = copy(COMMON_KWARGS)
# GDC client settings
CLIENT_SETTINGS = {
"gdc": {
"class_name": 'GdcClient',
"class_kwargs": GDCAPI_KWARGS,
"attr_aliases": GDCAPI_ALIASES,
"base_class": BaseClient,
"mixins": []
},
}
def copy_func(f, name=None):
""" Returns a function with the same code, globals, defaults, closure, and name (unless provided a different name).
"""
fn = types.FunctionType(f.__code__,
f.__globals__, name or f.__name__,
f.__defaults__,
f.__closure__)
fn.__dict__.update(f.__dict__)
return fn
def get_client(api=None, instance=True, *args, **kwargs):
""" Function that returns the necessary Edemam API client.
:param api: The api wrapper to be returned.
:type api: str
"""
if not api:
url = kwargs.get('url', False)
if not url:
raise RuntimeError('No API type or url specified.')
api = api.lower()
if (api not in CLIENT_SETTINGS and not kwargs.get('url', False)):
        raise Exception('No api {} currently available. Available apis are: {}'.format(api, list(CLIENT_SETTINGS.keys())))
_settings = CLIENT_SETTINGS[api]
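    # build the client class dynamically: name, bases (base class plus mixins), and class attributes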
_class = type(_settings["class_name"], tuple([_settings["base_class"]] + _settings["mixins"]), _settings["class_kwargs"])
# Set aliases
for (src_attr, target_attr) in _settings["attr_aliases"].items():
if getattr(_class, src_attr, False):
setattr(_class, target_attr, copy_func(getattr(_class, src_attr), name=target_attr))
_client = _class(*args, **kwargs) if instance else _class
return _client
class GdcClient(get_client('gdc', instance=False)):
pass
| [
"types.FunctionType",
"copy.copy"
] | [((349, 369), 'copy.copy', 'copy', (['COMMON_ALIASES'], {}), '(COMMON_ALIASES)\n', (353, 369), False, 'from copy import copy\n'), ((723, 742), 'copy.copy', 'copy', (['COMMON_KWARGS'], {}), '(COMMON_KWARGS)\n', (727, 742), False, 'from copy import copy\n'), ((1144, 1245), 'types.FunctionType', 'types.FunctionType', (['f.__code__', 'f.__globals__', '(name or f.__name__)', 'f.__defaults__', 'f.__closure__'], {}), '(f.__code__, f.__globals__, name or f.__name__, f.\n __defaults__, f.__closure__)\n', (1162, 1245), False, 'import types\n')] |
import logging
from typing import Any, Dict, List, NewType
import mlflow
import numpy as np
import pandas as pd
import torch
import transformers
from mlflow.models import ModelSignature
from mlflow.pyfunc import PythonModel
from mlflow.types import ColSpec, DataType, Schema, TensorSpec
from mlflow.utils.environment import _mlflow_conda_env
from transformers.pipelines.pt_utils import KeyDataset
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
Trainer,
pipeline
)
from tagflip.model.autonlp_arguments import AutoNLPArguments
from tagflip.model.mlflow.mlflow_savable import LogArgs, MLflowSavable, Name, Path
Model = NewType("Model", Any)
Tokenizer = NewType("Tokenizer", Any)
Config = NewType("Config", Any)
logger = logging.getLogger(__name__)
class HuggingFaceSequenceClassificationSavable(MLflowSavable):
"""
    Saves an NLP model for text classification that has been trained using the Hugging Face Transformers API.
"""
def __init__(self, trainer: Trainer, label_list: List[str]):
"""
The constructor
:param trainer: the Hugging Face Trainer being used to train the model
:param label_list: the list of labels
"""
self.trainer = trainer
self.label_list = label_list
def local_artifact_paths(self, autonlp_args: AutoNLPArguments) -> Dict[Name, Path]:
model_identifier = autonlp_args.training_id
logger.info("Saving model locally...")
self.trainer.save_model(model_identifier)
return {model_identifier: model_identifier}
def log_args(self, _: AutoNLPArguments) -> LogArgs:
conda_env = _mlflow_conda_env(
additional_conda_deps=[],
additional_pip_deps=[
"pandas~={}".format(pd.__version__),
"torch~={}".format(torch.__version__),
"transformers=={}".format(transformers.__version__),
"mlflow=={}".format(mlflow.__version__),
])
return LogArgs(artifact_path="huggingface-pyfunc",
input_example=[
"This is some example sentence. Maybe a second sentence in same context.",
"This is some other sentence."
],
signature=ModelSignature(
Schema([ColSpec(type=DataType.string)]),
Schema([TensorSpec(np.dtype('str'), (-1, -1, 2))])
),
conda_env=conda_env
)
def python_model(self, autonlp_args: AutoNLPArguments) -> PythonModel:
model_identifier = autonlp_args.training_id
return HuggingFaceSequenceClassificationSavable.create_python_model(model_identifier, self.label_list)
@classmethod
def create_python_model(cls, model_identifier, label_list):
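        # the inner class closes over model_identifier and label_list so MLflow can reload them at serving time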
class SequenceClassificationPythonModel(PythonModel):
def __init__(self):
self.trained_model = None
self.tokenizer = None
self.label_list = None
def load_context(self, context):
model_artifact_path = context.artifacts[model_identifier]
self.trained_model = AutoModelForSequenceClassification.from_pretrained(
f"{model_artifact_path}")
self.tokenizer = AutoTokenizer.from_pretrained(
f"{model_artifact_path}")
self.label_list = label_list
def predict(self, context, input_df):
sentences = input_df.values.tolist()
logger.info(f"predict: input={sentences}")
sentences = list(map(lambda sentence: sentence[0], sentences))
pipe = pipeline(
"text-classification", model=self.trained_model, tokenizer=self.tokenizer)
pipe_sentences = []
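                # attach the original input text to each pipeline prediction before returning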
for text, result in zip(sentences, pipe(sentences)):
result["text"] = text
pipe_sentences.append(result)
return pipe_sentences
return SequenceClassificationPythonModel()
| [
"logging.getLogger",
"typing.NewType",
"transformers.AutoModelForSequenceClassification.from_pretrained",
"transformers.AutoTokenizer.from_pretrained",
"numpy.dtype",
"transformers.pipeline",
"mlflow.types.ColSpec"
] | [((666, 687), 'typing.NewType', 'NewType', (['"""Model"""', 'Any'], {}), "('Model', Any)\n", (673, 687), False, 'from typing import Any, Dict, List, NewType\n'), ((700, 725), 'typing.NewType', 'NewType', (['"""Tokenizer"""', 'Any'], {}), "('Tokenizer', Any)\n", (707, 725), False, 'from typing import Any, Dict, List, NewType\n'), ((735, 757), 'typing.NewType', 'NewType', (['"""Config"""', 'Any'], {}), "('Config', Any)\n", (742, 757), False, 'from typing import Any, Dict, List, NewType\n'), ((768, 795), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (785, 795), False, 'import logging\n'), ((3263, 3339), 'transformers.AutoModelForSequenceClassification.from_pretrained', 'AutoModelForSequenceClassification.from_pretrained', (['f"""{model_artifact_path}"""'], {}), "(f'{model_artifact_path}')\n", (3313, 3339), False, 'from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, pipeline\n'), ((3394, 3449), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['f"""{model_artifact_path}"""'], {}), "(f'{model_artifact_path}')\n", (3423, 3449), False, 'from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, pipeline\n'), ((3781, 3869), 'transformers.pipeline', 'pipeline', (['"""text-classification"""'], {'model': 'self.trained_model', 'tokenizer': 'self.tokenizer'}), "('text-classification', model=self.trained_model, tokenizer=self.\n tokenizer)\n", (3789, 3869), False, 'from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, pipeline\n'), ((2366, 2395), 'mlflow.types.ColSpec', 'ColSpec', ([], {'type': 'DataType.string'}), '(type=DataType.string)\n', (2373, 2395), False, 'from mlflow.types import ColSpec, DataType, Schema, TensorSpec\n'), ((2445, 2460), 'numpy.dtype', 'np.dtype', (['"""str"""'], {}), "('str')\n", (2453, 2460), True, 'import numpy as np\n')] |
import rclpy
from rclpy.action import ActionClient
from rclpy.node import Node
from functools import partial
from my_robot_interfaces.action import Test
class TestActionClient(Node):
def __init__(self):
super().__init__('test_action_client')
self.declare_parameter("secs", 2)
self.secs_ = self.get_parameter("secs").value
        self.get_logger().info('Action Client is ready to request moving robots for {} secs'.format(self.secs_))
self.action_client_ = None
self.call_action_server()
def destroy(self):
self.action_client_.destroy()
super().destroy_node()
def call_action_server(self):
self.action_client_ = ActionClient(self, Test, 'my_action_move')
while(not self.action_client_.wait_for_server(1.0)):
self.get_logger().warn("Waiting for server my_action_move to be up")
goal_request = Test.Goal()
goal_request.secs = self.secs_
future = self.action_client_.send_goal_async(goal_request, feedback_callback=self.feedback_callback)
future.add_done_callback(partial(self.callback_action_performed, secs=goal_request.secs))
def feedback_callback(self, feedback_msg):
self.get_logger().info("Feedback received")
self.get_logger().info("Feedback is '{0}' for action \"my_action_move".format(feedback_msg.feedback))
def callback_action_performed(self, future, secs):
try:
goal_handle = future.result()
if not goal_handle.accepted:
self.get_logger().info('Goal rejected :(')
return
self.get_logger().info('Goal accepted :)')
self._get_result_future = goal_handle.get_result_async()
self._get_result_future.add_done_callback(partial(self.get_result_callback, secs=secs))
except Exception as e:
self.get_logger().error("Action call failed %r" % (e,))
def get_result_callback(self, future, secs):
try:
status = future.result().result.status
self.get_logger().info("Final response is {0} for action \"my_action_move\ with secs={1}".format(status, secs))
self.destroy()
# Shutdown after receiving a result
rclpy.shutdown()
except Exception as e:
self.get_logger().error("Action call response reading failed %r" % (e,))
def main(args=None):
rclpy.init(args=args)
test_action_client = TestActionClient()
rclpy.spin(test_action_client)
rclpy.shutdown() # last line of any ROS2 .py node
if __name__ == '__main__':
main() | [
"rclpy.spin",
"rclpy.action.ActionClient",
"functools.partial",
"my_robot_interfaces.action.Test.Goal",
"rclpy.init",
"rclpy.shutdown"
] | [((2464, 2485), 'rclpy.init', 'rclpy.init', ([], {'args': 'args'}), '(args=args)\n', (2474, 2485), False, 'import rclpy\n'), ((2534, 2564), 'rclpy.spin', 'rclpy.spin', (['test_action_client'], {}), '(test_action_client)\n', (2544, 2564), False, 'import rclpy\n'), ((2569, 2585), 'rclpy.shutdown', 'rclpy.shutdown', ([], {}), '()\n', (2583, 2585), False, 'import rclpy\n'), ((716, 758), 'rclpy.action.ActionClient', 'ActionClient', (['self', 'Test', '"""my_action_move"""'], {}), "(self, Test, 'my_action_move')\n", (728, 758), False, 'from rclpy.action import ActionClient\n'), ((926, 937), 'my_robot_interfaces.action.Test.Goal', 'Test.Goal', ([], {}), '()\n', (935, 937), False, 'from my_robot_interfaces.action import Test\n'), ((1121, 1184), 'functools.partial', 'partial', (['self.callback_action_performed'], {'secs': 'goal_request.secs'}), '(self.callback_action_performed, secs=goal_request.secs)\n', (1128, 1184), False, 'from functools import partial\n'), ((2283, 2299), 'rclpy.shutdown', 'rclpy.shutdown', ([], {}), '()\n', (2297, 2299), False, 'import rclpy\n'), ((1811, 1855), 'functools.partial', 'partial', (['self.get_result_callback'], {'secs': 'secs'}), '(self.get_result_callback, secs=secs)\n', (1818, 1855), False, 'from functools import partial\n')] |
import sys
import pygame
def draw_canvas(screen, colors):
screen.fill(colors[0])
pygame.draw.rect(screen, colors[5], (20, 20, 500, 500))
index = 1 # skip light grey
for row in range(2):
for column in range(4):
pygame.draw.rect(screen, colors[index], ((60 * column) + 20, (60 * row) + 530, 60, 60))
index += 1
# Draw clear button
pygame.draw.rect(screen, colors[5], (280, 530, 120, 55))
font = pygame.font.SysFont(None, 48)
text_surface = font.render('Clear', True, colors[0])
screen.blit(text_surface, (295, 540))
# Draw sizing buttons
offset = 0
sizes = ['1', '2', '3']
for i in range(3):
if i > 0:
offset = 5 * i
pygame.draw.rect(screen, colors[5], ((36 * i) + 280 + offset, 595, 36, 36))
text_surface = font.render(sizes[i], True, colors[0])
screen.blit(text_surface, ((36 * i) + 288 + offset, 598))
def fill_square(screen, color, column, row):
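    # map a 100x100 grid cell onto the 500x500 canvas: 5 px per cell, canvas origin at (20, 20)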
pygame.draw.rect(screen, color, (20 + (5 * column), 20 + (5 * row), 5, 5))
pygame.display.update()
def clear_canvas(screen):
pygame.draw.rect(screen, (255, 255, 255), (20, 20, 500, 500)) | [
"pygame.display.update",
"pygame.draw.rect",
"pygame.font.SysFont"
] | [((97, 152), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'colors[5]', '(20, 20, 500, 500)'], {}), '(screen, colors[5], (20, 20, 500, 500))\n', (113, 152), False, 'import pygame\n'), ((405, 461), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'colors[5]', '(280, 530, 120, 55)'], {}), '(screen, colors[5], (280, 530, 120, 55))\n', (421, 461), False, 'import pygame\n'), ((474, 503), 'pygame.font.SysFont', 'pygame.font.SysFont', (['None', '(48)'], {}), '(None, 48)\n', (493, 503), False, 'import pygame\n'), ((1020, 1090), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'color', '(20 + 5 * column, 20 + 5 * row, 5, 5)'], {}), '(screen, color, (20 + 5 * column, 20 + 5 * row, 5, 5))\n', (1036, 1090), False, 'import pygame\n'), ((1100, 1123), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (1121, 1123), False, 'import pygame\n'), ((1160, 1221), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', '(255, 255, 255)', '(20, 20, 500, 500)'], {}), '(screen, (255, 255, 255), (20, 20, 500, 500))\n', (1176, 1221), False, 'import pygame\n'), ((759, 832), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'colors[5]', '(36 * i + 280 + offset, 595, 36, 36)'], {}), '(screen, colors[5], (36 * i + 280 + offset, 595, 36, 36))\n', (775, 832), False, 'import pygame\n'), ((261, 349), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'colors[index]', '(60 * column + 20, 60 * row + 530, 60, 60)'], {}), '(screen, colors[index], (60 * column + 20, 60 * row + 530, \n 60, 60))\n', (277, 349), False, 'import pygame\n')] |
from suds.client import Client
from suds import WebFault
from model.project import Project
class SoapHelper:
def __init__(self, app):
self.app = app
def can_login(self, username, password):
client = Client(self.app.base_url + "api/soap/mantisconnect.php?wsdl")
try:
client.service.mc_login(username, password)
return True
except WebFault:
return False
def get_project_list_user(self, username, password):
self.can_login(username, password)
client = Client(self.app.base_url + "api/soap/mantisconnect.php?wsdl")
try:
response = client.service.mc_projects_get_user_accessible(username, password)
project_list = []
for element in response:
project = Project(id=element.id, name_project=element.name, description_project=element.description)
project_list.append(project)
return project_list
except WebFault:
return False
| [
"suds.client.Client",
"model.project.Project"
] | [((226, 287), 'suds.client.Client', 'Client', (["(self.app.base_url + 'api/soap/mantisconnect.php?wsdl')"], {}), "(self.app.base_url + 'api/soap/mantisconnect.php?wsdl')\n", (232, 287), False, 'from suds.client import Client\n'), ((549, 610), 'suds.client.Client', 'Client', (["(self.app.base_url + 'api/soap/mantisconnect.php?wsdl')"], {}), "(self.app.base_url + 'api/soap/mantisconnect.php?wsdl')\n", (555, 610), False, 'from suds.client import Client\n'), ((808, 903), 'model.project.Project', 'Project', ([], {'id': 'element.id', 'name_project': 'element.name', 'description_project': 'element.description'}), '(id=element.id, name_project=element.name, description_project=\n element.description)\n', (815, 903), False, 'from model.project import Project\n')] |
import pprint
import requests
import argparse
def login_admin(admin_id, admin_secret, url, verbose):
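    # Request an access token via the OAuth2 client-credentials grant, authenticating with the client id/secret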
headers = {'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'application/json'}
payload = {'grant_type': 'client_credentials',
'response_type': 'token',
'token_format': 'opaque'}
response = requests.post(f'http://{url}/oauth/token', headers=headers,
params=payload,
auth=(admin_id, admin_secret))
resp_json = response.json()
if verbose:
pprint.pprint(resp_json)
access_token = resp_json.get('access_token')
if verbose:
print("Access code: {}".format(access_token))
return access_token
def get_user_by_username(token, username, url, verbose):
headers = {'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': 'Bearer {}'.format(token)}
url = f"{url}/Users?filter=userName+eq+%22{username}%22"
response = requests.get(url, headers=headers)
if response.status_code != 200 or response.json()['totalResults'] == 0:
print(f"Couldn't find a user with username {username}")
exit(1)
if verbose:
pprint.pprint(response.json())
return response.json()['resources'][0]['id']
def update_user_details(token, username, user_code, first_name, last_name, email, url, verbose):
headers = {'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': 'Bearer {}'.format(token)}
payload = {'id': user_code,
'userName': username,
'name': {'familyName': last_name,
'givenName': first_name},
'emails': [{'value': email,
'primary': True}]}
url = f"{url}/Users/{user_code}"
response = requests.put(url, headers=headers, payload=payload)
if response.status_code != 200:
print(f"Error updating user {username} in UAA, status code {response.status_code}")
exit(1)
if verbose:
print(response.status_code)
pprint.pprint(response.json())
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Change client secret")
parser.add_argument("-a", "--admin_id", required=True, dest="admin_id", help="The admin id for the organization")
parser.add_argument("-as", "--admin_secret", required=True, dest="admin_secret",
help="The admin password for the organization")
parser.add_argument("-url", "--url", required=True, dest="url",
help="The UAA url to target")
parser.add_argument("-u", "--userid", required=True, dest="userid", help="The id of the user requiring a password change")
parser.add_argument("-f", "--first_name", required=True, dest="first_name", help="The user's first name")
parser.add_argument("-l", "--last_name", required=True, dest="last_name", help="The user's last name")
parser.add_argument("-e", "--email", required=True, dest="email", help="The user's email")
parser.add_argument("-v", "--verbose", default=False, required=False, dest="verbose",
help="To enable verbose output", action="store_true")
args = parser.parse_args()
# Get an admin access token
token = login_admin(args.admin_id, args.admin_secret, args.url, verbose=args.verbose)
# Get the user ID as stored in UAA
    user_code = get_user_by_username(token=token, username=args.userid, url=args.url, verbose=args.verbose)
    update_user_details(token=token, username=args.userid, user_code=user_code, first_name=args.first_name,
                        last_name=args.last_name, email=args.email, url=args.url, verbose=args.verbose)
| [
"requests.post",
"argparse.ArgumentParser",
"requests.get",
"requests.put",
"pprint.pprint"
] | [((364, 474), 'requests.post', 'requests.post', (['f"""http://{url}/oauth/token"""'], {'headers': 'headers', 'params': 'payload', 'auth': '(admin_id, admin_secret)'}), "(f'http://{url}/oauth/token', headers=headers, params=payload,\n auth=(admin_id, admin_secret))\n", (377, 474), False, 'import requests\n'), ((1042, 1076), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (1054, 1076), False, 'import requests\n'), ((1902, 1953), 'requests.put', 'requests.put', (['url'], {'headers': 'headers', 'payload': 'payload'}), '(url, headers=headers, payload=payload)\n', (1914, 1953), False, 'import requests\n'), ((2232, 2291), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Change client secret"""'}), "(description='Change client secret')\n", (2255, 2291), False, 'import argparse\n'), ((585, 609), 'pprint.pprint', 'pprint.pprint', (['resp_json'], {}), '(resp_json)\n', (598, 609), False, 'import pprint\n')] |
"Code used to generate data for experiments with synthetic data"
import math
import typing as ty
import numba
import numpy as np
import torch
import torch.nn as nn
from numba.experimental import jitclass
from tqdm.auto import tqdm
class MLP(nn.Module):
def __init__(
self,
*,
d_in: int,
d_layers: ty.List[int],
d_out: int,
bias: bool = True,
) -> None:
super().__init__()
self.layers = nn.ModuleList(
[
nn.Linear(d_layers[i - 1] if i else d_in, x, bias=bias)
for i, x in enumerate(d_layers)
]
)
self.head = nn.Linear(d_layers[-1] if d_layers else d_in, d_out)
def init_weights(m):
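            # Kaiming-normal weights; the bias bound 1/sqrt(fan_in) mirrors PyTorch's default nn.Linear init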
if isinstance(m, nn.Linear):
torch.nn.init.kaiming_normal_(m.weight, mode='fan_in')
if m.bias is not None:
fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
torch.nn.init.uniform_(m.bias, -bound, bound)
self.apply(init_weights)
def forward(self, x: torch.Tensor) -> torch.Tensor:
for layer in self.layers:
x = layer(x)
x = torch.relu(x)
x = self.head(x)
x = x.squeeze(-1)
return x
@jitclass(
spec=[
('left_children', numba.int64[:]),
('right_children', numba.int64[:]),
('feature', numba.int64[:]),
('threshold', numba.float32[:]),
('value', numba.float32[:]),
('is_leaf', numba.int64[:]),
]
)
class Tree:
"Randomly initialized decision tree"
def __init__(self, n_features, n_nodes, max_depth):
        assert (2 ** np.arange(max_depth + 1)).sum() >= n_nodes, "Too many nodes"
self.left_children = np.ones(n_nodes, dtype=np.int64) * -1
self.right_children = np.ones(n_nodes, dtype=np.int64) * -1
self.feature = np.random.randint(0, n_features, (n_nodes,))
self.threshold = np.random.randn(n_nodes).astype(np.float32)
self.value = np.random.randn(n_nodes).astype(np.float32)
depth = np.zeros(n_nodes, dtype=np.int64)
# Root is 0
self.is_leaf = np.zeros(n_nodes, dtype=np.int64)
self.is_leaf[0] = 1
# Keep adding nodes while we can (new node must have 2 children)
while True:
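            # pick a random current leaf; if it is shallow enough, split it by attaching two unused nodes as children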
idx = np.flatnonzero(self.is_leaf)[np.random.choice(self.is_leaf.sum())]
if depth[idx] < max_depth:
unused = np.flatnonzero(
(self.left_children == -1)
& (self.right_children == -1)
& ~self.is_leaf
)
if len(unused) < 2:
break
lr_child = unused[np.random.permutation(unused.shape[0])[:2]]
                self.is_leaf[lr_child] = 1
depth[lr_child] = depth[idx] + 1
self.left_children[idx] = lr_child[0]
self.right_children[idx] = lr_child[1]
self.is_leaf[idx] = 0
def apply(self, x):
y = np.zeros(x.shape[0])
for i in range(x.shape[0]):
idx = 0
while not self.is_leaf[idx]:
if x[i, self.feature[idx]] < self.threshold[idx]:
idx = self.left_children[idx]
else:
idx = self.right_children[idx]
y[i] = self.value[idx]
return y
class TreeEnsemble:
"Combine multiple trees"
def __init__(self, *, n_trees, n_features, n_nodes, max_depth):
self.trees = [
Tree(n_features=n_features, n_nodes=n_nodes, max_depth=max_depth)
for _ in range(n_trees)
]
def apply(self, x):
return np.mean([t.apply(x) for t in tqdm(self.trees)], axis=0)
| [
"numpy.ones",
"numpy.flatnonzero",
"torch.relu",
"torch.nn.init.kaiming_normal_",
"numba.experimental.jitclass",
"math.sqrt",
"torch.nn.init._calculate_fan_in_and_fan_out",
"numpy.random.randint",
"numpy.zeros",
"torch.nn.init.uniform_",
"torch.nn.Linear",
"tqdm.auto.tqdm",
"numpy.random.ran... | [((1341, 1556), 'numba.experimental.jitclass', 'jitclass', ([], {'spec': "[('left_children', numba.int64[:]), ('right_children', numba.int64[:]), (\n 'feature', numba.int64[:]), ('threshold', numba.float32[:]), ('value',\n numba.float32[:]), ('is_leaf', numba.int64[:])]"}), "(spec=[('left_children', numba.int64[:]), ('right_children', numba.\n int64[:]), ('feature', numba.int64[:]), ('threshold', numba.float32[:]),\n ('value', numba.float32[:]), ('is_leaf', numba.int64[:])])\n", (1349, 1556), False, 'from numba.experimental import jitclass\n'), ((654, 706), 'torch.nn.Linear', 'nn.Linear', (['(d_layers[-1] if d_layers else d_in)', 'd_out'], {}), '(d_layers[-1] if d_layers else d_in, d_out)\n', (663, 706), True, 'import torch.nn as nn\n'), ((1960, 2004), 'numpy.random.randint', 'np.random.randint', (['(0)', 'n_features', '(n_nodes,)'], {}), '(0, n_features, (n_nodes,))\n', (1977, 2004), True, 'import numpy as np\n'), ((2155, 2188), 'numpy.zeros', 'np.zeros', (['n_nodes'], {'dtype': 'np.int64'}), '(n_nodes, dtype=np.int64)\n', (2163, 2188), True, 'import numpy as np\n'), ((2233, 2266), 'numpy.zeros', 'np.zeros', (['n_nodes'], {'dtype': 'np.int64'}), '(n_nodes, dtype=np.int64)\n', (2241, 2266), True, 'import numpy as np\n'), ((3165, 3185), 'numpy.zeros', 'np.zeros', (['x.shape[0]'], {}), '(x.shape[0])\n', (3173, 3185), True, 'import numpy as np\n'), ((1256, 1269), 'torch.relu', 'torch.relu', (['x'], {}), '(x)\n', (1266, 1269), False, 'import torch\n'), ((1831, 1863), 'numpy.ones', 'np.ones', (['n_nodes'], {'dtype': 'np.int64'}), '(n_nodes, dtype=np.int64)\n', (1838, 1863), True, 'import numpy as np\n'), ((1899, 1931), 'numpy.ones', 'np.ones', (['n_nodes'], {'dtype': 'np.int64'}), '(n_nodes, dtype=np.int64)\n', (1906, 1931), True, 'import numpy as np\n'), ((506, 561), 'torch.nn.Linear', 'nn.Linear', (['(d_layers[i - 1] if i else d_in)', 'x'], {'bias': 'bias'}), '(d_layers[i - 1] if i else d_in, x, bias=bias)\n', (515, 561), True, 'import torch.nn as nn\n'), ((794, 848), 'torch.nn.init.kaiming_normal_', 'torch.nn.init.kaiming_normal_', (['m.weight'], {'mode': '"""fan_in"""'}), "(m.weight, mode='fan_in')\n", (823, 848), False, 'import torch\n'), ((2030, 2054), 'numpy.random.randn', 'np.random.randn', (['n_nodes'], {}), '(n_nodes)\n', (2045, 2054), True, 'import numpy as np\n'), ((2095, 2119), 'numpy.random.randn', 'np.random.randn', (['n_nodes'], {}), '(n_nodes)\n', (2110, 2119), True, 'import numpy as np\n'), ((2407, 2435), 'numpy.flatnonzero', 'np.flatnonzero', (['self.is_leaf'], {}), '(self.is_leaf)\n', (2421, 2435), True, 'import numpy as np\n'), ((2538, 2631), 'numpy.flatnonzero', 'np.flatnonzero', (['((self.left_children == -1) & (self.right_children == -1) & ~self.is_leaf)'], {}), '((self.left_children == -1) & (self.right_children == -1) & ~\n self.is_leaf)\n', (2552, 2631), True, 'import numpy as np\n'), ((920, 973), 'torch.nn.init._calculate_fan_in_and_fan_out', 'torch.nn.init._calculate_fan_in_and_fan_out', (['m.weight'], {}), '(m.weight)\n', (963, 973), False, 'import torch\n'), ((1044, 1089), 'torch.nn.init.uniform_', 'torch.nn.init.uniform_', (['m.bias', '(-bound)', 'bound'], {}), '(m.bias, -bound, bound)\n', (1066, 1089), False, 'import torch\n'), ((3864, 3880), 'tqdm.auto.tqdm', 'tqdm', (['self.trees'], {}), '(self.trees)\n', (3868, 3880), False, 'from tqdm.auto import tqdm\n'), ((1006, 1023), 'math.sqrt', 'math.sqrt', (['fan_in'], {}), '(fan_in)\n', (1015, 1023), False, 'import math\n'), ((1740, 1764), 'numpy.arange', 'np.arange', (['(max_depth + 1)'], {}), 
'(max_depth + 1)\n', (1749, 1764), True, 'import numpy as np\n'), ((2802, 2840), 'numpy.random.permutation', 'np.random.permutation', (['unused.shape[0]'], {}), '(unused.shape[0])\n', (2823, 2840), True, 'import numpy as np\n')] |