seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
12009423058 | from __future__ import annotations
from dataclasses import fields
from typing import Tuple
import numpy as np
from napari.layers import Image
def update_layer_contrast_limits(
    layer: Image,
    contrast_limits_quantiles: Tuple[float, float] = (0.01, 0.98),
    contrast_limits_range_quantiles: Tuple[float, float] = (0.0, 1.0),
) -> None:
    """Set a napari image layer's contrast limits from quantiles of its
    nonzero pixel values.

    Parameters
    ----------
    layer : Image
        The napari image layer to update in place.
    contrast_limits_quantiles : Tuple[float, float]
        Quantiles used for the displayed contrast limits.
    contrast_limits_range_quantiles : Tuple[float, float]
        Quantiles used for the contrast-limits slider range.

    If the layer contains no positive values, the layer is left untouched.
    """
    positive = layer.data > 0
    if not positive.any():
        # nothing to base quantiles on — keep the layer as-is
        return
    quantiles = (*contrast_limits_quantiles, *contrast_limits_range_quantiles)
    lo, hi, range_lo, range_hi = np.quantile(layer.data[positive], quantiles)
    # tiny epsilon keeps the upper bound strictly above the lower bound
    layer.contrast_limits = (lo, hi + 1e-8)
    layer.contrast_limits_range = (range_lo, range_hi + 1e-8)
def array_safe_eq(a, b) -> bool:
    """Check if a and b are equal, even if they are numpy arrays.

    Returns a plain bool in every case.  Fixes two defects of the original:
    the ``TypeError`` branch returned ``NotImplemented`` (which is truthy, so
    incomparable values were silently treated as *equal* by callers such as
    ``all(...)``), and comparing an ndarray against a plain sequence produced
    an elementwise array whose truth value is ambiguous.
    """
    if a is b:
        return True
    if isinstance(a, np.ndarray) or isinstance(b, np.ndarray):
        # array_equal handles shape mismatches and ndarray-vs-sequence
        return bool(np.array_equal(a, b))
    try:
        return bool(a == b)
    except TypeError:
        # values that cannot be compared are not considered equal
        return False
def dataclass_eq(dc1, dc2) -> bool:
    """Check whether two dataclass instances (which may hold numpy arrays)
    are equal, comparing them field by field via ``array_safe_eq``.

    Returns ``NotImplemented`` for mismatched classes so this can serve as an
    ``__eq__`` implementation and defer to the reflected comparison.
    """
    if dc1 is dc2:
        return True
    if dc1.__class__ is not dc2.__class__:
        return NotImplemented
    return all(
        array_safe_eq(getattr(dc1, f.name), getattr(dc2, f.name))
        for f in fields(dc1)
    )
| bkntr/napari-brainways | src/napari_brainways/utils.py | utils.py | py | 1,473 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "napari.layers.Image",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "numpy.quantile",
"l... |
24270752412 | import numpy as np
import matplotlib.pyplot as plt
import os
import torch
import torchvision
import numpy as np
from torchvision import transforms
from sklearn.metrics import precision_recall_curve, average_precision_score, auc, roc_auc_score, roc_curve
import matplotlib.pyplot as plt
from config import *
import random
# Per-class Gaussian fitting of embedding features and Mahalanobis-distance
# based OOD (out-of-distribution) evaluation.
# NOTE(review): `seed` and `train_fraction` are not defined in this file —
# presumably they come from `from config import *`; confirm.
maha_dists = np.load('maha_dists.npy',allow_pickle=True)
input_data = np.load('/network/tmp1/bhattdha/detectron2_kitti/embeddings_storage/final_data.npy', allow_pickle=True)[()]
input_data_OOD = np.load('/network/tmp1/bhattdha/detectron2_kitti/embeddings_storage/final_data_OOD.npy', allow_pickle=True)[()]
## the dataset
X_org = input_data['features']
y_org = input_data['labels']
# X_ood = input_data_OOD['features']
# y_ood = input_data_OOD['labels']
# y_ood[y_ood == 6] = 5
# y_ood[y_ood == 7] = 5
# ood_class = [5, 6, 7]
X = X_org
y = y_org
## total reproducibility
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
X_ood = input_data_OOD['features']
y_ood = input_data_OOD['labels']
val_data_all_classes = {}
means = {}   # per-class feature means, keyed by str(class_label)
covars = {}  # per-class inverse covariance (precision) matrices
# train_fraction = 0.7
num_classes = max(y)+1
class_labels = np.arange(num_classes)
# class_labels = np.random.permutation(num_classes)
# class_labels = np.random.permutation(class_labels)
for class_label in class_labels:
    print('Training Class# {}'.format(class_label))
    indices = np.where(y==class_label)[0]
    # indices = np.random.permutation(indices)
    data = X[indices,:]
    # class_label_fake = (class_label + 5)%len(class_labels)
    # indices_fake = np.where(y==class_label_fake)[0]
    # val_other_class_data = X[indices_fake,:]
    # data = np.random.permutation(data)
    train_data_samples = int(len(data)*train_fraction)
    val_data_samples = int(len(data) - train_data_samples)
    # features are scaled by 1e2 before fitting; the same scale factor is
    # applied to OOD points below so distances remain comparable
    train_data = 1e2*data[:train_data_samples,:]
    val_data = 1e2*data[train_data_samples:, :]
    # data = {'train_data': train_data, 'val_data': val_data}
    val_data_all_classes[str(class_label)] = val_data
    mean = np.mean(train_data, axis = 0)
    cov = np.cov(train_data.T)
    means[str(class_label)] = mean
    ## may be wrong!
    # store the precision matrix; the 1e-10*I term regularizes a possibly
    # singular covariance. Hard-codes 1024-dim embeddings — TODO confirm.
    covars[str(class_label)] = np.linalg.inv(cov + 1e-10*np.identity(1024))
# column 5 of maha_dists holds an integer class id; presumably the remaining
# columns hold per-class Mahalanobis distances — verify against the producer
maha_class = maha_dists[:,5].astype(int)
maha_true_dist = []
maha_false_dist = []
# for ind, m in enumerate(maha_dists):
#     maha_true_dist.append(m[maha_class[ind]])
#     m[maha_class[ind]] = np.inf
#     m[5] = np.inf
#     maha_false_dist.append(m.min())
## loading the results
# maha_true_dist = np.array(maha_true_dist)
# maha_false_dist = np.array(maha_false_dist)
# input_data_OOD = np.load('/network/tmp1/bhattdha/detectron2_kitti/embeddings_storage/final_data_OOD.npy', allow_pickle=True)[()]
# X_ood = input_data_OOD['features']
# y_ood = input_data_OOD['labels']
# sweep several in-distribution accuracy targets to derive per-class
# distance thresholds, then evaluate how OOD points are classified
acc_threshs = [60.0, 70.0, 80.0, 85.0, 90.0, 95.0]
ood_stats = {}
for acc_thresh in acc_threshs:
    print("For accuracy: ", acc_thresh)
    mahathresh = {}
    class_dist = {}
    for i in range(num_classes):
        # distances of samples with class id i, sorted descending
        class_dist[i] = maha_dists[maha_dists[:,5]==i][:,i]
        class_dist[i].sort()
        class_dist[i] = class_dist[i][::-1]
        # threshold keeping acc_thresh% of in-distribution samples inside
        index = int(len(class_dist[i]) - len(class_dist[i])*acc_thresh/100.0)
        mahathresh[i] = class_dist[i][index]
    # mahathresh = {0: 3093.944707607109, 1: 5710.413855647991, 2: 28235.425795092746, 3: 79163.39452332728, 4: 2313.9860080440644}
    tp = 0
    fp = 0
    for x in X_ood:
        data_point = 1e2*x
        flag = True
        mds = [] ## has mahalanobis distances
        for mean_label in means.keys():
            diff = (data_point - means[mean_label]).reshape(len(data_point), 1)
            # squared Mahalanobis distance: diff^T * precision * diff
            mahalanobis_distance = np.dot(diff.T, np.dot(covars[mean_label], diff))[0][0]
            # maha_all.append(mahalanobis_distance)
            mds.append(mahalanobis_distance)
        # an OOD point counts as tp only if it exceeds every class threshold
        for i in mahathresh.keys():
            if mahathresh[i] > mds[i]:
                fp += 1
                flag = False
                break
            else:
                continue
        if flag:
            tp += 1
    ood_stats[acc_thresh] = {'tp':tp, 'fp':fp, 'accuracy': tp/(tp+fp)}
import ipdb; ipdb.set_trace()  # interactive inspection of ood_stats
colors = ['C'+str(i+1) for i in range(5)]
for i in range(4):
    # NOTE: class_dist holds values from the last acc_thresh iteration only
    plt.plot(class_dist[i], '-o', alpha=0.7, color=colors[i], label="class maha dists"+str(i).zfill(5))
# plt.plot(maha_false_dist, '-o', alpha=0.7, color=colors[1], label="maha_false_dist")
# [1e3, 1e4, 1e3]
plt.legend()
plt.legend(loc='upper right')
plt.xlabel('datapoint ->')
plt.ylabel('mahalanobis distance -> ')
plt.title('Mahalanobis distance plot')
plt.savefig('maha_dists.png')
import ipdb; ipdb.set_trace() | dhaivat1729/detectron2_CL | generative_classifier/maha_dist_analysis.py | maha_dist_analysis.py | py | 4,431 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.load",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.manual_seed",
"line_number... |
74957082106 | # Sinw wave plot tool
import numpy as np
import matplotlib.pyplot as plt
# Plot a simple trigonometric curve over roughly [-2*pi, 2*pi].
# NOTE(review): despite the file's "sine" name, the active line below plots
# tan(f*x); the sin version is commented out.
f = 0.5  # frequency of the wave
# f =2
A = 5  # maximum amplitude of the wave
# A = 1
x = np.arange(-6.28, 6.28, 0.01)  # array from ~-2*pi to ~+2*pi, step 0.01
# x = np.arange(-3.14, 3.14, 0.01)
#y = A*np.sin(f*x)
y = A*np.tan(f*x)
plt.plot(x, y)
plt.xlabel('angle')
plt.ylabel('amplitude')
plt.show()
| dilshad-geol/IRIS-2022-Seismology-Skill-Building-Workshop | 00_UNIX_DataFiles/python/numpy/sine.py | sine.py | py | 397 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.arange",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.tan",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
... |
12643025727 | from playsound import playsound
import os
import pandas as pd
# Interactive labelling tool: plays every audio file in `path` and asks the
# operator to mark it clean ("c") or noisy ("n"); results go to data.json.
path = "audio/"  # path to the dataset
files = os.listdir(path)
df = pd.DataFrame([], columns=["file_name", "label"])
for file, i in zip(files, range(len(files))):
    print("Currently playing " + file)
    # blocks until playback finishes
    playsound(path + file)
    label = input("Please, provide the label(n for noisy and c for clean audio files): ")
    # re-prompt until a valid single-letter label is given
    while(label != "c" and label != "n"):
        label = input("Provided label is neither n nor c. Try again... ")
    df.loc[i] = [file, label]
# one JSON object per labelled file
df.to_json("data.json", orient="records")
| Barsegh-A/audio-labelling | script.py | script.py | py | 556 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.listdir",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "playsound.playsound",
"line_number": 12,
"usage_type": "call"
}
] |
27250989956 | import torch.nn.functional as F
import torch.nn as nn
import torch
# Minimal demo of a single-filter 2D convolution with F.conv2d.
# NOTE(review): these tensors are integer (Long); recent PyTorch versions may
# reject non-floating inputs to conv2d — confirm, or cast to float.
filters = torch.tensor([[[[2, 0, 0],
                          [1, 0, 1],
                          [0, 3, 0]],
                         [[1, 0, 1],
                          [0, 0, 0],
                          [1, 1, 0]],
                         [[0, 0, 1],
                          [1, 1, 1],
                          [1, 1, 0]]]])  # [1,3,3,3] [filter_nums/output_channels,input_channels,high,width]
inputs = torch.tensor([0, 2, 0, 1, 0, 0, 2, 0, 1, 1, 2, 1, 2, 0, 0, 1, 0, 0, 1, 0, -1, 1, 1, 0, 1,
                       0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0,
                       # [batch_size,in_channels,high,width]
                       1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0]).reshape([1, 3, 5, 5])
bias = torch.tensor([1])
# 3x3 kernel over a 5x5 input with stride 1, no padding -> 3x3 output
result = F.conv2d(inputs, filters, bias=bias, stride=1, padding=0)
print("输入数据为:", inputs)
print("输入数据的形状为:", inputs.shape)
print("卷积核的形状:[filter_nums/output_channels,input_channels,high,width] ==>", filters.shape)
print("卷积后的结果:", result)
print("结果的形状:", result.shape)
| moon-hotel/DeepLearningWithMe | Archived/02_ConvolutionalNN/01_CNNOP/01_CnnOpSingleFilter.py | 01_CnnOpSingleFilter.py | py | 1,225 | python | en | code | 116 | github-code | 6 | [
{
"api_name": "torch.tensor",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.conv2d",
... |
33153414975 | import dgl
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import dgl.function as fn
from dgl.nn.pytorch import edge_softmax
class GCNLayer(nn.Module):
    """Single graph-convolution layer with symmetric degree normalization.

    Expects ``g.ndata['norm']`` to hold a per-node D^{-1/2} factor (set up by
    the caller); features are multiplied by it before and after neighborhood
    aggregation, giving the usual D^{-1/2} A D^{-1/2} propagation.
    """

    def __init__(self,
                 in_feats,
                 out_feats,
                 activation,
                 dropout,
                 bias=True):
        super(GCNLayer, self).__init__()
        self.weight = nn.Parameter(torch.Tensor(in_feats, out_feats))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_feats))
        else:
            self.bias = None
        self.activation = activation
        if dropout:
            self.dropout = nn.Dropout(p=dropout)
        else:
            # falsy sentinel: forward() skips dropout entirely
            self.dropout = 0.
        self.reset_parameters()

    def reset_parameters(self):
        '''uniform init.
        '''
        stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, g, h):
        # local_var() keeps the ndata mutations below from leaking to callers
        g = g.local_var()
        if self.dropout:
            h = self.dropout(h)
        h = torch.mm(h, self.weight)
        # normalization by square root of src degree
        h = h * g.ndata['norm']
        g.ndata['h'] = h
        # NOTE(review): fn.copy_src is deprecated in newer DGL (use fn.copy_u)
        g.update_all(fn.copy_src(src='h', out='m'),
                     fn.sum(msg='m', out='h'))
        h = g.ndata.pop('h')
        # normalization by square root of dst degree
        h = h * g.ndata['norm']
        # bias
        if self.bias is not None:
            h = h + self.bias
        if self.activation:
            h = self.activation(h)
        return h
class GATLayer(nn.Module):
    r"""Apply `Graph Attention Network <https://arxiv.org/pdf/1710.10903.pdf>`__
    over an input signal.

    .. math::
        h_i^{(l+1)} = \sum_{j\in \mathcal{N}(i)} \alpha_{i,j} W^{(l)} h_j^{(l)}

    where :math:`\alpha_{ij}` is the attention score between node :math:`i` and
    node :math:`j`:

    .. math::
        \alpha_{ij}^{l} & = \mathrm{softmax_i} (e_{ij}^{l})

        e_{ij}^{l} & = \mathrm{LeakyReLU}\left(\vec{a}^T [W h_{i} \| W h_{j}]\right)

    Parameters
    ----------
    in_feats : int
        Input feature size.
    out_feats : int
        Output feature size.
    num_heads : int
        Number of heads in Multi-Head Attention.
    feat_drop : float, optional
        Dropout rate on feature, defaults: ``0``.
    attn_drop : float, optional
        Dropout rate on attention weight, defaults: ``0``.
    negative_slope : float, optional
        LeakyReLU angle of negative slope.
    residual : bool, optional
        If True, use residual connection.
    activation : callable activation function/layer or None, optional.
        If not None, applies an activation function to the updated node features.
        Default: ``None``.
    """

    def __init__(self,
                 in_feats,
                 out_feats,
                 num_heads,
                 feat_drop=0.,
                 attn_drop=0.,
                 negative_slope=0.2,
                 residual=False,
                 activation=None):
        super(GATLayer, self).__init__()
        self._num_heads = num_heads
        self._in_feats = in_feats
        self._out_feats = out_feats
        # one shared projection for all heads; reshaped to (N, H, D') in forward
        self.fc = nn.Linear(in_feats, out_feats * num_heads, bias=False)
        # per-head attention vector, split into src ("l") and dst ("r") halves
        self.attn_l = nn.Parameter(torch.FloatTensor(size=(1, num_heads, out_feats)))
        self.attn_r = nn.Parameter(torch.FloatTensor(size=(1, num_heads, out_feats)))
        self.feat_drop = nn.Dropout(feat_drop)
        self.attn_drop = nn.Dropout(attn_drop)
        self.leaky_relu = nn.LeakyReLU(negative_slope)
        if residual:
            if in_feats != out_feats:
                self.res_fc = nn.Linear(in_feats, num_heads * out_feats, bias=False)
            else:
                # identity shortcut when shapes already match
                self.res_fc = lambda x: x
        else:
            self.register_buffer('res_fc', None)
        self.reset_parameters()
        self.activation = activation

    def reset_parameters(self):
        """Reinitialize learnable parameters."""
        gain = nn.init.calculate_gain('relu')
        nn.init.xavier_normal_(self.fc.weight, gain=gain)
        nn.init.xavier_normal_(self.attn_l, gain=gain)
        nn.init.xavier_normal_(self.attn_r, gain=gain)
        if isinstance(self.res_fc, nn.Linear):
            nn.init.xavier_normal_(self.res_fc.weight, gain=gain)

    def forward(self, graph, feat):
        r"""Compute graph attention network layer.

        Parameters
        ----------
        graph : DGLGraph
            The graph.
        feat : torch.Tensor
            The input feature of shape :math:`(N, D_{in})` where :math:`D_{in}`
            is size of input feature, :math:`N` is the number of nodes.

        Returns
        -------
        torch.Tensor
            The output feature of shape :math:`(N, H, D_{out})` where :math:`H`
            is the number of heads, and :math:`D_{out}` is size of output feature.
        """
        graph = graph.local_var()
        h = self.feat_drop(feat)
        feat = self.fc(h).view(-1, self._num_heads, self._out_feats)
        # a^T [Wh_i || Wh_j] decomposed into two per-node dot products
        el = (feat * self.attn_l).sum(dim=-1).unsqueeze(-1)
        er = (feat * self.attn_r).sum(dim=-1).unsqueeze(-1)
        graph.ndata.update({'ft': feat, 'el': el, 'er': er})
        # compute edge attention
        graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
        e = self.leaky_relu(graph.edata.pop('e'))
        # compute softmax
        graph.edata['a'] = self.attn_drop(edge_softmax(graph, e))
        # message passing
        graph.update_all(fn.u_mul_e('ft', 'a', 'm'),
                         fn.sum('m', 'ft'))
        rst = graph.ndata['ft']
        # residual
        if self.res_fc is not None:
            resval = self.res_fc(h).view(h.shape[0], -1, self._out_feats)
            rst = rst + resval
        # activation
        if self.activation:
            rst = self.activation(rst)
        return rst
def adaptive_message_func(edges):
    """Message function: forward each source node's features ('h') and
    logits to its destination for metric computation and update."""
    source = edges.src
    return {'feat': source['h'], 'logits': source['logits']}
def adaptive_attn_message_func(edges):
    """Attention-weighted message: scale source features ('ft') by the
    per-edge attention score 'a' and forward logits plus the score."""
    attn = edges.data['a']
    return {
        'feat': edges.src['ft'] * attn,
        'logits': edges.src['logits'],
        'a': attn,
    }
def adaptive_attn_reduce_func(nodes):
    """Reduce function for the attention variant: computes the two gating
    metrics (f1: attention-weighted agreement with the center prediction,
    f2: entropy of attention-weighted neighborhood predictions) plus the
    aggregated neighbor features.
    """
    # (n_nodes, n_edges, n_classes)
    _, pred = torch.max(nodes.mailbox['logits'], dim=2)
    _, center_pred = torch.max(nodes.data['logits'], dim=1)
    n_degree = nodes.data['degree']
    # case 1
    # ratio of common predictions
    a = nodes.mailbox['a'].squeeze(3)  # (n_node, n_neighbor, n_head, 1)
    n_head = a.size(2)
    idxs = torch.eq(pred, center_pred.unsqueeze(1)).unsqueeze(2).expand_as(a)
    f1 = torch.div(torch.sum(a*idxs, dim=1), n_degree.unsqueeze(1))  # (n_node, n_head)
    # detach: gating metrics are not back-propagated through
    f1 = f1.detach()
    # case 2
    # entropy of neighborhood predictions
    uniq = torch.unique(pred)
    # (n_unique)
    # NOTE(review): hard-coded .cuda() — fails on CPU-only runs; confirm GPU
    cnts_p = torch.zeros((pred.size(0), n_head, uniq.size(0),)).cuda()
    for i, val in enumerate(uniq):
        idxs = torch.eq(pred, val).unsqueeze(2).expand_as(a)
        tmp = torch.div(torch.sum(a*idxs, dim=1), n_degree.unsqueeze(1))  # (n_nodes, n_head)
        cnts_p[:, :, i] = tmp
    # clamp avoids log(0)
    cnts_p = torch.clamp(cnts_p, min=1e-5)
    f2 = (-1) * torch.sum(cnts_p * torch.log(cnts_p), dim=2)
    f2 = f2.detach()
    neighbor_agg = torch.sum(nodes.mailbox['feat'], dim=1)  # (n_node, n_head, n_feat)
    return {
        'f1': f1,
        'f2': f2,
        'agg': neighbor_agg,
    }
def adaptive_reduce_func(nodes):
    '''
    compute metrics and determine if we need to do neighborhood aggregation.

    f1: fraction of neighbors whose argmax prediction matches the center
    node's; f2: entropy of the neighborhood prediction distribution.
    '''
    # (n_nodes, n_edges, n_classes)
    _, pred = torch.max(nodes.mailbox['logits'], dim=2)
    _, center_pred = torch.max(nodes.data['logits'], dim=1)
    n_degree = nodes.data['degree']
    # case 1
    # ratio of common predictions
    f1 = torch.sum(torch.eq(pred, center_pred.unsqueeze(1)), dim=1)/n_degree
    # detach: gating metrics are not back-propagated through
    f1 = f1.detach()
    # case 2
    # entropy of neighborhood predictions
    uniq = torch.unique(pred)
    # (n_unique)
    # NOTE(review): hard-coded .cuda() — fails on CPU-only runs; confirm GPU
    cnts_p = torch.zeros((pred.size(0), uniq.size(0),)).cuda()
    for i, val in enumerate(uniq):
        tmp = torch.sum(torch.eq(pred, val), dim=1)/n_degree
        cnts_p[:, i] = tmp
    # clamp avoids log(0)
    cnts_p = torch.clamp(cnts_p, min=1e-5)
    f2 = (-1) * torch.sum(cnts_p * torch.log(cnts_p), dim=1)
    f2 = f2.detach()
    return {
        'f1': f1,
        'f2': f2,
    }
class GatedAttnLayer(nn.Module):
    """Attention-based adaptive propagation layer.

    Combines GAT-style multi-head attention with a learned gate ``z`` derived
    from two neighborhood-agreement metrics (f1, f2); the gate is monotonically
    decreasing across layers via ``min(old_z, z)`` and scales how much
    aggregated neighbor information is added to each node's own features.
    """

    def __init__(self, g, in_feats, out_feats, activation, dropout, num_heads,
                 attn_drop=0.,
                 negative_slope=0.2, lidx=1):
        super(GatedAttnLayer, self).__init__()
        self._num_heads = num_heads
        self._in_feats = in_feats
        self._out_feats = out_feats
        if in_feats != out_feats:
            self.fc = nn.Linear(in_feats, out_feats * num_heads, bias=False)  # for first layer
        self.feat_drop = nn.Dropout(dropout)
        self.attn_drop = nn.Dropout(attn_drop)
        self.leaky_relu = nn.LeakyReLU(negative_slope)
        self.activation = activation
        # per-layer gating thresholds (used when shared_tau is False)
        self.tau_1 = nn.Parameter(torch.zeros((1,)))
        self.tau_2 = nn.Parameter(torch.zeros((1,)))
        # layer norms over (n_nodes, n_heads) metric matrices; ties the
        # module to a fixed graph size
        self.ln_1 = nn.LayerNorm((g.number_of_nodes(), num_heads), elementwise_affine=False)
        self.ln_2 = nn.LayerNorm((g.number_of_nodes(), num_heads), elementwise_affine=False)
        self.reset_parameters(lidx)

    def reset_parameters(self, lidx, how='layerwise'):
        # initialize the gating thresholds; 'layerwise' scales by layer index
        gain = nn.init.calculate_gain('relu')
        if how == 'normal':
            nn.init.normal_(self.tau_1)
            nn.init.normal_(self.tau_2)
        else:
            nn.init.constant_(self.tau_1, 1/(lidx+1))
            nn.init.constant_(self.tau_2, 1/(lidx+1))
        return

    def forward(self, g, h, logits, old_z, attn_l, attn_r, shared_tau=True, tau_1=None, tau_2=None):
        g = g.local_var()
        if self.feat_drop:
            h = self.feat_drop(h)
        if hasattr(self, 'fc'):
            feat = self.fc(h).view(-1, self._num_heads, self._out_feats)
        else:
            feat = h
        g.ndata['h'] = feat  # (n_node, n_feat)
        g.ndata['logits'] = logits
        # GAT-style attention logits from caller-provided head vectors
        el = (feat * attn_l).sum(dim=-1).unsqueeze(-1)
        er = (feat * attn_r).sum(dim=-1).unsqueeze(-1)
        g.ndata.update({'ft': feat, 'el': el, 'er': er})
        # compute edge attention
        g.apply_edges(fn.u_add_v('el', 'er', 'e'))
        e = self.leaky_relu(g.edata.pop('e'))
        # compute softmax
        g.edata['a'] = self.attn_drop(edge_softmax(g, e))
        g.update_all(message_func=adaptive_attn_message_func, reduce_func=adaptive_attn_reduce_func)
        f1 = g.ndata.pop('f1')
        f2 = g.ndata.pop('f2')
        norm_f1 = self.ln_1(f1)
        norm_f2 = self.ln_2(f2)
        # NOTE(review): F.sigmoid is deprecated in favor of torch.sigmoid
        if shared_tau:
            z = F.sigmoid((-1)*(norm_f1-tau_1)) * F.sigmoid((-1)*(norm_f2-tau_2))
        else:
            # tau for each layer
            z = F.sigmoid((-1)*(norm_f1-self.tau_1)) * F.sigmoid((-1)*(norm_f2-self.tau_2))
        # gate never grows across layers
        gate = torch.min(old_z, z)
        agg = g.ndata.pop('agg')
        normagg = agg * g.ndata['norm'].unsqueeze(1)  # normalization by tgt degree
        if self.activation:
            normagg = self.activation(normagg)
        new_h = feat + gate.unsqueeze(2)*normagg
        return new_h, z
class GatedLayer(nn.Module):
    """GCN-style adaptive propagation layer.

    Computes a per-node gate ``z`` from neighborhood-agreement metrics
    (f1: fraction of neighbors agreeing with the center prediction, f2:
    entropy of neighborhood predictions) and adds gated, degree-normalized
    neighbor aggregates to the node's own features.
    """

    def __init__(self, g, in_feats, out_feats, activation, dropout, lidx=1):
        super(GatedLayer, self).__init__()
        # NOTE(review): weight_neighbors is defined but not used in forward()
        self.weight_neighbors = nn.Linear(in_feats, out_feats)
        self.activation = activation
        self.dropout = nn.Dropout(p=dropout)
        # per-layer gating thresholds (used when shared_tau is False)
        self.tau_1 = nn.Parameter(torch.zeros((1,)))
        self.tau_2 = nn.Parameter(torch.zeros((1,)))
        # ties the module to a fixed graph size
        self.ln_1 = nn.LayerNorm((g.number_of_nodes()), elementwise_affine=False)
        self.ln_2 = nn.LayerNorm((g.number_of_nodes()), elementwise_affine=False)
        self.reset_parameters(lidx)

    def reset_parameters(self, lidx, how='layerwise'):
        # initialize params
        if how == 'normal':
            nn.init.normal_(self.tau_1)
            nn.init.normal_(self.tau_2)
        else:
            nn.init.constant_(self.tau_1, 1/(lidx+1))
            nn.init.constant_(self.tau_2, 1/(lidx+1))
        return

    def forward(self, g, h, logits, old_z, shared_tau=True, tau_1=None, tau_2=None):
        # operates on a node
        g = g.local_var()
        if self.dropout:
            h = self.dropout(h)
        g.ndata['h'] = h
        g.ndata['logits'] = logits
        # first pass: broadcast logits to compute agreement metrics
        g.update_all(message_func=fn.copy_u('logits','logits'), reduce_func=adaptive_reduce_func)
        f1 = g.ndata.pop('f1')
        f2 = g.ndata.pop('f2')
        norm_f1 = self.ln_1(f1)
        norm_f2 = self.ln_2(f2)
        # NOTE(review): F.sigmoid is deprecated in favor of torch.sigmoid
        if shared_tau:
            z = F.sigmoid((-1)*(norm_f1-tau_1)) * F.sigmoid((-1)*(norm_f2-tau_2))
        else:
            # tau for each layer
            z = F.sigmoid((-1)*(norm_f1-self.tau_1)) * F.sigmoid((-1)*(norm_f2-self.tau_2))
        # gate never grows across layers
        gate = torch.min(old_z, z)
        # second pass: plain neighbor-feature aggregation
        g.update_all(message_func=fn.copy_u('h','feat'), reduce_func=fn.sum(msg='feat', out='agg'))
        agg = g.ndata.pop('agg')
        normagg = agg * g.ndata['norm']  # normalization by tgt degree
        if self.activation:
            normagg = self.activation(normagg)
        new_h = h + gate.unsqueeze(1)*normagg
        return new_h, z
class GatedAPPNPConv(nn.Module):
    r"""Approximate Personalized Propagation of Neural Predictions
    layer from paper `Predict then Propagate: Graph Neural Networks
    meet Personalized PageRank <https://arxiv.org/pdf/1810.05997.pdf>`__,
    extended with the adaptive gate ``z`` computed from neighborhood
    agreement metrics at every propagation step.

    .. math::
        H^{0} & = X

        H^{t+1} & = (1-\alpha)\left(\hat{D}^{-1/2}
        \hat{A} \hat{D}^{-1/2} H^{t}\right) + \alpha H^{0}

    Parameters
    ----------
    k : int
        Number of iterations :math:`K`.
    alpha : float
        The teleport probability :math:`\alpha`.
    edge_drop : float, optional
        Dropout rate on edges that controls the
        messages received by each node. Default: ``0``.
    """

    def __init__(self,
                 g, k,
                 n_hidden, n_classes,
                 edge_drop=0., lidx=1):
        super(GatedAPPNPConv, self).__init__()
        self._k = k
        self.edge_drop = nn.Dropout(edge_drop)
        # gating thresholds
        self.tau_1 = nn.Parameter(torch.zeros((1,)))
        self.tau_2 = nn.Parameter(torch.zeros((1,)))
        # ties the module to a fixed graph size
        self.ln_1 = nn.LayerNorm((g.number_of_nodes()), elementwise_affine=False)
        self.ln_2 = nn.LayerNorm((g.number_of_nodes()), elementwise_affine=False)
        # projects hidden features to class logits at each propagation step
        self.weight_y = nn.Linear(n_hidden, n_classes)
        self.reset_parameters(lidx)

    def reset_parameters(self, lidx, how='layerwise'):
        # initialize params
        if how == 'normal':
            nn.init.normal_(self.tau_1)
            nn.init.normal_(self.tau_2)
        else:
            nn.init.constant_(self.tau_1, 1/(lidx+1))
            nn.init.constant_(self.tau_2, 1/(lidx+1))
        return

    def forward(self, graph, feat, logits):
        r"""Compute APPNP layer.

        Parameters
        ----------
        graph : DGLGraph
            The graph.
        feat : torch.Tensor
            The input feature of shape :math:`(N, *)` :math:`N` is the
            number of nodes, and :math:`*` could be of any shape.

        Returns
        -------
        torch.Tensor
            The output feature of shape :math:`(N, *)` where :math:`*`
            should be the same as input shape.
        """
        graph = graph.local_var()
        # symmetric normalization factor D^{-1/2}, clamped for isolated nodes
        norm = torch.pow(graph.in_degrees().float().clamp(min=1), -0.5)
        shp = norm.shape + (1,) * (feat.dim() - 1)
        norm = torch.reshape(norm, shp).to(feat.device)
        # NOTE(review): feat_0 is captured but never used (no alpha-teleport
        # term here, unlike vanilla APPNP) — confirm this is intentional
        feat_0 = feat
        # NOTE(review): hard-coded .cuda() — fails on CPU-only runs
        z = torch.FloatTensor([1.0,]).cuda()
        for lidx in range(self._k):
            # normalization by src node
            old_z = z
            feat = feat * norm
            graph.ndata['h'] = feat
            old_feat = feat
            if lidx != 0:
                logits = self.weight_y(feat)
            graph.ndata['logits'] = logits
            # agreement metrics for the gate
            graph.update_all(message_func=fn.copy_u('logits','logits'), reduce_func=adaptive_reduce_func)
            f1 = graph.ndata.pop('f1')
            f2 = graph.ndata.pop('f2')
            norm_f1 = self.ln_1(f1)
            norm_f2 = self.ln_2(f2)
            z = F.sigmoid((-1)*(norm_f1-self.tau_1)) * F.sigmoid((-1)*(norm_f2-self.tau_2))
            gate = torch.min(old_z, z)
            # edge dropout implemented as a per-edge weight mask
            graph.edata['w'] = self.edge_drop(
                torch.ones(graph.number_of_edges(), 1).to(feat.device))
            graph.update_all(fn.u_mul_e('h', 'w', 'm'),
                             fn.sum('m', 'h'))
            feat = graph.ndata.pop('h')
            # normalization by dst node
            feat = feat * norm
            feat = z.unsqueeze(1)* feat + old_feat  # raw features
        return feat
class GraphTopoAttention(nn.Module):
    """Graph attention layer that can augment node features with topology
    features (``topo``), sparsify small attention scores, and redistribute
    their mass over the surviving edges.

    BUGFIX: the original tested ``if topo:``, which invokes
    ``Tensor.__bool__`` and raises ``RuntimeError`` for any tensor with more
    than one element — so the topology path could never run. All three sites
    now test ``topo is not None``.
    """

    def __init__(self,
                 g,
                 in_dim,
                 topo_dim,
                 out_dim,
                 num_heads,
                 feat_drop,
                 attn_drop,
                 residual=False,
                 concat=True,
                 last_layer=False):
        super(GraphTopoAttention, self).__init__()
        self.g = g
        self.num_heads = num_heads
        if feat_drop:
            self.feat_drop = nn.Dropout(feat_drop)
        else:
            self.feat_drop = lambda x: x
        if attn_drop:
            self.attn_drop = nn.Dropout(attn_drop)
        else:
            self.attn_drop = lambda x: x
        # weight matrix Wl for leverage property
        if last_layer:
            self.fl = nn.Linear(in_dim+topo_dim, out_dim, bias=False)
        else:
            self.fl = nn.Linear(in_dim, num_heads*out_dim, bias=False)
        # weight matrix Wc for aggregation context
        self.fc = nn.Parameter(torch.Tensor(size=(in_dim+topo_dim, num_heads*out_dim)))
        # weight matrix Wq for neighbors' querying
        self.fq = nn.Parameter(torch.Tensor(size=(in_dim, num_heads*out_dim)))
        nn.init.xavier_normal_(self.fl.weight.data)
        nn.init.constant_(self.fc.data, 10e-3)
        nn.init.constant_(self.fq.data, 10e-3)
        self.attn_activation = nn.ELU()
        self.softmax = edge_softmax
        self.residual = residual
        if residual:
            if in_dim != out_dim:
                self.res_fl = nn.Linear(in_dim, num_heads * out_dim, bias=False)
                nn.init.xavier_normal_(self.res_fl.weight.data)
            else:
                # None signals the identity shortcut in forward()
                self.res_fl = None
        self.concat = concat
        self.last_layer = last_layer

    def forward(self, inputs, topo=None):
        """Run the layer.

        inputs: (N, in_dim) node features; topo: optional (N, topo_dim)
        topology features concatenated into the context/output projections.
        """
        # prepare
        h = self.feat_drop(inputs)  # NxD
        if topo is not None:
            t = self.feat_drop(topo)  # N*T
        if not self.last_layer:
            ft = self.fl(h).reshape((h.shape[0], self.num_heads, -1))  # NxHxD'
            if topo is not None:
                ft_c = torch.matmul(torch.cat((h, t), 1), self.fc).reshape((h.shape[0], self.num_heads, -1))  # NxHxD'
            else:
                ft_c = torch.matmul(h, self.fc).reshape((h.shape[0], self.num_heads, -1))  # NxHxD'
            ft_q = torch.matmul(h, self.fq).reshape((h.shape[0], self.num_heads, -1))  # NxHxD'
            self.g.ndata.update({'ft': ft, 'ft_c': ft_c, 'ft_q': ft_q})
            self.g.apply_edges(self.edge_attention)
            self.edge_softmax()
            # sparsify: presumably zeroes the smallest ~71.3% of attention
            # scores, then rescales the rest to conserve total mass — the
            # 0.713 constant's origin is undocumented; TODO confirm
            l_s = int(0.713*self.g.edata['a_drop'].shape[0])
            topk, _ = torch.topk(self.g.edata['a_drop'], l_s, largest=False, dim=0)
            thd = torch.squeeze(topk[-1])
            self.g.edata['a_drop'] = self.g.edata['a_drop'].squeeze()
            self.g.edata['a_drop'] = torch.where(self.g.edata['a_drop']-thd<0, self.g.edata['a_drop'].new([0.0]), self.g.edata['a_drop'])
            attn_ratio = torch.div((self.g.edata['a_drop'].sum(0).squeeze()+topk.sum(0).squeeze()), self.g.edata['a_drop'].sum(0).squeeze())
            self.g.edata['a_drop'] = self.g.edata['a_drop'] * attn_ratio
            self.g.edata['a_drop'] = self.g.edata['a_drop'].unsqueeze(-1)
            # NOTE(review): fn.src_mul_edge is deprecated in newer DGL
            # (use fn.u_mul_e)
            self.g.update_all(fn.src_mul_edge('ft', 'a_drop', 'ft'), fn.sum('ft', 'ft'))
            ret = self.g.ndata['ft']
            if self.residual:
                if self.res_fl is not None:
                    resval = self.res_fl(h).reshape((h.shape[0], self.num_heads, -1))  # NxHxD'
                else:
                    resval = torch.unsqueeze(h, 1)  # Nx1xD'
                ret = resval + ret
            ret = torch.cat((ret.flatten(1), ft.mean(1).squeeze()), 1) if self.concat else ret.flatten(1)
        else:
            if topo is not None:
                ret = self.fl(torch.cat((h, t), 1))
            else:
                ret = self.fl(h)
        return ret

    def edge_attention(self, edges):
        # query = (source projection - destination context);
        # score = <query, context>, passed through ELU
        c = edges.dst['ft_c']
        q = edges.src['ft_q'] - c
        a = (q * c).sum(-1).unsqueeze(-1)
        return {'a': self.attn_activation(a)}

    def edge_softmax(self):
        # NOTE: this method shadows the imported dgl `edge_softmax`; the
        # functional version remains reachable through self.softmax
        attention = self.softmax(self.g, self.g.edata.pop('a'))
        self.g.edata['a_drop'] = self.attn_drop(attention)
{
"api_name": "torch.nn.Module",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_n... |
50777891 | from typing import List
class Solution:
    def dailyTemperatures(self, temperatures: List[int]) -> List[int]:
        """For each day, return how many days until a warmer temperature
        (0 if it never gets warmer).

        Monotonic decreasing stack of indices, O(n) time / O(n) space.
        Simplifies the original by removing the redundant `<=` branch —
        the while-loop condition already covers both cases — and handles
        the empty input without a pre-seeded stack.
        """
        answer = [0] * len(temperatures)
        stack: List[int] = []  # indices with strictly decreasing temperatures
        for i, temp in enumerate(temperatures):
            # a warmer day resolves every cooler day still on the stack
            while stack and temp > temperatures[stack[-1]]:
                prev = stack.pop()
                answer[prev] = i - prev
            stack.append(i)
        return answer
if __name__ == "__main__":
    # quick smoke run with the LeetCode 739 sample input
    temperatures = [73,74,75,71,69,72,76,73]
    s = Solution()
    print(s.dailyTemperatures(temperatures))  # expected: [1, 1, 4, 2, 1, 1, 0, 0]
| code-cp/leetcode | solutions/739/main.py | main.py | py | 802 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 4,
"usage_type": "name"
}
] |
34405330102 | import matplotlib.pyplot as plt
import numpy as np
import os, tkinter, tkinter.filedialog, tkinter.messagebox
# Pick a CSV via a file dialog, then plot its second column.
# show the file selection filedialog
root = tkinter.Tk()
root.withdraw()  # hide the empty root window; only the dialog is shown
fTyp = [('','*')]
iDir = os.path.abspath(os.path.dirname(__file__))
# tkinter.messagebox.showinfo('簡易プロットプログラムです','どのフォルダのcsvでグラフを作る?')
# output the processing file name
file = tkinter.filedialog.askopenfilename(filetypes = fTyp,initialdir = iDir)
# tkinter.messagebox.showinfo('oxプログラム',file)
# first 5 rows are assumed to be a header block and skipped — TODO confirm
df = np.loadtxt(file, skiprows=5, delimiter=',', encoding='utf-8')
rows = len(df[:,0])
x = np.arange(rows)
# Use this line to plot with seconds (column 0) on the horizontal axis:
# plt.plot(df[:,0], df[:,1])
# Use this line to plot against the row index (1..number of csv rows):
plt.plot(x, df[:,1])
# plt.vlines(np.arange(24800,26400,200), -0.05, 0.05, color='k', linestyle=':', lw=0.5)
# plt.fill_between([24800,26400], -0.05, 0.05, color='skyblue')
plt.show()
| kobashin/GHz-ultrasonic | easy_plot.py | easy_plot.py | py | 1,071 | python | ja | code | 1 | github-code | 6 | [
{
"api_name": "tkinter.Tk",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_numb... |
38840281994 | from django.shortcuts import render, get_object_or_404, redirect, HttpResponse
from django.contrib.auth.decorators import login_required
from account.models import User
from product.models import Product
from .models import Message, MessageRoom
@login_required
def check_message_room(request, product_id):
    """Open (or create) the message room between the current user and a
    product's creator, then redirect into it.

    Uses ``get_or_create`` instead of the original filter/exists/get
    sequence: one query instead of three, and no race window where two
    concurrent requests could both create a room.
    """
    product = get_object_or_404(Product, id=product_id)
    message_room, _created = MessageRoom.objects.get_or_create(
        product=product,
        sender=request.user,
        # receiver only matters when the room is first created
        defaults={'receiver': product.creator},
    )
    return redirect('messaging:message-room', id=message_room.id)
@login_required
def message_room(request, id):
    """Render a message room; only its two participants may view it."""
    room = get_object_or_404(MessageRoom, id=id)
    # guard clause: reject anyone who is neither sender nor receiver
    if request.user != room.sender and request.user != room.receiver:
        return HttpResponse('Unauthorized access. Sorry')
    return render(request, 'messaging/messageroom.html', {'message_room': room})
@login_required
def view_messages(request):
    """List the current user's message rooms.

    Adds the ``@login_required`` decorator the original omitted: the view
    dereferences ``request.user`` and every sibling messaging view is
    already decorated, so an anonymous visitor belongs at the login page,
    not here.
    """
    messages = MessageRoom.with_messages(request.user)
    return render(request, 'messaging/view_messages.html', {'messages': messages})
@login_required
def send_messages(request):
    """Append a message to a room on POST and redirect back to the room.

    Fixes two defects: the original let *any* authenticated user post into
    *any* room simply by submitting its id (no participant check), and a bad
    room id raised an unhandled ``DoesNotExist`` instead of returning 404.
    """
    if request.method == "POST":
        content = request.POST.get('message')
        room = get_object_or_404(MessageRoom, id=request.POST.get("roomid"))
        # only the room's participants may post into it
        if request.user != room.sender and request.user != room.receiver:
            return HttpResponse('Unauthorized access. Sorry')
        Message.objects.create(room=room, content=content, sender=request.user)
        return redirect('messaging:message-room', id=room.id)
    return HttpResponse('Something went wrong.')
| bishwopr/creative-poultry | messaging/views.py | views.py | py | 2,261 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "product.models",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "product.models.Product",
"line_number": 10,
"usage_type": "argument"
},
{
"api_... |
27082622973 | from fastapi import APIRouter, Depends, HTTPException
from ...celery.tasks import ExcelParser
from ..crud.dishes_crud import DishesCrud
from ..crud.menu_crud import MenuCrud
from ..crud.submenu_crud import SubmenuCrud
parser_router = APIRouter(prefix='/parser', tags=['Parser'])


@parser_router.post('/parse-excel')
async def parse_excel(
    menu_service: MenuCrud = Depends(),
    submenu_service: SubmenuCrud = Depends(),
    dish_service: DishesCrud = Depends()
):
    """Parse the Excel source via ExcelParser and load the resulting menus,
    submenus and dishes through the injected CRUD services.

    Returns a success message on completion; any failure surfaces as HTTP
    500 with the exception text as detail.
    """
    try:
        excel_parser = ExcelParser(menu_service, submenu_service, dish_service)
        await excel_parser.parser()
        return {'message': 'Excel data parsed and loaded successfully'}
    except Exception as e:
        # NOTE(review): the broad catch maps every failure to 500 — confirm
        # ExcelParser never raises HTTPException with a more specific status.
        raise HTTPException(status_code=500, detail=str(e))
| puplishe/testproject | fastapi1/api/routes/excel_router.py | excel_router.py | py | 756 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "crud.menu_crud.MenuCrud",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "crud.submenu_crud.SubmenuCrud",
"line_number": 14,
"usage_type": "name"
},
{
"api_name":... |
45635574383 | from typing import Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from .conv_tasnet import TCN, GatedTCN
from .lobe.activation import get_activation
from .lobe.norm import get_norm
from .lobe.rnn import FSMN, ConditionFSMN
class Unet(nn.Module):
    """2-D U-Net operating on (frequency, time) feature maps.

    Generic_Args:
        input_type: Real or RI(real+image)
        input_dim: input feature dimension
        activation_type: activation function
        norm_type: normalization function
        dropout: if not 0, add dropout in down-CNN layers

    Unet_Args:
        channels: controlled input/output channel for Unet
        kernel_t: kernel size in time axis for each down cnn layer
        kernel_f: kernel size in freq axis for each down/up cnn layer
        stride_t: stride size in time axis for each down cnn layer
        stride_f: stride size in freq axis for each down/up cnn layer
        dilation_t: dilation size in time axis for each down cnn layer
        dilation_f: dilation size in freq axis for each down/up cnn layer
        delay: add lookahead frames in each down cnn layers, if 0 means causal cnn operation
        transpose_t_size: the kernel size of ConvTranspose2d's time axis for up cnn layer
        skip_conv: if True, skip connections are fused by a 1x1 conv + add
            instead of channel concatenation
        multi_output: number of parallel output maps produced by the last
            up-convolution (1 keeps the classic single-output U-Net)
    """

    def __init__(
        self,
        input_type: str = "RI",
        input_dim: int = 512,
        activation_type: str = "PReLU",
        norm_type: str = "bN2d",
        dropout: float = 0.05,
        channels: Tuple = (1, 1, 8, 8, 16, 16),
        transpose_t_size: int = 2,
        skip_conv: bool = False,
        kernel_t: Tuple = (5, 1, 9, 1, 1),
        stride_t: Tuple = (1, 1, 1, 1, 1),
        dilation_t: Tuple = (1, 1, 1, 1, 1),
        kernel_f: Tuple = (1, 5, 1, 5, 1),
        stride_f: Tuple = (1, 4, 1, 4, 1),
        dilation_f: Tuple = (1, 1, 1, 1, 1),
        delay: Tuple = (0, 0, 1, 0, 0),
        multi_output: int = 1,
    ):
        super().__init__()
        # All per-layer hyperparameter tuples must describe the same number
        # of layers.
        assert (
            len(kernel_t)
            == len(kernel_f)
            == len(stride_t)
            == len(stride_f)
            == len(dilation_t)
            == len(dilation_f)
        )
        self.input_type = input_type
        self.input_dim = input_dim
        self.multi_output = multi_output
        self.activation_type = activation_type
        self.norm_type = norm_type
        self.dropout = dropout
        self.skip_conv = skip_conv

        # Structure information (kept for get_args round-tripping).
        self.kernel_t = kernel_t
        self.kernel_f = kernel_f
        self.stride_t = stride_t
        self.stride_f = stride_f
        self.dilation_t = dilation_t
        self.dilation_f = dilation_f
        self.transpose_t_size = transpose_t_size

        active_cls = get_activation(activation_type.lower())
        norm_cls = get_norm(norm_type)
        self.n_cnn = len(kernel_t)
        self.channels = list(channels)
        self.kernel = list(
            zip(kernel_f, kernel_t)
        )  # each layer's kernel size (freq, time)
        self.delay = delay  # how much delay for each layer
        self.dilation = list(zip(dilation_f, dilation_t))
        self.stride = list(zip(stride_f, stride_t))
        self.t_kernel = transpose_t_size

        # Check relationship between feature-type and input-channel.
        if input_type.lower() == "ri":
            self.num_freq = input_dim // 2
            self.channels[0] = self.channels[0] * 2  # will expand RI channel
        elif input_type.lower() == "real":
            self.num_freq = input_dim
        else:
            raise TypeError("Input feature type should be RI-concate, RI-stack or Real")

        # CNN-down, downsample in frequency axis.
        self.cnn_down = nn.ModuleList()
        for i in range(self.n_cnn):
            encode = []
            freq_pad = (
                self.kernel[i][0] // 2,
                self.kernel[i][0] // 2,
            )  # center padding in frequency axis
            # Asymmetric time padding: `delay[i]` frames of lookahead, the
            # rest on the causal (left) side.
            time_pad = (self.kernel[i][1] - self.delay[i] - 1, self.delay[i])
            encode += [
                nn.ZeroPad2d(time_pad + freq_pad),  # (left, right, top, down)
                nn.Conv2d(
                    self.channels[i],
                    self.channels[i + 1],
                    kernel_size=self.kernel[i],
                    stride=self.stride[i],
                    dilation=self.dilation[i],
                ),
                norm_cls(self.channels[i + 1]),
                active_cls(),
                nn.Dropout(self.dropout),
            ]
            self.cnn_down.append(nn.Sequential(*encode))

        # CNN-up, upsample in frequency axis.
        self.cnn_up = nn.ModuleList()
        # Concatenated skip connections double the input channels of each
        # up-layer; conv-fused skips keep them unchanged.
        skip_double = 2 if not skip_conv else 1
        skip_double = [skip_double] * self.n_cnn
        for i in reversed(range(self.n_cnn)):
            s, _ = self.stride[i]
            k = self.kernel[i][0]
            p = k // 2
            op = s - k + 2 * p
            encode = []
            if i != 0:
                encode += [
                    nn.ConvTranspose2d(
                        self.channels[i + 1] * skip_double[i],
                        self.channels[i],
                        kernel_size=(k, self.t_kernel),
                        stride=self.stride[i],
                        dilation=self.dilation[i],
                        padding=(p, 0),
                        output_padding=(op, 0),
                    ),
                    norm_cls(self.channels[i]),
                    active_cls(),
                ]
            else:
                # Final layer: linear output (no norm/activation).
                encode += [
                    nn.ConvTranspose2d(
                        self.channels[i + 1] * skip_double[i],
                        self.channels[i] * self.multi_output,
                        kernel_size=(k, self.t_kernel),
                        stride=self.stride[i],
                        dilation=self.dilation[i],
                        padding=(p, 0),
                        output_padding=(op, 0),
                    )
                ]
            self.cnn_up.append(nn.Sequential(*encode))

        if skip_conv:
            # One 1x1 conv per skip connection (applied before the add).
            self.skip_cnn = nn.ModuleList()
            for i in reversed(range(self.n_cnn)):
                encode = []
                encode += [
                    nn.Conv2d(
                        self.channels[i + 1],
                        self.channels[i + 1],
                        kernel_size=(1, 1),
                        stride=1,
                    ),
                    active_cls(),
                ]
                self.skip_cnn.append(nn.Sequential(*encode))

    def shape_info(self):
        """Return (down_shape, up_shape): the frequency dimension after each
        down-sampling layer and after each up-sampling layer respectively."""
        # conv-transpose output size is:
        # (freq): (in_f - 1) * stride[0] - 2*padding[0] + dilation[0] * (kernel_size[0]-1) + output_padding[0] + 1
        # (time): (in_t - 1) * stride[1] - 2*padding[1] + dilation[1] * (kernel_size[1]-1) + output_padding[1] + 1
        down_shape = [self.num_freq]
        for i in range(self.n_cnn):
            stride, _ = self.stride[i]
            # BUGFIX: the divisibility test previously inspected
            # down_shape[i - 1] while the value being divided was
            # down_shape[-1]; both must refer to the current (last)
            # frequency dimension, i.e. this is a ceil-division.
            _f = down_shape[-1] // stride
            if down_shape[-1] % stride != 0:
                _f += 1
            down_shape.append(_f)
        up_shape = [_f]
        for i in range(self.n_cnn):
            stride, _ = self.stride[-i - 1]
            kernel_size = self.kernel[-i - 1][0]
            padding = kernel_size // 2
            output_padding = stride - kernel_size + 2 * padding
            _f = (
                (up_shape[-1] - 1) * stride
                - 2 * padding
                + self.dilation[-i - 1][0] * (kernel_size - 1)
                + output_padding
                + 1
            )
            up_shape.append(_f)
        return down_shape, up_shape

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Args:
            x: input tensor shape [N, C, T]

        Returns:
            output tensor has shape [N, C, T]
        """
        if self.input_type.lower() == "ri":
            _re, _im = torch.chunk(x, 2, dim=-2)
            x = torch.stack([_re, _im], dim=1)  # [N, C, T] -> [N, 2, C, T]
        else:
            if x.dim() == 3:
                x = x.unsqueeze(1)  # [N, 1, C, T]
        skip = [x.clone()]
        # Forward CNN-down layers, saving each output for skip connections.
        for cnn_layer in self.cnn_down:
            x = cnn_layer(x)  # [N, ch, C, T]
            skip.append(x)
        # Forward CNN-up layers, fusing the matching skip tensor.
        for i, cnn_layer in enumerate(self.cnn_up):
            if self.skip_conv:
                x += self.skip_cnn[i](skip[-i - 1])
            else:
                x = torch.cat([x, skip[-i - 1]], dim=1)
            x = cnn_layer(x)
        if self.t_kernel != 1:
            x = x[
                ..., : -(self.t_kernel - 1)
            ]  # transpose-conv with t-kernel size would increase (t-1) length
        if self.multi_output != 1:
            batch, ch, fdim, tdim = x.shape
            x = x.reshape(batch, self.multi_output, -1, fdim, tdim)
            if self.input_type.lower() == "ri":
                _re = x[:, :, 0, :, :]
                _im = x[:, :, 1, :, :]
                x = torch.cat([_re, _im], dim=2)
            else:
                x = x.squeeze(2)  # [N, M, 1, C, T] -> [N, C, T]
        else:
            if self.input_type.lower() == "ri":
                _re = x[:, 0, :, :]
                _im = x[:, 1, :, :]
                x = torch.cat([_re, _im], dim=1)
            else:
                x = x.squeeze(1)  # [N, 1, C, T] -> [N, C, T]
        return x

    @property
    def get_args(self) -> Dict:
        """Constructor keyword arguments needed to re-create this model."""
        return {
            "input_type": self.input_type,
            "input_dim": self.input_dim,
            "activation_type": self.activation_type,
            "norm_type": self.norm_type,
            "dropout": self.dropout,
            "channels": self.channels,
            "transpose_t_size": self.transpose_t_size,
            "skip_conv": self.skip_conv,
            "kernel_t": self.kernel_t,
            "stride_t": self.stride_t,
            "dilation_t": self.dilation_t,
            "kernel_f": self.kernel_f,
            "stride_f": self.stride_f,
            "dilation_f": self.dilation_f,
            "delay": self.delay,
            "multi_output": self.multi_output,
        }
class UnetTcn(Unet):
    """
    Improve temporal modeling ability by inserting a TCN inside an Unet model.

    The TCN stack runs at the U-Net bottleneck: the (channel, freq) axes of
    the encoder output are flattened into one feature axis and processed as
    a 1-D sequence over time, optionally conditioned on a speaker embedding.

    Args:
        embed_dim: Embedding feature dimension.
        embed_norm: If True, applies the 2-norm on the input embedding.
    """
    def __init__(
        self,
        embed_dim: int = 0,
        embed_norm: bool = False,
        input_type: str = "RI",
        input_dim: int = 512,
        activation_type: str = "PReLU",
        norm_type: str = "bN2d",
        dropout: float = 0.05,
        channels: Tuple = (1, 1, 8, 8, 16, 16),
        transpose_t_size: int = 2,
        transpose_delay: bool = False,
        skip_conv: bool = False,
        kernel_t: Tuple = (5, 1, 9, 1, 1),
        stride_t: Tuple = (1, 1, 1, 1, 1),
        dilation_t: Tuple = (1, 1, 1, 1, 1),
        kernel_f: Tuple = (1, 5, 1, 5, 1),
        stride_f: Tuple = (1, 4, 1, 4, 1),
        dilation_f: Tuple = (1, 1, 1, 1, 1),
        delay: Tuple = (0, 0, 1, 0, 0),
        tcn_layer: str = "normal",
        tcn_kernel: int = 3,
        tcn_dim: int = 256,
        tcn_dilated_basic: int = 2,
        per_tcn_stack: int = 5,
        repeat_tcn: int = 4,
        tcn_with_embed: List = [1, 0, 0, 0, 0],
        tcn_use_film: bool = False,
        tcn_norm: str = "gLN",
        dconv_norm: str = "gGN",
        causal: bool = False,
    ):
        # Positional pass-through to Unet.__init__ (multi_output keeps its
        # default of 1).
        super().__init__(
            input_type,
            input_dim,
            activation_type,
            norm_type,
            dropout,
            channels,
            transpose_t_size,
            skip_conv,
            kernel_t,
            stride_t,
            dilation_t,
            kernel_f,
            stride_f,
            dilation_f,
            delay,
        )
        self.embed_dim = embed_dim
        self.embed_norm = embed_norm
        self.tcn_layer = tcn_layer
        self.tcn_dim = tcn_dim
        self.tcn_kernel = tcn_kernel
        self.per_tcn_stack = per_tcn_stack
        self.repeat_tcn = repeat_tcn
        self.tcn_dilated_basic = tcn_dilated_basic
        self.tcn_with_embed = tcn_with_embed
        self.tcn_norm = tcn_norm
        self.dconv_norm = dconv_norm
        self.tcn_use_film = tcn_use_film
        self.causal = causal
        self.transpose_delay = transpose_delay
        # TCN module's input dimension: frequency size at the bottleneck
        # (num_freq ceil-divided by every down-layer's freq stride),
        # multiplied by the bottleneck channel count.
        temporal_input_dim = self.num_freq
        for stride, _ in self.stride:
            if temporal_input_dim % stride == 0:
                temporal_input_dim //= stride
            else:
                temporal_input_dim //= stride
                temporal_input_dim += 1
        temporal_input_dim *= self.channels[-1]  # extend by channel size
        if self.tcn_layer.lower() == "normal":
            tcn_cls = TCN
        elif self.tcn_layer.lower() == "gated":
            print("GatedTCN would ignore dconv_norm configuration.")
            tcn_cls = GatedTCN
        else:
            raise NameError
        assert per_tcn_stack == len(tcn_with_embed)
        # Build repeat_tcn stacks of per_tcn_stack TCN layers; dilation grows
        # as tcn_dilated_basic**i inside each stack. tcn_with_embed[i]
        # selects whether layer i receives the conditioning embedding
        # (emb_dim=embed_dim) or not (emb_dim=0).
        self.tcn_list = nn.ModuleList()
        for _ in range(repeat_tcn):
            _tcn = []
            for i in range(per_tcn_stack):
                if tcn_with_embed[i]:
                    if self.tcn_layer.lower() == "normal":
                        _tcn.append(
                            tcn_cls(
                                temporal_input_dim,
                                tcn_dim,
                                kernel=tcn_kernel,
                                dilation=tcn_dilated_basic ** i,
                                emb_dim=embed_dim,
                                causal=causal,
                                tcn_norm=tcn_norm,
                                dconv_norm=dconv_norm,
                            )
                        )
                    else:
                        _tcn.append(
                            tcn_cls(
                                temporal_input_dim,
                                tcn_dim,
                                kernel=tcn_kernel,
                                dilation=tcn_dilated_basic ** i,
                                emb_dim=embed_dim,
                                causal=causal,
                                tcn_norm=tcn_norm,
                                use_film=tcn_use_film,
                            )
                        )
                else:
                    if self.tcn_layer.lower() == "normal":
                        _tcn.append(
                            tcn_cls(
                                temporal_input_dim,
                                tcn_dim,
                                kernel=tcn_kernel,
                                dilation=tcn_dilated_basic ** i,
                                emb_dim=0,
                                causal=causal,
                                tcn_norm=tcn_norm,
                                dconv_norm=dconv_norm,
                            )
                        )
                    else:
                        _tcn.append(
                            tcn_cls(
                                temporal_input_dim,
                                tcn_dim,
                                kernel=tcn_kernel,
                                dilation=tcn_dilated_basic ** i,
                                emb_dim=0,
                                causal=causal,
                                tcn_norm=tcn_norm,
                                use_film=False,
                            )
                        )
            self.tcn_list.append(nn.ModuleList(_tcn))
    def forward(
        self, x: torch.Tensor, dvec: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        """
        Args:
            x: input tensor shape [N, C, T]
            dvec: conditional tensor shape [N, C]

        Returns:
            output tensor has shape [N, C, T]
        """
        # normalize the conditioning embedding to unit L2 norm if requested
        if self.embed_norm and dvec is not None:
            dvec = F.normalize(dvec, p=2, dim=1)
        if self.input_type.lower() == "ri":
            _re, _im = torch.chunk(x, 2, dim=-2)
            x = torch.stack([_re, _im], dim=1)  # [N, C, T] -> [N, 2, C, T]
        else:
            if x.dim() == 3:
                x = x.unsqueeze(1)  # [N, 1, C, T]
        skip = [x.clone()]
        # forward CNN-down layers
        for cnn_layer in self.cnn_down:
            x = cnn_layer(x)  # [N, ch, C, T]
            skip.append(x)
        # forward TCN block: flatten (channel, freq) into one feature axis
        # so the TCN sees a [N, ch*C, T] sequence, then restore the shape.
        N_ori, ch, C_ori, T = x.shape
        x = x.reshape(N_ori, ch * C_ori, T)
        for r in range(self.repeat_tcn):
            for i in range(len(self.tcn_list[r])):
                if self.tcn_with_embed[i]:
                    x = self.tcn_list[r][i](x, dvec)
                else:
                    x = self.tcn_list[r][i](x)
        x = x.reshape(N_ori, ch, C_ori, T)
        # forward CNN-up layers
        for i, cnn_layer in enumerate(self.cnn_up):
            if self.skip_conv:
                x += self.skip_cnn[i](skip[-i - 1])
            else:
                x = torch.cat([x, skip[-i - 1]], dim=1)
            x = cnn_layer(x)
        if self.t_kernel != 1:
            # transpose_delay chooses which end of the extra (t-1) frames
            # produced by the time-axis transpose conv gets trimmed.
            if self.transpose_delay:
                x = x[
                    ..., (self.t_kernel - 1) :
                ]  # transpose-conv with t-kernel size would increase (t-1) length
            else:
                x = x[
                    ..., : -(self.t_kernel - 1)
                ]  # transpose-conv with t-kernel size would increase (t-1) length
        if self.input_type.lower() == "ri":
            _re = x[:, 0, :, :]
            _im = x[:, 1, :, :]
            x = torch.cat([_re, _im], dim=1)
        else:
            x = x.squeeze(1)  # [N, 1, C, T] -> [N, C, T]
        return x
    @property
    def get_args(self) -> Dict:
        # Constructor keyword arguments needed to re-create this model.
        return {
            "input_type": self.input_type,
            "input_dim": self.input_dim,
            "activation_type": self.activation_type,
            "norm_type": self.norm_type,
            "dropout": self.dropout,
            "channels": self.channels,
            "transpose_t_size": self.transpose_t_size,
            "transpose_delay": self.transpose_delay,
            "skip_conv": self.skip_conv,
            "kernel_t": self.kernel_t,
            "stride_t": self.stride_t,
            "dilation_t": self.dilation_t,
            "kernel_f": self.kernel_f,
            "stride_f": self.stride_f,
            "dilation_f": self.dilation_f,
            "delay": self.delay,
            "embed_dim": self.embed_dim,
            "embed_norm": self.embed_norm,
            "tcn_norm": self.tcn_norm,
            "dconv_norm": self.dconv_norm,
            "tcn_layer": self.tcn_layer,
            "tcn_dim": self.tcn_dim,
            "tcn_kernel": self.tcn_kernel,
            "tcn_dilated_basic": self.tcn_dilated_basic,
            "repeat_tcn": self.repeat_tcn,
            "per_tcn_stack": self.per_tcn_stack,
            "tcn_with_embed": self.tcn_with_embed,
            "tcn_use_film": self.tcn_use_film,
            "causal": self.causal,
        }
class UnetFsmn(Unet):
    """
    Improve temporal modeling ability by inserting a FSMN inside an Unet model.

    The FSMN chain runs at the U-Net bottleneck on the flattened
    (channel, freq) feature axis; each layer passes a memory tensor to the
    next, and layers flagged in fsmn_with_embed are conditioned on dvec.

    Args:
        embed_dim: Embedding feature dimension.
        embed_norm: If True, applies the 2-norm on the input embedding.
    """
    def __init__(
        self,
        embed_dim: int = 0,
        embed_norm: bool = False,
        input_type: str = "RI",
        input_dim: int = 512,
        activation_type: str = "PReLU",
        norm_type: str = "bN2d",
        dropout: float = 0.05,
        channels: Tuple = (1, 1, 8, 8, 16, 16),
        transpose_t_size: int = 2,
        transpose_delay: bool = False,
        skip_conv: bool = False,
        kernel_t: Tuple = (5, 1, 9, 1, 1),
        stride_t: Tuple = (1, 1, 1, 1, 1),
        dilation_t: Tuple = (1, 1, 1, 1, 1),
        kernel_f: Tuple = (1, 5, 1, 5, 1),
        stride_f: Tuple = (1, 4, 1, 4, 1),
        dilation_f: Tuple = (1, 1, 1, 1, 1),
        delay: Tuple = (0, 0, 1, 0, 0),
        fsmn_l_context: int = 3,
        fsmn_r_context: int = 0,
        fsmn_dim: int = 256,
        num_fsmn: int = 8,
        fsmn_with_embed: List = [1, 1, 1, 1, 1, 1, 1, 1],
        fsmn_norm: str = "gLN",
        use_film: bool = True,
    ):
        # Positional pass-through to Unet.__init__ (multi_output keeps its
        # default of 1).
        super().__init__(
            input_type,
            input_dim,
            activation_type,
            norm_type,
            dropout,
            channels,
            transpose_t_size,
            skip_conv,
            kernel_t,
            stride_t,
            dilation_t,
            kernel_f,
            stride_f,
            dilation_f,
            delay,
        )
        self.transpose_delay = transpose_delay
        self.embed_dim = embed_dim
        self.embed_norm = embed_norm
        self.fsmn_l_context = fsmn_l_context
        self.fsmn_r_context = fsmn_r_context
        self.fsmn_dim = fsmn_dim
        self.num_fsmn = num_fsmn
        self.fsmn_with_embed = fsmn_with_embed
        self.fsmn_norm = fsmn_norm
        self.use_film = use_film
        # FSMN module's input dimension: frequency size at the bottleneck
        # (num_freq ceil-divided by every down-layer's freq stride),
        # multiplied by the bottleneck channel count.
        temporal_input_dim = self.num_freq
        for stride, _ in self.stride:
            if temporal_input_dim % stride == 0:
                temporal_input_dim //= stride
            else:
                temporal_input_dim //= stride
                temporal_input_dim += 1
        temporal_input_dim *= self.channels[-1]  # extend by channel size
        assert num_fsmn == len(fsmn_with_embed)
        # ConditionFSMN layers receive the embedding; plain FSMN layers
        # do not.
        self.fsmn_list = nn.ModuleList()
        for i in range(num_fsmn):
            if fsmn_with_embed[i]:
                self.fsmn_list.append(
                    ConditionFSMN(
                        temporal_input_dim,
                        temporal_input_dim,
                        fsmn_dim,
                        embed_dim,
                        fsmn_l_context,
                        fsmn_r_context,
                        norm_type=fsmn_norm,
                        use_film=use_film,
                    )
                )
            else:
                self.fsmn_list.append(
                    FSMN(
                        temporal_input_dim,
                        temporal_input_dim,
                        fsmn_dim,
                        fsmn_l_context,
                        fsmn_r_context,
                        norm_type=fsmn_norm,
                    )
                )
    def forward(
        self, x: torch.Tensor, dvec: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        """
        Args:
            x: input tensor shape [N, C, T]
            dvec: conditional tensor shape [N, C]

        Returns:
            output tensor has shape [N, C, T]
        """
        # normalize the conditioning embedding to unit L2 norm if requested
        if self.embed_norm and dvec is not None:
            dvec = F.normalize(dvec, p=2, dim=1)
        if self.input_type.lower() == "ri":
            _re, _im = torch.chunk(x, 2, dim=-2)
            x = torch.stack([_re, _im], dim=1)  # [N, C, T] -> [N, 2, C, T]
        else:
            if x.dim() == 3:
                x = x.unsqueeze(1)  # [N, 1, C, T]
        skip = [x.clone()]
        # forward CNN-down layers
        for cnn_layer in self.cnn_down:
            x = cnn_layer(x)  # [N, ch, C, T]
            skip.append(x)
        # forward FSMN block: flatten (channel, freq) into one feature axis;
        # each FSMN layer threads its memory tensor into the next one.
        N_ori, ch, C_ori, T = x.shape
        x = x.reshape(N_ori, ch * C_ori, T)
        memory = None
        for i in range(len(self.fsmn_list)):
            if self.fsmn_with_embed[i]:
                x, memory = self.fsmn_list[i](x, dvec, memory)
            else:
                x, memory = self.fsmn_list[i](x, memory)
        x = x.reshape(N_ori, ch, C_ori, T)
        # forward CNN-up layers
        for i, cnn_layer in enumerate(self.cnn_up):
            if self.skip_conv:
                x += self.skip_cnn[i](skip[-i - 1])
            else:
                x = torch.cat([x, skip[-i - 1]], dim=1)
            x = cnn_layer(x)
        if self.t_kernel != 1:
            # transpose_delay chooses which end of the extra (t-1) frames
            # produced by the time-axis transpose conv gets trimmed.
            if self.transpose_delay:
                x = x[
                    ..., (self.t_kernel - 1) :
                ]  # transpose-conv with t-kernel size would increase (t-1) length
            else:
                x = x[
                    ..., : -(self.t_kernel - 1)
                ]  # transpose-conv with t-kernel size would increase (t-1) length
        if self.input_type.lower() == "ri":
            _re = x[:, 0, :, :]
            _im = x[:, 1, :, :]
            x = torch.cat([_re, _im], dim=1)
        else:
            x = x.squeeze(1)  # [N, 1, C, T] -> [N, C, T]
        return x
    @property
    def get_args(self) -> Dict:
        # Constructor keyword arguments needed to re-create this model.
        return {
            "input_type": self.input_type,
            "input_dim": self.input_dim,
            "activation_type": self.activation_type,
            "norm_type": self.norm_type,
            "dropout": self.dropout,
            "channels": self.channels,
            "transpose_t_size": self.transpose_t_size,
            "transpose_delay": self.transpose_delay,
            "skip_conv": self.skip_conv,
            "kernel_t": self.kernel_t,
            "stride_t": self.stride_t,
            "dilation_t": self.dilation_t,
            "kernel_f": self.kernel_f,
            "stride_f": self.stride_f,
            "dilation_f": self.dilation_f,
            "delay": self.delay,
            "embed_dim": self.embed_dim,
            "embed_norm": self.embed_norm,
            "fsmn_l_context": self.fsmn_l_context,
            "fsmn_r_context": self.fsmn_r_context,
            "fsmn_dim": self.fsmn_dim,
            "num_fsmn": self.num_fsmn,
            "fsmn_with_embed": self.fsmn_with_embed,
            "fsmn_norm": self.fsmn_norm,
            "use_film": self.use_film,
        }
| mcw519/PureSound | puresound/nnet/unet.py | unet.py | py | 26,136 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_n... |
# BFS-style flood fill over an n x n grid with k region labels, read from
# stdin (appears to be a competitive-programming solution).
from collections import deque
# n: grid size, k: number of distinct labels.
n, k = map(int, input().split())
# NOTE(review): [[] * (k+1) ...] multiplies an empty list, which is just [];
# presumably [[] for _ in range(k+1)] was intended — the per-row content is
# overwritten/appended later, so confirm this is harmless.
graph = [[] * (k+1) for _ in range(k+1)]
gmap = [[] * (n+1) for _ in range(n+1)]
# Read the grid; graph[label] collects the (row, col) cells holding label.
for i in range(1, n+1):
    arr = list(map(int, input().split()))
    gmap[i] = arr
    for j, m in zip(arr, range(1, n+1)):
        if j != 0:
            graph[j].append((i, m))
# s: number of expansion rounds; (a, b): query cell.
s, a, b = map(int, input().split())
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
graph.sort()
# NOTE(review): the loop variable k shadows the label count k read above, so
# range(1, k+1) below iterates over the round counter, not the label count —
# confirm this is intended.
for k in range(s):
    # NOTE(review): looks like leftover debug output.
    print(k)
    for i in range(1, k+1):
        queue = deque()
        while graph[i]:
            queue.append(graph[i].pop())
        # Expand each frontier cell of label i into empty 4-neighbours.
        x, y = queue.popleft()
        for j in range(4):
            nx = x + dx[j]
            ny = y + dy[j]
            if nx <= 0 or ny <= 0 or nx > n or ny > n:
                continue
            if gmap[nx][ny-1] == 0:
                gmap[nx][ny-1] = i
                queue.append((nx, ny))
        graph[i] = queue
    print(gmap[a][b-1])
| minju7346/CordingTest | bfs3.py | bfs3.py | py | 944 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.deque",
"line_number": 18,
"usage_type": "call"
}
] |
8310320668 | from stevedore import extension
class Extensions:
    """Lazy singleton container for stevedore extensions.

    Each namespace's ExtensionManager is created on first request and
    cached in the class-level ``_managers`` mapping afterwards.
    """

    _managers = {}

    def __init__(self):
        # Pure static container -- instantiation is forbidden.
        raise NotImplementedError()

    @classmethod
    def get(cls, namespace, name):
        """Return the plugin class registered as *name* under *namespace*."""
        try:
            manager = cls._managers[namespace]
        except KeyError:
            manager = cls._load_namespace(namespace)
        return manager[name].plugin

    @classmethod
    def _load_namespace(cls, namespace):
        """Create, cache, and return the ExtensionManager for *namespace*."""
        cls._managers[namespace] = manager = extension.ExtensionManager(namespace)
        return manager
| dwtcourses/SHARE | share/util/extensions.py | extensions.py | py | 658 | python | en | code | null | github-code | 6 | [
{
"api_name": "stevedore.extension.ExtensionManager",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "stevedore.extension",
"line_number": 24,
"usage_type": "name"
}
] |
17202898458 | #!/usr/bin/env python
import robot
import time
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient
import json
import logging
def send_json(data):
    """Wrap *data* in an {'action': ...} envelope and serialize it to JSON."""
    return json.dumps({'action': data})
def action(client, userdata, message):
    """MQTT message callback: dispatch an incoming command to the robot.

    Decodes the JSON payload {'action': <command>} and calls the matching
    robot.* function. 'voltage' and 'distance' additionally publish the
    reading back on the 'from_robot' topic via the module-level myMQTT
    client. Unknown commands are silently ignored.
    """
    data = message.payload.decode()
    data = json.loads(data)
    data = data['action']
    print (data)
    if data == 'forward':
        print ('forward')
        robot.forward()
    elif data == 'stop':
        print ('stop')
        robot.stop()
    elif data == 'left':
        print ('left')
        robot.left()
    elif data == 'right':
        print ('right')
        robot.right()
    elif data == 'backward':
        print ('backward')
        robot.backward()
    elif data == 'servo left':
        print ('servo left')
        robot.servo_left()
    elif data == 'servo center':
        print ('servo center')
        robot.servo_center()
    elif data == 'servo right':
        print ('servo right')
        robot.servo_right()
    elif data == 'lights':
        print ('lights')
        robot.lights()
    elif data == 'blinkers':
        print ('blinkers')
        robot.blinkers()
    elif data == 'voltage':
        print('voltage')
        # Read the battery voltage and publish it back to the controller.
        voltage = robot.voltage()
        voltage = send_json(voltage)
        myMQTT.publish('from_robot', voltage, 0)
    elif data == 'distance':
        print('distance')
        # Read the ultrasonic distance and publish it back to the controller.
        distance = robot.distance()
        distance = send_json(distance)
        myMQTT.publish('from_robot', distance, 0)
    else:
        pass
# --- AWS IoT MQTT client setup -------------------------------------------
# Connect to AWS IoT with X.509 credentials and subscribe the `action`
# callback to the 'to_robot' command topic.
key_dir = '/home/pi/aaa_mikes_gopigo/keys/'
myMQTT = AWSIoTMQTTClient('Leonard')
myMQTT.configureEndpoint('a111amujev1y9r.iot.us-west-2.amazonaws.com', 8883)
myMQTT.configureCredentials(key_dir+'root-CA.crt', key_dir+'Leonard.private.key', key_dir+'Leonard.cert.pem')
# -1: queue messages without limit while offline; drain 2 msgs/sec.
myMQTT.configureOfflinePublishQueueing(-1)
myMQTT.configureDrainingFrequency(2)
myMQTT.configureConnectDisconnectTimeout(10)
myMQTT.connect()
myMQTT.subscribe('to_robot', 1, action)
# Verbose SDK logging to stderr for debugging the connection.
logger = logging.getLogger("AWSIoTPythonSDK.core")
logger.setLevel(logging.DEBUG)
streamHandler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)
print('Waiting for data...')
# Busy-wait forever; all work happens in the MQTT callback thread.
while True:
    pass
| mkyle1121/gopigo | web/robot_sock_client2.py | robot_sock_client2.py | py | 2,084 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "json.dumps",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "robot.forward",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "robot.stop",
"line_number": 2... |
2075941699 | import pandas as pd
from datetime import datetime
import os
def get_csv(source):
    """Read data/<source>.csv into a DataFrame; empty DataFrame if unreadable."""
    try:
        frame = pd.read_csv('data/' + source + '.csv')
    except (OSError, IOError) as err:
        print(err)
        frame = pd.DataFrame()
    return frame
def get_status(source_name):
    """Placeholder: status tracking is not implemented yet."""
    return ''
def set_status(source_name, status):
    """Placeholder: status persistence is not implemented yet."""
    return None
def get_data(source_name, meta_filter):
    """Return the rows of <source_name> whose 'meta' column contains meta_filter."""
    frame = get_csv(source_name)
    mask = frame['meta'].str.contains(meta_filter)
    return frame[mask]
def put_all_data(source_name, descr, df):
    """Merge *df* into data/<source_name>.csv and record a last-update row.

    NOTE(review): the *descr* parameter is never used — confirm whether it
    was meant to go into the last-update record.
    """
    # Merge with the existing on-disk data, dropping duplicate (ref, date)
    # pairs (the on-disk rows come first, so they win).
    local = get_csv(source_name)
    result = pd.concat([local, df]).drop_duplicates(['ref', 'date'])
    result = result.sort_values(by=['ref', 'date'])
    if not os.path.exists('data'):
        os.makedirs('data')
    result.to_csv('data/'+source_name+'.csv', columns=['ref', 'date', 'meta', 'value', 'file_date'], quoting=1, index=False)
    df['file_date'] = pd.to_datetime(df['file_date'])
    date = df['file_date'].max()
    # NOTE(review): .today() is a classmethod, so this discards the max()
    # computed above and records "now" instead — confirm intended.
    date = date.today().replace(microsecond=0)
    lu = pd.DataFrame(data=[[source_name, date, 'None']], columns=['Name', 'Date', 'Status'])
    try:
        lu_new = pd.read_csv('data/last-update.csv')
    except (OSError, IOError) as e:
        lu_new = lu
    # New record first, so drop_duplicates keeps it over any stale row.
    result = pd.concat([lu, lu_new]).drop_duplicates(['Name'])
    result.to_csv('data/last-update.csv', quoting=1, index=False)
    print(result)
def get_last_update(source_name, alternative_date):
    """Return the last-update timestamp recorded for *source_name*.

    Returns None when data/last-update.csv cannot be read, and
    *alternative_date* when the source has no row in the file; otherwise
    parses the stored 'Date' column into a datetime.
    """
    try:
        df = pd.read_csv('data/last-update.csv', index_col='Name')
    except (OSError, IOError):
        return None
    if df.empty or source_name not in df.index:
        return alternative_date
    # .at replaces DataFrame.get_value, which was deprecated in pandas 0.21
    # and removed in pandas 1.0.
    date = df.at[source_name, "Date"]
    return datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"li... |
44248042853 | import cv2
import numpy as np
import glob
import uuid
import caffe
import skimage.io
from util import histogram_equalization
from scipy.ndimage import zoom
from skimage.transform import resize
import random
import cv2
import numpy as np
from matplotlib import pyplot as plt
import dlib
from project_face import frontalizer
IMAGE_WIDTH = 32
IMAGE_HEIGHT = 32
class mouth_detector():
    """Detect, frontalize, and crop the mouth region from face images."""
    def __init__(self):
        # Haar cascades for face/eye/mouth detection plus a dlib landmark
        # predictor and a 3-D face frontalizer.
        self.PATH_face_model = '../lib/shape_predictor_68_face_landmarks.dat'
        self.face_cascade = cv2.CascadeClassifier('../lib/haarcascade/haarcascade_frontalface_default.xml')
        self.eye_cascade = cv2.CascadeClassifier('../lib/haarcascade/haarcascade_eye.xml')
        self.mouth_cascade = cv2.CascadeClassifier('../lib/haarcascade/mouth.xml')
        self.md_face = dlib.shape_predictor(self.PATH_face_model)
        self.fronter = frontalizer('../lib/ref3d.pkl')
    def mouth_detect_single(self,image,isPath):
        """Return (mouth_crop, left, top, right, bottom) for the first face.

        *image* is a path when isPath is True, otherwise an image array.
        Returns (None, -1, -1, -1, -1) when a face is found but no eyes are;
        falls through and implicitly returns None (not the 5-tuple) when no
        face is detected at all.

        NOTE(review): long() is Python 2 only — this method raises NameError
        under Python 3; confirm the intended interpreter.
        """
        if isPath == True:
            img = cv2.imread(image, cv2.IMREAD_UNCHANGED)
        else:
            img = image
        img = histogram_equalization(img)
        gray_img1 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = self.face_cascade.detectMultiScale(gray_img1, 1.3, 5)
        for (x,y,w,h) in faces:
            # Require at least one eye inside the face ROI to accept it.
            roi_gray = gray_img1[y:y+h, x:x+w]
            eyes = self.eye_cascade.detectMultiScale(roi_gray)
            if(len(eyes)>0):
                p = x
                q = y
                r = w
                s = h
                face_region = gray_img1[q:q+s, p:p+r]
                face_region_rect = dlib.rectangle(long(q),long(p),long(q+s),long(p+r))
                rectan = dlib.rectangle(long(x),long(y),long(x+w),long(y+h))
                # 68 landmarks -> 2-D points -> frontalized (pose-corrected)
                # face rendering.
                shape = self.md_face(img,rectan)
                p2d = np.asarray([(shape.part(n).x, shape.part(n).y,) for n in range(shape.num_parts)], np.float32)
                rawfront, symfront = self.fronter.frontalization(img,face_region_rect,p2d)
                # Fixed mouth window inside the frontalized face image.
                face_hog_mouth = symfront[165:220, 130:190]
                gray_img = cv2.cvtColor(face_hog_mouth, cv2.COLOR_BGR2GRAY)
                crop_img_resized = cv2.resize(gray_img, (IMAGE_WIDTH, IMAGE_HEIGHT), interpolation = cv2.INTER_CUBIC)
                #cv2.imwrite("../img/output_test_img/mouthdetectsingle_crop_rezized.jpg",gray_img)
                return crop_img_resized,rectan.left(),rectan.top(),rectan.right(),rectan.bottom()
            else:
                return None,-1,-1,-1,-1
    def mouth_detect_bulk(self,input_folder,output_folder):
        """Crop mouths from every *.jpg in input_folder into output_folder.

        Files whose name contains 'showingteeth' keep that suffix in the
        generated UUID file name (used as the class label downstream).
        """
        transformed_data_set = [img for img in glob.glob(input_folder+"/*jpg")]
        for in_idx, img_path in enumerate(transformed_data_set):
            mouth,x,y,w,h = self.mouth_detect_single(img_path,True)
            if 'showingteeth' in img_path:
                guid = uuid.uuid4()
                uid_str = guid.urn
                str_guid = uid_str[9:]
                path = output_folder+"/"+str_guid+"_showingteeth.jpg"
                cv2.imwrite(path,mouth)
            else:
                guid = uuid.uuid4()
                uid_str = guid.urn
                str_guid = uid_str[9:]
                path = output_folder+"/"+str_guid+".jpg"
                cv2.imwrite(path,mouth)
    def negative_image(self,imagem):
        # Invert an 8-bit image (255 - pixel).
        imagem = (255-imagem)
        return imagem
    def adaptative_threashold(self,input_img_path):
        """Median-blur the image and return its Gaussian adaptive threshold."""
        img = cv2.imread(input_img_path,0)
        img = cv2.medianBlur(img,3)
        # th1/th2 are computed but unused; only the Gaussian variant th3 is
        # returned.
        ret,th1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
        th2 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,\
            cv2.THRESH_BINARY,11,2)
        th3 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
            cv2.THRESH_BINARY,11,2)
        #cv2.imwrite("../img/output_test_img/hmouthdetectsingle_adaptative.jpg",th3)
        return th3
{
"api_name": "cv2.CascadeClassifier",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "cv2.CascadeClassifier",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "cv2.CascadeClassifier",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "dli... |
40650003765 | import re
import requests
from selenium import webdriver
from xvfbwrapper import Xvfb
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.common import exceptions
class YTGrabber:
    '''
    Accepts YouTube page URLs (all playlists of a channel, a single
    playlist, or all videos of a channel) and returns every video item
    found on that page. Intended to be used as a context manager, which
    starts an Xvfb virtual display and a headless Chrome driver.
    '''
    # Set by __enter__; None until the context manager is active.
    driver = None
    vdisplay = None
    def _check_valid_url(self, url):
        """Validate that *url* is a supported YouTube channel/playlist URL."""
        if type(url) is int:
            raise TypeError("URL is not to be int type!")
        self.url = url.strip()
        if re.match(r"https://www\.youtube\.com/(playlist\?list=|channel/)[\w]+(/playlists|/videos)", self.url):
            return True
        if re.match(r"https://www\.youtube\.com/(playlist\?list=|channel/)[\w]+", self.url):
            return True
        raise ValueError("URL is not correct!")
    def _get_page(self, url):
        """Fetch *url*, raising ValueError on YouTube error pages, then load it in the driver."""
        self._check_valid_url(url)
        resp = requests.get(self.url)
        if resp.text.find("404 Not Found") >= 0:
            raise ValueError("'{}' , страница не найдена либо не существует".format(self.url))
        if resp.text.find("Произошла ошибка! - YouTube") >= 0:
            raise ValueError("'{}' , Произошла ошибка! - YouTube".format(self.url))
        self.driver.get(self.url)
        return True
    def get_content(self, url):
        """Scroll the page to the bottom until no more items load, then
        return a list of {'title', 'href', 'thumbnail'} dicts for every
        video item found."""
        self._get_page(url)
        preload = True
        html = WebDriverWait(self.driver, 3).until(EC.presence_of_element_located((By.TAG_NAME , "html")), "Содержимое не найден или его нет !")
        # Keep pressing END while the infinite-scroll spinner appears.
        while preload:
            html.send_keys(Keys.END)
            try:
                WebDriverWait(self.driver, 3).until(EC.presence_of_element_located((By.CSS_SELECTOR, "#contents #contents #continuations #spinner")))
            except:
                preload = False
        # Two possible layouts: "#items" children or "#contents" children.
        items = self.driver.find_elements(By.CSS_SELECTOR , "#contents #contents #items > *")
        if not items:
            items = self.driver.find_elements(By.CSS_SELECTOR , "#contents #contents #contents > *")
        if not items:
            raise ValueError("Содержимое не найден или его нет !")
        videos = []
        for item in items:
            videos.append({
                "title": item.find_element_by_id("video-title").get_attribute("title"),
                "href": item.find_element_by_id("video-title").get_attribute("href") or item.find_element_by_class_name("ytd-thumbnail").get_attribute("href"),
                "thumbnail": item.find_element_by_id("img").get_attribute("src"),
            })
        return videos
    def __enter__(self):
        # Start a virtual X display plus a sandboxed Chrome session.
        self.vdisplay = Xvfb()
        self.vdisplay.start()
        options = webdriver.ChromeOptions()
        # NOTE(review): "handless" looks like a typo for "headless" (and is
        # not a real ChromeOptions attribute) — confirm intended.
        options.handless = False
        options.add_argument("--no-sandbox")
        options.add_argument("--disable-setuid-sandbox")
        self.driver = webdriver.Chrome(options=options, executable_path="driver/chromedriver")
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Tear down the driver and the virtual display, if they were started.
        if self.driver:
            self.driver.close()
        if self.vdisplay:
            self.vdisplay.stop()
{
"api_name": "re.match",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.ui.WebDrive... |
45356075426 | import numpy as np
from PyQt5.QtCore import QSize
from PyQt5.QtGui import QIcon, QColor
from PyQt5.QtWidgets import QListWidgetItem, QPushButton
from LGEprocess.flags_LGE import *
from skimage import exposure, img_as_float
import torch.utils.data as Datas
from LGEprocess import Network as Network
import nibabel as nib
import os
import torch
def Seg(img):
    """Run the pretrained DenseBiasNet segmentation network over *img*.

    Assumes *img* is a 5-D array shaped (1, 1, D, H, W) — prepared by the
    caller (see SegItem.__call__); TODO confirm against callers.
    Returns a single fused label map (channels 1..3 weighted 1/2/3) as a
    numpy array.
    """
    print(img.shape)
    # Work around "duplicate OpenMP runtime" crashes on some installs.
    os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
    device = torch.device("cuda:0")
    print(torch.__version__)
    data=img
    # NOTE(review): passing a raw array as ``dataset`` relies on DataLoader
    # iterating its first dimension — confirm the intended batching.
    dataloder = Datas.DataLoader(dataset=data, batch_size=1, shuffle=False)
    Segnet = Network.DenseBiasNet(n_channels=1, n_classes=4).to(device)
    # Load pretrained weights, keeping only keys that exist in this model.
    pretrained_dict = torch.load('./model/net_epoch_source-Seg-Network.pkl', map_location='cpu')
    model_dict = Segnet.state_dict()
    pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
    model_dict.update(pretrained_dict)
    Segnet.load_state_dict(model_dict)
    with torch.no_grad():
        for epoch in range(1):
            for step, (img) in enumerate(dataloder):
                print(img.shape)
                img=img.to(device).float()
                print(img.shape)
                img=Segnet(img)
                # Collapse the 4 output channels into one label volume:
                # voxel = 1*ch1 + 2*ch2 + 3*ch3 (background ch0 dropped).
                img= img[0, 1, :, :, :] * 1 + img[0, 2, :, :, :] * 2 + img[0, 3, :, :, :] * 3
                img = img.data.cpu().numpy()
                print(img.shape)
    return img
def Reg(mov,fix):
    """Register the moving volume *mov* onto *fix* with the pretrained VXm net.

    Assumes both inputs are 5-D arrays shaped (1, 1, D, H, W) — prepared by
    the caller (see RegItem.__call__); TODO confirm against callers.
    Returns the warped moving volume as a numpy array.
    """
    print(mov.shape)
    print(fix.shape)
    # Work around "duplicate OpenMP runtime" crashes on some installs.
    os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
    device = torch.device("cuda:0")
    print(torch.__version__)
    data = mov,fix
    # NOTE(review): a (mov, fix) tuple as ``dataset`` with batch_size=2 makes
    # DataLoader yield the pair as one batch — confirm this is intentional.
    dataloder = Datas.DataLoader(dataset=data, batch_size=2, shuffle=False)
    Flownet = Network.VXm(2).to(device)
    ##
    # Load pretrained flow-network weights, keeping only matching keys.
    pretrained_dict = torch.load('./model/net_epoch_source-Flow-Network.pkl')
    model_dict = Flownet.state_dict()
    pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
    model_dict.update(pretrained_dict)
    Flownet.load_state_dict(model_dict)
    with torch.no_grad():
        for epoch in range(1):
            for step, (mov,fix) in enumerate(dataloder):
                print(mov.shape)
                print(fix.shape)
                mov = mov.to(device).float()
                fix = fix.to(device).float()
                print(mov.shape)
                print(fix.shape)
                # Only the warped volume (mov_fix) is used; the flow fields
                # and es_source outputs are discarded.
                flow_field_x1, mov_fix, flow_field_x2, es_source = Flownet(fix, mov, fix)
                mov_fix = mov_fix[0, 0, :, :, :].data.cpu().numpy()
                print(mov_fix.shape)
    return mov_fix
def load_nii(path):
    """Read a NIfTI file and return (voxel array, affine matrix)."""
    nii = nib.load(path)
    return np.asarray(nii.dataobj), nii.affine
def normor(image):
    """Standardise *image* in place to zero mean, then unit variance.

    The array is modified in place and the same object is returned.
    """
    mean_val = image.mean()
    image -= mean_val
    std_val = image.std()
    image /= std_val
    return image
def crop_img(label_es, img, box_height=128, box_width=128):
    """Crop *img* to a (box_height, box_width) window centred on the nonzero
    region of *label_es*; the full depth (axis 2) is kept.

    Matches the original behaviour exactly: the window start is clamped at 0,
    and when the centred window would start below 0 the window becomes
    [0, box_height) / [0, box_width).
    """
    nz = label_es.nonzero()
    xs = nz[0]
    x_mid = np.median(nz[0])
    x_extent = max(xs) - min(xs) + 1
    assert x_extent < box_height, 'height小了'
    x_start = max(0, int(x_mid - box_height / 2))
    if int(x_mid - box_height / 2) >= 0:
        x_end = int(x_mid + box_height / 2)
    else:
        x_end = box_height
    print('axs', x_start)
    print('axe', x_end)
    print('x:', x_end - x_start)
    ys = nz[1]
    y_mid = np.median(ys)
    y_extent = max(ys) - min(ys) + 1
    assert y_extent < box_width, 'width小了'
    y_start = max(0, int(y_mid - box_width / 2))
    if int(y_mid - box_width / 2) >= 0:
        y_end = int(y_mid + box_width / 2)
    else:
        y_end = box_width
    cropped = img[x_start:x_end, y_start:y_end, :]
    print('img1', cropped.shape)
    return cropped
class MyItem_LGE(QListWidgetItem):
    """List-widget item base class whose single-underscore attributes are
    exposed as a parameter dict (leading underscore stripped)."""

    def __init__(self, name=None, parent=None):
        super(MyItem_LGE, self).__init__(name, parent=parent)
        self.setIcon(QIcon('icons/color.png'))
        self.setSizeHint(QSize(60, 60))  # fixed row size in the list widget
        print('MyItem_LGE')

    def get_params(self):
        """Return {name: value} for every ``_name`` (not ``__name``) attribute."""
        return {
            attr.replace('_', '', 1): self.__getattribute__(attr)
            for attr in dir(self)
            if attr.startswith('_') and not attr.startswith('__')
        }

    def update_params(self, param):
        """Write values back onto the matching ``_``-prefixed attributes."""
        for key, value in param.items():
            if '_' + key in dir(self):
                self.__setattr__('_' + key, value)
class LabelItem(MyItem_LGE):
    """Pipeline step that passes the ground-truth label through unchanged."""

    def __init__(self, parent=None):
        super(LabelItem, self).__init__('添加GT', parent=parent)

    def __call__(self, label):
        # Identity step: the GT volume is returned as-is.
        return label
class NormItem(MyItem_LGE):
    """Pipeline step: min-max normalisation of an image to [0, 1]."""

    def __init__(self, parent=None):
        super(NormItem, self).__init__('归一化', parent=parent)

    def __call__(self, img):
        # Renamed locals: the original shadowed the builtins ``max``/``min``.
        max_val = img.max()
        min_val = img.min()
        # NOTE(review): divides by zero when the image is constant — confirm
        # callers never pass a flat volume.
        img = (img - min_val) / (max_val - min_val)
        return img
class LightItem(MyItem_LGE):
    """Pipeline step: brightness adjustment via gamma correction."""

    def __init__(self, parent=None):
        super(LightItem, self).__init__('亮度', parent=parent)
        # Gamma factor; values in (0, 1] darken, values > 1 brighten.
        self.alpha = 1

    def __call__(self, img):
        img = img_as_float(img)
        # Bug fix: the original condition ``self.alpha <=1 & self.alpha >0``
        # parsed as ``self.alpha <= (1 & self.alpha) > 0`` because ``&`` binds
        # tighter than comparisons (and raises TypeError for float alpha).
        # The intended range check is 0 < alpha <= 1.
        if 0 < self.alpha <= 1:
            img = exposure.adjust_gamma(img, self.alpha)  # darken the image
        elif self.alpha > 1:
            img = exposure.adjust_gamma(img, 0.5)  # brighten the image
        else:
            print('请输入大于0的数字!')
        return img
class ROIItem(MyItem_LGE):
    """Pipeline step: crop the image to a 128x128 ROI around the region
    marked nonzero in a ground-truth label volume."""
    def __init__(self, parent=None):
        super(ROIItem, self).__init__('ROI提取', parent=parent)
    def __call__(self, img):
        print(img.shape)
        # NOTE(review): hardcoded label path — only works for this one case.
        label_path='./image/patient081_frame01_gt.nii.gz'
        # NOTE(review): ``get_data()`` is deprecated in nibabel; newer code
        # uses ``get_fdata()`` (which returns float) — confirm before changing.
        label=nib.load(label_path).get_data()
        print(label.shape)
        img=crop_img(label_es=label,img=img,box_height=128,box_width=128)
        print(img.shape)
        return img
class RegItem(MyItem_LGE):
    """Pipeline step: register the input volume onto a fixed ES reference
    volume using the pretrained flow network (see ``Reg``)."""
    def __init__(self, parent=None):
        super(RegItem, self).__init__('配准', parent=parent)
    def __call__(self, img):
        # NOTE(review): hardcoded fixed-volume path.
        path='./image/_es.nii.gz'
        fix=nib.load(path).get_data()
        # Reorder axes xyz -> zyx, standardise in place, then add batch and
        # channel dimensions so the network sees (1, 1, D, H, W).
        img = np.transpose(img, (2, 1, 0)) # xyz-zyx
        img = normor(img)
        img = img[np.newaxis, np.newaxis, :, :, :]
        fix = np.transpose(fix, (2, 1, 0)) # xyz-zyx
        fix = normor(fix)
        fix = fix[np.newaxis, np.newaxis, :, :, :]
        mov=img
        img=Reg(mov,fix)
        # Back to xyz ordering for display.
        img = np.transpose(img, (2, 1, 0)) # zyx-xyz
        return img
class SegItem(MyItem_LGE):
    """Pipeline step: segment the input volume with the pretrained
    DenseBiasNet (see ``Seg``)."""
    def __init__(self, parent=None):
        super(SegItem, self).__init__('分割', parent=parent)
    def __call__(self, img):
        # Reorder axes xyz -> zyx, standardise in place, then add batch and
        # channel dimensions so the network sees (1, 1, D, H, W).
        img = np.transpose(img, (2, 1, 0)) # xyz-zyx
        img=normor(img)
        img = img[np.newaxis,np.newaxis, :, :, :]
        # print(img.shape)
        img=Seg(img)
        # Back to xyz ordering for display.
        img=np.transpose(img,(2,1,0))#zyx-xyz
        print(img.shape)
        return img
| JefferyCYH/pyqt_medical | LGEprocess/listWidgetItems_LGE.py | listWidgetItems_LGE.py | py | 6,988 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.environ",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.__version__",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.... |
19579927717 | from pymongo import MongoClient
from flask import Flask, jsonify
from flask_cors import CORS
app = Flask(__name__)
# Allow cross-origin requests (the frontend is served from another host).
CORS(app)
@app.route("/")
def hello():
new_list = []
client = MongoClient()
db = client.variables
variables = db.variables
cursor = variables.find({})
# print(variables)
for doc in cursor:
message = ''
symbol = doc['symbol']
fiveMinSuccess = doc['values']["5MIN"]['success']
fiveMinBlackX = doc['values']["5MIN"]['black_x']
fiveMinPrice = doc['values']["5MIN"]['price']
fiveMinMa = doc['values']["5MIN"]['ma']
fifteenMinSuccess = doc['values']["15MIN"]['success']
fifteenMinBlackX = doc['values']["15MIN"]['black_x']
fifteenMinPrice = doc['values']["15MIN"]['price']
fifteenMinMa = doc['values']["15MIN"]['ma']
oneHourSuccess = doc['values']["1HRS"]['success']
oneHourBlackX = doc['values']["1HRS"]['black_x']
oneHourPrice = doc['values']["1HRS"]['price']
oneHourMa = doc['values']["1HRS"]['ma']
fourHourSuccess = doc['values']["4HRS"]['success']
fourHourBlackX = doc['values']["4HRS"]['black_x']
fourHourPrice = doc['values']["4HRS"]['price']
fourHourMa = doc['values']["4HRS"]['ma']
oneDaySuccess = doc['values']["1DAY"]['success']
oneDayBlackX = doc['values']["1DAY"]['black_x']
oneDayPrice = doc['values']["1DAY"]['price']
oneDayMa = doc['values']["1DAY"]['ma']
new_dict = {"symbol": symbol, "fiveMin": f"{fiveMinSuccess}/{fiveMinBlackX} {calculate_difference(fiveMinPrice, fiveMinMa)}", "fifteenMin": f"{fifteenMinSuccess}/{fifteenMinBlackX} {calculate_difference(fifteenMinPrice, fifteenMinMa)}", "oneHour": f"{oneHourSuccess}/{oneHourBlackX} {calculate_difference(oneHourPrice, oneHourMa)}", "fourHour": f"{fourHourSuccess}/{fourHourBlackX} {calculate_difference(fourHourPrice, fourHourMa)}", "oneDay": f"{oneDaySuccess}/{oneDayBlackX} {calculate_difference(oneDayPrice, oneDayMa)}"}
new_list.append(new_dict)
print(new_list)
return jsonify(new_list)
def calculate_difference(price, ma) -> str:
    """Return an up arrow when price is above the moving average, else down.

    Note: equality counts as "down", matching the original behaviour.
    """
    return '↗' if price > ma else '↘'
# NOTE(review): debug=True enables the Werkzeug debugger/reloader — confirm
# this is never deployed with debug on in production.
app.run(debug=True)
| OlzyInnovation/DaveBot_Forex | server.py | server.py | py | 2,308 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"li... |
import sqlite3

# Open (or create) the inventory database file in the working directory.
conn = sqlite3.connect("Stationary_inventt.db")
cursor = conn.cursor()
print(" database successful")
# Drop any previous table so re-running the script never duplicates rows.
cursor.execute("DROP TABLE IF EXISTS Stationery_stock")
cursor.execute("""
CREATE TABLE Stationery_stock(
ITEM_ID INTEGER,
ITEMS TEXT,
COST_PRICE INTEGER,
QUANTITY_IN_STOCK INTEGER
)
""")
print("table created successfully")
# Seed stock: (id, item name, cost price, quantity on hand).
stock_rows = [
    (1, "2b 60 leaves bks", 600, 10),
    (2, "Staple pin", 800, 5),
    (3, "Gum", 1000, 15),
    (4, "Pencils", 500, 30),
    (5, "A4 paper", 5000, 7),
    (6, "Flexible Ruler", 1500, 22),
    (7, "set square", 4000, 5),
    (8, "Math set", 2500, 3),
    (9, "Eraser", 750, 8),
    (10, "Calculator", 3000, 10),
]
cursor.executemany("INSERT INTO Stationery_stock VALUES(?,?,?,?)", stock_rows)
# Amount the business owner invested in procurement.
cursor.execute("SELECT SUM(COST_PRICE) FROM Stationery_stock")
print(cursor.fetchall())
# Average quantity of items in stock.
cursor.execute("SELECT AVG(QUANTITY_IN_STOCK) FROM Stationery_stock")
print(cursor.fetchall())
# Item with the least quantity in stock.
cursor.execute("SELECT ITEMS, MIN(QUANTITY_IN_STOCK) FROM Stationery_stock")
print(cursor.fetchall())
# Item with the most quantity in stock.
cursor.execute("SELECT ITEMS,MAX(QUANTITY_IN_STOCK) FROM Stationery_stock")
print(cursor.fetchall())
conn.commit()
conn.close()
| debbytech22/module-5-solutions | Lesson_3_solution.py | Lesson_3_solution.py | py | 1,271 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sqlite3.connect",
"line_number": 2,
"usage_type": "call"
}
] |
21204997139 | # -*- coding: utf-8 -*-
"""
This is a prototype script.
"""
import numpy as np
from PIL import Image
from PIL import ImageEnhance
from scipy.ndimage import gaussian_filter
import cv2
from skimage import io as ip
frame_rate = 24 #output frame rate
vidcap = cv2.VideoCapture('video9.mov')
success,image = vidcap.read()
count = 1
print('Demuxing video')
while success:
cv2.imwrite("frame%d.png" % count, image) # save frame as JPEG file
success,image = vidcap.read()
count += 1
def initial_processing(iminit, low_val, max_val):
img = Image.open(iminit)
converter = ImageEnhance.Contrast(img)
print(low_val)
print(max_val)
cont = (1/(max_val/low_val))*2.0
img = converter.enhance(cont)
array = np.array(img)
ip.imsave('temp1.png', array)
def calc_val(im1):
img = Image.open(im1)
array = np.array(img)
low_val = np.mean(array)
max_val = np.amax(array)
return low_val, max_val
def imadd(I, K):
import numbers
if isinstance(K, numbers.Number):
J = I.astype('int32')
J += K
elif isinstance(K, np.ndarray):
assert K.shape == I.shape, f'Cannot add images with sizes {I.shape} and {K.shape}.'
J = I.astype('int32') + K.astype('int32')
else:
raise TypeError('K must be a number or an array.')
np.clip(J, 0, 255, out=J)
J = J.astype('uint8')
return J
def gaussian_filt(I, sigma, pad=0):
import numbers
assert isinstance(pad, numbers.Number) or pad in ['reflect', 'nearest', 'wrap'], \
'Choose a correct value for pad: a number (0-255), ''reflect'', ''nearest'', or ''wrap''.'
if isinstance(pad, numbers.Number):
md = 'constant'
c = pad
else:
md = pad
c = 0
return gaussian_filter(I, sigma, mode=md, cval=c)
def final_processing(finalim, k):
I = ip.imread(finalim)
R = np.logical_and(I[:, :, 0] > 254, I[:, :, 1] < 255)
new_R = gaussian_filt(255 * R, 5)
J = I.copy()
J[:, :, 0] = imadd(new_R, J[:, :, 0])
ip.imsave('temp.png', J)
img2 = Image.open('temp.png')
converter = ImageEnhance.Color(img2)
img2 = converter.enhance(1.4)
im = np.array(img2)
ip.imsave('final{}.png'.format(k), im)
def process_loop():
for i in range(count):
low_val, max_val=calc_val('frame{}.png'.format(i+1))
print('Processing image {}'.format(i+1))
initial_processing('frame{}.png'.format(i+1), low_val, max_val)
final_processing('temp1.png', i+1)
def video_mux():
print("Remuxing Files")
pathOut = 'video_out.mp4'
fps = frame_rate
frame_array = []
files = ['final{}.png'.format(i+1) for i in range(count)]
for i in range(len(files)):
#filename=pathIn + files[i]
filename=files[i]
#reading each files
img = cv2.imread(filename)
height, width, layers = img.shape
size = (width,height)
#inserting the frames into an image array
frame_array.append(img)
out = cv2.VideoWriter(pathOut,cv2.VideoWriter_fourcc(*'H264'), fps, size)
for i in range(len(frame_array)):
# writing to a image array
out.write(frame_array[i])
out.release()
count = count-1
process_loop()
video_mux()
| PindareTech/video-modding-script | editing_script.py | editing_script.py | py | 3,414 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_num... |
194935106 | from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework import serializers
from rest_framework import generics
from rest_framework import viewsets
from rest_framework.decorators import detail_route, list_route
from rest_framework.views import APIView
from core.models import *
from django.forms import widgets
from django.conf.urls import patterns, url
from services.cord.models import VOLTTenant, VBNGTenant, CordSubscriberRoot
from core.xoslib.objects.cordsubscriber import CordSubscriber
from plus import PlusSerializerMixin, XOSViewSet
from django.shortcuts import get_object_or_404
from xos.apibase import XOSListCreateAPIView, XOSRetrieveUpdateDestroyAPIView, XOSPermissionDenied
from xos.exceptions import *
import json
import subprocess
if hasattr(serializers, "ReadOnlyField"):
# rest_framework 3.x
ReadOnlyField = serializers.ReadOnlyField
else:
# rest_framework 2.x
ReadOnlyField = serializers.Field
class CordSubscriberIdSerializer(serializers.ModelSerializer, PlusSerializerMixin):
    """Serializer for CordSubscriber.

    Most fields are read-only (computed on the model); the per-service
    toggles, filter settings and speeds are writable. The ``fields`` tuple
    in Meta controls which of them appear in API responses.
    """
    id = ReadOnlyField()
    service_specific_id = ReadOnlyField()
    vlan_id = ReadOnlyField() # XXX remove this
    c_tag = ReadOnlyField()
    s_tag = ReadOnlyField()
    vcpe_id = ReadOnlyField()
    instance = ReadOnlyField()
    image = ReadOnlyField()
    vbng_id = ReadOnlyField()
    firewall_enable = serializers.BooleanField()
    firewall_rules = serializers.CharField()
    url_filter_enable = serializers.BooleanField()
    url_filter_rules = serializers.CharField()
    url_filter_level = serializers.CharField(required=False)
    cdn_enable = serializers.BooleanField()
    instance_name = ReadOnlyField()
    image_name = ReadOnlyField()
    routeable_subnet = serializers.CharField(required=False)
    ssh_command = ReadOnlyField()
    bbs_account = ReadOnlyField()
    wan_container_ip = ReadOnlyField()
    uplink_speed = serializers.CharField(required=False)
    downlink_speed = serializers.CharField(required=False)
    status = serializers.CharField()
    enable_uverse = serializers.BooleanField()
    lan_ip = ReadOnlyField()
    wan_ip = ReadOnlyField()
    nat_ip = ReadOnlyField()
    private_ip = ReadOnlyField()
    wan_mac = ReadOnlyField()
    vcpe_synced = serializers.BooleanField()
    humanReadableName = serializers.SerializerMethodField("getHumanReadableName")
    class Meta:
        model = CordSubscriber
        fields = ('humanReadableName', 'id',
                  'service_specific_id', 'vlan_id', 's_tag', 'c_tag',
                  'vcpe_id', 'instance', 'instance_name', 'image', 'image_name',
                  'firewall_enable', 'firewall_rules',
                  'url_filter_enable', 'url_filter_rules', 'url_filter_level',
                  'bbs_account',
                  'ssh_command',
                  'vcpe_synced',
                  'cdn_enable', 'vbng_id', 'routeable_subnet', 'nat_ip', 'lan_ip', 'wan_ip', 'private_ip', 'wan_mac',
                  'wan_container_ip',
                  'uplink_speed', 'downlink_speed', 'status', 'enable_uverse')
    def getHumanReadableName(self, obj):
        # Display name for the GUI; delegates to the model's __unicode__.
        return obj.__unicode__()
#------------------------------------------------------------------------------
# The "old" API
# This is used by the xoslib-based GUI
#------------------------------------------------------------------------------
class CordSubscriberList(XOSListCreateAPIView):
    """List/create endpoint for subscribers (legacy xoslib-GUI API)."""
    queryset = CordSubscriber.get_tenant_objects().select_related().all()
    serializer_class = CordSubscriberIdSerializer
    method_kind = "list"
    method_name = "cordsubscriber"
class CordSubscriberDetail(XOSRetrieveUpdateDestroyAPIView):
    """Retrieve/update/destroy endpoint for one subscriber (legacy API)."""
    queryset = CordSubscriber.get_tenant_objects().select_related().all()
    serializer_class = CordSubscriberIdSerializer
    method_kind = "detail"
    method_name = "cordsubscriber"
# We fake a user object by pulling the user data struct out of the
# subscriber object...
def serialize_user(subscriber, user):
    """Flatten a user dict stored on *subscriber* into an API record.

    The record id is "<subscriber id>-<user id>" so it is unique across
    subscribers; missing level/mac default to empty strings.
    """
    composite_id = "%d-%d" % (subscriber.id, user["id"])
    return {
        "id": composite_id,
        "name": user["name"],
        "level": user.get("level", ""),
        "mac": user.get("mac", ""),
        "subscriber": subscriber.id,
    }
class CordUserList(APIView):
    """List/create endpoint for the users stored on each subscriber."""
    method_kind = "list"
    method_name = "corduser"
    def get(self, request, format=None):
        """Return every user of every subscriber, flattened."""
        instances=[]
        for subscriber in CordSubscriber.get_tenant_objects().all():
            for user in subscriber.users:
                instances.append( serialize_user(subscriber, user) )
        return Response(instances)
    def post(self, request, format=None):
        """Create a user on the subscriber named in the request body."""
        data = request.DATA
        subscriber = CordSubscriber.get_tenant_objects().get(id=int(data["subscriber"]))
        user = subscriber.create_user(name=data["name"],
                                      level=data["level"],
                                      mac=data["mac"])
        subscriber.save()
        return Response(serialize_user(subscriber,user))
class CordUserDetail(APIView):
    """Detail endpoint for one subscriber user; pk is "<sub id>-<user id>"."""
    method_kind = "detail"
    method_name = "corduser"

    def get(self, request, format=None, pk=0):
        """Return the user identified by pk."""
        parts = pk.split("-")
        # Bug fix: the original called .filter(), which returns a QuerySet
        # (no .users attribute); .get() fetches the subscriber object, as the
        # delete()/put() handlers below already do. The loop now also matches
        # the requested user id instead of returning the first user.
        subscriber = CordSubscriber.get_tenant_objects().get(id=int(parts[0]))
        for user in subscriber.users:
            if str(user["id"]) == str(parts[1]):
                return Response( [ serialize_user(subscriber, user) ] )
        raise XOSNotFound("Failed to find user %s" % pk)

    def delete(self, request, pk):
        """Remove the user identified by pk from its subscriber."""
        parts = pk.split("-")
        subscriber = CordSubscriber.get_tenant_objects().get(id=int(parts[0]))
        subscriber.delete_user(parts[1])
        subscriber.save()
        return Response("okay")

    def put(self, request, pk):
        """Update name/level/mac of the user identified by pk."""
        kwargs={}
        if "name" in request.DATA:
            kwargs["name"] = request.DATA["name"]
        if "level" in request.DATA:
            kwargs["level"] = request.DATA["level"]
        if "mac" in request.DATA:
            kwargs["mac"] = request.DATA["mac"]
        parts = pk.split("-")
        subscriber = CordSubscriber.get_tenant_objects().get(id=int(parts[0]))
        user = subscriber.update_user(parts[1], **kwargs)
        subscriber.save()
        return Response(serialize_user(subscriber,user))
#------------------------------------------------------------------------------
# The "new" API with many more REST endpoints.
# This is for integration with with the subscriber GUI
#------------------------------------------------------------------------------
class CordSubscriberViewSet(XOSViewSet):
    """REST endpoints for subscriber self-service: parental controls,
    per-service toggles, user management, demo bootstrap and lookups."""

    base_name = "subscriber"
    method_name = "rs/subscriber"
    method_kind = "viewset"
    queryset = CordSubscriber.get_tenant_objects().select_related().all()
    serializer_class = CordSubscriberIdSerializer

    def get_vcpe(self):
        """Return the subscriber's vCPE tenant, raising if it is absent."""
        subscriber = self.get_object()
        if not subscriber.vcpe:
            raise XOSMissingField("vCPE object is not present for subscriber")
        return subscriber.vcpe

    @classmethod
    def get_urlpatterns(self):
        """Register the extra detail/collection routes for this viewset."""
        patterns = super(CordSubscriberViewSet, self).get_urlpatterns()
        patterns.append( self.detail_url("vcpe_synced/$", {"get": "get_vcpe_synced"}, "vcpe_synced") )
        patterns.append( self.detail_url("url_filter/$", {"get": "get_url_filter"}, "url_filter") )
        patterns.append( self.detail_url("url_filter/(?P<level>[a-zA-Z0-9\-_]+)/$", {"put": "set_url_filter"}, "url_filter") )
        patterns.append( self.detail_url("services/$", {"get": "get_services"}, "services") )
        patterns.append( self.detail_url("services/(?P<service>[a-zA-Z0-9\-_]+)/$", {"get": "get_service"}, "get_service") )
        patterns.append( self.detail_url("services/(?P<service>[a-zA-Z0-9\-_]+)/true/$", {"put": "enable_service"}, "enable_service") )
        patterns.append( self.detail_url("services/(?P<service>[a-zA-Z0-9\-_]+)/false/$", {"put": "disable_service"}, "disable_service") )
        patterns.append( self.detail_url("users/$", {"get": "get_users", "post": "create_user"}, "users") )
        patterns.append( self.detail_url("users/clearusers/$", {"get": "clear_users", "put": "clear_users", "post": "clear_users"}, "clearusers") )
        patterns.append( self.detail_url("users/newuser/$", {"put": "create_user", "post": "create_user"}, "newuser") )
        patterns.append( self.detail_url("users/(?P<uid>[0-9\-]+)/$", {"delete": "delete_user"}, "user") )
        patterns.append( self.detail_url("users/(?P<uid>[0-9\-]+)/url_filter/$", {"get": "get_user_level"}, "user_level") )
        patterns.append( self.detail_url("users/(?P<uid>[0-9\-]+)/url_filter/(?P<level>[a-zA-Z0-9\-_]+)/$", {"put": "set_user_level"}, "set_user_level") )
        patterns.append( self.detail_url("bbsdump/$", {"get": "get_bbsdump"}, "bbsdump") )
        patterns.append( url("^rs/initdemo/$", self.as_view({"put": "initdemo", "get": "initdemo"}), name="initdemo") )
        patterns.append( url("^rs/subidlookup/(?P<ssid>[0-9\-]+)/$", self.as_view({"get": "ssiddetail"}), name="ssiddetail") )
        patterns.append( url("^rs/subidlookup/$", self.as_view({"get": "ssidlist"}), name="ssidlist") )
        patterns.append( url("^rs/vbng_mapping/$", self.as_view({"get": "get_vbng_mapping"}), name="vbng_mapping") )
        return patterns

    def list(self, request):
        """Return all subscribers wrapped in a {"subscribers": [...]} envelope."""
        object_list = self.filter_queryset(self.get_queryset())
        serializer = self.get_serializer(object_list, many=True)
        return Response({"subscribers": serializer.data})

    def get_vcpe_synced(self, request, pk=None):
        subscriber = self.get_object()
        return Response({"vcpe_synced": subscriber.vcpe_synced})

    def get_url_filter(self, request, pk=None):
        subscriber = self.get_object()
        return Response({"level": subscriber.url_filter_level})

    def set_url_filter(self, request, pk=None, level=None):
        subscriber = self.get_object()
        subscriber.url_filter_level = level
        subscriber.save()
        return Response({"level": subscriber.url_filter_level})

    def get_users(self, request, pk=None):
        subscriber = self.get_object()
        return Response(subscriber.users)

    def get_user_level(self, request, pk=None, uid=None):
        """Per-user filter level; falls back to the subscriber-wide level."""
        subscriber = self.get_object()
        user = subscriber.find_user(uid)
        if user and user.get("level", None):
            level = user["level"]
        else:
            level = self.get_object().url_filter_level
        return Response( {"id": uid, "level": level} )

    def set_user_level(self, request, pk=None, uid=None, level=None):
        subscriber = self.get_object()
        subscriber.update_user(uid, level=level)
        subscriber.save()
        return self.get_user_level(request, pk, uid)

    def create_user(self, request, pk=None):
        """Create a user on this subscriber; name and mac are required."""
        data = request.DATA
        name = data.get("name",None)
        mac = data.get("mac",None)
        if (not name):
            raise XOSMissingField("name must be specified when creating user")
        if (not mac):
            raise XOSMissingField("mac must be specified when creating user")
        subscriber = self.get_object()
        newuser = subscriber.create_user(name=name, mac=mac)
        subscriber.save()
        return Response(newuser)

    def delete_user(self, request, pk=None, uid=None):
        subscriber = self.get_object()
        subscriber.delete_user(uid)
        subscriber.save()
        return Response( {"id": uid, "deleted": True} )

    def clear_users(self, request, pk=None):
        subscriber = self.get_object()
        subscriber.users = []
        subscriber.save()
        return Response( "Okay" )

    def get_services(self, request, pk=None):
        subscriber = self.get_object()
        return Response(subscriber.services)

    def get_service(self, request, pk=None, service=None):
        # Service toggles are stored as "<service>_enable" boolean attributes.
        service_attr = service+"_enable"
        subscriber = self.get_object()
        return Response({service: getattr(subscriber, service_attr)})

    def enable_service(self, request, pk=None, service=None):
        service_attr = service+"_enable"
        subscriber = self.get_object()
        setattr(subscriber, service_attr, True)
        subscriber.save()
        return Response({service: getattr(subscriber, service_attr)})

    def disable_service(self, request, pk=None, service=None):
        service_attr = service+"_enable"
        subscriber = self.get_object()
        setattr(subscriber, service_attr, False)
        subscriber.save()
        return Response({service: getattr(subscriber, service_attr)})

    def get_bbsdump(self, request, pk=None):
        """Dump the subscriber's BroadbandShield account via helper script."""
        subscriber = self.get_object()
        # Bug fix: the original read ``subsciber.volt`` (typo), which raised
        # NameError on every request to this endpoint.
        if not subscriber.volt or not subscriber.volt.vcpe:
            raise XOSMissingField("subscriber has no vCPE")
        if not subscriber.volt.vcpe.bbs_account:
            raise XOSMissingField("subscriber has no bbs_account")
        result=subprocess.check_output(["python", "/opt/xos/observers/vcpe/broadbandshield.py", "dump", subscriber.volt.vcpe.bbs_account, "123"])
        if request.GET.get("theformat",None)=="text":
            from django.http import HttpResponse
            return HttpResponse(result, content_type="text/plain")
        else:
            return Response( {"bbs_dump": result } )

    def setup_demo_subscriber(self, subscriber):
        """Reset a subscriber's users to the canned demo household."""
        # nuke the users and start over
        subscriber.users = []
        subscriber.create_user(name="Mom's PC", mac="010203040506", level="PG_13")
        subscriber.create_user(name="Dad's PC", mac="90E2Ba82F975", level="PG_13")
        subscriber.create_user(name="Jack's Laptop", mac="685B359D91D5", level="PG_13")
        subscriber.create_user(name="Jill's Laptop", mac="34363BC9B6A6", level="PG_13")
        subscriber.save()

    def initdemo(self, request):
        """Reset existing demo subscribers or create a fresh one."""
        object_list = CordSubscriber.get_tenant_objects().all()
        # reset the parental controls in any existing demo vCPEs
        for o in object_list:
            if str(o.service_specific_id) in ["0", "1"]:
                self.setup_demo_subscriber(o)
        demo_subscribers = [o for o in object_list if o.is_demo_user]
        if demo_subscribers:
            return Response({"id": demo_subscribers[0].id})
        subscriber = CordSubscriberRoot(service_specific_id=1234,
                                        name="demo-subscriber",)
        subscriber.is_demo_user = True
        subscriber.save()
        self.setup_demo_subscriber(subscriber)
        return Response({"id": subscriber.id})

    def ssidlist(self, request):
        """Map every service_specific_id to its subscriber id."""
        object_list = CordSubscriber.get_tenant_objects().all()
        ssidmap = [ {"service_specific_id": x.service_specific_id, "subscriber_id": x.id} for x in object_list ]
        return Response({"ssidmap": ssidmap})

    def ssiddetail(self, request, pk=None, ssid=None):
        """Look up the subscriber id for one service_specific_id."""
        # Bug fix: DRF passes the request positionally, so the original
        # signature (self, pk=None, ssid=None) bound the request object to
        # ``pk``; it only worked because ``pk`` was unused.
        object_list = CordSubscriber.get_tenant_objects().all()
        ssidmap = [ {"service_specific_id": x.service_specific_id, "subscriber_id": x.id} for x in object_list if str(x.service_specific_id)==str(ssid) ]
        if len(ssidmap)==0:
            raise XOSNotFound("didn't find ssid %s" % str(ssid))
        return Response( ssidmap[0] )

    def get_vbng_mapping(self, request):
        """Return the private-IP to routeable-subnet mappings held by vBNG."""
        object_list = VBNGTenant.get_tenant_objects().all()
        mappings = []
        for vbng in object_list:
            if vbng.mapped_ip and vbng.routeable_subnet:
                mappings.append( {"private_ip": vbng.mapped_ip, "routeable_subnet": vbng.routeable_subnet, "mac": vbng.mapped_mac, "hostname": vbng.mapped_hostname} )
        return Response( {"vbng_mapping": mappings} )
class CordDebugIdSerializer(serializers.ModelSerializer, PlusSerializerMixin):
    """Placeholder serializer so schema generation does not fail."""
    # Swagger is failing because CordDebugViewSet has neither a model nor
    # a serializer_class. Stuck this in here as a placeholder for now.
    id = ReadOnlyField()
    class Meta:
        model = CordSubscriber
class CordDebugViewSet(XOSViewSet):
    """Debug-only endpoints for inspecting the vBNG service state."""
    base_name = "cord_debug"
    method_name = "rs/cord_debug"
    method_kind = "viewset"
    serializer_class = CordDebugIdSerializer
    @classmethod
    def get_urlpatterns(self):
        """Register the single vbng_dump route."""
        patterns = []
        patterns.append( url("^rs/cord_debug/vbng_dump/$", self.as_view({"get": "get_vbng_dump"}), name="vbng_dump"))
        return patterns
    # contact vBNG service and dump current list of mappings
    def get_vbng_dump(self, request, pk=None):
        # NOTE(review): hardcoded controller address and shelling out to curl
        # — confirm this endpoint is not exposed outside debug deployments.
        result=subprocess.check_output(["curl", "http://10.0.3.136:8181/onos/virtualbng/privateip/map"])
        if request.GET.get("theformat",None)=="text":
            from django.http import HttpResponse
            # Plain-text rendering: one "key value" line per mapping entry.
            result = json.loads(result)["map"]
            lines = []
            for row in result:
                for k in row.keys():
                    lines.append( "%s %s" % (k, row[k]) )
            return HttpResponse("\n".join(lines), content_type="text/plain")
        else:
            return Response( {"bbs_dump": json.loads(result)["map"] } )
| xmaruto/mcord | xos/core/xoslib/methods/cordsubscriber.py | cordsubscriber.py | py | 17,144 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "rest_framework.serializers",
"line_number": 21,
"usage_type": "argument"
},
{
"api_name": "rest_framework.serializers.ReadOnlyField",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 23,
"usage_type"... |
40224803539 | import sys
from PyQt6.QtWidgets import QApplication, QWidget, QLabel
from PyQt6.QtGui import QPixmap, QFont
class MainWindow(QWidget):
    """Small PyQt6 demo window that shows a user-profile card."""

    def __init__(self):
        """Constructor for the profile window."""
        super().__init__()
        self.initializeUI()

    def initializeUI(self):
        """Set up geometry, title and contents, then show the window."""
        # First two args position the window; the last two set its size.
        self.setGeometry(500, 500, 250, 400)
        self.setWindowTitle("User profile")
        self.setupMainWindow()
        self.show()

    def creatImageLabels(self):
        """Create QLabels for the banner and profile images.

        (Method name kept as-is — "creat" — for backward compatibility.)
        """
        skyblue_path = '../Beginning-PyQt-resource/Chapter02/images/skyblue.png'
        try:
            with open(skyblue_path):
                skyblue = QLabel(self)
                pixmap = QPixmap(skyblue_path)
                skyblue.setPixmap(pixmap)
        except FileNotFoundError as error:
            print(f"Image not found. \nError: {error}")
        profile_path = '../Beginning-PyQt-resource/Chapter02/images/profile_image.png'
        try:
            with open(profile_path):
                profile = QLabel(self)
                pixmap = QPixmap(profile_path)
                profile.setPixmap(pixmap)
                profile.move(80, 20)
        except FileNotFoundError as error:
            print(f"Image not found. \nError: {error}")

    def setupMainWindow(self):
        """Lay out the image banners and the text labels."""
        self.creatImageLabels()
        user_label = QLabel(self)
        user_label.setText("John Doe")
        user_label.setFont(QFont("Arial", 20))
        user_label.move(85, 140)
        bio_label = QLabel(self)
        bio_label.setText("Biography")
        bio_label.setFont(QFont("Arial", 17))
        bio_label.move(15, 170)
        about_label = QLabel(self)
        # Bug fix: the original used a backslash line-continuation inside the
        # string literal, embedding a long run of spaces mid-sentence, and
        # misspelled "experience".
        about_label.setText("I'm a software Engineer with 10 years "
                            "experience creating awesome code!")
        about_label.setWordWrap(True)  # wrap long text onto multiple lines
        about_label.move(15, 190)
# Run the program only when executed as a script (not on import).
if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = MainWindow()
    sys.exit(app.exec())
| grant-Gan/programing_learn | pyqt6_learn/ch2-Building_a_simple_GUI/user_profile.py | user_profile.py | py | 2,148 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "PyQt6.QtWidgets.QWidget",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "PyQt6.QtWidgets.QLabel",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "PyQt6.QtGui.QPixmap",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "PyQ... |
29582028401 | # License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
import logging
from odoo import api, models, _
_logger = logging.getLogger(__name__)
class AccountInvoice(models.Model):
    """Extends account.invoice to post a Slack notification whenever an
    invoice is opened automatically."""
    _inherit = 'account.invoice'
    @api.multi
    def action_auto_open(self):
        """Open the invoice(s), then notify Slack for each record."""
        return_item = super(AccountInvoice, self).action_auto_open()
        # Send one Slack message per invoice that was just opened.
        for item in self:
            item.action_send_account_invoice_create_message_slack()
        # Return whatever the base implementation returned.
        return return_item
    @api.multi
    def action_send_account_invoice_create_message_slack(self):
        """Build and queue a Slack message announcing this invoice.

        Posts to the channel configured in the
        ``slack_log_contabilidad_channel`` system parameter.
        """
        self.ensure_one()
        # Backend URL used to build a deep link to this invoice's form view.
        web_base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
        url_item = '%s/web?#id=%s&view_type=form&model=account.invoice' % (
            web_base_url,
            self.id
        )
        # Slack "attachment" payload: title, link button and key fields.
        attachments = [
            {
                "title": _('Invoice has been created automatically'),
                "text": self.number,
                "color": "#36a64f",
                "fallback": "View invoice %s %s" % (
                    self.number,
                    url_item
                ),
                "actions": [
                    {
                        "type": "button",
                        "text": _("View invoice %s") % self.number,
                        "url": url_item
                    }
                ],
                "fields": [
                    {
                        "title": _("Customer"),
                        "value": self.partner_id.name,
                        'short': True,
                    },
                    {
                        "title": _("Origin"),
                        "value": self.origin,
                        'short': True,
                    }
                ],
            }
        ]
        # Creating a slack.message record queues it for delivery.
        vals = {
            'attachments': attachments,
            'model': 'account.invoice',
            'res_id': self.id,
            'channel': self.env['ir.config_parameter'].sudo().get_param(
                'slack_log_contabilidad_channel'
            ),
        }
        self.env['slack.message'].sudo().create(vals)
| OdooNodrizaTech/slack | slack_sale_orders_generate_invoice/models/account_invoice.py | account_invoice.py | py | 2,180 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "odoo.models.Model",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "odoo.models",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "odoo.api.multi",
... |
33942211702 | import uvicorn
import datetime
from loguru import logger
from fastapi import FastAPI
from sqlalchemy import select
from fastapi.middleware.cors import CORSMiddleware
from SAGIRIBOT.ORM.AsyncORM import orm
from SAGIRIBOT.Core.AppCore import AppCore
from SAGIRIBOT.command_parse.Commands import *
from SAGIRIBOT.ORM.AsyncORM import Setting, FunctionCalledRecord
# Web-manager API application; interactive docs endpoints are disabled.
app = FastAPI(docs_url=None, redoc_url=None)

# CORS: allow any origin so a separately-hosted web frontend can call the API.
origins = ["*"]
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
@app.get('/getGroups')
async def getGroups():
    """List all active groups as ``{value, label}`` select options."""
    rows = await orm.fetchall(select(Setting.group_id, Setting.group_name).where(Setting.active == True))
    options = []
    for row in rows:
        options.append({"value": row[0], "label": row[1]})
    return options
@app.get('/getGroupSetting')
async def getGroupSetting(groupId: int):
    """Return one group's settings as two option lists.

    Element 0: boolean switches as ``{label, value}``.
    Element 1: string-valued options as ``{label, value, validValue}`` where
    ``validValue`` lists the allowed choices for a select box.
    """
    # NOTE: the order of these names must match the column order of the
    # select() calls below -- results are zipped together positionally.
    options_bool = ["repeat", "frequency_limit", "setu", "real", "real_high_quality", "bizhi", "r18", "img_search",
                    "bangumi_search", "debug", "compile", "anti_revoke", "online_notice", "switch"]
    options_str = ["long_text_type", "r18_process", "speak_mode", "music"]
    valid_str_option_value = {
        "long_text_type": LongTextType.valid_values,
        "r18_process": R18Process.valid_values,
        "speak_mode": SpeakMode.valid_values,
        "music": Music.valid_values
    }
    bool_result = await orm.fetchone(select(
        Setting.repeat,
        Setting.frequency_limit,
        Setting.setu, Setting.real, Setting.real_high_quality, Setting.bizhi, Setting.r18,
        Setting.img_search,
        Setting.bangumi_search,
        Setting.debug,
        Setting.compile,
        Setting.anti_revoke,
        Setting.online_notice,
        Setting.switch
    ).where(
        Setting.group_id == groupId
    ))
    str_result = await orm.fetchone(select(
        Setting.long_text_type,
        Setting.r18_process,
        Setting.speak_mode,
        Setting.music
    ).where(
        Setting.group_id == groupId
    ))
    return [
        [{"label": options_bool[i], "value": bool_result[i]} for i in range(len(bool_result))],
        [{"label": options_str[i], "value": str_result[i], "validValue": valid_str_option_value[options_str[i]]} for i in range(len(str_result))]
    ]
@app.get('/modifyGroupSetting')
async def modifyGroupSetting(groupId: int, settingName: str, newValue):
    """Update a single setting value for a group.

    Args:
        groupId: group id whose Setting row is updated.
        settingName: column name in the Setting table to modify.
        newValue: new value; the strings "true"/"false" are coerced to bool
            because query parameters always arrive as strings.

    Returns:
        True on success, False if the database update raised.
    """
    if newValue in ("true", "false"):
        newValue = newValue == "true"
    try:
        await orm.update(
            Setting,
            [Setting.group_id == groupId],
            {"group_id": groupId, settingName: newValue}
        )
    except Exception as e:
        # Keep the API response simple; surface the DB error in the log.
        logger.error(f"api error: {e}")
        return False
    return True
@app.get("/getStatus")
async def getStatus():
return {
"functionCalled": len(await orm.fetchall(
select(FunctionCalledRecord).where(FunctionCalledRecord.time >= datetime.date.today())
)),
"handlerCount": len(AppCore.get_core_instance().get_group_chains()),
"sayaCount": len(AppCore.get_core_instance().get_saya_channels())
}
def run_api():
    """Serve the web-manager API on localhost:8000 (errors-only logging)."""
    uvicorn.run(app, host="127.0.0.1", port=8000, log_level="error")
| m310n/sagiri-bot | WebManager/web_manager.py | web_manager.py | py | 3,231 | python | en | code | null | github-code | 6 | [
{
"api_name": "fastapi.FastAPI",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "fastapi.middleware.cors.CORSMiddleware",
"line_number": 18,
"usage_type": "argument"
},
{
"api_name": "SAGIRIBOT.ORM.AsyncORM.orm.fetchall",
"line_number": 28,
"usage_type": "call"
... |
43953915570 | from flask import Flask, request, abort
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import *
# My Code
from util import *
app = Flask(__name__)
# Channel Access Token
# SECURITY NOTE(review): LINE credentials are hard-coded in source. They should
# be loaded from environment variables, and the values committed here revoked.
line_bot_api = LineBotApi('++7wQ1tXdLomUPrrUbvcKEE12HAh+eeIh1s46ynQESIAH2zkobGXkk19oxFSHS/5fgOju9fHnX3wu02ALT70wQSYcrFuE5ZoKd5vYwkr+VRIdTiMfFSVFerWzr5j1Syf5YlS5NGCFoXbPBiF730F3AdB04t89/1O/w1cDnyilFU=')
# Channel Secret
handler = WebhookHandler('095348740b93fb668776aa36c9571a44')
# 監聽所有來自 /callback 的 Post Request
@app.route("/callback", methods=['POST'])
def callback():
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
abort(400)
return 'OK'
# 處理訊息
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
    """Pick a reply based on keywords in the incoming text, then send it once."""
    text = event.message.text
    if '聯絡方式' in text:
        reply = imagemap_message()
    elif '你是誰' in text:
        reply = TextSendMessage(text="嗨我是吳岳,很高興認識你!")
    elif '你會什麼' in text:
        reply = Carousel_Template()
    elif '你喜歡什麼' in text:
        reply = image_gallery()
    elif "你想去哪裡工作" in text:
        reply = LocationSendMessage(title='LINE Taiwan', address='No. 333號, Ruiguang Rd, Neihu District, Taipei City, 114', latitude=25.07726625171245, longitude=121.57513202616131)
    else:
        # Fallback: echo the message back.
        reply = TextSendMessage(text='echo: ' + text)
    line_bot_api.reply_message(event.reply_token, reply)
import os

if __name__ == "__main__":
    # Bind to the platform-assigned port (e.g. Heroku's $PORT); default 5000.
    port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=port)
| asianpwnage422/myLineBot | line-bot-kevin/app.py | app.py | py | 2,208 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "linebot.LineBotApi",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "linebot.WebhookHandler",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "flask.request.h... |
15915660899 | """initial
Revision ID: 977a56225963
Revises: None
Create Date: 2016-09-24 22:05:55.701455
"""
# revision identifiers, used by Alembic.
revision = '977a56225963'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the ``series`` table and the ``chapter`` table (FK to series)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('series',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(length=120), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('title')
    )
    op.create_table('chapter',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(length=120), nullable=True),
    sa.Column('body', sa.String(length=1000000), nullable=True),
    sa.Column('series_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['series_id'], ['series.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('title')
    )
    ### end Alembic commands ###
def downgrade():
    """Drop both tables; ``chapter`` first because it references ``series``."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('chapter')
    op.drop_table('series')
    ### end Alembic commands ###
| cloudiirain/Website | migrations/versions/977a56225963_.py | 977a56225963_.py | py | 1,139 | python | en | code | null | github-code | 6 | [
{
"api_name": "alembic.op.create_table",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integ... |
72000465789 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import datetime
import locale
import os
from tqdm import tqdm
from collections import *
from typing import Optional,List,Tuple
from trident.backend.common import *
from trident.backend.pytorch_ops import *
from trident.backend.pytorch_backend import to_tensor, get_device, load,fix_layer,set_device
from trident.data.utils import download_model_from_google_drive,download_file_from_google_drive
from trident.layers.pytorch_layers import *
from trident import context
from trident.context import make_dir_if_need,split_path,sanitize_path
ctx=context._context()

__all__ = ['Word2Vec','ChineseWord2Vec']

# Ensure the trident models directory exists before any download happens.
_trident_dir = get_trident_dir()
dirname = os.path.join(_trident_dir, 'models')
if not os.path.exists(dirname):
    try:
        os.makedirs(dirname)
    except OSError:
        # Except permission denied and potential race conditions
        # in multi-threaded environments.
        pass

# The vocabulary file lives under the trident download directory.
download_path= os.path.join(_trident_dir, 'download','vocabs_tw.txt')
make_dir_if_need(download_path)
class Word2Vec(Embedding):
    """Chinese word vectors; subclasses the trident ``Embedding`` layer.

    Adds a vocabulary table plus traditional/simplified conversion maps
    (``tw2cn`` / ``cn2tw``, attached by :meth:`load`) and convenience queries
    over the embedding space (nearest words, analogies, centroids).
    """
    def __init__(self, pretrained=False, embedding_dim: Optional[int] = None, num_embeddings: Optional[int] = None, vocabs: Optional[List[str]] = None,
                 padding_idx: Optional[int] = None,
                 max_norm: Optional[float] = None, norm_type: float = 2., scale_grad_by_freq: bool = False,
                 sparse: bool = False, _weight: Optional[Tensor] = None, filter_index=-1, keep_output: bool = False, name: Optional[str] = None) -> None:
        """
        Py Word2vec structure.
        """
        super().__init__(num_embeddings=num_embeddings, embedding_dim=embedding_dim, max_norm=max_norm, norm_type=norm_type, scale_grad_by_freq=scale_grad_by_freq, sparse=sparse,
                         _weight=_weight,padding_idx=padding_idx, keep_output=keep_output, name=name)
        self.pretrained=pretrained
        self.filter_index=filter_index
        # Locale decides whether traditional<->simplified conversion applies.
        self.locale =ctx.locale
        print('locale:', self.locale)

        # Word -> index mapping, filled from `vocabs` when provided.
        self._vocabs = OrderedDict()
        if vocabs is not None:
            for k in range(len(vocabs)):
                self._vocabs[vocabs[k]] = k
        # Fetch the vocabulary file (cached under the trident download dir).
        download_file_from_google_drive(file_id='16yDlJJ4-O9pHF-ZbXy7XPZZk6vo3aw4e', dirname=os.path.join(_trident_dir, 'download'), filename='vocabs_tw.txt')

    @property
    def vocabs(self):
        # The vocabulary table (word -> index).
        return self._vocabs

    def word2idx(self, word: str):
        # Word to index (applies traditional->simplified conversion per locale).
        if self.locale != 'zh_cn' and word in self.tw2cn:
            word = self.tw2cn[word]
        if word in self._vocabs:
            return self._vocabs[word]
        else:
            # Out-of-vocabulary words map to None.
            return None

    def idx2word(self, index: int):
        # Index to word (applies simplified->traditional conversion per locale).
        if index < len(self._vocabs):
            word = self._vocabs.key_list[index]
            if self.locale != 'zh_cn' and word in self.cn2tw:
                word = self.cn2tw[word]
            return word
        else:
            return None

    @classmethod
    def load(cls):
        # Load the pretrained model from Google Drive.
        st = datetime.datetime.now()
        # Loading is done on CPU; the model is moved to the active device below.
        set_device('cpu')
        dirname = os.path.join(get_trident_dir(), 'models')
        download_model_from_google_drive('13XZPWh8QhEsC8EdIp1niLtZz0ipatSGC', dirname, 'word2vec_chinese.pth')
        recovery_model = load(os.path.join(dirname, 'word2vec_chinese.pth'))
        recovery_weight=recovery_model.state_dict()['weight']
        shp=int_shape(recovery_weight)
        v = cls(pretrained=True,num_embeddings=shp[0], embedding_dim=shp[-1],_weight=recovery_weight,name='word2vec_chinese')
        # Copy the vocabulary and conversion maps off the recovered model.
        v._vocabs=copy.deepcopy(recovery_model._vocabs)
        v.tw2cn =copy.deepcopy(recovery_model.tw2cn)
        v.cn2tw = copy.deepcopy(recovery_model.cn2tw)
        del recovery_model
        v.locale =ctx.locale
        v.to(get_device())
        et = datetime.datetime.now()
        print('total loading time:{0}'.format(et - st))
        return v

    def find_similar(self, reprt: (str, Tensor), n: int = 10, ignore_indexes=None):
        # Find the n nearest words in the embedding space, given a word or a vector.
        reprt_idx = None
        if ignore_indexes is None:
            ignore_indexes = []
        if isinstance(reprt, str):
            reprt_idx = self.word2idx(reprt)
            ignore_indexes.append(reprt_idx)
            # NOTE(review): word2idx may have converted a traditional-Chinese
            # input via tw2cn, but this membership test uses the raw input
            # word -- confirm traditional input is handled as intended here.
            reprt = self.weight[reprt_idx].expand_dims(0) if reprt in self._vocabs else None
        if is_tensor(reprt):
            # Cosine similarity against every row of the embedding matrix.
            correlate = element_cosine_distance(reprt, self.weight)[0]
            sorted_idxes = argsort(correlate, descending=True)
            # Over-fetch so results remain n-long after removing ignored indexes.
            sorted_idxes = sorted_idxes[:n + len(ignore_indexes)]
            sorted_idxes = to_tensor([idx for idx in sorted_idxes if idx.item() not in ignore_indexes]).long()
            probs = to_list(correlate[sorted_idxes])[:n]
            words = [self.idx2word(idx.item()) for idx in sorted_idxes][:n]
            return OrderedDict(zip(words, probs))
        else:
            raise ValueError('Valid reprt should be a word or a tensor .')

    def analogy(self, reprt1: (str, Tensor, list), reprt2: (str, Tensor, list), reprt3: (str, Tensor, list), n: int = 10):
        # Analogy relation (e.g. man is to woman as king is to queen).
        # Each argument may be a word, a vector, or a list of words (centroid).
        reprt1_idx = None
        reprt2_idx = None
        reprt3_idx = None
        reprt1_arr = None
        reprt2_arr = None
        reprt3_arr = None
        exclude_list = []
        if isinstance(reprt1, str):
            reprt1_idx = self.word2idx(reprt1)
            exclude_list.append(reprt1_idx)
            reprt1_arr = self.weight[reprt1_idx].expand_dims(0) if reprt1_idx is not None else None
        elif isinstance(reprt1, Tensor):
            reprt1_arr = reprt1
        elif isinstance(reprt1, list):
            if isinstance(reprt1[0], str):
                reprt1_arr = self.get_words_centroid(*reprt1)
                for item in reprt1:
                    exclude_list.append(self.word2idx(item))
        if isinstance(reprt2, str):
            reprt2_idx = self.word2idx(reprt2)
            exclude_list.append(reprt2_idx)
            reprt2_arr = self.weight[reprt2_idx].expand_dims(0) if reprt2_idx is not None else None
        elif isinstance(reprt2, Tensor):
            reprt2_arr = reprt2
        elif isinstance(reprt2, list):
            if isinstance(reprt2[0], str):
                reprt2_arr = self.get_words_centroid(*reprt2)
                for item in reprt2:
                    exclude_list.append(self.word2idx(item))
        if isinstance(reprt3, str):
            reprt3_idx = self.word2idx(reprt3)
            exclude_list.append(reprt3_idx)
            reprt3_arr = self.weight[reprt3_idx].expand_dims(0) if reprt3_idx is not None else None
        elif isinstance(reprt3, Tensor):
            reprt3_arr = reprt3
        elif isinstance(reprt3, list):
            if isinstance(reprt3[0], str):
                reprt3_arr = self.get_words_centroid(*reprt3)
                for item in reprt3:
                    exclude_list.append(self.word2idx(item))
        if reprt1_arr is not None and reprt2_arr is not None and reprt3_arr is not None:
            # reprt2 - reprt1 + reprt3 is the classic word2vec analogy vector.
            reprt4 = reprt2_arr - reprt1_arr + reprt3_arr
            return self.find_similar(reprt4, n=n, ignore_indexes=exclude_list)
        else:
            # Report which inputs could not be resolved to vectors.
            not_find = []
            if reprt1_arr is None:
                not_find.append(reprt1)
            if reprt2_arr is None:
                not_find.append(reprt2)
            if reprt3_arr is None:
                not_find.append(reprt3)
            raise ValueError(' ,'.join(not_find) + ' was not in vocabs.')

    def get_words_centroid(self, *args):
        # Mean vector (centroid) of several words.
        centroid = 0
        for arg in args:
            reprt_idx = self.word2idx(arg)
            if reprt_idx is not None:
                centroid += self.weight[reprt_idx].expand_dims(0) if reprt_idx is not None else None
        return centroid / len(args)

    def get_words_vector(self, word):
        # Vector of a single word, or None if out of vocabulary.
        reprt_idx = self.word2idx(word)
        if reprt_idx is not None:
            return self.weight[reprt_idx].expand_dims(0) if reprt_idx is not None else None
        return None

    def get_enumerators(self, *args, negative_case=None, n=10, exclude_samples=True):
        # Words closest overall to the positive examples but farthest from
        # the negative examples (negative_case).
        positive_correlate = 0
        negative_correlate = 0
        exclude_list = []
        for arg in args:
            positive_correlate += element_cosine_distance(self.get_words_vector(arg), self.weight)[0]
        correlate = positive_correlate
        if negative_case is None:
            pass
        else:
            if isinstance(negative_case, str):
                negative_case = [negative_case]
            if isinstance(negative_case, (list, tuple)):
                for arg in negative_case:
                    negative_correlate += element_cosine_distance(self.get_words_vector(arg), self.weight)[0]
                correlate = positive_correlate - negative_correlate
        sorted_idxes = argsort(correlate, descending=True)
        sorted_idxes = sorted_idxes[:n + len(exclude_list)]
        sorted_idxes = to_tensor([idx for idx in sorted_idxes if idx.item() not in exclude_list]).long()
        probs = to_list(correlate[sorted_idxes])[:n]
        words = [self.idx2word(idx.item()) for idx in sorted_idxes][:n]
        return OrderedDict(zip(words, probs))
def ChineseWord2Vec(pretrained=True, freeze_features=True, **kwargs):
    """Factory for the Chinese word2vec embedding.

    Args:
        pretrained: if True, download and load the pretrained weights.
        freeze_features: if True (and pretrained), mark the embedding as
            non-trainable so it acts as a frozen feature extractor.
        **kwargs: accepted for API compatibility; currently unused.

    Returns:
        A :class:`Word2Vec` instance.
    """
    if pretrained:
        model = Word2Vec.load()
        if freeze_features:
            model.trainable = False
        return model
    return Word2Vec()
| AllanYiin/trident | trident/models/pytorch_embedded.py | pytorch_embedded.py | py | 9,944 | python | en | code | 74 | github-code | 6 | [
{
"api_name": "trident.context._context",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "trident.context",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"... |
11160464439 | from django.db import models
from book_archive.models import Genre
from config.models import User
class BookRequest(models.Model):
    """A user's request for a book to be added to the archive."""
    title = models.CharField('Наименование', max_length=128)
    author = models.CharField('Автор', max_length=128, null=True, blank=True)
    genre = models.ForeignKey(Genre, on_delete=models.CASCADE, verbose_name='Жанры', null=True, blank=True)
    year = models.PositiveIntegerField('Год', default=0)
    created_at = models.DateTimeField('Дата запроса', auto_now_add=True)
    # User who submitted the request; nullable, so requests survive as
    # long as the user row does (CASCADE deletes them with the user).
    user = models.ForeignKey(
        User, null=True, blank=True,
        on_delete=models.CASCADE, verbose_name='Кто добавил',
    )
    # Nullable boolean: presumably None = pending, True = approved,
    # False = rejected -- confirm against the moderation views.
    is_approved = models.BooleanField('Одобрено ли', null=True, blank=True)

    class Meta:
        db_table = 'book_request'
        verbose_name = 'Запрос книги'
        verbose_name_plural = 'Запросы книг'

    def __str__(self):
        return f'{self.title}'
| SliceOfMind/thesombot_web | book_request/models.py | models.py | py | 989 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.db.models.Model",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "... |
8655579477 | from torch import nn
import torch
import utils
import cv2
import numpy as np
import supervisely_lib as sly
def inference(model: nn.Module, input_height, input_width, image_path, device=None):
    """Run the segmentation model on one image file.

    Returns a (image_height, image_width) array of predicted class indices,
    resized back to the original image resolution.
    """
    with torch.no_grad():
        model.eval()
        image = sly.image.read(image_path)  # RGB
        input = utils.prepare_image_input(image, input_width, input_height)
        input = torch.unsqueeze(input, 0)  # add batch dimension
        input = utils.cuda(input, device)
        output = model(input)
        image_height, image_width = image.shape[:2]
        # argmax over the class channel -> per-pixel class index.
        predicted_classes_indices = output.data.cpu().numpy().argmax(axis=1)[0]
        # NEAREST keeps the result a valid class-index map when upscaling.
        # NOTE(review): argmax yields int64 and cv2.resize does not support
        # all integer dtypes -- confirm the dtype is accepted here.
        result = cv2.resize(predicted_classes_indices, (image_width, image_height), interpolation=cv2.INTER_NEAREST)
    return result
def convert_prediction_to_sly_format(predicted_class_indices, classes_json, model_classes: sly.ProjectMeta):
    """Convert a per-pixel class-index map into a Supervisely annotation.

    classes_json is ordered so that position i corresponds to class index i;
    classes with zero predicted pixels are skipped.
    """
    height, width = predicted_class_indices.shape[:2]
    labels = []
    for idx, class_info in enumerate(classes_json):  # curr_col2cls.items():
        class_mask = predicted_class_indices == idx  # exact match (3-channel img & rgb color)
        if not np.any(class_mask):
            # 0 pixels for class
            continue
        bitmap = sly.Bitmap(data=class_mask)
        obj_class = model_classes.get_obj_class(class_info["title"])
        labels.append(sly.Label(bitmap, obj_class))
    ann = sly.Annotation(img_size=(height, width), labels=labels)
    return ann
def load_model(weights_path, num_classes, model_name, device):
    """Build the model named ``model_name`` and load trained weights.

    Args:
        weights_path: path to the saved state dict (.pth).
        num_classes: number of output classes for the model head.
        model_name: key into ``train.model_list``.
        device: torch device the model (and weights) should live on.

    Returns:
        The model in eval mode on ``device``.
    """
    from train import model_list
    model_class = model_list[model_name]["class"]
    model = model_class(num_classes=num_classes)
    # map_location is required so checkpoints saved on GPU can be loaded
    # on CPU-only hosts (torch.load would otherwise try to deserialize
    # onto the original CUDA device and fail).
    state = torch.load(weights_path, map_location=device)
    model.load_state_dict(state)
    model.to(device)
    model.eval()
    return model
| supervisely-ecosystem/unet | custom_net/inference.py | inference.py | py | 1,728 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torch.no_grad",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "supervisely_lib.image.read"... |
16013407471 | import cv2
import numpy as np
import matplotlib.pyplot as plt
# Load the lane-line reference image as grayscale.
img=cv2.imread('/home/hasantha/Desktop/repos/old-yolov4-deepsort-master/data/download2.png' ,0)
#img=img[423:998,806:1408]
# Binarize: pixels brighter than 200 become 255 (the lane markings).
ret, bw_img = cv2.threshold(img, 200, 255, cv2.THRESH_BINARY) #165
kernel = np.ones((1,1),np.uint8)
#erosion = cv2.erode(img,kernel,iterations = 1)
# Morphological opening to drop small noise specks from the binary mask.
opening = cv2.morphologyEx(bw_img, cv2.MORPH_OPEN, kernel)
#closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)

#Append lane line coordinates to a matrix
lane_line_co=[]
# Column (x) and row (y) indices of every white pixel. The commented-out
# offsets presumably translate back into full-frame coordinates -- confirm.
x_cord_lane = np.where(opening == 255)[1]#+806 #+805
y_cord_lane = np.where(opening == 255)[0]#+423 #+389
for q in range(0, len(x_cord_lane)):
    lane_line_co.append((x_cord_lane[q],y_cord_lane[q]))

def get_lane_line_co():
    # Expose the precomputed lane-line coordinates to importing modules.
    return x_cord_lane,y_cord_lane,lane_line_co

#print(lane_line_co)
#print(get_lane_line_co()[2]) | hasantha-nirmal/Traffic_Violation_Detection_Yolov4_Deep-Sort | lane_line_extract3.py | lane_line_extract3.py | py | 850 | python | en | code | 23 | github-code | 6 | [
{
"api_name": "cv2.imread",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.threshold",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "numpy.ones",
"line_n... |
37349408 | """See `TestSet` for an example."""
from typing import Type, MutableSet
from tests.collection_testing import unordered_equal
class MutableSetTests:
    """Reusable test mixin for MutableSet implementations.

    Subclasses assign ``mutable_set`` to the concrete class under test and
    may override ``get_element`` to map small ints onto suitable elements.
    """

    mutable_set: Type = None

    @classmethod
    def create_mutable_set(cls) -> MutableSet:
        """Instantiate an empty set of the class under test."""
        return cls.mutable_set()

    @staticmethod
    def get_element(i):
        """Map the int *i* to an element; identity by default."""
        return i

    def test_add_coverage(self):
        container = self.create_mutable_set()
        first = self.get_element(1)
        container.add(first)
        # Adding the same element a second time must also be accepted.
        container.add(first)

    def test_discard_missing_element_passes(self):
        container = self.create_mutable_set()
        container.discard(self.get_element(1))

    def test_discard_passes(self):
        container = self.create_mutable_set()
        first = self.get_element(1)
        container.add(first)
        container.discard(first)

    def test_contains_len(self):
        container = self.create_mutable_set()
        first = self.get_element(1)
        assert len(container) == 0
        assert first not in container

        container.add(first)
        assert first in container
        assert len(container) == 1

        second = self.get_element(2)
        assert second not in container
        container.add(second)
        assert second in container
        assert len(container) == 2
        assert first in container

        container.discard(first)
        assert first not in container
        assert len(container) == 1
        assert second in container

        container.discard(second)
        assert first not in container
        assert second not in container
        assert len(container) == 0

    def test_iter(self):
        container = self.create_mutable_set()
        first = self.get_element(1)
        second = self.get_element(2)
        assert list(iter(container)) == []
        container.add(first)
        assert unordered_equal(iter(container), [first])
        container.add(second)
        assert unordered_equal(iter(container), [first, second])
class TestMutableSet(MutableSetTests):
mutable_set = set | BlackHC/mamo | tests/collection_testing/test_mutable_set.py | test_mutable_set.py | py | 2,179 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.Type",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "typing.MutableSet",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "tests.collection_testing.unordered_equal",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": ... |
32347991199 | import cv2 as cv
import numpy as np
def nothing(x):
    # No-op callback for cv.createTrackbar (trackbar setup is commented out below).
    pass
# Read the input clip and write the composited result as XVID at 20 fps.
cap= cv.VideoCapture('pranay1.avi')
fourcc= cv.VideoWriter_fourcc('X', 'V', 'I', 'D')
out= cv.VideoWriter('final1.avi', fourcc, 20.0, (640,480) )

# Trackbar UI kept for re-tuning the HSV thresholds interactively:
#cv.namedWindow('Tracking')
#cv.createTrackbar('l_h', 'Tracking', 0, 255, nothing)
#cv.createTrackbar('l_s', 'Tracking', 0, 255, nothing)
#cv.createTrackbar('l_v', 'Tracking', 0, 255, nothing)
#cv.createTrackbar('u_h', 'Tracking', 255, 255, nothing)
#cv.createTrackbar('u_s', 'Tracking', 255, 255, nothing)
#cv.createTrackbar('u_v', 'Tracking', 255, 255, nothing)

while(True):
    ret, frame= cap.read()
    frame= cv.resize(frame, (640,480))
    # Work in HSV: hue-based masks are far more robust than BGR thresholds.
    hsv= cv.cvtColor(frame, cv.COLOR_BGR2HSV)
    #lh = cv.getTrackbarPos('l_h', 'Tracking')
    #ls = cv.getTrackbarPos('l_s', 'Tracking')
    #lv = cv.getTrackbarPos('l_v', 'Tracking')
    #uh = cv.getTrackbarPos('u_h', 'Tracking')
    #us = cv.getTrackbarPos('u_s', 'Tracking')
    #uv = cv.getTrackbarPos('u_v', 'Tracking')
    # Fixed HSV range for the cloak color (found with the trackbars above).
    lh= 82
    ls= 51
    lv= 51
    uh= 133
    us= 255
    uv= 255
    lhb= 0
    lsb= 0
    lvb= 0
    uhb= 255
    usb= 255
    uvb= 0
    # NOTE(review): lbb/ubb are built but never used below -- confirm they
    # are leftovers from an earlier masking experiment.
    lbb= np.array([lhb, lsb, lvb])
    ubb= np.array([uhb, usb, uvb])
    lb= np.array([lh,ls,lv])
    ub= np.array([uh,us,uv])
    # Static background shown wherever the cloak color is detected.
    backg= cv.imread('pranay42.JPG')
    backg= cv.resize(backg, (640,480))
    mask= cv.inRange(hsv, lb, ub)
    masknot= cv.bitwise_not(mask)
    # Frame minus the cloak region, plus the background inside the cloak region.
    res= cv.bitwise_and(frame, frame, mask = masknot)
    a= cv.bitwise_and(backg, backg, mask=mask)
    fres= cv.addWeighted(res,1,a,1,1)
    cv.imshow('frame', frame)
    cv.imshow('fres', fres)
    key= cv.waitKey(1)
    if ret==True:
        out.write(fres)
    if key == 27:
        # ESC quits.
        break

cap.release()
cv.destroyAllWindows()
| pranayvarmas/Virtual-Keyboard | Mini-Projects/Invisible Cloak.py | Invisible Cloak.py | py | 1,721 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.VideoWriter_fourcc",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.VideoWriter",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
... |
import random
from datetime import datetime, timedelta
ALPHABET = 'abcdefghijklmnopqrstuvwxyz'

conteudo = 'There are many variations of passages of Lorem Ipsum available, but the ' \
           'majority have suffered alteration in some form, by injected humour, or randomised ' \
           'words which look even slightly believable. '
excerto = 'Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots' \
          ' in a piece of classical Latin'

now = datetime.now()

for categoria_post_id in range(1, 5):  # assumes 4 categories
    for i in range(5):  # 5 posts per category
        # Title: three random lowercase words of 3-8 letters each.
        titulo_post = ' '.join([''.join(random.choices(ALPHABET, k=random.randint(3, 8))) for _ in range(3)])
        # Real date arithmetic instead of "(now.day - i) % 29", which could
        # produce the invalid day "00" (and days that don't exist in the month).
        data_post = (now - timedelta(days=i)).strftime('%Y-%m-%d %H:%M:%S')
        conteudo_post = (conteudo * random.randint(3, 25) + '.</br>') * 5
        excerto_post = excerto * random.randint(3, 5) + '. '
        imagem_post = f'post_img/2022/02/26/13estacio-sistemas.jpg'  # change to an image you have uploaded
        publicado_post = random.randint(0, 1)
        autor_post_id = 1  # id of your superuser
        sql_post = (f"INSERT INTO blog_django.posts_post"
                    f"(titulo_post,data_post,conteudo_post,excerto_post,imagem_post,"
                    f"publicado_post,autor_post_id,categoria_post_id)"
                    f"VALUES ('{titulo_post}','{data_post}','{conteudo_post}',"
                    f"'{excerto_post}','{imagem_post}',{publicado_post},"
                    f"{autor_post_id},{categoria_post_id});")

        print(sql_post)
| Adriano1976/Curso-de-Python | Secao11-Django-com-Python-Projetos/Projeto-Blog/posts-generator.py | posts-generator.py | py | 1,787 | python | pt | code | 0 | github-code | 6 | [
{
"api_name": "datetime.datetime.now",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "random.choices",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "random.randint... |
855819574 | #!/usr/bin/env python
#
# This example creates a polygonal model of a cone, and then renders it to
# the screen. It will rotate the cone 360 degrees and then exit. The basic
# setup of source -> mapper -> actor -> renderer -> renderwindow is
# typical of most VTK programs.
#
#
# First we include the VTK Python packages that will make available
# all of the VTK commands to Python.
#
import vtk
import time
#
# Next we create an instance of vtkConeSource and set some of its
# properties. The instance of vtkConeSource "cone" is part of a visualization
# pipeline (it is a source process object); it produces data (output type is
# vtkPolyData) which other filters may process.
#
# Cone source: a procedural source object whose output (vtkPolyData) feeds
# the rest of the visualization pipeline.
cone = vtk.vtkConeSource()
cone.SetHeight( 3.0 )
cone.SetRadius( 1.0 )
cone.SetResolution( 10 )   # number of facets approximating the cone

# Terminate the pipeline with a mapper, which turns polygonal data into
# graphics primitives. (Intermediate filters such as vtkShrinkPolyData could
# be inserted between the source and the mapper.)
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInputConnection( cone.GetOutputPort() )

# The actor orchestrates rendering of the mapper's primitives; it also refers
# to display properties and an internal transformation matrix.
coneActor = vtk.vtkActor()
coneActor.SetMapper( coneMapper )

# A renderer is like a viewport: part or all of a window, responsible for
# drawing the actors it owns. The background color is set here.
ren1= vtk.vtkRenderer()
ren1.AddActor( coneActor )
ren1.SetBackground( 0.1, 0.2, 0.4 )

# The render window appears on screen; add the renderer and size it to
# 300x300 pixels.
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer( ren1 )
renWin.SetSize( 300, 300 )

# Optional animation: render once per degree over a full revolution.
# for i in range(0,360):
#     time.sleep(0.03)
#     renWin.Render()
#     ren1.GetActiveCamera().Azimuth( 1 )
| VisTrails/VisTrails | examples/vtk_examples/Tutorial/Step1/Cone.py | Cone.py | py | 2,362 | python | en | code | 100 | github-code | 6 | [
{
"api_name": "vtk.vtkConeSource",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "vtk.vtkPolyDataMapper",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "vtk.vtkActor",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "vtk.vtkRenderer"... |
32506954132 | import csv
import numpy as np
import os
import pydicom
from segmentation_models.backbones import get_preprocessing
import tensorflow as tf
from pneumothorax_segmentation.constants import image_size, folder_path
from pneumothorax_segmentation.data_augment import apply_random_data_augment
from pneumothorax_segmentation.params import tf_image_size
# Documentation for reading dicom files at https://pydicom.github.io/pydicom/stable/viewing_images.html#using-pydicom-with-matplotlib

# Preprocessing matching the resnet34 encoder used by the segmentation model.
preprocess_input = get_preprocessing("resnet34")
def get_all_images_list(folder):
    """Collect the dicom files under the given split folder.

    Returns a list of ``(filepath, filename_without_extension)`` tuples.
    """
    found = []
    root = folder_path + "/data/dicom-images-%s" % folder
    for dir_name, _, file_names in os.walk(root):
        found.extend(
            (os.path.join(dir_name, fn), fn.replace(".dcm", ""))
            for fn in file_names
            if ".dcm" in fn.lower()
        )
    return found
def get_dicom_data(file_path):
    "Return the dicom raw data of a given file"
    # Thin wrapper around pydicom's reader; returns the parsed dataset.
    return pydicom.dcmread(file_path)
cached_csv = []  # lazily-loaded cache of every row of train-rle.csv


def get_raw_masks(name):
    """
    Returns a list of the masks as they appear in train-rle.csv. Masks '-1' are filtered out.

    Note side-effect: loads the csv on the first run and caches it.
    """
    global cached_csv
    # The csv data is stored in a cache so the file is only read once.
    if len(cached_csv) == 0:
        with open(folder_path + '/data/train-rle.csv') as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=',')
            for row in csv_reader:
                cached_csv.append(row)
    # Retrieve this image's mask strings as they appear in the csv.
    raw_masks = [row[1] for row in cached_csv if row[0] == name]
    # Images with no pneumothorax are stored as a single " -1" entry; also
    # guard against names missing from the csv entirely (the original code
    # raised IndexError on an empty result).
    if len(raw_masks) == 0 or raw_masks[0] == " -1":
        raw_masks = []
    return raw_masks
def get_image_label(name):
    """Binary label from train-rle.csv: 1 if a pneumothorax mask exists, else 0."""
    return 1 if get_raw_masks(name) else 0
def get_true_mask(name):
    """Decode the ground-truth mask for image ``name``.

    Returns a (image_size, image_size) numpy matrix with values 0-1, decoded
    from the run-length-encoded entries in train-rle.csv.
    """
    raw_masks = get_raw_masks(name)
    # Format the masks to an exploitable format: each RLE string starts with
    # a space, then alternating run lengths as ints.
    masks = []
    for raw_mask in raw_masks:
        mask = raw_mask.split(" ")
        mask = mask[1:]  # raw_mask starts with a space
        mask = [int(m) for m in mask]
        masks.append(mask)
    # Decode the RLE over the flattened image: values alternate between
    # background-run and mask-run lengths.
    # np.int was removed in NumPy 1.24; the builtin int is the equivalent
    # dtype specifier (same platform default integer).
    mask_mapping = np.zeros(image_size ** 2, dtype=int)
    for mask in masks:
        is_it_a_mask = False
        current_pixel = 0
        for pixel_long_movement in mask:
            if is_it_a_mask:
                for i in range(pixel_long_movement):
                    mask_mapping[current_pixel + i] = 1
            current_pixel += pixel_long_movement
            is_it_a_mask = not is_it_a_mask
    # The RLE runs are column-major: reshape, then transpose to row-major.
    mask_mapping = np.reshape(mask_mapping, (image_size, image_size))
    mask_mapping = np.transpose(mask_mapping, (1, 0))
    return mask_mapping
def format_pixel_array_for_tf(pixel_array, apply_data_augment_technique=None):
    """
    Inputs pixel_array as it is stored in the dicom file. Outputs a tensor ready to go through the models.

    apply_data_augment_technique can be used to apply data augmentation. See apply_random_data_augment for values
    """
    image = tf.convert_to_tensor(pixel_array, dtype=tf.float32)
    image = tf.reshape(image, (1, image_size, image_size, 1))
    # `is not None` instead of the non-idiomatic `!= None` comparison.
    if apply_data_augment_technique is not None:
        image = apply_random_data_augment(image, apply_data_augment_technique)
    # tf.image.resize behaves weirdly with the default method when reducing size. AREA method makes more sense in our case, thought the default bilinear method makes more sense when making an image bigger
    # NOTE(review): align_corners is a TF1-era resize argument — confirm the pinned TensorFlow version supports it.
    image = tf.image.resize(image, (tf_image_size, tf_image_size), align_corners=True, method=tf.image.ResizeMethod.AREA)
    image = tf.image.grayscale_to_rgb(image)
    image = preprocess_input(image)
    return image
| benoitkoenig/pneumothorax-segmentation | preprocess.py | preprocess.py | py | 4,252 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "segmentation_models.backbones.get_preprocessing",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pneumothorax_segmentation.constants.folder_path",
"line_number": 20,
"usage_type"... |
# URL routing for the core app.
# NOTE(review): django.conf.urls.url was removed in Django 4.0 (use re_path) —
# confirm the Django version pinned for this project.
from django.conf.urls import url
from . import views
urlpatterns = [
    # Root path renders the same view as /home/.
    url(r'^$', views.home, name='index'),
    # url(r'^games/(?P<steamid>[0-9]+)$', views.games, name='games'),
    url(r'^home/$', views.home, name='home'),
    url(r'^games/', views.games, name='games'),
    url(r'^friends/', views.friends, name='friends'),
    url(r'^calculator/', views.calculator, name='calculator')
]
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.c... |
44399481784 | import gym
from collections import deque
import numpy as np
import time
import torch
torch.manual_seed(0) # set random seed
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
from policy import Policy
from gym.wrappers.monitoring.video_recorder import VideoRecorder
# Build the Acrobot environment and seed it for reproducible rollouts.
env = gym.make('Acrobot-v1')
env.seed(0)
print('observation space:', env.observation_space)
print('action space:', env.action_space)
# Train on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
policy = Policy().to(device)
# Adam over all policy parameters with the learning rate used for REINFORCE here.
optimizer = optim.Adam(policy.parameters(), lr=0.001)
def reinforce(n_episodes=5000, max_t=1000, gamma=1.0, print_every=100):
    """Train the module-level policy with the REINFORCE algorithm.

    n_episodes  -- number of episodes to run
    max_t       -- step cap per episode
    gamma       -- discount factor for the return
    print_every -- checkpoint/report interval (episodes)
    Returns the list of per-episode returns.
    """
    recent_returns = deque(maxlen=100)
    episode_returns = []
    for episode in range(1, n_episodes + 1):
        log_probs = []
        step_rewards = []
        state = env.reset()
        # Roll out one episode, recording log-probabilities and rewards.
        for _ in range(max_t):
            action, log_prob = policy.act(state)
            log_probs.append(log_prob)
            state, reward, done, _ = env.step(action)
            step_rewards.append(reward)
            if done:
                break
        episode_return = sum(step_rewards)
        recent_returns.append(episode_return)
        episode_returns.append(episode_return)
        # Discounted return of the whole episode (zip truncates the extra discount).
        discounts = [gamma ** i for i in range(len(step_rewards) + 1)]
        total_return = sum(d * r for d, r in zip(discounts, step_rewards))
        # Policy-gradient loss: every step is weighted by the episode return.
        loss_terms = [-lp * total_return for lp in log_probs]
        policy_loss = torch.cat(loss_terms).sum()
        optimizer.zero_grad()
        policy_loss.backward()
        optimizer.step()
        if episode % print_every == 0:
            torch.save(policy.state_dict(), 'checkpoint.pth')
            print('Episode {}\tAverage Score: {:.2f}'.format(episode, np.mean(recent_returns)))
    return episode_returns
# Kick off training with the default hyper-parameters; keeps per-episode returns.
scores = reinforce()
| david-wb/acrobot-v1 | train.py | train.py | py | 1,870 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "torch.manual_seed",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "gym.make",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
... |
29943421716 | import argparse
import yaml
from pyspark.sql.functions import udf, when, year
class CreateSqlInput:
    """Spark step that reshapes the joined movie parquet into the SQL input
    table: parses production companies, derives a budget/revenue ratio and a
    release year, and writes the selected columns back out as parquet."""
    def __init__(self):
        # NOTE(review): the stored name reads 'CalculateStats' although the class
        # is CreateSqlInput — confirm which name downstream tooling expects.
        self.name = 'CalculateStats'
    @staticmethod
    @udf
    def extract_production(dict_string):
        # Parse the stringified list of production-company dicts, keeping names.
        # NOTE(review): yaml.load raises yaml.YAMLError (not ValueError) on
        # malformed input, and this udf declares no returnType — confirm both
        # behave as intended on real data.
        try:
            production_array = yaml.load(dict_string, Loader=yaml.FullLoader)
            parsed_production = []
            for production in production_array:
                parsed_production.append(production['name'])
        except ValueError:
            parsed_production = []
        return parsed_production
    @staticmethod
    def main(spark, config):
        # Paths come from the [PATHS] section of the ini-style job config.
        joined_parquet_path = config.get('PATHS', 'joined_parquet_path')
        sql_input_path = config.get('PATHS', 'sql_input_path')
        joined_df = spark.read.parquet(joined_parquet_path)
        joined_df = joined_df.withColumn('production_companies',
                                         CreateSqlInput.extract_production('production_companies'))
        # budget/revenue ratio, guarded so zero revenue yields 0.0 instead of a
        # division error.
        joined_df = joined_df.withColumn('ratio',
                                         when(joined_df['revenue'] != 0, joined_df['budget']/joined_df['revenue'])
                                         .otherwise(0.0))
        joined_df = joined_df.withColumn('year', year('release_date'))
        joined_df = joined_df.orderBy('ratio', ascending=False)
        # Show the top five ratios for quick sanity checking in the job logs.
        joined_df.select('title', 'production_companies', 'budget', 'revenue', 'ratio', 'year').show(5, False)
        joined_df.select(['title',
                          'budget',
                          'year',
                          'revenue',
                          'vote_average',
                          'ratio',
                          'production_companies',
                          'url',
                          'abstract']).write.mode('overwrite').parquet(sql_input_path)
if __name__ == '__main__':
    import sys
    from os import path
    # Make the package root importable when this file is run as a script.
    sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
    from tfi_etl.sparkscript import SparkScriptRunner
    parser = argparse.ArgumentParser()
    parser.add_argument('-config')
    args = parser.parse_args()
    config_path = str(args.config)
    # Delegate Spark session handling to the shared runner, passing this step in.
    calculate_stats = CreateSqlInput()
    script_runner = SparkScriptRunner(config_path, calculate_stats)
    script_runner.run()
| richierichard99/TrueFilmIngest | tfi_etl/CreateSqlInput.py | CreateSqlInput.py | py | 2,302 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "yaml.load",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "yaml.FullLoader",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.functions.udf",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.... |
33671678331 | from __future__ import annotations
from datetime import datetime
from datetime import timedelta
from unittest.mock import MagicMock
import pytest
from common.reddit_client import RedditClient
from prawcore.exceptions import Forbidden
from requests import Response
@pytest.fixture
def reddit_client():
    """Fixture: a RedditClient wired with placeholder credentials."""
    credentials = {
        "reddit_client_id": "YOUR_CLIENT_ID",
        "reddit_client_secret": "YOUR_CLIENT_SECRET",
        "reddit_user_agent": "YOUR_USER_AGENT",
    }
    return RedditClient(**credentials)
@pytest.fixture
def subreddit_mock(reddit_client):
    # Stub out praw's subreddit() and expose the mock subreddit object it returns.
    reddit_client.reddit_client.subreddit = MagicMock()
    return reddit_client.reddit_client.subreddit.return_value
@pytest.fixture
def post_data():
    """Fixture: two fake submissions created on consecutive days (2023-07-28/29)."""
    base_date = datetime(2023, 7, 28)
    # (id, title, body, upvote_ratio, ups, downs, awards, comments, day offset)
    specs = [
        ("post1", "Test Post 1", "This is test post 1.", 0.75, 10, 2, 1, 5, 0),
        ("post2", "Test Post 2", "This is test post 2.", 0.80, 15, 3, 2, 8, 1),
    ]
    posts = []
    for post_id, title, body, ratio, ups, downs, awards, comments, offset in specs:
        post = MagicMock()
        post.id = post_id
        post.title = title
        post.selftext = body
        post.subreddit.display_name = "test_subreddit"
        post.upvote_ratio = ratio
        post.ups = ups
        post.downs = downs
        post.total_awards_received = awards
        post.num_comments = comments
        post.created_utc = (base_date + timedelta(days=offset)).timestamp()
        posts.append(post)
    return posts
@pytest.fixture
def subreddit_new_mock(subreddit_mock, post_data):
    # subreddit.new(...) yields the two canned posts from the post_data fixture.
    subreddit_mock.new = MagicMock(return_value=post_data)
    return subreddit_mock.new
def test_remove_submissions_not_on_date(reddit_client):
    """Submissions whose timestamp is not on the requested date are dropped."""
    target_date = datetime(2021, 10, 4).date()
    candidates = [
        {"created_utc": 1633393174},
        {"created_utc": 1633306774},
        {"created_utc": 1633220374},
    ]
    kept = reddit_client._remove_submissions_not_on_date(candidates, target_date)
    assert kept == [{"created_utc": 1633306774}]
def test_fetch_submissions_made_on_date_forbidden_error(reddit_client, subreddit_new_mock):
    # A 403 from Reddit (e.g. private subreddit) must be handled and yield [].
    forbidden_response = Response()
    forbidden_response.status_code = 403
    subreddit_new_mock.side_effect = Forbidden(forbidden_response)
    result = reddit_client.fetch_submissions_made_on_date("test_subreddit", datetime(2023, 7, 28))
    assert result == []
def test_fetch_submissions_made_on_date_no_posts(reddit_client, subreddit_new_mock):
    # Override the fixture's canned posts: an empty listing must produce [].
    subreddit_new_mock.return_value = []
    result = reddit_client.fetch_submissions_made_on_date("test_subreddit", datetime(2023, 7, 28))
    assert result == []
| kelvinou01/university-subreddits | tests/unit/test_reddit_client.py | test_reddit_client.py | py | 2,642 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "common.reddit_client.RedditClient",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "unittest.mock.MagicMock",
"line_number": 24,
"usage_type": "call"
},
{
"api... |
35031048423 | from celery import Celery
from celery.schedules import crontab
# Celery application for the "vivaldi" project; Redis serves as both the
# message broker and the result backend. Extra keyword arguments are applied
# as configuration settings.
app = Celery(
    "vivaldi",
    broker_url="redis://localhost:6379/0",
    result_backend="redis://localhost:6379/0",
    imports=["tasks"],
    task_serializer="json",
    result_serializer="json",
    accept_content=["json"],
    timezone="Europe/Lisbon",
)
# NOTE(review): this dict is a plain module variable — it is never assigned to
# app.conf.beat_schedule nor loaded via config_from_object here, so the nightly
# job may not actually be scheduled. Confirm how beat picks it up.
beat_schedule = {
    "set_default": {
        # Intended: run tasks.default_browser every day at 01:00 (Europe/Lisbon).
        "task": "tasks.default_browser",
        "schedule": crontab(minute=0, hour=1),
        "args": (),
    },
}
if __name__ == "__main__":
    app.start()
| miccaldas/celery_and_friends | celery_and_friends/vivaldi/__init__.py | __init__.py | py | 525 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "celery.Celery",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "celery.schedules.crontab",
"line_number": 17,
"usage_type": "call"
}
] |
36961545751 | #!/usr/bin/env python
# coding: utf-8
# ## App Creation
#
# First, import all necessary libraries:
# In[1]:
#App Libraries
import json
import dash
from dash import html, dcc, Input, Output, State, dash_table
import dash_bootstrap_components as dbc
#Distributions
from scipy.stats import gamma
from scipy.stats import lognorm
from scipy.stats import weibull_min
#Calculation libraries
import math
import pandas as pd
import numpy as np
import ast
import statsmodels.api as sm
import matplotlib.pyplot as plt
import scipy.stats as stats
from scipy.optimize import minimize
from scipy.integrate import odeint
from scipy.optimize import fsolve
#from sympy import symbols, Eq, solve
#Plot libraries
import plotly.express as px
import plotly.graph_objs as go
import plotly.figure_factory as ff
from plotly.subplots import make_subplots
# In[2]:
#==================================================================#
# CREATE GENERATION INTERVAL DATA #
#==================================================================#
def create_gi(pop_mean, sd, m):
    """Simulate m generation-interval observations from three candidate
    distributions (gamma, lognormal, Weibull) that share the same population
    mean and standard deviation.

    pop_mean -- population mean of the generation interval
    sd       -- population standard deviation
    m        -- number of observations to draw per distribution
    Returns (gamma_sample, lognormal_sample, weibull_sample) as numpy arrays.
    """
    # Fixed seed so repeated calls reproduce the same samples.
    np.random.seed(1234)
    # --- gamma: moment-match shape and scale from the mean and sd ---
    shape_g = pop_mean ** 2 / sd ** 2
    scale_g = sd ** 2 / pop_mean
    gamma_sample = np.random.gamma(shape_g, scale_g, m)
    # --- lognormal: convert the target moments to the underlying normal's ---
    norm_mu = np.log(pop_mean) - 0.5 * np.log((sd / pop_mean) ** 2 + 1)
    norm_sigma = np.sqrt(np.log((sd / pop_mean) ** 2 + 1))
    lognorm_sample = lognorm.rvs(s=norm_sigma, scale=math.exp(norm_mu), size=m)
    # --- Weibull: solve numerically for the shape matching both moments ---
    def _moment_ratio(k):
        return math.gamma(1 + 2 / k) / math.gamma(1 + 1 / k) ** 2
    def _shape_residual(k, target):
        return _moment_ratio(k) - target
    target = sd ** 2 / pop_mean ** 2 + 1
    shape_w = fsolve(_shape_residual, 1, args=(target))[0]
    scale_w = pop_mean / math.gamma(1 + 1 / shape_w)
    weibull_sample = weibull_min.rvs(shape_w, scale=scale_w, size=m)
    return gamma_sample, lognorm_sample, weibull_sample
#==================================================================#
# VISUALIZE GENERATION INTERVAL DATA #
#==================================================================#
def gi_visualize(gi_gamma, gi_lognorm, gi_weibull):
    """Build a 2x2 figure: one percent-normalised histogram per distribution
    plus a combined smoothed-density panel comparing all three samples."""
    # One fixed colour per distribution so histograms match the density curves.
    color=["skyblue","darkorange","green"]
    fig = make_subplots(rows=2, cols=2,)
    fig.append_trace(go.Histogram(x=gi_gamma, histnorm='percent', name='Gamma',
                                  marker_color=color[0], opacity=1,),row=1,col=1)
    fig.append_trace(go.Histogram(x=gi_lognorm, histnorm='percent', name='Lognorm',
                                  marker_color=color[1], opacity=1), row=1,col=2)
    fig.append_trace(go.Histogram(x=gi_weibull, histnorm='percent', name='Weibull',
                                  marker_color=color[2], opacity=1), row=2,col=1)
    group_labels = ['Gamma Curve', 'Lognormal Curve', 'Weibull Curve']
    hist_data = [gi_gamma, gi_lognorm, gi_weibull]
    # create_distplot draws the density curves; its traces are copied into the
    # bottom-right panel of the subplot grid.
    distplfig = ff.create_distplot(hist_data, group_labels, colors=color,
                                   bin_size=.2, show_hist=False, show_rug=False)
    for k in range(len(distplfig.data)):
        fig.append_trace(distplfig.data[k],
                         row=2, col=2
                         )
    fig.update_layout(barmode='overlay')
    return(fig)
#==================================================================#
# OBJECTIVE FUNCTION #
#==================================================================#
def objective(w_lamb, tau):
    """Negative log-likelihood of the Erlang-mixture generation-interval model.

    w_lamb -- weights w_1..w_n followed by the rate lambda, as one list
    tau    -- observed times since infection
    Returns -sum(log-likelihood), suitable for a minimizer.
    """
    weights = w_lamb[:-1]
    rate = w_lamb[-1]
    n = len(weights)
    log_likelihood = 0
    for t in tau:
        # Mixture term at time t: sum_j w_j * (rate*t)^j / j!
        inner = weights[0]
        for j in range(1, n):
            inner = inner + weights[j] * (((rate * t) ** j) / math.factorial(j))
        log_likelihood = log_likelihood + (math.log(inner) - (rate * t) + math.log(rate))
    return -1 * log_likelihood
#==================================================================#
# CONSTRAINT FUNCTION #
#==================================================================#
def constraint(w_lamb):
    """Equality constraint for the optimizer: the mixture weights must sum to 1.

    w_lamb -- weights w_1..w_n followed by lambda (lambda is ignored here)
    Returns 1 - sum(weights); the solver drives this value to zero.
    """
    # Built-in sum replaces the manual accumulation loop.
    return 1 - sum(w_lamb[:-1])
#==================================================================#
# CALCULATE WEIGHTS, HOLDING PERIOD, RATES #
#==================================================================#
def solver(tau, R_0, n, dist_type):
    '''
    Fit the n-compartment mixture model to the generation-interval data.
    Inputs:
        tau: list of generation intervals times (in days)
        R_0: basic reproduction number
        n: number of infectious comparments
        dist_type: "gamma", "lognorm" or "weibull" -- selects the starting point
    Output:
        w_val: list of weights (based on minimization)
        lambda: lambda value (based on minimization)
        b_val: list of betas (based on minimization)
    '''
    # Starting vector: n weights followed by lambda.
    wl_0 = np.zeros(n+1)
    if dist_type == "gamma" or dist_type == "lognorm":
        # Moment-matched gamma start: mass on the two compartments bracketing
        # the estimated shape.
        shape = (np.mean(tau)**2)/np.var(tau) #shape of the disribution
        w2 = shape - math.trunc(shape) #expected weight of "second" compartment
        w1 = 1 - w2 #expected weight of "first" compartment
        comps = [math.trunc(shape)-1, math.trunc(shape)] #location of the "first" and "second" compartments where weights should exceed one
        # NOTE(review): when shape < 1 this makes comps[0] == -1, which writes into
        # the lambda slot via negative indexing -- confirm inputs keep shape >= 1.
        weights = [w1, w2]
        for c, w in zip(comps,weights):
            wl_0[c] = w
        wl_0[-1]= np.mean(tau)/np.var(tau)
    #elif dist_type == "lognorm":
    #    for i in range(n):
    #        wl_0[i] = 1/n
    #    log_mean = np.mean(tau)
    #    log_std = np.std(tau)
    #    norm_mean = np.log(log_mean)-0.5*np.log((log_std/log_mean)**2+1)
    #    wl_0[-1]= norm_mean
    elif dist_type == "weibull":
        # Uniform weights; lambda starts at the sample standard deviation.
        for i in range(n):
            wl_0[i] = 1/n
        wl_0[-1]= np.std(tau)
    # Each weight is bounded to [0, 1]; lambda must stay strictly positive.
    b = (0, 1)
    bnds=()
    for i in range(n):
        bnds = bnds + (b,)
    b_lamb = (0.00000000001, None)
    bnds = bnds + (b_lamb,)
    #specify constraints
    con1 = {'type': 'eq', 'fun': constraint}
    cons = ([con1])
    #optimize
    solution = minimize(objective, wl_0, method='SLSQP', args=(tau), bounds=bnds,constraints=cons)
    #get weights
    w_val = solution.x[:-1]
    lamb = solution.x[-1]
    # beta_j = w_j * lambda * R_0 recovers per-compartment transmission rates.
    b_val = [weight*lamb*R_0 for weight in w_val]
    return(w_val, lamb, b_val)
#==================================================================#
# OBJECTIVE FUNCTION #
#==================================================================#
def solutions(gi_data, min_n, max_n, R_0, dist_type):
    """Fit the mixture model for every compartment count n in [min_n, max_n].

    Returns four parallel lists (one entry per n): fitted weights, fitted
    lambda, derived betas, and the negative log-likelihood. Progress is
    reported on stdout at 25/50/75/100%.
    """
    fitted_weights = []
    fitted_rates = []
    fitted_betas = []
    neg_logliks = []
    span = max_n + 1 - min_n
    for n_val in range(min_n, max_n + 1):
        if n_val == min_n:
            banner = ("Solving: " + str(dist_type) + " with R_0=" + str(R_0)
                      + " for n in " + str(min_n) + ",...," + str(max_n))
            print(banner)
        w_vec, rate, beta_vec = solver(gi_data, R_0, n_val, dist_type)
        neg_ll = objective(list(w_vec) + [rate], gi_data)
        if n_val == int(span / 4 + min_n):
            print("n=", str(n_val), " is done. 25% Done!")
        if n_val == int(span / 2 + min_n):
            print("n=", str(n_val), " is done. Half way there!")
        if n_val == int(3 * span / 4 + min_n):
            print("n=", str(n_val), " is done. 75% Done!")
        if n_val == max_n:
            print("Done!")
        fitted_weights.append(w_vec)
        fitted_rates.append(rate)
        fitted_betas.append(beta_vec)
        neg_logliks.append(neg_ll)
    return fitted_weights, fitted_rates, fitted_betas, neg_logliks
#==================================================================#
# EXPECTED INFECTIOUS CURVE #
#==================================================================#
def beta_u(u_i, beta_vals, lambda_n):
    """Evaluate the transmission-rate curve beta(u) at each time since infection.

    The rate is a mixture: beta(u) = sum_j beta_j * e^(-lambda*u) * (lambda*u)^j / j!.

    u_i       -- iterable of times since infection (days)
    beta_vals -- per-compartment transmission rates from the optimizer
    lambda_n  -- rate of moving between infectious compartments
    Returns a list of transmission rates, one per entry of u_i.
    """
    n = len(beta_vals)
    y = []
    for u in u_i:
        # The exponential decay does not depend on j, so compute it once per u
        # instead of once per compartment.
        decay = np.exp(-lambda_n * u)
        transmission = 0
        for j in range(n):
            transmission = transmission + beta_vals[j] * ((decay * (lambda_n * u) ** j) / math.factorial(j))
        y.append(transmission)
    return y
#==================================================================#
# VISUALIZE EXPECTED INFECTIOUS CURVE #
#==================================================================#
def beta_u_plot(lambda_n, beta_vals):
    """Plot the estimated transmission-rate curve beta(u) over 0-15 days."""
    grid = np.linspace(0, 15, 100)
    curve = pd.DataFrame(grid, columns=["x"])
    # The data column is named after the number of compartments for this curve.
    curve[str(len(beta_vals))] = [float(v) for v in beta_u(grid, beta_vals, lambda_n)]
    figure = go.Figure()
    figure.add_trace(go.Scatter(x=curve.x, y=curve.iloc[:, 1]))
    figure.update_layout(legend_title_text='Compartment')
    figure.update_xaxes(title_text='Days', nticks=20)
    figure.update_yaxes(title_text='Transmission Rate')
    return figure
#==================================================================#
# VISUALIZE EXPECTED INFECTIOUS CURVE #
#==================================================================#
def plot_beta_dist(betas, lambdas):
    """Overlay the beta(u) curves of the three candidate distributions on one
    shared axis, reusing the single-curve figures built by beta_u_plot."""
    # Fixed colour/name pairing keeps the curves consistent with earlier plots.
    color=["skyblue","darkorange","green"]
    dist = ["Gamma", "Lognormal","Weibull"]
    count = 0
    fig = make_subplots(rows=1, cols=1)
    for beta,lamb in zip(betas,lambdas):
        # Copy the x/y data of each per-distribution figure into the shared one.
        data = beta_u_plot(lamb, beta)
        fig.add_trace(
            go.Scatter(x=data['data'][0]['x'], y=data['data'][0]['y'],
                       name=dist[count], line_color=color[count],
                       line=dict(width=3)),
            row=1, col=1
        )
        count+=1
    #-----STYLING--------
    fig.update_layout(
        title="Estimated Infectious Curve of each Distribution", #β(𝜏)
        xaxis_title="Duration of Infection (days)",
        yaxis_title="Transmission Rate", legend_title="Legend", font=dict(size=14),
        legend=dict(yanchor="top", y=0.99, xanchor="right", x=0.99))
    return(fig)
#==================================================================#
# SInR(S) MODEL #
#==================================================================#
def SInR(y, t, N, c, h, beta, lambda_n):
    """Right-hand side of the SInR(S) ODE system, for scipy.integrate.odeint.

    y        -- state vector [S, I_1, ..., I_n, R]
    t        -- current time (unused; required by the odeint signature)
    N        -- total population size
    c        -- contact rate
    h        -- waning-immunity rate (R -> S); 0 recovers the plain SInR model
    beta     -- per-compartment transmission rates
    lambda_n -- rate of progression between infectious compartments
    Returns the list of derivatives [dS/dt, dI_1/dt, ..., dI_n/dt, dR/dt].
    """
    S = y[0]
    I = y[1:-1]
    R = y[-1]
    npI = np.array(I)
    npBeta = np.array(beta)
    # Force of infection, computed once instead of separately for dS and dI_1.
    infection_pressure = S / N * np.sum(npI * npBeta) * c
    dSdt = -infection_pressure + h * R
    dI1dt = infection_pressure - lambda_n * I[0]
    # Intermediate compartments: inflow from the previous stage, outflow onward.
    dIndt = [lambda_n * I[k] - lambda_n * I[k + 1] for k in range(len(I) - 1)]
    # Recovery from the last infectious stage, minus waning back to susceptible.
    dRdt = lambda_n * I[-1] - h * R
    return [dSdt, dI1dt, *dIndt, dRdt]
#==================================================================#
# VISUALIZE SInR(S) MODEL #
#==================================================================#
def SInR_plot(y_t0, t, N, c, h, beta, lambda_n):
    """Integrate the SInR(S) system from y_t0 over the time grid t and plot
    S, the summed infectious compartments, and R as fractions of N."""
    # Integrate the SIR equations over the time grid, t.
    ret = odeint(SInR, y_t0, t, args=(N, c, h, beta, lambda_n))
    S = ret.T[0]
    # All infectious compartments are summed into a single curve.
    I = sum(ret.T[1:-1])
    R = ret.T[-1]
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=t, y=S/N,name="Susceptible"))
    fig.add_trace(go.Scatter(x=t, y=I/N,name="Sum of Infected Compartments"))
    fig.add_trace(go.Scatter(x=t, y=R/N,name="Recovered"))#
    fig.update_layout(legend_title_text='Compartment')
    fig.update_xaxes(title_text='Time (Days)')#,nticks=20)
    fig.update_yaxes(title_text='Percentage of the Population')
    return(fig)
# In[3]:
# Instantiate the Dash app; the Bootstrap stylesheet is required by the dbc components.
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
##########################################################################
# HELPER FUNCTIONS #
##########################################################################
def create_dropdown_options(series):
    """Build dcc.Dropdown options (label == value) from a pandas Series,
    sorted ascending and de-duplicated."""
    unique_sorted = series.sort_values().unique()
    return [{'label': entry, 'value': entry} for entry in unique_sorted]
def create_dropdown_value(series):
    """Default dropdown selection: every distinct Series value, ascending."""
    return series.sort_values().unique().tolist()
def create_slider_marks(values):
    """Map each tick value to the dcc.Slider mark dict labelling it with its own number."""
    marks = {}
    for tick in values:
        marks[tick] = {'label': str(tick)}
    return marks
##########################################################################
# ADD ONS TO APP (images, files,etc) #
##########################################################################
# Pull the table of published generation-interval estimates shown on the
# "Overview of Distributions" card, sorted by distribution then mean.
gi_df = pd.read_excel ('GI_Values.xlsx')[["Source", "Mean","SD", "Dist_Type"]]
gi_df.sort_values(by=["Dist_Type",'Mean'], inplace=True)
gi_df.reset_index(drop=True, inplace=True)
# Shared styling constants for the layouts below.
colors = {'background': '#111111','text': 'black'}
subheader_size = 20
##########################################################################
# DASHBOARD APP #
##########################################################################
# Top-level layout: a URL listener plus a placeholder div that the router
# callback fills with the page layout for the current path.
app.layout = html.Div(children=[
    dcc.Location(id="url",refresh=False),
    html.Div(id="output-div")
])
##########################################################################
# HOME PAGE #
##########################################################################
home_layout = html.Div(
#==========Create "Header" Data=================
children=[
html.Div(
[
html.Img(src=app.get_asset_url("edinburgh.png"), height=50),
html.Div([html.H4(children=('SI',html.Sub("n"),'R Covid-19 Modeling'), style={'textAlign': 'center', "font-weight": "bold"}),]),
], style={'display': 'flex', 'align-items': 'center', 'justify-content': 'center'}
),
#---Page Links---
html.Div([dbc.Row(
[
dbc.Col(html.Div(dcc.Link('Home',href="/")), style={'textAlign': 'center'}),
dbc.Col(html.Div(dcc.Link('Simulate Data',href="/simulate")), style={'textAlign': 'center'}),
dbc.Col(html.A("Github Code", href='https://github.com/annette-bell/SInR-Covid-Dissertation',
target="_blank"), style={'textAlign': 'center'}),
],
className="g-0",
)]),
#---Line Break---
html.Div([html.Hr(style={'borderWidth': "0.3vh", "color": "#FEC700"}),]),
#===============Home Page Information==========
html.Div(
[
html.H6("About this app", style={"margin-top": "0","font-weight": "bold","text-align": "left"}),
#html.Hr(),
html.P("The Susceptible-Infected-Recovered (SIR) compartmental model is used in epidemiology to identify\
and categorize members of a population based on their status with regards to a disease. Less\
studied variations of this problem are the SInR and SInRS models. These models, which have applications\
in latent infection and varying transmission rates, will be used on three different generation\
interval—the time between primary exposure and secondary infection—distributions: gamma, lognormal,\
and Weibull. The distributions are ultimately tested against one another to see not only \
which provides most realistic data, but how these data-sets interact.\
This app is meant to help people understand dynamics of COVID-19 modeling through a simply dashboard application.\
To see a more in-depth explanation, please see the Github repository which includes my dissertation.",
className="control_label",style={"text-align": "justify"}),
],
className="pretty_container almost columns",
),
#============AUTHOR=============
html.Div(
[
html.H6("Authors", style={"margin-top": "0","font-weight": "bold","text-align": "center"}),
html.P("Annette Bell (nettebell97@gmail.com)", style={"text-align": "center", "font-size":"10pt"}),
],
className="pretty_container almost columns",
),
#============ACKNOWLEDGEMENTS=============
html.Div(
[
html.H6("Acknowledgements", style={"margin-top": "0","font-weight": "bold","text-align": "center"}),
html.P("John Dagpunar: Dr. Dagpunar was my thesis advisor and extremely helpful throughout the project.)", style={"text-align": "left", "font-size":"10pt"}),
],
className="pretty_container almost columns",
),
#============SOURCES=============
html.Div(
[
html.H6("Sources", style={"margin-top": "0","font-weight": "bold","text-align": "center"}),
dcc.Markdown(
"""\
- Code Layout was used from Plotly public dash application: https://dash.gallery/dash-food-consumption/
- I examined another dash application to better understand how to use it. In addition to dash application resources, I analyzed the source code to clarify how to implement dash: https://github.com/FranzMichaelFrank/health_eu
- Edinburgh PNG: https://uploads-ssl.webflow.com/5eb13d58c8c08b73773d6b1c/600ea3810bde89c60e317be7_uni-of-edinburgh.png
"""
,style={"font-size":"10pt"}),
],
className="pretty_container almost columns",
),
])
##########################################################################
# SIMULATION PAGE #
##########################################################################
sim_layout = html.Div(
#==========Create "Header" Data=================
children=[
#---Title---
html.Div(
[
html.Img(src=app.get_asset_url("edinburgh.png"), height=50),
html.Div([html.H4(children=('SI',html.Sub("n"),'R Covid-19 Modeling'), style={'textAlign': 'center', "font-weight": "bold"}),]),
], style={'display': 'flex', 'align-items': 'center', 'justify-content': 'center'}
),
#---Page Links---
html.Div([dbc.Row(
[
dbc.Col(html.Div(dcc.Link('Home',href="/")), style={'textAlign': 'center'}),
dbc.Col(html.Div(dcc.Link('Simulate Data',href="/simulate")), style={'textAlign': 'center'}),
dbc.Col(html.A("Github Code", href='https://github.com/annette-bell/SInR-Covid-Dissertation',
target="_blank"), style={'textAlign': 'center'}),
], className="g-0",
)]),
#---Line Break---
html.Div([html.Hr(style={'borderWidth': "0.3vh", "color": "#FEC700"}),]),
#============OVERIEW OF THE SIMULATION DATA=================
html.Div(
[
html.H6(["Overview of Distributions", html.Br()], style={"margin-top": "0","font-weight": "bold","text-align": "left"}),
html.Div(
[
dbc.Row(
[#---Table of Previous Analysis---:
dbc.Col(dash_table.DataTable(gi_df.to_dict('records'), [{"name": i, "id": i} for i in gi_df.columns],
style_header={'text-align': 'center', 'fontWeight': 'bold',},
style_table={'height': '200px', 'overflowY': 'auto'},
style_cell={'textAlign': 'left'},), width=3),
#---Commentary on the table
dbc.Col(html.Div([html.P("There are three main commonly used distributions to model the generation interval-\
the time from primary exposure to secondary infectiousness. These distributions include gamma, weibull, and log-normal.\
To the left, you can see a table of means and standard deviations from others previous work.",className="control_label",style={"text-align": "justify"}),]))]),]),
],
className="pretty_container",
),
#================= GENERATION INTERVAL SIMULATION =====================
html.Div(
[
html.Div(
#----------------INPUTS-----------
[
html.H6("Generation Interval Distribution:", style={"margin-top": "0","font-weight": "bold","text-align": "center"}),
html.P("Please select the distribution you wish to base your simulated generation interval data off of. Note: Seed=1234.", className="control_label",style={"text-align": "justify"}),
html.Br(),
#Shows the inputs for the specified distribution
html.Div(
[
dbc.Row([
dbc.Col(html.Div([
#---Mean---
html.P("Input the population mean:", className="control_label", style={"font-weight": "bold", "text-align": "left"}),
dcc.Input(id='pop_mean',placeholder='', type='number', value= 4.9),
])),
dbc.Col(html.Div([
#---SD---
html.P("Input the standard deviation:", className="control_label", style={"font-weight": "bold", "text-align": "left"}),
dcc.Input(id='stan_dev',placeholder='', type='number', value= 2),
])),
],),
#---Data Set Size---
html.P("Select size of data:", className="control_label", style={"font-weight": "bold", "text-align": "center"}),
dcc.Slider(id='gi_size', min=1000, max=10000, step=500, value=5000,
marks=create_slider_marks(list(range(1000,10001))[::1000])),
html.Br(),
#---Update Button---
html.Button(id='update_button', children="Simulate", n_clicks=0,style=dict(width='220px')),
],),
],
className="pretty_container four columns",
),
#----------------GI Plot-----------
html.Div(
[
html.H6("Generation Interval Simulations", style={"margin-top": "0","font-weight": "bold","text-align": "center"}),
#---Information Regarding Shape and Scale---
html.P(id='shape_scale', style={"text-align": "justify"}),
#---GI Histogram---
html.Div(id='gammaplot_container',children=[
dcc.Graph(id="gi_plots", style={'height': '80vh'}),
]),
],
className="pretty_container seven columns",
),
],
className="row flex-display",
),
#===============Transmission Rate==========
html.Br(),
html.Div([dbc.Row(
[
dbc.Col(html.Div(html.Hr(style={'borderWidth': "0.3vh", "color": "#FEC700", 'align':'right'},)), width=5),
dbc.Col(html.Div(html.H6("Transmission Rate", style={"text-align": "center", "font-weight": "bold",
"margin-top": "14px", "color": "#384142"})), width=1.5),
dbc.Col(html.Div(html.Hr(style={'borderWidth': "0.3vh", "color": "#FEC700", 'align':'left'},)), width=5),
],
)]),
html.Div(
[
html.Div(
[#----------------Parameters of Beta(u)-----------
html.H6("Parameters of \u03b2(u):", style={"margin-top": "0","font-weight": "bold","text-align": "center"}),
html.P("As the transmission rate is not constant, we create a function that simulates transmission a non constant transmission rate.", className="control_label",style={"text-align": "justify"}),
#---R_0---
html.P("Input the basic reproduction number:", className="control_label", style={"font-weight": "bold", "text-align": "center"}),
dcc.Input(id='R0', placeholder='', type='number', value= 2.3),
html.Br(),
html.Br(),
#---Update Button---
html.Button(id='beta_update_button', children="Calculate B(u)", n_clicks=0, style=dict(width='220px')),
], className="pretty_container four columns",
),
html.Div(
[#----------------Beta(u) Plot-----------
html.H6("Expected Infectious Curve", style={"margin-top": "0","font-weight": "bold","text-align": "center"}),
#html.P("Visualize the transmission rates. Note: the following results are based on the parameters and the GI Data simulated above.", className="control_label",style={"text-align": "justify"}),
html.Br(),
#---return weights and betas---
#html.P(id='weights_beta_info', style={"text-align": "justify"}),
#html.P(id='lambdas', style={"text-align": "justify"}),
#html.P(id='weights', style={"text-align": "justify"}),
#html.P(id='betas', style={"text-align": "justify"}),
dcc.Graph(id="beta_plot", style={'height': '60vh'}),
],
className="pretty_container seven columns",
),
],
className="row flex-display",
),
#=====================SI(n)R Model=====================
html.Br(),
html.Div([dbc.Row(
[
dbc.Col(html.Div(html.Hr(style={'borderWidth': "0.3vh", "color": "#FEC700", 'align':'right'},)), width=5),
dbc.Col(html.Div(html.H6("Modeling COVID-19", style={"text-align": "center", "font-weight": "bold",
"margin-top": "14px", "color": "#384142"})), width=1.5),
dbc.Col(html.Div(html.Hr(style={'borderWidth': "0.3vh", "color": "#FEC700", 'align':'left'},)), width=5),
],
)]),
html.Div(
[
html.Div(
[#----------------Parameters of SInR Model-----------
html.H6("Parameters of the Model:", style={"margin-top": "0","font-weight": "bold","text-align": "center"}),
#html.P("Beta(u) was last calculated using the following:", className="control_label",style={"text-align": "justify"}),
html.Div([
dcc.RadioItems(
id='distribution-dropdown',
options=[
{'label': 'gamma', 'value': 'gamma'},
{'label': 'weibull', 'value': 'weibull'},
{'label': 'log-normal','value': 'lognorm'},
{'label': 'all','value': 'all'}],
value='all',
labelStyle={"display": "inline-block"},
style={"font-weight": "bold", "text-align": "center"},
),],),
dbc.Row(
[
dbc.Col(html.Div([
#---Total Population---
html.P("Total Population (N):", className="control_label", style={"font-weight": "bold", "text-align": "left"}),
dcc.Input(id='N_size', placeholder='', type='number', value = 67886011),
])),
dbc.Col(html.Div([
#---simulated days---
html.P("Total Days to Simulate Over:", className="control_label", style={"font-weight": "bold", "text-align": "left"}),
dcc.Input(id='t_days', placeholder='', type='number', value = 180),
])),
], className="g-0",
),
dbc.Row(
[
dbc.Col(html.Div([
#---Recovered---
html.P("Initial Recovered (R):", className="control_label", style={"font-weight": "bold", "text-align": "left"}),
dcc.Input(id='R_size', placeholder='', type='number', value = 0),
])),
dbc.Col(html.Div([
#---simulated days---
html.P("Contact Rate:", className="control_label", style={"font-weight": "bold", "text-align": "left"}),
dcc.Input(id='c', placeholder='', type='number', value = 1),
])),
], className="g-0",
),
dbc.Row(
[
dbc.Col(html.Div([
#---Infected---
html.P("Initail Infected (I):", className="control_label", style={"font-weight": "bold", "text-align": "left"}),
dcc.Input(id='I_size', placeholder='', type='number', value = 1),
])),
dbc.Col(html.Div([
#---simulated days---
html.P("Waning Immunity Rate:", className="control_label", style={"font-weight": "bold", "text-align": "left"}),
dcc.Input(id='h', placeholder='', type='number', value = 0),
])),
], className="g-0",
),
#---n_slider---
html.P("Select the compartment size: ", className="control_label", style={"font-weight": "bold", "text-align": "center"}),
dcc.Slider(1, 20, step=1, value=10, id='n_val'),
#---SInR Button---
html.Br(),
html.Br(),
html.Button(id='model_button', children="Model", n_clicks=0, style=dict(width='220px')),
], className="pretty_container four columns",
),
html.Div(
[#----------------SInR Plot-----------
html.H6(('SI',html.Sub("n"),'R Covid-19 Modeling'), style={"margin-top": "0","font-weight": "bold","text-align": "center"}),
html.P(id='model_parameters', style={"text-align": "justify"}),
#html.P("Visualize the how th population shifts.", className="control_label",style={"text-align": "justify"}),
dcc.Graph(id="SInR_plot", style={'height': '60vh'}),
html.Div([ ], id='plot1'),
],
className="pretty_container seven columns",
),
],
className="row flex-display",
),
],
id="mainContainer",
style={"display": "flex", "flex-direction": "column"},
)
##########################################################################
# LINK TO EACH PAGE #
##########################################################################
@app.callback(
    Output(component_id="output-div",component_property="children"),
    Input(component_id="url",component_property="pathname"))
def update_page_layout(pathname):
    '''
    Route the browser URL to the matching page layout:
    '/simulate' shows the simulation page, any other path falls
    back to the home page.
    '''
    return sim_layout if pathname == "/simulate" else home_layout
##########################################################################
# SIMULATE AND VISUALIZE DISTRIBUTION DATA SETS #
##########################################################################
@app.callback(
    [Output('gi_plots', 'figure'),
    Output('shape_scale', 'children')],
    [Input(component_id='update_button', component_property='n_clicks')],
    [State('stan_dev', 'value'),
    State('pop_mean', 'value'),
    State('gi_size', 'value')]
    )
def update_sim_gi(n_clicks, sd, mean, size):
    '''
    Simulate the three candidate generation-interval distributions
    (gamma, log-normal and weibull) from the requested mean, standard
    deviation and sample size, and return their plot together with a
    text summary of each sample's realised mean and standard deviation.
    '''
    # ----------CREATE DISTRIBUTIONS---------
    gamma_data, lognorm_data, weibull_data = create_gi(mean, sd, size)
    samples = (gamma_data, lognorm_data, weibull_data)
    mean_vals = [np.mean(sample) for sample in samples]
    std_vals = [np.std(sample) for sample in samples]
    # --------------VISUALIZE----------------
    gi_fig = gi_visualize(gamma_data, lognorm_data, weibull_data)
    summary = f'Given the input mean and standard deviation of {mean} and {sd} respectively, the distributions are as follows: Gamma (x\u0305={round(mean_vals[0],3)}, s={round(std_vals[0],3)}). Lognormal(x\u0305 ={round(mean_vals[1],3)}, s={round(std_vals[1],3)}). Weibull(x\u0305={round(mean_vals[2],3)}, s={round(std_vals[2],3)}).'
    return gi_fig, summary
##########################################################################
# CREATE AND PLOT BETA(u) #
##########################################################################
@app.callback(
    [Output('beta_plot', 'figure')],
    [Input(component_id='beta_update_button', component_property='n_clicks')],
    [State('R0', 'value'), #DISTRIBUTION STATES
    State('stan_dev', 'value'),
    State('pop_mean', 'value'),
    State('gi_size', 'value'),
    ]
    )
def update_beta_u_plot(n_click, R0, sd, mean, size):
    '''
    Recompute and plot the transmission rate B(u) once the
    "Calculate B(u)" button is clicked.

    Parameters:
        n_click: Click counter of the update button (unused trigger value).
        R0: Basic reproduction number.
        sd, mean, size: Standard deviation, mean and sample size used to
            simulate the generation-interval distributions.

    Returns:
        list: Single-element list holding the B(u) figure, as required by
            the single-output callback declaration.
    '''
    # ----------CREATE DISTRIBUTIONS---------
    gamma_data, lognorm_data, weibull_data = create_gi(mean, sd, size)
    # ---- determine minimal acceptable compartment size -----
    # Each distribution needs at least mean^2/variance compartments;
    # take the largest requirement so one size works for all three.
    g_min_comp = math.ceil((np.mean(gamma_data)**2)/(np.var(gamma_data)))
    l_min_comp = math.ceil((np.mean(lognorm_data)**2)/(np.var(lognorm_data)))
    w_min_comp = math.ceil((np.mean(weibull_data)**2)/(np.var(weibull_data)))
    min_acceptable = max(g_min_comp, l_min_comp, w_min_comp)
    # ----------------CALC VALS------------------
    # Only the lambda and beta values are needed for the plot; the weights
    # returned by solver() are discarded (the original built an unused w_n
    # list from them).
    _, l_gamma, b_gamma = solver(gamma_data, R0, min_acceptable, "gamma")
    _, l_lognorm, b_lognorm = solver(lognorm_data, R0, min_acceptable, "lognorm")
    _, l_weibull, b_weibull = solver(weibull_data, R0, min_acceptable, "weibull")
    # ----------------PLOT Beta(u)------------------
    b_n = [b_gamma, b_lognorm, b_weibull]
    l_n = [l_gamma, l_lognorm, l_weibull]
    beta_plot = plot_beta_dist(b_n, l_n)
    return [go.Figure(data=beta_plot)]
##########################################################################
# UPDATE SInR MODEL #
##########################################################################
@app.callback(
    [Output('SInR_plot', 'figure'),],
    [Input(component_id='model_button', component_property='n_clicks')],
    [State('stan_dev', 'value'),
    State('pop_mean', 'value'),
    State('gi_size', 'value'),
    State('distribution-dropdown', 'value'),
    State('N_size', 'value'),
    State('I_size', 'value'),
    State('R_size', 'value'),
    State('t_days', 'value'),
    State('n_val', 'value'),
    State('R0', 'value'),
    State('c', 'value'),
    State('h', 'value')]
    )
def update_SInR_plot(n_click, sd, mean, size, show, N, I1_t0, R_t0, days, n, R0, c_val, h_val):
    '''
    Model and visualize the SInR(S) epidemic curves when the "Model"
    button is clicked.

    Parameters:
        n_click: Click counter of the model button (unused trigger value).
        sd, mean, size: Parameters for simulating the generation intervals.
        show: Which distribution to plot ('gamma', 'lognorm', 'weibull' or 'all').
        N: Total population.
        I1_t0: Initial infected count (placed in the first I compartment).
        R_t0: Initial recovered count.
        days: Number of days to simulate.
        n: Number of infectious compartments.
        R0: Basic reproduction number.
        c_val: Contact rate.
        h_val: Waning immunity rate (0 -> SInR model, otherwise SInRS).

    Returns:
        list: Single-element list holding the model figure, as required by
            the single-output callback declaration.
    '''
    gamma_data, lognorm_data, weibull_data = create_gi(mean, sd, size)
    # ----------------CALC VALS------------------
    # Only the lambdas and betas feed the model; the weights returned by
    # solver() are not used here and are discarded.  (The original also
    # computed per-distribution minimal compartment sizes and a debug
    # print(b_n) that were never used; both removed.)
    _, l_gamma, b_gamma = solver(gamma_data, R0, n, "gamma")
    _, l_lognorm, b_lognorm = solver(lognorm_data, R0, n, "lognorm")
    _, l_weibull, b_weibull = solver(weibull_data, R0, n, "weibull")
    b_n = [b_gamma, b_lognorm, b_weibull]
    l_n = [l_gamma, l_lognorm, l_weibull]
    dist = ["Gamma", "Lognormal", "Weibull"]
    # ----specify initial compartments (shared by both plot modes)-------
    I_t0 = [I1_t0] + (n-1)*[0]
    S_t0 = N - sum(I_t0) - R_t0
    y_t0 = [S_t0] + I_t0 + [R_t0]
    t = np.array(list(range(0, days+1)))
    # ----specify model type: waning immunity turns SInR into SInRS-----
    # NOTE: model_type no longer carries a trailing " of " -- the original
    # produced titles like "SInR Model of of Gamma ..." / "Model of  for all".
    if h_val == 0:
        model_type = "SI\u2099R Model"
    else:
        model_type = "SI\u2099RS Model"
    if show == "all":
        # Overlay all three generation-interval models in one figure.
        SInR_compare = go.Figure()
        SInR_compare.update_layout(title=model_type + " for all Generation Intervals",
                                   legend=dict(yanchor="top", y=-0.2, xanchor="left", x=0.02, orientation="h"),
                                   font_size=14)
        # First distribution drawn solid, the others dotted/dashed.
        dash = [None, "dot", "dash"]
        for count, (b, l) in enumerate(zip(b_n, l_n)):
            fig = SInR_plot(y_t0, t, N, c_val, h_val, b, l)
            s_data = list(fig['data'][0]['y'])
            i_data = list(fig['data'][1]['y'])
            r_data = list(fig['data'][2]['y'])
            SInR_compare.add_trace(go.Scatter(x=t, y=s_data, name="Susceptible: " + dist[count] + " GI",
                                              line_color="blue", line_dash=dash[count]))
            SInR_compare.add_trace(go.Scatter(x=t, y=i_data, name="Infected: " + dist[count] + " GI",
                                              line_color="red", line_dash=dash[count]))
            SInR_compare.add_trace(go.Scatter(x=t, y=r_data, name="Recovered: " + dist[count] + " GI",
                                              line_color="green", line_dash=dash[count]))
        SInR_compare.update_xaxes(title_text='Time (Days)')
        SInR_compare.update_yaxes(title_text='Percentage of the Population')
        return [go.Figure(data=SInR_compare)]
    else:
        # BUG FIX: the original `if`/`if`/`else` chain sent show == "gamma"
        # through the final `else`, plotting the Weibull model (index 2)
        # instead of the Gamma model; an elif chain selects correctly.
        if show == "gamma":
            index = 0
        elif show == "lognorm":
            index = 1
        else:
            index = 2
        fig = SInR_plot(y_t0, t, N, c_val, h_val, b_n[index], l_n[index])
        fig.update_layout(title=model_type + " of " + dist[index] + " Generation Interval",
                          legend=dict(yanchor="top", y=0.35, xanchor="left", x=0.01), font_size=14)
        return [go.Figure(data=fig)]
##########################################################################
# RUN THE APP #
##########################################################################
# Start the Dash development server when this module is run directly
# (debug mode off for deployment-like behavior).
if __name__ == "__main__":
    app.run_server(debug=False)
#TABLE STYLING:
#https://dash.plotly.com/datatable/style
# <br><br><br><br>
| annette-bell/SInR-Covid-Dissertation | dash_lambda.py | dash_lambda.py | py | 44,277 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "numpy.random.seed",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.gamma",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.random",... |
6313183972 |
from django.urls import path, include
from django.conf.urls import url
from mysite import views
urlpatterns = [
path('search/', views.search_view, name='search'),
url(r'^request/(?P<record_id>[-\w]+)/$', views.send_req_view, name='request'),
path('requests/', views.req_view, name='requests'),
path('profile/', views.profile_view, name='profile'),
url(r'^profile/edit/$', views.edit_profile, name='edit_profile'),
path('', views.home_view, name='home'),
path('about/', views.about_view, name='about'),
url(r'^requests/delete/(?P<req_id>[-\w]+)/$', views.delete_req_view, name='d_request'),
url(r'^requests/accept/(?P<req_id>[-\w]+)/$', views.accept_req_view, name='accept_request'),
url(r'^requests/reject/(?P<req_id>[-\w]+)/$', views.reject_req_view, name='reject_request'),
path('notifications/', views.notification_view, name='notifications'),
path('profile/become_donor', views.become_donor_view, name='become_donor'),
path('add_record', views.add_record_view, name='add_record'),
]
| abpopal/sehat.af | mysite/urls.py | urls.py | py | 1,044 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "mysite.views.search_view",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "mysite.views",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.conf.... |
30188434989 | import bluetooth
class Alpha1S:
"""
Class to control the Ubtech Alpha 1S robot
"""
def __init__(self, name="ALPHA 1S"):
self.__bt = self.Alpha1S_bluetooth(name)
def battery(self):
"""
Get battery information.
Returns:
dict: Dictionary with fields:
percent: Remaining battery capacity
state:
0: Battery not charging
1: Battery charging
2: Battery not present
mV: Battery voltage in mV
"""
msg = b'\x18\x00'
parameter_len = 4
ans = self.__bt.read(msg, parameter_len)
if ans is not None:
battery = {
"percent": int.from_bytes(ans[3:], "big"),
"state": int.from_bytes(ans[2:3], "big"),
"mV": int.from_bytes(ans[:2], "big")
}
return battery
return None
def leds(self, state):
"""
Turn LEDs on or off.
Parameters:
state: Set True to turn on, False to turn off.
"""
if state:
state = b'\x01'
else:
state = b'\x00'
msg = b'\x0D' + state
self.__bt.write(msg)
def servo_read(self, servo_id):
"""
Read the position of a single servo.
Note: Reading a servo will automatically power it off.
Parameters:
servo_id: Servo id between 0-15, integer
Returns:
int: Position of the servo between 0-180
"""
# Adding 1 to the servo_id because the robot starts counting at 1
servo_id = bytes([servo_id+1])
msg = b'\x24' + servo_id
parameter_len = 2
ans = self.__bt.read(msg, parameter_len)
if ans is not None:
# Check that the received value corresponds to the specified servo
if ans[:1] == servo_id:
return int.from_bytes(ans[1:], "big")
return None
def servo_read_all(self):
"""
Read the positions for all the servos simultaneously.
Note: Reading a servo will automatically power it off.
Returns:
[int]: List of 16 integer positions between 0-180
"""
msg = b'\x25\x00'
parameter_len = 16
ans = self.__bt.read(msg, parameter_len)
if ans is not None:
return [x for x in ans]
return None
def servo_write(self, servo_id, angle, travelling=20):
"""
Set a specific servo to an angle.
Parameters:
servo_id: Servo id between 0-15, integer
angle: Angle between 0-180, integer
travelling: Time the servo takes to move to the position
Returns:
int: Error code:
0: Success
1: Wrong servo servo_id
2: Allow servo angle excess
3: No reply from servo
"""
# Adding 1 to the servo_id because the robot starts counting at 1
servo_id = bytes([servo_id+1])
angle = bytes([angle])
run_time = bytes([travelling])
time_frames = b'\x00\x10'
msg = b'\x22' + servo_id + angle + run_time + time_frames
parameter_len = 2
ans = self.__bt.read(msg, parameter_len)
if ans is not None:
# Check that the received value corresponds to the specified servo
if ans[:1] == servo_id:
return int.from_bytes(ans[1:], "big")
return None
def servo_write_all(self, angles, travelling=20):
"""
Set all servos to the specified positions simultaneously.
Parameters:
angles: List of integer angles between 0-180
travelling: Time the servo takes to move to the position, integer
Returns:
[int]: List of error codes for each servo:
0: Success
1: Wrong servo servo_id
2: Allow servo angle excess
3: No reply from servo
"""
if len(angles) != 16:
return None
angles = bytearray(angles)
run_time = bytes([travelling])
time_frames = b'\x00\x10'
msg = b'\x23' + angles + run_time + time_frames
parameter_len = 16
ans = self.__bt.read(msg, parameter_len)
if ans is not None:
return [x for x in ans]
return None
def servo_off(self):
"""
Send command to power off all the servos in the robot.
"""
msg = b'\x0C\x00'
self.__bt.write(msg)
class Alpha1S_bluetooth:
"""
Class to handle the Alpha1S' bluetooth protocol
Download Bluetooth protocol datasheet from
https://assets-new.ubtrobot.com/downloads/Alpha%201%20Series%20Bluetooth%20communication%20protocol?download
""" # noqa
def __init__(self, name):
address = self.__discover(name)
assert(address is not None), f"Error: {name} not found"
self.__connect(address)
def __del__(self):
self.sock.close()
def __discover(self, name):
address = None
devices = bluetooth.discover_devices(lookup_names=True)
for add, text in devices:
if text == name:
address = add
break
return address
def __connect(self, addr):
self.sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
self.sock.connect((addr, 6))
self.sock.settimeout(10.0)
def write(self, msg):
"""
Compose an outgoing message following the required format and send
over the bluetooth socket. Takes bytes as input.
"""
cmd = self.__compose(msg)
self.sock.send(cmd)
def read(self, msg, ans_len):
"""
Use the write() function to send a command and receive its answer.
Returns the 'Parameter' field in bytes if the message was received
correctly, None otherwise.
"""
self.write(msg)
# Length is sum of header(2), length, check, cmd, ans_len and end
length = 6 + ans_len
ans = self.sock.recv(length)
if self.__check(ans):
return ans[4:-2]
return None
def __compose(self, msg):
"""
Compose a byte message with the header, length, check and end
bytes in the required format.
"""
header = b'\xFB\xBF'
end = b'\xED'
# Length is sum of header(2), length, check + msg bytes
length = bytes([4 + len(msg)])
# Check is sum of length + msg (length+(cmd+params)), with modulus
# to fit into a single byte
check_list = bytearray(length)
check_list.extend(msg)
check = bytes([sum(check_list) % 256])
return header + length + msg + check + end
def __check(self, msg):
"""
Check that the received message follows the correct format and that
the check byte is correct.
Returns True if message is correct, False otherwise
"""
msg = bytearray(msg)
# Check that header is correct
if msg[:2] != b'\xFB\xBF':
return False
# Check that ending is correct
elif msg[-1:] != b'\xED':
return False
# Check that check byte is correct
elif msg[-2:-1] != bytes([sum(msg[2:-2]) % 256]):
return False
else:
return True
| alvaroferran/Alpha1S | alpha1s/__init__.py | __init__.py | py | 7,677 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "bluetooth.discover_devices",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "bluetooth.BluetoothSocket",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "bluetooth.RFCOMM",
"line_number": 179,
"usage_type": "attribute"
}
] |
72532676669 | # pylint: disable=protected-access
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
import urllib.parse
from pathlib import Path
from random import choice
from typing import Awaitable, Callable
import pytest
from aiohttp import web
from aiohttp.test_utils import TestClient
from faker import Faker
from models_library.api_schemas_storage import FileMetaDataGet, SimcoreS3FileID
from models_library.projects import ProjectID
from models_library.users import UserID
from pydantic import ByteSize, parse_obj_as
from pytest_simcore.helpers.utils_assert import assert_status
pytest_simcore_core_services_selection = ["postgres"]
pytest_simcore_ops_services_selection = ["adminer"]
async def test_get_files_metadata(
upload_file: Callable[[ByteSize, str], Awaitable[tuple[Path, SimcoreS3FileID]]],
client: TestClient,
user_id: UserID,
location_id: int,
project_id: ProjectID,
faker: Faker,
):
assert client.app
url = (
client.app.router["get_files_metadata"]
.url_for(location_id=f"{location_id}")
.with_query(user_id=f"{user_id}")
)
# this should return an empty list
response = await client.get(f"{url}")
data, error = await assert_status(response, web.HTTPOk)
assert not error
list_fmds = parse_obj_as(list[FileMetaDataGet], data)
assert not list_fmds
# now add some stuff there
NUM_FILES = 10
file_size = parse_obj_as(ByteSize, "15Mib")
files_owned_by_us = []
for _ in range(NUM_FILES):
files_owned_by_us.append(await upload_file(file_size, faker.file_name()))
# we should find these files now
response = await client.get(f"{url}")
data, error = await assert_status(response, web.HTTPOk)
assert not error
list_fmds = parse_obj_as(list[FileMetaDataGet], data)
assert len(list_fmds) == NUM_FILES
# create some more files but with a base common name
NUM_FILES = 10
file_size = parse_obj_as(ByteSize, "15Mib")
files_with_common_name = []
for _ in range(NUM_FILES):
files_with_common_name.append(
await upload_file(file_size, f"common_name-{faker.file_name()}")
)
# we should find these files now
response = await client.get(f"{url}")
data, error = await assert_status(response, web.HTTPOk)
assert not error
list_fmds = parse_obj_as(list[FileMetaDataGet], data)
assert len(list_fmds) == (2 * NUM_FILES)
# we can filter them now
response = await client.get(f"{url.update_query(uuid_filter='common_name')}")
data, error = await assert_status(response, web.HTTPOk)
assert not error
list_fmds = parse_obj_as(list[FileMetaDataGet], data)
assert len(list_fmds) == (NUM_FILES)
@pytest.mark.xfail(
reason="storage get_file_metadata must return a 200 with no payload as long as legacy services are around!!"
)
async def test_get_file_metadata_is_legacy_services_compatible(
client: TestClient,
user_id: UserID,
location_id: int,
simcore_file_id: SimcoreS3FileID,
):
assert client.app
url = (
client.app.router["get_file_metadata"]
.url_for(
location_id=f"{location_id}",
file_id=f"{urllib.parse.quote(simcore_file_id, safe='')}",
)
.with_query(user_id=f"{user_id}")
)
# this should return an empty list
response = await client.get(f"{url}")
await assert_status(response, web.HTTPNotFound)
async def test_get_file_metadata(
upload_file: Callable[[ByteSize, str], Awaitable[tuple[Path, SimcoreS3FileID]]],
client: TestClient,
user_id: UserID,
location_id: int,
project_id: ProjectID,
simcore_file_id: SimcoreS3FileID,
faker: Faker,
):
assert client.app
url = (
client.app.router["get_file_metadata"]
.url_for(
location_id=f"{location_id}",
file_id=f"{urllib.parse.quote(simcore_file_id, safe='')}",
)
.with_query(user_id=f"{user_id}")
)
# this should return an empty list
response = await client.get(f"{url}")
# await assert_status(response, web.HTTPNotFound)
# NOTE: This needs to be a Ok response with empty data until ALL legacy services are gone, then it should be changed to 404! see test above
assert response.status == web.HTTPOk.status_code
assert await response.json() == {"data": {}, "error": "No result found"}
# now add some stuff there
NUM_FILES = 10
file_size = parse_obj_as(ByteSize, "15Mib")
files_owned_by_us = []
for _ in range(NUM_FILES):
files_owned_by_us.append(await upload_file(file_size, faker.file_name()))
selected_file, selected_file_uuid = choice(files_owned_by_us)
url = (
client.app.router["get_file_metadata"]
.url_for(
location_id=f"{location_id}",
file_id=f"{urllib.parse.quote(selected_file_uuid, safe='')}",
)
.with_query(user_id=f"{user_id}")
)
response = await client.get(f"{url}")
data, error = await assert_status(response, web.HTTPOk)
assert not error
assert data
fmd = parse_obj_as(FileMetaDataGet, data)
assert fmd.file_id == selected_file_uuid
assert fmd.file_size == selected_file.stat().st_size
| ITISFoundation/osparc-simcore | services/storage/tests/unit/test_handlers_files_metadata.py | test_handlers_files_metadata.py | py | 5,239 | python | en | code | 35 | github-code | 6 | [
{
"api_name": "typing.Callable",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "pydantic.ByteSize",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "typing.Awaitable",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
... |
15697493759 | import cv2 as cv
# Cargamos la imagen y transformamos en blanco y negro
img_original = cv.imread("imgs/Cuadrados.jpg")
img_bnw = cv.cvtColor(img_original, cv.COLOR_BGR2GRAY)
# Aplicamos la funcion de deteccion de esquinas
maxCorners = 20
esquinas = cv.goodFeaturesToTrack(img_bnw, maxCorners, 0.01, 10)
# Definimos el criterio de stop para la precision subpixel y lo aplicamos
criterio_stop = (cv.TERM_CRITERIA_MAX_ITER + cv.TERM_CRITERIA_EPS, maxCorners, 0.0001)
esquinas_final = cv.cornerSubPix(img_bnw, esquinas, (5,5), (2,2), criterio_stop)
# Dibujamos sobre la imagen
for esquina in esquinas_final:
x, y = esquina.ravel()
cv.circle(img_original, (int(x), int(y)), 2, (0,0,0), -1)
cv.namedWindow("original", cv.WINDOW_NORMAL)
cv.imshow("original", img_original)
cv.waitKey()
cv.destroyAllWindows() | FadedGuy/Universidad | L3/visionComputador/tp5/cuestiones/8.py | 8.py | py | 816 | python | es | code | 2 | github-code | 6 | [
{
"api_name": "cv2.imread",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "cv2.goodFeaturesToTrack"... |
3835462701 | #coding: utf-8
import urllib
import BaseHTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
import webbrowser
import os
from threading import Timer
import base64
from ui import Image
import console
import sys
import io
sys.stderr = io.StringIO()
console.show_activity('Creating images…')
imagefilenames = [os.path.splitext(i)[0] for i in open(os.path.expanduser('~/Pythonista.app/Typicons-M.txt')).readlines()]
imagenames = [i.replace('Typicons96_', '') for i in imagefilenames]
images = {n: Image.named(imagefilenames[i]) for (i, n) in enumerate(imagenames)}
imageurls = {k:'data:image/png;base64,'+base64.b64encode(images[k].to_png()) for k in images}
choosecolorpath = os.path.dirname(sys.argv[0].split('/Documents/',1)[1]) + '/choosecolor.py'
tagtemplate = '<a href="pythonista://' +choosecolorpath+ '?action=run&argv=%s"><img src="%s"></a>'
imagetags = [tagtemplate%(k,imageurls[k]) for k in imagenames]
imagesstring = ''.join(imagetags)
html = '''
<!DOCTYPE html>
<html>
<head>
<style type="text/css">
body {
background:#292929;
text-align:center;
line-height:0;
margin:0;
}
img {
width:48px;
height:48px;
padding:6px;
margin:8px;
background-color:#707070;
background: linear-gradient(#707070, #5a5a5a);
border-radius:14px;
box-shadow:0 2px 4px rgba(0,0,0,0.5);
}
h1 {
font-family:"Avenir Next";
color: white;
padding: 10px;
text-shadow: 0 2px 4px rgba(0,0,0,0.5);
}
</style>
<meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
</head>
<body>
<h1>Choose an Icon</h1>
%s
</body>
</html>
''' % imagesstring
class RequestHandler (SimpleHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(html)
def log_message(self, format, *args):
pass
serv = BaseHTTPServer.HTTPServer(('', 0), RequestHandler)
port = serv.server_port
Timer(1, webbrowser.open, ('http://localhost:%d'%port,)).start()
console.show_activity('Starting server…')
serv.handle_request()
console.hide_activity()
| 0942v8653/pythonista-homescreen-icon | chooseicon.py | chooseicon.py | py | 2,087 | python | en | code | 9 | github-code | 6 | [
{
"api_name": "sys.stderr",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "io.StringIO",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "console.show_activity",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
... |
7571022590 | import numpy as np
import logging
import sys
import pkg_resources
import pytz
import datetime
import os
import re
from numpy import cos,sin
# Get the version
version_file = pkg_resources.resource_filename('pynortek','VERSION')
# Setup logging module
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
logger = logging.getLogger('pynortek')
def xyz2enu(u,v,w,head,pitch,roll,inverse=False):
"""
Transforms velocities in XYZ coordinates to ENU, or vice versa if
inverse=True. Transformation is done according to the Nortek
convention
"""
# convert to radians
hh = np.pi*(head-90)/180
pp = np.pi*pitch/180
rr = np.pi*roll/180
ut = np.zeros(np.shape(u))
vt = np.zeros(np.shape(u))
wt = np.zeros(np.shape(u))
for i in range(len(head)):
# generate heading matrix
H = np.matrix([[cos(hh[i]), sin(hh[i]), 0],[-sin(hh[i]), cos(hh[i]), 0],[0, 0, 1]])
# generate combined pitch and roll matrix
P = [[cos(pp[i]), -sin(pp[i])*sin(rr[i]), -cos(rr[i])*sin(pp[i])],
[0, cos(rr[i]), -sin(rr[i])],
[sin(pp[i]), sin(rr[i])*cos(pp[i]), cos(pp[i])*cos(rr[i])]]
R = H*P
#print(R)
if(inverse):
R = np.inv(R)
# do transformation
ut[i] = R[0,0]*u[i] + R[0,1]*v[i] + R[0,2]*w[i];
vt[i] = R[1,0]*u[i] + R[1,1]*v[i] + R[1,2]*w[i];
wt[i] = R[2,0]*u[i] + R[2,1]*v[i] + R[2,2]*w[i];
return [ut,vt,wt]
raw_data_files = ['.prf','.vec'] # Names of raw binary data files
class pynortek():
"""A Nortek parsing object
Author: Peter Holtermann (peter.holtermann@io-warnemuende.de)
Usage:
>>>filename='test'
>>>aquadopp = pynortek(filename)
"""
def __init__(self,filename, verbosity=logging.DEBUG, timezone=pytz.UTC):
"""
"""
logger.setLevel(verbosity)
self.timezone = timezone
self.deployment = os.path.split(filename)[-1]
self.fpath = os.path.split(filename)[0]
self.rawdata = {}
print(self.deployment)
print(self.fpath)
filename_hdr = filename + '.hdr'
logger.debug('Trying to open header file: ' + filename_hdr)
try:
fhdr = open(filename_hdr)
except Exception as e:
logger.warning('Could not open header file, exiting')
return
header = self.parse_header(fhdr)
self.header = header
print(header)
print('Loading files')
for fread in header['files']:
print(fread)
IS_RAW = False
for rawname in raw_data_files:
if(rawname in fread.lower()):
IS_RAW=True
if(IS_RAW == False):
print('Loading ' + fread)
suffix = fread.split('.')[-1]
fname_tmp = os.path.join(self.fpath,fread)
print(fname_tmp)
data_tmp = np.loadtxt(fname_tmp)
self.rawdata[suffix] = data_tmp
# Process the raw data just loaded
self.process_rawdata()
def parse_header(self,fhdr):
""" Parses a nortek header file
"""
header = {}
datefmt = '%d.%m.%Y %H:%M:%S'
header_field = None
header['files'] = []
while True:
l = fhdr.readline()
if(len(l) == 0):
break
# Find all files to be read
if((l[0] == '[')):
ftmp = l.split("\\")[-1].replace(']','').replace('\n','')
header['files'].append(ftmp)
# If we have a sensor file, check position of fields
if('.sen' in l[-7:]):
print('Sensor file')
header_field = 'sensors'
header[header_field] = {}
# Transducer distance
if(('Beam' in l) and ('Vertical' in l)):
print('Transducer distance')
header_field = 'distance'
header[header_field] = {'cell':[],'beam':[],'vertical':[]}
continue
# Check for the header field
if('User setup' in l):
print('User setup')
header_field = 'User setup'
header[header_field] = {}
elif('Hardware configuration' in l):
print('Hardware configuration')
header_field = 'Hardware configuration'
header[header_field] = {}
elif('Head configuration' in l):
header_field = 'Head configuration'
header[header_field] = {}
#print(l)
if(header_field is not None): # Check if field is over (one empty line)
if(len(l) <= 2):
print('Header ' + header_field + ' over')
header_field = None
# Check for a one line list
ind = l.find(' ')
if(ind >= 0):
if('Number of measurements' in l):
header['Number of measurements'] = int(l.split()[-1])
elif('Coordinate system' in l):
header['Coordinate system'] = l.split()[-1]
logger.debug('Coordinate system found: ' + header['Coordinate system'])
elif('Horizontal velocity range' in l):
header['Horizontal velocity range'] = float(l.split()[-2])
logger.debug('Horizontal velocity range: ' + str(header['Horizontal velocity range']))
elif('Vertical velocity range' in l):
header['Vertical velocity range'] = float(l.split()[-2])
logger.debug('Vertical velocity range: ' + str(header['Vertical velocity range']))
elif('Orientation' in l):
header['Orientation'] = l.split()[-1]
if('DOWN' in header['Orientation']):
header['updown'] = True
else:
header['updown'] = False
logger.debug('Orientation ' + header['Orientation'] + ' updown:' + str(header['updown']))
elif('Number of checksum errors' in l):
header['Number of checksum errors'] = int(l.split()[-1])
elif('Time of first measurement' in l):
ind2 = l.rfind(' ')
tstr = l[ind2+2:].replace('\n','')
ttmp = datetime.datetime.strptime(tstr,datefmt)
ttmp = ttmp.replace(tzinfo=self.timezone)
header['Time of first measurement'] = ttmp
elif('Time of last measurement' in l):
ind2 = l.rfind(' ')
tstr = l[ind2+2:].replace('\n','')
ttmp = datetime.datetime.strptime(tstr,datefmt)
ttmp = ttmp.replace(tzinfo=self.timezone)
header['Time of last measurement'] = ttmp
elif('Transformation matrix' in l):
logger.debug('Transformation matrix found')
header['Transformation matrix'] = np.zeros((3,3))
# Get all three lines
tmp = []
tmp.append(l)
tmp.append(fhdr.readline())
tmp.append(fhdr.readline())
for i in range(3):
T_tmp = np.asarray(tmp[i].split()[-3:]).astype(np.float)
header['Transformation matrix'][i,:] = T_tmp
logger.debug(str(header['Transformation matrix']))
elif('Magnetometer calibration matrix' in l):
logger.debug('Magnetometer calibration matrix found')
header['Magnetometer calibration matrix'] = np.zeros((3,3))
# Get all three lines
tmp = []
tmp.append(l)
tmp.append(fhdr.readline())
tmp.append(fhdr.readline())
for i in range(3):
T_tmp = np.asarray(tmp[i].split()[-3:]).astype(np.float)
header['Magnetometer calibration matrix'][i,:] = T_tmp
logger.debug(str(header['Magnetometer calibration matrix']))
else:
pass
if(header_field is not None):
if(header_field == 'sensors'):
l = l.replace('\n','').replace('\r','').strip() # remove return and trailing/leading blanks
lsp = re.sub(" +" , "\t", l).split('\t')
print('sensors',lsp)
field = lsp[1]
value = lsp[0]
header[header_field][field] = int(value)
elif(header_field == 'distance'):
l = l.replace('\n','').replace('\r','').strip() # remove return and trailing/leading blanks
lsp = re.sub(" +" , "\t", l).split('\t')
cell = lsp[0]
beam = lsp[1]
vertical = lsp[2]
print(cell,beam,vertical)
header[header_field]['cell'].append(int(cell))
header[header_field]['beam'].append(float(beam))
header[header_field]['vertical'].append(float(vertical))
else:
ind2 = l.rfind(' ')
data = l[ind2+2:].replace('\n','').replace('\r','')
field = l[:ind].replace('\n','').replace('\r','')
header[header_field][field] = data
#print(l.split())
return header
def process_rawdata(self):
""" Processes .sen data stored in data['sen'] and the remaining rawdata
"""
print('Creating time axis')
t = []
tu = []
for i in range(np.shape(self.rawdata['sen'][:,0])[0]):
month = int(self.rawdata['sen'][i,0])
day = int(self.rawdata['sen'][i,1])
year = int(self.rawdata['sen'][i,2])
hour = int(self.rawdata['sen'][i,3])
minute = int(self.rawdata['sen'][i,4])
millis = self.rawdata['sen'][i,5]%1
second = int(self.rawdata['sen'][i,5] - millis)
micro = int(millis*1000*1000)
ttmp = datetime.datetime(year,month,day,hour,minute,second,micro,tzinfo=self.timezone)
t.append(ttmp)
tu.append(ttmp.timestamp())
self.t = t # datetime time
self.tu = tu # unix time
self.data = {}
for k in self.header['sensors'].keys():
ind_key = self.header['sensors'][k] - 1
self.data[k] = self.rawdata['sen'][:,ind_key]
# Processing the remaining data
# For a profiler (Aquadopp)
aquadopp_keys = ['v1','v2','v3','a1','a2','a3','c1','c2','c3']
for key in aquadopp_keys:
if(key in self.rawdata.keys()):
print('Getting data from: ' + key + ' (profiler)')
self.data[key] = self.rawdata[key][:,2:]
if('distance' in self.header.keys()):
self.data['dis_beam'] = np.asarray(self.header['distance']['beam'])
self.data['dis_vertical'] = np.asarray(self.header['distance']['vertical'])
vector_keys = ['dat']
for key in vector_keys:
if(key in self.rawdata.keys()):
print('Getting data from: ' + key + ' (Vector)')
self.data[key] = self.rawdata[key][:,2:]
def rot_vel(self,coord,updown=None,save=False):
""" Rotates the velocities to different coordinate system
Args:
coord:
updown:
save:
"""
logger.debug('trans_coord():')
T = self.header['Transformation matrix'][:]
if(updown == None):
updown = self.header['updown']
# flip axes if instrument is pointing downward
# (so from here on, XYZ refers to a right-handed coordinate system
# with z pointing upward)
if updown:
logger.debug('Downlooking, changing matrix')
T[1,:] = -T[1,:];
T[2,:] = -T[2,:];
v1_rot = np.zeros(np.shape(self.data['v1']))
v2_rot = np.zeros(np.shape(self.data['v2']))
v3_rot = np.zeros(np.shape(self.data['v3']))
try:
v1_rep_rot = np.zeros(np.shape(self.data['v1_rep']))
v2_rep_rot = np.zeros(np.shape(self.data['v2_rep']))
v3_rep_rot = np.zeros(np.shape(self.data['v3_rep']))
repaired = True
except:
repaired_false = True
pass
print(np.shape(self.data['v1']))
if(coord == 'XYZ'):
if(self.header['Coordinate system'] == 'BEAM'):
logger.debug('BEAM to XYZ')
for i in range(np.shape(v1_rot)[0]):
for j in range(np.shape(v1_rot)[1]):
v1_rot[i,j] = T[0,0] * self.data['v1'][i,j] + T[0,1] * self.data['v2'][i,j] + T[0,2] * self.data['v3'][i,j]
v2_rot[i,j] = T[1,0] * self.data['v1'][i,j] + T[1,1] * self.data['v2'][i,j] + T[1,2] * self.data['v3'][i,j]
v3_rot[i,j] = T[2,0] * self.data['v1'][i,j] + T[2,1] * self.data['v2'][i,j] + T[2,2] * self.data['v3'][i,j]
if repaired:
v1_rep_rot[i,j] = T[0,0] * self.data['v1_rep'][i,j] + T[0,1] * self.data['v2_rep'][i,j] + T[0,2] * self.data['v3_rep'][i,j]
v2_rep_rot[i,j] = T[1,0] * self.data['v1_rep'][i,j] + T[1,1] * self.data['v2_rep'][i,j] + T[1,2] * self.data['v3_rep'][i,j]
v3_rep_rot[i,j] = T[2,0] * self.data['v1_rep'][i,j] + T[2,1] * self.data['v2_rep'][i,j] + T[2,2] * self.data['v3_rep'][i,j]
if save:
logger.debug('saving data in trans')
try: # Check if self.trans is existing
self.trans
except:
self.rotvel = {}
if(coord == 'XYZ'):
self.rotvel['u'] = v1_rot[:]
self.rotvel['v'] = v2_rot[:]
self.rotvel['w'] = v3_rot[:]
if repaired:
# Save the repaired data as well
self.rotvel['u_rep'] = v1_rep_rot[:]
self.rotvel['v_rep'] = v2_rep_rot[:]
self.rotvel['w_rep'] = v3_rep_rot[:]
return [v1_rot,v2_rot,v3_rot]
def repair_phase_shift(self,vel=None,threshold=None, save = False):
"""Tries to repair a phase shift in pulse coherent measurements. It
assumes that the first measured value is correct.
"""
if(vel == None):
vel = self.data
logger.debug('repairing native velocity')
coordinate_system = self.header['Coordinate system']
vel_all = [self.data['v1'],self.data['v2'],self.data['v3']]
else:
vel_all = [vel]
vel_rep_all = []
for vel_tmp in vel_all:
# Compute threshold from header data
if( coordinate_system == 'BEAM'):
logger.debug('Using thresholds for beam coordinates')
# Get the factor for the beam from the vertical velocity
fac = np.linalg.inv(self.header['Transformation matrix'])[0,2]
threshold_tmp = self.header['Vertical velocity range']# * fac
else:
logger.debug('Unknown threshold, returning')
return
vel_rep = np.zeros(np.shape(vel_tmp))
for i in range(np.shape(vel_rep)[1]):
vel_rep[:,i] = self.repair_phase_shift_vector(vel_tmp[:,i],threshold_tmp)
vel_rep_all.append(vel_rep)
print('hallo',vel is self.data)
if((vel is self.data) and save):
logger.debug("Saving data as data['v1_rep'] etc")
self.data['v1_rep'] = vel_rep_all[0]
self.data['v2_rep'] = vel_rep_all[1]
self.data['v3_rep'] = vel_rep_all[2]
def repair_phase_shift_vector(self,vel,threshold):
"""Tries to repair a phase shift in pulse coherent measurements. It
assumes that the first measured value is correct.
"""
vel_rep = vel.copy()
vthresh = threshold - 0.3 * threshold
for i in range(1,len(vel)):
if((np.sign(vel_rep[i-1]) != np.sign(vel_rep[i])) and (abs(vel_rep[i-1]) > vthresh) and (abs(vel_rep[i]) > vthresh)):
#print('Phase shift!')
dv = threshold - abs(vel_rep[i])
vel_rep[i] = np.sign(vel_rep[i-1]) * (threshold + dv)
return vel_rep
| MarineDataTools/pynortek | pynortek/pynortek.py | pynortek.py | py | 17,352 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "pkg_resources.resource_filename",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "l... |
70809071868 | # ------------------------------------------------------------------#
# AMPBA - 2021 Winter :: Data Collection assignment - PART2 #
# Group Id : 3 #
# Authors: #
# Nishant Jalasutram - PG ID: 012020051 #
# Ila Barshilia - PG ID: 012020022 #
# Deep Kamal Singh - PG ID: 012020053 #
# ------------------------------------------------------------------#
'''
Part 2:
For each match, go to the scorecard link like
https://www.espncricinfo.com/series/icc-cricket-world-cup-2019-1144415/india-vs-new-zealand-1st-semi-final-1144528/full-scorecard and extract the following:
1. Player of the match with the picture. Save the url to the picture in the csv.
2. Country that the player of the match belongs to.
3. Runs scored by every batsman.
4. Balls played by every batsman.
5. Strike rate for every batsman.
6. Wickets taken by every bowler.
7. Economy rate for every bowler.
8. which country won the toss.
9. who were the umpires?
10. who was the match referee
Save results in a file [your group no]_matchDetails.csv.
Name the .py file as [your group no]_matchDetails.py.
'''
import scrapy
# As per Assignments requirement we need to start our file name with GROUP number, thus we have to use import this
# way, instead of simply writing from <File_name> import <ClassName>
CricinfoPlayerProfileSpider = __import__(
'3_playerDetails').CricinfoPlayerProfileSpider # ,fromlist=['3_playerDetails'])
class CWC2019MatchStatsSpider(scrapy.Spider):
    """Spider that parses one ESPNcricinfo full-scorecard page and fills
    ``match_details_dict`` with the Part-2 fields (player of the match,
    batting/bowling figures, toss, umpires, referee), then yields requests
    for every player profile (Part 3)."""
    name = "cwc_2019_Scorecard_spider"
    cricinfo_host = 'https://www.espncricinfo.com'
    playerProfileParser = CricinfoPlayerProfileSpider(scrapy.Spider)
    # This method will be called for every inning of the match , and it will extract Batsmen and Bowlers
    def parseInning(self, anInning):
        """Return {'batsmen': [...], 'bowlers': [...]} for one innings section."""
        return {
            "batsmen": self.parseBatsmen(anInning.css("table[class='table batsman']")),
            "bowlers": self.parseBowler(anInning.css("table[class='table bowler']")),
        }
    # This method extracts match details - iterates over every TR and creates a KV Pair dictionary
    # thus making it independent of sequence of occurrence
    def parseMatchDetails(self, matchDetailSection):
        """Turn the match-details table into a {label: value} dictionary."""
        trs = matchDetailSection.css("tbody").css("tr")
        returnDict = {}
        for aRow in trs:
            tds = aRow.css("td")
            if tds is not None and len(tds) > 1:
                returnDict[tds[0].css('::text').extract()[0]] = tds[1].css('::text').extract()[0] if len(
                    tds[1].css('::text').extract()) == 1 else ",".join(tds[1].css('::text').extract())
        return returnDict
    # This method extract Player of the match information
    # however, we found the player of the match image is lazy loaded and Scrapy is not good enough for this
    # we tried with Selenium as well as with Beautifulsoup which made the code messy and inconsistent,
    # so we are now getting player pic in PART3 execution,and setting it back inside PART2 data set
    def parsePlayerOfMatchSection(self, playerOfMatchSection):
        """Extract the player-of-the-match name, profile link and country."""
        return {
            "player_of_the_match": playerOfMatchSection.css('div[class="best-player-name"]').css(
                'a::text').extract_first(),
            "player_of_the_match_profile": playerOfMatchSection.css('div[class="best-player-name"]').css(
                'a::attr(href)').extract_first(),
            "player_of_the_match_image_url": "",  # We are not loading image now, as it will be lazyimg.png anyway -
            # we will get the player image from PART3 output dictionary later
            "player_of_the_match_country": playerOfMatchSection.css(
                'span[class="best-player-team-name"]::text').extract_first()
        }
    # Extracts batsmen details, also takes care of batsmen who did not bat
    def parseBatsmen(self, battingTable):
        """Return one dict per batsman (name, profile url, runs, balls, SR)."""
        # batting table parsing
        batsmenList = []
        for aBattingRow in battingTable.css("tbody").css("tr"):
            tds = aBattingRow.css("td::text").extract()
            if aBattingRow.css('.batsman-cell').css("a::text").extract_first() is not None:
                # Found that when batsman is NOT out we get back "not out" in first element instead of RUNS,
                # handling this
                if tds[0].isnumeric():
                    batsmenList.append({
                        "name": aBattingRow.css('.batsman-cell').css("a::text").extract_first().strip(),
                        "profile_url": aBattingRow.css('.batsman-cell').css("a::attr('href')").extract_first(),
                        "runs": tds[0],
                        "balls_played": tds[1],
                        "strike_rate": tds[5]
                    })
                else:
                    batsmenList.append({
                        "name": aBattingRow.css('.batsman-cell').css("a::text").extract_first().strip(),
                        "profile_url": aBattingRow.css('.batsman-cell').css("a::attr('href')").extract_first(),
                        "runs": tds[1],
                        "balls_played": tds[2],
                        "strike_rate": tds[6]
                    })
        # Are there any "Yet to bat" players - lets add them too
        if len(batsmenList) < 11 and len(battingTable.css("tfoot").css("tr")) > 1:
            didNotBatLinks = battingTable.css("tfoot").css("tr")[1].css("td")[0].css("div")[0].css("a")
            for aPlayer in didNotBatLinks:
                batsmenList.append({
                    "name": aPlayer.css('span::text').extract_first().strip().replace(" ", ""),
                    "profile_url": aPlayer.css("::attr(href)").extract_first(),
                    "runs": "",
                    "balls_played": "",
                    "strike_rate": ""
                })
        return batsmenList
    # Extracts Bowler details
    def parseBowler(self, bowlingScores):
        """Return one dict per bowler (name, profile url, wickets, economy)."""
        # parsing bowling scores to extract each bowler
        bowlerList = []
        for aBowlingStatRow in bowlingScores.css("tbody").css("tr"):
            tds = aBowlingStatRow.css("td::text").extract()
            if aBowlingStatRow.css('.text-nowrap').css("a::text").extract_first() is not None:
                bowlerList.append({
                    "name": aBowlingStatRow.css('.text-nowrap').css("a::text").extract_first().strip(),
                    "profile_url": aBowlingStatRow.css('.text-nowrap').css("a::attr('href')").extract_first(),
                    "wickets": tds[3],
                    "econ": tds[4]
                })
        return bowlerList
    # This function is called when Part 1 yields a scrapy request
    # which is processed by Scrapy and response is handed over to this function
    # in addition it is passed with match_details_dict which is dictionary to store the match details
    # after crawling ends, the dictionary is used to output CSV file
    # This method checks whether Match is having an outcome or is it abandoned or draw
    def parse(self, resp, match_number, match_details_dict, match_players_dict):
        """Parse a scorecard response into ``match_details_dict[match_number]``
        and yield a player-profile request for every batsman and bowler."""
        inning = {}
        batsmen = []
        bowlers = []
        # tsvDict = {}
        # checking if match is abandoned
        if len(resp.css(".Collapsible").css(".Collapsible__contentInner").css(
                "table[class='w-100 table batsman']")) > 0:
            # Match seems abandoned, iterate over .batsman .small , to get player list
            for aBatsman in resp.css(".Collapsible").css(".Collapsible__contentInner"). \
                    css("table[class='w-100 table batsman']"). \
                    css("tbody").css("tr").css("td").css("a"):
                batsmen.append({
                    "name": aBatsman.css('::text').extract_first().strip().replace(" ", ""),
                    "profile_url": aBatsman.css('::attr(href)').extract_first(),
                    "runs": "",
                    "balls_played": "",
                    "strike_rate": ""
                })
            match_detail = self.parseMatchDetails(resp.css("table[class='w-100 table match-details-table']"))
            # Abandoned match: no innings data, so the statistics columns stay empty.
            tsvDict = {
                "player_of_the_match": "",
                "player_of_the_match_image_url": "",
                "player_of_the_match_country": "",
                "batsmen_runs": "",
                "batsmen_ball_played": "",
                "batsmen_strike_rate": "",
                "bowlers_wickets": "",
                "bowlers_econ_rate": "",
                "toss_won_by": match_detail["Toss"].split(",")[0].strip(),
                "umpires": match_detail["Umpires"] + ",TV:" + match_detail["TV Umpire"] + ",Reserve:" + match_detail[
                    "Reserve Umpire"],
                "match_referee": match_detail["Match Referee"]
            }
        else:
            # valid non-abandoned match, follow normal processing
            best_player_details = self.parsePlayerOfMatchSection(resp.css("div[class='best-player']"))
            for anInning in resp.css(".Collapsible"):
                inningCountry = anInning.css("h5").css(".header-title.label::text").extract_first().split("INNINGS")[
                    0].strip()
                inning[inningCountry] = self.parseInning(anInning.css(".Collapsible__contentInner"))
                batsmen.extend(inning[inningCountry]["batsmen"])
                bowlers.extend(inning[inningCountry]["bowlers"])
            # Flatten the per-player figures into comma-separated columns.
            batsmen_run_csv = ",".join([batter["runs"] for batter in batsmen])
            batsmen_balls_csv = ",".join([batter["balls_played"] for batter in batsmen])
            batsmen_strike_rate_csv = ",".join([batter["strike_rate"] for batter in batsmen])
            bowlers_wickets_csv = ",".join([bowler["wickets"] for bowler in bowlers])
            bowlers_econ_rate_csv = ",".join([bowler["econ"] for bowler in bowlers])
            scrapedScorecard = {"innings": inning,
                                "match_detail": self.parseMatchDetails(
                                    resp.css("table[class='w-100 table match-details-table']"))}
            tsvDict = {
                "player_of_the_match": best_player_details["player_of_the_match"],
                "player_of_the_match_image_url": best_player_details["player_of_the_match_image_url"],
                "player_of_the_match_country": best_player_details["player_of_the_match_country"],
                "batsmen_runs": batsmen_run_csv,
                "batsmen_ball_played": batsmen_balls_csv,
                "batsmen_strike_rate": batsmen_strike_rate_csv,
                "bowlers_wickets": bowlers_wickets_csv,
                "bowlers_econ_rate": bowlers_econ_rate_csv,
                "toss_won_by": scrapedScorecard["match_detail"]["Toss"].split(",")[0].strip(),
                "umpires": scrapedScorecard["match_detail"]["Umpires"] + ",TV:" + scrapedScorecard["match_detail"][
                    "TV Umpire"] + ",Reserve:" + scrapedScorecard["match_detail"]["Reserve Umpire"],
                "match_referee": scrapedScorecard["match_detail"]["Match Referee"]
            }
        match_details_dict[match_number] = tsvDict
        players = batsmen
        players.extend(bowlers)
        # Invoke processing for part 3 for every player
        for aPlayer in players:
            # Duplication check :: SCRAPY checks that automatically,
            # will fetch only if the new request is not already fetched
            yield scrapy.Request(resp.urljoin(aPlayer["profile_url"]), callback=self.playerProfileParser.parse,
                                 cb_kwargs={'match_players_dict': match_players_dict})
| deepkamal/DC_AMPBA_W2021 | 3_matchDetails.py | 3_matchDetails.py | py | 11,760 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "scrapy.Spider",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "scrapy.Spider",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "scrapy.Request",
"line_number": 226,
"usage_type": "call"
}
] |
38751132701 |
import os
import util
import config
# Walk the source tree and move every file whose name starts with "PXL_"
# into the mirrored location under the destination directory.
scanned_files = 0
for folder, _, names in os.walk(config.td):
    # folder looks like "c:\user\box\...\phonexx\images"
    for name in names:
        # name looks like "ADX-....jpg"
        source_path = folder + "\\" + name
        # source_path looks like "c:\td\...\[filename].jpg"
        subtree = folder.replace(config.td, "")
        # subtree looks like "\phonexx\images"
        scanned_files += 1
        if scanned_files % 1000 == 0:
            print("Scanned ", scanned_files, " files.")
        # Pixel photos are named "PXL_<timestamp>..."
        if name.split("_")[0] == 'PXL':
            target_dir = config.dd + "\\" + subtree
            util.move_this_file(source_path, target_dir, name)
            config.moved_files += 1
            if config.moved_files % 1000 == 0:
                print("Moved", config.moved_files, "files.")
print("Moved", config.moved_files, "Files.")
| maravis05/EyeCup-File-Scanner | move_pxl.py | move_pxl.py | py | 1,001 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.walk",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "config.td",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "config.td",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "config.dd",
"line_number": 27... |
37314756203 | import pandas as pd
import sklearn
import matplotlib.pyplot as plt
import seaborn
import load_data
# Load the train/test splits once: get_data() returns [train, test] and the
# original called it twice, loading everything from disk twice.
_splits = load_data.get_data()
train = _splits[0]
testX = _splits[1]
inv_labels = load_data.get_inverted_labels()
trainX = train.drop(['SalePrice'], axis = 1)  # features only
trainY = train['SalePrice']                   # regression target
def info_discrete(column):
    """Show a strip plot of SalePrice against `column`, relabeling the x-axis
    ticks when the column was label-encoded (present in inv_labels)."""
    axis = seaborn.stripplot(x = column, y = train['SalePrice'], data = train)
    if column in inv_labels:
        axis.set_xticklabels(inv_labels[column].values())
    plt.show()
def unsignificant_deletion():
    """Return the names of columns whose correlation with SalePrice is weak
    (|r| < 0.35) and are therefore candidates for dropping."""
    correlations = train.corrwith(train.SalePrice)
    return [name for name, r in correlations.items() if -0.35 < r < 0.35]
# Drop the weakly-correlated columns from both splits.
trainX.drop(unsignificant_deletion(), axis = 1, inplace = True)
testX.drop(unsignificant_deletion(), axis = 1, inplace = True)
# Impute missing training values with category codes / zeros.
trainX['Alley'].fillna(2, inplace = True)
trainX['GarageType'].fillna(6, inplace = True)
trainX['GarageYrBlt'].fillna(0, inplace = True)
trainX['GarageFinish'].fillna(3, inplace = True)
trainX['BsmtQual'].fillna(4, inplace = True)
trainX['MasVnrArea'].fillna(0, inplace = True)
trainX['LotFrontage'].fillna(0, inplace = True)
trainX['PoolQC'].fillna(3, inplace = True)
# NOTE(review): the matching test-set imputation below is commented out, so
# testX still contains NaNs when inspected further down - confirm intended.
# testX['Alley'].fillna(2, inplace = True)
# testX['GarageType'].fillna(6, inplace = True)
# testX['GarageYrBlt'].fillna(0, inplace = True)
# testX['GarageFinish'].fillna(3, inplace = True)
# testX['BsmtQual'].fillna(4, inplace = True)
# testX['MasVnrArea'].fillna(0, inplace = True)
# testX['LotFrontage'].fillna(0, inplace = True)
# testX['PoolQC'].fillna(3, inplace = True)
# print(trainX['Alley'].unique())
print(testX.info())
print(testX['KitchenQual'].unique())
{
"api_name": "load_data.get_data",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "load_data.get_data",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "load_data.get_inverted_labels",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "seab... |
24985299915 | #Importing libraries
from dash import Dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from random import randint
import plotly.express as px
# Creating dash environment
app = Dash(__name__)
# Constructing the layout: title, two numeric inputs (N points, max range K),
# the scatter graph, and a read-only area echoing the current selection.
app.layout = html.Div([
    # Title
    html.Div(dcc.Markdown("Random number plotter"), style={"textAlign":"center", 'font-size':'300%'}),
    # Num. of point selection area, our N
    html.Div(["Number of points: ", dcc.Input(id='number_of_pts', value=100, type='number', min=1, style={'height': '25px', 'width':'100px'})], style={'padding-left':'80px', 'padding-top':'30px', 'display': 'inline-block', 'font-size':'150%'}),
    # Max range selection area, our K
    html.Div(["Max range: ", dcc.Input(id='upper_bound', value=100, type='number', min=1, style={'height': '25px', 'width': '100px'})], style={'padding-left': '50px', 'display': 'inline-block', 'font-size':'150%'}),
    # Our scatter plot
    dcc.Graph(id='random_graph', style={'height':'800px'}),
    # Title for the selected data area
    html.Div([dcc.Markdown("Selected points: ", style={'padding-left':'80px', 'font-size':'200%'})]),
    # Selected data area
    html.Div(html.Pre(id='selected_data', style={'border': 'thin lightgrey solid', 'overflowY': 'scroll', 'font-size':'200%'}), style={'width':'90%', 'padding-left':'80px'})])
# Callback function for number of points and range selection
@app.callback(
    Output("random_graph", "figure"),
    [Input("number_of_pts", "value"),
     Input("upper_bound", "value")])
def update_graph(number_of_pts, max_range):
    """Redraw the scatter plot with `number_of_pts` random values in
    [1, max_range]; return an empty figure while either input is cleared."""
    # Dash passes None while an input box is empty; the original compared
    # ``!= None`` and built the list with a manual append loop.
    if number_of_pts and max_range is not None:
        values = [randint(1, max_range) for _ in range(number_of_pts)]
        fig = px.scatter(y=values, labels={"x": "index", "y": "value"})
        fig.update_layout(showlegend=False)
        return fig  # return updated scatter plot
    return px.scatter()  # Return empty scatter plot
# Callback function for graph selection
@app.callback(
    Output("selected_data", "children"),
    [Input("random_graph", "selectedData")])
def update_selecteData(data):
    """List the points currently box/lasso-selected on the graph.

    `data` is None until a selection is made; return an empty string then.
    """
    try:
        points = data["points"]  # raises TypeError while data is None
        # Make a string list of selected data, from Indexes and Values
        return ["Index: {}, Value: {} \n".format(p["x"], p["y"]) for p in points]
    except (TypeError, KeyError):
        # No selection yet / unexpected payload; the original used a bare
        # except and also left a debug print(data) behind.
        return ""
# Start the Dash development server (blocks until interrupted).
app.run_server()
| Miautawn/simple-DashBoard-with-Dash | random_plotter.py | random_plotter.py | py | 2,553 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "dash.Dash",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "dash_core... |
10230332445 | """
This file defines the recorder classes which log eval results in different ways,
such as to a local JSON file or to a remote Snowflake database.
If you would like to implement a custom recorder, you can see how the
`LocalRecorder` and `Recorder` classes inherit from the `RecorderBase` class and
override certain methods.
"""
import atexit
import contextlib
import dataclasses
import logging
import threading
import time
from contextvars import ContextVar
from datetime import datetime, timezone
from typing import Any, List, Optional, Sequence, Text
import blobfile as bf
import requests
import evals
from evals.base import RunSpec
from evals.data import jsondumps
from evals.utils.misc import t
from evals.utils.snowflake import SnowflakeConnection
logger = logging.getLogger(__name__)
MIN_FLUSH_EVENTS = 100  # buffer at least this many events before flushing
MAX_SNOWFLAKE_BYTES = 16 * 10**6  # Snowflake's maximum payload size per write
MIN_FLUSH_SECONDS = 10  # wait at least this long between flushes
# Context-local default recorder, installed while a sample is being recorded.
_default_recorder: ContextVar[Optional["RecorderBase"]] = ContextVar(
    "default_recorder", default=None
)
def default_recorder() -> Optional["RecorderBase"]:
    """Return the recorder installed as the context default, if any."""
    return _default_recorder.get()
@dataclasses.dataclass
class Event:
    """A single recorded eval event; serialized as one row of the run log."""
    run_id: str  # identifier of the eval run this event belongs to
    event_id: int  # monotonically increasing index within the run
    sample_id: Optional[str]  # sample the event was recorded under, if any
    type: str  # event kind, e.g. "match", "sampling", "metrics"
    data: dict  # event payload
    created_by: str  # author carried over from the run spec
    created_at: str  # creation timestamp (stringified UTC datetime)
class RecorderBase:
    """
    The standard events for which recording methods are provided are:
    - `match`: A match or non match, as specified by the `correct` bool, between
      the `expected` and `picked` results.
    - `embedding`: An embedding of the `prompt` of type `embedding_type`.
    - `sampling`: What was `sampled` from the model given the input `prompt`.
    - `cond_logp`: The conditional log probability, as `logp`, of the
      `completion` from the model given the input `prompt`.
    - `pick_option`: The option `picked` by the model out of the valid `options`
      given the input `prompt`.
    - `raw`: A raw sample specified by the `data`.
    - `metrics`: A set of metrics specified by the `kwargs`.
    - `error`: An `error` along with an accompanying `msg`.
    - `extra`: Any extra `data` of interest to be recorded.
    For these events, helper methods are defined at the bottom of this file.
    More generally, you can record any event by calling `record_event` with the
    event `type` and `data`.
    Finally, you can also record a final report using `record_final_report`.
    """
    def __init__(
        self,
        run_spec: evals.base.RunSpec,
    ) -> None:
        # Per-task sample id, tracked via a ContextVar so concurrent samples
        # (threads / async tasks) each see their own value.
        self._sample_id: ContextVar[Optional[int]] = ContextVar("_sample_id", default=None)
        self.run_spec = run_spec
        self._events: List[Event] = []  # in-memory buffer of recorded events
        self._last_flush_time = time.time()
        self._flushes_done = 0  # incremented by subclasses when a flush completes
        self._written_events = 0  # events already handed to a flush
        self._flushes_started = 0
        self._event_lock = threading.Lock()
        self._paused_ids: List[str] = []  # sample ids whose events are dropped
        atexit.register(self.flush_events)  # best-effort flush at interpreter exit
    @contextlib.contextmanager
    def as_default_recorder(self, sample_id: str):
        """Install this recorder (and *sample_id*) as the context default."""
        sample_id_token = self._sample_id.set(sample_id)
        default_recorder_token = _default_recorder.set(self)
        yield
        _default_recorder.reset(default_recorder_token)
        self._sample_id.reset(sample_id_token)
    def current_sample_id(self) -> Optional[str]:
        """Return the sample id bound to the current context, if any."""
        return self._sample_id.get()
    def pause(self):
        """Stop recording events for the current sample until unpause()."""
        sample_id = self.current_sample_id()
        with self._event_lock:
            if sample_id not in self._paused_ids:
                self._paused_ids.append(sample_id)
    def unpause(self):
        """Resume recording events for the current sample."""
        sample_id = self.current_sample_id()
        with self._event_lock:
            if sample_id in self._paused_ids:
                self._paused_ids.remove(sample_id)
    def is_paused(self, sample_id: str = None):
        """Return True if recording is paused for *sample_id* (default: current)."""
        if sample_id is None:
            sample_id = self.current_sample_id()
        with self._event_lock:
            return sample_id in self._paused_ids
    def get_events(self, type: str) -> Sequence[Event]:
        """Return all buffered events of the given type."""
        with self._event_lock:
            return [event for event in self._events if event.type == type]
    def get_metrics(self):
        """Return the payloads of all buffered "metrics" events."""
        return list(map(lambda x: x.data, self.get_events("metrics")))
    def get_scores(self, key: str):
        """Return the value of *key* from every buffered "metrics" event."""
        return list(map(lambda e: e.data[key], self.get_events("metrics")))
    def _create_event(self, type, data=None, sample_id=None):
        """Build an Event for the current run, resolving the sample id."""
        if sample_id is None:
            sample_id = self.current_sample_id()
        if sample_id is None:
            raise ValueError("No sample_id set! Either pass it in or use as_default_recorder!")
        return Event(
            run_id=self.run_spec.run_id,
            event_id=len(self._events),
            type=type,
            sample_id=sample_id,
            data=data,
            created_by=self.run_spec.created_by,
            created_at=str(datetime.now(timezone.utc)),
        )
    def _flush_events_internal(self, events_to_write: Sequence[Event]):
        """Persist a batch of events; subclasses override this."""
        pass
    def flush_events(self):
        """Unconditionally flush all not-yet-written buffered events."""
        with self._event_lock:
            if len(self._events) == self._written_events:
                return
            events_to_write = self._events[self._written_events :]
            self._written_events = len(self._events)
            self._flushes_started += 1
        self._flush_events_internal(events_to_write)
    def record_event(self, type, data=None, sample_id=None):
        """Append one event to the buffer, flushing when the event-count and
        time thresholds are both exceeded and no flush is in progress."""
        if sample_id is None:
            sample_id = self.current_sample_id()
        if sample_id is None:
            raise ValueError("No sample_id set! Either pass it in or use as_default_recorder!")
        if self.is_paused(sample_id):
            return
        with self._event_lock:
            # Reuse _create_event so the Event construction lives in exactly
            # one place; the original duplicated it inline here.
            event = self._create_event(type, data, sample_id)
            self._events.append(event)
            if (
                self._flushes_done < self._flushes_started
                or len(self._events) < self._written_events + MIN_FLUSH_EVENTS
                or time.time() < self._last_flush_time + MIN_FLUSH_SECONDS
            ):
                return
            events_to_write = self._events[self._written_events :]
            self._written_events = len(self._events)
            self._flushes_started += 1
            self._flush_events_internal(events_to_write)
    def record_match(self, correct: bool, *, expected=None, picked=None, sample_id=None, **extra):
        """Record whether the picked result matched the expected one."""
        assert isinstance(
            correct, bool
        ), f"correct must be a bool, but was a {type(correct)}: {correct}"
        if isinstance(expected, list) and len(expected) == 1:
            expected = expected[0]
        data = {
            "correct": bool(correct),
            "expected": expected,
            "picked": picked,
            **extra,
        }
        self.record_event("match", data, sample_id=sample_id)
    def record_embedding(self, prompt, embedding_type, sample_id=None, **extra):
        """Record that an embedding of `prompt` was produced."""
        data = {
            "prompt": prompt,
            "embedding_type": embedding_type,
            **extra,
        }
        self.record_event("embedding", data, sample_id=sample_id)
    def record_sampling(self, prompt, sampled, sample_id=None, **extra):
        """Record a model completion for the given prompt."""
        data = {
            "prompt": prompt,
            "sampled": sampled,
            **extra,
        }
        self.record_event("sampling", data, sample_id=sample_id)
    def record_cond_logp(self, prompt, completion, logp, sample_id=None, **extra):
        """Record the conditional log probability of a completion."""
        data = {
            "prompt": prompt,
            "completion": completion,
            "logp": logp,
            **extra,
        }
        self.record_event("cond_logp", data, sample_id=sample_id)
    def record_pick_option(self, prompt, options, picked, sample_id=None, **extra):
        """Record which option the model picked out of the valid options."""
        data = {
            "prompt": prompt,
            "options": options,
            "picked": picked,
            **extra,
        }
        self.record_event("pick_option", data, sample_id=sample_id)
    def record_raw(self, data):
        """Record a raw sample payload."""
        self.record_event("raw_sample", data)
    def record_metrics(self, **kwargs):
        """Record a set of metrics given as keyword arguments."""
        self.record_event("metrics", kwargs)
    def record_error(self, msg: str, error: Exception, **kwargs):
        """Record an error with its type and message."""
        data = {
            "type": type(error).__name__,
            "message": str(error),
        }
        data.update(kwargs)
        self.record_event("error", data)
    def record_extra(self, data, sample_id=None):
        """Record arbitrary extra data."""
        self.record_event("extra", data, sample_id=sample_id)
    def record_final_report(self, final_report: Any):
        """Record the final report; this base class only logs it."""
        logging.info(f"Final report: {final_report}. Not writing anywhere.")
def _green(str):
return f"\033[1;32m{str}\033[0m"
def _red(str):
return f"\033[1;31m{str}\033[0m"
class DummyRecorder(RecorderBase):
    """
    A "recorder" which only logs certain events to the console.
    Can be used by passing `--dry-run` when invoking `oaieval`.
    """

    def __init__(self, run_spec: RunSpec, log: bool = True):
        super().__init__(run_spec)
        # When True, each event is echoed to the console via logging.info.
        self.log = log

    def record_event(self, type, data, sample_id=None):
        """Buffer the event in memory and log a colored console line instead of persisting it."""
        # Imported here rather than at module level — presumably to avoid a
        # circular import; confirm before moving it.
        from evals.registry import registry

        if self.run_spec is None:
            return

        # Use the eval's first declared metric (defaulting to "accuracy") to
        # decide whether the console line is colored green (good) or red (bad).
        base_eval_spec = registry.get_base_eval(self.run_spec.base_eval)
        if base_eval_spec and len(base_eval_spec.metrics) >= 1:
            primary_metric = base_eval_spec.metrics[0]
        else:
            primary_metric = "accuracy"

        with self._event_lock:
            event = self._create_event(type, data)
            self._events.append(event)

        msg = f"Not recording event: {event}"

        if type == "match":
            accuracy_good = (
                primary_metric == "accuracy" or primary_metric.startswith("pass@")
            ) and (data.get("correct", False) or data.get("accuracy", 0) > 0.5)
            f1_score_good = primary_metric == "f1_score" and data.get("f1_score", 0) > 0.5
            if accuracy_good or f1_score_good:
                msg = _green(msg)
            else:
                msg = _red(msg)

        if self.log:
            logging.info(msg)
class LocalRecorder(RecorderBase):
    """
    A recorder which logs events to the specified JSON file.
    This is the default recorder used by `oaieval`.
    """

    def __init__(self,
                 log_path: Optional[str],
                 run_spec: RunSpec,
                 hidden_data_fields: Sequence[Text] = ()):
        """
        Initializes a LocalRecorder.

        Args:
            log_path (Optional[str]): Path to which the LocalRecorder will
                record events. Currently accepts local paths, google cloud
                storage paths, or Azure blob paths.
            run_spec (RunSpec): Passed to the superclass to provide metadata
                about the current evals run.
            hidden_data_fields (Sequence[Text]): Fields to avoid writing in the
                output. This is particularly useful when using a language model
                as an evaluator of sensitive customer data which should not be
                written to disc.
        """
        super().__init__(run_spec)
        self.event_file_path = log_path
        # Bugfix: the default was a mutable `[]` (shared across all instances);
        # an empty tuple is equivalent for read-only use and safe.
        self.hidden_data_fields = hidden_data_fields
        # Write the run spec as the first line of the JSONL log file.
        if log_path is not None:
            with bf.BlobFile(log_path, "wb") as f:
                f.write((jsondumps({"spec": dataclasses.asdict(run_spec)}) + "\n").encode("utf-8"))

    def _flush_events_internal(self, events_to_write: Sequence[Event]):
        """Append the given events to the JSONL log file, one JSON object per line."""
        start = time.time()
        try:
            lines = [jsondumps(event, exclude_keys=self.hidden_data_fields) + "\n" for event in events_to_write]
        except TypeError as e:
            # Surface which events failed to serialize before re-raising.
            logger.error(f"Failed to serialize events: {events_to_write}")
            raise e

        with bf.BlobFile(self.event_file_path, "ab") as f:
            f.write(b"".join([line.encode("utf-8") for line in lines]))

        logger.info(
            f"Logged {len(lines)} rows of events to {self.event_file_path}: insert_time={t(time.time()-start)}"
        )

        self._last_flush_time = time.time()
        self._flushes_done += 1

    def record_final_report(self, final_report: Any):
        """Append the final report as the last line of the JSONL log file."""
        with bf.BlobFile(self.event_file_path, "ab") as f:
            f.write((jsondumps({"final_report": final_report}) + "\n").encode("utf-8"))

        logging.info(f"Final report: {final_report}. Logged to {self.event_file_path}")
class HttpRecorder(RecorderBase):
    """A recorder which POSTs events in batches to an HTTP endpoint.

    If too large a fraction of events fails to send, it falls back to a
    LocalRecorder writing to `local_fallback_path`.
    """

    def __init__(
        self,
        url: str,
        run_spec: RunSpec,
        local_fallback_path: str,
        fail_percent_threshold: int = 5,
        batch_size: int = 100,
    ):
        super().__init__(run_spec)
        self.url = url
        self.batch_size = batch_size
        # Stored as a fraction in [0, 1].
        self.fail_percent_threshold = fail_percent_threshold / 100
        self.failed_requests = 0  # Running count of events in failed requests.
        self.local_fallback_path = local_fallback_path
        self.local_fallback_recorder = LocalRecorder(local_fallback_path, run_spec)
        logger.info(f"HttpRecorder initialized with URL {self.url}")

    def _flush_events_internal(self, events_to_write: Sequence[Event]):
        """Send pending events to the endpoint in batches of `self.batch_size`."""
        batch_size = self.batch_size
        for i in range(0, len(events_to_write), batch_size):
            batch = list(events_to_write[i : i + batch_size])
            try:
                self._send_event(batch)
            except RuntimeError as e:
                # Persist the failed batch locally so no events are lost.
                logger.error(f"Falling back to LocalRecorder due to error: {str(e)}")
                self.local_fallback_recorder._flush_events_internal(batch)
                raise RuntimeError(
                    "An error occurred when sending events. Your events have been saved locally using the Local recorder."
                )

    def _send_event(self, events: List[Event]):
        """POST one batch; raise RuntimeError once the failure rate exceeds the threshold."""
        # Convert the events to dictionaries
        events_dict = [dataclasses.asdict(event) for event in events]
        logger.debug(f"Sending events: {events_dict}")

        try:
            # Send the events to the specified URL
            response = requests.post(self.url, json=events_dict)
            if response.ok:
                logger.debug("Events sent successfully")
            else:
                logger.warning(f"Failed to send events: {response.text}")
                # Count every event contained in the failed request.
                self.failed_requests += len(events)
        except Exception as e:
            logger.warning(f"Failed to send events: {str(e)}")
            self.failed_requests += len(events)

        # Check if the proportion of failed events exceeds the threshold.
        # Bugfix: guard against ZeroDivisionError when no events have been
        # recorded yet (self._events may be empty, e.g. for a final report
        # sent before any event was buffered).
        total_events = len(self._events)
        fail_threshold = self.fail_percent_threshold
        # Human-readable form of the threshold for the error message.
        fail_threshold_str = str(fail_threshold * 100) + "%"
        if total_events > 0 and self.failed_requests / total_events > fail_threshold:
            raise RuntimeError(
                "The proportion of failed events has exceeded the threshold of: "
                + fail_threshold_str
                + "."
                + " Falling back to LocalRecorder. "
                "You can modify this via the cli flag --http-fail-percent-threshold"
            )

    def record_final_report(self, final_report: Any):
        """Send the final report as a single "final_report" event; fall back locally on failure."""
        report_event = Event(
            run_id=self.run_spec.run_id,
            event_id=len(self._events),
            sample_id=None,  # or you could use a specific id for final reports
            type="final_report",
            data=final_report,
            created_by=self.run_spec.created_by,
            created_at=str(datetime.now(timezone.utc)),
        )

        try:
            self._send_event([report_event])
            logging.info(f"Final report: {final_report}.")
            logging.info(f"Data logged to: {self.url}")
        except RuntimeError as e:
            logger.error(f"Falling back to LocalRecorder due to error: {str(e)}")
            self.local_fallback_recorder.record_final_report(final_report)
class Recorder(RecorderBase):
    """
    A recorder which logs events to Snowflake.
    Can be used by passing `--no-local-run` when invoking `oaieval`.
    """

    def __init__(
        self,
        log_path: Optional[str],
        run_spec: RunSpec,
        snowflake_connection: Optional[SnowflakeConnection] = None,
    ) -> None:
        super().__init__(run_spec)
        self.event_file_path = log_path
        self._writing_lock = threading.Lock()

        if snowflake_connection is None:
            snowflake_connection = SnowflakeConnection()
        self._conn = snowflake_connection

        # Also mirror the run spec to a local/blob JSONL file when a path is given.
        if log_path is not None:
            with bf.BlobFile(log_path, "wb") as f:
                f.write((jsondumps({"spec": dataclasses.asdict(run_spec)}) + "\n").encode("utf-8"))

        # Register the run row up-front; subsequent event rows reference it by run_id.
        query = """
            INSERT ALL INTO runs (run_id, model_name, eval_name, base_eval, split, run_config, settings, created_by, created_at)
            VALUES (%(run_id)s, %(model_name)s, %(eval_name)s, %(base_eval)s, %(split)s, run_config, settings, %(created_by)s, %(created_at)s)
            SELECT PARSE_JSON(%(run_config)s) AS run_config, PARSE_JSON(%(settings)s) AS settings
        """
        self._conn.robust_query(
            command=query,
            params={
                "run_id": run_spec.run_id,
                # TODO: model_name -> completion_fns
                "model_name": jsondumps(dict(completions=run_spec.completion_fns)),
                "eval_name": run_spec.eval_name,
                "base_eval": run_spec.base_eval,
                "split": run_spec.split,
                "run_config": jsondumps(run_spec.run_config),
                "settings": jsondumps(run_spec.run_config.get("initial_settings", {})),
                "created_by": run_spec.created_by,
                "created_at": run_spec.created_at,
            },
        )
        # Ensure buffered events are written out when the process exits.
        atexit.register(self.flush_events)

    def _flush_events_internal(self, events_to_write: Sequence[Event]):
        """Insert events into Snowflake in size-bounded batches, then mirror them to the JSONL file."""
        with self._writing_lock:
            try:
                lines = [jsondumps(event) + "\n" for event in events_to_write]
            except TypeError as e:
                # Surface which events failed to serialize before re-raising.
                logger.error(f"Failed to serialize events: {events_to_write}")
                raise e

            # Greedily pack [idx_l, idx_r) so each INSERT stays under MAX_SNOWFLAKE_BYTES.
            idx_l = 0
            while idx_l < len(events_to_write):
                total_bytes = 0
                idx_r = idx_l
                while (
                    idx_r < len(events_to_write)
                    and total_bytes + len(lines[idx_r]) < MAX_SNOWFLAKE_BYTES
                ):
                    total_bytes += len(lines[idx_r])
                    idx_r += 1
                # Every batch must make progress (a single oversized event would stall here).
                assert idx_r > idx_l

                start = time.time()
                buffer = [
                    (
                        event.run_id,
                        event.event_id,
                        event.sample_id,
                        event.type,
                        jsondumps(event.data),
                        event.created_by,
                        event.created_at,
                    )
                    for event in events_to_write[idx_l:idx_r]
                ]
                query = """
                INSERT INTO events (run_id, event_id, sample_id, type, data, created_by, created_at)
                SELECT Column1 AS run_id, Column2 as event_id, Column3 AS sample_id, Column4 AS type, PARSE_JSON(Column5) AS data, Column6 AS created_by, Column7 AS created_at
                FROM VALUES(%s, %s, %s, %s, %s, %s, %s)
                """
                self._conn.robust_query(command=query, seqparams=buffer, many=True)
                logger.info(
                    f"Logged {len(buffer)} rows of events to Snowflake: insert_time={t(time.time()-start)}"
                )
                idx_l = idx_r

            # Mirror all lines to the JSONL file after the Snowflake inserts succeed.
            with bf.BlobFile(self.event_file_path, "ab") as f:
                f.write(b"".join([line.encode("utf-8") for line in lines]))
            self._last_flush_time = time.time()
            self._flushes_done += 1

    def record_final_report(self, final_report: Any):
        """Write the final report to the JSONL file and attach it to the run row in Snowflake."""
        with self._writing_lock:
            with bf.BlobFile(self.event_file_path, "ab") as f:
                f.write((jsondumps({"final_report": final_report}) + "\n").encode("utf-8"))
            query = """
                UPDATE runs
                SET final_report = PARSE_JSON(%(final_report)s)
                WHERE run_id = %(run_id)s
            """
            self._conn.robust_query(
                command=query,
                params={
                    "run_id": self.run_spec.run_id,
                    "final_report": jsondumps(final_report),
                },
            )

    def record_event(self, type, data=None, sample_id=None):
        """Record an event, eagerly serializing `data` so bad payloads fail early."""
        # try to serialize data so we fail early!
        _ = jsondumps(data)
        return super().record_event(type, data, sample_id)
#########################################################################
### Helper methods which use the thread local global default recorder ###
#########################################################################
def current_sample_id() -> str:
    """Return `current_sample_id` from the thread-local default recorder."""
    return default_recorder().current_sample_id
def record_match(correct: bool, *, expected=None, picked=None, **extra):
    """Record a match event via the thread-local default recorder."""
    return default_recorder().record_match(correct, expected=expected, picked=picked, **extra)
def record_embedding(prompt, embedding_type, **extra):
    """Record an embedding event via the thread-local default recorder."""
    return default_recorder().record_embedding(prompt, embedding_type, **extra)
def record_sampling(prompt, sampled, **extra):
    """Record a sampling event via the thread-local default recorder."""
    return default_recorder().record_sampling(prompt, sampled, **extra)
def record_cond_logp(prompt, completion, logp, **extra):
    """Record a cond_logp event via the thread-local default recorder."""
    return default_recorder().record_cond_logp(prompt, completion, logp, **extra)
def record_pick_option(prompt, options, picked, **extra):
    """Record a pick_option event via the thread-local default recorder."""
    return default_recorder().record_pick_option(prompt, options, picked, **extra)
def record_raw(data):
    """Record a raw_sample event via the thread-local default recorder."""
    return default_recorder().record_raw(data)
def record_metrics(**extra):
    """Record a metrics event via the thread-local default recorder."""
    return default_recorder().record_metrics(**extra)
def record_error(msg: str, error: Optional[Exception] = None, **extra):
    """Record an error event via the thread-local default recorder."""
    return default_recorder().record_error(msg, error, **extra)
def record_extra(data):
    """Record an extra event via the thread-local default recorder."""
    return default_recorder().record_extra(data)
def record_event(type, data=None, sample_id=None):
    """Record an arbitrary event via the thread-local default recorder."""
    return default_recorder().record_event(type, data, sample_id)
def pause():
    """Pause the thread-local default recorder."""
    return default_recorder().pause()
def unpause():
    """Unpause the thread-local default recorder."""
    return default_recorder().unpause()
| openai/evals | evals/record.py | record.py | py | 23,030 | python | en | code | 12,495 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "contextvars.ContextVar",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "typing.Optio... |
6214826810 | from json import loads
from bot_core.utils.redis_topics import CMD_COLL_NAME
from bot_core.utils.action_tools import cmd_analysis, pub_sub
def main():
    """Subscribe to the command channel and dispatch incoming messages forever."""
    pub_sub.subscripe(CMD_COLL_NAME)
    while True:
        # Bugfix: get_message() can return None when no message is pending
        # (e.g. redis-py pub/sub); the original called .get() on it directly,
        # which raised AttributeError.
        message = pub_sub.get_message()
        msg_data = message.get("data") if message else None
        if msg_data:
            cmd_analysis(
                loads(msg_data)
            )


if __name__ == "__main__":
    main()
| KTOALE/tel_bot_coro | bot_core/src/main_core.py | main_core.py | py | 401 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "bot_core.utils.action_tools.pub_sub.subscripe",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "bot_core.utils.redis_topics.CMD_COLL_NAME",
"line_number": 8,
"usage_type": "argument"
},
{
"api_name": "bot_core.utils.action_tools.pub_sub",
"line_number"... |
7998920894 | import random
from . import users
from flask import request
import json
##### Hier kommt code, den ich einfach von Tina so übernommen habe. Wenn er schlecht ist ist Tina schuld ############
import os
from dotenv import load_dotenv
from flask import jsonify, Response
import pymongo
import hashlib # Die brauchen wir für die Passwörter
import string
from bson.json_util import dumps
load_dotenv() # use dotenv to hide sensitive credential as environment variables
DATABASE_URL = f'mongodb+srv://{os.environ.get("dbUser")}:{os.environ.get("dbPasswort")}' \
'@flask-mongodb-atlas.wicsm.mongodb.net/' \
'flaura?retryWrites=true&w=majority' # get connection url from environment
client = pymongo.MongoClient(DATABASE_URL) # establish connection with database
# plants.config['MONGO_DBNAME'] = 'restdb'
# plants.config['MONGO_URI'] = 'mongodb://localhost:27017/restdb'
# mongo = PyMongo(plants)
mydb = client.flaura
mycol = mydb.users
def jsonResponse(data):
    """Wrap an already-serialized JSON string in a Flask Response with JSON mimetype."""
    return Response(data, mimetype='application/json')
##################### Ende des Codes, den ich einfach nur von Tina übernommen habe #######################
###### Dokumentation der Daten für Login
###### E-Mail: userelement["email"]
###### Passwort: userelement["pwsha256"] <- Dieses Feld soll einen SHA256-Hash von dem Passwort enthalten, dieser ist gesaltet mit "TUCLAB21"
pwsalt = "TUCLAB21"
def getAllUsers():
    """Return every document from the users collection as a BSON-JSON string."""
    all_users = list(mycol.find({}))
    return dumps(all_users)
def generatePWhash(password):
    """Return the hex SHA-256 digest of the password concatenated with the module salt."""
    salted = password + pwsalt
    return hashlib.sha256(salted.encode('utf-8')).hexdigest()
def generateLoginToken(length=32):
    """Return a random alphanumeric login token of `length` characters."""
    # Nach https://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits
    alphabet = string.ascii_uppercase + string.ascii_lowercase + string.digits
    rng = random.SystemRandom()
    return ''.join(rng.choice(alphabet) for _ in range(length))
def generatePotToken(length=10):
    """Return a random uppercase/digit pot token of `length` characters."""
    # TODO was passiert wenn zwei Pots zufällig den gleichen Token bekommen
    # Nach https://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits
    alphabet = string.ascii_uppercase + string.digits
    rng = random.SystemRandom()
    return ''.join(rng.choice(alphabet) for _ in range(length))
@users.route('/users')
def usersA():
    """Return all user documents as a JSON response."""
    return jsonResponse(getAllUsers())
@users.route('/users/loginForm')
def loginForm():
    """Render a minimal HTML registration form (German UI text)."""
    return '<center>Ja Moin. Dann registrier dich mal ne Runde!<br><br><form method="post" action="/api/users/register">Name: <input type="text" name="name"><br>E-Mail: <input type="email" name="email"><br>E-Mail wiederholen: <input type="email" name="emailconfirm"><br>Passwort: <input type="password" name="password"><br>Passwort wiederholen: <input type="password" name="passwordconfirm"><br><br><input type="submit" value="Abschicken"></form> </center>'
# This page no longer works since the API function below started expecting a different request type (JSON body instead of form data)
@users.route('/api/users/register', methods=["POST"])
def registerUser():
    """Register a new user from a JSON body with "email", "password" and
    "passwordconfirm"; returns a JSON dict with "successful" and, on success,
    an "initialToken" for subsequent requests."""
    bestesAntwortDict = {}
    reqJson = request.get_json(force=True)
    if reqJson["password"] != reqJson["passwordconfirm"]:
        bestesAntwortDict["msg"] = "Passwörter stimmen nicht überein."
        bestesAntwortDict["successful"] = False
        return dumps(bestesAntwortDict)
    #if mycol.countDocuments({"email": reqJson["email"]}) > 0:
    #    bestesAntwortDict["msg"] = "Email already exists"
    #    bestesAntwortDict["successful"] = False
    #    return dumps(bestesAntwortDict)
    iniLoginToken = generateLoginToken()
    # Bugfix: read the password from the parsed body (reqJson) instead of
    # request.json — request.json fails when Content-Type is not
    # application/json, while get_json(force=True) above succeeds.
    newuser = {"email": reqJson["email"], "pwsha256": generatePWhash(reqJson["password"]), "pots": [],
               "tokens": [iniLoginToken]}
    new_id = mycol.insert_one(newuser).inserted_id
    bestesAntwortDict["successful"] = True
    bestesAntwortDict["initialToken"] = iniLoginToken
    return dumps(bestesAntwortDict)
@users.route('/api/users/loginRequest', methods=["POST"])
def attemptLogin():
    """Check "email"/"password" from the JSON body against the DB; on success
    mint a new login token, store it on the user and return it."""
    bestesAntwortDict = {}
    reqJson = request.get_json(force=True)
    susUser = mycol.find_one({"email": reqJson["email"]})
    # Bugfix: an unknown e-mail used to crash with a TypeError on
    # susUser["pwsha256"] (susUser is None); treat it as a failed login.
    if susUser is not None and generatePWhash(reqJson["password"]) == susUser["pwsha256"]:
        bestesAntwortDict["msg"] = "Login erfolgreich"
        # Mint a token the client can use to identify itself later.
        newtoken = generateLoginToken()
        if "tokens" in susUser.keys():
            susUser["tokens"].append(newtoken)
        else:
            susUser["tokens"] = [newtoken]
        mycol.save(susUser)
        bestesAntwortDict["token"] = newtoken
        bestesAntwortDict["loginSuccessful"] = True
    else:
        bestesAntwortDict["msg"] = "Login nicht erfolgreich"
        bestesAntwortDict["loginSuccessful"] = False
    return json.dumps(bestesAntwortDict)
@users.route('/api/users/logoutRequest', methods=["POST"])
def attemptLogout():
    """Invalidate the login token given as "token" in the JSON body."""
    bestesAntwortDict = {}
    # Bugfix: parse the body exactly once — the original mixed
    # get_json(force=True) and request.json, and request.json raises when
    # Content-Type is not application/json.
    token = request.get_json(force=True)["token"]
    susUser = mycol.find_one({"tokens": token})
    if susUser is not None:
        bestesAntwortDict["msg"] = "Logout erfolgreich"
        susUser["tokens"].remove(token)
        mycol.save(susUser)
        bestesAntwortDict["logoutSuccessful"] = True
    else:
        bestesAntwortDict["msg"] = "Logout nicht erfolgreich"
        bestesAntwortDict["logoutSuccessful"] = False
    return json.dumps(bestesAntwortDict)
@users.route('/api/users/getUser', methods=["POST"])
def getUserInfobyToken():
    """Return the user document matching the login token from the POST body."""
    susUser = mycol.find_one({"tokens": request.get_json(force=True)["token"]})
    if susUser is None:
        # No user holds this token.
        error_payload = {"msg": "Incorrect token", "error": True}
        return jsonResponse(json.dumps(error_payload))
    return jsonResponse(dumps(susUser))
@users.route('/api/users/newPot', methods=["POST"])
def createNewPot():
    """Create a new pot with a fresh, unique pot token for the user
    identified by the login token in the POST body."""
    susUser = mycol.find_one({"tokens": request.get_json(force=True)["token"]})
    if susUser is None:
        error_payload = {"msg": "Incorrect token", "error": True}
        return jsonResponse(json.dumps(error_payload))
    # Re-draw until the token is not used by any pot of any user.
    newToken = generatePotToken()
    while mycol.find_one({"pots.token": newToken}) is not None:
        newToken = generatePotToken()
    newPot = {
        "token": newToken,
        "sleepTime": 1,
        "criticalMoisture": 0,
        "waterAmountML": 0,
    }
    susUser["pots"].append(newPot)
    mycol.save(susUser)
    return 'hopefully successful'
@users.route('/api/users/deletePot', methods=["POST"])
def deletePot():
    """Delete the pot identified by "potToken" from the user owning "loginToken"."""
    bestesAntwortDict = {}
    # Consistency/bugfix: parse the body once with get_json(force=True) like
    # the other handlers (request.json raises on non-JSON Content-Type).
    reqJson = request.get_json(force=True)
    susUser = mycol.find_one({"tokens": reqJson["loginToken"]})
    if susUser is None:
        bestesAntwortDict["msg"] = "Incorrect token"
        bestesAntwortDict["error"] = True
        return jsonResponse(json.dumps(bestesAntwortDict))
    # Bugfix: the original removed elements from susUser["pots"] while
    # iterating over it, which can skip entries; filter into a new list.
    potToken = reqJson["potToken"]
    remaining = [pot for pot in susUser["pots"] if pot["token"] != potToken]
    if len(remaining) == len(susUser["pots"]):
        bestesAntwortDict["msg"] = "Pot either not existing or not yours"
        bestesAntwortDict["error"] = True
        return jsonResponse(json.dumps(bestesAntwortDict))
    susUser["pots"] = remaining
    mycol.save(susUser)
    bestesAntwortDict["msg"] = "Pot deleted"
    bestesAntwortDict["error"] = False
    return jsonResponse(json.dumps(bestesAntwortDict))
@users.route('/api/users/changePot', methods=["POST"])
def setPotValues():
    """Update sleepTime / criticalMoisture / waterAmountML (all optional) of the
    pot identified by "potToken", authenticated by the login token "token"."""
    bestesAntwortDict = {}
    reqJson = request.get_json(force=True)
    susUser = mycol.find_one({"tokens": reqJson["token"]})
    if susUser is None:
        bestesAntwortDict["msg"] = "Incorrect token"
        bestesAntwortDict["error"] = True
        return jsonResponse(json.dumps(bestesAntwortDict))
    for pot in susUser["pots"]:
        if pot["token"] == reqJson["potToken"]:
            if "sleepTime" in reqJson.keys():
                pot["sleepTime"] = reqJson["sleepTime"]
            if "criticalMoisture" in reqJson.keys():
                pot["criticalMoisture"] = reqJson["criticalMoisture"]
            if "waterAmountML" in reqJson.keys():
                pot["waterAmountML"] = reqJson["waterAmountML"]
            mycol.save(susUser)
            # Bugfix: the original ended this branch with a bare `return`,
            # which makes Flask raise ("view did not return a valid
            # response"); return a proper JSON success payload instead.
            bestesAntwortDict["msg"] = "Pot updated"
            bestesAntwortDict["error"] = False
            return jsonResponse(json.dumps(bestesAntwortDict))
    bestesAntwortDict["msg"] = "Pot either not existing or not yours"
    bestesAntwortDict["error"] = True
    return jsonResponse(dumps(bestesAntwortDict))
| rosemaxio/flauraBackend | users/Api.py | Api.py | py | 10,619 | python | de | code | 0 | github-code | 6 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "pymongo.MongoClient... |
12919232280 | import datetime
# Activity categories the user can log against.
categories = ['INACTIVE', 'WEB', 'AUDIO', 'VIDEO', 'GAMING']

# Bugfix: `raw_input` only exists in Python 2; `input` is the Python 3
# equivalent (and the file otherwise uses Python 3 style `print(...)`).
inp = input("Clear? Y/N\n")
if inp in ["y", "Y"]:
    # Truncate the log file.
    with open('log.txt', 'w') as f:
        f.write("")

while True:
    for i, c in enumerate(categories):
        print("{}: {}".format(i, c))
    cat = input()
    print("\n")
    time = datetime.datetime.now()
    # Append timestamp and chosen category, one per line.
    with open('log.txt', 'a') as f:
        f.write(str(time) + '\n' + str(cat) + '\n')
{
"api_name": "datetime.datetime.now",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 16,
"usage_type": "attribute"
}
] |
7129738963 | #!/usr/bin/env python
import pandas as pd
from collections import defaultdict
import argparse
def bowtie2bed(fn, fo):
    """
    From a bowtie output (tsv, NOT sam) file, return a BED file.
    :param fn: string
        name of bowtie default output tsv file
    :param fo: string
        name of bedfile output to write
    :return:
    """
    bowtie_headers = [
        "read_name", "strand", "chrom", "start", "seq", "ascii_score", "alt_align", "mismatches"
    ]
    df = pd.read_csv(fn, names=bowtie_headers, sep="\t")
    df['len'] = df['seq'].map(len)
    # NOTE(review): `[:-1]` yields a *list* of name parts, which pandas
    # serializes via its repr in the BED name column — confirm intended.
    df['read_name_fixed'] = df['read_name'].apply(lambda x: x.split("_")[0].split('#')[:-1])
    df['end'] = df['start'] + df['len']
    bed_columns = ['chrom', 'start', 'end', 'read_name_fixed', 'alt_align', 'strand']
    df[bed_columns].to_csv(fo, sep="\t", header=False, index=False)
def main():
    """Parse --in_file/--out_file from the command line and run the conversion."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--in_file", required=True)
    parser.add_argument("--out_file", required=True)
    args = parser.parse_args()
    # main func
    bowtie2bed(fn=args.in_file, fo=args.out_file)


if __name__ == "__main__":
    main()
| YeoLab/chim-eCLIP | bin/bowtie2bed.py | bowtie2bed.py | py | 1,281 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 30,
"usage_type": "call"
}
] |
38763808043 | import spotipy
from model import *
from enum import Enum
class EmotionsMood(Enum):
    """Creating constants to assign to different moods.

    NOTE(review): `happy` shares value 2 with `Energetic`, so it is an enum
    *alias* — EmotionsMood(2).name is always "Energetic". Confirm these codes
    match the emotion detector's output values.
    """
    Calm = 1  # Calm
    Energetic = 2
    happy = 2  # alias of Energetic (same value)
    sad = 0
def get_songs_features(sp, ids):
    """Fetch track metadata and audio features for a Spotify track id.

    Returns (track_values, column_names), aligned so the row can be tabulated.
    """
    meta = sp.track(ids)
    features = sp.audio_features(ids)[0]
    album_info = meta['album']

    track = [
        meta['name'],                      # name
        album_info['name'],                # album
        album_info['artists'][0]['name'],  # artist
        meta['id'],                        # id
        album_info['release_date'],        # release_date
        meta['popularity'],                # popularity
        meta['duration_ms'],               # length
        features['danceability'],
        features['acousticness'],
        features['energy'],
        features['instrumentalness'],
        features['liveness'],
        features['valence'],
        features['loudness'],
        features['speechiness'],
        features['tempo'],
        features['key'],
        features['time_signature'],
    ]
    columns = ['name','album','artist','id','release_date','popularity','length','danceability','acousticness','energy','instrumentalness',
               'liveness','valence','loudness','speechiness','tempo','key','time_signature']
    return track, columns
def recommendations(token,emmotion_value):
    """Get actual recommendations based on songs's features.

    Builds recommendations from the current user's first playlist, keeping
    only tracks whose predicted mood matches EmotionsMood(emmotion_value).
    Returns a list of {"imageUrl", "trackUrl", "name"} dicts, or an error
    string when no token is available.
    """
    playlistIdUrl=[]
    track_ids = []
    recommended_track=[]
    rec_tracks = []
    if token:
        sp = spotipy.Spotify(auth=token)
    else:
        print("Can't get token")
        return
    userId=sp.current_user()['id']
    if userId:
        # Only the first playlist is considered (limit=1).
        all_playlists = sp.current_user_playlists(limit=1,offset=0)
        for item in all_playlists['items']:
            if item['external_urls']['spotify'] not in playlistIdUrl:
                playlistIdUrl.append( item['external_urls']['spotify'])
        for playlistId in playlistIdUrl:
            Playlist = sp.user_playlist(userId, playlistId)
            tracks = Playlist["tracks"]
            songs = tracks["items"]
            for i in range(0, len(songs)):
                if songs[i]['track']['id'] != None and songs[i]['track']['id'] not in track_ids: # Removes the local tracks in your playlist if there is any
                    track_ids.append(songs[i]['track']['id'])
            # NOTE(review): seed_genres is passed as a single comma-joined
            # string inside a list — confirm the API expects separate strings.
            for id in track_ids:
                rec_tracks += sp.recommendations(seed_tracks=[id], seed_genres=['indian, happy, calm, chill'], limit=2, min_valence=0.3, min_popularity=60)['tracks']
            for track in rec_tracks:
                imageUrl=''
                if track['album']['images']:
                    imageUrl=track['album']['images'][0]['url']
                trackUrl=track['external_urls']['spotify']
                name=track['name']
                features=get_songs_features(sp,track["id"])
                # predict_mood comes from `model` (star import); compare
                # case-insensitively against the requested mood name.
                mood=predict_mood(features)
                # print( mood.upper())
                # print(EmotionsMood(emmotion_value).name.upper())
                if mood.upper()==EmotionsMood(emmotion_value).name.upper():
                    if trackUrl not in recommended_track:
                        recommended_track.append({'imageUrl': imageUrl,'trackUrl':trackUrl,'name':name})
        return recommended_track
        # playlist_recs = sp.user_playlist_create(username,
        #                                        name='Recommended Songs for Playlist by Amit - {}'.format(sourcePlaylist['name']))
        # #Add tracks to the new playlist
        # for i in rec_array:
        #     sp.user_playlist_add_tracks(username, playlist_recs['id'], i)
    else:
        return ("Token expired!!!")
| nagarro-hackathon-2023/python_ml | spotify.py | spotify.py | py | 3,947 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "enum.Enum",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "spotipy.Spotify",
"line_number": 54,
"usage_type": "call"
}
] |
29899432213 | import numpy as np
from absl import app, flags
HEADER_SIZE = 10   # bytes decoded at the start of each record in header-only mode
RECORD_SIZE = 100  # fixed record size in bytes (one output line per record)

FLAGS = flags.FLAGS
flags.DEFINE_string(
    "input_file",
    None,
    "Path to binary data file.",
    short_name="i",
)
flags.DEFINE_string(
    "output_file",
    None,
    "Path to output file (optional).",
    short_name="o",
)
flags.DEFINE_bool(
    "header_only",
    True,
    f"If set, only decode the first {HEADER_SIZE} bytes of every {RECORD_SIZE} bytes.",
)
flags.DEFINE_integer(
    "count",
    -1,
    "Only decode this many bytes; if -1, decode all.",
    short_name="c",
)
flags.mark_flag_as_required("input_file")
def main(argv):
    """Dump the input file as space-separated hex bytes, one record per line."""
    del argv  # Unused.
    print("Decoding", FLAGS.input_file)
    if FLAGS.count >= 0:
        print(f"Decoding the first {FLAGS.count} bytes")
    if FLAGS.header_only:
        print("Only decoding headers")
    data = np.fromfile(FLAGS.input_file, dtype=np.uint8, count=FLAGS.count)
    out_path = FLAGS.output_file or FLAGS.input_file + ".txt"
    with open(out_path, "w") as fout:
        for (idx,), byte in np.ndenumerate(data):
            # In header-only mode, skip bytes past the header of each record.
            if not (FLAGS.header_only and idx % RECORD_SIZE >= HEADER_SIZE):
                print(f"{byte:02x} ", end="", file=fout)
            # End of record: terminate the line.
            if idx % RECORD_SIZE == RECORD_SIZE - 1:
                print("", file=fout)


if __name__ == "__main__":
    app.run(main)
| exoshuffle/raysort | scripts/misc/decode.py | decode.py | py | 1,340 | python | en | code | 14 | github-code | 6 | [
{
"api_name": "absl.flags.FLAGS",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "absl.flags",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "absl.flags.DEFINE_string",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "absl.flags",
... |
39399855904 | import datetime
import decimal
import logging
import os
import re
from kensu.psycopg2.pghelpers import get_table_schema, get_current_db_info, pg_query_as_dicts
from kensu.utils.kensu_provider import KensuProvider
from kensu.utils.kensu import KensuDatasourceAndSchema
from kensu.utils.dsl.extractors.external_lineage_dtos import GenericComputedInMemDs
PG_CreateTableAs = 'PG_CreateTableAs'
PG_InsertTable = 'PG_InsertTable'
PG_CreateView = 'PG_CreateView'
# FIXME: quoted vs not quoted table names? e.g.:
# "testme"."testme_schema"."my_first_dbt_model" vs testme.testme_schema.my_first_dbt_model
def pg_relation_to_kensu_table_name(rel):
    """Return "database.schema.identifier" for a dbt PostgresRelation, else None."""
    # Matched via the type's repr string so dbt need not be imported here.
    if str(type(rel)) != "<class 'dbt.adapters.postgres.relation.PostgresRelation'>":
        return None
    return '.'.join([rel.database, rel.schema, rel.identifier])
def format_relation_name(relation, cur_catalog=None, cur_schema=None):
    """Build "catalog.schema.relname" from a dict or pglast node.

    Missing catalog/schema fall back to the supplied defaults; returns None
    when no relname is present. Empty parts are omitted from the join.
    """
    if isinstance(relation, dict):
        catalog = relation.get("catalogname") or None
        schema = relation.get("schemaname") or None
        relname = relation.get("relname") or None
    else:
        catalog = getattr(relation, 'catalogname', None) or None
        schema = getattr(relation, 'schemaname', None) or None
        relname = getattr(relation, 'relname', None) or None

    if not relname:
        return None
    parts = (catalog or cur_catalog, schema or cur_schema, relname)
    return '.'.join(part for part in parts if part)
def fetch_input_output_tables(stmt, cur_catalog, cur_schema):
    """Walk a pglast statement tree and collect source/target table names.

    Returns (input_tables, output_tables). Detection relies on the *string
    representation* of traversed nodes (e.g. "fromClause[0]={RangeVar}"), so
    it is tightly coupled to pglast's repr format.
    """
    from pglast import Node
    output_tables = []
    input_tables = []
    for node in Node(stmt.stmt).traverse():
        logging.debug("sql tree entry: "+str(node))
        # A plain relation in FROM is a read; "rel={RangeVar}" marks the
        # statement's write target.
        is_read_node = str(node) == 'fromClause[0]={RangeVar}'
        is_read_from_join_node = str(node) == 'fromClause[0]={JoinExpr}'
        is_write_node = 'rel={RangeVar}' in str(node)
        if is_read_node or is_write_node:
            table_name = format_relation_name(node.ast_node, cur_catalog, cur_schema)
            if is_read_node:
                input_tables.append(table_name)
            elif is_write_node:
                output_tables.append(table_name)
        if is_read_from_join_node:
            # A JOIN in FROM: both sides are inputs (when they are relations).
            for joined_node in [node.ast_node.larg, node.ast_node.rarg]:
                table_name = format_relation_name(joined_node, cur_catalog, cur_schema)
                if table_name:
                    input_tables.append(table_name)
    return input_tables, output_tables
def parse_pg_query(cursor, sql):
    """Classify a SQL string and extract its input/output table names.

    Returns (stmt_type, input_tables, output_tables), where stmt_type is one
    of PG_CreateTableAs / PG_InsertTable / PG_CreateView or None.
    """
    from pglast import parse_sql, ast
    from kensu.psycopg2.pghelpers import get_table_schema, get_current_db_info
    # fixme: this would be needed only if we support non-fully-qualified table references in SQL
    # cur_catalog, cur_schema = get_current_db_info(cursor)
    cur_catalog, cur_schema = None, None
    # Bugfix: initialize before the loop so an empty statement list returns
    # (None, [], []) instead of raising NameError at the return below.
    stmt_type = None
    input_tables = []
    output_tables = []
    parsed_tree = parse_sql(sql)
    for stmt in parsed_tree:
        # NOTE(review): with a multi-statement string, only the *last*
        # statement's classification is returned — confirm intended.
        stmt_type = None
        output_tables = []
        input_tables = []
        if isinstance(stmt, ast.RawStmt):
            if isinstance(stmt.stmt, ast.CreateTableAsStmt):
                stmt_type = PG_CreateTableAs
                input_tables, output_tables = fetch_input_output_tables(stmt, cur_catalog, cur_schema)
            if isinstance(stmt.stmt, ast.InsertStmt):
                stmt_type = PG_InsertTable
                # TODO... there is probably more cases than insert ... values
                table_name = format_relation_name(stmt.stmt.relation(), cur_catalog, cur_schema)
                output_tables.append(table_name)
            if isinstance(stmt.stmt, ast.ViewStmt):
                stmt_type = PG_CreateView
                output_table_name = format_relation_name(stmt.stmt.view, cur_catalog, cur_schema)
                output_tables = [output_table_name]
                input_tables, _ = fetch_input_output_tables(stmt, cur_catalog, cur_schema)
    return stmt_type, input_tables, output_tables
def pg_try_get_schema(cursor, tname):
    """Best-effort fetch of a table's schema as ``(field_name, field_type)`` pairs.

    Returns ``None`` when the lookup fails (e.g. temp table already renamed
    or dropped); failures are logged but never propagated.
    """
    # FIXME: move this to kensu-py
    try:
        return [(f.get('field_name') or 'unknown', f.get('field_type') or 'unknown')
                for f in get_table_schema(cursor, tname)]
    except Exception:
        # narrowed from bare `except:` so SystemExit/KeyboardInterrupt still propagate
        logging.warning(f"failed getting schema for Postgres table {tname}")
        return None  # explicit: callers treat a falsy schema as "unknown"
def pg_to_kensu_entry(kensu_inst, cursor, tname, compute_stats=True):
    """Build a KensuDatasourceAndSchema entry for Postgres table *tname*.

    Schema is fetched best-effort; column stats are computed only when
    *compute_stats* is set and a schema was found.
    """
    # for postgres & mysql, dbt creates temporary tables and rename them later
    # we want the final table name in kensu
    # FIXME: this renaming might cause issues when fetching schema!!! should it happen here?
    cleaned_tname = tname.replace('__dbt_tmp', '')
    maybe_schema = pg_try_get_schema(cursor=cursor, tname=tname)
    logging.warning(f"pg_schema: {maybe_schema}")
    stats_values = None
    if compute_stats and maybe_schema:
        # FIXME: use a fresh cursor?
        stats_values = query_stats(cursor, schema_fields=maybe_schema, orig_tname=tname)
        logging.info(f'final Postgres stats values: {stats_values}')
    # dsn_parameters gives the live connection's host/port for the URI
    server_info = cursor.connection.info.dsn_parameters
    # FIXME: make sure the Postgres URI is consistent among all different collectors
    # (e.g. is port always explicit vs default port)
    ds_path = f"postgres://{server_info['host']}:{server_info['port']}/{cleaned_tname}"  # FIXME?
    entry = KensuDatasourceAndSchema.for_path_with_opt_schema(
        ksu=kensu_inst,
        ds_path=ds_path,  # FIXME?
        ds_name=cleaned_tname,  # FIXME?
        format='Postgres table',
        # FIXME: ds_path like postgres://localhost:5432/a.b.c seem to cause error in Kensu webui
        # categories=['logical::'+ f"postgres :: {server_info['host']}:{server_info['port']} :: {cleaned_tname}"],
        categories=['logical::' + f"{cleaned_tname}"],
        maybe_schema=maybe_schema,
        # stats are captured eagerly above; the lambda just returns them lazily
        f_get_stats=lambda: stats_values
    )  # type: KensuDatasourceAndSchema
    return entry
def report_postgres(conn_mngr, cursor, sql, bindings):
    """Parse an executed Postgres statement and report lineage to Kensu.

    Only CREATE TABLE AS, INSERT and CREATE VIEW are tracked; anything else
    returns early.  Table names must be fully qualified (catalog.schema.table,
    i.e. contain exactly two dots) to be converted into Kensu entries.
    """
    if bindings is not None:
        # Also we process the "%s" with `bindings` => otherwise the pglast parser fails
        # so the result is
        # NOTE(review): placeholders are replaced by their positional index,
        # not by the actual binding values - confirm this is intentional.
        num_to_replace = len(re.findall("%.", sql))
        sql = sql.replace("%s", "{}").format(*range(0, num_to_replace)) # bindings)
    from kensu_reporting import get_kensu_agent
    kensu_inst = get_kensu_agent()
    stmt_type, input_tables, output_tables = parse_pg_query(cursor=cursor, sql=sql)
    # e.g. input_tables=['source_data'], output_tables=['testme.testme_schema.my_first_dbt_model__dbt_tmp']
    # input might contain false `table names`, being the subquery aliases inside the SQL `WITH` statement, e.g.:
    # WITH source_data as (select 1) select * from source_data
    # P.S. for now only fully-qualified table names supported in SQL (e.g. to remove subquery aliases)
    convert_valid_tables_and_fetch_stats_fn = lambda tables: [
        pg_to_kensu_entry(kensu_inst, cursor, t)
        for t in tables if t.count('.') == 2]
    if stmt_type == PG_CreateTableAs:
        logging.info(f'POSTGRES create table. SQL: {sql}')
        all_kensu_inputs = convert_valid_tables_and_fetch_stats_fn(input_tables)
    elif stmt_type == PG_InsertTable:
        # Considering that we are currently in a model inserting from `seed`
        from kensu_reporting import get_current_thread_seeds
        seed_inputs = get_current_thread_seeds()
        inputs = convert_valid_tables_and_fetch_stats_fn(input_tables)
        # TODO probably other cases than just seed_inputs?
        all_kensu_inputs = [*seed_inputs, *inputs]
        logging.info(f'POSTGRES insert. SQL: {sql}')
    elif stmt_type == PG_CreateView:
        all_kensu_inputs = convert_valid_tables_and_fetch_stats_fn(input_tables)
        logging.debug(f'POSTGRES create view. SQL: {sql}')
    else:
        logging.info(f"POSTGRES untracked statement: sql={sql}")
        return
    if all_kensu_inputs and output_tables:
        outputs = [pg_to_kensu_entry(kensu_inst, cursor, o)
                   for o in output_tables if o.count('.') == 2]
        for output in outputs:
            # full in->out field mapping; skip outputs with no lineage at all
            lineage=GenericComputedInMemDs.for_direct_or_full_mapping(all_inputs=all_kensu_inputs,
                                                                      out_field_names=output.field_names())
            if len(lineage.lineage) <= 0:
                continue
            lineage.report(
                ksu=kensu_inst,
                df_result=output,
                operation_type='NA',
                report_output=True,
                register_output_orig_data=True
            )
            kensu_inst.report_with_mapping()
def query_stats(cursor, schema_fields, orig_tname):
    """Run one aggregate query against *orig_tname* and collect column stats.

    Builds a single SELECT with all per-column aggregations produced by
    pg_generate_fallback_stats_queries plus a row count, then flattens the
    single result row into ``{"col.stat": value, "nrows": n}``.
    Dots in column names are escaped as ``__ksu__`` inside SQL aliases and
    restored in the returned keys.
    """
    stats_aggs = pg_generate_fallback_stats_queries(schema_fields)
    input_filters=None
    filters = ''
    if input_filters is not None and len(input_filters) > 0:
        filters = f"WHERE {' AND '.join(input_filters)}"
    selector = ",".join([sql_aggregation + " " + col.replace(".","__ksu__") + "_" + stat_name
                         for col, stats_for_col in stats_aggs.items()
                         for stat_name, sql_aggregation in stats_for_col.items()])
    stats_sql = f"select {selector}, sum(1) as nrows from {str(orig_tname)} {filters}"
    logging.info(f'SQL query to fetch Postgres stats: {stats_sql}')
    stats_result = pg_query_as_dicts(cur=cursor, q=stats_sql)
    logging.debug(f'Postgres stats: {stats_result}')
    r = {}
    # FIXME: hmm this logic seem quite shared with BigQuery, extract common parts?
    for row in stats_result:
        if row.get('nrows'):
            r['nrows'] = row['nrows']
        # extract column specific stats
        for col, stat_names in stats_aggs.items():
            for stat_name in stat_names.keys():
                result_column = col.replace(".","__ksu__") + "_" + stat_name
                # looks like postgres:13 return only lowercase
                v = row.get(result_column.lower()) or row.get(result_column)
                # temporal values -> epoch milliseconds; "%s" relies on platform strftime
                if v.__class__ in [datetime.date, datetime.datetime, datetime.time]:
                    v = int(v.strftime("%s") + "000")
                if v.__class__ in [decimal.Decimal]:
                    v = float(v)
                if v is None:
                    # FIXME: this might be misleading actually
                    v = 0
                r[(col + "." + stat_name).replace("__ksu__",".")] = v
        break # there should be only one row here
    return r
def pg_generate_fallback_stats_queries(schema_fields):
    """Map each ``(field_name, field_type)`` pair to its SQL stat aggregations.

    Numeric columns get min/max/mean/nullrows, temporal ones min/max/nullrows,
    booleans a true-count, and text columns a distinct-count.  Field names are
    double-quoted because Postgres lowercases unquoted identifiers.
    Unrecognized types are skipped.
    See https://www.postgresql.org/docs/9.5/datatype.html
    """
    numeric_types = {"INTEGER", "INT", "INT4", "DECIMAL", "SMALLINT", "INT2", "FLOAT",
                     "FLOAT4", "FLOAT8", "FLOAT64", "REAL", "NUMERIC", "BIGINT", "INT8"}
    temporal_types = {"TIMESTAMP", "TIMESTAMPTZ", "DATE", "TIME", "TIMETZ", "DATETIME"}
    boolean_types = {"BOOLEAN", "BOOL"}
    text_types = {"STRING", "TEXT"}
    stats_aggs = {}
    for field_name, field_type in schema_fields:
        field_type = field_type.upper()
        quoted = f'"{field_name}"'
        nullrows_agg = f"sum(num_nulls({quoted}))"
        if field_type in numeric_types:
            stats_aggs[field_name] = {
                "min": f"min({quoted})",
                "max": f"max({quoted})",
                "mean": f"avg({quoted})",
                "nullrows": nullrows_agg,
            }
        elif field_type in temporal_types:
            stats_aggs[field_name] = {
                "min": f"min({quoted})",
                "max": f"max({quoted})",
                "nullrows": nullrows_agg,
            }
        elif field_type in boolean_types:
            stats_aggs[field_name] = {
                "true": f"sum(case {quoted} when true then 1 else 0 end)",
                "nullrows": nullrows_agg,
            }
        elif field_type in text_types:
            stats_aggs[field_name] = {
                "levels": f"count(distinct {quoted})",
                "nullrows": nullrows_agg,
            }
    return stats_aggs
| Fundamentals-of-Data-Observability/handson | python_environment/volume/week2/dbt/dbt-do/dbt-ast/kensu_postgres.py | kensu_postgres.py | py | 12,258 | python | en | code | 8 | github-code | 6 | [
{
"api_name": "pglast.Node",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "pglast.parse_sql",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "pglast.ast.RawStmt",
"... |
70749170429 | #Server dependencies
#from gevent.pywsgi import WSGIServer
from threading import Thread
from flask import Flask, request, send_from_directory
from flask_mobility import Mobility
import os
#Apis used
from assistant import sendToAssistant
#File management
#from bs4 import BeautifulSoup
import codecs
#import re
import json
#import logging
#Miscelaneous
from datetime import datetime
#Warnings
#import warnings
#warnings.filterwarnings("ignore")
#Logging configuration set to debug on history.log file
#logging.basicConfig(filename='history.log', level=logging.DEBUG)
#logging.basicConfig(
# format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
def run():
    """Start the HTTP server (blocking); intended to run on its own thread."""
    #Flask built in deploy for development (lazy loading)
    app.run(host='0.0.0.0',port=8081)
    #WSGIServer deploy for production.
    #WSGIServer(('', 8081), app).serve_forever()
# Cache-busting helper: stamps today's date into a page template.
def cacheWorkaround(file):
    """Return *file*'s content with every REPLACE placeholder set to today's date."""
    today = datetime.today().strftime('%Y-%m-%d')
    content = file.read()
    return content.replace('REPLACE', today)
#Open html files
def loadPage(src):
    """Return the UTF-8 decoded contents of the file at *src*.

    The handle is now closed deterministically via ``with`` (the original
    left it open until garbage collection).
    """
    #For cache reloading on load
    #return cacheWorkaround(src)
    with codecs.open(src, "r", "utf-8") as f:
        return f.read()
# Designated thread for the server process.
def keep_alive():
    """Start the web server on a background thread so the caller returns immediately."""
    Thread(target=run).start()
#Flask app
app = Flask(__name__)
# Mobility wraps the app so views can distinguish mobile clients.
Mobility(app)
#Disable unwanted dependencies logging
#werkzeugLog = logging.getLogger('werkzeug')
#werkzeugLog.disabled = True
#requestsLog = logging.getLogger("urllib3.connectionpool")
#requestsLog.disabled = True
@app.route('/')
def main():
    """Serve the landing page (index.html)."""
    #Main endpoint corresponds to index.html
    site = loadPage("index.html")
    return site
@app.route('/favicon.ico')
def favicon():
    """Serve the favicon from the static folder with the proper MIME type."""
    return send_from_directory(
        os.path.join(app.root_path, 'static'),
        'favicon.ico',
        mimetype='image/vnd.microsoft.icon')
@app.route('/input', methods=['GET'])
@app.route('/demo/input', methods=['GET'])
def web():
    """Relay the client's message to Watson Assistant.

    Returns ``"<json-response>|<session_id>"``; on any assistant/server
    failure the response text is the literal string ``"Error"``.
    """
    #server endpoint for client-watson connection
    # Default to '' so a request without ?msg= no longer raises
    # TypeError on the newline check (was a crash -> HTTP 500).
    msg = request.args.get('msg', '')
    if '\n' in msg:
        msg = msg.replace('\n', '')
    #logging.info('Incoming: ' + msg)
    session_id = ''
    try:
        #sends input to watson for message analize
        response, session_id = sendToAssistant(msg)
        #logging.info('Watson: ' + str(response))
    except Exception:
        # narrowed from bare `except:`; either watson's response was
        # uncalled for, or a server error occurred
        response = "Error"
    #logging.info('Out: ' + response)
    return json.dumps(response)+'|'+str(session_id)
if __name__ == '__main__':
    # Launch the server in its own thread when executed as a script.
    keep_alive()
| Creativity-Hub/chatbot_template | main.py | main.py | py | 2,540 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "datetime.datetime.today",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "codecs.open",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "threading.Threa... |
36312680043 | import pandas as pd
from xml.etree import ElementTree as ET
import requests
from datetime import datetime
import matplotlib.pyplot as plt
# Query parameters (mostly illustrative; the values are inlined in `url` below).
q_range = {'Range': 1}
q_resp_group = {'ResponseGroup': 'History'}
# NOTE(review): this is a set literal, not a dict - presumably meant to be a
# URL constant or {'Url': ...}; it is unused below, confirm intent.
q_url_WHO = {'https://www.who.int/'}
q_date = datetime.date(datetime.now())
# AWIS TrafficHistory request for the JHU COVID map, 31 days from 2020-03-01.
url = "https://awis.api.alexa.com/api?" \
"Action=TrafficHistory" \
"&Range=31" \
"&ResponseGroup=History" \
"&Url=https://coronavirus.jhu.edu/map.html"\
"&Start=20200301"\

payload = {}
headers = {
# 'x-api-key': '''
# ### your_api_key_here ###
# '''
}
response = requests.request("GET", url, headers=headers, data=payload)
awis_xml_str = response.text.encode('utf8')
# parse directly from texts instead of file
root = ET.fromstring(awis_xml_str)
column_names = ["Date", "Page_View_Per_Million", "Page_View_Per_User", "Rank", "Reach_Per_Million"]
click_ratio_table = pd.DataFrame(columns=column_names)
# Descend Results > Result > Alexa > TrafficHistory > HistoricalData, then
# re-parse each HistoricalData element and append one row per daily record.
for results in root.findall('Results'):
    for result in results.findall('Result'):
        for alexa in result.findall('Alexa'):
            for trafficHistory in alexa.findall('TrafficHistory'):
                for historicalData in trafficHistory.findall('HistoricalData'):
                    historical_data = ET.tostring(historicalData)
                    root2 = ET.fromstring(historical_data)
                    for data in root2:
                        # child order assumed: Date, PageViews(PerMillion, PerUser), Rank, Reach(PerMillion)
                        date = data[0].text
                        ppm = pd.to_numeric(data[1][0].text)
                        ppu = pd.to_numeric(data[1][1].text)
                        rank = pd.to_numeric(data[2].text)
                        rpm = pd.to_numeric(data[3][0].text)
                        new_row = [date, ppm, ppu, rank, rpm]
                        click_ratio_table.loc[len(click_ratio_table)] = new_row
click_ratio_table = pd.DataFrame(click_ratio_table)
# plt.cla()
# plt.plot(click_ratio_table.loc[:, 'Date'], click_ratio_table.loc[:, 'Page_View_Per_Million'])
# plt.title('JHU Page_View_Per_Million')
# # click_ratio_table.shape()
{
"api_name": "datetime.datetime.date",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "reques... |
2799425380 | import os.path
import random
import torchvision.transforms as transforms
import torch
from data.base_dataset import BaseDataset, get_params, get_transform, normalize
from data.image_folder import make_dataset
from PIL import Image
import numpy as np
class AlignedDataset(BaseDataset):
    """Paired (label map, real image) dataset, optionally with face boxes.

    Expects sibling folders ``<phase>_label``, ``<phase>_img`` and
    ``<phase>_facetexts128`` under ``opt.dataroot``, sorted so that entries
    align index-by-index across folders.
    """
    def initialize(self, opt):
        """Resolve folder paths and index all sample files."""
        self.opt = opt
        self.root = opt.dataroot
        ### label maps
        self.dir_label = os.path.join(opt.dataroot, opt.phase + '_label')
        self.label_paths = sorted(make_dataset(self.dir_label))
        ### real images
        if opt.isTrain:
            self.dir_image = os.path.join(opt.dataroot, opt.phase + '_img')
            self.image_paths = sorted(make_dataset(self.dir_image))
        ### load face bounding box coordinates size 128x128
        if opt.face_discrim or opt.face_generator:
            self.dir_facetext = os.path.join(opt.dataroot, opt.phase + '_facetexts128')
            print('----------- loading face bounding boxes from %s ----------' % self.dir_facetext)
            self.facetext_paths = sorted(make_dataset(self.dir_facetext))
        self.dataset_size = len(self.label_paths)
    def __getitem__(self, index):
        """Return a dict with the current (and, when available, next) label/image pair.

        `next_label`/`next_image` are 0 for the final index; with
        ``opt.gestures`` also at every 64th frame (sequence boundary -
        presumably clips are 64 frames long, confirm with data layout).
        """
        ### label maps
        paths = self.label_paths
        label_path = paths[index]
        label = Image.open(label_path).convert('RGB')
        params = get_params(self.opt, label.size)
        transform_label = get_transform(self.opt, params, method=Image.NEAREST, normalize=False)
        label_tensor = transform_label(label)
        original_label_path = label_path
        image_tensor = next_label = next_image = face_tensor = 0
        ### real images
        if self.opt.isTrain:
            image_path = self.image_paths[index]
            image = Image.open(image_path).convert('RGB')
            transform_image = get_transform(self.opt, params)
            image_tensor = transform_image(image).float()
        is_next = index < len(self) - 1
        if self.opt.gestures:
            is_next = is_next and (index % 64 != 63)
        """ Load the next label, image pair """
        if is_next:
            paths = self.label_paths
            label_path = paths[index+1]
            label = Image.open(label_path).convert('RGB')
            params = get_params(self.opt, label.size)
            transform_label = get_transform(self.opt, params, method=Image.NEAREST, normalize=False)
            next_label = transform_label(label).float()
            if self.opt.isTrain:
                image_path = self.image_paths[index+1]
                image = Image.open(image_path).convert('RGB')
                transform_image = get_transform(self.opt, params)
                next_image = transform_image(image).float()
        """ If using the face generator and/or face discriminator """
        if self.opt.face_discrim or self.opt.face_generator:
            facetxt_path = self.facetext_paths[index]
            facetxt = open(facetxt_path, "r")
            # whitespace-separated integer coordinates of the 128x128 face box
            face_tensor = torch.IntTensor(list([int(coord_str) for coord_str in facetxt.read().split()]))
        input_dict = {'label': label_tensor.float(), 'image': image_tensor,
                      'path': original_label_path, 'face_coords': face_tensor,
                      'next_label': next_label, 'next_image': next_image }
        return input_dict
    def __len__(self):
        # dataset length is driven by the label maps
        return len(self.label_paths)
    def name(self):
        return 'AlignedDataset'
{
"api_name": "data.base_dataset.BaseDataset",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "os.path.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.... |
71844650429 | from django.conf import settings
from django.urls import NoReverseMatch, reverse
def get_slug_or_pk(object, slug_field=None):
    """Return URL kwargs for *object*: ``{"slug": ...}`` or ``{"pk": ...}``.

    Falls back to the primary key when *slug_field* is absent on the object.
    Fixes: the original called ``hasattr(object, None)`` when *slug_field*
    was omitted, raising TypeError; now ``None`` safely means "use pk".
    Returns ``{}`` for a falsy *object*.
    (Parameter name ``object`` shadows the builtin but is kept for
    backward compatibility with keyword callers.)
    """
    res = dict()
    if object:
        use_slug = slug_field is not None and hasattr(object, slug_field)
        field = slug_field if use_slug else "pk"
        param = "slug" if use_slug else "pk"
        res.update({param: getattr(object, field)})
    return res
def get_urls_of_site(site, object=None, user=None):
    """Resolve the CRUD URLs of *site*, filtered by *user* permissions.

    Always tries ``list``/``create``; ``update``/``detail``/``delete`` only
    when *object* yields slug/pk kwargs.  Without a *user*, no permission
    check is applied.  Unresolvable routes are skipped (logged in DEBUG).
    """
    urls = {}
    app = site.model._meta.app_label
    model = site.model._meta.model_name
    kwargs = get_slug_or_pk(object, slug_field=site.slug_field)
    for action, perm in (("list", "view"), ("create", "add")):
        try:
            url_name = site.get_url_name(action)
            if not user:
                urls.update({action: reverse(url_name)})
            elif user.has_perm(f"{app}.{perm}_{model}"):
                urls.update({action: reverse(url_name)})
        except NoReverseMatch:
            if settings.DEBUG:
                print("DEBUG: Url not found: %s" % url_name)
    # object-level actions need slug/pk kwargs; bail out when none exist
    if not kwargs:
        return urls
    for action, perm in (
        ("update", "change"),
        ("detail", "view"),
        ("delete", "delete"),
    ):
        try:
            url_name = site.get_url_name(action)
            if not user:
                urls.update({action: reverse(url_name, kwargs=kwargs)})
            elif user.has_perm(f"{app}.{perm}_{model}"):
                urls.update({action: reverse(url_name, kwargs=kwargs)})
        except NoReverseMatch:
            if settings.DEBUG:
                print("DEBUG: Url not found: %s" % url_name)
    return urls
| dbsiavichay/faclab | viewpack/shortcuts.py | shortcuts.py | py | 1,595 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.reverse",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.urls.NoReverseMatch",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "dj... |
12050674214 | """Define settings for the window"""
from data.options import CUSTOM_SETTINGS_FILENAME
import json
from logger import logger
from os import path
# Fallback window size used when no saved settings file exists.
DEFAULT_SETTINGS = {
    "width": 1024,
    "height": 768
}
def get(_type, data):
    """Read setting *_type* from the saved settings file, else from *data*.

    Falls back to ``data[_type]`` when the file is missing, unreadable,
    not valid JSON, or lacks the key.

    Args:
        _type (string)
        data (dict)

    Returns:
        int | str
    """
    try:
        with open(path.join('.', 'assets', 'saved_settings', CUSTOM_SETTINGS_FILENAME), 'r') as _f:
            value = json.load(_f)[_type]
    except (OSError, ValueError, KeyError):
        # narrowed from bare `except:` - file I/O, JSON decode
        # (JSONDecodeError is a ValueError) or missing key
        value = data[_type]
    logger.info("Load %s : %s", _type, value)
    return value
WIDTH = get("width", DEFAULT_SETTINGS)
HEIGHT = get("height", DEFAULT_SETTINGS)
FPS = 60  # target frame rate
TITLE = 'Donjons et Dragons'  # window title
TILESIZE = 64  # pixel size of one map tile
| Barbapapazes/dungeons-dragons | config/window.py | window.py | py | 779 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "data.options.CUSTOM_SETTINGS_FILENAME",
"line_number": 27,
"usage_type": "argument"
},
{
"api_name": "os.path",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "json.l... |
27082695603 | import pytest
from httpx import AsyncClient
from fastapi1.main import app
menu_id = ''
def assert_menu_properties(response_json):
    """Assert a menu payload carries id/title/description; no-op when empty/None."""
    if not response_json:
        return
    for key in ('id', 'title', 'description'):
        assert key in response_json
@pytest.mark.anyio
async def test_get_menus():
    # Fetch all menus
    async with AsyncClient(app=app, base_url='http://test') as client:
        response = await client.get('/api/v1/menus')
    assert response.status_code == 200
    assert isinstance(response.json(), list)
@pytest.mark.anyio
async def test_create_menus():
    # Verify that a menu is created correctly; stores its id in the module
    # global `menu_id` for the later update/read/delete tests (tests are
    # order-dependent by design).
    global menu_id
    data = {
        'title': 'Menu 1',
        'description': 'Description for menu 1'
    }
    async with AsyncClient(app=app, base_url='http://test') as client:
        response = await client.post('/api/v1/menus', json=data)
    assert response.status_code == 201
    assert_menu_properties(response.json())
    created_menu = response.json()
    assert created_menu['title'] == 'Menu 1'
    assert created_menu['description'] == 'Description for menu 1'
    menu_id = created_menu['id']
@pytest.mark.anyio
async def test_update_menu():
    # Test updating the menu created above
    data = {
        'title': 'Updated Menu 1',
        'description': 'Updated Description 1'
    }
    async with AsyncClient(app=app, base_url='http://test') as client:
        response = await client.patch(f'/api/v1/menus/{menu_id}', json=data)
    assert response.status_code == 200
    assert_menu_properties(response.json())
    updated_menu = response.json()
    assert updated_menu['title'] == data['title']
    assert updated_menu['description'] == data['description']
@pytest.mark.anyio
async def test_read_menus():
    # Test reading a specific menu
    async with AsyncClient(app=app, base_url='http://test') as client:
        response = await client.get(f'/api/v1/menus/{menu_id}')
    assert response.status_code == 200
    assert_menu_properties(response.json())
    response = response.json()
    assert response != []
@pytest.mark.anyio
async def test_delete_menu():
    # Delete the menu
    async with AsyncClient(app=app, base_url='http://test') as client:
        response = await client.delete(f'/api/v1/menus/{menu_id}')
    assert response.status_code == 200
@pytest.mark.anyio
async def test_menu_empty():
    # Verify the menu list is empty after deletion
    async with AsyncClient(app=app, base_url='http://test') as client:
        response = await client.get('/api/v1/menus')
    assert response.status_code == 200
    assert response.json() == []
@pytest.mark.anyio
async def test_empty_menu_id():
    # Verify deleting an already-deleted menu yields an empty response
    async with AsyncClient(app=app, base_url='http://test') as client:
        response = await client.delete(f'/api/v1/menus/{menu_id}')
    assert response.status_code == 200
    assert response.json() is None
| puplishe/testproject | tests/test_menu.py | test_menu.py | py | 3,056 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "httpx.AsyncClient",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "fastapi1.main.app",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "pytest.mark",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "httpx.AsyncClien... |
#!/usr/bin/python3
"""sends a GET request to the URL given as first CLI argument and displays
the body on HTTP 200, otherwise the error code
(original docstring said "post"; the code performs a GET)
"""
import requests
from sys import argv
if __name__ == "__main__":
    url = argv[1]
    r = requests.get(url)
    if r.status_code == 200:
        print(r.text)
    else:
        print("Error code: {}".format(r.status_code))
| Isaiah-peter/alx-higher_level_programming | 0x11-python-network_1/7-error_code.py | 7-error_code.py | py | 305 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.argv",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
}
] |
21320836605 | from typing import Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import SAGEConv
from torch_geometric.nn import aggr
from src.utils import make_diff_matrix, triu_vector
class Model(nn.Module):
    """GraphSAGE regressor predicting per-graph latency from TPU graph batches.

    Supports both the "tile" and "layout" (node-config) variants of the
    dataset; returns per-graph latencies plus pairwise latency-difference
    vectors per microbatch (used for a ranking-style loss).
    """
    NUM_NODE_FEATURES = 140
    NUM_OPCODES = 120
    def __init__(self,
                 is_tile: bool,
                 is_nlp: bool, # unused for now
                 is_default: bool, # unused for now
                 wider_config: bool,
                 node_config_feat_size: int = 18,
                 tile_config_feat_size: int = 24,
                 ):
        """
        Args:
            is_tile (bool): False: layout, True: tile
            is_nlp (bool): False: xla, True: nlp
            is_default (bool): False: random, True: default
        """
        super().__init__()
        node_feat_emb_size = 20
        node_opcode_emb_size = 12
        self.node_feat_embedding = nn.Linear(self.NUM_NODE_FEATURES,
                                             node_feat_emb_size)
        self.node_opcode_embedding = nn.Embedding(self.NUM_OPCODES,
                                                  node_opcode_emb_size)
        # per-node config width differs between the tile and layout datasets
        config_feat_size = tile_config_feat_size if is_tile else node_config_feat_size
        concat_node_feat_size = (node_feat_emb_size +
                                 node_opcode_emb_size +
                                 config_feat_size)
        if is_tile or is_nlp or wider_config: # enable wider config for tile and for nlp by default
            in_channels = 64
            channel_config = [256, 256, 256, 256, 512, 512, 512, 512]
        else:
            in_channels = 32
            channel_config = [64, 64, 128, 128, 256, 256]
        assert len(channel_config) > 0
        # residual connections are only enabled for the nlp collection
        self.add_residuals: bool
        if is_nlp:
            self.add_residuals = True
        else:
            self.add_residuals = False
        self.input_shaping = nn.Linear(concat_node_feat_size, in_channels)
        self.convs = nn.ModuleList()
        in_ch = in_channels
        for out_ch in channel_config:
            conv = SAGEConv(in_ch, out_ch)
            self.convs.append(conv)
            in_ch = out_ch
        REGRESSION_SIZE = 1
        self.output_shaping = nn.Linear(channel_config[-1], REGRESSION_SIZE)
        # sums per-node latency contributions into a per-graph prediction
        self.aggr_sum = aggr.SumAggregation()
    def forward(self,
                node_feat: torch.Tensor,
                node_opcode: torch.Tensor,
                batch: torch.Tensor,
                ptr: torch.Tensor,
                node_config_feat: torch.Tensor,
                node_config_ids: torch.Tensor,
                node_config_ptr: torch.Tensor,
                config_feat: torch.Tensor,
                config_feat_ptr: torch.Tensor,
                edge_index: torch.Tensor,
                ub_size: int, # microbatch_size
                ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Example input shapes (one batch of 40 graphs):
        DataBatch(
            node_feat=[525076, 140],
            node_opcode=[525076],
            batch=[525076],
            ptr=[41],
            node_config_feat=[35496, 18],
            node_config_ids=[35496],
            node_config_batch=[35496],
            node_config_ptr=[41],
            edge_index=[2, 896088],
        )
        Returns (per_graph_latencies [batch], diff_triu_vector_stack
        [num_microbatches, ub_size*(ub_size-1)/2]).
        """
        # tile samples carry whole-graph configs; layout samples carry per-node configs
        is_tile = config_feat is not None
        SCALE_MS_TO_SEC = 1e-3
        batch_size = ptr.shape[0] - 1
        if batch_size % ub_size != 0:
            print(f"Warning: batch size {batch_size} not divisible "
                  f"by microbatch size {ub_size}. "
                  f"Fine for val, error for train.")
        num_nodes = node_feat.shape[0]
        if is_tile:
            config_feat_size = config_feat.shape[0] // batch_size
        else:
            config_feat_size = node_config_feat.shape[1]
        # signed log-compression of the raw node features
        node_feat_log = torch.log1p(torch.abs(node_feat)) * torch.sign(node_feat)
        node_feat_emb = self.node_feat_embedding(node_feat_log)
        node_opcode_emb = self.node_opcode_embedding(node_opcode.long())
        if is_tile:
            # broadcast each graph's config vector to all of its nodes
            graph_config_list = []
            for ib in range(batch_size):
                config_slice = slice(config_feat_ptr[ib],
                                     config_feat_ptr[ib+1])
                num_nodes_in_graph = ptr[ib+1] - ptr[ib]
                graph_config = config_feat[config_slice]
                graph_config_tiled = torch.tile(graph_config.unsqueeze(0),
                                                (num_nodes_in_graph, 1))
                graph_config_list.append(graph_config_tiled)
            config_feat_all = torch.concat(graph_config_list)
        else:
            # scatter per-node configs into a dense (num_nodes, config) tensor;
            # unconfigured nodes keep zeros
            config_feat_all = torch.zeros(size=(num_nodes, config_feat_size),
                                          dtype=torch.float32, device=node_feat.device)
            for ib in range(batch_size):
                config_slice = slice(node_config_ptr[ib],
                                     node_config_ptr[ib+1])
                sample_config_ids = node_config_ids[config_slice]
                sample_config_feat = node_config_feat[config_slice]
                # node ids are graph-local; shift by the graph's node offset
                global_config_ids = sample_config_ids + ptr[ib]
                config_feat_all[global_config_ids, :] = sample_config_feat
        node_feat_all = torch.cat((node_feat_emb,
                                   node_opcode_emb,
                                   config_feat_all), dim=-1)
        feat = F.relu(self.input_shaping(node_feat_all))
        for conv in self.convs:
            feat_out = conv(feat, edge_index)
            if self.add_residuals and (feat_out.shape[1] == feat.shape[1]):
                feat = feat_out + feat # residual connection
            else:
                feat = feat_out
            feat = F.relu(feat)
        per_node_latencies_unsq = self.output_shaping(feat)
        # branch for MAPE
        per_graph_latenies_ms = self.aggr_sum(per_node_latencies_unsq, batch)
        per_graph_latenies_ms_sq = per_graph_latenies_ms.squeeze(-1)
        if is_tile:
            per_graph_latenies = per_graph_latenies_ms_sq
        else:
            per_graph_latenies = SCALE_MS_TO_SEC * per_graph_latenies_ms_sq
        # branch for diff matrix
        assert batch_size % ub_size == 0
        num_microbatches = batch_size // ub_size
        diff_triu_vector_list = []
        for iub in range(num_microbatches):
            ub_slice = slice(iub*ub_size,
                             (iub+1)*ub_size)
            # per_ub_latencies [ub_size]
            per_ub_latencies = per_graph_latenies[ub_slice]
            # diff_matrix [ub_size, ub_size]
            diff_matrix = make_diff_matrix(per_ub_latencies)
            # triu_len = ub_size*(ub_size-1)/2. Ex triu_len=6 for ub_size=4.
            # diff_triu_vector [triu_len]
            diff_triu_vector = triu_vector(diff_matrix)
            diff_triu_vector_list.append(diff_triu_vector)
        # diff_triu_vector_stack [num_microbatches, triu_len]
        diff_triu_vector_stack = torch.stack(diff_triu_vector_list)
        return per_graph_latenies, diff_triu_vector_stack
| Obs01ete/kaggle_latenciaga | src/model.py | model.py | py | 7,190 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_nu... |
10888914316 | import random
from django.template.loader import render_to_string
from .naver import 블로그_검색, 상한가_크롤링, 테마별_시세_크롤링
def search(search_engine, keyword):
    """Search *keyword* on the given engine; return a Dialogflow fulfillment dict.

    Only '네이버 블로그' (Naver blog) is supported: the top 3 posts are
    rendered through a Django template.  Other engines get a Korean
    "not supported" message.
    """
    if search_engine == '네이버 블로그':
        post_list = 블로그_검색(keyword)
        response_text = render_to_string('dialogflow/naver_blog_search_result.txt', {
            'post_list': post_list[:3],
        })
    else:
        # "<engine> is not supported." (Korean, user-facing - left untranslated)
        response_text = '{}는 지원하지 않습니다.'.format(search_engine)
    return {'fulfillmentText': response_text}
def stock_search(stock_search_term):
    """Return a Dialogflow fulfillment dict for a stock query.

    Supports '상한가 종목' (limit-up stocks) and '테마별 시세' (prices by
    theme), both scraped from Naver; anything else gets a Korean
    "not supported" message.
    """
    if stock_search_term == '상한가 종목':
        response_text = 상한가_크롤링()
    elif stock_search_term == '테마별 시세':
        response_text = 테마별_시세_크롤링()
    else:
        # "<term> is not supported." (Korean, user-facing - left untranslated)
        response_text = '{}는 지원하지 않습니다.'.format(stock_search_term)
    return {'fulfillmentText': response_text}
| allieus-archives/demo-20180805-startup-dev | dialogflow/actions.py | actions.py | py | 937 | python | ko | code | 16 | github-code | 6 | [
{
"api_name": "naver.블로그_검색",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.template.loader.render_to_string",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "naver.상한가_크롤링",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "nave... |
74199623228 | from tkinter import messagebox
from PyPDF2 import PdfReader, PdfWriter
import os
class TrabajarPDF:
    """PDF utilities ("work with PDF")."""
    def divide_pdf(self, rutaPDF, rutaGuardar, num_pages):
        """Split the PDF at *rutaPDF* into chunks of *num_pages* pages.

        Chunks are written to *rutaGuardar* as
        ``<original-name>_<chunk-number>.pdf``; a success dialog is shown
        when done.
        """
        pdf_reader = PdfReader(rutaPDF)
        total_pages = len(pdf_reader.pages)
        for i in range(0, total_pages, num_pages):
            pdf_writer = PdfWriter()
            # last chunk may be shorter than num_pages
            for j in range(i, min(i + num_pages, total_pages)):
                pdf_writer.add_page(pdf_reader.pages[j])
            output_filename = os.path.join(rutaGuardar, f"{os.path.basename(rutaPDF)}_{i // num_pages + 1}.pdf")
            with open(output_filename, "wb") as output_file:
                pdf_writer.write(output_file)
        messagebox.showinfo("Éxito", "El PDF se ha dividido correctamente.")
{
"api_name": "PyPDF2.PdfReader",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "PyPDF2.PdfWriter",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_num... |
74606056828 | from __future__ import unicode_literals
import errno
import json
import logging
import os
__author__ = 'Jakub Plichta <jakub.plichta@gmail.com>'
logger = logging.getLogger(__name__)
class DashboardExporter(object):
    """Base class for dashboard sinks; subclasses override process_dashboard."""
    def process_dashboard(self, project_name, dashboard_name, dashboard_data):
        # Intentional no-op: concrete exporters (e.g. FileExporter) override this.
        pass
class ProjectProcessor(object):
    """Generates dashboard JSON for projects and feeds it to all exporters."""
    def __init__(self, dashboard_processors):
        """
        :type dashboard_processors: list[grafana_dashboards.builder.DashboardExporter]
        """
        super(ProjectProcessor, self).__init__()
        self._dashboard_processors = dashboard_processors
    def process_projects(self, projects, parent_context=None):
        """Render every dashboard of every project, once per context expansion.

        :type projects: list[grafana_dashboards.components.projects.Project]
        :type parent_context: dict
        """
        for project in projects:
            logger.info("Processing project '%s'", project.name)
            # each context produces its own variant of every dashboard
            for context in project.get_contexts(parent_context):
                for dashboard in project.get_dashboards():
                    json_obj = dashboard.gen_json(context)
                    # dashboard names may contain context placeholders
                    dashboard_name = context.expand_placeholders(dashboard.name)
                    for processor in self._dashboard_processors:
                        processor.process_dashboard(project.name, dashboard_name, json_obj)
class FileExporter(DashboardExporter):
    """Writes each dashboard as ``<output>/<project>/<dashboard>.json``."""
    def __init__(self, output_folder):
        super(FileExporter, self).__init__()
        self._output_folder = output_folder
        if not os.path.exists(self._output_folder):
            os.makedirs(self._output_folder)
        # catches the case where the path exists but is a regular file
        if not os.path.isdir(self._output_folder):
            raise Exception("'{0}' must be a directory".format(self._output_folder))
    def process_dashboard(self, project_name, dashboard_name, dashboard_data):
        """Serialize *dashboard_data* to a stable, pretty-printed JSON file."""
        super(FileExporter, self).process_dashboard(project_name, dashboard_name, dashboard_data)
        dirname = os.path.join(self._output_folder, project_name)
        try:
            os.makedirs(dirname)
        except OSError as e:
            # EEXIST dance instead of exist_ok=True: file keeps py2 compatibility
            if e.errno != errno.EEXIST:
                raise
        dashboard_path = os.path.join(dirname, dashboard_name + '.json')
        logger.info("Saving dashboard '%s' to '%s'", dashboard_name, os.path.abspath(dashboard_path))
        with open(dashboard_path, 'w') as f:
            # sorted keys keep output deterministic for diff-friendly exports
            json.dump(dashboard_data, f, sort_keys=True, indent=2, separators=(',', ': '))
| jakubplichta/grafana-dashboard-builder | grafana_dashboards/exporter.py | exporter.py | py | 2,430 | python | en | code | 141 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line... |
38292113901 | import cv2
from cv2 import dnn_superres
# Create an SR object
sr = dnn_superres.DnnSuperResImpl_create()
# Read image
image = cv2.imread('2.jpg')
# ##########Read the desired model
#path = "./models/EDSR_x3.pb"
path = "./models/LapSRN_x2.pb"
sr.readModel(path)
# Set the desired model and scale to get correct pre- and post-processing
sr.setModel("edsr", 3)
# Upscale the image
result = sr.upsample(image)
cv2.imshow("Original Image", image)
cv2.imshow("Super Resolution by bicubic", cv2.resize(image,None, fx=3, fy=3, interpolation=cv2.INTER_CUBIC))
cv2.imshow("Super Resolution by DL", result)
key = cv2.waitKey(20000)
cv2.destroyAllWindows()
# Save the image
cv2.imwrite("./upscaled.png", result)
#OK
############################################### if you want to use GPU
# Read the desired model
"""
path = "EDSR_x3.pb"
sr.readModel(path)
# Set CUDA backend and target to enable GPU inference
sr.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
sr.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
""" | Hsoleimanii/SuperResolution | super.py | super.py | py | 1,013 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "cv2.dnn_superres.DnnSuperResImpl_create",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.dnn_superres",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "cv2.imread",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.... |
34712009040 | # Coding Math Episode 9b
# Add the acceleration and observe how it impacts velocity (speed + direction)
# on the fireworks
__author__ = "piquesel"
import pygame
import math
import random
from ep7 import Vector as Vector
class Particle:
'Represents a particle defined by its position, velocity and direction'
def __init__(self, speed, direction, x=0, y=0, gravity=0):
'Initialize the particle'
self.x = x
self.y = y
self.position = Vector(x, y)
self.velocity = Vector(0, 0)
self.velocity.set_length(speed)
self.velocity.set_angle(direction)
self.gravity = Vector(0, gravity)
def accelerate(self, accel):
self.velocity.add_to(accel)
def update(self):
self.velocity.add_to(self.gravity)
self.position.add_to(self.velocity)
pygame.init()
NB_LINES = 100
RED = pygame.color.THECOLORS['red']
screen = pygame.display.set_mode((800, 600))
screen_rect = screen.get_rect()
pygame.display.set_caption("Episode 9b")
particles = []
NUM_PARTICLES = 300
gravity = Vector(0, 0.1)
for i in range(0, NUM_PARTICLES):
particles.append(Particle(random.random() * 5 + 2,
random.random() * math.pi * 2,
screen_rect.width/2,
screen_rect.height/3,
0.01))
main_loop = True
while main_loop:
pygame.time.delay(10)
for event in pygame.event.get():
if (event.type == pygame.QUIT
or event.type == pygame.KEYDOWN
and event.key == pygame.K_ESCAPE):
main_loop = False
screen.fill((0, 0, 0))
for i in range(0, NUM_PARTICLES):
p = particles[i]
p.accelerate(gravity)
p.update()
pygame.draw.circle(screen, RED, (round(p.position.get_x()),
round(p.position.get_y())), 5)
pygame.display.update()
pygame.quit()
| piquesel/coding-math | ep9b.py | ep9b.py | py | 1,954 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "ep7.Vector",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "ep7.Vector",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "ep7.Vector",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pygame.init",
"line_number": 33,... |
4715617180 | from square.configuration import Configuration
from square.client import Client
from django.contrib.auth.models import User
from django.conf import settings
import datetime
from pytz import utc as utc
from django.utils import timezone
from dateutil.relativedelta import relativedelta
import uuid
ACCESS_TOKEN = settings.SQUARE_ACCESS_TOKEN
ENV = settings.SQUARE_ENV
LOCATION = settings.SQUARE_LOCATION
BASE_ADDR = settings.BASE_URL
sq = Client(access_token=ACCESS_TOKEN, environment=ENV)
#To create a new customer. Returns None if fails
def createCustomer(userID):
user = User.objects.get(id=userID)
newCust = sq.customers.create_customer(
{
"given_name": user.first_name,
"family_name": user.last_name,
"email_address": user.email,
"reference_id": user.id
}
)
if newCust.is_success():
return newCust.body["customer"]["id"]
else:
return None
#To get a link for the user to use to pay
def createCheckout(userID, subID, plan):
"""Plans: 0=Free, 1=Silver, 2=Gold"""
user = User.objects.get(id=userID)
planName = "LabLineup"
planPrice = 0
if plan == 1:
planName = "LabLineup Silver"
planPrice = 2000
elif plan == 2:
planName = "LabLineup Gold"
planPrice = 3000
result = sq.checkout.create_checkout(
location_id = LOCATION,
body = {
"idempotency_key": uuid.uuid4().hex,
"order": {
"order": {
"location_id": LOCATION,
"reference_id": str(subID),
"line_items": [
{
"name": planName,
"quantity": "1",
"base_price_money": {
"amount": planPrice,
"currency": "USD"
}
},
],
},
},
"ask_for_shipping_address": False,
"merchant_support_email": "contact@lablineup.com",
"pre_populate_buyer_email": str(user.email),
"redirect_url": (BASE_ADDR + "/subscriptionConfirmation/")
}
)
if result.is_success():
return result.body["checkout"]["checkout_page_url"]
elif result.is_error():
return None
else:
return None
#To find the most recent payment for a subscription
def findRecentPayment(subID):
"""Returns a product name and order/transaction ID"""
now = datetime.datetime.now(utc)
startDate = (now - relativedelta(years=1, days=1)).isoformat()
endDate = (now + relativedelta(days=1)).isoformat()
result = sq.orders.search_orders(
body = {
"location_ids": [LOCATION],
"query": {
"filter": {
"date_time_filter": {
"created_at": {
"start_at": str(startDate),
"end_at": str(endDate)
}
},
"state_filter": {
"states": [
"COMPLETED"
]
}
},
"sort": {
"sort_field": "CREATED_AT",
"sort_order": "DESC"
}
}
}
)
if result.is_success() and result.body != {}:
for order in result.body["orders"]:
if order["reference_id"] == str(subID):
orderAmt = order["tenders"][0]["amount_money"]["amount"]
if orderAmt != 0:
return (order["line_items"][0]["name"], order["id"])
return (None, None)
else:
return (None, None)
#To find which product was ordered by order/transaction ID
def findProductOrder(orderID):
"""Returns a product name and order/transaction ID"""
result = sq.orders.search_orders(
body = {
"location_ids": [LOCATION],
"query": {
"filter": {
"state_filter": {
"states": [
"COMPLETED"
]
}
}
}
}
)
if result.is_success() and result.body != {}:
for order in result.body["orders"]:
if order["id"] == orderID:
orderAmt = order["tenders"][0]["amount_money"]["amount"]
if orderAmt != 0:
return (order["line_items"][0]["name"])
return None
else:
return None | BryceTant/LabLineup | LLenv/LabLineup/app/Payment.py | Payment.py | py | 4,739 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.conf.settings.SQUARE_ACCESS_TOKEN",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.SQUARE_ENV",
"line_number": 14,
"usage_type": "at... |
36078929738 | import requests
from sqlalchemy import or_
from flask import Blueprint
from flask_login import current_user
from flask import redirect, request, render_template, url_for, jsonify
from models import User, MealRequest, Proposal
from dbSession import session
from loginAPIKeyDecorator import require_api_key
from keyHelper import get_foursquare_key_dict, get_mapquest_key_dict
app_endpoints = Blueprint('app_endpoints', __name__)
mapquest_key_dict = get_mapquest_key_dict()
foursquare_key_dict = get_foursquare_key_dict()
@app_endpoints.route('/v1/users', methods = ['GET', 'PUT', 'DELETE'])
@require_api_key
def get_all_users():
if request.method == 'GET':
user_list = session.query(User).all()
if user_list is not None:
return jsonify([user.serialize for user in user_list])
else:
return 'None'
elif request.method == 'PUT':
username = request.json.get('user_name')
new_password = request.json.get('password')
new_token = request.json.get('token')
if username is not None:
current_user = session.query(User).filter_by(user_name=username).first()
else:
current_user = None
if current_user is not None:
if new_password is not None:
current_user.hash_password(new_password)
if new_token is not None:
current_user.api_key = new_token
session.commit()
return jsonify(dict(message="Success, updated user: {}!".format(username))),201
else:
return jsonify(dict(message="ERROR, not all parameter provided!")),404
elif request.method == 'DELETE':
username = request.json.get('user_name')
if username is not None:
current_user = session.query(User).filter_by(user_name=username).first()
else:
current_user = None
if current_user is not None:
session.delete(current_user)
session.commit()
return jsonify(dict(message="Success, deleted user: {}!".format(username))),200
else:
return jsonify(dict(message="ERROR, user not found or not provided!")),404
@app_endpoints.route('/v1/users/<int:id>', methods=['GET'])
@require_api_key
def get_user_with_id(id):
user_search = session.query(User).filter_by(id=id).first()
if user_search is not None:
return jsonify(user_search.serialize),200
else:
return jsonify(dict(message="ERROR, user {} not found!".format(id))),404
def update_meal_request(MealRequest):
response = requests.get('https://api.foursquare.com/v2/venues/search',params={**foursquare_key_dict, 'v':'20180323', 'limit':1,
'near':MealRequest.location_area,
'query':MealRequest.meal_type})
if response.status_code != 200:
return None, response.status_code
MealRequest.location_name = response.json().get('response').get('venues')[0].get('name')
MealRequest.latitude = response.json().get('response').get('geocode').get('feature').get('geometry').get('center').get('lat')
MealRequest.longitude = response.json().get('response').get('geocode').get('feature').get('geometry').get('center').get('lng')
return MealRequest,response.status_code
@app_endpoints.route('/v1/requests', methods = ['GET', 'POST'])
@require_api_key
def show_make_user_requests():
if request.method == 'GET':
meal_requests = session.query(MealRequest).all()
if meal_requests is not None:
return jsonify( [req.serialize for req in meal_requests])
else:
return 'None'
elif request.method == 'POST':
try:
new_meal_request = MealRequest(**request.json,user_id=current_user.id)
new_meal_request,status_code = update_meal_request(new_meal_request)
if status_code == 200:
current_user.meal_requests.append(new_meal_request)
session.commit()
return jsonify(dict(message="Success, created request: {}!".format(new_meal_request.id))),201
else:
return jsonify(dict(message="ERROR, foursquare api not working {}!".format(status_code))),404
except ValueError as value_error:
return jsonify(dict(message=value_error.args)),404
@app_endpoints.route('/v1/requests/<int:id>', methods = ['GET', 'PUT', 'DELETE'])
@require_api_key
def show_make_edit_specific_user_request(id):
request_query = session.query(MealRequest).filter_by(id=id).first()
if request_query == None:
return 'None'
if request.method == 'GET':
return jsonify(request_query.serialize),200
if request.method == 'PUT':
meal_type = request.json.get('meal_type')
location_area = request.json.get('location_area')
appointment_date= request.json.get('appointment_date')
meal_time = request.json.get('meal_time')
if meal_type is not None:
request_query.meal_type=meal_type
if location_area is not None:
request_query.location_area=location_area
if appointment_date is not None:
request_query.appointment_date=appointment_date
if meal_time is not None:
request_query.meal_time=meal_time
request_query, status_code = update_meal_request(request_query)
if status_code == 200:
session.commit()
return jsonify(dict(message="Success, updated request: {}!".format(request_query.id))),201
else:
return jsonify(dict(message="ERROR, foursquare api not working {}!".format(status_code))),404
elif request.method == 'DELETE':
session.delete(request_query)
session.commit()
return jsonify(dict(message="Success, deleted request: {}!".format(request_query.id))),200
@app_endpoints.route('/v1/proposals', methods=['GET', 'POST'])
@require_api_key
def show_and_create_user_porposals():
if request.method == 'GET':
proposals_query=session.query(Proposal).filter(or_(Proposal.user_porposed_to==current_user.user_name, Proposal.user_porposed_from==current_user.user_name)).all()
return jsonify([elements.serialize for elements in proposals_query]),200
elif request.method == 'POST':
proposal_request_id = request.json.get('request_id')
current_meal_request = session.query(MealRequest).filter_by(id=proposal_request_id).first()
if current_meal_request is None:
return jsonify(dict(message="ERROR, request id {} not found".format(proposal_request_id))), 404
meal_request_creater = session.query(User).filter_by(id=current_meal_request.user_id).first()
if session.query(Proposal).filter_by(request_id=proposal_request_id).first() is None:
new_proposal = Proposal(user_porposed_from=current_user.user_name,
user_porposed_to=meal_request_creater.user_name,
meal_request=current_meal_request)
session.add(new_proposal)
session.commit()
return jsonify(dict(message="Success, created proposal: {}!".format(new_proposal.request_id))),201
else:
return jsonify(dict(message="ERROR, request id {} does already exist".format(proposal_request_id))), 404
@app_endpoints.route('/v1/proposals/<int:id>', methods=['GET', 'POST'])
@require_api_key
def show_modify_delete_specific_proposal(id):
proposal_query = session.query(Proposal).filter(
or_(Proposal.user_porposed_from == current_user.user_name,
Proposal.user_porposed_to == current_user.user_name)).first()
if proposal_query == []:
print('asd')
| NaPiZip/Online-course-notes | Designing_RESTful_APIs/Exercices/L5/appEndpoints.py | appEndpoints.py | py | 7,871 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Blueprint",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "keyHelper.get_mapquest_key_dict",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "keyHelper.get_foursquare_key_dict",
"line_number": 17,
"usage_type": "call"
},
{
... |
27828726752 | # -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import re
import arcpy
import sys
import traceback
import tempfile
import os
__author__ = "Coen Jonker"
class ColumnParser(object):
def __init__(self):
self.non_alpha_pattern = re.compile(r'[^A-Za-z0-9_]')
self.ending = re.compile(r'_$')
self.beginning = re.compile(r'^_')
self.doubles = re.compile(r'__')
self.next_number = {}
def parseColname(self, name_in):
temp = re.sub(self.doubles, '_',
re.sub(self.ending, '',
re.sub(self.beginning, '',
re.sub(self.non_alpha_pattern, '_', name_in)
)
)
)
if temp[0] in '0123456789':
temp = "N" + temp
if temp in self.next_number:
nn = self.next_number[temp]
temp += "_{0}".format(nn)
nn += 1
else:
nn = 1
self.next_number[temp] = nn
return temp
if __name__ == "__main__":
input_xlsx = arcpy.GetParameterAsText(0)
sheetname = arcpy.GetParameterAsText(1)
workspace = arcpy.GetParameterAsText(3)
input_fc = arcpy.GetParameterAsText(2)
arcpy.env.overwriteOutput = True
arcpy.env.workspace = workspace
temppath = tempfile.mkdtemp()
output_csv = os.path.join(temppath, "tactisch_plan_schiphol_parsed.csv")
try:
# Data inladen
data = pd.read_excel(input_xlsx, sheetname=sheetname, skiprows=1, header=0)
# Parser instantieren
parser = ColumnParser()
# Kolomnamen parsen
colnames = [parser.parseColname(x) for x in data.columns]
# Nieuwe kolomnamen aan dataframe toevoegen
data.columns = colnames
# data["Projectnummer_str"] = data["Projectnummer"].astype(str).apply(lambda x: x.split('.')[0])
# OUDE CODE, POGING OM TABEL RECHTSTREEKS WEG TE SCHRIJVEN
"""
n_array = np.array(np.rec.fromrecords(data.values))
names = data.dtypes.index.tolist()
n_array.dtype.names = tuple(names)
arcpy.AddMessage(names)
arcpy.da.NumPyArrayToTable(n_array, "Tactischplan")
"""
# CSV wegschrijven
data.to_csv(output_csv, index=False, encoding='utf-8')
arcpy.TableToTable_conversion(output_csv, workspace, "Tactischplan")
arcpy.AddField_management("Tactischplan", "ProjectNR_STR", "TEXT")
arcpy.CalculateField_management("Tactischplan", "ProjectNR_STR", "str(int(!Projectnummer!))")
arcpy.CopyFeatures_management(input_fc, "ingetekendTactischplan")
arcpy.JoinField_management("ingetekendTactischplan", "PROJECTNUMMER", "Tactischplan", "ProjectNR_STR")
except:
# Get the traceback object
#
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# Concatenate information together concerning the error into a message string
#
pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages(2) + "\n"
# Return python error messages for use in script tool or Python Window
#
arcpy.AddError(pymsg)
arcpy.AddError(msgs)
# Print Python error messages for use in Python / Python Window
#
print(pymsg)
print(msgs)
| wagem007/asset-management-geo | tactisch_plan_xlsx_to_feature_class.py | tactisch_plan_xlsx_to_feature_class.py | py | 3,474 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "re.compile",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 20,
... |
30353259361 | from os import path
import os
import sys
from os.path import join, dirname
# Enthought library imports.
from pyface.action.api import Action
from traitsui.api import auto_close_message
# Local imports
import mayavi.api
from mayavi.core.common import error
from mayavi.preferences.api import preference_manager
# To find the html documentation directory, first look under the
# standard place. If that directory doesn't exist, assume you
# are running from the source.
local_dir = dirname(mayavi.api.__file__)
HTML_DIR = join(local_dir, 'html')
if not path.exists(HTML_DIR):
HTML_DIR = join(dirname(dirname(local_dir)),
'build', 'docs', 'html', 'mayavi')
if not path.exists(HTML_DIR):
HTML_DIR = None
def browser_open(url):
if sys.platform == 'darwin':
os.system('open %s &' % url)
else:
import webbrowser
webbrowser.open(url, autoraise=1)
def open_help_index(*args):
""" Open the mayavi user manual index in a browser.
"""
# If the HTML_DIR was found, bring up the documentation in a
# web browser. Otherwise, bring up an error message.
if HTML_DIR:
auto_close_message("Opening help in web browser...")
browser_open(join(HTML_DIR, 'index.html'))
else:
browser_open('https://docs.enthought.com/mayavi/mayavi/')
def open_tvtk_docs(*args):
""" Open the TVTK class browser.
"""
from tvtk.tools.tvtk_doc import TVTKClassChooser
TVTKClassChooser().edit_traits()
######################################################################
# `HelpIndex` class.
######################################################################
class HelpIndex(Action):
""" An action that pop up the help in a browser. """
tooltip = "The Mayavi2 user guide"
description = "The Mayavi2 user guide"
###########################################################################
# 'Action' interface.
###########################################################################
def perform(self, event):
""" Performs the action. """
open_help_index()
######################################################################
# `TVTKClassBrowser` class.
######################################################################
class TVTKClassBrowser(Action):
""" An action that opens the tvtk interactive class browser. """
tooltip = "The TVTK interactive class browser"
description = "The TVTK interactive class browser"
###########################################################################
# 'Action' interface.
###########################################################################
def perform(self, event):
""" Performs the action. """
open_tvtk_docs()
| enthought/mayavi | mayavi/action/help.py | help.py | py | 2,807 | python | en | code | 1,177 | github-code | 6 | [
{
"api_name": "os.path.dirname",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "mayavi.api.api",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "mayavi.api",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"li... |
12492560506 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
from __future__ import print_function
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import classification_report
from sklearn import metrics
from sklearn import tree
import warnings
warnings.filterwarnings('ignore')
#for interactivity
from ipywidgets import interact
from tabulate import tabulate
data=pd.read_csv('crop_recommendation.csv')
print("Shape Of The Data:",data.shape)
print()
data.head()
data.tail()
data.columns
data.dtypes
data['label'].unique()
data['label'].value_counts()
data.isnull().sum()
#summary of dataset
# create a list of variable names
variables = ['N', 'P', 'K', 'temperature', 'humidity', 'ph', 'rainfall']
# loop through the variables and calculate/print their mean values
for var in variables:
print("Average ratio of {0} in the soil: {1:.2f}".format(var, data[var].mean()))
@interact(crops=data['label'].unique())
def summary(crops):
stats = ['mean', 'median', 'min', 'max']
vars = ['N', 'P', 'K', 'temperature', 'humidity', 'ph', 'rainfall']
print("Statistics for crop:", crops)
for var in vars:
print("Statistics of", var)
print("Statistics for crop:", crops)
for stat in stats:
print(stat, var, "required: {:.2f}".format(data[data['label'] == crops][var].agg(stat)))
print() # Add an empty print statement to print a blank line
@interact
def compare(conditions =['N','P','K','temperature','humidity','rainfall','ph']):
crops = ['rice', 'maize', 'jute', 'blackgram', 'banana', 'coconut', 'apple', 'papaya', 'muskmelon', 'grapes', 'watermelon', 'kidneybeans', 'mungbean', 'orange', 'chickpea', 'lentil', 'cotton', 'pigeonpeas', 'mothbeans', 'mango', 'pomegranate', 'coffee']
for crop in crops:
print("{}: {:.2f}".format(crop.title(), data[data['label'] == crop][conditions].mean()))
@interact
def compare(conditions =['N','P','K','temperature','humidity','rainfall','ph']):
print("crops which requires greater than the average",conditions,'\n')
print(data[data[conditions]>data[conditions].mean()]['label'].unique())
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("crops which require less than average ", conditions,'\n')
print(data[data[conditions]<=data[conditions].mean()]['label'].unique())
print("Some interesting facts")
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
headers = ['Condition', 'Crops']
data = [
['High Nitrogen content in soil', ', '.join(data[data['N']>120]['label'].unique())],
['High Phosphorus content in soil', ', '.join(data[data['P']>100]['label'].unique())],
['High Potassium content in soil', ', '.join(data[data['K']>200]['label'].unique())],
['High Rainfall', ', '.join(data[data['rainfall']>200]['label'].unique())],
['Low temperature', ', '.join(data[data['temperature']<10]['label'].unique())],
['High temperature', ', '.join(data[data['temperature']>40]['label'].unique())],
['Low humidity', ', '.join(data[data['humidity']<20]['label'].unique())],
['High PH', ', '.join(data[data['ph']>9]['label'].unique())],
['Very low PH', ', '.join(data[data['ph']<4]['label'].unique())]
]
print(tabulate(data, headers=headers))
# In[2]:
import pandas as pd
from sklearn.cluster import KMeans
import warnings
warnings.filterwarnings('ignore')
data=pd.read_csv('crop_recommendation.csv')
x=data.loc[:,['N','P','K','temperature','humidity','rainfall','ph']].values
print(x.shape)
x_data=pd.DataFrame(x)
x_data.head()
plt.rcParams['figure.figsize']=(10,4)
wcss=[]
for i in range(1,11):
km=KMeans(n_clusters=i,init='k-means++',max_iter=300,n_init=10,random_state=0)
km.fit(x)
wcss.append(km.inertia_)
plt.plot(range(1,11),wcss)
plt.title('The Elbow method',fontsize=20)
plt.xlabel('no. of cluster')
plt.ylabel('wcss')
plt.show()
km=KMeans(n_clusters=4,init='k-means++',max_iter=300,n_init=10,random_state=0)
y_means=km.fit_predict(x)
a=data['label']
y_means=pd.DataFrame(y_means)
z=pd.concat([y_means,a],axis=1)
z=z.rename(columns={0:'cluster'})
#check cluster for each group
print("crops in first cluster :",z[z['cluster']==0]['label'].unique())
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("crops in second cluster :",z[z['cluster']==1]['label'].unique())
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("crops in third cluster :",z[z['cluster']==2]['label'].unique())
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("crops in forth cluster :",z[z['cluster']==3]['label'].unique())
# In[3]:
from sklearn.model_selection import train_test_split
import pandas as pd
data=pd.read_csv('crop_recommendation.csv')
x=data.drop(['label'],axis=1)
y=data['label']
print("shape of x",x.shape)
print("shape of y",y.shape)
print()
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,random_state=0)
from sklearn.linear_model import LogisticRegression
model=LogisticRegression()
model.fit(x_train,y_train)
y_pred=model.predict(x_test)
from sklearn.metrics import classification_report
cr=classification_report (y_test,y_pred)
print(cr)
Prediction=model.predict((np.array([[90,40,40,20,80,7,200]])))
print("the suggested crop for this given climate condition is ",Prediction)
Prediction=model.predict((np.array([[20,30,10,15,90,7.5,100]])))
print("the suggested crop for this given climate condition is ",Prediction)
print()
# convert any non-numeric columns to numeric
for col in data.columns:
if data[col].dtype == object:
data[col] = pd.to_numeric(data[col], errors='coerce')
sns.heatmap(data.corr(),annot=True)
features = data[['N', 'P','K','temperature', 'humidity', 'ph', 'rainfall']]
target = data['label']
#features = df[['temperature', 'humidity', 'ph', 'rainfall']]
labels = data['label']
# Initialzing empty lists to append all model's name and corresponding name
acc = []
model = []
# In[4]:
#Decision Tree
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import accuracy_score
data = pd.read_csv("crop_recommendation.csv")
X = data.drop(['label'], axis=1)
y = data['label']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
dt_clf = DecisionTreeClassifier()
dt_clf.fit(X_train, y_train)
y_pred = dt_clf.predict(X_test)
dt_acc = accuracy_score(y_test, y_pred)
print("Accuracy of Decision Tree:", dt_acc*100)
cv_score = cross_val_score(dt_clf, X, y, cv=5)
print("Cross-validation score:", cv_score)
print()
print(classification_report(y_test,y_pred))
#Saving trained Decision Tree model
import pickle
# Dump the trained Naive Bayes classifier with Pickle
DT_pkl_filename = 'DecisionTree.pkl'
# Open the file to save as pkl file
DT_Model_pkl = open(DT_pkl_filename, 'wb')
pickle.dump(DecisionTreeClassifier, DT_Model_pkl)
# Close the pickle instances
DT_Model_pkl.close()
# In[5]:
# Guassian Naive Bayes
import pandas as pd
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import accuracy_score
data = pd.read_csv("crop_recommendation.csv")
X = data.drop(['label'], axis=1)
y = data['label']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
nb_clf = GaussianNB()
nb_clf.fit(X_train, y_train)
y_pred = nb_clf.predict(X_test)
nb_acc = accuracy_score(y_test, y_pred)
print("Accuracy of Naive Bayes:", nb_acc*100)
cv_score = cross_val_score(nb_clf, X, y, cv=5)
print("Cross-validation score:", cv_score)
print()
print(classification_report(y_test,y_pred))
#Saving trained Guassian Naive Bayes model
import pickle
# Dump the trained Naive Bayes classifier with Pickle
NB_pkl_filename = 'NBClassifier.pkl'
# Open the file to save as pkl file
NB_Model_pkl = open(NB_pkl_filename, 'wb')
pickle.dump(GaussianNB, NB_Model_pkl)
# Close the pickle instances
NB_Model_pkl.close()
# In[6]:
# Support Vector Machine (SVM)
import pandas as pd
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import accuracy_score
data = pd.read_csv("crop_recommendation.csv")
X = data.drop(['label'], axis=1)
y = data['label']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
svm_clf = SVC(kernel='linear')
svm_clf.fit(X_train, y_train)
y_pred = svm_clf.predict(X_test)
svm_acc = accuracy_score(y_test, y_pred)
print("Accuracy of SVM:", svm_acc*100)
cv_score = cross_val_score(svm_clf, X, y, cv=5)
print("Cross-validation score:", cv_score)
print()
print(classification_report(y_test,y_pred))
#Saving trained SVM model
import pickle
# Dump the trained SVM classifier with Pickle
SVM_pkl_filename = 'SVMClassifier.pkl'
# Open the file to save as pkl file
SVM_Model_pkl = open(SVM_pkl_filename, 'wb')
pickle.dump(SVC, SVM_Model_pkl)
# Close the pickle instances
SVM_Model_pkl.close()
# In[7]:
#Logistic Regression
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import accuracy_score
data = pd.read_csv("Crop_recommendation.csv")
X = data.drop(['label'], axis=1)
y = data['label']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
lr_clf = LogisticRegression(random_state=42)
lr_clf.fit(X_train, y_train)
y_pred = lr_clf.predict(X_test)
lr_acc = accuracy_score(y_test, y_pred)
print("Accuracy of Logistic Regression:", lr_acc*100)
cv_score = cross_val_score(lr_clf, X, y, cv=5)
print("Cross-validation score:", cv_score)
print()
print(classification_report(y_test,y_pred))
#Saving trained Logistic Regression model
import pickle
# Dump the trained Naive Bayes classifier with Pickle
LR_pkl_filename = 'LogisticRegression.pkl'
# Open the file to save as pkl file
LR_Model_pkl = open(DT_pkl_filename, 'wb')
pickle.dump(LogisticRegression, LR_Model_pkl)
# Close the pickle instances
LR_Model_pkl.close()
# In[8]:
#Random Forest
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import accuracy_score
data = pd.read_csv("crop_recommendation.csv")
X = data.drop(['label'], axis=1)
y = data['label']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
rf_clf = RandomForestClassifier(n_estimators=100, random_state=42)
rf_clf.fit(X_train, y_train)
y_pred = rf_clf.predict(X_test)
rf_acc = accuracy_score(y_test, y_pred)
print("Accuracy of Random Forest:", rf_acc*100)
cv_score = cross_val_score(rf_clf, X, y, cv=5)
print("Cross-validation score:", cv_score)
print()
print(classification_report(y_test,y_pred))
#Saving trained Random Forest model
import pickle
# Dump the trained Naive Bayes classifier with Pickle
RF_pkl_filename = 'RandomForest.pkl'
# Open the file to save as pkl file
RF_Model_pkl = open(RF_pkl_filename, 'wb')
pickle.dump(RandomForestClassifier, RF_Model_pkl)
# Close the pickle instances
RF_Model_pkl.close()
# In[9]:
#Accuracy Comparison
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import accuracy_score
models = ['Decision Tree', 'SVM', 'Naive Bayes', 'Logistic Regression', 'Random Forest']
accuracies = [dt_acc, svm_acc, nb_acc, lr_acc, rf_acc]
fig, ax = plt.subplots(figsize=(10,7))
colors = ['cyan', 'pink', 'aquamarine', 'turquoise', 'lavender']
bars = ax.bar(models, accuracies, color=colors)
for bar in bars:
height = bar.get_height()
ax.text(bar.get_x() + bar.get_width() / 2, height, round(height, 4),
ha='center', va='bottom', fontsize=12)
ax.set_xlabel('Model')
ax.set_ylabel('Accuracy')
ax.set_title('Comparison of Accuracy for Different Classifiers')
plt.show()
# In[10]:
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score
classifiers = [dt_clf, nb_clf, svm_clf, lr_clf, rf_clf]
scores = []
for clf in classifiers:
cv_scores = cross_val_score(clf, X, y, cv=10)
scores.append(cv_scores)
fig = plt.figure(figsize=(10, 7))
ax = fig.add_subplot(111)
bp = ax.boxplot(scores, patch_artist=True)
plt.setp(bp['boxes'], facecolor='lightblue')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['caps'], color='black')
plt.setp(bp['medians'], color='red')
plt.setp(bp['fliers'], marker='o', markersize=3, markerfacecolor='blue')
ax.set_xticklabels(['Naive Bayes', 'SVM', 'Random Forest', 'Logistic Regression', 'Decision Tree'])
ax.set_ylabel('Cross-validation score')
ax.set_title('Comparison of Cross-Validation Scores for Different Classifiers')
plt.show()
# In[11]:
classifiers = [dt_clf, nb_clf, svm_clf, lr_clf, rf_clf]
acc = [dt_acc, svm_acc, nb_acc, lr_acc, rf_acc]
# Create a dictionary that maps each classifier name to its accuracy score
accuracy_models = dict(zip(classifiers, acc))
# Print the dictionary
for k, v in accuracy_models.items():
print (k, '==>', v)
# In[12]:
import numpy as np
# Define a single data instance to predict the crop label for
data_instance = np.array([[90, 42, 43, 23.6, 50.2, 6.8, 187.2]])
# Use the pre-trained Random Forest classifier to predict the crop label for the data instance
predicted_label = rf_clf.predict(data_instance)
# Print the predicted crop label
print("Predicted crop label:", predicted_label)
# In[13]:
import numpy as np
# Define a single data instance to predict the crop label for
data_instance = np.array([[83, 45, 60, 28, 70.3, 7.0, 150.9]])
# Use the pre-trained Random Forest classifier to predict the crop label for the data instance
predicted_label = rf_clf.predict(data_instance)
# Print the predicted crop label
print("Predicted crop label:", predicted_label)
| Panktibhatt08/Machine-Learning | Machine Learning Final Project.py | Machine Learning Final Project.py | py | 14,506 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "ipywidgets.interact",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "ipywidget... |
43721480783 | import numpy as np
from config import *
from KnowledgeSent import KnowledgeSentence
from transformers import BertTokenizer, BertModel
import torch
hops = FLAGS.hops
string_year = str(FLAGS.year)
number = 0
start = FLAGS.start
end = FLAGS.end
if string_year == '2015':
number = 3864
elif string_year == '2016':
number = 5640
class Embeddings(object):
'''
For each input sentence:
- Token embedding of each token
- Segment embedding
- Position embedding
- Visibility matrix
'''
def __init__(self, sofpos=True, vm=True):
self.sentences = [] # list of list of all tokens for a sentence
self.visible_matrices = []
self.vm_tensors = [] # a list of visible matrices for each sentence in tensor-shape of 1*token_numb*token_numb.
self.soft_positions = [] # list of list of softpositions for each sentence
self.segments = [] # list of list of segments for each sentence
self.embeddings = [] # a list with intial embeddings for each token in tensor-shape of 1*token_numb*768
self.hidden_states = [] # list of hidden states
self.token_hidden_states = [] # list with for each token, the token and the hidden states
self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
self.model = BertModel.from_pretrained('bert-base-uncased', output_hidden_states=True)
self.model.eval()
self.sofpos = sofpos
self.vm = vm
def makeEmbeddings(self):
count = 0
with open('data/externalData/' + 'raw_data' + string_year + '.txt', 'r') as raw_data:
line_list = raw_data.readlines()
for i in range(start*3, min(number, end*3)):
if i % 600 == 0:
print('another 200, ({}/{})'.format(i, min(number, end*3)))
if count % 3 == 0 : # if it is a sentence line
# add CLS and SEP and remove target sign
sentence = "[CLS] " + line_list[i].replace('$T$', line_list[i+1].replace('\n', '')) + " [SEP]"
# add no knowledge
sent = KnowledgeSentence(sentence, hops, self.tokenizer, include_knowledge=False)
self.sentences.append(sent.sentence)
self.soft_positions.append(sent.soft_positions)
self.segments.append(sent.segments)
self.visible_matrices.append(sent.visibility_matrix)
else: # if it is a target line or sentiment line
pass
count += 1
# append the raw test data and add knowledge
for i in range(max(number, start*3), end*3):
if i % 600 == 0:
print('another 200 ({}/{})'.format(i, end*3))
if count % 3 == 0: # if it is a sentence line
# add CLS and SEP and replace $T$-token with target-token
sentence = "[CLS] " + line_list[i].replace('$T$', line_list[i+1].replace('\n', '')) + " [SEP]"
# add knowledge to sentence
know_sent = KnowledgeSentence(sentence, hops, self.tokenizer, include_knowledge=True)
self.sentences.append(know_sent.sentence)
self.soft_positions.append(know_sent.soft_positions)
self.segments.append(know_sent.segments)
if self.vm:
self.visible_matrices.append(know_sent.visibility_matrix)
else:
self.visible_matrices.append(np.ones((len(know_sent.sentence), len(know_sent.sentence))))
else: # if it is a target line or sentiment line
pass
count += 1
print('Creating embeddings...')
for i in range(len(self.sentences)):
token_tensor = torch.tensor([self.tokenizer.convert_tokens_to_ids(self.sentences[i])])
segment_tensor = torch.tensor([self.segments[i]])
pos_tensor = torch.tensor([self.soft_positions[i]])
if self.sofpos:
output = self.model(token_tensor, None, segment_tensor, pos_tensor)
else:
output = self.model(token_tensor, None, segment_tensor, None)
tensor = output.hidden_states.__getitem__(00)
self.embeddings.append(tensor)
self.vm_tensors.append(torch.tensor(self.visible_matrices[i]))
print('Embeddings created!')
| Felix0161/KnowledgeEnhancedABSA | Embeddings.py | Embeddings.py | py | 4,594 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "transformers.BertTokenizer.from_pretrained",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "transformers.BertTokenizer",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "transformers.BertModel.from_pretrained",
"line_number": 37,
"usage_... |
4546238710 |
import cv2
from matplotlib.pyplot import contour
import numpy as np
import matplotlib.pyplot as plt
from random import randint
import matplotlib.pyplot as plt
def DetectPositionMaxSkin(filename,x, y, w, h, lower, upper):
#y=y+50
Image = cv2.VideoCapture(filename)
#Image = cv2.VideoCapture('t8.mp4')
success, frame = Image.read()
while success :
success, frame = Image.read()
#cv2.imshow('Imagem Original', frame)
if success:
cropeedIMAGE = frame[y:y+h, x:x+w]
converted = cv2.cvtColor(cropeedIMAGE, cv2.COLOR_BGR2HSV)
#cv2.imshow('convertedHSV',converted)
skinMask = cv2.inRange(converted, lower, upper)
#cv2.imshow('skin',skinMask)
# apply a series of erosions and dilations to the mask
# using an elliptical kernel
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (12, 12))
skinMask = cv2.erode(skinMask, kernel, iterations=3)
skinMask = cv2.dilate(skinMask, kernel, iterations=3)
# blur the mask to help remove noise, then apply the
# mask to the frame
skinMask = cv2.GaussianBlur(skinMask, (11, 11), 5)
#cv2.imshow('skinMask',skinMask)
skin = cv2.bitwise_and(cropeedIMAGE, cropeedIMAGE, mask=skinMask)
#cv2.imshow('skin',skin)
########################################################
#lowerFinger =np.array([8, 15, 110], dtype="uint8")
#upperFinger = np.array([8, 15, 110], dtype="uint8")
hsv_img = cv2.cvtColor(skin, cv2.COLOR_BGR2HSV)
#hsv_img = cv2.inRange(hsv_img, lowerFinger, upperFinger)
#cv2.imshow('hsv_img', hsv_img)
# Extracting Saturation channel on which we will work
img_s = hsv_img[:, :, 1]
#img_s = skin[:, :, 1]
#cv2.imshow('img_s', img_s)
# smoothing before applying threshold
img_s_blur = cv2.GaussianBlur(img_s, (7,7), 2)
#img_s_blur = cv2.medianBlur(skin,5)
#cv2.imshow('img_s_blur', img_s_blur)
img_s_binary = cv2.threshold(img_s_blur, 200, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1] # Thresholding to generate binary image (ROI detection)
#cv2.imshow('img_s_binary1', img_s_binary1)
# reduce some noise
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (4, 4))
img_s_binary = cv2.morphologyEx(img_s_binary, cv2.MORPH_OPEN, kernel, iterations=4)
#cv2.imshow('img_s_binary1', img_s_binary)
# ROI only image extraction & contrast enhancement, you can crop this region
#img_croped = cv2.bitwise_and(img_s, img_s_binary) * 10
#cv2.imshow('img_croped', img_croped)
# eliminate
kernel = np.ones((5, 5), np.float32)/25
processedImage = cv2.filter2D(img_s_binary, -1, kernel)
img_s_binary[processedImage > 250] = 0
#cv2.imshow('img_s_binary2', img_s_binary)
edges = cv2.threshold(img_s_binary, 100, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
#th3 = cv2.adaptiveThreshold(img_s_blur,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
#_,edges = cv2.threshold(img_croped, 160, 255, cv2.THRESH_BINARY_INV)
#cv2.imshow('edges', edges)
#https://docs.opencv.org/3.4/da/d0c/tutorial_bounding_rects_circles.html
contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#print("Number of contours =" + str(len(contours)))
#print("Number of hierarchy =" + str(len(hierarchy)))
#print(np.argmax(hierarchy))
contours_poly = [None]*len(contours)
centers = [None]*len(contours)
radius = [None]*len(contours)
#area= [None]*len(contours)
#drawing = np.zeros((edges.shape[0], edges.shape[1], 3), dtype=np.uint8)
for i, c in enumerate(contours):
contours_poly[i] = cv2.approxPolyDP(c, 0.02, True)
#boundRect[i] = cv2.boundingRect(contours_poly[i])
centers[i], radius[i] = cv2.minEnclosingCircle(contours_poly[i])
#area[i] = cv2.contourArea(contours[i])
#print("area: %s" % area)
#if i>=6 and cv2.contourArea(contours[i]) >= 100:
if 5000 >= cv2.contourArea(contours[i]) <= 7600 and radius[i] < 50:
#cv2.drawContours(skin, contours_poly, i, (255,0,0))
cv2.circle(skin, (int(centers[i][0]), int(centers[i][1])), int(radius[i]), (0,0,255), 2)
#cv2.imshow('Contours', skin)
cv2.imshow('skin', skin)
#print((centers[i][0]))S
#xe=np.arange(1,121)
#print(len(xe))
#plt.plot(x,centers[i][0],'ro')
#plt.ylabel('some numbers')
#plt.show()
#cv2.imshow('Skin Mask', skinMask)
#cv2.imshow('Skin', skin)
#vcat = cv2.hconcat((skinMask, skin))
#cv2.imshow('vcat', vcat)
#cv2.imshow('hsv_img', hsv_img)
#cv2.imshow('Extracting Saturation', img_s)
#cv2.imshow('img_s_binary1', img_s_binary1)
#cv2.imshow('img_croped', img_croped)
#cv2.imshow('edges', edges)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
cv2.destroyAllWindows()
#xc,yc,wc,hc,skin,skinMask,hsv_img,img_s_blur,img_s_binary1,img_croped,edges,cropeedIMAGE | raquelpantojo/Detectskin | DetectNail.py | DetectNail.py | py | 6,004 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2HSV",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "cv2.inRange",
... |
33787246110 | from itertools import chain
import os
import pytest
@pytest.fixture(scope="module")
def organization_id():
"""Get Organization ID from the environment variable """
return os.environ["GCLOUD_ORGANIZATION"]
@pytest.fixture(scope="module")
def source_name(organization_id):
from google.cloud import securitycenter
client = securitycenter.SecurityCenterClient()
org_name = "organizations/{org_id}".format(org_id=organization_id)
source = client.create_source(
org_name,
{
"display_name": "Unit test source",
"description": "A new custom source that does X",
},
)
return source.name
def test_create_source(organization_id):
"""Create a new findings source. """
# [START create_source]
from google.cloud import securitycenter
client = securitycenter.SecurityCenterClient()
# organization_id is the numeric ID of the organization. e.g.:
# organization_id = "111122222444"
org_name = "organizations/{org_id}".format(org_id=organization_id)
created = client.create_source(
org_name,
{
"display_name": "Customized Display Name",
"description": "A new custom source that does X",
},
)
print("Created Source: {}".format(created.name))
# [END create_source]
def test_get_source(source_name):
"""Gets an existing source."""
# [START get_source]
from google.cloud import securitycenter
client = securitycenter.SecurityCenterClient()
# source_name is the resource path for a source that has been
# created previously (you can use list_sources to find a specific one).
# Its format is:
# source_name = "organizations/{organization_id}/sources/{source_id}"
# e.g.:
# source_name = "organizations/111122222444/sources/1234"
source = client.get_source(source_name)
print("Source: {}".format(source))
# [END get_source]
def test_update_source(source_name):
"""Updates a source's display name."""
# [START update_source]
from google.cloud import securitycenter
from google.protobuf import field_mask_pb2
client = securitycenter.SecurityCenterClient()
# Field mask to only update the display name.
field_mask = field_mask_pb2.FieldMask(paths=["display_name"])
# source_name is the resource path for a source that has been
# created previously (you can use list_sources to find a specific one).
# Its format is:
# source_name = "organizations/{organization_id}/sources/{source_id}"
# e.g.:
# source_name = "organizations/111122222444/sources/1234"
updated = client.update_source(
{"name": source_name, "display_name": "Updated Display Name"},
update_mask=field_mask,
)
print("Updated Source: {}".format(updated))
# [END update_source]
assert updated.display_name == "Updated Display Name"
def test_add_user_to_source(source_name):
"""Gives a user findingsEditor permission to the source."""
user_email = "csccclienttest@gmail.com"
# [START update_source_iam]
from google.cloud import securitycenter
from google.iam.v1 import policy_pb2
client = securitycenter.SecurityCenterClient()
# source_name is the resource path for a source that has been
# created previously (you can use list_sources to find a specific one).
# Its format is:
# source_name = "organizations/{organization_id}/sources/{source_id}"
# e.g.:
# source_name = "organizations/111122222444/sources/1234"
# Get the old policy so we can do an incremental update.
old_policy = client.get_iam_policy(source_name)
print("Old Policy: {}".format(old_policy))
# Setup a new IAM binding.
binding = policy_pb2.Binding()
binding.role = "roles/securitycenter.findingsEditor"
# user_email is an e-mail address known to Cloud IAM (e.g. a gmail address).
# user_mail = user@somedomain.com
binding.members.append("user:{}".format(user_email))
# Setting the e-tag avoids over-write existing policy
updated = client.set_iam_policy(
source_name, {"etag": old_policy.etag, "bindings": [binding]}
)
print("Updated Policy: {}".format(updated))
# [END update_source_iam]
assert any(
member == "user:csccclienttest@gmail.com"
for member in chain.from_iterable(
binding.members for binding in updated.bindings
)
)
def test_list_source(organization_id):
"""Lists finding sources."""
i = -1
# [START list_sources]
from google.cloud import securitycenter
# Create a new client.
client = securitycenter.SecurityCenterClient()
# organization_id is the numeric ID of the organization. e.g.:
# organization_id = "111122222444"
org_name = "organizations/{org_id}".format(org_id=organization_id)
# Call the API and print out each existing source.
for i, source in enumerate(client.list_sources(org_name)):
print(i, source)
# [END list_sources]
assert i >= 0
def test_create_finding(source_name):
"""Creates a new finding."""
# [START create_finding]
from google.cloud import securitycenter
from google.cloud.securitycenter_v1.proto.finding_pb2 import Finding
from google.protobuf.timestamp_pb2 import Timestamp
# Create a new client.
client = securitycenter.SecurityCenterClient()
# Use the current time as the finding "event time".
now_proto = Timestamp()
now_proto.GetCurrentTime()
# source_name is the resource path for a source that has been
# created previously (you can use list_sources to find a specific one).
# Its format is:
# source_name = "organizations/{organization_id}/sources/{source_id}"
# e.g.:
# source_name = "organizations/111122222444/sources/1234"
# Controlled by caller.
finding_id = "samplefindingid"
# The resource this finding applies to. The CSCC UI can link
# the findings for a resource to the corresponding Asset of a resource
# if there are matches.
resource_name = "//cloudresourcemanager.googleapis.com/organizations/11232"
# Call The API.
created_finding = client.create_finding(
source_name,
finding_id,
{
"state": Finding.ACTIVE,
"resource_name": resource_name,
"category": "MEDIUM_RISK_ONE",
"event_time": now_proto,
},
)
print(created_finding)
# [END create_finding]
assert len(created_finding.name) > 0
def test_create_finding_with_source_properties(source_name):
"""Demonstrate creating a new finding with source properties. """
# [START create_finding_with_properties]
from google.cloud import securitycenter
from google.cloud.securitycenter_v1.proto.finding_pb2 import Finding
from google.protobuf.timestamp_pb2 import Timestamp
from google.protobuf.struct_pb2 import Value
# Create a new client.
client = securitycenter.SecurityCenterClient()
# source_name is the resource path for a source that has been
# created previously (you can use list_sources to find a specific one).
# Its format is:
# source_name = "organizations/{organization_id}/sources/{source_id}"
# e.g.:
# source_name = "organizations/111122222444/sources/1234"
# Controlled by caller.
finding_id = "samplefindingid2"
# The resource this finding applies to. The CSCC UI can link
# the findings for a resource to the corresponding Asset of a resource
# if there are matches.
resource_name = "//cloudresourcemanager.googleapis.com/organizations/11232"
# Define source properties values as protobuf "Value" objects.
str_value = Value()
str_value.string_value = "string_example"
num_value = Value()
num_value.number_value = 1234
# Use the current time as the finding "event time".
now_proto = Timestamp()
now_proto.GetCurrentTime()
created_finding = client.create_finding(
source_name,
finding_id,
{
"state": Finding.ACTIVE,
"resource_name": resource_name,
"category": "MEDIUM_RISK_ONE",
"source_properties": {"s_value": str_value, "n_value": num_value},
"event_time": now_proto,
},
)
print(created_finding)
# [END create_finding_with_properties]
def test_update_finding(source_name):
# [START update_finding]
from google.cloud import securitycenter
from google.protobuf.struct_pb2 import Value
from google.protobuf import field_mask_pb2
from google.protobuf.timestamp_pb2 import Timestamp
client = securitycenter.SecurityCenterClient()
# Only update the specific source property and event_time. event_time
# is required for updates.
field_mask = field_mask_pb2.FieldMask(
paths=["source_properties.s_value", "event_time"]
)
value = Value()
value.string_value = "new_string"
# Set the update time to Now. This must be some time greater then the
# event_time on the original finding.
now_proto = Timestamp()
now_proto.GetCurrentTime()
# source_name is the resource path for a source that has been
# created previously (you can use list_sources to find a specific one).
# Its format is:
# source_name = "organizations/{organization_id}/sources/{source_id}"
# e.g.:
# source_name = "organizations/111122222444/sources/1234"
finding_name = "{}/findings/samplefindingid2".format(source_name)
updated_finding = client.update_finding(
{
"name": finding_name,
"source_properties": {"s_value": value},
"event_time": now_proto,
},
update_mask=field_mask,
)
print(
"New Source properties: {}, Event Time {}".format(
updated_finding.source_properties, updated_finding.event_time.ToDatetime()
)
)
# [END update_finding]
def test_update_finding_state(source_name):
"""Demonstrate updating only a finding state."""
# [START update_finding_state]
from google.cloud import securitycenter
from google.cloud.securitycenter_v1.proto.finding_pb2 import Finding
from google.protobuf.timestamp_pb2 import Timestamp
from datetime import datetime
# Create a client.
client = securitycenter.SecurityCenterClient()
# source_name is the resource path for a source that has been
# created previously (you can use list_sources to find a specific one).
# Its format is:
# source_name = "organizations/{organization_id}/sources/{source_id}"
# e.g.:
# source_name = "organizations/111122222444/sources/1234"
finding_name = "{}/findings/samplefindingid2".format(source_name)
now_proto = Timestamp()
now_proto.GetCurrentTime()
# Call the API to change the finding state to inactive as of now.
new_finding = client.set_finding_state(
finding_name, Finding.INACTIVE, start_time=now_proto
)
print("New state: {}".format(Finding.State.Name(new_finding.state)))
# [END update_finding_state]
def test_trouble_shoot(source_name):
"""Demonstrate calling test_iam_permissions to determine if the
service account has the correct permisions."""
# [START test_iam_permissions]
from google.cloud import securitycenter
# Create a client.
client = securitycenter.SecurityCenterClient()
# source_name is the resource path for a source that has been
# created previously (you can use list_sources to find a specific one).
# Its format is:
# source_name = "organizations/{organization_id}/sources/{source_id}"
# e.g.:
# source_name = "organizations/111122222444/sources/1234"
# Check for permssions to call create_finding or update_finding.
permission_response = client.test_iam_permissions(
source_name, ["securitycenter.findings.update"]
)
print(
"Permision to create or update findings? {}".format(
len(permission_response.permissions) > 0
)
)
# [END test_iam_permissions]
assert len(permission_response.permissions) > 0
# [START test_iam_permissions]
# Check for permissions necessary to call set_finding_state.
permission_response = client.test_iam_permissions(
source_name, ["securitycenter.findings.setState"]
)
print(
"Permision to update state? {}".format(len(permission_response.permissions) > 0)
)
# [END test_iam_permissions]
assert len(permission_response.permissions) > 0
def test_list_all_findings(organization_id):
# [START list_all_findings]
from google.cloud import securitycenter
# Create a client.
client = securitycenter.SecurityCenterClient()
# organization_id is the numeric ID of the organization. e.g.:
# organization_id = "111122222444"
org_name = "organizations/{org_id}".format(org_id=organization_id)
# The "sources/-" suffix lists findings across all sources. You
# also use a specific source_name instead.
all_sources = "{org_name}/sources/-".format(org_name=org_name)
finding_result_iterator = client.list_findings(all_sources)
for i, finding_result in enumerate(finding_result_iterator):
print(
"{}: name: {} resource: {}".format(
i, finding_result.finding.name, finding_result.finding.resource_name
)
)
# [END list_all_findings]
assert i > 0
def test_list_filtered_findings(source_name):
# [START list_filtered_findings]
from google.cloud import securitycenter
# Create a new client.
client = securitycenter.SecurityCenterClient()
# source_name is the resource path for a source that has been
# created previously (you can use list_sources to find a specific one).
# Its format is:
# source_name = "organizations/{organization_id}/sources/{source_id}"
# e.g.:
# source_name = "organizations/111122222444/sources/1234"
# You an also use a wild-card "-" for all sources:
# source_name = "organizations/111122222444/sources/-"
finding_result_iterator = client.list_findings(
source_name, filter_='category="MEDIUM_RISK_ONE"'
)
# Iterate an print all finding names and the resource they are
# in reference to.
for i, finding_result in enumerate(finding_result_iterator):
print(
"{}: name: {} resource: {}".format(
i, finding_result.finding.name, finding_result.finding.resource_name
)
)
# [END list_filtered_findings]
assert i > 0
def test_list_findings_at_time(source_name):
# [START list_findings_at_a_time]
from google.cloud import securitycenter
from google.protobuf.timestamp_pb2 import Timestamp
from datetime import timedelta, datetime
# Create a new client.
client = securitycenter.SecurityCenterClient()
# source_name is the resource path for a source that has been
# created previously (you can use list_sources to find a specific one).
# Its format is:
# source_name = "organizations/{organization_id}/sources/{source_id}"
# e.g.:
# source_name = "organizations/111122222444/sources/1234"
# You an also use a wild-card "-" for all sources:
# source_name = "organizations/111122222444/sources/-"
five_days_ago = Timestamp()
five_days_ago.FromDatetime(datetime.now() - timedelta(days=5))
# [END list_findings_at_a_time]
i = -1
five_days_ago.FromDatetime(datetime(2019, 3, 5, 0, 0, 0))
# [START list_findings_at_a_time]
finding_result_iterator = client.list_findings(source_name, read_time=five_days_ago)
for i, finding_result in enumerate(finding_result_iterator):
print(
"{}: name: {} resource: {}".format(
i, finding_result.finding.name, finding_result.finding.resource_name
)
)
# [END list_findings_at_a_time]
assert i == -1
def test_get_iam_policy(source_name):
"""Gives a user findingsEditor permission to the source."""
user_email = "csccclienttest@gmail.com"
# [START get_source_iam]
from google.cloud import securitycenter
from google.iam.v1 import policy_pb2
client = securitycenter.SecurityCenterClient()
# source_name is the resource path for a source that has been
# created previously (you can use list_sources to find a specific one).
# Its format is:
# source_name = "organizations/{organization_id}/sources/{source_id}"
# e.g.:
# source_name = "organizations/111122222444/sources/1234"
# Get the old policy so we can do an incremental update.
policy = client.get_iam_policy(source_name)
print("Policy: {}".format(policy))
# [END get_source_iam]
def test_group_all_findings(organization_id):
"""Demonstrates grouping all findings across an organization."""
# [START group_all_findings]
from google.cloud import securitycenter
# Create a client.
client = securitycenter.SecurityCenterClient()
# organization_id is the numeric ID of the organization. e.g.:
# organization_id = "111122222444"
org_name = "organizations/{org_id}".format(org_id=organization_id)
# The "sources/-" suffix lists findings across all sources. You
# also use a specific source_name instead.
all_sources = "{org_name}/sources/-".format(org_name=org_name)
group_result_iterator = client.group_findings(all_sources, group_by="category")
for i, group_result in enumerate(group_result_iterator):
print((i + 1), group_result)
# [END group_all_findings]
assert i > 0
def test_group_filtered_findings(source_name):
"""Demonstrates grouping all findings across an organization."""
# [START group_filtered_findings]
from google.cloud import securitycenter
# Create a client.
client = securitycenter.SecurityCenterClient()
# source_name is the resource path for a source that has been
# created previously (you can use list_sources to find a specific one).
# Its format is:
# source_name = "organizations/{organization_id}/sources/{source_id}"
# e.g.:
# source_name = "organizations/111122222444/sources/1234"
group_result_iterator = client.group_findings(
source_name, group_by="category", filter_='state="ACTIVE"'
)
for i, group_result in enumerate(group_result_iterator):
print((i + 1), group_result)
# [END group_filtered_findings]
assert i == 0
def test_group_findings_at_time(source_name):
"""Demonstrates grouping all findings across an organization as of
a specific time."""
i = -1
# [START group_findings_at_time]
from datetime import datetime, timedelta
from google.cloud import securitycenter
from google.protobuf.timestamp_pb2 import Timestamp
# Create a client.
client = securitycenter.SecurityCenterClient()
# source_name is the resource path for a source that has been
# created previously (you can use list_sources to find a specific one).
# Its format is:
# source_name = "organizations/{organization_id}/sources/{source_id}"
# e.g.:
# source_name = "organizations/111122222444/sources/1234"
# Group findings as of yesterday.
read_time = datetime.utcnow() - timedelta(days=1)
timestamp_proto = Timestamp()
timestamp_proto.FromDatetime(read_time)
group_result_iterator = client.group_findings(
source_name, group_by="category", read_time=timestamp_proto
)
for i, group_result in enumerate(group_result_iterator):
print((i + 1), group_result)
# [END group_filtered_findings_at_time]
assert i == -1
def test_group_findings_and_changes(source_name):
"""Demonstrates grouping all findings across an organization and
associated changes."""
# [START group_filtered_findings_with_changes]
from datetime import timedelta
from google.cloud import securitycenter
from google.protobuf.duration_pb2 import Duration
# Create a client.
client = securitycenter.SecurityCenterClient()
# source_name is the resource path for a source that has been
# created previously (you can use list_sources to find a specific one).
# Its format is:
# source_name = "organizations/{organization_id}/sources/{source_id}"
# e.g.:
# source_name = "organizations/111122222444/sources/1234"
# List assets and their state change the last 30 days
compare_delta = timedelta(days=30)
# Convert the timedelta to a Duration
duration_proto = Duration()
duration_proto.FromTimedelta(compare_delta)
group_result_iterator = client.group_findings(
source_name, group_by="state_change", compare_duration=duration_proto
)
for i, group_result in enumerate(group_result_iterator):
print((i + 1), group_result)
# [END group_findings_with_changes]
assert i == 0
| silverdev/google-cloud-python | securitycenter/docs/snippets_findings.py | snippets_findings.py | py | 20,863 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.environ",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pytest.fixture",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "google.cloud.securitycenter.SecurityCenterClient",
"line_number": 16,
"usage_type": "call"
},
{
"api... |
37005580941 | #coding: utf-8
import tornado.web
from basehandler import BaseHandler
from lib.lrclib import LrcLib
from lib.songinfo import SongInfo
from models import MusicModel, UserModel
import json
import re
import logging
log = logging.getLogger("index")
log.setLevel(logging.DEBUG)
rm_regex = r"/(\([^\)]*\))|(\[[^\]]*\])|(([^)]*))|(【[^】]*】)|((-|\/|&).*)/g"
def simplify(string):
return re.sub(rm_regex, "", string)
class IndexHandler(BaseHandler):
    """Main page handler: serves the player page and resolves lyrics/metadata
    for a song posted by the front-end, caching results via MusicModel."""
    def __init__(self, application, request, **kwargs):
        self._lrclib = LrcLib()
        self._info = SongInfo()
        self._music = MusicModel()
        self._users = UserModel()
        super(IndexHandler, self).__init__(application, request, **kwargs)
    def get(self):
        """Render index.html, filling in the account name if logged in."""
        user_id = self.current_user
        log.debug("user_id is {0}".format(user_id))
        # if not user_id:
        #     return self.render('login.html')
        account = ""
        if user_id:
            user = self._users.get_one(user_id)
            log.debug(user)
            if user:
                account = user["account"]
        return self.render('index.html', current_user = account, domain=self.request.full_url())
    def on_error(self):
        """Write the generic JSON error payload ({"code": 1})."""
        return self.write(json.dumps({
            "code": 1
        }))
    def post(self):
        """Resolve lyrics + song metadata for the posted song.

        Flow: try the cache (MusicModel.get_rep); on a full hit answer
        immediately.  Otherwise fetch lyrics from LrcLib (with simplified
        title/artist) and, if metadata is also missing, fetch and reshape
        it from SongInfo, then store everything back in the cache.
        """
        request = json.loads(self.request.body)
        song = dict(
            sid = request.get("songId"),
            artist = request.get("artist"),
            title = request.get("title"),
            channel = request.get("channel"),
            share_url = request.get("shareUrl"),
            album_img = request.get("albumImgUrl"),
            start_time = request.get("startTime")
        )
        lrc, song_info = self._music.get_rep(song["sid"])
        if lrc and song_info:
            # Full cache hit: only the client-supplied start time changes.
            lrc.update({"startTime": song["start_time"]})
            return self.write(json.dumps({
                "code": 0,
                "lyricsInfo": lrc,
                "songInfo": song_info
            }))
        # Cache miss (at least for lyrics): query the lyrics provider with
        # decoration-stripped title/artist for better match rates.
        lrc = self._lrclib.getlrc(simplify(song["title"]), simplify(song["artist"]))
        if not song_info:
            info_res = self._info.get_info(song["share_url"])
            if not info_res:
                return self.on_error()
            song_info = info_res["song"][0]
            # Reshape the provider's field names into the front-end schema.
            song_info = {
                "album": song_info["albumtitle"],
                "albumId": song_info["aid"],
                "albumImgUrl": song_info["picture"],
                "albumUrl": song_info["album"],
                "artist": song_info["artist"],
                "company": song_info["company"],
                "duration": song_info["length"],
                "mp3Url": song_info["url"],
                "rating": song_info["rating_avg"],
                "releaseYear": song_info["public_time"],
                "songId": song_info["sid"],
                "ssid": song_info["ssid"],
                "startToken": song_info["start_token"],
                "title": song_info["title"],
                "shareUrl": song["share_url"]
            }
        response = json.dumps({
            "code": 0,
            "lyricsInfo": {
                "lyrics": lrc,
                "offset": 0,
                "startTime": song["start_time"]
            },
            "songInfo": song_info
        })
        # Persist for the next request before answering.
        self._music.set_rep(song["sid"], dict(lyrics=lrc,offset=0), song_info)
        return self.write(response)
| mgbaozi/ruankusb | handlers/index.py | index.py | py | 3,525 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "re.sub",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "basehandler.BaseHandler",... |
8207780556 | import base64
from django.shortcuts import render
from django.http import JsonResponse
from django.views.decorators.cache import never_cache
from stolen_wiki_game.models import Article
# Create your views here.
def index(request):
    """Serve the static game page; article data is fetched separately via /article."""
    return render(request, 'stolen_wiki_game/index.html', {})
@never_cache
def article(request):
    """Return a random article slug (base64-obfuscated) as JSON.

    @never_cache keeps clients from reusing a previously served article.
    """
    art = Article.objects.order_by('?').first()
    # Base64-encode the slug so the answer is not readable in plain text.
    # NOTE(review): assumes slugs are ASCII-only; a non-ASCII slug would
    # raise UnicodeEncodeError here -- confirm against the Article model.
    encoded = base64.b64encode(art.slug.encode('ascii')).decode('ascii')
    # NOTE(review): 'token'/'yesterday' look like placeholder values the
    # front-end expects but does not yet use -- confirm before changing.
    return JsonResponse({
        'article': encoded,
        'redactleIndex': 0,
        'token': 'abc',
        'yesterday': 'blah'
    })
| Jack-Naughton/homepage | stolen_wiki_game/views.py | views.py | py | 596 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.shortcuts.render",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "stolen_wiki_game.models.Article.objects.order_by",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "stolen_wiki_game.models.Article.objects",
"line_number": 17,
"us... |
32822098386 | # coding:utf-8
from flask import request
from flask import Flask, render_template
from controller import search, getPage, get_suggestion
import sys
import json
# reload(sys)
# sys.setdefaultencoding('utf-8')
app = Flask(__name__)
@app.route('/')
def index():
    """Health-check endpoint confirming the search server is up."""
    return "the server is running!"
    # return render_template('/result.html',name = 'zhangsan',)
@app.route('/search', methods=['GET'])
def do_search():
    """Run a search for ?query= using ?method= and return result + timing as JSON."""
    print(request.args)
    params = {
        'query': request.args.get('query'),
        'method': request.args.get('method')
    }
    res = search(params)
    # ensure_ascii=False keeps CJK text readable in the JSON payload.
    return json.dumps({
        'status': 1,
        'result': res['result'],
        'time': res['time']
    }, ensure_ascii=False)
@app.route('/page', methods=['GET'])
def page():
    """Fetch the cached page body for the document id given in ?id=."""
    doc_id = request.args.get('id')
    payload = {'status': 1, 'page': getPage(doc_id)}
    # ensure_ascii=False keeps CJK text readable in the JSON payload.
    return json.dumps(payload, ensure_ascii=False)
@app.route('/suggestion', methods=['GET'])
def suggestion():
    """Return query-completion suggestions for ?query= as JSON."""
    user_query = request.args.get('query')
    payload = {'status': 1, 'queries': get_suggestion(user_query)}
    # ensure_ascii=False keeps CJK text readable in the JSON payload.
    return json.dumps(payload, ensure_ascii=False)
if __name__ == '__main__':
app.run(host="0.0.0.0", port=5000, debug=False)
| QimingZheng/WSM-Project-WikiSearch | app/index.py | index.py | py | 1,221 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "flask.request.args.... |
6251031632 | import matplotlib.pyplot as plt
import eel
eel.init("web")
userid = []
@eel.expose
def login(uid, upass):
    """Authenticate *uid*/*upass* against data.txt.

    Returns "false" for a wrong password or unknown id, otherwise the tuple
    (record_fields, role) where role is "voter", "candidate" or "staff".
    """
    # 'with' guarantees the handle is closed; the original get.close() was
    # unreachable because every branch above it returned first.
    with open("data.txt", "r") as data_file:
        udata = []
        for line in data_file:
            if line.startswith(uid):
                udata = line.split("|")
    # Unknown id previously crashed with IndexError on udata[2]; treat it
    # the same as a bad password.
    if not udata or udata[2] != upass:
        return "false"
    if udata[0].startswith("vt"):
        return udata, "voter"
    if udata[0].startswith("ca"):
        return udata, "candidate"
    if udata[0].startswith("st"):
        return udata, "staff"
    return "false"
def vsort():
    """Canonicalise data.txt and rebuild index.txt.

    Records are regrouped as candidates ("ca*"), then staff ("st*"), then
    voters ("vt*"), each group sorted, with the single "system|..." status
    line written last.  index.txt is then regenerated with one
    "<line-number>|<record-id>|" entry per line of data.txt so lookups can
    address records by line number.
    """
    get = open("data.txt", "r")
    get.seek(0)
    temp = get.readlines()
    # Bucket every line of the flat file by its record-type prefix.
    v = []
    c = []
    s = []
    l = ""
    for i in range(len(temp)):
        if temp[i].startswith("vt"):
            v.append(temp[i])
        if temp[i].startswith("st"):
            s.append(temp[i])
        if temp[i].startswith("ca"):
            c.append(temp[i])
        if temp[i].startswith("system"):
            l = temp[i]
    # Rewrite the data file: candidates, staff, voters (each sorted),
    # then the system status line.
    fout = open("data.txt", "w")
    v.sort()
    c.sort()
    s.sort()
    for j in range(len(c)):
        final = f"{c[j]}"
        fout.write(final)
    for j in range(len(s)):
        final = f"{s[j]}"
        fout.write(final)
    for j in range(len(v)):
        final = f"{v[j]}"
        fout.write(final)
    fout.write(l)
    fout.close()
    # Rebuild index.txt from the freshly sorted file.
    fin1 = open("data.txt","r")
    fin2 = open("index.txt","w")
    fin1.seek(0)
    details = fin1.readlines()
    for i in range(len(details)):
        data = details[i].split("|")
        fin2.write(f"{i}|{data[0]}|\n")
    fin1.close()
    fin2.close()
    get.close()
@eel.expose
def display_teams():
gets = open("data.txt", "r")
temp = gets.readlines()
clist = []
iget = open("index.txt","r")
index = iget.readlines()
cid=[]
fdata = []
for i in range(len(index)):
data = index[i].split("|")
if data[1].startswith("ca"):
cid.append(data[0])
for i in cid:
i=int(i)
clist.append(temp[i])
for i in range(len(clist)):
data = clist[i].split("|")
fdata.append(f"{data[0]}|{data[1]}|{data[3]}|")
print(fdata)
gets.close()
iget.close()
return fdata
@eel.expose
def display_voter(uid):
    """Return the split record fields for voter *uid*, located via index.txt.

    Returns an empty list when *uid* is not present in the index.
    """
    with open("data.txt", "r") as data_file:
        records = data_file.readlines()
    with open("index.txt", "r") as index_file:
        index_lines = index_file.readlines()
    fdata = []
    for entry in index_lines:
        fields = entry.split("|")
        if fields[1].startswith(uid):
            # index.txt stores "<line-number>|<record-id>|", so the first
            # field addresses the matching line of data.txt directly.
            fdata = records[int(fields[0])].split("|")
    return fdata
def system_status():
    """Return True/False according to the "system|true|"/"system|false|"
    status line in data.txt (None for any other value).

    Raises IndexError when no "system" line exists, matching the original
    behaviour for a corrupt data file.
    """
    data = []
    # 'with' replaces the original get.close(), which was never executed
    # whenever one of the return statements below fired.
    with open("data.txt", "r") as data_file:
        for line in data_file:
            if line.startswith("system"):
                data = line.split("|")
    if data[1] == "true":
        return True
    if data[1] == "false":
        return False
    return None
@eel.expose
def cast_vote(cid,uid):
iget=open("index.txt","r")
index = iget.readlines()
get = open("data.txt","r")
temp = get.readlines()
cindex = ""
vindex = ""
cdata = []
vdata = []
for i in range(len(index)):
data = index[i].split("|")
if data[1].startswith(cid):
cindex = data[0]
if data[1].startswith(uid):
vindex = data[0]
vidx =int(vindex)
cidx =int(cindex)
vdata = temp[vidx].split("|")
cdata = temp[cidx].split("|")
val = system_status()
if val:
if vdata[5] == "false":
vote = int(cdata[4])
vote += 1
cdata[4] = vote
vdata[5] = "true"
cupdate = f"{cdata[0]}|{cdata[1]}|{cdata[2]}|{cdata[3]}|{cdata[4]}|\n"
vupdate = f"{vdata[0]}|{vdata[1]}|{vdata[2]}|{vdata[3]}|{vdata[4]}|{vdata[5]}|\n"
fout = open("data.txt","w")
for i in range(len(temp)):
if temp[i].startswith(vdata[0]):
temp[i] = vupdate
if temp[i].startswith(cdata[0]):
temp[i] = cupdate
for i in range(len(temp)):
fout.write(temp[i])
fout.close()
iget.close()
get.close()
return "voted successfully"
else:
iget.close()
get.close()
return "it seems you have already voted"
else:
return("Vote System is Inactive")
@eel.expose
def profile(vid):
    """Look up *vid* via index.txt and return (record_fields, "true").

    Raises ValueError (int("") on the empty index hit) when *vid* is not
    present -- callers currently rely on the happy path only.
    """
    print(vid)
    # 'with' closes both handles even on the not-found error path, where
    # the original leaked them.
    with open("index.txt", "r") as index_file:
        index = index_file.readlines()
    with open("data.txt", "r") as data_file:
        temp = data_file.readlines()
    pid = ""
    for entry in index:
        data = entry.split("|")
        if data[1] == vid:
            pid = data[0]
    # index.txt stores "<line-number>|<record-id>|", so pid addresses the
    # matching line of data.txt directly.
    pid = int(pid)
    user_data = temp[pid].split("|")
    print(user_data)
    return user_data, "true"
@eel.expose
def winner():
    """Tally candidate votes, save a bar chart to web/output1, and return
    the name of the team with the most votes."""
    cid = []
    c_teams = []
    c_votes = []
    with open("index.txt", "r") as iget, open("data.txt", "r") as get:
        index = iget.readlines()
        temp = get.readlines()
    # Collect the data.txt line numbers of all candidate ("ca*") records.
    for entry in index:
        data = entry.split("|")
        if data[1].startswith("ca"):
            cid.append(data[0])
    # Candidate record layout: id|name|password|team|votes|
    for i in cid:
        data = temp[int(i)].split("|")
        c_teams.append(data[3])
        c_votes.append(int(data[4]))
    name = c_teams[c_votes.index(max(c_votes))]
    plt.bar(c_teams, c_votes)
    # Set the labels BEFORE saving: previously they were applied after
    # plt.savefig(), so the exported image had no title or axis labels.
    plt.title('winner')
    plt.xlabel('teams')
    plt.ylabel('Votes')
    plt.savefig("web\output1", facecolor='w', bbox_inches="tight",pad_inches=0.3, transparent=True)
    return name
@eel.expose
def system_mod(uid,override_key):
sid = []
iget = open("index.txt","r")
get = open("data.txt","r")
index = iget.readlines()
temp = get.readlines()
details = temp
flag = False
for i in range(len(index)):
data = index[i].split("|")
if data[1].startswith("st"):
sid.append(data[0])
for i in sid:
i = int(i)
data = temp[i].split("|")
if data[0].startswith(uid):
if data[3].startswith(override_key):
flag = True
if flag:
for i in range(len(details)):
if details[i].startswith("system"):
s_data = details[i].split("|")
if s_data[1] == "true":
s_data[1] = "false"
s_final = f"{s_data[0]}|{s_data[1]}|"
for j in range(len(details)):
if details[j].startswith("system"):
details[j] = s_final
fout = open("data.txt", "r+")
for k in range(len(details)):
op = f"{details[k]}"
fout.write(op)
fout.close()
vsort()
return "System is Inactive"
elif s_data[1] == "false":
s_data[1] = "true"
s_final = f"{s_data[0]}|{s_data[1]}|"
for j in range(len(details)):
if details[j].startswith("system"):
details[j] = ""
details[j] = s_final
fout = open("data.txt", "r+")
for k in range(len(details)):
op = f"{details[k]}"
fout.write(op)
fout.close()
vsort()
return "system is active"
@eel.expose
def reset(uid,override_key):
sid = []
key = str(override_key)
iget = open("index.txt","r")
get = open("data.txt","r")
index = iget.readlines()
temp = get.readlines()
details = temp
flag = False
for i in range(len(index)):
data = index[i].split("|")
if data[1].startswith("st"):
sid.append(data[0])
for i in sid:
i = int(i)
data = temp[i].split("|")
if data[0].startswith(uid):
if data[3].startswith(key):
flag = True
print("pass")
if flag:
for i in range(len(details)):
if details[i].startswith("vt"):
data = details[i].split("|")
data[5] = "false"
vupdate = f"{data[0]}|{data[1]}|{data[2]}|{data[3]}|{data[4]}|{data[5]}|\n"
for i in range(len(details)):
if details[i].startswith(data[0]):
details[i] = vupdate
fout = open("data.txt", "w")
for j in range(len(details)):
op = f"{details[j]}"
fout.write(op)
fout.close()
if details[i].startswith("ca"):
cdata = details[i].split("|")
cdata[4] = 0
cupdate = f"{cdata[0]}|{cdata[1]}|{cdata[2]}|{cdata[3]}|{cdata[4]}|\n"
for i in range(len(details)):
if details[i].startswith(cdata[0]):
details[i] = cupdate
fout = open("data.txt", "w")
for j in range(len(details)):
op = f"{details[j]}"
fout.write(op)
fout.close()
vsort()
return "System Data is been formated"
else:
return "Failed"
@eel.expose
def add_voter(name, password, age, ano):
    """Register a new voter in data.txt with the next free "vtNNNN" id.

    Rejects a duplicate Aadhaar number *ano*.  Returns a status string.
    """
    with open("data.txt", "r") as data_file:
        details = data_file.readlines()
    voters = []
    for line in details:
        if line.startswith("vt"):
            fields = line.split("|")
            # The Aadhaar number (field 4) is the unique key for voters.
            if fields[4] == ano:
                return "person already exsist"
            voters.append(line)
    # Next id = numeric suffix of the highest existing voter id + 1.
    # Fixes two bugs: an IndexError (voters[-1]) when no voter existed, and
    # a missing increment for counts >= 999 which produced duplicate ids
    # (the live data already contains vt1001+).
    if voters:
        count = int(voters[-1].split("|")[0][2:])
    else:
        count = 0
    new_id = "vt" + str(count + 1).zfill(4)
    status = "false"
    final = f"{new_id}|{name}|{password}|{age}|{ano}|{status}|\n"
    details.append(final)
    with open("data.txt", "w") as fout:
        fout.writelines(details)
    vsort()
    return "Voter added successfully"
@eel.expose
def remove_voter(uid):
    """Delete voter *uid* from data.txt and return the removed voter's name.

    Returns "false" when *uid* is not a voter id or is not present.
    """
    if uid[0:2] != "vt":
        return "false"
    data = []
    with open("data.txt", "r") as data_file:
        details = data_file.readlines()
    for i in range(len(details)):
        if details[i].startswith(uid):
            data = details[i].split("|")
            details[i] = ""
    if not data:
        # Previously an unknown id crashed with IndexError on data[1].
        return "false"
    # Open with "w" so the file is truncated.  The original "r+" rewrite
    # left a stale tail behind (the content got shorter by one line), which
    # vsort() then re-ingested as a duplicate partial record.
    with open("data.txt", "w") as fout:
        fout.writelines(details)
    vsort()
    return f"{data[1]}"
@eel.expose
def modify_voter(uid,name,password,age,ano):
vid = []
get = open("data.txt","r")
iget = open("index.txt","r")
details=get.readlines()
index = iget.readlines()
for i in range(len(index)):
data = index[i].split("|")
if data[1].startswith(uid):
vid = data[0]
vid = int(vid)
v_data = details[vid].split("|")
v_data[1] = name
v_data[2] = password
v_data[3] = age
v_data[4] = ano
vupdate = f"{v_data[0]}|{v_data[1]}|{v_data[2]}|{v_data[3]}|{v_data[4]}|{v_data[5]}|\n"
for i in range(len(details)):
if details[i].startswith(v_data[0]):
details[i]=vupdate
fout = open("data.txt", "r+")
for j in range(len(details)):
op = f"{details[j]}"
fout.write(op)
fout.close()
get.close()
iget.close()
vsort()
return "data Modified"
@eel.expose
def add_candidate(name,password,teamname):
get = open("data.txt","r")
details = get.readlines()
count = 0
candi=[]
for i in range(len(details)):
if details[i].startswith("ca"):
data = details[i].split("|")
if data[1] == name or data[3] == teamname:
return "team already exsist"
for i in range(len(details)):
if details[i].startswith("ca"):
candi.append(details[i])
temp = candi[-1].split("|")
id = temp[0]
count = id[2:]
b = int(count)
count = b
if count < 9:
count = count+1
vid = "000"+str(count)
elif count<99:
count+=1
vid = "00"+str(count)
elif count<999:
count+=1
vid = "0"+str(count)
else:
vid = str(count)
id = "ca"+vid
votes = 0
final = f"{id}|{name}|{password}|{teamname}|{votes}|\n"
details.append(final)
fout = open("data.txt", "w")
for j in range(len(details)):
op = f"{details[j]}"
fout.write(op)
fout.close()
vsort()
get.close()
return "Team Added Succesfully"
@eel.expose
def remove_candidate(uid):
get = open("data.txt","r")
details = get.readlines()
for i in range(len(details)):
if details[i].startswith(uid):
clear = ""
data = details[i].split("|")
details[i] = clear
fout = open("data.txt", "r+")
for j in range(len(details)):
op = f"{details[j]}"
fout.write(op)
fout.close()
vsort()
get.close()
return data[3]
@eel.expose
def display_candidate(vid):
gets = open("data.txt", "r")
temp = gets.readlines()
clist = []
iget = open("index.txt","r")
index = iget.readlines()
cid=[]
fdata = []
for i in range(len(index)):
data = index[i].split("|")
if data[1].startswith(vid):
cid.append(data[0])
for i in cid:
i=int(i)
clist.append(temp[i])
for i in range(len(clist)):
data = clist[i].split("|")
fdata = data
#print(fdata)
gets.close()
iget.close()
return fdata
@eel.expose
def modify_candidate(uid,name,password,teamname):
vid = []
get = open("data.txt","r")
iget = open("index.txt","r")
details=get.readlines()
index = iget.readlines()
for i in range(len(index)):
data = index[i].split("|")
if data[1].startswith(uid):
vid = data[0]
vid = int(vid)
v_data = details[vid].split("|")
v_data[1] = name
v_data[2] = password
v_data[3] = teamname
vupdate = f"{v_data[0]}|{v_data[1]}|{v_data[2]}|{v_data[3]}|{v_data[4]}|\n"
for i in range(len(details)):
if details[i].startswith(v_data[0]):
details[i]=vupdate
fout = open("data.txt", "r+")
for j in range(len(details)):
op = f"{details[j]}"
fout.write(op)
fout.close()
get.close()
iget.close()
vsort()
return "Modified Successfully"
@eel.expose
def allvoter():
    """Return (list of raw voter lines from data.txt, their count)."""
    with open("data.txt", "r") as data_file:
        voters = [line for line in data_file if line.startswith("vt")]
    return voters, len(voters)
@eel.expose
def allcandidate():
    """Return (list of raw candidate lines from data.txt, their count)."""
    with open("data.txt", "r") as data_file:
        candidate = [line for line in data_file if line.startswith("ca")]
    return candidate, len(candidate)
eel.start("index.html")
#if __name__ == '__main__':
#allvoter()
#vsort()
#login("vt1001","1001")
#cast_vote("ca1001","vt1008")
#profile("vt1001")
#profile("ca0002")
#profile("st1002")
#display_teams()
# val = display_voter("vt0002")
# print(val)
#system_status()
#val=winner()
#print(val)
#system_mod("st1001","5656")
#val=reset("st0001","5656")
#print(val)
#add_voter("pheonix","20","2101","1010")
#remove_voter("vt0004")
#modify_voter("vt1009","breach","25","1009","3102")
#add_candidate("thanos","1004","genysis")
#remove_candidate("ca0003")
#modify_candidate("ca1003","brimstone","1003","valo")
| vish0290/Polling-System-File-Structures | main.py | main.py | py | 17,483 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "eel.init",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "eel.expose",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "eel.expose",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "eel.expose",
"line_number"... |
22457592991 | import torch
from transformers import BertTokenizer, BertModel, BertConfig, AdamW, BertForMaskedLM
from tokenizers import ByteLevelBPETokenizer
from tokenizers.implementations import ByteLevelBPETokenizer
from tokenizers.processors import BertProcessing
from sumerian_data import SumerianDataset
from typing import List, Tuple
import numpy as np
from tqdm import tqdm
import os
DEVICE = torch.device("cuda")
DATA_PATH = r"../data/sumerian_document_set.atf"
VOCAB_PATH = r"../data/sumerian_vocab_list"
def save_corpus(save_path: str, dataset: torch.utils.data.Dataset) -> None:
    """Write the dataset's corpus to <save_path>/processed_corpus.txt,
    one document per line with tokens joined by single spaces.

    Creates *save_path* (including any missing parents) if needed.
    """
    # makedirs(exist_ok=True) replaces the mkdir/try/except dance and also
    # handles nested paths that a bare mkdir() could not create.
    os.makedirs(save_path, exist_ok=True)
    docs = dataset.get_corpus()
    processed_doc = [" ".join(doc) for doc in docs]
    with open(os.path.join(save_path, r"processed_corpus.txt"), "w") as fp:
        fp.write("\n".join(processed_doc))
class SumarianBERTDataset(torch.utils.data.Dataset):
    """Line-level dataset over a preprocessed corpus file for BERT training.

    Wraps a byte-level BPE tokenizer (with BERT <s>/</s> post-processing and
    512-token truncation) and holds an 80/20 random train/test split of the
    tokenized lines.  *evaluate* selects which split __len__/__getitem__ use.
    """
    def __init__(self, tokenizer: ByteLevelBPETokenizer, data_path: str, evaluate: bool = False):
        self.evaluate = evaluate
        self.tokenizer = tokenizer
        # Wrap every encoded line as "<s> ... </s>" the way BERT expects.
        self.tokenizer._tokenizer.post_processor = BertProcessing(
            ("</s>", tokenizer.token_to_id("</s>")),
            ("<s>", tokenizer.token_to_id("<s>")),
        )
        self.tokenizer.enable_truncation(max_length=512)
        self.training_labels, self.test_labels, self.training_mask, self.test_mask = self.__get_split(data_path)
    # NOTE: annotation corrected -- this returns four lists, not two.
    def __get_split(self, data_path: str) -> Tuple[list, list, list, list]:
        """Tokenize every line of *data_path* and split 80/20 at random.

        Returns (training_labels, test_labels, training_mask, test_mask),
        where labels are token-id lists and masks are attention masks.
        """
        with open(data_path, "r") as file:
            lines = file.read().split("\n")
        lines_tokens = [line for line in self.tokenizer.encode_batch(lines)]
        mask = [x.attention_mask for x in lines_tokens]
        labels = [line.ids for line in lines_tokens]
        # Random permutation makes the 80/20 split unbiased by file order.
        indices = np.random.permutation(len(labels))
        split = int(len(labels) * 0.8)
        training_idxs, test_idxs = indices[:split], indices[split:]
        training_labels, test_labels = [], []
        training_mask, test_mask = [], []
        for train_idx in training_idxs:
            training_labels.append(labels[train_idx])
            training_mask.append(mask[train_idx])
        for test_idx in test_idxs:
            test_labels.append(labels[test_idx])
            test_mask.append(mask[test_idx])
        return training_labels, test_labels, training_mask, test_mask
    def __len__(self):
        if self.evaluate:
            return len(self.test_labels)
        else:
            return len(self.training_labels)
    def __getitem__(self, i):
        # Returns (token_ids, attention_mask) as float tensors; padding and
        # MLM masking happen later in the collate function.
        if self.evaluate:
            return (
                torch.tensor(self.test_labels[i]).type(torch.float),
                torch.tensor(self.test_mask[i]).type(torch.float)
            )
        else:
            return (
                torch.tensor(self.training_labels[i]).type(torch.float),
                torch.tensor(self.training_mask[i]).type(torch.float)
            )
def collate_fn_padd(batch):
    '''
    Padds batch of variable length and applies BERT-style MLM masking.
    note: it converts things ToTensor manually here since the ToTensor transform
    assume it takes in images rather than arbitrary tensors.
    Returns (labels, attention_mask, input_ids, lengths, pad_mask), each
    transposed to batch-first.
    '''
    labels = [item[0] for item in batch]
    att_mask = [item[1] for item in batch]
    ## get sequence lengths
    lengths = torch.tensor([ t.shape[0] for t in labels ]).to(DEVICE)
    ## padd
    labels = [ torch.Tensor(t).to(DEVICE) for t in labels ]
    labels = torch.nn.utils.rnn.pad_sequence(labels)
    att_mask = [ torch.Tensor(t).to(DEVICE) for t in att_mask ]
    att_mask = torch.nn.utils.rnn.pad_sequence(att_mask)
    ## compute mask
    mask = (labels != 0).to(DEVICE)
    input_ids = labels.detach().clone()
    # Select ~15% of positions for MLM, excluding token ids 0/1/2
    # (<s>/<pad>/</s> per the tokenizer's special-token ordering).
    rand = torch.rand(input_ids.shape)
    mask_arr = (rand < .15) * (input_ids != 0) * (input_ids != 1) * (input_ids != 2)
    for i in range(input_ids.shape[0]):
        selection = torch.flatten(mask_arr[i].nonzero()).tolist()
        # NOTE(review): id 3 is <unk> in the declared vocab ordering; the
        # usual MLM convention would substitute <mask> (id 4) -- confirm.
        input_ids[i, selection] = 3
    return labels.T, att_mask.T, input_ids.T, lengths.T, mask.T
# model = BertModel.from_pretrained("bert-base-multilingual-cased")
# text = "Replace me by any text you'd like."
# encoded_input = tokenizer(text, return_tensors='pt')
# output = model(**encoded_input)
def main():
SAVE_CORPUS=False
MAKE_TOKENIZER=False
dataset = SumerianDataset(DATA_PATH, VOCAB_PATH)
save_path=r"../data/processed_data/"
if SAVE_CORPUS:
save_corpus(save_path, dataset)
if MAKE_TOKENIZER:
vocab_size = dataset.get_vocab_size()
tokenizer = ByteLevelBPETokenizer()
tokenizer.train(
files=os.path.join(save_path, r"processed_corpus.txt"),
vocab_size=vocab_size,
min_frequency=2,
special_tokens=[
"<s>",
"<pad>",
"</s>",
"<unk>",
"<mask>",
])
try:
os.mkdir(r"../tokenizer/")
except FileExistsError:
print("Tokenizer directory path already exists.")
tokenizer.save_model(r"../tokenizer/")
tokenizer = ByteLevelBPETokenizer(
"../tokenizer/vocab.json",
"../tokenizer/merges.txt",
)
BERT_dataset = SumarianBERTDataset(
tokenizer,
os.path.join(save_path, r"processed_corpus.txt"),
evaluate=False)
BERT_train_loader = torch.utils.data.DataLoader(
BERT_dataset,
batch_size=16,
shuffle=True,
collate_fn=collate_fn_padd)
config = BertConfig(
vocab_size=dataset.get_vocab_size(),
max_position_embeddings=512,
hidden_size=768,
num_attention_heads=12,
num_hidden_layers=12,
type_vocab_size=2
)
# config = BertConfig(
# vocab_size=dataset.get_vocab_size(),
# max_position_embeddings=512,
# hidden_size=768,
# num_attention_heads=4,
# num_hidden_layers=4,
# type_vocab_size=1
# )
model = BertForMaskedLM(config)
multling_model = BertModel.from_pretrained("bert-base-multilingual-cased")
multling_params = multling_model.state_dict()
# Remove params that are a mismatch with current model.
del multling_params["embeddings.position_ids"]
del multling_params['embeddings.word_embeddings.weight']
del multling_params['embeddings.position_embeddings.weight']
del multling_params['embeddings.token_type_embeddings.weight']
del multling_params['embeddings.LayerNorm.weight']
del multling_params['embeddings.LayerNorm.bias']
model.load_state_dict(multling_params, strict=False)
model.to(DEVICE)
print("Number of parameters: ", end="")
print(model.num_parameters())
model.train()
optim = AdamW(model.parameters(), lr=1e-4)
epochs = 2
for epoch in range(epochs):
# setup loop with TQDM and dataloader
loop = tqdm(BERT_train_loader, leave=True)
for batch in loop:
optim.zero_grad()
labels, attention_mask, input_ids, lengths, mask = batch
input_ids.to(DEVICE)
attention_mask.to(DEVICE)
labels.to(DEVICE)
outputs = model(input_ids.long(), attention_mask=attention_mask, labels=labels.long().contiguous())
loss = outputs.loss
loss.backward()
optim.step()
loop.set_description(f'Epoch {epoch}')
loop.set_postfix(loss=loss.item())
if __name__=="__main__":
main() | sethbassetti/sumerian_embeddings | src/BERTmodel.py | BERTmodel.py | py | 7,800 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.device",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_numbe... |
17541028772 | #/usr/bin/env python
from pwn import *
from Crypto.Util.number import bytes_to_long, long_to_bytes
from Crypto.Cipher import AES
import hashlib
import os
import base64
from gmpy2 import is_prime
class Rng:
    """Deterministic byte stream: SHA-256 of an incrementing 32-byte counter
    initialised from *seed* (hash-DRBG-style), consumed via getbits()."""
    def __init__(self, seed):
        self.seed = seed
        self.generated = b""   # buffered, not-yet-consumed output bytes
        self.num = 0           # number of buffered bits
    def more_bytes(self):
        """Append one SHA-256 block (256 bits) of output to the buffer."""
        self.generated += hashlib.sha256(self.seed).digest()
        # increase seed by 1 and ensure the seed is 32 bytes long (prepend with NULL bytes)
        self.seed = long_to_bytes(bytes_to_long(self.seed) + 1, 32)
        self.num += 256
    def getbits(self, num=64):
        """Return the next *num* bits of the stream as an integer."""
        while (self.num < num):
            self.more_bytes()
        x = bytes_to_long(self.generated)
        self.num -= num
        self.generated = b""
        # this is not called for our primes
        if self.num > 0:
            self.generated = long_to_bytes(x >> num, self.num // 8)
        # ANDs with 0xffff...ffff to ensure only a NUM length number is returned
        return x & ((1 << num) - 1)
class DiffieHellman:
    """DH over a 512-bit prime drawn from the seeded Rng, generator 5.

    The shared secret starts at 1337 and is XORed with the DH value in
    set_other(), mirroring the challenge server's construction.
    """
    def gen_prime(self):
        """Draw 512-bit values from the RNG until one is prime; the
        iteration count leaks how many candidates were skipped."""
        prime = self.rng.getbits(512)
        iter = 0   # NOTE(review): shadows the builtin iter()
        while not is_prime(prime):
            iter += 1
            prime = self.rng.getbits(512)
        print("Generated after", iter, "iterations")
        return prime
    def __init__(self, seed, prime=None):
        self.rng = Rng(seed)
        if prime is None:
            prime = self.gen_prime()
        self.prime = prime
        self.my_secret = self.rng.getbits()
        self.my_number = pow(5, self.my_secret, prime)
        self.shared = 1337
    def set_other(self, x):
        """Fold the peer's public number into the shared secret."""
        self.shared ^= pow(x, self.my_secret, self.prime)
def pad32(x):
    """Return *x* as exactly 32 bytes: NUL-padded on the left, or truncated
    to its last 32 bytes if longer."""
    return x.rjust(32, b"\x00")[-32:]

def xor32(a, b):
    """Byte-wise XOR of the 32-byte-normalised forms of *a* and *b*."""
    padded_a, padded_b = pad32(a), pad32(b)
    return bytes(u ^ v for u, v in zip(padded_a, padded_b))
def bit_flip(x):
    """XOR *x* with a 32-byte mask decoded from base64.

    NOTE(review): the interactive input() is commented out and the fixed
    mask b'BA==' is used, presumably for scripted solving -- confirm.
    """
    print("bit-flip str:")
    inputstr = b'BA=='
    #inputstr = input().strip()
    flip_str = base64.b64decode(inputstr)
    return xor32(flip_str, x)
def get_values(conn, value):
    """Send one bit-flip *value* to the server and parse its reply.

    Returns [iteration_count, bob_number, iv_bytes, flag_ciphertext_bytes].
    """
    # receive the bit-flip str:
    conn.recvline()
    # send the value
    conn.send(value + b'\n')
    # recv num iterations
    num_iter = str(conn.recvline())
    bob_num = conn.recvline()
    iv_num = conn.recvline()
    flag_num = conn.recvline()
    # Each numeric line has the value as its third space-separated token;
    # iv and ciphertext lines are base64-encoded.
    results = [
        int(num_iter.split(" ")[2:3][0]),
        int(bob_num.decode('ascii').split(" ")[2:3][0]),
        base64.b64decode(iv_num.decode('ascii')),
        base64.b64decode(flag_num.decode('ascii'))
    ]
    return results
def get_num_iter(conn, value):
    """Return only the prime-search iteration count for bit-flip *value*."""
    return get_values(conn, value)[0]
def get_seed(conn):
    """Recover the server's secret seed bit by bit via the iteration-count
    side channel, returning it as 16 bytes.

    For each bit i, two flip masks are compared: if flipping bit i does not
    shift the prime-search iteration count by exactly one, the seed's bit i
    must be set.
    """
    # loop over each bit of 64-bit number
    sol = 0
    maxnum = 128 # 128
    for i in range(1, maxnum):
        # get two values one with and without the ith bit set
        n = sol ^ ((2 ** i) - 2)
        m = sol | (1 << i)
        # base64 encode values
        basen = base64.b64encode(bytes(long_to_bytes(n)))
        basem = base64.b64encode(bytes(long_to_bytes(m)))
        iter_n = get_num_iter(conn, basen)
        iter_m = get_num_iter(conn, basem)
        print("N: %s [%d], M: %s [%d]" % (basen, iter_n, basem, iter_m))
        if(iter_n != iter_m + 1):
            sol = sol | (1 << i)
    print("SOL:" + " "*(135-maxnum) + bin(sol)[2:])
    return long_to_bytes(sol, 16)
def main(conn):
    """Full exploit: recover the seed, rebuild Alice's DH state locally,
    derive the shared key, and decrypt the flag ciphertext."""
    # compute alice_seed
    alice_seed = get_seed(conn)
    print(alice_seed)
    # perform one iteration with arbitrary input to get a sample of values
    results = get_values(conn, b'BA==')
    bobnum = results[1]
    iv = results[2]
    ciphertext = results[3]
    # compute the encryption/decryption key
    alice = DiffieHellman(bit_flip(alice_seed))
    alice.set_other(bobnum)
    # decrypt the ciphertext
    cipher = AES.new(long_to_bytes(alice.shared, 16)[:16], AES.MODE_CBC, IV=iv)
    plaintext = cipher.decrypt(ciphertext)
    print(plaintext)
if __name__ == '__main__':
HOST = "127.0.0.1"
PORT = 1337
conn = remote(HOST, PORT)
main(conn)
| cybernatedwizards/CybernatedWizardsCTF | 2020/DragonCTF_2020/bitflip1/sol.py | sol.py | py | 3,818 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "hashlib.sha256",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "Crypto.Util.number.long_to_bytes",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "Crypto.Util.number.bytes_to_long",
"line_number": 19,
"usage_type": "call"
},
{
"... |
72568366267 | #!/usr/bin/env python
import seaborn
import numpy as np
import os
from collections import OrderedDict
import pandas as pd
import matplotlib.pyplot as plt
import sys
from termcolor import cprint
# Load data
# Global vars for tracking and labeling data at load time.
exp_idx = 0
label_parser_dict = None
smooth_factor = 10
leg_size = 30
subsample_step = 1
load_subsample_step = 50
default_colors = ["blue","orange","green","magenta", "brown", "red",'black',"grey",u'#ff7f0e',
"cyan", "pink",'purple', u'#1f77b4',
"darkorchid","sienna","lightpink", "indigo","mediumseagreen",'aqua',
'deeppink','silver','khaki','goldenrod','y','y','y','y','y','y','y','y','y','y','y','y' ] + ['y']*50
def get_all_runs(logdir, load_subsample_step=1):
    """
    Recursively look through logdir for run outputs.
    Any directory containing a "log.csv" is treated as one run.
    Returns a list of per-run dicts (csv column -> list of values, plus the
    parsed config under the "config" key, or None when absent).
    """
    import json  # BUG FIX: this module never imported json at top level

    global exp_idx
    global units
    datasets = []
    for root, _, files in os.walk(logdir):
        if 'log.csv' not in files:
            continue
        # NOTE(review): assumes an 8-char path prefix such as "storage/" -- confirm.
        run_name = root[8:]
        exp_name = None
        # try to load a config file containing hyperparameters
        config = None
        try:
            # 'with' closes the handle; the original leaked it.
            with open(os.path.join(root, 'config.json')) as config_file:
                config = json.load(config_file)
            if 'exp_name' in config:
                exp_name = config['exp_name']
        except (OSError, ValueError):
            # The original bare "except:" also swallowed the NameError from
            # the missing json import, so configs were silently never loaded.
            print('No file named config.json')
        exp_idx += 1
        # load progress data
        try:
            print(os.path.join(root, 'log.csv'))
            exp_data = pd.read_csv(os.path.join(root, 'log.csv'))
        except Exception:
            raise ValueError("CSV {} faulty".format(os.path.join(root, 'log.csv')))
        exp_data = exp_data[::load_subsample_step]
        data_dict = exp_data.to_dict("list")
        data_dict['config'] = config
        nb_epochs = len(data_dict['frames'])
        print('{} -> {}'.format(run_name, nb_epochs))
        datasets.append(data_dict)
    return datasets
def get_datasets(rootdir, load_only="", load_subsample_step=1, ignore_pattern="ignore"):
    """Load every model directory under `rootdir` into the module-level
    `models_saves` / `labels` / `colors` registries.

    Only directory names that contain `load_only` and do not contain
    `ignore_pattern` are kept.
    """
    _, models_list, _ = next(os.walk(rootdir))
    print(models_list)
    for dir_name in models_list.copy():
        # add "ignore" in a directory name to avoid loading its content
        if ignore_pattern in dir_name or load_only not in dir_name:
            models_list.remove(dir_name)
    # Drop stale label entries for models that were filtered out.
    for expe_name in list(labels.keys()):
        if expe_name not in models_list:
            del labels[expe_name]
    # setting per-model type colors
    for i,m_name in enumerate(models_list):
        for m_type, m_color in per_model_colors.items():
            if m_type in m_name:
                colors[m_name] = m_color
        print("extracting data for {}...".format(m_name))
        m_id = m_name
        models_saves[m_id] = OrderedDict()
        models_saves[m_id]['data'] = get_all_runs(rootdir+m_name, load_subsample_step=load_subsample_step)
        print("done")
        # Default label is the directory name itself.
        if m_name not in labels:
            labels[m_name] = m_name
"""
retrieve all experiences located in "data to vizu" folder
"""
labels = OrderedDict()
per_model_colors = OrderedDict()
# per_model_colors = OrderedDict([('ALP-GMM',u'#1f77b4'),
# ('hmn','pink'),
# ('ADR','black')])
# LOAD DATA
models_saves = OrderedDict()
colors = OrderedDict()
static_lines = {}
# get_datasets("storage/",load_only="RERUN_WizardGuide")
# get_datasets("storage/",load_only="RERUN_WizardTwoGuides")
try:
figure_id = eval(sys.argv[1])
except:
figure_id = sys.argv[1]
print("fig:", figure_id)
if figure_id == 0:
# train change
env_type = "No_NPC_environment"
fig_type = "train"
get_datasets("storage/", "RERUN_WizardGuide_lang64_mm", load_subsample_step=load_subsample_step)
get_datasets("storage/", "RERUN_WizardGuide_lang64_deaf_no_explo", load_subsample_step=load_subsample_step)
get_datasets("storage/", "RERUN_WizardGuide_lang64_no_explo", load_subsample_step=load_subsample_step)
get_datasets("storage/", "RERUN_WizardGuide_lang64_curr_dial", load_subsample_step=load_subsample_step)
top_n = 16
elif figure_id == 1:
# arch change
env_type = "No_NPC_environment"
fig_type = "arch"
get_datasets("storage/", "RERUN_WizardGuide_lang64_mm", load_subsample_step=load_subsample_step)
get_datasets("storage/", "RERUN_WizardGuide_lang64_bow", load_subsample_step=load_subsample_step)
get_datasets("storage/", "RERUN_WizardGuide_lang64_no_mem", load_subsample_step=load_subsample_step)
get_datasets("storage/", "RERUN_WizardGuide_lang64_bigru", load_subsample_step=load_subsample_step)
get_datasets("storage/", "RERUN_WizardGuide_lang64_attgru", load_subsample_step=load_subsample_step)
top_n = 16
elif figure_id == 2:
# train change FULL
env_type = "FULL_environment"
fig_type = "train"
get_datasets("storage/", "RERUN_WizardTwoGuides_lang64_mm", load_subsample_step=load_subsample_step)
get_datasets("storage/", "RERUN_WizardTwoGuides_lang64_deaf_no_explo", load_subsample_step=load_subsample_step)
get_datasets("storage/", "RERUN_WizardTwoGuides_lang64_no_explo", load_subsample_step=load_subsample_step)
get_datasets("storage/", "RERUN_WizardTwoGuides_lang64_curr_dial", load_subsample_step=load_subsample_step)
top_n = 16
elif figure_id == 3:
# arch change FULL
env_type = "FULL_environment"
fig_type = "arch"
get_datasets("storage/", "RERUN_WizardTwoGuides_lang64_mm", load_subsample_step=load_subsample_step)
get_datasets("storage/", "RERUN_WizardTwoGuides_lang64_bow", load_subsample_step=load_subsample_step)
get_datasets("storage/", "RERUN_WizardTwoGuides_lang64_no_mem", load_subsample_step=load_subsample_step)
get_datasets("storage/", "RERUN_WizardTwoGuides_lang64_bigru", load_subsample_step=load_subsample_step)
get_datasets("storage/", "RERUN_WizardTwoGuides_lang64_attgru", load_subsample_step=load_subsample_step)
top_n = 16
elif str(figure_id) == "ShowMe":
get_datasets("storage/", "20-05_NeurIPS_ShowMe_ABL_CEB", load_subsample_step=load_subsample_step, ignore_pattern="tanh_0.3")
get_datasets("storage/", "20-05_NeurIPS_ShowMe_NO_BONUS_ABL", load_subsample_step=load_subsample_step)
get_datasets("storage/", "20-05_NeurIPS_ShowMe_CEB", load_subsample_step=load_subsample_step, ignore_pattern="tanh_0.3")
get_datasets("storage/", "20-05_NeurIPS_ShowMe_NO_BONUS_env", load_subsample_step=load_subsample_step)
label_parser_dict = {
"20-05_NeurIPS_ShowMe_ABL_CEB" : "ShowMe_exp_bonus_no_social_skills_required",
"20-05_NeurIPS_ShowMe_NO_BONUS_ABL" : "ShowMe_no_bonus_no_social_skills_required",
"20-05_NeurIPS_ShowMe_CEB" : "ShowMe_exp_bonus",
"20-05_NeurIPS_ShowMe_NO_BONUS_env" : "ShowMe_no_bonus",
}
env_type = str(figure_id)
fig_type = "test"
top_n = 16
elif str(figure_id) == "Help":
# env_type = "Bobo"
# get_datasets("storage/", "Bobo")
get_datasets("storage/", "24-05_NeurIPS_Help", load_subsample_step=load_subsample_step, ignore_pattern="ABL")
# get_datasets("storage/", "26-05_NeurIPS_gpu_Help_NoSocial_NO_BONUS_ABL", load_subsample_step=load_subsample_step)
get_datasets("storage/", "26-05_NeurIPS_gpu_Help_NoSocial_NO_BONUS_env", load_subsample_step=load_subsample_step)
label_parser_dict = {
"Help_NO_BONUS_env": "PPO",
"Help_BONUS_env": "PPO+Explo",
# "Help_NO_BONUS_ABL_env": "ExiterRole_no_bonus_no_NPC",
# "Help_BONUS_ABL_env": "ExiterRole_bonus_no_NPC",
"26-05_NeurIPS_gpu_Help_NoSocial_NO_BONUS_env": "Unsocial PPO",
# "26-05_NeurIPS_gpu_Help_NoSocial_NO_BONUS_ABL": "ExiterRole_Insocial_ABL"
}
static_lines = {
"PPO (helper)": (0.12, 0.05, "#1f77b4"),
"PPO+Explo (helper)": (0.11, 0.04, "indianred"),
# "Help_exp_bonus": (0.11525, 0.04916 , default_colors[2]),
# "HelperRole_ABL_no_exp_bonus": (0.022375, 0.01848, default_colors[3]),
"Unsocial PPO (helper)": (0.15, 0.06, "grey"),
# "HelperRole_ABL_Insocial": (0.01775, 0.010544, default_colors[4]),
}
env_type = str(figure_id)
fig_type = "test"
top_n = 16
elif str(figure_id) == "TalkItOut":
print("You mean Polite")
exit()
elif str(figure_id) == "TalkItOutPolite":
# env_type = "TalkItOut"
# get_datasets("storage/", "ORIENT_env_MiniGrid-TalkItOut")
# env_type = "GuideThief"
# get_datasets("storage/", "GuideThief")
# env_type = "Bobo"
# get_datasets("storage/", "Bobo")
get_datasets("storage/", "20-05_NeurIPS_TalkItOutPolite", load_subsample_step=load_subsample_step)
# get_datasets("storage/", "21-05_NeurIPS_small_bonus_TalkItOutPolite")
get_datasets("storage/", "26-05_NeurIPS_gpu_TalkItOutPolite_NoSocial_NO_BONUS_env", load_subsample_step=load_subsample_step)
get_datasets("storage/", "26-05_NeurIPS_gpu_TalkItOutPolite_NoSocial_NO_BONUS_NoLiar", load_subsample_step=load_subsample_step)
label_parser_dict = {
"TalkItOutPolite_NO_BONUS_env": "PPO",
"TalkItOutPolite_e": "PPO+Explo",
"TalkItOutPolite_NO_BONUS_NoLiar": "PPO (no liar)",
"TalkItOutPolite_NoLiar_e": "PPO+Explo (no liar)",
"26-05_NeurIPS_gpu_TalkItOutPolite_NoSocial_NO_BONUS_env": "Unsocial PPO",
"26-05_NeurIPS_gpu_TalkItOutPolite_NoSocial_NO_BONUS_NoLiar": "Unsocial PPO (no liar)",
}
env_type = str(figure_id)
fig_type = "test"
top_n = 16
elif str(figure_id) == "DiverseExit":
get_datasets("storage/", "24-05_NeurIPS_DiverseExit", load_subsample_step=load_subsample_step)
get_datasets("storage/", "26-05_NeurIPS_gpu_DiverseExit", load_subsample_step=load_subsample_step)
label_parser_dict = {
"DiverseExit_NO_BONUS": "No_bonus",
"DiverseExit_BONUS": "BOnus",
"gpu_DiverseExit_NoSocial": "No_social",
}
env_type = str(figure_id)
fig_type = "test"
top_n = 16
else:
get_datasets("storage/", str(figure_id), load_subsample_step=load_subsample_step)
env_type = str(figure_id)
fig_type = "test"
top_n = 8
#### get_datasets("storage/", "RERUN_WizardGuide_lang64_nameless")
#### get_datasets("storage/", "RERUN_WizardTwoGuides_lang64_nameless")
if per_model_colors: # order runs for legend order as in per_models_colors, with corresponding colors
ordered_labels = OrderedDict()
for teacher_type in per_model_colors.keys():
for k,v in labels.items():
if teacher_type in k:
ordered_labels[k] = v
labels = ordered_labels
else:
print('not using per_model_color')
for k in models_saves.keys():
labels[k] = k
def plot_with_shade(subplot_nb, ax, x, y, err, color, shade_color, label,
                    y_min=None, y_max=None, legend=False, leg_size=30, leg_loc='best', title=None,
                    ylim=None, xlim=None, leg_args=None, leg_linewidth=13.0, linewidth=10.0, ticksize=20,
                    zorder=None, xlabel='perf', ylabel='env steps', smooth_factor=None):
    """Plot the mean curve `y` over `x` on `ax` with a +/- `err` shaded band.

    Args:
        subplot_nb: index of the subplot; the y-label is only drawn for 0.
        ax: matplotlib Axes to draw on.
        x, y, err: 1-D arrays (mean curve and symmetric error band).
        color / shade_color: line and band colors.
        label: legend label for the curve.
        legend: draw the legend (with `leg_args` / `leg_linewidth`) when True.
        ylim / xlim: axis limits; default [0, 100] / [0, 40].
        smooth_factor: optional trailing running-mean window applied to `y`.
            Fix: the per-seed call site passes this keyword, which the old
            signature rejected with a TypeError.
        y_min, y_max, leg_size: accepted for call-site compatibility (unused).
    """
    # Avoid mutable default arguments.
    ylim = [0, 100] if ylim is None else ylim
    xlim = [0, 40] if xlim is None else xlim
    leg_args = {} if leg_args is None else leg_args
    if smooth_factor:
        # Trailing running mean, same scheme as the caller's smooth() helper.
        y = np.array([y[max(i - smooth_factor, 0):i + 1].mean() for i in range(len(y))])
    ax.locator_params(axis='x', nbins=4)
    ax.locator_params(axis='y', nbins=3)
    ax.tick_params(axis='both', which='major', labelsize=ticksize)
    ax.plot(x, y, color=color, label=label, linewidth=linewidth, zorder=zorder)
    ax.fill_between(x, y - err, y + err, color=shade_color, alpha=0.2)
    if legend:
        leg = ax.legend(loc=leg_loc, **leg_args)
        for legobj in leg.legendHandles:
            legobj.set_linewidth(leg_linewidth)
    ax.set_xlabel(xlabel, fontsize=30)
    if subplot_nb == 0:
        ax.set_ylabel(ylabel, fontsize=30, labelpad=-4)
    ax.set_xlim(xmin=xlim[0], xmax=xlim[1])
    ax.set_ylim(bottom=ylim[0], top=ylim[1])
    if title:
        ax.set_title(title, fontsize=22)
# Plot utils
def plot_with_shade_grg(subplot_nb, ax,x,y,err,color,shade_color,label,
                        y_min=None,y_max=None, legend=False, leg_size=30, leg_loc='best', title=None,
                        ylim=[0,100], xlim=[0,40], leg_args={}, leg_linewidth=13.0, linewidth=10.0, ticksize=20,
                        zorder=None, xlabel='perf',ylabel='env steps', linestyle="-"):
    """Plot a mean curve with a +/- `err` shaded band on `ax`.

    Variant of plot_with_shade with a configurable `linestyle` (used for the
    dotted static baselines). The y-label is only drawn when subplot_nb == 0.
    NOTE(review): ylim/xlim/leg_args are mutable default arguments; they are
    not mutated here, but None-defaults would be safer.
    """
    #plt.rcParams.update({'font.size': 15})
    ax.locator_params(axis='x', nbins=4)
    ax.locator_params(axis='y', nbins=3)
    ax.tick_params(axis='both', which='major', labelsize=ticksize)
    ax.plot(x, y, color=color, label=label,linewidth=linewidth,zorder=zorder, linestyle=linestyle)
    ax.fill_between(x, y-err, y+err,color=shade_color,alpha=0.2)
    if legend:
        leg = ax.legend(loc=leg_loc, **leg_args) #34
        # Thicken the legend's line samples independently of the plot lines.
        for legobj in leg.legendHandles:
            legobj.set_linewidth(leg_linewidth)
    ax.set_xlabel(xlabel, fontsize=30)
    if subplot_nb == 0:
        ax.set_ylabel(ylabel, fontsize=30, labelpad=-4)
    ax.set_xlim(xmin=xlim[0],xmax=xlim[1])
    ax.set_ylim(bottom=ylim[0],top=ylim[1])
    if title:
        ax.set_title(title, fontsize=22)
# Metric plot
metric = 'bin_extrinsic_return_mean'
# metric = 'mission_string_observed_mean'
# metric = 'extrinsic_return_mean'
# metric = 'extrinsic_return_max'
# metric = "rreturn_mean"
# metric = 'rreturn_max'
# metric = 'FPS'
f, ax = plt.subplots(1, 1, figsize=(10.0, 6.0))
ax = [ax]
max_y = -np.inf
min_y = np.inf
# hardcoded
min_y, max_y = 0.0, 1.0
max_steps = 0
exclude_patterns = []
include_patterns = []
def label_parser(label, figure_id, label_parser_dict=None):
    """Map a raw experiment/run name to a human-readable curve label.

    When `label_parser_dict` is given, exactly one of its keys must be a
    substring of `label` (an exact key match breaks ties); otherwise an error
    is printed and the process exits. Without a dict, labels are returned
    unchanged except for figures 1-4, which use a built-in mapping.
    (Fixes the "lable" typo in both error messages.)
    """
    if label_parser_dict:
        matches = [v for k, v in label_parser_dict.items() if k in label]
        if len(matches) == 1:
            return matches[0]
        # Ambiguous (or no) substring match: fall back to an exact key match.
        if label in label_parser_dict:
            return label_parser_dict[label]
        print("ERROR multiple curves match a label and there is no exact match")
        print(label)
        exit()
    # No mapping needed outside the hard-coded figures.
    if figure_id not in [1, 2, 3, 4]:
        return label
    label_parser_dict = {
        "RERUN_WizardGuide_lang64_no_explo": "MH-BabyAI",
        "RERUN_WizardTwoGuides_lang64_no_explo": "MH-BabyAI",
        "RERUN_WizardGuide_lang64_mm_baby_short_rec_env": "MH-BabyAI-ExpBonus",
        "RERUN_WizardTwoGuides_lang64_mm_baby_short_rec_env": "MH-BabyAI-ExpBonus",
        "RERUN_WizardGuide_lang64_deaf_no_explo": "Deaf-MH-BabyAI",
        "RERUN_WizardTwoGuides_lang64_deaf_no_explo": "Deaf-MH-BabyAI",
        "RERUN_WizardGuide_lang64_bow": "MH-BabyAI-ExpBonus-BOW",
        "RERUN_WizardTwoGuides_lang64_bow": "MH-BabyAI-ExpBonus-BOW",
        "RERUN_WizardGuide_lang64_no_mem": "MH-BabyAI-ExpBonus-no-mem",
        "RERUN_WizardTwoGuides_lang64_no_mem": "MH-BabyAI-ExpBonus-no-mem",
        "RERUN_WizardGuide_lang64_bigru": "MH-BabyAI-ExpBonus-bigru",
        "RERUN_WizardTwoGuides_lang64_bigru": "MH-BabyAI-ExpBonus-bigru",
        "RERUN_WizardGuide_lang64_attgru": "MH-BabyAI-ExpBonus-attgru",
        "RERUN_WizardTwoGuides_lang64_attgru": "MH-BabyAI-ExpBonus-attgru",
        "RERUN_WizardGuide_lang64_curr_dial": "MH-BabyAI-ExpBonus-current-dialogue",
        "RERUN_WizardTwoGuides_lang64_curr_dial": "MH-BabyAI-ExpBonus-current-dialogue",
        "RERUN_WizardTwoGuides_lang64_mm_baby_short_rec_100M": "MH-BabyAI-ExpBonus-100M"
    }
    matches = [v for k, v in label_parser_dict.items() if k in label]
    if len(matches) != 1:
        print("ERROR multiple curves match a label")
        print(label)
        exit()
    return matches[0]
per_seed=False
for i, m_id in enumerate(models_saves.keys()):
#excluding some experiments
if any([ex_pat in m_id for ex_pat in exclude_patterns]):
continue
if len(include_patterns) > 0:
if not any([in_pat in m_id for in_pat in include_patterns]):
continue
runs_data = models_saves[m_id]['data']
ys = []
# DIRTY FIX FOR FAULTY LOGGING
print("m_id:", m_id)
if runs_data[0]['frames'][1] == 'frames':
runs_data[0]['frames'] = list(filter(('frames').__ne__, runs_data[0]['frames']))
###########################################
# determine minimal run length across seeds
minimum = sorted([len(run['frames']) for run in runs_data if len(run['frames'])])[-top_n]
min_len = np.min([len(run['frames']) for run in runs_data if len(run['frames']) >= minimum])
# min_len = np.min([len(run['frames']) for run in runs_data if len(run['frames']) > 10])
print("min_len:", min_len)
#compute env steps (x axis)
longest_id = np.argmax([len(rd['frames']) for rd in runs_data])
steps = np.array(runs_data[longest_id]['frames'], dtype=np.int) / 1000000
steps = steps[:min_len]
for run in runs_data:
data = run[metric]
# DIRTY FIX FOR FAULTY LOGGING (headers in data)
if data[1] == metric:
data = np.array(list(filter((metric).__ne__, data)), dtype=np.float16)
###########################################
if len(data) >= min_len:
if len(data) > min_len:
print("run has too many {} datapoints ({}). Discarding {}".format(m_id, len(data),
len(data)-min_len))
data = data[0:min_len]
ys.append(data)
ys_same_len = ys # RUNS MUST HAVE SAME LEN
# computes stats
n_seeds = len(ys_same_len)
sems = np.std(ys_same_len,axis=0)/np.sqrt(len(ys_same_len)) # sem
stds = np.std(ys_same_len,axis=0) # std
means = np.mean(ys_same_len,axis=0)
color = default_colors[i]
# per-metric adjusments
ylabel=metric
if metric == 'bin_extrinsic_return_mean':
ylabel = "success rate"
if metric == 'duration':
ylabel = "time (hours)"
means = means / 3600
sems = sems / 3600
stds = stds / 3600
#plot x y bounds
curr_max_y = np.max(means)
curr_min_y = np.min(means)
curr_max_steps = np.max(steps)
if curr_max_y > max_y:
max_y = curr_max_y
if curr_min_y < min_y:
min_y = curr_min_y
if curr_max_steps > max_steps:
max_steps = curr_max_steps
if subsample_step:
steps = steps[0::subsample_step]
means = means[0::subsample_step]
stds = stds[0::subsample_step]
sems = sems[0::subsample_step]
ys_same_len = [y[0::subsample_step] for y in ys_same_len]
# display seeds separtely
if per_seed:
for s_i, seed_ys in enumerate(ys_same_len):
seed_c = default_colors[i+s_i]
label = m_id#+"(s:{})".format(s_i)
plot_with_shade(0, ax[0], steps, seed_ys, stds*0, seed_c, seed_c, label,
legend=False, xlim=[0, max_steps], ylim=[min_y, max_y],
leg_size=leg_size, xlabel="env steps (millions)", ylabel=ylabel, smooth_factor=smooth_factor,
)
else:
label = label_parser(m_id, figure_id, label_parser_dict=label_parser_dict)
label = label #+"({})".format(n_seeds)
def smooth(x_, n=50):
    """Trailing running mean: element i is the mean of x_[max(i - n, 0) .. i]."""
    if type(x_) == list:
        x_ = np.array(x_)
    window_means = []
    for idx in range(len(x_)):
        lo = max(idx - n, 0)
        window_means.append(x_[lo:idx + 1].mean())
    return np.array(window_means)
if smooth_factor:
means = smooth(means,smooth_factor)
stds = smooth(stds,smooth_factor)
x_lim = 30
if figure_id == "TalkItOutPolite":
leg_args = {
'ncol': 1,
'columnspacing': 1.0,
'handlelength': 1.0,
'frameon': False,
# 'bbox_to_anchor': (0.00, 0.23, 0.10, .102),
'bbox_to_anchor': (0.55, 0.35, 0.10, .102),
'labelspacing': 0.2,
'fontsize': 27
}
elif figure_id == "Help":
leg_args = {
'ncol': 1,
'columnspacing': 1.0,
'handlelength': 1.0,
'frameon': False,
# 'bbox_to_anchor': (0.00, 0.23, 0.10, .102),
'bbox_to_anchor': (0.39, 0.20, 0.10, .102),
'labelspacing': 0.2,
'fontsize': 27
}
else:
leg_args = {}
color_code = dict([
('PPO+Explo', 'indianred'),
('PPO', "#1f77b4"),
('Unsocial PPO', "grey"),
('PPO (no liar)', "#043252"),
('PPO+Explo (no liar)', "darkred"),
('Unsocial PPO (no liar)', "black"),
('PPO+Explo (helper)', 'indianred'),
('PPO (helper)', "#1f77b4"),
('Unsocial PPO (helper)', "grey")]
)
color = color_code.get(label, np.random.choice(default_colors))
print("C:",color)
plot_with_shade_grg(
0, ax[0], steps, means, stds, color, color, label,
legend=True,
xlim=[0, steps[-1] if not x_lim else x_lim],
ylim=[0, 1.0], xlabel="env steps (millions)", ylabel=ylabel, title=None,
leg_args =leg_args)
#
# plot_with_shade(0, ax[0], steps, means, stds, color, color,label,
# legend=True, xlim=[0, max_steps], ylim=[min_y, max_y],
# leg_size=leg_size, xlabel="Env steps (millions)", ylabel=ylabel, linewidth=5.0, smooth_factor=smooth_factor)
for label, (mean, std, color) in static_lines.items():
plot_with_shade_grg(
0, ax[0], steps, np.array([mean]*len(steps)), np.array([std]*len(steps)), color, color, label,
legend=True,
xlim=[0, max_steps],
ylim=[0, 1.0],
xlabel="env steps (millions)", ylabel=ylabel, linestyle=":",
leg_args=leg_args)
plt.tight_layout()
f.savefig('graphics/{}_results.svg'.format(str(figure_id)))
f.savefig('graphics/{}_results.png'.format(str(figure_id)))
plt.show() | flowersteam/social-ai | data_analysis_neurips.py | data_analysis_neurips.py | py | 22,418 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "os.walk",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 5... |
7722787082 | from spynnaker.pyNN import exceptions
from spynnaker.pyNN.models.neural_projections.connectors.abstract_connector \
import AbstractConnector
from spynnaker.pyNN.models.neural_properties.synaptic_list import SynapticList
from spynnaker.pyNN.models.neural_properties.synapse_row_info \
import SynapseRowInfo
import logging
import numpy
logger = logging.getLogger(__name__)
class FromListConnector(AbstractConnector):
    """
    Make connections according to a list.
    :param `list` conn_list:
        a list of tuples, one tuple for each connection. Each
        tuple should contain::
            (pre_idx, post_idx, weight, delay)
        where pre_idx is the index (i.e. order in the Population,
        not the ID) of the presynaptic neuron, and post_idx is
        the index of the postsynaptic neuron.
    """
    def __init__(self, conn_list=None, safe=True, verbose=False):
        """
        Creates a new FromListConnector.

        `safe` and `verbose` are accepted for API compatibility but have no
        effect here beyond a warning when changed from their defaults.
        """
        if not safe:
            logger.warn("the modification of the safe parameter will be "
                        "ignored")
        if verbose:
            logger.warn("the modification of the verbose parameter will be "
                        "ignored")
        if conn_list is None:
            conn_list = []
        self._conn_list = conn_list
    def generate_synapse_list(
            self, presynaptic_population, postsynaptic_population, delay_scale,
            weight_scale, synapse_type):
        """Build one SynapseRowInfo per presynaptic neuron from the connection
        list (weights/delays scaled) and return them as a SynapticList.

        Raises ConfigurationException when any target index is out of range.
        """
        prevertex = presynaptic_population._get_vertex
        postvertex = postsynaptic_population._get_vertex
        # Convert connection list into numpy record array
        conn_list_numpy = numpy.array(
            self._conn_list, dtype=[("source", "uint32"), ("target", "uint32"),
                                    ("weight", "float"), ("delay", "float")])
        if (conn_list_numpy["target"] >= postvertex.n_atoms).any():
            raise exceptions.ConfigurationException("Target atom out of range")
        # Sort by pre-synaptic neuron
        conn_list_numpy = numpy.sort(conn_list_numpy, order="source")
        # Apply weight and delay scaling
        conn_list_numpy["weight"] *= weight_scale
        conn_list_numpy["delay"] *= delay_scale
        # Count number of connections per pre-synaptic neuron
        pre_counts = numpy.histogram(
            conn_list_numpy["source"], numpy.arange(prevertex.n_atoms + 1))[0]
        # Take cumulative sum of these counts to get start and end indices of
        # the blocks of connections coming from each pre-synaptic neuron
        pre_end_idxs = numpy.cumsum(pre_counts)
        pre_start_idxs = numpy.append(0, pre_end_idxs[:-1])
        # Loop through slices of connections
        synaptic_rows = []
        for _, (start, end) in enumerate(zip(pre_start_idxs, pre_end_idxs)):
            # Get slice
            pre_conns = conn_list_numpy[start:end]
            # Repeat synapse type correct number of times
            synapse_type_row = numpy.empty(len(pre_conns), dtype="uint32")
            synapse_type_row.fill(synapse_type)
            # Combine post-synaptic neuron ids, weights, delays
            # and synapse types together into synaptic row
            synaptic_rows.append(
                SynapseRowInfo(pre_conns["target"],
                               pre_conns["weight"],
                               pre_conns["delay"],
                               synapse_type_row))
        # Return full synaptic list
        return SynapticList(synaptic_rows)
| ominux/sPyNNaker | spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py | from_list_connector.py | py | 3,525 | python | en | code | null | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "spynnaker.pyNN.models.neural_projections.connectors.abstract_connector.AbstractConnector",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 50,
... |
11702399982 | import md5
import random
#from settings import SOLRSOLR_HOST, SOLR_PORT
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'j*zdirg7yy9@q1k=c*q!*kovfsd#$FDFfsdfkae#id04pyta=yz@w34m6rvwfe'
def generate_hash():
    """Return a random md5 hex digest salted with the module's SECRET_KEY.

    NOTE(review): relies on the Python 2-only `md5` module, and `random` is
    not cryptographically secure — for security-sensitive tokens prefer
    `hashlib` + `secrets`.
    """
    # Renamed local from `hash` to avoid shadowing the builtin.
    digest = md5.new()
    # 16 random bytes as the salt.
    digest.update("".join(map(lambda i: chr(random.randint(0, 255)), range(16))))
    digest.update(SECRET_KEY)
    return digest.hexdigest()
import urllib
import urllib2
url = 'http://127.0.0.1:8000/api/insert'
institutionnames = ["Hospital", "Medical Department", "Research Center", "Pharmacy", "Hopital", "Ziekenhuis" ]
databasenames =["heartattack", "cardiomyopathy", "Coronary heart disease", "Valvular heart disease", "Peripheral arterial disease"]
location = ["Paris", "Roterdao",
"Porto", "Aveiro", "Lisboa", "Faro", "Portimao", "Brussels", "London",
"Barcelona", "Heildeberg", "Stuttgard", "Lens"]
data = {
'contact_technical_t':'IEETA',
'created_t':'2013-04-17 12:09:32.334053',
'location_t':'Aveiro',
'institution_name_t':'IEETA',
'contact_scientific_t':'jlo@ua.pt',
'contact_administrative_t':'jlo@ua.pt',
'type_t':'researchcohorts',
'id':'a10815736f733d04d8e0aa65fe37',
'user_t' :'bastiao',
'text_t': 'ieeta ieeta bastiao emif cardiomyopathy Coronary heart attack',
'total_number_subjects_t': '20000',
'ethical_committee_t': '[No]',
'publically_doc_procedure_t': '[No]',
'ethical_committee_t': '[No]',
'number_active_patients_jan2012_t': '200',
'average_time_follow_up_t': '130',
'assess_prevalence_prevalence_t': 'Brufen Beneron',
'literature_papers_t': "Luis A. Bastiao Silva, Carlos Costa, Jose Luis Olveira. A Secure PACS Cloud Archive in CARS 2011, Berlin, Germany ",
'population_description_t':'Fat, Headcache'}
import requests
requests.post("http://127.0.0.1:8000/api/insert", data)
#import pysolr
#import random
#solr = pysolr.Solr('http://' + SOLRSOLR_HOST + ':' + SOLR_PORT+ '/solr', timeout=10)
#for i in range(10):
# index_db = random.randint(1, len(databasenames))
# index_institutionnames = random.randint(1, len(institutionnames))
# index_locations = random.randint(1, len(location))
# data['database_name_t'] = institutionnames[index_institutionnames-1] + " " + location[index_locations-1] + " " +databasenames[index_db-1]
# data['location_t'] = location[index_locations]
# data['id'] = generate_hash()
# solr.add([data])
# solr.optimize()
#curl -v -H "Accept: application/json" -H "Content-type: application/json" -X POST -d ' {"user":{"first_name":"firstname","last_name":"lastname","email":"email@email.com","password":"app123","password_confirmation":"app123"}}' http://127.0.0.1:8000/api/insert
| bioinformatics-ua/montra | emif/insert_script.py | insert_script.py | py | 2,771 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "md5.new",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 56,
"usage_type": "call"
}
] |
12791911896 | from django.contrib.auth.models import User
from django.http import JsonResponse, Http404
from django.shortcuts import redirect, get_object_or_404
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.views.generic import DetailView, CreateView, View, TemplateView, DeleteView
from blog.decorators import authorized_only
from blog.forms import PostForm
from blog.models import Post, Subscription, PostRead
@method_decorator(authorized_only, name='dispatch')
class SubscribeView(View):
    """Toggle the current user's subscription to another user's blog.

    POST with `pk` = target user id; responds with JSON
    {'status': 'ok', 'subscribed': bool} or an error payload.
    """
    def post(self, request):
        try:
            user_id = request.POST.get('pk')
            user = User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return JsonResponse({'status': 'error', 'error': 'Пользователя не существует'})
        sub, created = Subscription.objects.get_or_create(user=user)
        # If the user is already subscribed, unsubscribe them; otherwise subscribe.
        if request.user in sub.subscribers.all():
            sub.subscribers.remove(request.user.id)
            subscribed = False
            # Unsubscribing also drops the read-marks for that author's posts.
            PostRead.objects.filter(user_id=request.user.id, post__user_id=user_id).delete()
        else:
            sub.subscribers.add(request.user.id)
            subscribed = True
        sub.save()
        return JsonResponse({'status': 'ok', 'subscribed': subscribed})
class BlogView(DetailView):
    """A user's blog page: the user object plus all of their posts."""
    model = User
    template_name = 'blog/user.html'
    def get_context_data(self, **kwargs):
        context = super(BlogView, self).get_context_data(**kwargs)
        # All posts by the displayed user; prefetch authors for the template.
        context['posts'] = Post.objects.filter(user_id=self.object.id).prefetch_related('user')
        return context
class PostCreateView(CreateView):
    """Create a new post authored by the current user."""
    model = Post
    form_class = PostForm
    template_name = 'blog/create.html'
    def form_valid(self, form):
        # Attach the authenticated author before saving, then go to the post page.
        post = form.save(commit=False)
        post.user = self.request.user
        post.save()
        return redirect(reverse('blog:detail', args=(post.user_id, post.id)))
class PostDetailView(DetailView):
    """Single-post detail page."""
    model = Post
    template_name = 'blog/detail.html'
class HomeView(TemplateView):
    """Feed page: posts by the current user and by everyone they follow."""
    template_name = 'home.html'
    def get_context_data(self, **kwargs):
        context = super(HomeView, self).get_context_data(**kwargs)
        # Two queries to fetch all posts:
        # 1) the ids of the users the current user is subscribed to,
        # 2) all posts authored by those ids (plus the user's own).
        subscription = list(self.request.user.subscription.values_list('user_id', flat=True))
        subscription.append(self.request.user.id)
        context['posts'] = Post.objects.filter(user_id__in=subscription).prefetch_related('user').distinct()
        return context
class PostReadView(View):
    """Mark a post as read by the current user (own posts are excluded)."""
    def post(self, request):
        try:
            post = Post.objects.exclude(user_id=request.user.id).get(pk=request.POST.get('post_id'))
        except Post.DoesNotExist:
            return JsonResponse({'status': 'error', 'error': 'Пост не найден'})
        # Idempotent: get_or_create avoids duplicate read-marks.
        PostRead.objects.get_or_create(user=request.user, post=post)
        return JsonResponse({'status': 'ok'})
class PostDeleteView(DeleteView):
    """Delete one of the current user's own posts."""
    model = Post
    def get_object(self, queryset=None):
        # Restrict deletion to the requesting user's own posts; 404 otherwise.
        return get_object_or_404(Post, user_id=self.request.user.id, pk=self.kwargs.get('pk'))
def get_success_url(self):
return reverse('blog:home') | skazancev/NeKidaem | project/blog/views.py | views.py | py | 3,548 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.views.generic.View",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.models.User.objects.get",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_number": 19,
"usage... |
7763839215 | from Monitor import Monitor
import MonitorVarTypes
import requests
monitor_var = {'PreviousClosingPrice': MonitorVarTypes.FLOAT}
monitor_text = "URL FORMAT: https://api.polygon.io/v2/aggs/ticker/{TICKER}/prev?apiKey={APIKEY}; \nGo to https://polygon.io/docs/get_v1_meta_symbols__stocksTicker__news_anchor for more info"
class PolygonStockAPIMonitor(Monitor):
    """Monitor backed by Polygon.io's previous-close aggregate endpoint."""
    def _mapper(self):
        # Maps each supported monitor variable name to its check routine.
        return {'PreviousClosingPrice': self._price_check}
    def _price_check(self, func, val):
        """Fetch the previous closing price and apply `func(price, val)`.

        Returns (comparison_result, closing_price) on HTTP 200.
        NOTE(review): a non-200 response falls through and returns None;
        any exception is re-raised as a bare ValueError — confirm intended.
        """
        try:
            r = requests.get(self.src)
            if r.status_code == 200:
                stock_json = r.json()
                # 'c' is the close price in Polygon's aggregate result.
                closing_price = stock_json['results'][0]['c']
                return func(closing_price, val), closing_price
        except Exception:
            raise ValueError
def start(trigger):
    """Entry point: build a PolygonStockAPIMonitor for `trigger` and run it."""
    monitor = PolygonStockAPIMonitor(trigger)
    monitor.run()
| YuHanjiang/IFTTT-backend | Monitors/PolygonStockAPIMonitor.py | PolygonStockAPIMonitor.py | py | 881 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "MonitorVarTypes.FLOAT",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "Monitor.Monitor",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 16,
"usage_type": "call"
}
] |
40139751051 | from PIL import Image
# Slice a 12x6 sprite sheet of Uno cards (166x256 px each, with 2/3 px
# gutters between cells) into individual CardN.png files, 64 cards total.
img = Image.open("UnoDeck2.png")
i = 0
for y in range(6):
    # j/k are the horizontal/vertical gutter widths between cells.
    j = 2
    k = 3
    for x in range(12):
        if i == 64:
            break
        # left/top are computed but unused; each cell is width x height px.
        left = 0
        top = 0
        height = 256
        width = 166
        # Crop box: (left, upper, right, lower), offset by the accumulated gutters.
        box = (width+j)*x, (height+k)*y, width*(x+1)+(j*x), height*(y+1)+(k*y)
        area = img.crop(box)
        cardName = "Card" + str(i)+ ".png"
        area.save((cardName),"PNG")
        i += 1
| CrazyScorcer/ImageCutter | imageCut.py | imageCut.py | py | 472 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "PIL.Image.open",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 3,
"usage_type": "name"
}
] |
33038113455 | """Config flow for Withings."""
import logging
import voluptuous as vol
from withings_api.common import AuthScope
from homeassistant import config_entries
from homeassistant.components.withings import const
from homeassistant.helpers import config_entry_oauth2_flow
_LOGGER = logging.getLogger(__name__)
@config_entries.HANDLERS.register(const.DOMAIN)
class WithingsFlowHandler(config_entry_oauth2_flow.AbstractOAuth2FlowHandler):
    """Handle a config flow."""
    DOMAIN = const.DOMAIN
    CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
    # Holds the OAuth data between the create-entry and profile-selection steps.
    _current_data = None
    @property
    def logger(self) -> logging.Logger:
        """Return logger."""
        return logging.getLogger(__name__)
    @property
    def extra_authorize_data(self) -> dict:
        """Extra data that needs to be appended to the authorize url."""
        # Request the user-info, metrics and activity OAuth scopes.
        return {
            "scope": ",".join(
                [
                    AuthScope.USER_INFO.value,
                    AuthScope.USER_METRICS.value,
                    AuthScope.USER_ACTIVITY.value,
                ]
            )
        }
    async def async_oauth_create_entry(self, data: dict) -> dict:
        """Override the create entry so user can select a profile."""
        self._current_data = data
        return await self.async_step_profile(data)
    async def async_step_profile(self, data: dict) -> dict:
        """Prompt the user to select a user profile."""
        profile = data.get(const.PROFILE)
        if profile:
            # Profile chosen: merge it into the stored OAuth data and finish.
            new_data = {**self._current_data, **{const.PROFILE: profile}}
            self._current_data = None
            return await self.async_step_finish(new_data)
        # No profile yet: show the selection form with the configured profiles.
        profiles = self.hass.data[const.DOMAIN][const.CONFIG][const.PROFILES]
        return self.async_show_form(
            step_id="profile",
            data_schema=vol.Schema({vol.Required(const.PROFILE): vol.In(profiles)}),
        )
    async def async_step_finish(self, data: dict) -> dict:
        """Finish the flow."""
        self._current_data = None
        return self.async_create_entry(title=data[const.PROFILE], data=data)
| 84KaliPleXon3/home-assistant-core | homeassistant/components/withings/config_flow.py | config_flow.py | py | 2,112 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "homeassistant.helpers.config_entry_oauth2_flow.AbstractOAuth2FlowHandler",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.helpers.config_entry_oauth2_flow",
... |
32639606030 | import unittest
from selenium import webdriver
from selenium.webdriver.common.by import By
class AddRemoveElements(unittest.TestCase):
    """Interactive UI test for the-internet's Add/Remove Elements page."""

    def setUp(self) -> None:
        # Fresh browser per test, navigated to the Add/Remove Elements page.
        self.driver = webdriver.Chrome(executable_path="chromedriver")
        self.driver.get('https://the-internet.herokuapp.com/')
        self.driver.find_element(By.LINK_TEXT, 'Add/Remove Elements').click()

    def tearDown(self) -> None:
        self.driver.quit()

    def test_add_remove(self):
        """Add N elements, delete M, then report how many remain on screen."""
        elements_added = int(input('How many elements will you add?: '))
        elements_removed = int(input('How many elements will you remove?: '))
        add_button = self.driver.find_element(By.XPATH, '//*[@id="content"]/div/button')
        for _ in range(elements_added):
            add_button.click()
        deleted = 0
        for _ in range(elements_removed):
            try:
                delete_button = self.driver.find_element(By.XPATH, '//*[@id="elements"]/button[3]')
                delete_button.click()
                deleted += 1
            except Exception:  # was a bare except; NoSuchElementException in practice
                print("You're trying to delete more elements than existing")
                break
        # Fix: the remaining count is added minus successfully deleted
        # (the original summed them, over-reporting the on-screen count).
        total_elements = elements_added - deleted
        if total_elements > 0:
            print(f'There are {total_elements} elements on screen')
        else:
            print("There are 0 elements on screen")
if __name__ == '__main__':
unittest.main(verbosity=2) | yorlysoro/intro_selenium_course | test_add_remove.py | test_add_remove.py | py | 1,436 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "unittest.TestCase",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "sele... |
28969789667 | import machine
import picoweb
import ujson
import ulogging as logging
import ure as re
import utime
from common import config
app = picoweb.WebApp(__name__)
hooks = {}
CONFIGURE_DEVICE_HOOK = 'CONFIGURE_WIFI'
CONFIGURE_AWS_HOOK = 'CONFIGURE_AWS'
CONFIGURE_SENSOR_HOOK = "CONFIGURE_SENSOR"
GET_STATUS_HOOK = 'GET_STATUS'
# API helpers
def create_success_response(data: dict):
return _create_common_response(data=data, status=0, status_text='ok')
def _create_common_response(data, status: int, status_text: str):
response_dict = {
'data': data,
'status': status,
'status_text': status_text,
}
encoded = ujson.dumps(response_dict)
return encoded
def create_failed_response(resp, status_text: str, status: int = 500):
return _create_common_response(data=None, status=status, status_text=status_text)
# modified picoweb's req.read_form_data:
def parse_post_body(req):
size = int(req.headers[b"Content-Length"])
data = yield from req.reader.readexactly(size)
data_txt = data.decode('utf-8')
return ujson.loads(data_txt)
# Requests handling
@app.route("/status")
def get_status(req, resp):
data = {"timestamp": utime.time()}
status = hooks[GET_STATUS_HOOK]()
for key in status.keys():
data[key] = status[key]
encoded = create_success_response(data=data)
yield from picoweb.start_response(resp, content_type="application/json")
yield from resp.awrite(encoded)
@app.route("/measurement")
def get_last_measurement(req, resp):
value = hooks['get_measurement_hook']()
data = {"value": value}
encoded = create_success_response(data=data)
yield from picoweb.start_response(resp, content_type="application/json")
yield from resp.awrite(encoded)
@app.route("/battery")
def get_last_measurement(req, resp):
assert req.method == 'GET'
try:
battery_v = machine.ADC(machine.Pin(config.cfg.battery_voltage_pin))
battery_v.atten(machine.ADC.ATTN_11DB)
ADC_11DB_TO_VOLT = 0.000805664
voltage = battery_v.read() * ADC_11DB_TO_VOLT
voltage_divider_ratio = config.cfg.voltage_divider_r2_k / \
(config.cfg.voltage_divider_r1_k + config.cfg.voltage_divider_r2_k)
voltage = voltage / voltage_divider_ratio
except:
logging.info("Error reading battery voltage!")
voltage = 'ERROR'
data = {"voltage": voltage}
encoded = create_success_response(data=data)
yield from picoweb.start_response(resp, content_type="application/json")
yield from resp.awrite(encoded)
@app.route("/")
def index(req, resp):
print("route /")
headers = {"Location": "/web_pages/index.html"}
yield from picoweb.start_response(resp, status="303", headers=headers)
@app.route("/config")
def set_config(req, resp):
assert req.method == 'POST'
data = yield from parse_post_body(req)
print(data)
if 'wifi' in data.keys():
print(data['wifi'])
hooks[CONFIGURE_DEVICE_HOOK](data['wifi'])
if 'aws' in data.keys():
hooks[CONFIGURE_AWS_HOOK](data['aws'])
if 'sensor' in data.keys():
hooks[CONFIGURE_SENSOR_HOOK](data['sensor'])
config.cfg.save()
response_data = {'result': 'ok'}
encoded = create_success_response(data=response_data)
yield from picoweb.start_response(resp, content_type="application/json")
yield from resp.awrite(encoded)
@app.route(re.compile("/web_pages/(.+)"))
def get_static_file(req, resp):
print("Get static call")
file_path = '/web_server/web_pages/' + req.url_match.group(1)
logging.info('About to send file: ' + file_path)
yield from app.sendfile(resp, file_path)
@app.route(re.compile("/start_test_data_acquisition"))
def start_test_data_acquisition(req, resp):
hooks['start_test_data_acquisition']()
response_data = {'result': 'ok'}
encoded = create_success_response(data=response_data)
yield from picoweb.start_response(resp, content_type="application/json")
yield from resp.awrite(encoded)
@app.route(re.compile("/start_data_acquisition"))
def start_data_acquisition(req, resp):
hooks['start_data_acquisition']()
response_data = {'result': 'ok'}
encoded = create_success_response(data=response_data)
yield from picoweb.start_response(resp, content_type="application/json")
yield from resp.awrite(encoded)
# setup and run
def setup(get_measurement_hook=None,
configure_device_hook=None,
configure_aws_hook=None,
configure_sensor_hook=None,
get_status_hook=None,
start_test_data_acquisition=None,
start_data_acquisition=None):
global hooks
hooks['get_measurement_hook'] = get_measurement_hook
hooks[CONFIGURE_DEVICE_HOOK] = configure_device_hook
hooks[CONFIGURE_AWS_HOOK] = configure_aws_hook
hooks[CONFIGURE_SENSOR_HOOK] = configure_sensor_hook
hooks[GET_STATUS_HOOK] = get_status_hook
hooks['start_test_data_acquisition'] = start_test_data_acquisition
hooks['start_data_acquisition'] = start_data_acquisition
def run():
global app
global hooks
if not hooks:
raise Exception('Please setup server with hooks first!')
logging.info('About to start server...')
app.run(debug=1, port=80, host='0.0.0.0')
def stop_server():
app.stop_server()
| wizzdev-pl/iot-starter | MicroPython/src/web_server/web_app.py | web_app.py | py | 5,324 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "picoweb.WebApp",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "ujson.dumps",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "ujson.loads",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "utime.time",
"line_number"... |
15624457572 | import numpy as np
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
test_size = 0.2
seed = 42
x_data = np.load('./data/soja_images_150_new.npy', allow_pickle=True)
y_data = np.load('./data/soja_labels_150_new.npy', allow_pickle=True)
x_data = x_data.astype(np.float32)
y_data_cat = to_categorical(y_data)
x_train, x_test, y_train, y_test = train_test_split(
x_data, y_data_cat, test_size=test_size, random_state=seed)
with open('./data/x_train.npy', 'wb') as f:
np.save(f, x_train)
with open('./data/x_test.npy', 'wb') as f:
np.save(f, x_test)
with open('./data/y_train.npy', 'wb') as f:
np.save(f, y_train)
with open('./data/y_test.npy', 'wb') as f:
np.save(f, y_test)
| nagahamaVH/soybean-image-classif | app/src/prepare_data.py | prepare_data.py | py | 741 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "numpy.load",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "keras.utils.to_categorical",
... |
3177305776 | from flipperzero_cli import CONFIG, load_config, show_config, \
read_until_prompt, print_until_prompt, check_file_presence, \
flipper_init, main, \
storage_read, save_file, download_from_flipper, \
upload_to_flipper, check_local_md5, compare_md5
import builtins
import pytest
from unittest.mock import patch, mock_open
from .mock_serial import Serial
DEFAULT_CONFIG = {"filename": None,
"port": None,
"show_banner": 0,
"hide_command": False,
"show_config": False}
DEFAULT_COMMAND = ["help"]
# Helpers
def updated_config(data):
new_config = DEFAULT_CONFIG.copy()
for k, v in data.items():
new_config[k] = v
return new_config
def call_with(m, parameters=[], new_env={}):
for k in [
"FLIPPER_ZERO_SHOW_BANNER",
"FLIPPER_ZERO_HIDE_COMMAND",
"FLIPPER_ZERO_PORT",
"FLIPPER_ZERO_FILENAME",
]:
if k not in new_env:
m.delenv(k, raising=False)
for k, v in new_env.items():
m.setenv(k, v)
m.setattr("sys.argv", ["cli.py"] + parameters)
# Tests
def test_load_config(monkeypatch):
with monkeypatch.context() as m:
# Test without env variable and command line parameters
call_with(m, [])
assert load_config() == DEFAULT_COMMAND
assert CONFIG == DEFAULT_CONFIG
# Only test with env parameters
call_with(m, [], {"FLIPPER_ZERO_SHOW_BANNER": "1"})
load_config()
assert CONFIG == updated_config({"show_banner": True})
call_with(m, [], {"FLIPPER_ZERO_HIDE_COMMAND": "1"})
load_config()
assert CONFIG == updated_config({"hide_command": True})
call_with(m, [], {"FLIPPER_ZERO_PORT": "/dev/flipper0"})
load_config()
assert CONFIG == updated_config({"port": "/dev/flipper0"})
call_with(m, [], {"FLIPPER_ZERO_FILENAME": "/home/flipper/dolpin.txt"})
load_config()
assert CONFIG == updated_config({"filename":
"/home/flipper/dolpin.txt"})
call_with(m, [], {
"FLIPPER_ZERO_SHOW_BANNER": "1",
"FLIPPER_ZERO_HIDE_COMMAND": "0",
"FLIPPER_ZERO_PORT": "/dev/flipper0",
"FLIPPER_ZERO_FILENAME": "/home/flipper/dolpin.txt",
})
load_config()
assert CONFIG == updated_config({
"show_banner": True,
"hide_command": False,
"port": "/dev/flipper0",
"filename": "/home/flipper/dolpin.txt",
})
# Test with command line parameters
# -p --port
call_with(m, ["-p", "/dev/flipper0"])
assert load_config() == DEFAULT_COMMAND
assert CONFIG == updated_config({"port": "/dev/flipper0"})
call_with(m, ["--port", "/dev/flipper0"])
assert load_config() == DEFAULT_COMMAND
assert CONFIG == updated_config({"port": "/dev/flipper0"})
call_with(m, ["--port", "/dev/flipper1"],
{"FLIPPER_ZERO_PORT": "/dev/flipper0"})
assert load_config() == DEFAULT_COMMAND
assert CONFIG == updated_config({"port": "/dev/flipper1"})
# -f --filename
call_with(m, ["-f", "/home/flipper/dolpin1.txt"])
assert load_config() == DEFAULT_COMMAND
assert CONFIG == updated_config({"filename":
"/home/flipper/dolpin1.txt"})
call_with(m, [ "--filename", "/home/flipper/dolpin2.txt"])
assert load_config() == DEFAULT_COMMAND
assert CONFIG == updated_config({"filename":
"/home/flipper/dolpin2.txt"})
call_with(m, ["-f", "/home/flipper/dolpin3.txt"],
{"FLIPPER_ZERO_FILENAME": "/home/flipper/dolpin.txt"})
assert load_config() == DEFAULT_COMMAND
assert CONFIG == updated_config({"filename":
"/home/flipper/dolpin3.txt"})
# --show-banner
call_with(m, ["--show-banner"])
assert load_config() == DEFAULT_COMMAND
assert CONFIG == updated_config({"show_banner": True})
call_with(m, ["--show-banner"], {"FLIPPER_ZERO_SHOW_BANNER": "1"})
assert load_config() == DEFAULT_COMMAND
assert CONFIG == updated_config({"show_banner": True})
# --hide-command
call_with(m, ["--hide-command"])
assert load_config() == DEFAULT_COMMAND
assert CONFIG == updated_config({"hide_command": True})
call_with(m, ["--hide-command"], {"FLIPPER_ZERO_HIDE_COMMAND": "1"})
assert load_config() == DEFAULT_COMMAND
assert CONFIG == updated_config({"hide_command": True})
# --show-config
call_with(m, ["--show-config"])
assert load_config() == DEFAULT_COMMAND
assert CONFIG == updated_config({"show_config": True})
# Test different values for FLIPPER_ZERO_SHOW_BANNER
for v in ["1", "true", "True"]:
call_with(m, [], {"FLIPPER_ZERO_SHOW_BANNER": v})
assert load_config() == DEFAULT_COMMAND
assert CONFIG == updated_config({"show_banner": True})
for v in ["false", "False"]:
call_with(m, [], {"FLIPPER_ZERO_SHOW_BANNER": v})
assert load_config() == DEFAULT_COMMAND
assert CONFIG == updated_config({"show_banner": False})
# Test if argparse leave "garbage" in parsing
flipper_command = ["storage", "info", "/ext"]
call_with(m, flipper_command)
assert load_config() == flipper_command
assert CONFIG == DEFAULT_CONFIG
call_with(m, ["--port", "/dev/flipper0"]+flipper_command)
assert load_config() == flipper_command
assert CONFIG == updated_config({"port": "/dev/flipper0"})
call_with(m, flipper_command+["--port", "/dev/flipper0"])
assert load_config() == flipper_command
assert CONFIG == updated_config({"port": "/dev/flipper0"})
def test_show_config(monkeypatch, capsys):
with monkeypatch.context() as m:
call_with(m, ["--port", "/dev/flipper0"])
load_config()
show_config()
captured = capsys.readouterr()
assert captured.out == "show_banner: 0\nhide_command: 0\nport: /dev/flipper0\n"
call_with(m, ["--port", "/dev/flipper1", "--hide-command"])
load_config()
show_config()
captured = capsys.readouterr()
assert captured.out == "show_banner: 0\nhide_command: True\nport: /dev/flipper1\n"
call_with(m, ["--show-banner", "--port", "/dev/flipper1"])
load_config()
show_config()
captured = capsys.readouterr()
assert captured.out == "show_banner: True\nhide_command: 0\nport: /dev/flipper1\n"
def test_read_until_prompt():
f0 = Serial()
simple_prompt = b"Text before\nFlipper prompt\n>: "
f0._out_buffer = simple_prompt
assert read_until_prompt(f0) == simple_prompt.decode()
FLIPPER_SD_INFO_PRINT = """Label: FLIPPER SD
Type: FAT32
3886080KB total
3841024KB free
"""
FLIPPER_SD_INFO = FLIPPER_SD_INFO_PRINT.encode() + b"""
>: ."""
def test_print_until_prompt(capsys):
f0 = Serial()
simple_prompt = b"Text before\nFlipper prompt\n>: "
f0._out_buffer = simple_prompt
print_until_prompt(f0, show_prompt=True)
captured = capsys.readouterr()
assert captured.out == simple_prompt.decode()+"\n"
f0._out_buffer = FLIPPER_SD_INFO
print_until_prompt(f0, show_prompt=True)
captured = capsys.readouterr()
assert captured.out == FLIPPER_SD_INFO.decode()[:-1]+"\n"
f0._out_buffer = FLIPPER_SD_INFO
print_until_prompt(f0, show_prompt=False)
captured = capsys.readouterr()
assert captured.out == FLIPPER_SD_INFO_PRINT
@patch("os.path.exists")
def test_check_file_presence(patch_exists):
# Test missing file
patch_exists.return_value = False
with pytest.raises(SystemExit) as e:
check_file_presence("/tmp/missing_file")
assert e.type == SystemExit
assert e.value.code == 1
# Test existing file
patch_exists.return_value = True
assert check_file_presence("/tmp/existing_file") == True
def test_flipper_init(monkeypatch, capsys):
with pytest.raises(SystemExit) as e:
(command, f0) = flipper_init()
assert e.type == SystemExit
assert e.value.code == 1
captured = capsys.readouterr()
assert captured.out == "Please configure flipper zero serial port\n"
with monkeypatch.context() as m:
call_with(m, [], {"FLIPPER_ZERO_PORT": "/dev/flipper0"})
(command, f0) = flipper_init(s=Serial)
assert command == "help"
STORAGE_READ_01_HEADER = b"""Size: 164
"""
STORAGE_READ_01_CONTENT = b"""In faucibus dignissim ullamcorper.
Nulla quis molestie lacus.
Pellentesque congue dui et felis pharetra eleifend.
Integer magna eros. efficitur sed porta sit amet.
"""
STORAGE_READ_01_FOOTER = b"""
>: ."""
STORAGE_READ_01_RAW = STORAGE_READ_01_HEADER + \
STORAGE_READ_01_CONTENT + \
STORAGE_READ_01_FOOTER
def test_storage_read():
f0 = Serial()
f0._out_buffer = STORAGE_READ_01_RAW
(size, data) = storage_read(f0)
assert size == 164
assert data == STORAGE_READ_01_CONTENT.decode()
def test_save_file(capsys):
mock_write = mock_open()
with patch.object(builtins, "open", mock_write, create=True) as patched_open:
save_file(STORAGE_READ_01_CONTENT.decode(),
"/tmp/file_2_save.txt",
output=False)
captured = capsys.readouterr()
assert captured.out == "Save to /tmp/file_2_save.txt\n"
assert patched_open.mock_calls[2][1][0] == STORAGE_READ_01_CONTENT.decode()
save_file(STORAGE_READ_01_CONTENT.decode(),
"/tmp/file_2_save.txt",
output=True)
captured = capsys.readouterr()
assert captured.out == "Save to /tmp/file_2_save.txt\n" + \
STORAGE_READ_01_CONTENT.decode() + "\n"
def test_download_from_flipper(capsys):
f0 = Serial()
f0._out_buffer = STORAGE_READ_01_RAW
mock_write = mock_open()
with patch.object(builtins, "open", mock_write, create=True) as patched_open:
download_from_flipper(f0, "/tmp/file_2_save.txt", output=False)
captured = capsys.readouterr()
assert captured.out == "Save to /tmp/file_2_save.txt\n"
STORAGE_WRITE_01_HEADER = b"Just write your text data. New line by Ctrl+Enter, exit by Ctrl+C.\n\n"
STORAGE_WRITE_01_FOOTER = b"""
>: """
STORAGE_WRITE_01_OUT = STORAGE_WRITE_01_HEADER + \
STORAGE_READ_01_CONTENT
STORAGE_WRITE_01_RAW = STORAGE_WRITE_01_OUT + \
STORAGE_WRITE_01_FOOTER
@patch("os.path.exists")
def test_upload_to_flipper(patch_exists, capsys):
f0 = Serial()
f0._out_buffer = STORAGE_WRITE_01_RAW
patch_exists.return_value = True
with patch("builtins.open", mock_open(read_data=STORAGE_READ_01_CONTENT)) as mock_file:
upload_to_flipper(f0, "/tmp/file_2_upload.txt")
mock_file.assert_called_with("/tmp/file_2_upload.txt", "rb")
captured = capsys.readouterr()
assert captured.out == STORAGE_WRITE_01_OUT.decode()
def test_check_local_md5():
with patch("builtins.open", mock_open(read_data=STORAGE_READ_01_CONTENT)) as mock_file:
localhash = check_local_md5("/tmp/local_filename.txt")
mock_file.assert_called_with("/tmp/local_filename.txt", "rb")
assert localhash == "9cb4a477cbbf515f7dffb459f1e05594"
GOD_HASH = "9cb4a477cbbf515f7dffb459f1e05594"
BAD_HASH = "a7b073ead2a733491a4a407e777b2e59"
@patch("os.path.exists")
@patch("flipperzero_cli.check_local_md5")
def test_compare_md5(patch_check_local_md5, patch_exists, capsys):
f0 = Serial()
f0._out_buffer = f"{GOD_HASH}\n\n>: ".encode()
patch_exists.return_value = True
patch_check_local_md5.return_value = GOD_HASH
compare_md5(f0, "/tmp/local_filename.txt")
captured = capsys.readouterr()
assert captured.out == f"OK, same hash ({GOD_HASH})\n"
f0 = Serial()
f0._out_buffer = f"{GOD_HASH}\n\n>: ".encode()
patch_exists.return_value = True
patch_check_local_md5.return_value = BAD_HASH
compare_md5(f0, "/tmp/local_filename.txt")
captured = capsys.readouterr()
assert captured.out == f"""KO different hashes:
local: '{BAD_HASH}'
remote: '{GOD_HASH}'
"""
def test_main(monkeypatch, capsys):
with pytest.raises(SystemExit) as e:
main()
assert e.type == SystemExit
assert e.value.code == 1
captured = capsys.readouterr()
assert captured.out == "Please configure flipper zero serial port\n"
with monkeypatch.context() as m:
call_with(m, [], {"FLIPPER_ZERO_PORT": "/dev/flipper0"})
main(s=Serial)
captured = capsys.readouterr()
assert captured.out == "Command: help\n\n"
with monkeypatch.context() as m:
call_with(m, ["--show-config"], {"FLIPPER_ZERO_PORT": "/dev/flipper0"})
main(s=Serial)
captured = capsys.readouterr()
assert captured.out == """show_banner: 0
hide_command: 0
port: /dev/flipper0
Command: help
"""
with monkeypatch.context() as m:
call_with(m, ["--show-banner"], {"FLIPPER_ZERO_PORT": "/dev/flipper0"})
main(s=Serial)
captured = capsys.readouterr()
assert captured.out == "Command: help\n\n\n"
with monkeypatch.context() as m:
mock_write = mock_open()
with patch.object(builtins, "open", mock_write, create=True) as patched_open:
call_with(m, ["--filename=/tmp/to_save.txt",
"storage", "read", "/ext/badusb/demo_macos.txt"],
{"FLIPPER_ZERO_PORT": "/dev/flipper0"})
with pytest.raises(SystemExit) as e:
main(s=Serial)
assert e.type == SystemExit
assert e.value.code == 1
captured = capsys.readouterr()
assert captured.out == "Command: storage read /ext/badusb/demo_macos.txt\nError in storage read\n"
call_with(m, ["--filename=/tmp/to_write.txt",
"storage", "write", "/ext/badusb/demo_macos.txt"],
{"FLIPPER_ZERO_PORT": "/dev/flipper0"})
with pytest.raises(SystemExit) as e:
main(s=Serial)
assert e.type == SystemExit
assert e.value.code == 1
captured = capsys.readouterr()
assert captured.out == "Command: storage write /ext/badusb/demo_macos.txt\n/tmp/to_write.txt is missing.\n"
with monkeypatch.context() as m:
call_with(m, [
"--filename=/tmp/to_md5.txt", "storage", "md5",
"/ext/badusb/demo_macos.txt"],
{"FLIPPER_ZERO_PORT": "/dev/flipper0"})
with pytest.raises(SystemExit) as e:
main(s=Serial)
captured = capsys.readouterr()
assert captured.out == \
"""Command: storage md5 /ext/badusb/demo_macos.txt
/tmp/to_md5.txt is missing.
"""
| nledez/flipperzero-cli | tests/test_cli.py | test_cli.py | py | 14,895 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "flipperzero_cli.load_config",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "flipperzero_cli.CONFIG",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "flipperzero_cli.load_config",
"line_number": 55,
"usage_type": "call"
},
{
"ap... |
35413864969 | import multiprocessing
from multiprocessing import Process
from cleanup import TwitterCleanuper
from preprocessing import TwitterData
from word2vec import Word2VecProvider
import pandas as pd
def preprocess(results, data_path, is_testing, data_name, min_occurrences=5, cache_output=None):
twitter_data = TwitterData()
twitter_data.initialize(data_path, is_testing)
twitter_data.build_features()
twitter_data.cleanup(TwitterCleanuper())
twitter_data.tokenize()
twitter_data.build_wordlist(min_occurrences=min_occurrences)
twitter_data.build_data_model()
# twitter_data.build_ngrams()
# twitter_data.build_ngram_model()
# twitter_data.build_data_model(with_ngram=2)
word2vec = Word2VecProvider()
word2vec.load("/home/mike/Desktop/glove.twitter.27B.200d.txt")
twitter_data.build_word2vec_model(word2vec)
if cache_output is not None:
twitter_data.data_model.to_csv(cache_output, index_label="idx", float_format="%.6f")
results[data_name] = twitter_data.data_model
def preprare_data(min_occurrences):
import os
training_data = None
testing_data = None
print("Loading data...")
test_data_file_name = "data/processed_test_word2vec_bow_" + str(min_occurrences) + ".csv"
train_data_file_name = "data/processed_train_word2vec_bow_" + str(min_occurrences) + ".csv"
use_cache = os.path.isfile(train_data_file_name) and os.path.isfile(
test_data_file_name)
if use_cache:
training_data = TwitterData()
training_data.initialize(None, from_cached=train_data_file_name)
training_data = training_data.data_model
testing_data = TwitterData()
testing_data.initialize(None, from_cached=test_data_file_name)
testing_data = testing_data.data_model
print("Loaded from cached files...")
else:
print("Preprocessing data...")
with multiprocessing.Manager() as manager:
results = manager.dict()
preprocess_training = Process(target=preprocess, args=(
results, "data/train.csv", False, "train", min_occurrences, train_data_file_name,))
preprocess_testing = Process(target=preprocess, args=(
results, "data/train.csv", True, "test", min_occurrences, test_data_file_name,))
preprocess_training.start()
preprocess_testing.start()
print("Multiple processes started...")
preprocess_testing.join()
print("Preprocessed testing data...")
preprocess_training.join()
print("Preprocessed training data...")
training_data = results["train"]
testing_data = results["test"]
print("Data preprocessed & cached...")
return training_data, testing_data
class TwitterData( TwitterData_ExtraFeatures ):
def build_final_model (self, word2vec_provider, stopwords=nltk.corpus.stopwords.words( "english" )):
whitelist = self.whitelist
stopwords = list( filter( lambda sw: sw not in whitelist, stopwords ) )
extra_columns = [col for col in self.processed_data.columns if col.startswith( "number_of" )]
similarity_columns = ["bad_similarity", "good_similarity", "information_similarity"]
label_column = []
if not self.is_testing:
label_column = ["label"]
columns = label_column + ["original_id"] + extra_columns + similarity_columns + list(
map( lambda i: "word2vec_{0}".format( i ), range( 0, word2vec_provider.dimensions ) ) ) + list(
map( lambda w: w + "_bow", self.wordlist ) )
labels = []
rows = []
for idx in self.processed_data.index:
current_row = []
if not self.is_testing:
# add label
current_label = self.processed_data.loc[idx, "emotion"]
labels.append( current_label )
current_row.append( current_label )
current_row.append( self.processed_data.loc[idx, "id"] )
for _, col in enumerate( extra_columns ):
current_row.append( self.processed_data.loc[idx, col] )
# average similarities with words
tokens = self.processed_data.loc[idx, "tokenized_text"]
for main_word in map( lambda w: w.split( "_" )[0], similarity_columns ):
current_similarities = [abs( sim ) for sim in
map( lambda word: word2vec_provider.get_similarity( main_word, word.lower() ),
tokens ) if
sim is not None]
if len( current_similarities ) <= 1:
current_row.append( 0 if len( current_similarities ) == 0 else current_similarities[0] )
continue
max_sim = max( current_similarities )
min_sim = min( current_similarities )
current_similarities = [((sim - min_sim)/(max_sim - min_sim)) for sim in
current_similarities] # normalize to <0;1>
current_row.append( np.array( current_similarities ).mean() )
# add word2vec vector
tokens = self.processed_data.loc[idx, "tokenized_text"]
current_word2vec = []
for _, word in enumerate( tokens ):
vec = word2vec_provider.get_vector( word.lower() )
if vec is not None:
current_word2vec.append( vec )
averaged_word2vec = list( np.array( current_word2vec ).mean( axis=0 ) )
current_row += averaged_word2vec
# add bag-of-words
tokens = set( self.processed_data.loc[idx, "text"] )
for _, word in enumerate( self.wordlist ):
current_row.append( 1 if word in tokens else 0 )
rows.append( current_row )
self.data_model = pd.DataFrame( rows, columns=columns )
self.data_labels = pd.Series( labels )
return self.data_model, self.data_labels
def log(text):
print(text)
with open("log.txt", "a") as log_file:
log_file.write(str(text) + "\n")
if __name__ == "__main__":
def main():
for m in range( 3, 4 ):
print("Preparing data with min_occurrences=" + str( m ))
training_data, testing_data = preprare_data( m )
log( "********************************************************" )
log( "Validating for {0} min_occurrences:".format( m ) )
# drop idx & id columns
# if training_data.columns[0] == "idx":
# training_data = training_data.iloc[:, 1:]
#
# if testing_data.columns[0] == "idx":
# testing_data = testing_data.iloc[:, 1:]
#
# if "original_id" in training_data.columns:
# training_data.drop( "original_id", axis=1, inplace=True )
#
# if "original_id" in testing_data.columns:
# testing_data.drop( "original_id", axis=1, inplace=True )
td = TwitterData()
td.initialize( "data\\train.csv" )
td.build_features()
td.cleanup( TwitterCleanuper() )
td.tokenize()
td.stem()
td.build_wordlist()
td.build_final_model( word2vec )
td.data_model.head( 5 )
print("Done!")
main()
| michal0janczyk/information_diffusion | fuzzy_logic/word_2_vectors/main.py | main.py | py | 7,484 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "preprocessing.TwitterData",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cleanup.TwitterCleanuper",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "word2vec.Word2VecProvider",
"line_number": 19,
"usage_type": "call"
},
{
"api_n... |
16505295497 | from typing import List, Dict, Tuple
def create_chirp_dictionary(file_name: str) \
-> Dict[int, Tuple[int, str, List[str], List[int], List[int]]]:
"""
Opens the file "file_name" in working directory and reads the content into a
chirp dictionary as defined on Page 2 Functions 2.
Note, some spacing has been added for human readability.
>>> create_chirp_dictionary("chirps.txt")
{100000: (
400,
'Does not want to build a %SnowMan %StopAsking',
['SnowMan', 'StopAsking'],
[100, 200, 300],
[400, 500]),
100001: (
200,
'Make the ocean great again.',
[''],
[],
[400]),
100002: (
500,
"Help I'm being held captive by a beast! %OhNoes",
['OhNoes'],
[400],
[100, 200, 300]),
100003: (
500,
"Actually nm. This isn't so bad lolz :P %StockholmeSyndrome",
['StockholmeSyndrome'],
[400, 100],
[]),
100004: (
300,
'If some random dude offers to %ShowYouTheWorld do yourself a favour and %JustSayNo.',
['ShowYouTheWorld', 'JustSayNo'],
[500, 200],
[400]),
100005: (
400,
'LOLZ BELLE. %StockholmeSyndrome %SnowMan',
['StockholmeSyndrome', 'SnowMan'],
[],
[200, 300, 100, 500])}
"""
#Your code goes here
f = open(file_name, "r")
H = dict()
#Helper for tag
def tag_helper(x):
y = []
if x == '\n':
return []
else:
x = x.split(', ')
for i in x:
y.append(str(i))
return y
#Helper for liked and disliked
def helper(a):
b = []
if a == '\n':
return []
else:
a = a.strip('\n').split(',')
for numbers in a:
b.append(int(numbers))
return b
line = f.readline()
while line:
chirpid = int(line) #10000
userid = int(f.readline()) #400
message = f.readline().strip('\n') #Does not want to build a %SnowMan %StopAsking
tags = f.readline().strip('\n') #SnowMan, StopAsking
likeds = f.readline() #100, 200, 300
dislikeds = f.readline() #400, 500
sperate = f.readline() #sperates to the next userid (\n)
line = f.readline()
tag = tag_helper(tags)
liked = helper(likeds)
disliked = helper(dislikeds)
H[chirpid] = (userid, message, tag, liked, disliked)
return H
| kimber1y-tung/CSC108 | assignment3/A3-2.py | A3-2.py | py | 2,659 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.Dict",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 4,
"usage_type": "name"
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.