hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
405ec366d2ae10466e3404c3d71b6788f3e189ba | 929 | py | Python | mycobot_320_moveit/scripts/sync_plan.py | dirksavage88/mycobot_ros | b4203056a8a0c494172c60542010dcc1b0a52df5 | [
"BSD-2-Clause"
] | null | null | null | mycobot_320_moveit/scripts/sync_plan.py | dirksavage88/mycobot_ros | b4203056a8a0c494172c60542010dcc1b0a52df5 | [
"BSD-2-Clause"
] | null | null | null | mycobot_320_moveit/scripts/sync_plan.py | dirksavage88/mycobot_ros | b4203056a8a0c494172c60542010dcc1b0a52df5 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python2
import time
import subprocess
import rospy
from sensor_msgs.msg import JointState
from pymycobot.mycobot import MyCobot
mc = None
def callback(data):
    """Relay an incoming JointState message to the arm as radian targets."""
    rospy.loginfo(rospy.get_caller_id() + "%s", data)
    # Forward every reported joint position unchanged; per-joint sign flips
    # (see the commented-out code in history) are intentionally not applied.
    angles = list(data.position)
    mc.send_radians(angles, 80)
def listener():
    """Initialize the ROS node, connect to the MyCobot and subscribe to joint states.

    Assigns the global ``mc`` used by :func:`callback`, then blocks in
    ``rospy.spin()`` until the node is shut down.
    """
    global mc
    rospy.init_node("mycobot_reciver", anonymous=True)
    # Auto-detect the serial port via shell glob expansion; NOTE(review):
    # if several /dev/ttyUSB* devices exist this yields a space-joined list —
    # confirm only one adapter is expected.
    port = subprocess.check_output(['echo -n /dev/ttyUSB*'],
                                   shell=True)
    # A ROS private ~port parameter, when set, overrides the auto-detected port.
    port = rospy.get_param("~port", port)
    baud = rospy.get_param("~baud", 1000000)
    mc = MyCobot(port, baud)
    rospy.Subscriber("joint_states", JointState, callback)
    # spin() simply keeps python from exiting until this node is stopped
    rospy.spin()
# Entry point: start the ROS node and block until shutdown.
if __name__ == "__main__":
    listener()
| 22.119048 | 72 | 0.64155 |
4f2c95eb27c7025cf61f8afee18f344ffb6438be | 19,555 | py | Python | gcn_lib/dense/torch_vertex2d.py | guochengqian/KPConv-PyTorch | 4138c12d276b0d9a765d41b93f2b9fd6e5eb2baa | [
"MIT"
] | 2 | 2021-08-04T17:15:04.000Z | 2021-12-06T14:37:46.000Z | gcn_lib/dense/torch_vertex2d.py | guochengqian/KPConv-PyTorch-DeepGCN | 4138c12d276b0d9a765d41b93f2b9fd6e5eb2baa | [
"MIT"
] | null | null | null | gcn_lib/dense/torch_vertex2d.py | guochengqian/KPConv-PyTorch-DeepGCN | 4138c12d276b0d9a765d41b93f2b9fd6e5eb2baa | [
"MIT"
] | null | null | null | import torch
from torch import nn
from .torch_nn import MLP2dLayer, get_center_feature, batched_index_select, act_layer, norm_layer2d, glorot
from .torch_edge import DilatedKNN2d, add_self_loops, remove_self_loops
import torch.nn.functional as F
class MRConv2d(nn.Module):
    r"""Revised Max-Relative Graph Convolution layer (with activation, batch normalization)
    from the `"DeepGCNs: Making GCNs Go as Deep as CNNs"
    <https://arxiv.org/abs/1910.06849>`_ paper

    Args:
        in_channels (int): size of each input sample.
        out_channels (int): size of each output sample.
        act (str): activation used inside the MLP.
        norm (str or None): normalization used inside the MLP.
        bias (bool): whether the MLP learns an additive bias.
        k (int): number of neighbors per vertex.
        aggr (str): neighborhood aggregation, one of 'max', 'mean', 'sum'.
    """
    def __init__(self, in_channels, out_channels,
                 act='relu', norm=None, bias=True,
                 k=9, aggr='max'):
        super(MRConv2d, self).__init__()
        self.nn = MLP2dLayer([in_channels * 2, out_channels], act, norm, bias)
        self.k = k
        # BUG FIX: 'sum' previously mapped to torch.mean; use torch.sum.
        if aggr == 'max':
            self.aggr = torch.max
        elif aggr == 'mean':
            self.aggr = torch.mean
        elif aggr == 'sum':
            self.aggr = torch.sum
        else:
            raise NotImplementedError

    def forward(self, x, edge_index):
        # edge_index = remove_self_loops(edge_index)
        x_i = get_center_feature(x, self.k)
        x_j = batched_index_select(x, edge_index[0])
        # BUG FIX: torch.max returns a (values, indices) tuple while
        # torch.mean/torch.sum return a tensor; the old unconditional
        # tuple-unpack only worked for 'max'.
        aggr_out = self.aggr(x_j - x_i, -1, keepdim=True)
        if isinstance(aggr_out, tuple):
            aggr_out = aggr_out[0]
        return self.nn(torch.cat([x, aggr_out], dim=1))
class EdgeConv2d(nn.Module):
    r"""Revised Edge convolution layer (with activation, batch normalization)
    from the `"Dynamic Graph CNN for Learning on Point Clouds"
    <https://arxiv.org/abs/1801.07829>`_ paper

    Args:
        in_channels (int): size of each input sample.
        out_channels (int): size of each output sample.
        act (str): activation used inside the MLP.
        norm (str or None): normalization used inside the MLP.
        bias (bool): whether the MLP learns an additive bias.
        k (int): number of neighbors per vertex.
        aggr (str): neighborhood aggregation, one of 'max', 'mean', 'sum'.
    """
    def __init__(self, in_channels, out_channels,
                 act='relu', norm=None, bias=True,
                 k=9, aggr='max'):
        super(EdgeConv2d, self).__init__()
        self.nn = MLP2dLayer([in_channels * 2, out_channels], act, norm, bias)
        self.k = k
        # BUG FIX: 'sum' previously mapped to torch.mean; use torch.sum.
        if aggr == 'max':
            self.aggr = torch.max
        elif aggr == 'mean':
            self.aggr = torch.mean
        elif aggr == 'sum':
            self.aggr = torch.sum
        else:
            # Consistent with MRConv2d: reject unknown aggregators early.
            raise NotImplementedError

    def forward(self, x, edge_index):
        # edge_index = remove_self_loops(edge_index)
        x_i = get_center_feature(x, self.k)
        x_j = batched_index_select(x, edge_index[0])
        # BUG FIX: torch.max returns (values, indices); mean/sum return a
        # plain tensor, so the old unconditional unpack only worked for 'max'.
        aggr_out = self.aggr(self.nn(torch.cat([x_i, x_j - x_i], dim=1)), -1, keepdim=True)
        if isinstance(aggr_out, tuple):
            aggr_out = aggr_out[0]
        return aggr_out
class GATConv2d(nn.Module):
    r"""Revised one-head graph attentional operator from the `"Graph Attention
    Networks" <https://arxiv.org/abs/1710.10903>`_ paper.

    Each neighbor feature is weighted by a LeakyReLU-softmax attention
    coefficient computed from the concatenated (center, neighbor) features;
    the weighted neighbors are then reduced with ``aggr``.

    Args:
        in_channels (int): Size of each input sample.
        out_channels (int): Size of each output sample.
        act (str): activation used inside the linear-transform MLP.
        norm (str or None): normalization used inside the MLP.
        bias (bool, optional): If set to :obj:`False`, the layer will not learn
            an additive bias. (default: :obj:`True`)
        k (int): number of neighbors per vertex.
        aggr (str): neighborhood aggregation, one of 'max', 'mean', 'sum'.
        negative_slope (float, optional): LeakyReLU angle of the negative
            slope. (default: :obj:`0.2`)
        dropout (float, optional): Dropout probability of the normalized
            attention coefficients which exposes each node to a stochastically
            sampled neighborhood during training. (default: :obj:`0`)
    """
    def __init__(self, in_channels, out_channels,
                 act='relu', norm=None, bias=True,
                 k=9, aggr='sum',
                 negative_slope=0.2, dropout=0):
        super(GATConv2d, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.negative_slope = negative_slope
        self.dropout = dropout
        self.k = k
        # BUG FIX: 'sum' (the default, per the GAT formulation) previously
        # mapped to torch.mean; use torch.sum.
        if aggr == 'max':
            self.aggr = torch.max
        elif aggr == 'mean':
            self.aggr = torch.mean
        elif aggr == 'sum':
            self.aggr = torch.sum
        else:
            raise NotImplementedError
        self.nn = MLP2dLayer([in_channels, out_channels], act, norm, bias=False)
        self.att = nn.Parameter(torch.Tensor(1, 2 * out_channels, 1, 1))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(1, out_channels, 1, 1))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        glorot(self.att)
        if self.bias is not None:
            self.bias.data.zero_()

    def forward(self, x, edge_index):
        x = self.nn(x)
        # edge_index = add_self_loops(edge_index)
        x_i = get_center_feature(x, self.k)   # center features tiled over neighbors
        x_j = batched_index_select(x, edge_index[0])
        # Attention logits from concatenated (center, neighbor) features.
        alpha = (torch.cat([x_i, x_j], dim=1) * self.att).sum(dim=1, keepdim=True)
        alpha = F.leaky_relu(alpha, self.negative_slope)
        alpha = F.softmax(alpha, dim=-1)  # normalize over the k neighbors
        # Sample attention coefficients stochastically during training.
        alpha = F.dropout(alpha, p=self.dropout, training=self.training)
        x_j = x_j * alpha
        # BUG FIX: torch.max returns (values, indices) while sum/mean return a
        # tensor; the old unconditional tuple-unpack only worked for 'max'.
        aggr_out = self.aggr(x_j, -1, keepdim=True)
        if isinstance(aggr_out, tuple):
            aggr_out = aggr_out[0]
        if self.bias is not None:
            aggr_out = aggr_out + self.bias
        return aggr_out
class SemiGCNConv2d(nn.Module):
    r"""The graph convolutional operator from the `"Semi-supervised
    Classification with Graph Convolutional Networks"
    <https://arxiv.org/abs/1609.02907>`_ paper (dense 2d variant).

    Neighbor features are scaled uniformly by ``1/deg`` (every vertex has a
    fixed-size neighborhood here) and then reduced with ``aggr``.

    Args:
        in_channels (int): Size of each input sample.
        out_channels (int): Size of each output sample.
        act (str): activation used inside the linear-transform MLP.
        norm (str or None): normalization used inside the MLP.
        bias (bool, optional): If set to :obj:`False`, the layer will not learn
            an additive bias. (default: :obj:`True`)
        k (int): number of neighbors per vertex.
        aggr (str): neighborhood aggregation, one of 'max', 'mean', 'sum'.
    """
    def __init__(self, in_channels, out_channels,
                 act='relu', norm=None, bias=True,
                 k=9, aggr='sum'):
        super(SemiGCNConv2d, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.nn = MLP2dLayer([in_channels, out_channels], act, norm, bias=False)
        if bias:
            self.bias = nn.Parameter(torch.Tensor(1, out_channels, 1, 1))
        else:
            self.register_parameter('bias', None)
        # BUG FIX: 'sum' (the default) previously mapped to torch.mean.
        if aggr == 'max':
            self.aggr = torch.max
        elif aggr == 'mean':
            self.aggr = torch.mean
        elif aggr == 'sum':
            self.aggr = torch.sum
        else:
            raise NotImplementedError
        self.reset_parameters()

    def reset_parameters(self):
        if self.bias is not None:
            self.bias.data.zero_()

    def forward(self, x, edge_index):
        """"""
        x = self.nn(x)
        # edge_index = add_self_loops(edge_index)
        x_j = batched_index_select(x, edge_index[0])
        # Uniform degree normalization: each vertex has `deg` neighbors.
        deg = edge_index.shape[-1]
        norm = 1 / deg
        x_j = x_j * norm
        # BUG FIX: handle the (values, indices) tuple returned by torch.max;
        # sum/mean return a plain tensor.
        aggr_out = self.aggr(x_j, -1, keepdim=True)
        if isinstance(aggr_out, tuple):
            aggr_out = aggr_out[0]
        if self.bias is not None:
            aggr_out = aggr_out + self.bias
        return aggr_out
class GINConv2d(nn.Module):
    r"""The graph isomorphism operator from the `"How Powerful are
    Graph Neural Networks?" <https://arxiv.org/abs/1810.00826>`_ paper

    .. math::
        \mathbf{x}^{\prime}_i = h_{\mathbf{\Theta}} \left( (1 + \epsilon) \cdot
        \mathbf{x}_i + \sum_{j \in \mathcal{N}(i)} \mathbf{x}_j \right),

    here :math:`h_{\mathbf{\Theta}}` denotes a neural network, *.i.e.* a MLP.

    Args:
        in_channels (int): Size of each input sample.
        out_channels (int): Size of each output sample.
        act (str): activation used inside :math:`h_{\mathbf{\Theta}}`.
        norm (str or None): normalization used inside the MLP.
        bias (bool): whether the MLP learns an additive bias.
        k (int): number of neighbors per vertex.
        aggr (str): neighborhood aggregation, one of 'max', 'mean', 'sum'.
        eps (float, optional): (Initial) :math:`\epsilon` value.
            (default: :obj:`0`)
        train_eps (bool, optional): If set to :obj:`True`, :math:`\epsilon`
            will be a trainable parameter. (default: :obj:`False`)
    """
    def __init__(self, in_channels, out_channels,
                 act='relu', norm=None, bias=True,
                 k=9, aggr='sum',
                 eps=0, train_eps=False):
        super(GINConv2d, self).__init__()
        self.nn = MLP2dLayer([in_channels, out_channels], act, norm, bias)
        self.initial_eps = eps
        if train_eps:
            self.eps = torch.nn.Parameter(torch.Tensor([eps]))
        else:
            self.register_buffer('eps', torch.Tensor([eps]))
        # BUG FIX: 'sum' (the default, and the aggregation GIN's expressiveness
        # argument relies on) previously mapped to torch.mean.
        if aggr == 'max':
            self.aggr = torch.max
        elif aggr == 'mean':
            self.aggr = torch.mean
        elif aggr == 'sum':
            self.aggr = torch.sum
        else:
            raise NotImplementedError
        self.reset_parameters()

    def reset_parameters(self):
        self.eps.data.fill_(self.initial_eps)

    def forward(self, x, edge_index):
        # edge_index = remove_self_loops(edge_index)
        x_j = batched_index_select(x, edge_index[0])
        # BUG FIX: torch.max returns (values, indices); sum/mean return a
        # tensor, so the old unconditional unpack only worked for 'max'.
        aggr_out = self.aggr(x_j, -1, keepdim=True)
        if isinstance(aggr_out, tuple):
            aggr_out = aggr_out[0]
        out = self.nn((1 + self.eps) * x + aggr_out)
        return out
class RSAGEConv2d(nn.Module):
    r"""The (relative) GraphSAGE operator from the `"Inductive Representation
    Learning on Large Graphs" <https://arxiv.org/abs/1706.02216>`_ paper.

    Neighbor features (optionally relative to the center when
    ``relative=True``) are transformed, aggregated, concatenated with the
    center feature and transformed again; the output is optionally
    :math:`\ell_2`-normalized.

    Args:
        in_channels (int): Size of each input sample.
        out_channels (int): Size of each output sample.
        act (str): activation used inside the MLPs.
        norm: if not None, output features are :math:`\ell_2`-normalized.
        bias (bool, optional): If set to :obj:`False`, the layer will not learn
            an additive bias. (default: :obj:`True`)
        k (int): number of neighbors per vertex.
        aggr (str): neighborhood aggregation, one of 'max', 'mean', 'sum'.
        relative (bool): aggregate ``x_j - x_i`` instead of ``x_j``.
    """
    def __init__(self,
                 in_channels, out_channels,
                 act='relu', norm=True, bias=True,
                 k=9, aggr='max',
                 relative=False):
        super(RSAGEConv2d, self).__init__()
        self.relative = relative
        # BUG FIX: k was accepted but never stored, so forward() crashed with
        # AttributeError on self.k whenever relative=True.
        self.k = k
        self.nn = MLP2dLayer([out_channels + in_channels, out_channels], act, norm=None, bias=False)
        self.pre_nn = MLP2dLayer([in_channels, out_channels], act, norm=None, bias=False)
        if bias:
            self.bias = nn.Parameter(torch.Tensor(1, out_channels, 1, 1))
        else:
            self.register_parameter('bias', None)
        self.norm = norm
        # BUG FIX: 'sum' previously mapped to torch.mean.
        if aggr == 'max':
            self.aggr = torch.max
        elif aggr == 'mean':
            self.aggr = torch.mean
        elif aggr == 'sum':
            self.aggr = torch.sum
        else:
            raise NotImplementedError
        self.reset_parameters()

    def reset_parameters(self):
        if self.bias is not None:
            self.bias.data.zero_()

    def forward(self, x, edge_index):
        """"""
        x_j = batched_index_select(x, edge_index[0])
        if self.relative:
            x_i = get_center_feature(x, self.k)
            x_j = self.pre_nn(x_j - x_i)
        else:
            x_j = self.pre_nn(x_j)
        # BUG FIX: use the configured aggregator (was hard-coded torch.max,
        # silently ignoring aggr='mean'/'sum'); default behavior unchanged.
        aggr_out = self.aggr(x_j, -1, keepdim=True)
        if isinstance(aggr_out, tuple):  # torch.max returns (values, indices)
            aggr_out = aggr_out[0]
        out = self.nn(torch.cat((x, aggr_out), dim=1))
        if self.bias is not None:
            out = out + self.bias
        if self.norm is not None:
            out = F.normalize(out, dim=1)
        return out
class GraphConv2d(nn.Module):
    """Static graph convolution layer.

    Thin dispatcher that instantiates one concrete graph-conv operator
    (edge / mr / gat / gcn / gin / sage / rsage) and delegates to it.
    """
    def __init__(self, in_channels, out_channels,
                 conv='edge', act='relu', norm=None, bias=True,
                 k=9
                 ):
        super(GraphConv2d, self).__init__()
        # Shared positional arguments for every concrete operator.
        common = (in_channels, out_channels, act, norm, bias, k)
        if conv == 'edge':
            gconv = EdgeConv2d(*common)
        elif conv == 'mr':
            gconv = MRConv2d(*common)
        elif conv.lower() == 'gat':
            gconv = GATConv2d(*common)
        elif conv.lower() == 'gcn':
            gconv = SemiGCNConv2d(*common)
        elif conv.lower() == 'gin':
            gconv = GINConv2d(*common)
        elif conv.lower() == 'sage':
            gconv = RSAGEConv2d(*common, relative=False)
        elif conv.lower() == 'rsage':
            gconv = RSAGEConv2d(*common, relative=True)
        else:
            raise NotImplementedError('conv:{} is not supported'.format(conv))
        self.gconv = gconv

    def forward(self, x, edge_index):
        # Delegate entirely to the selected operator.
        return self.gconv(x, edge_index)
class DynConv2d(GraphConv2d):
    """
    Dynamic graph convolution layer: rebuilds a dilated k-NN graph from the
    input features on every forward pass, then applies the static graph conv.
    """
    def __init__(self, in_channels, out_channels,
                 conv='edge', act='relu', norm=None, bias=True,
                 k=9,  # k: number of neighbors
                 dilation=1, stochastic=False, epsilon=0.0):
        # BUG FIX: k was not forwarded to GraphConv2d, so the underlying conv
        # always used its default neighborhood size (9) regardless of `k`.
        super(DynConv2d, self).__init__(in_channels, out_channels, conv, act, norm, bias, k)
        self.k = k
        self.d = dilation
        self.dilated_knn_graph = DilatedKNN2d(k, dilation,
                                              self_loop=True, stochastic=stochastic, epsilon=epsilon)

    def forward(self, x):
        # Recompute the neighborhood from current features (dynamic graph).
        edge_index = self.dilated_knn_graph(x)
        return super(DynConv2d, self).forward(x, edge_index)
class PlainDynBlock2d(nn.Module):
    """Plain dynamic graph convolution block (no skip connection)."""
    def __init__(self, in_channels, conv='edge',
                 act='relu', norm=None, bias=True,
                 k=9, dilation=1,
                 stochastic=False, epsilon=0.0):
        super(PlainDynBlock2d, self).__init__()
        # Channel count is preserved: in_channels -> in_channels.
        self.body = DynConv2d(in_channels, in_channels, conv, act, norm, bias,
                              k, dilation, stochastic, epsilon)

    def forward(self, x):
        # No residual/dense connection: output is the conv result alone.
        return self.body(x)
class ResDynBlock2d(nn.Module):
    r"""Residual dynamic graph convolution block
    from the `"DeepGCNs: Making GCNs Go as Deep as CNNs"
    <https://arxiv.org/abs/1910.06849>`_ paper: y = F(x) + x.
    """
    def __init__(self, in_channels, conv='edge',
                 act='relu', norm=None, bias=True,
                 k=9,
                 dilation=1, stochastic=False, epsilon=0.0):
        super(ResDynBlock2d, self).__init__()
        # Same in/out channels so the identity shortcut is shape-compatible.
        self.body = DynConv2d(in_channels, in_channels, conv, act, norm, bias,
                              k, dilation, stochastic, epsilon)

    def forward(self, x):
        residual = x
        out = self.body(x)
        return out + residual
class DenseDynBlock2d(nn.Module):
    r"""Densely connected dynamic graph convolution block
    from the `"DeepGCNs: Making GCNs Go as Deep as CNNs"
    <https://arxiv.org/abs/1910.06849>`_ paper: y = [x, F(x)].
    """
    def __init__(self, in_channels, out_channels=64, conv='edge',
                 act='relu', norm=None, bias=True,
                 k=9, dilation=1, stochastic=False, epsilon=0.0):
        super(DenseDynBlock2d, self).__init__()
        self.body = DynConv2d(in_channels, out_channels, conv, act, norm, bias,
                              k, dilation, stochastic, epsilon)

    def forward(self, x):
        new_features = self.body(x)
        # Concatenate input and new features along the channel axis.
        return torch.cat((x, new_features), 1)
class GraphPool2d(nn.Module):
    """
    Dense Dynamic graph pooling block

    Scores every vertex with a 1-channel dynamic graph conv, then keeps the
    top ``ratio`` fraction of vertices (top-k pooling along the vertex axis).
    """
    def __init__(self, in_channels, ratio=0.5, conv='edge', **kwargs):
        super(GraphPool2d, self).__init__()
        # Single output channel acts as a learned per-vertex score.
        self.gnn = DynConv2d(in_channels, 1, conv=conv, **kwargs)
        self.ratio = ratio
    def forward(self, x):
        """"""
        # assumes x is B x C x N x 1 — TODO confirm against callers.
        score = torch.tanh(self.gnn(x))
        # Keep the int(N * ratio) highest-scoring vertices along dim 2.
        _, indices = score.topk(int(x.shape[2] * self.ratio), 2)
        return torch.gather(x, 2, indices.repeat(1, x.shape[1], 1, 1))
class VLADPool2d(torch.nn.Module):
    """NetVLAD-style pooling: soft-assigns each point to K learned centroids
    and aggregates the assignment-weighted residuals per cluster."""
    def __init__(self, in_channels, num_clusters=64, alpha=100.0):
        super(VLADPool2d, self).__init__()
        self.in_channels = in_channels
        self.num_clusters = num_clusters
        # alpha controls the sharpness of the soft cluster assignment.
        self.alpha = alpha
        self.lin = nn.Linear(in_channels, self.num_clusters, bias=True)
        self.centroids = nn.Parameter(torch.rand(self.num_clusters, in_channels))
        self._init_params()
    def _init_params(self):
        # NetVLAD-style init: assignment weights derived from the centroids.
        self.lin.weight = nn.Parameter((2.0 * self.alpha * self.centroids))
        self.lin.bias = nn.Parameter(- self.alpha * self.centroids.norm(dim=1))
    def forward(self, x, norm_intra=False, norm_L2=False):
        B, C, N, _ = x.shape
        # NOTE(review): squeeze() also drops the batch dim when B == 1 —
        # squeeze(-1) would be safer; confirm callers never pass B == 1.
        x = x.squeeze().transpose(1, 2)  # B, N, C
        K = self.num_clusters
        soft_assign = self.lin(x)  # soft_assign of size (B, N, K)
        # NOTE(review): softmax over dim=1 normalizes across points, not
        # across clusters (dim=-1 in original NetVLAD) — confirm intent.
        soft_assign = F.softmax(soft_assign, dim=1).unsqueeze(1)  # (B, 1, N, K)
        soft_assign = soft_assign.expand(-1, C, -1, -1)  # soft_assign of size (B, C, N, K)
        # input x of size (NxC)
        xS = x.transpose(1, 2).unsqueeze(-1).expand(-1, -1, -1, K)  # xS of size (B, C, N, K)
        cS = self.centroids.unsqueeze(0).unsqueeze(0).expand(B, N, -1, -1).transpose(2, 3)  # cS of size (B, C, N, K)
        residual = (xS - cS)  # residual of size (B, C, N, K)
        residual = residual * soft_assign  # weighted residuals, (B, C, N, K)
        vlad = torch.sum(residual, dim=2).unsqueeze(-1)  # (B, C, K, 1)
        if (norm_intra):
            vlad = F.normalize(vlad, p=2, dim=1)  # intra-normalization
            # print("i-norm vlad", vlad.shape)
        if (norm_L2):
            vlad = vlad.view(-1, K * C)  # flatten
            vlad = F.normalize(vlad, p=2, dim=1)  # L2 normalize
        # return vlad.view(B, -1, 1, 1)
        return vlad
| 39.031936 | 117 | 0.588392 |
5169af1b81e58c002bf8f5618574ca6b0a639170 | 3,185 | py | Python | mlni/adml_regression.py | AbdulkadirA/mlni | f58d53cd70d700289063ce3ca4ad475607806729 | [
"MIT"
] | null | null | null | mlni/adml_regression.py | AbdulkadirA/mlni | f58d53cd70d700289063ce3ca4ad475607806729 | [
"MIT"
] | null | null | null | mlni/adml_regression.py | AbdulkadirA/mlni | f58d53cd70d700289063ce3ca4ad475607806729 | [
"MIT"
] | null | null | null | from mlni.regression import RB_RepeatedHoldOut_DualSVM_Regression, RB_KFold_DualSVM_Regression
from mlni.base import RB_Input
import os, pickle
from mlni.utils import make_cv_partition
__author__ = "Junhao Wen"
__copyright__ = "Copyright 2019-2020 The CBICA & SBIA Lab"
__credits__ = ["Junhao Wen"]
__license__ = "See LICENSE file"
__version__ = "0.1.0"
__maintainer__ = "Junhao Wen"
__email__ = "junhao.wen89@gmail.com"
__status__ = "Development"
def regression_roi(feature_tsv, output_dir, cv_repetition, cv_strategy='hold_out', n_threads=8, seed=None, verbose=False):
    """
    Core function for regression with ROI-based features
    Args:
        feature_tsv: str, path to the tsv containing extracted feature, following the BIDS convention. The tsv contains
        the following headers: "
                                 "i) the first column is the participant_id;"
                                 "ii) the second column should be the session_id;"
                                 "iii) the third column should be the regression target;"
                                 "The following column should be the extracted features. e.g., the ROI features"
        output_dir: str, path to store the regression results.
        cv_repetition: int, number of repetitions for cross-validation (CV)
        cv_strategy: str, cross validation strategy used. Default is hold_out. choices=['k_fold', 'hold_out']
        n_threads: int, default is 8. The number of threads to run model in parallel.
        seed: int or None, random seed used when creating the data split.
        verbose: Bool, default is False. If the output message is verbose.

    Returns: regression outputs.
    """
    # BUG FIX: the banner previously said "binary classification" although
    # this is the regression entry point.
    print('MLNI for regression with nested CV...')
    input_data = RB_Input(feature_tsv, standardization_method="minmax")

    ## data split
    print('Data split was performed based on validation strategy: %s...\n' % cv_strategy)
    ## check if data split has been done, if yes, the pickle file is there
    if os.path.isfile(os.path.join(output_dir, 'data_split_stratified_' + str(cv_repetition) + '-holdout.pkl')):
        split_index = pickle.load(open(os.path.join(output_dir, 'data_split_stratified_' + str(cv_repetition) + '-holdout.pkl'), 'rb'))
    else:
        split_index, _ = make_cv_partition(input_data.get_y(), cv_strategy, output_dir, cv_repetition, seed=seed)
    print('Data split has been done!\n')

    print('Starts regression with SVR...')
    ## Here, we perform a nested CV (outer CV with defined CV method, inner CV with 10-fold grid search) for regression.
    if cv_strategy == 'hold_out':
        wf_regression = RB_RepeatedHoldOut_DualSVM_Regression(input_data, split_index, os.path.join(output_dir, 'regression'),
                                                              n_threads=n_threads, n_iterations=cv_repetition, verbose=verbose)
        wf_regression.run()
    elif cv_strategy == 'k_fold':
        wf_regression = RB_KFold_DualSVM_Regression(input_data, split_index, os.path.join(output_dir, 'regression'),
                                                    cv_repetition, n_threads=n_threads, verbose=verbose)
        wf_regression.run()
    else:
        raise Exception("CV methods have not been implemented")

    print('Finish...')
a7cb868179148bf68aa95c23784f1a279af82ac8 | 4,993 | py | Python | homeassistant/components/switch/xiaomi_miio.py | olskar/home-assistant | 5986d9ff5b068b221e9d2c675f388b80070e8d87 | [
"Apache-2.0"
] | 1 | 2020-08-06T00:03:02.000Z | 2020-08-06T00:03:02.000Z | homeassistant/components/switch/xiaomi_miio.py | olskar/home-assistant | 5986d9ff5b068b221e9d2c675f388b80070e8d87 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/switch/xiaomi_miio.py | olskar/home-assistant | 5986d9ff5b068b221e9d2c675f388b80070e8d87 | [
"Apache-2.0"
] | null | null | null | """
Support for Xiaomi Smart WiFi Socket and Smart Power Strip.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/switch.xiaomi_miio/
"""
import asyncio
from functools import partial
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA, )
from homeassistant.const import (CONF_NAME, CONF_HOST, CONF_TOKEN, )
from homeassistant.exceptions import PlatformNotReady
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Xiaomi Miio Switch'
PLATFORM = 'xiaomi_miio'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): vol.All(cv.string, vol.Length(min=32, max=32)),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
REQUIREMENTS = ['python-mirobo==0.2.0']
ATTR_POWER = 'power'
ATTR_TEMPERATURE = 'temperature'
ATTR_LOAD_POWER = 'load_power'
ATTR_MODEL = 'model'
SUCCESS = ['ok']
# pylint: disable=unused-argument
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up the switch from config.

    Connects to the plug at CONF_HOST using CONF_TOKEN, queries the device
    info, and registers a single XiaomiPlugSwitch entity.
    """
    from mirobo import Plug, DeviceException

    host = config.get(CONF_HOST)
    name = config.get(CONF_NAME)
    token = config.get(CONF_TOKEN)

    # Only a token prefix is logged to avoid leaking the full credential.
    _LOGGER.info("Initializing with host %s (token %s...)", host, token[:5])

    try:
        plug = Plug(host, token)
        device_info = plug.info()
        _LOGGER.info("%s %s %s initialized",
                     device_info.raw['model'],
                     device_info.raw['fw_ver'],
                     device_info.raw['hw_ver'])
        xiaomi_plug_switch = XiaomiPlugSwitch(name, plug, device_info)
    except DeviceException:
        # Device unreachable: let Home Assistant retry the platform setup.
        raise PlatformNotReady

    async_add_devices([xiaomi_plug_switch], update_before_add=True)
class XiaomiPlugSwitch(SwitchDevice):
    """Representation of a Xiaomi Plug.

    Wraps a mirobo Plug, exposing its on/off state plus temperature and
    load-power readings as Home Assistant state attributes.
    """
    def __init__(self, name, plug, device_info):
        """Initialize the plug switch."""
        self._name = name
        self._icon = 'mdi:power-socket'
        self._device_info = device_info
        self._plug = plug
        # None means the state is unknown (entity reported unavailable).
        self._state = None
        self._state_attrs = {
            ATTR_TEMPERATURE: None,
            ATTR_LOAD_POWER: None,
            ATTR_MODEL: self._device_info.raw['model'],
        }
        # Set after a successful on/off command: the device does not report
        # the new state immediately, so the next poll is skipped.
        self._skip_update = False
    @property
    def should_poll(self):
        """Poll the plug."""
        return True
    @property
    def name(self):
        """Return the name of the device if any."""
        return self._name
    @property
    def icon(self):
        """Return the icon to use for device if any."""
        return self._icon
    @property
    def available(self):
        """Return true when state is known."""
        return self._state is not None
    @property
    def device_state_attributes(self):
        """Return the state attributes of the device."""
        return self._state_attrs
    @property
    def is_on(self):
        """Return true if switch is on."""
        return self._state
    @asyncio.coroutine
    def _try_command(self, mask_error, func, *args, **kwargs):
        """Call a plug command handling error messages.

        Runs the blocking `func` in the executor; returns True on success.
        """
        from mirobo import DeviceException
        try:
            result = yield from self.hass.async_add_job(
                partial(func, *args, **kwargs))
            _LOGGER.debug("Response received from plug: %s", result)
            # The device answers ['ok'] (SUCCESS) on accepted commands.
            return result == SUCCESS
        except DeviceException as exc:
            _LOGGER.error(mask_error, exc)
            return False
    @asyncio.coroutine
    def async_turn_on(self, **kwargs):
        """Turn the plug on."""
        result = yield from self._try_command(
            "Turning the plug on failed.", self._plug.on)
        if result:
            # Optimistically set the state; skip the next poll (see __init__).
            self._state = True
            self._skip_update = True
    @asyncio.coroutine
    def async_turn_off(self, **kwargs):
        """Turn the plug off."""
        result = yield from self._try_command(
            "Turning the plug off failed.", self._plug.off)
        if result:
            self._state = False
            self._skip_update = True
    @asyncio.coroutine
    def async_update(self):
        """Fetch state from the device."""
        from mirobo import DeviceException
        # On state change the device doesn't provide the new state immediately.
        if self._skip_update:
            self._skip_update = False
            return
        try:
            state = yield from self.hass.async_add_job(self._plug.status)
            _LOGGER.debug("Got new state: %s", state)
            self._state = state.is_on
            self._state_attrs.update({
                ATTR_TEMPERATURE: state.temperature,
                ATTR_LOAD_POWER: state.load_power,
            })
        except DeviceException as ex:
            # Leave the previous state; the entity stays at its last reading.
            _LOGGER.error("Got exception while fetching the state: %s", ex)
| 29.544379 | 79 | 0.636291 |
42db8fbb0972849ef4d33999c6b7d04c6a29a4dd | 817 | py | Python | fairseq/models/roberta_updated/layer_norm.py | he1ght/BiBERT_CE | 466e6b50f0c038a331cb9c2cc5e52e697526855d | [
"MIT"
] | 21 | 2021-09-09T00:18:42.000Z | 2022-03-29T08:54:25.000Z | fairseq/models/roberta_updated/layer_norm.py | TokisakiKurumi2001/BiBERT | 2d006100dd1b2f1ff5755575e3d185a858541232 | [
"MIT"
] | null | null | null | fairseq/models/roberta_updated/layer_norm.py | TokisakiKurumi2001/BiBERT | 2d006100dd1b2f1ff5755575e3d185a858541232 | [
"MIT"
] | 2 | 2021-12-16T11:32:23.000Z | 2022-01-04T04:57:29.000Z | import torch
try:
    # apex is an optional dependency providing a fused CUDA LayerNorm kernel.
    from apex.normalization import FusedLayerNorm as _FusedLayerNorm

    has_fused_layernorm = True

    class FusedLayerNorm(_FusedLayerNorm):
        # Excluded from TorchScript; pins the current CUDA device while the
        # fused kernel runs, and falls back to the base forward off-GPU.
        @torch.jit.unused
        def forward(self, x):
            if not x.is_cuda:
                return super().forward(x)
            else:
                with torch.cuda.device(x.device):
                    return super().forward(x)

except ImportError:
    # apex not installed: LayerNorm() falls back to torch.nn.LayerNorm.
    has_fused_layernorm = False
def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):
    """Build a LayerNorm module, preferring apex's fused CUDA kernel.

    Falls back to torch.nn.LayerNorm when exporting, under TorchScript,
    without CUDA, or when apex is unavailable.
    """
    use_fused = (
        not export
        and not torch.jit.is_scripting()
        and torch.cuda.is_available()
        and has_fused_layernorm
    )
    if use_fused:
        return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
    return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)
39ccdf61c1f15d00a8867f200e55c62123918f3c | 547 | py | Python | wagtail/wagtailcore/migrations/0025_collection_initial_data.py | patphongs/wagtail | 32555f7a1c599c139e0f26c22907c9612af2e015 | [
"BSD-3-Clause"
] | 3 | 2016-08-17T13:56:36.000Z | 2019-04-23T19:59:25.000Z | wagtail/wagtailcore/migrations/0025_collection_initial_data.py | patphongs/wagtail | 32555f7a1c599c139e0f26c22907c9612af2e015 | [
"BSD-3-Clause"
] | 11 | 2016-08-05T15:43:06.000Z | 2016-12-16T13:32:23.000Z | wagtail/wagtailcore/migrations/0025_collection_initial_data.py | patphongs/wagtail | 32555f7a1c599c139e0f26c22907c9612af2e015 | [
"BSD-3-Clause"
] | 2 | 2017-08-08T01:39:02.000Z | 2018-05-06T06:16:10.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def initial_data(apps, schema_editor):
    """Seed the collection tree with its root node."""
    Collection = apps.get_model('wagtailcore.Collection')

    # Root of the treebeard materialised-path tree: depth 1, no children yet.
    root_attrs = {
        'name': "Root",
        'path': '0001',
        'depth': 1,
        'numchild': 0,
    }
    Collection.objects.create(**root_attrs)
class Migration(migrations.Migration):
    """Create the root Collection node as initial data."""

    dependencies = [
        ('wagtailcore', '0024_collection'),
    ]

    operations = [
        # Reverse is a noop: the root collection is left in place on unapply.
        migrations.RunPython(initial_data, migrations.RunPython.noop),
    ]
| 19.535714 | 70 | 0.645338 |
0e2a6aa49098275f9864f86545fbb45853a143de | 781 | py | Python | src/number_scaffold_gap.py | ifiddes/mus_strain_cactus | b56863f835892616d645f44476f5d7503acc9d7d | [
"MIT"
] | null | null | null | src/number_scaffold_gap.py | ifiddes/mus_strain_cactus | b56863f835892616d645f44476f5d7503acc9d7d | [
"MIT"
] | null | null | null | src/number_scaffold_gap.py | ifiddes/mus_strain_cactus | b56863f835892616d645f44476f5d7503acc9d7d | [
"MIT"
] | null | null | null | from collections import defaultdict
import re
from src.abstract_classifier import AbstractClassifier
import lib.sequence_lib as seq_lib
class NumberScaffoldGap(AbstractClassifier):
    """
    How many 100bp N runs are there in the alignment?
    100bp N runs are markers of scaffold gaps.
    """
    @staticmethod
    def __type__():
        # SQL column type of this classifier's result.
        return "INTEGER"
    def run(self):
        self.get_alignment_dict()
        self.get_seq_dict()
        # A run of exactly 100 consecutive N's marks an assembly scaffold gap.
        r = re.compile("[N]{100}")
        s_dict = defaultdict(int)
        for a_id, aln in self.alignment_dict.iteritems():
            # Target (genome) sequence spanned by this alignment, uppercased.
            dest_seq = self.seq_dict[aln.tName][aln.tStart : aln.tEnd].upper()
            # NOTE(review): records 1 if ANY gap is present, not the count,
            # despite the docstring saying "how many" — confirm intent.
            if re.search(r, dest_seq) is not None:
                s_dict[a_id] = 1
        self.upsert_dict_wrapper(s_dict)
32a63c5a1f28f95f970e14671c37837a5d560d7d | 132 | py | Python | chainladder/development/clark.py | Aborah30/chainladder-python | c7d3f4f0a5333b6bd34922cc406f252ab9c47e10 | [
"MIT"
] | 1 | 2019-03-03T06:01:26.000Z | 2019-03-03T06:01:26.000Z | chainladder/development/clark.py | Aborah30/chainladder-python | c7d3f4f0a5333b6bd34922cc406f252ab9c47e10 | [
"MIT"
] | null | null | null | chainladder/development/clark.py | Aborah30/chainladder-python | c7d3f4f0a5333b6bd34922cc406f252ab9c47e10 | [
"MIT"
] | null | null | null | """
Clark Development
=================
"""
from sklearn.base import BaseEstimator
class ClarkDevelopment(BaseEstimator):
    """Placeholder for Clark's development method; not yet implemented."""
    pass
| 14.666667 | 38 | 0.666667 |
bfb0ab9f8b96cbdc0d07c6e54729e61daa868146 | 448 | py | Python | users/forms.py | Ab1gor/cardsite | 3da8b998d093fd2b788a28bf8bc0cf09a43023c3 | [
"BSD-3-Clause"
] | 1 | 2019-03-12T06:33:21.000Z | 2019-03-12T06:33:21.000Z | users/forms.py | Ab1gor/cardsite | 3da8b998d093fd2b788a28bf8bc0cf09a43023c3 | [
"BSD-3-Clause"
] | 4 | 2021-03-18T20:48:41.000Z | 2022-01-13T00:49:58.000Z | users/forms.py | Ab1gor/cardsite | 3da8b998d093fd2b788a28bf8bc0cf09a43023c3 | [
"BSD-3-Clause"
] | null | null | null | from django import forms
from .models import userdetails
from django.contrib.auth.models import User
class userdetailsForm(forms.Form):
about = forms.CharField(max_length= 100,
widget=forms.TextInput(
attrs ={'placeholder' : ''}))
url = forms.CharField(max_length= 100,
widget=forms.TextInput(
attrs ={'placeholder' : '' }))
company = forms.CharField(max_length= 100,
widget=forms.TextInput(
attrs ={'placeholder' : '' }))
| 24.888889 | 44 | 0.709821 |
c9b5052da908b997016068786e3de0e642d465b0 | 673 | py | Python | tests/add_table_column_test.py | aescwork/sqlitemgr | 6cf64761c64e64da7c8cb44aeec16a27459df24b | [
"BSD-2-Clause"
] | 1 | 2020-01-31T11:38:18.000Z | 2020-01-31T11:38:18.000Z | tests/add_table_column_test.py | aescwork/sqlitemgr | 6cf64761c64e64da7c8cb44aeec16a27459df24b | [
"BSD-2-Clause"
] | null | null | null | tests/add_table_column_test.py | aescwork/sqlitemgr | 6cf64761c64e64da7c8cb44aeec16a27459df24b | [
"BSD-2-Clause"
] | null | null | null |
import unittest
import os
import sys
sys.path.append("../sqlitemgr/")
import sqlitemgr as sqm
class AddTableColumnTest(unittest.TestCase):
def setUp(self):
self.sm = sqm.SQLiteMgr()
self.sm.new_table("nuts")
self.final_statement = 'CREATE TABLE IF NOT EXISTS nuts(Nmbr INT PRIMARY KEY, Called TEXT UNIQUE, Description TEXT, '
self.sm.add_table_column("Nmbr", "INT", "PRIMARY KEY").add_table_column("Called", "TEXT", "UNIQUE").add_table_column("Description", "TEXT")
def test_add_table_column(self):
self.assertEqual(self.sm.table_statement, self.final_statement)
def tearDown(self):
self.sm.__del__()
if __name__ == '__main__':
unittest.main()
| 24.035714 | 141 | 0.738484 |
2942fa37fa513b567d40934aee43f30bde6ba3c1 | 189 | py | Python | beir/retrieval/search/dense/__init__.py | joshdevins/beir | f86950d166b5f6576ca6b8a1e6d8b4f2266ed60e | [
"Apache-2.0"
] | null | null | null | beir/retrieval/search/dense/__init__.py | joshdevins/beir | f86950d166b5f6576ca6b8a1e6d8b4f2266ed60e | [
"Apache-2.0"
] | null | null | null | beir/retrieval/search/dense/__init__.py | joshdevins/beir | f86950d166b5f6576ca6b8a1e6d8b4f2266ed60e | [
"Apache-2.0"
] | null | null | null | from .exact_search import DenseRetrievalExactSearch
from .faiss_search import DenseRetrievalFaissSearch, BinaryFaissSearch, PQFaissSearch, HNSWFaissSearch, FlatIPFaissSearch, PCAFaissSearch | 94.5 | 137 | 0.899471 |
47354290fcab42d0e6d3c406b595bb57c1e6559a | 4,874 | py | Python | test/CPPDEFINES/scan.py | EmanueleCannizzaro/scons | 6baa4e65cdf4df6951473545b69435711864e509 | [
"MIT"
] | 1 | 2019-09-18T06:37:02.000Z | 2019-09-18T06:37:02.000Z | test/CPPDEFINES/scan.py | EmanueleCannizzaro/scons | 6baa4e65cdf4df6951473545b69435711864e509 | [
"MIT"
] | null | null | null | test/CPPDEFINES/scan.py | EmanueleCannizzaro/scons | 6baa4e65cdf4df6951473545b69435711864e509 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/CPPDEFINES/scan.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify that use of the Scanner that evaluates CPP lines works as expected.
"""
import TestSCons
test = TestSCons.TestSCons()
m = 'Scanner evaluation of CPP lines not yet supported; skipping test.\n'
test.skip_test(m)
f1_exe = 'f1' + TestSCons._exe
f2_exe = 'f2' + TestSCons._exe
f3_exe = 'f3' + TestSCons._exe
f4_exe = 'f4' + TestSCons._exe
test.write('SConstruct', """\
env = Environment(CPPPATH = ['.'])
f1 = env.Object('f1', 'fff.c', CPPDEFINES = ['F1'])
f2 = env.Object('f2', 'fff.c', CPPDEFINES = [('F2', 1)])
f3 = env.Object('f3', 'fff.c', CPPDEFINES = {'F3':None})
f4 = env.Object('f4', 'fff.c', CPPDEFINES = {'F4':1})
env.Program('f1', ['prog.c', f1])
env.Program('f2', ['prog.c', f2])
env.Program('f3', ['prog.c', f3])
env.Program('f4', ['prog.c', f4])
""")
test.write('f1.h', """
#define STRING "F1"
""")
test.write('f2.h', """
#define STRING "F2"
""")
test.write('f3.h', """
#define STRING "F3"
""")
test.write('f4.h', """
#define STRING "F4"
""")
test.write('fff.c', """
#ifdef F1
#include <f1.h>
#endif
#if F2
#include <f2.h>
#endif
#ifdef F3
#include <f3.h>
#endif
#ifdef F4
#include <f4.h>
#endif
char *
foo(void)
{
return (STRING);
}
""")
test.write('prog.c', r"""
#include <stdio.h>
#include <stdlib.h>
extern char *foo(void);
int
main(int argc, char *argv[])
{
argv[argc++] = "--";
printf("prog.c: %s\n", foo());
exit (0);
}
""")
test.run(arguments = '.')
test.run(program = test.workpath('f1'), stdout = "prog.c: F1\n")
test.run(program = test.workpath('f2'), stdout = "prog.c: F2\n")
test.run(program = test.workpath('f3'), stdout = "prog.c: F3\n")
test.run(program = test.workpath('f4'), stdout = "prog.c: F4\n")
test.write('f1.h', """
#define STRING "F1 again"
""")
test.up_to_date(arguments = '%(f2_exe)s %(f3_exe)s %(f4_exe)s' % locals())
test.not_up_to_date(arguments = '.')
test.run(program = test.workpath('f1'), stdout = "prog.c: F1 again\n")
test.run(program = test.workpath('f2'), stdout = "prog.c: F2\n")
test.run(program = test.workpath('f3'), stdout = "prog.c: F3\n")
test.run(program = test.workpath('f4'), stdout = "prog.c: F4\n")
test.write('f2.h', """
#define STRING "F2 again"
""")
test.up_to_date(arguments = '%(f1_exe)s %(f3_exe)s %(f4_exe)s' % locals())
test.not_up_to_date(arguments = '.')
test.run(program = test.workpath('f1'), stdout = "prog.c: F1 again\n")
test.run(program = test.workpath('f2'), stdout = "prog.c: F2 again\n")
test.run(program = test.workpath('f3'), stdout = "prog.c: F3\n")
test.run(program = test.workpath('f4'), stdout = "prog.c: F4\n")
test.write('f3.h', """
#define STRING "F3 again"
""")
test.up_to_date(arguments = '%(f1_exe)s %(f2_exe)s %(f4_exe)s' % locals())
test.not_up_to_date(arguments = '.')
test.run(program = test.workpath('f1'), stdout = "prog.c: F1 again\n")
test.run(program = test.workpath('f2'), stdout = "prog.c: F2 again\n")
test.run(program = test.workpath('f3'), stdout = "prog.c: F3 again\n")
test.run(program = test.workpath('f4'), stdout = "prog.c: F4\n")
test.write('f4.h', """
#define STRING "F4 again"
""")
test.up_to_date(arguments = '%(f1_exe)s %(f2_exe)s %(f3_exe)s' % locals())
test.not_up_to_date(arguments = '.')
test.run(program = test.workpath('f1'), stdout = "prog.c: F1 again\n")
test.run(program = test.workpath('f2'), stdout = "prog.c: F2 again\n")
test.run(program = test.workpath('f3'), stdout = "prog.c: F3 again\n")
test.run(program = test.workpath('f4'), stdout = "prog.c: F4 again\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 25.925532 | 97 | 0.657981 |
d48a30fc8d1631b32ece4dbb036d4de4bc3d6551 | 8,501 | py | Python | digraphillion/setset.py | ComputerAlgorithmsGroupAtKyotoU/digraphillion | 44eaa37d142d9b74b34973aac5341a88b315ff64 | [
"MIT"
] | 1 | 2021-12-14T13:26:47.000Z | 2021-12-14T13:26:47.000Z | digraphillion/setset.py | ComputerAlgorithmsGroupAtKyotoU/digraphillion | 44eaa37d142d9b74b34973aac5341a88b315ff64 | [
"MIT"
] | 1 | 2021-09-05T06:47:58.000Z | 2021-09-07T05:56:57.000Z | digraphillion/setset.py | ComputerAlgorithmsGroupAtKyotoU/digraphillion | 44eaa37d142d9b74b34973aac5341a88b315ff64 | [
"MIT"
] | null | null | null | # Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Module for a set of sets.
"""
from builtins import range
from future.utils import viewitems
import _digraphillion
class setset(_digraphillion.setset):
"""Represents and manipulates a set of sets.
A setset object stores a set of sets. A set element can be any
hashable object like a number, a text string, and a tuple.
Like Python set types, setset supports `set in setset`,
`len(setset)`, and `for set in setset`. It also supports all set
methods and operators,
* isdisjoint(), issubset(), issuperset(), union(), intersection(),
difference(), symmetric_difference(), copy(), update(),
intersection_update(), difference_update(),
symmetric_difference_update(), add(), remove(), discard(),
pop(), clear(),
* ==, !=, <=, <, >=, >, |, &, -, ^, |=, &=, -=, ^=.
Examples:
>>> from graphillion import setset
>>> ss = setset([set([1]), set([1,2])])
>>> len(ss)
2
>>> for s in ss:
... s
set([1])
set([1, 2])
"""
def __init__(self, setset_or_constraints=None):
obj = setset_or_constraints
if obj is None:
obj = []
elif isinstance(obj, list): # a set of sets [set+]
l = []
for s in obj:
l.append(set([setset._conv_elem(e) for e in s]))
obj = l
elif isinstance(obj, dict): # constraints
d = {}
for k, l in viewitems(obj):
d[k] = [setset._conv_elem(e) for e in l]
obj = d
_digraphillion.setset.__init__(self, obj)
def __repr__(self):
name = self.__class__.__name__
return self._repr((name + '([', '])'), ('set([', '])'))
def _repr(self, outer_braces=('[', ']'), inner_braces=('[', ']')):
n = _digraphillion._num_elems()
w = {}
for i in range(1, n + 1):
e = setset._int2obj[i]
w[e] = 1 + float(i) / n**2
ret = outer_braces[0]
maxchar = 80
no_comma = True
for s in setset.min_iter(self, w):
if no_comma:
no_comma = False
else:
ret += ', '
ret += inner_braces[0] + \
str(sorted(list(s)))[1:-1] + inner_braces[1]
if len(ret) > maxchar - 2:
break
if len(ret) <= maxchar - 2:
return ret + outer_braces[1]
else:
return ret[:(maxchar - 4)] + ' ...'
def __contains__(self, set_or_elem):
set_or_elem = setset._conv_arg(set_or_elem)
return _digraphillion.setset.__contains__(self, set_or_elem)
def add(self, set_or_elem):
set_or_elem = setset._conv_arg(set_or_elem)
return _digraphillion.setset.add(self, set_or_elem)
def remove(self, set_or_elem):
set_or_elem = setset._conv_arg(set_or_elem)
return _digraphillion.setset.remove(self, set_or_elem)
def discard(self, set_or_elem):
set_or_elem = setset._conv_arg(set_or_elem)
return _digraphillion.setset.discard(self, set_or_elem)
def pop(self):
set = _digraphillion.setset.pop(self)
return setset._conv_ret(set)
def flip(self, elem=None):
if elem is not None:
elem = setset._conv_elem(elem)
return _digraphillion.setset.flip(self, elem)
def __iter__(self):
i = _digraphillion.setset.iter(self)
while (True):
try:
yield setset._conv_ret(next(i))
except StopIteration:
return
def rand_iter(self):
i = _digraphillion.setset.rand_iter(self)
while (True):
try:
yield setset._conv_ret(next(i))
except StopIteration:
return
def min_iter(self, weights=None, default=1):
return self._optimize(weights, default, _digraphillion.setset.min_iter)
def max_iter(self, weights=None, default=1):
return self._optimize(weights, default, _digraphillion.setset.max_iter)
def _optimize(self, weights, default, generator):
ws = [default] * (_digraphillion._num_elems() + 1)
if weights:
for e, w in viewitems(weights):
i = setset._obj2int[e]
ws[i] = w
i = generator(self, ws)
while (True):
try:
yield setset._conv_ret(next(i))
except StopIteration:
return
def supersets(self, obj):
if (not isinstance(obj, setset)):
obj = setset._conv_elem(obj)
return _digraphillion.setset.supersets(self, obj)
def non_supersets(self, obj):
if (not isinstance(obj, setset)):
obj = setset._conv_elem(obj)
return _digraphillion.setset.non_supersets(self, obj)
def choice(self):
set = _digraphillion.setset.choice(self)
return setset._conv_ret(set)
def probability(self, probabilities):
ps = [-1] * (_digraphillion._num_elems() + 1)
for e, p in viewitems(probabilities):
i = setset._obj2int[e]
ps[i] = p
assert len([p for p in ps[1:] if p < 0 or 1 < p]) == 0
return _digraphillion.setset.probability(self, ps)
@staticmethod
def load(fp):
return _digraphillion.load(fp)
@staticmethod
def loads(s):
return _digraphillion.loads(s)
@staticmethod
def set_universe(universe):
if len(universe) != len(set(universe)):
raise ValueError('duplicated elements found')
_digraphillion._num_elems(0)
setset._obj2int = {}
setset._int2obj = [None]
for e in universe:
setset._add_elem(e)
setset._check_universe()
@staticmethod
def universe():
setset._check_universe()
return setset._int2obj[1:]
@staticmethod
def _check_universe():
assert len(setset._int2obj) == _digraphillion._num_elems() + 1
for e, i in viewitems(setset._obj2int):
assert e == setset._int2obj[i]
for i in range(1, len(setset._int2obj)):
e = setset._int2obj[i]
assert i == setset._obj2int[e]
@staticmethod
def _add_elem(elem):
assert elem not in setset._obj2int
if len(setset._obj2int) >= _digraphillion._elem_limit():
m = 'too many elements are set, which must be %d or less' % _digraphillion._elem_limit()
raise RuntimeError(m)
i = len(setset._int2obj)
_digraphillion.setset([set([i])])
setset._obj2int[elem] = i
setset._int2obj.append(elem)
assert len(setset._int2obj) == _digraphillion._num_elems() + 1
assert setset._int2obj[i] == elem
assert setset._obj2int[elem] == i
@staticmethod
def _conv_elem(elem):
if elem not in setset._obj2int:
setset._add_elem(elem)
return setset._obj2int[elem]
@staticmethod
def _conv_arg(obj):
if isinstance(obj, (set, frozenset)): # a set
return set([setset._conv_elem(e) for e in obj])
else: # an element
return setset._conv_elem(obj)
@staticmethod
def _conv_ret(obj):
if isinstance(obj, (set, frozenset)): # a set
ret = set()
for e in obj:
ret.add(setset._int2obj[e])
return ret
raise TypeError(obj)
_obj2int = {}
_int2obj = [None]
| 33.868526 | 100 | 0.596636 |
92873920b31d96f24c374ec29fcca24af8386823 | 20,704 | py | Python | venv/lib/python3.6/site-packages/jinja2/utils.py | aitoehigie/britecore_flask | eef1873dbe6b2cc21f770bc6dec783007ae4493b | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/jinja2/utils.py | aitoehigie/britecore_flask | eef1873dbe6b2cc21f770bc6dec783007ae4493b | [
"MIT"
] | 1 | 2021-06-01T23:32:38.000Z | 2021-06-01T23:32:38.000Z | venv/lib/python3.6/site-packages/jinja2/utils.py | aitoehigie/britecore_flask | eef1873dbe6b2cc21f770bc6dec783007ae4493b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
jinja2.utils
~~~~~~~~~~~~
Utility functions.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
import json
import errno
from collections import deque
from threading import Lock
from jinja2._compat import text_type, string_types, implements_iterator, url_quote
_word_split_re = re.compile(r"(\s+)")
_punctuation_re = re.compile(
"^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$"
% (
"|".join(map(re.escape, ("(", "<", "<"))),
"|".join(map(re.escape, (".", ",", ")", ">", "\n", ">"))),
)
)
_simple_email_re = re.compile(r"^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$")
_striptags_re = re.compile(r"(<!--.*?-->|<[^>]*>)")
_entity_re = re.compile(r"&([^;]+);")
_letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
_digits = "0123456789"
# special singleton representing missing values for the runtime
missing = type("MissingType", (), {"__repr__": lambda x: "missing"})()
# internal code
internal_code = set()
concat = u"".join
_slash_escape = "\\/" not in json.dumps("/")
def contextfunction(f):
"""This decorator can be used to mark a function or method context callable.
A context callable is passed the active :class:`Context` as first argument when
called from the template. This is useful if a function wants to get access
to the context or functions provided on the context object. For example
a function that returns a sorted list of template variables the current
template exports could look like this::
@contextfunction
def get_exported_names(context):
return sorted(context.exported_vars)
"""
f.contextfunction = True
return f
def evalcontextfunction(f):
"""This decorator can be used to mark a function or method as an eval
context callable. This is similar to the :func:`contextfunction`
but instead of passing the context, an evaluation context object is
passed. For more information about the eval context, see
:ref:`eval-context`.
.. versionadded:: 2.4
"""
f.evalcontextfunction = True
return f
def environmentfunction(f):
"""This decorator can be used to mark a function or method as environment
callable. This decorator works exactly like the :func:`contextfunction`
decorator just that the first argument is the active :class:`Environment`
and not context.
"""
f.environmentfunction = True
return f
def internalcode(f):
"""Marks the function as internally used"""
internal_code.add(f.__code__)
return f
def is_undefined(obj):
"""Check if the object passed is undefined. This does nothing more than
performing an instance check against :class:`Undefined` but looks nicer.
This can be used for custom filters or tests that want to react to
undefined variables. For example a custom default filter can look like
this::
def default(var, default=''):
if is_undefined(var):
return default
return var
"""
from jinja2.runtime import Undefined
return isinstance(obj, Undefined)
def consume(iterable):
"""Consumes an iterable without doing anything with it."""
for event in iterable:
pass
def clear_caches():
"""Jinja2 keeps internal caches for environments and lexers. These are
used so that Jinja2 doesn't have to recreate environments and lexers all
the time. Normally you don't have to care about that but if you are
measuring memory consumption you may want to clean the caches.
"""
from jinja2.environment import _spontaneous_environments
from jinja2.lexer import _lexer_cache
_spontaneous_environments.clear()
_lexer_cache.clear()
def import_string(import_name, silent=False):
"""Imports an object based on a string. This is useful if you want to
use import paths as endpoints or something similar. An import path can
be specified either in dotted notation (``xml.sax.saxutils.escape``)
or with a colon as object delimiter (``xml.sax.saxutils:escape``).
If the `silent` is True the return value will be `None` if the import
fails.
:return: imported object
"""
try:
if ":" in import_name:
module, obj = import_name.split(":", 1)
elif "." in import_name:
items = import_name.split(".")
module = ".".join(items[:-1])
obj = items[-1]
else:
return __import__(import_name)
return getattr(__import__(module, None, None, [obj]), obj)
except (ImportError, AttributeError):
if not silent:
raise
def open_if_exists(filename, mode="rb"):
"""Returns a file descriptor for the filename if that file exists,
otherwise `None`.
"""
try:
return open(filename, mode)
except IOError as e:
if e.errno not in (errno.ENOENT, errno.EISDIR, errno.EINVAL):
raise
def object_type_repr(obj):
"""Returns the name of the object's type. For some recognized
singletons the name of the object is returned instead. (For
example for `None` and `Ellipsis`).
"""
if obj is None:
return "None"
elif obj is Ellipsis:
return "Ellipsis"
# __builtin__ in 2.x, builtins in 3.x
if obj.__class__.__module__ in ("__builtin__", "builtins"):
name = obj.__class__.__name__
else:
name = obj.__class__.__module__ + "." + obj.__class__.__name__
return "%s object" % name
def pformat(obj, verbose=False):
"""Prettyprint an object. Either use the `pretty` library or the
builtin `pprint`.
"""
try:
from pretty import pretty
return pretty(obj, verbose=verbose)
except ImportError:
from pprint import pformat
return pformat(obj)
def urlize(text, trim_url_limit=None, rel=None, target=None):
"""Converts any URLs in text into clickable links. Works on http://,
https:// and www. links. Links can have trailing punctuation (periods,
commas, close-parens) and leading punctuation (opening parens) and
it'll still do the right thing.
If trim_url_limit is not None, the URLs in link text will be limited
to trim_url_limit characters.
If nofollow is True, the URLs in link text will get a rel="nofollow"
attribute.
If target is not None, a target attribute will be added to the link.
"""
trim_url = (
lambda x, limit=trim_url_limit: limit is not None
and (x[:limit] + (len(x) >= limit and "..." or ""))
or x
)
words = _word_split_re.split(text_type(escape(text)))
rel_attr = rel and ' rel="%s"' % text_type(escape(rel)) or ""
target_attr = target and ' target="%s"' % escape(target) or ""
for i, word in enumerate(words):
match = _punctuation_re.match(word)
if match:
lead, middle, trail = match.groups()
if middle.startswith("www.") or (
"@" not in middle
and not middle.startswith("http://")
and not middle.startswith("https://")
and len(middle) > 0
and middle[0] in _letters + _digits
and (
middle.endswith(".org")
or middle.endswith(".net")
or middle.endswith(".com")
)
):
middle = '<a href="http://%s"%s%s>%s</a>' % (
middle,
rel_attr,
target_attr,
trim_url(middle),
)
if middle.startswith("http://") or middle.startswith("https://"):
middle = '<a href="%s"%s%s>%s</a>' % (
middle,
rel_attr,
target_attr,
trim_url(middle),
)
if (
"@" in middle
and not middle.startswith("www.")
and not ":" in middle
and _simple_email_re.match(middle)
):
middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
if lead + middle + trail != word:
words[i] = lead + middle + trail
return u"".join(words)
def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
"""Generate some lorem ipsum for the template."""
from jinja2.constants import LOREM_IPSUM_WORDS
from random import choice, randrange
words = LOREM_IPSUM_WORDS.split()
result = []
for _ in range(n):
next_capitalized = True
last_comma = last_fullstop = 0
word = None
last = None
p = []
# each paragraph contains out of 20 to 100 words.
for idx, _ in enumerate(range(randrange(min, max))):
while True:
word = choice(words)
if word != last:
last = word
break
if next_capitalized:
word = word.capitalize()
next_capitalized = False
# add commas
if idx - randrange(3, 8) > last_comma:
last_comma = idx
last_fullstop += 2
word += ","
# add end of sentences
if idx - randrange(10, 20) > last_fullstop:
last_comma = last_fullstop = idx
word += "."
next_capitalized = True
p.append(word)
# ensure that the paragraph ends with a dot.
p = u" ".join(p)
if p.endswith(","):
p = p[:-1] + "."
elif not p.endswith("."):
p += "."
result.append(p)
if not html:
return u"\n\n".join(result)
return Markup(u"\n".join(u"<p>%s</p>" % escape(x) for x in result))
def unicode_urlencode(obj, charset="utf-8", for_qs=False):
"""URL escapes a single bytestring or unicode string with the
given charset if applicable to URL safe quoting under all rules
that need to be considered under all supported Python versions.
If non strings are provided they are converted to their unicode
representation first.
"""
if not isinstance(obj, string_types):
obj = text_type(obj)
if isinstance(obj, text_type):
obj = obj.encode(charset)
safe = not for_qs and b"/" or b""
rv = text_type(url_quote(obj, safe))
if for_qs:
rv = rv.replace("%20", "+")
return rv
class LRUCache(object):
"""A simple LRU Cache implementation."""
# this is fast for small capacities (something below 1000) but doesn't
# scale. But as long as it's only used as storage for templates this
# won't do any harm.
def __init__(self, capacity):
self.capacity = capacity
self._mapping = {}
self._queue = deque()
self._postinit()
def _postinit(self):
# alias all queue methods for faster lookup
self._popleft = self._queue.popleft
self._pop = self._queue.pop
self._remove = self._queue.remove
self._wlock = Lock()
self._append = self._queue.append
def __getstate__(self):
return {
"capacity": self.capacity,
"_mapping": self._mapping,
"_queue": self._queue,
}
def __setstate__(self, d):
self.__dict__.update(d)
self._postinit()
def __getnewargs__(self):
return (self.capacity,)
def copy(self):
"""Return a shallow copy of the instance."""
rv = self.__class__(self.capacity)
rv._mapping.update(self._mapping)
rv._queue = deque(self._queue)
return rv
def get(self, key, default=None):
"""Return an item from the cache dict or `default`"""
try:
return self[key]
except KeyError:
return default
def setdefault(self, key, default=None):
"""Set `default` if the key is not in the cache otherwise
leave unchanged. Return the value of this key.
"""
self._wlock.acquire()
try:
try:
return self[key]
except KeyError:
self[key] = default
return default
finally:
self._wlock.release()
def clear(self):
"""Clear the cache."""
self._wlock.acquire()
try:
self._mapping.clear()
self._queue.clear()
finally:
self._wlock.release()
def __contains__(self, key):
"""Check if a key exists in this cache."""
return key in self._mapping
def __len__(self):
"""Return the current size of the cache."""
return len(self._mapping)
def __repr__(self):
return "<%s %r>" % (self.__class__.__name__, self._mapping)
def __getitem__(self, key):
"""Get an item from the cache. Moves the item up so that it has the
highest priority then.
Raise a `KeyError` if it does not exist.
"""
self._wlock.acquire()
try:
rv = self._mapping[key]
if self._queue[-1] != key:
try:
self._remove(key)
except ValueError:
# if something removed the key from the container
# when we read, ignore the ValueError that we would
# get otherwise.
pass
self._append(key)
return rv
finally:
self._wlock.release()
def __setitem__(self, key, value):
"""Sets the value for an item. Moves the item up so that it
has the highest priority then.
"""
self._wlock.acquire()
try:
if key in self._mapping:
self._remove(key)
elif len(self._mapping) == self.capacity:
del self._mapping[self._popleft()]
self._append(key)
self._mapping[key] = value
finally:
self._wlock.release()
def __delitem__(self, key):
"""Remove an item from the cache dict.
Raise a `KeyError` if it does not exist.
"""
self._wlock.acquire()
try:
del self._mapping[key]
try:
self._remove(key)
except ValueError:
# __getitem__ is not locked, it might happen
pass
finally:
self._wlock.release()
def items(self):
"""Return a list of items."""
result = [(key, self._mapping[key]) for key in list(self._queue)]
result.reverse()
return result
def iteritems(self):
"""Iterate over all items."""
return iter(self.items())
def values(self):
"""Return a list of all values."""
return [x[1] for x in self.items()]
def itervalue(self):
"""Iterate over all values."""
return iter(self.values())
def keys(self):
"""Return a list of all keys ordered by most recent usage."""
return list(self)
def iterkeys(self):
"""Iterate over all keys in the cache dict, ordered by
the most recent usage.
"""
return reversed(tuple(self._queue))
__iter__ = iterkeys
def __reversed__(self):
"""Iterate over the values in the cache dict, oldest items
coming first.
"""
return iter(tuple(self._queue))
__copy__ = copy
# register the LRU cache as mutable mapping if possible
try:
from collections import MutableMapping
MutableMapping.register(LRUCache)
except ImportError:
pass
def select_autoescape(
enabled_extensions=("html", "htm", "xml"),
disabled_extensions=(),
default_for_string=True,
default=False,
):
"""Intelligently sets the initial value of autoescaping based on the
filename of the template. This is the recommended way to configure
autoescaping if you do not want to write a custom function yourself.
If you want to enable it for all templates created from strings or
for all templates with `.html` and `.xml` extensions::
from jinja2 import Environment, select_autoescape
env = Environment(autoescape=select_autoescape(
enabled_extensions=('html', 'xml'),
default_for_string=True,
))
Example configuration to turn it on at all times except if the template
ends with `.txt`::
from jinja2 import Environment, select_autoescape
env = Environment(autoescape=select_autoescape(
disabled_extensions=('txt',),
default_for_string=True,
default=True,
))
The `enabled_extensions` is an iterable of all the extensions that
autoescaping should be enabled for. Likewise `disabled_extensions` is
a list of all templates it should be disabled for. If a template is
loaded from a string then the default from `default_for_string` is used.
If nothing matches then the initial value of autoescaping is set to the
value of `default`.
For security reasons this function operates case insensitive.
.. versionadded:: 2.9
"""
enabled_patterns = tuple("." + x.lstrip(".").lower() for x in enabled_extensions)
disabled_patterns = tuple("." + x.lstrip(".").lower() for x in disabled_extensions)
def autoescape(template_name):
if template_name is None:
return default_for_string
template_name = template_name.lower()
if template_name.endswith(enabled_patterns):
return True
if template_name.endswith(disabled_patterns):
return False
return default
return autoescape
def htmlsafe_json_dumps(obj, dumper=None, **kwargs):
"""Works exactly like :func:`dumps` but is safe for use in ``<script>``
tags. It accepts the same arguments and returns a JSON string. Note that
this is available in templates through the ``|tojson`` filter which will
also mark the result as safe. Due to how this function escapes certain
characters this is safe even if used outside of ``<script>`` tags.
The following characters are escaped in strings:
- ``<``
- ``>``
- ``&``
- ``'``
This makes it safe to embed such strings in any place in HTML with the
notable exception of double quoted attributes. In that case single
quote your attributes or HTML escape it in addition.
"""
if dumper is None:
dumper = json.dumps
rv = (
dumper(obj, **kwargs)
.replace(u"<", u"\\u003c")
.replace(u">", u"\\u003e")
.replace(u"&", u"\\u0026")
.replace(u"'", u"\\u0027")
)
return Markup(rv)
@implements_iterator
class Cycler(object):
"""A cycle helper for templates."""
def __init__(self, *items):
if not items:
raise RuntimeError("at least one item has to be provided")
self.items = items
self.reset()
def reset(self):
"""Resets the cycle."""
self.pos = 0
@property
def current(self):
"""Returns the current item."""
return self.items[self.pos]
def next(self):
"""Goes one item ahead and returns it."""
rv = self.current
self.pos = (self.pos + 1) % len(self.items)
return rv
__next__ = next
class Joiner(object):
"""A joining helper for templates."""
def __init__(self, sep=u", "):
self.sep = sep
self.used = False
def __call__(self):
if not self.used:
self.used = True
return u""
return self.sep
class Namespace(object):
    """A namespace object that can hold arbitrary attributes.  It may be
    initialized from a dictionary or with keyword arguments."""
    # `self` is pulled out of *args instead of being a named parameter so
    # that callers may freely use "self" as an attribute keyword argument.
    def __init__(*args, **kwargs):
        self, args = args[0], args[1:]
        self.__attrs = dict(*args, **kwargs)
    def __getattribute__(self, name):
        # The name-mangled storage dict itself must be resolved normally;
        # every other attribute is looked up inside that dict.
        if name == "_Namespace__attrs":
            return object.__getattribute__(self, name)
        try:
            return self.__attrs[name]
        except KeyError:
            # Missing keys surface as normal attribute errors
            raise AttributeError(name)
    def __setitem__(self, name, value):
        # Item assignment (ns["x"] = 1) writes into the attribute storage
        self.__attrs[name] = value
    def __repr__(self):
        return "<Namespace %r>" % self.__attrs
# Does this Python version support `async for` and async generators
# (PEP 525, Python 3.6+)?  The probe runs through `exec` so that this
# module itself still parses on interpreters where the syntax is invalid.
try:
    exec("async def _():\n async for _ in ():\n  yield _")
    have_async_gen = True
except SyntaxError:
    have_async_gen = False
# Imported here because that's where it was in the past
from markupsafe import Markup, escape, soft_unicode
| 30.901493 | 87 | 0.595827 |
ce5dc5af7ab8c6e6dce85bca36e70cf804664942 | 7,686 | py | Python | scripts/torture_test.py | SkynetRTN/afterglow-access-server | 3d8d62f622577fdd1ae7b0076cb536251f7bf0cd | [
"Apache-2.0"
] | null | null | null | scripts/torture_test.py | SkynetRTN/afterglow-access-server | 3d8d62f622577fdd1ae7b0076cb536251f7bf0cd | [
"Apache-2.0"
] | null | null | null | scripts/torture_test.py | SkynetRTN/afterglow-access-server | 3d8d62f622577fdd1ae7b0076cb536251f7bf0cd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Torture-test Afterglow Core API
"""
import argparse
import base64
import json
import random
import requests
import time
import traceback
import warnings
from multiprocessing import Process
from typing import Any, Dict, Optional, Union
def _extract_csrf(token):
    """Best-effort extraction of the CSRF claim from a JWT-like token.

    The part of the token before the final ``.`` (header + payload) is
    base64-decoded after restoring padding, the first embedded JSON
    document (the JWT header) is skipped, and the next parseable JSON
    document is searched for a ``csrf`` key.  Returns the CSRF value or
    ``None`` if anything goes wrong -- the caller then simply omits the
    CSRF header.
    """
    # noinspection PyBroadException
    try:
        s = token[:token.rfind('.')]
        # `base64.decodebytes` requires a bytes-like argument, so the
        # padded string must be encoded first; the previous code passed a
        # `str`, which always raised and silently disabled CSRF extraction.
        raw = base64.decodebytes(
            (s + '=' * ((4 - len(s) % 4) % 4)).encode('ascii'))
        # Find the end of the first JSON document (the JWT header) by
        # growing the prefix until it parses
        i = 1
        while i <= len(raw):
            try:
                json.loads(raw[:i])
            except ValueError:
                i += 1
            else:
                break
        raw = raw[i:]
        # The next JSON document is the payload; return its "csrf" claim
        i = 1
        while i <= len(raw):
            try:
                return json.loads(raw[:i])['csrf']
            except ValueError:
                i += 1
    except Exception:
        pass
    return None


def api_call(host, port, https, root, api_version, token, method, resource,
             params=None) -> Optional[Union[Dict[str, Any], str, bytes]]:
    """Make a single Afterglow Core API request and return its payload.

    :param host: API server hostname or IP address
    :param port: API server port
    :param https: use HTTPS instead of HTTP
    :param root: API root path; defaults to '/core' for non-local hosts
    :param api_version: server API version string, e.g. '1'
    :param token: personal access token used for bearer authentication
    :param method: HTTP method ('GET', 'POST', 'DELETE', ...)
    :param resource: resource path relative to the API root
    :param params: optional request parameters; sent as a query string
        for GET/HEAD/OPTIONS and as a JSON body otherwise

    :return: the 'data' member of a JSON response (or the full JSON
        object if it has no 'data' member), response text for text
        responses, raw bytes otherwise, or None if the response carries
        no Content-Type header

    :raises RuntimeError: if the server returned a JSON 'error' member
    """
    method = method.upper()
    headers = {'Authorization': 'Bearer {}'.format(token)}
    if method != 'GET':
        # Mutating requests need the CSRF token embedded in the auth token
        csrf = _extract_csrf(token)
        if csrf is not None:
            headers['X-CSRF-Token'] = csrf
    if not root and host not in ('localhost', '127.0.0.1'):
        root = '/core'
    elif root and not root.startswith('/'):
        root = '/' + root
    url = 'http{}://{}:{:d}{}/'.format('s' if https else '', host, port, root)
    if not resource.startswith('oauth2') and not resource.startswith('ajax'):
        url += 'api/v{}/'.format(api_version)
    url += resource
    json_data = None
    if method not in ('GET', 'HEAD', 'OPTIONS') and params:
        # For requests other than GET, we must pass parameters as JSON
        params, json_data = None, params
    warnings.filterwarnings('ignore', 'Unverified HTTPS request is being made')
    r = requests.request(
        method, url, verify=False, params=params, headers=headers,
        json=json_data)
    try:
        content_type = r.headers['Content-Type'].split(';')[0].strip()
    except KeyError:
        return
    if content_type.split('/')[-1].lower() == 'json':
        res = r.json()
        if 'data' in res:
            return res['data']
        if 'error' in res:
            raise RuntimeError(str(res['error']))
        return res
    if content_type.split('/')[0].lower() == 'text':
        return r.text
    return r.content
def run_job(host, port, https, root, api_version, token, job_type, params):
    """Submit an Afterglow job, wait for completion, and return its result.

    The job is created with POST /jobs, its state is polled once per
    second until the server reports "completed", and the job result is
    then fetched.  Any job errors are printed but not raised; the result
    dictionary is returned either way.
    """
    payload = {'type': job_type}
    payload.update(params)
    job_id = api_call(
        host, port, https, root, api_version, token, 'POST', 'jobs',
        payload)['id']
    status = None
    while status != 'completed':
        # Poll the job state once a second
        time.sleep(1)
        status = api_call(
            host, port, https, root, api_version, token, 'GET',
            'jobs/{}/state'.format(job_id))['status']
    result = api_call(
        host, port, https, root, api_version, token, 'GET',
        'jobs/{}/result'.format(job_id))
    if result['errors']:
        print(result['errors'])
    return result
def test_process(
        proc_id, host, port, https, root, api_version, token, obs_id, cycles):
    """Worker body: import one observation, then run `cycles` torture cycles.

    Each cycle retrieves pixel data for every imported file, stacks the
    images (deleting the temporary stack), extracts sources from the first
    image and photometers them in all images.  Random sleeps spread the
    load; all failures are retried or logged so a single worker never dies.
    """
    # Import observation; retry every 5 s until the import yields file IDs
    while True:
        # noinspection PyBroadException
        try:
            file_ids = run_job(
                host, port, https, root, api_version, token, 'batch_import',
                {'settings': [{
                    'provider_id': '1', 'duplicates': 'append',
                    'path': 'User Observations/{}/reduced'.format(obs_id)
                }]})['file_ids']
        except Exception:
            time.sleep(5)
        else:
            if file_ids:
                break
            time.sleep(5)
    for cycle in range(cycles):
        # noinspection PyBroadException
        try:
            # Retrieve pixel data
            for i in file_ids:
                api_call(
                    host, port, https, root, api_version, token,
                    'GET', 'data-files/{}/pixels'.format(i))
            # Stack images
            time.sleep(random.uniform(0, 10))
            temp_file_id = run_job(
                host, port, https, root, api_version, token, 'stacking',
                {'file_ids': file_ids})['file_id']
            # Delete the temporary stacked file, retrying until it succeeds
            while True:
                # noinspection PyBroadException
                try:
                    api_call(
                        host, port, https, root, api_version, token,
                        'DELETE', 'data-files/{}'.format(temp_file_id))
                except Exception:
                    time.sleep(5)
                else:
                    break
            # Extract sources from the first image
            time.sleep(random.uniform(0, 10))
            sources = run_job(
                host, port, https, root, api_version, token,
                'source_extraction', {'file_ids': [file_ids[0]]})['data']
            # Photometer sources in all images
            time.sleep(random.uniform(0, 10))
            run_job(
                host, port, https, root, api_version, token, 'photometry',
                {'file_ids': file_ids, 'sources': sources, 'settings': {
                    'a': 10, 'a_in': 15, 'a_out': 20}})
        except Exception:
            traceback.print_exc()
        # Progress marker: worker number and completed cycle (1-based)
        print('{}: {}'.format(proc_id + 1, cycle + 1))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        '--host', metavar='HOSTNAME', default='localhost',
        help='Afterglow API server hostname or IP address')
    # noinspection PyTypeChecker
    parser.add_argument(
        '--port', metavar='PORT', type=int, default=5000,
        help='Afterglow API server port')
    parser.add_argument(
        '-s', '--https', action='store_true', help='use HTTPS instead of HTTP')
    parser.add_argument('-r', '--root', default='', help='API root')
    parser.add_argument(
        '-v', '--api-version', default='1', help='server API version')
    parser.add_argument(
        '-t', '--token', help='authenticate with this personal token')
    parser.add_argument(
        '-o', '--obs', metavar='N', help='test observation ID')
    parser.add_argument(
        '-w', '--workers', metavar='N', type=int, default=100,
        help='number of worker processes')
    parser.add_argument(
        '-c', '--cycles', metavar='N', type=int, default=100,
        help='number of test cycles')
    args = parser.parse_args()
    print('Starting {} processes with {} test cycles'
          .format(args.workers, args.cycles))
    # One OS process per worker; each runs test_process independently
    processes = [Process(target=test_process, args=(
        i, args.host, args.port, args.https, args.root, args.api_version,
        args.token, args.obs, args.cycles)) for i in range(args.workers)]
    for p in processes:
        p.start()
    try:
        for p in processes:
            p.join()
    finally:
        # Cleanup: delete every data file the workers created, retrying
        # each deletion until the server accepts it
        data_files = api_call(
            args.host, args.port, args.https, args.root, args.api_version,
            args.token, 'GET', 'data-files')
        print('Deleting {} data files'.format(len(data_files)))
        for f in data_files:
            while True:
                # noinspection PyBroadException
                try:
                    # noinspection PyTypeChecker
                    api_call(
                        args.host, args.port, args.https, args.root,
                        args.api_version, args.token,
                        'DELETE', 'data-files/{}'.format(f['id']))
                except Exception:
                    time.sleep(1)
                else:
                    break
| 34.16 | 79 | 0.532397 |
9b7c1dfdbbd8f3527c2874386524e8935acc86f9 | 8,243 | py | Python | static/classes/User.py | avirois/ProjectManagement | 9e05d1302336ca4ef32bef93e28bcc69fdad7d4f | [
"WTFPL"
] | null | null | null | static/classes/User.py | avirois/ProjectManagement | 9e05d1302336ca4ef32bef93e28bcc69fdad7d4f | [
"WTFPL"
] | 3 | 2021-03-24T11:53:18.000Z | 2021-04-05T10:20:58.000Z | static/classes/User.py | avirois/ProjectManagement | 9e05d1302336ca4ef32bef93e28bcc69fdad7d4f | [
"WTFPL"
] | null | null | null | import re
import sqlite3
from cryptography.fernet import Fernet
from static.classes.Institution import Institution
from static.classes.Faculty import Faculty
from flask import current_app
# Fernet key used to encrypt/decrypt stored passwords.
# SECURITY(review): this secret is hardcoded in source control; it should
# be loaded from configuration or an environment variable instead, and
# reversible encryption is a poor substitute for password hashing.
sec_key = b'pRmgMa8T0INjEAfksaq2aafzoZXEuwKI7wDe4c1F8AY='
def encryptPassword(password):
    """Encrypt a password with the module-wide Fernet key.

    The password is coerced to ``str`` and UTF-8 encoded before
    encryption; the resulting Fernet token (bytes) is returned.
    """
    fernet = Fernet(sec_key)
    return fernet.encrypt(str(password).encode("utf-8"))
def decryptPassword(password):
    """Decrypt a Fernet token produced by :func:`encryptPassword`.

    Returns the original password as a UTF-8 string.
    """
    fernet = Fernet(sec_key)
    plain = fernet.decrypt(password)
    return plain.decode('utf-8')
class User():
"""
Class for object of user that constructed from username, firstname,
lastname, password, institutionID, FacultyID, study year, role, isBanned.
"""
def __init__(self, username, fName, lName, password, institutionID, facultyID, studyYear, isBanned = 0, email = None):
"""Ctor function for object of user"""
self.username = username
self.fName = fName
self.lName = lName
self.email = email
self.password = password
self.institutionID = institutionID
self.facultyID = facultyID
self.studyYear = studyYear
self.role = 0
self.isBanned = isBanned
def getUsername(self):
"""The function retruns username of user"""
return (self.username)
def getFName(self):
"""The function retruns firstname of user"""
return (self.fName)
def getLName(self):
"""The function retruns lastname of user"""
return (self.lName)
def getEmail(self):
"""The function retruns email of user"""
return (self.email)
def getPassword(self):
"""The function retruns password of user"""
return (self.password)
def getInstitutionID(self):
"""The function retruns institution ID of user"""
return (self.institutionID)
def getInstitutionName(self):
"""The function retruns the name of the institution of the user"""
# Connect to database and check if user exists
con = sqlite3.connect(current_app.config['DB_NAME'])
# Prepare the query
sqlQuryLogin = "SELECT * FROM Institutions WHERE InstitutionID = (?)"
# Run the query to get institution data
sqlRes = con.execute(sqlQuryLogin,(self.getInstitutionID(),))
# Fetch the result
record = sqlRes.fetchone()
# Create institution object for current selected username
instOfUser = None
# Check if user exists
if (record != None):
# Create institution object for current selected username
instOfUser = Institution(record[0], record[1])
# Close the connection to the database
con.close()
return (instOfUser.getName())
def getFacultyID(self):
"""The function retruns faculty ID of user"""
return (self.facultyID)
def getFacultyName(self):
"""The function retruns the name of the faculty of the user"""
# Connect to database and check if user exists
con = sqlite3.connect(current_app.config['DB_NAME'])
# Prepare the query
sqlQuryLogin = "SELECT * FROM Faculties WHERE FacultyID = (?)"
# Run the query to get faculty data
sqlRes = con.execute(sqlQuryLogin,(self.getFacultyID(),))
# Fetch the result
record = sqlRes.fetchone()
# Create facullty object for current selected username
facOfUser = None
# Check if user exists
if (record != None):
# Create faculty object for current selected username
facOfUser = Faculty(record[0], record[1])
# Close the connection to the database
con.close()
return (facOfUser.getName())
def getStudyYear(self):
"""The function retruns study year of user"""
return (self.studyYear)
def getRole(self):
"""The function retruns role of user"""
return (self.role)
def getIsBanned(self):
"""The function retruns isBanned flag of user"""
return (self.isBanned)
def setFName(self, fName):
"""The function sets firstname of user"""
self.fName = fName
def setLName(self, lName):
"""The function sets lastname of user"""
self.lName = lName
def setEmail(self, email):
"""The function sets email of user"""
self.email = email
def setPassword(self, password):
"""The function sets password of user"""
self.password = password
def setInstitutionID(self, institutionID):
"""The function sets institution ID of user"""
self.institutionID = institutionID
def setFacultyID(self, facultyID):
"""The function sets faculty ID of user"""
self.facultyID = facultyID
def setStudyYear(self, studyYear):
"""The function sets study year of user"""
self.studyYear = studyYear
def setRole(self, role):
"""The function sets role of user"""
self.role = role
def setIsBanned(self, isBanned):
"""The function sets isBanned flag of user"""
self.isBanned = isBanned
def validateUser(self):
"""Function to validate user data"""
msg = ""
# Regular expressions
regUserName = "^[a-zA-Z0-9]+$"
regPassword = "^(?=.*?[A-Z])(?=.*?[a-z])(?=.*?[0-9])(?=.*?[#?!@$%^&*-]).{8,}$"
regEmail = "\w+[.|\w]\w+@\w+[.]\w+[.|\w+]\w+"
# Check if username is incorrect
if (not re.match(regUserName, self.getUsername())):
msg += "User name was incorrect!\n"
# Check if firstname is not empty
if (self.getFName() == ""):
msg += "First name was not entered!\n"
# Check if lastname is not empty
if (self.getLName() == ""):
msg += "Last name is not entered!\n"
# Check if password is incorrect
if (not re.match(regPassword, self.getPassword())):
msg += "Password must be constructed from minimum eight characters, " +\
"at least one uppercase letter, one lowercase letter, " +\
"one digit, and one special character!\n"
# Check if no institution selected
if (self.getInstitutionID() == ""):
msg += "Institution was not selected!\n"
# Check if no faculty selected
if (self.getFacultyID() == ""):
msg += "Faculty was not selected!\n"
# Check if email entered
if ((self.getEmail() != None) and (self.getEmail() != "")):
# Check if email is correct
if (not re.match(regEmail, self.getEmail())):
msg += "Email is incorrect!\n"
return (msg)
def validateEditBio(self):
"""Function to validate user data at edit bio"""
msg = ""
# Regular expressions
regEmail = "\w+[.|\w]\w+@\w+[.]\w+[.|\w+]\w+"
# Check if firstname is not empty
if (self.getFName() == ""):
msg += "First name was not entered!\n"
# Check if lastname is not empty
if (self.getLName() == ""):
msg += "Last name is not entered!\n"
# Check if no institution selected
if (self.getInstitutionID() == ""):
msg += "Institution was not selected!\n"
# Check if no faculty selected
if (self.getFacultyID() == ""):
msg += "Faculty was not selected!\n"
# Check if email entered
if ((self.getEmail() != None) and (self.getEmail() != "")):
# Check if email is correct
if (not re.match(regEmail, self.getEmail())):
msg += "Email is incorrect!\n"
return (msg)
def validatePassword(self, password):
"""Function to validate user password at login"""
return (self.getPassword() == password)
def isAdmin(self):
""" The function returns false if user is not admin """
return (False) | 31.826255 | 122 | 0.596021 |
1a08c9d75b3066882b6c7ae46f65df9afacb191c | 488 | py | Python | env/Lib/site-packages/plotly/validators/isosurface/colorbar/tickformatstop/_templateitemname.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | venv/Lib/site-packages/plotly/validators/isosurface/colorbar/tickformatstop/_templateitemname.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | venv/Lib/site-packages/plotly/validators/isosurface/colorbar/tickformatstop/_templateitemname.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | import _plotly_utils.basevalidators
class TemplateitemnameValidator(_plotly_utils.basevalidators.StringValidator):
    # Auto-generated plotly validator for the `templateitemname` string
    # property of `isosurface.colorbar.tickformatstop`.
    def __init__(
        self,
        plotly_name="templateitemname",
        parent_name="isosurface.colorbar.tickformatstop",
        **kwargs
    ):
        # `edit_type` defaults to "calc" unless the caller overrides it
        super(TemplateitemnameValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            **kwargs
        )
| 28.705882 | 78 | 0.653689 |
eb5c9d13b0b1ffd722750c38568454296d526163 | 1,742 | py | Python | examples/visualization/topo_compare_conditions.py | lokinou/mne-python | f4aa12bc9118d0739ca05c5ed5a4fba7ae71138b | [
"BSD-3-Clause"
] | null | null | null | examples/visualization/topo_compare_conditions.py | lokinou/mne-python | f4aa12bc9118d0739ca05c5ed5a4fba7ae71138b | [
"BSD-3-Clause"
] | null | null | null | examples/visualization/topo_compare_conditions.py | lokinou/mne-python | f4aa12bc9118d0739ca05c5ed5a4fba7ae71138b | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
.. _ex-topo-compare:
=================================================
Compare evoked responses for different conditions
=================================================
In this example, an Epochs object for visual and auditory responses is created.
Both conditions are then accessed by their respective names to create a sensor
layout plot of the related evoked responses.
"""
# Authors: Denis Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD-3-Clause
# %%
import matplotlib.pyplot as plt
import mne
from mne.viz import plot_evoked_topo
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
# %%
# Set parameters: file paths and the epoch time window (seconds)
meg_path = data_path / 'MEG' / 'sample'
raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif'
event_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif'
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
# Set up amplitude-peak rejection values for MEG channels
reject = dict(grad=4000e-13, mag=4e-12)
# Create epochs including different events
event_id = {'audio/left': 1, 'audio/right': 2,
            'visual/left': 3, 'visual/right': 4}
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
                    picks='meg', baseline=(None, 0), reject=reject)
# Generate list of evoked objects from conditions names; the 'left'/'right'
# tags select the matching audio+visual conditions via epochs name matching
evokeds = [epochs[name].average() for name in ('left', 'right')]
# %%
# Show topography for two different conditions overlaid in one sensor layout
colors = 'blue', 'red'
title = 'MNE sample data\nleft vs right (A/V combined)'
plot_evoked_topo(evokeds, color=colors, title=title, background_color='w')
plt.show()
| 26.393939 | 79 | 0.677956 |
2fdd61d838b580c2fb63d670801e1913addbacdd | 4,050 | py | Python | sdk/python/pulumi_azure_nextgen/devices/v20170821preview/list_iot_dps_resource_keys_for_key_name.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/devices/v20170821preview/list_iot_dps_resource_keys_for_key_name.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/devices/v20170821preview/list_iot_dps_resource_keys_for_key_name.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
# Public API of this auto-generated module
__all__ = [
    'ListIotDpsResourceKeysForKeyNameResult',
    'AwaitableListIotDpsResourceKeysForKeyNameResult',
    'list_iot_dps_resource_keys_for_key_name',
]
@pulumi.output_type
class ListIotDpsResourceKeysForKeyNameResult:
    """
    Description of the shared access key.
    """
    # Auto-generated Pulumi output type: values are stored through
    # pulumi.set/pulumi.get rather than plain attributes.
    def __init__(__self__, key_name=None, primary_key=None, rights=None, secondary_key=None):
        # Each argument is type-checked only when truthy (generator design)
        if key_name and not isinstance(key_name, str):
            raise TypeError("Expected argument 'key_name' to be a str")
        pulumi.set(__self__, "key_name", key_name)
        if primary_key and not isinstance(primary_key, str):
            raise TypeError("Expected argument 'primary_key' to be a str")
        pulumi.set(__self__, "primary_key", primary_key)
        if rights and not isinstance(rights, str):
            raise TypeError("Expected argument 'rights' to be a str")
        pulumi.set(__self__, "rights", rights)
        if secondary_key and not isinstance(secondary_key, str):
            raise TypeError("Expected argument 'secondary_key' to be a str")
        pulumi.set(__self__, "secondary_key", secondary_key)
    @property
    @pulumi.getter(name="keyName")
    def key_name(self) -> str:
        """
        Name of the key.
        """
        return pulumi.get(self, "key_name")
    @property
    @pulumi.getter(name="primaryKey")
    def primary_key(self) -> Optional[str]:
        """
        Primary SAS key value.
        """
        return pulumi.get(self, "primary_key")
    @property
    @pulumi.getter
    def rights(self) -> str:
        """
        Rights that this key has.
        """
        return pulumi.get(self, "rights")
    @property
    @pulumi.getter(name="secondaryKey")
    def secondary_key(self) -> Optional[str]:
        """
        Secondary SAS key value.
        """
        return pulumi.get(self, "secondary_key")
class AwaitableListIotDpsResourceKeysForKeyNameResult(ListIotDpsResourceKeysForKeyNameResult):
    """Awaitable variant of the result so callers may ``await`` it."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` turns this method into a generator,
        # which is what the await protocol requires; it completes
        # immediately with a plain result object.
        if False:
            yield self
        return ListIotDpsResourceKeysForKeyNameResult(
            key_name=self.key_name,
            primary_key=self.primary_key,
            rights=self.rights,
            secondary_key=self.secondary_key)
def list_iot_dps_resource_keys_for_key_name(key_name: Optional[str] = None,
                                            provisioning_service_name: Optional[str] = None,
                                            resource_group_name: Optional[str] = None,
                                            opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListIotDpsResourceKeysForKeyNameResult:
    """
    List the shared access keys for a specific key name of an IoT Device
    Provisioning Service (API version 2017-08-21-preview).

    :param str key_name: Logical key name to get key-values for.
    :param str provisioning_service_name: Name of the provisioning service.
    :param str resource_group_name: The name of the resource group that contains the provisioning service.
    :param opts: Invoke options; the SDK version is filled in when absent.
    """
    __args__ = dict()
    __args__['keyName'] = key_name
    __args__['provisioningServiceName'] = provisioning_service_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke against the Azure NextGen provider token
    __ret__ = pulumi.runtime.invoke('azure-nextgen:devices/v20170821preview:listIotDpsResourceKeysForKeyName', __args__, opts=opts, typ=ListIotDpsResourceKeysForKeyNameResult).value
    return AwaitableListIotDpsResourceKeysForKeyNameResult(
        key_name=__ret__.key_name,
        primary_key=__ret__.primary_key,
        rights=__ret__.rights,
        secondary_key=__ret__.secondary_key)
| 37.850467 | 181 | 0.670864 |
3e88c6106bd43aaa0c630d621b899998cffbe17f | 14,739 | py | Python | scripts/neural_machine_translation/filter_langs_nmt.py | hamjam/NeMo | b3484d32e1317666151f931bfa39867d88ed8658 | [
"Apache-2.0"
] | 4,145 | 2019-09-13T08:29:43.000Z | 2022-03-31T18:31:44.000Z | scripts/neural_machine_translation/filter_langs_nmt.py | hamjam/NeMo | b3484d32e1317666151f931bfa39867d88ed8658 | [
"Apache-2.0"
] | 2,031 | 2019-09-17T16:51:39.000Z | 2022-03-31T23:52:41.000Z | scripts/neural_machine_translation/filter_langs_nmt.py | hamjam/NeMo | b3484d32e1317666151f931bfa39867d88ed8658 | [
"Apache-2.0"
] | 1,041 | 2019-09-13T10:08:21.000Z | 2022-03-30T06:37:38.000Z | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import multiprocessing as mp
import re
import shutil
import warnings
from pathlib import Path
from time import sleep
import fasttext
from tqdm import tqdm
"""
Usage:
python filter_by_language.py --input-src train.en \
--input-tgt train.de \
--output-src train_lang_filtered.en \
--output-tgt train_lang_filtered.de \
--source-lang en \
--target-lang de \
--removed-src train_garbage.en \
--removed-tgt train_garbage.de \
--fasttext-model lid.176.bin
"""
logging.basicConfig(level=logging.INFO)
# Silence fasttext's load warning ("'load_model' does not return
# WordVectorModel or SupervisedModel any more, but a 'FastText' object
# which is very similar.") by discarding its stderr printer.
fasttext.FastText.eprint = lambda x: None
def get_args():
    """Parse and validate command-line arguments.

    Target-side options (``--input-tgt``, ``--output-tgt``,
    ``--target-lang``, ``--removed-tgt``) must be given all together
    (parallel-corpus mode) or not at all (monolingual mode); otherwise a
    ValueError is raised.  All path arguments are user-expanded.
    """
    parser = argparse.ArgumentParser(
        description="It is a script for verifying language in machine translation data sets. If the script is used on "
        "a parallel corpus, it verifies both a source and a target language. If number of jobs `--num-jobs` is bigger "
        "than 1 than lines in an input file (or files if parallel corpus is checked) split equally between workers. "
        "If `num_jobs > 1` is used, the best performance is achieved if dataset is shuffled and lines with different "
        "lengths are distributed evenly in the input file. Filtered data is stored into `output_src`[, `--output-tgt`]"
        " and removed lines are put into `removed_src`[, `--removed-tgt`] files. If language cannot be detected "
        "(e.g. date), the line is removed. Working time on en-de wikimatrix (6.23M pairs: 700 MB German and 625 MB "
        "English) from wmt20 on machine with 20 CPU cores: less than 1 minute."
    )
    parser.add_argument(
        "--input-src",
        "-s",
        help="Path to the input file which has to contain text in language `source_lang`.",
        required=True,
        type=Path,
    )
    parser.add_argument(
        "--input-tgt",
        "-t",
        help="Path to the input file which has to contain text in language `target_lang`. If not provided, data is "
        "processed as monolingual.",
        type=Path,
    )
    parser.add_argument(
        "--output-src",
        "-S",
        help="Path to the file where filtered `input_src` will be saved.",
        required=True,
        type=Path,
    )
    parser.add_argument(
        "--output-tgt", "-T", help="Path to the output target file", type=Path,
    )
    parser.add_argument(
        "--source-lang",
        "-l",
        required=True,
        help="Input language. For options see https://fasttext.cc/docs/en/language-identification.html.",
    )
    parser.add_argument(
        "--target-lang",
        "-L",
        help="Output language. For options see https://fasttext.cc/docs/en/language-identification.html.",
    )
    parser.add_argument(
        "--removed-src", "-r", required=True, help="Path to file where removed source lines will be saved", type=Path,
    )
    parser.add_argument(
        "--removed-tgt", "-R", help="Path to file where removed target lines will be saved", type=Path,
    )
    parser.add_argument(
        "--num-jobs",
        "-j",
        type=int,
        help="Number of jobs. By default, the number of jobs is equal to the number of CPU cores.",
    )
    parser.add_argument(
        "--fasttext-model",
        "-m",
        help="Path to fasttext model. The description and download links are here "
        "https://fasttext.cc/docs/en/language-identification.html",
        type=Path,
    )
    args = parser.parse_args()
    # The four target-side options must be all None or all set
    if not (
        args.output_tgt is None
        and args.input_tgt is None
        and args.target_lang is None
        and args.removed_tgt is None
        or args.output_tgt is not None
        and args.input_tgt is not None
        and args.target_lang is not None
        and args.removed_tgt is not None
    ):
        raise ValueError(
            f"Arguments `input_tgt`, `output_tgt`, `target_lang`, `removed_tgt` have to be either `None` "
            f"simultaneously or not `None` simultaneously. Given "
            f"input_tgt={args.input_tgt}, output_tgt={args.output_tgt}, target_lang={args.target_lang}, "
            f"removed_tgt={args.removed_tgt}"
        )
    # Expand '~' in every provided path
    args.input_src = args.input_src.expanduser()
    if args.input_tgt is not None:
        args.input_tgt = args.input_tgt.expanduser()
    args.output_src = args.output_src.expanduser()
    if args.output_tgt is not None:
        args.output_tgt = args.output_tgt.expanduser()
    args.removed_src = args.removed_src.expanduser()
    if args.removed_tgt is not None:
        args.removed_tgt = args.removed_tgt.expanduser()
    args.fasttext_model = args.fasttext_model.expanduser()
    return args
def get_lang(line, fasttext_model):
    """Return the most probable language code for ``line``.

    ``fasttext_model.predict`` yields labels shaped like
    ``__label__<code>``; only the trailing language code is returned.
    """
    labels = fasttext_model.predict(line, k=1)[0]
    return labels[0].split('__')[-1]
def get_edges_in_1_file(fn, num_parts):
    """Compute byte offsets that split file ``fn`` into ``num_parts`` chunks.

    Chunk boundaries fall on line starts at (approximately) equal line
    counts, measured in UTF-8 bytes.  Returns ``(offsets, num_lines)``
    where ``offsets`` has ``num_parts + 1`` entries, the last one being
    the total file size in bytes.
    """
    line_starts = [0]
    pos = 0
    with open(fn) as fp:
        for line in fp:
            pos += len(line.encode('utf-8'))
            line_starts.append(pos)
    num_lines = len(line_starts) - 1
    offsets = [
        line_starts[int(k * num_lines / num_parts)] for k in range(num_parts)
    ]
    offsets.append(line_starts[-1])
    return offsets, num_lines
def get_edges_and_num_lines(src_fn, tgt_fn, num_parts):
    """Split the source (and optional target) file into per-worker byte ranges.

    Returns ``(src_ranges, tgt_ranges, num_lines)`` where each ranges list
    holds ``num_parts`` ``(start, end)`` byte-offset pairs.  When no
    target file is given, ``tgt_ranges`` is a list of ``None``
    placeholders.  Raises ValueError if the files differ in line count.
    """
    src_offsets, src_num_lines = get_edges_in_1_file(src_fn, num_parts)
    assert num_parts + 1 == len(src_offsets)
    src_ranges = list(zip(src_offsets, src_offsets[1:]))
    if tgt_fn is None:
        tgt_ranges = [None] * num_parts
    else:
        tgt_offsets, tgt_num_lines = get_edges_in_1_file(tgt_fn, num_parts)
        tgt_ranges = list(zip(tgt_offsets, tgt_offsets[1:]))
        if tgt_num_lines != src_num_lines:
            raise ValueError(
                f"Source {repr(src_fn)} and target {repr(tgt_fn)} files have different number of lines "
                f"{src_num_lines} and {tgt_num_lines} correspondingly."
            )
    assert len(src_ranges) == num_parts
    return src_ranges, tgt_ranges, src_num_lines
def filter_pairs(
    src_edges,
    tgt_edges,
    input_src,
    input_tgt,
    filtered_dir_src,
    filtered_dir_tgt,
    removed_dir_src,
    removed_dir_tgt,
    source_lang,
    target_lang,
    fasttext_model,
    rank,
):
    """Filter one byte-range chunk of a parallel corpus by detected language.

    Reads lines from ``input_src``/``input_tgt`` between the byte offsets
    in ``src_edges``/``tgt_edges``.  Pairs whose detected languages match
    ``source_lang``/``target_lang`` go to the filtered files, everything
    else to the removed files; outputs are named ``rank<rank>``.  The
    shared multiprocessing ``counter`` is bumped per processed line for
    the parent's progress bar.
    """
    global counter
    # Each worker loads its own copy of the fasttext model
    fasttext_model = fasttext.load_model(str(fasttext_model))
    output_src = filtered_dir_src / Path(f"rank{rank}")
    output_src_removed = removed_dir_src / Path(f"rank{rank}")
    output_tgt = filtered_dir_tgt / Path(f"rank{rank}")
    output_tgt_removed = removed_dir_tgt / Path(f"rank{rank}")
    with open(input_src) as in_src, open(input_tgt) as in_tgt, open(output_src, 'w') as out_src, open(
        output_tgt, 'w'
    ) as out_tgt, open(output_src_removed, 'w') as out_r_src, open(output_tgt_removed, 'w') as out_r_tgt:
        # Seek both files to the start of this worker's chunk
        in_src.seek(src_edges[0])
        in_tgt.seek(tgt_edges[0])
        src_l, tgt_l, i = in_src.readline(), in_tgt.readline(), 0
        # Empty chunk: first readline already passed the chunk end
        if in_src.tell() > src_edges[1] or in_tgt.tell() > tgt_edges[1]:
            return
        while src_l and tgt_l:
            with counter.get_lock():
                counter.value += 1
            src_l = src_l.strip()
            tgt_l = tgt_l.strip()
            src_lang = get_lang(src_l, fasttext_model)
            # NOTE(review): get_lang never returns None in practice; if it
            # did, `tgt_lang` below would be unbound on the None branch.
            if src_lang is not None:
                tgt_lang = get_lang(tgt_l, fasttext_model)
            if src_lang is None or tgt_lang is None or src_lang != source_lang or tgt_lang != target_lang:
                out_r_src.write(src_l + '\n')
                out_r_tgt.write(tgt_l + '\n')
            else:
                out_src.write(src_l + '\n')
                out_tgt.write(tgt_l + '\n')
            # Both files must hit their chunk boundary on the same line;
            # anything else means the line counts drifted apart
            if in_src.tell() >= src_edges[1]:
                if in_tgt.tell() < tgt_edges[1]:
                    raise ValueError(
                        f"Edges of target and source has to be reached simultaneously, whereas "
                        f"in_src.tell()={in_src.tell()}, in_tgt.tell()={in_tgt.tell()}, "
                        f"src_edges[1]={src_edges[1]}, tgt_edges[1]={tgt_edges[1]}."
                    )
                break
            if in_tgt.tell() >= tgt_edges[1]:
                raise ValueError(
                    f"Edges of target and source has to be reached simultaneously, whereas "
                    f"in_src.tell()={in_src.tell()}, in_tgt.tell()={in_tgt.tell()}, "
                    f"src_edges[1]={src_edges[1]}, tgt_edges[1]={tgt_edges[1]}."
                )
            src_l, tgt_l, i = in_src.readline(), in_tgt.readline(), i + 1
        # Final bump so the progress bar accounts for the last line
        with counter.get_lock():
            counter.value += 1
def filter_singles(
    src_edges, input_src, filtered_dir_src, removed_dir_src, source_lang, fasttext_model, rank,
):
    """Filter one byte-range chunk of a monolingual corpus by detected language.

    Lines whose detected language equals ``source_lang`` go to the
    filtered ``rank<rank>`` file, all others to the removed file.  The
    shared multiprocessing ``counter`` tracks progress for the parent.
    """
    logging.debug("filter singles")
    global counter
    # Each worker loads its own copy of the fasttext model
    fasttext_model = fasttext.load_model(str(fasttext_model))
    output_src = filtered_dir_src / Path(f"rank{rank}")
    output_src_removed = removed_dir_src / Path(f"rank{rank}")
    with open(input_src) as in_f, open(output_src, 'w') as out_f, open(output_src_removed, 'w') as out_r_f:
        # Position at the start of this worker's chunk
        in_f.seek(src_edges[0])
        i, line = 0, in_f.readline()
        # Empty chunk: the first readline already passed the chunk end
        if in_f.tell() > src_edges[1]:
            return
        while line:
            with counter.get_lock():
                counter.value += 1
            line = line.strip()
            in_lang = get_lang(line, fasttext_model)
            if in_lang is None or in_lang != source_lang:
                out_r_f.write(line + '\n')
            else:
                out_f.write(line + '\n')
            if in_f.tell() >= src_edges[1]:
                break
            i, line = i + 1, in_f.readline()
        # Final bump so the progress bar accounts for the last line
        with counter.get_lock():
            counter.value += 1
def filter_by_lang(args):
    """Worker entry point: dispatch one chunk to pair or single filtering.

    ``args`` is the 12-tuple packed by the parent process.  Chunks with a
    target file go to ``filter_pairs``; monolingual chunks (no target
    file) go to ``filter_singles``.
    """
    (src_edges, tgt_edges, input_src, input_tgt, filtered_dir_src,
     filtered_dir_tgt, removed_dir_src, removed_dir_tgt, source_lang,
     target_lang, fasttext_model, rank) = args
    logging.debug(f"filter by lang input_tgt: {input_tgt}")
    if input_tgt is not None:
        filter_pairs(
            src_edges, tgt_edges, input_src, input_tgt, filtered_dir_src,
            filtered_dir_tgt, removed_dir_src, removed_dir_tgt, source_lang,
            target_lang, fasttext_model, rank)
    else:
        if tgt_edges is not None:
            warnings.warn("If input target is not provided `tgt_edges` argument is expected to be `None`")
        filter_singles(
            src_edges, input_src, filtered_dir_src, removed_dir_src,
            source_lang, fasttext_model, rank)
def _cat_results(out_file, tmp_dir):
    """Concatenate the per-rank chunk files in ``tmp_dir`` into ``out_file``.

    Entries are processed in lexicographic filename order; anything that
    is not a regular file, or whose name does not look like a ``rank<N>``
    output, triggers a warning and is skipped.
    """
    rank_pattern = re.compile(r"/rank([1-9][0-9]*)|0$")
    with out_file.open('w') as dst:
        for entry in sorted(tmp_dir.iterdir()):
            if not entry.is_file():
                warnings.warn(f"Unexpected not file {entry}")
                continue
            if not rank_pattern.search(str(entry)):
                warnings.warn(f"Unexpected file {entry}")
                continue
            with entry.open('r') as src:
                for line in src:
                    dst.write(line)
def cat_results(out_files, tmp_dirs):
    """Merge each tmp directory's shards into its matching output file.

    ``out_files`` and ``tmp_dirs`` are parallel lists; a ``None`` in one list
    must be paired with a ``None`` in the other (a mismatched pair only emits
    a warning and is skipped).
    """
    for out_file, tmp_dir in zip(out_files, tmp_dirs):
        if out_file is not None and tmp_dir is not None:
            _cat_results(out_file, tmp_dir)
        elif out_file is not None or tmp_dir is not None:
            warnings.warn(
                f"Output file and tmp directory are expected to be `None` simultaneously whereas tmp directory "
                f"is {tmp_dir} and output file is {out_file}."
            )
counter = None  # shared mp.Value('i') progress counter; set per worker by init()
def init(args):
    # Pool-worker initializer: publish the shared counter into this process's
    # module globals so the filter functions can update it under its lock.
    global counter
    counter = args
def main():
    """Parse CLI arguments, fan the language filtering out over a process
    pool, concatenate the per-rank shards into the requested output files and
    remove the scratch directory."""
    args = get_args()

    # Find an unused scratch directory name: "tmp", then "tmp0", "tmp1", ...
    tmp_dir, suffix = Path("tmp"), 0
    while tmp_dir.exists():
        tmp_dir, suffix = Path("tmp" + str(suffix)), suffix + 1

    tmp_filtered = tmp_dir / Path("filtered")
    tmp_filtered_src = tmp_filtered / Path("src")
    tmp_filtered_src.mkdir(parents=True, exist_ok=True)
    if args.input_tgt is None:
        tmp_filtered_tgt = None
    else:
        tmp_filtered_tgt = tmp_filtered / Path("tgt")
        tmp_filtered_tgt.mkdir(parents=True, exist_ok=True)

    tmp_removed = tmp_dir / Path("removed")
    tmp_removed_src = tmp_removed / Path("src")
    tmp_removed_src.mkdir(parents=True, exist_ok=True)
    if args.input_tgt is None:
        tmp_removed_tgt = None
    else:
        tmp_removed_tgt = tmp_removed / Path("tgt")
        tmp_removed_tgt.mkdir(parents=True, exist_ok=True)

    num_jobs = mp.cpu_count() if args.num_jobs is None else args.num_jobs
    src_edges, tgt_edges, num_lines = get_edges_and_num_lines(args.input_src, args.input_tgt, num_jobs)

    global counter
    counter = mp.Value('i', 0)
    progress = tqdm(total=num_lines, desc="processed lines / total number of lines")
    with mp.Pool(num_jobs, initializer=init, initargs=(counter,)) as pool:
        job_args = [
            (
                se,
                te,
                args.input_src,
                args.input_tgt,
                tmp_filtered_src,
                tmp_filtered_tgt,
                tmp_removed_src,
                tmp_removed_tgt,
                args.source_lang,
                args.target_lang,
                args.fasttext_model,
                rank,
            )
            for rank, (se, te) in enumerate(zip(src_edges, tgt_edges))
        ]
        async_result = pool.map_async(filter_by_lang, job_args)
        # Poll the shared counter and forward it to tqdm until all workers
        # are done, resetting it under its lock after each read.
        while not async_result.ready():
            progress.update(counter.value)
            with counter.get_lock():
                counter.value = 0
            sleep(0.1)
        progress.update(counter.value)
    cat_results(
        [args.output_src, args.output_tgt, args.removed_src, args.removed_tgt],
        [tmp_filtered_src, tmp_filtered_tgt, tmp_removed_src, tmp_removed_tgt],
    )
    shutil.rmtree(tmp_dir)
if __name__ == "__main__":
    main()
| 36.302956 | 160 | 0.605536 |
842c7bea0c4ad83223347f5e241a18288964a95d | 4,115 | py | Python | tools/train_extend.py | undeadyequ/PaddleOCR | 7e31d064ba3054f87fa27cff84784f706248c61e | [
"Apache-2.0"
] | null | null | null | tools/train_extend.py | undeadyequ/PaddleOCR | 7e31d064ba3054f87fa27cff84784f706248c61e | [
"Apache-2.0"
] | null | null | null | tools/train_extend.py | undeadyequ/PaddleOCR | 7e31d064ba3054f87fa27cff84784f706248c61e | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, '..')))
import yaml
import paddle
import paddle.distributed as dist
paddle.seed(2)
from ppocr.data import build_dataloader
from ppocr.modeling.architectures import build_model_extend
from ppocr.losses import build_loss
from ppocr.optimizer import build_optimizer
from ppocr.postprocess import build_post_process
from ppocr.metrics import build_metric
from ppocr.utils.save_load import init_model
import tools.program as program
from ppocr.modeling.architectures.extend_model import JointVisDet, JointVisDetFineGrained
from tools.infer import utility
from tools.extend.util import ProtestDataset, ProtestDatasetEval
dist.get_world_size()
def main(config, config_detrec, device, logger, vdl_writer):
    """Build dataloaders, model, loss, optimizer and metric, then run training.

    Args:
        config: parsed training config (with Global/Train/Eval/... sections).
        config_detrec: det/rec inference config forwarded to the extended model.
        device: paddle device to place data/model on.
        logger: configured logger instance.
        vdl_writer: VisualDL writer (or None) for training curves.
    """
    # init dist environment
    if config['Global']['distributed']:
        dist.init_parallel_env()
    # build dataloader
    train_dataloader = build_dataloader(config, 'Train', device, logger)
    if len(train_dataloader) == 0:
        logger.error(
            'No Images in train dataset, please check annotation file and path in the configuration file'
        )
        return
    if config['Eval']:
        valid_dataloader = build_dataloader(config, 'Eval', device, logger)
    else:
        valid_dataloader = None
    # build model
    model = build_model_extend(config['Architecture']["extend_args"], config_detrec)
    if config['Global']['distributed']:
        model = paddle.DataParallel(model)
    # build loss
    loss_class = build_loss(config['Architecture']["extend_args"])
    # build optim
    optimizer, lr_scheduler = build_optimizer(
        config['Optimizer'],
        epochs=config['Global']['epoch_num'],
        step_each_epoch=len(train_dataloader),
        parameters=model.parameters())
    # build post-process.  BUG FIX: `post_process_class` was passed to
    # program.train() below but never defined, raising a NameError at runtime
    # (build_post_process was imported yet never called).
    # NOTE(review): assumes the config carries a 'PostProcess' section as in
    # tools/train.py - confirm against the training YAML.
    post_process_class = build_post_process(config['PostProcess'], config['Global'])
    # build metric
    eval_class = build_metric(config['Metric'])
    # load pretrain model
    pre_best_model_dict = init_model(config, model, logger, optimizer)
    logger.info('train dataloader has {} iters, valid dataloader has {} iters'.
                format(len(train_dataloader), len(valid_dataloader)))
    # start train
    program.train(config, train_dataloader, valid_dataloader, device, model,
                  loss_class, optimizer, lr_scheduler, post_process_class,
                  eval_class, pre_best_model_dict, logger, vdl_writer)
def test_reader(config, device, logger):
    """Smoke-test the 'Train' dataloader: iterate it and log per-batch timing."""
    loader = build_dataloader(config, 'Train', device, logger)
    import time
    batch_start = time.time()
    count = 0
    try:
        for count, data in enumerate(loader(), 1):
            if count % 1 == 0:  # logs every batch (modulus kept for easy tuning)
                elapsed = time.time() - batch_start
                batch_start = time.time()
                logger.info("reader: {}, {}, {}".format(
                    count, len(data[0]), elapsed))
    except Exception as e:
        # Best-effort diagnostic utility: report the failure and fall through.
        logger.info(e)
    logger.info("finish reader: {}, Success!".format(count))
if __name__ == '__main__':
    # Parse the det/rec inference args and the general training config, then
    # launch training with the shared logger / device / VisualDL writer.
    config_detrec = utility.parse_args()
    config_general, device, logger, vdl_writer = program.preprocess(is_train=True)
    main(config_general, config_detrec, device, logger, vdl_writer)
    # test_reader(config, device, logger)
| 31.899225 | 105 | 0.70401 |
95a233a4d13372f7b66badd660a0053d2535a62e | 773 | py | Python | var/spack/repos/builtin/packages/dtcmp/package.py | RemoteConnectionManager/spack | f2967b6c16effd26ce007cf86cadbb645c574f50 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 3 | 2019-06-27T13:26:50.000Z | 2019-07-01T16:24:54.000Z | var/spack/repos/builtin/packages/dtcmp/package.py | openbiox/spack | bb6ec7fb40c14b37e094a860e3625af53f633174 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 75 | 2016-07-27T11:43:00.000Z | 2020-12-08T15:56:53.000Z | var/spack/repos/builtin/packages/dtcmp/package.py | openbiox/spack | bb6ec7fb40c14b37e094a860e3625af53f633174 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 8 | 2015-10-16T13:51:49.000Z | 2021-10-18T13:58:03.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Dtcmp(AutotoolsPackage):
    """The Datatype Comparison Library provides comparison operations and
    parallel sort algorithms for MPI applications."""
    homepage = "https://github.com/hpc/dtcmp"
    url = "https://github.com/hpc/dtcmp/releases/download/v1.0.3/dtcmp-1.0.3.tar.gz"
    # Known releases with their md5 checksums.
    version('1.1.0', 'af5c73f7d3a9afd90a22d0df85471d2f')
    version('1.0.3', 'cdd8ccf71e8ff67de2558594a7fcd317')
    depends_on('mpi')
    depends_on('lwgrp')  # light-weight group representation library
    def configure_args(self):
        # Point ./configure at the lwgrp installation Spack built for us.
        return ["--with-lwgrp=" + self.spec['lwgrp'].prefix]
d99f702c45ed984147aacd9b60be3ef5cc4c2570 | 33,078 | py | Python | tests/support/case.py | bgridley/salt | 8af9765d4a4a02fa6168ae2617a72009996ea7a1 | [
"Apache-2.0"
] | 1 | 2021-07-15T18:11:51.000Z | 2021-07-15T18:11:51.000Z | tests/support/case.py | bgridley/salt | 8af9765d4a4a02fa6168ae2617a72009996ea7a1 | [
"Apache-2.0"
] | null | null | null | tests/support/case.py | bgridley/salt | 8af9765d4a4a02fa6168ae2617a72009996ea7a1 | [
"Apache-2.0"
] | 1 | 2020-04-10T20:18:40.000Z | 2020-04-10T20:18:40.000Z | # -*- coding: utf-8 -*-
'''
:codeauthor: Pedro Algarvio (pedro@algarvio.me)
====================================
Custom Salt TestCase Implementations
====================================
Custom reusable :class:`TestCase<python2:unittest.TestCase>`
implementations.
'''
# pylint: disable=repr-flag-used-in-string
# Import python libs
from __future__ import absolute_import, unicode_literals
import os
import re
import sys
import time
import errno
import signal
import textwrap
import logging
import tempfile
import subprocess
from datetime import datetime, timedelta
# Import salt testing libs
from tests.support.unit import TestCase
from tests.support.helpers import (
RedirectStdStreams, requires_sshd_server, win32_kill_process_tree
)
from tests.support.runtests import RUNTIME_VARS
from tests.support.mixins import AdaptedConfigurationTestCaseMixin, SaltClientTestCaseMixin
from tests.support.paths import INTEGRATION_TEST_DIR, CODE_DIR, PYEXEC, SCRIPT_DIR
from tests.support.cli_scripts import ScriptPathMixin
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import cStringIO # pylint: disable=import-error
# Matches the "function is already running" error emitted by the state system;
# named groups expose the blocking function, its PID, start date and jid.
STATE_FUNCTION_RUNNING_RE = re.compile(
    r'''The function (?:"|')(?P<state_func>.*)(?:"|') is running as PID '''
    r'(?P<pid>[\d]+) and was started at (?P<date>.*) with jid (?P<jid>[\d]+)'
)
# Module-level logger.
log = logging.getLogger(__name__)
class ShellTestCase(TestCase, AdaptedConfigurationTestCaseMixin, ScriptPathMixin):
    '''
    Execute a test for a shell command.

    Each ``run_*`` helper builds the CLI argument string for one Salt
    entrypoint (prepending the test suite's config dir) and delegates the
    actual subprocess handling to :meth:`run_script`.
    '''
    def run_salt(self, arg_str, with_retcode=False, catch_stderr=False, timeout=15):
        r'''
        Run the ``salt`` CLI tool with the provided arguments

        .. code-block:: python

            class MatchTest(ShellTestCase):
                def test_list(self):
                    """
                    test salt -L matcher
                    """
                    data = self.run_salt('-L minion test.ping')
                    data = '\n'.join(data)
                    self.assertIn('minion', data)
        '''
        arg_str = '-c {0} -t {1} {2}'.format(self.config_dir, timeout, arg_str)
        return self.run_script('salt', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr, timeout=timeout)
    def run_ssh(self, arg_str, with_retcode=False, timeout=25,
                catch_stderr=False, wipe=False, raw=False):
        '''
        Execute salt-ssh against localhost using the test roster/key files.
        '''
        arg_str = '{0} {1} -c {2} -i --priv {3} --roster-file {4} localhost {5} --out=json'.format(
            ' -W' if wipe else '',
            ' -r' if raw else '',
            self.config_dir,
            os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'key_test'),
            os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'roster'),
            arg_str
        )
        return self.run_script('salt-ssh', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr, raw=True)
    def run_run(self,
                arg_str,
                with_retcode=False,
                catch_stderr=False,
                asynchronous=False,
                timeout=60,
                config_dir=None,
                **kwargs):
        '''
        Execute salt-run.  ``kwargs['async']`` is honoured as a legacy alias
        for ``asynchronous`` (``async`` became a keyword in Python 3.7).
        '''
        asynchronous = kwargs.get('async', asynchronous)
        arg_str = '-c {0}{async_flag} -t {timeout} {1}'.format(
            config_dir or self.config_dir,
            arg_str,
            timeout=timeout,
            async_flag=' --async' if asynchronous else '')
        return self.run_script('salt-run',
                               arg_str,
                               with_retcode=with_retcode,
                               catch_stderr=catch_stderr,
                               timeout=timeout)
    def run_run_plus(self, fun, *arg, **kwargs):
        '''
        Execute the runner function in-process (no subprocess) and return the
        return data and rendered output in a dict.
        '''
        ret = {'fun': fun}
        # Late import
        import salt.config
        import salt.output
        import salt.runner
        from salt.ext.six.moves import cStringIO
        opts = salt.config.master_config(
            self.get_config_file_path('master')
        )
        opts_arg = list(arg)
        if kwargs:
            # Runner kwargs are passed as a trailing dict flagged __kwarg__.
            opts_arg.append({'__kwarg__': True})
            opts_arg[-1].update(kwargs)
        opts.update({'doc': False, 'fun': fun, 'arg': opts_arg})
        with RedirectStdStreams():
            runner = salt.runner.Runner(opts)
            ret['return'] = runner.run()
            try:
                ret['jid'] = runner.jid
            except AttributeError:
                ret['jid'] = None
        # Compile output
        # TODO: Support outputters other than nested
        opts['color'] = False
        opts['output_file'] = cStringIO()
        try:
            salt.output.display_output(ret['return'], opts=opts)
            ret['out'] = opts['output_file'].getvalue()
        finally:
            opts['output_file'].close()
        return ret
    def run_key(self, arg_str, catch_stderr=False, with_retcode=False):
        '''
        Execute salt-key
        '''
        arg_str = '-c {0} {1}'.format(self.config_dir, arg_str)
        return self.run_script(
            'salt-key',
            arg_str,
            catch_stderr=catch_stderr,
            with_retcode=with_retcode
        )
    def run_cp(self, arg_str, with_retcode=False, catch_stderr=False):
        '''
        Execute salt-cp
        '''
        arg_str = '--config-dir {0} {1}'.format(self.config_dir, arg_str)
        return self.run_script('salt-cp', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr)
    def run_call(self, arg_str, with_retcode=False, catch_stderr=False, local=False):
        # Execute salt-call, optionally with --local (masterless) mode.
        arg_str = '{0} --config-dir {1} {2}'.format('--local' if local else '',
                                                    self.config_dir, arg_str)
        return self.run_script('salt-call', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr)
    def run_cloud(self, arg_str, catch_stderr=False, timeout=None):
        '''
        Execute salt-cloud
        '''
        arg_str = '-c {0} {1}'.format(self.config_dir, arg_str)
        return self.run_script('salt-cloud', arg_str, catch_stderr, timeout)
    def run_script(self,
                   script,
                   arg_str,
                   catch_stderr=False,
                   with_retcode=False,
                   catch_timeout=False,
                   # FIXME A timeout of zero or disabling timeouts may not return results!
                   timeout=15,
                   raw=False,
                   popen_kwargs=None,
                   log_output=None):
        '''
        Execute a script with the given argument string.

        The ``log_output`` argument is ternary, it can be True, False, or None.
        If the value is boolean, then it forces the results to either be logged
        or not logged. If it is None, then the return code of the subprocess
        determines whether or not to log results.

        Returns ``stdout`` (split into lines unless ``raw``), optionally
        followed by ``stderr``, the return code and a timed-out flag, as a
        tuple, depending on the ``catch_stderr``/``with_retcode``/
        ``catch_timeout`` switches.
        '''
        import salt.utils.platform
        script_path = self.get_script_path(script)
        if not os.path.isfile(script_path):
            return False
        # Build the shell command line: on POSIX the PYTHONPATH is forwarded
        # so the subprocess imports the same code as the test runner.
        if salt.utils.platform.is_windows():
            cmd = 'python '
        else:
            cmd = 'PYTHONPATH='
            python_path = os.environ.get('PYTHONPATH', None)
            if python_path is not None:
                cmd += '{0}:'.format(python_path)
            if sys.version_info[0] < 3:
                cmd += '{0} '.format(':'.join(sys.path[1:]))
            else:
                cmd += '{0} '.format(':'.join(sys.path[0:]))
        cmd += 'python{0}.{1} '.format(*sys.version_info)
        cmd += '{0} '.format(script_path)
        cmd += '{0} '.format(arg_str)
        # stdout is spooled to a temp file so a chatty child cannot dead-lock
        # on a full pipe buffer.
        tmp_file = tempfile.SpooledTemporaryFile()
        popen_kwargs = popen_kwargs or {}
        popen_kwargs = dict({
            'shell': True,
            'stdout': tmp_file,
            'universal_newlines': True,
        }, **popen_kwargs)
        if catch_stderr is True:
            popen_kwargs['stderr'] = subprocess.PIPE
        if not sys.platform.lower().startswith('win'):
            popen_kwargs['close_fds'] = True
            def detach_from_parent_group():
                # detach from parent group (no more inherited signals!)
                os.setpgrp()
            popen_kwargs['preexec_fn'] = detach_from_parent_group
        def format_return(retcode, stdout, stderr=None, timed_out=False):
            '''
            DRY helper to log script result if it failed, and then return the
            desired output based on whether or not stderr was desired, and
            whether or not a retcode was desired.
            '''
            log_func = log.debug
            if timed_out:
                log.error(
                    'run_script timed out after %d seconds (process killed)',
                    timeout
                )
                log_func = log.error
            if log_output is True \
                    or timed_out \
                    or (log_output is None and retcode != 0):
                log_func(
                    'run_script results for: %s %s\n'
                    'return code: %s\n'
                    'stdout:\n'
                    '%s\n\n'
                    'stderr:\n'
                    '%s',
                    script, arg_str, retcode, stdout, stderr
                )
            stdout = stdout or ''
            stderr = stderr or ''
            if not raw:
                stdout = stdout.splitlines()
                stderr = stderr.splitlines()
            ret = [stdout]
            if catch_stderr:
                ret.append(stderr)
            if with_retcode:
                ret.append(retcode)
            if catch_timeout:
                ret.append(timed_out)
            return ret[0] if len(ret) == 1 else tuple(ret)
        process = subprocess.Popen(cmd, **popen_kwargs)
        if timeout is not None:
            # Poll the child; on timeout first interrupt the whole process
            # group, then (next loop pass) kill it and return what we have.
            stop_at = datetime.now() + timedelta(seconds=timeout)
            term_sent = False
            while True:
                process.poll()
                time.sleep(0.1)
                if datetime.now() <= stop_at:
                    # We haven't reached the timeout yet
                    if process.returncode is not None:
                        break
                else:
                    # We've reached the timeout
                    if term_sent is False:
                        # Kill the process group since sending the term signal
                        # would only terminate the shell, not the command
                        # executed in the shell
                        if salt.utils.platform.is_windows():
                            _, alive = win32_kill_process_tree(process.pid)
                            if alive:
                                log.error("Child processes still alive: %s", alive)
                        else:
                            os.killpg(os.getpgid(process.pid), signal.SIGINT)
                        term_sent = True
                        continue
                    try:
                        # As a last resort, kill the process group
                        if salt.utils.platform.is_windows():
                            _, alive = win32_kill_process_tree(process.pid)
                            if alive:
                                log.error("Child processes still alive: %s", alive)
                        else:
                            os.killpg(os.getpgid(process.pid), signal.SIGINT)
                    except OSError as exc:
                        if exc.errno != errno.ESRCH:
                            # If errno is not "no such process", raise
                            raise
                    return format_return(
                        process.returncode,
                        *process.communicate(),
                        timed_out=True
                    )
        tmp_file.seek(0)
        if sys.version_info >= (3,):
            try:
                out = tmp_file.read().decode(__salt_system_encoding__)
            except (NameError, UnicodeDecodeError):
                # Let's cross our fingers and hope for the best
                out = tmp_file.read().decode('utf-8')
        else:
            out = tmp_file.read()
        if catch_stderr:
            if sys.version_info < (2, 7):
                # On python 2.6, the subprocess'es communicate() method uses
                # select which, is limited by the OS to 1024 file descriptors
                # We need more available descriptors to run the tests which
                # need the stderr output.
                # So instead of .communicate() we wait for the process to
                # finish, but, as the python docs state "This will deadlock
                # when using stdout=PIPE and/or stderr=PIPE and the child
                # process generates enough output to a pipe such that it
                # blocks waiting for the OS pipe buffer to accept more data.
                # Use communicate() to avoid that." <- a catch, catch situation
                #
                # Use this work around where it's needed only, python 2.6
                process.wait()
                err = process.stderr.read()
            else:
                _, err = process.communicate()
            # Force closing stderr/stdout to release file descriptors
            if process.stdout is not None:
                process.stdout.close()
            if process.stderr is not None:
                process.stderr.close()
            # pylint: disable=maybe-no-member
            try:
                return format_return(process.returncode, out, err or '')
            finally:
                try:
                    if os.path.exists(tmp_file.name):
                        if isinstance(tmp_file.name, six.string_types):
                            # tmp_file.name is an int when using SpooledTemporaryFiles
                            # int types cannot be used with os.remove() in Python 3
                            os.remove(tmp_file.name)
                        else:
                            # Clean up file handles
                            tmp_file.close()
                    process.terminate()
                except OSError as err:
                    # process already terminated
                    pass
            # pylint: enable=maybe-no-member
        # TODO Remove this?
        process.communicate()
        if process.stdout is not None:
            process.stdout.close()
        try:
            return format_return(process.returncode, out)
        finally:
            try:
                if os.path.exists(tmp_file.name):
                    if isinstance(tmp_file.name, six.string_types):
                        # tmp_file.name is an int when using SpooledTemporaryFiles
                        # int types cannot be used with os.remove() in Python 3
                        os.remove(tmp_file.name)
                    else:
                        # Clean up file handles
                        tmp_file.close()
                process.terminate()
            except OSError as err:
                # process already terminated
                pass
class ShellCase(ShellTestCase, AdaptedConfigurationTestCaseMixin, ScriptPathMixin):
    '''
    Execute a test for a shell command.

    Same helpers as :class:`ShellTestCase` but with a much larger default
    timeout (``RUN_TIMEOUT``) and debug logging of every command's result.
    '''
    _code_dir_ = CODE_DIR
    _script_dir_ = SCRIPT_DIR
    _python_executable_ = PYEXEC
    RUN_TIMEOUT = 500  # default timeout (seconds) for every run_* helper
    def chdir(self, dirname):
        # Best effort chdir; fall back to the integration test directory.
        try:
            os.chdir(dirname)
        except OSError:
            os.chdir(INTEGRATION_TEST_DIR)
    def run_salt(self, arg_str, with_retcode=False, catch_stderr=False,  # pylint: disable=W0221
                 timeout=RUN_TIMEOUT, popen_kwargs=None):
        '''
        Execute salt
        '''
        arg_str = '-c {0} -t {1} {2}'.format(self.config_dir, timeout, arg_str)
        ret = self.run_script('salt',
                              arg_str,
                              with_retcode=with_retcode,
                              catch_stderr=catch_stderr,
                              timeout=timeout,
                              popen_kwargs=popen_kwargs)
        log.debug('Result of run_salt for command \'%s\': %s', arg_str, ret)
        return ret
    def run_spm(self, arg_str, with_retcode=False, catch_stderr=False, timeout=RUN_TIMEOUT):  # pylint: disable=W0221
        '''
        Execute spm
        '''
        ret = self.run_script('spm',
                              arg_str,
                              with_retcode=with_retcode,
                              catch_stderr=catch_stderr,
                              timeout=timeout)
        log.debug('Result of run_spm for command \'%s\': %s', arg_str, ret)
        return ret
    def run_ssh(self, arg_str, with_retcode=False, catch_stderr=False,  # pylint: disable=W0221
                timeout=RUN_TIMEOUT, wipe=True, raw=False):
        '''
        Execute salt-ssh against localhost with debug logging enabled.
        '''
        arg_str = '{0} -ldebug{1} -c {2} -i --priv {3} --roster-file {4} --out=json localhost {5}'.format(
            ' -W' if wipe else '',
            ' -r' if raw else '',
            self.config_dir,
            os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'key_test'),
            os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'roster'),
            arg_str)
        ret = self.run_script('salt-ssh',
                              arg_str,
                              with_retcode=with_retcode,
                              catch_stderr=catch_stderr,
                              timeout=timeout,
                              raw=True)
        log.debug('Result of run_ssh for command \'%s\': %s', arg_str, ret)
        return ret
    def run_run(self, arg_str, with_retcode=False, catch_stderr=False,
                asynchronous=False, timeout=RUN_TIMEOUT, config_dir=None, **kwargs):
        '''
        Execute salt-run.  ``kwargs['async']`` is honoured as a legacy alias
        for ``asynchronous``.
        '''
        asynchronous = kwargs.get('async', asynchronous)
        arg_str = '-c {0}{async_flag} -t {timeout} {1}'.format(config_dir or self.config_dir,
                                                               arg_str,
                                                               timeout=timeout,
                                                               async_flag=' --async' if asynchronous else '')
        # Give the subprocess slightly longer than the salt-run timeout so
        # salt-run itself can time out cleanly first.
        ret = self.run_script('salt-run',
                              arg_str,
                              with_retcode=with_retcode,
                              catch_stderr=catch_stderr,
                              timeout=timeout + 10)
        log.debug('Result of run_run for command \'%s\': %s', arg_str, ret)
        return ret
    def run_run_plus(self, fun, *arg, **kwargs):
        '''
        Execute the runner function in-process and return the return data and
        output in a dict.  Pass ``__reload_config=True`` to rebuild the client
        configuration from scratch.
        '''
        # Late import
        import salt.runner
        import salt.output
        ret = {'fun': fun}
        from_scratch = bool(kwargs.pop('__reload_config', False))
        # Have to create an empty dict and then update it, as the result from
        # self.get_config() is an ImmutableDict which cannot be updated.
        opts = {}
        opts.update(self.get_config('client_config', from_scratch=from_scratch))
        opts_arg = list(arg)
        if kwargs:
            opts_arg.append({'__kwarg__': True})
            opts_arg[-1].update(kwargs)
        opts.update({'doc': False, 'fun': fun, 'arg': opts_arg})
        with RedirectStdStreams():
            runner = salt.runner.Runner(opts)
            ret['return'] = runner.run()
            try:
                ret['jid'] = runner.jid
            except AttributeError:
                ret['jid'] = None
        # Compile output
        # TODO: Support outputters other than nested
        opts['color'] = False
        opts['output_file'] = cStringIO()
        try:
            salt.output.display_output(ret['return'], opts=opts)
            ret['out'] = opts['output_file'].getvalue().splitlines()
        finally:
            opts['output_file'].close()
        log.debug('Result of run_run_plus for fun \'%s\' with arg \'%s\': %s',
                  fun, opts_arg, ret)
        return ret
    def run_key(self, arg_str, catch_stderr=False, with_retcode=False,  # pylint: disable=W0221
                timeout=RUN_TIMEOUT):
        '''
        Execute salt-key
        '''
        arg_str = '-c {0} {1}'.format(self.config_dir, arg_str)
        ret = self.run_script('salt-key',
                              arg_str,
                              catch_stderr=catch_stderr,
                              with_retcode=with_retcode,
                              timeout=timeout)
        log.debug('Result of run_key for command \'%s\': %s', arg_str, ret)
        return ret
    def run_cp(self, arg_str, with_retcode=False, catch_stderr=False,  # pylint: disable=W0221
               timeout=RUN_TIMEOUT):
        '''
        Execute salt-cp
        '''
        # Note: not logging result of run_cp because it will log a bunch of
        # bytes which will not be very helpful.
        arg_str = '--config-dir {0} {1}'.format(self.config_dir, arg_str)
        return self.run_script('salt-cp',
                               arg_str,
                               with_retcode=with_retcode,
                               catch_stderr=catch_stderr,
                               timeout=timeout)
    def run_call(self, arg_str, with_retcode=False, catch_stderr=False,  # pylint: disable=W0221
                 local=False, timeout=RUN_TIMEOUT):
        '''
        Execute salt-call.
        '''
        arg_str = '{0} --config-dir {1} {2}'.format('--local' if local else '',
                                                    self.config_dir, arg_str)
        ret = self.run_script('salt-call',
                              arg_str,
                              with_retcode=with_retcode,
                              catch_stderr=catch_stderr,
                              timeout=timeout)
        log.debug('Result of run_call for command \'%s\': %s', arg_str, ret)
        return ret
    def run_cloud(self, arg_str, catch_stderr=False, timeout=RUN_TIMEOUT):
        '''
        Execute salt-cloud
        '''
        arg_str = '-c {0} {1}'.format(self.config_dir, arg_str)
        ret = self.run_script('salt-cloud',
                              arg_str,
                              catch_stderr,
                              timeout=timeout)
        log.debug('Result of run_cloud for command \'%s\': %s', arg_str, ret)
        return ret
class SPMTestUserInterface(object):
    '''
    In-memory user interface for SPMClient: rather than printing or
    prompting, every status / confirm / error call is recorded in a list so
    tests can inspect what the client reported.
    '''
    def __init__(self):
        # One recording list per message channel.
        self._status, self._confirm, self._error = [], [], []

    def status(self, msg):
        '''Record a status message.'''
        self._status.append(msg)

    def confirm(self, action):
        '''Record (and implicitly accept) a confirmation request.'''
        self._confirm.append(action)

    def error(self, msg):
        '''Record an error message.'''
        self._error.append(msg)
class SPMCase(TestCase, AdaptedConfigurationTestCaseMixin):
    '''
    Class for handling spm commands.

    Helpers create a throw-away formula tree, a temporary SPM configuration
    and a local file:// repository, and drive :class:`salt.spm.SPMClient`
    in-process through :class:`SPMTestUserInterface`.
    '''
    def _spm_build_files(self, config):
        # Lay out a minimal "apache" formula (one SLS + FORMULA metadata)
        # under the configured file_roots for the build step to package.
        self.formula_dir = os.path.join(' '.join(config['file_roots']['base']), 'formulas')
        self.formula_sls_dir = os.path.join(self.formula_dir, 'apache')
        self.formula_sls = os.path.join(self.formula_sls_dir, 'apache.sls')
        self.formula_file = os.path.join(self.formula_dir, 'FORMULA')
        dirs = [self.formula_dir, self.formula_sls_dir]
        for f_dir in dirs:
            os.makedirs(f_dir)
        # Late import
        import salt.utils.files
        with salt.utils.files.fopen(self.formula_sls, 'w') as fp:
            fp.write(textwrap.dedent('''\
                install-apache:
                  pkg.installed:
                    - name: apache2
                '''))
        with salt.utils.files.fopen(self.formula_file, 'w') as fp:
            fp.write(textwrap.dedent('''\
                name: apache
                os: RedHat, Debian, Ubuntu, Suse, FreeBSD
                os_family: RedHat, Debian, Suse, FreeBSD
                version: 201506
                release: 2
                summary: Formula for installing Apache
                description: Formula for installing Apache
                '''))
    def _spm_config(self, assume_yes=True):
        # Build a minion config pointing every SPM path at a fresh temp dir
        # and persist it as a YAML file named "spm" inside that dir.
        self._tmp_spm = tempfile.mkdtemp()
        config = self.get_temp_config('minion', **{
            'spm_logfile': os.path.join(self._tmp_spm, 'log'),
            'spm_repos_config': os.path.join(self._tmp_spm, 'etc', 'spm.repos'),
            'spm_cache_dir': os.path.join(self._tmp_spm, 'cache'),
            'spm_build_dir': os.path.join(self._tmp_spm, 'build'),
            'spm_build_exclude': ['apache/.git'],
            'spm_db_provider': 'sqlite3',
            'spm_files_provider': 'local',
            'spm_db': os.path.join(self._tmp_spm, 'packages.db'),
            'extension_modules': os.path.join(self._tmp_spm, 'modules'),
            'file_roots': {'base': [self._tmp_spm, ]},
            'formula_path': os.path.join(self._tmp_spm, 'salt'),
            'pillar_path': os.path.join(self._tmp_spm, 'pillar'),
            'reactor_path': os.path.join(self._tmp_spm, 'reactor'),
            'assume_yes': True if assume_yes else False,
            'force': False,
            'verbose': False,
            'cache': 'localfs',
            'cachedir': os.path.join(self._tmp_spm, 'cache'),
            'spm_repo_dups': 'ignore',
            'spm_share_dir': os.path.join(self._tmp_spm, 'share'),
        })
        import salt.utils.files
        import salt.utils.yaml
        if not os.path.isdir(config['formula_path']):
            os.makedirs(config['formula_path'])
        with salt.utils.files.fopen(os.path.join(self._tmp_spm, 'spm'), 'w') as fp:
            salt.utils.yaml.safe_dump(config, fp)
        return config
    def _spm_create_update_repo(self, config):
        # Build the formula package, publish it to a local repo directory and
        # register that directory as a file:// SPM repository.
        build_spm = self.run_spm('build', self.config, self.formula_dir)
        c_repo = self.run_spm('create_repo', self.config,
                              self.config['spm_build_dir'])
        repo_conf_dir = self.config['spm_repos_config'] + '.d'
        os.makedirs(repo_conf_dir)
        # Late import
        import salt.utils.files
        with salt.utils.files.fopen(os.path.join(repo_conf_dir, 'spm.repo'), 'w') as fp:
            fp.write(textwrap.dedent('''\
                local_repo:
                  url: file://{0}
                '''.format(self.config['spm_build_dir'])))
        u_repo = self.run_spm('update_repo', self.config)
    def _spm_client(self, config):
        # Create an SPMClient wired to the recording test user interface.
        import salt.spm
        self.ui = SPMTestUserInterface()
        client = salt.spm.SPMClient(self.ui, config)
        return client
    def run_spm(self, cmd, config, arg=None):
        # Run one SPM command and return the status messages it produced.
        client = self._spm_client(config)
        spm_cmd = client.run([cmd, arg])
        client._close()
        return self.ui._status
class ModuleCase(TestCase, SaltClientTestCaseMixin):
    '''
    Execute a module function
    '''
    def minion_run(self, _function, *args, **kw):
        '''
        Run a single salt function on the 'minion' target and condition
        the return down to match the behavior of the raw function call
        '''
        return self.run_function(_function, args, **kw)
    def run_function(self, function, arg=(), minion_tgt='minion', timeout=300, **kwargs):
        '''
        Run a single salt function and condition the return down to match the
        behavior of the raw function call.

        The test is skipped (not failed) when the minion does not answer or
        unexpectedly returns ``None``, since that indicates a broken test
        environment rather than a broken test.
        '''
        # Functions for which a None return is legitimate and must not skip.
        known_to_return_none = (
            'file.chown',
            'file.chgrp',
            'ssh.recv_known_host_entries',
            'pkg.refresh_db'  # At least on CentOS
        )
        if minion_tgt == 'sub_minion':
            known_to_return_none += ('mine.update',)
        # f_arg / f_timeout allow passing 'arg' and 'timeout' through to the
        # remote function without clashing with this method's own parameters.
        if 'f_arg' in kwargs:
            kwargs['arg'] = kwargs.pop('f_arg')
        if 'f_timeout' in kwargs:
            kwargs['timeout'] = kwargs.pop('f_timeout')
        orig = self.client.cmd(minion_tgt,
                               function,
                               arg,
                               timeout=timeout,
                               kwarg=kwargs)
        if minion_tgt not in orig:
            self.skipTest(
                'WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply '
                'from the minion \'{0}\'. Command output: {1}'.format(
                    minion_tgt, orig
                )
            )
        elif orig[minion_tgt] is None and function not in known_to_return_none:
            self.skipTest(
                'WARNING(SHOULD NOT HAPPEN #1935): Failed to get \'{0}\' from '
                'the minion \'{1}\'. Command output: {2}'.format(
                    function, minion_tgt, orig
                )
            )
        # Try to match stalled state functions
        orig[minion_tgt] = self._check_state_return(orig[minion_tgt])
        return orig[minion_tgt]
    def run_state(self, function, **kwargs):
        '''
        Run the state.single command and return the state return structure
        '''
        ret = self.run_function('state.single', [function], **kwargs)
        return self._check_state_return(ret)
    def _check_state_return(self, ret):
        '''
        Detect "function is running" state-lock errors in a state return,
        kill the blocking jobs, and annotate the return with what was done.
        '''
        if isinstance(ret, dict):
            # This is the supposed return format for state calls
            return ret
        if isinstance(ret, list):
            jids = []
            # These are usually errors
            for item in ret[:]:
                if not isinstance(item, six.string_types):
                    # We don't know how to handle this
                    continue
                match = STATE_FUNCTION_RUNNING_RE.match(item)
                if not match:
                    # We don't know how to handle this
                    continue
                jid = match.group('jid')
                if jid in jids:
                    continue
                jids.append(jid)
                # Kill the job holding the state lock and record the outcome.
                job_data = self.run_function('saltutil.find_job', [jid])
                job_kill = self.run_function('saltutil.kill_job', [jid])
                msg = (
                    'A running state.single was found causing a state lock. '
                    'Job details: \'{0}\' Killing Job Returned: \'{1}\''.format(
                        job_data, job_kill
                    )
                )
                ret.append('[TEST SUITE ENFORCED]{0}'
                           '[/TEST SUITE ENFORCED]'.format(msg))
        return ret
class SyndicCase(TestCase, SaltClientTestCaseMixin):
    '''
    Execute a syndic based execution test
    '''
    _salt_client_config_file_name_ = 'syndic_master'

    def run_function(self, function, arg=()):
        '''
        Run a single salt function through the syndic and condition the
        return down to match the behavior of the raw function call.  The test
        is skipped when the minion does not reply at all.
        '''
        reply = self.client.cmd('minion', function, arg, timeout=25)
        if 'minion' in reply:
            return reply['minion']
        self.skipTest(
            'WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply '
            'from the minion. Command output: {0}'.format(reply)
        )
@requires_sshd_server
class SSHCase(ShellCase):
    '''
    Execute a command via salt-ssh
    '''
    def _arg_str(self, function, arg):
        '''Join a function name and its argument list into one CLI string.'''
        return '%s %s' % (function, ' '.join(arg))

    def run_function(self, function, arg=(), timeout=180, wipe=True, raw=False, **kwargs):
        '''
        We use a 180s timeout here, which some slower systems do end up needing
        '''
        cmd = self._arg_str(function, arg)
        ret = self.run_ssh(cmd, timeout=timeout, wipe=wipe, raw=raw)
        log.debug('SSHCase run_function executed %s with arg %s', function, arg)
        log.debug('SSHCase JSON return: %s', ret)
        # Late import
        import salt.utils.json
        try:
            return salt.utils.json.loads(ret)['localhost']
        except Exception:
            # Not valid JSON (or no 'localhost' key): hand back the raw output.
            return ret
class ClientCase(AdaptedConfigurationTestCaseMixin, TestCase):
    '''
    A base class containing relevant options for starting the various Salt
    Python API entrypoints
    '''
    def get_opts(self):
        '''Load and return the master's client configuration.'''
        # Late import
        import salt.config
        master_conf = self.get_config_file_path('master')
        return salt.config.client_config(master_conf)

    def mkdir_p(self, path):
        '''Create ``path`` and any missing parents, tolerating an already
        existing directory (like ``mkdir -p``).'''
        try:
            os.makedirs(path)
        except OSError as exc:  # Python >2.5
            if exc.errno != errno.EEXIST or not os.path.isdir(path):
                raise
# ----- Backwards Compatible Imports -------------------------------------------------------------------------------->
from tests.support.mixins import ShellCaseCommonTestsMixin # pylint: disable=unused-import
# <---- Backwards Compatible Imports ---------------------------------------------------------------------------------
| 37.334086 | 118 | 0.526725 |
bfd0e40cf7897cde2e63f9a15c9a11da5a7a6966 | 5,904 | py | Python | RNA_PDX/bin/read_group_from_fastq.py | TheJacksonLaboratory/PDX-Analysis-Workflows | 4660d4923a689840b755df169c71ef8edad57ecd | [
"RSA-MD"
] | 7 | 2018-09-13T02:17:44.000Z | 2021-01-30T09:59:49.000Z | RNA_PDX/bin/read_group_from_fastq.py | TheJacksonLaboratory/PDX-Analysis-Workflows | 4660d4923a689840b755df169c71ef8edad57ecd | [
"RSA-MD"
] | 1 | 2020-03-13T10:38:56.000Z | 2020-03-20T03:43:15.000Z | RNA_PDX/bin/read_group_from_fastq.py | TheJacksonLaboratory/PDX-Analysis-Workflows | 4660d4923a689840b755df169c71ef8edad57ecd | [
"RSA-MD"
] | 9 | 2019-02-08T13:21:59.000Z | 2021-05-16T20:56:19.000Z | #! /usr/bin/env python
"""
read_group_from_fastq.py
Input: the fastq file specified as argv[1], the first command line argument.
Handles compressed or uncompressed fastqs.
Output: the second command line argument, if specified, else, sys.stdout.
Notes:
We will usually be handling standard Illumina Casava 1.8+ output, which
has a regular file naming format and read name format. If any of the
steps here fail, cause the pipeline to fail rather than producing
untraceable output.
"""
import sys
import os
import re
import time
import gzip
import argparse
try:
import bz2file as bz2
except ImportError:
import bz2
def parse_args():
    """Parse the command line and attach a derived ``sample_type`` prefix.

    Returns the argparse namespace; ``sample_type`` is "Tumor_", "Normal_",
    or "" depending on the mutually exclusive --tumor/--normal flags.
    """
    # NOTE: the version= keyword is Python-2 argparse only.
    parser = argparse.ArgumentParser(version='V2.0')
    parser.add_argument('-p', '--picard', action='store_true',
                        help="Use Picard format for read group line")
    parser.add_argument('-t', '--tumor', action='store_true',
                        help="Sample is tumor in a tumor/normal pair")
    parser.add_argument('-n', '--normal', action='store_true',
                        help="Sample is normal in a tumor/normal pair")
    parser.add_argument('fastq',
                        help="Path to fastq file for sample")
    parser.add_argument('output', nargs='?',
                        help="Output file name [STDOUT]")
    args = parser.parse_args()
    if args.tumor:
        if args.normal:
            # Check for a conflict.
            parser.error("Must not specify both --tumor and --normal.")
        args.sample_type = "Tumor_"
    elif args.normal:
        args.sample_type = "Normal_"
    else:
        args.sample_type = ""
    return args
def multi_open(name):
    """Open *name* for reading, transparently handling compressed files.

    Files ending in ``.gz`` are opened with gzip, ``.bz2`` with bz2;
    anything else is opened as a plain file.
    """
    if name.endswith('.gz'):
        return gzip.open(name)
    if name.endswith('.bz2'):
        return bz2.BZ2File(name)
    return open(name)
def make_fake(args):
    """
    If we can't get adequate data from the file, use timestamps.
    :return:
    """
    # Sleep for 2 seconds, to make sure that a previous invocation
    # will have a different time stamp.
    time.sleep(2)
    ts = time.strftime('%H%M%S')
    id = 'ID_' + ts
    lb = 'LIB_' + ts
    sm = 'SAMPLE_' + ts
    bc = 'RUN_' + ts
    output(id, lb, sm, bc, args)
    # Terminate the program: fake values are a final fallback, so callers
    # rely on this never returning.
    sys.exit(0)
def main():
#cga_version.parse_options()
args = parse_args()
# First get the info from the filename
fn = os.path.split(args.fastq)[1]
if 'fastq' not in fn and 'fq' not in fn:
print >> sys.stderr, "Not seemingly a fastq file:", fn
make_fake(args)
# Does not return...
# Now split the basename portion into its constituent parts.
fn_parts = fn.split('_')
# Scan for the "GES" starting a filename part. If found,
# That separates the Sample name portion from the Library name.
# If GES is not found starting a part, use the whole filename
# as both the Sample name and the Library name.
# Maybe redo this with regular expressions, but for now, it works.
pos = -1
for n in range(len(fn_parts)):
if fn_parts[n].startswith("GES"):
pos = n
break
if pos == -1:
# Didn't find the GES marker. Use the filename up to the end name.
match = re.search('(.*)[._]R[12]_.*',fn)
if match is not None:
fn = match.group(1)
else:
# something is seriously odd here, but we'll just use the
# whole filename
pass
cust_id = ges_id = fn
else:
cust_id = '_'.join(fn_parts[:pos])
ges_parts = fn_parts[pos:]
pos = 999 # Way bigger than the number of parts we'll see.
for n in range(len(ges_parts)):
if ges_parts[n] == 'R1' or ges_parts[n] == 'R2':
pos = n
break
ges_id = '_'.join(ges_parts[:pos])
# Sanity check that we have some amount of text for our fields. The
# down stream tools can't tolerate empty fields in the read group
# information.
if not ges_id:
ges_id = fn
if not cust_id:
cust_id = ges_id
# Now the parts from the first readname--the first line of the file.
# When split on ':', the readname contains
# - the ID in the first four fields.
# Note: the leading '@' needs to be stripped.
try:
inf = multi_open(sys.argv[1])
line = inf.readline()
except IOError, e:
print sys.stderr, "Couldn't read the file: {0}\n {1}". \
format(fn, e.message)
make_fake(args)
# Does not return
# Example line:
# @HISEQ2000:190:D19U8ACXX:5:1101:1492:1901 1:N:0:TAGCTT
parts = line[1:].strip().split(' ')
read_name = parts[0]
# Example read_name: HISEQ2000:190:D19U8ACXX:5:1101:1492:1901
rparts = read_name.split(':')
if len(rparts) >= 4:
rparts = rparts[:4]
# Try to add the bar code in:
bar_code = "no_barcode"
if len(parts) >= 2:
# Example comment: 1:N:0:TAGCTT
comment = parts[1]
cparts = comment.split(':')
if len(cparts) == 4:
bar_code = cparts[3]
rparts.append(bar_code)
id = ':'.join(rparts)
# Example id: HISEQ2000:190:D19U8ACXX:5:TAGCTT
output(id, ges_id, cust_id, bar_code, args)
def output(id, ges_id, cust_id, bar_code, args):
    """Write the read-group line (Picard or @RG format) to args.output/stdout."""
    if args.output is not None:
        of = open(args.output, 'w')
    else:
        of = sys.stdout
    if args.picard:
        # Picard-style key=value pairs; sample_type prefixes ID and LB.
        line = 'RGID={0}{1} RGLB={0}{2} ' \
               'RGPL=ILLUMINA RGSM={3} RGPU={4}'.\
            format(args.sample_type, id, ges_id, cust_id, bar_code)
    else :
        # BWA/samtools-style @RG header with literal "\t" separators.
        line = '@RG\\tID:{0}{1}\\tLB:{0}{2}\\tSM:{3}\\tPL:ILLUMINA'.\
            format(args.sample_type, id, ges_id, cust_id)
    # This needs to be a single line file; no terminating \n
    print >> of, line,
    if of != sys.stdout:
        of.close()
# Script entry point.
if __name__ == '__main__':
    main()
| 29.373134 | 77 | 0.588923 |
ee03655992403ccc56e79011910921d3147a7ac1 | 7,447 | py | Python | processing/stft.py | SAKEverse/sake-plot | a08973222109981b36d204a754d0bf34d95be192 | [
"Apache-2.0"
] | null | null | null | processing/stft.py | SAKEverse/sake-plot | a08973222109981b36d204a754d0bf34d95be192 | [
"Apache-2.0"
] | 1 | 2021-11-30T16:21:18.000Z | 2021-11-30T16:21:18.000Z | processing/stft.py | SAKEverse/sake-plot | a08973222109981b36d204a754d0bf34d95be192 | [
"Apache-2.0"
] | null | null | null | ########## ------------------------------- IMPORTS ------------------------ ##########
import numpy as np
import pandas as pd
from typing import Union#, List
from beartype import beartype
from scipy.signal import stft as scipy_stft
########## ---------------------------------------------------------------- ##########
class GetIndex():
    """Wrap a numpy array and locate the index of the nearest value."""
    def __init__(self, array):
        # Reference array searched by find_nearest.
        self.array = array
    def find_nearest(self, value):
        """
        Return the index of the element of ``self.array`` closest to *value*.

        Parameters
        ----------
        value : scalar to search the array for

        Returns
        -------
        index of the nearest element
        """
        distances = np.abs(self.array - value)
        return distances.argmin()
@beartype
def get_freq_index(freq_vector:np.ndarray, freqs) -> np.ndarray: #freqs: List[Union[int, float]]
    """
    Get frequency index.
    Parameters
    ----------
    freq_vector : np.ndarray, frequency vector to be indexed
    freqs : values to find the index
    Returns
    -------
    np.ndarray
    """
    # instantiate
    f = GetIndex(freq_vector)
    # vectorize function
    # NOTE: np.vectorize is a convenience loop, not a performance win.
    vfunc = np.vectorize(f.find_nearest)
    # return index
    return vfunc(freqs)
def f_fill(arr: np.ndarray, axis: int = 0) -> np.ndarray:
    """
    Replace NaN entries using pandas nearest-neighbour interpolation.

    Parameters
    ----------
    arr : np.ndarray
    axis : int, axis along which the interpolation runs

    Returns
    -------
    np.ndarray
    """
    # Round-trip through a DataFrame so pandas handles the interpolation.
    frame = pd.DataFrame(arr)
    filled = frame.interpolate(method='nearest',
                               limit_direction='forward', axis=axis)
    return filled.values
class Properties:
    """Copy a dictionary onto object attributes, enforcing expected types."""
    # Recognised configuration keys and the exact type each value must have.
    types = {'sampling_rate':int, 'fft_win':int, 'fft_freq_range':list,
             'fft_overlap':float, 'mains_noise':list}
    def __init__(self, properties: dict):
        """Set one attribute per entry; raise on unknown keys or wrong types."""
        for name, val in properties.items():
            expected = self.types.get(name)
            if expected is None:
                raise Exception('-> Variable *' + name + '* was not found.\n')
            # Exact type match (no subclasses), mirroring type(value) == expected.
            if type(val) is not expected:
                raise Exception('-> Got ' + str(type(val)) + '. Expected: ' + str(expected) + '.\n')
            setattr(self, name, val)
@beartype
def check_range_input(input_range:list, lower_limit : Union[float, int], upper_limit: Union[float, int]):
    """
    Check whether input_range list is valid
    Parameters
    ----------
    input_range: list
    lower_limit : (float|int).
    upper_limit : (float|int)
    Returns
    -------
    None.
    """
    # check that there are only two real numbers [lower, upper] limit within nyquist limit
    if all(isinstance(x, (int, float)) for x in input_range) == False:
        raise(Exception('-> Elements in list have to be numeric.\n'))
    if len(input_range) != 2:
        raise(Exception('-> Got length of freq_range : ' + str(len(input_range)) + '. Expected : 2.\n'))
    if input_range[0] > input_range[1]:
        raise(Exception('-> The second element of freq_range has to be greater than the first.\n'))
    # Bounds are inclusive: equality to a limit is accepted.
    if any(np.array(input_range) < lower_limit):
        raise(Exception('-> Values can not be below lower limit: ' + str(lower_limit) +'.\n'))
    if any(np.array(input_range) > upper_limit):
        raise(Exception('-> Values can not exceed upper limit: ' + str(upper_limit) +'.\n'))
# Single PSD class
class Stft(Properties):
    """
    Perform Stft analysis on 1D signal.
    """
    @beartype
    def __init__(self, properties:dict):
        """
        Parameters
        ----------
        sampling_rate : int
        fft_win : int
        fft_freq_range : list
        fft_overlap : float, the default is 0.5.
        Returns
        -------
        None.
        """
        # pass parameters to object
        super().__init__(properties)
        self.winsize = int(self.sampling_rate * self.fft_win) # window size (samples)
        self.fft_overlap_size = int(self.winsize * self.fft_overlap) # fft_overlap size (samples)
        self.f_idx = self.get_freq_idx(self.fft_freq_range) # get frequency index
        # check that there are only two real numbers [lower, upper] limit within nyquist limit
        check_range_input(self.fft_freq_range, 0, self.sampling_rate/2)
        # check if mains noise is within user specified frequency range
        check_range_input(self.mains_noise, self.fft_freq_range[0], self.fft_freq_range[1])
        # get frequency range
        # Bin spacing is 1/fft_win Hz; endpoint adjusted to include the upper bound.
        self.f = np.arange(self.fft_freq_range[0], self.fft_freq_range[1] + (1/self.fft_win), 1/self.fft_win)
    @beartype
    def get_freq_idx(self, f:list) -> np.ndarray:
        """
        Convert frequency value to frequency index based on sampling rate
        Parameters
        ----------
        f : list, containing frequency value
        Returns
        -------
        freq_idx : list, frequency index value(int)
        """
        freq_idx = np.zeros(len(f), dtype = np.int32)
        for i in range(len(f)):
            # winsize/sampling_rate converts Hz to FFT bin number.
            freq_idx[i] = int(f[i]*(self.winsize/self.sampling_rate))
        return freq_idx
    @beartype
    def get_stft(self, input_wave:np.ndarray):
        """
        Run short time fourier transfrom on input_wave.
        Parameters
        ----------
        input_wave : np.ndarray, 1D signal
        Returns
        -------
        power_matrix : 2D numpy array, rows = freq and columns = time bins
        """
        # get spectrogram # f, t, pmat =
        _, _, pmat = scipy_stft(input_wave, self.sampling_rate,
                                nperseg=self.winsize,
                                noverlap=self.fft_overlap_size,
                                padded=False)
        # get real power
        # Keep only the user-requested frequency band before squaring.
        pmat = np.square(np.abs(pmat[self.f_idx[0] : self.f_idx[1]+1,:]))
        return pmat #f[self.f_idx[0] : self.f_idx[1]+1],
    @beartype
    def remove_mains(self, freq:np.ndarray, pmat:np.ndarray) -> np.ndarray:
        """
        Remove mains noise, using nans replacement and
        Parameters
        ----------
        freq : np.ndarray
        pmat : np.ndarray
        Returns
        -------
        pmat : np.ndarray
        """
        # find frequency index
        f_idx = get_freq_index(freq, self.mains_noise)
        # set noise index to NaNs
        # NOTE: this mutates the caller's pmat in place before refilling.
        pmat[f_idx[0]:f_idx[1]+1,:] = np.nan
        # fill NaNs
        pmat = f_fill(pmat, axis=0)
        return pmat
    @beartype
    def run_stft(self, input_wave:np.ndarray):
        """
        Get stft and remove mains noise.
        Parameters
        ----------
        input_wave : np.ndarray, 1D signal
        f_noise : list, lower and upper bounds of mains noise
        Returns
        -------
        freq : np.ndarray, real frequency vector
        pmat : np.ndarray, transformed spectogram
        """
        # get stft
        pmat = self.get_stft(input_wave)
        # remove mains nose
        pmat = self.remove_mains(self.f, pmat)
        return pmat
| 27.278388 | 116 | 0.542366 |
cae684431e79b7a0e8adb103bfd75fbd29946e23 | 23,810 | py | Python | demisto_sdk/commands/upload/tests/uploader_test.py | cyrengmbh/demisto-sdk | 4d05532efd627a32b1af659e010540cf7ed3ebf3 | [
"MIT"
] | null | null | null | demisto_sdk/commands/upload/tests/uploader_test.py | cyrengmbh/demisto-sdk | 4d05532efd627a32b1af659e010540cf7ed3ebf3 | [
"MIT"
] | null | null | null | demisto_sdk/commands/upload/tests/uploader_test.py | cyrengmbh/demisto-sdk | 4d05532efd627a32b1af659e010540cf7ed3ebf3 | [
"MIT"
] | null | null | null | import inspect
import json
from functools import wraps
from unittest.mock import MagicMock, patch
import demisto_client
import pytest
from demisto_client.demisto_api.rest import ApiException
from demisto_sdk.commands.common.constants import (CLASSIFIERS_DIR,
INTEGRATIONS_DIR,
LAYOUTS_DIR, SCRIPTS_DIR,
TEST_PLAYBOOKS_DIR,
FileType)
from demisto_sdk.commands.common.legacy_git_tools import git_path
from demisto_sdk.commands.common.tools import get_yml_paths_in_dir
from demisto_sdk.commands.upload.uploader import (
Uploader, parse_error_response, print_summary,
sort_directories_based_on_dependencies)
from packaging.version import parse
# Module-level buffer that side-effect helpers (save_file) write uploaded
# payloads into, so tests can inspect what was sent to the API client.
DATA = ''
# Taken from https://github.com/pytest-dev/pytest-bdd/issues/155
if not hasattr(inspect, '_orig_findsource'):
    @wraps(inspect.findsource)
    def findsource(*args, **kwargs):
        # Translate IndexError into IOError so inspect-based tooling does
        # not crash on out-of-range source lines.
        try:
            return inspect._orig_findsource(*args, **kwargs)
        except IndexError:
            raise IOError("Invalid line")
    # Keep the original and install the wrapper; the hasattr guard makes
    # this idempotent across repeated imports.
    inspect._orig_findsource = inspect.findsource
    inspect.findsource = findsource
@pytest.fixture
def demisto_client_configure(mocker):
    """Fixture: pin the detected server version to 6.0.0 and silence print."""
    mocker.patch("demisto_sdk.commands.upload.uploader.get_demisto_version", return_value=parse('6.0.0'))
    mocker.patch("demisto_sdk.commands.common.content.objects.pack_objects.integration.integration.get_demisto_version",
                 return_value=parse('6.0.0'))
    mocker.patch("demisto_sdk.commands.common.content.objects.pack_objects.script.script.get_demisto_version",
                 return_value=parse('6.0.0'))
    mocker.patch("builtins.print")
def test_upload_integration_positive(demisto_client_configure, mocker):
    """Uploading an integration package directory returns status code 0."""
    mocker.patch.object(demisto_client, 'configure', return_value="object")
    integration_pckg_path = f'{git_path()}/demisto_sdk/tests/test_files/content_repo_example/Integrations/Securonix/'
    integration_pckg_uploader = Uploader(input=integration_pckg_path, insecure=False, verbose=False)
    # Stub the API client so no real server is contacted.
    with patch.object(integration_pckg_uploader, 'client', return_value='ok'):
        assert integration_pckg_uploader.upload() == 0
def test_upload_script_positive(demisto_client_configure, mocker):
    """
    Given
        - A script named EntryWidgetNumberHostsXDR to upload
    When
        - Uploading a script
    Then
        - Ensure script is uploaded successfully
        - Ensure success upload message is printed as expected
    """
    mocker.patch.object(demisto_client, 'configure', return_value="object")
    script_name = "DummyScriptUnified.yml"
    script_path = f"{git_path()}/demisto_sdk/tests/test_files/Packs/DummyPack/Scripts/{script_name}"
    uploader = Uploader(input=script_path, insecure=False, verbose=False)
    # Replace the uploader's API client with a mock before uploading.
    mocker.patch.object(uploader, 'client')
    uploader.upload()
    assert [(script_name, FileType.SCRIPT.value)] == uploader.successfully_uploaded_files
def test_upload_playbook_positive(demisto_client_configure, mocker):
    """
    Given
        - A playbook named Cortex_XDR_Incident_Handling to upload
    When
        - Uploading a playbook
    Then
        - Ensure playbook is uploaded successfully
        - Ensure success upload message is printed as expected
    """
    mocker.patch.object(demisto_client, 'configure', return_value="object")
    playbook_name = "Cortex_XDR_Incident_Handling.yml"
    playbook_path = f"{git_path()}/demisto_sdk/tests/test_files/CortexXDR/Playbooks/{playbook_name}"
    uploader = Uploader(input=playbook_path, insecure=False, verbose=False)
    # Replace the uploader's API client with a mock before uploading.
    mocker.patch.object(uploader, 'client')
    uploader.upload()
    assert [(playbook_name, FileType.PLAYBOOK.value)] == uploader.successfully_uploaded_files
def test_upload_widget_positive(demisto_client_configure, mocker):
    """
    Given
        - A widget named ActiveIncidentsByRole to upload
    When
        - Uploading a widget
    Then
        - Ensure widget is uploaded successfully
        - Ensure success upload message is printed as expected
    """
    mocker.patch.object(demisto_client, 'configure', return_value="object")
    widget_name = "widget-ActiveIncidentsByRole.json"
    widget_path = f"{git_path()}/demisto_sdk/tests/test_files/Packs/DummyPack/Widgets/{widget_name}"
    uploader = Uploader(input=widget_path, insecure=False, verbose=False)
    # Replace the uploader's API client with a mock before uploading.
    mocker.patch.object(uploader, 'client')
    uploader.upload()
    assert [(widget_name, FileType.WIDGET.value)] == uploader.successfully_uploaded_files
def test_upload_dashboard_positive(demisto_client_configure, mocker):
    """
    Given
        - A dashboard named upload_test_dashboard.json to upload
    When
        - Uploading a dashboard
    Then
        - Ensure dashboard is uploaded successfully
        - Ensure success upload message is printed as expected
    """
    mocker.patch.object(demisto_client, 'configure', return_value="object")
    dashboard_name = "upload_test_dashboard.json"
    dashboard_path = f"{git_path()}/demisto_sdk/tests/test_files/Packs/DummyPack/Dashboards/{dashboard_name}"
    uploader = Uploader(input=dashboard_path, insecure=False, verbose=False)
    # Replace the uploader's API client with a mock before uploading.
    mocker.patch.object(uploader, 'client')
    uploader.upload()
    assert [('upload_test_dashboard.json', FileType.DASHBOARD.value)] == uploader.successfully_uploaded_files
def test_upload_layout_positive(demisto_client_configure, mocker):
    """
    Given
        - A layout named layout-details-test_bla-V2 to upload
    When
        - Uploading a layout
    Then
        - Ensure layout is uploaded successfully
        - Ensure success upload message is printed as expected
    """
    mocker.patch.object(demisto_client, 'configure', return_value="object")
    layout_name = "layout-details-test_bla-V2.json"
    layout_path = f"{git_path()}/demisto_sdk/tests/test_files/Packs/DummyPack/Layouts/{layout_name}"
    uploader = Uploader(input=layout_path, insecure=False, verbose=False)
    # Replace the uploader's API client with a mock before uploading.
    mocker.patch.object(uploader, 'client')
    uploader.upload()
    assert [(layout_name, FileType.LAYOUT.value)] == uploader.successfully_uploaded_files
def test_upload_incident_type_positive(demisto_client_configure, mocker):
    """
    Given
        - An incident type named Hello_World_Alert to upload
    When
        - Uploading incident type
    Then
        - Ensure incident type is uploaded successfully
        - Ensure success upload message is printed as expected
    """
    mocker.patch.object(demisto_client, 'configure', return_value="object")
    incident_type_name = "incidenttype-Hello_World_Alert.json"
    incident_type_path = f"{git_path()}/demisto_sdk/tests/test_files/Packs/DummyPack/IncidentTypes/{incident_type_name}"
    uploader = Uploader(input=incident_type_path, insecure=False, verbose=False)
    # Replace the uploader's API client with a mock before uploading.
    mocker.patch.object(uploader, 'client')
    uploader.upload()
    assert [(incident_type_name, FileType.INCIDENT_TYPE.value)] == uploader.successfully_uploaded_files
def test_upload_classifier_positive(demisto_client_configure, mocker):
    """
    Given
        - A classifier type named XDR_Alert_Count to upload
    When
        - Uploading classifier
    Then
        - Ensure classifier is uploaded successfully
        - Ensure success upload message is printed as expected
    """
    mocker.patch.object(demisto_client, 'configure', return_value="object")
    classifier_name = "classifier-aws_sns_test_classifier.json"
    classifier_path = f"{git_path()}/demisto_sdk/tests/test_files/Packs/DummyPack/Classifiers/{classifier_name}"
    uploader = Uploader(input=classifier_path, insecure=False, verbose=False)
    # Replace the uploader's API client with a mock before uploading.
    mocker.patch.object(uploader, 'client')
    uploader.upload()
    assert [(classifier_name, FileType.OLD_CLASSIFIER.value)] == uploader.successfully_uploaded_files
def test_upload_incident_field_positive(demisto_client_configure, mocker):
    """
    Given
        - An incident field named XDR_Alert_Count to upload
    When
        - Uploading incident field
    Then
        - Ensure incident field is uploaded successfully
        - Ensure success upload message is printed as expected
    """
    mocker.patch.object(demisto_client, 'configure', return_value="object")
    incident_field_name = "XDR_Alert_Count.json"
    incident_field_path = f"{git_path()}/demisto_sdk/tests/test_files/CortexXDR/IncidentFields/{incident_field_name}"
    uploader = Uploader(input=incident_field_path, insecure=False, verbose=False)
    # Replace the uploader's API client with a mock before uploading.
    mocker.patch.object(uploader, 'client')
    uploader.upload()
    assert [(incident_field_name, FileType.INCIDENT_FIELD.value)] == uploader.successfully_uploaded_files
def test_upload_indicator_field_positive(demisto_client_configure, mocker):
    """
    Given
        - An indicator field named DNS to upload
    When
        - Uploading indicator field
    Then
        - Ensure indicator field is uploaded successfully
        - Ensure success upload message is printed as expected
    """
    mocker.patch.object(demisto_client, 'configure', return_value='object')
    indicator_field_name = 'dns.json'
    indicator_field_path = f'{git_path()}/demisto_sdk/tests/test_files/CortexXDR/IndicatorFields/{indicator_field_name}'
    uploader = Uploader(input=indicator_field_path, insecure=False, verbose=False)
    # Replace the uploader's API client with a mock before uploading.
    mocker.patch.object(uploader, 'client')
    uploader.upload()
    assert [(indicator_field_name, FileType.INDICATOR_FIELD.value)] == uploader.successfully_uploaded_files
def test_upload_incident_type_correct_file_change(demisto_client_configure, mocker):
    """
    Given
        - An incident type named incidenttype-Hello_World_Alert to upload
    When
        - Uploading incident type
    Then
        - Ensure incident type is in the correct format for upload
    """
    def save_file(file):
        # Capture the payload the uploader would send into the module-level DATA.
        global DATA
        with open(file, 'r') as f:
            DATA = f.read()
        return
    class demisto_client_mocker():
        def import_incident_fields(self, file):
            pass
    mocker.patch.object(demisto_client, 'configure', return_value=demisto_client_mocker)
    incident_type_name = "incidenttype-Hello_World_Alert.json"
    incident_type_path = f"{git_path()}/demisto_sdk/tests/test_files/Packs/DummyPack/IncidentTypes/{incident_type_name}"
    uploader = Uploader(input=incident_type_path, insecure=False, verbose=False)
    uploader.client.import_incident_types_handler = MagicMock(side_effect=save_file)
    uploader.upload()
    with open(incident_type_path) as json_file:
        incident_type_data = json.load(json_file)
    # The API expects a JSON list of incident types; compare its first element.
    assert json.loads(DATA)[0] == incident_type_data
def test_upload_incident_field_correct_file_change(demisto_client_configure, mocker):
    """
    Given
        - An incident field named XDR_Alert_Count to upload
    When
        - Uploading incident field
    Then
        - Ensure incident field is in the correct format for upload
    """
    def save_file(file):
        # Capture the payload the uploader would send into the module-level DATA.
        global DATA
        with open(file, 'r') as f:
            DATA = f.read()
        return
    class demisto_client_mocker():
        def import_incident_fields(self, file):
            pass
    mocker.patch.object(demisto_client, 'configure', return_value=demisto_client_mocker)
    incident_field_name = "XDR_Alert_Count.json"
    incident_field_path = f"{git_path()}/demisto_sdk/tests/test_files/CortexXDR/IncidentFields/{incident_field_name}"
    uploader = Uploader(input=incident_field_path, insecure=False, verbose=False)
    uploader.client.import_incident_fields = MagicMock(side_effect=save_file)
    uploader.upload()
    with open(incident_field_path) as json_file:
        incident_field_data = json.load(json_file)
    # The API wraps fields in an "incidentFields" list; compare its first element.
    assert json.loads(DATA)['incidentFields'][0] == incident_field_data
def test_upload_an_integration_directory(demisto_client_configure, mocker):
    """
    Given
        - An integration directory called UploadTest
    When
        - Uploading an integration
    Then
        - Ensure integration is uploaded successfully
        - Ensure success upload message is printed as expected
    """
    mocker.patch.object(demisto_client, 'configure', return_value="object")
    integration_dir_name = "UploadTest"
    integration_path = f"{git_path()}/demisto_sdk/tests/test_files/Packs/DummyPack/Integrations/{integration_dir_name}"
    uploader = Uploader(input=integration_path, insecure=False, verbose=False)
    mocker.patch.object(uploader, 'client')
    uploader.upload()
    # Resolve the yml inside the directory; its basename is what gets reported.
    _, integration_yml_name = get_yml_paths_in_dir(integration_path)
    integration_yml_name = integration_yml_name.split('/')[-1]
    assert [(integration_yml_name, FileType.INTEGRATION.value)] == uploader.successfully_uploaded_files
def test_upload_a_script_directory(demisto_client_configure, mocker):
    """
    Given
        - A script directory called DummyScript
    When
        - Uploading an script
    Then
        - Ensure script is uploaded successfully
        - Ensure success upload message is printed as expected
    """
    mocker.patch.object(demisto_client, 'configure', return_value="object")
    script_dir_name = "DummyScript"
    scripts_path = f"{git_path()}/demisto_sdk/tests/test_files/Packs/DummyPack/Scripts/{script_dir_name}"
    uploader = Uploader(input=scripts_path, insecure=False, verbose=False)
    mocker.patch.object(uploader, 'client')
    uploader.upload()
    # Resolve the yml inside the directory; its basename is what gets reported.
    _, script_yml_name = get_yml_paths_in_dir(scripts_path)
    uploaded_file_name = script_yml_name.split('/')[-1]
    assert [(uploaded_file_name, FileType.SCRIPT.value)] == uploader.successfully_uploaded_files
def test_upload_incident_fields_directory(demisto_client_configure, mocker):
    """
    Given
        - An incident fields directory called DummyScript
    When
        - Uploading incident fields
    Then
        - Ensure incident fields are uploaded successfully
        - Ensure status code is as expected
        - Ensure amount of messages is as expected
    """
    mocker.patch.object(demisto_client, 'configure', return_value="object")
    # Silence the summary output printed via click.
    mocker.patch("click.secho")
    dir_name = "IncidentFields"
    incident_fields_path = f"{git_path()}/demisto_sdk/tests/test_files/Packs/DummyPack/{dir_name}/"
    uploader = Uploader(input=incident_fields_path, insecure=False, verbose=False)
    mocker.patch.object(uploader, 'client')
    assert uploader.upload() == 0
    # The fixture directory contains exactly three incident field files.
    assert len(uploader.successfully_uploaded_files) == 3
def test_upload_pack(demisto_client_configure, mocker):
    """
    Given
        - A pack called DummyPack
    When
        - Uploading pack
    Then
        - Ensure pack is uploaded successfully
        - Ensure status code is as expected
        - Check that all expected content entities that appear in the pack are reported as uploaded.
    """
    mocker.patch.object(demisto_client, 'configure', return_value="object")
    pack_path = f"{git_path()}/demisto_sdk/tests/test_files/Packs/DummyPack"
    uploader = Uploader(input=pack_path, insecure=False, verbose=False)
    mocker.patch.object(uploader, 'client')
    status_code = uploader.upload()
    # Every content entity bundled in the DummyPack fixture.
    expected_entities = ['DummyIntegration.yml', 'UploadTest.yml', 'DummyScriptUnified.yml',
                         'DummyScript.yml', 'DummyPlaybook.yml', 'DummyTestPlaybook.yml',
                         'incidenttype-Hello_World_Alert.json', 'incidentfield-Hello_World_ID.json',
                         'incidentfield-Hello_World_Type.json', 'incidentfield-Hello_World_Status.json',
                         'classifier-aws_sns_test_classifier.json', 'widget-ActiveIncidentsByRole.json',
                         'layout-details-test_bla-V2.json', 'upload_test_dashboard.json']
    assert status_code == 0
    # Compare only the reported names (first tuple element), ignoring order.
    uploaded_objects = [obj_pair[0] for obj_pair in uploader.successfully_uploaded_files]
    for entity in expected_entities:
        assert entity in uploaded_objects
def test_upload_invalid_path(demisto_client_configure, mocker):
    """Uploading a non-existent directory returns the error status code 1."""
    mocker.patch.object(demisto_client, 'configure', return_value="object")
    script_dir_path = f'{git_path()}/demisto_sdk/tests/test_files/content_repo_not_exists/Scripts/'
    script_dir_uploader = Uploader(input=script_dir_path, insecure=False, verbose=False)
    assert script_dir_uploader.upload() == 1
def test_file_not_supported(demisto_client_configure, mocker):
    """
    Given
        - A not supported (.py) file
    When
        - Uploading a file
    Then
        - Ensure uploaded failure message is printed as expected
    """
    mocker.patch.object(demisto_client, 'configure', return_value="object")
    file_path = f"{git_path()}/demisto_sdk/tests/test_files/Packs/DummyPack/Scripts/DummyScript/DummyScript.py"
    uploader = Uploader(input=file_path, insecure=False, verbose=False)
    mocker.patch.object(uploader, 'client')
    status_code = uploader.upload()
    assert status_code == 1
    # The unsupported file is recorded as a failed upload by its basename.
    assert uploader.failed_uploaded_files[0][0] == 'DummyScript.py'
def test_parse_error_response_ssl(demisto_client_configure, mocker):
    """
    Given
        - An API exception raised by SSL failure
    When
        - Parsing error response
    Then
        - Ensure a error message is parsed successfully
        - Verify SSL error message printed as expected
    """
    file_type = "playbook"
    file_name = "SomePlaybookName.yml"
    # Simulate the reason string demisto_client raises on certificate failure.
    api_exception = ApiException(reason="[SSL: CERTIFICATE_VERIFY_FAILED]")
    message = parse_error_response(error=api_exception, file_type=file_type, file_name=file_name)
    assert message == '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: self signed certificate.\n' \
                      'Try running the command with --insecure flag.'
def test_parse_error_response_connection(demisto_client_configure, mocker):
    """
    Given
        - An API exception raised by connection failure
    When
        - Parsing error response
    Then
        - Ensure a error message is parsed successfully
        - Verify connection error message printed as expected
    """
    file_type = "widget"
    file_name = "SomeWidgetName.json"
    # Simulate the reason string raised when the server is unreachable.
    api_exception = ApiException(reason="Failed to establish a new connection:")
    error_message = parse_error_response(error=api_exception, file_type=file_type, file_name=file_name)
    assert error_message == 'Failed to establish a new connection: Connection refused.\n' \
                            'Try checking your BASE url configuration.'
def test_parse_error_response_forbidden(demisto_client_configure, mocker):
    """
    Given
        - An API exception raised by forbidden failure
    When
        - Parsing error response
    Then
        - Ensure a error message is parsed successfully
        - Verify forbidden error message printed as expected
    """
    file_type = "incident field"
    file_name = "SomeIncidentFieldName.json"
    api_exception = ApiException(
        reason="Forbidden",
    )
    # A 403 response carries a JSON body whose "error" field is surfaced.
    api_exception.body = json.dumps({
        "status": 403,
        "error": "Error message"
    })
    message = parse_error_response(error=api_exception, file_type=file_type, file_name=file_name)
    assert message == "Error message\nTry checking your API key configuration."
def test_sort_directories_based_on_dependencies(demisto_client_configure):
    """
    Given
        - An empty (no given input path) Uploader object
        - List of non-sorted (based on dependencies) content directories
    When
        - Running sort_directories_based_on_dependencies on the list
    Then
        - Ensure a sorted listed of the directories is returned
    """
    dir_list = [TEST_PLAYBOOKS_DIR, INTEGRATIONS_DIR, SCRIPTS_DIR, CLASSIFIERS_DIR, LAYOUTS_DIR]
    sorted_dir_list = sort_directories_based_on_dependencies(dir_list)
    # Integrations/scripts must be uploaded before the content that uses them.
    assert sorted_dir_list == [INTEGRATIONS_DIR, SCRIPTS_DIR, TEST_PLAYBOOKS_DIR,
                               CLASSIFIERS_DIR, LAYOUTS_DIR]
def test_print_summary_successfully_uploaded_files(demisto_client_configure, mocker):
    """
    Given
        - An empty (no given input path) Uploader object
        - A successfully uploaded integration named SomeIntegrationName
    When
        - Printing summary of uploaded files
    Then
        - Ensure uploaded successfully message is printed as expected
    """
    # Patch before importing so the local `secho` name is the mock object.
    mocker.patch("click.secho")
    from click import secho
    successfully_uploaded_files = [("SomeIntegrationName", "Integration")]
    print_summary(successfully_uploaded_files, [], [])
    expected_upload_summary_title = '\n\nUPLOAD SUMMARY:'
    expected_successfully_uploaded_files_title = '\nSUCCESSFUL UPLOADS:'
    expected_successfully_uploaded_files = """╒═════════════════════╤═════════════╕
│ NAME │ TYPE │
╞═════════════════════╪═════════════╡
│ SomeIntegrationName │ Integration │
╘═════════════════════╧═════════════╛
"""
    # verify exactly 3 calls to print_color
    assert secho.call_count == 3
    assert secho.call_args_list[0][0][0] == expected_upload_summary_title
    assert secho.call_args_list[1][0][0] == expected_successfully_uploaded_files_title
    assert secho.call_args_list[2][0][0] == expected_successfully_uploaded_files
def test_print_summary_failed_uploaded_files(demisto_client_configure, mocker):
    """
    Given
        - A uploaded script named SomeScriptName which failed to upload
    When
        - Printing summary of uploaded files
    Then
        - Ensure uploaded failure message is printed as expected
    """
    # Patch before importing so the local `secho` name is the mock object.
    mocker.patch("click.secho")
    from click import secho
    failed_uploaded_files = [("SomeScriptName", "Script", "Some Error")]
    print_summary([], [], failed_uploaded_files)
    expected_upload_summary_title = '\n\nUPLOAD SUMMARY:'
    expected_failed_uploaded_files_title = '\nFAILED UPLOADS:'
    expected_failed_uploaded_files = """╒════════════════╤════════╤════════════╕
│ NAME │ TYPE │ ERROR │
╞════════════════╪════════╪════════════╡
│ SomeScriptName │ Script │ Some Error │
╘════════════════╧════════╧════════════╛
"""
    # verify exactly 3 calls to print_color
    assert secho.call_count == 3
    assert secho.call_args_list[0][0][0] == expected_upload_summary_title
    assert secho.call_args_list[1][0][0] == expected_failed_uploaded_files_title
    assert secho.call_args_list[2][0][0] == expected_failed_uploaded_files
def test_print_summary_unuploaded_files(demisto_client_configure, mocker):
    """
    Given
    - A script named SomeScriptName that was skipped due to a version mismatch
    When
    - Printing the summary of uploaded files
    Then
    - Ensure the version-mismatch message is printed as expected
    """
    mocker.patch("click.secho")
    from click import secho
    mismatch_entries = [("SomeScriptName", "Script", "6.0.0", "0.0.0", "5.0.0")]
    print_summary([], mismatch_entries, [])
    mismatch_table = """╒════════════════╤════════╤═════════════════╤═════════════════════╤═══════════════════╕
│ NAME           │ TYPE   │ XSOAR Version   │ FILE_FROM_VERSION   │ FILE_TO_VERSION   │
╞════════════════╪════════╪═════════════════╪═════════════════════╪═══════════════════╡
│ SomeScriptName │ Script │ 6.0.0           │ 0.0.0               │ 5.0.0             │
╘════════════════╧════════╧═════════════════╧═════════════════════╧═══════════════════╛
"""
    expected_sections = [
        '\n\nUPLOAD SUMMARY:',
        '\nNOT UPLOADED DUE TO VERSION MISMATCH:',
        mismatch_table,
    ]
    # One secho call per printed section -- no more, no less.
    assert secho.call_count == len(expected_sections)
    for index, expected_text in enumerate(expected_sections):
        assert secho.call_args_list[index][0][0] == expected_text
| 37.914013 | 127 | 0.700882 |
206159303a3b50d48fc8284fd2ba82ab702c4638 | 3,500 | py | Python | utils.py | fatyoge/hive_flask_restful | 5746d33beab5577c112b1b3e6e2b036c164cc3e2 | [
"MIT"
] | 3 | 2019-07-16T14:03:39.000Z | 2021-02-15T04:57:55.000Z | utils.py | fatyoge/hive_flask_restful | 5746d33beab5577c112b1b3e6e2b036c164cc3e2 | [
"MIT"
] | null | null | null | utils.py | fatyoge/hive_flask_restful | 5746d33beab5577c112b1b3e6e2b036c164cc3e2 | [
"MIT"
] | null | null | null | import re
from sqlalchemy import *
from sqlalchemy.sql import sqltypes
from sqlalchemy import desc, asc
from flask_restful import fields
from collections import OrderedDict
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s: %(message)s')
def singleton(cls):
    """Class decorator binding the class name to its one shared instance.

    The instance also carries a ``__call__`` attribute so that looking it up
    and invoking it hands back the very same object.
    """
    sole_instance = cls()
    sole_instance.__call__ = lambda: sole_instance
    return sole_instance
@singleton
class SQLFormator:
    """Translate compact query-string fragments into SQLAlchemy constructs.

    Because of the ``@singleton`` decorator the module-level name
    ``SQLFormator`` is bound to the single shared instance, not the class;
    all functionality is exposed through classmethods regardless.
    """
    # Column types whose literal values must be single-quoted in a WHERE clause.
    string_type = [
        sqltypes.String,
        sqltypes.DateTime,
        sqltypes.Date,
    ]
    # Query-string comparison keywords -> SQL comparison operators.
    operator_map = {
        'eq':'=',
        'ne':'!=',
        'gt':'>',
        'gte':'>=',
        'lt':'<',
        'lte':'<=',
    }
    # Aggregate keywords accepted in select fields -> SQLAlchemy functions.
    func_map = {
        'sum' : func.sum,
        'count': func.count,
        'max' : func.max,
        'min' : func.min,
        'avg' : func.avg,
        'median' : func.median,
    }
    # Python column value types -> flask_restful marshalling field types.
    type_map = {
        str : fields.String,
        int : fields.Integer,
        float : fields.Float,
    }
    @classmethod
    def _whereSingleTransform(cls, txt, table):
        """Render one 'col,op,value' triple (or a ~and/~or/~xor joiner) as SQL.

        Values for string-like columns are single-quoted.  NOTE(review): the
        value is interpolated directly into the SQL text, so this is only
        safe for trusted input.
        """
        ops = ['~or','~and','~xor']
        if str(txt).strip() in ops:
            # Joiners arrive as '~and' etc.; emit them as bare SQL keywords.
            return " {} ".format(str(txt).strip().replace('~',''))
        cond = txt.split(',')
        col = cond[0]
        oper = cls.operator_map[cond[1]]
        value = cond[2]
        if col in table.alias().columns and type(table.alias().columns[col].type) in cls.string_type:
            value = "'{}'".format(value)
        return "{} {} {}".format(cond[0], oper, value)
    @classmethod
    def whereTransform(cls, txt, table):
        """Rewrite each parenthesis-free fragment of *txt* into SQL in place."""
        wheres = re.findall(r'[^()]+',txt)
        for subwhere in wheres:
            tmp = cls._whereSingleTransform(subwhere, table)
            # Replace only the first occurrence so repeated fragments keep
            # their positional meaning.
            txt = str(txt).replace(subwhere,tmp,1)
        return txt
    @classmethod
    def selectTransform(cls, fields, table):
        """Return the columns/aggregates named by *fields* (all columns if None).

        NOTE(review): the parameter name shadows the module-level ``fields``
        import from flask_restful within this method.
        """
        cols = []
        #resource_type = OrderedDict()
        if fields is None:
            cols = table.c.values()
        else:
            fields = fields.split(',')
            for field in fields:
                col = cls._selectSingleTransform(field, table)
                cols.append(col)
        #for col in cols:
        #    resource_type[col.name] = cls.type_map[col.type.python_type]
        #return cols, resource_type
        return cols
    @classmethod
    def _selectSingleTransform(cls, field, table):
        """Resolve one select item: a plain column name or 'func(col)'.

        Implicitly returns None for anything matching neither form.
        """
        regs = re.findall(r'[^()]+',field)
        if len(regs) == 1:
            return table.c[field]
        elif len(regs) == 2 and regs[0] in cls.func_map:
            col_name = regs[1]
            # Fall back to the raw name (e.g. '*') when it is not a real column.
            col = cls.func_map[regs[0]](table.columns[col_name] if col_name in table.columns else col_name)
            #col.name = '{}_{}'.format(regs[0], regs[1]).replace('*','table')
            return col
    @classmethod
    def orderbyTransform(cls, order_by):
        """Turn 'a,-b' style order specs into asc()/desc() clauses."""
        return [desc(x[1:]) if x[0]=='-' else asc(x) for x in order_by.split(',')]
    @classmethod
    def getResourceType(cls, query, table):
        """Map each column of *query* to its flask_restful field type.

        Columns present in *table* are typed from the table definition;
        computed columns (aggregates) fall back to their own type.
        """
        resource_type = OrderedDict()
        for col in query.c:
            #logging.info('{} in table.c: {}'.format(col, str(col) in table.c))
            #if str(col) in table.c:
            #    resource_type[str(col)] = cls.type_map[table.c[str(col)].type.python_type]
            #else:
            #    resource_type[str(col)] = cls.type_map[col.type.python_type]
            resource_type[str(col)] = cls.type_map[table.c[str(col)].type.python_type] if str(col) in table.c else cls.type_map[col.type.python_type]
        return resource_type
| 32.407407 | 149 | 0.559714 |
47e529929bc628ec196326f12126c735dcd0b6e9 | 285 | py | Python | src/allennlp_utils/data/tokenizer.py | wj-Mcat/allennlp-utils | 817132fca5bf358c95c1b340658e1a1925ac45c3 | [
"Apache-2.0"
] | null | null | null | src/allennlp_utils/data/tokenizer.py | wj-Mcat/allennlp-utils | 817132fca5bf358c95c1b340658e1a1925ac45c3 | [
"Apache-2.0"
] | null | null | null | src/allennlp_utils/data/tokenizer.py | wj-Mcat/allennlp-utils | 817132fca5bf358c95c1b340658e1a1925ac45c3 | [
"Apache-2.0"
] | null | null | null | from typing import List
import jieba
from allennlp.data import Tokenizer, Token
@Tokenizer.register("utils-jieba")
class JiebaTokenizer(Tokenizer):
    """AllenNLP tokenizer that segments text with jieba's exact-mode cut."""

    def tokenize(self, text: str) -> List[Token]:
        # Wrap every jieba segment in an AllenNLP Token.
        return [Token(word) for word in jieba.lcut(text)]
| 23.75 | 61 | 0.715789 |
10c264b60c571abe3f0e823bdd415473fae0bad1 | 4,165 | py | Python | game/server/server.py | AntonYermilov/progue | 7f382208c9efc904cff9d8df4750606039801d45 | [
"MIT"
] | null | null | null | game/server/server.py | AntonYermilov/progue | 7f382208c9efc904cff9d8df4750606039801d45 | [
"MIT"
] | 6 | 2019-03-25T21:11:28.000Z | 2019-06-21T16:21:47.000Z | game/server/server.py | AntonYermilov/progue | 7f382208c9efc904cff9d8df4750606039801d45 | [
"MIT"
] | 1 | 2021-12-22T22:03:47.000Z | 2021-12-22T22:03:47.000Z | import threading
import time
from concurrent import futures
import numpy as np
import grpc
from game.client.model.action import *
from game.server.game import Game
from game.util import serialize_object
from .generated import progue_pb2_grpc, progue_pb2
class ProgueServer(progue_pb2_grpc.ProgueServerServicer):
    """gRPC servicer hosting any number of concurrent Progue games.

    The shared ``games`` mapping (game id -> Game) is only touched while
    holding ``self.lock`` so concurrent RPC handler threads stay consistent.
    """

    def __init__(self):
        super().__init__()
        self.games = dict()  # game id -> Game, guarded by self.lock
        self.lock = threading.RLock()

    def get_state(self, request, context):
        """Return the serialized game state as seen by the requesting player."""
        with self.lock:
            game = self.games[request.game_id.id]
            state = game.get_state(request.player.id)
            return progue_pb2.StateResponse(state=progue_pb2.State(state=serialize_object(state)))

    def make_turn(self, request, context):
        """Apply one player action (move / inventory / quit) to its game."""
        # BUG FIX: the original compared the protobuf int with ``is``
        # (``action_type is 0``), which only works because CPython caches
        # small ints and raises a SyntaxWarning on Python 3.8+.  Use ``==``.
        if request.action.action_type == 0:
            action_type = ActionType.MOVE_ACTION
            action_desc = MoveAction(row=request.action.move_action.row,
                                     column=request.action.move_action.col)
        elif request.action.action_type == 1:
            action_type = ActionType.INVENTORY_ACTION
            action_desc = InventoryAction(item_id=request.action.inventory_action.item_id,
                                          action=request.action.inventory_action.action_type)
        elif request.action.action_type == 2:
            self.quit(request.game_id.id, request.player.id)
            return progue_pb2.MakeTurnResponse()
        else:
            print('Error: unknown action type')
            return None
        action = Action(type=action_type, desc=action_desc)
        player_id = request.player.id
        with self.lock:
            # BUG FIX: use .get() so an unknown/already-removed game id
            # reaches the None guard below instead of raising KeyError out
            # of the handler (the original guard was unreachable).
            game = self.games.get(request.game_id.id)
            if game is None:
                return None
            try:
                game.on_make_turn(player_id, action)
            except Exception as e:
                print(e)
        return progue_pb2.MakeTurnResponse()

    def quit(self, game_id, player_id):
        """Remove the player from the game; drop the game once it empties."""
        with self.lock:
            game = self.games[game_id]
            if game.player_quit(player_id):
                del self.games[game_id]

    def list_games(self, request, context):
        """List the ids of all currently running games."""
        response = progue_pb2.ListGamesResponse()
        with self.lock:
            for game_id in self.games:
                response.game_ids.append(progue_pb2.GameId(id=game_id))
        return response

    def connect_to_game(self, request, context):
        """Join an existing game, returning the freshly assigned player id."""
        game_id = request.game_id.id
        with self.lock:
            if game_id in self.games:
                game = self.games[game_id]
                player_id = game.on_connect()
                return progue_pb2.ConnectToGameResponse(successfully_connected=True,
                                                        player=progue_pb2.Player(id=player_id))
            else:
                return progue_pb2.ConnectToGameResponse(successfully_connected=False)

    def create_game(self, request, context):
        """Create a new game under a random id and join the caller to it."""
        game_id = 'game ' + str(np.random.randint(1 << 30))
        with self.lock:
            if game_id not in self.games:
                game = Game(singleplayer=request.singleplayer, load=request.load)
                self.games[game_id] = game
                player_id = game.on_connect()
                player = progue_pb2.Player(id=player_id)
                response = progue_pb2.CreateGameResponse(successfully_created=True,
                                                         player=player,
                                                         id=game_id)
                return response
            else:
                return progue_pb2.CreateGameResponse(successfully_created=False)
def start_server(port: str):
    """Spin up the gRPC server on ``port`` and block until interrupted."""
    executor = futures.ThreadPoolExecutor(max_workers=10)
    grpc_server = grpc.server(executor, options=(('grpc.so_reuseport', 0),))
    progue_pb2_grpc.add_ProgueServerServicer_to_server(ProgueServer(), grpc_server)
    bound = grpc_server.add_insecure_port(f'0.0.0.0:{port}')
    grpc_server.start()
    print(f'Serving on {bound}')
    try:
        # Park the main thread; the executor's threads handle RPCs.
        while True:
            time.sleep(20000)
    except KeyboardInterrupt:
        print('Keyboard interrupt, shutting server down.')
    finally:
        grpc_server.stop(0)
| 36.217391 | 105 | 0.605282 |
b11ef5bedc44f7bbf117ed48fbdedf7c36de3c23 | 1,719 | py | Python | tempest/api/compute/floating_ips/test_list_floating_ips_negative.py | rcbops-qe/tempest | 88960aa32c473b64072671541a136dbae41b1d4c | [
"Apache-2.0"
] | 3 | 2015-03-03T15:43:06.000Z | 2016-10-24T06:12:40.000Z | tempest/api/compute/floating_ips/test_list_floating_ips_negative.py | rcbops-qe/tempest | 88960aa32c473b64072671541a136dbae41b1d4c | [
"Apache-2.0"
] | null | null | null | tempest/api/compute/floating_ips/test_list_floating_ips_negative.py | rcbops-qe/tempest | 88960aa32c473b64072671541a136dbae41b1d4c | [
"Apache-2.0"
] | 1 | 2021-11-10T07:21:02.000Z | 2021-11-10T07:21:02.000Z | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest import test
CONF = config.CONF
class FloatingIPDetailsNegativeTestJSON(base.BaseV2ComputeTest):
    """Negative tests for the floating-IP details API (JSON interface)."""

    @classmethod
    def setUpClass(cls):
        super(FloatingIPDetailsNegativeTestJSON, cls).setUpClass()
        cls.client = cls.floating_ips_client

    @test.attr(type=['negative', 'gate'])
    @test.services('network')
    def test_get_nonexistent_floating_ip_details(self):
        # Fetching details for a floating IP that was never allocated must
        # fail with NotFound.  Fabricate an id of the right shape for the
        # backing service: neutron uses UUIDs, nova-network plain integers.
        if CONF.service_available.neutron:
            bogus_ip_id = str(uuid.uuid4())
        else:
            bogus_ip_id = data_utils.rand_int_id(start=999)
        self.assertRaises(exceptions.NotFound,
                          self.client.get_floating_ip_details,
                          bogus_ip_id)
class FloatingIPDetailsNegativeTestXML(FloatingIPDetailsNegativeTestJSON):
    """Run the same negative tests through the XML interface."""
    _interface = 'xml'
| 34.38 | 78 | 0.724258 |
f6323885b087c5475387cc0345e3a659d3527fe7 | 16,785 | py | Python | src/flake8/processor.py | thijskramer/flake8 | bcb88c4c3e24cb75d71da1796ae2e5023b59b282 | [
"MIT"
] | 2,013 | 2015-01-02T20:46:49.000Z | 2022-03-31T20:10:41.000Z | src/flake8/processor.py | thijskramer/flake8 | bcb88c4c3e24cb75d71da1796ae2e5023b59b282 | [
"MIT"
] | 1,413 | 2015-02-07T07:34:40.000Z | 2022-03-23T16:27:14.000Z | src/flake8/processor.py | thijskramer/flake8 | bcb88c4c3e24cb75d71da1796ae2e5023b59b282 | [
"MIT"
] | 241 | 2015-03-23T17:04:45.000Z | 2022-03-30T21:51:02.000Z | """Module containing our file processor that tokenizes a file for checks."""
import argparse
import ast
import contextlib
import logging
import tokenize
from typing import Any
from typing import Dict
from typing import Generator
from typing import List
from typing import Optional
from typing import Tuple
import flake8
from flake8 import defaults
from flake8 import utils
LOG = logging.getLogger(__name__)
PyCF_ONLY_AST = 1024
NEWLINE = frozenset([tokenize.NL, tokenize.NEWLINE])
SKIP_TOKENS = frozenset(
[tokenize.NL, tokenize.NEWLINE, tokenize.INDENT, tokenize.DEDENT]
)
_Token = Tuple[int, str, Tuple[int, int], Tuple[int, int], str]
_LogicalMapping = List[Tuple[int, Tuple[int, int]]]
_Logical = Tuple[List[str], List[str], _LogicalMapping]
class FileProcessor:
    """Processes a file and holds state.
    This processes a file by generating tokens, logical and physical lines,
    and AST trees. This also provides a way of passing state about the file
    to checks expecting that state. Any public attribute on this object can
    be requested by a plugin. The known public attributes are:
    - :attr:`blank_before`
    - :attr:`blank_lines`
    - :attr:`checker_state`
    - :attr:`indent_char`
    - :attr:`indent_level`
    - :attr:`line_number`
    - :attr:`logical_line`
    - :attr:`max_line_length`
    - :attr:`max_doc_length`
    - :attr:`multiline`
    - :attr:`noqa`
    - :attr:`previous_indent_level`
    - :attr:`previous_logical`
    - :attr:`previous_unindented_logical_line`
    - :attr:`tokens`
    - :attr:`file_tokens`
    - :attr:`total_lines`
    - :attr:`verbose`
    """
    #: always ``False``, included for compatibility
    noqa = False
    def __init__(
        self,
        filename: str,
        options: argparse.Namespace,
        lines: Optional[List[str]] = None,
    ) -> None:
        """Initialize our file processor.
        :param str filename:
            Name of the file to process
        :param options:
            Parsed command-line options controlling processing.
        :param lines:
            Pre-read file contents; when omitted the file named by
            ``filename`` (or stdin) is read here.
        """
        self.options = options
        self.filename = filename
        self.lines = lines if lines is not None else self.read_lines()
        self.strip_utf_bom()
        # Defaults for public attributes
        #: Number of preceding blank lines
        self.blank_before = 0
        #: Number of blank lines
        self.blank_lines = 0
        #: Checker states for each plugin?
        self._checker_states: Dict[str, Dict[Any, Any]] = {}
        #: Current checker state
        self.checker_state: Dict[Any, Any] = {}
        #: User provided option for hang closing
        self.hang_closing = options.hang_closing
        #: Character used for indentation
        self.indent_char: Optional[str] = None
        #: Current level of indentation
        self.indent_level = 0
        #: Number of spaces used for indentation
        self.indent_size = options.indent_size
        #: Line number in the file
        self.line_number = 0
        #: Current logical line
        self.logical_line = ""
        #: Maximum line length as configured by the user
        self.max_line_length = options.max_line_length
        #: Maximum docstring / comment line length as configured by the user
        self.max_doc_length = options.max_doc_length
        #: Whether the current physical line is multiline
        self.multiline = False
        #: Previous level of indentation
        self.previous_indent_level = 0
        #: Previous logical line
        self.previous_logical = ""
        #: Previous unindented (i.e. top-level) logical line
        self.previous_unindented_logical_line = ""
        #: Current set of tokens
        self.tokens: List[_Token] = []
        #: Total number of lines in the file
        self.total_lines = len(self.lines)
        #: Verbosity level of Flake8
        self.verbose = options.verbose
        #: Statistics dictionary
        self.statistics = {"logical lines": 0}
        self._file_tokens: Optional[List[_Token]] = None
        # map from line number to the line we'll search for `noqa` in
        self._noqa_line_mapping: Optional[Dict[int, str]] = None
    @property
    def file_tokens(self) -> List[_Token]:
        """Return the complete set of tokens for a file (lazily computed)."""
        if self._file_tokens is None:
            line_iter = iter(self.lines)
            self._file_tokens = list(
                tokenize.generate_tokens(lambda: next(line_iter))
            )
        return self._file_tokens
    @contextlib.contextmanager
    def inside_multiline(
        self, line_number: int
    ) -> Generator[None, None, None]:
        """Context-manager to toggle the multiline attribute."""
        self.line_number = line_number
        self.multiline = True
        yield
        self.multiline = False
    def reset_blank_before(self) -> None:
        """Reset the blank_before attribute to zero."""
        self.blank_before = 0
    def delete_first_token(self) -> None:
        """Delete the first token in the list of tokens."""
        del self.tokens[0]
    def visited_new_blank_line(self) -> None:
        """Note that we visited a new blank line."""
        self.blank_lines += 1
    def update_state(self, mapping: _LogicalMapping) -> None:
        """Update the indent level based on the logical line mapping."""
        (start_row, start_col) = mapping[0][1]
        start_line = self.lines[start_row - 1]
        self.indent_level = expand_indent(start_line[:start_col])
        if self.blank_before < self.blank_lines:
            self.blank_before = self.blank_lines
    def update_checker_state_for(self, plugin: Dict[str, Any]) -> None:
        """Update the checker_state attribute for the plugin."""
        if "checker_state" in plugin["parameters"]:
            self.checker_state = self._checker_states.setdefault(
                plugin["name"], {}
            )
    def next_logical_line(self) -> None:
        """Record the previous logical line.
        This also resets the tokens list and the blank_lines count.
        """
        if self.logical_line:
            self.previous_indent_level = self.indent_level
            self.previous_logical = self.logical_line
            if not self.indent_level:
                self.previous_unindented_logical_line = self.logical_line
        self.blank_lines = 0
        self.tokens = []
    def build_logical_line_tokens(self) -> _Logical:
        """Build the mapping, comments, and logical line lists."""
        logical = []
        comments = []
        mapping: _LogicalMapping = []
        length = 0
        previous_row = previous_column = None
        for token_type, text, start, end, line in self.tokens:
            if token_type in SKIP_TOKENS:
                continue
            if not mapping:
                mapping = [(0, start)]
            if token_type == tokenize.COMMENT:
                comments.append(text)
                continue
            if token_type == tokenize.STRING:
                # Blank out string contents so checks can't match inside them.
                text = mutate_string(text)
            # Re-insert the separation between this token and the previous
            # one so the stitched logical line reads like the source.
            if previous_row:
                (start_row, start_column) = start
                if previous_row != start_row:
                    row_index = previous_row - 1
                    column_index = previous_column - 1
                    previous_text = self.lines[row_index][column_index]
                    if previous_text == "," or (
                        previous_text not in "{[(" and text not in "}])"
                    ):
                        text = f" {text}"
                elif previous_column != start_column:
                    text = line[previous_column:start_column] + text
            logical.append(text)
            length += len(text)
            mapping.append((length, end))
            (previous_row, previous_column) = end
        return comments, logical, mapping
    def build_ast(self) -> ast.AST:
        """Build an abstract syntax tree from the list of lines."""
        return ast.parse("".join(self.lines))
    def build_logical_line(self) -> Tuple[str, str, _LogicalMapping]:
        """Build a logical line from the current tokens list."""
        comments, logical, mapping_list = self.build_logical_line_tokens()
        joined_comments = "".join(comments)
        self.logical_line = "".join(logical)
        self.statistics["logical lines"] += 1
        return joined_comments, self.logical_line, mapping_list
    def split_line(self, token: _Token) -> Generator[str, None, None]:
        """Split a physical line's line based on new-lines.
        This also auto-increments the line number for the caller.
        """
        for line in token[1].split("\n")[:-1]:
            yield line
            self.line_number += 1
    def keyword_arguments_for(
        self,
        parameters: Dict[str, bool],
        arguments: Optional[Dict[str, Any]] = None,
    ) -> Dict[str, Any]:
        """Generate the keyword arguments for a list of parameters."""
        if arguments is None:
            arguments = {}
        for param, required in parameters.items():
            if param in arguments:
                continue
            try:
                arguments[param] = getattr(self, param)
            except AttributeError as exc:
                # Required parameters abort the run; optional ones only warn.
                if required:
                    LOG.exception(exc)
                    raise
                else:
                    LOG.warning(
                        'Plugin requested optional parameter "%s" '
                        "but this is not an available parameter.",
                        param,
                    )
        return arguments
    def generate_tokens(self) -> Generator[_Token, None, None]:
        """Tokenize the file and yield the tokens."""
        for token in tokenize.generate_tokens(self.next_line):
            if token[2][0] > self.total_lines:
                break
            self.tokens.append(token)
            yield token
    def _noqa_line_range(self, min_line: int, max_line: int) -> Dict[int, str]:
        """Map each line number in [min_line, max_line] to their joined text."""
        line_range = range(min_line, max_line + 1)
        joined = "".join(self.lines[min_line - 1 : max_line])
        return dict.fromkeys(line_range, joined)
    def noqa_line_for(self, line_number: int) -> Optional[str]:
        """Retrieve the line which will be used to determine noqa."""
        if self._noqa_line_mapping is None:
            try:
                file_tokens = self.file_tokens
            except (tokenize.TokenError, SyntaxError):
                # if we failed to parse the file tokens, we'll always fail in
                # the future, so set this so the code does not try again
                self._noqa_line_mapping = {}
            else:
                ret = {}
                # Track the physical extent of each logical line; a NEWLINE/NL
                # token closes it and maps all of its lines to the joined text.
                min_line = len(self.lines) + 2
                max_line = -1
                for tp, _, (s_line, _), (e_line, _), _ in file_tokens:
                    if tp == tokenize.ENDMARKER:
                        break
                    min_line = min(min_line, s_line)
                    max_line = max(max_line, e_line)
                    if tp in (tokenize.NL, tokenize.NEWLINE):
                        ret.update(self._noqa_line_range(min_line, max_line))
                        min_line = len(self.lines) + 2
                        max_line = -1
                # in newer versions of python, a `NEWLINE` token is inserted
                # at the end of the file even if it doesn't have one.
                # on old pythons, they will not have hit a `NEWLINE`
                if max_line != -1:
                    ret.update(self._noqa_line_range(min_line, max_line))
                self._noqa_line_mapping = ret
        # NOTE(sigmavirus24): Some plugins choose to report errors for empty
        # files on Line 1. In those cases, we shouldn't bother trying to
        # retrieve a physical line (since none exist).
        return self._noqa_line_mapping.get(line_number)
    def next_line(self) -> str:
        """Get the next line from the list."""
        if self.line_number >= self.total_lines:
            return ""
        line = self.lines[self.line_number]
        self.line_number += 1
        # Remember the first indentation character seen in the file.
        if self.indent_char is None and line[:1] in defaults.WHITESPACE:
            self.indent_char = line[0]
        return line
    def read_lines(self) -> List[str]:
        """Read the lines for this file checker."""
        if self.filename is None or self.filename == "-":
            self.filename = self.options.stdin_display_name or "stdin"
            lines = self.read_lines_from_stdin()
        else:
            lines = self.read_lines_from_filename()
        return lines
    def read_lines_from_filename(self) -> List[str]:
        """Read the lines for a file."""
        try:
            with tokenize.open(self.filename) as fd:
                return fd.readlines()
        except (SyntaxError, UnicodeError):
            # If we can't detect the codec with tokenize.detect_encoding, or
            # the detected encoding is incorrect, just fallback to latin-1.
            with open(self.filename, encoding="latin-1") as fd:
                return fd.readlines()
    def read_lines_from_stdin(self) -> List[str]:
        """Read the lines from standard in."""
        return utils.stdin_get_lines()
    def should_ignore_file(self) -> bool:
        """Check if ``flake8: noqa`` is in the file to be ignored.
        :returns:
            True if a line matches :attr:`defaults.NOQA_FILE`,
            otherwise False
        :rtype:
            bool
        """
        if not self.options.disable_noqa and any(
            defaults.NOQA_FILE.match(line) for line in self.lines
        ):
            return True
        elif any(defaults.NOQA_FILE.search(line) for line in self.lines):
            LOG.warning(
                "Detected `flake8: noqa` on line with code. To ignore an "
                "error on a line use `noqa` instead."
            )
            return False
        else:
            return False
    def strip_utf_bom(self) -> None:
        """Strip the UTF bom from the lines of the file."""
        if not self.lines:
            # If we have nothing to analyze quit early
            return
        first_byte = ord(self.lines[0][0])
        if first_byte not in (0xEF, 0xFEFF):
            return
        # If the first byte of the file is a UTF-8 BOM, strip it
        if first_byte == 0xFEFF:
            self.lines[0] = self.lines[0][1:]
        elif self.lines[0][:3] == "\xEF\xBB\xBF":
            self.lines[0] = self.lines[0][3:]
def is_eol_token(token: _Token) -> bool:
    """Check if the token is an end-of-line token."""
    if token[0] in NEWLINE:
        return True
    # A trailing backslash continuation also terminates the physical line.
    remainder = token[4][token[3][1]:]
    return remainder.lstrip() == "\\\n"
def is_multiline_string(token: _Token) -> bool:
    """Check whether this token is a string spanning multiple lines."""
    token_type, token_text = token[0], token[1]
    return token_type == tokenize.STRING and "\n" in token_text
def token_is_newline(token: _Token) -> bool:
    """Report whether the token's type is one of the newline token types."""
    token_type = token[0]
    return token_type in NEWLINE
def count_parentheses(current_parentheses_count: int, token_text: str) -> int:
    """Return the running bracket depth after seeing ``token_text``."""
    delta = 0
    if token_text in "([{":  # nosec
        delta = 1
    elif token_text in "}])":  # nosec
        delta = -1
    return current_parentheses_count + delta
def log_token(log: logging.Logger, token: _Token) -> None:
    """Log a token to a provided logging object."""
    (start_row, start_col) = token[2]
    (end_row, end_col) = token[3]
    if start_row == end_row:
        pos = "[{}:{}]".format(start_col or "", end_col)
    else:
        pos = f"l.{end_row}"
    log.log(
        flake8._EXTRA_VERBOSE,
        "l.%s\t%s\t%s\t%r"
        % (start_row, pos, tokenize.tok_name[token[0]], token[1]),
    )
def expand_indent(line: str) -> int:
    r"""Return the amount of indentation.
    Tabs are expanded to the next multiple of 8.
    >>> expand_indent('    ')
    4
    >>> expand_indent('\t')
    8
    >>> expand_indent('    \t')
    8
    >>> expand_indent('        \t')
    16
    """
    expanded = line.expandtabs(8)
    return len(expanded)
# NOTE(sigmavirus24): This was taken wholesale from
# https://github.com/PyCQA/pycodestyle. The in-line comments were edited to be
# more descriptive.
def mutate_string(text: str) -> str:
    """Replace contents with 'xxx' to prevent syntax matching.
    >>> mutate_string('"abc"')
    '"xxx"'
    >>> mutate_string("'''abc'''")
    "'''xxx'''"
    >>> mutate_string("r'abc'")
    "r'xxx'"
    """
    # The last character is the closing quote; locating its first occurrence
    # skips over any b/u/r string-prefix modifiers.
    quote_char = text[-1]
    body_start = text.index(quote_char) + 1
    body_end = len(text) - 1
    # Widen the quote span for triple-quoted strings.
    if text[-3:] in ('"""', "'''"):
        body_start += 2
        body_end -= 2
    return text[:body_start] + "x" * (body_end - body_start) + text[body_end:]
| 35.865385 | 79 | 0.589812 |
303b55998666bbb4742c48e0a045997ebd3f85f9 | 1,389 | py | Python | tests/python/test_printers.py | yoni206/pono | 0b8223bfff70932555aa3eda12e2ee3ca04a0462 | [
"BSD-3-Clause"
] | null | null | null | tests/python/test_printers.py | yoni206/pono | 0b8223bfff70932555aa3eda12e2ee3ca04a0462 | [
"BSD-3-Clause"
] | null | null | null | tests/python/test_printers.py | yoni206/pono | 0b8223bfff70932555aa3eda12e2ee3ca04a0462 | [
"BSD-3-Clause"
] | null | null | null | import pytest
import smt_switch as ss
import pono
import os
import tempfile
@pytest.mark.parametrize("create_solver", ss.solvers.values())
def test_vcd_trace(create_solver):
    """Check that a BMC counterexample can be dumped as a non-empty VCD file."""
    solver = create_solver(False)
    solver.set_opt("incremental", "true")
    solver.set_opt("produce-models", "true")
    bvsort8 = solver.make_sort(ss.sortkinds.BV, 8)
    ts = pono.FunctionalTransitionSystem(solver)
    x = ts.make_statevar('x', bvsort8)
    # init: x = 0
    ts.constrain_init(solver.make_term(ss.primops.Equal,
                                       x,
                                       solver.make_term(0, x.get_sort())))
    # trans: x' = x + 1
    ts.assign_next(x,
                   ts.make_term(ss.primops.BVAdd,
                                x,
                                solver.make_term(1, x.get_sort())))
    # Property x <= 9 (unsigned) is violated at step 10, so a 10-step BMC
    # run must produce a concrete counterexample witness.
    prop_term = ts.make_term(ss.primops.BVUle, x, solver.make_term(9, x.get_sort()))
    prop = pono.Property(ts, prop_term)
    bmc = pono.Bmc(prop, solver)
    res = bmc.check_until(10)
    assert res == False, "res should be false, not just unknown (i.e. None)"
    witness = bmc.witness()
    with tempfile.NamedTemporaryFile() as temp:
        assert os.stat(temp.name).st_size == 0, "Expect file to start empty"
        vcd_printer = pono.VCDWitnessPrinter(ts, witness)
        vcd_printer.dump_trace_to_file(temp.name)
        assert os.stat(temp.name).st_size, "Expect file to be non-empty"
7c5f54d89982e30411db761644d402fc5a3d54e0 | 3,752 | py | Python | djangoecommerce/settings.py | Ikigaii/djangoecommerce | 5871f256b1a5d23dc2332a88f3a60256c44ab123 | [
"CC0-1.0"
] | null | null | null | djangoecommerce/settings.py | Ikigaii/djangoecommerce | 5871f256b1a5d23dc2332a88f3a60256c44ab123 | [
"CC0-1.0"
] | null | null | null | djangoecommerce/settings.py | Ikigaii/djangoecommerce | 5871f256b1a5d23dc2332a88f3a60256c44ab123 | [
"CC0-1.0"
] | null | null | null | """
Django settings for djangoecommerce project.
Generated by 'django-admin startproject' using Django 1.9.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; move it into an
# environment variable (or local_settings.py) before any real deployment.
SECRET_KEY = '-uhpfb-=5&l!s0h5sn8*(3a^slvqng24z7b$0yt!a8w+2*$m9#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# NOTE: redefined to ['*'] further down, after the Heroku/database setup.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'core',
    'catalog',
]
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangoecommerce.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                # apps
                'catalog.context_processors.categories',
            ],
        },
    },
]
WSGI_APPLICATION = 'djangoecommerce.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# NOTE: this sqlite default is overlaid below with DATABASE_URL settings
# via dj_database_url when that environment variable is present.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Recife'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
# Heroku-style deployment tweaks: take the database from DATABASE_URL and
# trust the proxy's X-Forwarded-Proto header for HTTPS detection.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# NOTE(review): this replaces the empty ALLOWED_HOSTS defined above and
# accepts any Host header -- tighten to the real domain(s) in production.
ALLOWED_HOSTS = ['*']
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
# Optional machine-local overrides; silently skipped when the module is absent.
try:
    from .local_settings import *
except ImportError:
    pass
| 26.237762 | 91 | 0.704424 |
524afe764c0ee294c51d8dfb7bea168c0fb02b45 | 8,775 | py | Python | TextFromHtmlExtractor.py | vasily-khodyrev/python_mini_readability | 74c9aa95a04ee8136432aa2c60016dee928f9c30 | [
"Apache-2.0"
] | null | null | null | TextFromHtmlExtractor.py | vasily-khodyrev/python_mini_readability | 74c9aa95a04ee8136432aa2c60016dee928f9c30 | [
"Apache-2.0"
] | null | null | null | TextFromHtmlExtractor.py | vasily-khodyrev/python_mini_readability | 74c9aa95a04ee8136432aa2c60016dee928f9c30 | [
"Apache-2.0"
] | null | null | null | """
HTML <-> array of texts.
1. Can extract from HTML string
texts = getTextArrayFromHtml(html_str)
2. Can extract text by html url
texts = getTextArrayFromUrl(url)
"""
from HTMLParser import HTMLParser, HTMLParseError
from htmlentitydefs import name2codepoint
#from BeautifulSoup import BeautifulSoup
import re, sys, codecs
import urllib2
from urlparse import urlparse
INCLUSION_PATTERNS = [('*','itemtype','http://schema.org/NewsArticle'),
('div', 'class', 'article_article-page')]
EXCLUSION_PATTERNS = [('script', None, None),
('style', None, None),
('aside', None, None)]
PRE_NEWLINE_SEPARATED = ['p','h1','h2','h3','h4','br']
POST_NEWLINE_SEPARATED = ['p','h1','h2','h3','h4']
ALLOW_TEXT_OUT_OF_INC_ROOTS = True
ROOT_TEXT_TAGS = ['p','h1','h2','h3','h4']
OUT = codecs.getwriter('utf-8')(sys.stdout)
class _HTMLToText(HTMLParser):
def __init__(self, baseUrl='', config = {}, verbose = False):
HTMLParser.__init__(self)
self._baseUrl = baseUrl
self._texts = []
self._context = None
# tag attrs incl excl has_text
self._tag_stack = [[None, None, False, False, False]]
self._config = config
self._verbose = verbose
def __getINCLUSION_PATTERNS(self):
return self._config.get('INCLUSION_PATTERNS',INCLUSION_PATTERNS)
def __getEXCLUSION_PATTERNS(self):
return self._config.get('EXCLUSION_PATTERNS', EXCLUSION_PATTERNS)
def __getPRE_NEWLINE_SEPARATED(self):
return self._config.get('PRE_NEWLINE_SEPARATED', PRE_NEWLINE_SEPARATED)
def __getPOST_NEWLINE_SEPARATED(self):
return self._config.get('POST_NEWLINE_SEPARATED', POST_NEWLINE_SEPARATED)
def __getROOT_TEXT_TAGS(self):
return self._config.get('ROOT_TEXT_TAGS', ROOT_TEXT_TAGS)
def __getALLOW_TEXT_OUT_OF_INC_ROOTS(self):
return self._config.get('ALLOW_TEXT_OUT_OF_INC_ROOTS', ALLOW_TEXT_OUT_OF_INC_ROOTS)
def __getAttrValue(self, param):
attrs = self._tag_stack[-1][1]
for attr in attrs:
if attr[0] == param:
return attr[1]
return None
def __getAttrValue(self, item, param):
attrs = item[1]
for attr in attrs:
if attr[0] == param:
return attr[1]
return None
def __isUnderRootTextTag(self):
if len(self._tag_stack) > 1:
for tag_item in reversed(self._tag_stack):
tag = tag_item[0]
if tag is not None and tag in self.__getROOT_TEXT_TAGS():
return True
return False
def hasAttrValue(self, attrs, param, value):
for tuple in attrs:
if (tuple[0] == param) and (value in tuple[1]):
return True
return False
def isInclusion(self):
if len(self._tag_stack)>0:
return self._tag_stack[-1][2]
else:
return False
def isExclusion(self):
if len(self._tag_stack) > 0:
return self._tag_stack[-1][3]
else:
return False
def __isHideOutput(self):
return self.isExclusion() or (not self.isInclusion() and not self.__getALLOW_TEXT_OUT_OF_INC_ROOTS())
def __closeStack(self, tag):
'''
Find closest open tag in stack. And removes all items in between. To solve HTML tag inconsistency issue.
In case match tag is not found - do nothing
:param tag: item in stack
:return:
'''
ind = 0
found = False
for item in reversed(self._tag_stack):
ind += 1
if item[0] is not None and item[0] == tag:
found = True
break
result = None
if found:
if self._verbose:
print 'Found pair: ' + str(tag) + ' dist: ' + str(ind)
while (ind > 0):
result = self._tag_stack.pop()
ind -= 1
else:
if self._verbose:
print 'WARN: no pair found for end tag: ' + str(tag)
return result
def handle_starttag(self, tag, attrs):
if self._verbose:
print ' '*len(self._tag_stack) + "Start: [" + tag + "]" + str(attrs)
isInclusion = self.isInclusion()
for incl_filter in self.__getINCLUSION_PATTERNS():
if (tag == incl_filter[0] or incl_filter[0] == '*') and (not incl_filter[1] or self.hasAttrValue(attrs, incl_filter[1], incl_filter[2])):
isInclusion = True
isExclusion = self.isExclusion()
for excl_filter in self.__getEXCLUSION_PATTERNS():
if (tag == excl_filter[0] or excl_filter[0] == '*') and (not excl_filter[1] or self.hasAttrValue(attrs, excl_filter[1], excl_filter[2])):
isExclusion = True
self._context = tag
self._tag_stack.append([tag, attrs, isInclusion, isExclusion, False])
if tag in self.__getPRE_NEWLINE_SEPARATED() and not self.__isHideOutput():
self._texts.append('\n')
def handle_endtag(self, tag):
item = self.__closeStack(tag)
self._context = self._tag_stack[-1][0]
if self._verbose:
print ' '*len(self._tag_stack) + "End: [" + tag + "]" + str(item)
if (item is not None):
if tag in ('a') and item[4] and not self.__isHideOutput() and self.__isUnderRootTextTag():
link = self.__getAttrValue(item, 'href')
if link is not None:
self._texts.append('[' + link + ']')
if tag in self.__getPOST_NEWLINE_SEPARATED() and not self.__isHideOutput():
self._texts.append('\n')
def handle_data(self, text):
if text:
shortS = re.sub(r'[\s\r\n]+', ' ', text).strip()
if len(shortS)>0:
if self._verbose:
ss = u' '*len(self._tag_stack) + u'Text: [' + str(self._context) + u']' + shortS
OUT.write("%s\n" % ss)
#print ' '*len(self._tag_stack) + 'Text: [' + str(self._context) + ']' + shortS
self._tag_stack[-1][4] = True # Set textIsPresent flag
if not self.__isHideOutput():
if self.__isUnderRootTextTag():
self._texts.append(re.sub(r'[\s\r\n]+', ' ', text))
def handle_entityref(self, name):
if name in name2codepoint and not self.__isHideOutput() and self.__isUnderRootTextTag():
c = unichr(name2codepoint[name])
self._texts.append(c)
def handle_charref(self, name):
if not self.__isHideOutput() and self.__isUnderRootTextTag():
n = int(name[1:], 16) if name.startswith('x') else int(name)
self._texts.append(unichr(n))
def get_text_array(self):
res = ['']
wasNewline = False
for text in self._texts:
if text in '\n':
res.append(text)
wasNewline = True
else:
if not wasNewline:
res[-1] += text
else:
res.append(text)
wasNewline = False
return res
def html_to_text_array(html='<html/>', baseUrl='', config={}, verbose=False):
    """Convert a chunk of HTML into the list of text fragments it holds."""
    converter = _HTMLToText(baseUrl, config, verbose)
    try:
        converter.feed(html)
        converter.close()
    except HTMLParseError:
        # Tolerate malformed markup: keep whatever was parsed so far.
        pass
    return converter.get_text_array()
def getTextArrayFromHtml(html='<html/>', baseUrl='', config={}):
    """
    Given a piece of HTML, return the array of texts it contains.

    BUG FIX: this previously returned ``parser.get_text()``, a method
    that does not exist on _HTMLToText (AttributeError on every call);
    the intended accessor is ``get_text_array()``.
    """
    parser = _HTMLToText(baseUrl, config)
    try:
        parser.feed(html)
        parser.close()
    except HTMLParseError:
        # Malformed markup is tolerated; partial results are returned.
        pass
    return parser.get_text_array()
def getTextArrayFromUrl(url, config={}, verbose=False):
    """Download `url` and return the array of readable texts on the page.

    The charset is taken from the Content-Type header when present,
    otherwise UTF-8 is assumed.

    :raises Exception: when the server does not answer with HTTP 200.
    """
    o = urlparse(url)
    req = urllib2.Request(url)
    response = urllib2.urlopen(req)
    if response.getcode() == 200:
        contentTypeHeader = response.headers['content-type']
        encoding = "utf-8"
        if contentTypeHeader and "charset" in contentTypeHeader:
            encoding = contentTypeHeader.split('charset=')[-1]
        the_page = unicode(response.read(), encoding)
        #Fix html if there're any unclosed tags
        #soup = BeautifulSoup(the_page)
        #correct_html = unicode(soup.prettify(),"utf-8")
        correct_html = the_page
        return html_to_text_array(correct_html, o.scheme + "://" + o.netloc, config, verbose)
    else:
        # BUG FIX: the original concatenated the int status code onto a
        # string (TypeError) and called response.msg as if it were a
        # method (in urllib2 it is a plain attribute).
        raise Exception('Cannot get web page by url: ' + url,
                        'Server response: %d msg: %s' % (response.getcode(), response.msg))
| 36.260331 | 149 | 0.584729 |
d9378588b03687efff6d3e7c1ec8861aa57b9608 | 6,333 | py | Python | homeassistant/components/onvif/camera.py | fanta759/core | fcdb54d8780900fb85e6c20d5382cfd13b69a0b3 | [
"Apache-2.0"
] | 1 | 2021-04-08T11:37:15.000Z | 2021-04-08T11:37:15.000Z | homeassistant/components/onvif/camera.py | axellebot/core | 5694e4190cddcab365cb0eb10201e5e1e193a9f0 | [
"Apache-2.0"
] | 56 | 2020-08-03T07:30:54.000Z | 2022-03-31T06:02:04.000Z | homeassistant/components/onvif/camera.py | axellebot/core | 5694e4190cddcab365cb0eb10201e5e1e193a9f0 | [
"Apache-2.0"
] | 1 | 2021-07-02T14:43:59.000Z | 2021-07-02T14:43:59.000Z | """Support for ONVIF Cameras with FFmpeg as decoder."""
import asyncio
from haffmpeg.camera import CameraMjpeg
from haffmpeg.tools import IMAGE_JPEG, ImageFrame
from onvif.exceptions import ONVIFError
import voluptuous as vol
from homeassistant.components.camera import SUPPORT_STREAM, Camera
from homeassistant.components.ffmpeg import CONF_EXTRA_ARGUMENTS, DATA_FFMPEG
from homeassistant.const import HTTP_BASIC_AUTHENTICATION
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.helpers.aiohttp_client import async_aiohttp_proxy_stream
from .base import ONVIFBaseEntity
from .const import (
ABSOLUTE_MOVE,
ATTR_CONTINUOUS_DURATION,
ATTR_DISTANCE,
ATTR_MOVE_MODE,
ATTR_PAN,
ATTR_PRESET,
ATTR_SPEED,
ATTR_TILT,
ATTR_ZOOM,
CONF_RTSP_TRANSPORT,
CONF_SNAPSHOT_AUTH,
CONTINUOUS_MOVE,
DIR_DOWN,
DIR_LEFT,
DIR_RIGHT,
DIR_UP,
DOMAIN,
GOTOPRESET_MOVE,
LOGGER,
RELATIVE_MOVE,
SERVICE_PTZ,
ZOOM_IN,
ZOOM_OUT,
)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the ONVIF camera video stream.

    Registers the PTZ entity service for this platform and creates one
    camera entity per media profile exposed by the configured device.
    """
    platform = entity_platform.current_platform.get()

    # Create PTZ service: validates service call data against the schema
    # below, then dispatches to ONVIFCameraEntity.async_perform_ptz.
    platform.async_register_entity_service(
        SERVICE_PTZ,
        {
            vol.Optional(ATTR_PAN): vol.In([DIR_LEFT, DIR_RIGHT]),
            vol.Optional(ATTR_TILT): vol.In([DIR_UP, DIR_DOWN]),
            vol.Optional(ATTR_ZOOM): vol.In([ZOOM_OUT, ZOOM_IN]),
            vol.Optional(ATTR_DISTANCE, default=0.1): cv.small_float,
            vol.Optional(ATTR_SPEED, default=0.5): cv.small_float,
            vol.Optional(ATTR_MOVE_MODE, default=RELATIVE_MOVE): vol.In(
                [CONTINUOUS_MOVE, RELATIVE_MOVE, ABSOLUTE_MOVE, GOTOPRESET_MOVE]
            ),
            vol.Optional(ATTR_CONTINUOUS_DURATION, default=0.5): cv.small_float,
            vol.Optional(ATTR_PRESET, default="0"): cv.string,
        },
        "async_perform_ptz",
    )

    # The shared ONVIFDevice wrapper was stored by the integration setup,
    # keyed by the config entry's unique id.
    device = hass.data[DOMAIN][config_entry.unique_id]
    async_add_entities(
        [ONVIFCameraEntity(device, profile) for profile in device.profiles]
    )

    return True
class ONVIFCameraEntity(ONVIFBaseEntity, Camera):
    """Representation of an ONVIF camera."""

    def __init__(self, device, profile):
        """Initialize ONVIF camera entity."""
        ONVIFBaseEntity.__init__(self, device, profile)
        Camera.__init__(self)
        # Propagate the user-selected RTSP transport to the stream component.
        self.stream_options[CONF_RTSP_TRANSPORT] = device.config_entry.options.get(
            CONF_RTSP_TRANSPORT
        )
        # Some devices need HTTP basic auth (rather than digest) for snapshots.
        self._basic_auth = (
            device.config_entry.data.get(CONF_SNAPSHOT_AUTH)
            == HTTP_BASIC_AUTHENTICATION
        )
        # Resolved lazily in async_added_to_hass.
        self._stream_uri = None

    @property
    def supported_features(self) -> int:
        """Return supported features."""
        return SUPPORT_STREAM

    @property
    def name(self) -> str:
        """Return the name of this camera."""
        return f"{self.device.name} - {self.profile.name}"

    @property
    def unique_id(self) -> str:
        """Return a unique ID."""
        # Suffix with the profile index so secondary profiles stay unique;
        # index 0 (the primary profile) uses the bare device identifier.
        if self.profile.index:
            return f"{self.device.info.mac or self.device.info.serial_number}_{self.profile.index}"
        return self.device.info.mac or self.device.info.serial_number

    @property
    def entity_registry_enabled_default(self) -> bool:
        """Return if the entity should be enabled when first added to the entity registry."""
        # Only the highest-resolution profile is enabled by default.
        return self.device.max_resolution == self.profile.video.resolution.width

    async def stream_source(self):
        """Return the stream source."""
        return self._stream_uri

    async def async_camera_image(self):
        """Return a still image response from the camera.

        Prefers the device's native snapshot endpoint; falls back to
        grabbing a single frame from the RTSP stream via FFmpeg.
        """
        image = None

        if self.device.capabilities.snapshot:
            try:
                image = await self.device.device.get_snapshot(
                    self.profile.token, self._basic_auth
                )
            except ONVIFError as err:
                # Non-fatal: log and use the FFmpeg fallback below.
                LOGGER.error(
                    "Fetch snapshot image failed from %s, falling back to FFmpeg; %s",
                    self.device.name,
                    err,
                )

        if image is None:
            ffmpeg = ImageFrame(self.hass.data[DATA_FFMPEG].binary, loop=self.hass.loop)
            # shield() keeps the FFmpeg call running even if this request
            # is cancelled, so the subprocess is not orphaned mid-frame.
            image = await asyncio.shield(
                ffmpeg.get_image(
                    self._stream_uri,
                    output_format=IMAGE_JPEG,
                    extra_cmd=self.device.config_entry.options.get(
                        CONF_EXTRA_ARGUMENTS
                    ),
                )
            )

        return image

    async def handle_async_mjpeg_stream(self, request):
        """Generate an HTTP MJPEG stream from the camera."""
        LOGGER.debug("Handling mjpeg stream from camera '%s'", self.device.name)

        ffmpeg_manager = self.hass.data[DATA_FFMPEG]
        stream = CameraMjpeg(ffmpeg_manager.binary, loop=self.hass.loop)

        await stream.open_camera(
            self._stream_uri,
            extra_cmd=self.device.config_entry.options.get(CONF_EXTRA_ARGUMENTS),
        )

        try:
            stream_reader = await stream.get_reader()
            return await async_aiohttp_proxy_stream(
                self.hass,
                request,
                stream_reader,
                ffmpeg_manager.ffmpeg_stream_content_type,
            )
        finally:
            # Always release the FFmpeg subprocess, even on client disconnect.
            await stream.close()

    async def async_added_to_hass(self):
        """Run when entity about to be added to hass."""
        uri_no_auth = await self.device.async_get_stream_uri(self.profile)
        # Embed credentials into the RTSP URI; only the first "rtsp://"
        # occurrence is replaced (count=1) to avoid touching query params.
        self._stream_uri = uri_no_auth.replace(
            "rtsp://", f"rtsp://{self.device.username}:{self.device.password}@", 1
        )

    async def async_perform_ptz(
        self,
        distance,
        speed,
        move_mode,
        continuous_duration,
        preset,
        pan=None,
        tilt=None,
        zoom=None,
    ) -> None:
        """Perform a PTZ action on the camera."""
        # Delegate to the shared device wrapper for this entity's profile.
        await self.device.async_perform_ptz(
            self.profile,
            distance,
            speed,
            move_mode,
            continuous_duration,
            preset,
            pan,
            tilt,
            zoom,
        )
172a914476cb4a4ae568e5dee602651e451f8c13 | 2,557 | py | Python | OscopeBootstrap/SyntheticPlotResults.py | alexisboukouvalas/OscoNet | f100d1ccfe8f7dad050a3082773a4b6383a4994a | [
"MIT"
] | 1 | 2020-09-03T10:00:44.000Z | 2020-09-03T10:00:44.000Z | OscopeBootstrap/SyntheticPlotResults.py | alexisboukouvalas/OscoNet | f100d1ccfe8f7dad050a3082773a4b6383a4994a | [
"MIT"
] | 1 | 2022-02-10T02:22:05.000Z | 2022-02-10T02:22:05.000Z | OscopeBootstrap/SyntheticPlotResults.py | alexisboukouvalas/OscoNet | f100d1ccfe8f7dad050a3082773a4b6383a4994a | [
"MIT"
] | 1 | 2019-09-25T16:44:30.000Z | 2019-09-25T16:44:30.000Z | from matplotlib import pyplot as plt
import pickle as pickle
def plot(i, strTitle, strExperiment):
    """Plot metric column `i` against noise level for every sample count.

    NOTE(review): this function reads the module globals pairwiseMetrics,
    nbootsamples, noiseLevelSigma and strDir, which are only assigned in
    the ``__main__`` block below — it cannot be called before that runs.
    The last column of pairwiseMetrics is presumably the Oscope baseline
    (it is labelled 'Oscope'); confirm against the pickle producer.
    """
    plt.figure()
    plt.title(strTitle + ' ' + strExperiment)
    linList = []
    lgl = []  # legend labels, one per plotted line
    for ip in range(pairwiseMetrics.shape[1]):
        # Last series is the Oscope reference; the others are labelled
        # with their bootstrap sample count.
        if ip == pairwiseMetrics.shape[1]-1:
            leg = 'Oscope'
        else:
            leg = str(nbootsamples[ip])
        l, = plt.plot(noiseLevelSigma, pairwiseMetrics[:, ip, i], '--o')
        linList.append(l)
        lgl.append(leg)
    plt.legend(lgl)
    plt.savefig(strDir+'/'+strTitle+'.png', bbox_inches='tight')
# TODO: plotting?
# def plot_batch_experiment(results, q_lower=1-0.999999, q_upper=0.999999, n_q=20):
# import matplotlib.pyplot as plt
# qtTry = np.linspace(q_lower, q_upper, n_q)
# f, ax = plt.subplots(1, 1)
# ax.set_xlabel('threshold', fontsize=20)
# ax.plot([0, 1], [0, 1], '--k', lw=5)
# ax.set_xlim(0.0, 0.1)
# ax.set_ylim(0.0, 0.1)
#
# for noiselevel, result in enumerate(results):
# adjMatrix_true = true_adj_matrix(result['G'], result['angularSpeed'])
# print('**** Noise level ', noiselevel)
# for q in qtTry:
# adj = get_adjacency_matrices(result['G'], result['angularSpeed'], result['pvalues'], alpha=q,
# fBonferonni=True)
# incorrect_ratio = calculate_inaccuracy(adj['adjMatrixBootstrapQvalue'], adj['adjMatrixTrue'])
# print('Noise level', noiselevel, ' q=', q, 'accuracy', 1-incorrect_ratio)
# TPR, FDR, FPR = get_metrics_for_different_qvalue_thresholds(result['qvalues'], adjMatrix_true, qtTry)
# ax.plot(qtTry, FDR, ':', label=noiselevel, lw=5)
# ax.legend(loc='lower right', ncol=1)
# plt.show(block=True)
if __name__ == '__main__':
    plt.close("all")
    # Hard-coded result directory of a previous bootstrap cluster run.
    strDir = '/home/mqbssaby/Periodicity/Oscope/data/bootstrapRunCPU16'
    print('Plotting results after loading processcluster file from ' + strDir)
    # The pickle bundles the metrics array plus the parameters it was
    # produced with (Bonferroni flag, alpha, noise grid, sample counts).
    saveDict = pickle.load(open(strDir + '/processClusterResultingPlotting.p', "rb"))
    pairwiseMetrics = saveDict['pairwiseMetrics']
    fBonferonni = saveDict['fBonferonni']
    alpha = saveDict['alpha']
    noiseLevelSigma = saveDict['noiseLevelSigma']
    nbootsamples = saveDict['nbootsamples']
    strExperiment = 'Bonferonni=%g, alpha=%g ' % (fBonferonni, alpha)
    plt.ion()
    # Metric columns 0 and 1 are plotted; indices 2-5 are kept disabled.
    plot(0, 'TPR', strExperiment)
    plot(1, 'FDR', strExperiment)
    # plot(2, 'TP', strExperiment)
    # plot(3, 'TN', strExperiment)
    # plot(4, 'FP', strExperiment)
    # plot(5, 'FN', strExperiment)
52b1b10f7c980b938aaae45df6984d371c3417b1 | 4,758 | py | Python | tools/osm-streets.py | rgreinho/bna-tools | ea1d13de42c7e93d89d4ffbbdb8a8c131d7a38e5 | [
"MIT"
] | null | null | null | tools/osm-streets.py | rgreinho/bna-tools | ea1d13de42c7e93d89d4ffbbdb8a8c131d7a38e5 | [
"MIT"
] | null | null | null | tools/osm-streets.py | rgreinho/bna-tools | ea1d13de42c7e93d89d4ffbbdb8a8c131d7a38e5 | [
"MIT"
] | null | null | null | import asyncio
import multiprocessing
import pathlib
import shlex
import subprocess
import sys
import aiohttp
from halo import Halo
import pandas as pd
# Target city; all derived names/URLs below are built from these two.
CITY = "austin"
STATE = "texas"
CITY_NAME_STATE = f"{CITY}_{STATE}"
CITY_NAME_STATE_CSV = f"{CITY_NAME_STATE}-streets.csv"
CITY_NAME_STATE_FILTERED = f"{CITY_NAME_STATE}.filtered.osm"
CITY_NAME_STATE_OSM = f"{CITY_NAME_STATE}.osm"
CITY_NAME_STATE_POLY = f"{CITY_NAME_STATE}.poly"
# City boundary polygon maintained in the JamesChevalier/cities repo.
CITY_NAME_STATE_POLY_URL = f"https://raw.githubusercontent.com/JamesChevalier/cities/master/united_states/{STATE}/{CITY_NAME_STATE_POLY}"
# Full state extract from Geofabrik (PBF format).
OSM_REGION = f"{STATE}-latest.osm.pbf"
OSM_REGION_URL = f"https://download.geofabrik.de/north-america/us/{OSM_REGION}"
OUTPUT_DIR = "osm-streets"
# Download chunk size in bytes. NOTE(review): 1024 * 2014 looks like a
# typo for 1024 * 1024 (1 MiB) — harmless either way, but worth confirming.
CHUNK_SIZE = 1024 * 2014
async def get_content(session, url, output):
    """Stream `url` into the file at `output`, showing a spinner.

    The destination directory is created on demand. Any failure aborts
    the whole program with exit code 1.
    """
    progress = Halo(text=f"Downloading {url}")
    progress.start()
    try:
        async with session.get(url) as response:
            # Make sure the destination directory exists before writing.
            output.resolve().parent.mkdir(parents=True, exist_ok=True)
            with output.open(mode="wb") as sink:
                while True:
                    block = await response.content.read(CHUNK_SIZE)
                    if not block:
                        break
                    sink.write(block)
    except Exception as exc:
        progress.fail(f"{exc}")
        sys.exit(1)
    else:
        progress.succeed()
def execute(cmd, cwd=None, message="Loading"):
    """Run a shell command with a spinner; abort the program on failure.

    NOTE: `cmd` is passed with shell=True, so it must come from trusted,
    program-built strings only (it does here — see main()).
    """
    status = Halo(text=message)
    status.start()
    try:
        subprocess.run(cmd, shell=True, check=True, capture_output=True, cwd=cwd)
    except subprocess.CalledProcessError as cpe:
        status.fail(
            f'"{cpe.cmd}" failed to execute with error code {cpe.returncode} for the following reason:\n'
            f"{cpe.stderr.decode('utf-8')}."
        )
        sys.exit(1)
    else:
        status.succeed()
async def main():
    """Download OSM data, extract Austin street names, and write CSVs.

    Pipeline: download state PBF + city polygon -> osmosis (clip to the
    polygon) -> osmfilter (keep address tags) -> osmconvert (to CSV) ->
    pandas clean-up. Each stage is skipped when its output file exists,
    so the script is resumable. Requires the osmosis/osmfilter/osmconvert
    binaries on PATH.
    """
    output_dir = pathlib.Path(OUTPUT_DIR)

    # Retrieve the Texas state PBF file and the Austin Polygon.
    downloads = [
        (CITY_NAME_STATE_POLY_URL, output_dir / CITY_NAME_STATE_POLY),
        (OSM_REGION_URL, output_dir / OSM_REGION),
    ]
    async with aiohttp.ClientSession() as session:
        for download in downloads:
            url, output = download
            if not output.exists():
                await get_content(session, url, output)

    # Run osmosis to extract data within the boundaries of the Austin polygon.
    # Take 8-10 minutes to run.
    austin_osm = output_dir / CITY_NAME_STATE_OSM
    austin_poly = output_dir / CITY_NAME_STATE_POLY
    if not austin_osm.exists():
        workers = multiprocessing.cpu_count()
        osmosis_cmd = (
            f'osmosis --read-pbf-fast file="{OSM_REGION}" workers={workers} '
            f'--bounding-polygon file="{austin_poly.name}" '
            f'--write-xml file="{austin_osm.name}"'
        )
        execute(
            osmosis_cmd,
            cwd=output_dir,
            message=f'Processing "{OSM_REGION}" (takes 8-10 minutes)',
        )

    # Run osmfilter to limit the file content to the attributes we need.
    OSM_FILTERS = ["addr:street=", "addr:postcode="]
    austin_filtered = output_dir / CITY_NAME_STATE_FILTERED
    if not austin_filtered.exists():
        # NOTE(review): "--ignore-depedencies" is misspelled, but it is what
        # the osmfilter binary actually accepts upstream — verify before
        # "fixing" the flag name.
        osmfilter_cmd = (
            f'osmfilter {austin_osm.name} --keep="{" or ".join(OSM_FILTERS)}" '
            f"--ignore-depedencies "
            f"--drop-relations "
            f"--drop-ways "
            f"-o={austin_filtered.name}"
        )
        execute(osmfilter_cmd, cwd=output_dir, message=f"Filtering {austin_osm.name}")

    # Run osmconvert to extract the information we need into a CSV file.
    austin_csv = output_dir / CITY_NAME_STATE_CSV
    if not austin_csv.exists():
        # The filter strings end with '='; strip it to get the column names.
        osmconvert_cmd = (
            f"osmconvert {austin_filtered.name} "
            f'--csv="{" ".join([f[:-1] for f in OSM_FILTERS])}" '
            "--csv-headline "
            "--csv-separator=, "
            f"-o={austin_csv.name}"
        )
        execute(osmconvert_cmd, cwd=output_dir, message=f"Saving data to {austin_csv}")

    # Clean up the result.
    pd_streets = pd.read_csv(austin_csv.resolve())
    df = pd.DataFrame(columns=["Street Name", "Zipcode"])
    df["Street Name"] = pd_streets["addr:street"]
    df["Zipcode"] = pd_streets["addr:postcode"]
    df.dropna(inplace=True)
    df.drop_duplicates(inplace=True)
    # sort the df.
    df.sort_values("Zipcode", inplace=True)
    # Save (overwrites the raw osmconvert output in place).
    df.to_csv(austin_csv)

    # Extras: street counts per (numeric-only) zipcode.
    valid = df["Zipcode"].str.isdigit()
    grouped = df[valid].groupby(["Zipcode"]).count()
    grouped.to_csv(output_dir / "austin-street-grouped.csv")
if __name__ == "__main__":
    # Script entry point: drive the async pipeline to completion.
    asyncio.run(main())
| 34.230216 | 137 | 0.626314 |
2c126949de2716e446112a33fce267a4afa01644 | 3,584 | py | Python | sfg2d/io/ntb.py | deisi/SFG2D | 48232b64556164ae28907735144f11e12facecd2 | [
"MIT"
] | null | null | null | sfg2d/io/ntb.py | deisi/SFG2D | 48232b64556164ae28907735144f11e12facecd2 | [
"MIT"
] | null | null | null | sfg2d/io/ntb.py | deisi/SFG2D | 48232b64556164ae28907735144f11e12facecd2 | [
"MIT"
] | null | null | null | from numpy import loadtxt
from pandas import DataFrame, to_timedelta
from datetime import datetime, timedelta
import io
class NtbFile():
    """Reader for .ntb files exported by the FilmWaterX Tensiometer software.

    Tested with software version 3.62.

    Attributes
    ----------
    header : dict
        All key/value pairs parsed from the file header, plus:
        'datetime' (program-start timestamp), 'Duration', 'StartTime'.
    df : pandas.DataFrame
        The measurement table, with an absolute 'Time' axis and a
        'TimeDelta' column relative to the derived start time.

    Header quirks
    -------------
    'Time' is the creation time in seconds relative to 00:00 of the day
    of creation (or a "%H:%M:%S" string in some versions). 'Date' is the
    date of the *program start*, not of the file creation — beware.
    """

    def __init__(self, fname):
        self._fname = fname
        # The exporter writes Latin-1, and decimal commas instead of points.
        self._filedesc = open(fname, 'r', encoding='latin-1')
        self._readFile()
        self._processData()

    def _readFile(self):
        """Parse the YAML-ish header and load the numeric data table."""
        import yaml
        with self._filedesc:
            line = ''
            self.header = []
            # The column-name row terminates the header section.
            end_cond = 'Nr;TotalArea;Area;DeltaArea;DeltaMolecules;Pressure;Tension;Mode;Time;Temp;Potential;Radioactivity'
            while end_cond not in line:
                line = self._filedesc.readline().replace(',', '.')
                # There are empty lines in the file that must be skipped.
                # BUG FIX: this used ``line is '\n'`` — identity comparison
                # against a literal is never true for a freshly read string,
                # so empty lines were not skipped and yaml.load(line)
                # returned None, crashing the dict merge below.
                if line == '\n':
                    continue
                # Correct for the not yaml conform lines: the lipid details
                # span three physical lines that must be merged.
                if 'Lipid(s) Details' in line:
                    lipid = yaml.load(line)
                    lipid['Lipid(s) Details'] = yaml.load(
                        self._filedesc.readline().replace(',', '.')
                    )
                    lipid['Lipid(s) Details'] += yaml.load(
                        self._filedesc.readline().replace(',', '.')
                    )
                    self.header.append(lipid)
                    continue
                if end_cond in line:
                    self._names = yaml.load(line).split(';')
                    break
                self.header.append(yaml.load(line))
            # Merge the per-line dicts into a single header dict.
            self.header = {k: v for d in self.header for k, v in d.items()}
            htime = self.header['Time']
            hdate = self.header['Date']
            # 'Time' is either an int (seconds since midnight) or a
            # "%H:%M:%S" string, depending on the exporter version.
            if isinstance(htime, int):
                time = timedelta(seconds=htime)
                date = datetime.strptime(hdate, "%d.%m.%Y")
                thisdatetime = date + time
            elif isinstance(htime, str):
                thisdatetime = datetime.strptime(
                    hdate + ' ' + htime,
                    "%d.%m.%Y %H:%M:%S"
                )
            else:
                raise IOError('Cant read date from header.')
            self.header['datetime'] = thisdatetime

            # Read the actual data (rest of the file, semicolon separated).
            data = self._filedesc.read().replace(',', '.')
            self.data = loadtxt(io.StringIO(data), delimiter=';')

    def _processData(self):
        """Build the DataFrame and convert 'Time' to an absolute axis."""
        self.df = DataFrame(self.data, columns=self._names)
        # 'Time' in the table is seconds since acquisition start; derive
        # the absolute start from the header timestamp and total duration
        # so comparisons between files become straightforward.
        self.header['Duration'] = to_timedelta(self.df['Time'].iloc[-1], unit='s')
        self.header['StartTime'] = self.header['datetime'] - self.header['Duration']
        self.df['TimeDelta'] = to_timedelta(self.df["Time"], unit='s')
        self.df["Time"] = self.header['StartTime'] + self.df['TimeDelta']
6eacf11ee841cef86bc9e9d84b4a1772f67ae375 | 1,729 | py | Python | smarts/core/utils/string.py | zbzhu99/SMARTS | 652aa23e71bd4e2732e2742140cfcd0ec082a7da | [
"MIT"
] | 2 | 2021-12-13T12:41:54.000Z | 2021-12-16T03:10:24.000Z | smarts/core/utils/string.py | zbzhu99/SMARTS | 652aa23e71bd4e2732e2742140cfcd0ec082a7da | [
"MIT"
] | null | null | null | smarts/core/utils/string.py | zbzhu99/SMARTS | 652aa23e71bd4e2732e2742140cfcd0ec082a7da | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import math
def truncate(str_, length, separator="..."):
    """Shorten `str_` to at most `length` characters by replacing its
    middle with `separator`.

    Args:
        str_:
            The string to truncate.
        length:
            The length to truncate the string to.
        separator:
            The intermediary characters that replaces removed characters.
    """
    if len(str_) <= length:
        return str_
    # Characters of the original text that survive once the separator is in.
    keep = length - len(separator)
    head = math.ceil(keep / 2)
    tail = keep // 2  # floor division == math.floor for ints
    return "".join((str_[:head], separator, str_[len(str_) - tail:]))
| 42.170732 | 79 | 0.718913 |
d4cbd26983581204f8cf772fa7e0ee4e8b9d7b75 | 15,567 | py | Python | gen/pb_python/flyteidl/plugins/sagemaker/hyperparameter_tuning_job_pb2.py | SmritiSatyanV/flyteidl | e8a29e0deb437d9e7086f9e90b72362cd26000a2 | [
"Apache-2.0"
] | 13 | 2019-08-05T22:02:36.000Z | 2020-07-05T06:21:14.000Z | gen/pb_python/flyteidl/plugins/sagemaker/hyperparameter_tuning_job_pb2.py | SmritiSatyanV/flyteidl | e8a29e0deb437d9e7086f9e90b72362cd26000a2 | [
"Apache-2.0"
] | 70 | 2021-02-01T22:14:27.000Z | 2022-03-29T12:56:06.000Z | gen/pb_python/flyteidl/plugins/sagemaker/hyperparameter_tuning_job_pb2.py | SmritiSatyanV/flyteidl | e8a29e0deb437d9e7086f9e90b72362cd26000a2 | [
"Apache-2.0"
] | 22 | 2021-02-01T16:13:28.000Z | 2022-02-25T08:15:29.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: flyteidl/plugins/sagemaker/hyperparameter_tuning_job.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from flyteidl.plugins.sagemaker import parameter_ranges_pb2 as flyteidl_dot_plugins_dot_sagemaker_dot_parameter__ranges__pb2
from flyteidl.plugins.sagemaker import training_job_pb2 as flyteidl_dot_plugins_dot_sagemaker_dot_training__job__pb2
# Generated by protoc -- DO NOT EDIT BY HAND.
# File-level descriptor: serialized_pb is the compiled .proto definition bytes.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='flyteidl/plugins/sagemaker/hyperparameter_tuning_job.proto',
  package='flyteidl.plugins.sagemaker',
  syntax='proto3',
  serialized_options=_b('Z7github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins'),
  serialized_pb=_b('\n:flyteidl/plugins/sagemaker/hyperparameter_tuning_job.proto\x12\x1a\x66lyteidl.plugins.sagemaker\x1a\x31\x66lyteidl/plugins/sagemaker/parameter_ranges.proto\x1a-flyteidl/plugins/sagemaker/training_job.proto\"\xa1\x01\n\x17HyperparameterTuningJob\x12=\n\x0ctraining_job\x18\x01 \x01(\x0b\x32\'.flyteidl.plugins.sagemaker.TrainingJob\x12#\n\x1bmax_number_of_training_jobs\x18\x02 \x01(\x03\x12\"\n\x1amax_parallel_training_jobs\x18\x03 \x01(\x03\"H\n!HyperparameterTuningObjectiveType\"#\n\x05Value\x12\x0c\n\x08MINIMIZE\x10\x00\x12\x0c\n\x08MAXIMIZE\x10\x01\"\x91\x01\n\x1dHyperparameterTuningObjective\x12[\n\x0eobjective_type\x18\x01 \x01(\x0e\x32\x43.flyteidl.plugins.sagemaker.HyperparameterTuningObjectiveType.Value\x12\x13\n\x0bmetric_name\x18\x02 \x01(\t\"A\n\x1cHyperparameterTuningStrategy\"!\n\x05Value\x12\x0c\n\x08\x42\x41YESIAN\x10\x00\x12\n\n\x06RANDOM\x10\x01\":\n\x1cTrainingJobEarlyStoppingType\"\x1a\n\x05Value\x12\x07\n\x03OFF\x10\x00\x12\x08\n\x04\x41UTO\x10\x01\"\x83\x03\n\x1dHyperparameterTuningJobConfig\x12J\n\x15hyperparameter_ranges\x18\x01 \x01(\x0b\x32+.flyteidl.plugins.sagemaker.ParameterRanges\x12W\n\x0ftuning_strategy\x18\x02 \x01(\x0e\x32>.flyteidl.plugins.sagemaker.HyperparameterTuningStrategy.Value\x12S\n\x10tuning_objective\x18\x03 \x01(\x0b\x32\x39.flyteidl.plugins.sagemaker.HyperparameterTuningObjective\x12h\n training_job_early_stopping_type\x18\x04 \x01(\x0e\x32>.flyteidl.plugins.sagemaker.TrainingJobEarlyStoppingType.ValueB9Z7github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/pluginsb\x06proto3')
  ,
  dependencies=[flyteidl_dot_plugins_dot_sagemaker_dot_parameter__ranges__pb2.DESCRIPTOR,flyteidl_dot_plugins_dot_sagemaker_dot_training__job__pb2.DESCRIPTOR,])
# Generated enum descriptors (protoc output -- do not edit by hand).
# Nested enum HyperparameterTuningObjectiveType.Value: MINIMIZE/MAXIMIZE.
_HYPERPARAMETERTUNINGOBJECTIVETYPE_VALUE = _descriptor.EnumDescriptor(
  name='Value',
  full_name='flyteidl.plugins.sagemaker.HyperparameterTuningObjectiveType.Value',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='MINIMIZE', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='MAXIMIZE', index=1, number=1,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=389,
  serialized_end=424,
)
_sym_db.RegisterEnumDescriptor(_HYPERPARAMETERTUNINGOBJECTIVETYPE_VALUE)
# Nested enum HyperparameterTuningStrategy.Value: BAYESIAN/RANDOM.
_HYPERPARAMETERTUNINGSTRATEGY_VALUE = _descriptor.EnumDescriptor(
  name='Value',
  full_name='flyteidl.plugins.sagemaker.HyperparameterTuningStrategy.Value',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='BAYESIAN', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='RANDOM', index=1, number=1,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=606,
  serialized_end=639,
)
_sym_db.RegisterEnumDescriptor(_HYPERPARAMETERTUNINGSTRATEGY_VALUE)
# Nested enum TrainingJobEarlyStoppingType.Value: OFF/AUTO.
_TRAININGJOBEARLYSTOPPINGTYPE_VALUE = _descriptor.EnumDescriptor(
  name='Value',
  full_name='flyteidl.plugins.sagemaker.TrainingJobEarlyStoppingType.Value',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='OFF', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='AUTO', index=1, number=1,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=673,
  serialized_end=699,
)
_sym_db.RegisterEnumDescriptor(_TRAININGJOBEARLYSTOPPINGTYPE_VALUE)
# Generated message descriptor for HyperparameterTuningJob (protoc output --
# do not edit by hand). Wraps a TrainingJob plus tuning job count limits.
_HYPERPARAMETERTUNINGJOB = _descriptor.Descriptor(
  name='HyperparameterTuningJob',
  full_name='flyteidl.plugins.sagemaker.HyperparameterTuningJob',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='training_job', full_name='flyteidl.plugins.sagemaker.HyperparameterTuningJob.training_job', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='max_number_of_training_jobs', full_name='flyteidl.plugins.sagemaker.HyperparameterTuningJob.max_number_of_training_jobs', index=1,
      number=2, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='max_parallel_training_jobs', full_name='flyteidl.plugins.sagemaker.HyperparameterTuningJob.max_parallel_training_jobs', index=2,
      number=3, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=189,
  serialized_end=350,
)
# Generated message descriptors (protoc output -- do not edit by hand).
# Empty wrapper message whose only content is the nested objective-type enum.
_HYPERPARAMETERTUNINGOBJECTIVETYPE = _descriptor.Descriptor(
  name='HyperparameterTuningObjectiveType',
  full_name='flyteidl.plugins.sagemaker.HyperparameterTuningObjectiveType',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _HYPERPARAMETERTUNINGOBJECTIVETYPE_VALUE,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=352,
  serialized_end=424,
)
# Objective of a tuning run: which metric to optimize and in which direction.
_HYPERPARAMETERTUNINGOBJECTIVE = _descriptor.Descriptor(
  name='HyperparameterTuningObjective',
  full_name='flyteidl.plugins.sagemaker.HyperparameterTuningObjective',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='objective_type', full_name='flyteidl.plugins.sagemaker.HyperparameterTuningObjective.objective_type', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='metric_name', full_name='flyteidl.plugins.sagemaker.HyperparameterTuningObjective.metric_name', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=427,
  serialized_end=572,
)
# Empty wrapper message whose only content is the nested strategy enum.
_HYPERPARAMETERTUNINGSTRATEGY = _descriptor.Descriptor(
  name='HyperparameterTuningStrategy',
  full_name='flyteidl.plugins.sagemaker.HyperparameterTuningStrategy',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _HYPERPARAMETERTUNINGSTRATEGY_VALUE,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=574,
  serialized_end=639,
)
# Empty wrapper message whose only content is the nested early-stopping enum.
_TRAININGJOBEARLYSTOPPINGTYPE = _descriptor.Descriptor(
  name='TrainingJobEarlyStoppingType',
  full_name='flyteidl.plugins.sagemaker.TrainingJobEarlyStoppingType',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _TRAININGJOBEARLYSTOPPINGTYPE_VALUE,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=641,
  serialized_end=699,
)
# Generated message descriptor for HyperparameterTuningJobConfig (protoc
# output -- do not edit by hand): ranges, strategy, objective, early stopping.
_HYPERPARAMETERTUNINGJOBCONFIG = _descriptor.Descriptor(
  name='HyperparameterTuningJobConfig',
  full_name='flyteidl.plugins.sagemaker.HyperparameterTuningJobConfig',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='hyperparameter_ranges', full_name='flyteidl.plugins.sagemaker.HyperparameterTuningJobConfig.hyperparameter_ranges', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='tuning_strategy', full_name='flyteidl.plugins.sagemaker.HyperparameterTuningJobConfig.tuning_strategy', index=1,
      number=2, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='tuning_objective', full_name='flyteidl.plugins.sagemaker.HyperparameterTuningJobConfig.tuning_objective', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='training_job_early_stopping_type', full_name='flyteidl.plugins.sagemaker.HyperparameterTuningJobConfig.training_job_early_stopping_type', index=3,
      number=4, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=702,
  serialized_end=1089,
)
# Generated cross-reference wiring (protoc output -- do not edit by hand):
# resolve message/enum field types and register everything with the file
# descriptor and the symbol database.
_HYPERPARAMETERTUNINGJOB.fields_by_name['training_job'].message_type = flyteidl_dot_plugins_dot_sagemaker_dot_training__job__pb2._TRAININGJOB
_HYPERPARAMETERTUNINGOBJECTIVETYPE_VALUE.containing_type = _HYPERPARAMETERTUNINGOBJECTIVETYPE
_HYPERPARAMETERTUNINGOBJECTIVE.fields_by_name['objective_type'].enum_type = _HYPERPARAMETERTUNINGOBJECTIVETYPE_VALUE
_HYPERPARAMETERTUNINGSTRATEGY_VALUE.containing_type = _HYPERPARAMETERTUNINGSTRATEGY
_TRAININGJOBEARLYSTOPPINGTYPE_VALUE.containing_type = _TRAININGJOBEARLYSTOPPINGTYPE
_HYPERPARAMETERTUNINGJOBCONFIG.fields_by_name['hyperparameter_ranges'].message_type = flyteidl_dot_plugins_dot_sagemaker_dot_parameter__ranges__pb2._PARAMETERRANGES
_HYPERPARAMETERTUNINGJOBCONFIG.fields_by_name['tuning_strategy'].enum_type = _HYPERPARAMETERTUNINGSTRATEGY_VALUE
_HYPERPARAMETERTUNINGJOBCONFIG.fields_by_name['tuning_objective'].message_type = _HYPERPARAMETERTUNINGOBJECTIVE
_HYPERPARAMETERTUNINGJOBCONFIG.fields_by_name['training_job_early_stopping_type'].enum_type = _TRAININGJOBEARLYSTOPPINGTYPE_VALUE
DESCRIPTOR.message_types_by_name['HyperparameterTuningJob'] = _HYPERPARAMETERTUNINGJOB
DESCRIPTOR.message_types_by_name['HyperparameterTuningObjectiveType'] = _HYPERPARAMETERTUNINGOBJECTIVETYPE
DESCRIPTOR.message_types_by_name['HyperparameterTuningObjective'] = _HYPERPARAMETERTUNINGOBJECTIVE
DESCRIPTOR.message_types_by_name['HyperparameterTuningStrategy'] = _HYPERPARAMETERTUNINGSTRATEGY
DESCRIPTOR.message_types_by_name['TrainingJobEarlyStoppingType'] = _TRAININGJOBEARLYSTOPPINGTYPE
DESCRIPTOR.message_types_by_name['HyperparameterTuningJobConfig'] = _HYPERPARAMETERTUNINGJOBCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Generated concrete message classes (protoc output -- do not edit by hand):
# each is synthesized at import time from its descriptor via _reflection.
HyperparameterTuningJob = _reflection.GeneratedProtocolMessageType('HyperparameterTuningJob', (_message.Message,), dict(
  DESCRIPTOR = _HYPERPARAMETERTUNINGJOB,
  __module__ = 'flyteidl.plugins.sagemaker.hyperparameter_tuning_job_pb2'
  # @@protoc_insertion_point(class_scope:flyteidl.plugins.sagemaker.HyperparameterTuningJob)
  ))
_sym_db.RegisterMessage(HyperparameterTuningJob)
HyperparameterTuningObjectiveType = _reflection.GeneratedProtocolMessageType('HyperparameterTuningObjectiveType', (_message.Message,), dict(
  DESCRIPTOR = _HYPERPARAMETERTUNINGOBJECTIVETYPE,
  __module__ = 'flyteidl.plugins.sagemaker.hyperparameter_tuning_job_pb2'
  # @@protoc_insertion_point(class_scope:flyteidl.plugins.sagemaker.HyperparameterTuningObjectiveType)
  ))
_sym_db.RegisterMessage(HyperparameterTuningObjectiveType)
HyperparameterTuningObjective = _reflection.GeneratedProtocolMessageType('HyperparameterTuningObjective', (_message.Message,), dict(
  DESCRIPTOR = _HYPERPARAMETERTUNINGOBJECTIVE,
  __module__ = 'flyteidl.plugins.sagemaker.hyperparameter_tuning_job_pb2'
  # @@protoc_insertion_point(class_scope:flyteidl.plugins.sagemaker.HyperparameterTuningObjective)
  ))
_sym_db.RegisterMessage(HyperparameterTuningObjective)
HyperparameterTuningStrategy = _reflection.GeneratedProtocolMessageType('HyperparameterTuningStrategy', (_message.Message,), dict(
  DESCRIPTOR = _HYPERPARAMETERTUNINGSTRATEGY,
  __module__ = 'flyteidl.plugins.sagemaker.hyperparameter_tuning_job_pb2'
  # @@protoc_insertion_point(class_scope:flyteidl.plugins.sagemaker.HyperparameterTuningStrategy)
  ))
_sym_db.RegisterMessage(HyperparameterTuningStrategy)
TrainingJobEarlyStoppingType = _reflection.GeneratedProtocolMessageType('TrainingJobEarlyStoppingType', (_message.Message,), dict(
  DESCRIPTOR = _TRAININGJOBEARLYSTOPPINGTYPE,
  __module__ = 'flyteidl.plugins.sagemaker.hyperparameter_tuning_job_pb2'
  # @@protoc_insertion_point(class_scope:flyteidl.plugins.sagemaker.TrainingJobEarlyStoppingType)
  ))
_sym_db.RegisterMessage(TrainingJobEarlyStoppingType)
HyperparameterTuningJobConfig = _reflection.GeneratedProtocolMessageType('HyperparameterTuningJobConfig', (_message.Message,), dict(
  DESCRIPTOR = _HYPERPARAMETERTUNINGJOBCONFIG,
  __module__ = 'flyteidl.plugins.sagemaker.hyperparameter_tuning_job_pb2'
  # @@protoc_insertion_point(class_scope:flyteidl.plugins.sagemaker.HyperparameterTuningJobConfig)
  ))
_sym_db.RegisterMessage(HyperparameterTuningJobConfig)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 42.186992 | 1,564 | 0.802081 |
8012190bf4c4218bd0051a6155f29589c8b801dd | 54,947 | py | Python | tests/runner.py | gertvanhoey/emscripten | e2f7a32805f46f200c8db7165c4cd2932468ae43 | [
"MIT"
] | null | null | null | tests/runner.py | gertvanhoey/emscripten | e2f7a32805f46f200c8db7165c4cd2932468ae43 | [
"MIT"
] | null | null | null | tests/runner.py | gertvanhoey/emscripten | e2f7a32805f46f200c8db7165c4cd2932468ae43 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# This Python file uses the following encoding: utf-8
'''
Simple test runner.
'''
# XXX Use EM_ALL_ENGINES=1 in the env to test all engines!
from __future__ import print_function
from subprocess import Popen, PIPE, STDOUT
import os, unittest, tempfile, shutil, time, inspect, sys, math, glob, re, difflib
import webbrowser, hashlib, threading, platform
import multiprocessing, functools, stat, string, random, fnmatch
import atexit
import operator
import parallel_runner
if sys.version_info.major == 2:
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from SimpleHTTPServer import SimpleHTTPRequestHandler
from httplib import HTTPConnection
from urllib import unquote
else:
from http.server import HTTPServer, BaseHTTPRequestHandler, SimpleHTTPRequestHandler
from http.client import HTTPConnection
from urllib.parse import unquote
# Setup
# Absolute path of the emscripten root directory (the parent of tests/).
__rootpath__ = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def path_from_root(*pathelems):
  # Join the given path elements onto the emscripten root directory.
  return os.path.join(__rootpath__, *pathelems)
sys.path += [path_from_root(''), path_from_root('third_party/websockify')]
import tools.shared
from tools.shared import *
from tools.line_endings import check_line_endings
logger = logging.getLogger(__file__)
# User can specify an environment variable EMSCRIPTEN_BROWSER to force the browser test suite to
# run using another browser command line than the default system browser.
# Setting '0' as the browser disables running a browser (but we still see tests compile)
emscripten_browser = os.environ.get('EMSCRIPTEN_BROWSER')
if emscripten_browser:
  cmd = shlex.split(emscripten_browser)
  def run_in_other_browser(url):
    # Launch the user-specified browser command with the test URL appended.
    Popen(cmd + [url])
  if EM_BUILD_VERBOSE_LEVEL >= 3:
    print("using Emscripten browser: " + str(cmd), file=sys.stderr)
  # Monkey-patch webbrowser so every open_new call goes to the override.
  webbrowser.open_new = run_in_other_browser
# checks if browser testing is enabled
def has_browser():
  # Browser testing is disabled when EMSCRIPTEN_BROWSER is set to '0'.
  return emscripten_browser != '0'
# returns what browser is being used (None means the default)
def get_browser():
  # The browser command override, or None to use the system default browser.
  return emscripten_browser
# Sanity check for config
# Fail fast with a helpful message if ~/.emscripten (EM_CONFIG) is not set up:
# COMPILER_OPTS comes from the config via tools.shared's star import, so an
# AssertionError (None value) or NameError (missing) both indicate bad config.
try:
  assert COMPILER_OPTS is not None
except Exception:
  raise Exception('Cannot find "COMPILER_OPTS" definition. Is %s set up properly? You may need to copy the template settings file into it.' % EM_CONFIG)
HELP_TEXT = '''
==============================================================================
Running the main part of the test suite. Don't forget to run the other parts!
A recommended order is:
sanity - tests for first run, etc., modifies ~/.emscripten
(the main test suite)
other - tests separate from the main suite
browser - runs pages in a web browser
interactive - runs interactive browser tests that need human verification, and could not be automated
sockets - runs websocket networking tests
benchmark - run before and after each set of changes before pushing to
master, verify no regressions
To run one of those parts, do something like
python tests/runner.py sanity
To run a specific set of tests, you can do things like
python tests/runner.py asm2
(that runs the asm2 (asm.js, -O2) tests). You can run individual tests with
python tests/runner.py test_hello_world
Combinations work too, for example
python tests/runner.py browser.test_sdl_image
In the main test suite, you can run all variations (O0, O1, O2, etc.) of
an individual test with
python tests/runner.py ALL.test_hello_world
You can run a random set of N tests with a command like
python tests/runner.py random50
An individual test can be skipped by passing the "skip:" prefix. E.g.
python tests/runner.py other skip:other.test_cmake
Passing a wildcard allows choosing a subset of tests in a suite, e.g.
python tests/runner.py browser.test_pthread_*
will run all the pthreads related tests. Wildcards can also be passed in skip,
so
python tests/runner.py browser skip:browser.test_pthread_*
will run the whole browser suite except for all the pthread tests in it.
Debugging: You can run
EM_SAVE_DIR=1 python tests/runner.py ALL.test_hello_world
in order to save the test runner directory, in /tmp/emscripten_temp. All files
created by the test will be present there. You can also use EMCC_DEBUG to
further debug the compiler itself, see emcc.
==============================================================================
'''
# Core test runner class, shared between normal tests and benchmarks
# Set once the sanity checks have run, so they only execute for the first mode.
checked_sanity = False
# Test modes run by default for the core suite (names map to runner classes).
test_modes = [
  'default',
  'asm1',
  'asm2',
  'asm3',
  'asm2g',
]
# Additional modes that exist but are not part of the default run.
nondefault_test_modes = [
  'asm2f',
  'binaryen0',
  'binaryen1',
  'binaryen2',
  'binaryen3',
  'binaryens',
  'binaryenz',
  'asmi',
  'asm2i',
]
# Counter used when naming/ordering generated test runs.
test_index = 0
use_all_engines = os.environ.get('EM_ALL_ENGINES') # generally js engines are equivalent, testing 1 is enough. set this
                                                   # to force testing on all js engines, good to find js engine bugs
class RunnerCore(unittest.TestCase):
emcc_args = None
# default temporary directory settings. set_temp_dir may be called later to
# override these
temp_dir = TEMP_DIR
canonical_temp_dir = get_canonical_temp_dir(TEMP_DIR)
save_dir = os.environ.get('EM_SAVE_DIR')
save_JS = 0
stderr_redirect = STDOUT # This avoids cluttering the test runner output, which is stderr too, with compiler warnings etc.
# Change this to None to get stderr reporting, for debugging purposes
env = {}
EM_TESTRUNNER_DETECT_TEMPFILE_LEAKS = int(os.getenv('EM_TESTRUNNER_DETECT_TEMPFILE_LEAKS')) if os.getenv('EM_TESTRUNNER_DETECT_TEMPFILE_LEAKS') != None else 0
temp_files_before_run = []
  def is_emterpreter(self):
    # Overridden by emterpreter test modes; the base runner never emterprets.
    return False
  def is_wasm_backend(self):
    # True when the global Settings select the LLVM wasm backend.
    return Settings.WASM_BACKEND
def uses_memory_init_file(self):
if self.emcc_args is None:
return None
elif '--memory-init-file' in self.emcc_args:
return int(self.emcc_args[self.emcc_args.index('--memory-init-file')+1])
else:
# side modules handle memory differently; binaryen puts the memory in the wasm module
return ('-O2' in self.emcc_args or '-O3' in self.emcc_args or '-Oz' in self.emcc_args) and not (Settings.SIDE_MODULE or Settings.WASM)
  def set_temp_dir(self, temp_dir):
    """Override the default temporary directory (and its canonical form)
    for this runner instance."""
    self.temp_dir = temp_dir
    self.canonical_temp_dir = get_canonical_temp_dir(self.temp_dir)
    # Explicitly set dedicated temporary directory for parallel tests
    os.environ['EMCC_TEMP_DIR'] = self.temp_dir
  def setUp(self):
    """Per-test setup: reset compiler Settings, optionally snapshot temp
    files for leak detection, and create and chdir into a fresh working
    directory (or the canonical one when EM_SAVE_DIR is set)."""
    Settings.reset()
    if self.EM_TESTRUNNER_DETECT_TEMPFILE_LEAKS:
      # Record everything currently in the temp dir so tearDown can diff it.
      for root, dirnames, filenames in os.walk(self.temp_dir):
        for dirname in dirnames: self.temp_files_before_run.append(os.path.normpath(os.path.join(root, dirname)))
        for filename in filenames: self.temp_files_before_run.append(os.path.normpath(os.path.join(root, filename)))
    self.banned_js_engines = []
    self.use_all_engines = use_all_engines
    if not self.save_dir:
      dirname = tempfile.mkdtemp(prefix='emscripten_test_' + self.__class__.__name__ + '_', dir=self.temp_dir)
    else:
      # EM_SAVE_DIR mode: reuse the canonical temp dir so outputs survive.
      dirname = CANONICAL_TEMP_DIR
    if not os.path.exists(dirname):
      os.makedirs(dirname)
    self.working_dir = dirname
    os.chdir(dirname)
    # Use emscripten root for node module lookup
    scriptdir = os.path.dirname(os.path.abspath(__file__))
    os.environ['NODE_PATH'] = os.path.join(scriptdir, '..', 'node_modules')
    if not self.save_dir:
      self.has_prev_ll = False
      for temp_file in os.listdir(TEMP_DIR):
        if temp_file.endswith('.ll'):
          self.has_prev_ll = True
  def tearDown(self):
    """Per-test cleanup: remove the working directory (unless saving) and,
    when leak detection is on, fail if new temp files were left behind."""
    if not self.save_dir:
      # rmtree() fails on Windows if the current working directory is inside the tree.
      os.chdir(os.path.join(self.get_dir(), '..'))
      try_delete(self.get_dir())
      if self.EM_TESTRUNNER_DETECT_TEMPFILE_LEAKS and not os.environ.get('EMCC_DEBUG'):
        temp_files_after_run = []
        for root, dirnames, filenames in os.walk(self.temp_dir):
          for dirname in dirnames: temp_files_after_run.append(os.path.normpath(os.path.join(root, dirname)))
          for filename in filenames: temp_files_after_run.append(os.path.normpath(os.path.join(root, filename)))
        # Our leak detection will pick up *any* new temp files in the temp dir. They may not be due to
        # us, but e.g. the browser when running browser tests. Until we figure out a proper solution,
        # ignore some temp file names that we see on our CI infrastructure.
        ignorable_files = ['/tmp/tmpaddon']
        left_over_files = list(set(temp_files_after_run) - set(self.temp_files_before_run) - set(ignorable_files))
        if len(left_over_files):
          print('ERROR: After running test, there are ' + str(len(left_over_files)) + ' new temporary files/directories left behind:', file=sys.stderr)
          for f in left_over_files:
            print('leaked file: ' + f, file=sys.stderr)
          raise Exception('Test leaked ' + str(len(left_over_files)) + ' temporary files!')
      # Make sure we don't leave stuff around
      #if not self.has_prev_ll:
      #  for temp_file in os.listdir(TEMP_DIR):
      #    assert not temp_file.endswith('.ll'), temp_file
      #    # TODO assert not temp_file.startswith('emscripten_'), temp_file
  def get_dir(self):
    # The per-test working directory created in setUp.
    return self.working_dir
  def in_dir(self, *pathelems):
    # Join path elements onto the per-test working directory.
    return os.path.join(self.get_dir(), *pathelems)
  def get_stdout_path(self):
    # Path of the file that captures stdout when running generated code.
    return os.path.join(self.get_dir(), 'stdout')
def hardcode_arguments(self, filename, args):
# Hardcode in the arguments, so js is portable without manual commandlinearguments
if not args: return
js = open(filename).read()
open(filename, 'w').write(js.replace('run();', 'run(%s + Module["arguments"]);' % str(args)))
  def prep_ll_run(self, filename, ll_file, force_recompile=False, build_ll_hook=None):
    """Prepare LLVM IR/bitcode at filename + '.o' for a JS build: copy in
    `ll_file` (.ll, .bc or .o), optionally round-trip through llvm-dis/-as,
    run LLVM opts, and invoke `build_ll_hook` on the textual IR."""
    #force_recompile = force_recompile or os.stat(filename + '.o.ll').st_size > 50000 # if the file is big, recompile just to get ll_opts # Recompiling just for dfe in ll_opts is too costly
    def fix_target(ll_filename):
      # Rewrite the asm.js datalayout/triple in textual IR to the wasm ones
      # when the configured LLVM_TARGET is not the asm.js target.
      if LLVM_TARGET == ASM_JS_TARGET:
        return
      with open(ll_filename) as f:
        contents = f.read()
      if LLVM_TARGET in contents:
        return
      asmjs_layout = "e-p:32:32-i64:64-v128:32:128-n32-S128"
      wasm_layout = "e-m:e-p:32:32-i64:64-n32:64-S128"
      assert(ASM_JS_TARGET in contents)
      assert(asmjs_layout in contents)
      contents = contents.replace(asmjs_layout, wasm_layout)
      contents = contents.replace(ASM_JS_TARGET, WASM_TARGET)
      with open(ll_filename, 'w') as f:
        f.write(contents)
    if Building.LLVM_OPTS or force_recompile or build_ll_hook:
      if ll_file.endswith(('.bc', '.o')):
        if ll_file != filename + '.o':
          shutil.copy(ll_file, filename + '.o')
        Building.llvm_dis(filename)
      else:
        shutil.copy(ll_file, filename + '.o.ll')
        fix_target(filename + '.o.ll')
      if build_ll_hook:
        need_post = build_ll_hook(filename)
      Building.llvm_as(filename)
      shutil.move(filename + '.o.ll', filename + '.o.ll.pre') # for comparisons later
      if Building.LLVM_OPTS:
        Building.llvm_opts(filename)
      Building.llvm_dis(filename)
      if build_ll_hook and need_post:
        build_ll_hook(filename)
        Building.llvm_as(filename)
        shutil.move(filename + '.o.ll', filename + '.o.ll.post') # for comparisons later
        Building.llvm_dis(filename)
      Building.llvm_as(filename)
    else:
      # Fast path: no opts or hooks needed, just put the input in place.
      if ll_file.endswith('.ll'):
        safe_copy(ll_file, filename + '.o.ll')
        fix_target(filename + '.o.ll')
        Building.llvm_as(filename)
      else:
        safe_copy(ll_file, filename + '.o')
# Generate JS from ll, and optionally modify the generated JS with a post_build function. Note
# that post_build is called on unoptimized JS, so we send it to emcc (otherwise, if run after
# emcc, it would not apply on the optimized/minified JS)
def ll_to_js(self, filename, post_build):
if type(post_build) in (list, tuple):
post1, post2 = post_build
else:
post1 = post_build
post2 = None
emcc_args = self.emcc_args
if emcc_args is None:
emcc_args = []
transform_args = []
if post1:
transform_filename = os.path.join(self.get_dir(), 'transform.py')
transform = open(transform_filename, 'w')
transform.write('\nimport sys\nsys.path += [%r]\n' % path_from_root(''))
transform.write(post1)
transform.write('\nprocess(sys.argv[1])\n')
transform.close()
transform_args = ['--js-transform', "%s '%s'" % (PYTHON, transform_filename)]
Building.emcc(filename + '.o', Settings.serialize() + emcc_args + transform_args + Building.COMPILER_TEST_OPTS, filename + '.o.js')
if post2: post2(filename + '.o.js')
# Build JavaScript code from source code
def build(self, src, dirname, filename, output_processor=None, main_file=None, additional_files=[], libraries=[], includes=[], build_ll_hook=None, post_build=None, js_outfile=True):
Building.LLVM_OPT_OPTS = ['-O3'] # pick llvm opts here, so we include changes to Settings in the test case code
# Copy over necessary files for compiling the source
if main_file is None:
f = open(filename, 'w')
f.write(src)
f.close()
final_additional_files = []
for f in additional_files:
final_additional_files.append(os.path.join(dirname, os.path.basename(f)))
shutil.copyfile(f, final_additional_files[-1])
additional_files = final_additional_files
else:
# copy whole directory, and use a specific main .cpp file
# (rmtree() fails on Windows if the current working directory is inside the tree.)
if os.getcwd().startswith(os.path.abspath(dirname)):
os.chdir(os.path.join(dirname, '..'))
shutil.rmtree(dirname)
shutil.copytree(src, dirname)
shutil.move(os.path.join(dirname, main_file), filename)
# the additional files were copied; alter additional_files to point to their full paths now
additional_files = [os.path.join(dirname, f) for f in additional_files]
os.chdir(self.get_dir())
if build_ll_hook or post_build:
# "slow", old path: build to bc, then build to JS
# C++ => LLVM binary
for f in [filename] + additional_files:
try:
# Make sure we notice if compilation steps failed
os.remove(f + '.o')
except:
pass
args = [PYTHON, EMCC] + Building.COMPILER_TEST_OPTS + Settings.serialize() + \
['-I', dirname, '-I', os.path.join(dirname, 'include')] + \
['-I' + include for include in includes] + \
['-c', f, '-o', f + '.o']
output = subprocess.check_call(args, stderr=self.stderr_redirect if not DEBUG else None)
assert os.path.exists(f + '.o')
# Link all files
if len(additional_files) + len(libraries):
shutil.move(filename + '.o', filename + '.o.alone')
Building.link([filename + '.o.alone'] + [f + '.o' for f in additional_files] + libraries,
filename + '.o')
if not os.path.exists(filename + '.o'):
print("Failed to link LLVM binaries:\n\n", output)
raise Exception("Linkage error")
# Finalize
self.prep_ll_run(filename, filename + '.o', build_ll_hook=build_ll_hook)
# BC => JS
self.ll_to_js(filename, post_build)
else:
# "fast", new path: just call emcc and go straight to JS
all_files = [filename] + additional_files + libraries
for i in range(len(all_files)):
if '.' not in all_files[i]:
shutil.move(all_files[i], all_files[i] + '.bc')
all_files[i] += '.bc'
args = [PYTHON, EMCC] + Building.COMPILER_TEST_OPTS + Settings.serialize() + \
self.emcc_args + \
['-I', dirname, '-I', os.path.join(dirname, 'include')] + \
['-I' + include for include in includes] + \
all_files + \
['-o', filename + '.o.js']
output = subprocess.check_call(args, stderr=self.stderr_redirect if not DEBUG else None)
if js_outfile:
assert os.path.exists(filename + '.o.js')
if output_processor is not None:
output_processor(open(filename + '.o.js').read())
if self.emcc_args is not None and js_outfile:
src = open(filename + '.o.js').read()
if self.uses_memory_init_file():
# side memory init file, or an empty one in the js
assert ('/* memory initializer */' not in src) or ('/* memory initializer */ allocate([]' in src)
  def validate_asmjs(self, err):
    """Check SpiderMonkey stderr for asm.js validation: tolerate known
    missing-SIMD-type errors, require successful asm.js compilation when
    asm.js output is mentioned, and strip the success line. Returns the
    filtered stderr text."""
    m = re.search("asm.js type error: '(\w+)' is not a (standard|supported) SIMD type", err)
    if m:
      # Bug numbers for missing SIMD types:
      bugs = {
        'Int8x16' : 1136226,
        'Int16x8' : 1136226,
        'Uint8x16' : 1244117,
        'Uint16x8' : 1244117,
        'Uint32x4' : 1240796,
        'Float64x2': 1124205,
      }
      simd = m.group(1)
      if simd in bugs:
        print(("\nWARNING: ignoring asm.js type error from {} due to implementation not yet available in SpiderMonkey." +
               " See https://bugzilla.mozilla.org/show_bug.cgi?id={}\n").format(simd, bugs[simd]), file=sys.stderr)
        err = err.replace(m.group(0), '')
    if 'uccessfully compiled asm.js code' in err and 'asm.js link error' not in err:
      print("[was asm.js'ified]", file=sys.stderr)
    elif 'asm.js' in err: # if no asm.js error, then not an odin build
      raise Exception("did NOT asm.js'ify: " + err)
    err = '\n'.join([line for line in err.split('\n') if 'uccessfully compiled asm.js code' not in line])
    return err
def get_func(self, src, name):
start = src.index('function ' + name + '(')
t = start
n = 0
while True:
if src[t] == '{': n += 1
elif src[t] == '}':
n -= 1
if n == 0: return src[start:t+1]
t += 1
assert t < len(src)
def count_funcs(self, javascript_file):
num_funcs = 0
start_tok = "// EMSCRIPTEN_START_FUNCS"
end_tok = "// EMSCRIPTEN_END_FUNCS"
start_off = 0
end_off = 0
with open (javascript_file, 'rt') as fin:
blob = "".join(fin.readlines())
start_off = blob.find(start_tok) + len(start_tok)
end_off = blob.find(end_tok)
asm_chunk = blob[start_off:end_off]
num_funcs = asm_chunk.count('function ')
return num_funcs
def count_wasm_contents(self, wasm_binary, what):
out = run_process([os.path.join(Building.get_binaryen_bin(), 'wasm-opt'), wasm_binary, '--metrics'], stdout=PIPE).stdout
# output is something like
# [?] : 125
line = [line for line in out.split('\n') if '[' + what + ']' in line][0].strip()
ret = line.split(':')[1].strip()
return int(ret)
  def get_wasm_text(self, wasm_binary):
    # Disassemble a wasm binary to the text format using binaryen's wasm-dis.
    return run_process([os.path.join(Building.get_binaryen_bin(), 'wasm-dis'), wasm_binary], stdout=PIPE).stdout
def is_exported_in_wasm(self, name, wasm):
wat = self.get_wasm_text(wasm)
return ('(export "%s"' % name) in wat
def run_generated_code(self, engine, filename, args=[], check_timeout=True, output_nicerizer=None, assert_returncode=0):
    """Run a generated .js file under `engine` and return its combined output.

    Output is captured via files rather than pipes, as PIPE can get too
    full and hang us. Raises if strict-mode warnings appear in the output.
    """
    stdout = os.path.join(self.get_dir(), 'stdout')
    stderr = os.path.join(self.get_dir(), 'stderr')
    try:
        cwd = os.getcwd()
    except OSError:  # the cwd may have been deleted out from under us
        cwd = None
    os.chdir(self.get_dir())
    # Make sure that we produced proper line endings to the .js file we are about to run.
    assert check_line_endings(filename) == 0
    # the original passed open(...) handles inline and leaked them; close promptly
    with open(stdout, 'w') as stdout_f, open(stderr, 'w') as stderr_f:
        run_js(filename, engine, args, check_timeout, stdout=stdout_f, stderr=stderr_f, assert_returncode=assert_returncode)
    if cwd is not None:
        os.chdir(cwd)
    with open(stdout, 'r') as f:
        out = f.read()
    with open(stderr, 'r') as f:
        err = f.read()
    if engine == SPIDERMONKEY_ENGINE and Settings.ASM_JS == 1:
        err = self.validate_asmjs(err)
    if output_nicerizer:
        ret = output_nicerizer(out, err)
    else:
        ret = out + err
    assert 'strict warning:' not in ret, 'We should pass all strict mode checks: ' + ret
    return ret
def build_native(self, filename, args=[]):
    """Compile `filename` natively with clang into `filename + '.native'`."""
    compiler = CLANG if filename.endswith('cpp') else CLANG_CC
    cmd = [compiler, '-O2', '-fno-math-errno', filename, '-o', filename + '.native'] + args
    process = run_process(cmd, stdout=PIPE, stderr=self.stderr_redirect, check=False)
    # `is not 0` compared identity, not value (relies on CPython int caching); use !=
    if process.returncode != 0:
        # report the command that was actually run (the original printed a
        # reconstructed command that omitted -fno-math-errno and `args`)
        print("Building native executable with command '%s' failed with a return code %d!" % (' '.join(cmd), process.returncode), file=sys.stderr)
        print("Output: " + process.stdout)
def run_native(self, filename, args):
    """Run a previously built native executable and return its stdout."""
    process = run_process([filename + '.native'] + args, stdout=PIPE, check=False)
    # the original referenced an undefined name `output` below (NameError on
    # both the failure path and the return); use `process` throughout,
    # and compare returncode by value rather than identity (`is not 0`)
    if process.returncode != 0:
        print("Running native executable with command '%s' failed with a return code %d!" % (' '.join([filename + '.native'] + args), process.returncode), file=sys.stderr)
        print("Output: " + process.stdout)
    return process.stdout
# Tests that the given two paths are identical, modulo path delimiters. E.g. "C:/foo" is equal to "C:\foo".
def assertPathsIdentical(self, path1, path2):
    normalized1 = path1.replace('\\', '/')
    normalized2 = path2.replace('\\', '/')
    return self.assertIdentical(normalized1, normalized2)
# Tests that the given two multiline text content are identical, modulo line ending differences (\r\n on Windows, \n on Unix).
def assertTextDataIdentical(self, text1, text2):
    unix1 = text1.replace('\r\n', '\n')
    unix2 = text2.replace('\r\n', '\n')
    return self.assertIdentical(unix1, unix2)
def assertIdentical(self, values, y):
    """Assert that `y` equals `values` (or one element of it, if a list/tuple)."""
    if type(values) not in [list, tuple]:
        values = [values]
    for candidate in values:
        if candidate == y:
            return  # success
    # no candidate matched: raise with a unified diff of the last candidate
    diff = ''.join(a.rstrip() + '\n' for a in difflib.unified_diff(
        values[-1].split('\n'), y.split('\n'), fromfile='expected', tofile='actual'))
    raise Exception("Expected to have '%s' == '%s', diff:\n\n%s" % (
        limit_size(values[0]), limit_size(y), limit_size(diff)))
def assertTextDataContained(self, text1, text2):
    # normalize Windows CRLF line endings before the containment check
    unix1 = text1.replace('\r\n', '\n')
    unix2 = text2.replace('\r\n', '\n')
    return self.assertContained(unix1, unix2)
def assertContained(self, values, string, additional_info=''):
    """Assert that at least one of `values` occurs as a substring of `string`."""
    if type(values) not in [list, tuple]:
        values = [values]
    values = list(map(asstr, values))
    if callable(string):
        string = string()  # lazily-produced haystack
    for value in values:
        if value in string:
            return  # success
    diff = ''.join(a.rstrip() + '\n' for a in difflib.unified_diff(
        values[0].split('\n'), string.split('\n'), fromfile='expected', tofile='actual'))
    raise Exception("Expected to find '%s' in '%s', diff:\n\n%s\n%s" % (
        limit_size(values[0]), limit_size(string), limit_size(diff),
        additional_info))
def assertNotContained(self, value, string):
    """Assert that `value` does NOT occur as a substring of `string`."""
    if callable(value):
        value = value()  # lazy loading
    if callable(string):
        string = string()
    if value not in string:
        return
    diff = ''.join(a.rstrip() + '\n' for a in difflib.unified_diff(
        value.split('\n'), string.split('\n'), fromfile='expected', tofile='actual'))
    raise Exception("Expected to NOT find '%s' in '%s', diff:\n\n%s" % (
        limit_size(value), limit_size(string), limit_size(diff)))
library_cache = {}
def get_build_dir(self):
    """Return (creating it if needed) the 'building' subdirectory of the test dir."""
    build_dir = os.path.join(self.get_dir(), 'building')
    if not os.path.exists(build_dir):
        os.makedirs(build_dir)
    return build_dir
def get_library(self, name, generated_libs, configure=['sh', './configure'], configure_args=[], make=['make'], make_args='help', cache=True, env_init={}, cache_name_extra='', native=False):
    """Build (or fetch from the in-memory cache) a third-party test library."""
    # 'help' is a sentinel for "use the default parallel make invocation"
    if make_args == 'help':
        make_args = ['-j', str(multiprocessing.cpu_count())]
    build_dir = self.get_build_dir()
    output_dir = self.get_dir()
    # cache key: library name + short compiler test opts + a hash of all of them,
    # sanitized down to filesystem-safe characters
    opts_tag = ','.join(opt for opt in Building.COMPILER_TEST_OPTS if len(opt) < 10)
    opts_hash = hashlib.md5(str(Building.COMPILER_TEST_OPTS).encode('utf-8')).hexdigest()
    cache_name = name + opts_tag + '_' + opts_hash + cache_name_extra
    valid_chars = "_%s%s" % (string.ascii_letters, string.digits)
    cache_name = ''.join(c if c in valid_chars else '_' for c in cache_name)
    if self.library_cache is not None:
        if cache and self.library_cache.get(cache_name):
            # cache hit: rewrite the cached bitcode files into the build dir
            print('<load %s from cache> ' % cache_name, file=sys.stderr)
            generated_libs = []
            for basename, contents in self.library_cache[cache_name]:
                bc_file = os.path.join(build_dir, cache_name + '_' + basename)
                with open(bc_file, 'wb') as f:
                    f.write(contents)
                generated_libs.append(bc_file)
            return generated_libs
        print('<building and saving %s into cache> ' % cache_name, file=sys.stderr)
    return Building.build_library(name, build_dir, output_dir, generated_libs, configure, configure_args, make, make_args, self.library_cache, cache_name,
                                  copy_project=True, env_init=env_init, native=native)
def clear(self, in_curr=False):
    """Delete everything in the test dir (and the emscripten temp dir when EMCC_DEBUG)."""
    for name in os.listdir(self.get_dir()):
        try_delete(name if in_curr else os.path.join(self.get_dir(), name))
    emcc_debug = os.environ.get('EMCC_DEBUG')
    if emcc_debug and not in_curr and EMSCRIPTEN_TEMP_DIR:
        for name in os.listdir(EMSCRIPTEN_TEMP_DIR):
            try_delete(os.path.join(EMSCRIPTEN_TEMP_DIR, name))
# Shared test code between main suite and others
def setup_runtimelink_test(self):
    """Write header.h and supp.cpp into the test dir; return (main, supp) sources."""
    header = r'''
struct point
{
int x, y;
};
'''
    with open(os.path.join(self.get_dir(), 'header.h'), 'w') as f:
        f.write(header)
    supp = r'''
#include <stdio.h>
#include "header.h"
extern void mainFunc(int x);
extern int mainInt;
void suppFunc(struct point &p) {
printf("supp: %d,%d\n", p.x, p.y);
mainFunc(p.x+p.y);
printf("supp see: %d\n", mainInt);
}
int suppInt = 76;
'''
    with open(os.path.join(self.get_dir(), 'supp.cpp'), 'w') as f:
        f.write(supp)
    main = r'''
#include <stdio.h>
#include "header.h"
extern void suppFunc(struct point &p);
extern int suppInt;
void mainFunc(int x) {
printf("main: %d\n", x);
}
int mainInt = 543;
int main( int argc, const char *argv[] ) {
struct point p = { 54, 2 };
suppFunc(p);
printf("main see: %d\nok.\n", suppInt);
#ifdef BROWSER
REPORT_RESULT(suppInt);
#endif
return 0;
}
'''
    return (main, supp)
def filtered_js_engines(self, js_engines=None):
    """Return `js_engines` (default: all JS_ENGINES) minus any banned engines."""
    if js_engines is None:
        js_engines = JS_ENGINES
    for engine in js_engines:
        assert type(engine) == list
    for engine in self.banned_js_engines:
        assert type(engine) == list
    # engines are compared by their binary path (element 0)
    banned_binaries = [banned[0] for banned in self.banned_js_engines]
    return [engine for engine in js_engines if engine[0] not in banned_binaries]
def do_run_from_file(self, src, expected_output, *args, **kwargs):
    """do_run, with source and expected output read from files.

    Uses context managers so the file handles are closed promptly
    (the original `open(...).read()` calls leaked them).
    """
    with open(src) as src_f:
        source = src_f.read()
    with open(expected_output) as expected_f:
        expected = expected_f.read()
    self.do_run(source, expected, *args, **kwargs)
## Does a complete test - builds, runs, checks output, etc.
def do_run(self, src, expected_output, args=[], output_nicerizer=None,
           output_processor=None, no_build=False, main_file=None, additional_files=[],
           js_engines=None, post_build=None, basename='src.cpp', libraries=[],
           includes=[], force_c=False, build_ll_hook=None,
           assert_returncode=None, assert_identical=False):
    """Build `src`, run it in the selected JS engines, and check the output."""
    if Settings.ASYNCIFY == 1 and self.is_wasm_backend():
        self.skipTest("wasm backend doesn't support ASYNCIFY yet")
    # detect a C (rather than C++) main file and switch to the C compiler
    if force_c or (main_file is not None and main_file[-2:] == '.c'):
        basename = 'src.c'
        Building.COMPILER = to_cc(Building.COMPILER)
    dirname = self.get_dir()
    filename = os.path.join(dirname, basename)
    if not no_build:
        self.build(src, dirname, filename, main_file=main_file,
                   additional_files=additional_files, libraries=libraries,
                   includes=includes, build_ll_hook=build_ll_hook,
                   post_build=post_build)
    # Run in both JavaScript engines, if optimizing - significant differences there (typed arrays)
    js_engines = self.filtered_js_engines(js_engines)
    js_file = filename + '.o.js'
    if len(js_engines) == 0:
        self.skipTest('No JS engine present to run this test with. Check %s and the paths therein.' % EM_CONFIG)
    if len(js_engines) > 1 and not self.use_all_engines:
        if SPIDERMONKEY_ENGINE in js_engines:
            # make sure to get asm.js validation checks, using sm
            js_engines = [SPIDERMONKEY_ENGINE]
        else:
            js_engines = js_engines[:1]
    for engine in js_engines:
        js_output = self.run_generated_code(engine, js_file, args,
                                            output_nicerizer=output_nicerizer,
                                            assert_returncode=assert_returncode)
        js_output = js_output.replace('\r\n', '\n')
        try:
            if assert_identical:
                self.assertIdentical(expected_output, js_output)
            else:
                self.assertContained(expected_output, js_output)
                self.assertNotContained('ERROR', js_output)
        except Exception as e:
            print('(test did not pass in JS engine: %s)' % engine)
            raise e
    # TODO: leave no trace in memory. But for now nice for debugging
    if self.save_JS:
        global test_index
        self.hardcode_arguments(js_file, args)
        shutil.copyfile(js_file, os.path.join(TEMP_DIR, str(test_index) + '.js'))
        test_index += 1
# No building - just process an existing .ll file (or .bc, which we turn into .ll)
def do_ll_run(self, ll_file, expected_output=None, args=[], js_engines=None,
              output_nicerizer=None, post_build=None, force_recompile=False,
              build_ll_hook=None, assert_returncode=None):
    """Compile an existing .ll/.bc file to JS and check its output via do_run."""
    filename = os.path.join(self.get_dir(), 'src.cpp')
    self.prep_ll_run(filename, ll_file, force_recompile, build_ll_hook)
    self.ll_to_js(filename, post_build)
    # post_build was already done in ll_to_js; this do_run call only tests the output
    self.do_run(None, expected_output, args,
                no_build=True,
                js_engines=js_engines,
                output_nicerizer=output_nicerizer,
                post_build=None,
                assert_returncode=assert_returncode)
# Run a server and a web page. When a test runs, we tell the server about it,
# which tells the web page, which then opens a window with the test. Doing
# it this way then allows the page to close() itself when done.
def harness_server_func(q, port):
    class TestServerHandler(BaseHTTPRequestHandler):
        def do_GET(s):
            s.send_response(200)
            s.send_header("Content-type", "text/html")
            s.end_headers()
            if s.path == '/run_harness':
                # serve the harness page itself
                with open(path_from_root('tests', 'browser_harness.html'), 'rb') as f:
                    s.wfile.write(f.read())
            else:
                # otherwise report the next queued test URL, or 'False' if none
                result = q.get() if not q.empty() else b'False'
                s.wfile.write(result)

        def log_request(code=0, size=0):
            # don't log; too noisy
            pass

    httpd = HTTPServer(('localhost', port), TestServerHandler)
    httpd.serve_forever()  # test runner will kill us
def server_func(dir, q, port):
    """Serve files from `dir`, reporting any 'report_...' requests into queue `q`."""
    class TestServerHandler(SimpleHTTPRequestHandler):
        def do_GET(self):
            if 'report_' in self.path:
                # a test result came in: hand it to the runner and acknowledge
                print('[server response:', self.path, ']')
                q.put(self.path)
                # Send a default OK response to the browser.
                self.send_response(200)
                self.send_header("Content-type", "text/plain")
                self.send_header('Cache-Control', 'no-cache, must-revalidate')
                self.send_header('Connection', 'close')
                self.send_header('Expires', '-1')
                self.end_headers()
                self.wfile.write(b'OK')
            else:
                # Use SimpleHTTPServer default file serving operation for GET.
                SimpleHTTPRequestHandler.do_GET(self)

        def log_request(code=0, size=0):
            # don't log; too noisy
            pass

    SimpleHTTPRequestHandler.extensions_map['.wasm'] = 'application/wasm'
    os.chdir(dir)
    httpd = HTTPServer(('localhost', port), TestServerHandler)
    httpd.serve_forever()  # test runner will kill us
class BrowserCore(RunnerCore):
    """RunnerCore specialization that drives tests inside a real web browser."""

    def __init__(self, *args, **kwargs):
        super(BrowserCore, self).__init__(*args, **kwargs)
@classmethod
def setUpClass(self):
    """Start the browser harness server (once per class) and open the browser."""
    super(BrowserCore, self).setUpClass()
    self.also_asmjs = os.environ.get('EMCC_BROWSER_ALSO_ASMJS', '0') == '1'
    self.test_port = int(os.environ.get('EMCC_BROWSER_TEST_PORT', '8888'))
    self.harness_port = int(os.environ.get('EMCC_BROWSER_HARNESS_PORT', '9999'))
    if not has_browser():
        return
    self.browser_timeout = 30
    # spawn the harness web server in a child process, then point a browser at it
    self.harness_queue = multiprocessing.Queue()
    self.harness_server = multiprocessing.Process(target=harness_server_func, args=(self.harness_queue, self.harness_port))
    self.harness_server.start()
    print('[Browser harness server on process %d]' % self.harness_server.pid)
    webbrowser.open_new('http://localhost:%s/run_harness' % self.harness_port)
@classmethod
def tearDownClass(self):
    """Shut down the harness server started by setUpClass."""
    super(BrowserCore, self).tearDownClass()
    if not has_browser():
        return
    self.harness_server.terminate()
    print('[Browser harness server terminated]')
    if WINDOWS:
        # On Windows, shutil.rmtree() in tearDown() raises this exception if we do not wait a bit:
        # WindowsError: [Error 32] The process cannot access the file because it is being used by another process.
        time.sleep(0.1)
def run_browser(self, html_file, message, expectedResult=None, timeout=None):
    """Open `html_file` in the browser and (optionally) wait for its reported result.

    When `expectedResult` is given, a local HTTP server is spawned to receive
    the page's report; otherwise the page is opened for manual inspection.
    """
    if not has_browser():
        return
    print('[browser launch:', html_file, ']')
    if expectedResult is not None:
        try:
            result_queue = multiprocessing.Queue()
            server = multiprocessing.Process(target=functools.partial(server_func, self.get_dir()), args=(result_queue, self.test_port))
            server.start()
            # Starting the web page server above is an asynchronous procedure, so before we tell the browser below to navigate to
            # the test page, we need to know that the server has started up and is ready to process the site navigation.
            # Therefore block until we can make a connection to the server.
            for attempt in range(10):
                httpconn = HTTPConnection('localhost:%s' % self.test_port, timeout=1)
                try:
                    httpconn.connect()
                    httpconn.close()
                    break
                except OSError:  # narrowed from a bare except: connect failures are socket errors
                    time.sleep(1)
            else:
                raise Exception('[Test harness server failed to start up in a timely manner]')
            self.harness_queue.put(asbytes('http://localhost:%s/%s' % (self.test_port, html_file)))
            output = '[no http server activity]'
            start = time.time()
            if timeout is None:
                timeout = self.browser_timeout
            # poll for the page's reported result until the timeout expires
            while time.time() - start < timeout:
                if not result_queue.empty():
                    output = result_queue.get()
                    break
                time.sleep(0.1)
            if output.startswith('/report_result?skipped:'):
                self.skipTest(unquote(output[len('/report_result?skipped:'):]).strip())
            else:
                self.assertIdentical(expectedResult, output)
        finally:
            server.terminate()
            time.sleep(0.1)  # see comment about Windows above
    else:
        webbrowser.open_new(os.path.abspath(html_file))
        print('A web browser window should have opened a page containing the results of a part of this test.')
        print('You need to manually look at the page to see that it works ok: ' + message)
        print('(sleeping for a bit to keep the directory alive for the web browser..)')
        time.sleep(5)
        print('(moving on..)')
def with_report_result(self, code):
    """Prepend the REPORT_RESULT harness-reporting preamble to C/C++ source `code`."""
    preamble = r'''
#ifdef __EMSCRIPTEN__
#ifndef __REPORT_RESULT_DEFINED__
#define __REPORT_RESULT_DEFINED__
#include <emscripten.h>
static void EMSCRIPTEN_KEEPALIVE _ReportResult(int result, int sync)
{
EM_ASM({
var xhr = new XMLHttpRequest();
var result = $0;
if (Module['pageThrewException']) result = 12345;
xhr.open('GET', 'http://localhost:%s/report_result?' + result, !$1);
xhr.send();
if (!Module['pageThrewException'] /* for easy debugging, don't close window on failure */) setTimeout(function() { window.close() }, 1000);
}, result, sync);
}
#if __EMSCRIPTEN_PTHREADS__
#include <emscripten/threading.h>
#define REPORT_RESULT(result) emscripten_async_run_in_main_runtime_thread(EM_FUNC_SIG_VII, _ReportResult, (result), 0)
#define REPORT_RESULT_SYNC(result) emscripten_sync_run_in_main_runtime_thread(EM_FUNC_SIG_VII, _ReportResult, (result), 1)
#else
#define REPORT_RESULT(result) _ReportResult((result), 0)
#define REPORT_RESULT_SYNC(result) _ReportResult((result), 1)
#endif
#endif // ~__REPORT_RESULT_DEFINED__
#endif
''' % self.test_port
    return preamble + code
def reftest(self, expected):
    """Write reftest.js, which compares canvas output against the image `expected`."""
    # make sure the pngs used here have no color correction, using e.g.
    # pngcrush -rem gAMA -rem cHRM -rem iCCP -rem sRGB infile outfile
    basename = os.path.basename(expected)
    shutil.copyfile(expected, os.path.join(self.get_dir(), basename))
    reftest_js = '''
function doReftest() {
if (doReftest.done) return;
doReftest.done = true;
var img = new Image();
img.onload = function() {
assert(img.width == Module.canvas.width, 'Invalid width: ' + Module.canvas.width + ', should be ' + img.width);
assert(img.height == Module.canvas.height, 'Invalid height: ' + Module.canvas.height + ', should be ' + img.height);
var canvas = document.createElement('canvas');
canvas.width = img.width;
canvas.height = img.height;
var ctx = canvas.getContext('2d');
ctx.drawImage(img, 0, 0);
var expected = ctx.getImageData(0, 0, img.width, img.height).data;
var actualUrl = Module.canvas.toDataURL();
var actualImage = new Image();
actualImage.onload = function() {
/*
document.body.appendChild(img); // for comparisons
var div = document.createElement('div');
div.innerHTML = '^=expected, v=actual';
document.body.appendChild(div);
document.body.appendChild(actualImage); // to grab it for creating the test reference
*/
var actualCanvas = document.createElement('canvas');
actualCanvas.width = actualImage.width;
actualCanvas.height = actualImage.height;
var actualCtx = actualCanvas.getContext('2d');
actualCtx.drawImage(actualImage, 0, 0);
var actual = actualCtx.getImageData(0, 0, actualImage.width, actualImage.height).data;
var total = 0;
var width = img.width;
var height = img.height;
for (var x = 0; x < width; x++) {
for (var y = 0; y < height; y++) {
total += Math.abs(expected[y*width*4 + x*4 + 0] - actual[y*width*4 + x*4 + 0]);
total += Math.abs(expected[y*width*4 + x*4 + 1] - actual[y*width*4 + x*4 + 1]);
total += Math.abs(expected[y*width*4 + x*4 + 2] - actual[y*width*4 + x*4 + 2]);
}
}
var wrong = Math.floor(total / (img.width*img.height*3)); // floor, to allow some margin of error for antialiasing
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + wrong);
xhr.send();
if (wrong < 10 /* for easy debugging, don't close window on failure */) setTimeout(function() { window.close() }, 1000);
};
actualImage.src = actualUrl;
}
img.src = '%s';
};
Module['postRun'] = doReftest;
if (typeof WebGLClient !== 'undefined') {
// trigger reftest from RAF as well, needed for workers where there is no pre|postRun on the main thread
var realRAF = window.requestAnimationFrame;
window.requestAnimationFrame = function(func) {
realRAF(function() {
func();
realRAF(doReftest);
});
};
// trigger reftest from canvas render too, for workers not doing GL
var realWOM = worker.onmessage;
worker.onmessage = function(event) {
realWOM(event);
if (event.data.target === 'canvas' && event.data.op === 'render') {
realRAF(doReftest);
}
};
}
''' % (self.test_port, basename)
    with open(os.path.join(self.get_dir(), 'reftest.js'), 'w') as out:
        out.write(reftest_js)
def btest(self, filename, expected=None, reference=None, force_c=False, reference_slack=0, manual_reference=False, post_build=None,
          args=[], outfile='test.html', message='.', also_proxied=False, url_suffix='', timeout=None):
    """Compile a browser test and run it, checking the result it reports."""
    # if we are provided the source and not a path, use that
    filename_is_src = '\n' in filename
    src = filename if filename_is_src else ''
    filepath = path_from_root('tests', filename) if not filename_is_src else ('main.c' if force_c else 'main.cpp')
    temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
    original_args = args[:]
    if 'USE_PTHREADS=1' in args or 'USE_PTHREADS=2' in args:
        if os.environ.get('EMCC_TEST_WASM_PTHREADS', '0') != '1':
            # Browsers currently have wasm threads off by default, so don't test them unless explicitly enabled.
            args = args + ['-s', 'WASM=0']
        if 'WASM=0' not in args:
            # Filter out separate-asm, which is implied by wasm
            args = [a for a in args if a != '--separate-asm']
            # wasm doesn't support USE_PTHREADS=2
            args = ['USE_PTHREADS=1' if a == 'USE_PTHREADS=2' else a for a in args]
    if filename_is_src:
        with open(temp_filepath, 'w') as f:
            f.write(src)
    if not reference:
        if not src:
            with open(filepath) as f:
                src = f.read()
        with open(temp_filepath, 'w') as f:
            f.write(self.with_report_result(src))
    else:
        # reference-image (reftest) mode: any result within the slack passes
        self.reference = reference
        expected = [str(i) for i in range(0, reference_slack + 1)]
        shutil.copyfile(filepath, temp_filepath)
        self.reftest(path_from_root('tests', reference))
        if not manual_reference:
            args = args + ['--pre-js', 'reftest.js', '-s', 'GL_TESTING=1']
    all_args = [PYTHON, EMCC, '-s', 'IN_TEST_HARNESS=1', temp_filepath, '-o', outfile] + args
    try_delete(outfile)
    Popen(all_args).communicate()
    assert os.path.exists(outfile)
    if post_build:
        post_build()
    if not isinstance(expected, list):
        expected = [expected]
    self.run_browser(outfile + url_suffix, message, ['/report_result?' + e for e in expected], timeout=timeout)
    # also test the asm.js fallback, when requested
    if self.also_asmjs and 'WASM=0' not in args:
        self.btest(filename, expected, reference, force_c, reference_slack, manual_reference, post_build,
                   args + ['-s', 'WASM=0'], outfile, message, also_proxied=False, timeout=timeout)
    if also_proxied:
        print('proxied...')
        if reference:
            assert not manual_reference
            manual_reference = True
            assert not post_build
            post_build = self.post_manual_reftest
        # run proxied
        self.btest(filename, expected, reference, force_c, reference_slack, manual_reference, post_build,
                   original_args + ['--proxy-to-worker', '-s', 'GL_TESTING=1'], outfile, message, timeout=timeout)
###################################################################################################
def get_zlib_library(runner_core):
    """Build/fetch zlib via the shared library cache (cmake on Windows, make elsewhere)."""
    if WINDOWS:
        return runner_core.get_library('zlib', os.path.join('libz.a'),
                                       configure=[path_from_root('emconfigure.bat')],
                                       configure_args=['cmake', '.', '-DBUILD_SHARED_LIBS=OFF'],
                                       make=['mingw32-make'],
                                       make_args=[])
    return runner_core.get_library('zlib', os.path.join('libz.a'), make_args=['libz.a'])
# Both test_core and test_other access the Bullet library, share the access here to avoid duplication.
def get_bullet_library(runner_core, use_cmake):
    """Build/fetch Bullet, via cmake or autotools depending on `use_cmake`."""
    lib_names = ['BulletDynamics', 'BulletCollision', 'LinearMath']
    if use_cmake:
        configure_commands = ['cmake', '.']
        configure_args = ['-DBUILD_DEMOS=OFF', '-DBUILD_EXTRAS=OFF', '-DUSE_GLUT=OFF']
        # Depending on whether 'configure' or 'cmake' is used to build, Bullet places output files in different directory structures.
        generated_libs = [os.path.join('src', name, 'lib%s.a' % name) for name in lib_names]
    else:
        configure_commands = ['sh', './configure']
        # Force a nondefault --host= so that the configure script will interpret that we are doing cross-compilation
        # and skip attempting to run the generated executable with './a.out', which would fail since we are building a .js file.
        configure_args = ['--host=i686-pc-linux-gnu', '--disable-demos', '--disable-dependency-tracking']
        generated_libs = [os.path.join('src', '.libs', 'lib%s.a' % name) for name in lib_names]
    return runner_core.get_library('bullet', generated_libs,
                                   configure=configure_commands,
                                   configure_args=configure_args,
                                   cache_name_extra=configure_commands[0])
def main(args):
    """Entry point: normalize the argument list, discover tests, and run them."""
    print_help_if_args_empty(args)
    args = get_default_args(args)
    print_js_engine_message()
    sanity_checks()
    # successive passes rewrite raw argv into a concrete list of test names
    args = args_with_extracted_js_engine_override(args)
    args = args_with_default_suite_prepended(args)
    args = args_with_expanded_all_suite(args)
    modules = get_and_import_modules()
    all_tests = get_all_tests(modules)
    args = args_with_expanded_wildcards(args, all_tests)
    args = skip_requested_tests(args, modules)
    args = args_for_random_tests(args, modules)
    suites, unmatched_tests = load_test_suites(args, modules)
    return run_tests(suites, unmatched_tests)
def print_help_if_args_empty(args):
    """Print usage and exit when the only argument is --help/-h."""
    if len(args) == 2 and args[1] in ('--help', '-h'):
        print(HELP_TEXT)
        sys.exit(0)
def get_default_args(args):
    # If no tests were specified, run the core suite
    if len(args) == 1:
        print(HELP_TEXT)
        time.sleep(2)
        return [args[0]] + list(test_modes)
    return args
def print_js_engine_message():
    """Tell the user whether all JS engines will be used, and how to enable that."""
    if use_all_engines:
        print('(using ALL js engines)')
    else:
        logger.warning('use EM_ALL_ENGINES=1 in the env to run against all JS engines, which is slower but provides more coverage')
def sanity_checks():
    """Filter JS_ENGINES down to those that actually work, warning about the rest."""
    global JS_ENGINES
    total_engines = len(JS_ENGINES)
    JS_ENGINES = [engine for engine in JS_ENGINES if jsrun.check_engine(engine)]
    if not JS_ENGINES:
        print('WARNING: None of the JS engines in JS_ENGINES appears to work.')
    elif len(JS_ENGINES) < total_engines:
        print('WARNING: Not all the JS engines in JS_ENGINES appears to work, ignoring those.')
def args_with_extracted_js_engine_override(args):
    """Pull an all-caps JS engine override (used by benchmarks) out of the args."""
    for i in range(1, len(args)):
        arg = args[i]
        if arg.isupper():
            print('Interpreting all capital argument "%s" as JS_ENGINE override' % arg)
            # NOTE: eval of a command-line string; tolerable only because this
            # is a developer-run test harness, not untrusted input
            Building.JS_ENGINE_OVERRIDE = eval(arg)
            args[i] = None
    return [arg for arg in args if arg is not None]
def args_with_default_suite_prepended(args):
    """Qualify bare 'test_*' names with the 'default' suite."""
    return ['default.' + arg if arg.startswith('test_') else arg for arg in args]
def args_with_expanded_all_suite(args):
    # If a test (e.g. test_html) is specified as ALL.test_html, add an entry for each test_mode
    new_args = [args[0]]
    for arg in args[1:]:
        if arg.startswith('ALL.'):
            ignore, test = arg.split('.')
            print('Running all test modes on test "%s"' % test)
            new_args.extend(mode + '.' + test for mode in test_modes)
        else:
            new_args.append(arg)
    return new_args
def get_and_import_modules():
    """Import every tests/test*.py module alongside this file; return the modules."""
    modules = []
    for filename in glob.glob(os.path.join(os.path.dirname(__file__), 'test*.py')):
        module_name = os.path.splitext(os.path.basename(filename))[0]
        __import__(module_name)
        modules.append(sys.modules[module_name])
    return modules
def get_all_tests(modules):
    # Create a list of all known tests so that we can choose from them based on a wildcard search
    all_tests = []
    suites = test_modes + nondefault_test_modes + \
        ['other', 'browser', 'sanity', 'sockets', 'interactive']
    for m in modules:
        for s in suites:
            if hasattr(m, s):
                all_tests += [s + '.' + t for t in dir(getattr(m, s)) if t.startswith('test_')]
    return all_tests
def args_with_expanded_wildcards(args, all_tests):
    # Process wildcards, e.g. "browser.test_pthread_*" should expand to list all pthread tests
    new_args = [args[0]]
    for arg in args[1:]:
        if '*' not in arg:
            new_args.append(arg)
        elif arg.startswith('skip:'):
            # keep the skip: prefix on every expanded name
            pattern = arg[len('skip:'):]
            new_args.extend('skip:' + t for t in fnmatch.filter(all_tests, pattern))
        else:
            new_args.extend(fnmatch.filter(all_tests, arg))
    if len(new_args) == 1 and len(args) > 1:
        print('No tests found to run in set ' + str(args[1:]))
        sys.exit(0)
    return new_args
def skip_requested_tests(args, modules):
    """Handle 'skip:SUITE.TEST' args by patching the named tests to skip themselves."""
    for i in range(len(args)):
        arg = args[i]
        if not arg.startswith('skip:'):
            continue
        which = arg.split('skip:')[1]
        if which.startswith('ALL.'):
            # skip the test in every test mode
            ignore, test = which.split('.')
            which = [mode + '.' + test for mode in test_modes]
        else:
            which = [which]
        print(','.join(which), file=sys.stderr)
        for test in which:
            print('will skip "%s"' % test, file=sys.stderr)
            suite_name, test_name = test.split('.')
            for m in modules:
                try:
                    suite = getattr(m, suite_name)
                    setattr(suite, test_name, lambda s: s.skipTest("requested to be skipped"))
                    break
                except AttributeError:
                    # narrowed from a bare except: this module simply doesn't
                    # have that suite; try the next one
                    pass
        args[i] = None
    return [arg for arg in args if arg is not None]
def args_for_random_tests(args, modules):
    """If the first arg is 'random[N]', replace it with N randomly chosen tests."""
    if len(args) <= 1:
        return args
    first = args[1]
    if not first.startswith('random'):
        return args
    random_arg = first[len('random'):]
    num_tests, base_module, relevant_modes = get_random_test_parameters(random_arg)
    for m in modules:
        if hasattr(m, base_module):
            base = getattr(m, base_module)
            new_args = [args[0]] + choose_random_tests(base, num_tests, relevant_modes)
            print_random_test_statistics(num_tests)
            return new_args
    return args
def get_random_test_parameters(arg):
    """Parse the suffix of a 'random*' argument into (count, base module, modes)."""
    base_module = 'default'
    relevant_modes = test_modes
    if not len(arg):
        return 1, base_module, relevant_modes
    num_str = arg
    for prefix in ('other', 'browser'):
        if arg.startswith(prefix):
            base_module = prefix
            relevant_modes = [prefix]
            num_str = arg.replace(prefix, '')
            break
    return int(num_str), base_module, relevant_modes
def choose_random_tests(base, num_tests, relevant_modes):
    """Randomly pick `num_tests` distinct mode.test combinations from `base`."""
    tests = [t for t in dir(base) if t.startswith('test_')]
    print()
    chosen = set()
    while len(chosen) < num_tests:
        test = random.choice(tests)
        mode = random.choice(relevant_modes)
        new_test = mode + '.' + test
        if new_test not in chosen:
            chosen.add(new_test)
            print('* ' + new_test)
        # we may have hit the limit of possible combinations
        elif len(chosen) == len(tests) * len(relevant_modes):
            print('(all possible tests chosen! %d = %d*%d)' % (len(chosen), len(tests), len(relevant_modes)))
            break
    return list(chosen)
def print_random_test_statistics(num_tests):
    """Print the statistical confidence implied by a passing random sample."""
    # one-sided confidence margin for a sample of num_tests
    std = 0.5 / math.sqrt(num_tests)
    expected = 100.0 * (1.0 - std)
    print()
    print('running those %d randomly-selected tests. if they all pass, then there is a '
          'greater than 95%% chance that at least %.2f%% of the test suite will pass'
          % (num_tests, expected))
    print()

    def show():
        print('if all tests passed then there is a greater than 95%% chance that at least '
              '%.2f%% of the test suite will pass'
              % (expected))
    # repeat the summary when the process exits, after all test output
    atexit.register(show)
def load_test_suites(args, modules):
    """Resolve requested test names against modules; return (suites, unmatched names)."""
    loader = unittest.TestLoader()
    unmatched_test_names = set(args[1:])
    suites = []
    for m in modules:
        names_in_module = []
        for name in list(unmatched_test_names):
            # attrgetter handles dotted names like 'suite.test_foo'
            try:
                operator.attrgetter(name)(m)
            except AttributeError:
                continue
            names_in_module.append(name)
            unmatched_test_names.remove(name)
        if names_in_module:
            loaded_tests = loader.loadTestsFromNames(sorted(names_in_module), m)
            tests = flattened_tests(loaded_tests)
            suite = suite_for_module(m, tests)
            for test in tests:
                suite.addTest(test)
            suites.append((m.__name__, suite))
    return suites, unmatched_test_names
def flattened_tests(loaded_tests):
    """Flatten unittest's nested suite-of-suites structure into a flat list."""
    return [test for subsuite in loaded_tests for test in subsuite]
def suite_for_module(module, tests):
    """Pick a parallel suite when the module supports it and it would actually help."""
    suite_supported = module.__name__ in ('test_core', 'test_other')
    if suite_supported and len(tests) > 1 and parallel_runner.num_cores() > 1:
        return parallel_runner.ParallelTestSuite()
    return unittest.TestSuite()
def run_tests(suites, unmatched_test_names):
    """Run each suite, print a summary, and return the failure count (capped at 255)."""
    resultMessages = []
    num_failures = 0
    if unmatched_test_names:
        print('WARNING: could not find the following tests: ' + ' '.join(unmatched_test_names))
        num_failures += len(unmatched_test_names)
        resultMessages.append('Could not find %s tests' % (len(unmatched_test_names),))
    print('Test suites:')
    print([s[0] for s in suites])
    # Run the discovered tests
    testRunner = unittest.TextTestRunner(verbosity=2)
    for mod_name, suite in suites:
        print('Running %s: (%s tests)' % (mod_name, suite.countTestCases()))
        res = testRunner.run(suite)
        msg = '%s: %s run, %s errors, %s failures, %s skipped' % (
            mod_name, res.testsRun, len(res.errors), len(res.failures), len(res.skipped))
        num_failures += len(res.errors) + len(res.failures)
        resultMessages.append(msg)
    if len(resultMessages) > 1:
        print('====================')
        print()
        print('TEST SUMMARY')
        for msg in resultMessages:
            print(' ' + msg)
    # Return the number of failures as the process exit code for automating success/failure reporting.
    return min(num_failures, 255)
if __name__ == '__main__':
    try:
        sys.exit(main(sys.argv))
    except KeyboardInterrupt:
        # exit quietly with a failure code on Ctrl-C
        logger.warning('KeyboardInterrupt')
        sys.exit(1)
| 39.932413 | 196 | 0.655923 |
d565ce4a030fb49a412cc4cfd051d5b4c49474f9 | 6,593 | py | Python | gishaku/features/management.py | windowsboy111/jishaku | 870aef6ff674ed1b5e52c030ee21f235290391e8 | [
"MIT"
] | null | null | null | gishaku/features/management.py | windowsboy111/jishaku | 870aef6ff674ed1b5e52c030ee21f235290391e8 | [
"MIT"
] | null | null | null | gishaku/features/management.py | windowsboy111/jishaku | 870aef6ff674ed1b5e52c030ee21f235290391e8 | [
"MIT"
] | 1 | 2021-09-01T14:36:02.000Z | 2021-09-01T14:36:02.000Z | # -*- coding: utf-8 -*-
"""
gishaku.features.management
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The gishaku extension and bot control commands.
:copyright: (c) 2021 Devon (Gorialis) R
:license: MIT, see LICENSE for more details.
"""
import itertools
import math
import time
import traceback
from urllib.parse import urlencode
import guilded
from guilded.ext import commands
from gishaku.features.baseclass import Feature
from gishaku.flags import Flags
from gishaku.modules import ExtensionConverter
from gishaku.paginators import WrappedPaginator
class ManagementFeature(Feature):
    """
    Feature containing the extension and bot control commands
    """
    @Feature.Command(parent="gsk", name="load", aliases=["reload"])
    async def gsk_load(self, ctx, *extensions: ExtensionConverter):
        """
        Loads or reloads the given extension names.
        Reports any extensions that failed to load.
        """
        paginator = WrappedPaginator(prefix='', suffix='')
        # 'gsk reload' on its own just reloads gishaku
        if ctx.invoked_with == 'reload' and not extensions:
            extensions = [['gishaku']]
        for extension in itertools.chain(*extensions):
            # Reload if already loaded (circular-arrows icon), otherwise load (inbox icon).
            method, icon = (
                (self.bot.reload_extension, "\N{CLOCKWISE RIGHTWARDS AND LEFTWARDS OPEN CIRCLE ARROWS}")
                if extension in self.bot.extensions else
                (self.bot.load_extension, "\N{INBOX TRAY}")
            )
            try:
                method(extension)
            except Exception as exc:  # pylint: disable=broad-except
                # Keep only the innermost frame of the traceback (limit=1).
                traceback_data = ''.join(traceback.format_exception(type(exc), exc, exc.__traceback__, 1))
                paginator.add_line(
                    f"{icon}\N{WARNING SIGN} `{extension}`\n```py\n{traceback_data}\n```",
                    empty=True
                )
            else:
                paginator.add_line(f"{icon} `{extension}`", empty=True)
        for page in paginator.pages:
            await ctx.send(page)
    @Feature.Command(parent="gsk", name="unload")
    async def gsk_unload(self, ctx, *extensions: ExtensionConverter):
        """
        Unloads the given extension names.
        Reports any extensions that failed to unload.
        """
        paginator = WrappedPaginator(prefix='', suffix='')
        icon = "\N{OUTBOX TRAY}"
        for extension in itertools.chain(*extensions):
            try:
                self.bot.unload_extension(extension)
            except Exception as exc:  # pylint: disable=broad-except
                # Same truncated-traceback formatting as gsk_load above.
                traceback_data = "".join(traceback.format_exception(type(exc), exc, exc.__traceback__, 1))
                paginator.add_line(
                    f"{icon}\N{WARNING SIGN} `{extension}`\n```py\n{traceback_data}\n```",
                    empty=True
                )
            else:
                paginator.add_line(f"{icon} `{extension}`", empty=True)
        for page in paginator.pages:
            await ctx.send(page)
    @Feature.Command(parent="gsk", name="shutdown", aliases=["logout"])
    async def gsk_shutdown(self, ctx):
        """
        Logs this bot out.
        """
        ellipse_character = "\N{BRAILLE PATTERN DOTS-356}" if Flags.USE_BRAILLE_J else "\N{HORIZONTAL ELLIPSIS}"
        await ctx.send(f"Logging out now{ellipse_character}")
        await ctx.bot.close()
    @Feature.Command(parent="gsk", name="invite")
    async def gsk_invite(self, ctx, *perms: str):
        """
        Retrieve the invite URL for this bot.
        If the names of permissions are provided, they are requested as part of the invite.
        """
        scopes = ('bot', 'applications.commands')
        permissions = guilded.Permissions()
        for perm in perms:
            # dict(permissions) maps permission names to current values; reject unknown names.
            if perm not in dict(permissions):
                raise commands.BadArgument(f"Invalid permission: {perm}")
            setattr(permissions, perm, True)
        application_info = await self.bot.application_info()
        query = {
            "client_id": application_info.id,
            "scope": "+".join(scopes),
            "permissions": permissions.value
        }
        # FIXME: still points at the Discord OAuth endpoint; presumably a leftover
        # from the discord.py project this was ported from — verify for guilded.
        return await ctx.send(
            f"Link to invite this bot:\n<https://discordapp.com/oauth2/authorize?{urlencode(query, safe='+')}>"
        )
    @Feature.Command(parent="gsk", name="rtt", aliases=["ping"])
    async def gsk_rtt(self, ctx):
        """
        Calculates Round-Trip Time to the API.
        """
        message = None
        # We'll show each of these readings as well as an average and standard deviation.
        api_readings = []
        # We'll also record websocket readings, but we'll only provide the average.
        websocket_readings = []
        # We do 6 iterations here.
        # This gives us 5 visible readings, because a request can't include the stats for itself.
        for _ in range(6):
            # First generate the text
            text = "Calculating round-trip time...\n\n"
            text += "\n".join(f"Reading {index + 1}: {reading * 1000:.2f}ms" for index, reading in enumerate(api_readings))
            if api_readings:
                average = sum(api_readings) / len(api_readings)
                if len(api_readings) > 1:
                    # Sample standard deviation (divides by n - 1).
                    stddev = math.sqrt(sum(math.pow(reading - average, 2) for reading in api_readings) / (len(api_readings) - 1))
                else:
                    stddev = 0.0
                text += f"\n\nAverage: {average * 1000:.2f} \N{PLUS-MINUS SIGN} {stddev * 1000:.2f}ms"
            else:
                text += "\n\nNo readings yet."
            if websocket_readings:
                average = sum(websocket_readings) / len(websocket_readings)
                text += f"\nWebsocket latency: {average * 1000:.2f}ms"
            else:
                text += f"\nWebsocket latency: {self.bot.latency * 1000:.2f}ms"
            # Now do the actual request and reading
            if message:
                before = time.perf_counter()
                await message.edit(content=text)
                after = time.perf_counter()
                api_readings.append(after - before)
            else:
                before = time.perf_counter()
                message = await ctx.send(content=text)
                after = time.perf_counter()
                api_readings.append(after - before)
            # Ignore websocket latencies that are 0 or negative because they usually mean we've got bad heartbeats
            if self.bot.latency > 0.0:
                websocket_readings.append(self.bot.latency)
| 33.984536 | 129 | 0.582284 |
9bee8bed957e5e49f72918e3bcc00db3f53ef55d | 427 | py | Python | week2/fibonacci_last_digit.py | llulai/algorithms_coursera | 5406f7e336022d91cb84d0971cc1fa09ac1a2e6c | [
"MIT"
] | null | null | null | week2/fibonacci_last_digit.py | llulai/algorithms_coursera | 5406f7e336022d91cb84d0971cc1fa09ac1a2e6c | [
"MIT"
] | null | null | null | week2/fibonacci_last_digit.py | llulai/algorithms_coursera | 5406f7e336022d91cb84d0971cc1fa09ac1a2e6c | [
"MIT"
] | null | null | null | # Uses python3
import sys
def get_fibonacci_last_digit_naive(n):
    """Last decimal digit of the n-th Fibonacci number (F(0)=0, F(1)=1).

    Only the last digit of each intermediate value is kept, so the loop
    runs in O(n) time over single-digit integers.
    """
    if n <= 1:
        return n
    a, b = 0, 1
    for _ in range(n - 1):
        # One Fibonacci step, reduced mod 10 immediately.
        a, b = b % 10, (a + b) % 10
    return b % 10
if __name__ == '__main__':
    # Read all of stdin; bound to `data` so the `input` builtin is not shadowed.
    data = sys.stdin.read()
    n = int(data)
    print(get_fibonacci_last_digit_naive(n))
| 19.409091 | 55 | 0.611241 |
f1be34a701bffc3aff4117d4f7f5cf6a61de766c | 2,038 | py | Python | Others/ModelProject/app.py | safsoftwarellc/pyWorkspace | edcacb8cb9220fc6e98c3b02da308cfd0a0e3a28 | [
"MIT"
] | null | null | null | Others/ModelProject/app.py | safsoftwarellc/pyWorkspace | edcacb8cb9220fc6e98c3b02da308cfd0a0e3a28 | [
"MIT"
] | null | null | null | Others/ModelProject/app.py | safsoftwarellc/pyWorkspace | edcacb8cb9220fc6e98c3b02da308cfd0a0e3a28 | [
"MIT"
] | null | null | null | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate

app = Flask(__name__)
# SQLite file next to the app; replace with a real DSN for production use.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
# Must be a real boolean: the previous value was the string 'True', and any
# non-empty string (even 'False') is truthy, so the setting silently always
# enabled modification tracking regardless of the text.
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db = SQLAlchemy(app)
migrate = Migrate(app, db)
class xml_data(db.Model):
    """Stored XML file: unique name plus raw binary contents."""
    __tablename__='xml_data'
    file_id = db.Column(db.Integer, primary_key=True)
    file_name = db.Column(db.String(50), unique = True, nullable = False)
    # Raw file bytes; presumably the XML document itself — TODO confirm.
    file_data = db.Column(db.LargeBinary)
    update_date = db.Column(db.DateTime())
    def __repr__(self):
        return 'File - {}'.format(self.file_name)
class xpath_data(db.Model):
    """XPath expression associated with one stored XML file (many-to-one)."""
    __tablename__='xpath_data'
    xpath_id = db.Column(db.Integer, primary_key=True)
    file_id = db.Column(db.Integer, db.ForeignKey('xml_data.file_id'), nullable=False)
    # Lazy backref: xml_data rows gain a `.xpaths` collection.
    file_id_relation = db.relationship('xml_data', backref=db.backref('xpaths', lazy=True))
    xpath_string = db.Column(db.String(250), nullable = False)
    update_date = db.Column(db.DateTime())
    def __repr__(self):
        return 'Xpath - {}'.format(self.xpath_string)
class excel_data(db.Model):
    """Stored Excel file: unique name plus raw binary contents."""
    __tablename__='excel_data'
    data_id = db.Column(db.Integer, primary_key=True)
    excel_file_name=db.Column(db.String(50), unique = True, nullable = False)
    excel_file_data=db.Column(db.LargeBinary)
    update_date = db.Column(db.DateTime())
    def __repr__(self):
        return 'File Name - {}'.format(self.excel_file_name)
class queue_config(db.Model):
    """Per-queue configuration key/value pairs."""
    __tablename__='queue_config'
    config_id = db.Column(db.Integer, primary_key=True)
    queue_name=db.Column(db.String(50), unique = True, nullable = False)
    config_name=db.Column(db.String(80), unique = True, nullable = False)
    # NOTE(review): unique=True on the *value* column looks unusual — it forbids
    # two settings sharing the same value. Verify this constraint is intended.
    config_value=db.Column(db.String(120), unique = True, nullable = False)
    update_date = db.Column(db.DateTime())
    def __repr__(self):
        return 'Config Name - {} and Value - {}'.format(self.config_name, self.config_value)
if __name__=='__main__':
    # Development server only — debug=True enables the reloader and the
    # interactive debugger; do not use in production.
    app.run(debug=True)
| 35.137931 | 92 | 0.705594 |
e4cf3b981ecca0723175cd56f0f8765842fbbdfa | 1,066 | py | Python | src/lambda_codebase/account/handler.py | prakashar11/aws-deployment-framework | da7ac5fdf03faccc5146340895f92a0f91770a3e | [
"Apache-2.0"
] | null | null | null | src/lambda_codebase/account/handler.py | prakashar11/aws-deployment-framework | da7ac5fdf03faccc5146340895f92a0f91770a3e | [
"Apache-2.0"
] | 1 | 2021-11-15T17:52:44.000Z | 2021-11-15T17:52:44.000Z | src/lambda_codebase/account/handler.py | prakashar11/aws-deployment-framework | da7ac5fdf03faccc5146340895f92a0f91770a3e | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
"""
The Account handler that is called when ADF is installed to initially create the deployment account if required
"""
try:
    from main import lambda_handler  # pylint: disable=unused-import
except Exception as err:  # pylint: disable=broad-except
    # Importing the real handler failed (e.g. missing dependency). Install a
    # fallback that reports FAILED to CloudFormation so the stack operation
    # does not hang until timeout.
    from urllib.request import Request, urlopen
    import json

    def lambda_handler(event, _context, prior_error=err):
        """Fallback CFN custom-resource handler: PUT a FAILED response.

        `prior_error` captures the original import error at definition time
        and is surfaced as the failure Reason.
        """
        response = dict(
            LogicalResourceId=event["LogicalResourceId"],
            PhysicalResourceId=event.get(
                "PhysicalResourceId",
                "NOT_YET_CREATED"),
            Status="FAILED",
            RequestId=event["RequestId"],
            StackId=event["StackId"],
            Reason=str(prior_error),
        )
        # Use the response as a context manager so the HTTP connection is
        # closed instead of leaked (the original never closed it).
        with urlopen(
            Request(
                event["ResponseURL"],
                data=json.dumps(response).encode(),
                headers={"content-type": ""},
                method="PUT",
            )
        ):
            pass
| 32.30303 | 111 | 0.595685 |
fac9f062a2e85fe179ff1d5ac3e98d2765ebc7c0 | 3,289 | py | Python | tests/python/unittest/test_autotvm_common.py | jheo4/incubator-tvm | c4c61cb766608fb2f0fd8c9facc480a43afed3f5 | [
"Apache-2.0"
] | 1 | 2021-03-20T02:03:00.000Z | 2021-03-20T02:03:00.000Z | tests/python/unittest/test_autotvm_common.py | jheo4/incubator-tvm | c4c61cb766608fb2f0fd8c9facc480a43afed3f5 | [
"Apache-2.0"
] | null | null | null | tests/python/unittest/test_autotvm_common.py | jheo4/incubator-tvm | c4c61cb766608fb2f0fd8c9facc480a43afed3f5 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Common utilities for testing autotvm"""
import time
import numpy as np
import tvm
from tvm import autotvm
from tvm.autotvm import MeasureInput, MeasureResult
from tvm.autotvm.measure.measure import Runner
class DummyRunner(Runner):
    """Measure runner that fabricates random timings instead of running on hardware."""
    def __init__(self):
        # Minimal Runner configuration — presumably (n_parallel=1, timeout=1); TODO confirm.
        super(DummyRunner, self).__init__(1, 1)
    def run(self, measure_inputs, build_results):
        # One fake MeasureResult per input: random cost tuple, error code 0,
        # fixed 0.2 "all_cost", and the current timestamp.
        return [MeasureResult((np.random.random(),), 0, 0.2, time.time())
                for _ in range(len(measure_inputs))]
    def get_build_kwargs(self):
        # The dummy runner needs no extra build arguments.
        return {}
@autotvm.template
def matmul(N, L, M, dtype):
    """AutoTVM template: (N, L) x (L, M) matmul with a 2-way tile search space."""
    A = tvm.placeholder((N, L), name='A', dtype=dtype)
    B = tvm.placeholder((L, M), name='B', dtype=dtype)
    k = tvm.reduce_axis((0, L), name='k')
    C = tvm.compute((N, M), lambda i, j: tvm.sum(A[i, k] * B[k, j], axis=k), name='C')
    s = tvm.create_schedule(C.op)
    # schedule
    y, x = s[C].op.axis
    k = s[C].op.reduce_axis[0]
    ##### define space begin #####
    cfg = autotvm.get_config()
    cfg.define_split("tile_y", y, num_outputs=2)
    cfg.define_split("tile_x", x, num_outputs=2)
    ##### define space end #####
    # schedule according to config
    yo, yi = cfg["tile_y"].apply(s, C, y)
    xo, xi = cfg["tile_x"].apply(s, C, x)
    # Outer tiles first, reduction between, inner tiles last.
    s[C].reorder(yo, xo, k, yi, xi)
    return s, [A, B, C]
@autotvm.template
def bad_matmul(N, L, M, dtype):
    """Deliberately wrong template (for error-handling tests) on 'bad_device' targets."""
    if 'bad_device' in tvm.target.Target.current().keys:
        A = tvm.placeholder((N, L), name='A', dtype=dtype)
        B = tvm.placeholder((L, M), name='B', dtype=dtype)
        # Off-by-one reduce extent (L-1 instead of L) makes the result wrong on purpose.
        k = tvm.reduce_axis((0, L-1), name='k')
        C = tvm.compute((N, M), lambda i, j: tvm.sum(A[i, k] * B[k, j], axis=k), name='C')
        s = tvm.create_schedule(C.op)
        # schedule
        y, x = s[C].op.axis
        cfg = autotvm.get_config()
        cfg.define_split("tile_y", y, num_outputs=2)
        cfg.define_split("tile_x", x, num_outputs=2)
        return s, [A, B, C]
    # On any other target, fall back to the correct template.
    return matmul(N, L, M, dtype)
def get_sample_task(n=128):
    """return a sample task for testing

    Builds an n x n x n float32 matmul tuning task for the LLVM target.
    Returns (task, target).
    """
    target = tvm.target.create("llvm")
    task = autotvm.task.create(matmul, args=(n, n, n, 'float32'), target=target)
    return task, target
def get_sample_records(n):
    """get sample records for testing

    Returns n (MeasureInput, MeasureResult) pairs for the sample matmul task,
    with monotonically increasing fake costs (i + 1).
    """
    tsk, target = get_sample_task()
    inps, ress = [], []
    for i in range(n):
        inps.append(MeasureInput(target, tsk, tsk.config_space.get(i)))
        ress.append(MeasureResult((i+1,), 0, i, time.time()))
    return list(zip(inps, ress))
| 32.89 | 90 | 0.644269 |
b507e64a1f3a3c830b48c6dbcc8db5ae08bef881 | 103 | py | Python | pmdarima/preprocessing/exog/__init__.py | Saravji/pmdarima | 7f42e36beb888d9e1e7e41b0d9c9f7419c730a3a | [
"MIT"
] | 1 | 2020-11-22T00:41:47.000Z | 2020-11-22T00:41:47.000Z | pmdarima/preprocessing/exog/__init__.py | Saravji/pmdarima | 7f42e36beb888d9e1e7e41b0d9c9f7419c730a3a | [
"MIT"
] | null | null | null | pmdarima/preprocessing/exog/__init__.py | Saravji/pmdarima | 7f42e36beb888d9e1e7e41b0d9c9f7419c730a3a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from .fourier import *

# Public API: every non-underscore name pulled in by the star import above.
__all__ = [s for s in dir() if not s.startswith("_")]
| 17.166667 | 53 | 0.592233 |
baa442a24c4c50c24ca957865a009e0b84971c55 | 851 | py | Python | pyqt/1simple/4close.py | chenliangold4j/MyPyDictionnary | 3428333f42249f33732da71e420bdc41a412f594 | [
"Apache-2.0"
] | null | null | null | pyqt/1simple/4close.py | chenliangold4j/MyPyDictionnary | 3428333f42249f33732da71e420bdc41a412f594 | [
"Apache-2.0"
] | null | null | null | pyqt/1simple/4close.py | chenliangold4j/MyPyDictionnary | 3428333f42249f33732da71e420bdc41a412f594 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Py40 PyQt5 tutorial
This program creates a quit
button. When we press the button,
the application terminates.
author: Jan Bodnar
website: py40.com
last edited: January 2015
"""
import sys
from PyQt5.QtWidgets import QWidget, QPushButton, QApplication
from PyQt5.QtCore import QCoreApplication
class Example(QWidget):
    """Minimal window containing a single Quit button."""
    def __init__(self):
        super().__init__()
        self.initUI()
    def initUI(self):
        """Build the widget tree and show the window."""
        qbtn = QPushButton('Quit', self)
        # Clicking the button terminates the application's event loop.
        qbtn.clicked.connect(QCoreApplication.instance().quit)
        qbtn.resize(qbtn.sizeHint())
        qbtn.move(50, 50)
        # x, y, width, height of the top-level window.
        self.setGeometry(300, 300, 250, 150)
        self.setWindowTitle('Quit button')
        self.show()
if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = Example()
    # exec_() blocks until the Qt event loop exits; propagate its return code.
    sys.exit(app.exec_())
| 19.790698 | 62 | 0.661575 |
3c2d4f21f67d485a3eef03cf7f0a8ec16f6e7e75 | 162 | py | Python | quiz/admin.py | vparjunmohan/Django-Quiz-App | b625bc4fd87a42ea069ccd3ae7ca857aac344088 | [
"MIT"
] | 2 | 2022-02-10T09:45:55.000Z | 2022-02-17T07:26:26.000Z | quiz/admin.py | vparjunmohan/Django-Quiz-App | b625bc4fd87a42ea069ccd3ae7ca857aac344088 | [
"MIT"
] | null | null | null | quiz/admin.py | vparjunmohan/Django-Quiz-App | b625bc4fd87a42ea069ccd3ae7ca857aac344088 | [
"MIT"
] | 1 | 2022-02-18T21:47:43.000Z | 2022-02-18T21:47:43.000Z | from django.contrib import admin
from .models import Category, Question
# Register your models here.
# Expose the quiz models in Django's admin with default ModelAdmin options.
admin.site.register(Category)
admin.site.register(Question) | 23.142857 | 38 | 0.814815 |
f01bfc5828b90437f4348303679736ce20f625b6 | 8,793 | py | Python | scalene/scalene_parseargs.py | edisga/scalene | d5c190a4a205071199398948e04edbfd07ca4071 | [
"Apache-2.0"
] | null | null | null | scalene/scalene_parseargs.py | edisga/scalene | d5c190a4a205071199398948e04edbfd07ca4071 | [
"Apache-2.0"
] | null | null | null | scalene/scalene_parseargs.py | edisga/scalene | d5c190a4a205071199398948e04edbfd07ca4071 | [
"Apache-2.0"
] | null | null | null | from scalene.scalene_arguments import ScaleneArguments
from scalene.scalene_version import scalene_version
from typing import (
Any,
List,
NoReturn,
Optional,
Tuple,
)
from textwrap import dedent
import argparse
import sys
class RichArgParser(argparse.ArgumentParser):
    """ArgumentParser that renders help/usage through `rich` (color + markup)."""
    def __init__(self, *args: Any, **kwargs: Any):
        from rich.console import Console
        self.console = Console()
        super().__init__(*args, **kwargs)
    def _print_message(self, message: Optional[str], file: Any = None) -> None:
        # Override of argparse's private output hook: route all parser output
        # through the rich console (the `file` argument is intentionally ignored).
        if message:
            self.console.print(message)
class StopJupyterExecution(Exception):
    """NOP exception to enable clean exits from within Jupyter notebooks."""
    def _render_traceback_(self) -> None:
        # IPython hook: returning nothing suppresses the traceback display.
        pass
class ScaleneParseArgs:
    """Static helpers for parsing Scalene's command-line options."""
    @staticmethod
    def clean_exit(code: object = 0) -> NoReturn:
        """Replacement for sys.exit that exits cleanly from within Jupyter notebooks."""
        raise StopJupyterExecution
    @staticmethod
    def parse_args() -> Tuple[argparse.Namespace, List[str]]:
        """Parse Scalene's flags; returns (parsed args, leftover argv for the target)."""
        # In IPython, intercept exit cleanly (because sys.exit triggers a backtrace).
        try:
            from IPython import get_ipython
            if get_ipython():
                sys.exit = ScaleneParseArgs.clean_exit
                sys._exit = ScaleneParseArgs.clean_exit  # type: ignore
        except:
            pass
        defaults = ScaleneArguments()
        # NOTE: [b], [blue], [link=...] are rich console markup rendered by
        # RichArgParser; \[ escapes a literal bracket.
        usage = dedent(
            f"""[b]Scalene[/b]: a high-precision CPU and memory profiler, version {scalene_version}
[link=https://github.com/plasma-umass/scalene]https://github.com/plasma-umass/scalene[/link]
command-line:
% [b]scalene \[options] yourprogram.py[/b]
or
% [b]python3 -m scalene \[options] yourprogram.py[/b]
in Jupyter, line mode:
[b] %scrun \[options] statement[/b]
in Jupyter, cell mode:
[b] %%scalene \[options]
your code here
[/b]
"""
        )
        epilog = dedent(
            """When running Scalene in the background, you can suspend/resume profiling
for the process ID that Scalene reports. For example:
% python3 -m scalene [options] yourprogram.py &
Scalene now profiling process 12345
to suspend profiling: python3 -m scalene.profile --off --pid 12345
to resume profiling: python3 -m scalene.profile --on --pid 12345
"""
        )
        parser = RichArgParser(  # argparse.ArgumentParser(
            prog="scalene",
            description=usage,
            epilog=epilog,
            formatter_class=argparse.RawTextHelpFormatter,
            allow_abbrev=False,
        )
        parser.add_argument(
            "--version",
            dest="version",
            action="store_const",
            const=True,
            help="prints the version number for this release of Scalene and exits",
        )
        parser.add_argument(
            "--outfile",
            type=str,
            default=defaults.outfile,
            help="file to hold profiler output (default: [blue]"
            + ("stdout" if not defaults.outfile else defaults.outfile)
            + "[/blue])",
        )
        parser.add_argument(
            "--html",
            dest="html",
            action="store_const",
            const=True,
            default=defaults.html,
            help="output as HTML (default: [blue]"
            + str("html" if defaults.html else "text")
            + "[/blue])",
        )
        parser.add_argument(
            "--json",
            dest="json",
            action="store_const",
            const=True,
            default=defaults.json,
            help="output as JSON (default: [blue]"
            + str("json" if defaults.json else "text")
            + "[/blue])",
        )
        parser.add_argument(
            "--reduced-profile",
            dest="reduced_profile",
            action="store_const",
            const=True,
            default=defaults.reduced_profile,
            help=f"generate a reduced profile, with non-zero lines only (default: [blue]{defaults.reduced_profile}[/blue])",
        )
        parser.add_argument(
            "--profile-interval",
            type=float,
            default=defaults.profile_interval,
            help=f"output profiles every so many seconds (default: [blue]{defaults.profile_interval}[/blue])",
        )
        parser.add_argument(
            "--cpu-only",
            dest="cpu_only",
            action="store_const",
            const=True,
            default=defaults.cpu_only,
            help="only profile CPU+GPU time (default: [blue]profile "
            + (
                "CPU only"
                if defaults.cpu_only
                else "CPU+GPU, memory, and copying"
            )
            + "[/blue])",
        )
        parser.add_argument(
            "--profile-all",
            dest="profile_all",
            action="store_const",
            const=True,
            default=defaults.profile_all,
            help="profile all executed code, not just the target program (default: [blue]"
            + (
                "all code"
                if defaults.profile_all
                else "only the target program"
            )
            + "[/blue])",
        )
        parser.add_argument(
            "--profile-only",
            dest="profile_only",
            type=str,
            default=defaults.profile_only,
            help="profile only code in filenames that contain the given strings, separated by commas (default: [blue]"
            + (
                "no restrictions"
                if not defaults.profile_only
                else defaults.profile_only
            )
            + "[/blue])",
        )
        parser.add_argument(
            "--use-virtual-time",
            dest="use_virtual_time",
            action="store_const",
            const=True,
            default=defaults.use_virtual_time,
            help=f"measure only CPU time, not time spent in I/O or blocking (default: [blue]{defaults.use_virtual_time}[/blue])",
        )
        parser.add_argument(
            "--cpu-percent-threshold",
            dest="cpu_percent_threshold",
            type=int,
            default=defaults.cpu_percent_threshold,
            help=f"only report profiles with at least this percent of CPU time (default: [blue]{defaults.cpu_percent_threshold}%%[/blue])",
        )
        parser.add_argument(
            "--cpu-sampling-rate",
            dest="cpu_sampling_rate",
            type=float,
            default=defaults.cpu_sampling_rate,
            help=f"CPU sampling rate (default: every [blue]{defaults.cpu_sampling_rate}s[/blue])",
        )
        parser.add_argument(
            "--malloc-threshold",
            dest="malloc_threshold",
            type=int,
            default=defaults.malloc_threshold,
            help=f"only report profiles with at least this many allocations (default: [blue]{defaults.malloc_threshold}[/blue])",
        )
        parser.add_argument(
            "--program-path",
            dest="program_path",
            type=str,
            default="",
            help="The directory containing the code to profile (default: [blue]the path to the profiled program[/blue])",
        )
        group = parser.add_mutually_exclusive_group(required=False)
        group.add_argument(
            "--on",
            action="store_true",
            help="start with profiling on (default)",
        )
        group.add_argument(
            "--off", action="store_true", help="start with profiling off"
        )
        # the PID of the profiling process (for internal use only)
        parser.add_argument(
            "--pid", type=int, default=0, help=argparse.SUPPRESS
        )
        # collect all arguments after "---", which Scalene will ignore
        parser.add_argument(
            "---",
            dest="unused_args",
            default=[],
            help=argparse.SUPPRESS,
            nargs=argparse.REMAINDER,
        )
        # Parse out all Scalene arguments.
        # https://stackoverflow.com/questions/35733262/is-there-any-way-to-instruct-argparse-python-2-7-to-remove-found-arguments-fro
        args, left = parser.parse_known_args()
        left += args.unused_args
        import re
        # In a notebook, argv[0] looks like "<ipython-input-NN-...>".
        in_jupyter_notebook = len(sys.argv) >= 1 and re.match(
            "<ipython-input-([0-9]+)-.*>", sys.argv[0]
        )
        # If the user did not enter any commands (just `scalene` or `python3 -m scalene`),
        # print the usage information and bail.
        if not in_jupyter_notebook and (len(sys.argv) + len(left) == 1):
            parser.print_help(sys.stderr)
            sys.exit(-1)
        if args.version:
            print(f"Scalene version {scalene_version}")
            sys.exit(-1)
        return args, left
| 34.347656 | 139 | 0.565677 |
772169c9939f3245ec9795693daf70e661e306c2 | 1,357 | py | Python | src/course1/week4/rselect.py | manoldonev/algo1-assignments | 936ed30bab844d9ed9bbf14e9785174865aac4c3 | [
"MIT"
] | null | null | null | src/course1/week4/rselect.py | manoldonev/algo1-assignments | 936ed30bab844d9ed9bbf14e9785174865aac4c3 | [
"MIT"
] | null | null | null | src/course1/week4/rselect.py | manoldonev/algo1-assignments | 936ed30bab844d9ed9bbf14e9785174865aac4c3 | [
"MIT"
] | null | null | null |
"""Randomized Selection Implementation"""
from random import randint
def rselect(numbers: list[int], i: int) -> int:
"""Rselect public method"""
return _rselect(numbers, 0, len(numbers) - 1, i)
def _rselect(numbers: list[int], left: int, right: int, i: int) -> int:
"""Divide and conquer step"""
if left >= right:
return numbers[right]
pivot_index = _partition(numbers, left, right)
if pivot_index == i:
return numbers[i]
if pivot_index > i:
return _rselect(numbers, left, pivot_index - 1, i)
return _rselect(numbers, pivot_index + 1, right, i)
def _partition(numbers: list[int], left: int, right: int) -> int:
"""Partition around pivot"""
pivot_index = _choose_pivot(numbers, left, right)
# Make sure chosen pivot is on first slot (relative to subarray)
numbers[left], numbers[pivot_index] = numbers[pivot_index], numbers[left]
pivot = numbers[left]
i = left + 1
j = left + 1
while j <= right:
if numbers[j] < pivot:
numbers[i], numbers[j] = numbers[j], numbers[i]
i += 1
j += 1
numbers[left], numbers[i - 1] = numbers[i - 1], numbers[left]
return i - 1
def _choose_pivot(_: list[int], left: int, right: int) -> int:
"""Choose pivot"""
# Choose random pivot
return randint(left, right)
| 23.807018 | 77 | 0.615328 |
d9cb4c65c04607c5aa766867fa042f9e0b1d36f8 | 7,782 | py | Python | backend/server/tests/wrapper_test/test_tutorial_anchor_wrapper.py | FlickerSoul/Graphery | 8b1390e1ba96fd2867f0cd8e5fc1d4ad6108121e | [
"MIT"
] | 5 | 2020-08-26T00:15:01.000Z | 2021-01-11T17:24:51.000Z | backend/server/tests/wrapper_test/test_tutorial_anchor_wrapper.py | FlickerSoul/Graphery | 8b1390e1ba96fd2867f0cd8e5fc1d4ad6108121e | [
"MIT"
] | 69 | 2020-08-02T23:45:44.000Z | 2021-04-17T03:04:32.000Z | backend/server/tests/wrapper_test/test_tutorial_anchor_wrapper.py | FlickerSoul/Graphery | 8b1390e1ba96fd2867f0cd8e5fc1d4ad6108121e | [
"MIT"
] | 4 | 2020-09-10T05:40:49.000Z | 2020-12-20T11:44:16.000Z | from uuid import UUID
import pytest
from backend.intel_wrappers.intel_wrapper import TutorialAnchorWrapper
from backend.intel_wrappers.validators import ValidationError
from tests.wrapper_test.factories import category_wrappers_factory
from tests.wrapper_test.test_wrapper_helper import gen_wrapper_test_class
# for pycharm quick start
class TestTutorialAnchorWrapper:
    """Placeholder so IDEs resolve the name; rebound by the generated class below."""
    def test_func(self):
        pass
# noinspection PyRedeclaration
# Rebinds the placeholder class above with a generated test class. Each key of
# `test_params` names a generated test; each list entry is one parametrized case.
TestTutorialAnchorWrapper = gen_wrapper_test_class(wrapper_class=TutorialAnchorWrapper, test_params={
    'test_load': [
        pytest.param('stored_mock_tutorial_anchor', True),
        pytest.param('stored_mock_tutorial_anchor', False)
    ],
    'test_set_variables': [
        {
            'url': 'test-set-var',
            'name': 'test set var',
            'categories': category_wrappers_factory('temp cat {}', 1),
            'level': 101,
            'section': 0,
        },
        {
            'url': 'test-set-var',
            'name': 'test set var',
            'categories': category_wrappers_factory('temp cat {}', 10),
            'level': 101,
            'section': 0,
        },
        {
            'categories': [],
        },
        {
            'level': 101,
            'section': 0,
        },
        {
            'url': 'test-set-var',
            'name': 'test set var',
        },
        {
            'url': 'test-set-var',
            'name': 'test set var',
            'level': 101,
            'section': 0,
        },
        {
        }
    ],
    'test_making_new_model': [
        pytest.param({
            'url': 'test-make-new-model',
            'name': 'test make new model',
            'categories': category_wrappers_factory('make new model {}', 1),
            'level': 101,
            'section': 0,
        }),
        pytest.param({
            'url': 'test-make-new-model',
            'name': 'test make new model',
            'categories': category_wrappers_factory('test making new {}', 10),
            'level': 101,
            'section': 0,
        }),
        pytest.param({
            'url': 'test-make-new-model',
            'name': 'test make new model',
            'categories': None,
            'level': 101,
            'section': 0,
        }),
    ],
    'test_retrieve_model': [
        pytest.param('stored_mock_tutorial_anchor', {'id': UUID('b0015ac8-5376-4b99-b649-6f25771dbd91'), })
    ],
    'test_overwrite': [
        pytest.param('one_time_mock_tutorial_anchor', {
            'url': 'one_time_mock_test_tutorial_mod',
            'name': 'one time mock test tutorial mode',
            'categories': category_wrappers_factory('test overwrite {}', 5),
            'section': 2,
            'level': 222,
            'is_published': False
        }),
        pytest.param('one_time_mock_tutorial_anchor', {
            'url': 'one_time_mock_test_tutorial_mod',
            'name': 'one time mock test tutorial mode',
            'categories': [],
            'section': 2,
            'level': 222,
            'is_published': False
        }),
        pytest.param('one_time_mock_tutorial_anchor', {
            'categories': category_wrappers_factory('test overwrite cat only {}', 7)
        }),
        pytest.param('one_time_mock_tutorial_anchor', {
            'url': 'one_time_mock_test_tutorial_mod',
            'name': 'one time mock test tutorial mode',
            'categories': [],
            'section': 1,
            'level': 212,
            'is_published': True
        }),
        pytest.param('one_time_mock_tutorial_anchor', {
            'section': 1,
            'level': 212,
        }),
        pytest.param('one_time_mock_tutorial_anchor', {
            'url': 'one_time_mock_test_tutorial_mod',
            'name': 'one time mock test tutorial mode',
            'is_published': True
        }),
        pytest.param('one_time_mock_tutorial_anchor', {
        }
        )
    ],
    # Each tuple: (partial variables, expected exception, expected message).
    'test_validation': [
        pytest.param({'url': ''}, ValidationError,
                     None),
        pytest.param({'url': 't', 'name': ''}, ValidationError,
                     None),
        pytest.param({'url': 't', 'name': '', 'categories': None}, ValidationError,
                     None),
        pytest.param({'url': 't', 'name': '', 'categories': [], 'level': ''}, ValidationError,
                     None),
        pytest.param({'url': 't', 'name': '', 'categories': [], 'level': 100, 'section': 1}, ValidationError,
                     None)
    ],
    'test_get_model': [
        pytest.param(None, {
            'url': 'one_time_mock_test_tutorial_mod',
            'name': 'one time mock test tutorial mode',
            'categories': [],
            'section': 2,
            'level': 222,
            'is_published': False
        }, False, False, AssertionError, 'Cannot make new model without validations!'),
        pytest.param(None, {
            'url': 'one-time-mock-test-tutorial-mod',
            'name': 'one time mock test tutorial mode',
            'categories': [],
            'section': 2,
            'level': 222,
            'is_published': False
        }, True, True, None, None, id='make new model'),
        pytest.param('stored_mock_tutorial_anchor', {
            'id': UUID('b0015ac8-5376-4b99-b649-6f25771dbd91'),
            'url': 'mock-test-tutorial',
            'name': 'mock test tutorial',
            'section': 1,
            'level': 210,
            'is_published': True
        }, True, False, None, None, id='get old model')
    ],
    'test_finalize': [
        pytest.param('one_time_mock_tutorial_anchor', {
            'url': 'finalize-test-tutorial',
            'name': 'finalize test tutorial',
            'categories': category_wrappers_factory('full mod finalize {}', 5),
            'section': 3,
            'level': 220,
            'is_published': False
        }, True, True, None, None),
        pytest.param('one_time_mock_tutorial_anchor', {
            'url': 'finalize-test-tutorial',
            'name': 'finalize test tutorial',
            'section': 1,
            'level': 212,
            'is_published': False
        }, True, False, None, None),
        pytest.param('one_time_mock_tutorial_anchor', {
            'url': 'finalize-test-tutorial',
            'name': 'finalize test tutorial',
            'categories': []
        }, True, True, None, None),
        pytest.param('one_time_mock_tutorial_anchor', {
            'url': ''
        }, True, True, ValidationError, None),
        pytest.param(None, {
            'url': 'finalize-mock-test-tutorial',
            'name': 'finalize mock test tutorial',
            'categories': category_wrappers_factory('finalize cat {}', 7),
            'section': 1,
            'level': 213,
            'is_published': True
        }, True, True, None, None),
        pytest.param(None, {
            'url': 'finalize-mock-test-tutorial',
            'name': 'finalize mock test tutorial',
            'categories': category_wrappers_factory('finalize cat {}', 10),
            'section': 1,
            'level': 213,
        }, True, True, None, None),
        pytest.param(None, {
            'url': 'finalize-mock-test-tutorial',
            'name': 'finalize mock test tutorial',
            'categories': [],
            'section': 1,
            'level': 213,
            'is_published': True
        }, False, True, AssertionError, None),
        pytest.param(None, {
            'url': 'finalize-mock-test-tutorial',
            'name': 'finalize mock test tutorial',
            'categories': [],
            'section': 1,
            'level': 213,
            'is_published': True
        }, True, False, AssertionError, None),
    ]
}, default_params={'is_published': False})
| 35.054054 | 109 | 0.514778 |
80ac82681579a1a7e4d83c2d50038a4de78cdfa6 | 3,386 | py | Python | implementation/src/model/core/earlystopping.py | gucci-j/negotiation-breakdown-detection | d4c2bf63b4da95b342fd952065f9ad3e97179134 | [
"MIT"
] | 4 | 2021-04-26T09:05:46.000Z | 2022-03-08T06:01:07.000Z | implementation/src/model/core/earlystopping.py | gucci-j/negotiation-breakdown-detection | d4c2bf63b4da95b342fd952065f9ad3e97179134 | [
"MIT"
] | null | null | null | implementation/src/model/core/earlystopping.py | gucci-j/negotiation-breakdown-detection | d4c2bf63b4da95b342fd952065f9ad3e97179134 | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2018 Bjarte Mehus Sunde & 2019-2020 Atsuki Yamaguchi
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import torch
class EarlyStopping:
    """Early stops the training if validation loss doesn't improve after a given patience.

    Works for both loss-like metrics (lower is better, ``metric_type='loss'``)
    and score-like metrics (higher is better, any other ``metric_type``).
    Every improvement checkpoints the model's ``state_dict`` to the given path.
    """
    def __init__(self, logger, patience=7, verbose=False, metric_type='loss'):
        """
        Args:
            `logger`: logger used for progress messages.
            `patience` (int): How long to wait after last time validation loss improved.
                            Default: 7
            `verbose` (bool): If True, prints a message for each validation loss improvement.
                            Default: False
            `metric_type` (str): 'loss' minimizes the monitored value; any other
                            value maximizes it. Default: 'loss'
        """
        self.patience = patience
        self.verbose = verbose
        self.counter = 0        # calls since the last improvement
        self.best_score = None  # best internal (maximized) score seen so far
        self.early_stop = False # flag polled by the training loop
        self.logger = logger
        # np.inf (lowercase) instead of np.Inf: the capitalized alias was
        # removed in NumPy 2.0, while np.inf works on every NumPy version.
        if metric_type == 'loss':
            self.val_pred_min = np.inf
        else:
            self.val_pred_max = -np.inf
        self.metric_type = metric_type
    def __call__(self, val_value, model, save_path):
        """Report a new validation value; checkpoint on improvement.

        Sets ``self.early_stop`` once `patience` consecutive non-improving
        calls have been observed.
        """
        # Negate losses so "larger score is better" holds uniformly below.
        if self.metric_type == 'loss':
            score = -val_value
        else:
            score = val_value
        if self.best_score is None: # init
            self.best_score = score
            self.show_checkpoint(val_value, model, save_path)
        elif score < self.best_score: # not improved
            self.counter += 1
            if self.verbose:
                self.logger.info(f'\tEarlyStopping counter: {self.counter} / {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
        else: # improved (ties count as improvement and reset the counter)
            self.best_score = score
            self.show_checkpoint(val_value, model, save_path)
            self.counter = 0
    def show_checkpoint(self, val_value, model, save_path):
        """Log the improvement (if verbose), record it, and save the model."""
        if self.metric_type == 'loss':
            if self.verbose:
                self.logger.info(f'\tValidation loss decreased: {self.val_pred_min:.6f} --> {val_value:.6f}')
            self.val_pred_min = val_value
        else:
            if self.verbose:
                self.logger.info(f'\tValidation {self.metric_type} improved: {self.val_pred_max:.6f} --> {val_value:.6f}')
            self.val_pred_max = val_value
        self.save_model(model, save_path)
    def save_model(self, model, save_path):
        """Persist only the model weights (state_dict) to `save_path`."""
        torch.save(model.state_dict(), save_path)
05be25f6b690ee83ade9c81b409089cefe01d0fb | 212 | py | Python | robosuite/models/arenas/__init__.py | gy20073/robosuite | cb02dd64c02d7b3d76f6016c9d00dc9391776ef9 | [
"MIT"
] | 3 | 2020-02-25T07:52:21.000Z | 2021-11-29T13:28:24.000Z | robosuite/models/arenas/__init__.py | gy20073/robosuite | cb02dd64c02d7b3d76f6016c9d00dc9391776ef9 | [
"MIT"
] | null | null | null | robosuite/models/arenas/__init__.py | gy20073/robosuite | cb02dd64c02d7b3d76f6016c9d00dc9391776ef9 | [
"MIT"
] | null | null | null | from .arena import Arena
from .bins_arena import BinsArena
from .bin_packing_arena import BinPackingArena
from .empty_arena import EmptyArena
from .pegs_arena import PegsArena
from .table_arena import TableArena
| 30.285714 | 46 | 0.858491 |
104e84ada209661a76bb2b4e72ebba3377e2c737 | 301 | py | Python | orchestra/migrations/0021_merge.py | ksbek/orchestra | 07556717feb57efcf8fb29a1e2e98eebe2313b8c | [
"Apache-2.0"
] | null | null | null | orchestra/migrations/0021_merge.py | ksbek/orchestra | 07556717feb57efcf8fb29a1e2e98eebe2313b8c | [
"Apache-2.0"
] | null | null | null | orchestra/migrations/0021_merge.py | ksbek/orchestra | 07556717feb57efcf8fb29a1e2e98eebe2313b8c | [
"Apache-2.0"
] | 1 | 2021-12-15T01:10:35.000Z | 2021-12-15T01:10:35.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Merge migration: reconciles two parallel 0020 leaf migrations.

    It carries no schema operations of its own; it only joins the two
    divergent branches so the app has a single migration leaf again.
    """
    # Both 0020_* leaves must be applied before this merge point.
    dependencies = [
        ('orchestra', '0020_auto_20151028_1559'),
        ('orchestra', '0020_auto_20151022_1553'),
    ]
    # Intentionally empty: a merge migration performs no database changes.
    operations = [
    ]
| 18.8125 | 49 | 0.66113 |
dc8946afe0a92e2fbb310ec87b6b682d94703d9a | 428 | py | Python | issues/migrations/0001_initial.py | FIOpwK/django-issuetracker | 99e7cd97923af8cf2fe37f1380d0feab108d6bff | [
"MIT"
] | 1 | 2021-10-19T03:26:17.000Z | 2021-10-19T03:26:17.000Z | issues/migrations/0001_initial.py | FIOpwK/django-issuetracker | 99e7cd97923af8cf2fe37f1380d0feab108d6bff | [
"MIT"
] | null | null | null | issues/migrations/0001_initial.py | FIOpwK/django-issuetracker | 99e7cd97923af8cf2fe37f1380d0feab108d6bff | [
"MIT"
] | null | null | null | # Generated by Django 3.2.7 on 2021-09-12 16:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ``Issue`` table with only its PK."""
    # First migration of the app, so there are no dependencies.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Issue',
            fields=[
                # Auto-created BigAutoField primary key; further model
                # fields are introduced by later migrations.
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
    ]
| 20.380952 | 117 | 0.581776 |
d8cb4a4a43db19f4703d77b794a6bc6e9195cb14 | 3,117 | py | Python | azure-batch/azure/batch/models/job_enable_options.py | jmalobicky/azure-sdk-for-python | 61234a3d83f8fb481d1dd2386e54e888864878fd | [
"MIT"
] | 1 | 2022-03-30T22:39:15.000Z | 2022-03-30T22:39:15.000Z | azure-batch/azure/batch/models/job_enable_options.py | jmalobicky/azure-sdk-for-python | 61234a3d83f8fb481d1dd2386e54e888864878fd | [
"MIT"
] | 54 | 2016-03-25T17:25:01.000Z | 2018-10-22T17:27:54.000Z | azure-batch/azure/batch/models/job_enable_options.py | jmalobicky/azure-sdk-for-python | 61234a3d83f8fb481d1dd2386e54e888864878fd | [
"MIT"
] | 2 | 2017-01-20T18:25:46.000Z | 2017-05-12T21:31:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class JobEnableOptions(Model):
    """Optional request parameters for the job "enable" operation.

    Groups the standard Batch request options (timeout, client tracing and
    clock information) together with the conditional-access headers that
    make the call succeed only when the resource's ETag or last-modified
    timestamp matches the caller's expectation.

    :param timeout: Maximum server-side processing time for the request, in
     seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: Caller-generated request identity, a plain
     GUID without decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the service should echo the
     client-request-id in its response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: Time the request was issued. Client libraries normally
     fill this from the system clock; set it explicitly when calling the
     REST API directly.
    :type ocp_date: datetime
    :param if_match: ETag known to the client; the operation runs only if
     the resource's current ETag on the service matches this value exactly.
    :type if_match: str
    :param if_none_match: ETag known to the client; the operation runs only
     if the resource's current ETag on the service does NOT match this
     value.
    :type if_none_match: str
    :param if_modified_since: Last-modified time known to the client; the
     operation runs only if the resource on the service has been modified
     since this time.
    :type if_modified_since: datetime
    :param if_unmodified_since: Last-modified time known to the client; the
     operation runs only if the resource on the service has NOT been
     modified since this time.
    :type if_unmodified_since: datetime
    """
    def __init__(self, timeout=30, client_request_id=None, return_client_request_id=False, ocp_date=None, if_match=None, if_none_match=None, if_modified_since=None, if_unmodified_since=None):
        super(JobEnableOptions, self).__init__()
        # Plain request options.
        self.timeout = timeout
        self.client_request_id = client_request_id
        self.return_client_request_id = return_client_request_id
        self.ocp_date = ocp_date
        # Conditional (ETag / timestamp based) access headers.
        self.if_match = if_match
        self.if_none_match = if_none_match
        self.if_modified_since = if_modified_since
        self.if_unmodified_since = if_unmodified_since
| 49.47619 | 191 | 0.713507 |
a672d69cfb76529addd22f1b8a1d9268a85c5b56 | 1,705 | py | Python | python/scrapy/AdvancedScrapy/AdvancedScraping/AdvancedScraping/spiders/aspx_spider_xpath.py | RitamDey/My-Simple-Programs | 147b455a6a40c371ec894ce979e8a61d242e03bd | [
"Unlicense"
] | 2 | 2016-10-14T16:58:05.000Z | 2017-05-04T04:59:18.000Z | python/scrapy/AdvancedScrapy/AdvancedScraping/AdvancedScraping/spiders/aspx_spider_xpath.py | GreenJoey/My-Simple-Programs | 147b455a6a40c371ec894ce979e8a61d242e03bd | [
"Unlicense"
] | null | null | null | python/scrapy/AdvancedScrapy/AdvancedScraping/AdvancedScraping/spiders/aspx_spider_xpath.py | GreenJoey/My-Simple-Programs | 147b455a6a40c371ec894ce979e8a61d242e03bd | [
"Unlicense"
] | null | null | null | import scrapy
class SpidyQuotesViewStateSpider(scrapy.Spider):
    """Scrapes the ASP.NET search page on quotes.toscrape.com.

    The site requires the __VIEWSTATE token of the previous response to be
    posted back, so each callback forwards it: list the authors, then pick
    every tag for the selected author, then collect the filtered quotes.
    """
    name = "spidyquotesViewstateXpath"
    start_urls = ["http://quotes.toscrape.com/search.aspx",]
    download_delay = 1.5
    def parse(self, response):
        """Post one filter request per author found in the search form."""
        viewstate = response.xpath('//input[@id="__VIEWSTATE"]/@value').extract_first()
        authors = response.xpath('//select[@id="author"]/option/@value').extract()
        for author in authors:
            payload = {
                'author': author,
                '__VIEWSTATE': viewstate
            }
            yield scrapy.FormRequest(
                "http://quotes.toscrape.com/filter.aspx",
                formdata=payload,
                callback=self.parse_tags
            )
    def parse_tags(self, response):
        """For the selected author, post one request per available tag."""
        selected_author = response.xpath(
            '//select[@id="author"]/option[@selected]/@value'
        ).extract_first()
        viewstate = response.xpath('//input[@id="__VIEWSTATE"]/@value').extract_first()
        for tag in response.xpath('//select[@id="tag"]/option/@value').extract():
            yield scrapy.FormRequest(
                'http://quotes.toscrape.com/filter.aspx',
                formdata={
                    'author': selected_author,
                    'tag': tag,
                    '__VIEWSTATE': viewstate
                },
                callback=self.parse_result
            )
    def parse_result(self, response):
        """Emit one item per quote on the filtered results page."""
        for quote in response.css("div.quote"):
            yield {
                'quote': quote.css('span.content ::text').extract_first(),
                'author': quote.css('span.author ::text').extract_first(),
                'tag': quote.css('span.tag ::text').extract_first(),
            }
| 40.595238 | 106 | 0.493255 |
bc0ed43c56307f40bf97449a5bef54653f493cd0 | 10,891 | py | Python | PIL/EpsImagePlugin.py | radicalgraphics/Pillow | 9d22c16d539f6e0356d64849b84f2feec6787179 | [
"Python-2.0"
] | 132 | 2021-02-24T12:14:35.000Z | 2022-03-28T13:06:22.000Z | PIL/EpsImagePlugin.py | radicalgraphics/Pillow | 9d22c16d539f6e0356d64849b84f2feec6787179 | [
"Python-2.0"
] | 9 | 2020-06-05T20:37:40.000Z | 2021-09-22T18:28:23.000Z | udacity-car/lib/python2.7/site-packages/PIL/EpsImagePlugin.py | 808brick/CarND-Capstone | f9e536b4a9d96322d7e971073602c8969dbd9369 | [
"MIT"
] | 3 | 2021-12-08T15:20:46.000Z | 2021-12-13T04:55:08.000Z | #
# The Python Imaging Library.
# $Id$
#
# EPS file handling
#
# History:
# 1995-09-01 fl Created (0.1)
# 1996-05-18 fl Don't choke on "atend" fields, Ghostscript interface (0.2)
# 1996-08-22 fl Don't choke on floating point BoundingBox values
# 1996-08-23 fl Handle files from Macintosh (0.3)
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.4)
# 2003-09-07 fl Check gs.close status (from Federico Di Gregorio) (0.5)
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.5"
import re
import io
from PIL import Image, ImageFile, _binary
#
# --------------------------------------------------------------------
i32 = _binary.i32le  # little-endian 32-bit reader (DOS EPS binary header)
o32 = _binary.o32le  # little-endian 32-bit writer
# DSC comment with a value: "%%Key: value"
split = re.compile(r"^%%([^:]*):[ \t]*(.*)[ \t]*$")
# DSC comment without a value part (e.g. "%%EndComments")
field = re.compile(r"^%[%!\w]([^:]*)[ \t]*$")
gs_windows_binary = None
import sys
if sys.platform.startswith('win'):
    # On Windows, locate the Ghostscript executable on PATH at import
    # time; False (as opposed to None) means "searched and not found".
    import shutil
    if hasattr(shutil, 'which'):
        which = shutil.which
    else:
        # Python < 3.3
        import distutils.spawn
        which = distutils.spawn.find_executable
    for binary in ('gswin32c', 'gswin64c', 'gs'):
        if which(binary) is not None:
            gs_windows_binary = binary
            break
    else:
        gs_windows_binary = False
def Ghostscript(tile, size, fp):
    """Render an image using Ghostscript"""
    # Unpack decoder tile
    decoder, tile, offset, data = tile[0]
    length, bbox = data
    import tempfile, os
    # NOTE(review): tempfile.mktemp() is race-prone (the name may be taken
    # before gs creates the file) -- kept as-is for behavior compatibility.
    file = tempfile.mktemp()
    # Build ghostscript command
    command = ["gs",
               "-q", # quite mode
               "-g%dx%d" % size, # set output geometry (pixels)
               "-dNOPAUSE -dSAFER", # don't pause between pages, safe mode
               "-sDEVICE=ppmraw", # ppm driver
               "-sOutputFile=%s" % file,# output file
               "- >/dev/null 2>/dev/null"]
    if gs_windows_binary is not None:
        # Windows: use the binary located at import time; False means the
        # module-level PATH search failed.
        if gs_windows_binary is False:
            raise WindowsError('Unable to locate Ghostscript on paths')
        command[0] = gs_windows_binary
        command[-1] = '- >nul 2>nul'
    # The command is joined into a single shell string for os.popen; the
    # PostScript program is then streamed to gs on its stdin.
    command = " ".join(command)
    # push data through ghostscript
    try:
        gs = os.popen(command, "w")
        # adjust for image origin
        if bbox[0] != 0 or bbox[1] != 0:
            gs.write("%d %d translate\n" % (-bbox[0], -bbox[1]))
        fp.seek(offset)
        while length > 0:
            s = fp.read(8192)
            if not s:
                break
            length = length - len(s)
            gs.write(s)
        # popen's close() returns the exit status; truthy means gs failed.
        status = gs.close()
        if status:
            raise IOError("gs failed (status %d)" % status)
        im = Image.core.open_ppm(file)
    finally:
        # Always remove the temporary PPM file, even on failure.
        try: os.unlink(file)
        except: pass
    return im
class PSFile:
    """Line-oriented wrapper around a binary file accepting CR, LF or CRLF."""
    def __init__(self, fp):
        self.fp = fp
        self.char = None  # one byte of pushed-back lookahead (or None)
    def __getattr__(self, id):
        # Delegate unknown attributes to the wrapped file and cache them
        # on this instance for subsequent lookups.
        v = getattr(self.fp, id)
        setattr(self, id, v)
        return v
    def seek(self, offset, whence=0):
        # Repositioning invalidates any pending lookahead byte.
        self.char = None
        self.fp.seek(offset, whence)
    def read(self, count):
        return self.fp.read(count).decode('latin-1')
    def tell(self):
        # Report the logical position: one byte earlier when a lookahead
        # byte has already been consumed from the underlying file.
        pos = self.fp.tell()
        return pos - 1 if self.char else pos
    def readline(self):
        collected = b""
        if self.char:
            c, self.char = self.char, None
        else:
            c = self.fp.read(1)
        # Accumulate until CR, LF, or EOF (read(1) returns b"" at EOF,
        # which is "contained" in any bytes object and ends the loop).
        while c not in b"\r\n":
            collected += c
            c = self.fp.read(1)
        if c == b"\r":
            # Peek one byte so a following LF is folded into this CRLF.
            self.char = self.fp.read(1)
            if self.char == b"\n":
                self.char = None
        return collected.decode('latin-1') + "\n"
def _accept(prefix):
    """Return True if *prefix* looks like the start of an EPS file."""
    # Plain EPS starts with "%!PS"; a DOS EPS binary file starts with the
    # little-endian magic number 0xC6D3D0C5.
    if prefix[:4] == b"%!PS":
        return True
    return i32(prefix) == 0xC6D3D0C5
##
# Image plugin for Encapsulated Postscript. This plugin supports only
# a few variants of this format.
class EpsImageFile(ImageFile.ImageFile):
    """EPS File Parser for the Python Imaging Library"""
    format = "EPS"
    format_description = "Encapsulated Postscript"
    def _open(self):
        # Identify the flavour of EPS file (plain PostScript vs. DOS EPS
        # binary), parse the DSC header for the bounding box, and look for
        # an embedded "ImageData" preview section.
        # FIXME: should check the first 512 bytes to see if this
        # really is necessary (platform-dependent, though...)
        fp = PSFile(self.fp)
        # HEAD
        s = fp.read(512)
        if s[:4] == "%!PS":
            # Plain PostScript: the document spans the whole file.
            offset = 0
            fp.seek(0, 2)
            length = fp.tell()
        elif i32(s) == 0xC6D3D0C5:
            # DOS EPS binary header: the offset/length of the PostScript
            # section follow the magic number.
            offset = i32(s[4:])
            length = i32(s[8:])
            fp.seek(offset)
        else:
            raise SyntaxError("not an EPS file")
        fp.seek(offset)
        box = None
        self.mode = "RGB"
        self.size = 1, 1 # FIXME: huh?
        #
        # Load EPS header
        s = fp.readline()
        while s:
            if len(s) > 255:
                raise SyntaxError("not an EPS file")
            # Strip the line terminator (either CRLF or LF).
            if s[-2:] == '\r\n':
                s = s[:-2]
            elif s[-1:] == '\n':
                s = s[:-1]
            try:
                m = split.match(s)
            except re.error as v:
                raise SyntaxError("not an EPS file")
            if m:
                # "%%Key: value" style DSC comment.
                k, v = m.group(1, 2)
                self.info[k] = v
                if k == "BoundingBox":
                    try:
                        # Note: The DSC spec says that BoundingBox
                        # fields should be integers, but some drivers
                        # put floating point values there anyway.
                        box = [int(float(s)) for s in v.split()]
                        self.size = box[2] - box[0], box[3] - box[1]
                        self.tile = [("eps", (0,0) + self.size, offset,
                                      (length, box))]
                    except:
                        pass
            else:
                m = field.match(s)
                if m:
                    # Value-less DSC comment (e.g. "%%EndComments").
                    k = m.group(1)
                    if k == "EndComments":
                        break
                    if k[:8] == "PS-Adobe":
                        self.info[k[:8]] = k[9:]
                    else:
                        self.info[k] = ""
                elif s[0:1] == '%':
                    # handle non-DSC Postscript comments that some
                    # tools mistakenly put in the Comments section
                    pass
                else:
                    raise IOError("bad EPS header")
            s = fp.readline()
            if s[:1] != "%":
                break
        #
        # Scan for an "ImageData" descriptor
        while s[0] == "%":
            if len(s) > 255:
                raise SyntaxError("not an EPS file")
            if s[-2:] == '\r\n':
                s = s[:-2]
            elif s[-1:] == '\n':
                s = s[:-1]
            if s[:11] == "%ImageData:":
                # Layout: columns rows bit-depth mode pad pad encoding "id"
                [x, y, bi, mo, z3, z4, en, id] =\
                    s[11:].split(None, 7)
                x = int(x); y = int(y)
                bi = int(bi)
                mo = int(mo)
                en = int(en)
                # Only binary (1) and hex (2) encodings are supported.
                if en == 1:
                    decoder = "eps_binary"
                elif en == 2:
                    decoder = "eps_hex"
                else:
                    break
                if bi != 8:
                    break
                if mo == 1:
                    self.mode = "L"
                elif mo == 2:
                    self.mode = "LAB"
                elif mo == 3:
                    self.mode = "RGB"
                else:
                    break
                if id[:1] == id[-1:] == '"':
                    id = id[1:-1]
                # Scan forward to the actual image data
                while True:
                    s = fp.readline()
                    if not s:
                        break
                    if s[:len(id)] == id:
                        self.size = x, y
                        self.tile2 = [(decoder,
                                       (0, 0, x, y),
                                       fp.tell(),
                                       0)]
                        return
            s = fp.readline()
            if not s:
                break
        if not box:
            raise IOError("cannot determine EPS bounding box")
    def load(self):
        # Load EPS via Ghostscript
        if not self.tile:
            return
        self.im = Ghostscript(self.tile, self.size, self.fp)
        self.mode = self.im.mode
        self.size = self.im.size
        self.tile = []
#
# --------------------------------------------------------------------
def _save(im, fp, filename, eps=1):
    """EPS Writer for the Python Imaging Library."""
    #
    # make sure image data is available
    im.load()
    #
    # determine postscript image mode
    # operator = (bits per sample, bands, PostScript image operator)
    if im.mode == "L":
        operator = (8, 1, "image")
    elif im.mode == "RGB":
        operator = (8, 3, "false 3 colorimage")
    elif im.mode == "CMYK":
        operator = (8, 4, "false 4 colorimage")
    else:
        raise ValueError("image mode is not supported")
    class NoCloseStream:
        # Shields the caller's file object: TextIOWrapper closes its
        # underlying stream when it is discarded, which would close fp
        # prematurely.
        def __init__(self, fp):
            self.fp = fp
        def __getattr__(self, name):
            return getattr(self.fp, name)
        def close(self):
            pass
    base_fp = fp
    fp = io.TextIOWrapper(NoCloseStream(fp), encoding='latin-1')
    if eps:
        #
        # write EPS header
        fp.write("%!PS-Adobe-3.0 EPSF-3.0\n")
        fp.write("%%Creator: PIL 0.1 EpsEncode\n")
        #fp.write("%%CreationDate: %s"...)
        fp.write("%%%%BoundingBox: 0 0 %d %d\n" % im.size)
        fp.write("%%Pages: 1\n")
        fp.write("%%EndComments\n")
        fp.write("%%Page: 1 1\n")
        fp.write("%%ImageData: %d %d " % im.size)
        fp.write("%d %d 0 1 1 \"%s\"\n" % operator)
    #
    # image header
    fp.write("gsave\n")
    fp.write("10 dict begin\n")
    fp.write("/buf %d string def\n" % (im.size[0] * operator[1]))
    fp.write("%d %d scale\n" % im.size)
    fp.write("%d %d 8\n" % im.size) # <= bits
    fp.write("[%d 0 0 -%d 0 %d]\n" % (im.size[0], im.size[1], im.size[1]))
    fp.write("{ currentfile buf readhexstring pop } bind\n")
    fp.write(operator[2] + "\n")
    # Flush the text layer before the pixel data is written through the
    # original (binary) file object.
    fp.flush()
    ImageFile._save(im, base_fp, [("eps", (0,0)+im.size, 0, None)])
    fp.write("\n%%%%EndBinary\n")
    fp.write("grestore end\n")
    fp.flush()
#
# --------------------------------------------------------------------
# Tell PIL's registry how to recognise, load and save EPS files.
Image.register_open(EpsImageFile.format, EpsImageFile, _accept)
Image.register_save(EpsImageFile.format, _save)
Image.register_extension(EpsImageFile.format, ".ps")
Image.register_extension(EpsImageFile.format, ".eps")
Image.register_mime(EpsImageFile.format, "application/postscript")
| 27.925641 | 78 | 0.459921 |
4d4a7c82ecba5ba03a5742e1d811cc0bcf8586fa | 1,062 | py | Python | homeassistant/components/nfandroidtv/const.py | mib1185/core | b17d4ac65cde9a27ff6032d70b148792e5eba8df | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | homeassistant/components/nfandroidtv/const.py | mib1185/core | b17d4ac65cde9a27ff6032d70b148792e5eba8df | [
"Apache-2.0"
] | 24,710 | 2016-04-13T08:27:26.000Z | 2020-03-02T12:59:13.000Z | homeassistant/components/nfandroidtv/const.py | mib1185/core | b17d4ac65cde9a27ff6032d70b148792e5eba8df | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Constants for the NFAndroidTV integration."""
DOMAIN: str = "nfandroidtv"
# Configuration option keys.
CONF_DURATION = "duration"
CONF_FONTSIZE = "fontsize"
CONF_POSITION = "position"
CONF_TRANSPARENCY = "transparency"
CONF_COLOR = "color"
CONF_INTERRUPT = "interrupt"
DATA_HASS_CONFIG = "nfandroid_hass_config"
DEFAULT_NAME = "Android TV / Fire TV"
DEFAULT_TIMEOUT = 5
# Per-notification attribute keys.
ATTR_DURATION = "duration"
ATTR_FONTSIZE = "fontsize"
ATTR_POSITION = "position"
ATTR_TRANSPARENCY = "transparency"
ATTR_COLOR = "color"
ATTR_BKGCOLOR = "bkgcolor"
ATTR_INTERRUPT = "interrupt"
ATTR_IMAGE = "image"
# Attributes contained in image
ATTR_IMAGE_URL = "url"
ATTR_IMAGE_PATH = "path"
ATTR_IMAGE_USERNAME = "username"
ATTR_IMAGE_PASSWORD = "password"
ATTR_IMAGE_AUTH = "auth"
ATTR_ICON = "icon"
# Attributes contained in icon
ATTR_ICON_URL = "url"
ATTR_ICON_PATH = "path"
ATTR_ICON_USERNAME = "username"
ATTR_ICON_PASSWORD = "password"
ATTR_ICON_AUTH = "auth"
# Any other value or absence of 'auth' lead to basic authentication being used
ATTR_IMAGE_AUTH_DIGEST = "digest"
ATTR_ICON_AUTH_DIGEST = "digest"
| 27.230769 | 78 | 0.778719 |
f6e79d2afe5f691c194743aecb2abaeaaf6f0a0f | 4,686 | py | Python | monorail/koon/input.py | mic47/MysticMine | 2fc0a5eaa0ab299c3a23ce17ae1c56a98055a44c | [
"MIT"
] | 28 | 2015-05-28T13:29:02.000Z | 2018-07-29T04:03:19.000Z | monorail/koon/input.py | mic47/MysticMine | 2fc0a5eaa0ab299c3a23ce17ae1c56a98055a44c | [
"MIT"
] | 8 | 2015-06-28T10:29:14.000Z | 2016-08-03T22:09:36.000Z | monorail/koon/input.py | mic47/MysticMine | 2fc0a5eaa0ab299c3a23ce17ae1c56a98055a44c | [
"MIT"
] | 11 | 2015-06-17T18:23:31.000Z | 2018-09-07T20:06:36.000Z | """ Contains keyboard, mouse, joysticks, etc...
"""
from geo import Vec2D
import pygame
class ButtonLogger:
    """Tracks button state for a single input device.

    ``feed_down``/``feed_up`` are fed by the event pump.  The ``went_*``
    queries answer "did this happen since the last update()?", while
    ``is_down`` reflects the buttons currently held.
    """
    def __init__( self ):
        self.went_down_buttons = []
        self.went_up_buttons = []
        self.down_buttons = []
    def feed_down( self, key ):
        # A press is both a one-frame edge and a held button.
        self.went_down_buttons.append( key )
        self.down_buttons.append( key )
    def feed_up( self, key ):
        self.went_up_buttons.append( key )
        try:
            self.down_buttons.remove( key )
        except: pass # releasing a button we never saw go down is harmless
    def update( self ):
        # Edge information only lives for one frame.
        self.went_down_buttons = []
        self.went_up_buttons = []
    def went_down( self, key ):
        return key in self.went_down_buttons
    def went_up( self, key ):
        return key in self.went_up_buttons
    def any_went_down( self, keys = None ):
        if keys is None:
            return bool( self.went_down_buttons )
        return any( self.went_down( k ) for k in keys )
    def any_went_up( self, keys = None ):
        if keys is None:
            return bool( self.went_up_buttons )
        return any( self.went_up( k ) for k in keys )
    def is_down( self, key ):
        return key in self.down_buttons
    def any_is_down( self, keys = None ):
        if keys is None:
            return bool( self.down_buttons )
        return any( self.is_down( k ) for k in keys )
class Keyboard (ButtonLogger):
    """ButtonLogger for the keyboard, with a per-frame typed-text buffer."""
    def __init__( self ):
        ButtonLogger.__init__( self )
        self._char_buffer = ""
    def feed_char( self, char ):
        # Typed characters are collected separately from key-button events.
        self._char_buffer = self._char_buffer + char
    def get_chars( self ):
        """Return the characters typed since the last update()."""
        return self._char_buffer
    def update( self ):
        ButtonLogger.update( self )
        # Like the button edges, typed text only lives for one frame.
        self._char_buffer = ""
    def get_name( self, key ):
        return pygame.key.name( key )
class Mouse (ButtonLogger):
    """ButtonLogger for the mouse, additionally tracking the pointer.

    public members:
    - pos: the current Vec2D position of the mouse
    """
    UNKNOWN, LEFT, RIGHT, MIDDLE, SCROLLUP, SCROLLDOWN = range( 6 )
    def __init__( self ):
        ButtonLogger.__init__( self )
        self.pos = None
        self._prev_pos = None
    def feed_pos( self, pos ):
        # Keep one position of history so has_moved() can detect motion.
        self._prev_pos = self.pos
        self.pos = pos
    def has_moved( self ):
        if self._prev_pos is None:
            return False
        return self._prev_pos != self.pos
class Joystick (ButtonLogger):
    """ButtonLogger for a gamepad.

    BTN_* constants occupy ids 0-8; the DPAD_* directions use the
    separate 11-14 range so the two sets never collide.
    """
    BTN_A, BTN_B, BTN_X, BTN_Y, BTN_LB, BTN_RB, BTN_BACK, BTN_START, \
    BTN_GUIDE = range( 9 )
    DPAD_LEFT, DPAD_RIGHT, DPAD_UP, DPAD_DOWN = range( 11, 15 )
    def get_name( self, key ):
        return "joy %s" % key
class Joysticks (list):
    """A list of Joystick loggers with "any device" convenience queries."""
    def any_went_up( self, button ):
        return any( joy.went_up( button ) for joy in self )
    def any_went_down( self, button ):
        return any( joy.went_down( button ) for joy in self )
    def any_is_down( self, button ):
        return any( joy.is_down( button ) for joy in self )
class UserInput:
    """Aggregates all input devices: keyboard, mouse and any joysticks."""
    def __init__( self ):
        self.key = Keyboard()
        self.mouse = Mouse()
        self.joys = Joysticks()
        # One logger per joystick that pygame reports.
        # NOTE(review): assumes pygame.joystick was initialized by the
        # caller -- confirm.
        for i in range( 0, pygame.joystick.get_count() ):
            joy = pygame.joystick.Joystick( i )
            joy.init()
            self.joys.append( Joystick() )
        # Devices polled by any_went_up/any_went_down below; the mouse is
        # intentionally excluded.
        self.devs_no_mouse = [ self.key ]
        self.devs_no_mouse.extend( self.joys )
    def update( self ):
        # Clear the per-frame edge state on every device.
        self.key.update()
        self.mouse.update()
        for joy in self.joys:
            joy.update()
    def any_went_up( self ):
        # True if any non-mouse device saw a button release this frame.
        for dev in self.devs_no_mouse:
            if dev.any_went_up():
                return True
        return False
    def any_went_down( self ):
        # True if any non-mouse device saw a button press this frame.
        for dev in self.devs_no_mouse:
            if dev.any_went_down():
                return True
        return False
class Button (object):
    """Binds a single button id to the device it belongs to.

    Implements equality and hashing so instances can be used as dict or
    set keys.
    """
    def __init__( self, device, button ):
        self.dev = device
        self.button = button
    def __eq__( self, other ):
        # Equal when both refer to the same button on the same device.
        return (self.dev, self.button) == (other.dev, other.button)
    def __ne__( self, other ):
        return not self.__eq__( other )
    def __hash__( self ):
        # Consistent with __eq__: equal bindings hash equally.
        return hash( self.dev ) ^ hash( self.button )
    def get_name( self ):
        """Human readable name, as produced by the owning device."""
        return self.dev.get_name( self.button )
    def went_down( self ):
        return self.dev.went_down( self.button )
| 24.925532 | 70 | 0.555484 |
47a04206dfe521ee4e80fac52d3694158c2a5fff | 362 | py | Python | streaming-python/ut_join/joinMapperTU.py | pratitidevelop/hadoop-framework-examples | 8f569fe41dbee648514f810865eea005053e9348 | [
"Apache-2.0"
] | 141 | 2015-01-02T18:09:02.000Z | 2022-01-29T15:57:10.000Z | streaming-python/ut_join/joinMapperTU.py | pratitidevelop/hadoop-framework-examples | 8f569fe41dbee648514f810865eea005053e9348 | [
"Apache-2.0"
] | 7 | 2015-10-05T22:08:43.000Z | 2020-03-11T07:28:39.000Z | streaming-python/ut_join/joinMapperTU.py | pratitidevelop/hadoop-framework-examples | 8f569fe41dbee648514f810865eea005053e9348 | [
"Apache-2.0"
] | 163 | 2015-03-21T13:11:20.000Z | 2022-01-07T05:59:16.000Z | #!/usr/bin/env python
import sys
# Map step of a reduce-side join (Hadoop Streaming, Python 2 syntax).
# Each stdin line is one tab-separated record from one of two datasets;
# the record type is distinguished purely by its column count.
# NOTE(review): column meanings are inferred from the indices used below --
# confirm against the actual input files.
for line in sys.stdin:
    user_id = ""
    product_id = "-"
    location = "-"
    line = line.strip()
    splits = line.split("\t")
    if len(splits) == 5:
        # 5-column record: user id in column 2, product id in column 1.
        user_id = splits[2]
        product_id = splits[1]
    else:
        # Other record type: user id in column 0, location in column 3.
        user_id = splits[0]
        location = splits[3]
    # Emit "user_id<TAB>product_id<TAB>location" keyed for the reducer;
    # the field that this record type lacks stays "-".
    print '%s\t%s\t%s' % (user_id,product_id,location)
| 22.625 | 51 | 0.569061 |
ee652e0139cb05a4cffa960de8900683923e9cd2 | 2,000 | py | Python | rabbitai/migrations/versions/42b4c9e01447_security_converge_databases.py | psbsgic/rabbitai | 769e120ba605d56ac076f810a549c38dac410c8e | [
"Apache-2.0"
] | null | null | null | rabbitai/migrations/versions/42b4c9e01447_security_converge_databases.py | psbsgic/rabbitai | 769e120ba605d56ac076f810a549c38dac410c8e | [
"Apache-2.0"
] | null | null | null | rabbitai/migrations/versions/42b4c9e01447_security_converge_databases.py | psbsgic/rabbitai | 769e120ba605d56ac076f810a549c38dac410c8e | [
"Apache-2.0"
] | 1 | 2021-07-09T16:29:50.000Z | 2021-07-09T16:29:50.000Z | """security converge databases
Revision ID: 42b4c9e01447
Revises: 5daced1f0e76
Create Date: 2020-12-14 10:49:36.110805
"""
# revision identifiers, used by Alembic.
revision = "42b4c9e01447"
down_revision = "1f6dca87d1a2"
import sqlalchemy as sa
from alembic import op
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm import Session
from rabbitai.migrations.shared.security_converge import (
add_pvms,
get_reversed_new_pvms,
get_reversed_pvm_map,
migrate_roles,
Pvm,
)
# Permissions to create for the converged "Database" model.
NEW_PVMS = {"Database": ("can_read", "can_write",)}
# Old DatabaseView permission -> replacement converged permission(s).
PVM_MAP = {
    Pvm("DatabaseView", "can_add"): (Pvm("Database", "can_write"),),
    Pvm("DatabaseView", "can_delete"): (Pvm("Database", "can_write"),),
    Pvm("DatabaseView", "can_edit",): (Pvm("Database", "can_write"),),
    Pvm("DatabaseView", "can_list",): (Pvm("Database", "can_read"),),
    Pvm("DatabaseView", "can_mulexport",): (Pvm("Database", "can_read"),),
    Pvm("DatabaseView", "can_post",): (Pvm("Database", "can_write"),),
    Pvm("DatabaseView", "can_show",): (Pvm("Database", "can_read"),),
    Pvm("DatabaseView", "muldelete",): (Pvm("Database", "can_write"),),
    Pvm("DatabaseView", "yaml_export",): (Pvm("Database", "can_read"),),
}
def upgrade():
    """Create the converged ``Database`` permissions and remap old roles."""
    db_session = Session(bind=op.get_bind())
    # Register the new permission/view-menu pairs, then rewrite existing
    # role grants according to PVM_MAP.
    add_pvms(db_session, NEW_PVMS)
    migrate_roles(db_session, PVM_MAP)
    try:
        db_session.commit()
    except SQLAlchemyError as ex:
        print(f"An error occurred while upgrading permissions: {ex}")
        db_session.rollback()
def downgrade():
    """Restore the pre-converge permissions by reversing PVM_MAP."""
    db_session = Session(bind=op.get_bind())
    # Reverse both the permission definitions and the role mapping.
    add_pvms(db_session, get_reversed_new_pvms(PVM_MAP))
    migrate_roles(db_session, get_reversed_pvm_map(PVM_MAP))
    try:
        db_session.commit()
    except SQLAlchemyError as ex:
        print(f"An error occurred while downgrading permissions: {ex}")
        db_session.rollback()
| 29.850746 | 74 | 0.6785 |
567ad880faa15201baecbb33073514d712ac5ba7 | 15,865 | py | Python | tests/get_or_create/tests.py | metamatik/django | 415ae960bb9f1bdae798023fdce3247d2c938eec | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | tests/get_or_create/tests.py | metamatik/django | 415ae960bb9f1bdae798023fdce3247d2c938eec | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | tests/get_or_create/tests.py | metamatik/django | 415ae960bb9f1bdae798023fdce3247d2c938eec | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2020-09-16T07:19:02.000Z | 2020-09-16T07:19:02.000Z | from __future__ import unicode_literals
import traceback
from datetime import date
from django.db import DatabaseError, IntegrityError
from django.test import TestCase, TransactionTestCase, ignore_warnings
from django.utils.encoding import DjangoUnicodeDecodeError
from .models import (
Author, Book, DefaultPerson, ManualPrimaryKeyTest, Person, Profile,
Publisher, Tag, Thing,
)
class GetOrCreateTests(TestCase):
def setUp(self):
self.lennon = Person.objects.create(
first_name='John', last_name='Lennon', birthday=date(1940, 10, 9)
)
def test_get_or_create_method_with_get(self):
created = Person.objects.get_or_create(
first_name="John", last_name="Lennon", defaults={
"birthday": date(1940, 10, 9)
}
)[1]
self.assertFalse(created)
self.assertEqual(Person.objects.count(), 1)
def test_get_or_create_method_with_create(self):
created = Person.objects.get_or_create(
first_name='George', last_name='Harrison', defaults={
'birthday': date(1943, 2, 25)
}
)[1]
self.assertTrue(created)
self.assertEqual(Person.objects.count(), 2)
def test_get_or_create_redundant_instance(self):
"""
If we execute the exact same statement twice, the second time,
it won't create a Person.
"""
Person.objects.get_or_create(
first_name='George', last_name='Harrison', defaults={
'birthday': date(1943, 2, 25)
}
)
created = Person.objects.get_or_create(
first_name='George', last_name='Harrison', defaults={
'birthday': date(1943, 2, 25)
}
)[1]
self.assertFalse(created)
self.assertEqual(Person.objects.count(), 2)
def test_get_or_create_invalid_params(self):
"""
If you don't specify a value or default value for all required
fields, you will get an error.
"""
with self.assertRaises(IntegrityError):
Person.objects.get_or_create(first_name="Tom", last_name="Smith")
def test_get_or_create_on_related_manager(self):
p = Publisher.objects.create(name="Acme Publishing")
# Create a book through the publisher.
book, created = p.books.get_or_create(name="The Book of Ed & Fred")
self.assertTrue(created)
# The publisher should have one book.
self.assertEqual(p.books.count(), 1)
# Try get_or_create again, this time nothing should be created.
book, created = p.books.get_or_create(name="The Book of Ed & Fred")
self.assertFalse(created)
# And the publisher should still have one book.
self.assertEqual(p.books.count(), 1)
# Add an author to the book.
ed, created = book.authors.get_or_create(name="Ed")
self.assertTrue(created)
# The book should have one author.
self.assertEqual(book.authors.count(), 1)
# Try get_or_create again, this time nothing should be created.
ed, created = book.authors.get_or_create(name="Ed")
self.assertFalse(created)
# And the book should still have one author.
self.assertEqual(book.authors.count(), 1)
# Add a second author to the book.
fred, created = book.authors.get_or_create(name="Fred")
self.assertTrue(created)
# The book should have two authors now.
self.assertEqual(book.authors.count(), 2)
# Create an Author not tied to any books.
Author.objects.create(name="Ted")
# There should be three Authors in total. The book object should have two.
self.assertEqual(Author.objects.count(), 3)
self.assertEqual(book.authors.count(), 2)
# Try creating a book through an author.
_, created = ed.books.get_or_create(name="Ed's Recipes", publisher=p)
self.assertTrue(created)
# Now Ed has two Books, Fred just one.
self.assertEqual(ed.books.count(), 2)
self.assertEqual(fred.books.count(), 1)
# Use the publisher's primary key value instead of a model instance.
_, created = ed.books.get_or_create(name='The Great Book of Ed', publisher_id=p.id)
self.assertTrue(created)
# Try get_or_create again, this time nothing should be created.
_, created = ed.books.get_or_create(name='The Great Book of Ed', publisher_id=p.id)
self.assertFalse(created)
# The publisher should have three books.
self.assertEqual(p.books.count(), 3)
def test_defaults_exact(self):
"""
If you have a field named defaults and want to use it as an exact
lookup, you need to use 'defaults__exact'.
"""
obj, created = Person.objects.get_or_create(
first_name='George', last_name='Harrison', defaults__exact='testing', defaults={
'birthday': date(1943, 2, 25),
'defaults': 'testing',
}
)
self.assertTrue(created)
self.assertEqual(obj.defaults, 'testing')
obj2, created = Person.objects.get_or_create(
first_name='George', last_name='Harrison', defaults__exact='testing', defaults={
'birthday': date(1943, 2, 25),
'defaults': 'testing',
}
)
self.assertFalse(created)
self.assertEqual(obj, obj2)
def test_callable_defaults(self):
"""
Callables in `defaults` are evaluated if the instance is created.
"""
obj, created = Person.objects.get_or_create(
first_name="George",
defaults={"last_name": "Harrison", "birthday": lambda: date(1943, 2, 25)},
)
self.assertTrue(created)
self.assertEqual(date(1943, 2, 25), obj.birthday)
    def test_callable_defaults_not_called(self):
        # Callables in `defaults` must NOT be evaluated when the lookup
        # matches an existing row.  NOTE(review): this relies on a matching
        # "John Lennon" Person existing already (presumably created in this
        # class's setUp, which is outside this view -- verify).  If the
        # lambda were invoked, raise_exception() would fail the test.
        def raise_exception():
            raise AssertionError
        obj, created = Person.objects.get_or_create(
            first_name="John", last_name="Lennon",
            defaults={"birthday": lambda: raise_exception()},
        )
class GetOrCreateTestsWithManualPKs(TestCase):
    """get_or_create() behavior on a model with a manually assigned primary key."""
    def setUp(self):
        # Seed row used by the duplicate-pk tests below.
        self.first_pk = ManualPrimaryKeyTest.objects.create(id=1, data="Original")
    def test_create_with_duplicate_primary_key(self):
        """
        If you specify an existing primary key, but different other fields,
        then you will get an error and data will not be updated.
        """
        with self.assertRaises(IntegrityError):
            ManualPrimaryKeyTest.objects.get_or_create(id=1, data="Different")
        self.assertEqual(ManualPrimaryKeyTest.objects.get(id=1).data, "Original")
    def test_get_or_create_raises_IntegrityError_plus_traceback(self):
        """
        get_or_create should raise IntegrityErrors with the full traceback.
        This is tested by checking that a known method call is in the traceback.
        We cannot use assertRaises here because we need to inspect
        the actual traceback. Refs #16340.
        """
        try:
            ManualPrimaryKeyTest.objects.get_or_create(id=1, data="Different")
        except IntegrityError:
            # The internal save call must appear in the formatted traceback.
            formatted_traceback = traceback.format_exc()
            self.assertIn(str('obj.save'), formatted_traceback)
    # MySQL emits a warning when broken data is saved
    @ignore_warnings(module='django.db.backends.mysql.base')
    def test_savepoint_rollback(self):
        """
        Regression test for #20463: the database connection should still be
        usable after a DataError or ProgrammingError in .get_or_create().
        """
        try:
            # Invalid utf-8 bytes should make the backend raise.
            Person.objects.get_or_create(
                birthday=date(1970, 1, 1),
                defaults={'first_name': b"\xff", 'last_name': b"\xff"})
        except (DatabaseError, DjangoUnicodeDecodeError):
            # The connection must still work after the failure.
            Person.objects.create(
                first_name="Bob", last_name="Ross", birthday=date(1950, 1, 1))
        else:
            self.skipTest("This backend accepts broken utf-8.")
    def test_get_or_create_empty(self):
        """
        If all the attributes on a model have defaults, get_or_create() doesn't
        require any arguments.
        """
        DefaultPerson.objects.get_or_create()
class GetOrCreateTransactionTests(TransactionTestCase):
    """get_or_create() under real transactions (not wrapped/rolled back per test)."""
    available_apps = ['get_or_create']
    def test_get_or_create_integrityerror(self):
        """
        Regression test for #15117. Requires a TransactionTestCase on
        databases that delay integrity checks until the end of transactions,
        otherwise the exception is never raised.
        """
        try:
            # Person(id=1) was never saved, so the FK constraint must fail.
            Profile.objects.get_or_create(person=Person(id=1))
        except IntegrityError:
            pass
        else:
            self.skipTest("This backend does not support integrity checks.")
class GetOrCreateThroughManyToMany(TestCase):
    """get_or_create() invoked through a many-to-many related manager."""

    def test_get_get_or_create(self):
        # An already-related object is fetched, not re-created.
        existing_tag = Tag.objects.create(text='foo')
        thing = Thing.objects.create(name='a')
        thing.tags.add(existing_tag)
        tag, was_created = thing.tags.get_or_create(text='foo')
        self.assertFalse(was_created)
        self.assertEqual(tag.pk, existing_tag.pk)

    def test_create_get_or_create(self):
        # A missing related object is created and linked to the instance.
        thing = Thing.objects.create(name='a')
        tag, was_created = thing.tags.get_or_create(text='foo')
        self.assertTrue(was_created)
        self.assertEqual(tag.text, 'foo')
        self.assertIn(tag, thing.tags.all())

    def test_something(self):
        # A Tag with the same text exists but is not related to the Thing,
        # so the create step hits the unique constraint.
        Tag.objects.create(text='foo')
        thing = Thing.objects.create(name='a')
        with self.assertRaises(IntegrityError):
            thing.tags.get_or_create(text='foo')
class UpdateOrCreateTests(TestCase):
    """Tests for QuerySet.update_or_create() and its related-manager variants."""
    def test_update(self):
        # An existing row is updated in place; created must be False.
        Person.objects.create(
            first_name='John', last_name='Lennon', birthday=date(1940, 10, 9)
        )
        p, created = Person.objects.update_or_create(
            first_name='John', last_name='Lennon', defaults={
                'birthday': date(1940, 10, 10)
            }
        )
        self.assertFalse(created)
        self.assertEqual(p.first_name, 'John')
        self.assertEqual(p.last_name, 'Lennon')
        self.assertEqual(p.birthday, date(1940, 10, 10))
    def test_create(self):
        # No matching row: update_or_create falls back to creating one.
        p, created = Person.objects.update_or_create(
            first_name='John', last_name='Lennon', defaults={
                'birthday': date(1940, 10, 10)
            }
        )
        self.assertTrue(created)
        self.assertEqual(p.first_name, 'John')
        self.assertEqual(p.last_name, 'Lennon')
        self.assertEqual(p.birthday, date(1940, 10, 10))
    def test_create_twice(self):
        # Repeating the identical call must not create a second row.
        params = {
            'first_name': 'John',
            'last_name': 'Lennon',
            'birthday': date(1940, 10, 10),
        }
        Person.objects.update_or_create(**params)
        # If we execute the exact same statement, it won't create a Person.
        p, created = Person.objects.update_or_create(**params)
        self.assertFalse(created)
    def test_integrity(self):
        """
        If you don't specify a value or default value for all required
        fields, you will get an error.
        """
        with self.assertRaises(IntegrityError):
            Person.objects.update_or_create(first_name="Tom", last_name="Smith")
    def test_manual_primary_key_test(self):
        """
        If you specify an existing primary key, but different other fields,
        then you will get an error and data will not be updated.
        """
        ManualPrimaryKeyTest.objects.create(id=1, data="Original")
        with self.assertRaises(IntegrityError):
            ManualPrimaryKeyTest.objects.update_or_create(id=1, data="Different")
        self.assertEqual(ManualPrimaryKeyTest.objects.get(id=1).data, "Original")
    def test_error_contains_full_traceback(self):
        """
        update_or_create should raise IntegrityErrors with the full traceback.
        This is tested by checking that a known method call is in the traceback.
        We cannot use assertRaises/assertRaises here because we need to inspect
        the actual traceback. Refs #16340.
        """
        try:
            ManualPrimaryKeyTest.objects.update_or_create(id=1, data="Different")
        except IntegrityError:
            # The internal save call must be visible in the traceback.
            formatted_traceback = traceback.format_exc()
            self.assertIn('obj.save', formatted_traceback)
    def test_create_with_related_manager(self):
        """
        Should be able to use update_or_create from the related manager to
        create a book. Refs #23611.
        """
        p = Publisher.objects.create(name="Acme Publishing")
        book, created = p.books.update_or_create(name="The Book of Ed & Fred")
        self.assertTrue(created)
        self.assertEqual(p.books.count(), 1)
    def test_update_with_related_manager(self):
        """
        Should be able to use update_or_create from the related manager to
        update a book. Refs #23611.
        """
        p = Publisher.objects.create(name="Acme Publishing")
        book = Book.objects.create(name="The Book of Ed & Fred", publisher=p)
        self.assertEqual(p.books.count(), 1)
        name = "The Book of Django"
        book, created = p.books.update_or_create(defaults={'name': name}, id=book.id)
        self.assertFalse(created)
        self.assertEqual(book.name, name)
        self.assertEqual(p.books.count(), 1)
    def test_create_with_many(self):
        """
        Should be able to use update_or_create from the m2m related manager to
        create a book. Refs #23611.
        """
        p = Publisher.objects.create(name="Acme Publishing")
        author = Author.objects.create(name="Ted")
        book, created = author.books.update_or_create(name="The Book of Ed & Fred", publisher=p)
        self.assertTrue(created)
        self.assertEqual(author.books.count(), 1)
    def test_update_with_many(self):
        """
        Should be able to use update_or_create from the m2m related manager to
        update a book. Refs #23611.
        """
        p = Publisher.objects.create(name="Acme Publishing")
        author = Author.objects.create(name="Ted")
        book = Book.objects.create(name="The Book of Ed & Fred", publisher=p)
        book.authors.add(author)
        self.assertEqual(author.books.count(), 1)
        name = "The Book of Django"
        book, created = author.books.update_or_create(defaults={'name': name}, id=book.id)
        self.assertFalse(created)
        self.assertEqual(book.name, name)
        self.assertEqual(author.books.count(), 1)
    def test_defaults_exact(self):
        """
        If you have a field named defaults and want to use it as an exact
        lookup, you need to use 'defaults__exact'.
        """
        obj, created = Person.objects.update_or_create(
            first_name='George', last_name='Harrison', defaults__exact='testing', defaults={
                'birthday': date(1943, 2, 25),
                'defaults': 'testing',
            }
        )
        self.assertTrue(created)
        self.assertEqual(obj.defaults, 'testing')
        obj, created = Person.objects.update_or_create(
            first_name='George', last_name='Harrison', defaults__exact='testing', defaults={
                'birthday': date(1943, 2, 25),
                'defaults': 'another testing',
            }
        )
        self.assertFalse(created)
        self.assertEqual(obj.defaults, 'another testing')
    def test_update_callable_default(self):
        # Callables in `defaults` are evaluated for update_or_create too.
        obj, created = Person.objects.update_or_create(
            first_name='George', last_name='Harrison',
            defaults={'birthday': lambda: date(1943, 2, 25)},
        )
        self.assertEqual(obj.birthday, date(1943, 2, 25))
| 38.414044 | 96 | 0.630192 |
2c2490f11a4bf135bb658ba257d1ad312c2836ca | 121 | py | Python | Cookies.py | LeszekBlazewski/SeleniumAutomationTests | 0503fd6ae6fe90ace79ffccd78e8bfca5c0434bd | [
"MIT"
] | null | null | null | Cookies.py | LeszekBlazewski/SeleniumAutomationTests | 0503fd6ae6fe90ace79ffccd78e8bfca5c0434bd | [
"MIT"
] | null | null | null | Cookies.py | LeszekBlazewski/SeleniumAutomationTests | 0503fd6ae6fe90ace79ffccd78e8bfca5c0434bd | [
"MIT"
] | null | null | null | cookies = [{"name": "zaleniumTestPassed", "value": "false"},
{"name": "zaleniumTestPassed", "value": "true"}]
| 40.333333 | 60 | 0.578512 |
8c5859e269b7a7a1e0eb99bb3907c74dfb7f3e6d | 3,910 | py | Python | skynet/misc_utils.py | aleksanw/skynet | 87083443a0ec108113d149f52f7bdf87445af18d | [
"Apache-2.0"
] | null | null | null | skynet/misc_utils.py | aleksanw/skynet | 87083443a0ec108113d149f52f7bdf87445af18d | [
"Apache-2.0"
] | 9 | 2020-01-28T22:21:58.000Z | 2022-02-09T23:49:57.000Z | skynet/misc_utils.py | aleksanw/skynet | 87083443a0ec108113d149f52f7bdf87445af18d | [
"Apache-2.0"
] | null | null | null | import numpy as np
def boolean_flag(parser, name, default=False, help=None):
    """Register a --<name>/--no-<name> switch pair on *parser*.

    Parameters
    ----------
    parser: argparse.Parser
        parser to add the flag to
    name: str
        --<name> will enable the flag, while --no-<name> will disable it
    default: bool or None
        default value of the flag
    help: str
        help string for the flag
    """
    # argparse stores dashes as underscores in the namespace attribute.
    dest = name.replace('-', '_')
    parser.add_argument('--{}'.format(name), action='store_true',
                        default=default, dest=dest, help=help)
    parser.add_argument('--no-{}'.format(name), action='store_false', dest=dest)
class LazyFrames(object):
    def __init__(self, frames):
        """Keep the individual observation frames without concatenating them.

        Shared frames between successive observations are then stored only
        once, which drastically reduces memory usage for large replay
        buffers (e.g. DQN's 1M-frame buffer).  Convert to a numpy array
        only right before feeding the model.
        """
        self._frames = frames

    def __array__(self, dtype=None):
        # Concatenate lazily along the channel axis; honour the optional
        # dtype requested via np.asarray(obj, dtype=...).
        stacked = np.concatenate(self._frames, axis=2)
        if dtype is None:
            return stacked
        return stacked.astype(dtype)
'''
Routines to select the less loaded gpu
Based on code by Yaroslav Bulatov:
https://gist.github.com/yaroslavvb/3de518e0912e21a150c55c0eb5cfadeb
'''
import subprocess, re, os
# Force CUDA to enumerate devices in PCI bus order so the GPU id picked
# below matches the ids reported by nvidia-smi.
os.environ["CUDA_DEVICE_ORDER"] = 'PCI_BUS_ID'
# Nvidia-smi GPU memory parsing.
# Tested on nvidia-smi 370.23
def run_command(cmd):
    """Execute *cmd* through the shell and return its stdout as a string."""
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
    stdout, _ = process.communicate()
    # nvidia-smi output is plain ASCII.
    return stdout.decode("ascii")
def list_available_gpus():
    """Returns list of available GPU ids."""
    # Requires the `nvidia-smi` binary on PATH.
    output = run_command("nvidia-smi -L")
    # lines of the form GPU 0: TITAN X
    gpu_regex = re.compile(r"GPU (?P<gpu_id>\d+):")
    result = []
    for line in output.strip().split("\n"):
        m = gpu_regex.match(line)
        # NOTE: assert is stripped under `python -O`; unparseable output
        # would then raise later instead of here.
        assert m, "Couldnt parse "+line
        result.append(int(m.group("gpu_id")))
    return result
def gpu_memory_map():
    """Return a dict mapping each GPU id to the total memory (MiB) its
    processes currently allocate, summed from `nvidia-smi` output.

    GPUs with no running processes map to 0.
    """
    output = run_command("nvidia-smi")
    # Only the process table below the "GPU Memory" header lists
    # per-process usage rows.
    gpu_output = output[output.find("GPU Memory"):]
    # lines of the form
    # |    0      8734    C   python                             11705MiB |
    memory_regex = re.compile(r"[|]\s+?(?P<gpu_id>\d+)\D+?(?P<pid>\d+).+[ ](?P<gpu_memory>\d+)MiB")
    result = {gpu_id: 0 for gpu_id in list_available_gpus()}
    # (The original also built an unused `rows` list here; removed.)
    for row in gpu_output.split("\n"):
        m = memory_regex.search(row)
        if not m:
            continue
        # Accumulate usage per GPU across all of its processes.
        gpu_id = int(m.group("gpu_id"))
        gpu_memory = int(m.group("gpu_memory"))
        result[gpu_id] += gpu_memory
    return result
def gpu_memory_map_simple():
    """Return {gpu_id: used_memory_MiB} using nvidia-smi's CSV query mode."""
    output = run_command('nvidia-smi --query-gpu=memory.used --format=csv')
    # Skip the CSV header line; each remaining line looks like "123 MiB".
    # The regex strips the " MiB" suffix (".?" eats the leading space).
    list_mem = [float(re.sub('.?MiB', '', s)) for s in output.split('\n')[1:] if len(s)>1]
    # NOTE(review): assumes nvidia-smi lists GPUs in id order so that
    # enumerate() indices match GPU ids -- true with CUDA_DEVICE_ORDER set
    # above, verify on exotic setups.
    result = {gpu_id: memory for (gpu_id, memory) in enumerate(list_mem)}
    return result
def pick_gpu_lowest_memory(filter=None):
    """Return the id of the GPU with the least allocated memory.

    Parameters
    ----------
    filter: int, str or list, optional
        GPU id(s) to exclude from consideration.  If the filter would
        exclude every GPU, it is ignored.  (The parameter name shadows the
        ``filter`` builtin but is kept for backward compatibility.)
    """
    # Normalize the filter to a list of ints.
    if filter is not None:
        if type(filter) == str or type(filter) == int:
            filter = [int(filter)]
        elif type(filter) == list:
            filter = [int(item) for item in filter]
    gpu_map = gpu_memory_map_simple()
    if not filter or (filter and not bool(set(gpu_map.keys()) - set(filter))):
        # Filter is None or Filter includes all existing gpus; ignore filter and return the best gpu
        filter = []
    # Reuse the map queried above; the original invoked nvidia-smi a second
    # time here via another gpu_memory_map_simple() call.
    memory_gpu_map = [(memory, gpu_id) for (gpu_id, memory) in gpu_map.items()
                      if gpu_id not in filter]
    best_memory, best_gpu = sorted(memory_gpu_map)[0]
    return best_gpu
68b98a56772771f5d8141ced8f68c918bebf5240 | 9,622 | py | Python | src/spn/structure/Base.py | Hugo101/SPFlow | daaeed819f3ef85e6632f2f7a3bf4f8bb663ff8c | [
"Apache-2.0"
] | 1 | 2021-09-01T16:26:20.000Z | 2021-09-01T16:26:20.000Z | src/spn/structure/Base.py | Hugo101/SPFlow | daaeed819f3ef85e6632f2f7a3bf4f8bb663ff8c | [
"Apache-2.0"
] | null | null | null | src/spn/structure/Base.py | Hugo101/SPFlow | daaeed819f3ef85e6632f2f7a3bf4f8bb663ff8c | [
"Apache-2.0"
] | null | null | null | '''
Created on March 20, 2018
@author: Alejandro Molina
'''
import numpy as np
import collections
class Node(object):
    """Base class for all SPN graph nodes.

    Supports `weight * node` and `node_a + node_b` to build Sum nodes, and
    `node_a * node_b` to build Product nodes, so SPNs can be written as
    arithmetic expressions.
    """
    def __init__(self):
        # Unique id within a network, assigned by assign_ids().
        self.id = 0
        # List of variable indices this node covers.
        self.scope = []
    @property
    def name(self):
        return "%sNode_%s" % (self.__class__.__name__, self.id)
    def __repr__(self):
        return self.name
    def __rmul__(self, weight):
        # `0.3 * node` stashes the weight for a following `+` (Sum build).
        assert type(weight) == int or type(weight) == float
        self._tmp_weight = weight
        return self
    def __mul__(self, node):
        # `node_a * node_b` builds a Product; scopes must be disjoint.
        assert isinstance(node, Node)
        assert len(node.scope) > 0, "right node has no scope"
        assert len(self.scope) > 0, "left node has no scope"
        assert len(set(node.scope).intersection(set(self.scope))) == 0, "children's scope is not disjoint"
        result = Product()
        result.children.append(self)
        result.children.append(node)
        result.scope.extend(self.scope)
        result.scope.extend(node.scope)
        assign_ids(result)
        return result
    def __add__(self, node):
        # `(w1*a) + (w2*b)` builds a Sum; scopes must match and weights
        # (stashed by __rmul__) must add up to 1.
        assert isinstance(node, Node)
        assert hasattr(node, "_tmp_weight"), "right node has no weight"
        assert hasattr(self, "_tmp_weight"), "left node has no weight"
        assert len(node.scope) > 0, "right node has no scope"
        assert len(self.scope) > 0, "left node has no scope"
        assert set(node.scope) == (set(self.scope)), "children's scope are not the same"
        from numpy import isclose
        assert isclose(1.0, self._tmp_weight + node._tmp_weight), \
            "unnormalized weights, maybe trying to add many nodes at the same time?"
        result = Sum()
        result.children.append(self)
        result.weights.append(self._tmp_weight)
        result.children.append(node)
        result.weights.append(node._tmp_weight)
        result.scope.extend(self.scope)
        # Accumulate the weight so chained additions keep summing toward 1.
        result._tmp_weight = self._tmp_weight + node._tmp_weight
        assign_ids(result)
        return result
class Sum(Node):
    """Weighted mixture node: child i contributes with weights[i]."""

    def __init__(self, weights=None, children=None):
        Node.__init__(self)
        # Fresh lists per instance (avoid the mutable-default pitfall).
        self.weights = [] if weights is None else weights
        self.children = [] if children is None else children
class Product(Node):
    """Factorization node: children cover disjoint scopes."""

    def __init__(self, children=None):
        Node.__init__(self)
        # Fresh list per instance (avoid the mutable-default pitfall).
        self.children = [] if children is None else children
class Leaf(Node):
    """Terminal node; `scope` may be a single variable index or a list."""

    def __init__(self, scope=None):
        Node.__init__(self)
        if scope is None:
            return
        # Exact type checks kept deliberately (e.g. bool is rejected).
        if type(scope) == int:
            self.scope.append(scope)
        elif type(scope) == list:
            self.scope.extend(scope)
        else:
            raise Exception("invalid scope type %s " % (type(scope)))
class Context:
    """Per-dataset metadata: one meta type (and optionally a domain and a
    parametric type) for each feature column."""
    def __init__(self, meta_types=None, domains=None, parametric_types=None):
        self.meta_types = meta_types
        self.domains = domains
        self.parametric_types = parametric_types
        # Derive meta types from the parametric types when not given.
        if self.meta_types is None and parametric_types is not None:
            self.meta_types = []
            for p in parametric_types:
                self.meta_types.append(p.type.meta_type)
    def get_meta_types_by_scope(self, scopes):
        # Meta types for the given feature indices.
        return [self.meta_types[s] for s in scopes]
    def get_domains_by_scope(self, scopes):
        # Domains for the given feature indices.
        return [self.domains[s] for s in scopes]
    def add_domains(self, data):
        """Compute per-column domains from a 2D data matrix and store them."""
        assert len(data.shape) == 2, "data is not 2D?"
        assert data.shape[1] == len(self.meta_types), "Data columns and metatype size doesn't match"
        from spn.structure.StatisticalTypes import MetaType
        domain = []
        for col in range(data.shape[1]):
            feature_meta_type = self.meta_types[col]
            domain_values = [np.min(data[:, col]), np.max(data[:, col])]
            if feature_meta_type == MetaType.REAL or feature_meta_type == MetaType.BINARY:
                # Continuous/binary: store [min, max].
                domain.append(domain_values)
            elif feature_meta_type == MetaType.DISCRETE:
                # Discrete: store every integer value in [min, max].
                domain.append(np.arange(domain_values[0], domain_values[1] + 1, 1))
            else:
                raise Exception("Unkown MetaType " + str(feature_meta_type))
        # NOTE(review): with mixed-length domains this builds a ragged
        # array; newer numpy requires dtype=object for that -- verify
        # against the numpy version pinned by this project.
        self.domains = np.asanyarray(domain)
        return self
def get_number_of_edges(node):
    """Count parent->child edges over all Sum and Product nodes under *node*."""
    inner_nodes = get_nodes_by_type(node, (Sum, Product))
    return sum(len(inner.children) for inner in inner_nodes)
def get_number_of_layers(node):
    """Return the depth of the graph rooted at *node* (root counts as 1)."""
    node_depth = {}
    def count_layers(node):
        # Root gets depth 1; each child gets the depth recorded by the
        # first parent that reached it (setdefault keeps the BFS minimum).
        ndepth = node_depth.setdefault(node, 1)
        if hasattr(node, "children"):
            for c in node.children:
                node_depth.setdefault(c, ndepth + 1)
    bfs(node, count_layers)
    return max(node_depth.values())
def rebuild_scopes_bottom_up(node):
    """Recompute every node's scope as the union of its children's scopes.

    Leaf scopes are taken as-is; inner nodes are rebuilt recursively.
    """
    # this function is not safe (updates in place)
    if isinstance(node, Leaf):
        return node.scope
    new_scope = set()
    for c in node.children:
        new_scope.update(rebuild_scopes_bottom_up(c))
    node.scope = list(new_scope)
    return node.scope
def bfs(root, func):
    """Apply *func* to every node reachable from *root* in breadth-first
    order, visiting each node exactly once (children may be shared)."""
    visited = set([root])
    queue = collections.deque([root])
    while queue:
        current = queue.popleft()
        func(current)
        if isinstance(current, Leaf):
            continue
        for child in current.children:
            if child not in visited:
                visited.add(child)
                queue.append(child)
def get_nodes_by_type(node, ntype=Node):
    """Collect, in BFS order, every node under *node* that is an instance
    of *ntype* (a class or tuple of classes)."""
    assert node is not None
    matches = []

    def collect(candidate):
        if isinstance(candidate, ntype):
            matches.append(candidate)

    bfs(node, collect)
    return matches
def assign_ids(node, ids=None):
    """Assign a unique integer id to every node under *node*.

    Passing the same `ids` dict across calls lets several subgraphs share
    one consistent numbering.
    """
    if ids is None:
        ids = {}

    def number_node(current):
        # First visit allocates the next free id; later visits reuse it.
        if current not in ids:
            ids[current] = len(ids)
        current.id = ids[current]

    bfs(node, number_node)
    return node
def eval_spn_bottom_up(node, eval_functions, all_results=None, debug=False, **args):
    """
    Evaluates the spn bottom up
    :param node: spn root
    :param eval_functions: is a dictionary that contains k:Class of the node, v:lambda function that receives as parameters (node, args**) for leave nodes and (node, [children results], args**)
    :param all_results: is a dictionary that contains k:Class of the node, v:result of the evaluation of the lambda function for that node. It is used to store intermediate results so that non-tree graphs can be computed in O(n) size of the network
    :param debug: whether to present progress information on the evaluation
    :param args: free parameters that will be fed to the lambda functions.
    :return: the result of computing and propagating all the values throught the network
    """
    # evaluating in reverse order, means that we compute all the children first then their parents
    nodes = reversed(get_nodes_by_type(node))
    if debug:
        from tqdm import tqdm
        nodes = tqdm(list(nodes))
    if all_results is None:
        all_results = {}
    else:
        all_results.clear()
    # Push the evaluation lambdas onto a per-class stack so nested
    # evaluations can temporarily shadow them and restore them afterwards.
    for node_type, func in eval_functions.items():
        if not hasattr(node_type, '_eval_func'):
            node_type._eval_func = []
        node_type._eval_func.append(func)
        node_type._is_leaf = issubclass(node_type, Leaf)
    # Scratch buffer reused across nodes to avoid reallocating a children
    # results list for every inner node.
    tmp_children_list = []
    len_tmp_children_list = 0
    for n in nodes:
        func = None
        try:
            func = n.__class__._eval_func[-1]
        except (AttributeError, IndexError):
            # No function registered for this node's class; reported below.
            # (The original used a bare `except:` here, which also masked
            # unrelated errors such as KeyboardInterrupt.)
            pass
        if func is None:
            raise Exception("No lambda function associated with type: %s" % (n.__class__.__name__))
        if n.__class__._is_leaf:
            result = func(n, **args)
        else:
            len_children = len(n.children)
            if len_tmp_children_list < len_children:
                tmp_children_list.extend([None] * len_children)
                len_tmp_children_list = len(tmp_children_list)
            # Children were evaluated earlier (reverse BFS order), so their
            # results are already in all_results.
            for i in range(len_children):
                tmp_children_list[i] = all_results[n.children[i]]
            result = func(n, tmp_children_list[0:len_children], **args)
        all_results[n] = result
    # Pop our stack frame, restoring any previously registered functions.
    for node_type, func in eval_functions.items():
        del node_type._eval_func[-1]
    return all_results[node]
def eval_spn_top_down(root, eval_functions, all_results=None, parent_result=None, **args):
    """
    evaluates an spn top to down
    :param root: spnt root
    :param eval_functions: is a dictionary that contains k:Class of the node, v:lambda function that receives as parameters (node, parent_results, args**) and returns [node, intermediate_result]. This intermediate_result will be passed to node as parent_result. If intermediate_result is None, no further propagation occurs
    :param all_results: is a dictionary that contains k:Class of the node, v:result of the evaluation of the lambda function for that node.
    :param parent_result: initial input to the root node
    :param args: free parameters that will be fed to the lambda functions.
    :return: the result of computing and propagating all the values throught the network
    """
    if all_results is None:
        all_results = {}
    else:
        all_results.clear()
    # BFS from the root: each queue entry pairs a node with the result its
    # parent computed for it.
    queue = collections.deque([(root, parent_result)])
    while queue:
        node, parent_result = queue.popleft()
        result = eval_functions[type(node)](node, parent_result, **args)
        all_results[node] = result
        # A non-None result on an inner node must supply one entry per
        # child; None stops propagation down that branch.
        if result is not None and not isinstance(node, Leaf):
            assert len(result) == len(node.children), "invalid function result for node %s" % (node.id)
            for i, node in enumerate(node.children):
                queue.append((node, result[i]))
    return all_results[root]
| 32.397306 | 323 | 0.63438 |
67c078862eae535920e34fbb278a8663e66f3bd9 | 13,783 | py | Python | pkgs/sdk-pkg/src/genie/libs/sdk/libs/abstracted_libs/subsection.py | miott/genielibs | 6464642cdd67aa2367bdbb12561af4bb060e5e62 | [
"Apache-2.0"
] | null | null | null | pkgs/sdk-pkg/src/genie/libs/sdk/libs/abstracted_libs/subsection.py | miott/genielibs | 6464642cdd67aa2367bdbb12561af4bb060e5e62 | [
"Apache-2.0"
] | null | null | null | pkgs/sdk-pkg/src/genie/libs/sdk/libs/abstracted_libs/subsection.py | miott/genielibs | 6464642cdd67aa2367bdbb12561af4bb060e5e62 | [
"Apache-2.0"
] | null | null | null | # python
import logging
from copy import deepcopy
from operator import attrgetter
from json import dumps
# Genie Libs
from genie.libs import sdk
# ats
from ats import aetest
from ats.log.utils import banner
from ats.datastructures import AttrDict
# abstract
from genie.abstract import Lookup
# import pcall
import importlib
try:
pcall = importlib.import_module('ats.async').pcall
except ImportError:
from ats.async_ import pcall
# # import pcall
# from ats.async import pcall
# unicon
from unicon.eal.dialogs import Statement, Dialog
# Genie
from genie.utils.timeout import Timeout
from genie.utils.summary import Summary
from genie.conf.base import Base as ConfBase
from genie.ops.base import Base as OpsBase
# genie.libs
from genie.libs import ops
from genie.libs import conf
from genie.libs.sdk.libs.abstracted_libs.processors import load_config_precessor
from genie.libs.sdk.libs.utils.normalize import _to_dict
from genie.libs.sdk.libs.utils.normalize import merge_dict
# Module-level logger for these common-setup subsections.
log = logging.getLogger(__name__)
# Device types skipped by these subsections (traffic generators have no
# running-config/bootvar to save or learn).
EXCLUDED_DEVICE_TYPES = ['tgn']
@aetest.subsection
def save_bootvar(self, testbed):
    """Check boot information and save bootvar to startup-config
    on every mapped device (in parallel), skipping traffic generators.

    Args:
        testbed (`obj`): Testbed object

    Returns:
        None

    Raises:
        pyATS Results
    """
    # Create Summary
    summary = Summary(title='Summary', width=90)
    devices = []
    for dev in self.parent.mapping_data['devices']:
        device = testbed.devices[dev]
        # Traffic generators are reported as skipped, not processed.
        if device.type in EXCLUDED_DEVICE_TYPES:
            msg = " - This subsection is not supported for 'TGN' devices"
            summarize(summary, message=msg, device=dev)
            continue
        devices.append(device)
    device_dict = {}
    failed = False
    # We don't catch exceptions since failures will lead to passx in that
    # CommonSetup subsection
    asynchronous_boot_var_output = pcall(asynchronous_save_boot_variable,
                                   ckwargs={'self':self, 'device_dict':device_dict},
                                   device = tuple(devices))
    # Each pcall worker returns a {device_name: status} dict.
    for item in asynchronous_boot_var_output:
        for dev, res in item.items():
            if res == 'Failed':
                failed = True
                msg = " - Failed to save boot variable or copy "\
                      "running-config to startup-config"
                summarize(summary, message=msg, device=dev)
            elif res == 'Skipped':
                msg = " - Skipped saving boot variable or copy "\
                      "running-config to startup-config"
                summarize(summary, message=msg, device=dev)
            else:
                msg = " - Successfully saved boot variable"
                summarize(summary, message=msg, device=dev)
    summary.print()
    if failed:
        self.passx("Issue while saving boot variable on one of the devices, "
                   "Check section summary for more details")
@aetest.subsection
def learn_the_system(self, testbed, steps, features=None):
    """Learn and store the system properties

    Args:
        testbed (`obj`): Testbed object
        steps (`obj`): aetest steps object
        features (`dict`): dict of components and the feature that contains the component.
            ex. {'pim': ['autorp',],
                 'bgp': ['confederationpeers', 'gracefulrestart']}

    Returns:
        None

    Raises:
        pyATS Results
    """
    log.info(banner('Learn and store platform information, lldp neighbors'
        ', from PTS if PTS is existed, otherwise from show commands'))
    # get uut, having a uut is mandatory in Genie
    uut = testbed.devices['uut']
    # OS/platform-specific implementations are resolved via abstraction.
    lookup = Lookup.from_device(uut)
    # get platform PTS
    platform_pts = self.parameters.get('pts', {}).get('platform', {}).get('uut', None)
    with steps.start("Store and learn platform information from 'show lldp neighbors detail' on {}"
        .format(self.name)) as step:
        try:
            lookup.sdk.libs.abstracted_libs\
                .subsection.learn_system(device=uut, steps=steps, platform_pts=platform_pts)
        except Exception as e:
            # Learning is best-effort: passx instead of failing the run.
            step.passx('Cannot Learn and Store system info',
                        from_exception=e)
    # learn platform lldp neighbors
    with steps.start("learn platform lldp neighbors on device {}"
        .format(uut.name)) as step:
        # inital lldp ops object
        lldp_ops = lookup.ops.lldp.lldp.Lldp(
            uut, attributes=['info[interfaces][(.*)][neighbors][(.*)][port_id]'])
        # learn the lldp ops
        try:
            lldp_ops.learn()
        except Exception as e:
            step.passx('Cannot learn lldp information',
                        from_exception=e)
        if not hasattr(lldp_ops, 'info'):
            step.passx('No LLDP neighbors')
        # store the lldp information
        uut.lldp_mapping = lldp_ops.info['interfaces']
@aetest.subsection
def learn_the_system_from_conf_ops(self, testbed, steps, features=None):
    """Learn and store the system properties

    Args:
        testbed (`obj`): Testbed object
        steps (`obj`): aetest steps object
        features (`dict`): dict of feature and attributes which want to learn.
            ex. {'conf.pim.Pim': [
                    'pim[vrf_attr][(.*)][address_family_attr][ipv4][send_rp_announce_intf]',
                    'pim[vrf_attr][(.*)][address_family_attr][ipv4][send_rp_announce_group_list]'],
                 'conf.bgp.Bgp': ['bgp[instance][(.*)][vrf_attr][(.*)][confederation_peers_as]']}

    Returns:
        None

    Raises:
        pyATS Results
    """
    def remove_parent_from_conf_dict(conf_dict):
        # Strip circular 'parent' back-references so the learned conf
        # structure can be merged/serialized; iterate over a copy since we
        # mutate conf_dict while walking it.
        temp_dict = deepcopy(conf_dict)
        for key, val in temp_dict.items():
            if key == 'parent':
                conf_dict.pop('parent')
            if isinstance(val, dict):
                remove_parent_from_conf_dict(conf_dict[key])
    def store_structure(device, feature):
        # Learn one {feature: attributes} pair on one device; executed in a
        # pcall worker, so the return value must be picklable.
        # get feature and attributes
        [(ft, attr)] = feature.items()
        log.info(banner("Learning '{n}' feature with "
                        "attribues {a} on device {d}"
                          .format(n=ft, a=attr, d=device)))
        # perform lookup per device
        lib = Lookup.from_device(device)
        # attach ops and conf
        lib.conf = getattr(lib, 'conf', conf)
        lib.ops = getattr(lib, 'ops', ops)
        # create the ops/conf instance
        try:
            obj = attrgetter(ft)(lib)
        except Exception:
            raise AttributeError('Cannot load %s for '
                                 'device %s.' % (ft, device.name))
        # conf learn_config
        if issubclass(obj, ConfBase):
            ret = obj.learn_config(device=device, attributes=attr)
            ret = _to_dict(ret[0])
            # delete the non-used objects for pcall to retrun
            ret.pop('__testbed__')
            ret.pop('devices')
            ret.pop('interfaces')
            remove_parent_from_conf_dict(ret['device_attr'][device.name])
        elif issubclass(obj, OpsBase):
            ret = obj(device, attributes=attr)
            ret.learn()
            # Keep only the picklable 'info' payload.
            temp = AttrDict()
            temp.info = getattr(ret, 'info', {})
            ret = temp
        ret_dict = {}
        ret_dict.setdefault('lts', {}).\
            setdefault(ft, {}).setdefault(device.name, ret)
        # return the dictionary
        return ret_dict
    devices = []
    for name in testbed.devices:
        dev = testbed.devices[name]
        if not dev.is_connected():
            continue
        devices.append(dev)
    # create the abstract object list
    merged_dict = {}
    # One pcall per feature, fanned out across all connected devices.
    for ft in features:
        worker_devs = []
        worker_features = []
        for device in devices:
            worker_devs.append(device)
            worker_features.append({ft: features[ft]})
        # pcall for each feature
        ret = pcall(store_structure, device=worker_devs, feature=worker_features)
        [merge_dict(merged_dict, i) for i in ret]
    self.parent.parameters.update(merged_dict)
    # print out what we learned in LTS
    log.info('LTS information is \n{d}'.format(d=dumps(merged_dict, indent=5)))
@aetest.subsection
def load_config_as_string(self, testbed, steps, configs, connect=False):
    # Push configuration (via the load_config processor) on all mapped
    # devices; when `connect` is True, do it over the console ('a')
    # connection, connecting before and disconnecting after.
    if connect:
        for name in self.parent.mapping_data['devices']:
            dev = testbed.devices[name]
            # connect with console
            try:
                dev.connect(via='a')
            except Exception as e:
                self.failed('Cannot connect the console on {}'.format(dev.name),
                            from_exception=e)
    try:
        load_config_precessor(self, configs)
    except Exception as e:
        # Best-effort: a config load problem passx-es rather than fails.
        self.passx('Cannot Load configuration',
                    from_exception=e)
    # disconnect the router from console
    if connect:
        for name in self.parent.mapping_data['devices']:
            dev = testbed.devices[name]
            # connect with console
            try:
                dev.disconnect()
                dev.destroy()
            except Exception as e:
                self.passx('Cannot disconnect the console on {}'.format(dev.name),
                            from_exception=e)
@aetest.subsection
def configure_replace(self, testbed, steps, devices, timeout=60):
    # Run an OS-specific configure-replace from a saved config file on each
    # requested device.  `devices` maps device name -> {'file_location': ...}.
    for name, dev in devices.items():
        try:
            device = testbed.devices.get(name, None)
            # Silently skip devices missing from the testbed or disconnected.
            if not device or not device.is_connected():
                continue
            try:
                file_location = dev['file_location']
            except KeyError:
                log.error('Missing file_location for device {}'.format(name))
                continue
            lookup = Lookup.from_device(device)
            lookup.sdk.libs.abstracted_libs.subsection.configure_replace(device, file_location, timeout=timeout)
        except Exception as e:
            # Any replace failure fails the whole subsection.
            self.failed("Failed to replace config : {}".format(str(e)))
        log.info("Configure replace is done for device {}".format(name))
@aetest.subsection
def learn_system_defaults(self, testbed):
    """Execute commands to learn default system information

    Args:
        testbed (`obj`): Testbed object

    Returns:
        None

    Raises:
        pyATS Results
    """
    # Get default memory location
    # Populated as {device_name: default_dir} for later file operations.
    self.parent.default_file_system = {}
    # Create Summary
    summary = Summary(title='Summary', width=150)
    for device in self.parent.mapping_data['devices']:
        dev = testbed.devices[device]
        lookup = Lookup.from_device(dev)
        # Skip in case of TGN device
        if dev.type in EXCLUDED_DEVICE_TYPES:
            log.info("This subsection is not supported for "
                     "TGN device '{}'".format(dev.name))
            msg = " - This subsection is not supported for 'TGN' devices"
            summarize(summary, message=msg, device=dev.name)
            continue
        try:
            self.parent.default_file_system[dev.name] = lookup.sdk.libs.\
                        abstracted_libs.subsection.get_default_dir(
                            device=dev)
            msg = " - Successfully learnt system default directory"
            summarize(summary, message=msg, device=device)
        except LookupError as e:
            # No OS-specific implementation available: note it and move on.
            log.info('Cannot find device {d} correspoding get_default_dir'.\
                format(d=dev.name))
            msg = " - Didn't find device OS corresponding "\
                  "'get_default_dir' implementation, Please contact Genie support"
            summarize(summary, message=msg, device=device)
        except Exception as e:
            # Unexpected error: print what we have and fail the subsection.
            msg = " - Failed to learn system default directory"
            summarize(summary, message=msg, device=device)
            summary.print()
            self.failed('Unable to learn system default directory',
                        from_exception=e)
    summary.print()
    if not self.parent.default_file_system:
        # Nothing was learned on any device: report and fail.
        # Create Summary
        summary = Summary(title='Summary', width=90)
        summary.add_message("* Summary for device(s): "
                            "{}".format(', '.join(self.parent.mapping_data['devices'])))
        summary.add_sep_line()
        msg = " - Couldn't set system default directory"
        summarize(summary, message=msg)
        summary.print()
        self.failed('Unable to set system default directory')
    # TODO: Learn and save more system defaults in this section
def summarize(summary, message, device=None):
    """Append one entry to a summary table.

    When *device* is given, a per-device header line and separator are
    written first; the message is always followed by a subtitle rule so
    consecutive entries stay visually separated.
    """
    if device:
        header = '* Summary for device: {}'.format(device)
        summary.add_message(header)
        summary.add_sep_line()
    summary.add_message(message)
    summary.add_subtitle_line()
def asynchronous_save_boot_variable(self, device, device_dict):
    '''Use asynchronous execution when saving boot variables on devices

    Records a per-device verdict ('Passed'/'Skipped'/'Failed') in
    *device_dict*, keyed by device name, and returns the dict.
    '''
    log.info(banner("Check boot information to see if they are consistent\n"
                    "and save bootvar to startup-config on device '{d}'".\
                    format(d=device.name)))

    # get platform pts (may be absent -> None)
    platform_pts = self.parameters.get('pts', {}).get('platform', {}).get(
        device.name, None)

    try:
        # OS-specific implementation is resolved via abstraction lookup
        result = Lookup.from_device(device).sdk.libs.abstracted_libs.subsection.\
            save_device_information(device=device, platform_pts=platform_pts)
    except Exception as e:
        # Best-effort: a failure on one device must not stop the others
        device_dict[device.name] = 'Failed'
    else:
        if result == 'Skipped':
            device_dict[device.name] = 'Skipped'
        else:
            device_dict[device.name] = 'Passed'

    return device_dict
| 33.212048 | 117 | 0.601756 |
b56990fb634f7fb9dad5c4c7f0644ae9cedb859e | 17,257 | py | Python | tests/remote-gdb/runtest.py | leviathanch/NyuziProcessor | 9d205e30c147dbf1736082bb33ba97ca2a843c17 | [
"Apache-2.0"
] | 2 | 2020-01-06T23:15:00.000Z | 2021-05-19T18:41:35.000Z | tests/remote-gdb/runtest.py | PhdBoLi/NyuziProcessor | a820fe884c055e87f13078d428c0585e8670b43e | [
"Apache-2.0"
] | null | null | null | tests/remote-gdb/runtest.py | PhdBoLi/NyuziProcessor | a820fe884c055e87f13078d428c0585e8670b43e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright 2011-2015 Jeff Bush
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implieconn.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Validates remote GDB debugger interface in emulator
"""
import os
import socket
import subprocess
import sys
import time
sys.path.insert(0, '..')
import test_harness
class DebugConnection(object):
    """
    Encapsulates remote GDB socket connection to emulator. It supports
    __enter__ and __exit__ methods so it can be used in the 'with' construct
    to automatically close the socket when the test is done.
    """

    def __init__(self):
        self.sock = None

    def __enter__(self):
        # Retry loop: give the emulator time to open its listening socket
        for _ in range(10):
            try:
                time.sleep(0.3)
                self.sock = socket.socket()
                self.sock.connect(('localhost', 8000))
                self.sock.settimeout(5)
                break
            except socket.error:
                pass

        return self

    def __exit__(self, *unused):
        self.sock.close()

    def _send_packet(self, body):
        """
        Send request 'body' to emulator. This will encapsulate the request
        in a packet and add the checksum.
        """
        if test_harness.DEBUG:
            print('SEND: ' + body)

        self.sock.send(str.encode('$' + body + '#'))

        # Checksum: the emulator does not validate it, so send two NUL bytes
        self.sock.send(str.encode('\x00\x00'))

    def _receive_packet(self):
        """
        Wait for a full packet to be received from the peer and return
        just the body (bytes).

        Raises test_harness.TestException if the socket closes or an
        unexpected character arrives before the packet start.
        """
        while True:
            leader = self.sock.recv(1)
            # BUGFIX: recv() returns bytes, so compare against b''. The old
            # str comparison ('') could never match, so a closed socket was
            # not detected and the loop spun on empty reads.
            if leader == b'':
                raise test_harness.TestException('unexpected socket close')

            if leader == b'$':
                break

            if leader != b'+':
                raise test_harness.TestException(
                    'unexpected character ' + str(leader))

        body = b''
        while True:
            char = self.sock.recv(1)
            if char == b'#':
                break

            body += char

        # Consume (and ignore) the two checksum digits
        self.sock.recv(2)

        if test_harness.DEBUG:
            print('RECV: ' + body.decode())

        return body

    def expect(self, command, value):
        """
        Sends 'command' to remote GDB value, then waits for the response.
        If the response doesn't match 'value', this will throw TestException.
        """
        self._send_packet(command)
        response = self._receive_packet()
        if response != str.encode(value):
            raise test_harness.TestException(
                'unexpected response. Wanted ' + value + ' got ' + str(response))
class EmulatorProcess(object):
    """
    Manages spawning the emulator and automatically stopping it at the
    end of the test. It supports __enter__ and __exit__ methods so it
    can be used in the 'with' construct.
    """

    def __init__(self, hexfile, num_cores=1):
        self.hexfile = hexfile
        self.num_cores = num_cores
        self.process = None
        # Kept for backward compatibility; no longer holds an open file
        self.output = None

    def __enter__(self):
        emulator_args = [
            test_harness.EMULATOR_PATH,
            '-m',
            'gdb',
            '-p',
            str(self.num_cores),
            self.hexfile
        ]

        if test_harness.DEBUG:
            stdout = None  # inherit the console so emulator output is visible
        else:
            # IMPROVED: subprocess.DEVNULL discards output without opening
            # (and having to remember to close) an os.devnull file handle.
            stdout = subprocess.DEVNULL

        self.process = subprocess.Popen(emulator_args, stdout=stdout,
                                        stderr=subprocess.STDOUT)
        return self

    def __exit__(self, *unused):
        test_harness.kill_gently(self.process)
        if self.output:
            self.output.close()
@test_harness.test(['emulator'])
def gdb_breakpoint(*unused):
    """
    Validate stopping at a breakpoint and continuing after stopping.
    This sets two breakpoints on consecutive instructions.
    """
    hexfile = test_harness.build_program(['count.S'], image_type='raw')
    with EmulatorProcess(hexfile), DebugConnection() as conn:
        # Set breakpoint
        conn.expect('Z0,0000000c', 'OK')

        # Set second breakpoint at next instruction
        conn.expect('Z0,00000010', 'OK')

        # Continue
        conn.expect('C', 'S05')

        # Read last signal
        conn.expect('?', 'S05')

        # Read PC register. Should be 0x000000c, but endian swapped
        conn.expect('g40', '0c000000')

        # Read s0, which should be 3
        conn.expect('g00', '03000000')

        # Continue again.
        conn.expect('C', 'S05')

        # Ensure the instruction it stopped at is
        # executed and it breaks on the next instruction
        conn.expect('g40', '10000000')

        # Read s0, which should be 4
        conn.expect('g00', '04000000')
@test_harness.test(['emulator'])
def gdb_remove_breakpoint(*unused):
    """Validate clearing a breakpoint ('z' packet): execution must run
    past the removed address and stop only at the remaining one."""
    hexfile = test_harness.build_program(['count.S'], image_type='raw')
    with EmulatorProcess(hexfile), DebugConnection() as conn:
        # Set breakpoint
        conn.expect('Z0,0000000c', 'OK')

        # Set second breakpoint
        conn.expect('Z0,00000014', 'OK')

        # Clear first breakpoint
        conn.expect('z0,0000000c', 'OK')

        # Continue
        conn.expect('C', 'S05')

        # Read PC register. Should be at second breakpoint
        conn.expect('g40', '14000000')

        # Read s0, which should be 5
        conn.expect('g00', '05000000')
@test_harness.test(['emulator'])
def gdb_breakpoint_errors(*unused):
    """Validate error responses (empty body) for invalid breakpoint
    requests: out-of-range address, unaligned address, duplicate set,
    and removal of a non-existent breakpoint."""
    hexfile = test_harness.build_program(['count.S'], image_type='raw')
    with EmulatorProcess(hexfile), DebugConnection() as conn:
        # Set invalid breakpoint (memory out of range)
        conn.expect('Z0,20000000', '')

        # Set invalid breakpoint (unaligned)
        conn.expect('Z0,00000003', '')

        # Set a valid breakpoint, then try to set the same address again
        conn.expect('Z0,00000008', 'OK')
        conn.expect('Z0,00000008', '')

        # Remove invalid breakpoint (doesn't exist)
        conn.expect('z0,00000004', '')
@test_harness.test(['emulator'])
def gdb_single_step(*unused):
    """Validate single stepping ('S'/'s'): each step advances the PC by
    one instruction (4 bytes) and updates registers."""
    hexfile = test_harness.build_program(['count.S'], image_type='raw')
    with EmulatorProcess(hexfile), DebugConnection() as conn:
        # Read PC register
        conn.expect('g40', '00000000')

        # Single step
        conn.expect('S', 'S05')

        # Read PC register
        conn.expect('g40', '04000000')

        # Read s0
        conn.expect('g00', '01000000')

        # Single step (note here I use the lowercase version)
        conn.expect('s', 'S05')

        # Read PC register
        conn.expect('g40', '08000000')

        # Read s0
        conn.expect('g00', '02000000')
@test_harness.test(['emulator'])
def gdb_single_step_breakpoint(*unused):
    """
    Ensure that if you single step through a breakpoint, it doesn't
    trigger and get stuck
    """
    hexfile = test_harness.build_program(['count.S'], image_type='raw')
    with EmulatorProcess(hexfile), DebugConnection() as conn:
        # Set breakpoint at second instruction (address 0x8)
        conn.expect('Z0,00000004', 'OK')

        # Single step over first instruction
        conn.expect('S', 'S05')

        # Single step. This one has a breakpoint, but we won't
        # stop at it.
        conn.expect('S', 'S05')

        # Read PC register
        conn.expect('g40', '08000000')

        # Read s0
        conn.expect('g00', '02000000')
@test_harness.test(['emulator'])
def gdb_read_write_memory(*unused):
    """Validate memory access packets: 'm' (read) and 'M' (write) at
    assorted addresses/sizes, plus out-of-range addresses."""
    hexfile = test_harness.build_program(['count.S'], image_type='raw')
    with EmulatorProcess(hexfile), DebugConnection() as conn:
        # Read program code at address 0. This should match values
        # in count.hex
        conn.expect('m0,10', '0004000f0008000f000c000f0010000f')

        # (address, data)
        tests = [
            (0x1000, '523c66b3'),
            (0x1234, '22'),
            (0x2242, '45f280397a5a3255fa19238693ff13c729'),
            (0x100000, '55483c091aac1e8c6db4bed1'),
            (0x200000, '16e1d56029e912a04121ce41a635155f3442355533703fafcb57f8'
             '295dd6330f82f9ffc40edb589fac1523665dc2f6e80c1e2de9718d'
             '253fcbce1c8a52c9dc21'),
        ]

        # Write memory
        for addr, data in tests:
            conn.expect('M' + hex(addr)[2:] + ',' +
                        hex(int(len(data) / 2))[2:] + ':' + data, 'OK')

        # Read and verify
        for addr, data in tests:
            conn.expect('m' + hex(addr)[2:] + ',' +
                        hex(int(len(data) / 2))[2:], data)

        # Try to write a bad address (out of range)
        # Doesn't return an error, test just ensures it
        # doesn't crash
        conn.expect('M10000000,4,12345678', 'OK')

        # Try to read a bad address (out of range)
        # As above, doesn't return error (returns 0xff...),
        # but ensure it doesn't crash.
        conn.expect('m10000000,4', 'ffffffff')
@test_harness.test(['emulator'])
def gdb_read_write_register(*unused):
    """Validate register access packets: 'g' (read) and 'G' (write) for
    scalar and 512-bit vector registers, plus invalid indices."""
    hexfile = test_harness.build_program(['register_values.S'])
    with EmulatorProcess(hexfile), DebugConnection() as conn:
        # Run code to load registers
        conn.expect('C', 'S05')

        # Check values set by program (remote GDB returns in swapped byte
        # order...)
        conn.expect('g1', '7d7f3e85')
        conn.expect('g20', 'f13403ef9d08309993f7819954ae4b3f7aeaa28f538fecbd95'
                    '36f59c6d7251269525ee70d26e8d34f48912639c86ae5dba426c83aa8455e1e2dbba4b41a4f321')

        # (register index, value to write and read back)
        tests = [
            (0, 'd3839b18'),
            (1, '7b53cc78'),
            (30, '0904c47d'),
            (32, 'aef331bc7dbd6f1d042be4d6f1e1649855d864387eb8f0fd49c205c37790'
             'd1874078516c1a05c74f67678456679ba7e05bb5aed7303c5aeeeba6e619'
             'accf702a'),
            (36, 'cb7e3668a97ef8ea55902658b62a682406f7206f75e5438ff95b4519fed1'
             'e73e16ce5a29b4385fa2560820f0c8f42227709387dbad3a8208b57c381e'
             '268ffe38'),
            (63, '9e2d89afb0633c2f64b2eb4fdbba4663401ee673753a66d6d899e4a4101a'
             'e4920b0b16f0e716e4f7d62d83b5784740c138ac6ab94fa14256ebb468e2'
             '5f20e02f')
        ]

        for reg, value in tests:
            conn.expect('G' + hex(reg)[2:] + ',' + value, 'OK')

        for reg, value in tests:
            conn.expect('g' + hex(reg)[2:], value)

        # Read invalid register index
        conn.expect('g41', '')

        # Write invalid register index
        conn.expect('G41,12345678', '')
@test_harness.test(['emulator'])
def gdb_register_info(*unused):
    """Validate qRegisterInfo metadata for scalar registers, the special
    fp/sp/ra registers, and the 512-bit vector registers."""
    hexfile = test_harness.build_program(['count.S'], image_type='raw')
    with EmulatorProcess(hexfile), DebugConnection() as conn:
        # Scalar registers
        # NOTE(review): queries start at index 1 (hex(idx + 1)), so index 0
        # is never checked -- confirm whether that is intentional.
        for idx in range(28):
            regid = str(idx + 1)

            conn.expect('qRegisterInfo' + hex(idx + 1)[2:], 'name:s' + regid +
                        ';bitsize:32;encoding:uint;format:hex;'
                        'set:General Purpose Scalar Registers;gcc:' + regid +
                        ';dwarf:' + regid + ';')

        # These registers (sp, fp, ra) are special and have additional
        # information.
        # NOTE(review): zip() pairs range(28, 32) (4 values) with only 3
        # names, so index 31 is never validated here -- possible oversight.
        names = ['fp', 'sp', 'ra']
        for idx, name in zip(range(28, 32), names):
            regid = str(idx + 1)
            conn.expect('qRegisterInfo' + hex(idx + 1)[2:], 'name:s' + regid +
                        ';bitsize:32;encoding:uint;format:hex;'
                        'set:General Purpose Scalar Registers;gcc:' + regid +
                        ';dwarf:' + regid + ';generic:' + name + ';')

        # Vector registers
        for idx in range(32, 63):
            regid = str(idx + 1)

            conn.expect('qRegisterInfo' + hex(idx + 1)[2:], 'name:v' + str(idx - 31) +
                        ';bitsize:512;encoding:uint;format:vector-uint32;'
                        'set:General Purpose Vector Registers;gcc:' + regid +
                        ';dwarf:' + regid + ';')

        # One past the last valid register: expect an error (empty) response
        conn.expect('qRegisterInfo65', '')
@test_harness.test(['emulator'])
def gdb_select_thread(*unused):
    """Validate per-thread control with the 'Hg' packet: each hardware
    thread can be selected, stepped, and holds independent register
    state."""
    hexfile = test_harness.build_program(['multithreaded.S'], image_type='raw')
    with EmulatorProcess(hexfile, num_cores=2), DebugConnection() as conn:
        # Read thread ID
        conn.expect('qC', 'QC01')

        # Each line is one thread: (number of steps to take, value for s1)
        tests = [
            (7, 0xc7733c56),
            (5, 0xf54adec3),
            (1, 0x5afaf01e),
            (2, 0x1964682e),
            (3, 0x16cc6be1),
            (8, 0xcbff923),
            (4, 0x4596de2),
            (6, 0xcd920ca6),
        ]

        # Step all threads through initialization code (5 instructions)
        for thid in range(len(tests)):
            # Switch to thread
            conn.expect('Hg' + str(thid + 1), 'OK')

            # Read thread ID
            conn.expect('qC', 'QC0' + str(thid + 1))

            for index in range(5):
                conn.expect('S', 'S05')

                # Read PC register
                conn.expect('g40', '{:08x}'.format(
                    test_harness.endian_swap((index + 1) * 4)))

        # Now all threads are at the same instruction:
        # 00000014 move s0, 1
        # Step each thread independently some number of steps and
        # write a value to register 1
        for index, (num_steps, regval) in enumerate(tests):
            conn.expect('Hg' + str(index + 1), 'OK')  # Switch to thread
            for _ in range(num_steps):
                conn.expect('S', 'S05')

            conn.expect('G01,{:08x}'.format(regval), 'OK')

        # Read back PC and register values
        for index, (num_steps, regval) in enumerate(tests):
            conn.expect('Hg' + str(index + 1), 'OK')  # Switch to thread
            conn.expect('g40', '{:08x}'.format(
                test_harness.endian_swap(0x14 + num_steps * 4)))
            conn.expect('g01', '{:08x}'.format(regval))

        # Try to switch to an invalid thread ID
        conn.expect('Hgfe', '')

        # Ensure still on thread 8
        conn.expect('qC', 'QC08')
@test_harness.test(['emulator'])
def gdb_thread_info(*unused):
    """Validate qfThreadInfo lists four threads per core."""
    # Run with one core, four threads
    hexfile = test_harness.build_program(['count.S'], image_type='raw')
    with EmulatorProcess(hexfile), DebugConnection() as conn:
        conn.expect('qfThreadInfo', 'm1,2,3,4')

    # Run with two cores, eight threads
    with EmulatorProcess(hexfile, num_cores=2), DebugConnection() as conn:
        conn.expect('qfThreadInfo', 'm1,2,3,4,5,6,7,8')
@test_harness.test(['emulator'])
def gdb_invalid_command(*unused):
    """Validate an unknown packet returns an error (empty) response."""
    hexfile = test_harness.build_program(['count.S'], image_type='raw')
    with EmulatorProcess(hexfile), DebugConnection() as conn:
        # As far as I know, this is not a valid command...
        # An error response returns nothing in the body
        conn.expect('@', '')
@test_harness.test(['emulator'])
def gdb_big_command(*unused):
    """ Check for buffer overflows by sending a very large command"""
    hexfile = test_harness.build_program(['count.S'], image_type='raw')
    with EmulatorProcess(hexfile), DebugConnection() as conn:
        # Big, invalid command. this should return an error (empty response)
        conn.expect('x' * 0x10000, '')

        # Now send a valid request to ensure it is still alive.
        conn.expect('qC', 'QC01')
@test_harness.test(['emulator'])
def gdb_queries(*unused):
    """Miscellaneous query commands not covered in other tests"""
    hexfile = test_harness.build_program(['count.S'], image_type='raw')
    with EmulatorProcess(hexfile), DebugConnection() as conn:
        conn.expect('qLaunchSuccess', 'OK')
        conn.expect('qHostInfo', 'triple:nyuzi;endian:little;ptrsize:4')
        conn.expect('qProcessInfo', 'pid:1')
        conn.expect('qsThreadInfo', 'l')  # No active threads
        conn.expect('qThreadStopInfo', 'S00')
        conn.expect('qC', 'QC01')

        # Should be invalid
        conn.expect('qZ', '')
@test_harness.test(['emulator'])
def gdb_vcont(*unused):
    """Validate the vCont packet's step (s) and continue (c) actions."""
    hexfile = test_harness.build_program(['count.S'], image_type='raw')
    with EmulatorProcess(hexfile), DebugConnection() as conn:
        # Set breakpoint
        conn.expect('Z0,00000010', 'OK')

        # Step
        conn.expect('vCont;s:0001', 'S05')
        conn.expect('g40', '04000000')

        # Continue
        conn.expect('vCont;c', 'S05')
        conn.expect('g40', '10000000')
@test_harness.test(['emulator'])
def gdb_crash(*unused):
    """Validate a faulting program reports a stop (S05) with the PC at
    the expected (endian-swapped) address 0x10."""
    hexfile = test_harness.build_program(['crash.S'], image_type='raw')
    with EmulatorProcess(hexfile), DebugConnection() as conn:
        conn.expect('c', 'S05')
        conn.expect('g40', '10000000')
# Entry point: run every test registered above via @test_harness.test
test_harness.execute_tests()
| 32.016698 | 101 | 0.586603 |
e158b35760ff848ef74e936da4674fdd4c4a2ff5 | 6,242 | py | Python | core/confdb/syntax/interfaces/base.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 84 | 2017-10-22T11:01:39.000Z | 2022-02-27T03:43:48.000Z | core/confdb/syntax/interfaces/base.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 22 | 2017-12-11T07:21:56.000Z | 2021-09-23T02:53:50.000Z | core/confdb/syntax/interfaces/base.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 23 | 2017-12-06T06:59:52.000Z | 2022-02-24T00:02:25.000Z | # ----------------------------------------------------------------------
# ConfDB interfaces syntax
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# NOC modules
from ..defs import DEF
from ..patterns import ANY, CHOICES, IF_NAME, INTEGER, BOOL, FLOAT, ETHER_MODE
from ..meta.interfaces import INTERFACES_META_SYNTAX
# ConfDB grammar for the "interfaces <name> ..." subtree. Each DEF node
# binds a token pattern to the generator function name ("gen") used when
# producing normalized config, so editing this structure affects both
# parsing and generation.
INTERFACES_SYNTAX = DEF(
    "interfaces",
    [
        DEF(
            IF_NAME,
            [
                INTERFACES_META_SYNTAX,
                # interfaces <name> type <physical|SVI|aggregated|...>
                DEF(
                    "type",
                    [
                        DEF(
                            CHOICES(
                                "physical",
                                "SVI",
                                "aggregated",
                                "loopback",
                                "management",
                                "null",
                                "tunnel",
                                "other",
                                "template",
                                "dry",
                                "unknown",
                            ),
                            required=True,
                            name="type",
                            gen="make_interface_type",
                        )
                    ],
                ),
                # interfaces <name> description <text>
                DEF(
                    "description",
                    [DEF(ANY, required=True, name="description", gen="make_interface_description")],
                ),
                # interfaces <name> admin-status <bool>
                DEF(
                    "admin-status",
                    [
                        DEF(
                            BOOL,
                            required=True,
                            name="admin_status",
                            gen="make_interface_admin_status",
                        )
                    ],
                ),
                # interfaces <name> mtu <int> / speed <int> / duplex <bool>
                DEF("mtu", [DEF(INTEGER, required=True, name="mtu", gen="make_interface_mtu")]),
                DEF(
                    "speed", [DEF(INTEGER, required=True, name="speed", gen="make_interface_speed")]
                ),
                DEF(
                    "duplex", [DEF(BOOL, required=True, name="duplex", gen="make_interface_duplex")]
                ),
                # interfaces <name> flow-control <bool>
                DEF(
                    "flow-control",
                    [
                        DEF(
                            BOOL,
                            required=True,
                            name="flow_control",
                            gen="make_interface_flow_control",
                        )
                    ],
                ),
                # interfaces <name> ethernet auto-negotiation <mode>...
                DEF(
                    "ethernet",
                    [
                        DEF(
                            "auto-negotiation",
                            [
                                DEF(
                                    ETHER_MODE,
                                    multi=True,
                                    name="mode",
                                    gen="make_interface_ethernet_autonegotiation",
                                )
                            ],
                        )
                    ],
                ),
                # interfaces <name> storm-control {broadcast|multicast|unicast} level <float>
                DEF(
                    "storm-control",
                    [
                        DEF(
                            "broadcast",
                            [
                                DEF(
                                    "level",
                                    [
                                        DEF(
                                            FLOAT,
                                            required=True,
                                            name="level",
                                            gen="make_interface_storm_control_broadcast_level",
                                        )
                                    ],
                                )
                            ],
                        ),
                        DEF(
                            "multicast",
                            [
                                DEF(
                                    "level",
                                    [
                                        DEF(
                                            FLOAT,
                                            required=True,
                                            name="level",
                                            gen="make_interface_storm_control_multicast_level",
                                        )
                                    ],
                                )
                            ],
                        ),
                        DEF(
                            "unicast",
                            [
                                DEF(
                                    "level",
                                    [
                                        DEF(
                                            FLOAT,
                                            required=True,
                                            name="level",
                                            gen="make_interface_storm_control_unicast_level",
                                        )
                                    ],
                                )
                            ],
                        ),
                    ],
                ),
                # interfaces <name> lag members <iface>...
                DEF(
                    "lag",
                    [
                        DEF(
                            "members",
                            [
                                DEF(
                                    IF_NAME,
                                    multi=True,
                                    name="member_interface_name",
                                    gen="make_interface_lag_members",
                                )
                            ],
                        )
                    ],
                ),
            ],
            multi=True,
            name="interface",
            gen="make_interface",
        )
    ],
)
e6b40095f02ec8f60d6c2306673d054478953aba | 1,456 | py | Python | Scripts/compareOutputs.py | harmim/vut-avs-project1 | d36e6b5cdebce748d2bdf2afc43950968ecf0a91 | [
"MIT"
] | null | null | null | Scripts/compareOutputs.py | harmim/vut-avs-project1 | d36e6b5cdebce748d2bdf2afc43950968ecf0a91 | [
"MIT"
] | null | null | null | Scripts/compareOutputs.py | harmim/vut-avs-project1 | d36e6b5cdebce748d2bdf2afc43950968ecf0a91 | [
"MIT"
] | null | null | null | # Simple python3 script to compare output with a reference output.
# Usage: python3 compareOutputs.py testOutput.h5 testRefOutput.h5
import sys
import h5py
import numpy as np
if len(sys.argv) != 3:
print("Expected two arguments. Output and reference output file.")
sys.exit(1)
filename = sys.argv[1]
ref_filename = sys.argv[2]
f = h5py.File(filename, 'r')
ref_f = h5py.File(ref_filename, 'r')
out = np.array(f['output_data'])
out_ref = np.array(ref_f['output_data'])
if out.shape != out_ref.shape:
print("The files do not contain the same number of outputs.")
print("The output size: {0}.".format(out.shape[0]))
print("The reference size: {0}.".format(out_ref.shape[0]))
sys.exit(1)
ref_value = np.copy(out_ref)
ref_value[ref_value == 0.0] = 1.0
error = (out_ref - out) / ref_value
maximal_error = np.amax(error)
print("Maximal error between the output and the reference is {0}.".format(maximal_error))
if maximal_error < 10**(-6):
print("OK:Output seems to match the reference.")
sys.exit(0)
print("Failure:Output does not match the reference.")
maximal_error = np.amax(error, axis=1)
print(maximal_error.shape)
for i in range(0, 5):
print("Image", i)
print("Expected:", end="")
for j in range(0, 10):
print(out_ref[i, j], end = " ")
print("\nGot:", end="")
for j in range(0, 10):
print(out[i, j], end=" ")
print("\nMaximal error:", maximal_error[i], "\n")
sys.exit(1)
| 26.472727 | 89 | 0.666896 |
460b95f140e0d3b317d2cc708fa6bf82f999b63f | 3,120 | py | Python | joeynmt/loss.py | AmitMY/joeynmt | b30d1d53823ced56113def8fb5d5f7905d3c059f | [
"Apache-2.0"
] | 563 | 2018-10-17T11:51:16.000Z | 2022-03-31T19:30:37.000Z | joeynmt/loss.py | AmitMY/joeynmt | b30d1d53823ced56113def8fb5d5f7905d3c059f | [
"Apache-2.0"
] | 132 | 2018-11-23T14:39:42.000Z | 2022-03-22T17:07:27.000Z | joeynmt/loss.py | AmitMY/joeynmt | b30d1d53823ced56113def8fb5d5f7905d3c059f | [
"Apache-2.0"
] | 166 | 2018-11-08T11:35:16.000Z | 2022-03-21T13:39:02.000Z | # coding: utf-8
"""
Module to implement training loss
"""
import torch
from torch import nn, Tensor
from torch.autograd import Variable
class XentLoss(nn.Module):
    """
    Cross-Entropy Loss with optional label smoothing

    :param pad_index: vocabulary index of the padding token; positions
        whose target is this index contribute no loss
    :param smoothing: label smoothing factor in [0, 1); 0 disables
        smoothing and uses plain negative log-likelihood
    """

    def __init__(self, pad_index: int, smoothing: float = 0.0):
        super().__init__()
        self.smoothing = smoothing
        self.pad_index = pad_index
        if self.smoothing <= 0.0:
            # standard xent loss
            self.criterion = nn.NLLLoss(ignore_index=self.pad_index,
                                        reduction='sum')
        else:
            # custom label-smoothed loss, computed with KL divergence loss
            self.criterion = nn.KLDivLoss(reduction='sum')

    def _smooth_targets(self, targets: Tensor, vocab_size: int) -> Tensor:
        """
        Smooth target distribution. All non-reference words get uniform
        probability mass according to "smoothing".

        :param targets: target indices, batch*seq_len
        :param vocab_size: size of the output vocabulary
        :return: smoothed target distributions, batch*seq_len x vocab_size
        """
        # batch*seq_len x vocab_size
        smooth_dist = targets.new_zeros((targets.size(0), vocab_size)).float()
        # fill distribution uniformly with smoothing
        # (vocab_size - 2 excludes the true label and the padding token)
        smooth_dist.fill_(self.smoothing / (vocab_size - 2))
        # assign true label the probability of 1-smoothing ("confidence")
        smooth_dist.scatter_(1, targets.unsqueeze(1), 1.0 - self.smoothing)
        # give padding probability of 0 everywhere
        smooth_dist[:, self.pad_index] = 0
        # masking out padding area (sum of probabilities for padding area = 0)
        padding_positions = torch.nonzero(targets == self.pad_index,
                                          as_tuple=False)
        # pylint: disable=len-as-condition
        if len(padding_positions) > 0:
            smooth_dist.index_fill_(0, padding_positions.squeeze(), 0.0)
        # MODERNIZED: return the tensor directly instead of wrapping it in
        # the deprecated torch.autograd.Variable (and drop the deprecated
        # .data accesses above); detach() keeps the no-gradient intent.
        return smooth_dist.detach()

    # pylint: disable=arguments-differ
    def forward(self, log_probs, targets):
        """
        Compute the cross-entropy between logits and targets.

        If label smoothing is used, target distributions are not one-hot, but
        "1-smoothing" for the correct target token and the rest of the
        probability mass is uniformly spread across the other tokens.

        :param log_probs: log probabilities as predicted by model
        :param targets: target indices
        :return: summed loss over all non-pad positions
        """
        if self.smoothing > 0:
            targets = self._smooth_targets(
                targets=targets.contiguous().view(-1),
                vocab_size=log_probs.size(-1))
            # targets: distributions with batch*seq_len x vocab_size
            assert log_probs.contiguous().view(-1, log_probs.size(-1)).shape \
                == targets.shape
        else:
            # targets: indices with batch*seq_len
            targets = targets.contiguous().view(-1)
        loss = self.criterion(
            log_probs.contiguous().view(-1, log_probs.size(-1)), targets)
        return loss
bd75792f68b2a81462647479d65becdeee1cd3f2 | 15,556 | py | Python | neo/rawio/examplerawio.py | Warfley/python-neo | 875e23a417e1a65d5cb45403e6e3261155e2741d | [
"BSD-3-Clause"
] | 1 | 2020-06-08T14:00:03.000Z | 2020-06-08T14:00:03.000Z | neo/rawio/examplerawio.py | Warfley/python-neo | 875e23a417e1a65d5cb45403e6e3261155e2741d | [
"BSD-3-Clause"
] | 22 | 2016-09-13T13:31:25.000Z | 2019-05-14T17:07:16.000Z | neo/rawio/examplerawio.py | Warfley/python-neo | 875e23a417e1a65d5cb45403e6e3261155e2741d | [
"BSD-3-Clause"
] | null | null | null | """
ExampleRawIO is a class of a fake example.
This is to be used when coding a new RawIO.
Rules for creating a new class:
1. Step 1: Create the main class
* Create a file in **neo/rawio/** that endith with "rawio.py"
* Create the class that inherits BaseRawIO
* copy/paste all methods that need to be implemented.
See the end a neo.rawio.baserawio.BaseRawIO
* code hard! The main difficulty **is _parse_header()**.
In short you have a create a mandatory dict than
contains channel informations::
self.header = {}
self.header['nb_block'] = 2
self.header['nb_segment'] = [2, 3]
self.header['signal_channels'] = sig_channels
self.header['unit_channels'] = unit_channels
self.header['event_channels'] = event_channels
2. Step 2: RawIO test:
* create a file in neo/rawio/tests with the same name with "test_" prefix
* copy paste neo/rawio/tests/test_examplerawio.py and do the same
3. Step 3 : Create the neo.io class with the wrapper
* Create a file in neo/io/ that endith with "io.py"
* Create a that inherits both your RawIO class and BaseFromRaw class
* copy/paste from neo/io/exampleio.py
4.Step 4 : IO test
* create a file in neo/test/iotest with the same previous name with "test_" prefix
* copy/paste from neo/test/iotest/test_exampleio.py
"""
from .baserawio import (BaseRawIO, _signal_channel_dtype, _unit_channel_dtype,
_event_channel_dtype)
import numpy as np
class ExampleRawIO(BaseRawIO):
"""
Class for "reading" fake data from an imaginary file.
For the user, it give acces to raw data (signals, event, spikes) as they
are in the (fake) file int16 and int64.
For a developer, it is just an example showing guidelines for someone who wants
to develop a new IO module.
Two rules for developers:
* Respect the :ref:`neo_rawio_API`
* Follow the :ref:`io_guiline`
This fake IO:
* have 2 blocks
* blocks have 2 and 3 segments
* have 16 signal_channel sample_rate = 10000
* have 3 unit_channel
* have 2 event channel: one have *type=event*, the other have
*type=epoch*
Usage:
>>> import neo.rawio
>>> r = neo.rawio.ExampleRawIO(filename='itisafake.nof')
>>> r.parse_header()
>>> print(r)
>>> raw_chunk = r.get_analogsignal_chunk(block_index=0, seg_index=0,
i_start=0, i_stop=1024, channel_names=channel_names)
>>> float_chunk = reader.rescale_signal_raw_to_float(raw_chunk, dtype='float64',
channel_indexes=[0, 3, 6])
>>> spike_timestamp = reader.spike_timestamps(unit_index=0, t_start=None, t_stop=None)
>>> spike_times = reader.rescale_spike_timestamp(spike_timestamp, 'float64')
>>> ev_timestamps, _, ev_labels = reader.event_timestamps(event_channel_index=0)
"""
extensions = ['fake']
rawmode = 'one-file'
def __init__(self, filename=''):
    super().__init__()
    # note that this filename is used in self._source_name
    self.filename = filename
def _source_name(self):
    # Used by __repr__. A plain filename is the natural identifier here;
    # URL-based IOs may instead mask part of the URL and keep the main part.
    return self.filename
def _parse_header(self):
    """Collect all metadata needed for fast random access later.

    This is the core of a RawIO: _parse_header() may be slow, but it
    must gather everything so _get_analogsignal_chunk() can be fast.
    Fills the mandatory self.header dict and the annotation tree.
    """
    # create signals channels information
    # This is mandatory!!!!
    # gain/offset/units are really important because
    # the scaling to real value will be done with that
    # at the end real_signal = (raw_signal* gain + offset) * pq.Quantity(units)
    sig_channels = []
    for c in range(16):
        ch_name = 'ch{}'.format(c)
        # our channel id is c+1 just for fun
        # Note that chan_id should be related to
        # original channel id in the file format
        # so that the end user should not be lost when reading datasets
        chan_id = c + 1
        sr = 10000.  # Hz
        dtype = 'int16'
        units = 'uV'
        gain = 1000. / 2 ** 16
        offset = 0.
        # group_id is only for special cases when channels have different
        # sampling rates for instance. See TdtIO for that.
        # Here this is the general case: all channels share characteristics
        group_id = 0
        sig_channels.append((ch_name, chan_id, sr, dtype, units, gain, offset, group_id))
    sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype)

    # creating units channels
    # This is mandatory!!!!
    # Note that if there is no waveform at all in the file
    # then wf_units/wf_gain/wf_offset/wf_left_sweep/wf_sampling_rate
    # can be set to any value because _spike_raw_waveforms
    # will return None
    unit_channels = []
    for c in range(3):
        unit_name = 'unit{}'.format(c)
        unit_id = '#{}'.format(c)
        wf_units = 'uV'
        wf_gain = 1000. / 2 ** 16
        wf_offset = 0.
        wf_left_sweep = 20
        wf_sampling_rate = 10000.
        unit_channels.append((unit_name, unit_id, wf_units, wf_gain,
                              wf_offset, wf_left_sweep, wf_sampling_rate))
    unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype)

    # creating event/epoch channel
    # This is mandatory!!!!
    # In RawIO epoch and event are dealt with the same way.
    event_channels = []
    event_channels.append(('Some events', 'ev_0', 'event'))
    event_channels.append(('Some epochs', 'ep_1', 'epoch'))
    event_channels = np.array(event_channels, dtype=_event_channel_dtype)

    # fill into header dict
    # This is mandatory!!!!!
    self.header = {}
    self.header['nb_block'] = 2
    self.header['nb_segment'] = [2, 3]
    self.header['signal_channels'] = sig_channels
    self.header['unit_channels'] = unit_channels
    self.header['event_channels'] = event_channels

    # insert some annotation at some place
    # at neo.io level IOs are free to add some annotations
    # to any object. To keep this functionality with the wrapper
    # BaseFromRaw you can add annotations in a nested dict.
    self._generate_minimal_annotations()
    # If you are a lazy dev you can stop here.
    for block_index in range(2):
        bl_ann = self.raw_annotations['blocks'][block_index]
        bl_ann['name'] = 'Block #{}'.format(block_index)
        bl_ann['block_extra_info'] = 'This is the block {}'.format(block_index)
        for seg_index in range([2, 3][block_index]):
            seg_ann = bl_ann['segments'][seg_index]
            seg_ann['name'] = 'Seg #{} Block #{}'.format(
                seg_index, block_index)
            seg_ann['seg_extra_info'] = 'This is the seg {} of block {}'.format(
                seg_index, block_index)
            for c in range(16):
                anasig_an = seg_ann['signals'][c]
                anasig_an['info'] = 'This is a good signals'
            for c in range(3):
                spiketrain_an = seg_ann['units'][c]
                spiketrain_an['quality'] = 'Good!!'
            for c in range(2):
                event_an = seg_ann['events'][c]
                if c == 0:
                    event_an['nickname'] = 'Miss Event 0'
                elif c == 1:
                    event_an['nickname'] = 'MrEpoch 1'
def _segment_t_start(self, block_index, seg_index):
    # Start time in seconds, shared by every object in the segment
    # except AnalogSignal (which has its own t_start).
    segment_starts = ((0., 15.), (0., 20., 60.))
    return segment_starts[block_index][seg_index]
def _segment_t_stop(self, block_index, seg_index):
    # Stop time of the requested segment, as a float in seconds
    segment_stops = ((10., 25.), (10., 30., 70.))
    return segment_stops[block_index][seg_index]
def _get_signal_size(self, block_index, seg_index, channel_indexes=None):
    """Return the number of samples in the segment's signals.

    Every segment of this fake file holds 10 s at 10 kHz, so the count
    is constant; real formats look it up per segment.
    channel_indexes can be ignored here -- it only matters when channels
    have different sampling rates.
    """
    return 100000
def _get_signal_t_start(self, block_index, seg_index, channel_indexes):
    """Return the signals' start time in seconds.

    Very often this equals _segment_t_start (as it does here), but not
    always. channel_indexes can be ignored except when channels have
    different sampling rates.
    """
    return self._segment_t_start(block_index, seg_index)
def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes):
    """Return raw int16 samples for [i_start, i_stop) as a 2D array.

    Must be as fast as possible and return the original dtype without
    conversion; scaling to physical units is driven elsewhere by
    self.header['signal_channels']. In this fake file every sample is 0.
    """
    # Defaults cover the whole 100000-sample segment
    start = 0 if i_start is None else i_start
    stop = 100000 if i_stop is None else i_stop

    assert start >= 0, "I don't like your jokes"
    assert stop <= 100000, "I don't like your jokes"

    # None selects all 16 channels
    n_channels = 16 if channel_indexes is None else len(channel_indexes)

    return np.zeros((stop - start, n_channels), dtype='int16')
def _spike_count(self, block_index, seg_index, unit_index):
# Must return the nb of spike for given (block_index, seg_index, unit_index)
# we are lucky: our units have all the same nb of spikes!!
# it is not always the case
nb_spikes = 20
return nb_spikes
def _get_spike_timestamps(self, block_index, seg_index, unit_index, t_start, t_stop):
# In our IO, timstamp are internally coded 'int64' and they
# represent the index of the signals 10kHz
# we are lucky: spikes have the same discharge in all segments!!
# incredible neuron!! This is not always the case
# the same clip t_start/t_start must be used in _spike_raw_waveforms()
ts_start = (self._segment_t_start(block_index, seg_index) * 10000)
spike_timestamps = np.arange(0, 10000, 500) + ts_start
if t_start is not None or t_stop is not None:
# restricte spikes to given limits (in seconds)
lim0 = int(t_start * 10000)
lim1 = int(t_stop * 10000)
mask = (spike_timestamps >= lim0) & (spike_timestamps <= lim1)
spike_timestamps = spike_timestamps[mask]
return spike_timestamps
def _rescale_spike_timestamp(self, spike_timestamps, dtype):
# must rescale to second a particular spike_timestamps
# with a fixed dtype so the user can choose the precisino he want.
spike_times = spike_timestamps.astype(dtype)
spike_times /= 10000. # because 10kHz
return spike_times
def _get_spike_raw_waveforms(self, block_index, seg_index, unit_index, t_start, t_stop):
# this must return a 3D numpy array (nb_spike, nb_channel, nb_sample)
# in the original dtype
# this must be as fast as possible.
# the same clip t_start/t_start must be used in _spike_timestamps()
# If there there is no waveform supported in the
# IO them _spike_raw_waveforms must return None
# In our IO waveforms come from all channels
# they are int16
# convertion to real units is done with self.header['unit_channels']
# Here, we have a realistic case: all waveforms are only noise.
# it is not always the case
# we 20 spikes with a sweep of 50 (5ms)
# trick to get how many spike in the slice
ts = self._get_spike_timestamps(block_index, seg_index, unit_index, t_start, t_stop)
nb_spike = ts.size
np.random.seed(2205) # a magic number (my birthday)
waveforms = np.random.randint(low=-2**4, high=2**4, size=nb_spike * 50, dtype='int16')
waveforms = waveforms.reshape(nb_spike, 1, 50)
return waveforms
def _event_count(self, block_index, seg_index, event_channel_index):
# event and spike are very similar
# we have 2 event channels
if event_channel_index == 0:
# event channel
return 6
elif event_channel_index == 1:
# epoch channel
return 10
def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_start, t_stop):
# the main difference between spike channel and event channel
# is that for here we have 3 numpy array timestamp, durations, labels
# durations must be None for 'event'
# label must a dtype ='U'
# in our IO event are directly coded in seconds
seg_t_start = self._segment_t_start(block_index, seg_index)
if event_channel_index == 0:
timestamp = np.arange(0, 6, dtype='float64') + seg_t_start
durations = None
labels = np.array(['trigger_a', 'trigger_b'] * 3, dtype='U12')
elif event_channel_index == 1:
timestamp = np.arange(0, 10, dtype='float64') + .5 + seg_t_start
durations = np.ones((10), dtype='float64') * .25
labels = np.array(['zoneX'] * 5 + ['zoneZ'] * 5, dtype='U12')
if t_start is not None:
keep = timestamp >= t_start
timestamp, labels = timestamp[keep], labels[keep]
if durations is not None:
durations = durations[keep]
if t_stop is not None:
keep = timestamp <= t_stop
timestamp, labels = timestamp[keep], labels[keep]
if durations is not None:
durations = durations[keep]
return timestamp, durations, labels
def _rescale_event_timestamp(self, event_timestamps, dtype):
# must rescale to second a particular event_timestamps
# with a fixed dtype so the user can choose the precisino he want.
# really easy here because in our case it is already seconds
event_times = event_timestamps.astype(dtype)
return event_times
def _rescale_epoch_duration(self, raw_duration, dtype):
# really easy here because in our case it is already seconds
durations = raw_duration.astype(dtype)
return durations
| 41.817204 | 98 | 0.622075 |
c300cca3dbc17379c032a2c70dc9374042ec00dd | 962 | py | Python | ironic_inspector/cmd/all.py | tyws/inspector | 7c27046906d0cf5eef099efab61ac0a97a091017 | [
"Apache-2.0"
] | null | null | null | ironic_inspector/cmd/all.py | tyws/inspector | 7c27046906d0cf5eef099efab61ac0a97a091017 | [
"Apache-2.0"
] | null | null | null | ironic_inspector/cmd/all.py | tyws/inspector | 7c27046906d0cf5eef099efab61ac0a97a091017 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Ironic Inspector service."""
import sys
from ironic_inspector.common import service_utils
from ironic_inspector import wsgi_service
def main(args=None):
    """Run the Ironic Inspector WSGI service.

    :param args: command-line arguments; defaults to ``sys.argv[1:]``,
        read at call time.  (The previous ``args=sys.argv[1:]`` default
        was evaluated once at import time, freezing whatever argv held
        when the module was first loaded.)
    """
    if args is None:
        args = sys.argv[1:]
    # Parse config file and command line options, then start logging
    service_utils.prepare_service(args)
    server = wsgi_service.WSGIService()
    server.run()
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
| 31.032258 | 78 | 0.730769 |
93c0968babadec5ccc78269314cc07f3cd4d1314 | 3,028 | py | Python | huaweicloud-sdk-rabbitmq/huaweicloudsdkrabbitmq/v2/model/show_rabbit_mq_tags_request.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 64 | 2020-06-12T07:05:07.000Z | 2022-03-30T03:32:50.000Z | huaweicloud-sdk-rabbitmq/huaweicloudsdkrabbitmq/v2/model/show_rabbit_mq_tags_request.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 11 | 2020-07-06T07:56:54.000Z | 2022-01-11T11:14:40.000Z | huaweicloud-sdk-rabbitmq/huaweicloudsdkrabbitmq/v2/model/show_rabbit_mq_tags_request.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 24 | 2020-06-08T11:42:13.000Z | 2022-03-04T06:44:08.000Z | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowRabbitMqTagsRequest:
    """Request model for querying the tags of a RabbitMQ instance.

    ``openapi_types`` maps each attribute name to its type and
    ``attribute_map`` maps each attribute name to its JSON key.
    """

    # Attributes whose values must be masked in to_dict() output.
    sensitive_list = []

    openapi_types = {
        'instance_id': 'str'
    }

    attribute_map = {
        'instance_id': 'instance_id'
    }

    def __init__(self, instance_id=None):
        """ShowRabbitMqTagsRequest - a model defined in huaweicloud sdk"""
        self._instance_id = None
        self.discriminator = None
        self.instance_id = instance_id

    @property
    def instance_id(self):
        """The instance ID this request targets."""
        return self._instance_id

    @instance_id.setter
    def instance_id(self, instance_id):
        """Set the instance ID this request targets."""
        self._instance_id = instance_id

    def to_dict(self):
        """Return the model properties as a dict, masking sensitive fields."""
        serialized = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                serialized[name] = [
                    entry.to_dict() if hasattr(entry, "to_dict") else entry
                    for entry in value
                ]
            elif hasattr(value, "to_dict"):
                serialized[name] = value.to_dict()
            elif isinstance(value, dict):
                serialized[name] = {
                    key: (val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()
                }
            elif name in self.sensitive_list:
                serialized[name] = "****"
            else:
                serialized[name] = value
        return serialized

    def to_str(self):
        """Return the JSON string representation of the model."""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Two requests are equal when types and all attributes match."""
        return isinstance(other, ShowRabbitMqTagsRequest) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
| 26.561404 | 79 | 0.556473 |
0990dde2d5d83a23295fa278d07ff7119cedb7a8 | 497 | py | Python | tests/test_obofoundry.py | kkaris/bioregistry | e8cdaf8e8c5670873ce10a5a67d7850b76e5eff7 | [
"MIT"
] | null | null | null | tests/test_obofoundry.py | kkaris/bioregistry | e8cdaf8e8c5670873ce10a5a67d7850b76e5eff7 | [
"MIT"
] | null | null | null | tests/test_obofoundry.py | kkaris/bioregistry | e8cdaf8e8c5670873ce10a5a67d7850b76e5eff7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Tests for OBO Foundry data."""
import unittest
from bioregistry import get_obofoundry_prefix
class TestOBO(unittest.TestCase):
"""Tests for OBO Foundry data."""
def test_prefix(self):
"""Test looking up stylized prefixes."""
for expected, query in [
('FBbt', 'fbbt'),
('CHEBI', 'chebi'),
]:
with self.subTest(query=query):
self.assertEqual(expected, get_obofoundry_prefix(query))
| 23.666667 | 72 | 0.593561 |
1e6663e36086af73dbf2eb89730ec8b0955c5699 | 5,674 | py | Python | test/functional/p2p_invalid_block.py | HashUnlimited/chaincoin | 9a035680d6d9b9a0524dc7524c55cfedd1a683ca | [
"MIT"
] | null | null | null | test/functional/p2p_invalid_block.py | HashUnlimited/chaincoin | 9a035680d6d9b9a0524dc7524c55cfedd1a683ca | [
"MIT"
] | null | null | null | test/functional/p2p_invalid_block.py | HashUnlimited/chaincoin | 9a035680d6d9b9a0524dc7524c55cfedd1a683ca | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid blocks.
In this test we connect to one node over p2p, and test block requests:
1) Valid blocks should be requested and become chain tip.
2) Invalid block with duplicated transaction should be re-requested.
3) Invalid block with bad coinbase value should be rejected and not
re-requested.
"""
import copy
from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script
from test_framework.messages import COIN
from test_framework.mininode import P2PDataStore
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class InvalidBlockRequestTest(BitcoinTestFramework):
    """Functional test exercising node responses to invalid blocks sent over p2p."""
    def set_test_params(self):
        """Use a single node on a clean (genesis-only) chain."""
        self.num_nodes = 1
        self.setup_clean_chain = True
        self.extra_args = [["-whitelist=noban@127.0.0.1"]]
    def skip_test_if_missing_module(self):
        """Skip when the node was built without wallet support."""
        self.skip_if_no_wallet()
    def run_test(self):
        """Send valid and invalid blocks and check the node's accept/reject verdicts."""
        # Add p2p connection to node0
        node = self.nodes[0]  # convenience reference to the node
        node.add_p2p_connection(P2PDataStore())
        best_block = node.getblock(node.getbestblockhash())
        tip = int(node.getbestblockhash(), 16)
        height = best_block["height"] + 1
        block_time = best_block["time"] + 1
        self.log.info("Create a new block with an anyone-can-spend coinbase")
        # On a clean chain the tip is genesis, so this first block has height 1.
        height = 1
        block = create_block(tip, create_coinbase(height), block_time)
        block.solve()
        # Save the coinbase for later
        block1 = block
        tip = block.sha256
        node.p2p.send_blocks_and_test([block1], node, success=True)
        self.log.info("Mature the block.")
        node.generate(100)
        # Refresh chain state after mining the maturity blocks.
        best_block = node.getblock(node.getbestblockhash())
        tip = int(node.getbestblockhash(), 16)
        height = best_block["height"] + 1
        block_time = best_block["time"] + 1
        # Use merkle-root malleability to generate an invalid block with
        # same blockheader (CVE-2012-2459).
        # Manufacture a block with 3 transactions (coinbase, spend of prior
        # coinbase, spend of that spend). Duplicate the 3rd transaction to
        # leave merkle root and blockheader unchanged but invalidate the block.
        # For more information on merkle-root malleability see src/consensus/merkle.cpp.
        self.log.info("Test merkle root malleability.")
        block2 = create_block(tip, create_coinbase(height), block_time)
        block_time += 1
        # b'0x51' is OP_TRUE
        tx1 = create_tx_with_script(block1.vtx[0], 0, script_sig=b'\x51', amount=50 * COIN)
        tx2 = create_tx_with_script(tx1, 0, script_sig=b'\x51', amount=50 * COIN)
        block2.vtx.extend([tx1, tx2])
        block2.hashMerkleRoot = block2.calc_merkle_root()
        block2.rehash()
        block2.solve()
        orig_hash = block2.sha256
        block2_orig = copy.deepcopy(block2)
        # Mutate block 2
        block2.vtx.append(tx2)
        # Sanity checks: the duplicate leaves merkle root and hash unchanged,
        # but the transaction list now differs from the pristine copy.
        assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root())
        assert_equal(orig_hash, block2.rehash())
        assert block2_orig.vtx != block2.vtx
        node.p2p.send_blocks_and_test([block2], node, success=False, reject_reason='bad-txns-duplicate')
        # Check transactions for duplicate inputs (CVE-2018-17144)
        self.log.info("Test duplicate input block.")
        block2_dup = copy.deepcopy(block2_orig)
        block2_dup.vtx[2].vin.append(block2_dup.vtx[2].vin[0])
        block2_dup.vtx[2].rehash()
        block2_dup.hashMerkleRoot = block2_dup.calc_merkle_root()
        block2_dup.rehash()
        block2_dup.solve()
        node.p2p.send_blocks_and_test([block2_dup], node, success=False, reject_reason='bad-txns-inputs-duplicate')
        self.log.info("Test very broken block.")
        # A coinbase paying more than the allowed subsidy must be rejected.
        block3 = create_block(tip, create_coinbase(height), block_time)
        block_time += 1
        block3.vtx[0].vout[0].nValue = 100 * COIN  # Too high!
        block3.vtx[0].sha256 = None
        block3.vtx[0].calc_sha256()
        block3.hashMerkleRoot = block3.calc_merkle_root()
        block3.rehash()
        block3.solve()
        node.p2p.send_blocks_and_test([block3], node, success=False, reject_reason='bad-cb-amount')
        # Complete testing of CVE-2012-2459 by sending the original block.
        # It should be accepted even though it has the same hash as the mutated one.
        self.log.info("Test accepting original block after rejecting its mutated version.")
        node.p2p.send_blocks_and_test([block2_orig], node, success=True, timeout=5)
        # Update tip info
        height += 1
        block_time += 1
        tip = int(block2_orig.hash, 16)
        # Complete testing of CVE-2018-17144, by checking for the inflation bug.
        # Create a block that spends the output of a tx in a previous block.
        block4 = create_block(tip, create_coinbase(height), block_time)
        tx3 = create_tx_with_script(tx2, 0, script_sig=b'\x51', amount=50 * COIN)
        # Duplicates input
        tx3.vin.append(tx3.vin[0])
        tx3.rehash()
        block4.vtx.append(tx3)
        block4.hashMerkleRoot = block4.calc_merkle_root()
        block4.rehash()
        block4.solve()
        self.log.info("Test inflation by duplicating input")
        node.p2p.send_blocks_and_test([block4], node, success=False, reject_reason='bad-txns-inputs-duplicate')
if __name__ == '__main__':
    # Entry point: run the functional test through the framework's main().
    InvalidBlockRequestTest().main()
| 40.241135 | 115 | 0.679767 |
24aa26670b764932337edd5d8a172977e2fa982b | 1,436 | py | Python | setup.py | koudyk/NiMARE | 5bbc6bc2cbd2083bea450ce67047e69f75b70d28 | [
"MIT"
] | null | null | null | setup.py | koudyk/NiMARE | 5bbc6bc2cbd2083bea450ce67047e69f75b70d28 | [
"MIT"
] | 1 | 2020-12-20T18:44:03.000Z | 2020-12-20T18:44:03.000Z | setup.py | jdkent/NiMARE | 5bbc6bc2cbd2083bea450ce67047e69f75b70d28 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" NiMARE setup script """
import versioneer
from io import open
import os.path as op
from inspect import getfile, currentframe
from setuptools import setup, find_packages
def main():
    """ Install entry-point """
    # Execute nimare/info.py so the metadata constants it defines
    # (PACKAGENAME, VERSION, ...) land in this function's local scope.
    ver_file = op.join('nimare', 'info.py')
    with open(ver_file) as f:
        exec(f.read())
    # NOTE(review): relying on exec() to populate function locals and on
    # locals() reflecting those names is CPython-implementation-dependent;
    # an explicit namespace dict would be more robust.  info.py is not
    # visible here, so confirm before changing.
    vars = locals()  # snapshot of the exec'd metadata (shadows builtins.vars)
    # Non-Python files to ship inside the installed package.
    pkg_data = {
        'nimare': [
            'tests/data/*',
            'resources/*'
        ]
    }
    # NOTE(review): root_dir is computed but never used below.
    root_dir = op.dirname(op.abspath(getfile(currentframe())))
    # versioneer supplies setup.py command classes (version handling).
    cmdclass = versioneer.get_cmdclass()
    setup(
        name=vars['PACKAGENAME'],
        version=vars['VERSION'],
        description=vars['DESCRIPTION'],
        long_description=vars['LONGDESC'],
        author=vars['AUTHOR'],
        author_email=vars['EMAIL'],
        maintainer=vars['MAINTAINER'],
        maintainer_email=vars['EMAIL'],
        url=vars['URL'],
        license=vars['LICENSE'],
        classifiers=vars['CLASSIFIERS'],
        download_url=vars['DOWNLOAD_URL'],
        # Dependencies handling
        install_requires=vars['REQUIRES'],
        tests_require=vars['TESTS_REQUIRES'],
        extras_require=vars['EXTRA_REQUIRES'],
        entry_points=vars['ENTRY_POINTS'],
        packages=find_packages(exclude=('tests',)),
        package_data=pkg_data,
        zip_safe=False,
        cmdclass=cmdclass
    )
if __name__ == '__main__':
    # Allow `python setup.py ...` to invoke the install entry point directly.
    main()
| 26.109091 | 62 | 0.603064 |
e35fbd41c27cf8df42fdcb47fa7871e51911f736 | 266 | py | Python | run.py | acatiadroid/captcha | 3e2777a503bb3d09e3eeff7308dd8dee7d0d5021 | [
"MIT"
] | null | null | null | run.py | acatiadroid/captcha | 3e2777a503bb3d09e3eeff7308dd8dee7d0d5021 | [
"MIT"
] | null | null | null | run.py | acatiadroid/captcha | 3e2777a503bb3d09e3eeff7308dd8dee7d0d5021 | [
"MIT"
] | null | null | null | from decouple import config
from utils.botinstance import Bot
# Cog (extension) modules to load before the bot starts.
extensions = [
    "cogs.setup",
    "cogs.captcha",
]
bot = Bot()
if __name__ == "__main__":
    # Load every cog, then start the IPC server before running the bot.
    for ext in extensions:
        bot.load_extension(ext)
    bot.ipc.start()
bot.run(config("TOKEN")) | 17.733333 | 33 | 0.650376 |
a2cbf444a399f53063bc4321314894bf0721e1f0 | 803 | py | Python | chat/views.py | raybesiga/citychat | 66d7c3ffe5cd6804ef4193ce28455376f51f9164 | [
"MIT"
] | null | null | null | chat/views.py | raybesiga/citychat | 66d7c3ffe5cd6804ef4193ce28455376f51f9164 | [
"MIT"
] | null | null | null | chat/views.py | raybesiga/citychat | 66d7c3ffe5cd6804ef4193ce28455376f51f9164 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.conf import settings
from django.http import JsonResponse
from twilio.access_token import AccessToken, IpMessagingGrant
def chat(request):
    """Render the chat page template."""
    return render(request, 'chat.html')
def token(request):
    """Build a Twilio IP Messaging access token for the requesting client.

    Reads the ``device`` and ``identity`` query parameters (with
    fallbacks), grants access to the configured IP Messaging service,
    and responds with ``{'identity': ..., 'token': <JWT>}``.
    """
    device_id = request.GET.get('device', 'unknown')
    identity = request.GET.get('identity', 'guest').encode('utf-8')
    # The endpoint id ties this token to one device/identity pair.
    endpoint_id = "NeighborChat:{0}, {1}".format(device_id, identity)
    token = AccessToken(settings.TWILIO_ACCOUNT_SID, settings.TWILIO_API_KEY, settings.TWILIO_API_SECRET, identity)
    grant = IpMessagingGrant()
    grant.service_sid = settings.TWILIO_IPM_SERVICE_SID
    grant.endpoint_id = endpoint_id
    token.add_grant(grant)
    response = {'identity': identity, 'token': token.to_jwt()}
return JsonResponse(response) | 40.15 | 115 | 0.753425 |
a3d7c223e1cc83f1082a810056d290bf66fc7d93 | 13,271 | py | Python | gamestonk_terminal/stocks/fundamental_analysis/av_model.py | minhhoang1023/GamestonkTerminal | 195dc19b491052df080178c0cc6a9d535a91a704 | [
"MIT"
] | null | null | null | gamestonk_terminal/stocks/fundamental_analysis/av_model.py | minhhoang1023/GamestonkTerminal | 195dc19b491052df080178c0cc6a9d535a91a704 | [
"MIT"
] | null | null | null | gamestonk_terminal/stocks/fundamental_analysis/av_model.py | minhhoang1023/GamestonkTerminal | 195dc19b491052df080178c0cc6a9d535a91a704 | [
"MIT"
] | null | null | null | """Alpha Vantage Model"""
__docformat__ = "numpy"
import logging
from typing import Dict, List, Tuple
import numpy as np
import pandas as pd
import requests
from alpha_vantage.fundamentaldata import FundamentalData
from gamestonk_terminal import config_terminal as cfg
from gamestonk_terminal.decorators import log_start_end
from gamestonk_terminal.helper_funcs import long_number_format
from gamestonk_terminal.rich_config import console
from gamestonk_terminal.stocks.fundamental_analysis.fa_helper import clean_df_index
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_overview(ticker: str) -> pd.DataFrame:
    """Get alpha vantage company overview

    Parameters
    ----------
    ticker : str
        Stock ticker

    Returns
    -------
    pd.DataFrame
        Dataframe of fundamentals (empty on request failure or API note)
    """
    # Request OVERVIEW data from the Alpha Vantage API.
    request_url = f"https://www.alphavantage.co/query?function=OVERVIEW&symbol={ticker}&apikey={cfg.API_KEY_ALPHAVANTAGE}"
    response = requests.get(request_url, stream=True)
    if response.status_code != 200:
        return pd.DataFrame()
    payload = response.json()
    # Alpha Vantage signals rate limiting / errors through a "Note" field.
    if "Note" in payload:
        console.print(payload["Note"], "\n")
        return pd.DataFrame()
    df_fa = pd.json_normalize(payload)
    # Keep the JSON key ordering when transposing to a single column.
    df_fa = df_fa[list(payload.keys())].T
    # Humanize the numeric rows (the first five rows are descriptive text).
    df_fa.iloc[5:] = df_fa.iloc[5:].applymap(long_number_format)
    clean_df_index(df_fa)
    # Undo the letter-splitting that clean_df_index applies to acronyms.
    df_fa = df_fa.rename(
        index={
            "E b i t d a": "EBITDA",
            "P e ratio": "PE ratio",
            "P e g ratio": "PEG ratio",
            "E p s": "EPS",
            "Revenue per share t t m": "Revenue per share TTM",
            "Operating margin t t m": "Operating margin TTM",
            "Return on assets t t m": "Return on assets TTM",
            "Return on equity t t m": "Return on equity TTM",
            "Revenue t t m": "Revenue TTM",
            "Gross profit t t m": "Gross profit TTM",
            "Diluted e p s t t m": "Diluted EPS TTM",
            "Quarterly earnings growth y o y": "Quarterly earnings growth YOY",
            "Quarterly revenue growth y o y": "Quarterly revenue growth YOY",
            "Trailing p e": "Trailing PE",
            "Forward p e": "Forward PE",
            "Price to sales ratio t t m": "Price to sales ratio TTM",
            "E v to revenue": "EV to revenue",
            "E v to e b i t d a": "EV to EBITDA",
        }
    )
    return df_fa
@log_start_end(log=logger)
def get_key_metrics(ticker: str) -> pd.DataFrame:
    """Get key metrics from overview

    Parameters
    ----------
    ticker : str
        Stock ticker

    Returns
    -------
    pd.DataFrame
        Dataframe of key metrics (empty on request failure)
    """
    # Request OVERVIEW data from the Alpha Vantage API.
    request_url = f"https://www.alphavantage.co/query?function=OVERVIEW&symbol={ticker}&apikey={cfg.API_KEY_ALPHAVANTAGE}"
    response = requests.get(request_url, stream=True)
    if response.status_code != 200:
        return pd.DataFrame()
    payload = response.json()
    df_fa = pd.json_normalize(payload)
    df_fa = df_fa[list(payload.keys())].T
    df_fa = df_fa.applymap(long_number_format)
    clean_df_index(df_fa)
    # Undo the letter-splitting that clean_df_index applies to acronyms.
    df_fa = df_fa.rename(
        index={
            "E b i t d a": "EBITDA",
            "P e ratio": "PE ratio",
            "P e g ratio": "PEG ratio",
            "E p s": "EPS",
            "Return on equity t t m": "Return on equity TTM",
            "Price to sales ratio t t m": "Price to sales ratio TTM",
        }
    )
    # Restrict the full overview to the rows shown as "key metrics".
    as_key_metrics = [
        "Market capitalization",
        "EBITDA",
        "EPS",
        "PE ratio",
        "PEG ratio",
        "Price to book ratio",
        "Return on equity TTM",
        "Price to sales ratio TTM",
        "Dividend yield",
        "50 day moving average",
        "Analyst target price",
        "Beta",
    ]
    return df_fa.loc[as_key_metrics]
@log_start_end(log=logger)
def get_income_statements(
    ticker: str, number: int, quarterly: bool = False
) -> pd.DataFrame:
    """Get income statements for company

    Parameters
    ----------
    ticker : str
        Stock ticker
    number : int
        Number of past statements to keep
    quarterly : bool, optional
        Flag to get quarterly instead of annual, by default False

    Returns
    -------
    pd.DataFrame
        Dataframe of income statements, one column per fiscal period
        (empty on request failure or missing data)
    """
    url = f"https://www.alphavantage.co/query?function=INCOME_STATEMENT&symbol={ticker}&apikey={cfg.API_KEY_ALPHAVANTAGE}"
    r = requests.get(url)
    if r.status_code != 200:
        return pd.DataFrame()
    statements = r.json()
    report_key = "quarterlyReports" if quarterly else "annualReports"
    if report_key not in statements:
        return pd.DataFrame()
    df_fa = pd.DataFrame(statements[report_key])
    if df_fa.empty:
        return pd.DataFrame()
    df_fa = df_fa.set_index("fiscalDateEnding").head(number)
    df_fa = df_fa.applymap(long_number_format)
    # Oldest period first, periods as columns.
    return df_fa[::-1].T
@log_start_end(log=logger)
def get_balance_sheet(
    ticker: str, number: int, quarterly: bool = False
) -> pd.DataFrame:
    """Get balance sheets for company

    Parameters
    ----------
    ticker : str
        Stock ticker
    number : int
        Number of past statements to keep
    quarterly : bool, optional
        Flag to get quarterly instead of annual, by default False

    Returns
    -------
    pd.DataFrame
        Dataframe of balance sheets, one column per fiscal period
        (empty on request failure or missing data)
    """
    url = f"https://www.alphavantage.co/query?function=BALANCE_SHEET&symbol={ticker}&apikey={cfg.API_KEY_ALPHAVANTAGE}"
    r = requests.get(url)
    if r.status_code != 200:
        return pd.DataFrame()
    statements = r.json()
    report_key = "quarterlyReports" if quarterly else "annualReports"
    if report_key not in statements:
        return pd.DataFrame()
    df_fa = pd.DataFrame(statements[report_key])
    if df_fa.empty:
        return pd.DataFrame()
    df_fa = df_fa.set_index("fiscalDateEnding").head(number)
    df_fa = df_fa.applymap(long_number_format)
    # Oldest period first, periods as columns.
    return df_fa[::-1].T
@log_start_end(log=logger)
def get_cash_flow(ticker: str, number: int, quarterly: bool = False) -> pd.DataFrame:
    """Get cash flows for company

    Parameters
    ----------
    ticker : str
        Stock ticker
    number : int
        Number of past statements to keep
    quarterly : bool, optional
        Flag to get quarterly instead of annual, by default False

    Returns
    -------
    pd.DataFrame
        Dataframe of cash flow statements, one column per fiscal period
        (empty on request failure or missing data)
    """
    url = f"https://www.alphavantage.co/query?function=CASH_FLOW&symbol={ticker}&apikey={cfg.API_KEY_ALPHAVANTAGE}"
    r = requests.get(url)
    if r.status_code != 200:
        return pd.DataFrame()
    statements = r.json()
    report_key = "quarterlyReports" if quarterly else "annualReports"
    if report_key not in statements:
        return pd.DataFrame()
    df_fa = pd.DataFrame(statements[report_key])
    if df_fa.empty:
        return pd.DataFrame()
    df_fa = df_fa.set_index("fiscalDateEnding").head(number)
    df_fa = df_fa.applymap(long_number_format)
    # Oldest period first, periods as columns.
    return df_fa[::-1].T
@log_start_end(log=logger)
def get_earnings(ticker: str, quarterly: bool = False) -> pd.DataFrame:
    """Get earnings calendar for ticker

    Parameters
    ----------
    ticker : str
        Stock ticker
    quarterly : bool, optional
        Flag to get quarterly and not annual, by default False

    Returns
    -------
    pd.DataFrame
        Dataframe of earnings (empty on request failure)
    """
    # Request EARNINGS data from the Alpha Vantage API.
    request_url = (
        "https://www.alphavantage.co/query?function=EARNINGS&"
        f"symbol={ticker}&apikey={cfg.API_KEY_ALPHAVANTAGE}"
    )
    response = requests.get(request_url, stream=True)
    if response.status_code != 200:
        return pd.DataFrame()
    df_fa = pd.json_normalize(response.json())
    if quarterly:
        df_fa = pd.DataFrame(df_fa["quarterlyEarnings"][0])
        # Fix the column order before renaming for display.
        df_fa = df_fa[
            [
                "fiscalDateEnding",
                "reportedDate",
                "reportedEPS",
                "estimatedEPS",
                "surprise",
                "surprisePercentage",
            ]
        ]
        column_names = {
            "fiscalDateEnding": "Fiscal Date Ending",
            "reportedEPS": "Reported EPS",
            "estimatedEPS": "Estimated EPS",
            "reportedDate": "Reported Date",
            "surprise": "Surprise",
            "surprisePercentage": "Surprise Percentage",
        }
    else:
        df_fa = pd.DataFrame(df_fa["annualEarnings"][0])
        column_names = {
            "fiscalDateEnding": "Fiscal Date Ending",
            "reportedEPS": "Reported EPS",
        }
    return df_fa.rename(columns=column_names)
@log_start_end(log=logger)
def df_values(df: pd.DataFrame, item: str) -> List[int]:
    """Clean the values from the df

    Parameters
    ----------
    df : pd.DataFrame
        The Dataframe to use
    item : str
        The column to select

    Returns
    -------
    values : List[int]
        The column values as plain ints; missing or non-numeric
        entries (including the literal string "None" that Alpha
        Vantage uses for absent fundamentals) are mapped to 0
    """
    def _to_int(x) -> int:
        # Falsy values (None, "", 0) map to 0, as before.
        if not x:
            return 0
        try:
            return int(x)
        except (TypeError, ValueError):
            # Previously int("None") / int("2.5") raised and aborted the
            # whole ratio computation; fall back to float parsing, then 0.
            try:
                return int(float(x))
            except (TypeError, ValueError):
                return 0

    return [_to_int(x) for x in df[item]]
@log_start_end(log=logger)
def get_fraud_ratios(ticker: str) -> Tuple[Dict[str, float], float, float]:
    """Get fraud ratios based on fundamentals

    Computes earnings-manipulation indicators from the two most recent
    annual statements fetched through the Alpha Vantage client.

    Parameters
    ----------
    ticker : str
        Stock ticker

    Returns
    -------
    Dict[str, float]:
        Dictionary of fraud metrics (the eight Beneish components plus
        the combined "MSCORE")
    float:
        Financial-distress score computed from ROA, leverage and the
        current ratio (NOTE(review): coefficients match the Zmijewski
        model — confirm)
    float:
        Additional probability-style score ("mckee"; model source not
        identified from this code — confirm before documenting further)
    """
    fd = FundamentalData(key=cfg.API_KEY_ALPHAVANTAGE, output_format="pandas")
    # pylint: disable=unbalanced-tuple-unpacking
    # pylint: disable=no-member
    df_cf, _ = fd.get_cash_flow_annual(symbol=ticker)
    df_bs, _ = fd.get_balance_sheet_annual(symbol=ticker)
    df_is, _ = fd.get_income_statement_annual(symbol=ticker)
    # Keep only the two most recent fiscal years: index 0 = latest,
    # index 1 = prior year, used throughout the ratios below.
    df_cf = df_cf.set_index("fiscalDateEnding").iloc[:2]
    df_bs = df_bs.set_index("fiscalDateEnding").iloc[:2]
    df_is = df_is.set_index("fiscalDateEnding").iloc[:2]
    # Raw inputs as [latest, prior] integer lists (missing values become 0
    # via df_values, so divisions below can raise ZeroDivisionError —
    # NOTE(review): consider guarding).
    ar = df_values(df_bs, "currentNetReceivables")
    sales = df_values(df_is, "totalRevenue")
    cogs = df_values(df_is, "costofGoodsAndServicesSold")
    ni = df_values(df_is, "netIncome")
    ca = df_values(df_bs, "totalCurrentAssets")
    cl = df_values(df_bs, "totalCurrentLiabilities")
    ppe = df_values(df_bs, "propertyPlantEquipment")
    cash = df_values(df_bs, "cashAndCashEquivalentsAtCarryingValue")
    cash_and_sec = df_values(df_bs, "cashAndShortTermInvestments")
    # Securities = short-term investments excluding cash.
    sec = [y - x for (x, y) in zip(cash, cash_and_sec)]
    ta = df_values(df_bs, "totalAssets")
    dep = df_values(df_bs, "accumulatedDepreciationAmortizationPPE")
    sga = df_values(df_is, "sellingGeneralAndAdministrative")
    tl = df_values(df_bs, "totalLiabilities")
    icfo = df_values(df_is, "netIncomeFromContinuingOperations")
    cfo = df_values(df_cf, "operatingCashflow")
    ratios: Dict = {}
    # Beneish M-score components: year-over-year index ratios.
    ratios["DSRI"] = (ar[0] / sales[0]) / (ar[1] / sales[1])
    ratios["GMI"] = ((sales[1] - cogs[1]) / sales[1]) / (
        (sales[0] - cogs[0]) / sales[0]
    )
    ratios["AQI"] = (1 - ((ca[0] + ppe[0] + sec[0]) / ta[0])) / (
        1 - ((ca[1] + ppe[1] + sec[1]) / ta[1])
    )
    ratios["SGI"] = sales[0] / sales[1]
    ratios["DEPI"] = (dep[1] / (ppe[1] + dep[1])) / (dep[0] / (ppe[0] + dep[0]))
    ratios["SGAI"] = (sga[0] / sales[0]) / (sga[1] / sales[1])
    ratios["LVGI"] = (tl[0] / ta[0]) / (tl[1] / ta[1])
    ratios["TATA"] = (icfo[0] - cfo[0]) / ta[0]
    # Weighted combination of the eight components (Beneish M-score form).
    ratios["MSCORE"] = (
        -4.84
        + (0.92 * ratios["DSRI"])
        + (0.58 * ratios["GMI"])
        + (0.404 * ratios["AQI"])
        + (0.892 * ratios["SGI"])
        + (0.115 * ratios["DEPI"] - (0.172 * ratios["SGAI"]))
        + (4.679 * ratios["TATA"])
        - (0.327 * ratios["LVGI"])
    )
    # Distress score from return on assets, leverage and current ratio.
    zscore = (
        -4.336
        - (4.513 * (ni[0] / ta[0]))
        + (5.679 * (tl[0] / ta[0]))
        + (0.004 * (ca[0] / cl[0]))
    )
    # Inputs for the final score: log of (assets in thousands), ROA,
    # and cash-to-current-liabilities.
    v1 = np.log(ta[0] / 1000)
    v2 = ni[0] / ta[0]
    v3 = cash[0] / cl[0]
    x = ((v1 + 0.85) * v2) - 0.85
    y = 1 + v3
    mckee = x**2 / (x**2 + y**2)
    return ratios, zscore, mckee
| 31.673031 | 122 | 0.574109 |
6ce4d566581d2ebf2a29918bc325229de0008819 | 2,263 | py | Python | docs/makers/make_reflected_regions.py | ischigal/gammapy | c56ca1bb237d9eb4a7a3aed8eaf359206bf0e628 | [
"BSD-3-Clause"
] | 155 | 2015-02-25T12:38:02.000Z | 2022-03-13T17:54:30.000Z | docs/makers/make_reflected_regions.py | ischigal/gammapy | c56ca1bb237d9eb4a7a3aed8eaf359206bf0e628 | [
"BSD-3-Clause"
] | 3,131 | 2015-01-06T15:36:23.000Z | 2022-03-31T17:30:57.000Z | docs/makers/make_reflected_regions.py | ischigal/gammapy | c56ca1bb237d9eb4a7a3aed8eaf359206bf0e628 | [
"BSD-3-Clause"
] | 158 | 2015-03-16T20:36:44.000Z | 2022-03-30T16:05:37.000Z | import matplotlib.pyplot as plt
from astropy.coordinates import Angle, SkyCoord
from regions import CircleSkyRegion
from gammapy.makers import ReflectedRegionsFinder
from gammapy.maps import WcsNDMap, RegionGeom
# Exclude a rectangular region
# Build a mask map centred near (83.6, 23.0) deg; the declination band
# 23 deg < Dec < 24 deg is flagged and then inverted, so pixels inside
# that band carry False in exclusion_mask.data.
exclusion_mask = WcsNDMap.create(npix=(801, 701), binsz=0.01, skydir=(83.6, 23.0))
coords = exclusion_mask.geom.get_coord().skycoord
data = (Angle("23 deg") < coords.dec) & (coords.dec < Angle("24 deg"))
exclusion_mask.data = ~data
# ON region: a 0.3 deg circle; reflected OFF regions are searched around
# `center` (presumably the pointing position — see ReflectedRegionsFinder docs).
pos = SkyCoord(83.633, 22.014, unit="deg")
radius = Angle(0.3, "deg")
on_region = CircleSkyRegion(pos, radius)
center = SkyCoord(83.633, 24, unit="deg")
# One can impose a minimal distance between ON region and first reflected regions
finder = ReflectedRegionsFinder(
    region=on_region,
    center=center,
    exclusion_mask=exclusion_mask,
    min_distance_input="0.2 rad",
)
regions = finder.run()
# One figure, three panels, all sharing the mask's WCS projection.
fig, axes = plt.subplots(
    ncols=3,
    subplot_kw={"projection": exclusion_mask.geom.wcs},
    figsize=(12, 3),
)
def plot_regions(ax, regions, on_region, exclusion_mask):
    """Plot the exclusion mask, the ON region and the reflected OFF regions."""
    exclusion_mask.plot_mask(ax=ax, colors="gray")
    on_region.to_pixel(ax.wcs).plot(ax=ax, edgecolor="tab:orange")
    RegionGeom.from_regions(regions).plot_region(ax=ax, color="tab:blue")
# Panel 1: finder built above with a minimum distance between the ON
# region and the first reflected region.
ax = axes[0]
ax.set_title("Min. distance first region")
plot_regions(
    ax=ax, regions=regions, on_region=on_region, exclusion_mask=exclusion_mask
)
# One can impose a minimal distance between two adjacent regions
finder = ReflectedRegionsFinder(
    region=on_region,
    center=center,
    exclusion_mask=exclusion_mask,
    min_distance="0.1 rad",
)
regions = finder.run()
# Panel 2: minimum distance enforced between every pair of adjacent regions.
ax = axes[1]
ax.set_title("Min. distance all regions")
plot_regions(
    ax=ax, regions=regions, on_region=on_region, exclusion_mask=exclusion_mask
)
# One can impose a maximal number of regions to be extracted
finder = ReflectedRegionsFinder(
    region=on_region,
    center=center,
    exclusion_mask=exclusion_mask,
    max_region_number=5,
    min_distance="0.1 rad",
)
regions = finder.run()
# Panel 3: at most five regions extracted.
ax = axes[2]
ax.set_title("Max. number of regions")
plot_regions(
    ax=ax, regions=regions, on_region=on_region, exclusion_mask=exclusion_mask
)
plt.show() | 27.938272 | 82 | 0.739726 |
9570a3a871a3dd68f52df5ddaaed8eb5909f3ecf | 1,640 | py | Python | experiments/riemannian_example.py | utiasSTARS/GraphIK | c2d05386bf9f9baf8ad146125bfebc3b73fccd14 | [
"MIT"
] | 1 | 2020-11-08T23:26:03.000Z | 2020-11-08T23:26:03.000Z | experiments/riemannian_example.py | utiasSTARS/GraphIK | c2d05386bf9f9baf8ad146125bfebc3b73fccd14 | [
"MIT"
] | null | null | null | experiments/riemannian_example.py | utiasSTARS/GraphIK | c2d05386bf9f9baf8ad146125bfebc3b73fccd14 | [
"MIT"
] | null | null | null | from graphik.utils.utils import table_environment
from graphik.solvers.riemannian_solver import solve_with_riemannian
# Multiple robot models to try out, or you can implement your own
from graphik.utils.roboturdf import load_ur10
if __name__ == "__main__":
    # Load an example robot
    robot, graph = load_ur10()
    # Load an example obstacle environment, or construct your own (see implementation of table_environment())
    obstacles = table_environment()
    # Initialize the graph object with obstacles from the chosen environment
    for idx, obs in enumerate(obstacles):
        # obs[0] is assumed to be the sphere center and obs[1] its radius — TODO confirm against table_environment()
        graph.add_spherical_obstacle(f"o{idx}", obs[0], obs[1])
    # Generate anchor nodes representing a pose goal for the end-effector
    q_goal = robot.random_configuration()
    T_goal = robot.pose(q_goal, f"p{robot.n}") # Can be any desired pose, this is just a simple example
    # Run Riemannian solver
    q_sol, solution_points = solve_with_riemannian(graph, T_goal, use_jit=False) # Returns None if infeasible or didn't solve
    # Compare the solution's end effector pose to the goal.
    # Don't be surprised if the configurations are different, even for the UR10!
    # Each pose has up to 16 unique solutions for 6-DOF manipulators.
    print("Target pose: ")
    print(T_goal)
    print("Target configuration: ")
    print(q_goal)
    print("--------------------------------------------")
    if q_sol:
        print("Riemannian solution's pose: ")
        print(robot.pose(q_sol, f"p{robot.n}"))
        print("Riemannian configuration: ")
        print(q_sol)
    else:
        print("Riemannian did not return a feasible solution.")
| 40 | 126 | 0.693293 |
570a6876e48bc4fdb1d0b41924f0ea831d7da6f1 | 1,762 | py | Python | aisenv/Scripts/rst2odt_prepstyles.py | claudiavr/AIS | 5a9b9db8377efbfba3e8bfc8bf126845ef6e9aea | [
"MIT"
] | null | null | null | aisenv/Scripts/rst2odt_prepstyles.py | claudiavr/AIS | 5a9b9db8377efbfba3e8bfc8bf126845ef6e9aea | [
"MIT"
] | null | null | null | aisenv/Scripts/rst2odt_prepstyles.py | claudiavr/AIS | 5a9b9db8377efbfba3e8bfc8bf126845ef6e9aea | [
"MIT"
] | null | null | null | #!c:\users\denis\desktop\ais\aisenv\scripts\python.exe
# $Id: rst2odt_prepstyles.py 8346 2019-08-26 12:11:32Z milde $
# Author: Dave Kuhlman <dkuhlman@rexx.com>
# Copyright: This module has been placed in the public domain.
"""
Fix a word-processor-generated styles.odt for odtwriter use: Drop page size
specifications from styles.xml in STYLE_FILE.odt.
"""
# Author: Michael Schutte <michi@uiae.at>
from __future__ import print_function
from lxml import etree
import sys
import zipfile
from tempfile import mkstemp
import shutil
import os
# XML namespace prefixes used in the XPath queries against styles.xml below.
NAMESPACES = {
    "style": "urn:oasis:names:tc:opendocument:xmlns:style:1.0",
    "fo": "urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0"
}
def prepstyle(filename):
    """Strip page-size properties from styles.xml inside an ODT file, in place.

    Opens the ODT (a zip archive), removes every ``fo:*`` attribute from all
    ``style:page-layout-properties`` elements, and rewrites the archive
    atomically via a temp file moved over the original.
    """
    zin = zipfile.ZipFile(filename)
    styles = zin.read("styles.xml")
    root = etree.fromstring(styles)
    for el in root.xpath("//style:page-layout-properties",
                         namespaces=NAMESPACES):
        # Iterate over a snapshot of the attribute names: deleting from
        # el.attrib while iterating it directly raises a RuntimeError.
        for attr in list(el.attrib):
            if attr.startswith("{%s}" % NAMESPACES["fo"]):
                del el.attrib[attr]
    tempname = mkstemp()
    # The file object backing a ZipFile must be opened in binary mode
    # ("wb"); text mode breaks on Python 3.
    zout = zipfile.ZipFile(os.fdopen(tempname[0], "wb"), "w",
                           zipfile.ZIP_DEFLATED)
    for item in zin.infolist():
        if item.filename == "styles.xml":
            # Write the patched XML in place of the original member,
            # preserving its ZipInfo metadata.
            zout.writestr(item, etree.tostring(root))
        else:
            zout.writestr(item, zin.read(item.filename))
    zout.close()
    zin.close()
    # Replace the original ODT with the rewritten archive.
    shutil.move(tempname[1], filename)
def main():
    """Entry point: validate the command line and fix the given ODT file."""
    arguments = sys.argv[1:]
    if len(arguments) == 1:
        # Exactly one argument: the ODT file to patch in place.
        prepstyle(arguments[0])
        return
    # Anything else is a usage error: show help on stderr and exit non-zero.
    print(__doc__, file=sys.stderr)
    print("Usage: %s STYLE_FILE.odt\n" % sys.argv[0], file=sys.stderr)
    sys.exit(1)
main()
| 25.911765 | 75 | 0.643587 |
727a5dab1ffb067a1d31edf7f93524706fb7e0a3 | 10,191 | py | Python | src/pydex_app/order_update_handler.py | DeFi-Coder-News-Letter/StormSurge-pydex | 735efe9a9c2f16290ddff2c565dab655278fbddf | [
"MIT"
] | 28 | 2019-02-09T12:48:21.000Z | 2022-02-10T02:35:43.000Z | src/pydex_app/order_update_handler.py | ccamateur/pydex | e6a0fdb81751a34df40d8563a4f33fdd39a91a70 | [
"MIT"
] | 20 | 2019-02-09T02:16:48.000Z | 2021-02-02T21:55:33.000Z | src/pydex_app/order_update_handler.py | ccamateur/pydex | e6a0fdb81751a34df40d8563a4f33fdd39a91a70 | [
"MIT"
] | 5 | 2019-02-09T11:58:39.000Z | 2021-08-24T15:14:17.000Z | """
Use an OrderWatcherClient to listen to order updates from
the order-watcher-server and take necessary actions to ensure
order statuses are kept up to date in the database
author: officialcryptomaster@gmail.com
"""
from time import sleep
from pydex_app.database import PYDEX_DB as db
from pydex_app.db_models import SignedOrder, OrderStatus
from pydex_app.order_watcher_client import OrderWatcherClient
from utils.logutils import setup_logger
from utils.miscutils import now_epoch_msecs
LOGGER = setup_logger(__name__)
class OrderUpdateHandler:
    """Class for keeping SignedOrder statuses up-to-date.
    When orders are initially submitted and entered into the database, they will have
    order_status OrderStatus.MAYBE_FILLABLE and will not be displayed in the orderbook
    since the orderbook only displays FILLABLE orders. The OrderUpdateHandler is responsible
    for fetching these orders from the database, registering them with the order watcher
    server and only then marking them as fillable. It will also listen to the order watcher
    server for updates on order statuses and update them as needed.
    Internally will make use of the OrderWatcherClient which will run and handle status
    updates on a separate thread.
    """
    def __init__(
        self,
        app,
        order_watcher_server_url="ws://127.0.0.1:8080",
        db_check_period_secs=1,
        heatbeat_period=30,
    ):
        """Get an instance of the OrderUpdateHandler.
        Keyword arguments:
        app -- PyDEX flask app instance, so we can make sure the context for database
            operations can be controlled
        order_watcher_server_url -- string url and port where order-watcher-server is
            running the json RPC service
        db_check_period_secs -- integer seconds between checking db for new orders
        heatbeat_period -- integer number of db_check_period_secs between logging a
            debug message (default: 30).
            NOTE(review): parameter name has a typo ("heatbeat"); renaming it
            would break keyword callers, so it is documented as-is.
        """
        self._app = app
        self.db_check_period_secs = db_check_period_secs
        self.heartbeat_period_secs = heatbeat_period
        # Timestamps (epoch milliseconds) of the last DB poll and the newest
        # order update seen so far; both start unset.
        self._last_db_check_at_msecs = None
        self._last_update_at_msecs = None
        self.running = False
        # For now, all unfillable handlers will just mark the order as unfillable,
        # but you can potentially change this if you desire...
        self.unfillable_handlers = {
            "ORDER_FILL_EXPIRED": self.handle_unfillable_order,
            "ORDER_ALREADY_CANCELLED_OR_FILLED": self.handle_unfillable_order,
            "ORDER_REMAINING_FILL_AMOUNT_ZERO": self.handle_unfillable_order,
            "ORDER_FILL_ROUNDING_ERROR": self.handle_unfillable_order,
            "FILL_BALANCE_ALLOWANCE_ERROR": self.handle_unfillable_order,
            "INSUFFICIENT_TAKER_BALANCE": self.handle_unfillable_order,
            "INSUFFICIENT_TAKER_ALLOWANCE": self.handle_unfillable_order,
            "INSUFFICIENT_MAKER_BALANCE": self.handle_unfillable_order,
            "INSUFFICIENT_MAKER_ALLOWANCE": self.handle_unfillable_order,
            "TRANSACTION_SENDER_IS_NOT_FILL_ORDER_TAKER": self.handle_unfillable_order,
            "INSUFFICIENT_REMAINING_FILL_AMOUNT": self.handle_unfillable_order,
        }
        # Client runs on its own thread and calls back into self.on_update.
        self.owc = OrderWatcherClient(
            server_url=order_watcher_server_url,
            on_update=self.on_update,
        )
    def _fetch_non_unfillables(self):
        """Fetch dict of orders which are not unfillable and have had updates"""
        fillables = {}
        maybe_fillables = {}
        # order_status_ >= 0 excludes UNFILLABLE orders (negative status).
        filter_cond = SignedOrder.order_status_ >= 0
        if self._last_update_at_msecs:
            # Only pick up orders updated since the last batch we processed.
            filter_cond &= SignedOrder.last_updated_at_msecs_ > self._last_update_at_msecs
        self._last_db_check_at_msecs = now_epoch_msecs()
        non_unfillables = {o.hash: o for o in
                           SignedOrder.query.filter(filter_cond)}
        if non_unfillables:
            # Advance the high-water mark to the newest update seen.
            self._last_update_at_msecs = max(
                [o.last_updated_at_msecs for o in non_unfillables.values()])
            LOGGER.info("fetched %s non-unfillable orders", len(non_unfillables))
            # Status > 0 means FILLABLE; status == 0 means MAYBE_FILLABLE.
            fillables = {h: o for h, o in non_unfillables.items() if o.order_status_ > 0}
            maybe_fillables = {h: o for h, o in non_unfillables.items() if o.order_status_ == 0}
        return fillables, maybe_fillables
    def run(self):
        """Infinite loop of the updater"""
        self.owc.run()
        self.running = True
        i = 1
        with self._app.app_context():
            LOGGER.info("starting main loop....")
            while self.running:
                i += 1
                if i % self.heartbeat_period_secs == 0:
                    LOGGER.debug(".")
                if self._last_db_check_at_msecs:
                    # Sleep only for the remainder of the poll period, so the
                    # effective polling interval stays ~db_check_period_secs.
                    wait_secs = (self.db_check_period_secs
                                 - (now_epoch_msecs()
                                    - self._last_db_check_at_msecs) / 1000)
                    if wait_secs > 0:
                        sleep(wait_secs)
                fillables, maybe_fillables = self._fetch_non_unfillables()
                if fillables:  # force update from order-watcher-server
                    for order in fillables.values():
                        self.owc.add_order(order.to_json())
                if maybe_fillables:
                    for order in maybe_fillables.values():
                        # Batch the status flips; commit once below.
                        self.handle_maybe_fillable_order(order=order, commit=False)
                    self._commit_db()
            LOGGER.info("main loop stopped!")
        LOGGER.info("stopping OrderWacherClient...")
        self.owc.stop()
    def on_update(self, res):
        """Handle messages coming from order-watcher-server.
        Note that this will be running in the order-watcher-client thread, so
        we need to make sure it operates within the app_context.
        Keyword argument:
        res -- dict 'result' from order-watcher-server on_message callback
        """
        LOGGER.info("handling update=%s", res)
        order_hash = res.get("orderHash")
        if order_hash is None:
            LOGGER.error("result missing 'orderHash' key")
            return res
        is_valid = res.get("isValid")
        if is_valid is None:
            LOGGER.error("result is missing 'isValid' key")
            return res
        # Since handler is running in order-watcher-client thread,
        # it will need to be told about the app context
        with self._app.app_context():
            if not is_valid:
                # Dispatch on the server-supplied error code.
                # NOTE(review): an unknown error code would raise KeyError
                # here — presumably the set above is exhaustive; confirm.
                invalid_reason = res.get("error")
                self.unfillable_handlers[invalid_reason](
                    order_hash, invalid_reason)
            else:
                LOGGER.info("Got valid order %s", order_hash)
                self.handle_fillable_order(order_hash)
        return res
    def get_order_by_hash(self, order_hash):  # pylint: disable=no-self-use
        """Get an order by hash from the database.
        Keyword argument:
        order_hash -- string hex hash of order
        Returns the SignedOrder, or None if no such order exists.
        """
        order = SignedOrder.query.get(order_hash)
        if not order:
            LOGGER.warning("Got update for ghost order with hash %s", order_hash)
        return order
    def handle_maybe_fillable_order(self, order, commit=True):
        """Add the order to the order-watcher-server and set it to FILLABLE.
        Keyword arguments:
        order -- SignedOrder instance to register with the watcher
        commit -- boolean of whether to commit the change (default: True)
        """
        LOGGER.debug("Adding order_hash=%s to order-watcher-server", order.hash)
        # WARNING: this approach may be vulnerable to a race conditions, however,
        # since the order watcher server does not confirm valid status, it is the
        # simplest way we can mark a MAYBE_FILLABLE as FILLABLE...
        order_count_before = self.owc.get_stats()["result"]["orderCount"]
        self.owc.add_order(order.to_json())
        order_count_after = self.owc.get_stats()["result"]["orderCount"]
        # A count increase is taken as evidence the watcher accepted the order.
        if order_count_after > order_count_before:
            order.order_status = OrderStatus.FILLABLE
            if commit:
                self._commit_db()
    def handle_fillable_order(self, order_hash, commit=True):
        """Handle fillable order update.
        Keyword argument:
        order_hash -- string hex hash of order
        commit -- boolean of whether to commit the change (default: True)
        """
        LOGGER.debug("order with hash=%s is fillable", order_hash)
        order = self.get_order_by_hash(order_hash)
        if order:
            order.order_status = OrderStatus.FILLABLE
            if commit:
                self._commit_db()
    def handle_unfillable_order(
        self,
        order_hash,
        reason,
        commit=True,
    ):
        """Handle unfillable order update by marking it as unfillable
        Keyword argument:
        order_hash -- string hex hash of order
        reason -- string error code. This is the 'error' key from inside
            order-watcher-server's on_message 'result' key and must match
            one of the keys from self.unfillable_handlers.
        commit -- boolean of whether to commit the change (default: True)
        """
        LOGGER.debug("Setting order_hash=%s to NOT_FILLABLE due to %s",
                     order_hash, reason)
        order = self.get_order_by_hash(order_hash)
        if order:
            order.order_status = OrderStatus.UNFILLABLE
            if commit:
                self._commit_db()
    def _commit_db(self):  # pylint: disable=no-self-use
        # Flush all pending status changes in one transaction.
        LOGGER.debug("commit changes to DB...")
        db.session.commit()  # pylint: disable=no-member
if __name__ == "__main__":
    import signal
    from pydex_app import create_app  # pylint: disable=ungrouped-imports
    # Build the flask app so the handler can run DB operations in its context.
    APP = create_app()
    OSU = OrderUpdateHandler(app=APP)
    def signal_handler(_signal, _frame):
        """Handle Ctrl+C signal by telling OrderStatusUpdate to stop running"""
        LOGGER.warning("Ctrl+C detected... Will Stop!")
        # Flip the loop flag and stop the watcher client thread for a clean exit.
        OSU.running = False
        OSU.owc.stop()
    signal.signal(signal.SIGINT, signal_handler)
    OSU.run()
| 42.286307 | 96 | 0.644196 |
90a8b3e88fa092732125b7a135aedab63211c185 | 3,644 | py | Python | python/lib/Lib/site-packages/django/test/utils.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 27 | 2015-02-11T16:31:43.000Z | 2021-12-18T04:24:19.000Z | python/lib/Lib/site-packages/django/test/utils.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 1 | 2021-06-30T10:10:56.000Z | 2021-06-30T10:10:56.000Z | python/lib/Lib/site-packages/django/test/utils.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 14 | 2015-12-27T20:19:14.000Z | 2020-12-14T01:41:22.000Z | import sys
import time
import os
import warnings
from django.conf import settings
from django.core import mail
from django.core.mail.backends import locmem
from django.test import signals
from django.template import Template
from django.utils.translation import deactivate
__all__ = ('Approximate', 'ContextList', 'setup_test_environment',
'teardown_test_environment', 'get_runner')
class Approximate(object):
    """Value wrapper that compares equal to anything within ``places`` decimals."""
    def __init__(self, val, places=7):
        self.val = val
        self.places = places
    def __repr__(self):
        # Display exactly like the wrapped value.
        return repr(self.val)
    def __eq__(self, other):
        # An exact match short-circuits; otherwise the rounded absolute
        # difference decides.
        if self.val == other:
            return True
        delta = abs(self.val - other)
        return round(delta, self.places) == 0
class ContextList(list):
    """A wrapper that provides direct key access to context items contained
    in a list of context objects.
    """
    def __getitem__(self, key):
        # String keys search each subcontext in order and return the first
        # match; any other key falls through to normal list indexing.
        # NOTE(review): 'basestring' is Python 2 only — this branch raises
        # NameError on Python 3.
        if isinstance(key, basestring):
            for subcontext in self:
                if key in subcontext:
                    return subcontext[key]
            raise KeyError(key)
        else:
            return super(ContextList, self).__getitem__(key)
    def __contains__(self, key):
        # Membership is defined in terms of lookup: a key is "in" the list
        # when some subcontext provides it.
        try:
            value = self[key]
        except KeyError:
            return False
        return True
def instrumented_test_render(self, context):
    """
    An instrumented Template render method, providing a signal
    that can be intercepted by the test system Client
    """
    # Emit template_rendered so the test client can record which template and
    # context were used, then defer to the normal nodelist rendering.
    signals.template_rendered.send(sender=self, template=self, context=context)
    return self.nodelist.render(context)
def setup_test_environment():
    """Perform any global pre-test setup. This involves:
        - Installing the instrumented test renderer
        - Set the email backend to the locmem email backend.
        - Setting the active locale to match the LANGUAGE_CODE setting.
    """
    # Stash originals on the patched objects so teardown_test_environment
    # can restore them later.
    Template.original_render = Template._render
    Template._render = instrumented_test_render
    mail.original_SMTPConnection = mail.SMTPConnection
    mail.SMTPConnection = locmem.EmailBackend
    mail.original_email_backend = settings.EMAIL_BACKEND
    settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
    # Captured outgoing messages accumulate here during tests.
    mail.outbox = []
    deactivate()
def teardown_test_environment():
    """Perform any global post-test teardown. This involves:
        - Restoring the original test renderer
        - Restoring the email sending functions
    """
    # Undo everything setup_test_environment patched, removing the stashed
    # originals so repeated setup/teardown cycles stay clean.
    Template._render = Template.original_render
    del Template.original_render
    mail.SMTPConnection = mail.original_SMTPConnection
    del mail.original_SMTPConnection
    settings.EMAIL_BACKEND = mail.original_email_backend
    del mail.original_email_backend
    del mail.outbox
def get_warnings_state():
    """Return a snapshot of the warnings module's filter state."""
    # warnings has no public API for this; copying the filter list directly
    # is the de-facto approach (observed working on Python 2.4 through 2.7).
    return list(warnings.filters)
def restore_warnings_state(state):
    """Reinstall a warnings-filter snapshot taken by get_warnings_state()."""
    # Assign a fresh copy so later mutation of `state` cannot leak into
    # the live filter list.
    warnings.filters = list(state)
def get_runner(settings):
    """Import and return the test-runner class named by settings.TEST_RUNNER."""
    module_path, _, runner_name = settings.TEST_RUNNER.rpartition('.')
    # Allow for Python 2.5 relative paths
    if not module_path:
        module_path = '.'
    module = __import__(module_path, {}, {}, runner_name)
    return getattr(module, runner_name)
| 28.46875 | 79 | 0.690999 |
4579a050a702caa6641f5a65ad70e56d7132d5b2 | 1,425 | py | Python | test_frame/best_simple_example/test_qps_consume.py | fakegit/distributed_framework | bb183729a9328d654242d7b01c345a7be8007394 | [
"Apache-2.0"
] | 333 | 2019-08-08T10:25:27.000Z | 2022-03-30T07:32:04.000Z | test_frame/best_simple_example/test_qps_consume.py | 975278060/distributed_framework | bb183729a9328d654242d7b01c345a7be8007394 | [
"Apache-2.0"
] | 38 | 2020-04-24T01:47:51.000Z | 2021-12-20T07:22:15.000Z | test_frame/best_simple_example/test_qps_consume.py | 975278060/distributed_framework | bb183729a9328d654242d7b01c345a7be8007394 | [
"Apache-2.0"
] | 84 | 2019-08-09T11:51:14.000Z | 2022-03-02T06:29:09.000Z | # -*- coding: utf-8 -*-
# @Author : ydf
# @Time : 2019/8/8 0008 14:57
import time
import threading
from function_scheduling_distributed_framework import task_deco, BrokerEnum,ConcurrentModeEnum
t_start = time.time()
@task_deco('queue_test2_qps', qps=2, broker_kind=BrokerEnum.PERSISTQUEUE,concurrent_mode=ConcurrentModeEnum.THREADING,concurrent_num=600 )
def f2(a, b):
    """
    Do not be alarmed by concurrent_num = 600: this is an adaptive thread pool,
    and it will not actually start that many threads when the function is fast.
    This example makes the function's run time change over the course of the run,
    so the framework cannot cheat by estimating a fixed duration up front; it
    tests whether a stable QPS and automatic grow/shrink of the pool are achieved.
    Note that the printed thread count also includes a few other threads started
    by the framework at boot, so the number is not exactly equal to the computed need.
    ## Search the console output for the "newly started thread" keyword to see when the pool grows.
    ## Search the console output for the "stopped thread" keyword to see when the pool shrinks.
    """
    result = a + b
    sleep_time = 0.01
    if time.time() - t_start > 60: # first let the run time grow slowly, to check the pool enlarges on demand
        sleep_time = 7
    if time.time() - t_start > 120:
        sleep_time = 31
    if time.time() - t_start > 200:
        sleep_time = 79
    if time.time() - t_start > 400: # finally shrink the run time again, to check the pool shrinks automatically
        sleep_time = 0.8
    if time.time() - t_start > 500:
        sleep_time = None
    print(f'{time.strftime("%H:%M:%S")} ,当前线程数量是 {threading.active_count()}, {a} + {b} 的结果是 {result}, sleep {sleep_time} 秒')
    if sleep_time is not None:
        time.sleep(sleep_time)  # simulate work that blocks for n seconds; concurrency must hide this blocking
    return result
if __name__ == '__main__':
    # Purge any messages left in the queue, enqueue fresh work, then consume.
    f2.clear()
    for i in range(1400):
        f2.push(i, i * 2)
    f2.consume()
34a56de3227ca06943c3525c8ba102a367f5accb | 5,409 | py | Python | chromatose/extraction.py | atisor73/chromatose | c7fba6da3854c57e757e450206bc65f438e212a5 | [
"MIT"
] | 10 | 2020-09-12T14:42:18.000Z | 2021-10-05T03:16:20.000Z | chromatose/extraction.py | atisor73/chromatose | c7fba6da3854c57e757e450206bc65f438e212a5 | [
"MIT"
] | null | null | null | chromatose/extraction.py | atisor73/chromatose | c7fba6da3854c57e757e450206bc65f438e212a5 | [
"MIT"
] | null | null | null | """
The following code was written by Ivar Stangeby (@qTipTip) in his package `Pylette`,
and modified by Rosita Fu (@atisor73).
Date: 25 March 2021
"""
import warnings
import numpy as np
import panel as pn
from PIL import Image
from sklearn.cluster import KMeans
from .utils import *
from .viz import *
pn.extension()
class _ColorBox(object):
"""
Represents a box in the RGB color space w/ associated attributes
used in Median Cut algorithm
"""
def __init__(self, colors):
"""
Initialize with a numpy array of RGB colors.
colors: np.ndarray (width * height, 3)
"""
self.colors = colors
self._get_min_max()
def _get_min_max(self):
min_channel = np.min(self.colors, axis=0)
max_channel = np.max(self.colors, axis=0)
self.min_channel = min_channel
self.max_channel = max_channel
def __lt__(self, other):
return self.size < other.size # compare cubes by volumes
@property
def size(self):
return self.volume
def _get_dominant_channel(self):
dominant_channel = np.argmax(self.max_channel - self.min_channel)
return dominant_channel
@property
def average(self):
return np.mean(self.colors, axis=0) # avg color contained in _ColorBox
@property
def volume(self):
return np.prod(self.max_channel - self.min_channel)
def split(self):
"""
Splits _ColorBox in to two _ColorBoxes at median of dominant color channel.
Return: [_ColorBox1, _ColorBox2]
"""
dominant_channel = self._get_dominant_channel() # dominant
self.colors = self.colors[self.colors[:, dominant_channel].argsort()] # sorting...
median_index = len(self.colors) // 2 # median
return [ _ColorBox(self.colors[:median_index]),
_ColorBox(self.colors[median_index:]) ]
def _median_cut(arr, height, width, n_colors):
    """Extract ``n_colors`` representative colors via the Median Cut algorithm.

    Args:
        arr: image pixel array; reshaped internally to (width * height, channels)
        height: image height in pixels
        width: image width in pixels
        n_colors: number of colors to extract

    Returns:
        list of RGB tuples of ints — the average color of each final box
    """
    arr = arr.reshape((width * height, -1))
    boxes = [_ColorBox(arr)]
    # Each iteration:
    # 1. find the largest box (by volume, via _ColorBox.__lt__)
    # 2. split it at the median of its dominant channel
    # 3. replace it in the list with its two halves
    while len(boxes) < n_colors:
        largest_idx = np.argmax(boxes)
        boxes = boxes[:largest_idx] + boxes[largest_idx].split() + boxes[largest_idx + 1:]
    # (Removed an unused `full_box_size` local that was computed but never read.)
    colors = [tuple([int(x) for x in box.average]) for box in boxes]
    return colors
def _k_means(arr, height, width, n_colors):
    """Extract ``n_colors`` representative colors by K-means clustering.

    Args:
        arr: image pixel array; reshaped internally to (width * height, channels)
        height: image height in pixels
        width: image width in pixels
        n_colors: number of clusters / colors to extract

    Returns:
        list of RGB tuples (cluster centers cast to int)
    """
    arr = np.reshape(arr, (width * height, -1))
    model = KMeans(n_clusters=n_colors)
    model.fit_predict(arr)
    # `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # `int` is the equivalent dtype and works on all versions.
    palette = np.array(model.cluster_centers_, dtype=int)
    # (Removed unused color_count / color_frequency locals that were computed
    # from the labels but never used.)
    palette = [tuple(c) for c in palette]
    return palette
def extract(path,
            n_colors=5,
            method='kmeans',
            resize=True,
            sort=False,
            show=True
            ):
    '''
    Extract a color palette from an image file.

    Arguments:
    ----------
    path : image path (str)
    n_colors : desired length (int)
    method : either K-means or median-cut algorithm (str)
        'kmeans', 'median', 'both'
    resize : shrink image for quicker return (bool)
    sort : amateur sort by luminance (bool)
    show : prints hex and returns panel object (bool)

    Returns:
    --------
    palette : list of hex values (list)
        if method == "both", list of palettes: [kmeans, median]
    '''
    img = Image.open(path).convert('RGB')
    # Downsampling to 256x256 trades palette precision for speed.
    if resize: img = img.resize((256, 256))
    width, height = img.size
    arr = np.asarray(img)
    # Method matching is intentionally permissive about capitalization/aliases.
    if (method in ["MC", "mc", "median", "median cut", "median-cut", "Median Cut"]):
        colors = _median_cut(arr, height, width, n_colors)
    elif (method in ["KM", "km", "kmeans", "KMEANS", "k-means", "K-means", "K-MEANS"]):
        colors = _k_means(arr, height, width, n_colors)
    elif (method in ["both", "BOTH", "Both"]):
        # Run both extractors and return/display the pair.
        k = _k_means(arr, height, width, n_colors)
        m = _median_cut(arr, height, width, n_colors)
        if sort: k, m = luminance_sort(k), luminance_sort(m)
        k, m = rgb_to_hex(k), rgb_to_hex(m)
        palettes = {"kmeans": k, "median": m}
        if show:
            print("  ", k)
            print("  ", m)
            return pn.Column(palplot(k), palplot(m))
            # return pn.Column(pn.pane.Markdown(f"  {k}",
            #                                   style={'font-family':'Open Sans', 'font-size':'17px'},
            #                                   align="center"
            #                                  ),
            #                  palplot(k),
            #                  pn.pane.Markdown(f"  {m}",
            #                                   style={'font-family':'Open Sans', 'font-size':'17px'},
            #                                   align='center'
            #                                  ),
            #                  palplot(m))
        return [k, m]
    else:
        # Unrecognized method string: warn and fall back to K-means.
        warnings.warn("\nWarning: Defaulting to K-means ...",stacklevel=2)
        colors = _k_means(arr, height, width, n_colors)
    if sort: colors = luminance_sort(colors)
    palette = rgb_to_hex(colors)
    if show:
        print("  ", palette, end="\n\n")
        return palplot(palette)
    return palette
| 33.184049 | 90 | 0.555925 |
3834cf3caf709fa05a8a890a69e5a0e4afb17a89 | 216 | py | Python | Python/NLP/nltk_book1.py | vbsteja/code | 0c8f4dc579f5de21b6c55fe6e65c3c8eb5473687 | [
"Apache-2.0"
] | null | null | null | Python/NLP/nltk_book1.py | vbsteja/code | 0c8f4dc579f5de21b6c55fe6e65c3c8eb5473687 | [
"Apache-2.0"
] | null | null | null | Python/NLP/nltk_book1.py | vbsteja/code | 0c8f4dc579f5de21b6c55fe6e65c3c8eb5473687 | [
"Apache-2.0"
] | null | null | null | import nltk
from nltk.book import *
# List the Gutenberg corpus file identifiers bundled with NLTK.
nltk.corpus.gutenberg.fileids()
def lexical_diversity(text):
    """Ratio of distinct tokens to total tokens in *text*."""
    unique_tokens = set(text)
    return len(unique_tokens) / len(text)
def percentage(count, total):
    """Express *count* as a percentage of *total*."""
    scaled = 100 * count
    return scaled / total
# Example from the NLTK book: 12 out of 119 as a percentage.
percentage(12, 119)
| 16.615385 | 37 | 0.731481 |
e0a2a52c757f06dced8221d9a98c43539ed80b6b | 13,482 | py | Python | robosuite/controllers/joint_pos.py | wangcongrobot/robosuite | fb9220491607ead5f1cd5eb25d17626c9ae3756c | [
"MIT"
] | 3 | 2020-07-15T22:36:58.000Z | 2021-01-06T17:04:50.000Z | robosuite/controllers/joint_pos.py | wangcongrobot/robosuite | fb9220491607ead5f1cd5eb25d17626c9ae3756c | [
"MIT"
] | null | null | null | robosuite/controllers/joint_pos.py | wangcongrobot/robosuite | fb9220491607ead5f1cd5eb25d17626c9ae3756c | [
"MIT"
] | null | null | null | from robosuite.controllers.base_controller import Controller
from robosuite.utils.control_utils import *
import numpy as np
# Supported impedance modes
IMPEDANCE_MODES = {"fixed", "variable", "variable_kp"}
class JointPositionController(Controller):
"""
Controller for controlling robot arm via impedance control. Allows position control of the robot's joints.
NOTE: Control input actions assumed to be taken relative to the current joint positions. A given action to this
controller is assumed to be of the form: (dpos_j0, dpos_j1, ... , dpos_jn-1) for an n-joint robot
Args:
sim (MjSim): Simulator instance this controller will pull robot state updates from
eef_name (str): Name of controlled robot arm's end effector (from robot XML)
joint_indexes (dict): Each key contains sim reference indexes to relevant robot joint information, namely:
:`'joints'`: list of indexes to relevant robot joints
:`'qpos'`: list of indexes to relevant robot joint positions
:`'qvel'`: list of indexes to relevant robot joint velocities
actuator_range (2-tuple of array of float): 2-Tuple (low, high) representing the robot joint actuator range
input_max (float or Iterable of float): Maximum above which an inputted action will be clipped. Can be either be
a scalar (same value for all action dimensions), or a list (specific values for each dimension). If the
latter, dimension should be the same as the control dimension for this controller
input_min (float or Iterable of float): Minimum below which an inputted action will be clipped. Can be either be
a scalar (same value for all action dimensions), or a list (specific values for each dimension). If the
latter, dimension should be the same as the control dimension for this controller
output_max (float or Iterable of float): Maximum which defines upper end of scaling range when scaling an input
action. Can be either be a scalar (same value for all action dimensions), or a list (specific values for
each dimension). If the latter, dimension should be the same as the control dimension for this controller
output_min (float or Iterable of float): Minimum which defines upper end of scaling range when scaling an input
action. Can be either be a scalar (same value for all action dimensions), or a list (specific values for
each dimension). If the latter, dimension should be the same as the control dimension for this controller
kp (float or Iterable of float): positional gain for determining desired torques based upon the joint pos error.
Can be either be a scalar (same value for all action dims), or a list (specific values for each dim)
damping_ratio (float or Iterable of float): used in conjunction with kp to determine the velocity gain for
determining desired torques based upon the joint pos errors. Can be either be a scalar (same value for all
action dims), or a list (specific values for each dim)
impedance_mode (str): Impedance mode with which to run this controller. Options are {"fixed", "variable",
"variable_kp"}. If "fixed", the controller will have fixed kp and damping_ratio values as specified by the
@kp and @damping_ratio arguments. If "variable", both kp and damping_ratio will now be part of the
controller action space, resulting in a total action space of num_joints * 3. If "variable_kp", only kp
will become variable, with damping_ratio fixed at 1 (critically damped). The resulting action space will
then be num_joints * 2.
kp_limits (2-list of float or 2-list of Iterable of floats): Only applicable if @impedance_mode is set to either
"variable" or "variable_kp". This sets the corresponding min / max ranges of the controller action space
for the varying kp values. Can be either be a 2-list (same min / max for all kp action dims), or a 2-list
of list (specific min / max for each kp dim)
damping_ratio_limits (2-list of float or 2-list of Iterable of floats): Only applicable if @impedance_mode is
set to "variable". This sets the corresponding min / max ranges of the controller action space for the
varying damping_ratio values. Can be either be a 2-list (same min / max for all damping_ratio action dims),
or a 2-list of list (specific min / max for each damping_ratio dim)
policy_freq (int): Frequency at which actions from the robot policy are fed into this controller
qpos_limits (2-list of float or 2-list of Iterable of floats): Limits (rad) below and above which the magnitude
of a calculated goal joint position will be clipped. Can be either be a 2-list (same min/max value for all
joint dims), or a 2-list of list (specific min/max values for each dim)
interpolator (Interpolator): Interpolator object to be used for interpolating from the current joint position to
the goal joint position during each timestep between inputted actions
**kwargs: Does nothing; placeholder to "sink" any additional arguments so that instantiating this controller
via an argument dict that has additional extraneous arguments won't raise an error
Raises:
AssertionError: [Invalid impedance mode]
"""
    def __init__(self,
                 sim,
                 eef_name,
                 joint_indexes,
                 actuator_range,
                 input_max=1,
                 input_min=-1,
                 output_max=0.05,
                 output_min=-0.05,
                 kp=50,
                 damping_ratio=1,
                 impedance_mode="fixed",
                 kp_limits=(0, 300),
                 damping_ratio_limits=(0, 100),
                 policy_freq=20,
                 qpos_limits=None,
                 interpolator=None,
                 **kwargs  # does nothing; used so no error raised when dict is passed with extra terms used previously
                 ):
        """Initialize the joint-position controller; see class docstring for argument details."""
        super().__init__(
            sim,
            eef_name,
            joint_indexes,
            actuator_range,
        )
        # Control dimension: one command entry per controlled joint.
        self.control_dim = len(joint_indexes["joints"])
        # input and output max and min (allow for either explicit lists or single numbers);
        # nums2array broadcasts scalars to per-joint arrays.
        self.input_max = self.nums2array(input_max, self.control_dim)
        self.input_min = self.nums2array(input_min, self.control_dim)
        self.output_max = self.nums2array(output_max, self.control_dim)
        self.output_min = self.nums2array(output_min, self.control_dim)
        # Joint position limits (rad); None disables goal clipping.
        self.position_limits = np.array(qpos_limits) if qpos_limits is not None else qpos_limits
        # Gains: kd is derived from kp via the damping ratio
        # (kd = 2 * sqrt(kp) * damping_ratio).
        self.kp = self.nums2array(kp, self.control_dim)
        self.kd = 2 * np.sqrt(self.kp) * damping_ratio
        # kp and damping-ratio bounds, used when gains are part of the action
        # space ("variable" / "variable_kp" impedance modes).
        self.kp_min = self.nums2array(kp_limits[0], self.control_dim)
        self.kp_max = self.nums2array(kp_limits[1], self.control_dim)
        self.damping_ratio_min = self.nums2array(damping_ratio_limits[0], self.control_dim)
        self.damping_ratio_max = self.nums2array(damping_ratio_limits[1], self.control_dim)
        # Verify the proposed impedance mode is supported
        assert impedance_mode in IMPEDANCE_MODES, "Error: Tried to instantiate OSC controller for unsupported " \
                                                  "impedance mode! Inputted impedance mode: {}, Supported modes: {}". \
            format(impedance_mode, IMPEDANCE_MODES)
        # Impedance mode
        self.impedance_mode = impedance_mode
        # Add to control dim based on impedance_mode: "variable" appends
        # damping-ratio and kp terms per joint (x3); "variable_kp" appends kp only (x2).
        if self.impedance_mode == "variable":
            self.control_dim *= 3
        elif self.impedance_mode == "variable_kp":
            self.control_dim *= 2
        # control frequency (Hz) at which policy actions arrive
        self.control_freq = policy_freq
        # optional interpolator toward the goal between policy steps
        self.interpolator = interpolator
        # goal joint positions; set on the first call to set_goal()
        self.goal_qpos = None
def set_goal(self, action, set_qpos=None):
    """
    Sets goal based on input @action. If self.impedance_mode is not "fixed", then the input will be parsed into the
    delta values to update the goal position / pose and the kp and/or damping_ratio values to be immediately updated
    internally before executing the proceeding control loop.

    Note that @action expected to be in the following format, based on impedance mode!

        :Mode `'fixed'`: [joint pos command]
        :Mode `'variable'`: [damping_ratio values, kp values, joint pos command]
        :Mode `'variable_kp'`: [kp values, joint pos command]

    Args:
        action (Iterable): Desired relative joint position goal state
        set_qpos (Iterable): If set, overrides @action and sets the desired absolute joint position goal state

    Raises:
        AssertionError: [Invalid action dimension size]
    """
    # Update state
    self.update()

    # Parse action based on the impedance mode, and update kp / kd as necessary
    jnt_dim = len(self.qpos_index)
    if self.impedance_mode == "variable":
        damping_ratio, kp, delta = action[:jnt_dim], action[jnt_dim:2*jnt_dim], action[2*jnt_dim:]
        self.kp = np.clip(kp, self.kp_min, self.kp_max)
        self.kd = 2 * np.sqrt(self.kp) * np.clip(damping_ratio, self.damping_ratio_min, self.damping_ratio_max)
    elif self.impedance_mode == "variable_kp":
        kp, delta = action[:jnt_dim], action[jnt_dim:]
        self.kp = np.clip(kp, self.kp_min, self.kp_max)
        self.kd = 2 * np.sqrt(self.kp)  # critically damped
    else:  # This is case "fixed"
        delta = action

    if delta is not None:
        # Bug fix: validate the delta size only when a delta was actually
        # supplied. Previously the assert ran unconditionally, so callers
        # relying on @set_qpos with action=None crashed on len(None).
        assert len(delta) == jnt_dim, "Delta qpos must be equal to the robot's joint dimension space!"
        scaled_delta = self.scale_action(delta)
    else:
        scaled_delta = None

    self.goal_qpos = set_goal_position(scaled_delta,
                                       self.joint_pos,
                                       position_limit=self.position_limits,
                                       set_pos=set_qpos)

    # Keep the interpolator (if any) tracking the new goal
    if self.interpolator is not None:
        self.interpolator.set_goal(self.goal_qpos)
def run_controller(self):
    """
    Calculates the torques required to reach the desired setpoint

    Implements a joint-space PD law mapped through the mass matrix, plus the
    gravity/Coriolis compensation term maintained by the superclass.

    Returns:
         np.array: Command torques
    """
    # Make sure goal has been set (default: hold a zero-delta goal)
    if self.goal_qpos is None:
        self.set_goal(np.zeros(self.control_dim))

    # Update state
    self.update()

    desired_qpos = None

    # Only linear interpolator is currently supported
    if self.interpolator is not None:
        # Linear case
        if self.interpolator.order == 1:
            desired_qpos = self.interpolator.get_interpolated_goal(self.joint_pos)
        else:
            # Nonlinear case not currently supported
            # NOTE(review): desired_qpos stays None here, so the subtraction
            # below would raise — confirm nonlinear interpolators are rejected
            # upstream before reaching this point.
            pass
    else:
        desired_qpos = np.array(self.goal_qpos)

    # torques = pos_err * kp + vel_err * kd  (velocity setpoint is zero)
    position_error = desired_qpos - self.joint_pos
    vel_pos_error = -self.joint_vel
    desired_torque = (np.multiply(np.array(position_error), np.array(self.kp))
                      + np.multiply(vel_pos_error, self.kd))

    # Return desired torques plus gravity compensations; scaling by the mass
    # matrix turns the PD acceleration into joint torques
    self.torques = np.dot(self.mass_matrix, desired_torque) + self.torque_compensation

    # Always run superclass call for any cleanups at the end
    super().run_controller()

    return self.torques
def reset_goal(self):
    """Reset the joint-position goal to the robot's current joint positions."""
    current = self.joint_pos
    self.goal_qpos = current
    # Keep the interpolator (if any) in sync with the new goal.
    interp = self.interpolator
    if interp is not None:
        interp.set_goal(current)
@property
def control_limits(self):
    """
    Returns the limits over this controller's action space, overrides the superclass property
    Returns the following (generalized for both high and low limits), based on the impedance mode:

        :Mode `'fixed'`: [joint pos command]
        :Mode `'variable'`: [damping_ratio values, kp values, joint pos command]
        :Mode `'variable_kp'`: [kp values, joint pos command]

    Returns:
        2-tuple:

            - (np.array) minimum action values
            - (np.array) maximum action values
    """
    mode = self.impedance_mode
    if mode == "variable":
        low_parts = (self.damping_ratio_min, self.kp_min, self.input_min)
        high_parts = (self.damping_ratio_max, self.kp_max, self.input_max)
    elif mode == "variable_kp":
        low_parts = (self.kp_min, self.input_min)
        high_parts = (self.kp_max, self.input_max)
    else:  # "fixed" mode: the action is the joint position command alone
        return self.input_min, self.input_max
    return np.concatenate(low_parts), np.concatenate(high_parts)
@property
def name(self):
    """Identifier string for this controller type."""
    controller_name = 'JOINT_POSITION'
    return controller_name
| 46.489655 | 120 | 0.644637 |
980ce82614f5eb160088af74fca488b1f0fe6c00 | 1,903 | py | Python | Python/simplePortScan.py | frankcash/Misc | 18a8ac6032ba48f48c3c6402a0ec3fdd164b9e34 | [
"MIT"
] | null | null | null | Python/simplePortScan.py | frankcash/Misc | 18a8ac6032ba48f48c3c6402a0ec3fdd164b9e34 | [
"MIT"
] | null | null | null | Python/simplePortScan.py | frankcash/Misc | 18a8ac6032ba48f48c3c6402a0ec3fdd164b9e34 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import socket
import subprocess
import sys
from datetime import datetime
from optparse import OptionParser
# Check what time the scan started
# using the range function to specify ports (this will only go from 1 to 1024)
# also put in some error handling
def getHost(remoteServerIP):
    # Resolve a hostname (or dotted IP) to an IPv4 address string.
    # Returns False when DNS resolution fails instead of raising.
    try:
        return socket.gethostbyname(remoteServerIP)
    except socket.gaierror:
        return False
def runScan(remoteServerIP):
    # Scan TCP ports 1-1024 on the given IPv4 address and print each open port.
    # connect_ex() returns 0 on success (port open) and an errno otherwise, so
    # closed/filtered ports are silently skipped. Python 2 script code.
    # NOTE(review): if socket() or connect_ex() raises mid-loop, the current
    # socket is not closed before exiting — acceptable for a short-lived CLI.
    try:
        for port in range(1, 1025):
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # TCP Socket
            result = sock.connect_ex((remoteServerIP, port))
            if result==0:
                print "Port {}: \t Open".format(port)
            sock.close()
    except KeyboardInterrupt:
        # Allow the user to abort a slow scan with Ctrl-C.
        print "Keyboard interrupt."
        sys.exit()
    except socket.gaierror:
        print 'Hostname could not be resolved. Exiting'
        sys.exit()
    except socket.error:
        print "Couldn't connect to server"
        sys.exit()
if __name__=="__main__":
parser=OptionParser()
parser.add_option("-t", "--target", dest="host", type="string",
help="enter host IP or website", metavar="107.170.175.213")
(options, args)=parser.parse_args()
if options.host==None:
parser.print_help()
else:
host =getHost(options.host)
remoteServerIP = socket.gethostbyname(host)
try:
print '-' * 60
print 'Beginning port scan.'
print '-' * 60
t1 = datetime.now()
remoteServerIP = socket.gethostbyname(host)
runScan(remoteServerIP)
# get final time
t2 = datetime.now()
# calculate time difference
total = t2-t1
print "Scanning Completed in: ",total
except:
print "Something went wrong."
| 25.716216 | 81 | 0.598003 |
2c35f736b1c156f965b3e390f57e3caa3441da8e | 2,652 | py | Python | aliyun-python-sdk-dts/aliyunsdkdts/request/v20200101/ConfigureSynchronizationJobReplicatorCompareRequest.py | jorsonzen/aliyun-openapi-python-sdk | 0afbfa8e5f9e19455695aa799f7dcc1cd853d827 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-dts/aliyunsdkdts/request/v20200101/ConfigureSynchronizationJobReplicatorCompareRequest.py | jorsonzen/aliyun-openapi-python-sdk | 0afbfa8e5f9e19455695aa799f7dcc1cd853d827 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-dts/aliyunsdkdts/request/v20200101/ConfigureSynchronizationJobReplicatorCompareRequest.py | jorsonzen/aliyun-openapi-python-sdk | 0afbfa8e5f9e19455695aa799f7dcc1cd853d827 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdts.endpoint import endpoint_data
class ConfigureSynchronizationJobReplicatorCompareRequest(RpcRequest):
    """Request object for the DTS ``ConfigureSynchronizationJobReplicatorCompare``
    API (product 'Dts', version 2020-01-01). Auto-generated getter/setter pairs
    map directly onto query-string parameters of the POST request."""

    def __init__(self):
        RpcRequest.__init__(self, 'Dts', '2020-01-01', 'ConfigureSynchronizationJobReplicatorCompare','dts')
        self.set_method('POST')
        # Wire up regional endpoint resolution when generated endpoint data exists.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_ClientToken(self):
        # Idempotency token supplied by the caller.
        return self.get_query_params().get('ClientToken')

    def set_ClientToken(self,ClientToken):
        self.add_query_param('ClientToken',ClientToken)

    def get_OwnerId(self):
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self,OwnerId):
        self.add_query_param('OwnerId',OwnerId)

    def get_SynchronizationJobId(self):
        # ID of the synchronization job to configure.
        return self.get_query_params().get('SynchronizationJobId')

    def set_SynchronizationJobId(self,SynchronizationJobId):
        self.add_query_param('SynchronizationJobId',SynchronizationJobId)

    def get_AccountId(self):
        return self.get_query_params().get('AccountId')

    def set_AccountId(self,AccountId):
        self.add_query_param('AccountId',AccountId)

    def get_SynchronizationReplicatorCompareEnable(self):
        # Whether the replicator comparison feature is enabled.
        return self.get_query_params().get('SynchronizationReplicatorCompareEnable')

    def set_SynchronizationReplicatorCompareEnable(self,SynchronizationReplicatorCompareEnable):
        self.add_query_param('SynchronizationReplicatorCompareEnable',SynchronizationReplicatorCompareEnable)

    def get_SynchronizationDirection(self):
        # Direction of synchronization (e.g. forward/reverse) for two-way jobs.
        return self.get_query_params().get('SynchronizationDirection')

    def set_SynchronizationDirection(self,SynchronizationDirection):
        self.add_query_param('SynchronizationDirection',SynchronizationDirection)
c0c7ce7cc4188a6d029ca5ac9ac1efcb5aaaea93 | 162 | py | Python | test.py | chrisheckler/flask-basic | d9cae050466014f9b6da54f900eae2fa005c4944 | [
"MIT"
] | null | null | null | test.py | chrisheckler/flask-basic | d9cae050466014f9b6da54f900eae2fa005c4944 | [
"MIT"
] | null | null | null | test.py | chrisheckler/flask-basic | d9cae050466014f9b6da54f900eae2fa005c4944 | [
"MIT"
] | null | null | null | from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello_world():
    """Root endpoint: return a plain-text greeting."""
    # Bug fix: the original line ended with a stray '>' ("return 'Hello World'>"),
    # which is a syntax error; return the string alone.
    return 'Hello World'
if __name__ == '__main__':
    # Bug fix: Flask's run() takes the lowercase keyword 'debug';
    # 'DEBUG=True' is an unexpected keyword argument and raises TypeError.
    app.run(debug=True)
| 13.5 | 26 | 0.660494 |
2344c09634e64a351d7152c180344d661c6b41a9 | 8,922 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_11_01/aio/operations/_hub_virtual_network_connections_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_11_01/aio/operations/_hub_virtual_network_connections_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_11_01/aio/operations/_hub_virtual_network_connections_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class HubVirtualNetworkConnectionsOperations:
    """HubVirtualNetworkConnectionsOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2018_11_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def get(
        self,
        resource_group_name: str,
        virtual_hub_name: str,
        connection_name: str,
        **kwargs
    ) -> "models.HubVirtualNetworkConnection":
        """Retrieves the details of a HubVirtualNetworkConnection.

        :param resource_group_name: The resource group name of the VirtualHub.
        :type resource_group_name: str
        :param virtual_hub_name: The name of the VirtualHub.
        :type virtual_hub_name: str
        :param connection_name: The name of the vpn connection.
        :type connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: HubVirtualNetworkConnection, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2018_11_01.models.HubVirtualNetworkConnection
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.HubVirtualNetworkConnection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-11-01"
        accept = "application/json"

        # Construct URL by substituting the serialized path parameters into the
        # metadata URL template attached to this method below.
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
            'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Any non-200 response is mapped to the appropriate azure-core exception.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('HubVirtualNetworkConnection', pipeline_response)

        # Optional caller hook: transform the raw response before returning.
        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/hubVirtualNetworkConnections/{connectionName}'}  # type: ignore

    def list(
        self,
        resource_group_name: str,
        virtual_hub_name: str,
        **kwargs
    ) -> AsyncIterable["models.ListHubVirtualNetworkConnectionsResult"]:
        """Retrieves the details of all HubVirtualNetworkConnections.

        :param resource_group_name: The resource group name of the VirtualHub.
        :type resource_group_name: str
        :param virtual_hub_name: The name of the VirtualHub.
        :type virtual_hub_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ListHubVirtualNetworkConnectionsResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_11_01.models.ListHubVirtualNetworkConnectionsResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.ListHubVirtualNetworkConnectionsResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-11-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the HTTP request: the first page uses the URL template,
            # subsequent pages follow the service-provided nextLink verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (next_link, items) to the pager.
            deserialized = self._deserialize('ListHubVirtualNetworkConnectionsResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page; raise a mapped exception on non-200 responses.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize(models.Error, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/hubVirtualNetworkConnections'}  # type: ignore
| 49.292818 | 215 | 0.675633 |
7608640833267b1039aa4555ecd0199eac6409d2 | 16,286 | py | Python | bayesian_decision_tree/hyperplane_optimization.py | UBS-IB/bayesian_tree | 718aecc68e7ea527380b8e299b4f7d69e86f7400 | [
"Apache-2.0"
] | 29 | 2019-02-06T19:39:52.000Z | 2022-02-17T08:09:55.000Z | bayesian_decision_tree/hyperplane_optimization.py | UBS-IB/bayesian_tree | 718aecc68e7ea527380b8e299b4f7d69e86f7400 | [
"Apache-2.0"
] | 1 | 2022-01-27T01:31:50.000Z | 2022-01-27T01:31:50.000Z | bayesian_decision_tree/hyperplane_optimization.py | UBS-IB/bayesian_tree | 718aecc68e7ea527380b8e299b4f7d69e86f7400 | [
"Apache-2.0"
] | 13 | 2019-05-15T01:14:14.000Z | 2021-08-17T02:35:03.000Z | import numpy as np
from abc import ABC, abstractmethod
from numpy.random import RandomState
from scipy.sparse import csr_matrix, csc_matrix
from bayesian_decision_tree.utils import r2_series_generator, hypercube_to_hypersphere_surface
class HyperplaneOptimizationFunction:
    """
    The function to optimize for hyperplane trees. This is a function of `n_dim` variables representing
    the normal vector of a hyperplane in `n_dim` dimensions. Given such a hyperplane normal the function
    computes the optimum split location (i.e., the origin of the hyperplane) in the data such that the
    data likelihood is maximized.
    """

    def __init__(self, X, y, prior, compute_log_p_data_split, log_p_data_no_split, search_space_is_unit_hypercube, split_precision):
        self.X = X
        self.y = y
        self.prior = prior
        # Callback computing the log data likelihood for every candidate split index.
        self.compute_log_p_data_split = compute_log_p_data_split
        self.log_p_data_no_split = log_p_data_no_split
        # True when candidates come from the unit hypercube and must first be
        # mapped onto the (half) hypersphere surface.
        self.search_space_is_unit_hypercube = search_space_is_unit_hypercube
        # Minimum projection gap between two points for a split to be allowed.
        self.split_precision = split_precision

        # results of the optimization - to be set later during the actual optimization
        self.function_evaluations = 0
        self.best_log_p_data_split = log_p_data_no_split
        self.best_cumulative_distances = 0
        self.best_hyperplane_normal = None
        self.best_hyperplane_origin = None

    def compute(self, hyperplane_normal):
        # Evaluate (the negative of) the best achievable split likelihood for the
        # given hyperplane orientation, updating the best-so-far state as a side effect.
        self.function_evaluations += 1

        if self.search_space_is_unit_hypercube:
            hyperplane_normal = hypercube_to_hypersphere_surface(hyperplane_normal, half_hypersphere=True)

        # catch some special cases and normalize to unit length
        hyperplane_normal = np.nan_to_num(hyperplane_normal)
        if np.all(hyperplane_normal == 0):
            hyperplane_normal[0] = 1

        hyperplane_normal /= np.linalg.norm(hyperplane_normal)

        dense = isinstance(self.X, np.ndarray)
        if not dense and isinstance(self.X, csr_matrix):
            # column-oriented storage makes repeated column access cheaper
            self.X = csc_matrix(self.X)

        # compute distance of all points to the hyperplane: https://mathinsight.org/distance_point_plane
        projections = self.X @ hyperplane_normal  # up to an additive constant which doesn't matter to distance ordering
        sort_indices = np.argsort(projections)
        # NOTE(review): split_indices are computed from np.diff on the *unsorted*
        # projections but are used to index into y_sorted below — confirm the
        # diff should be taken on projections[sort_indices] instead.
        split_indices = 1 + np.where(np.abs(np.diff(projections)) > self.split_precision)[0]  # we can only split between *different* data points
        if len(split_indices) == 0:
            # no split possible along this dimension
            return -self.log_p_data_no_split

        y_sorted = self.y[sort_indices]

        # compute data likelihoods of all possible splits along this projection and find split with highest data likelihood
        n_dim = self.X.shape[1]
        log_p_data_split = self.compute_log_p_data_split(y_sorted, self.prior, n_dim, split_indices)
        i_max = log_p_data_split.argmax()
        if log_p_data_split[i_max] >= self.best_log_p_data_split:
            best_split_index = split_indices[i_max]
            p1 = self.X[sort_indices[best_split_index-1]]
            p2 = self.X[sort_indices[best_split_index]]
            if not dense:
                p1 = p1.toarray()[0]
                p2 = p2.toarray()[0]

            hyperplane_origin = 0.5 * (p1 + p2)  # middle between the points that are being split
            projections_with_origin = projections - np.dot(hyperplane_normal, hyperplane_origin)
            cumulative_distances = np.sum(np.abs(projections_with_origin))

            if log_p_data_split[i_max] > self.best_log_p_data_split:
                is_log_p_better_or_same_but_with_better_distance = True
            else:
                # accept new split with same log(p) only if it increases the cumulative distance of all points to the hyperplane
                is_log_p_better_or_same_but_with_better_distance = cumulative_distances > self.best_cumulative_distances

            if is_log_p_better_or_same_but_with_better_distance:
                self.best_log_p_data_split = log_p_data_split[i_max]
                self.best_cumulative_distances = cumulative_distances
                self.best_hyperplane_normal = hyperplane_normal
                self.best_hyperplane_origin = hyperplane_origin

        # negated because external optimizers minimize
        return -log_p_data_split[i_max]
class StrMixin:
    """Mixin that derives `__str__()` and `__repr__()` from the instance `__dict__`."""

    def __str__(self):
        pairs = ('{}={}'.format(name, value) for name, value in self.__dict__.items())
        return '{}[{}]'.format(type(self).__name__, ', '.join(pairs))

    def __repr__(self):
        return str(self)
class HyperplaneOptimizer(ABC, StrMixin):
    """Common base class of all hyperplane optimizers.

    Subclasses implement :meth:`solve`, which repeatedly evaluates a
    ``HyperplaneOptimizationFunction`` to search for a good split hyperplane.
    """

    def __init__(self, search_space_is_unit_hypercube):
        # True when candidate vectors live in the unit hypercube (and are
        # mapped to hypersphere directions later); False for raw normals.
        self.search_space_is_unit_hypercube = search_space_is_unit_hypercube

    @abstractmethod
    def solve(self, optimization_function):
        raise NotImplementedError
class ScipyOptimizer(HyperplaneOptimizer):
    """An optimizer using one of the scipy global optimizers, see [1].

    References
    ----------
    .. [1] https://docs.scipy.org/doc/scipy/reference/optimize.html#global-optimization
    """

    def __init__(self, solver_type, seed, **extra_solver_kwargs):
        super().__init__(search_space_is_unit_hypercube=True)

        # solver_type: a scipy global-optimizer class accepting (func, bounds, seed, ...)
        self.solver_type = solver_type
        self.seed = seed
        self.extra_solver_kwargs = extra_solver_kwargs

    def solve(self, optimization_function):
        # bounds for scipy optimizers: unit hypercube (will be mapped to
        # (half) hypersphere uniformly later on)
        X = optimization_function.X
        n_dim = X.shape[1]
        # n_dim-1 free parameters: a direction on the hypersphere surface has
        # one fewer degree of freedom than the ambient space
        unit_hypercube_bounds = np.vstack((np.zeros(n_dim-1), np.ones(n_dim-1))).T

        solver = self.solver_type(
            func=optimization_function.compute,
            bounds=unit_hypercube_bounds,
            seed=self.seed,
            **self.extra_solver_kwargs)
        # the solver's return value is ignored: results are accumulated on
        # optimization_function itself during the compute() calls
        solver.solve()
class RandomTwoPointOptimizer(HyperplaneOptimizer):
    """
    An optimizer randomly choosing two points of different classes to construct
    a bisecting hyperplane (experimental). Classification-only: it needs class
    labels to pick point pairs from.
    """

    def __init__(self, n_mc, seed):
        super().__init__(search_space_is_unit_hypercube=False)
        # number of Monte Carlo candidate hyperplanes to evaluate
        self.n_mc = n_mc
        self.seed = seed

    def solve(self, optimization_function):
        rand = RandomState(self.seed)
        X = optimization_function.X
        y = optimization_function.y
        if np.any(np.round(y) != y):
            raise TypeError('Cannot use {} for regression problems as there are no classes to pick points from'.format(
                RandomTwoPointOptimizer.__name__))

        dense = isinstance(X, np.ndarray)

        if len(set(y)) <= 1:
            # can't pick two points of different classes if there aren't at least two classes
            return

        # find indices of each class (assumes labels are 0..n_classes-1)
        n_classes = int(y.max()) + 1
        class_indices = [np.where(y == i)[0] for i in range(n_classes)]

        # evaluate 'n_mc' hyperplane normals passing through two random points form different classes
        for i in range(self.n_mc):
            indices1 = []
            indices2 = []
            # re-draw until both sampled classes actually contain points
            # (some labels in 0..n_classes-1 may be absent from y)
            while len(indices1) == 0 or len(indices2) == 0:
                class1 = rand.randint(0, n_classes)
                indices1 = class_indices[class1]

                class2 = class1
                while class2 == class1:
                    class2 = rand.randint(0, n_classes)

                indices2 = class_indices[class2]

            p1 = X[indices1[rand.randint(0, len(indices1))]]
            p2 = X[indices2[rand.randint(0, len(indices2))]]
            if not dense:
                p1 = p1.toarray()[0]
                p2 = p2.toarray()[0]

            # the connecting vector serves as the hyperplane normal
            normal = p2-p1
            if normal[0] < 0:
                normal *= -1  # make sure the first coordinate is positive to match the scipy search space
            optimization_function.compute(normal)
class RandomHyperplaneOptimizer(HyperplaneOptimizer):
    """Optimizer evaluating hyperplanes whose orientations are drawn uniformly
    at random, using isotropic Gaussian normal vectors (experimental)."""

    def __init__(self, n_mc, seed):
        super().__init__(search_space_is_unit_hypercube=False)
        self.n_mc = n_mc    # number of Monte Carlo samples to evaluate
        self.seed = seed    # RNG seed for reproducibility

    def solve(self, optimization_function):
        rng = RandomState(self.seed)
        n_dim = optimization_function.X.shape[1]
        # A standard-normal draw in n_dim dimensions has a rotation-invariant
        # direction, i.e. a uniformly random hyperplane orientation.
        for _ in range(self.n_mc):
            optimization_function.compute(rng.normal(0, 1, n_dim))
class QuasiRandomHyperplaneOptimizer(HyperplaneOptimizer):
    """Optimizer evaluating hyperplanes with quasi-random orientations drawn
    from the low-discrepancy R2 sequence, see
    http://extremelearning.com.au/unreasonable-effectiveness-of-quasirandom-sequences/
    """

    def __init__(self, n):
        super().__init__(search_space_is_unit_hypercube=True)
        self.n = n  # number of quasi-random candidates to evaluate

    def solve(self, optimization_function):
        # A direction on the hypersphere surface has one fewer degree of
        # freedom than the ambient data space.
        surface_dim = optimization_function.X.shape[1] - 1

        # quasi-random R2 sequence over the unit hypercube; each draw gets
        # mapped to a hypersphere direction inside the optimization function
        sequence = r2_series_generator(surface_dim)
        for _ in range(self.n):
            optimization_function.compute(next(sequence))
class OptunaOptimizer(HyperplaneOptimizer):
    """Optimizer driving the search with Optuna's TPE sampler over the unit
    hypercube. Optuna is imported lazily inside solve() so it is only a
    dependency when this optimizer is actually used."""

    def __init__(self, n_trials, seed):
        super().__init__(search_space_is_unit_hypercube=True)
        self.n_trials = n_trials
        self.seed = seed

    def solve(self, optimization_function):
        from optuna import create_study
        from optuna.logging import set_verbosity
        from optuna.samplers import TPESampler
        study = create_study(direction='minimize', sampler=TPESampler(self.seed))
        n_dim = optimization_function.X.shape[1]
        # one fewer free parameter than the data dimension (hypersphere surface)
        n_dim_surface = n_dim-1

        def objective(trial):
            # sample each hypercube coordinate independently in [0, 1]
            uniform = np.zeros(n_dim_surface)
            for i in range(n_dim_surface):
                uniform[i] = trial.suggest_uniform(f'uniform[{i}]', 0, 1)

            return optimization_function.compute(uniform)

        # silence optuna's per-trial logging; results live on optimization_function
        set_verbosity(0)
        study.optimize(objective, n_trials=self.n_trials)
class SimulatedAnnealingOptimizer(HyperplaneOptimizer):
    """
    A simple simulated annealing optimizer (experimental). Maintains a pool of
    the `n_keep` best candidates and perturbs them with a spread that is scaled
    by `spread_factor` each generation; stops after 50 generations without
    improvement.
    """

    def __init__(self, n_scan, n_keep, spread_factor, seed):
        super().__init__(search_space_is_unit_hypercube=True)
        self.n_scan = n_scan              # initial random candidates
        self.n_keep = n_keep              # pool size kept between generations
        self.spread_factor = spread_factor  # per-generation perturbation scaling
        self.seed = seed

    def solve(self, optimization_function):
        rand = RandomState(self.seed)
        X = optimization_function.X
        n_dim = X.shape[1]-1

        # candidates: map from objective value -> candidate vector
        candidates = {}
        no_improvements = 0
        best_value = np.inf
        f = 1
        while no_improvements < 50:
            if len(candidates) == 0:
                # first run: seed the pool with uniform random candidates
                for i in range(self.n_scan):
                    candidate = rand.uniform(0, 1, n_dim)
                    value = optimization_function.compute(candidate)
                    candidates[value] = candidate
            else:
                # evolution: perturb candidates spread evenly across the pool
                vectors = list(candidates.values())
                # per-coordinate ranges of the pool (computed but currently unused below)
                ranges = [np.max([v[i] for v in vectors]) - np.min([v[i] for v in vectors]) for i in range(n_dim)]
                values_sorted = sorted(candidates.keys())
                best_value = values_sorted[0]
                for i in range(self.n_keep):
                    i_candidate = i*len(values_sorted)//self.n_keep
                    candidate = candidates[values_sorted[i_candidate]]
                    # perturbation = ranges * rand.uniform(-1, 1, len(ranges))
                    perturbation = f * rand.uniform(-1, 1, len(ranges))
                    new_candidate = candidate + perturbation
                    # stay inside the unit hypercube search space
                    new_candidate = np.clip(new_candidate, 0, 1)
                    value = optimization_function.compute(new_candidate)
                    candidates[value] = new_candidate

                # anneal: shrink (or grow) the perturbation spread each generation
                f *= self.spread_factor

            # only keep the best candidates
            values_sorted = sorted(candidates.keys())
            values_sorted = values_sorted[:self.n_keep]
            if values_sorted[0] < best_value:
                no_improvements = 0
            else:
                no_improvements += 1

            candidates = {v: candidates[v] for v in values_sorted}
class GradientDescentOptimizer(HyperplaneOptimizer):
    """
    A simple gradient descent optimizer (experimental). Seeds `n_init` random
    candidates, then repeatedly takes numerical-gradient steps (with adaptive
    finite-difference delta and doubling line search) on the `n_keep` best.
    Stops after 3 generations without improvement.
    """

    def __init__(self, n_init, n_keep):
        super().__init__(search_space_is_unit_hypercube=True)
        self.n_init = n_init
        self.n_keep = n_keep

    def solve(self, optimization_function):
        X = optimization_function.X
        n_dim = X.shape[1]-1
        # NOTE(review): hard-coded RNG seed (666) — unlike the other optimizers
        # this one takes no seed parameter; confirm this is intentional.
        rand = RandomState(666)

        # candidates: map from objective value -> candidate vector
        candidates = {}
        no_improvements = 0
        best_value = np.inf
        start_delta = 1e-6
        while no_improvements < 3:
            if len(candidates) == 0:
                # first run: seed the pool with uniform random candidates
                for i in range(self.n_init):
                    candidate = rand.uniform(0, 1, n_dim)
                    value = optimization_function.compute(candidate)
                    candidates[value] = candidate
            else:
                # compute numerical gradient for each of the best vectors
                values_sorted = sorted(candidates.keys())
                best_value = values_sorted[0]
                for i in range(self.n_keep):
                    i_candidate = i*len(values_sorted)//self.n_keep
                    value = values_sorted[i_candidate]
                    candidate = candidates[value]

                    gradient = np.zeros(n_dim)
                    delta = start_delta
                    while True:
                        # forward finite differences, one coordinate at a time
                        delta_too_small = False
                        for i_dim in range(n_dim):
                            new_candidate = candidate.copy()
                            new_candidate[i_dim] += delta
                            if new_candidate[i_dim] > 1:
                                # flip the step direction to stay inside [0, 1]
                                delta *= -1
                                new_candidate[i_dim] = candidate[i_dim] + delta

                            new_value = optimization_function.compute(new_candidate)
                            gradient[i_dim] = (new_value - value) / delta
                            delta = np.abs(delta)
                            if gradient[i_dim] == 0:
                                # objective is piecewise constant at this scale;
                                # try again with a larger delta
                                delta_too_small = True
                                break

                        if delta_too_small:
                            delta *= 10
                            if delta >= 1:
                                # can't compute gradient, so give up
                                break
                        else:
                            break

                    if delta_too_small:
                        continue

                    # remember the working scale so the next candidate starts
                    # one order of magnitude below it
                    start_delta = delta / 10

                    # add gradient to vector: doubling line search along -gradient
                    lambda_ = 1e-6
                    best_new_candidate = candidate
                    best_new_value = value
                    while True:
                        new_candidate = candidate - lambda_ * gradient
                        new_candidate = np.clip(new_candidate, 0, 1)
                        new_value = optimization_function.compute(new_candidate)
                        if new_value < best_new_value:
                            lambda_ *= 2
                            best_new_candidate = new_candidate
                            best_new_value = new_value
                        else:
                            break

                    candidates[best_new_value] = best_new_candidate

            # only keep the best candidates
            values_sorted = sorted(candidates.keys())
            values_sorted = values_sorted[:self.n_keep]
            if values_sorted[0] < best_value:
                no_improvements = 0
            else:
                no_improvements += 1

            candidates = {v: candidates[v] for v in values_sorted}
| 36.76298 | 145 | 0.602542 |
141b249db0fc68c26de0c24e959729b99f51825f | 10,203 | py | Python | python/pyspark/sql/tests/test_catalog.py | lresende/spark | a0bd273bb04d9a5684e291ec44617972dcd4accd | [
"Apache-2.0"
] | 4 | 2020-01-17T06:23:43.000Z | 2022-02-05T18:01:45.000Z | python/pyspark/sql/tests/test_catalog.py | yangzan0816/spark | 812d0918a88ca5677c0bdbd8cffd765d53de50ca | [
"Apache-2.0"
] | 6 | 2020-10-21T13:44:10.000Z | 2022-03-31T05:04:46.000Z | python/pyspark/sql/tests/test_catalog.py | yangzan0816/spark | 812d0918a88ca5677c0bdbd8cffd765d53de50ca | [
"Apache-2.0"
] | 1 | 2020-07-16T03:50:14.000Z | 2020-07-16T03:50:14.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.sql.types import StructType, StructField, IntegerType
from pyspark.sql.utils import AnalysisException
from pyspark.testing.sqlutils import ReusedSQLTestCase
class CatalogTests(ReusedSQLTestCase):
    """Tests for the ``spark.catalog`` API: databases, tables, functions and
    columns.

    The deprecated ``unittest`` aliases (``assertEquals``,
    ``assertRaisesRegexp``) were replaced with their modern counterparts
    (``assertEqual``, ``assertRaisesRegex``); the aliases were removed in
    Python 3.12, so the old spellings fail outright there.
    """

    def test_current_database(self):
        """currentDatabase/setCurrentDatabase track the active database and
        reject databases that do not exist."""
        spark = self.spark
        with self.database("some_db"):
            self.assertEqual(spark.catalog.currentDatabase(), "default")
            spark.sql("CREATE DATABASE some_db")
            spark.catalog.setCurrentDatabase("some_db")
            self.assertEqual(spark.catalog.currentDatabase(), "some_db")
            self.assertRaisesRegex(
                AnalysisException,
                "does_not_exist",
                lambda: spark.catalog.setCurrentDatabase("does_not_exist"))

    def test_list_databases(self):
        """listDatabases reflects databases as they are created."""
        spark = self.spark
        with self.database("some_db"):
            databases = [db.name for db in spark.catalog.listDatabases()]
            self.assertEqual(databases, ["default"])
            spark.sql("CREATE DATABASE some_db")
            databases = [db.name for db in spark.catalog.listDatabases()]
            self.assertEqual(sorted(databases), ["default", "some_db"])

    def test_list_tables(self):
        """listTables covers SQL-created tables, Catalog.createTable tables
        and temporary views, scoped per database."""
        from pyspark.sql.catalog import Table
        spark = self.spark
        with self.database("some_db"):
            spark.sql("CREATE DATABASE some_db")
            with self.table("tab1", "some_db.tab2", "tab3_via_catalog"):
                with self.tempView("temp_tab"):
                    self.assertEqual(spark.catalog.listTables(), [])
                    self.assertEqual(spark.catalog.listTables("some_db"), [])
                    spark.createDataFrame([(1, 1)]).createOrReplaceTempView("temp_tab")
                    spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
                    spark.sql("CREATE TABLE some_db.tab2 (name STRING, age INT) USING parquet")
                    schema = StructType([StructField("a", IntegerType(), True)])
                    description = "this a table created via Catalog.createTable()"
                    spark.catalog.createTable(
                        "tab3_via_catalog", schema=schema, description=description)
                    tables = sorted(spark.catalog.listTables(), key=lambda t: t.name)
                    tablesDefault = sorted(
                        spark.catalog.listTables("default"), key=lambda t: t.name)
                    tablesSomeDb = sorted(
                        spark.catalog.listTables("some_db"), key=lambda t: t.name)
                    # The temp view shows up in listings for every database;
                    # permanent tables are only listed for their own database.
                    self.assertEqual(tables, tablesDefault)
                    self.assertEqual(len(tables), 3)
                    self.assertEqual(len(tablesSomeDb), 2)
                    self.assertEqual(tables[0], Table(
                        name="tab1",
                        database="default",
                        description=None,
                        tableType="MANAGED",
                        isTemporary=False))
                    self.assertEqual(tables[1], Table(
                        name="tab3_via_catalog",
                        database="default",
                        description=description,
                        tableType="MANAGED",
                        isTemporary=False))
                    self.assertEqual(tables[2], Table(
                        name="temp_tab",
                        database=None,
                        description=None,
                        tableType="TEMPORARY",
                        isTemporary=True))
                    self.assertEqual(tablesSomeDb[0], Table(
                        name="tab2",
                        database="some_db",
                        description=None,
                        tableType="MANAGED",
                        isTemporary=False))
                    self.assertEqual(tablesSomeDb[1], Table(
                        name="temp_tab",
                        database=None,
                        description=None,
                        tableType="TEMPORARY",
                        isTemporary=True))
                    self.assertRaisesRegex(
                        AnalysisException,
                        "does_not_exist",
                        lambda: spark.catalog.listTables("does_not_exist"))

    def test_list_functions(self):
        """listFunctions exposes built-ins plus registered temporary and
        permanent functions, scoped per database."""
        from pyspark.sql.catalog import Function
        spark = self.spark
        with self.database("some_db"):
            spark.sql("CREATE DATABASE some_db")
            functions = dict((f.name, f) for f in spark.catalog.listFunctions())
            functionsDefault = dict((f.name, f) for f in spark.catalog.listFunctions("default"))
            self.assertGreater(len(functions), 200)
            self.assertIn("+", functions)
            self.assertIn("like", functions)
            self.assertIn("month", functions)
            self.assertIn("to_date", functions)
            self.assertIn("to_timestamp", functions)
            self.assertIn("to_unix_timestamp", functions)
            self.assertIn("current_database", functions)
            self.assertEqual(functions["+"], Function(
                name="+",
                description=None,
                className="org.apache.spark.sql.catalyst.expressions.Add",
                isTemporary=True))
            self.assertEqual(functions, functionsDefault)
            with self.function("func1", "some_db.func2"):
                spark.catalog.registerFunction("temp_func", lambda x: str(x))
                spark.sql("CREATE FUNCTION func1 AS 'org.apache.spark.data.bricks'")
                spark.sql("CREATE FUNCTION some_db.func2 AS 'org.apache.spark.data.bricks'")
                newFunctions = dict((f.name, f) for f in spark.catalog.listFunctions())
                newFunctionsSomeDb = dict(
                    (f.name, f) for f in spark.catalog.listFunctions("some_db"))
                # Built-ins stay visible from every database ...
                self.assertTrue(set(functions).issubset(set(newFunctions)))
                self.assertTrue(set(functions).issubset(set(newFunctionsSomeDb)))
                # ... while permanent functions are database-scoped and the
                # temporary one is visible everywhere.
                self.assertIn("temp_func", newFunctions)
                self.assertIn("func1", newFunctions)
                self.assertNotIn("func2", newFunctions)
                self.assertIn("temp_func", newFunctionsSomeDb)
                self.assertNotIn("func1", newFunctionsSomeDb)
                self.assertIn("func2", newFunctionsSomeDb)
                self.assertRaisesRegex(
                    AnalysisException,
                    "does_not_exist",
                    lambda: spark.catalog.listFunctions("does_not_exist"))

    def test_list_columns(self):
        """listColumns returns column metadata for tables in the current and
        other databases, and raises for missing tables/databases."""
        from pyspark.sql.catalog import Column
        spark = self.spark
        with self.database("some_db"):
            spark.sql("CREATE DATABASE some_db")
            with self.table("tab1", "some_db.tab2"):
                spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
                spark.sql(
                    "CREATE TABLE some_db.tab2 (nickname STRING, tolerance FLOAT) USING parquet")
                columns = sorted(spark.catalog.listColumns("tab1"), key=lambda c: c.name)
                columnsDefault = sorted(
                    spark.catalog.listColumns("tab1", "default"), key=lambda c: c.name)
                self.assertEqual(columns, columnsDefault)
                self.assertEqual(len(columns), 2)
                self.assertEqual(columns[0], Column(
                    name="age",
                    description=None,
                    dataType="int",
                    nullable=True,
                    isPartition=False,
                    isBucket=False))
                self.assertEqual(columns[1], Column(
                    name="name",
                    description=None,
                    dataType="string",
                    nullable=True,
                    isPartition=False,
                    isBucket=False))
                columns2 = sorted(
                    spark.catalog.listColumns("tab2", "some_db"), key=lambda c: c.name)
                self.assertEqual(len(columns2), 2)
                self.assertEqual(columns2[0], Column(
                    name="nickname",
                    description=None,
                    dataType="string",
                    nullable=True,
                    isPartition=False,
                    isBucket=False))
                self.assertEqual(columns2[1], Column(
                    name="tolerance",
                    description=None,
                    dataType="float",
                    nullable=True,
                    isPartition=False,
                    isBucket=False))
                # "tab2" lives in some_db, so it is not visible from default.
                self.assertRaisesRegex(
                    AnalysisException,
                    "tab2",
                    lambda: spark.catalog.listColumns("tab2"))
                self.assertRaisesRegex(
                    AnalysisException,
                    "does_not_exist",
                    lambda: spark.catalog.listColumns("does_not_exist"))
if __name__ == "__main__":
    import unittest

    # Re-import the test cases by name so unittest discovery finds them.
    from pyspark.sql.tests.test_catalog import *  # noqa: F401

    # Prefer an XML-producing runner (for CI report collection) when the
    # optional xmlrunner package is installed; otherwise fall back to the
    # default text runner.
    try:
        import xmlrunner
    except ImportError:
        testRunner = None
    else:
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
    unittest.main(testRunner=testRunner, verbosity=2)
| 47.67757 | 97 | 0.556013 |
7347d6f3b36ff7ff1ef66a9e80509602acef9a50 | 11,560 | py | Python | app/pubmed/sink_db.py | aaronnorrish/PubMedConnections | dc17e141d94afe6d26a9b49b2183c06f3630e561 | [
"CC-BY-4.0"
] | 4 | 2022-03-09T05:20:46.000Z | 2022-03-13T11:18:58.000Z | app/pubmed/sink_db.py | aaronnorrish/PubMedConnections | dc17e141d94afe6d26a9b49b2183c06f3630e561 | [
"CC-BY-4.0"
] | null | null | null | app/pubmed/sink_db.py | aaronnorrish/PubMedConnections | dc17e141d94afe6d26a9b49b2183c06f3630e561 | [
"CC-BY-4.0"
] | 1 | 2022-03-09T05:21:53.000Z | 2022-03-09T05:21:53.000Z | """
Allows dumping data into SQLite to speed up iteration
over the ~33 million records.
"""
from typing import Optional
import atomics
import neo4j
from app.pubmed.model import Article, DBMetadata, MeshHeading
from app.utils import or_else
from config import NEO4J_URI, NEO4J_USER, NEO4J_PASSWORD, NEO4J_DATABASE
class IdCounter:
    """
    Increments IDs to use for database nodes. This is a bit dodgy,
    but we only ever add to the database from one process, so it
    should be fine. This is required as Neo4J does not have its
    own auto-incrementing IDs.
    """
    def __init__(self, initial_id: Optional[int] = None):
        """
        :param initial_id: first ID that ``next()`` should return; when
            omitted, the counter starts from the atomic's default (0).
        """
        # Bug fix: this used to be a class attribute, which made every
        # IdCounter instance share (and, via __init__, reset) one underlying
        # atomic counter. Each instance now owns its own 4-byte atomic int.
        self.id = atomics.atomic(width=4, atype=atomics.INT)
        if initial_id is not None:
            self.id.store(initial_id)

    def next(self) -> int:
        """Atomically return the current ID and advance the counter."""
        return self.id.fetch_inc()
class PubmedCacheConn:
    """
    Can be used to connect to the pubmed cache Neo4J database.

    Intended usage is as a context manager::

        with PubmedCacheConn("db_name") as conn:
            conn.insert_article_batch(articles)
    """
    def __init__(self, database: Optional[str] = None, *, reset_on_connect: bool = False):
        # Target database name; falls back to the configured default.
        self.database: str = database if database is not None else NEO4J_DATABASE
        # Driver is only created inside __enter__.
        self.driver: Optional[neo4j.Driver] = None
        # When True, __enter__ drops and recreates the database.
        self.reset_on_connect: bool = reset_on_connect
        # We store metadata about the database within a Metadata node.
        self.metadata: Optional[DBMetadata] = None
        # We maintain our own counters for the IDs of authors. This is required as we
        # cannot trust the IDs generated by Neo4J as they can change, nor the IDs from
        # PubMed as they often don't exist.
        self.author_id_counter: Optional[IdCounter] = None
    def __enter__(self):
        """Connect, (re)create the database, and prepare constraints/metadata."""
        if self.driver is not None:
            raise ValueError("Already created connection!")
        self.driver = neo4j.GraphDatabase.driver(NEO4J_URI, auth=(NEO4J_USER, NEO4J_PASSWORD))
        # Create a default connection first to create the database.
        # NOTE(review): the database name is interpolated into the Cypher text;
        # it comes from trusted config, but it is not escaped — confirm names
        # are always plain identifiers.
        with self.driver.session() as session:
            if self.reset_on_connect:
                session.run("CREATE OR REPLACE DATABASE {}".format(self.database)).consume()
            else:
                session.run("CREATE DATABASE {} IF NOT EXISTS".format(self.database)).consume()
        # Create a connection to the database to create its constraints and grab metadata.
        with self.new_session() as session:
            self._create_constraints(session)
            self._fetch_metadata(session)
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Close the driver (idempotent: safe if never connected)."""
        if self.driver is None:
            return
        self.driver.close()
        self.driver = None
    def new_session(self) -> neo4j.Session:
        """Open a new session bound to this connection's database."""
        return self.driver.session(database=self.database)
    def _create_constraints(self, session: neo4j.Session):
        """
        This method creates all the constraints required for the database.
        Uniqueness constraints implicitly create an index for the constraint as well.
        """
        # MeSH Headings.
        session.run(
            "CREATE CONSTRAINT unique_mesh_heading_ids IF NOT EXISTS "
            "FOR (h:MeshHeading) REQUIRE h.id IS UNIQUE"
        ).consume()
        # Authors.
        session.run(
            "CREATE CONSTRAINT unique_author_names IF NOT EXISTS "
            "FOR (a:Author) REQUIRE a.name IS UNIQUE"
        ).consume()
        session.run(
            "CREATE CONSTRAINT unique_author_ids IF NOT EXISTS "
            "FOR (a:Author) REQUIRE a.id IS UNIQUE"
        ).consume()
        # Journals.
        session.run(
            "CREATE CONSTRAINT unique_journal_ids IF NOT EXISTS "
            "FOR (j:Journal) REQUIRE j.id IS UNIQUE"
        ).consume()
        # Articles.
        session.run(
            "CREATE CONSTRAINT unique_article_pmids IF NOT EXISTS "
            "FOR (a:Article) REQUIRE a.pmid IS UNIQUE"
        ).consume()
        # We don't want to start inserting data until the indices are created.
        session.run("CALL db.awaitIndexes").consume()
    def _fetch_metadata(self, session: neo4j.Session):
        """
        Fetches the values to use for the author and article counters from the database.
        """
        # Continue numbering authors after the highest existing Author id.
        self.author_id_counter = IdCounter(self._fetch_max_id(session, "Author") + 1)
    def _fetch_max_id(self, session: neo4j.Session, label: str) -> int:
        """
        Fetches the maximum ID of any nodes matching the given label, or 0 if no nodes could be found.
        """
        result = session.run(
            """
            MATCH (n:{})
            RETURN max(n.id)
            """.format(label)
        ).single()[0]
        # If there are no nodes, then None will be returned.
        return 0 if result is None else result
    def insert_article_batch(self, articles: list[Article], *, max_batch_size=10000):
        """
        Inserts a batch of articles into the database, including their authors.

        The list is split into sub-batches of at most ``max_batch_size``
        articles, each committed in its own write transaction.
        """
        if len(articles) == 0:
            return
        # We batch the articles as otherwise we can hit maximum memory issues with Neo4J...
        required_batches = (len(articles) + max_batch_size - 1) // max_batch_size
        articles_per_batch = (len(articles) + required_batches - 1) // required_batches
        total_articles_inserted = 0
        for batch_no in range(required_batches):
            start_index = batch_no * articles_per_batch
            # The last batch absorbs any remainder.
            end_index = len(articles) if batch_no == required_batches - 1 else (batch_no + 1) * articles_per_batch
            batch = articles[start_index:end_index]
            total_articles_inserted += len(batch)
            with self.new_session() as session:
                session.write_transaction(self._insert_article_batch, batch)
        # Just to be sure...
        assert total_articles_inserted == len(articles)
    def _insert_article_batch(self, tx: neo4j.Transaction, articles: list[Article]):
        """
        Inserts a batch of articles into the database, including their authors.

        Runs a single parameterised Cypher statement that, per article:
        merges the journal, replaces any older article node with the same
        PMID, then links references, MeSH headings and authors.
        """
        # Flatten the Article objects into plain dicts for the query parameter.
        articles_data = []
        for article in articles:
            authors_data = []
            for author in article.authors:
                authors_data.append({
                    "id": self.author_id_counter.next(),
                    "name": author.full_name,
                    "is_collective": author.is_collective
                })
            journal = article.journal
            articles_data.append({
                "pmid": article.pmid,
                "date": article.date,
                "title": article.title,
                "journal": {
                    "id": journal.identifier,
                    "title": journal.title,
                    "volume": journal.volume,
                    "issue": journal.issue
                },
                "authors": authors_data,
                "refs": article.reference_pmids,
                "mesh_desc_ids": article.mesh_descriptor_ids
            })
        tx.run(
            """
            // Loop through all the articles we want to insert.
            UNWIND $articles AS article WITH article, article.journal as journal, article.authors as authors
            // Make sure the journal exists.
            CALL {
                WITH journal
                MERGE (journal_node:Journal {id: journal.id})
                ON CREATE
                    SET journal_node.title = journal.title
                ON MATCH
                    SET journal_node.title = journal.title
                RETURN journal_node
            }
            // Delete any old version of the current article.
            CALL {
                WITH article
                MATCH (article_node:Article {pmid: article.pmid})
                DETACH DELETE article_node
            }
            // Insert the article and its relationship to the journal.
            CALL {
                WITH article, journal, journal_node
                CREATE (article_node:Article {
                    pmid: article.pmid,
                    title: article.title,
                    date: article.date
                })-[:PUBLISHED_IN {
                    volume: journal.volume,
                    issue: journal.issue
                }]->(journal_node)
                RETURN article_node
            }
            // Add the references from this article to other articles.
            CALL {
                WITH article_node, article
                UNWIND article.refs as ref_pmid
                MATCH (ref_node:Article)
                WHERE ref_node.pmid = ref_pmid
                CREATE (article_node)-[:REFERENCES]->(ref_node)
            }
            // Add the mesh headings of the article.
            CALL {
                WITH article_node, article
                UNWIND article.mesh_desc_ids as mesh_id
                MATCH (mesh_node:MeshHeading)
                WHERE mesh_node.id = mesh_id
                CREATE (article_node)-[:CATEGORISED_BY]->(mesh_node)
            }
            // Add all of the authors of the article.
            UNWIND authors AS author
            CALL {
                WITH author
                MERGE (author_node:Author {name: author.name})
                ON CREATE
                    SET
                        author_node.id = author.id,
                        author_node.is_collective = author.is_collective
                RETURN author_node
            }
            CREATE (author_node)-[:AUTHOR_OF]->(article_node)
            """,
            articles=articles_data
        ).consume()
    def insert_mesh_heading_batch(self, headings: list[MeshHeading], *, max_batch_size=500):
        """
        Inserts a batch of headings into the database.

        Mirrors insert_article_batch: splits into sub-batches, one write
        transaction per sub-batch.
        """
        if len(headings) == 0:
            return
        # We batch the articles as otherwise we can hit maximum memory issues with Neo4J...
        required_batches = (len(headings) + max_batch_size - 1) // max_batch_size
        headings_per_batch = (len(headings) + required_batches - 1) // required_batches
        total_headings_inserted = 0
        for batch_no in range(required_batches):
            start_index = batch_no * headings_per_batch
            # The last batch absorbs any remainder.
            end_index = len(headings) if batch_no == required_batches - 1 else (batch_no + 1) * headings_per_batch
            batch = headings[start_index:end_index]
            total_headings_inserted += len(batch)
            with self.new_session() as session:
                session.write_transaction(self._insert_mesh_heading_batch, batch)
        # Just to be sure...
        assert total_headings_inserted == len(headings)
    def _insert_mesh_heading_batch(self, tx: neo4j.Transaction, headings: list[MeshHeading]):
        """
        Inserts a batch of headings into the database.

        MERGE keyed on the descriptor id makes re-imports idempotent; name
        and tree numbers are only set when the node is first created.
        """
        headings_data = []
        for heading in headings:
            headings_data.append({
                "desc_id": heading.descriptor_id,
                "name": heading.name,
                "tree_numbers": heading.tree_numbers
            })
        tx.run(
            """
            UNWIND $headings AS heading
            MERGE (heading_node:MeshHeading {id: heading.desc_id})
            ON CREATE
                SET
                    heading_node.name = heading.name,
                    heading_node.tree_numbers = heading.tree_numbers
            """,
            headings=headings_data
        ).consume()
| 38.151815 | 114 | 0.571107 |
a404c9ce45a0079672e5115ad46008e541366b21 | 806 | py | Python | tests/request_hooks.py | shapiy/flask-graphite | 053274eb1a998e6bd9bd1a3ecd80c64195eaccf0 | [
"MIT"
] | 15 | 2018-03-30T14:35:28.000Z | 2020-07-31T17:14:29.000Z | tests/request_hooks.py | shapiy/flask-graphite | 053274eb1a998e6bd9bd1a3ecd80c64195eaccf0 | [
"MIT"
] | 15 | 2018-04-02T17:47:54.000Z | 2019-06-03T08:59:23.000Z | tests/request_hooks.py | shapiy/flask-graphite | 053274eb1a998e6bd9bd1a3ecd80c64195eaccf0 | [
"MIT"
] | 1 | 2019-06-01T13:55:14.000Z | 2019-06-01T13:55:14.000Z | import pytest
from flask import Response
from flask_graphite.request_hooks import default_hooks, request_status_type
@pytest.fixture(params=default_hooks, ids=[x.name for x in default_hooks])
def hook(mocker, request):
    # Parametrized over every default request hook. Each hook's function is
    # replaced by a mock returning a fixed ("foo", 1) metric tuple so tests
    # can assert the hook was invoked without a real Graphite backend.
    _hook = request.param
    mocker.patch.object(_hook, "function")
    _hook.function.return_value = ("foo", 1)
    yield _hook
@pytest.fixture
def plugged_client(plugged_app):
    # Flask test client for an app with the Graphite extension installed
    # (the plugged_app fixture is presumably defined in conftest — confirm).
    return plugged_app.test_client()
def test_dont_modify_response(plugged_client, hook):
    """Issuing a request must trigger the (mocked) hook function."""
    plugged_client.get("/foo/42/bar")
    assert hook.function.called
def test_status_type():
    """The status-type hook buckets responses into an HTTP status family
    (2XX, 5XX, ...) embedded in the metric name."""
    make_metric = request_status_type.function
    for status_code, family in ((203, "2XX"), (500, "5XX")):
        metric, _value = make_metric(Response(None, status_code))
        assert family in metric
| 24.424242 | 75 | 0.73201 |
d1b3083dedf5f40e7343f0dc0b1552432ed8508b | 4,235 | py | Python | cronjob/python/Loan_bak_-13122019_1118AM/importSibs.py | heodat234/worldfone4xs_ibm | 6b508c3d99c48c5b8c9f1d979c356fc573e999a2 | [
"MIT"
] | null | null | null | cronjob/python/Loan_bak_-13122019_1118AM/importSibs.py | heodat234/worldfone4xs_ibm | 6b508c3d99c48c5b8c9f1d979c356fc573e999a2 | [
"MIT"
] | null | null | null | cronjob/python/Loan_bak_-13122019_1118AM/importSibs.py | heodat234/worldfone4xs_ibm | 6b508c3d99c48c5b8c9f1d979c356fc573e999a2 | [
"MIT"
] | null | null | null | #!/usr/bin/python3.6
# -*- coding: utf-8 -*-
log = open("/var/www/html/worldfone4xs_ibm/cronjob/python/Telesales/importSibs.txt","a")
import ftplib
import calendar
import time
import sys
import os
sys.path.insert(1, '/var/www/html/worldfone4xs_ibm/cronjob/python')
from ftp import Ftp
from pprint import pprint
from mongod import Mongodb
from excel import Excel
from datetime import datetime
from datetime import date
from bson import ObjectId
try:
filename = 'ZACCF full.csv'
mongodb = Mongodb("worldfone4xs")
_mongodb = Mongodb("_worldfone4xs")
excel = Excel()
ftp = Ftp()
now = datetime.now()
sibsColumns = []
sibsConverters = {}
ftp.connect()
ftp.downLoadFile("/var/www/html/worldfone4xs_ibm/upload/csv/ftp/" + filename, filename)
ftp.close()
path, filename = os.path.split("/var/www/html/worldfone4xs_ibm/upload/csv/ftp/" + filename)
importLogInfo = {
'collection' : "Sibs",
'begin_import' : time.time(),
'file_name' : filename,
'file_path' : path + '/' + filename,
'source' : 'ftp',
'file_type' : 'csv',
'status' : 2,
'created_by' : 'system'
}
importLogId = mongodb.insert(MONGO_COLLECTION='2_Import', insert_data=importLogInfo)
modelsSibs = _mongodb.get(MONGO_COLLECTION='Model', WHERE={'collection': '2_Zaccf'}, SORT=[('index', 1)], SELECT=['index', 'collection', 'field', 'type'])
for model in modelsSibs:
sibsColumns.append(model['field'])
if(model['type'] == 'string'):
sibsConverters[model['field']] = str
zaccfs = excel.getDataCSV(file_path=importLogInfo['file_path'], header=0, names=sibsColumns, converters=sibsConverters)
zaccfList = zaccfs.to_dict('records')
insertData = []
updateData = []
errorData = []
temp = {}
countList = 0
for idx, zaccf in enumerate(zaccfList):
if zaccf['account_no'] not in (None, '') and zaccf['cif'] not in (None, '') and zaccf['cus_name'] not in (None, ''):
result = True
checkSibs = mongodb.getOne(MONGO_COLLECTION='2_Sibs', WHERE={'account_no': zaccf['account_no']}, SELECT=['account_no'])
zaccf['import_id'] = str(importLogId)
try:
zaccf['advance'] = float(zaccf['advance'])
except Exception as errorConvertDM:
zaccf['error_cell'] = 'DM' + str(idx + 2)
zaccf['type'] = 'number'
zaccf['error_mesg'] = 'Sai kiểu dữ liệu nhập'
zaccf['result'] = 'error'
result = False
try:
zaccf['current_balance'] = float(zaccf['current_balance'])
except Exception as errorConvertDS:
zaccf['error_cell'] = 'DS' + str(idx + 2)
zaccf['type'] = 'number'
zaccf['error_mesg'] = 'Sai kiểu dữ liệu nhập'
zaccf['result'] = 'error'
result = False
if(result == False):
errorData.append(zaccf)
else:
if(checkSibs is None):
insertData.append(zaccf)
else:
updateData.append(zaccf)
zaccf['result'] = 'success'
result = True
else:
continue
if(len(errorData) > 0):
mongodb.batch_insert("2_Sibs_result", errorData)
else:
if len(updateData) > 0:
for upData in updateData:
mongodb.update(MONGO_COLLECTION='2_Sibs', WHERE={'account_no': upData['account_no']}, VALUE=upData)
mongodb.batch_insert("2_Sibs_result", updateData)
if len(insertData) > 0:
mongodb.batch_insert(MONGO_COLLECTION="2_Sibs", insert_data=insertData)
mongodb.batch_insert("2_Sibs_result", insert_data=insertData)
mongodb.update(MONGO_COLLECTION='2_Import', WHERE={'_id': importLogId}, VALUE={'status': 1, 'complete_import': time.time()})
except Exception as e:
log.write(now.strftime("%d/%m/%Y, %H:%M:%S") + ': ' + str(e) + '\n')
| 37.149123 | 159 | 0.56647 |
609102595eefe3c072f7d01390faf61bcf87426a | 8,528 | py | Python | girder/queues/queues/models/queue.py | bnmajor/mongochemserver | aa76ab6e7f749c3e893f27e208984b6ed2d4b2b5 | [
"BSD-3-Clause"
] | 14 | 2015-05-04T16:40:48.000Z | 2021-07-13T08:00:30.000Z | girder/queues/queues/models/queue.py | bnmajor/mongochemserver | aa76ab6e7f749c3e893f27e208984b6ed2d4b2b5 | [
"BSD-3-Clause"
] | 88 | 2015-07-24T07:58:43.000Z | 2021-02-23T19:37:13.000Z | girder/queues/queues/models/queue.py | bnmajor/mongochemserver | aa76ab6e7f749c3e893f27e208984b6ed2d4b2b5 | [
"BSD-3-Clause"
] | 8 | 2015-06-12T20:54:39.000Z | 2021-04-09T01:07:15.000Z | import sys
from bson.objectid import ObjectId, InvalidId
from girder import logger
from girder.constants import AccessType
from girder.models.model_base import AccessControlledModel
from girder.models.model_base import ValidationException
from girder.models.user import User as UserModel
from girder.utility.model_importer import ModelImporter
import cumulus
from cumulus.taskflow import load_class, TaskFlowState
from taskflow.models.taskflow import Taskflow as TaskflowModel
# Taskflow states in which the flow does NOT occupy a running slot in a
# queue: either it never started or it already reached a terminal state.
TASKFLOW_NON_RUNNING_STATES = [
    TaskFlowState.CREATED,
    TaskFlowState.COMPLETE,
    TaskFlowState.ERROR,
    TaskFlowState.UNEXPECTEDERROR,
    TaskFlowState.TERMINATED,
    TaskFlowState.DELETED
]
class QueueType(object):
    """Scheduling disciplines a queue can use for its pending taskflows."""
    FIFO = 'fifo'  # first in, first out: append to the end of 'pending'
    LIFO = 'lifo'  # last in, first out: push to the front of 'pending'
    TYPES = [FIFO, LIFO]
class TaskStatus(object):
    """Status of a taskflow as tracked inside a queue document."""
    PENDING = 'pending'  # queued, waiting for a running slot
    RUNNING = 'running'  # currently occupying one of the queue's slots
class Queue(AccessControlledModel):
    """Girder model for a taskflow queue.

    A queue document tracks a FIFO/LIFO list of pending taskflows plus the
    number currently running, capped by ``maxRunning`` (0 = unlimited).
    State transitions are done with atomic MongoDB update operators so
    concurrent pops/finishes stay consistent.
    """
    def initialize(self):
        self.name = 'queues'
        self.ensureIndices(['name'])
        # Only these document properties may be changed via apply_updates().
        self.mutable_props = ['maxRunning']
    def validate(self, queue):
        """Reject a NEW queue whose (name, owner) pair already exists."""
        name = queue['name']
        userId = queue['userId']
        # Do we already have this name?
        if queue.get('_id') is None:
            if len(list(self.find(name=name, owner=userId, force=True))) > 0:
                raise ValidationException('"%s" has already been taken.' % name, field='name')
        return queue
    def find(self, name=None, owner=None, offset=0, limit=None, sort=None, user=None, force=False):
        """Yield queue documents, optionally filtered by name and/or owner.

        With ``force=True`` permission filtering (and offset/limit) is
        skipped and the raw cursor is streamed.
        """
        query = {}
        if name is not None:
            query['name'] = name
        if owner is not None:
            if not isinstance(owner, ObjectId):
                try:
                    owner = ObjectId(owner)
                except InvalidId:
                    raise ValidationException('Invalid ObjectId: %s' % owner,
                                              field='owner')
            query['userId'] = owner
        cursor = super(Queue, self).find(query=query, sort=sort, user=user)
        if not force:
            for r in self.filterResultsByPermission(cursor=cursor, user=user,
                                                    level=AccessType.READ,
                                                    limit=limit, offset=offset):
                yield r
        else:
            for r in cursor:
                yield r
    def create(self, name, type_, max_running, user=None):
        """Create and persist a new, empty queue owned by ``user``."""
        queue = {
            'name': name,
            'type': type_,
            'nRunning': 0,
            'maxRunning': max_running,
            'pending': [],
            'taskflows': {}
        }
        userId = None
        if user is not None:
            userId = user['_id']
        queue['userId'] = userId
        self.setUserAccess(queue, user=user, level=AccessType.ADMIN)
        return self.save(queue)
    def apply_updates(self, queue, model_updates, user):
        """Apply whitelisted property updates (see ``mutable_props``)."""
        query = {
            '_id': queue['_id']
        }
        updates = {}
        for prop in model_updates:
            if prop in self.mutable_props:
                updates.setdefault('$set', {})[prop] = model_updates[prop]
        if updates:
            super(Queue, self).update(query, updates, multi=False)
            # Reload so the caller sees the persisted state.
            queue = self.load(queue['_id'], user=user, level=AccessType.READ)
        return queue
    def add(self, queue, taskflow, params, user):
        """Enqueue a taskflow (no-op if it is already in this queue).

        FIFO appends to the pending list; LIFO pushes to the front.
        """
        # The $exists guard makes the push atomic and idempotent.
        query = {
            '_id': queue['_id'],
            'taskflows.%s' % taskflow['_id']: {
                '$exists': False
            }
        }
        payload = {
            'taskflowId': taskflow['_id'],
            'startParams': params
        }
        if queue['type'] == QueueType.FIFO:
            push = {
                'pending': payload
            }
        else:
            push = {
                'pending': {
                    '$each': [ payload ],
                    '$position': 0
                }
            }
        updates = {
            '$push': push,
            '$set': {
                'taskflows.%s' % taskflow['_id']: TaskStatus.PENDING
            }
        }
        self.update(query, updates)
        queue = self.load(queue['_id'], user=user, level=AccessType.READ)
        return queue
    def pop(self, queue, limit, user):
        """Dequeue up to ``limit`` taskflows and start each of them."""
        queue, popped = self._pop_many(queue, limit, user)
        for task in popped:
            self._start_taskflow(queue['_id'], task['taskflowId'], task['start_params'], user)
        return queue
    def finish(self, queue, taskflow, user):
        """Release the running slot held by ``taskflow`` in this queue."""
        # Only matches while the taskflow is marked RUNNING, so a double
        # finish does not decrement nRunning twice.
        query = {
            '_id': queue['_id'],
            'taskflows.%s' % taskflow['_id']: TaskStatus.RUNNING
        }
        updates = {
            '$inc': {
                'nRunning': -1
            },
            '$unset': {
                'taskflows.%s' % taskflow['_id']: ""
            }
        }
        self.update(query, updates)
        queue = self.load(queue['_id'], user=user, level=AccessType.READ)
        return queue
    def _pop_one(self, queue, user):
        """Atomically claim one pending taskflow if a slot is free.

        Returns ``(queue, taskflow_id, start_params)``; the ids are None
        when nothing could be popped (queue gone, full, or empty).
        """
        max_running = queue['maxRunning']
        if max_running == 0:
            # 0 means "no limit".
            max_running = sys.maxsize
        query = {
            '_id': queue['_id'],
            'nRunning': {
                '$lt': max_running
            },
            '$where': 'this.pending.length > 0'
        }
        updates = {
            '$inc': {
                'nRunning': 1
            },
            '$pop': {
                'pending': -1
            }
        }
        # queue is the document BEFORE the updates
        queue = self.collection.find_one_and_update(query, updates)
        taskflow_id = None
        start_params = None
        if queue is None:
            return queue, taskflow_id, start_params
        n_running = queue['nRunning']
        pending = queue['pending']
        if (n_running >= max_running or len(pending) == 0):
            return queue, taskflow_id, start_params
        # Mirror the server-side $pop on the local (pre-update) document.
        task = pending.pop(0)
        taskflow_id = task['taskflowId']
        start_params = task['startParams']
        query = {
            '_id': queue['_id']
        }
        updates = {
            '$set': {
                'taskflows.%s' % taskflow_id: TaskStatus.RUNNING
            }
        }
        self.update(query, updates)
        queue = self.load(queue['_id'], user=user, level=AccessType.READ)
        return queue, taskflow_id, start_params
    def _pop_many(self, queue, limit, user):
        """Claim up to ``limit`` pending taskflows; returns (queue, popped)."""
        popped = []
        queue_, taskflow_id, start_params = self._pop_one(queue, user)
        while taskflow_id is not None and len(popped) < limit:
            queue = queue_
            popped.append({'taskflowId': taskflow_id, 'start_params': start_params})
            queue_, taskflow_id, start_params = self._pop_one(queue, user)
        return queue, popped
    def _start_taskflow(self, queue_id, taskflow_id, params, user):
        """Tag the taskflow with its queue and kick off its workflow class."""
        taskflow = {"_id": taskflow_id}
        # The queueId in the taskflow's meta lets status-update events route
        # back to this queue (see on_taskflow_status_update).
        updates = {"meta": {"queueId": queue_id}}
        taskflow = TaskflowModel().update_taskflow(user, taskflow, updates)
        constructor = load_class(taskflow['taskFlowClass'])
        token = ModelImporter.model('token').createToken(user=user, days=7)
        workflow = constructor(
            id=str(taskflow['_id']),
            girder_token=token['_id'],
            girder_api_url=cumulus.config.girder.baseUrl
        )
        if params is None:
            params = {}
        workflow.start(**params)
        return workflow
def cleanup_failed_taskflows():
    """Release queue slots held by taskflows that a queue still marks as
    RUNNING but that are no longer running according to the taskflow model
    (e.g. after a crash or unclean shutdown)."""
    queue_model = Queue()
    # Materialize the cursor up front, since finish() mutates queue documents
    # while we iterate.
    for queue in list(queue_model.find(limit=sys.maxsize, force=True)):
        user = UserModel().load(queue['userId'], force=True)
        if user is None:
            # Orphaned queue: we cannot act on behalf of a missing user.
            continue
        for taskflow_id, status in queue['taskflows'].items():
            if status != TaskStatus.RUNNING:
                continue
            taskflow = TaskflowModel().load(taskflow_id, force=True)
            if taskflow['status'] in TASKFLOW_NON_RUNNING_STATES:
                logger.warning("Removing non-running taskflow {} from the queue {}".format(taskflow_id, queue["_id"]))
                queue_model.finish(queue, taskflow, user)
def on_taskflow_status_update(event):
    """Event hook: when a queued taskflow leaves the running state, free its
    slot and start as many pending taskflows as the queue now allows."""
    taskflow = event.info['taskflow']
    queue_id = taskflow.get('meta', {}).get('queueId')
    if queue_id is None:
        # Taskflow was not started through a queue; nothing to do.
        return
    if taskflow['status'] not in TASKFLOW_NON_RUNNING_STATES:
        # Still running (or transitioning between running states).
        return
    queue_model = Queue()
    queue = queue_model.load(queue_id, force=True)
    user = UserModel().load(queue['userId'], force=True)
    queue_model.finish(queue, taskflow, user)
    queue_model.pop(queue, sys.maxsize, user)
| 29.922807 | 122 | 0.544676 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.