max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 7 115 | max_stars_count int64 101 368k | id stringlengths 2 8 | content stringlengths 6 1.03M |
|---|---|---|---|---|
isotope/run_tests.py | daixiang0/tools | 264 | 79576 | #!/usr/bin/env python3
# Copyright Istio Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from runner import cluster, config as cfg, consts, entrypoint, mesh, pipeline
def main(args: argparse.Namespace) -> None:
    """Set up the test cluster and run one pipeline per topology/environment.

    Expects ``args`` to carry ``log_level``, ``config_path`` and
    ``helm_values`` (see ``parse_args``).
    """
    logging.basicConfig(
        level=getattr(logging, args.log_level),
        format='%(levelname)s\t> %(message)s')

    config = cfg.from_toml_file(args.config_path)

    # Create the cluster only when one is not already running.
    cluster.set_up_if_not_exists(
        config.cluster_project_id, config.cluster_name, config.cluster_zones,
        config.cluster_version, config.server_machine_type,
        config.server_disk_size_gb, config.server_num_nodes,
        config.client_machine_type, config.client_disk_size_gb)

    # Run the full benchmark pipeline for every (topology, environment) pair.
    for topology in config.topology_paths:
        for environment_name in config.environments:
            service_name = entrypoint.extract_name(topology)
            environment = mesh.for_state(
                environment_name, service_name,
                consts.SERVICE_GRAPH_NAMESPACE, config, args.helm_values)
            pipeline.run(topology, environment, config.server_image,
                         config.client_image, config.istio_archive_url,
                         config.client_qps, config.client_duration,
                         config.client_num_conc_conns, config.labels())
def parse_args() -> argparse.Namespace:
    """Build the command-line parser and parse ``sys.argv``.

    Positional arguments: the TOML config path and the helm-values file.
    ``--log_level`` defaults to DEBUG.
    """
    parser = argparse.ArgumentParser()
    for positional in ('config_path', 'helm_values'):
        parser.add_argument(positional, type=str)
    parser.add_argument(
        '--log_level',
        type=str,
        default='DEBUG',
        choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'])
    return parser.parse_args()
# Script entry point: parse CLI arguments, then run the full test pipeline.
if __name__ == '__main__':
    args = parse_args()
    main(args)
|
tests/common/factories/__init__.py | tgiardina/rpp-h | 2,103 | 79580 | """Factory classes for easily generating test objects."""
from .activation import Activation
from .annotation import Annotation
from .annotation_moderation import AnnotationModeration
from .auth_client import AuthClient, ConfidentialAuthClient
from .auth_ticket import AuthTicket
from .authz_code import AuthzCode
from .base import set_session
from .document import Document, DocumentMeta, DocumentURI
from .feature import Feature
from .flag import Flag
from .group import Group, OpenGroup, RestrictedGroup
from .group_scope import GroupScope
from .job import Job, SyncAnnotationJob
from .organization import Organization
from .setting import Setting
from .token import DeveloperToken, OAuth2Token
from .user import User
from .user_identity import UserIdentity
# Public factory API re-exported from the submodules imported above.
# Kept in ASCII-sorted order (uppercase names before lowercase).
__all__ = (
    "Activation",
    "Annotation",
    "AnnotationModeration",
    "AuthClient",
    "AuthTicket",
    "AuthzCode",
    "ConfidentialAuthClient",
    "DeveloperToken",
    "Document",
    "DocumentMeta",
    "DocumentURI",
    "Feature",
    "Flag",
    "Group",
    "GroupScope",
    "Job",
    "OAuth2Token",
    "OpenGroup",
    "Organization",
    "RestrictedGroup",
    "Setting",
    "SyncAnnotationJob",
    "User",
    "UserIdentity",
    "set_session",
)
|
Filters/Geometry/Testing/Python/geomFilter.py | cclauss/VTK | 1,755 | 79587 | #!/usr/bin/env python
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
import sys
# create pipeline - structured grid
#
# Read a PLOT3D multi-block dataset; block 0 is the structured grid used as
# input for the first two geometry-filter pipelines.
pl3d = vtk.vtkMultiBlockPLOT3DReader()
pl3d.SetXYZFileName("" + str(VTK_DATA_ROOT) + "/Data/combxyz.bin")
pl3d.SetQFileName("" + str(VTK_DATA_ROOT) + "/Data/combq.bin")
pl3d.SetScalarFunctionNumber(100)
pl3d.SetVectorFunctionNumber(202)
pl3d.Update()
output = pl3d.GetOutput().GetBlock(0)
# gActor: geometry filter over the full structured grid, no clipping.
gf = vtk.vtkGeometryFilter()
gf.SetInputData(output)
gMapper = vtk.vtkPolyDataMapper()
gMapper.SetInputConnection(gf.GetOutputPort())
gMapper.SetScalarRange(output.GetScalarRange())
gActor = vtk.vtkActor()
gActor.SetMapper(gMapper)
# g2Actor: same input, but with extent, point-id, and cell-id clipping on.
gf2 = vtk.vtkGeometryFilter()
gf2.SetInputData(output)
gf2.ExtentClippingOn()
gf2.SetExtent(10,17,-6,6,23,37)
gf2.PointClippingOn()
gf2.SetPointMinimum(0)
gf2.SetPointMaximum(10000)
gf2.CellClippingOn()
gf2.SetCellMinimum(0)
gf2.SetCellMaximum(7500)
g2Mapper = vtk.vtkPolyDataMapper()
g2Mapper.SetInputConnection(gf2.GetOutputPort())
g2Mapper.SetScalarRange(output.GetScalarRange())
g2Actor = vtk.vtkActor()
g2Actor.SetMapper(g2Mapper)
g2Actor.AddPosition(0,15,0)
# create pipeline - poly data
#
# g3/g4 feed the poly-data output of the structured-grid filters back into
# geometry filters, exercising the poly-data input path.
gf3 = vtk.vtkGeometryFilter()
gf3.SetInputConnection(gf.GetOutputPort())
g3Mapper = vtk.vtkPolyDataMapper()
g3Mapper.SetInputConnection(gf3.GetOutputPort())
g3Mapper.SetScalarRange(output.GetScalarRange())
g3Actor = vtk.vtkActor()
g3Actor.SetMapper(g3Mapper)
g3Actor.AddPosition(0,0,15)
gf4 = vtk.vtkGeometryFilter()
gf4.SetInputConnection(gf2.GetOutputPort())
gf4.ExtentClippingOn()
gf4.SetExtent(10,17,-6,6,23,37)
gf4.PointClippingOn()
gf4.SetPointMinimum(0)
gf4.SetPointMaximum(10000)
gf4.CellClippingOn()
gf4.SetCellMinimum(0)
gf4.SetCellMaximum(7500)
g4Mapper = vtk.vtkPolyDataMapper()
g4Mapper.SetInputConnection(gf4.GetOutputPort())
g4Mapper.SetScalarRange(output.GetScalarRange())
g4Actor = vtk.vtkActor()
g4Actor.SetMapper(g4Mapper)
g4Actor.AddPosition(0,15,15)
# create pipeline - unstructured grid
#
# Extract the cells inside a sphere to get an unstructured grid, then run
# the geometry filter on it (g5 plain, g6 with clipping).
s = vtk.vtkSphere()
s.SetCenter(output.GetCenter())
s.SetRadius(100.0)
#everything
eg = vtk.vtkExtractGeometry()
eg.SetInputData(output)
eg.SetImplicitFunction(s)
gf5 = vtk.vtkGeometryFilter()
gf5.SetInputConnection(eg.GetOutputPort())
g5Mapper = vtk.vtkPolyDataMapper()
g5Mapper.SetInputConnection(gf5.GetOutputPort())
g5Mapper.SetScalarRange(output.GetScalarRange())
g5Actor = vtk.vtkActor()
g5Actor.SetMapper(g5Mapper)
g5Actor.AddPosition(0,0,30)
gf6 = vtk.vtkGeometryFilter()
gf6.SetInputConnection(eg.GetOutputPort())
gf6.ExtentClippingOn()
gf6.SetExtent(10,17,-6,6,23,37)
gf6.PointClippingOn()
gf6.SetPointMinimum(0)
gf6.SetPointMaximum(10000)
gf6.CellClippingOn()
gf6.SetCellMinimum(0)
gf6.SetCellMaximum(7500)
g6Mapper = vtk.vtkPolyDataMapper()
g6Mapper.SetInputConnection(gf6.GetOutputPort())
g6Mapper.SetScalarRange(output.GetScalarRange())
g6Actor = vtk.vtkActor()
g6Actor.SetMapper(g6Mapper)
g6Actor.AddPosition(0,15,30)
# create pipeline - rectilinear grid
#
# Same two-way (plain / clipped) exercise for the rectilinear-grid input path.
rgridReader = vtk.vtkRectilinearGridReader()
rgridReader.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/RectGrid2.vtk")
rgridReader.Update()
gf7 = vtk.vtkGeometryFilter()
gf7.SetInputConnection(rgridReader.GetOutputPort())
g7Mapper = vtk.vtkPolyDataMapper()
g7Mapper.SetInputConnection(gf7.GetOutputPort())
g7Mapper.SetScalarRange(rgridReader.GetOutput().GetScalarRange())
g7Actor = vtk.vtkActor()
g7Actor.SetMapper(g7Mapper)
g7Actor.SetScale(3,3,3)
gf8 = vtk.vtkGeometryFilter()
gf8.SetInputConnection(rgridReader.GetOutputPort())
gf8.ExtentClippingOn()
gf8.SetExtent(0,1,-2,2,0,4)
gf8.PointClippingOn()
gf8.SetPointMinimum(0)
gf8.SetPointMaximum(10000)
gf8.CellClippingOn()
gf8.SetCellMinimum(0)
gf8.SetCellMaximum(7500)
g8Mapper = vtk.vtkPolyDataMapper()
g8Mapper.SetInputConnection(gf8.GetOutputPort())
g8Mapper.SetScalarRange(rgridReader.GetOutput().GetScalarRange())
g8Actor = vtk.vtkActor()
g8Actor.SetMapper(g8Mapper)
g8Actor.SetScale(3,3,3)
g8Actor.AddPosition(0,15,0)
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren1.AddActor(gActor)
ren1.AddActor(g2Actor)
ren1.AddActor(g3Actor)
ren1.AddActor(g4Actor)
ren1.AddActor(g5Actor)
ren1.AddActor(g6Actor)
ren1.AddActor(g7Actor)
ren1.AddActor(g8Actor)
renWin.SetSize(340,550)
# Fixed camera so the regression image is reproducible.
cam1 = ren1.GetActiveCamera()
cam1.SetClippingRange(84,174)
cam1.SetFocalPoint(5.22824,6.09412,35.9813)
cam1.SetPosition(100.052,62.875,102.818)
cam1.SetViewUp(-0.307455,-0.464269,0.830617)
iren.Initialize()
# prevent the tk window from showing up then start the event loop
# test that the cell data is properly mapped in the output
#
# Build a tiny unstructured grid with two VTK_LINE and two VTK_VERTEX cells,
# attach a cell-data array whose value equals the input cell index, and check
# that vtkGeometryFilter carries the array over to the output cells.
ug = vtk.vtkUnstructuredGrid()
p = vtk.vtkPoints()
for x in range(4):
    p.InsertNextPoint(x, 0, 0)
ug.SetPoints(p)
ug.Allocate(4)
# Input cell order: line(0,1), vertex(1), line(2,3), vertex(3) — so the
# vertex cells get array values {1, 3} and the line cells {0, 2}.
ug.InsertNextCell(vtk.VTK_LINE, 2, [0, 1])
ug.InsertNextCell(vtk.VTK_VERTEX, 1, [1])
ug.InsertNextCell(vtk.VTK_LINE, 2, [2, 3])
ug.InsertNextCell(vtk.VTK_VERTEX, 1, [3])
aa = vtk.vtkIntArray()
for v in range(4):
    aa.InsertNextValue(v)
aa.SetName('testarray')
ug.GetCellData().AddArray(aa)
gf = vtk.vtkGeometryFilter()
gf.SetInputData(ug)
gf.Update()
pd = gf.GetOutput()
oa = pd.GetCellData().GetArray('testarray')
# Check that the ordering of polydata arrays is correct. Verts should come
# before lines: output cells 0-1 must carry vertex values, cells 2-3 line
# values. Membership tests replace the copy-pasted `!= a and != b` chains.
vert_values = (1, 3)
line_values = (0, 2)
for idx, expected in ((0, vert_values), (1, vert_values),
                      (2, line_values), (3, line_values)):
    if oa.GetValue(idx) not in expected:
        print('Bad celldata of test array')
        sys.exit(1)
# --- end of script --
|
pyro/contrib/gp/models/gpr.py | GautamV234/pyro | 4,959 | 79597 | <filename>pyro/contrib/gp/models/gpr.py
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import torch
import torch.distributions as torchdist
from torch.distributions import constraints
import pyro
import pyro.distributions as dist
from pyro.contrib.gp.models.model import GPModel
from pyro.contrib.gp.util import conditional
from pyro.nn.module import PyroParam, pyro_method
from pyro.util import warn_if_nan
class GPRegression(GPModel):
    r"""
    Gaussian Process Regression model.

    The core of a Gaussian Process is a covariance function :math:`k` which governs
    the similarity between input points. Given :math:`k`, we can establish a
    distribution over functions :math:`f` by a multivarite normal distribution

    .. math:: p(f(X)) = \mathcal{N}(0, k(X, X)),

    where :math:`X` is any set of input points and :math:`k(X, X)` is a covariance
    matrix whose entries are outputs :math:`k(x, z)` of :math:`k` over input pairs
    :math:`(x, z)`. This distribution is usually denoted by

    .. math:: f \sim \mathcal{GP}(0, k).

    .. note:: Generally, beside a covariance matrix :math:`k`, a Gaussian Process can
        also be specified by a mean function :math:`m` (which is a zero-value function
        by default). In that case, its distribution will be

        .. math:: p(f(X)) = \mathcal{N}(m(X), k(X, X)).

    Given inputs :math:`X` and their noisy observations :math:`y`, the Gaussian Process
    Regression model takes the form

    .. math::
        f &\sim \mathcal{GP}(0, k(X, X)),\\
        y & \sim f + \epsilon,

    where :math:`\epsilon` is Gaussian noise.

    .. note:: This model has :math:`\mathcal{O}(N^3)` complexity for training,
        :math:`\mathcal{O}(N^3)` complexity for testing. Here, :math:`N` is the number
        of train inputs.

    Reference:

    [1] `Gaussian Processes for Machine Learning`,
    Carl E. Rasmussen, Christopher K. I. Williams

    :param torch.Tensor X: A input data for training. Its first dimension is the number
        of data points.
    :param torch.Tensor y: An output data for training. Its last dimension is the
        number of data points.
    :param ~pyro.contrib.gp.kernels.kernel.Kernel kernel: A Pyro kernel object, which
        is the covariance function :math:`k`.
    :param torch.Tensor noise: Variance of Gaussian noise of this model.
    :param callable mean_function: An optional mean function :math:`m` of this Gaussian
        process. By default, we use zero mean.
    :param float jitter: A small positive term which is added into the diagonal part of
        a covariance matrix to help stablize its Cholesky decomposition.
    """

    def __init__(self, X, y, kernel, noise=None, mean_function=None, jitter=1e-6):
        assert isinstance(
            X, torch.Tensor
        ), "X needs to be a torch Tensor instead of a {}".format(type(X))
        if y is not None:
            assert isinstance(
                y, torch.Tensor
            ), "y needs to be a torch Tensor instead of a {}".format(type(y))
        super().__init__(X, y, kernel, mean_function, jitter)

        # Noise variance is a learnable, positivity-constrained parameter;
        # defaults to 1.0 when not supplied.
        noise = self.X.new_tensor(1.0) if noise is None else noise
        self.noise = PyroParam(noise, constraints.positive)

    @pyro_method
    def model(self):
        """Generative model: y ~ MVN(m(X), k(X, X) + (jitter + noise) I)."""
        self.set_mode("model")

        N = self.X.size(0)
        Kff = self.kernel(self.X)
        Kff.view(-1)[:: N + 1] += self.jitter + self.noise  # add noise to diagonal
        Lff = torch.linalg.cholesky(Kff)

        zero_loc = self.X.new_zeros(self.X.size(0))
        f_loc = zero_loc + self.mean_function(self.X)
        if self.y is None:
            # Prior-only mode: return per-point mean and variance
            # (row sums of squared Cholesky factor give the diagonal of Kff).
            f_var = Lff.pow(2).sum(dim=-1)
            return f_loc, f_var
        else:
            return pyro.sample(
                self._pyro_get_fullname("y"),
                dist.MultivariateNormal(f_loc, scale_tril=Lff)
                .expand_by(self.y.shape[:-1])
                .to_event(self.y.dim() - 1),
                obs=self.y,
            )

    @pyro_method
    def guide(self):
        """Guide: no latent sites beyond the registered parameters."""
        self.set_mode("guide")
        self._load_pyro_samples()

    def forward(self, Xnew, full_cov=False, noiseless=True):
        r"""
        Computes the mean and covariance matrix (or variance) of Gaussian Process
        posterior on a test input data :math:`X_{new}`:

        .. math:: p(f^* \mid X_{new}, X, y, k, \epsilon) = \mathcal{N}(loc, cov).

        .. note:: The noise parameter ``noise`` (:math:`\epsilon`) together with
            kernel's parameters have been learned from a training procedure (MCMC or
            SVI).

        :param torch.Tensor Xnew: A input data for testing. Note that
            ``Xnew.shape[1:]`` must be the same as ``self.X.shape[1:]``.
        :param bool full_cov: A flag to decide if we want to predict full covariance
            matrix or just variance.
        :param bool noiseless: A flag to decide if we want to include noise in the
            prediction output or not.
        :returns: loc and covariance matrix (or variance) of :math:`p(f^*(X_{new}))`
        :rtype: tuple(torch.Tensor, torch.Tensor)
        """
        self._check_Xnew_shape(Xnew)
        self.set_mode("guide")

        N = self.X.size(0)
        Kff = self.kernel(self.X).contiguous()
        Kff.view(-1)[:: N + 1] += self.jitter + self.noise  # add noise to the diagonal
        Lff = torch.linalg.cholesky(Kff)

        # NOTE(review): assumes self.y is not None here — confirm callers
        # only invoke forward() on models constructed with training targets.
        y_residual = self.y - self.mean_function(self.X)
        loc, cov = conditional(
            Xnew,
            self.X,
            self.kernel,
            y_residual,
            None,
            Lff,
            full_cov,
            jitter=self.jitter,
        )

        if full_cov and not noiseless:
            M = Xnew.size(0)
            cov = cov.contiguous()
            cov.view(-1, M * M)[:, :: M + 1] += self.noise  # add noise to the diagonal
        if not full_cov and not noiseless:
            cov = cov + self.noise

        return loc + self.mean_function(Xnew), cov

    def iter_sample(self, noiseless=True):
        r"""
        Iteratively constructs a sample from the Gaussian Process posterior.

        Recall that at test input points :math:`X_{new}`, the posterior is
        multivariate Gaussian distributed with mean and covariance matrix
        given by :func:`forward`.

        This method samples lazily from this multivariate Gaussian. The advantage
        of this approach is that later query points can depend upon earlier ones.
        Particularly useful when the querying is to be done by an optimisation
        routine.

        .. note:: The noise parameter ``noise`` (:math:`\epsilon`) together with
            kernel's parameters have been learned from a training procedure (MCMC or
            SVI).

        :param bool noiseless: A flag to decide if we want to add sampling noise
            to the samples beyond the noise inherent in the GP posterior.
        :returns: sampler
        :rtype: function
        """
        noise = self.noise.detach()
        X = self.X.clone().detach()
        y = self.y.clone().detach()
        N = X.size(0)
        Kff = self.kernel(X).contiguous()
        Kff.view(-1)[:: N + 1] += noise  # add noise to the diagonal

        # Mutable state shared across successive sample_next calls: the data
        # (X, y) grows by one point per accepted sample and Kff grows with it.
        outside_vars = {"X": X, "y": y, "N": N, "Kff": Kff}

        def sample_next(xnew, outside_vars):
            """Repeatedly samples from the Gaussian process posterior,
            conditioning on previously sampled values.
            """
            warn_if_nan(xnew)

            # Variables from outer scope
            X, y, Kff = outside_vars["X"], outside_vars["y"], outside_vars["Kff"]

            # Compute Cholesky decomposition of kernel matrix
            Lff = torch.linalg.cholesky(Kff)
            y_residual = y - self.mean_function(X)

            # Compute conditional mean and variance
            loc, cov = conditional(
                xnew, X, self.kernel, y_residual, None, Lff, False, jitter=self.jitter
            )
            if not noiseless:
                cov = cov + noise

            ynew = torchdist.Normal(
                loc + self.mean_function(xnew), cov.sqrt()
            ).rsample()

            # Update kernel matrix
            N = outside_vars["N"]
            Kffnew = Kff.new_empty(N + 1, N + 1)
            Kffnew[:N, :N] = Kff
            cross = self.kernel(X, xnew).squeeze()
            end = self.kernel(xnew, xnew).squeeze()
            Kffnew[N, :N] = cross
            Kffnew[:N, N] = cross
            # No noise, just jitter for numerical stability
            Kffnew[N, N] = end + self.jitter

            # Heuristic to avoid adding degenerate points
            if Kffnew.logdet() > -15.0:
                outside_vars["Kff"] = Kffnew
                outside_vars["N"] += 1
                outside_vars["X"] = torch.cat((X, xnew))
                outside_vars["y"] = torch.cat((y, ynew))

            return ynew

        return lambda xnew: sample_next(xnew, outside_vars)
|
Steg_Tool/steg.py | Affanmir/Awesome-Python-Scripts | 1,026 | 79625 | #Image Stego using LSB
import cv2
def encode(input_image_name, output_image_name, file_name):
    # Hide the bytes of `file_name` inside `input_image_name` using LSB
    # steganography, writing the stego image to `output_image_name`.
    #
    # Layout: a 64-bit big-endian-style binary length header, then the file
    # bytes, one bit per channel value.  The cursor walks channel -> column
    # -> row; when the whole image is exhausted it moves to the next bit
    # plane (maskone 1 -> 2 -> 4 ...), overwriting progressively higher bits.
    input_image = cv2.imread(input_image_name)
    height, width, nbchannels = input_image.shape
    size = width*height
    current_width = 0
    current_height = 0
    current_channel = 0
    # maskone sets the target bit; the matching maskzero clears it (~maskone).
    maskonevalues = [1, 2, 4, 8, 16, 32, 64, 128]
    maskone = maskonevalues.pop(0)
    maskzerovalues = [254, 253, 251, 247, 239, 223, 191, 127]
    maskzero = maskzerovalues.pop(0)
    data = open(file_name, "rb").read()
    length = len(data)
    # Capacity check for the first bit plane only (header + payload bits).
    if(width*height*nbchannels < length + 64):
        raise Exception("Not enough space to hold all steganographic data")
    # Build the zero-padded 64-bit binary representation of the length.
    binary_value = bin(length)[2:]
    if(len(binary_value) > 64):
        raise Exception("Binary Value larger than expected")
    else:
        while(len(binary_value) < 64):
            binary_value = "0" + binary_value
    # Write the 64 header bits.
    for c in binary_value:
        value = list(input_image[current_height, current_width])
        if(int(c) == 1):
            value[current_channel] = int(value[current_channel]) | maskone
        else:
            value[current_channel] = int(value[current_channel]) & maskzero
        input_image[current_height, current_width] = tuple(value)
        # Advance the cursor: channel, then column, then row, then bit plane.
        if(current_channel == nbchannels-1):
            current_channel = 0
            if(current_width == width-1):
                current_width = 0
                if(current_height == height-1):
                    current_height = 0
                    if maskone == 128:
                        raise Exception("No more space available in image")
                    else:
                        maskone = maskonevalues.pop(0)
                        maskzero = maskzerovalues.pop(0)
                else:
                    current_height += 1
            else:
                current_width += 1
        else:
            current_channel += 1
    # Write the payload, 8 bits per byte, using the same traversal.
    for byte in data:
        if(isinstance(byte, int)):
            pass  # Python 3: iterating bytes already yields ints
        else:
            byte = ord(byte)  # Python 2 compatibility: str -> int
        binv = bin(byte)[2:]
        if(len(binv) > 8):
            raise Exception("Binary Value larger than expected")
        else:
            while(len(binv) < 8):
                binv = "0" + binv
        for c in binv:
            val = list(input_image[current_height, current_width])
            if(int(c) == 1):
                val[current_channel] = int(val[current_channel]) | maskone
            else:
                val[current_channel] = int(val[current_channel]) & maskzero
            input_image[current_height, current_width] = tuple(val)
            # Same cursor-advance logic as for the header bits.
            if(current_channel == nbchannels-1):
                current_channel = 0
                if(current_width == width-1):
                    current_width = 0
                    if(current_height == height-1):
                        current_height = 0
                        if maskone == 128:
                            raise Exception("No more space available in image")
                        else:
                            maskone = maskonevalues.pop(0)
                            maskzero = maskzerovalues.pop(0)
                    else:
                        current_height += 1
                else:
                    current_width += 1
            else:
                current_channel += 1
    # NOTE(review): output must be a lossless format (e.g. PNG) or the LSBs
    # will be destroyed by compression — confirm with callers.
    cv2.imwrite(output_image_name, input_image)
def decode(encoded_image_name, extracted_file_name):
    # Inverse of encode(): read the 64-bit length header, then that many
    # bytes, from the LSB planes of `encoded_image_name`, and write the
    # recovered bytes to `extracted_file_name`.
    encoded_image = cv2.imread(encoded_image_name)
    height, width, nbchannels = encoded_image.shape
    size = width*height
    current_width = 0
    current_height = 0
    current_channel = 0
    # Bit-plane masks; must match the progression used by encode().
    maskonevalues = [1, 2, 4, 8, 16, 32, 64, 128]
    maskone = maskonevalues.pop(0)
    maskzerovalues = [254, 253, 251, 247, 239, 223, 191, 127]
    maskzero = maskzerovalues.pop(0)
    bits = ""
    # Read the 64-bit payload-length header.
    for i in range(64):
        value = encoded_image[current_height, current_width][current_channel]
        value = int(value) & maskone
        # Advance cursor: channel, then column, then row, then bit plane.
        if(current_channel == nbchannels-1):
            current_channel = 0
            if(current_width == width-1):
                current_width = 0
                if(current_height == height-1):
                    current_height = 0
                    if(maskone == 128):
                        raise Exception("No more space available in image")
                    else:
                        maskone = maskonevalues.pop(0)
                        maskzero = maskzerovalues.pop(0)
                else:
                    current_height += 1
            else:
                current_width += 1
        else:
            current_channel += 1
        if(value > 0):
            bits += "1"
        else:
            bits += "0"
    length = int(bits, 2)
    output = b""
    # Read `length` bytes, 8 bits each, continuing the same traversal.
    for i in range(length):
        bits = ""
        for i in range(8):
            value = encoded_image[current_height, current_width][current_channel]
            value = int(value) & maskone
            if(current_channel == nbchannels-1):
                current_channel = 0
                if(current_width == width-1):
                    current_width = 0
                    if(current_height == height-1):
                        current_height = 0
                        if(maskone == 128):
                            raise Exception("No more space available in image")
                        else:
                            maskone = maskonevalues.pop(0)
                            maskzero = maskzerovalues.pop(0)
                    else:
                        current_height += 1
                else:
                    current_width += 1
            else:
                current_channel += 1
            if(value > 0):
                bits += "1"
            else:
                bits += "0"
        output += bytearray([int(bits, 2)])
    f = open(extracted_file_name, "wb")
    f.write(output)
    f.close()
# CLI entry point: one whitespace-separated command line read from stdin.
if __name__ == "__main__":
    input_string = input()
    #encode input_image_name output_image_name file_name
    #decode encoded_image_name extracted_file_name
    input_list = input_string.split()
    if input_list[0] == "encode":
        encode(input_list[1], input_list[2], input_list[3])
        print(f"{input_list[2]}")  # echo the output image name
    elif input_list[0] == "decode":
        decode(input_list[1], input_list[2])
        print(f"{input_list[2]}")  # echo the extracted file name
    else:
        print("Invalid Entry")
|
tensorlog/testxcomp.py | saraswat/TensorLog | 108 | 79638 | <gh_stars>100-1000
# (C) <NAME> and Carnegie Mellon University, 2017
import logging
import numpy as np
import os
import unittest
import sys
import collections
import tempfile
from tensorlog import xctargets
if xctargets.tf:
import tensorflow as tf
from tensorlog import tensorflowxcomp
else:
tensorflowxcomp=None
if xctargets.theano:
import theano
from tensorlog import theanoxcomp
else:
theanoxcomp=None
from tensorlog import bpcompiler
from tensorlog import comline
from tensorlog import dataset
from tensorlog import declare
from tensorlog import matrixdb
from tensorlog import learn
from tensorlog import mutil
from tensorlog import parser
from tensorlog import program
from tensorlog import simple
from tensorlog import testtensorlog
from tensorlog import funs
from tensorlog import ops
from tensorlog import learnxcomp as learnxc
from tensorlog.expt import Expt
if xctargets.tf:
    tf.logging.set_verbosity(tf.logging.WARN)

# Cross-compiler classes exercised by every test, plus the learner class to
# use with each; populated according to which backends imported successfully.
TESTED_COMPILERS = []
TESTED_LEARNERS = {}
if xctargets.theano:
    for c in [
        theanoxcomp.DenseMatDenseMsgCrossCompiler,
        theanoxcomp.SparseMatDenseMsgCrossCompiler
    ]:
        TESTED_COMPILERS.append(c)
        TESTED_LEARNERS[c]=theanoxcomp.FixedRateGDLearner
if xctargets.tf:
    for c in [
        tensorflowxcomp.DenseMatDenseMsgCrossCompiler,
        tensorflowxcomp.SparseMatDenseMsgCrossCompiler,
    ]:
        TESTED_COMPILERS.append(c)
        TESTED_LEARNERS[c]=tensorflowxcomp.FixedRateGDLearner
# Feature switches for the test suite.
RUN_OLD_INFERENCE_TESTS = False
SAVE_SUMMARIES = False
def close_cross_compiler(xc):
    # Release a cross-compiler's resources; TensorFlow compilers also need
    # the default graph reset so state does not leak between tests.
    xc.close()
    if xctargets.tf and isinstance(xc,tensorflowxcomp.TensorFlowCrossCompiler):
        tf.reset_default_graph()
class TestXCSmallProofs(testtensorlog.TestSmallProofs):
    """Run the base-class small-proof inference cases through every
    cross-compiler in TESTED_COMPILERS and check the results agree."""

    def test_if(self):
        self.xcomp_check(['p(X,Y):-spouse(X,Y).'], 'p(i,o)', 'william', {'susan':1.0})

    def test_failure(self):
        # A query with no proofs should map everything to the NULL entity.
        self.xcomp_check(['p(X,Y):-spouse(X,Y).'], 'p(i,o)', 'lottie', {matrixdb.NULL_ENTITY_NAME:1.0})

    def test_reverse_if(self):
        self.xcomp_check(['p(X,Y):-sister(Y,X).'], 'p(i,o)', 'rachel', {'william':1.0})

    def test_or(self):
        self.xcomp_check(['p(X,Y):-spouse(X,Y).', 'p(X,Y):-sister(X,Y).'], 'p(i,o)', 'william',
                         {'susan':1.0, 'rachel':1.0, 'lottie':1.0, 'sarah':1.0})

    def test_chain(self):
        self.xcomp_check(['p(X,Z):-spouse(X,Y),sister(Y,Z).'], 'p(i,o)', 'susan',
                         {'rachel':1.0, 'lottie':1.0, 'sarah':1.0})
        self.xcomp_check(['p(X,Z):-sister(X,Y),child(Y,Z).'], 'p(i,o)', 'william',
                         {'charlotte':1.0, 'lucas':1.0, 'poppy':1.0, 'caroline':1.0, 'elizabeth':1.0})

    def test_mid(self):
        self.xcomp_check(['p(X,Y):-sister(X,Y),child(Y,Z).'], 'p(i,o)', 'william',
                         {'sarah': 1.0, 'rachel': 2.0, 'lottie': 2.0})

    def test_nest(self):
        self.xcomp_check(['s(X,Y):-spouse(X,Y).','t(X,Z):-spouse(X,Y),s(Y,Z).'], 't(i,o)', 'susan', {'susan': 1.0})

    def test_back1(self):
        # fails for tensorflowxcomp
        self.xcomp_check(['p(X,Y):-spouse(X,Y),sister(X,Z).'], 'p(i,o)', 'william', {'susan': 3.0})

    def test_back2(self):
        self.xcomp_check(['p(X,Y):-spouse(X,Y),sister(X,Z1),sister(X,Z2).'],'p(i,o)','william',{'susan': 9.0})

    def test_rec1(self):
        # Recursive rules: proof counts depend on the recursion depth bound.
        program.DEFAULT_MAXDEPTH=4
        self.xcomp_check(['p(X,Y):-spouse(X,Y).','p(X,Y):-p(Y,X).'], 'p(i,o)','william',{'susan': 5.0})
        program.DEFAULT_MAXDEPTH=10
        self.xcomp_check(['p(X,Y):-spouse(X,Y).','p(X,Y):-p(Y,X).'], 'p(i,o)','william',{'susan': 11.0})

    def test_const_output(self):
        self.xcomp_check(['sis(X,W):-assign(W,william),child(X,Y).'], 'sis(i,o)', 'sarah', {'william': 1.0})
        self.xcomp_check(['sis(X,W):-assign(W,william),child(X,Y).'], 'sis(i,o)', 'lottie', {'william': 2.0})

    def test_const_chain1(self):
        self.xcomp_check(['p(X,S) :- assign(S,susan),sister(X,Y),child(Y,Z).'],'p(i,o)','william',{'susan': 5.0})

    def test_const_chain2(self):
        self.xcomp_check(['p(X,Pos) :- assign(Pos,pos),child(X,Y),young(Y).'],'p(i,o)','sarah',{'pos':1.0})
        self.xcomp_check(['p(X,Pos) :- assign(Pos,pos),child(X,Y),young(Y).'],'p(i,o)','lottie',{'pos':2.0})

    def test_alt_chain(self):
        self.xcomp_check(['p(X,W) :- spouse(X,W),sister(X,Y),child(Y,Z).'],'p(i,o)','william',{'susan': 5.0})
        pass

    def test_proppr1(self):
        # ProPPR-style program with a learned rule-weight vector.
        w = 7*self.db.onehot('r1')+3*self.db.onehot('r2')
        self.proppr_xcomp_check(w,['p(X,Y):-sister(X,Y) {r1}.','p(X,Y):-spouse(X,Y) {r2}.'],'p(i,o)',
                                'william', {'sarah': 7.0, 'rachel': 7.0, 'lottie': 7.0, 'susan': 3.0})

    def test_proppr2(self):
        w = 3*self.db.onehot('r2')
        self.proppr_xcomp_check(w,['p(X,Y):-spouse(Y,X) {r2}.'],'p(i,o)',
                                'susan', {'william': 3.0})

    def test_reuse1(self):
        self.xcomp_check(['p(X,Y) :- r(X,Z),r(Z,Y).', 'r(X,Y):-spouse(X,Y).'], 'p(i,o)', 'william',
                         {'william':1.0})

    def _removeZeros(self, sdict):
        # NOTE(review): disabled — the early return makes the normalization
        # below dead code; kept for reference.
        if True: return sdict
        e = sdict[None]
        ret = dict([ (k,v-e) for (k,v) in list(sdict.items()) if v != e])
        z = sum(ret.values())
        for k in ret: ret[k] = ret[k]/z
        return ret

    def xcomp_check(self,ruleStrings,mode_string,input_symbol,expected_result_dict,compare=False):
        # Check a plain (non-ProPPR) program against every tested compiler.
        self._xcomp_check('vanilla',None,ruleStrings,mode_string,input_symbol,expected_result_dict,compare)

    def proppr_xcomp_check(self,weightVec,ruleStrings,mode_string,input_symbol,expected_result_dict):
        # Check a ProPPR program (with rule-feature weights) the same way.
        self._xcomp_check('proppr',weightVec,ruleStrings,mode_string,input_symbol,expected_result_dict)

    def _xcomp_check(self,progType,weightVec,ruleStrings,mode_string,input_symbol,expected_result_dict,compare=False):
        # run the base class check to see that the inference is correct
        if RUN_OLD_INFERENCE_TESTS:
            if progType=='proppr':
                self.proppr_inference_check(weightVec,ruleStrings,mode_string,input_symbol,expected_result_dict)
            else:
                self.inference_check(ruleStrings,mode_string,input_symbol,expected_result_dict)
        # setup the next round of tests by compiling a tensorlog
        # Program - this code is lifted from the testtensorlog
        # inference routines
        print('xcomp inference for mode',mode_string,'on input',input_symbol)
        testtensorlog.softmax_normalize(expected_result_dict)
        rules = parser.RuleCollection()
        for r in ruleStrings:
            rules.add(parser.Parser().parseRule(r))
        if progType=='proppr':
            prog = program.ProPPRProgram(db=self.db,rules=rules,weights=weightVec)
        else:
            prog = program.Program(db=self.db,rules=rules)
        for compilerClass in TESTED_COMPILERS:
            #cross-compile the function
            xc = compilerClass(prog)
            # evaluate the function and get the output y
            #xc.show()
            print('== performing eval with',compilerClass,'==')
            inferenceFun = xc.inferenceFunction(mode_string)
            y = inferenceFun(prog.db.onehot(input_symbol))
            # print 'input',xc.getInputName(mode_string),'args,fun
            # =',xc.inference(mode_string) theano output will a be (probably
            # dense) message, so just compare and check that the maximal
            # elements from these two dicts are the same
            actual_result_dict = self.db.rowAsSymbolDict(y)
            self.check_maxes_in_dicts(actual_result_dict, expected_result_dict)
            # check it's normalized
            l1_error = abs(sum(actual_result_dict.values()) - 1.0)
            #print 'l1_error',l1_error,'actual_result_dict',actual_result_dict,'expected_result_dict',expected_result_dict
            self.assertTrue( l1_error < 0.0001)
            # also test proofCountFun
            proofCountFun = xc.proofCountFunction(mode_string)
            pc = proofCountFun(prog.db.onehot(input_symbol))
            # theano output will a be (probably dense) message, so
            # just compare that maximal elements from these two dicts
            # are the same
            pc_result_dict = self.db.rowAsSymbolDict(pc)
            if len(pc_result_dict)>0:
                self.check_maxes_in_dicts(pc_result_dict, expected_result_dict)
            print('== eval checks passed ==')
            close_cross_compiler(xc)

    def check_maxes_in_dicts(self,actual,expected):
        # Assert both dicts have the same set of maximal-valued keys.
        def maximalElements(d):
            m = max(d.values())
            return set(k for k in d if d[k]==m)
        actualMaxes = maximalElements(actual)
        expectedMaxes = maximalElements(expected)
        print('actual',actualMaxes,'expected',expectedMaxes)
        for a in actualMaxes:
            self.assertTrue(a in expectedMaxes)
        for a in expectedMaxes:
            self.assertTrue(a in actualMaxes)
class TestXCGrad(testtensorlog.TestGrad):
def setUp(self):
self.db = matrixdb.MatrixDB.loadFile(os.path.join(testtensorlog.TEST_DATA_DIR,'fam.cfacts'))
def test_if(self):
rules = ['p(X,Y):-sister(X,Y).']
mode = 'p(i,o)'
params = [('sister',2)]
self.xgrad_check(rules, mode, params,
[('william',['rachel','sarah'])],
{'sister(william,rachel)': +1,'sister(william,sarah)': +1,'sister(william,lottie)': -1})
self.xgrad_check(rules, mode, params,
[('william',['lottie'])],
{'sister(william,rachel)': -1,'sister(william,lottie)': +1})
def test_if2(self):
rules = ['p(X,Y):-sister(X,Y).']
mode = 'p(i,o)'
params = [('sister',2)]
self.xgrad_check(rules, mode, params,
[('william',['rachel','sarah']), ('william',['rachel','sarah'])],
{'sister(william,rachel)': +1,'sister(william,sarah)': +1,'sister(william,lottie)': -1})
self.xgrad_check(rules, mode, params,
[('william',['lottie']), ('william',['lottie'])],
{'sister(william,rachel)': -1,'sister(william,lottie)': +1})
def test_reverse_if(self):
rules = ['p(X,Y):-parent(Y,X).']
mode = 'p(i,o)'
params = [('parent',2)]
self.xgrad_check(rules, mode, params,
[('lottie',['charlotte'])],
{'parent(charlotte,lottie)': +1,'parent(lucas,lottie)': -1})
def test_chain1(self):
rules = ['p(X,Z):-sister(X,Y),child(Y,Z).']
mode = 'p(i,o)'
self.xgrad_check(rules,mode,
[('sister',2)],
[('william',['caroline','elizabeth'])],
{'sister(william,rachel)': +1,'sister(william,lottie)': -1})
self.xgrad_check(rules,mode,
[('child',2)],
[('william',['caroline','elizabeth'])],
{'child(rachel,elizabeth)': +1,'child(lottie,lucas)': -1})
self.xgrad_check(rules,mode,
[('child',2),('sister',2)],
[('william',['caroline','elizabeth'])],
{'child(rachel,elizabeth)': +1,'child(lottie,lucas)': -1, 'sister(william,rachel)': +1,'sister(william,lottie)': -1})
def test_chain2(self):
rules = ['p(X,Z):-spouse(X,Y),sister(Y,Z).']
mode = 'p(i,o)'
self.xgrad_check(rules,mode,
[('sister',2)],
[('susan',['rachel'])],
{'sister(william,rachel)': +1,'sister(william,lottie)': -1})
def test_call1(self):
rules = ['q(X,Y):-sister(X,Y).','p(Z,W):-q(Z,W).']
mode = 'p(i,o)'
params = [('sister',2)]
self.xgrad_check(rules, mode, params,
[('william',['rachel','sarah'])],
{'sister(william,rachel)': +1,'sister(william,sarah)': +1,'sister(william,lottie)': -1})
self.xgrad_check(rules, mode, params,
[('william',['lottie'])],
{'sister(william,rachel)': -1,'sister(william,lottie)': +1})
def test_call2(self):
rules = ['q(X,Y):-sister(X,Y).','p(Z,W):-r(Z,W).','r(Z,W):-q(Z,W).']
mode = 'p(i,o)'
params = [('sister',2)]
self.xgrad_check(rules, mode, params,
[('william',['rachel','sarah'])],
{'sister(william,rachel)': +1,'sister(william,sarah)': +1,'sister(william,lottie)': -1})
self.xgrad_check(rules, mode, params,
[('william',['lottie'])],
{'sister(william,rachel)': -1,'sister(william,lottie)': +1})
def test_split(self):
rules = ['p(X,Y):-sister(X,Y),child(Y,Z),young(Z).']
mode = 'p(i,o)'
params = [('child',2)]
self.xgrad_check(rules, mode, params,
[('william',['lottie'])],
{'child(lottie,lucas)': +1,'child(lottie,charlotte)': +1,'child(sarah,poppy)': -1})
params = [('sister',2)]
self.xgrad_check(rules, mode, params,
[('william',['lottie'])],
{'sister(william,lottie)': +1,'sister(william,sarah)': -1})
def test_or(self):
rules = ['p(X,Y):-child(X,Y).', 'p(X,Y):-sister(X,Y).']
mode = 'p(i,o)'
params = [('sister',2)]
self.xgrad_check(rules, mode, params,
[('william',['charlie','rachel'])],
{'sister(william,rachel)': +1,'sister(william,sarah)': -1,'sister(william,lottie)': -1})
params = [('child',2)]
self.xgrad_check(rules, mode, params,
[('william',['charlie','rachel'])],
{'child(william,charlie)': +1,'child(william,josh)': -1})
params = [('child',2),('sister',2)]
self.xgrad_check(rules, mode, params,
[('william',['charlie','rachel'])],
{'child(william,charlie)': +1,'child(william,josh)': -1,'sister(william,rachel)': +1,'sister(william,sarah)': -1})
def test_weighted_vec(self):
rules = ['p(X,Y):-sister(X,Y),assign(R,r1),feat(R).','p(X,Y):-child(X,Y),assign(R,r2),feat(R).']
mode = 'p(i,o)'
params = [('sister',2)]
self.xgrad_check(rules, mode, params,
[('william',['rachel','charlie'])],
{'sister(william,rachel)': +1,'sister(william,sarah)': -1})
params = [('child',2)]
self.xgrad_check(rules, mode, params,
[('william',['rachel','charlie'])],
{'child(william,charlie)': +1,'child(william,josh)': -1})
params = [('feat',1)]
self.xgrad_check(rules, mode, params,
[('william',['josh','charlie'])],
{'feat(r1)': -1,'feat(r2)': +1})
self.xgrad_check(rules, mode, params,
[('william',['rachel','sarah','lottie'])],
{'feat(r1)': +1,'feat(r2)': -1})
def learnxc_check(self,rule_strings,mode_string,params,xyPairs,expected):
print("XLearner loss/grad eval")
rules = testtensorlog.rules_from_strings(rule_strings)
prog = program.Program(db=self.db,rules=rules)
mode = declare.ModeDeclaration(mode_string)
prog.db.clearParameterMarkings()
for (functor,arity) in params:
prog.db.markAsParameter(functor,arity)
# TODO: not working yet for mini-batches so check each example
# individually
for x,ys in xyPairs:
data = testtensorlog.DataBuffer(self.db)
data.add_data_symbols(x,ys)
for compilerClass in TESTED_COMPILERS:
xc = compilerClass(prog)
print('learner check for compiler',xc.__class__)
learner = learnxc.XLearner(prog,xc)
paramsWithUpdates = learner.crossEntropyGrad(mode,data.get_x(),data.get_y())
updates_with_string_keys = {}
for (functor,arity),up in paramsWithUpdates:
print('testxcomp update for',functor,arity,'is',up)
upDict = prog.db.matrixAsPredicateFacts(functor,arity,up)
print('upDict',upDict)
for fact,grad_of_fact in list(upDict.items()):
# need to flip for cross-compilers
updates_with_string_keys[str(fact)] = -grad_of_fact
self.check_directions(updates_with_string_keys,expected)
def xgrad_check(self,rule_strings,mode_string,params,xyPairs,expected):
print("direct loss/grad eval")
rules = testtensorlog.rules_from_strings(rule_strings)
prog = program.Program(db=self.db,rules=rules)
prog.db.clearParameterMarkings()
for (functor,arity) in params:
prog.db.markAsParameter(functor,arity)
for x,ys in xyPairs:
data = testtensorlog.DataBuffer(self.db)
data.add_data_symbols(x,ys)
for compilerClass in TESTED_COMPILERS:
xc = compilerClass(prog)
print('grad check for compiler',xc.__class__)
gradFun = xc.dataLossGradFunction(mode_string)
updates_with_string_keys = {}
paramsWithUpdates = gradFun(data.get_x(),data.get_y())
for (functor,arity),up in paramsWithUpdates:
upDict = prog.db.matrixAsPredicateFacts(functor,arity,up)
for fact,grad_of_fact in list(upDict.items()):
# need to flip for cross-compilers
updates_with_string_keys[str(fact)] = -grad_of_fact
self.check_directions(updates_with_string_keys,expected)
self.learnxc_check(rule_strings,mode_string,params,xyPairs,expected)
close_cross_compiler(xc)
class TestXCProPPR(testtensorlog.TestProPPR):
  """Runs the inherited ProPPR test scenarios against each tested
  cross-compiler (TESTED_COMPILERS) and learner (TESTED_LEARNERS)."""
  def setUp(self):
    super(TestXCProPPR,self).setUp()
  def debug(self):
    # convenience hook for interactive debugging sessions
    return self
  def evalxc(self,xc,input):
    """Run inference for mode 'predict/io' on input and return the
    prediction with near-zero entries trimmed to exact zeros."""
    inferenceFun = xc.inferenceFunction('predict/io')
    print(inferenceFun)
    rawPred = inferenceFun(input)
    # trim small numbers to zero
    pred = mutil.mapData(lambda d:np.clip((d - 1e-5),0.00,9999.99), rawPred)
    pred.eliminate_zeros()
    return pred
  def testNativeRow(self):
    """Untrained predictions for each single example row are uniform."""
    for compilerClass in TESTED_COMPILERS:
      xc = compilerClass(self.prog)
      for i in range(self.numExamples):
        pred = self.evalxc(xc, self.X.getrow(i))
        d = self.prog.db.rowAsSymbolDict(pred)
        uniform = {'pos':0.5,'neg':0.5}
        self.check_dicts(d,uniform)
      close_cross_compiler(xc)
  def testNativeMatrix(self):
    """Untrained predictions for the whole example matrix are uniform."""
    for compilerClass in TESTED_COMPILERS:
      xc = compilerClass(self.prog)
      xc.ensureCompiled(self.mode,inputs=None)
      pred = self.prog.eval(self.mode,[self.X])
      d0 = self.prog.db.matrixAsSymbolDict(pred)
      for i,d in list(d0.items()):
        uniform = {'pos':0.5,'neg':0.5,}
        self.check_dicts(d,uniform)
      close_cross_compiler(xc)
  def testGradVector(self):
    """Per-example gradients from each cross-compiler must oppose the
    reference learner's cross-entropy gradients entry by entry."""
    data = testtensorlog.DataBuffer(self.prog.db)
    X,Y = testtensorlog.matrixAsTrainingData(self.labeledData,'train',2)
    learner = learn.OnePredFixedRateGDLearner(self.prog)
    for compilerClass in TESTED_COMPILERS:
      xc = compilerClass(self.prog)
      self.prog.db.markAsParameter('weighted',1)
      #xc.compile(self.mode)
      gradFun = xc.dataLossGradFunction('predict/io')
      # NOTE(review): the inner loops below reuse i, shadowing the
      # example index of this loop
      for i in range(X.shape[0]):
        print("example",i)
        updates = learner.crossEntropyGrad(declare.ModeDeclaration('predict(i,o)'),X[i],Y[i])
        w0 = updates[('weighted',1)].sum(axis=0)
        print(w0)
        updates = gradFun(X[i],Y[i])
        paramKey,w = updates[0]
        print(w)
        # w is different from the w in the corresponding testtensorlog test,
        # which is a crossEntropy gradient for each example, but it should have
        # opposite directions
        nrow,ncol = w.shape
        for i in range(nrow):
          for j in range(ncol):
            self.assertTrue((w[i,j]==0) == (w0[i,j]==0))
            self.assertTrue(w[i,j] * w0[i,j] <= 0)
  def testGradMatrix(self):
    """Whole-batch gradients from each cross-compiler must oppose the
    reference learner's cross-entropy gradients entry by entry."""
    data = testtensorlog.DataBuffer(self.prog.db)
    X,Y = testtensorlog.matrixAsTrainingData(self.labeledData,'train',2)
    learner = learn.OnePredFixedRateGDLearner(self.prog)
    updates = learner.crossEntropyGrad(declare.ModeDeclaration('predict(i,o)'),X,Y)
    w0 = updates[('weighted',1)].sum(axis=0)
    for compilerClass in TESTED_COMPILERS:
      xc = compilerClass(self.prog)
      self.prog.db.markAsParameter('weighted',1)
      #xc.compile(self.mode)
      gradFun = xc.dataLossGradFunction('predict/io')
      updates = gradFun(X,Y)
      paramKey,w = updates[0]
      # w is different from the w in the corresponding testtensorlog test,
      # which is a crossEntropy gradient for each example, but it should have
      # opposite directions
      nrow,ncol = w.shape
      for i in range(nrow):
        for j in range(ncol):
          self.assertTrue((w[i,j]==0) == (w0[i,j]==0),"i=%d,j=%d,w=%g,w0=%g"%(i,j,w[i,j],w0[i,j]))
          self.assertTrue(w[i,j] * w0[i,j] <= 0.0,"i=%d,j=%d,w=%g,w0=%g"%(i,j,w[i,j],w0[i,j]))
      close_cross_compiler(xc)
  def testMultiLearn1(self):
    # inherited multi-task test is disabled for cross-compilers
    pass
  def testLearn(self):
    """End-to-end training: loss should drop, accuracy should rise to 1.0,
    and learned 'weighted' parameters should separate pos from neg."""
    mode = declare.ModeDeclaration('predict(i,o)')
    modestr = 'predict/io'
    X,Y = testtensorlog.matrixAsTrainingData(self.labeledData,'train',2)
    for compilerClass in TESTED_COMPILERS:
      self.prog.setRuleWeights()
      self.prog.setFeatureWeights()
      if SAVE_SUMMARIES:
        xc = compilerClass(self.prog,compilerClass.__name__+".summary")
      else:
        xc = compilerClass(self.prog)
      self.prog.db.markAsParameter('weighted',1)
      v = self.prog.db.getParameter('weighted',1)
      d = self.prog.db.rowAsSymbolDict(v)
      # sanity check a couple of values
      self.assertTrue(d['little_pos'] == d['little_neg'])
      self.assertTrue(d['big_pos'] == d['big_neg'])
      # optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
      learner = TESTED_LEARNERS[compilerClass](self.prog,xc=xc,rate=0.1,epochs=20)
      lossFun = xc.dataLossFunction('predict/io')
      loss0 = lossFun(X,Y)
      print('initial train data loss',loss0)
      TX,TY = testtensorlog.matrixAsTrainingData(self.labeledData,'test',2)
      loss1 = lossFun(TX,TY)
      print('initial test data loss',loss1)
      P = learner.predict('predict/io',X)
      #acc0 = xc.accuracy('predict/io',X,Y)
      acc0 = learner.accuracy(Y,P)
      print('initial train accuracy',acc0)
      TP = learner.predict('predict/io',TX)
      #acc1 = xc.accuracy('predict/io',TX,TY)
      acc1 = learner.accuracy(TY,TP)
      print('initial test accuracy',acc1)
      print('params to optimize',xc.prog.getParamList())
      print('vars to optimize',xc.getParamVariables('predict/io'))
      # xc.optimizeDataLoss('predict/io', optimizer, X, Y, epochs=20)
      learner.trainMode('predict/io',X,Y)
      loss2 = lossFun(X,Y)
      print('final train data loss',loss2)
      loss3 = lossFun(TX,TY)
      print('final test data loss',loss3)
      P2 = learner.predict('predict/io',X)
      #acc2 = xc.accuracy('predict/io',X,Y)
      acc2 = learner.accuracy(Y,P2)
      print('final train accuracy',acc2)
      TP2 = learner.predict('predict/io',TX)
      #acc3 = xc.accuracy('predict/io',TX,TY)
      acc3 = learner.accuracy(TY,TP2)
      print('final test accuracy',acc3)
      xc.exportAllLearnedParams()
      v = self.prog.db.getParameter('weighted',1)
      d = self.prog.db.rowAsSymbolDict(v)
      # sanity check a couple of values
      self.assertTrue(d['little_pos'] > d['little_neg'])
      self.assertTrue(d['big_pos'] < d['big_neg'])
      close_cross_compiler(xc)
      self.assertTrue(acc2>=acc0)
      self.assertTrue(acc3>=acc1)
      self.assertTrue(loss2<loss0)
      self.assertTrue(loss2<loss1)
      self.assertTrue(acc2>=0.9)
      self.assertTrue(acc2==1.0)
  def testDatasetPredict(self):
    """predict and datasetPredict should both yield outputs shaped like X."""
    mode = declare.ModeDeclaration('predict(i,o)')
    modestr = 'predict/io'
    X,Y = testtensorlog.matrixAsTrainingData(self.labeledData,'train',2)
    for compilerClass in TESTED_COMPILERS:
      self.prog.setRuleWeights()
      self.prog.setFeatureWeights()
      if SAVE_SUMMARIES:
        xc = compilerClass(self.prog,compilerClass.__name__+".summary")
      else:
        xc = compilerClass(self.prog)
      self.prog.db.markAsParameter('weighted',1)
      learner = TESTED_LEARNERS[compilerClass](self.prog,xc=xc,rate=0.1,epochs=20)
      P = learner.predict(mode,X)
      print("X",X.shape)
      print("P",P.shape)
      self.assertTrue(X.shape==P.shape)
      P = learner.datasetPredict(dataset.Dataset({mode:X},{mode:Y}))
      print("X",X.shape)
      print("P",P.getX(mode).shape)
      self.assertTrue(X.shape==P.getX(mode).shape)
    # NOTE(review): only the last compiler's objects are returned
    return xc,learner,X,Y,P
  def testExptScaffold(self):
    """Run the Expt scaffold end-to-end for each tested compiler/learner."""
    mode = declare.ModeDeclaration('predict(i,o)')
    X,Y = testtensorlog.matrixAsTrainingData(self.labeledData,'train',2)
    TX,TY = testtensorlog.matrixAsTrainingData(self.labeledData,'test',2)
    self.prog.setAllWeights()
    for compilerClass in TESTED_COMPILERS:
      xc = compilerClass(self.prog)
      learner = TESTED_LEARNERS[compilerClass](self.prog,xc=xc,rate=0.1,epochs=20)
      Expt({'prog':self.prog,
            'trainData':dataset.Dataset({mode:X},{mode:Y}),
            'testData':dataset.Dataset({mode:TX},{mode:TY}),
            'targetMode':mode,
            'learner':learner
      }).run()
  @unittest.skipUnless(xctargets.tf,"Tensorflow not available")
  def testExpt(self):
    """Run the tensorflow compilers' built-in runExpt pipeline."""
    mode = declare.ModeDeclaration('predict(i,o)')
    X,Y = testtensorlog.matrixAsTrainingData(self.labeledData,'train',2)
    TX,TY = testtensorlog.matrixAsTrainingData(self.labeledData,'test',2)
    for compilerClass in [tensorflowxcomp.DenseMatDenseMsgCrossCompiler,
                          tensorflowxcomp.SparseMatDenseMsgCrossCompiler]:
      xc = compilerClass(self.prog)
      xc.runExpt(
          prog=self.prog,
          trainData=dataset.Dataset({mode:X},{mode:Y}),
          testData=dataset.Dataset({mode:TX},{mode:TY}),
          targetMode=mode)
      close_cross_compiler(xc)
class TestXCOpGen(unittest.TestCase):
  """Tests for the cross-compiler's possibleOps API (typed and untyped)."""
  # TODO tests for other xcompilers?
  @unittest.skipUnless(xctargets.tf,"Tensorflow not available")
  def testTCToyTypes(self):
    """With typing enabled, possibleOps is filtered by declared argument
    types: doc -> word -> labelWordPair -> word."""
    matrixdb.conf.ignore_types = False
    tlog = simple.Compiler(
        db=os.path.join(testtensorlog.TEST_DATA_DIR,"textcattoy3.cfacts"),
        prog=os.path.join(testtensorlog.TEST_DATA_DIR,"textcat3.ppr"))
    trainData = tlog.load_small_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,"toytrain.exam"))
    mode = list(trainData.keys())[0]
    docs,labels = trainData[mode]
    xc = tlog.get_cross_compiler()
    ops = xc.possibleOps(docs,'doc')
    print('doc ops',ops)
    self.assertTrue(len(ops)==1)
    (words,wordType) = ops[0]
    self.assertTrue(wordType=='word')
    ops = xc.possibleOps(words,'word')
    self.assertTrue(len(ops)==3)
    pairs = None
    # find the op producing labelWordPair-typed output
    for (expr,exprType) in ops:
      if exprType=='labelWordPair':
        pairs = expr
        break
    self.assertTrue(pairs is not None)
    ops = xc.possibleOps(pairs,'labelWordPair')
    self.assertTrue(len(ops)==2)
    for (expr,exprType) in ops:
      self.assertTrue(exprType=='word')
    close_cross_compiler(xc)
  @unittest.skipUnless(xctargets.tf,"Tensorflow not available")
  def testTCToyIgnoringTypes(self):
    """With typing disabled, possibleOps returns bare tensors - two per
    binary predicate (forward and transposed)."""
    matrixdb.conf.ignore_types = True
    tlog = simple.Compiler(
        db=os.path.join(testtensorlog.TEST_DATA_DIR,"textcattoy3.cfacts"),
        prog=os.path.join(testtensorlog.TEST_DATA_DIR,"textcat3.ppr"))
    trainData = tlog.load_small_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,"toytrain.exam"))
    mode = list(trainData.keys())[0]
    docs,labels = trainData[mode]
    xc = tlog.get_cross_compiler()
    ops = xc.possibleOps(docs)
    binary_predicates = [functor for (functor,arity) in tlog.db.matEncoding if arity==2]
    self.assertTrue(len(ops) == len(binary_predicates)*2)
    for x in ops:
      # ops should just be tensors
      self.assertFalse(isinstance(x,tuple))
    close_cross_compiler(xc)
class TestXCExpt(unittest.TestCase):
  """End-to-end experiment tests on the text-categorization toy data,
  both through the generic Expt scaffold and the tensorflow compilers'
  runExpt pipeline, with and without type declarations."""
  def testTCToyTypes_wscaffold(self):
    """Typed data through the Expt scaffold for each tested compiler."""
    matrixdb.conf.ignore_types = False
    optdict,args = comline.parseCommandLine(
        ["--db", os.path.join(testtensorlog.TEST_DATA_DIR,"textcattoy3.cfacts"),
         "--prog", os.path.join(testtensorlog.TEST_DATA_DIR,"textcat3.ppr"),
         "--trainData", os.path.join(testtensorlog.TEST_DATA_DIR,"toytrain.exam"),
         "--testData", os.path.join(testtensorlog.TEST_DATA_DIR,"toytest.exam"),
         "--proppr"])
    optdict['prog'].setAllWeights()
    for compilerClass in TESTED_COMPILERS:
      xc = compilerClass(optdict['prog'])
      learner = TESTED_LEARNERS[compilerClass](optdict['prog'],xc)
      Expt({
          'prog':optdict['prog'],
          'trainData':optdict['trainData'],
          'testData':optdict['testData'],
          'learner':learner,
          'targetMode':declare.asMode("predict/io")
      }).run()
      pbDoc = xc.db.onehot('pb','doc')
      self.checkXC(xc,'predict/io',pbDoc,{'negPair':115,'posPair':115,'hasWord':59,'weighted':115,'label':5})
      # some checks on the output of pprint
      lines = xc.pprint('predict/io')
      self.assertTrue(lines[0].find("SoftMaxFunction") >= 0)
      self.assertTrue(lines[1].find("SumFunction") >= 0)
      self.assertEqual(len(lines), 16)
      # some checks on misc xcomp API
      self.assertEqual(xc.inferenceOutputType('predict/io'),'label')
      pbId = xc.asSymbolId('pb',typeName='doc')
      pbSym = xc.asSymbol(pbId,typeName='doc')
      self.assertEqual(pbSym,'pb')
      self.assertEqual(xc.asSymbolId('this does not appear in the data',typeName='doc'), -1)
  @unittest.skipUnless(xctargets.tf,"Tensorflow not available")
  def testTCToyTypes(self):
    """Typed data through the tensorflow compilers' runExpt pipeline;
    also checks that exactly the marked parameters are trainable."""
    matrixdb.conf.ignore_types = False
    optdict,args = comline.parseCommandLine(
        ["--db", os.path.join(testtensorlog.TEST_DATA_DIR,"textcattoy3.cfacts"),
         "--prog", os.path.join(testtensorlog.TEST_DATA_DIR,"textcat3.ppr"),
         "--trainData", os.path.join(testtensorlog.TEST_DATA_DIR,"toytrain.exam"),
         "--testData", os.path.join(testtensorlog.TEST_DATA_DIR,"toytest.exam"),
         "--proppr"])
    for compilerClass in [tensorflowxcomp.DenseMatDenseMsgCrossCompiler,
                          tensorflowxcomp.SparseMatDenseMsgCrossCompiler]:
      xc = compilerClass(optdict['prog'])
      xc.runExpt(
          prog=optdict['prog'],
          trainData=optdict['trainData'],
          testData=optdict['testData'],
          targetMode=declare.asMode("predict/io"))
      # check trainability
      for (functor,arity) in xc.db.matEncoding:
        v = xc.parameterFromDBToVariable(functor,arity)
        if v is not None:
          vIsTrainable = (v in tf.trainable_variables())
          vIsParameter = ((functor,arity) in xc.db.paramSet)
          self.assertEqual(vIsTrainable,vIsParameter)
      pbDoc = xc.db.onehot('pb','doc')
      self.checkXC(xc,'predict/io',pbDoc,{'negPair':115,'posPair':115,'hasWord':59,'weighted':115,'label':5})
      # some checks on the output of pprint
      lines = xc.pprint('predict/io')
      self.assertTrue(lines[0].find("SoftMaxFunction") >= 0)
      self.assertTrue(lines[1].find("SumFunction") >= 0)
      self.assertEqual(len(lines), 16)
      # some checks on misc xcomp API
      self.assertEqual(xc.inferenceOutputType('predict/io'),'label')
      pbId = xc.asSymbolId('pb',typeName='doc')
      pbSym = xc.asSymbol(pbId,typeName='doc')
      self.assertEqual(pbSym,'pb')
      self.assertEqual(xc.asSymbolId('this does not appear in the data',typeName='doc'), -1)
      close_cross_compiler(xc)
  def testTCToyIgnoringTypes_wscaffold(self):
    """Untyped data through the Expt scaffold: every predicate shares one
    symbol space, so all matrices have the same column count (191)."""
    matrixdb.conf.ignore_types = True
    optdict,args = comline.parseCommandLine(
        ["--db", os.path.join(testtensorlog.TEST_DATA_DIR,"textcattoy3.cfacts"),
         "--prog", os.path.join(testtensorlog.TEST_DATA_DIR,"textcat3.ppr"),
         "--trainData", os.path.join(testtensorlog.TEST_DATA_DIR,"toytrain.exam"),
         "--testData", os.path.join(testtensorlog.TEST_DATA_DIR,"toytest.exam"),
         "--proppr"])
    optdict['prog'].setAllWeights()
    for compilerClass in TESTED_COMPILERS:
      xc = compilerClass(optdict['prog'])
      learner = TESTED_LEARNERS[compilerClass](optdict['prog'],xc)
      Expt({
          'prog':optdict['prog'],
          'trainData':optdict['trainData'],
          'testData':optdict['testData'],
          'learner':learner,
          'targetMode':declare.asMode("predict/io")
      }).run()
      pbDoc = xc.db.onehot('pb')
      self.checkXC(xc,'predict/io',pbDoc,collections.defaultdict(lambda:191))
  @unittest.skipUnless(xctargets.tf,"Tensorflow not available")
  def testTCToyIgnoringTypes(self):
    """Untyped data through the tensorflow compilers' runExpt pipeline."""
    matrixdb.conf.ignore_types = True
    optdict,args = comline.parseCommandLine(
        ["--db", os.path.join(testtensorlog.TEST_DATA_DIR,"textcattoy3.cfacts"),
         "--prog", os.path.join(testtensorlog.TEST_DATA_DIR,"textcat3.ppr"),
         "--trainData", os.path.join(testtensorlog.TEST_DATA_DIR,"toytrain.exam"),
         "--testData", os.path.join(testtensorlog.TEST_DATA_DIR,"toytest.exam"),
         "--proppr"])
    for compilerClass in [tensorflowxcomp.DenseMatDenseMsgCrossCompiler,
                          tensorflowxcomp.SparseMatDenseMsgCrossCompiler]:
      xc = compilerClass(optdict['prog'])
      xc.runExpt(
          prog=optdict['prog'],
          trainData=optdict['trainData'],
          testData=optdict['testData'],
          targetMode=declare.asMode("predict/io"))
      pbDoc = xc.db.onehot('pb')
      self.checkXC(xc,'predict/io',pbDoc,collections.defaultdict(lambda:191))
      close_cross_compiler(xc)
  def checkXC(self,xc,mode,rawInput,expectedCols):
    """Assert every DB matrix and the inference output have the column
    counts given in expectedCols (keyed by functor, plus 'label')."""
    print('matrixdb.conf.ignore_types',matrixdb.conf.ignore_types)
    db = xc.db
    for (functor,arity),mat in list(db.matEncoding.items()):
      print(functor,arity,'shape',mat.shape)
      r,c = mat.shape
      self.assertEqual(c,expectedCols[functor])
    inferenceFun = xc.inferenceFunction(mode)
    y = inferenceFun(rawInput)
    r,c = y.shape
    self.assertEqual(c,expectedCols['label'])
class TestMultiModeXC(unittest.TestCase):
  """Tests learning a dataset with multiple target modes, both through the
  Expt scaffold and through a hand-written tensorflow training loop."""
  def setUp(self):
    # matchtoy has several learnable modes in one dataset
    self.db = matrixdb.MatrixDB.loadFile(
        os.path.join(testtensorlog.TEST_DATA_DIR,'matchtoy.cfacts'))
    self.prog = program.ProPPRProgram.loadRules(
        os.path.join(testtensorlog.TEST_DATA_DIR,"matchtoy.ppr"),db=self.db)
    self.dset = dataset.Dataset.loadExamples(
        self.db, os.path.join(testtensorlog.TEST_DATA_DIR,'matchtoy-train.exam'),proppr=False)
    self.prog.setAllWeights()
  def testInScaffold(self):
    """Multi-mode training via the Expt scaffold for each compiler."""
    print(TESTED_COMPILERS)
    self.assertTrue(self.dset.modesToLearn() > 1)
    self.prog.setAllWeights()
    for compilerClass in TESTED_COMPILERS:
      print(compilerClass)
      xc = compilerClass(self.prog)
      # compile everything
      for mode in self.dset.modesToLearn():
        xc.ensureCompiled(mode)
      learner = TESTED_LEARNERS[compilerClass](self.prog,xc)
      testAcc,testXent = Expt({
          'prog':self.prog,
          'trainData':self.dset,
          'testData':self.dset,
          'learner':learner,
          'savedTestPredictions':'TestMultiModeXC.testInScaffold.%s.solutions.txt'%compilerClass.__name__
      }).run()
      print(testAcc)
  @unittest.skipUnless(xctargets.tf,"Tensorflow not available")
  def testIt(self):
    """Multi-mode training via a hand-built tensorflow session loop."""
    self.assertTrue(self.dset.modesToLearn() > 1)
    for compilerClass in [tensorflowxcomp.DenseMatDenseMsgCrossCompiler,
                          tensorflowxcomp.SparseMatDenseMsgCrossCompiler]:
      xc = compilerClass(self.prog)
      # compile everything
      for mode in self.dset.modesToLearn():
        xc.ensureCompiled(mode,inputs=None)
      # check the variables
      optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
      session = tf.Session()
      session.run(tf.global_variables_initializer())
      # set up for training
      trainStep = {}
      for mode in self.dset.modesToLearn():
        (dataLossArgs,dataLossExpr) = xc.dataLoss(mode)
        trainStep[mode] = optimizer.minimize(dataLossExpr, var_list=xc.getParamVariables(mode))
      # train
      for i in range(2): #epochs
        for mode in self.dset.modesToLearn():
          X = self.dset.getX(mode)
          Y = self.dset.getY(mode)
          fd = xc.getFeedDict(mode,X,Y,wrapped=False)
          session.run(trainStep[mode],feed_dict=fd)
      # test
      for mode in self.dset.modesToLearn():
        X = self.dset.getX(mode)
        Y = self.dset.getY(mode)
        Y_ = xc.inferenceFunction(mode)(X)
        acc = xc.accuracy(mode,X,Y)
        print('mode',mode,'acc',acc)
      session.close()
      close_cross_compiler(xc)
class TestMatParams(unittest.TestCase):
  """Tests learning of matrix-valued (arity-2) parameters."""
  def setUp(self):
    # scratch directory for any files the tests write
    self.cacheDir = tempfile.mkdtemp()
  def tearDown(self):
    # remove the scratch directory created in setUp (previously leaked:
    # there was no tearDown, so every run left a temp dir behind)
    import shutil
    shutil.rmtree(self.cacheDir, ignore_errors=True)
  def cacheFile(self,fileName):
    """Return the path of fileName inside this test's scratch directory."""
    return os.path.join(self.cacheDir,fileName)
  def testMToyMatParam(self):
    """Train the matchtoy program with 'dabbrev' marked as a (matrix)
    parameter and export the learned values back into the DB."""
    tlog = simple.Compiler(
        db=os.path.join(testtensorlog.TEST_DATA_DIR,"matchtoy.cfacts"),
        prog=os.path.join(testtensorlog.TEST_DATA_DIR,"matchtoy.ppr"))
    trainData = tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,"matchtoy-train.exam"))
    tlog.db.markAsParameter('dabbrev',2)
    factDict = tlog.db.matrixAsPredicateFacts('dabbrev',2,tlog.db.matEncoding[('dabbrev',2)])
    print('before learning',len(factDict),'dabbrevs')
    self.assertTrue(len(factDict)==5)
    for f in sorted(factDict.keys()):
      print('>',str(f),factDict[f])
    # expt pipeline: build loss + SGD step and run a few epochs
    mode = list(trainData.keys())[0]
    TX,TY = trainData[mode]
    inference = tlog.inference(mode)
    trueY = tf.placeholder(tf.float32, shape=TY.shape, name='tensorlog/trueY')
    loss = tlog.loss(mode)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
    train_step = optimizer.minimize(loss)
    train_batch_fd = {tlog.input_placeholder_name(mode):TX, tlog.target_output_placeholder_name(mode):TY}
    session = tf.Session()
    session.run(tf.global_variables_initializer())
    for i in range(5):
      print('epoch',i+1)
      session.run(train_step, feed_dict=train_batch_fd)
    # copy the learned variable values back into the tensorlog DB
    tlog.set_all_db_params_to_learned_values(session)
    # release the session (previously left open)
    session.close()
    # params = {'prog':prog,'trainData':trainData, 'testData':testData}
    # result = expt.Expt(params).run()
    # factDict = db.matrixAsPredicateFacts('dabbrev',2,db.matEncoding[('dabbrev',2)])
    # print 'after learning',len(factDict),'dabbrevs'
    # for f in sorted(factDict.keys()):
    #   print '>',str(f),factDict[f]
    # self.assertTrue(len(factDict)>5)
@unittest.skipUnless(xctargets.tf,"Tensorflow not available")
class TestSimple(unittest.TestCase):
  """Tests for the simple.Compiler / simple.Builder convenience APIs."""
  def testEmptyRules(self):
    """Constructing a Compiler with a db but no program should not raise."""
    # should not throw an error
    tlog = simple.Compiler(
        db=os.path.join(testtensorlog.TEST_DATA_DIR,"textcattoy3.cfacts"))
  def testIncrementalDBLoad(self):
    """A db built up from several .cfacts files via 'b.db +=' should hold
    the expected matrices."""
    b = simple.Builder()
    predict,label,hasWord,posPair,negPair = b.predicates("predict,label,hasWord,posPair,negPair")
    doc_t,label_t,word_t,labelWordPair_t = b.types("doc_t,label_t,word_t,labelWordPair_t")
    b.schema += predict(doc_t,label_t) & label(label_t)
    b.schema += hasWord(doc_t,word_t) & posPair(word_t,labelWordPair_t) & negPair(word_t,labelWordPair_t)
    for basename in "textcattoy_corpus.cfacts textcattoy_labels.cfacts textcattoy_pairs.cfacts".split(" "):
      b.db += os.path.join(testtensorlog.TEST_DATA_DIR, basename)
    tlog = simple.Compiler(db=b.db)
    # spot-check nonzero counts of a few loaded matrices
    for (functor,arity,nnz) in [('hasWord',2,99),('label',1,2),('negPair',2,56)]:
      m = tlog.db.matEncoding[(functor,arity)]
      self.assertTrue(m.nnz == nnz)
  def testBatch(self):
    """Full-batch training, then a round-trip serialization of the learned
    db into a fresh graph, preserving test accuracy."""
    tlog = simple.Compiler(
        db=os.path.join(testtensorlog.TEST_DATA_DIR,"textcattoy3.cfacts"),
        prog=os.path.join(testtensorlog.TEST_DATA_DIR,"textcat3.ppr"))
    trainData = tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,"toytrain.exam"))
    testData = tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,"toytest.exam"))
    mode = list(trainData.keys())[0]
    TX,TY = trainData[mode]
    UX,UY = testData[mode]
    inference = tlog.inference(mode)
    trueY = tf.placeholder(tf.float32, shape=UY.shape, name='tensorlog/trueY')
    correct = tf.equal(tf.argmax(trueY,1), tf.argmax(inference,1))
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    test_batch_fd = {tlog.input_placeholder_name(mode):UX, trueY.name:UY}
    loss = tlog.loss(mode)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
    train_step = optimizer.minimize(loss)
    train_batch_fd = {tlog.input_placeholder_name(mode):TX, tlog.target_output_placeholder_name(mode):TY}
    session = tf.Session()
    session.run(tf.global_variables_initializer())
    acc0 = session.run(accuracy, feed_dict=test_batch_fd)
    print('initial accuracy',acc0)
    self.assertTrue(acc0<0.6)
    for i in range(10):
      print('epoch',i+1)
      session.run(train_step, feed_dict=train_batch_fd)
    acc1 = session.run(accuracy, feed_dict=test_batch_fd)
    print('final accuracy',acc1)
    self.assertTrue(acc1>=0.9)
    # test a round-trip serialization
    # saves the db
    cacheDir = tempfile.mkdtemp()
    db_file = os.path.join(cacheDir,'simple.db')
    tlog.set_all_db_params_to_learned_values(session)
    tlog.serialize_db(db_file)
    # load everything into a new graph and don't reset the learned params
    new_graph = tf.Graph()
    with new_graph.as_default():
      tlog2 = simple.Compiler(
          db=db_file,
          prog=os.path.join(testtensorlog.TEST_DATA_DIR,"textcat3.ppr"),
          autoset_db_params=False)
      # reconstruct the accuracy measure
      inference2 = tlog2.inference(mode)
      trueY2 = tf.placeholder(tf.float32, shape=UY.shape, name='tensorlog/trueY2')
      correct2 = tf.equal(tf.argmax(trueY2,1), tf.argmax(inference2,1))
      accuracy2 = tf.reduce_mean(tf.cast(correct2, tf.float32))
      # eval accuracy in a new session
      session2 = tf.Session()
      session2.run(tf.global_variables_initializer())
      test_batch_fd2 = {tlog2.input_placeholder_name(mode):UX, trueY2.name:UY}
      acc3 = session2.run(accuracy2, feed_dict=test_batch_fd2)
      print('accuracy after round-trip serialization',acc3)
      self.assertTrue(acc3>=0.9)
    session.close()
  def testMinibatch(self):
    """Minibatch training via tlog.minibatches."""
    tlog = simple.Compiler(
        db=os.path.join(testtensorlog.TEST_DATA_DIR,"textcattoy3.cfacts"),
        prog=os.path.join(testtensorlog.TEST_DATA_DIR,"textcat3.ppr"))
    self.runTextCatLearner(tlog)
  def runTextCatLearner(self,tlog):
    """Train the text-categorization program for 10 epochs with batch_size 2
    and assert accuracy goes from below 0.6 to at least 0.9."""
    trainData = tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,"toytrain.exam"))
    testData = tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,"toytest.exam"))
    mode = list(trainData.keys())[0]
    UX,UY = testData[mode]
    inference = tlog.inference(mode)
    trueY = tf.placeholder(tf.float32, shape=UY.shape, name='tensorlog/trueY')
    correct = tf.equal(tf.argmax(trueY,1), tf.argmax(inference,1))
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    test_batch_fd = {tlog.input_placeholder_name(mode):UX, trueY.name:UY}
    loss = tlog.loss(mode)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
    train_step = optimizer.minimize(loss)
    session = tf.Session()
    session.run(tf.global_variables_initializer())
    acc0 = session.run(accuracy, feed_dict=test_batch_fd)
    print('initial accuracy',acc0)
    self.assertTrue(acc0<0.6)
    for i in range(10):
      print('epoch',i+1, end=' ')
      for mode,(TX,TY) in tlog.minibatches(trainData,batch_size=2):
        print('.', end=' ')
        train_minibatch_fd = {tlog.input_placeholder_name(mode):TX, tlog.target_output_placeholder_name(mode):TY}
        session.run(train_step, feed_dict=train_minibatch_fd)
      print('epoch',i+1,'finished')
    acc1 = session.run(accuracy, feed_dict=test_batch_fd)
    print('final accuracy',acc1)
    self.assertTrue(acc1>=0.9)
    session.close()
  def testBuilder1(self):
    """Rules built with the Builder DSL should pretty-print as expected,
    including ProPPR-style feature annotations after '//'."""
    b = simple.Builder()
    X,Y,Z = b.variables("X Y Z")
    aunt,parent,sister,wife = b.predicates("aunt parent sister wife")
    uncle = b.predicate("uncle")
    b += aunt(X,Y) <= uncle(X,Z) & wife(Z,Y)
    b += aunt(X,Y) <= parent(X,Z) & sister(Z,Y)
    r1 = b.rule_id("ruleid_t","r1")
    r2 = b.rule_id("ruleid_t","r2")
    b += aunt(X,Y) <= uncle(X,Z) & wife(Z,Y) // r1
    b += aunt(X,Y) <= parent(X,Z) & sister(Z,Y) // r2
    feature,description = b.predicates("feature description")
    weight = b.predicate("weight")
    F = b.variable("F")
    D = b.variable("D")
    b += aunt(X,Y) <= uncle(X,Z) & wife(Z,Y) // (weight(F) | description(X,D) & feature(X,F))
    b.rules.listing()
    rs = b.rules.rulesFor(parser.Goal('aunt',[X,Y]))
    self.assertEqual(str(rs[0]), "aunt(X,Y) :- uncle(X,Z), wife(Z,Y).")
    self.assertEqual(str(rs[1]), "aunt(X,Y) :- parent(X,Z), sister(Z,Y).")
    self.assertEqual(str(rs[2]), "aunt(X,Y) :- uncle(X,Z), wife(Z,Y) {weight(R1) : assign(R1,r1,ruleid_t)}.")
    self.assertEqual(str(rs[3]), "aunt(X,Y) :- parent(X,Z), sister(Z,Y) {weight(R2) : assign(R2,r2,ruleid_t)}.")
    self.assertEqual(str(rs[4]), "aunt(X,Y) :- uncle(X,Z), wife(Z,Y) {weight(F) : description(X,D),feature(X,F)}.")
  def testBuilder2(self):
    """A Builder-defined program should train as well as the file-based one."""
    b = simple.Builder()
    predict,assign,weighted,hasWord,posPair,negPair = b.predicates("predict assign weighted hasWord posPair negPair")
    X,Pos,Neg,F,W = b.variables("X Pos Neg F W")
    b += predict(X,Pos) <= assign(Pos,'pos','label') // (weighted(F) | hasWord(X,W) & posPair(W,F))
    b += predict(X,Neg) <= assign(Neg,'neg','label') // (weighted(F) | hasWord(X,W) & negPair(W,F))
    dbSpec = os.path.join(testtensorlog.TEST_DATA_DIR,"textcattoy3.cfacts")
    self.runTextCatLearner(simple.Compiler(db=dbSpec,prog=b.rules))
  def testBuilder3(self):
    """A Builder-declared schema applied to untyped facts should still train."""
    b = simple.Builder()
    predict,assign,weighted,hasWord,posPair,negPair,label = b.predicates("predict assign weighted hasWord posPair negPair label")
    doc_t,label_t,word_t,labelWordPair_t = b.types("doc_t label_t word_t labelWordPair_t")
    b.schema += predict(doc_t,label_t)
    b.schema += hasWord(doc_t,word_t)
    b.schema += posPair(word_t,labelWordPair_t)
    b.schema += negPair(word_t,labelWordPair_t)
    b.schema += label(label_t)
    X,Pos,Neg,F,W = b.variables("X Pos Neg F W")
    b.rules += predict(X,Pos) <= assign(Pos,'pos','label_t') // (weighted(F) | hasWord(X,W) & posPair(W,F))
    b.rules += predict(X,Neg) <= assign(Neg,'neg','label_t') // (weighted(F) | hasWord(X,W) & negPair(W,F))
    # use the untyped version of the facts to make sure the schema works
    b.db = os.path.join(testtensorlog.TEST_DATA_DIR,"textcattoy.cfacts")
    self.runTextCatLearner(simple.Compiler(db=b.db, prog=b.rules))
class TestReparameterizationAndTypedLoading(unittest.TestCase):
  """Regression test: DB parameter values survive a round trip through the
  tensorflow cross-compiler's parameter export."""
  def testBugWasFixed(self):
    # use the untyped version of the facts to make sure the schema works
    db = matrixdb.MatrixDB()
    factLines = ["# :- r(lo_or_hi_t)\n",
                 "\t".join("r low 0.1".split()) + "\n",
                 "\t".join("r hi 0.9".split()) + "\n"]
    db.addLines(factLines)
    db.markAsParameter('r',1)
    prog = program.Program(db=db)
    rType = db.schema.getArgType("r",1,0)
    lowId = db.schema.getId(rType,"low")
    hiId = db.schema.getId(rType,"hi")
    # the initial encoding holds the declared weights
    before = db.matEncoding[('r',1)]
    self.approxEqual(before[0,lowId], 0.1)
    self.approxEqual(before[0,hiId], 0.9)
    xc = tensorflowxcomp.SparseMatDenseMsgCrossCompiler(prog)
    v_r = xc._vector(declare.asMode("r(i)"))
    session = tf.Session()
    session.run(tf.global_variables_initializer())
    xc.exportAllLearnedParams()
    print('exported to xc',db.matEncoding[('r',1)])
    # exporting untouched params back must leave the values unchanged
    after = db.matEncoding[('r',1)]
    self.approxEqual(after[0,lowId], 0.1)
    self.approxEqual(after[0,hiId], 0.9)
  def approxEqual(self,a,b):
    """Assert that a and b differ by strictly less than 1e-4."""
    self.assertLess(abs(float(a)-b), 0.0001)
class TestPlugins(unittest.TestCase):
  """End-to-end tests for user-defined plugin (UDP) operators.

  Each test builds a small ProPPR text-classification program whose rules
  call a plugin predicate, then checks inference only (check_udp) or
  training improvement (check_learning_with_udp) through the Tensorflow
  cross-compiler.
  """
  def test_identity_io(self):
    # Identity plugin declared with mode 'io' (input, output).
    ruleStrings = ['predict(X,Y) :- assign(Pos,pos,label),udp1(Pos,Y) {weighted(F): hasWord(X,W),posPair(W,F)}.',
                   'predict(X,Y) :- assign(Neg,neg,label),udp1(Neg,Y) {weighted(F): hasWord(X,W),negPair(W,F)}.']
    plugins = program.Plugins()
    plugins.define('udp1/io', lambda x:x, lambda inputType:'label')
    self.check_learning_with_udp(ruleStrings,plugins)
  def test_identity_oi(self):
    # Same identity plugin but with the reversed mode 'oi'.
    ruleStrings = ['predict(X,Y) :- assign(Pos,pos,label),udp2(Y,Pos) {weighted(F): hasWord(X,W),posPair(W,F)}.',
                   'predict(X,Y) :- assign(Neg,neg,label),udp2(Y,Neg) {weighted(F): hasWord(X,W),negPair(W,F)}.']
    plugins = program.Plugins()
    plugins.define('udp2/oi', lambda x:x, lambda inputType:'label')
    self.check_learning_with_udp(ruleStrings,plugins)
  def test_double_io1(self):
    # Plugin that doubles its input, applied to the label position.
    ruleStrings = ['predict(X,Y) :- assign(Pos,pos,label),udp3(Pos,Y) {weighted(F): hasWord(X,W),posPair(W,F)}.',
                   'predict(X,Y) :- assign(Neg,neg,label),udp3(Neg,Y) {weighted(F): hasWord(X,W),negPair(W,F)}.']
    plugins = program.Plugins()
    plugins.define('udp3/io', lambda x:2*x, lambda inputType:'label')
    self.check_learning_with_udp(ruleStrings,plugins)
  def test_double_io2(self):
    # Doubling plugin used inside the rule features; output type equals
    # the input type, so the same plugin serves both rules.
    ruleStrings = ['predict(X,Pos) :- assign(Pos,pos,label) {weighted(F): hasWord(X,W),double(W,W2),posPair(W2,F)}.',
                   'predict(X,Neg) :- assign(Neg,neg,label) {weighted(F2): hasWord(X,W),negPair(W,F),double(F,F2)}.']
    plugins = program.Plugins()
    plugins.define('double/io', lambda x:2*x, lambda inputType:inputType)
    self.check_learning_with_udp(ruleStrings,plugins)
  def test_kw_i(self):
    # Input-only ('i' mode) keyword plugins backed by fixed dense vectors.
    ruleStrings = ['predict(X,Pos) :- assign(Pos,pos,label),hasWord(X,W),poskw(W).',
                   'predict(X,Neg) :- assign(Neg,neg,label),hasWord(X,W),negkw(W).']
    plugins = program.Plugins()
    db = matrixdb.MatrixDB.loadFile(os.path.join(testtensorlog.TEST_DATA_DIR,"textcattoy3.cfacts"))
    poskw_v = (db.onehot('little','word') + db.onehot('red','word')).todense()
    negkw_v = (db.onehot('big','word') + db.onehot('job','word') + db.onehot('huge','word')).todense()
    plugins.define('poskw/i', lambda:poskw_v, lambda:'word')
    plugins.define('negkw/i', lambda:negkw_v, lambda:'word')
    self.check_udp(ruleStrings,plugins)
  def check_udp(self,ruleStrings,plugins):
    """Compile the rules with the given plugins and run inference only,
    printing the resulting accuracy on the toy test set (no training)."""
    db = matrixdb.MatrixDB.loadFile(os.path.join(testtensorlog.TEST_DATA_DIR,"textcattoy3.cfacts"))
    rules = testtensorlog.rules_from_strings(ruleStrings)
    prog = program.ProPPRProgram(rules=rules,db=db,plugins=plugins)
    mode = declare.asMode("predict/io")
    prog.compile(mode)
    fun = prog.function[(mode,0)]
    print("\n".join(fun.pprint()))
    tlog = simple.Compiler(db=db, prog=prog)
    testData = tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,"toytest.exam"))
    mode = list(testData.keys())[0]
    UX,UY = testData[mode]
    inference = tlog.inference(mode)
    trueY = tf.placeholder(tf.float32, shape=UY.shape, name='tensorlog/trueY')
    # Accuracy = fraction of examples whose argmax prediction matches.
    correct = tf.equal(tf.argmax(trueY,1), tf.argmax(inference,1))
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    test_batch_fd = {tlog.input_placeholder_name(mode):UX, trueY.name:UY}
    session = tf.Session()
    session.run(tf.global_variables_initializer())
    acc1 = session.run(accuracy, feed_dict=test_batch_fd)
    print('final accuracy',acc1)
    session.close()
  # TOFIX needs some work to pass
  # - you can't do polytree BP with multiple inputs
  # - so there's not a simple fix
  # - probably do this: (1) treat inputs to leftmost userDef as outputs (2) run message-passing for those outputs
  #   (3) add the user def operator (4) repeat .... (5) when there are no more plugins
  def notest_isect_iio(self):
    # Deliberately named notest_* so unittest does not collect it (see TOFIX above).
    bpcompiler.conf.trace = True
    ruleStrings = ['predict(X,Y) :- hasWord(X,W),posPair(W,P1),negPair(W,P2),isect(P1,P2,Y).']
    plugins = program.Plugins()
    plugins.define('isect/iio', lambda x1,x2:x1*x2, lambda t1,t2:t1)
    self.assertTrue(plugins.isDefined(declare.asMode('isect/iio')))
    self.check_learning_with_udp(ruleStrings,plugins)
  def argmax(self):
    # NOTE(review): not named test_*, so unittest never runs this check.
    bpcompiler.conf.trace = True
    ruleStrings = ['predict(X,Y):-olympics(X,Z),nations(Z),argmax(Z,Y).']
    plugins = program.Plugins()
    plugins.define('argmax/io',lambda x1:tf.nn.softmax(x1), lambda t1:t1)
    db = matrixdb.MatrixDB.loadFile(os.path.join(testtensorlog.TEST_DATA_DIR,'argmax.cfacts'))
    rules = testtensorlog.rules_from_strings(ruleStrings)
    prog = program.ProPPRProgram(rules=rules,db=db,plugins=plugins)
    prog.setAllWeights()
    mode = declare.asMode("predict/io")
    prog.compile(mode)
    fun = prog.function[(mode,0)]
    print("\n".join(fun.pprint()))
    tlog = simple.Compiler(db=db, prog=prog)
    data = tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,"argmax.exam"))
    mode = list(data.keys())[0]
    UX,UY = data[mode]
    inference = tlog.inference(mode)
    trueY = tf.placeholder(tf.float32, shape=UY.shape, name='tensorlog/trueY')
    correct = tf.equal(tf.argmax(trueY,1), tf.argmax(inference,1))
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    test_batch_fd = {tlog.input_placeholder_name(mode):UX, trueY.name:UY}
    session = tf.Session()
    session.run(tf.global_variables_initializer())
    acc0 = session.run(accuracy, feed_dict=test_batch_fd)
    print('initial accuracy',acc0)
    self.assertTrue(acc0>0.9)
    session.close()
    # acc0 = session.run(inference, feed_dict=test_batch_fd)
    # print "inference results:"
    # print acc0
    # print np.argmax(acc0,1)
    # print "trueY:"
    # print UY
    # print np.argmax(UY,1)
  # NOTE(review): this decorator sits on a helper, not a test_* method;
  # when tf is missing the SkipTest it raises propagates out of the
  # calling test, which unittest still reports as a skip -- confirm intended.
  @unittest.skipUnless(xctargets.tf,"Tensorflow not available")
  def check_learning_with_udp(self,ruleStrings,plugins,dbfile=os.path.join(testtensorlog.TEST_DATA_DIR,"textcattoy3.cfacts")):
    """Compile the rules with the given plugins, assert the untrained model
    is weak (<0.6 accuracy), train 10 epochs of gradient descent, and
    assert the trained model reaches >=0.9 accuracy on the toy test set."""
    db = matrixdb.MatrixDB.loadFile(dbfile)
    rules = testtensorlog.rules_from_strings(ruleStrings)
    prog = program.ProPPRProgram(rules=rules,db=db,plugins=plugins)
    prog.setAllWeights()
    mode = declare.asMode("predict/io")
    prog.compile(mode)
    fun = prog.function[(mode,0)]
    print("\n".join(fun.pprint()))
    tlog = simple.Compiler(db=db, prog=prog)
    trainData = tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,"toytrain.exam"))
    testData = tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,"toytest.exam"))
    mode = list(trainData.keys())[0]
    TX,TY = trainData[mode]
    UX,UY = testData[mode]
    inference = tlog.inference(mode)
    trueY = tf.placeholder(tf.float32, shape=UY.shape, name='tensorlog/trueY')
    correct = tf.equal(tf.argmax(trueY,1), tf.argmax(inference,1))
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    test_batch_fd = {tlog.input_placeholder_name(mode):UX, trueY.name:UY}
    loss = tlog.loss(mode)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
    train_step = optimizer.minimize(loss)
    train_batch_fd = {tlog.input_placeholder_name(mode):TX, tlog.target_output_placeholder_name(mode):TY}
    session = tf.Session()
    session.run(tf.global_variables_initializer())
    acc0 = session.run(accuracy, feed_dict=test_batch_fd)
    print('initial accuracy',acc0)
    self.assertTrue(acc0<0.6)
    for i in range(10):
      print('epoch',i+1)
      session.run(train_step, feed_dict=train_batch_fd)
    acc1 = session.run(accuracy, feed_dict=test_batch_fd)
    print('final accuracy',acc1)
    self.assertTrue(acc1>=0.9)
    session.close()
if __name__ == "__main__":
  logging.basicConfig(level=logging.INFO)
  # Default is to test on everything; command-line keywords filter the list
  # (so 'testxcomp.py tensorflow sparse' will run just
  # tensorflowxcomp.SparseMatDenseMsgCrossCompiler).
  keyword_filters = {
      'theano': lambda c: c.__module__.endswith("theanoxcomp"),
      'tensorflow': lambda c: c.__module__.endswith("tensorflowxcomp"),
      'dense': lambda c: c.__name__.startswith("Dense"),
      'sparse': lambda c: c.__name__.startswith("Sparse"),
  }
  for keyword, keep in keyword_filters.items():
    if keyword in sys.argv[1:]:
      TESTED_COMPILERS = [c for c in TESTED_COMPILERS if keep(c)]
  # Strip the filter keywords so unittest.main() doesn't see them.
  sys.argv = [a for a in sys.argv if a not in keyword_filters]
  print('TESTED_COMPILERS',TESTED_COMPILERS)
  unittest.main()
|
checkov/terraform/checks/provider/base_registry.py | pmalkki/checkov | 4,013 | 79654 | <filename>checkov/terraform/checks/provider/base_registry.py<gh_stars>1000+
from typing import Dict, Any, Tuple
from checkov.common.checks.base_check_registry import BaseCheckRegistry
class Registry(BaseCheckRegistry):
    def extract_entity_details(self, entity: Dict[str, Any]) -> Tuple[str, str, Dict[str, Any]]:
        """Split a provider block into (type, name, configuration).

        For providers the "type" and "name" are the same value: the single
        top-level key of *entity* (the original materialized the key list
        twice to compute the identical value).
        """
        provider_name = list(entity.keys())[0]
        provider_configuration = entity[provider_name]
        return provider_name, provider_name, provider_configuration
|
spotpy/examples/hymod_python/hymod.py | cheginit/spotpy | 182 | 79664 | <filename>spotpy/examples/hymod_python/hymod.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
'''
Copyright (c) 2015 by <NAME>
This file is part of Statistical Parameter Estimation Tool (SPOTPY).
:author: <NAME> and <NAME>
:paper: <NAME>., <NAME>., <NAME>. and <NAME>.:
SPOTting Model Parameters Using a Ready-Made Python Package,
PLoS ONE, 10(12), e0145180, doi:10.1371/journal.pone.0145180, 2015.
'''
from numba import jit
def hymod(Precip, PET, cmax, bexp, alpha, Rs, Rq):
    """
    See https://www.proc-iahs.net/368/180/2015/piahs-368-180-2015.pdf for a scientific paper:
    Evaluation of the HYMOD model for rainfall-runoff simulation using the
    GLUE method. Remote Sensing and GIS for Hydrology and Water Resources,
    180 - 185, IAHS Publ. 368. DOI: 10.5194/piahs-368-180-2015.

    :param Precip: sequence of precipitation values, one per timestep
    :param PET: sequence of potential evapotranspiration values, one per timestep
    :param cmax: maximum soil moisture storage capacity
    :param bexp: degree of spatial variability of soil moisture capacity
    :param alpha: fraction of effective rainfall routed to the quick-flow tanks
    :param Rs: slow-tank outflow rate
    :param Rq: quick-tank outflow rate
    :return: Dataset of water in hymod (has to be calculated in litres)
    :rtype: list
    """
    # HYMOD PROGRAM IS SIMPLE RAINFALL RUNOFF MODEL
    x_loss = 0.0
    # Initialize slow tank state to empty.  The original spin-up value
    # 2.3503 / (Rs * 22.5) was dead code (immediately overwritten); an
    # empty tank works when the calibration data starts with low discharge.
    x_slow = 0
    # Initialize state(s) of quick tank(s)
    x_quick = [0, 0, 0]
    output = []
    # START PROGRAMMING LOOP WITH DETERMINING RAINFALL - RUNOFF AMOUNTS
    for t in range(len(Precip)):
        Pval = Precip[t]
        PETval = PET[t]
        # Compute excess precipitation and evaporation
        ER1, ER2, x_loss = excess(x_loss, cmax, bexp, Pval, PETval)
        # Calculate total effective rainfall
        ET = ER1 + ER2
        # Now partition ER between quick and slow flow reservoirs
        UQ = alpha * ET
        US = (1 - alpha) * ET
        # Route slow flow component with single linear reservoir
        x_slow, QS = linres(x_slow, US, Rs)
        # Route quick flow component with three linear reservoirs in series
        inflow = UQ
        for i in range(3):
            x_quick[i], outflow = linres(x_quick[i], inflow, Rq)
            inflow = outflow
        # Compute total flow for timestep
        output.append(QS + outflow)
    return output
@jit
def power(X, Y):
    """Return abs(X) ** Y; abs() guards against invalid overflow with negative bases."""
    return abs(X) ** Y
@jit
def linres(x_slow, inflow, Rs):
    """Single linear reservoir: update storage and return (storage, outflow)."""
    retention = 1 - Rs
    # Same arithmetic (and evaluation order) as the original formulation.
    x_slow = retention * x_slow + retention * inflow
    outflow = (Rs / retention) * x_slow
    return x_slow, outflow
@jit
def excess(x_loss,cmax,bexp,Pval,PETval):
    # this function calculates excess precipitation and evaporation
    # for one timestep, given the previous soil moisture state x_loss.
    # Returns (ER1, ER2, new_state).
    xn_prev = x_loss
    # Soil moisture capacity corresponding to the previous state
    # (Pareto-type storage distribution parameterized by cmax and bexp).
    ct_prev = cmax * (1 - power((1 - ((bexp + 1) * (xn_prev) / cmax)), (1 / (bexp + 1))))
    # Calculate Effective rainfall 1: rain beyond the maximum capacity
    ER1 = max((Pval - cmax + ct_prev), 0.0)
    Pval = Pval - ER1
    # New soil moisture state after absorbing the remaining rain (capped at capacity)
    dummy = min(((ct_prev + Pval) / cmax), 1)
    xn = (cmax / (bexp + 1)) * (1 - power((1 - dummy), (bexp + 1)))
    # Calculate Effective rainfall 2: rain not absorbed by the state change
    ER2 = max(Pval - (xn - xn_prev), 0)
    # Alternative approach
    evap = (1 - (((cmax / (bexp + 1)) - xn) / (cmax / (bexp + 1)))) * PETval  # actual ET is linearly related to the soil moisture state
    xn = max(xn - evap, 0)  # update state
    return ER1,ER2,xn
|
mayan/apps/announcements/permissions.py | nattangwiwat/Mayan-EDMS-recitation | 343 | 79694 | <reponame>nattangwiwat/Mayan-EDMS-recitation
from django.utils.translation import ugettext_lazy as _
from mayan.apps.permissions import PermissionNamespace
# Permission namespace and CRUD-style permissions for the announcements app.
namespace = PermissionNamespace(
    label=_('Announcements'), name='announcements'
)
permission_announcement_create = namespace.add_permission(
    label=_('Create announcements'), name='announcement_create'
)
permission_announcement_delete = namespace.add_permission(
    label=_('Delete announcements'), name='announcement_delete'
)
permission_announcement_edit = namespace.add_permission(
    label=_('Edit announcements'), name='announcement_edit'
)
permission_announcement_view = namespace.add_permission(
    label=_('View announcements'), name='announcement_view'
)
|
# Copyright 2017 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.initializers import Constant
from tensorflow.keras.layers import InputSpec, Layer, Lambda, Dropout, Multiply
# Effectively minus-infinity on the logit scale: softmax maps it to ~0 probability.
INFTY = -100
class Highway(Layer):
    """Highway-style gating layer.

    Computes ``gate * dense(x) + (1 - gate) * x`` where ``gate`` and
    ``dense`` are square affine maps of the input and ``gate`` is passed
    through ``activation``.  An int ``bias_initializer`` is shorthand for a
    ``Constant`` initializer with that value.
    """
    def __init__(self, activation=None, bias_initializer=-1, **kwargs):
        super().__init__(**kwargs)
        self.activation = tf.keras.activations.get(activation)
        self.bias_initializer = bias_initializer
        if isinstance(self.bias_initializer, int):
            # Allow a plain int as shorthand for a constant bias initializer.
            self.bias_initializer = Constant(self.bias_initializer)
        self.input_spec = [InputSpec(min_ndim=2)]
    def build(self, input_shape):
        # Both kernels are square: the layer preserves the last dimension.
        assert len(input_shape) >= 2
        input_dim = input_shape[-1]
        self.gate_kernel = self.add_weight(
            shape=(input_dim, input_dim), initializer='uniform', name='gate_kernel')
        self.gate_bias = self.add_weight(
            shape=(input_dim,), initializer=self.bias_initializer, name='gate_bias')
        self.dense_kernel = self.add_weight(
            shape=(input_dim, input_dim), initializer='uniform', name='dense_kernel')
        self.dense_bias = self.add_weight(
            shape=(input_dim,), initializer=self.bias_initializer, name='dense_bias')
        self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
        self.built = True
    def call(self, inputs, **kwargs):
        # gate = activation(x W_g + b_g); new_value = x W_d + b_d
        gate = K.dot(inputs, self.gate_kernel)
        gate = K.bias_add(gate, self.gate_bias, data_format="channels_last")
        gate = self.activation(gate)
        new_value = K.dot(inputs, self.dense_kernel)
        new_value = K.bias_add(new_value, self.dense_bias, data_format="channels_last")
        # Convex elementwise mixture of the transformed and original input.
        return gate * new_value + (1.0 - gate) * inputs
    def compute_output_shape(self, input_shape):
        # Shape-preserving layer.
        return input_shape
def weighted_sum(first, second, sigma, first_threshold=-np.inf, second_threshold=-np.inf):
    """Elementwise mixture of two logit tensors: sigma*first + (1-sigma)*second.

    Entries where ``first <= first_threshold`` or ``second <= second_threshold``
    are masked to INFTY (probability ~0 after softmax).

    BUG FIX: the default ``second_threshold`` was ``+np.inf``, so
    ``K.greater(second, second_threshold)`` was always false and *every*
    entry was masked whenever the default was used.  Both defaults are now
    ``-np.inf`` (mask nothing), symmetric with ``first_threshold``; the
    in-file caller passes explicit thresholds and is unaffected.
    """
    logit_probs = first * sigma + second * (1.0 - sigma)
    infty_tensor = K.ones_like(logit_probs) * INFTY
    logit_probs = K.switch(K.greater(first, first_threshold), logit_probs, infty_tensor)
    logit_probs = K.switch(K.greater(second, second_threshold), logit_probs, infty_tensor)
    return logit_probs
class WeightedCombinationLayer(Layer):
    """
    A class for weighted combination of probability distributions.

    Takes [first, second, features]: a gate sigma is predicted from
    ``features`` (optionally through an intermediate dense layer and with a
    per-dimension bias), and the output is softmax(sigma*log(first) +
    (1-sigma)*log(second)) with thresholded entries masked to INFTY.
    """
    def __init__(self, first_threshold=None, second_threshold=None,
                 use_dimension_bias=False, use_intermediate_layer=False,
                 intermediate_dim=64, intermediate_activation=None,
                 from_logits=False, return_logits=False,
                 bias_initializer=1.0, **kwargs):
        # if 'input_shape' not in kwargs:
        #     kwargs['input_shape'] = [(None, input_dim,), (None, input_dim)]
        super(WeightedCombinationLayer, self).__init__(**kwargs)
        # Thresholds below which entries are masked out; default INFTY
        # effectively disables masking (see weighted_sum).
        self.first_threshold = first_threshold if first_threshold is not None else INFTY
        self.second_threshold = second_threshold if second_threshold is not None else INFTY
        self.use_dimension_bias = use_dimension_bias
        self.use_intermediate_layer = use_intermediate_layer
        self.intermediate_dim = intermediate_dim
        self.intermediate_activation = tf.keras.activations.get(intermediate_activation)
        # from_logits: inputs are already log-probabilities; return_logits:
        # also return the pre-softmax combination.
        self.from_logits = from_logits
        self.return_logits = return_logits
        self.bias_initializer = bias_initializer
        self.input_spec = [InputSpec(), InputSpec(), InputSpec()]
    def build(self, input_shape):
        # Expects [first, second, features]; first and second share a shape,
        # features agrees with them on all but the last dimension.
        assert len(input_shape) == 3
        assert input_shape[0] == input_shape[1]
        assert input_shape[0][:-1] == input_shape[2][:-1]
        input_dim, features_dim = input_shape[0][-1], input_shape[2][-1]
        if self.use_intermediate_layer:
            self.first_kernel = self.add_weight(
                shape=(features_dim, self.intermediate_dim),
                initializer="random_uniform", name='first_kernel')
            self.first_bias = self.add_weight(
                shape=(self.intermediate_dim,),
                initializer="random_uniform", name='first_bias')
        # Projects features to a scalar gate logit per position.
        self.features_kernel = self.add_weight(
            shape=(features_dim, 1), initializer="random_uniform", name='kernel')
        self.features_bias = self.add_weight(
            shape=(1,), initializer=Constant(self.bias_initializer), name='bias')
        if self.use_dimension_bias:
            self.dimensions_bias = self.add_weight(
                shape=(input_dim,), initializer="random_uniform", name='dimension_bias')
        super(WeightedCombinationLayer, self).build(input_shape)
    def call(self, inputs, **kwargs):
        assert isinstance(inputs, list) and len(inputs) == 3
        first, second, features = inputs[0], inputs[1], inputs[2]
        if not self.from_logits:
            # Clip away zeros before taking logs.
            first = K.clip(first, 1e-10, 1.0)
            second = K.clip(second, 1e-10, 1.0)
            first_, second_ = K.log(first), K.log(second)
        else:
            first_, second_ = first, second
        # embedded_features.shape = (M, T, 1)
        if self.use_intermediate_layer:
            features = K.dot(features, self.first_kernel)
            features = K.bias_add(features, self.first_bias, data_format="channels_last")
            features = self.intermediate_activation(features)
        embedded_features = K.dot(features, self.features_kernel)
        embedded_features = K.bias_add(
            embedded_features, self.features_bias, data_format="channels_last")
        if self.use_dimension_bias:
            # Broadcast the scalar gate logit over the last dimension and
            # add a learned per-dimension bias.
            tiling_shape = [1] * (K.ndim(first) - 1) + [K.shape(first)[-1]]
            embedded_features = K.tile(embedded_features, tiling_shape)
            embedded_features = K.bias_add(
                embedded_features, self.dimensions_bias, data_format="channels_last")
        sigma = K.sigmoid(embedded_features)
        result = weighted_sum(first_, second_, sigma,
                              self.first_threshold, self.second_threshold)
        probs = K.softmax(result)
        if self.return_logits:
            return [probs, result]
        return probs
    def compute_output_shape(self, input_shape):
        first_shape = input_shape[0]
        if self.return_logits:
            return [first_shape, first_shape]
        return first_shape
def TemporalDropout(inputs, dropout=0.0):
    """
    Drops with :dropout probability temporal steps of input 3D tensor
    (i.e. zeros out whole timesteps across all feature dimensions).
    """
    # TO DO: adapt for >3D tensors
    if dropout == 0.0:
        return inputs
    # Build a (batch, time, 1) mask, apply Dropout to it, then tile it
    # across the feature axis so an entire timestep is kept or dropped.
    inputs_func = lambda x: K.ones_like(inputs[:, :, 0:1])
    inputs_mask = Lambda(inputs_func)(inputs)
    inputs_mask = Dropout(dropout)(inputs_mask)
    tiling_shape = [1, 1, K.shape(inputs)[2]] + [1] * (K.ndim(inputs) - 3)
    # NOTE(review): `_keras_shape` is an internal attribute of older
    # Keras/tf.keras versions -- confirm it still exists on upgrade.
    inputs_mask = Lambda(K.tile, arguments={"n": tiling_shape},
                         output_shape=inputs._keras_shape[1:])(inputs_mask)
    answer = Multiply()([inputs, inputs_mask])
    return answer
def positions_func(inputs, pad=0):
    """
    A layer filling i-th column of a 2D tensor with
    1+ln(1+i) when it contains a meaningful symbol
    and with 0 when it contains PAD
    """
    # Running 1-based position index along axis 1.
    running_index = K.cumsum(K.ones_like(inputs, dtype="float32"), axis=1)
    # Zero out positions holding the PAD symbol.
    non_pad_mask = K.cast(K.not_equal(inputs, pad), "float32")
    return K.log(1.0 + running_index * non_pad_mask)
dnanexus/filter_qc/src/filter_qc.py | strattan/test-merge2 | 108 | 79751 | <filename>dnanexus/filter_qc/src/filter_qc.py
#!/usr/bin/env python
# filter_qc 0.0.1
# Generated by dx-app-wizard.
#
# Basic execution pattern: Your app will run on a single machine from
# beginning to end.
#
# See https://wiki.dnanexus.com/Developer-Portal for documentation and
# tutorials on how to modify this file.
#
# DNAnexus Python Bindings (dxpy) documentation:
# http://autodoc.dnanexus.com/bindings/python/current/
import os
import subprocess
import shlex
import re
import common
import dxpy
import logging
from pprint import pprint, pformat
# Module-level logger that forwards records to the DNAnexus job log.
logger = logging.getLogger(__name__)
logger.addHandler(dxpy.DXLogHandler())
# Avoid duplicate records via the root logger.
logger.propagate = False
logger.setLevel(logging.INFO)
def dup_parse(fname):
    """Parse a Picard MarkDuplicates metrics file into a dict.

    Returns {lower-cased metric name: value string} taken from the row
    following the '## METRICS CLASS' marker, dropping the leading LIBRARY
    column.  Assumes the marker is present (Picard always writes it).
    """
    with open(fname, 'r') as dup_file:
        lines = iter(dup_file.read().splitlines())
    # Find the metrics section: the marker line is followed by a header
    # row and a single data row.
    for line in lines:
        if line.startswith('## METRICS CLASS'):
            # next(lines) works on Python 2.6+ and 3; lines.next() was 2-only.
            headers = next(lines).rstrip('\n').lower()
            metrics = next(lines).rstrip('\n')
            break
    # Drop the leading LIBRARY column from both rows.
    headers = headers.split('\t')[1:]
    metrics = metrics.split('\t')[1:]
    return dict(zip(headers, metrics))
def pbc_parse(fname):
    """Parse a one-line PBC (library complexity) QC file into a dict.

    The removed ``if not pbc_file`` guard was dead code: an open file
    object is always truthy.
    """
    with open(fname, 'r') as pbc_file:
        lines = pbc_file.read().splitlines()
    line = lines[0].rstrip('\n')
    # PBC File output:
    # TotalReadPairs <tab>
    # DistinctReadPairs <tab>
    # OneReadPair <tab>
    # TwoReadPairs <tab>
    # NRF=Distinct/Total <tab>
    # PBC1=OnePair/Distinct <tab>
    # PBC2=OnePair/TwoPair
    headers = ['TotalReadPairs',
               'DistinctReadPairs',
               'OneReadPair',
               'TwoReadPairs',
               'NRF',
               'PBC1',
               'PBC2']
    metrics = line.split('\t')
    return dict(zip(headers, metrics))
def flagstat_parse(fname):
    """Parse `samtools flagstat` output into {metric: [qc_passed, qc_failed]}.

    Each flagstat line looks like "N1 + N2 <description>"; for every metric
    the first line matching its pattern supplies the two counts.
    """
    with open(fname, 'r') as flagstat_file:
        flagstat_lines = flagstat_file.read().splitlines()
    qc_dict = {
        # values are regular expressions,
        # will be replaced with scores [hiq, lowq]
        'in_total': 'in total',
        'duplicates': 'duplicates',
        'mapped': 'mapped',
        'paired_in_sequencing': 'paired in sequencing',
        'read1': 'read1',
        'read2': 'read2',
        'properly_paired': 'properly paired',
        'with_self_mate_mapped': 'with itself and mate mapped',
        'singletons': 'singletons',
        # i.e. at the end of the line
        'mate_mapped_different_chr': 'with mate mapped to a different chr$',
        # RE so must escape
        'mate_mapped_different_chr_hiQ':
            'with mate mapped to a different chr \(mapQ>=5\)'
    }
    # Iterate over a snapshot so mutating qc_dict while looping is safe
    # on every Python version.
    for (qc_key, qc_pattern) in list(qc_dict.items()):
        # First line matching the pattern; its prefix is "hiq + lowq".
        qc_metrics = next(re.split(qc_pattern, line)
                          for line in flagstat_lines
                          if re.search(qc_pattern, line))
        (hiq, lowq) = qc_metrics[0].split(' + ')
        qc_dict[qc_key] = [int(hiq.rstrip()), int(lowq.rstrip())]
    return qc_dict
@dxpy.entry_point('main')
def main(input_bam, paired_end, samtools_params, scrub, debug):
    """Filter a BAM (remove unmapped/low-MAPQ/duplicate reads) and compute QC.

    Produces the filtered, deduplicated, coordinate-sorted BAM plus its
    index, flagstat, Picard duplication and PBC library-complexity metrics,
    optionally runs the 'scrub' applet, and returns DNAnexus links and QC
    values for the platform.
    """
    if debug:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)
    raw_bam_file = dxpy.DXFile(input_bam)
    raw_bam_filename = raw_bam_file.name
    # BUG FIX: str.rstrip('.bam') strips any trailing '.', 'b', 'a', 'm'
    # characters (e.g. "samba.bam".rstrip('.bam') == "s"); remove the
    # literal suffix instead.
    if raw_bam_file.name.endswith('.bam'):
        raw_bam_basename = raw_bam_file.name[:-len('.bam')]
    else:
        raw_bam_basename = raw_bam_file.name
    raw_bam_file_mapstats_filename = raw_bam_basename + '.flagstat.qc'
    dxpy.download_dxfile(raw_bam_file.get_id(), raw_bam_filename)
    subprocess.check_output('set -x; ls -l', shell=True)
    # Generate initial mapping statistics
    with open(raw_bam_file_mapstats_filename, 'w') as fh:
        flagstat_command = "samtools flagstat %s" % (raw_bam_filename)
        logger.info(flagstat_command)
        subprocess.check_call(shlex.split(flagstat_command), stdout=fh)
    filt_bam_prefix = raw_bam_basename + ".filt.srt"
    filt_bam_filename = filt_bam_prefix + ".bam"
    if paired_end:
        # =============================
        # Remove unmapped, mate unmapped
        # not primary alignment, reads failing platform
        # Remove low MAPQ reads
        # Only keep properly paired reads
        # Obtain name sorted BAM file
        # ==================
        tmp_filt_bam_prefix = "tmp.%s" % (filt_bam_prefix)  # was tmp.prefix.nmsrt
        tmp_filt_bam_filename = tmp_filt_bam_prefix + ".bam"
        out, err = common.run_pipe([
            # filter: -F 1804 FlAG bits to exclude; -f 2 FLAG bits to reqire;
            # -q 30 exclude MAPQ < 30; -u uncompressed output
            # exclude FLAG 1804: unmapped, next segment unmapped, secondary
            # alignments, not passing platform q, PCR or optical duplicates
            # require FLAG 2: properly aligned
            "samtools view -F 1804 -f 2 %s -u %s" % (samtools_params, raw_bam_filename),
            # sort: -n sort by name; - take input from stdin;
            # out to specified filename
            # Will produce name sorted BAM
            "samtools sort -n - %s" % (tmp_filt_bam_prefix)])
        if err:
            logger.error("samtools error: %s" % (err))
        # Remove orphan reads (pair was removed)
        # and read pairs mapping to different chromosomes
        # Obtain position sorted BAM
        subprocess.check_output('set -x; ls -l', shell=True)
        out, err = common.run_pipe([
            # fill in mate coordinates, ISIZE and mate-related flags
            # fixmate requires name-sorted alignment; -r removes secondary and
            # unmapped (redundant here because already done above?)
            # - send output to stdout
            "samtools fixmate -r %s -" % (tmp_filt_bam_filename),
            # repeat filtering after mate repair
            "samtools view -F 1804 -f 2 -u -",
            # produce the coordinate-sorted BAM
            "samtools sort - %s" % (filt_bam_prefix)])
        subprocess.check_output('set -x; ls -l', shell=True)
    else:  # single-end data
        # =============================
        # Remove unmapped, mate unmapped
        # not primary alignment, reads failing platform
        # Remove low MAPQ reads
        # Obtain name sorted BAM file
        # ==================
        with open(filt_bam_filename, 'w') as fh:
            samtools_filter_command = (
                "samtools view -F 1804 %s -b %s"
                % (samtools_params, raw_bam_filename)
                )
            logger.info(samtools_filter_command)
            subprocess.check_call(
                shlex.split(samtools_filter_command),
                stdout=fh)
    # ========================
    # Mark duplicates
    # ======================
    tmp_filt_bam_filename = raw_bam_basename + ".dupmark.bam"
    dup_file_qc_filename = raw_bam_basename + ".dup.qc"
    picard_string = ' '.join([
        "java -Xmx4G -jar /picard/MarkDuplicates.jar",
        "INPUT=%s" % (filt_bam_filename),
        "OUTPUT=%s" % (tmp_filt_bam_filename),
        "METRICS_FILE=%s" % (dup_file_qc_filename),
        "VALIDATION_STRINGENCY=LENIENT",
        "ASSUME_SORTED=true",
        "REMOVE_DUPLICATES=false"
        ])
    logger.info(picard_string)
    subprocess.check_output(shlex.split(picard_string))
    os.rename(tmp_filt_bam_filename, filt_bam_filename)
    if paired_end:
        final_bam_prefix = raw_bam_basename + ".filt.srt.nodup"
    else:
        final_bam_prefix = raw_bam_basename + ".filt.nodup.srt"
    final_bam_filename = final_bam_prefix + ".bam"  # To be stored
    final_bam_index_filename = final_bam_filename + ".bai"  # To be stored
    # QC file
    final_bam_file_mapstats_filename = final_bam_prefix + ".flagstat.qc"
    if paired_end:
        samtools_dedupe_command = \
            "samtools view -F 1804 -f2 -b %s" % (filt_bam_filename)
    else:
        samtools_dedupe_command = \
            "samtools view -F 1804 -b %s" % (filt_bam_filename)
    # ============================
    # Remove duplicates
    # Index final position sorted BAM
    # ============================
    with open(final_bam_filename, 'w') as fh:
        logger.info(samtools_dedupe_command)
        subprocess.check_call(
            shlex.split(samtools_dedupe_command),
            stdout=fh)
    # Index final bam file
    samtools_index_command = \
        "samtools index %s %s" % (final_bam_filename, final_bam_index_filename)
    logger.info(samtools_index_command)
    subprocess.check_output(shlex.split(samtools_index_command))
    # Generate mapping statistics
    with open(final_bam_file_mapstats_filename, 'w') as fh:
        flagstat_command = "samtools flagstat %s" % (final_bam_filename)
        logger.info(flagstat_command)
        subprocess.check_call(shlex.split(flagstat_command), stdout=fh)
    # =============================
    # Compute library complexity
    # =============================
    # Sort by name
    # convert to bedPE and obtain fragment coordinates
    # sort by position and strand
    # Obtain unique count statistics
    pbc_file_qc_filename = final_bam_prefix + ".pbc.qc"
    # PBC File output
    # TotalReadPairs [tab]
    # DistinctReadPairs [tab]
    # OneReadPair [tab]
    # TwoReadPairs [tab]
    # NRF=Distinct/Total [tab]
    # PBC1=OnePair/Distinct [tab]
    # PBC2=OnePair/TwoPair
    if paired_end:
        steps = [
            "samtools sort -no %s -" % (filt_bam_filename),
            "bamToBed -bedpe -i stdin",
            r"""awk 'BEGIN{OFS="\t"}{print $1,$2,$4,$6,$9,$10}'"""]
    else:
        steps = [
            "bamToBed -i %s" % (filt_bam_filename),
            r"""awk 'BEGIN{OFS="\t"}{print $1,$2,$3,$6}'"""]
    steps.extend([
        "grep -v 'chrM'",
        "sort",
        "uniq -c",
        r"""awk 'BEGIN{mt=0;m0=0;m1=0;m2=0} ($1==1){m1=m1+1} ($1==2){m2=m2+1} {m0=m0+1} {mt=mt+$1} END{printf "%d\t%d\t%d\t%d\t%f\t%f\t%f\n",mt,m0,m1,m2,m0/mt,m1/m0,m1/m2}'"""
        ])
    out, err = common.run_pipe(steps, pbc_file_qc_filename)
    if err:
        logger.error("PBC file error: %s" % (err))
    output = {}
    logger.info("Uploading results files to the project")
    filtered_bam = dxpy.upload_local_file(final_bam_filename)
    filtered_bam_index = dxpy.upload_local_file(final_bam_index_filename)
    output.update({
        "filtered_bam": dxpy.dxlink(filtered_bam),
        "filtered_bam_index": dxpy.dxlink(filtered_bam_index)
    })
    # If the scrub parameter is true, pass the bams to the scrub applet.
    if scrub:
        scrub_applet = dxpy.find_one_data_object(
            classname='applet',
            name='scrub',
            project=dxpy.PROJECT_CONTEXT_ID,
            zero_ok=False,
            more_ok=False,
            return_handler=True)
        scrub_subjob = \
            scrub_applet.run(
                {"input_bams": [input_bam, dxpy.dxlink(filtered_bam)]},
                name='Scrub bams')
        scrubbed_unfiltered_bam = scrub_subjob.get_output_ref("scrubbed_bams", index=0)
        scrubbed_filtered_bam = scrub_subjob.get_output_ref("scrubbed_bams", index=1)
        # Add the optional scrubbed outputs.
        output.update({
            "scrubbed_unfiltered_bam": dxpy.dxlink(scrubbed_unfiltered_bam),
            "scrubbed_filtered_bam": dxpy.dxlink(scrubbed_filtered_bam)
        })
    # Upload or calculate the remaining outputs.
    filtered_mapstats = \
        dxpy.upload_local_file(final_bam_file_mapstats_filename)
    dup_file = dxpy.upload_local_file(dup_file_qc_filename)
    pbc_file = dxpy.upload_local_file(pbc_file_qc_filename)
    logger.info("Calcualting QC metrics")
    dup_qc = dup_parse(dup_file_qc_filename)
    pbc_qc = pbc_parse(pbc_file_qc_filename)
    initial_mapstats_qc = flagstat_parse(raw_bam_file_mapstats_filename)
    final_mapstats_qc = flagstat_parse(final_bam_file_mapstats_filename)
    if paired_end:
        useable_fragments = final_mapstats_qc.get('in_total')[0]/2
    else:
        useable_fragments = final_mapstats_qc.get('in_total')[0]
    # (Stray trailing commas removed from the next two statements; they
    # made each one a one-element tuple expression.)
    logger.info("initial_mapstats_qc: %s" % (initial_mapstats_qc))
    logger.info("final_mapstats_qc: %s" % (final_mapstats_qc))
    logger.info("dup_qc: %s" % (dup_qc))
    logger.info("pbc_qc: %s" % (pbc_qc))
    # Return links to the output files and values.
    output.update({
        "filtered_mapstats": dxpy.dxlink(filtered_mapstats),
        "dup_file_qc": dxpy.dxlink(dup_file),
        "pbc_file_qc": dxpy.dxlink(pbc_file),
        "paired_end": paired_end,
        "n_reads_input": str(initial_mapstats_qc.get('in_total')[0]),
        "picard_read_pairs_examined": str(dup_qc.get('read_pairs_examined')),
        "picard_unpaired_reads_examined": str(dup_qc.get('unpaired_reads_examined')),
        "picard_read_pair_duplicates": str(dup_qc.get('read_pair_duplicates')),
        "picard_unpaired_read_duplicates": str(dup_qc.get('unpaired_read_duplicates')),
        "useable_fragments": str(useable_fragments),
        "NRF": str(pbc_qc.get('NRF')),
        "PBC1": str(pbc_qc.get('PBC1')),
        "PBC2": str(pbc_qc.get('PBC2')),
        "duplicate_fraction": str(dup_qc.get('percent_duplication'))
    })
    logger.info("Exiting with output:\n%s" % (pformat(output)))
    return output
# Execute the registered entry point when the applet runs on a DNAnexus worker.
dxpy.run()
|
engine/compile/ops/onnx_input.py | kevinintel/neural-compressor | 172 | 79756 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .op import Operator, operator_registry
from .tensor import Tensor
from ..graph_utils import names_from_input
# graph.input
@operator_registry(operator_type='ONNXINPUT')
class ONNXINPUT(Operator):
    """Pseudo-operator representing one graph input of an ONNX model."""
    def __init__(self):
        super().__init__()
    def extract(self, framework, node, model, nodes_dict):
        """Populate name, op type and the single output tensor from *node*."""
        from ..onnx_utils import ONNX_DTYPE_ID
        self._name = node.name
        self._op_type = 'ONNXINPUT'
        tensor_type = node.type.tensor_type
        # Every dimension is dynamic (-1); only the rank is taken from the model.
        rank = len(tensor_type.shape.dim)
        input_tensor = Tensor(
            name=names_from_input(self._name)[1],
            shape=[-1] * rank,
            dtype=ONNX_DTYPE_ID[tensor_type.elem_type],
            source_op=[self._name],
            dest_op=nodes_dict[self._name].outputs,
        )
        self._output_tensors = [input_tensor]
|
tests/test_type_hooks.py | HallerPatrick/frosch | 204 | 79775 | <gh_stars>100-1000
import contextlib
import unittest
from unittest.mock import Mock, patch
import sys
from frosch.type_hooks import HookLoader
from frosch.parser import Variable
@contextlib.contextmanager
def mock_numpy_module():
    """Inject a numpy mock into sys.modules to avoid import errors.

    BUG FIX: the cleanup now runs in a ``finally`` block, so the mock is
    removed from ``sys.modules`` even when the with-body raises.
    """
    numpy_mock = Mock()
    numpy_mock.name = "numpy"
    sys.modules["numpy"] = numpy_mock
    try:
        yield numpy_mock
    finally:
        del sys.modules["numpy"]
class TestLoader(unittest.TestCase):
    """HookLoader behaviour around lazy hook loading."""

    def test_lazy_load_hooks(self):
        """Loading the numpy hook module registers exactly two hooks."""
        with mock_numpy_module():
            loader = HookLoader()
            loader._lazy_load_hooks("hook_numpy")
            self.assertEqual(len(loader._hooks), 2)

    def test_lazy_load_hooks_from_variable(self):
        """An ndarray-typed variable triggers loading of the numpy hooks."""
        class ndarray:
            pass

        variable = Variable("nd_array", 2, ndarray())
        with patch("frosch.type_hooks.HookLoader._lazy_load_hooks") as load_mock:
            HookLoader().lazy_load_hooks_from_variable(variable)
        load_mock.assert_called_once_with("hook_numpy")
|
from collections import defaultdict
import numpy as np
class MetricsAccumulator:
    """Collects named metric values and reports their running averages."""

    def __init__(self) -> None:
        # metric name -> list of recorded values
        # (defaultdict(list) replaces the equivalent but slower/unidiomatic
        # defaultdict(lambda: [])).
        self.accumulator = defaultdict(list)

    def update_metric(self, metric_name, metric_value):
        """Record one observation of *metric_name*."""
        self.accumulator[metric_name].append(metric_value)

    def print_average_metric(self):
        """Print the mean of every accumulated metric, then reset."""
        for k, v in self.accumulator.items():
            average_v = np.array(v).mean()
            print(f"{k} - {average_v:.2f}")
        # Start a fresh accumulation window; clearer than re-calling __init__().
        self.accumulator = defaultdict(list)
|
resource-timing/resources/eventsource.py | meyerweb/wpt | 14,668 | 79797 | <reponame>meyerweb/wpt<filename>resource-timing/resources/eventsource.py<gh_stars>1000+
def main(request, response):
    """WPT handler: mark the response as an event stream with an empty body."""
    content_type = (b"Content-Type", b"text/event-stream")
    response.headers.set(*content_type)
    return u""
|
twiml/voice/pay/pay-1/pay-1.6.x.py | Tshisuaka/api-snippets | 234 | 79804 | from twilio.twiml.voice_response import Pay, VoiceResponse
# Build the TwiML document: a <Response> containing a single <Pay> verb.
response = VoiceResponse()
response.append(Pay())
print(response)
|
tensorflow_gan/examples/dme_cyclegan/train_test.py | nivedwho/gan | 783 | 79813 | <gh_stars>100-1000
# coding=utf-8
# Copyright 2021 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for cyclegan.train."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_gan as tfgan
from tensorflow_gan.examples.dme_cyclegan import train_lib
mock = tf.test.mock
def _test_generator(input_images):
    """Trivial generator: scales inputs by a single trainable variable."""
    scale = tf.get_variable('dummy_g', initializer=2.0)
    return input_images * scale
def _test_discriminator(image_batch, unused_conditioning=None):
    """Trivial discriminator: scales and flattens the image batch."""
    scale = tf.get_variable('dummy_d', initializer=2.0)
    return tf.layers.flatten(image_batch * scale)
class TrainTest(tf.test.TestCase):
    """Unit tests for the dme_cyclegan `train_lib` helpers."""

    def setUp(self):
        super(TrainTest, self).setUp()
        # Force the TF lazy loading to kick in before mocking these out below.
        _ = tf.train.get_or_create_global_step
        _ = tf.train.AdamOptimizer
        # Replace the real networks with tiny test doubles; restored in tearDown.
        self._original_generator = train_lib.networks.generator
        self._original_discriminator = train_lib.networks.discriminator
        train_lib.networks.generator = _test_generator
        train_lib.networks.discriminator = _test_discriminator
        # Baseline hyperparameters; individual tests override via `_replace`.
        self.hparams = train_lib.HParams(
            image_set_x_file_pattern=None,
            image_set_y_file_pattern=None,
            batch_size=1,
            patch_size=64,
            master='',
            train_log_dir='/tmp/tfgan_logdir/cyclegan/',
            generator_lr=0.0002,
            discriminator_lr=0.0001,
            max_number_of_steps=500000,
            ps_replicas=0,
            task=0,
            cycle_consistency_loss_weight=10.0)

    def tearDown(self):
        super(TrainTest, self).tearDown()
        # Restore the real network functions patched in setUp.
        train_lib.networks.generator = self._original_generator
        train_lib.networks.discriminator = self._original_discriminator

    @mock.patch.object(tfgan, 'eval', autospec=True)
    def test_define_model(self, mock_eval):
        """_define_model returns a CycleGANModel whose reconstructions keep shape."""
        if tf.executing_eagerly():
            # `tfgan.cyclegan_model` doesn't work when executing eagerly.
            return
        self.hparams = self.hparams._replace(batch_size=2)
        images_shape = [self.hparams.batch_size, 4, 4, 3]
        images_x_np = np.zeros(shape=images_shape)
        images_y_np = np.zeros(shape=images_shape)
        images_x = tf.constant(images_x_np, dtype=tf.float32)
        images_y = tf.constant(images_y_np, dtype=tf.float32)
        cyclegan_model = train_lib._define_model(images_x, images_y)
        self.assertIsInstance(cyclegan_model, tfgan.CycleGANModel)
        self.assertShapeEqual(images_x_np, cyclegan_model.reconstructed_x)
        self.assertShapeEqual(images_y_np, cyclegan_model.reconstructed_y)

    @mock.patch.object(train_lib.networks, 'generator', autospec=True)
    @mock.patch.object(train_lib.networks, 'discriminator', autospec=True)
    @mock.patch.object(tf.train, 'get_or_create_global_step', autospec=True)
    def test_get_lr(self, mock_get_or_create_global_step,
                    unused_mock_discriminator, unused_mock_generator):
        """_get_lr keeps the base LR early and decays it near the end."""
        if tf.executing_eagerly():
            return
        base_lr = 0.01
        max_number_of_steps = 10
        with self.cached_session(use_gpu=True) as sess:
            # Evaluate the LR at two points by faking the global step value.
            mock_get_or_create_global_step.return_value = tf.constant(2)
            lr_step2 = sess.run(train_lib._get_lr(base_lr, max_number_of_steps))
            mock_get_or_create_global_step.return_value = tf.constant(9)
            lr_step9 = sess.run(train_lib._get_lr(base_lr, max_number_of_steps))
        self.assertAlmostEqual(base_lr, lr_step2)
        self.assertAlmostEqual(base_lr * 0.2, lr_step9)

    @mock.patch.object(tf.train, 'AdamOptimizer', autospec=True)
    def test_get_optimizer(self, mock_adam_optimizer):
        """_get_optimizer builds locking Adam optimizers with the given LRs."""
        gen_lr, dis_lr = 0.1, 0.01
        train_lib._get_optimizer(gen_lr=gen_lr, dis_lr=dis_lr)
        mock_adam_optimizer.assert_has_calls([
            mock.call(gen_lr, beta1=mock.ANY, use_locking=True),
            mock.call(dis_lr, beta1=mock.ANY, use_locking=True)
        ])

    def test_define_train_ops(self):
        """_define_train_ops wires the model and loss into GANTrainOps."""
        if tf.executing_eagerly():
            # `tfgan.cyclegan_model` doesn't work when executing eagerly.
            return
        self.hparams = self.hparams._replace(
            batch_size=2, generator_lr=0.1, discriminator_lr=0.01)
        images_shape = [self.hparams.batch_size, 4, 4, 3]
        images_x = tf.zeros(images_shape, dtype=tf.float32)
        images_y = tf.zeros(images_shape, dtype=tf.float32)
        cyclegan_model = train_lib._define_model(images_x, images_y)
        cyclegan_loss = tfgan.cyclegan_loss(
            cyclegan_model, cycle_consistency_loss_weight=10.0)
        train_ops = train_lib._define_train_ops(cyclegan_model, cyclegan_loss,
                                                self.hparams)
        self.assertIsInstance(train_ops, tfgan.GANTrainOps)

    @mock.patch.object(tf.io, 'gfile', autospec=True)
    @mock.patch.object(train_lib, 'data_provider', autospec=True)
    @mock.patch.object(train_lib, '_define_model', autospec=True)
    @mock.patch.object(tfgan, 'cyclegan_loss', autospec=True)
    @mock.patch.object(train_lib, '_define_train_ops', autospec=True)
    @mock.patch.object(tfgan, 'gan_train', autospec=True)
    def test_main(self, mock_gan_train, mock_define_train_ops, mock_cyclegan_loss,
                  mock_define_model, mock_data_provider, mock_gfile):
        """train() invokes each pipeline stage once with the configured hparams."""
        self.hparams = self.hparams._replace(
            image_set_x_file_pattern='/tmp/x/*.jpg',
            image_set_y_file_pattern='/tmp/y/*.jpg',
            batch_size=3,
            patch_size=8,
            generator_lr=0.02,
            discriminator_lr=0.3,
            train_log_dir='/tmp/foo',
            master='master',
            task=0,
            cycle_consistency_loss_weight=2.0,
            max_number_of_steps=1)
        mock_data_provider.provide_custom_data.return_value = (tf.zeros(
            [3, 2, 2, 3], dtype=tf.float32), tf.zeros([3, 2, 2, 3],
                                                      dtype=tf.float32))
        train_lib.train(self.hparams)
        # Every stage should have been called exactly once with the expected
        # wiring between the mocked components.
        mock_data_provider.provide_custom_data.assert_called_once_with(
            batch_size=3, image_file_patterns=['/tmp/x/*.jpg', '/tmp/y/*.jpg'],
            patch_size=8)
        mock_define_model.assert_called_once_with(mock.ANY, mock.ANY)
        mock_cyclegan_loss.assert_called_once_with(
            mock_define_model.return_value,
            cycle_consistency_loss_weight=2.0,
            tensor_pool_fn=mock.ANY)
        mock_define_train_ops.assert_called_once_with(
            mock_define_model.return_value, mock_cyclegan_loss.return_value,
            self.hparams)
        mock_gan_train.assert_called_once_with(
            mock_define_train_ops.return_value,
            '/tmp/foo',
            get_hooks_fn=mock.ANY,
            hooks=mock.ANY,
            master='master',
            is_chief=True)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    tf.test.main()
|
unit1/spiders/spider_2_quotes.py | nulearn3296/scrapy-training | 182 | 79831 | import scrapy
class QuotesSpider(scrapy.Spider):
    """Spider that fetches the first two pages of quotes.toscrape.com."""

    name = "quotes2"
    start_urls = [
        'http://quotes.toscrape.com/page/1/',
        'http://quotes.toscrape.com/page/2/',
    ]

    def parse(self, response):
        """Log each visited URL; no items are extracted."""
        message = 'I just visited {}'.format(response.url)
        self.log(message)
|
bcbio/variation/prioritize.py | naumenko-sa/bcbio-nextgen | 418 | 79841 | """Prioritization scheme for identifying follow up variants in tumor-only samples.
Generalizes the filtering scheme used in VarDict post-processing:
https://github.com/AstraZeneca-NGS/VarDict/blob/9ffec9168e91534fac5fb74b3ec7bdd2badd3464/vcf2txt.pl#L190
The goal is to build up a standard set of prioritization filters based on known
data. Uses GEMINI to load a database of variants with associated third party
query information. Makes use of ExAC, dbSNP, 1000 genomes, clinvar, cosmic and
effects annotations. The general idea is to prioritize deleterious variants
missing or present at a low frequency in the population, or secondarily identified
in external databases like COSMIC and ClinVar.
"""
import collections
import csv
import re
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.variation import population, vcfutils
# Lazily imported third-party modules: only loaded when prioritization runs.
geneimpacts = utils.LazyImport("geneimpacts")
cyvcf2 = utils.LazyImport("cyvcf2")
def handle_vcf_calls(vcf_file, data, orig_items):
    """Prioritize VCF calls based on external annotations supplied through GEMINI.
    """
    if not _do_prioritize(orig_items):
        return vcf_file
    ann_vcf = population.run_vcfanno(vcf_file, data)
    if not ann_vcf:
        # No data available for filtering, return original file
        return vcf_file
    priority_file = _prep_priority_filter_vcfanno(ann_vcf, data)
    return _apply_priority_filter(ann_vcf, priority_file, data)
def _apply_priority_filter(in_file, priority_file, data):
    """Annotate variants with priority information and use to apply filters.

    Adds an EPR INFO field from *priority_file* via ``bcftools annotate`` and,
    when the `tumoronly_germline_filter` tool is enabled, soft-filters records
    whose first EPR entry is not 'pass' with a 'LowPriority' FILTER.
    Returns the bgzipped, indexed output VCF path.
    """
    out_file = "%s-priority%s" % utils.splitext_plus(in_file)
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            # Header line declaring the new EPR INFO annotation.
            header = ('##INFO=<ID=EPR,Number=.,Type=String,'
                      'Description="Somatic prioritization based on external annotations, '
                      'identify as likely germline">')
            header_file = "%s-repeatheader.txt" % utils.splitext_plus(tx_out_file)[0]
            with open(header_file, "w") as out_handle:
                out_handle.write(header)
            if "tumoronly_germline_filter" in dd.get_tools_on(data):
                filter_cmd = ("bcftools filter -m '+' -s 'LowPriority' "
                              """-e "EPR[0] != 'pass'" |""")
            else:
                filter_cmd = ""
            # bcftools 1.13+ requires to skip TO
            cmd = ("bcftools annotate -a {priority_file} -h {header_file} "
                   "-c CHROM,FROM,-,REF,ALT,INFO/EPR {in_file} | "
                   "{filter_cmd} bgzip -c > {tx_out_file}")
            do.run(cmd.format(**locals()), "Run external annotation based prioritization filtering")
    vcfutils.bgzip_and_index(out_file, data["config"])
    return out_file
def _prep_priority_filter_vcfanno(ann_vcf, data):
    """Prepare tabix file with priority filters based on vcfanno annotations.

    Writes one (chrom, start, end, ref, alt, filter) row per record of the
    annotated VCF, then bgzips and tabix-indexes the table for use by
    ``bcftools annotate`` in _apply_priority_filter.
    """
    # Population allele-frequency INFO keys produced by vcfanno
    # (ExAC subpopulations, ESP, 1000 genomes).
    pops = ['af_adj_exac_afr', 'af_adj_exac_amr', 'af_adj_exac_eas',
            'af_adj_exac_fin', 'af_adj_exac_nfe', 'af_adj_exac_oth', 'af_adj_exac_sas',
            'af_exac_all', 'max_aaf_all',
            "af_esp_ea", "af_esp_aa", "af_esp_all", "af_1kg_amr", "af_1kg_eas",
            "af_1kg_sas", "af_1kg_afr", "af_1kg_eur", "af_1kg_all"]
    # Known pathogenic-database annotation keys (COSMIC, ClinVar).
    known = ["cosmic_ids", "cosmic_id", "clinvar_sig"]
    out_file = "%s-priority.tsv" % utils.splitext_plus(ann_vcf)[0]
    if not utils.file_exists(out_file) and not utils.file_exists(out_file + ".gz"):
        with file_transaction(data, out_file) as tx_out_file:
            with open(tx_out_file, "w") as out_handle:
                writer = csv.writer(out_handle, dialect="excel-tab")
                header = ["#chrom", "start", "end", "ref", "alt", "filter"]
                writer.writerow(header)
                vcf_reader = cyvcf2.VCF(ann_vcf)
                impact_info = _get_impact_info(vcf_reader)
                for rec in vcf_reader:
                    row = _prepare_vcf_rec(rec, pops, known, impact_info)
                    cur_filter = _calc_priority_filter(row, pops)
                    writer.writerow([rec.CHROM, rec.start, rec.end, rec.REF, ",".join(rec.ALT), cur_filter])
    return vcfutils.bgzip_and_index(out_file, data["config"],
                                    tabix_args="-0 -c '#' -s 1 -b 2 -e 3")
def _get_impact_info(vcf_reader):
    """Retrieve impact parsing information from INFO header.

    Scans the raw VCF header for an INFO definition whose ID matches a known
    effects annotation (VEP CSQ, SnpEff ANN, bcftools BCSQ) and returns an
    ImpactInfo tuple of (parsed sub-field header, geneimpacts class, INFO key).
    Returns None when no supported annotation is present.
    """
    ImpactInfo = collections.namedtuple("ImpactInfo", "header, gclass, id")
    KEY_2_CLASS = {
        'CSQ': geneimpacts.VEP,
        'ANN': geneimpacts.SnpEff,
        'BCSQ': geneimpacts.BCFT}
    # key="value" pairs inside the INFO definition. Compiled once outside the
    # loop (it is loop-invariant) and spelled as a raw string so the regex
    # escapes are not mangled by Python string escaping.
    patt = re.compile(r'(\w+)=("[^"]+"|[^,]+)')
    for l in (x.strip() for x in _from_bytes(vcf_reader.raw_header).split("\n")):
        if l.startswith("##INFO"):
            stub = l.split("=<")[1].rstrip(">")
            d = dict(patt.findall(_from_bytes(stub)))
            if d["ID"] in KEY_2_CLASS:
                return ImpactInfo(_parse_impact_header(d), KEY_2_CLASS[d["ID"]], d["ID"])
def _from_bytes(s):
if isinstance(s, bytes):
import locale
ENC = locale.getpreferredencoding()
try:
return s.decode(ENC)
except UnicodeDecodeError:
return s.decode('utf8')
return s
def _parse_impact_header(hdr_dict):
"""Parse fields for impact, taken from vcf2db
"""
desc = hdr_dict["Description"]
if hdr_dict["ID"] == "ANN":
parts = [x.strip("\"'") for x in re.split("\s*\|\s*", desc.split(":", 1)[1].strip('" '))]
elif hdr_dict["ID"] == "EFF":
parts = [x.strip(" [])'(\"") for x in re.split("\||\(", desc.split(":", 1)[1].strip())]
elif hdr_dict["ID"] == "CSQ":
parts = [x.strip(" [])'(\"") for x in re.split("\||\(", desc.split(":", 1)[1].strip())]
elif hdr_dict["ID"] == "BCSQ":
parts = desc.split(']', 1)[1].split(']')[0].replace('[','').split("|")
else:
raise Exception("don't know how to use %s as annotation" % hdr_dict["ID"])
return parts
def _prepare_vcf_rec(rec, pops, known, impact_info):
    """Parse a vcfanno-annotated record into a dict of useful attributes."""
    out = {key: rec.INFO.get(key) for key in pops + known}
    if impact_info:
        raw = rec.INFO.get(impact_info.id)
        if raw:
            effects = [impact_info.gclass(entry, impact_info.header)
                       for entry in _from_bytes(raw).split(",")]
            top = geneimpacts.Effect.top_severity(effects)
            # top_severity may return a list of tied effects; take the first.
            if isinstance(top, list):
                top = top[0]
            out["impact_severity"] = top.effect_severity
    return out
def _calc_priority_filter(row, pops):
    """Calculate the priority filter based on external associated data.

    - Pass high/medium impact variants not found in population databases
    - Pass variants found in COSMIC or Clinvar provided they don't have two
      additional reasons to filter (found in multiple external populations)
    """
    passes = list(_find_known(row))
    filters = list(_known_populations(row, pops))
    keep = not filters or (passes and len(filters) < 2)
    if keep:
        passes.insert(0, "pass")
    return ",".join(passes + filters)
def _known_populations(row, pops):
"""Find variants present in substantial frequency in population databases.
"""
cutoff = 0.01
out = set([])
for pop, base in [("esp", "af_esp_all"), ("1000g", "af_1kg_all"),
("exac", "af_exac_all"), ("anypop", "max_aaf_all")]:
for key in [x for x in pops if x.startswith(base)]:
val = row[key]
if val and val > cutoff:
out.add(pop)
return sorted(list(out))
def _find_known(row):
"""Find variant present in known pathogenic databases.
"""
out = []
clinvar_no = set(["unknown", "untested", "non-pathogenic", "probable-non-pathogenic",
"uncertain_significance", "uncertain_significance", "not_provided",
"benign", "likely_benign"])
if row["cosmic_ids"] or row["cosmic_id"]:
out.append("cosmic")
if row["clinvar_sig"] and not row["clinvar_sig"].lower() in clinvar_no:
out.append("clinvar")
return out
def _do_prioritize(items):
    """Determine if we should perform prioritization.

    Currently done on tumor-only input samples and feeding into PureCN
    which needs the germline annotations.
    """
    turned_off = any("tumoronly-prioritization" in dd.get_tools_off(d)
                     for d in items)
    if not turned_off and vcfutils.get_paired_phenotype(items[0]):
        phenotypes = [vcfutils.get_paired_phenotype(d) for d in items]
        return "tumor" in phenotypes and "normal" not in phenotypes
|
odps/lib/importer.py | wjsi/aliyun-odps-python-sdk | 412 | 79868 | <reponame>wjsi/aliyun-odps-python-sdk<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import zipfile
import tarfile
import os
import random
import sys
import types
import warnings
from collections import defaultdict
# Module lookup order: try a plain module first, then a package __init__.
_SEARCH_ORDER = [
    ('.py', False),
    ('/__init__.py', True),
]

# Probe whether local file-system access works; binary packages can only be
# loaded when archives can be extracted to disk.
try:
    os.path.exists('/tmp')
    ALLOW_BINARY = True
except:
    ALLOW_BINARY = False

# Python 2/3 compatibility shims for dict iteration and string types.
if sys.version_info[0] <= 2:
    iterkeys = lambda d: d.iterkeys()
    itervalues = lambda d: d.itervalues()
    string_types = (basestring, unicode)
else:
    iterkeys = lambda d: d.keys()
    itervalues = lambda d: d.values()
    string_types = (str, bytes)
def _clean_extract():
    """Remove the temporary extraction directory (registered via atexit)."""
    if not CompressImporter._extract_path:
        return
    import shutil
    shutil.rmtree(CompressImporter._extract_path)
class CompressImportError(ImportError):
    """Exception raised by CompressImporter objects."""
class CompressImporter(object):
    """
    A PEP-302-style importer that can import from a zipfile.
    Just insert or append this class (not an instance) to sys.path_hooks
    and you're in business.  Instances satisfy both the 'importer' and
    'loader' APIs specified in PEP 302.
    """
    # Shared temporary directory for extracted archives; created lazily.
    _extract_path = None

    def __init__(self, *compressed_files, **kwargs):
        """
        Constructor.

        Args:
            compressed_files: zipfile.ZipFile, tarfile.TarFile, dict or list
                of member names.
        """
        self._files = []
        # Maps id(container) -> sorted candidate path prefixes inside it.
        self._prefixes = defaultdict(lambda: set(['']))
        self._extract = kwargs.get('extract', False)
        self._supersede = kwargs.get('supersede', False)
        self._match_version = kwargs.get('_match_version', True)
        self._local_warned = False
        for f in compressed_files:
            # Detect binary (.so/.pxd/.dylib) content per container type.
            if isinstance(f, zipfile.ZipFile):
                bin_package = any(n.endswith('.so') or n.endswith('.pxd') or n.endswith('.dylib')
                                  for n in f.namelist())
                need_extract = True
            elif isinstance(f, tarfile.TarFile):
                bin_package = any(m.name.endswith('.so') or m.name.endswith('.pxd') or m.name.endswith('.dylib')
                                  for m in f.getmembers())
                need_extract = True
            elif isinstance(f, dict):
                bin_package = any(name.endswith('.so') or name.endswith('.pxd') or name.endswith('.dylib')
                                  for name in iterkeys(f))
                need_extract = False
            elif isinstance(f, list):
                bin_package = any(name.endswith('.so') or name.endswith('.pxd') or name.endswith('.dylib')
                                  for name in f)
                need_extract = False
            else:
                raise TypeError('Compressed file can only be zipfile.ZipFile or tarfile.TarFile')
            if bin_package:
                if not ALLOW_BINARY:
                    raise SystemError('Cannot load binary package. It is quite possible that you are using an old '
                                      'MaxCompute service which does not support binary packages. If this is '
                                      'not true, please set `odps.isolation.session.enable` to True or ask your '
                                      'project owner to change project-level configuration.')
                # Binary archives must be unpacked to real files on disk.
                if need_extract:
                    f = self._extract_archive(f)
            prefixes = set([''])
            dir_prefixes = set()
            # Collect directory prefixes that are NOT packages (no __init__.py)
            # so module lookups can search inside them.
            if isinstance(f, zipfile.ZipFile):
                for name in f.namelist():
                    name = name if name.endswith('/') else (name.rsplit('/', 1)[0] + '/')
                    if name in prefixes:
                        continue
                    try:
                        f.getinfo(name + '__init__.py')
                    except KeyError:
                        prefixes.add(name)
            elif isinstance(f, tarfile.TarFile):
                for member in f.getmembers():
                    name = member.name if member.isdir() else member.name.rsplit('/', 1)[0]
                    if name in prefixes:
                        continue
                    try:
                        f.getmember(name + '/__init__.py')
                    except KeyError:
                        prefixes.add(name + '/')
            elif isinstance(f, (list, dict)):
                # Force ArchiveResource to run under binary mode to resolve manually
                # opening __file__ paths in pure-python code.
                if ALLOW_BINARY:
                    bin_package = True
                rendered_names = set()
                for name in f:
                    name = name.replace(os.sep, '/')
                    rendered_names.add(name)
                for name in rendered_names:
                    name = name if name.endswith('/') else (name.rsplit('/', 1)[0] + '/')
                    if name in prefixes or '/tests/' in name:
                        continue
                    if name + '__init__.py' not in rendered_names:
                        prefixes.add(name)
                        dir_prefixes.add(name)
                    else:
                        # Package directory: register its parent instead.
                        if '/' in name.rstrip('/'):
                            ppath = name.rstrip('/').rsplit('/', 1)[0]
                        else:
                            ppath = ''
                        prefixes.add(ppath)
                        dir_prefixes.add(ppath)
            if bin_package:
                # Binary packages are imported via sys.path so the normal
                # importer can load native extensions; keep only the topmost
                # directories (skip children of already-added paths).
                path_patch = []
                for p in sorted(dir_prefixes):
                    if p in sys.path:
                        continue
                    parent_exist = False
                    for pp in path_patch:
                        if p[:len(pp)] == pp:
                            parent_exist = True
                            break
                    if parent_exist:
                        continue
                    path_patch.append(p)
                if self._supersede:
                    sys.path = path_patch + sys.path
                else:
                    sys.path = sys.path + path_patch
            else:
                self._files.append(f)
                if prefixes:
                    self._prefixes[id(f)] = sorted(prefixes)

    def _extract_archive(self, archive):
        """Unpack *archive* to a temp dir; return {basename: open file} dict."""
        if not self._extract:
            raise SystemError('We do not allow file-type resource for binary packages. Please upload an '
                              'archive-typed resource instead.')
        # NOTE(review): this looks like a guard against extracting binaries on
        # a local interpreter that differs from the MaxCompute cluster runtime
        # (py2.7 / ucs2 / posix) — confirm against project docs.
        if self._match_version and (sys.version_info[:2] != (2, 7) or sys.maxunicode != 65535 or os.name != 'posix'):
            if not self._local_warned:
                self._local_warned = True
                warnings.warn('Your python version may not match the version on MaxCompute clusters. '
                              'Package installed in your site-packages will be used instead of those '
                              'specified in libraries. If no corresponding packages were discovered, '
                              'an ImportError might be raised in execution.')
            return
        cls = type(self)
        if not cls._extract_path:
            import tempfile
            import atexit
            cls._extract_path = tempfile.mkdtemp(prefix='tmp-pyodps-')
            # Clean up the shared extraction dir when the interpreter exits.
            atexit.register(_clean_extract)
        extract_dir = os.path.join(cls._extract_path,
                                   'archive-' + str(random.randint(100000, 999999)))
        os.makedirs(extract_dir)
        if isinstance(archive, zipfile.ZipFile):
            archive.extractall(extract_dir)
        elif isinstance(archive, tarfile.TarFile):
            archive.extractall(extract_dir)
        mock_archive = dict()
        for root, dirs, files in os.walk(extract_dir):
            for name in files:
                full_name = os.path.join(root, name)
                mock_archive[name] = open(full_name, 'rb')
        return mock_archive

    def _get_info(self, fullmodname):
        """
        Internal helper for find_module() and load_module().

        Args:
            fullmodname: The dot-separated full module name, e.g. 'django.core.mail'.
        Returns:
            A tuple (submodname, is_package, relpath, fileobj) where:
                submodname: The final component of the module name, e.g. 'mail'.
                is_package: A bool indicating whether this is a package.
                relpath: The path to the module's source code within to the zipfile or tarfile.
                fileobj: The file object
        Raises:
            ImportError if the module is not found in the archive.
        """
        parts = fullmodname.split('.')
        submodname = parts[-1]
        # Try every registered container and prefix with both search suffixes.
        for f in self._files:
            for prefix in self._prefixes[id(f)]:
                for suffix, is_package in _SEARCH_ORDER:
                    l = [prefix] + parts[:-1] + [submodname + suffix.replace('/', os.sep)]
                    relpath = os.path.join(*l)
                    try:
                        relpath = relpath.replace(os.sep, '/')
                        if isinstance(f, zipfile.ZipFile):
                            f.getinfo(relpath)
                        elif isinstance(f, tarfile.TarFile):
                            f.getmember(relpath)
                        else:
                            if relpath not in f:
                                raise KeyError
                    except KeyError:
                        pass
                    else:
                        return submodname, is_package, relpath, f
        msg = 'Can\'t find module %s' % fullmodname
        raise CompressImportError(msg)

    def _get_source(self, fullmodname):
        """
        Internal helper for load_module().

        Args:
            fullmodname: The dot-separated full module name, e.g. 'django.core.mail'.
        Returns:
            A tuple (submodname, is_package, fullpath, source) where:
                submodname: The final component of the module name, e.g. 'mail'.
                is_package: A bool indicating whether this is a package.
                fullpath: The path to the module's source code including the
                    zipfile's or tarfile's filename.
                source: The module's source code.
        Raises:
            ImportError if the module is not found in the archive.
        """
        submodname, is_package, relpath, fileobj = self._get_info(fullmodname)
        fullpath = '%s%s%s' % (fileobj, os.sep, relpath)
        if isinstance(fileobj, zipfile.ZipFile):
            source = fileobj.read(relpath.replace(os.sep, '/'))
        elif isinstance(fileobj, tarfile.TarFile):
            source = fileobj.extractfile(relpath.replace(os.sep, '/')).read()
        elif isinstance(fileobj, dict):
            source = fileobj[relpath.replace(os.sep, '/')].read()
        else:
            source = open(fileobj[relpath.replace(os.sep, '/')], 'rb').read()
        # Normalize line endings so compile() accepts the source.
        source = source.replace(b'\r\n', b'\n').replace(b'\r', b'\n')
        return submodname, is_package, fullpath, source

    def find_module(self, fullmodname, path=None):
        """
        PEP-302-compliant find_module() method.

        Args:
            fullmodname: The dot-separated full module name, e.g. 'django.core.mail'.
            path: Optional and ignored; present for API compatibility only.
        Returns:
            None if the module isn't found in the archive; self if it is found.
        """
        try:
            self._get_info(fullmodname)
        except ImportError:
            return None
        else:
            return self

    def load_module(self, fullmodname):
        """
        PEP-302-compliant load_module() method.

        Args:
            fullmodname: The dot-separated full module name, e.g. 'django.core.mail'.
        Returns:
            The module object constructed from the source code.
        Raises:
            SyntaxError if the module's source code is syntactically incorrect.
            ImportError if there was a problem accessing the source code.
            Whatever else can be raised by executing the module's source code.
        """
        submodname, is_package, fullpath, source = self._get_source(fullmodname)
        code = compile(source, fullpath, 'exec')
        mod = sys.modules.get(fullmodname)
        try:
            if mod is None:
                mod = sys.modules[fullmodname] = types.ModuleType(fullmodname)
            mod.__loader__ = self
            mod.__file__ = fullpath
            mod.__name__ = fullmodname
            if is_package:
                mod.__path__ = [os.path.dirname(mod.__file__)]
            exec(code, mod.__dict__)
        except:
            # Do not leave a half-initialized module in sys.modules.
            if fullmodname in sys.modules:
                del sys.modules[fullmodname]
            raise
        return mod
|
poco/utils/simplerpc/jsonrpc/manager.py | HBoPRC/Poco | 1,444 | 79872 | import json
# import logging
from .utils import is_invalid_params
from .exceptions import (
JSONRPCInvalidParams,
JSONRPCInvalidRequest,
JSONRPCInvalidRequestException,
JSONRPCMethodNotFound,
JSONRPCParseError,
JSONRPCServerError,
JSONRPCDispatchException,
)
from .jsonrpc1 import JSONRPC10Response
from .jsonrpc2 import (
JSONRPC20BatchRequest,
JSONRPC20BatchResponse,
JSONRPC20Response,
)
from .jsonrpc import JSONRPCRequest
# logger = logging.getLogger(__name__)
class JSONRPCResponseManager(object):
    """ JSON-RPC response manager.
    Method brings syntactic sugar into library. Given dispatcher it handles
    request (both single and batch) and handles errors.
    Request could be handled in parallel, it is server responsibility.
    :param str request_str: json string. Will be converted into
        JSONRPC20Request, JSONRPC20BatchRequest or JSONRPC10Request
    :param dict dispather: dict<function_name:function>.
    """

    # Maps the protocol version of a request to its response class.
    RESPONSE_CLASS_MAP = {
        "1.0": JSONRPC10Response,
        "2.0": JSONRPC20Response,
    }

    @classmethod
    def handle(cls, request_str, dispatcher):
        """Parse *request_str* and dispatch it, returning a response object."""
        # Requests may arrive as raw bytes (e.g. straight off a socket).
        if isinstance(request_str, bytes):
            request_str = request_str.decode("utf-8")
        # First ensure the payload is valid JSON at all ...
        try:
            json.loads(request_str)
        except (TypeError, ValueError):
            return JSONRPC20Response(error=JSONRPCParseError()._data)
        # ... then that it is a well-formed JSON-RPC request.
        try:
            request = JSONRPCRequest.from_json(request_str)
        except JSONRPCInvalidRequestException:
            return JSONRPC20Response(error=JSONRPCInvalidRequest()._data)
        return cls.handle_request(request, dispatcher)

    @classmethod
    def handle_request(cls, request, dispatcher):
        """ Handle request data.
        At this moment request has correct jsonrpc format.
        :param dict request: data parsed from request_str.
        :param jsonrpc.dispatcher.Dispatcher dispatcher:
        .. versionadded: 1.8.0
        """
        # Normalize a single request to a one-element batch for processing.
        rs = request if isinstance(request, JSONRPC20BatchRequest) \
            else [request]
        responses = [r for r in cls._get_responses(rs, dispatcher)
                     if r is not None]
        # notifications
        if not responses:
            return
        if isinstance(request, JSONRPC20BatchRequest):
            return JSONRPC20BatchResponse(*responses)
        else:
            return responses[0]

    @classmethod
    def _get_responses(cls, requests, dispatcher):
        """ Response to each single JSON-RPC Request.
        :return iterator(JSONRPC20Response):
        .. versionadded: 1.9.0
        TypeError inside the function is distinguished from Invalid Params.
        """
        for request in requests:
            def response(**kwargs):
                # Build a response matching the request's protocol version.
                return cls.RESPONSE_CLASS_MAP[request.JSONRPC_VERSION](
                    _id=request._id, **kwargs)
            try:
                method = dispatcher[request.method]
            except KeyError:
                output = response(error=JSONRPCMethodNotFound()._data)
            else:
                try:
                    result = method(*request.args, **request.kwargs)
                except JSONRPCDispatchException as e:
                    # Handler signalled a JSON-RPC-level error explicitly.
                    output = response(error=e.error._data)
                except Exception as e:
                    data = {
                        "type": e.__class__.__name__,
                        "args": e.args,
                        "message": str(e),
                    }
                    # A TypeError from a signature mismatch maps to Invalid
                    # Params; any other failure is a generic server error.
                    if isinstance(e, TypeError) and is_invalid_params(
                            method, *request.args, **request.kwargs):
                        output = response(
                            error=JSONRPCInvalidParams(data=data)._data)
                    else:
                        # logger.exception("API Exception: {0}".format(data))
                        print("API Exception: {0}".format(data))
                        output = response(
                            error=JSONRPCServerError(data=data)._data)
                else:
                    output = response(result=result)
            finally:
                # Notifications expect no response; everything else yields one.
                if not request.is_notification:
                    yield output
|
tests/io_components/test_mutate.py | detritus3872/kartothek | 171 | 79873 | <filename>tests/io_components/test_mutate.py<gh_stars>100-1000
import types
import pandas as pd
import pytest
from kartothek.io_components.merge import align_datasets
from kartothek.io_components.metapartition import MetaPartition
from kartothek.io_components.write import store_dataset_from_partitions
def test_align_datasets_prefix(dataset, evaluation_dataset, store_session):
    """Prefix matching yields one metapartition group per cluster prefix."""
    result = align_datasets(
        left_dataset_uuid=dataset.uuid,
        right_dataset_uuid=evaluation_dataset.uuid,
        store=store_session,
        match_how="prefix",
    )
    assert isinstance(result, types.GeneratorType)
    groups = list(result)
    # Two separate cluster_groups (e.g. cluster_1*)
    assert len(groups) == 2
    for group in groups:
        assert len(group) == 3, [mp.label for mp in group]
    # Test sorting of datasets by length, i.e. order of dataframes is different
    result = align_datasets(
        left_dataset_uuid=evaluation_dataset.uuid,
        right_dataset_uuid=dataset.uuid,
        store=store_session,
        match_how="prefix",
    )
    groups = list(result)
    first_group = groups[0]
def test_align_datasets_prefix__equal_number_of_partitions(
    dataset, evaluation_dataset, store_session
):
    """
    Test a scenario where the simple prefix match algorithm didn't find any
    matches in case of equal number of partitions in both datasets.
    """
    # Create a reference dataset which matches the problem (equal number of
    # partitions and suitable for prefix matching)
    metapartitions = [
        MetaPartition(label="cluster_1_1", metadata_version=dataset.metadata_version),
        MetaPartition(label="cluster_2_1", metadata_version=dataset.metadata_version),
    ]
    store_dataset_from_partitions(
        partition_list=metapartitions,
        dataset_uuid="reference_dataset_uuid",
        store=store_session,
    )
    result = align_datasets(
        left_dataset_uuid=dataset.uuid,
        right_dataset_uuid="reference_dataset_uuid",
        store=store_session,
        match_how="prefix",
    )
    assert isinstance(result, types.GeneratorType)
    groups = list(result)
    # Two separate cluster_groups (e.g. cluster_1*)
    assert len(groups) == 2
    for group in groups:
        assert len(group) == 2
    # Test sorting of datasets by length, i.e. order of dataframes is different
    result = align_datasets(
        left_dataset_uuid=evaluation_dataset.uuid,
        right_dataset_uuid=dataset.uuid,
        store=store_session,
        match_how="prefix",
    )
    groups = list(result)
    first_group = groups[0]
def test_align_datasets_exact(dataset, evaluation_dataset, store_session):
    """Exact matching fails across datasets but pairs a dataset with itself."""
    with pytest.raises(RuntimeError):
        list(
            align_datasets(
                left_dataset_uuid=dataset.uuid,
                right_dataset_uuid=evaluation_dataset.uuid,
                store=store_session,
                match_how="exact",
            )
        )
    result = align_datasets(
        left_dataset_uuid=dataset.uuid,
        right_dataset_uuid=dataset.uuid,
        store=store_session,
        match_how="exact",
    )
    assert isinstance(result, types.GeneratorType)
    groups = list(result)
    # Two separate cluster_groups (e.g. cluster_1*)
    assert len(groups) == 2
    for group, cluster in zip(groups, ["cluster_1", "cluster_2"]):
        assert len(group) == 2, [mp.label for mp in group]
        assert [mp.label for mp in group] == [cluster, cluster]
def test_align_datasets_left(dataset, evaluation_dataset, store_session):
    """Left matching pairs every left partition with all right partitions."""
    result = align_datasets(
        left_dataset_uuid=dataset.uuid,
        right_dataset_uuid=evaluation_dataset.uuid,
        store=store_session,
        match_how="left",
    )
    assert isinstance(result, types.GeneratorType)
    groups = list(result)
    assert len(groups) == len(dataset.partitions)
    right_labels = ["cluster_1_1", "cluster_1_2", "cluster_2_1", "cluster_2_2"]
    for group, left_label in zip(groups, ["cluster_1", "cluster_2"]):
        assert len(group) == 5, [mp.label for mp in group]
        assert [mp.label for mp in group] == [left_label] + right_labels
def test_align_datasets_right(dataset, evaluation_dataset, store_session):
    """Right alignment: one group per right partition, each followed by
    every left partition."""
    aligned = align_datasets(
        left_dataset_uuid=dataset.uuid,
        right_dataset_uuid=evaluation_dataset.uuid,
        store=store_session,
        match_how="right",
    )
    assert isinstance(aligned, types.GeneratorType)
    groups = list(aligned)
    assert len(groups) == len(evaluation_dataset.partitions)
    right_labels = ["cluster_1_1", "cluster_1_2", "cluster_2_1", "cluster_2_2"]
    left_labels = ["cluster_1", "cluster_2"]
    for group, right_label in zip(groups, right_labels):
        labels = [mp.label for mp in group]
        assert labels == [right_label] + left_labels, labels
def test_align_datasets_callable(dataset, evaluation_dataset, store_session):
    """A user-supplied equality predicate behaves like exact matching."""
    def comp(left, right):
        return left == right
    # The fixtures have disjoint labels, so nothing matches -> RuntimeError.
    with pytest.raises(RuntimeError):
        list(
            align_datasets(
                left_dataset_uuid=dataset.uuid,
                right_dataset_uuid=evaluation_dataset.uuid,
                store=store_session,
                match_how=comp,
            )
        )
    aligned = align_datasets(
        left_dataset_uuid=dataset.uuid,
        right_dataset_uuid=dataset.uuid,
        store=store_session,
        match_how=comp,
    )
    assert isinstance(aligned, types.GeneratorType)
    groups = list(aligned)
    # One group per cluster label (cluster_1 and cluster_2).
    assert len(groups) == 2
    for group, label in zip(groups, ["cluster_1", "cluster_2"]):
        labels = [mp.label for mp in group]
        assert labels == [label, label], labels
def test_merge_metapartitions():
    """Merging two metapartitions with the same label combines their data.

    Fix: removed a DataFrame that was constructed after the merge but never
    used in any assertion (dead code left over from an older check).
    """
    df = pd.DataFrame({"P": [1, 1], "L": [1, 2], "TARGET": [1, 2]})
    df_2 = pd.DataFrame({"P": [1], "info": "a"})
    mp = MetaPartition(label="cluster_1", data={"core": df, "helper": df_2})
    df_3 = pd.DataFrame({"P": [1, 1], "L": [1, 2], "PRED": [0.1, 0.2]})
    mp2 = MetaPartition(label="cluster_1", data={"predictions": df_3})
    merged_mp = MetaPartition.merge_metapartitions(metapartitions=[mp, mp2])
    # Shared label is kept and all three tables survive the merge.
    assert merged_mp.label == "cluster_1"
    assert len(merged_mp.data) == 3
|
plugins/dcmp/auth/models.py | qmgeng/docker-airflow | 269 | 79909 | <reponame>qmgeng/docker-airflow
# encoding: utf-8
import logging
from datetime import datetime
from sqlalchemy import (
Column, Integer, String, DateTime, Text, Boolean, ForeignKey, PickleType,
Index, Float)
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy.event import listen
from sqlalchemy.orm import sessionmaker
from airflow import settings, configuration
from airflow.utils.db import provide_session
from airflow.models import User
# Separate declarative base for the dcmp plugin's own tables.
Base = declarative_base()
class DcmpUserProfile(Base):
    """Per-user profile row extending Airflow's ``User`` with dcmp flags.

    Linked to ``airflow.models.User`` via ``user_id`` (no FK constraint);
    rows are kept in sync with the user table by ``sync_profiles``.
    """
    __tablename__ = "dcmp_user_profile"
    id = Column(Integer, primary_key=True)
    # one profile per Airflow user
    user_id = Column(Integer, unique=True, nullable=False)
    is_superuser = Column(Boolean, index=True, default=False, nullable=False)
    is_data_profiler = Column(Boolean, index=True, default=False, nullable=False)
    is_approver = Column(Boolean, index=True, default=False, nullable=False)
    # comma-separated list; parsed by approval_notification_emails_list
    approval_notification_emails = Column(Text, default="", nullable=False)
    updated_at = Column(DateTime, index=True, default=datetime.now, onupdate=datetime.now)
    created_at = Column(DateTime, index=True, default=datetime.now)
    def __repr__(self):
        return "<DcmpUserProfile: %s user#%s>" % (self.id, self.user_id)
    @property
    @provide_session
    def username(self, session=None):
        """Username of the linked Airflow user, or "" if the user is gone."""
        user = session.query(User).filter(User.id == self.user_id).first()
        if user:
            return user.username
        return ""
    @property
    def approval_notification_emails_list(self):
        """Split the comma-separated email column, dropping blank entries."""
        return [email.strip() for email in self.approval_notification_emails.split(",") if email.strip()]
def sync_profiles(action=None, target=None):
    """Reconcile DcmpUserProfile rows with the Airflow ``user`` table.

    Creates a profile for every user that lacks one and deletes profiles
    whose user no longer exists.

    :param action: "insert"/"delete" when triggered from a SQLAlchemy event;
        the event's ``target`` row may not yet be visible in a fresh session,
        so it is added to / removed from the snapshot explicitly.
    :param target: the User instance the event fired for (or None).
    """
    session = sessionmaker(autocommit=False, autoflush=False, bind=settings.engine)()
    try:
        user_ids = {user.id for user in session.query(User)}
        if action == "insert":
            user_ids.add(target.id)
        elif action == "delete":
            # discard, not remove: the row may already be absent from this
            # session's snapshot, and remove would raise KeyError then.
            user_ids.discard(target.id)
        profile_user_ids = {profile.user_id for profile in session.query(DcmpUserProfile)}
        no_profile_user_ids = user_ids - profile_user_ids
        no_user_user_ids = profile_user_ids - user_ids
        for user_id in no_profile_user_ids:
            profile = DcmpUserProfile()
            profile.user_id = user_id
            session.add(profile)
        if no_user_user_ids:
            session.query(DcmpUserProfile).filter(
                DcmpUserProfile.user_id.in_(no_user_user_ids)
            ).delete(synchronize_session=False)
        session.commit()
    finally:
        # Always release the connection, even if the sync fails (the
        # original leaked the session on any exception).
        session.close()
# Only wire up the sync machinery when imported as part of the plugin
# package (skipped when the module is executed directly).
if __package__:
    try:
        sync_profiles()
    except Exception as e:
        # Most likely the dcmp tables do not exist yet.  Fixes: logging.warn
        # is a deprecated alias of warning(), and the caught exception was
        # previously dropped from the message.
        logging.warning(
            "Run python {AIRFLOW_HOME}/plugins/dcmp/tools/upgradedb.py first: %s", e)
    # Keep profiles in sync whenever Airflow users are created or removed.
    listen(User, 'after_insert', lambda mapper, connection, target: sync_profiles(action="insert", target=target))
    listen(User, 'after_delete', lambda mapper, connection, target: sync_profiles(action="delete", target=target))
    if configuration.get('webserver', 'auth_backend').endswith('dcmp.auth.backends.password_auth'):
        # PasswordUser rows are users too; mirror the same events.
        from dcmp.auth.backends.password_auth import PasswordUser
        listen(PasswordUser, 'after_insert', lambda mapper, connection, target: sync_profiles(action="insert", target=target))
        listen(PasswordUser, 'after_delete', lambda mapper, connection, target: sync_profiles(action="delete", target=target))
tests/test_label_smoothing.py | blufb/fairseq | 307 | 79912 | <reponame>blufb/fairseq<filename>tests/test_label_smoothing.py
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import argparse
import copy
import unittest
import torch
from fairseq.criterions.cross_entropy import CrossEntropyCriterion
from fairseq.criterions.label_smoothed_cross_entropy import LabelSmoothedCrossEntropyCriterion
import tests.utils as test_utils
class TestLabelSmoothing(unittest.TestCase):
    """Unit tests comparing label-smoothed cross entropy with plain CE."""
    def setUp(self):
        """Create a 7-symbol dictionary, a 2-sample batch and a dummy model
        whose output distribution is fixed via ``args.probs``."""
        # build dictionary
        self.d = test_utils.dummy_dictionary(3)
        vocab = len(self.d)
        self.assertEqual(vocab, 4 + 3)  # 4 special + 3 tokens
        self.assertEqual(self.d.pad(), 1)
        self.assertEqual(self.d.eos(), 2)
        self.assertEqual(self.d.unk(), 3)
        pad, eos, unk, w1, w2, w3 = 1, 2, 3, 4, 5, 6  # noqa: F841
        # build dataset
        self.data = [
            # the first batch item has padding
            {'source': torch.LongTensor([w1, eos]), 'target': torch.LongTensor([w1, eos])},
            {'source': torch.LongTensor([w1, eos]), 'target': torch.LongTensor([w1, w1, eos])},
        ]
        self.sample = next(test_utils.dummy_dataloader(self.data))
        # build model
        self.args = argparse.Namespace()
        self.args.sentence_avg = False
        # Fixed per-position output probabilities; 7 columns = vocab size.
        # NOTE(review): the header names only 6 columns (pad..w3) — the
        # first column presumably belongs to the remaining special symbol;
        # confirm against test_utils.dummy_dictionary.
        self.args.probs = torch.FloatTensor([
            #      pad   eos  unk   w1   w2   w3
            [0.05, 0.05, 0.1, 0.05, 0.3, 0.4, 0.05],
            [0.05, 0.10, 0.2, 0.05, 0.2, 0.3, 0.10],
            [0.05, 0.15, 0.3, 0.05, 0.1, 0.2, 0.15],
        ]).unsqueeze(0).expand(2, 3, 7)  # add batch dimension
        self.task = test_utils.TestTranslationTask.setup_task(self.args, self.d, self.d)
        self.model = self.task.build_model(self.args)
    def test_nll_loss(self):
        """Smoothed criterion must report the same nll_loss as plain CE."""
        self.args.label_smoothing = 0.1
        nll_crit = CrossEntropyCriterion(self.args, self.task)
        smooth_crit = LabelSmoothedCrossEntropyCriterion(self.args, self.task)
        nll_loss, nll_sample_size, nll_logging_output = nll_crit(self.model, self.sample)
        smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit(self.model, self.sample)
        self.assertLess(abs(nll_loss - nll_logging_output['loss']), 1e-6)
        self.assertLess(abs(nll_loss - smooth_logging_output['nll_loss']), 1e-6)
    def test_padding(self):
        """Loss over the padded batch equals the sum of per-item losses."""
        self.args.label_smoothing = 0.1
        crit = LabelSmoothedCrossEntropyCriterion(self.args, self.task)
        loss, _, logging_output = crit(self.model, self.sample)
        def get_one_no_padding(idx):
            # create a new sample with just a single batch item so that there's
            # no padding
            sample1 = next(test_utils.dummy_dataloader([self.data[idx]]))
            args1 = copy.copy(self.args)
            args1.probs = args1.probs[idx, :, :].unsqueeze(0)
            model1 = self.task.build_model(args1)
            loss1, _, _ = crit(model1, sample1)
            return loss1
        loss1 = get_one_no_padding(0)
        loss2 = get_one_no_padding(1)
        self.assertAlmostEqual(loss, loss1 + loss2)
    def test_reduction(self):
        """Reduced loss equals the sum of the unreduced per-token losses."""
        self.args.label_smoothing = 0.1
        crit = LabelSmoothedCrossEntropyCriterion(self.args, self.task)
        loss, _, logging_output = crit(self.model, self.sample, reduce=True)
        unreduced_loss, _, _ = crit(self.model, self.sample, reduce=False)
        self.assertAlmostEqual(loss, unreduced_loss.sum())
    def test_zero_eps(self):
        """With label_smoothing=0 the smoothed loss collapses to plain CE."""
        self.args.label_smoothing = 0.0
        nll_crit = CrossEntropyCriterion(self.args, self.task)
        smooth_crit = LabelSmoothedCrossEntropyCriterion(self.args, self.task)
        nll_loss, nll_sample_size, nll_logging_output = nll_crit(self.model, self.sample)
        smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit(self.model, self.sample)
        self.assertAlmostEqual(nll_loss, smooth_loss)
    def assertAlmostEqual(self, t1, t2):
        """Tensor-aware replacement for unittest's assertAlmostEqual."""
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-6)
# Allow running this file directly: python tests/test_label_smoothing.py
if __name__ == '__main__':
    unittest.main()
|
django_version.py | Muflhi01/flow-dashboard | 1,623 | 79931 | <reponame>Muflhi01/flow-dashboard<filename>django_version.py
import os
# Point Django at the project's settings module before anything imports Django.
os.environ["DJANGO_SETTINGS_MODULE"] = "settings"
|
dolly_tests/launch/follow_ignition_TEST.launch.py | Russ76/dolly | 169 | 79945 | <filename>dolly_tests/launch/follow_ignition_TEST.launch.py<gh_stars>100-1000
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import launch_testing
import launch
import launch.actions
import launch_testing.actions
import launch_testing.markers
from ament_index_python.packages import get_package_share_directory
from launch_ros.actions import Node
def generate_test_description():
    """Launch description for the Ignition follow test.

    Starts the gazebo test fixture, spawns the Dolly model, bridges the
    cmd_vel/laser_scan topics between ROS 2 and Ignition, and runs the
    follow node.  Returns the description plus ``locals()`` so
    launch_testing can inject the actions into tests by name — do NOT
    rename the local variables below.
    """
    # Test fixture
    gazebo_test_fixture = Node(
        package='dolly_tests',
        executable='follow_ignition_TEST',
        output='screen'
    )
    # Spawn dolly
    pkg_dolly_ignition = get_package_share_directory('dolly_ignition')
    spawn = Node(package='ros_ign_gazebo', executable='create',
                 arguments=[
                     '-name', 'dolly',
                     '-z', '0.225',
                     '-file', os.path.join(pkg_dolly_ignition, 'models', 'dolly_ignition',
                                           'model.sdf')],
                 output='screen')
    # Bridge
    bridge = Node(
        package='ros_ign_bridge',
        executable='parameter_bridge',
        arguments=['/dolly/cmd_vel@geometry_msgs/msg/Twist@ignition.msgs.Twist',
                   '/dolly/laser_scan@sensor_msgs/msg/LaserScan@ignition.msgs.LaserScan'],
        output='screen'
    )
    # Follow node
    follow = Node(
        package='dolly_follow',
        executable='dolly_follow',
        output='screen',
        remappings=[
            ('cmd_vel', '/dolly/cmd_vel'),
            ('laser_scan', '/dolly/laser_scan')
        ]
    )
    return launch.LaunchDescription([
        gazebo_test_fixture,
        spawn,
        bridge,
        follow,
        launch_testing.util.KeepAliveProc(),
        launch_testing.actions.ReadyToTest()
    ]), locals()
class DollyFollowTest(unittest.TestCase):
    """Active test: runs while the launched processes are still up."""
    def test_termination(self, gazebo_test_fixture, proc_info):
        """The gazebo fixture must shut down on its own within 200 s."""
        proc_info.assertWaitForShutdown(process=gazebo_test_fixture, timeout=200)
@launch_testing.post_shutdown_test()
class DollyFollowTestAfterShutdown(unittest.TestCase):
    """Post-shutdown checks, run after all launch processes have exited."""
    def test_exit_code(self, gazebo_test_fixture, proc_info):
        """The fixture must have exited cleanly (EXIT_OK)."""
        launch_testing.asserts.assertExitCodes(
            proc_info,
            [launch_testing.asserts.EXIT_OK],
            gazebo_test_fixture
        )
|
pipetools/__init__.py | 0101/pipetools | 164 | 79971 | <gh_stars>100-1000
from pipetools.utils import foreach
__version__ = VERSION = 1, 0, 1
# Dog-fooding: build the "1.0.1" string with pipetools' own pipe syntax
# (feed VERSION through str() per element, then join with dots).
__versionstr__ = VERSION > foreach(str) | '.'.join
from pipetools.main import pipe, X, maybe, xpartial
from pipetools.utils import *
# prevent namespace pollution
import pipetools.compat
for symbol in dir(pipetools.compat):
    if globals().get(symbol) is getattr(pipetools.compat, symbol):
        # drop names that leaked in via the compat shim's star re-export
        globals().pop(symbol)
images/mariadb-galera/src/entrypoint.py | makr17/docker-tools | 180 | 80003 | <reponame>makr17/docker-tools<filename>images/mariadb-galera/src/entrypoint.py
#!/usr/bin/env python
# MariaDB cluster startup
# This script upon startup waits for a specified number of nodes
# (environment variable CLUSTER_SIZE) to report state of files
# in /var/lib/mysql to etcd. It will halt if the cluster is
# degraded, with fewer than that number of nodes. It follows the
# Galera documentation's specified steps to bring up nodes in
# proper sequence, by electing a leader with the most current data.
#
# created 4 Jun 2017 by <NAME> <EMAIL>b at instantlinux. net
#
# License: https://www.apache.org/licenses/LICENSE-2.0
import logging
import os
import pwd
import random
import re
import socket
import subprocess
import sys
import time
import etcd
class Constants(object):
    """Fixed paths, etcd key names, health status values and TTLs."""
    DATA_DIR = '/var/lib/mysql'
    DEFAULT_CLUSTER_SIZE = 3
    ETCD_PREFIX = '/galera'
    LOG_DIR = '/var/log/mysql'
    # etcd key names published per node
    KEY_CLUSTER_UPDATE_TIMER = 'update_timer'
    KEY_HEALTH = 'health'
    KEY_HOSTNAME = 'hostname'
    KEY_RECOVERED_POSITION = 'recovered_position'
    KEY_SAFE_TO_BOOTSTRAP = 'safe_to_bootstrap'
    KEY_WSREP_GCOMM_UUID = 'wsrep_gcomm_uuid'
    KEY_WSREP_LOCAL_STATE_COMMENT = 'wsrep_local_state_comment'
    # values stored under KEY_HEALTH
    STATUS_DEGRADED = 'degraded'
    STATUS_DONOR = 'donor'
    STATUS_INIT = 'initializing'
    STATUS_INSTALL = 'installing'
    STATUS_NEW = 'new'
    STATUS_OK = 'ok'
    STATUS_RESTARTING = 'restarting'
    # TTLs in seconds for etcd keys, locks and startup grace periods
    DEFAULT_TTL = 10
    TTL_DIR = 900
    TTL_LOCK = 45
    TTL_STACK_UP = 600
    TTL_UPDATE_TIMER = 90
class ClusterDegradedError(Exception):
    """Raised when too few healthy nodes are available to proceed."""
    pass
class NotYetImplementedError(Exception):
    """Raised for cluster states this script does not know how to handle."""
    pass
class MariaDBCluster(object):
    def __init__(self):
        """Read cluster configuration from the container environment."""
        self.name = os.environ['CLUSTER_NAME']
        try:
            # optional explicit peer list to join directly (skips election)
            self.join = os.environ['CLUSTER_JOIN']
        except KeyError:
            self.join = None
        try:
            self.cluster_size = int(os.environ['CLUSTER_SIZE'])
        except KeyError:
            self.cluster_size = Constants.DEFAULT_CLUSTER_SIZE
        # opt-in flag allowing a data-less node to (re)install a database
        self.reinstall_ok = 'REINSTALL_OK' in os.environ
        self.ttl_lock = Constants.TTL_LOCK
        self.ttl_stack_up = Constants.TTL_STACK_UP
        self.ttl_update_timer = Constants.TTL_UPDATE_TIMER
        self.update_timer_active = False
        self.my_hostname = socket.gethostname()
        self.my_ipv4 = socket.gethostbyname(self.my_hostname)
        # ask mysqld for its effective datadir rather than hard-coding it
        self.data_dir = self._invoke(
            'mysqld --verbose --help --wsrep-cluster-address=none '
            '| grep ^datadir').split()[1].strip()
        self.root_password = self._get_root_password()
        self.sst_password = self._get_sst_password()
        # last wsrep_cluster_address pushed by _update_cluster_address
        self.prev_address = None
    def share_initial_state(self, discovery):
        """Query data_dir contents for initial state, and share via
        the etcd discovery service.

        Publishes health, hostname and - when determinable - the
        safe_to_bootstrap flag, recovered position and gcomm UUID, all
        with the stack-up TTL so stale entries expire on their own.

        params: discovery - connection to etcd
        """
        self.discovery = discovery
        if self._is_new_install():
            self.health = Constants.STATUS_NEW
            discovery.set_key(Constants.KEY_HEALTH, self.health,
                              ttl=self.ttl_stack_up)
        else:
            self.health = Constants.STATUS_INIT
            discovery.set_key(Constants.KEY_HEALTH, self.health,
                              ttl=self.ttl_stack_up)
        discovery.set_key(Constants.KEY_HOSTNAME, self.my_hostname,
                          ttl=self.ttl_stack_up)
        try:
            discovery.set_key(Constants.KEY_SAFE_TO_BOOTSTRAP,
                              self._is_safe_to_boot(),
                              ttl=self.ttl_stack_up)
        except AssertionError:
            # grastate.dat missing or unreadable - skip this hint
            pass
        try:
            discovery.set_key(Constants.KEY_RECOVERED_POSITION,
                              self._get_recovered_position(),
                              ttl=self.ttl_stack_up)
        except AssertionError:
            # no recovery position could be parsed - skip this hint
            pass
        gcomm_uuid = self._get_gcomm_uuid()
        if gcomm_uuid:
            discovery.set_key(Constants.KEY_WSREP_GCOMM_UUID,
                              gcomm_uuid,
                              ttl=self.ttl_stack_up)
    def wait_checkin(self, retry_interval=5):
        """wait for all cluster nodes to check in
        looks for self.cluster_size nodes to report health
        returns a dict of status values keyed by nodes' ipv4 addresses
        returns: dict
        raises: ClusterDegradedError

        Polls every retry_interval seconds for as long as our own health
        key is still present (it carries a TTL, so its expiry bounds the
        wait).
        NOTE(review): if our own health key has already expired when this
        is called, the loop body never runs and `retval` is unbound below.
        """
        while self.discovery.get_key(Constants.KEY_HEALTH, ipv4=self.my_ipv4):
            retval = self._cluster_health()
            if len(retval) >= self.cluster_size:
                break
            time.sleep(retry_interval)
            logging.debug(dict(retval, **{
                'action': 'wait_checkin', 'status': 'retry'}))
        if len(retval) >= self.cluster_size:
            logging.info(dict(retval, **{
                'action': 'wait_checkin', 'status': 'ok',
                'peers': ','.join(retval.keys())}))
            return retval
        logging.error(dict(retval, **{
            'action': 'wait_checkin', 'status': 'error'}))
        raise ClusterDegradedError(
            'Insufficient number (%d) of nodes (need %d)' %
            (len(retval), self.cluster_size))
def start_database(self, cluster_address='', wsrep_new_cluster=False,
cmdarg=None):
command = (
'exec /usr/sbin/mysqld --wsrep_cluster_name=%(cluster_name)s '
'--wsrep-cluster-address="gcomm://%(address)s" '
'--wsrep_sst_auth="sst:%(sst_password)s"' % {
'cluster_name': self.name,
'address': cluster_address,
'sst_password': self.sst_password})
if wsrep_new_cluster:
command += ' --wsrep-new-cluster'
if cmdarg:
command += ' %s' % cmdarg
os.chown(Constants.DATA_DIR, pwd.getpwnam('mysql').pw_uid, -1)
os.chown(Constants.LOG_DIR, pwd.getpwnam('mysql').pw_uid, -1)
if cluster_address:
assert self._peer_reachable(cluster_address.split(',')[0]), (
'Network connectivity problem for %s' % cluster_address)
while True:
# skew startup of concurrent launches by self.ttl_lock seconds
try:
self.discovery.acquire_lock('bootstrap', ttl=self.ttl_lock)
except etcd.EtcdLockExpired:
pass
if Constants.STATUS_DONOR in self._cluster_health():
# perform only one SST join at a time, loop until others done
time.sleep(5)
else:
break
logging.info({
'action': 'start_database',
'status': 'start',
'cluster_name': self.name,
'cluster_address': cluster_address,
'wsrep_new_cluster': wsrep_new_cluster,
'cmdarg': cmdarg
})
self.proc = self._run_background(command)
    def start(self, ipv4, initial_state=Constants.STATUS_RESTARTING,
              cluster_address='', install_ok=False):
        """start database
        Bootstrap if running on the node elected as leader (param 'ipv4')
        Otherwise join cluster

        :param ipv4: address of the elected leader node
        :param initial_state: health value published while bootstrapping
        :param cluster_address: gcomm peer list used when joining
        :param install_ok: allow installing a fresh database on a
            data-less node
        :raises ClusterDegradedError: node has no data and install is
            not allowed
        """
        log_info = {'action': 'start', 'leader': ipv4}
        if self.my_ipv4 == ipv4:
            # we are the leader: publish our state and bootstrap
            self.discovery.set_key(Constants.KEY_HEALTH, initial_state,
                                   ttl=self.ttl_stack_up)
            if initial_state == Constants.STATUS_INSTALL and install_ok:
                self._install_new_database()
            self.start_database(wsrep_new_cluster=True)
        else:
            # join other nodes after first is up
            # TODO: may need a timeout, currently relying on healthcheck
            logging.info(dict(log_info, **{'status': 'waiting'}))
            while (self.discovery.get_key(Constants.KEY_HEALTH, ipv4=ipv4) !=
                    Constants.STATUS_OK):
                time.sleep(1)
            if self.health == Constants.STATUS_NEW:
                if not (initial_state == Constants.STATUS_INSTALL or
                        install_ok):
                    logging.error(dict(log_info, **{
                        'status': 'error',
                        'message': 'missing_data_reinstall_is_not_ok'}))
                    raise ClusterDegradedError('Missing database')
            self.start_database(cluster_address=cluster_address)
    def restart_database(self, node_list):
        """Restart down cluster

        Elects a leader from the state the nodes published at startup:
        prefer the single node whose grastate.dat says safe_to_bootstrap
        (clean shutdown), otherwise the node with the highest recovered
        position (crash recovery).

        :param node_list: iterable of peer ipv4 addresses
        :raises NotYetImplementedError: state cannot be handled here
        """
        peer_state = {ipv4: self.discovery.get_key_recursive('', ipv4=ipv4)
                      for ipv4 in sorted(node_list)}
        safe_to_bootstrap = 0
        recovered_position = -1
        recoverable_nodes = 0
        for ipv4, peer in peer_state.items():
            # Leader election
            if (not peer or
                    peer.get(Constants.KEY_HEALTH) == Constants.STATUS_NEW):
                # node vanished or has no data - not a candidate
                continue
            if int(peer.get(Constants.KEY_SAFE_TO_BOOTSTRAP, 0)) == 1:
                safe_to_bootstrap += 1
                addr_bootstrap = ipv4
            val = peer.get(Constants.KEY_RECOVERED_POSITION, 0)
            if val is not None:
                recoverable_nodes += 1
                if int(val) > recovered_position:
                    recovered_position = int(val)
                    addr_highest_pos = ipv4
        if safe_to_bootstrap == 1:
            # Cluster was shut down normally
            logging.info({'action': 'restart_database',
                          'message': 'restart_safe',
                          'leader': addr_bootstrap})
            self._restart(addr_bootstrap)
        elif recoverable_nodes >= self.cluster_size - 1:
            # Cluster crashed
            logging.info({'action': 'restart_database',
                          'message': 'restart_recovery',
                          'leader': addr_highest_pos,
                          'position': recovered_position})
            self._restart(addr_highest_pos,
                          reset_grastate=(addr_highest_pos == self.my_ipv4))
        else:
            logging.error({
                'action': 'restart_database',
                'status': 'error',
                'message': 'unhandled_state',
                'safe_to_bootstrap': safe_to_bootstrap,
                'recoverable_nodes': recoverable_nodes,
                'recovered_position': recovered_position
            })
            # Leave nodes up long enough to diagnose
            time.sleep(300)
            raise NotYetImplementedError('Unhandled cluster state')
def report_status(self):
"""update etcd keys (health, etc) with current state"""
def _set_wsrep_key(key):
val = self._invoke(
'mysql -u root -p%(pw)s -Bse '
'"SHOW STATUS LIKE \'%(key)s\';"' % {
'pw': self.root_password, 'key': key}).split()[1]
self.discovery.set_key(key, val, ttl=self.discovery.ttl)
return val
log_info = {'action': 'report_status', 'status': 'warn'}
try:
self.discovery.set_key(Constants.KEY_HOSTNAME, self.my_hostname)
except etcd.EtcdException as ex:
logging.warn(dict(log_info, **{'message': str(ex)}))
try:
status = _set_wsrep_key(Constants.KEY_WSREP_LOCAL_STATE_COMMENT)
if status == 'Synced':
if self.cluster_size > 1:
self._update_cluster_address()
self.discovery.set_key(Constants.KEY_HEALTH,
Constants.STATUS_OK)
elif status == 'Donor/Desynced':
self.discovery.set_key(Constants.KEY_HEALTH,
Constants.STATUS_DONOR)
else:
self.discovery.set_key(Constants.KEY_HEALTH,
Constants.STATUS_DEGRADED)
except IndexError:
pass
except etcd.EtcdException as ex:
logging.warn(dict(log_info, **{'message': str(ex)}))
    def _get_root_password(self):
        """get root password from environment or Docker secret
        if not specified, and environment MYSQL_RANDOM_ROOT_PASSWORD has
        any value, a new random pw will be generated

        raises: AssertionError when no source yields a password
        NOTE(review): a missing ROOT_PASSWORD_SECRET env var raises an
        uncaught KeyError here (only IOError is handled), and f.read()
        keeps any trailing newline from the secret file - confirm secrets
        are written without one.
        """
        if 'MYSQL_ROOT_PASSWORD' in os.environ:
            return os.environ['MYSQL_ROOT_PASSWORD']
        try:
            with open(os.path.join('/run/secrets',
                                   os.environ['ROOT_PASSWORD_SECRET']),
                      'r') as f:
                pw = f.read()
                return pw
        except IOError:
            # secret file not mounted; fall through
            pass
        if 'MYSQL_RANDOM_ROOT_PASSWORD' in os.environ:
            # 20 hex chars of randomness (random module, not secrets)
            return '%020x' % random.randrange(16**20)
        else:
            raise AssertionError('Root password must be specified')
    def _get_sst_password(self):
        """Get the SST (state snapshot transfer) password from environment
        or Docker secret; falls back to an empty string.

        NOTE(review): a missing SST_AUTH_SECRET env var raises an uncaught
        KeyError here - only a missing secret file (IOError) is tolerated.
        """
        if 'SST_PASSWORD' in os.environ:
            return os.environ['SST_PASSWORD']
        try:
            with open(os.path.join('/run/secrets',
                                   os.environ['SST_AUTH_SECRET']), 'r') as f:
                pw = f.read()
                return pw
        except IOError:
            pass
        return ''
def _is_new_install(self):
return (not os.path.exists(os.path.join(self.data_dir, 'ibdata1')) and
not os.path.exists(os.path.join(self.data_dir, 'mysql')))
    def _is_safe_to_boot(self):
        """query grastate.dat safe_to_bootstrap value

        returns: int flag from the file (1 after a clean shutdown)
        raises: AssertionError when the file is missing, unreadable, or
            lacks the safe_to_bootstrap key
        """
        try:
            with open(os.path.join(self.data_dir, 'grastate.dat'), 'r') as f:
                for line in f:
                    if line.split(':')[0] == Constants.KEY_SAFE_TO_BOOTSTRAP:
                        return int(line.split(':')[1])
        except IOError as ex:
            logging.error({'action': '_is_safe_to_boot', 'status': 'error',
                           'message': str(ex)})
        # reached when the file was unreadable or the key was not found
        raise AssertionError('Invalid content or missing grastate.dat')
    def _reset_grastate(self, value=1):
        """reset safe_to_bootstrap value on current node

        Rewrites the flag in grastate.dat in place (via sed) so the next
        mysqld start is allowed to bootstrap the cluster.

        :param value: new safe_to_bootstrap value (1 = allowed)
        """
        self._invoke(
            'sed -i "s/safe_to_bootstrap.*/safe_to_bootstrap: %d/" %s' %
            (value, os.path.join(self.data_dir, 'grastate.dat')),
            ignore_errors=False)
def _cluster_health(self):
instances = self.discovery.get_key('')
health_status = {
item: self.discovery.get_key(Constants.KEY_HEALTH, ipv4=item)
for item in instances
}
return dict((key, val) for key, val in
health_status.items() if val)
def _get_recovered_position(self):
"""parse recovered position using wsrep-recover
returns: int
raises: AssertionError if not found
"""
uuid_pat = re.compile('[a-z0-9]*-[a-z0-9]*:-*[0-9]', re.I)
filename = os.path.join(self.data_dir, '%s.err' % self.my_hostname)
self._invoke('mysqld_safe --wsrep-cluster-address=gcomm:// '
'--wsrep-recover --skip-syslog', ignore_errors=False)
with open(filename, 'r') as f:
for line in f:
match = re.match(uuid_pat, line)
if match:
return int(match.split(':')[1])
os.unlink(filename)
raise AssertionError('No recovery position identified')
def _get_gcomm_uuid(self):
"""query gvwstate.dat my_uuid value
returns: None (from a clean shutdown) or my_uuid value
"""
try:
with open(os.path.join(self.data_dir, 'gvwstate.dat'), 'r') as f:
for line in f:
if line.split(':')[0] == 'my_uuid':
return line.split(':')[1].strip()
except IOError:
pass
return None
    def _install_new_database(self, timeout=30):
        """run the mysql_install_db installer and set up system users

        Boots a throwaway mysqld (--skip-networking) to run the setup SQL,
        waiting up to ``timeout`` seconds for it to answer, then terminates
        it.  Exits the whole process on timeout.
        """
        # One-time setup SQL: restrict root, create the SST user, drop the
        # sample database.  SQL_LOG_BIN=0 keeps this out of replication.
        script_setusers = r"""
        SET @@SESSION.SQL_LOG_BIN=0;
        DELETE FROM mysql.user WHERE user='root' AND host!='localhost';
        DELETE FROM mysql.user WHERE user='';
        UPDATE mysql.user set host='%%' where user='root' and host='localhost';
        CREATE USER 'sst'@'localhost' IDENTIFIED BY '%(sst_password)s';
        GRANT RELOAD,PROCESS,LOCK TABLES,REPLICATION CLIENT ON *.* TO
        'sst'@'localhost';
        DROP DATABASE IF EXISTS test;
        FLUSH PRIVILEGES;
        """
        logging.info({'action': '_install_new_database', 'status': 'start'})
        opts = '--user=mysql --datadir=%s --wsrep_on=OFF' % self.data_dir
        mysql_client = '/usr/bin/mysql --protocol=socket -u root'
        sys.stdout.write(self._invoke('mysql_install_db %s --rpm' %
                                      opts + ' --no-defaults'))
        start_time = time.time()
        proc = self._run_background(
            'exec /usr/sbin/mysqld %s --skip-networking' % opts)
        # poll until the server answers a trivial SELECT or we time out
        while time.time() - start_time < timeout:
            time.sleep(1)
            if self._invoke('%s -e "SELECT 1;"' % mysql_client
                            ).split() == ['1', '1']:
                break
        if time.time() - start_time > timeout:
            logging.error({'action': '_install_new_database',
                           'message': 'timeout', 'status': 'error'})
            # Leave node up long enough to diagnose
            time.sleep(30)
            exit(1)
        logging.info({'action': '_install_new_database', 'step': '0'})
        sys.stdout.write(self._invoke(
            'mysqladmin password "%s"' % self.root_password,
            ignore_errors=False, suppress_log=True))
        # load timezone tables; the sed rewrites one literal warning line
        # in mysql_tzinfo_to_sql's output before the import
        sys.stdout.write(self._invoke(
            'mysql_tzinfo_to_sql /usr/share/zoneinfo | '
            'sed "s/Local time zone must be set--see zic manual page/FCTY/" | '
            '%s mysql -u root -p%s' % (mysql_client, self.root_password),
            ignore_errors=False))
        logging.info({'action': '_install_new_database', 'step': '1'})
        sys.stdout.write(self._invoke(
            '%(mysql)s -u root -p%(mysql_root_password)s -e "%(script)s"' % {
                'mysql': mysql_client,
                'mysql_root_password': self.root_password,
                'script': script_setusers % {
                    'sst_password': self.sst_password
                }}, ignore_errors=False, suppress_log=False))
        logging.info({'action': '_install_new_database', 'step': '2'})
        # give the temporary server time to settle before stopping it
        time.sleep(60)
        proc.terminate()
        proc.wait()
        logging.info({'action': '_install_new_database', 'status': 'ok'})
    def _restart(self, ipv4, reset_grastate=False):
        """Start the database using the elected leader.

        :param ipv4: leader address (bootstraps when it is this node)
        :param reset_grastate: force safe_to_bootstrap=1 locally first
        """
        logging.info({'action': '_restart', 'leader': ipv4,
                      'reset_grastate': reset_grastate})
        if reset_grastate:
            self._reset_grastate()
        self.start(ipv4, cluster_address=ipv4, install_ok=self.reinstall_ok)
    def _update_cluster_address(self):
        """find healthy nodes and update wsrep_cluster_address
        Once all of the cluster's nodes are in Synced state, each should
        be updated with the peer list of nodes in order to ensure full
        HA operation.
        A randomized timer is needed in order to stagger these updates;
        if they all happen within less than a minute, replication will
        stall and the cluster will go out of sync.
        """
        nodes = self.discovery.get_key('')
        synced = [
            ipv4 + ':' for ipv4 in nodes if
            self.discovery.get_key(Constants.KEY_WSREP_LOCAL_STATE_COMMENT,
                                   ipv4=ipv4) == 'Synced']
        address = 'gcomm://' + ','.join(sorted(synced))
        log_info = {'action': '_update_cluster_address',
                    'prev_address': self.prev_address,
                    'cluster_address': address}
        # act only when the full cluster is synced, the address actually
        # changed, and our stagger timer key has expired in etcd
        if (len(synced) >= self.cluster_size and address != self.prev_address
                and not self.discovery.get_key(
                    Constants.KEY_CLUSTER_UPDATE_TIMER, ipv4=self.my_ipv4)):
            if not self.update_timer_active:
                # first pass: arm a randomized timer instead of updating now
                ttl = (self.ttl_update_timer +
                       random.randrange(self.ttl_update_timer))
                self.discovery.set_key(Constants.KEY_CLUSTER_UPDATE_TIMER, '1',
                                       ttl=ttl)
                self.update_timer_active = True
                logging.info(dict(log_info, **{
                    'status': 'start_timer', 'ttl': ttl}))
            else:
                # timer expired: push the new peer list into mysqld
                self.update_timer_active = False
                self._invoke(
                    'mysql -u root -p%(pw)s -e '
                    '"SET GLOBAL wsrep_cluster_address=\'%(address)s\'";' %
                    {'pw': self.root_password, 'address': address})
                self.prev_address = address
                logging.info(dict(log_info, **{'status': 'ok'}))
def _peer_reachable(self, ipv4):
"""confirm that a peer can be reached"""
try:
self._invoke('ping -c 2 -w 2 %s' % ipv4, ignore_errors=False)
except AssertionError:
return False
return True
@staticmethod
def _invoke(command, ignore_errors=True, suppress_log=False):
"""invoke a shell command and return its stdout"""
log_info = {'action': '_invoke', 'command': re.sub(
'-u root -p.+ ', '-u root -p[redacted] ', command)}
log_info['command'] = re.sub(
"IDENTIFIED BY '.+'", "IDENTIFIED BY '[redacted]'",
log_info['command'])
proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
retval = proc.communicate()[0].decode('ascii')
if proc.returncode == 0:
if not (suppress_log or 'SHOW STATUS LIKE' in command):
logging.info(dict(log_info, **{
'status': 'ok',
'returncode': proc.returncode,
'output': retval}))
else:
logging.error(dict(log_info, **{
'status': 'error',
'returncode': proc.returncode,
'output': retval}))
if not ignore_errors:
raise AssertionError('Command returned %d' % proc.returncode)
return retval
    @staticmethod
    def _run_background(command):
        """run a command in background

        returns: subprocess.Popen handle (caller may terminate()/wait())
        """
        return subprocess.Popen(command, shell=True)
class DiscoveryService(object):
    """Thin wrapper around an etcd2 client, namespaced per cluster.

    Keys live under /galera/<cluster>/<node-ipv4>/... and carry TTLs so
    entries published by dead nodes expire on their own.
    """
    def __init__(self, nodes, cluster):
        """
        :param nodes: tuple of (host, port) pairs for the etcd cluster
        :param cluster: cluster name used to namespace all keys
        """
        self.ipv4 = socket.gethostbyname(socket.gethostname())
        self.etcd = etcd.Client(host=nodes, allow_reconnect=True,
                                lock_prefix='/%s/_locks' % cluster)
        self.prefix = Constants.ETCD_PREFIX + '/' + cluster
        try:
            self.ttl = int(os.environ['TTL'])
        except KeyError:
            self.ttl = Constants.DEFAULT_TTL
        self.ttl_dir = Constants.TTL_DIR
        self.locks = {}
    def __del__(self):
        # best effort: drop this node's subtree when the object goes away
        self.delete_key(self.ipv4)
    def set_key(self, keyname, value, my_host=True, ttl=None):
        """set a key under /galera/<cluster>/<my_host>

        :param my_host: when False, write under the cluster prefix instead
            of this node's subtree
        :param ttl: key TTL in seconds (DEFAULT_TTL when falsy)
        """
        logging.debug({'action': 'set_key', 'keyname': keyname,
                       'value': value})
        key_path = self.prefix + '/' + self.ipv4 if my_host else self.prefix
        try:
            # refresh the parent directory's TTL as a side effect
            self.etcd.write(key_path, None, dir=True, ttl=self.ttl_dir)
        except etcd.EtcdNotFile:
            # directory already exists
            pass
        self.etcd.write('%(key_path)s/%(keyname)s' % {
            'key_path': key_path, 'keyname': keyname
        },
            value, ttl=ttl if ttl else Constants.DEFAULT_TTL)
    def get_key(self, keyname, ipv4=None):
        """Fetch the key for a given ipv4 node
        returns: scalar value or list of child keys
        (None when the key does not exist)
        """
        log_info = {'action': 'get_key', 'keyname': keyname, 'ipv4': ipv4}
        key_path = self.prefix + '/' + ipv4 if ipv4 else self.prefix
        key_path += '/' + keyname if keyname else ''
        try:
            item = self.etcd.read(key_path, timeout=10)
        except (etcd.EtcdKeyNotFound, etcd.EtcdNotDir):
            logging.debug(dict(log_info, **{
                'status': 'error',
                'message': 'not_found'}))
            return None
        log_info['status'] = 'ok'
        if item.dir:
            # directory node: return the child key names, path stripped
            retval = [child.key[len(key_path) + 1:]
                      for child in item.children]
            return retval
        else:
            logging.debug(dict(log_info, **{'value': item.value}))
            return item.value
    def delete_key(self, keyname, ipv4=None):
        """Recursively delete a key; a missing key is only logged."""
        log_info = {'action': 'delete_key', 'keyname': keyname, 'ipv4': ipv4}
        key_path = self.prefix + '/' + ipv4 if ipv4 else self.prefix
        key_path += '/' + keyname if keyname else ''
        try:
            self.etcd.delete(key_path, recursive=True)
            logging.debug(dict(log_info, **{'status': 'ok'}))
        except etcd.EtcdKeyNotFound:
            logging.debug(dict(log_info, **{
                'status': 'error',
                'message': 'not_found'}))
    def get_key_recursive(self, keyname, ipv4=None, nest_level=0):
        """Fetch all keys under the given node

        returns: nested dict of values (or a scalar for leaf keys)
        raises: AssertionError when nesting exceeds 10 levels
        """
        assert nest_level < 10, 'Recursion too deep'
        retval = self.get_key(keyname, ipv4=ipv4)
        if type(retval) is list:
            return {key: self.get_key_recursive(key, ipv4=ipv4,
                                                nest_level=nest_level + 1)
                    for key in retval}
        else:
            return retval
    def acquire_lock(self, lock_name, ttl=Constants.DEFAULT_TTL):
        """acquire cluster lock - used upon electing leader

        Spins on an advisory 'lock-<name>' key before taking the etcd
        lock; not atomic (see TODO), so two nodes can race past the poll.
        """
        logging.info({'action': 'acquire_lock',
                      'lock_name': lock_name, 'ttl': ttl})
        self.locks[lock_name] = etcd.Lock(self.etcd, lock_name)
        # TODO: make this an atomic mutex with etcd3 (currently using etcd2)
        while self.get_key('lock-%s' % lock_name):
            time.sleep(0.25)
        self.set_key('lock-%s' % lock_name, self.ipv4, my_host=False, ttl=ttl)
        self.locks[lock_name].acquire(lock_ttl=2)
    def release_lock(self, lock_name):
        """release cluster lock"""
        logging.info({'action': 'release_lock', 'lock_name': lock_name})
        self.delete_key('lock-%s' % lock_name)
        self.locks[lock_name].release()
class LoggingDictFormatter(logging.Formatter):
    """Formatter that renders dict log messages as 'key1=val key2=val ...'."""

    def format(self, record):
        """Flatten a dict message before delegating to the base formatter."""
        # isinstance (rather than an exact type check) also covers dict
        # subclasses such as collections.OrderedDict, which the original
        # `type(...) is dict` test silently left unformatted.
        if isinstance(record.msg, dict):
            record.msg = self._dict_to_str(record.msg)
        return super(LoggingDictFormatter, self).format(record)

    @staticmethod
    def _dict_to_str(values):
        """Convert a dict to string key1=val key2=val ...

        Entries are sorted by their rendered "key=value" text so output
        is deterministic; values are stringified and stripped.
        """
        return ' '.join(sorted('%s=%s' % (key, str(val).strip())
                               for key, val in values.items()))
def setup_logging(level=logging.INFO, output=sys.stdout):
    """For Docker: attach a stream handler (stdout by default) to the root logger."""
    root_logger = logging.getLogger()
    root_logger.setLevel(level)
    stream_handler = logging.StreamHandler(output)
    formatter = LoggingDictFormatter(
        '%(asctime)s %(levelname)s %(message)s', '%Y-%m-%d %H:%M:%S')
    stream_handler.setFormatter(formatter)
    root_logger.addHandler(stream_handler)
def main():
    """Entry point: decide how this node joins or bootstraps the MariaDB cluster.

    Registers with the discovery service, waits for peers to check in,
    then picks a startup strategy based on the observed peer states.
    Runs forever while the database process is alive; raises when it dies.
    """
    setup_logging()
    cluster = MariaDBCluster()
    logging.info({'action': 'main', 'status': 'start',
                  'my_ipv4': cluster.my_ipv4})
    # DISCOVERY_SERVICE is a comma-separated list of "host:port" pairs.
    cluster.share_initial_state(DiscoveryService(
        tuple([(item.split(':')[0], int(item.split(':')[1]))
               for item in os.environ['DISCOVERY_SERVICE'].split(',')]),
        cluster.name))
    try:
        peers = cluster.wait_checkin(retry_interval=5)
        time.sleep(6)
    except ClusterDegradedError as ex:
        logging.error({'action': 'main', 'status': 'failed',
                       'message': str(ex)})
        exit(1)
    if cluster.join:
        # Explicit join address supplied: attach to an existing cluster.
        cluster.start_database(cluster_address=cluster.join)
    elif all(status == Constants.STATUS_NEW for status in peers.values()):
        # No data on any node: safe to install new cluster
        first_node = sorted(peers.keys())[0]
        cluster.start(first_node, initial_state=Constants.STATUS_INSTALL,
                      cluster_address=first_node, install_ok=True)
    elif (Constants.STATUS_OK in peers.values() or
          Constants.STATUS_DONOR in peers.values()):
        # At least one instance is synchronized, join them
        cluster.start_database(cluster_address=','.join(
            sorted([peer for peer, status in peers.items()
                    if status in (Constants.STATUS_OK,
                                  Constants.STATUS_DONOR)])))
    elif (cluster.health == Constants.STATUS_INIT and
          list(peers.values()).count(Constants.STATUS_NEW) ==
          cluster.cluster_size - 1):
        # Single instance plus new ones: resume installation on leader
        cluster.start(cluster.my_ipv4)
    else:
        # Cluster is down
        cluster.restart_database(peers.keys())
    # Heartbeat loop: report status until the daemon process exits.
    while cluster.proc.returncode is None:
        cluster.report_status()
        time.sleep(cluster.discovery.ttl * 0.8)
        cluster.proc.poll()
    logging.error({'action': 'main', 'status': 'failed',
                   'returncode': cluster.proc.returncode})
    # Grace period so logs are flushed / collected before the crash.
    time.sleep(30)
    raise AssertionError('MariaDB daemon died (%d)' % cluster.proc.returncode)


if __name__ == '__main__':
    main()
|
data/gener_rdat.py | eldipa/termgraph | 2,790 | 80118 | <filename>data/gener_rdat.py
#!/usr/bin/env python
import random
# Sample series kept for reference; not used by the generator below.
b = [2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018]
a = [1, 40, 24, 26, 29, 80, 100, 36]

# First year emitted and how many consecutive years to generate.
BASE = 1990
YEARS = 28

# Emit one "<year> <value>" line per year.  The context manager guarantees
# the file is flushed and closed even if a write fails — the original
# explicit open()/close() pair leaked the handle on error.
with open('random.dat', 'w') as f:
    for offset in range(YEARS):
        date = BASE + offset
        value = random.randint(-500, 500)  # already an int; no cast needed
        f.write('{} {}\n'.format(date, value))
|
tools/test_waymo.py | collector-m/SST | 217 | 80133 | <reponame>collector-m/SST<filename>tools/test_waymo.py
import tensorflow as tf
from waymo_open_dataset import dataset_pb2 as open_dataset
from waymo_open_dataset.utils import frame_utils
def get_data_from_seg(segment):
    """Decode every frame of a Waymo Open Dataset segment file.

    Reads the TFRecord at *segment*, parses each record into an
    ``open_dataset.Frame`` and extracts its range images / camera
    projections.  The parsed results are discarded — this script only
    exercises the decode path as a smoke test; nothing is returned.
    """
    dataset = tf.data.TFRecordDataset(segment, compression_type='')
    for data in dataset:
        frame = open_dataset.Frame()
        # Each TFRecord entry is a serialized Frame protobuf.
        frame.ParseFromString(bytearray(data.numpy()))
        (range_images, camera_projections,
         range_image_top_pose) = frame_utils.parse_range_image_and_camera_projection(
            frame)


if __name__ == '__main__':
    # NOTE(review): the segment path is hard-coded to a local tfrecord;
    # running without that file present raises an I/O error.
    seg = './segment-967082162553397800_5102_900_5122_900_with_camera_labels.tfrecord'
    get_data_from_seg(seg)
|
examples/overlay.py | kampelmuehler/HTML4Vision | 155 | 80135 | <filename>examples/overlay.py
from html4vision import Col, imagetable

# Column layout of the generated gallery: plain image, its first label,
# then the image again with the label drawn on top as a toggleable overlay.
cols = [
    Col('img', 'Image', 'images/road_*_image.jpg'),
    Col('img', 'Label', 'images/road_*_label.png', 1),  # 1 is used to select only the first item
    Col('img', 'Image + Label', 'images/road_*_image.jpg'),
    # 'overlay' draws this column on top of the preceding 'img' column;
    # the CSS string makes the label semi-transparent.
    Col('overlay', '', 'images/road_*_label.png', 1, 'opacity: 0.4'),
]

# Render the table to overlay.html, scaled 1.5x, with an on/off toggle.
imagetable(cols, 'overlay.html', 'Image Overlay', imscale=1.5, overlay_toggle=True)
|
MagneticField/GeomBuilder/test/python/mfwriter.py | ckamtsikis/cmssw | 852 | 80146 | ###
### Read a geometry from a single xml file created from mfxmlwriter.py
### and write it into a db file.
###
import FWCore.ParameterSet.Config as cms
# CMSSW process that reads the single big XML geometry file and writes it
# into a local SQLite conditions payload.
process = cms.Process("MagneticFieldWriter")
process.load("CondCore.DBCommon.CondDBCommon_cfi")

# Previously used geometry payload versions, kept for reference.
#GEOMETRY_VERSION = '90322'
#GEOMETRY_VERSION = '120812'
#GEOMETRY_VERSION = '130503'
GEOMETRY_VERSION = '160812'

# Dummy one-run source: a single IOV is enough for a static payload.
process.source = cms.Source("EmptyIOVSource",
    lastValue = cms.uint64(1),
    timetype = cms.string('runnumber'),
    firstValue = cms.uint64(1),
    interval = cms.uint64(1)
)

# This reads the big XML file and the only way to fill the
# nonreco part of the database is to read this file. It
# somewhat duplicates the information read from the little
# XML files, but there is no way to directly build the
# DDCompactView from this.
process.XMLGeometryWriter = cms.EDAnalyzer("XMLGeometryBuilder",
    XMLFileName = cms.untracked.string("./mfGeometry_"+GEOMETRY_VERSION+".xml"),
    ZIP = cms.untracked.bool(True),
    record = cms.untracked.string('MFGeometryFileRcd')
)

# Target database: a version-specific local SQLite file.
process.CondDBCommon.BlobStreamerName = cms.untracked.string('TBufferBlobStreamingService')
process.CondDBCommon.timetype = cms.untracked.string('runnumber')
process.CondDBCommon.connect = cms.string('sqlite_file:mfGeometry_'+GEOMETRY_VERSION+'.db')

# Output service: store the payload under MFGeometryFileRcd with a
# version-specific tag.
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
    process.CondDBCommon,
    toPut = cms.VPSet(cms.PSet(record = cms.string('MFGeometryFileRcd'),tag = cms.string('MagneticFieldGeometry_'+str(GEOMETRY_VERSION))))
)

process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(1)
)

process.p1 = cms.Path(process.XMLGeometryWriter)
# Create the corresponding metadata file
# The "with" statement guarantees the file is flushed and closed even on
# error; the original opened the file and never closed it (handle leak,
# and the content relied on interpreter exit for flushing).
with open('mfGeometry_'+GEOMETRY_VERSION+'.txt', 'w') as f:
    f.write('{\n'+
            ' \"destinationDatabase\": \"oracle://cms_orcon_prod/CMS_CONDITIONS\",\n'+
            ' \"destinationTags\": {\n'+
            ' \"MFGeometry_'+GEOMETRY_VERSION+'\": {}\n'+
            ' },\n'+
            ' \"inputTag\": "MagneticFieldGeometry_'+GEOMETRY_VERSION+'\",\n'+
            ' \"since\": 1,\n'+
            ' \"userText\": "Mag field geometry, version '+GEOMETRY_VERSION+'\"\n'+
            '}\n'
            )
|
watsor/test/model/prepare.py | firefly2442/watsor | 123 | 80164 | <filename>watsor/test/model/prepare.py
import os
import tensorflow as tf
from string import Template
from PIL import Image
from logging import DEBUG
from pathlib import Path
from threading import Thread, Event
from logging import getLogger
from logging.handlers import QueueHandler
from multiprocessing import Queue
from watsor.stream.log import LogHandler
from watsor.stream.work import Work, WorkPublish, Payload
from watsor.stream.share import FrameBuffer
from watsor.stream.sync import CountDownLatch, CountableQueue
from watsor.test.detect_stream import Artist, ShapeDetector
CLASSES = {idx: shape for idx, shape in enumerate(['unlabeled', 'triangle', 'ellipse', 'rectangle'])}
CONFIG = """model {
ssd {
num_classes: 3
image_resizer {
fixed_shape_resizer {
height: $height
width: $width
}
}
feature_extractor {
type: "ssd_mobilenet_v1"
depth_multiplier: 1.0
min_depth: 16
conv_hyperparams {
regularizer {
l2_regularizer {
weight: 3.99999989895e-05
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.0299999993294
}
}
activation: RELU_6
batch_norm {
decay: 0.999700009823
center: true
scale: true
epsilon: 0.0010000000475
train: true
}
}
}
box_coder {
faster_rcnn_box_coder {
y_scale: 10.0
x_scale: 10.0
height_scale: 5.0
width_scale: 5.0
}
}
matcher {
argmax_matcher {
matched_threshold: 0.5
unmatched_threshold: 0.5
ignore_thresholds: false
negatives_lower_than_unmatched: true
force_match_for_each_row: true
}
}
similarity_calculator {
iou_similarity {
}
}
box_predictor {
convolutional_box_predictor {
conv_hyperparams {
regularizer {
l2_regularizer {
weight: 3.99999989895e-05
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.0299999993294
}
}
activation: RELU_6
batch_norm {
decay: 0.999700009823
center: true
scale: true
epsilon: 0.0010000000475
train: true
}
}
min_depth: 0
max_depth: 0
num_layers_before_predictor: 0
use_dropout: false
dropout_keep_probability: 0.800000011921
kernel_size: 1
box_code_size: 4
apply_sigmoid_to_scores: false
}
}
anchor_generator {
ssd_anchor_generator {
num_layers: 6
min_scale: 0.20000000298
max_scale: 0.949999988079
aspect_ratios: 1.0
aspect_ratios: 2.0
aspect_ratios: 0.5
aspect_ratios: 3.0
aspect_ratios: 0.333299994469
}
}
post_processing {
batch_non_max_suppression {
score_threshold: 0.300000011921
iou_threshold: 0.600000023842
max_detections_per_class: 100
max_total_detections: 100
}
score_converter: SIGMOID
}
normalize_loss_by_num_matches: true
loss {
localization_loss {
weighted_smooth_l1 {
}
}
classification_loss {
weighted_sigmoid {
}
}
hard_example_miner {
num_hard_examples: 3000
iou_threshold: 0.990000009537
loss_type: CLASSIFICATION
max_negatives_per_positive: 3
min_negatives_per_image: 0
}
classification_weight: 1.0
localization_weight: 1.0
}
}
}
train_config {
batch_size: 24
data_augmentation_options {
random_horizontal_flip {
}
}
data_augmentation_options {
ssd_random_crop {
}
}
optimizer {
rms_prop_optimizer {
learning_rate {
exponential_decay_learning_rate {
initial_learning_rate: 0.00400000018999
decay_steps: 800720
decay_factor: 0.949999988079
}
}
momentum_optimizer_value: 0.899999976158
decay: 0.899999976158
epsilon: 1.0
}
}
fine_tune_checkpoint: "$path/training/model.ckpt-XXXX"
from_detection_checkpoint: true
num_steps: 200000
}
train_input_reader {
label_map_path: "$path/annotations/label_map.pbtxt"
tf_record_input_reader {
input_path: "$path/annotations/train.record"
}
}
eval_config {
num_examples: 8000
max_evals: 10
use_moving_averages: false
}
eval_input_reader {
label_map_path: "$path/annotations/label_map.pbtxt"
shuffle: false
num_readers: 1
tf_record_input_reader {
input_path: "$path/annotations/test.record"
}
}
"""
class Classifier(WorkPublish):
    """Frame consumer that saves rendered frames as JPEGs and writes a
    TFRecord dataset plus the label map / pipeline config for training.

    Subscribes to the frame stream and, for each frame, stores the image
    under ``<path>/images/<group>/`` and appends a TF Example to
    ``<path>/annotations/<group>.record``.
    """

    def __init__(self, delegate_class, name: str, stop_event, log_queue, frame_queue, frame_buffer,
                 path, group, latch, kwargs=None):
        # latch/path/group ride along as positional args to _run/_new_frame.
        super().__init__(delegate_class, name, stop_event, log_queue, frame_queue, frame_buffer,
                         args=(latch, path, group),
                         kwargs={} if kwargs is None else kwargs)

    def _run(self, stop_event, log_queue, *args, **kwargs):
        # Skip Work._run and call its parent directly (framework detail).
        super(Work, self)._run(stop_event, log_queue, *args, **kwargs)
        try:
            path = args[-2]
            group = args[-1]
            output_path = os.path.join(path, "annotations", "{}.record".format(group))
            # Consume frames until stopped, streaming examples into the record.
            with tf.io.TFRecordWriter(output_path) as writer:
                self._spin(self._process, stop_event, *args, writer, **kwargs)
            # Emit the supporting training artifacts once the record is done.
            self._gen_label_map(os.path.join(path, "annotations", "label_map.pbtxt"))
            self._gen_config(os.path.join(path, "ssd.config"), CONFIG, *args, **kwargs)
        except FileNotFoundError as e:
            self._logger.error(e)
        except Exception:
            self._logger.exception('Classification failure')

    def _new_frame(self, frame, payload: Payload, stop_event, frame_buffer: FrameBuffer, latch, path, group, writer,
                   *args, **kwargs):
        """Save one frame as JPEG and append its TF Example to the writer."""
        try:
            # Label 0 is 'unlabeled' background; keep real detections only.
            detections = filter(lambda d: d.label > 0, frame.header.detections)
            with Image.frombytes('RGB',
                                 (frame.header.width, frame.header.height),
                                 frame.image.get_obj()) as img:
                # Latch counts remaining frames; derive a 1-based filename index.
                count = latch.count_down()
                filename = self._gen_filename(path, group, count + 1, *args, **kwargs)
                img.save(filename)
                self._logger.debug("Frame saved to {}".format(filename))
                tf_example = self._gen_tf_record(frame, detections, filename, *args, **kwargs)
                writer.write(tf_example.SerializeToString())
        finally:
            # Always release the shared frame buffer slot.
            frame.latch.next()

    @staticmethod
    def _gen_filename(path, group, count, *args, **kwargs):
        """Absolute path of the JPEG for frame number *count* in *group*."""
        return os.path.abspath(os.path.join(path, "images", group, "{:03d}.jpg".format(count)))

    @staticmethod
    def _gen_tf_record(frame, detections, filename, *args, **kwargs):
        """Build a TF Example with the encoded JPEG and normalized boxes."""
        width = frame.header.width
        height = frame.header.height
        image_format = b'jpeg'
        with open(filename, "rb") as file:
            encoded_jpg = file.read()
        filename = os.path.basename(filename).encode('utf-8')
        xmins = []
        xmaxs = []
        ymins = []
        ymaxs = []
        label = []
        label_text = []
        for detection in detections:
            # Bounding boxes are stored in pixels; TF expects [0, 1] floats.
            xmins.append(detection.bounding_box.x_min / width)
            xmaxs.append(detection.bounding_box.x_max / width)
            ymins.append(detection.bounding_box.y_min / height)
            ymaxs.append(detection.bounding_box.y_max / height)
            label.append(detection.label)
            label_text.append(CLASSES.get(detection.label).encode('utf-8'))
        tf_example = tf.train.Example(features=tf.train.Features(feature={
            'image/height': tf.train.Feature(int64_list=tf.train.Int64List(value=[height])),
            'image/width': tf.train.Feature(int64_list=tf.train.Int64List(value=[width])),
            'image/filename': tf.train.Feature(bytes_list=tf.train.BytesList(value=[filename])),
            'image/source_id': tf.train.Feature(bytes_list=tf.train.BytesList(value=[filename])),
            'image/format': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image_format])),
            'image/encoded': tf.train.Feature(bytes_list=tf.train.BytesList(value=[encoded_jpg])),
            'image/object/bbox/xmin': tf.train.Feature(float_list=tf.train.FloatList(value=xmins)),
            'image/object/bbox/xmax': tf.train.Feature(float_list=tf.train.FloatList(value=xmaxs)),
            'image/object/bbox/ymin': tf.train.Feature(float_list=tf.train.FloatList(value=ymins)),
            'image/object/bbox/ymax': tf.train.Feature(float_list=tf.train.FloatList(value=ymaxs)),
            'image/object/class/text': tf.train.Feature(bytes_list=tf.train.BytesList(value=label_text)),
            'image/object/class/label': tf.train.Feature(int64_list=tf.train.Int64List(value=label)),
        }))
        return tf_example

    @staticmethod
    def _gen_label_map(path):
        """Write the pbtxt label map for all non-background classes."""
        contents = ''
        for idx, shape in CLASSES.items():
            if idx == 0:
                # Index 0 is the implicit background class; not listed.
                continue
            contents = contents + "item {\n"
            contents = contents + " id: " + str(idx) + "\n"
            contents = contents + " name: '" + shape + "'\n}\n\n"
        with open(path, 'w') as f:
            f.write(contents)

    @staticmethod
    def _gen_config(filename, config, frame_queue, stop_event, frame_buffer, *args, **kwargs):
        """Render the pipeline config template with path and frame size."""
        path = os.path.dirname(filename)
        config = Template(config).substitute(path=path,
                                             width=frame_buffer.frames[0].header.width,
                                             height=frame_buffer.frames[0].header.height)
        os.makedirs(path, exist_ok=True)
        with open(filename, 'w') as f:
            f.write(config)
def prepare_shape_model(groups):
    """Generate synthetic shape datasets for model training tests.

    *groups* maps a split name ("train"/"test") to the number of frames
    to produce for it.  For each split a drawing/detecting/classifying
    pipeline of threads is run until the requested number of frames has
    been written under build/test/model.
    """
    frame_buffer = FrameBuffer(10, 300, 300)
    frame_queue = Queue(1)
    subscriber_queue = Queue(1)
    log_queue = CountableQueue()
    # Route all logging through the queue so one handler thread does I/O.
    getLogger().addHandler(QueueHandler(log_queue))
    stop_logging_event = Event()
    log_handler = LogHandler(Thread, "logger", stop_logging_event, log_queue, filename=None)
    log_handler.start()
    for group, count in groups.items():
        path = os.path.abspath(os.path.join(Path(__file__).parent.parent.parent.parent, 'build/test/model'))
        os.makedirs(os.path.join(path, "images", group), exist_ok=True)
        os.makedirs(os.path.join(path, "annotations"), exist_ok=True)
        stop_process_event = Event()
        # Latch reaches zero after *count* frames have been classified.
        latch = CountDownLatch(count)
        artist = Artist("artist", stop_process_event, log_queue, frame_queue, frame_buffer)
        processes = [artist,
                     ShapeDetector(Thread, "detector", stop_process_event, log_queue, frame_queue, frame_buffer),
                     Classifier(Thread, "classifier", stop_process_event, log_queue, subscriber_queue, frame_buffer,
                                path, group, latch,
                                kwargs={'log_level': DEBUG})]
        artist.subscribe(subscriber_queue)
        for process in processes:
            process.start()
        try:
            # Block until the classifier has written all requested frames.
            latch.wait()
        finally:
            stop_process_event.set()
            for process in processes:
                process.join(30)
    stop_logging_event.set()
    log_queue.join()


if __name__ == '__main__':
    prepare_shape_model({"train": 900, "test": 100})
|
generate/lib/run-firefox/cuddlefish/runner.py | flamencist/browser-extensions | 102 | 80171 | <gh_stars>100-1000
import os
import sys
import time
import tempfile
import atexit
import shutil
import shlex
import subprocess
import re
import simplejson as json
import mozrunner
from cuddlefish.prefs import DEFAULT_COMMON_PREFS
from cuddlefish.prefs import DEFAULT_FIREFOX_PREFS
from cuddlefish.prefs import DEFAULT_THUNDERBIRD_PREFS
def follow_file(filename):
    """
    Generator that repeatedly yields the newest unread content of
    *filename*, or None when nothing new has appeared since the last
    read (tail -f style).  A missing file also yields None.

    Example:

      >>> f = open('temp.txt', 'w')
      >>> f.write('hello')
      >>> f.flush()
      >>> tail = follow_file('temp.txt')
      >>> tail.next()
      'hello'
      >>> tail.next() is None
      True
      >>> f.write('there')
      >>> f.flush()
      >>> tail.next()
      'there'
      >>> f.close()
      >>> os.remove('temp.txt')
    """
    last_pos = 0
    last_size = 0
    while True:
        fresh = None
        if os.path.exists(filename):
            size = os.stat(filename).st_size
            if size > last_size:
                # File grew: read everything from the last position.
                last_size = size
                fp = open(filename, 'r')
                try:
                    fp.seek(last_pos)
                    fresh = fp.read()
                    last_pos = fp.tell()
                finally:
                    fp.close()
        yield fresh
yield newstuff
# subprocess.check_output only appeared in python2.7, so this code is taken
# from python source code for compatibility with py2.5/2.6
class CalledProcessError(Exception):
    """Raised when a subprocess exits with a non-zero status.

    Backport shim: subprocess.check_output only appeared in Python 2.7,
    so this mirrors the stdlib exception for older interpreters.
    """

    def __init__(self, returncode, cmd, output=None):
        self.returncode = returncode
        self.cmd = cmd
        self.output = output

    def __str__(self):
        return "Command '%s' returned non-zero exit status %d" % (
            self.cmd, self.returncode)
def check_output(*popenargs, **kwargs):
    """Run a command and return its stdout.

    Raises CalledProcessError when the command exits non-zero, mirroring
    the Python 2.7 subprocess.check_output API for older interpreters.
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    proc = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
    captured, _ = proc.communicate()
    status = proc.poll()
    if status:
        command = kwargs.get("args")
        if command is None:
            command = popenargs[0]
        raise CalledProcessError(status, command, output=captured)
    return captured
class FennecProfile(mozrunner.Profile):
    """mozrunner profile for Fennec (mobile Firefox)."""
    # No Fennec-specific preference overrides beyond mozrunner defaults.
    preferences = {}
    names = ['fennec']
class FennecRunner(mozrunner.Runner):
    """mozrunner runner that locates and launches the Fennec binary."""
    profile_class = FennecProfile

    names = ['fennec']

    # Default install location of Fennec on OS X.
    __DARWIN_PATH = '/Applications/Fennec.app/Contents/MacOS/fennec'

    def __init__(self, binary=None, **kwargs):
        if sys.platform == 'darwin' and binary and binary.endswith('.app'):
            # Assume it's a Fennec app dir.
            binary = os.path.join(binary, 'Contents/MacOS/fennec')
        self.__real_binary = binary
        mozrunner.Runner.__init__(self, **kwargs)

    def find_binary(self):
        # Prefer an explicitly supplied binary; otherwise try the default
        # OS X path, then fall back to mozrunner's own search.
        if not self.__real_binary:
            if sys.platform == 'darwin':
                if os.path.exists(self.__DARWIN_PATH):
                    return self.__DARWIN_PATH
            self.__real_binary = mozrunner.Runner.find_binary(self)
        return self.__real_binary
class XulrunnerAppProfile(mozrunner.Profile):
    """mozrunner profile for a generic XULRunner application."""
    # No app-specific preferences or binary names.
    preferences = {}
    names = []
class XulrunnerAppRunner(mozrunner.Runner):
    """
    Runner for any XULRunner app. Can use a Firefox binary in XULRunner
    mode to execute the app, or can use XULRunner itself. Expects the
    app's application.ini to be passed in as one of the items in
    'cmdargs' in the constructor.

    This class relies a lot on the particulars of mozrunner.Runner's
    implementation, and does some unfortunate acrobatics to get around
    some of the class' limitations/assumptions.
    """

    profile_class = XulrunnerAppProfile

    # This is a default, and will be overridden in the instance if
    # Firefox is used in XULRunner mode.
    names = ['xulrunner']

    # Default location of XULRunner on OS X.
    __DARWIN_PATH = "/Library/Frameworks/XUL.framework/xulrunner-bin"
    __LINUX_PATH = "/usr/bin/xulrunner"

    # What our application.ini's path looks like if it's part of
    # an "installed" XULRunner app on OS X.
    __DARWIN_APP_INI_SUFFIX = '.app/Contents/Resources/application.ini'

    def __init__(self, binary=None, **kwargs):
        if sys.platform == 'darwin' and binary and binary.endswith('.app'):
            # Assume it's a Firefox app dir.
            binary = os.path.join(binary, 'Contents/MacOS/firefox-bin')
        self.__app_ini = None
        self.__real_binary = binary
        mozrunner.Runner.__init__(self, **kwargs)
        # See if we're using a genuine xulrunner-bin from the XULRunner SDK,
        # or if we're being asked to use Firefox in XULRunner mode.
        self.__is_xulrunner_sdk = 'xulrunner' in self.binary
        if sys.platform == 'linux2' and not self.env.get('LD_LIBRARY_PATH'):
            self.env['LD_LIBRARY_PATH'] = os.path.dirname(self.binary)
        # Pull the application.ini entry out of cmdargs; it is passed to
        # the binary separately via the `command` property below.
        newargs = []
        for item in self.cmdargs:
            if 'application.ini' in item:
                self.__app_ini = item
            else:
                newargs.append(item)
        self.cmdargs = newargs
        if not self.__app_ini:
            raise ValueError('application.ini not found in cmdargs')
        if not os.path.exists(self.__app_ini):
            raise ValueError("file does not exist: '%s'" % self.__app_ini)
        if (sys.platform == 'darwin' and
            self.binary == self.__DARWIN_PATH and
            self.__app_ini.endswith(self.__DARWIN_APP_INI_SUFFIX)):
            # If the application.ini is in an app bundle, then
            # it could be inside an "installed" XULRunner app.
            # If this is the case, use the app's actual
            # binary instead of the XUL framework's, so we get
            # a proper app icon, etc.
            new_binary = '/'.join(self.__app_ini.split('/')[:-2] +
                                  ['MacOS', 'xulrunner'])
            if os.path.exists(new_binary):
                self.binary = new_binary

    @property
    def command(self):
        """Returns the command list to run."""
        if self.__is_xulrunner_sdk:
            return [self.binary, self.__app_ini, '-profile',
                    self.profile.profile]
        else:
            # Firefox in XULRunner mode needs the explicit -app flag.
            return [self.binary, '-app', self.__app_ini, '-profile',
                    self.profile.profile]

    def __find_xulrunner_binary(self):
        # Probe the canonical system-wide XULRunner locations per platform.
        if sys.platform == 'darwin':
            if os.path.exists(self.__DARWIN_PATH):
                return self.__DARWIN_PATH
        if sys.platform == 'linux2':
            if os.path.exists(self.__LINUX_PATH):
                return self.__LINUX_PATH
        return None

    def find_binary(self):
        # This gets called by the superclass constructor. It will
        # always get called, even if a binary was passed into the
        # constructor, because we want to have full control over
        # what the exact setting of self.binary is.
        if not self.__real_binary:
            self.__real_binary = self.__find_xulrunner_binary()
            if not self.__real_binary:
                # No XULRunner found: fall back to Firefox in -app mode.
                dummy_profile = {}
                runner = mozrunner.FirefoxRunner(profile=dummy_profile)
                self.__real_binary = runner.find_binary()
                self.names = runner.names
        return self.__real_binary
def run_app(harness_root_dir, harness_options,
            app_type, binary=None, profiledir=None, verbose=False,
            timeout=None, logfile=None, addons=None, args=None, norun=None):
    """Launch a Mozilla application with the SDK harness installed.

    Builds a profile for *app_type* ('firefox', 'thunderbird', 'fennec'
    or 'xulrunner'), starts the app and polls a temp result file until
    the harness reports 'OK' or 'FAIL'.  Returns 0 on success, -1 on
    failure; raises on timeout.  With norun=True only prints the launch
    command and returns 0.
    """
    if binary:
        binary = os.path.expanduser(binary)
    if addons is None:
        addons = []
    else:
        # Copy so the caller's list is not mutated below.
        addons = list(addons)
    cmdargs = []
    preferences = dict(DEFAULT_COMMON_PREFS)
    if app_type == "xulrunner":
        profile_class = XulrunnerAppProfile
        runner_class = XulrunnerAppRunner
        cmdargs.append(os.path.join(harness_root_dir, 'application.ini'))
    else:
        # For toolkit apps the harness itself is installed as an addon.
        addons.append(harness_root_dir)
        if app_type == "firefox":
            profile_class = mozrunner.FirefoxProfile
            preferences.update(DEFAULT_FIREFOX_PREFS)
            runner_class = mozrunner.FirefoxRunner
        elif app_type == "thunderbird":
            profile_class = mozrunner.ThunderbirdProfile
            preferences.update(DEFAULT_THUNDERBIRD_PREFS)
            runner_class = mozrunner.ThunderbirdRunner
        elif app_type == "fennec":
            profile_class = FennecProfile
            runner_class = FennecRunner
        else:
            raise ValueError("Unknown app: %s" % app_type)
    if sys.platform == 'darwin':
        cmdargs.append('-foreground')
    if args:
        cmdargs.extend(shlex.split(args))
    # tempfile.gettempdir() was constant, preventing two simultaneous "cfx
    # run"/"cfx test" on the same host. On unix it points at /tmp (which is
    # world-writeable), enabling a symlink attack (e.g. imagine some bad guy
    # does 'ln -s ~/.ssh/id_rsa /tmp/harness_result'). NamedTemporaryFile
    # gives us a unique filename that fixes both problems. We leave the
    # (0-byte) file in place until the browser-side code starts writing to
    # it, otherwise the symlink attack becomes possible again.
    fileno,resultfile = tempfile.mkstemp(prefix="harness-result-")
    os.close(fileno)
    harness_options['resultFile'] = resultfile
    def maybe_remove_logfile():
        # Cleanup helper registered with atexit on Windows.
        if os.path.exists(logfile):
            os.remove(logfile)
    logfile_tail = None
    if sys.platform in ['win32', 'cygwin']:
        if not logfile:
            # If we're on Windows, we need to keep a logfile simply
            # to print console output to stdout.
            fileno,logfile = tempfile.mkstemp(prefix="harness-log-")
            os.close(fileno)
        logfile_tail = follow_file(logfile)
        atexit.register(maybe_remove_logfile)
    if logfile:
        logfile = os.path.abspath(os.path.expanduser(logfile))
        maybe_remove_logfile()
        harness_options['logFile'] = logfile
    env = {}
    env.update(os.environ)
    env['MOZ_NO_REMOTE'] = '1'
    env['XPCOM_DEBUG_BREAK'] = 'warn'
    env['NS_TRACE_MALLOC_DISABLE_STACKS'] = '1'
    if norun:
        cmdargs.append("-no-remote")
    # Write the harness options file to the SDK's extension template directory
    # so mozrunner will copy it to the profile it creates. We don't want
    # to leave such files lying around the SDK's directory tree, so we delete it
    # below after getting mozrunner to create the profile.
    optionsFile = os.path.join(harness_root_dir, 'harness-options.json')
    open(optionsFile, "w").write(str(json.dumps(harness_options)))
    starttime = time.time()
    popen_kwargs = {}
    profile = None
    profile = profile_class(addons=addons,
                            profile=profiledir,
                            preferences=preferences)
    # Delete the harness options file we wrote to the SDK's extension template
    # directory.
    os.remove(optionsFile)
    runner = runner_class(profile=profile,
                          binary=binary,
                          env=env,
                          cmdargs=cmdargs,
                          kp_kwargs=popen_kwargs)
    sys.stdout.flush(); sys.stderr.flush()
    print >>sys.stderr, "Using binary at '%s'." % runner.binary
    # Ensure cfx is being used with Firefox 4.0+.
    # TODO: instead of dying when Firefox is < 4, warn when Firefox is outside
    # the minVersion/maxVersion boundaries.
    version_output = check_output(runner.command + ["-v"])
    # Note: this regex doesn't handle all valid versions in the Toolkit Version
    # Format <https://developer.mozilla.org/en/Toolkit_version_format>, just the
    # common subset that we expect Mozilla apps to use.
    mo = re.search(r"Mozilla (Firefox|Iceweasel) ((\d+)\.\S*)",
                   version_output)
    if not mo:
        # cfx may be used with Thunderbird, SeaMonkey or an exotic Firefox
        # version.
        print """
WARNING: cannot determine Firefox version; please ensure you are running
a Mozilla application equivalent to Firefox 4.0 or greater.
"""
    else:
        version = mo.group(3)
        if int(version) < 4:
            print """
cfx requires Firefox 4 or greater and is unable to find a compatible
binary. Please install a newer version of Firefox or provide the path to
your existing compatible version with the --binary flag:
cfx --binary=PATH_TO_FIREFOX_BINARY"""
            return
        # Set the appropriate extensions.checkCompatibility preference to false,
        # so the tests run even if the SDK is not marked as compatible with the
        # version of Firefox on which they are running, and we don't have to
        # ensure we update the maxVersion before the version of Firefox changes
        # every six weeks.
        #
        # The regex we use here is effectively the same as BRANCH_REGEX from
        # /toolkit/mozapps/extensions/content/extensions.js, which toolkit apps
        # use to determine whether or not to load an incompatible addon.
        #
        br = re.search(r"^([^\.]+\.[0-9]+[a-z]*).*", mo.group(2), re.I)
        if br:
            prefname = 'extensions.checkCompatibility.' + br.group(1)
            profile.preferences[prefname] = False
            # Calling profile.set_preferences here duplicates the list of prefs
            # in prefs.js, since the profile calls self.set_preferences in its
            # constructor, but that is ok, because it doesn't change the set of
            # preferences that are ultimately registered in Firefox.
            profile.set_preferences(profile.preferences)
    print >>sys.stderr, "Using profile at '%s'." % profile.profile
    sys.stderr.flush()
    if norun:
        print "To launch the application, enter the following command:"
        print " ".join(runner.command) + " " + (" ".join(runner.cmdargs))
        return 0
    runner.start()
    done = False
    output = None
    try:
        # Poll until the harness writes 'OK'/'FAIL' into the result file,
        # tailing the log to stderr on platforms that need it.
        while not done:
            time.sleep(0.05)
            if logfile_tail:
                new_chars = logfile_tail.next()
                if new_chars:
                    sys.stderr.write(new_chars)
                    sys.stderr.flush()
            if os.path.exists(resultfile):
                output = open(resultfile).read()
                if output:
                    if output in ['OK', 'FAIL']:
                        done = True
                    else:
                        sys.stderr.write("Hrm, resultfile (%s) contained something weird (%d bytes)\n" % (resultfile, len(output)))
                        sys.stderr.write("'"+output+"'\n")
            if timeout and (time.time() - starttime > timeout):
                raise Exception("Wait timeout exceeded (%ds)" %
                                timeout)
    except:
        # Bare except is deliberate: stop the app on ANY failure
        # (including KeyboardInterrupt), then re-raise.
        runner.stop()
        raise
    else:
        runner.wait(10)
    finally:
        if profile:
            profile.cleanup()
        print >>sys.stderr, "Total time: %f seconds" % (time.time() - starttime)
    if output == 'OK':
        print >>sys.stderr, "Program terminated successfully."
        return 0
    else:
        print >>sys.stderr, "Program terminated unsuccessfully."
        return -1
|
pythonFiles/testing_tools/adapter/pytest/_cli.py | JesterOrNot/vscode-python | 404 | 80175 | <gh_stars>100-1000
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from __future__ import absolute_import
from ..errors import UnsupportedCommandError
def add_subparser(cmd, name, parent):
    """Add a new subparser to the given parent and add args to it."""
    subparser = parent.add_parser(name)
    if cmd != 'discover':
        raise UnsupportedCommandError(cmd)
    # 'discover' currently needs no tool-specific CLI options.
    return subparser
|
tests/fixtures/foo_command.py | Ivoz/cleo | 859 | 80187 | <gh_stars>100-1000
from cleo.commands.command import Command
from cleo.io.io import IO
class FooCommand(Command):
    """Test fixture: a command that writes marker lines when invoked."""
    # Space in the name makes this a namespaced "foo bar" command;
    # it is also reachable through the "afoobar" alias.
    name = "foo bar"

    description = "The foo bar command"

    aliases = ["afoobar"]

    def interact(self, io: IO) -> None:
        # Interactive phase hook, runs before handle().
        io.write_line("interact called")

    def handle(self) -> int:
        self._io.write_line("called")
        # Exit code 0 signals success.
        return 0
|
tests/unit/dynamodb/test_batch.py | Yurzs/boto | 5,079 | 80188 | #!/usr/bin/env python
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from tests.unit import unittest
from boto.dynamodb.batch import Batch
from boto.dynamodb.table import Table
from boto.dynamodb.layer2 import Layer2
from boto.dynamodb.batch import BatchList
DESCRIBE_TABLE_1 = {
'Table': {
'CreationDateTime': 1349910554.478,
'ItemCount': 1,
'KeySchema': {'HashKeyElement': {'AttributeName': u'foo',
'AttributeType': u'S'}},
'ProvisionedThroughput': {'ReadCapacityUnits': 10,
'WriteCapacityUnits': 10},
'TableName': 'testtable',
'TableSizeBytes': 54,
'TableStatus': 'ACTIVE'}
}
DESCRIBE_TABLE_2 = {
'Table': {
'CreationDateTime': 1349910554.478,
'ItemCount': 1,
'KeySchema': {'HashKeyElement': {'AttributeName': u'baz',
'AttributeType': u'S'},
'RangeKeyElement': {'AttributeName': 'myrange',
'AttributeType': 'N'}},
'ProvisionedThroughput': {'ReadCapacityUnits': 10,
'WriteCapacityUnits': 10},
'TableName': 'testtable2',
'TableSizeBytes': 54,
'TableStatus': 'ACTIVE'}
}
class TestBatchObjects(unittest.TestCase):
    """Tests serialization of Batch/BatchList objects into request dicts."""

    maxDiff = None

    def setUp(self):
        self.layer2 = Layer2('access_key', 'secret_key')
        self.table = Table(self.layer2, DESCRIBE_TABLE_1)
        self.table2 = Table(self.layer2, DESCRIBE_TABLE_2)

    def test_batch_to_dict(self):
        batch = Batch(self.table, ['k1', 'k2'], attributes_to_get=['foo'],
                      consistent_read=True)
        expected = {
            'AttributesToGet': ['foo'],
            'Keys': [{'HashKeyElement': {'S': 'k1'}},
                     {'HashKeyElement': {'S': 'k2'}}],
            'ConsistentRead': True,
        }
        self.assertDictEqual(batch.to_dict(), expected)

    def test_batch_consistent_read_defaults_to_false(self):
        batch = Batch(self.table, ['k1'])
        expected = {
            'Keys': [{'HashKeyElement': {'S': 'k1'}}],
            'ConsistentRead': False,
        }
        self.assertDictEqual(batch.to_dict(), expected)

    def test_batch_list_consistent_read(self):
        batch_list = BatchList(self.layer2)
        batch_list.add_batch(self.table, ['k1'], ['foo'], consistent_read=True)
        batch_list.add_batch(self.table2, [('k2', 54)], ['bar'],
                             consistent_read=False)
        expected = {
            'testtable': {
                'AttributesToGet': ['foo'],
                'Keys': [{'HashKeyElement': {'S': 'k1'}}],
                'ConsistentRead': True,
            },
            'testtable2': {
                'AttributesToGet': ['bar'],
                'Keys': [{'HashKeyElement': {'S': 'k2'},
                          'RangeKeyElement': {'N': '54'}}],
                'ConsistentRead': False,
            },
        }
        self.assertDictEqual(batch_list.to_dict(), expected)
# Allow running this test module directly (e.g. `python test_batch.py`).
if __name__ == '__main__':
    unittest.main()
|
dcdownloader/main.py | dev-techmoe/python-dcdownloader | 173 | 80209 | <filename>dcdownloader/main.py
import sys, os

# Make the package importable when this file is run from a source checkout:
# prepend the repository root to sys.path before importing dcdownloader.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))

from dcdownloader import arg_parse, version

# for unittest: tests can inject a fake argv list here before calling main();
# None means argparse falls back to sys.argv.
cmd_args = None
def main():
    """Parse CLI arguments, show the welcome banner, and run the scheduler."""
    args = arg_parse.parser.parse_args(cmd_args)
    version.show_welcome()

    # Imported here, after the banner, preserving the original import timing.
    from dcdownloader.scheduler import Scheduler
    from dcdownloader import parser_selector

    scheduler = Scheduler(
        url=args.url,
        output_path=args.output_path,
        parser=parser_selector.get_parser(args.url),
        fetch_only=args.fetch_only,
        proxy=args.proxy,
        verify_ssl=args.verify_ssl)
    scheduler.run()
# Script entry point.
if __name__ == '__main__':
    main()
|
tools/datasets/gen_sceneflow_anns.py | jiaw-z/DenseMatchingBenchmark | 160 | 80213 | import os
import numpy as np
import argparse
import os.path as osp
import json
from tqdm import tqdm
from mmcv import mkdir_or_exist
def getFlying3dMetas(root, Type, data_type='clean'):
    """Collects sample metadata for the FlyingThings3D split `Type`.

    Walks `root`/flyingthings3d/disparity/`Type`/{A,B,C}/<idx>/left and pairs
    each disparity file with its sibling image/right-view files.

    Args:
        root: dataset root directory.
        Type: split name, 'TRAIN' or 'TEST'.
        data_type: render pass, 'clean' or 'final'.

    Returns:
        A list of dicts with left/right image and disparity paths relative
        to `root`.
    """
    image_root = 'flyingthings3d/frames_' + data_type + 'pass'
    disp_root = 'flyingthings3d/disparity'
    metas = []
    for part in ('A', 'B', 'C'):
        part_dir = osp.join(root, disp_root, Type, part)
        for idx_dir in os.listdir(part_dir):
            for disp_name in os.listdir(osp.join(part_dir, idx_dir, 'left')):
                # Images share the disparity file's basename with a .png suffix.
                img_name = "{}.png".format(disp_name.split('.')[0])
                metas.append(dict(
                    left_image_path=osp.join(
                        image_root, Type, part, idx_dir, 'left', img_name),
                    right_image_path=osp.join(
                        image_root, Type, part, idx_dir, 'right', img_name),
                    left_disp_map_path=osp.join(
                        disp_root, Type, part, idx_dir, 'left', disp_name),
                    right_disp_map_path=osp.join(
                        disp_root, Type, part, idx_dir, 'right', disp_name),
                ))
    return metas
def getMonkaaMetas(root, data_type='clean'):
    """Collects sample metadata for the Monkaa subset.

    Args:
        root: dataset root directory.
        data_type: render pass, 'clean' or 'final'.

    Returns:
        A list of dicts with left/right image and disparity paths relative
        to `root`.
    """
    image_root = 'Monkaa/frames_' + data_type + 'pass'
    disp_root = 'Monkaa/disparity'
    metas = []
    for scene in os.listdir(osp.join(root, disp_root)):
        for disp_name in os.listdir(osp.join(root, disp_root, scene, 'left')):
            # Images share the disparity file's basename with a .png suffix.
            img_name = "{}.png".format(disp_name.split('.')[0])
            metas.append(dict(
                left_image_path=osp.join(image_root, scene, 'left', img_name),
                right_image_path=osp.join(image_root, scene, 'right', img_name),
                left_disp_map_path=osp.join(disp_root, scene, 'left', disp_name),
                right_disp_map_path=osp.join(disp_root, scene, 'right', disp_name),
            ))
    return metas
def getDrivingMetas(root, data_type='clean'):
    """Collects sample metadata for the Driving subset.

    Driving is organized as <focal length>/<forwards|backwards>/<speed>/.

    Args:
        root: dataset root directory.
        data_type: render pass, 'clean' or 'final'.

    Returns:
        A list of dicts with left/right image and disparity paths relative
        to `root`.
    """
    image_root = 'driving/frames_' + data_type + 'pass'
    disp_root = 'driving/disparity'
    metas = []
    for focal in os.listdir(osp.join(root, disp_root)):
        for ward in os.listdir(osp.join(root, disp_root, focal)):
            for speed in os.listdir(osp.join(root, disp_root, focal, ward)):
                left_dir = osp.join(root, disp_root, focal, ward, speed, 'left')
                for disp_name in os.listdir(left_dir):
                    # Images share the disparity basename with a .png suffix.
                    img_name = "{}.png".format(disp_name.split('.')[0])
                    metas.append(dict(
                        left_image_path=osp.join(
                            image_root, focal, ward, speed, 'left', img_name),
                        right_image_path=osp.join(
                            image_root, focal, ward, speed, 'right', img_name),
                        left_disp_map_path=osp.join(
                            disp_root, focal, ward, speed, 'left', disp_name),
                        right_disp_map_path=osp.join(
                            disp_root, focal, ward, speed, 'right', disp_name),
                    ))
    return metas
def build_annoFile(root, save_annotation_root, data_type='clean'):
    """Build annotation files for Scene Flow Dataset.

    Gathers FlyingThings3D (train+test), Monkaa and Driving metadata,
    verifies every referenced file exists, and writes one JSON file per
    split into `save_annotation_root`.

    Args:
        root: dataset root holding flyingthings3d/, Monkaa/ and driving/.
        save_annotation_root: directory that receives the generated JSONs.
        data_type: render pass to index, 'clean' or 'final'.
    """
    # check existence
    assert osp.exists(root), 'Path: {} not exists!'.format(root)
    mkdir_or_exist(save_annotation_root)

    trainMetas = getFlying3dMetas(root, 'TRAIN', data_type)
    testMetas = getFlying3dMetas(root, 'TEST', data_type)
    trainMetas.extend(getMonkaaMetas(root, data_type))
    trainMetas.extend(getDrivingMetas(root, data_type))

    # Sanity-check that every referenced image/disparity file is on disk.
    for meta in tqdm(trainMetas):
        for v in meta.values():
            assert osp.exists(osp.join(root, v)), 'trainMetas:{} not exists'.format(v)
    for meta in tqdm(testMetas):
        for v in meta.values():
            assert osp.exists(osp.join(root, v)), 'testMetas: {} not exists'.format(v)

    info_str = 'SceneFlow Dataset contains:\n' \
               ' {:5d} training samples \n' \
               ' {:5d} validation samples'.format(len(trainMetas), len(testMetas))
    print(info_str)

    def dump_split(name, metas):
        # One JSON per split, named e.g. cleanpass_train.json.
        filepath = osp.join(save_annotation_root, data_type + 'pass_' + name + '.json')
        print('Save to {}'.format(filepath))
        with open(file=filepath, mode='w') as fp:
            json.dump(metas, fp=fp)

    dump_split(name='train', metas=trainMetas)
    dump_split(name='test', metas=testMetas)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="SceneFlow Data PreProcess.")
    parser.add_argument(
        "--data-root",
        # Previously default=None: omitting the flag crashed later inside
        # osp.exists(None) with an opaque TypeError. Require it instead so
        # argparse reports a clear error up front.
        required=True,
        help="root of data",
        type=str,
    )
    parser.add_argument(
        "--save-annotation-root",
        default='./',
        help="save root of generated annotation file",
        type=str,
    )
    parser.add_argument(
        "--data-type",
        default='clean',
        # Anything other than clean/final would silently generate paths to
        # non-existent render passes; reject it at parse time.
        choices=('clean', 'final'),
        help="the type of data, (clean or final)pass",
        type=str,
    )
    args = parser.parse_args()
    build_annoFile(args.data_root, args.save_annotation_root, args.data_type)
|
tests/pytests/unit/states/test_layman.py | markgras/salt | 9,425 | 80224 | """
:codeauthor: <NAME> <<EMAIL>>
"""
import pytest
import salt.states.layman as layman
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
    """Loader-dunder setup for the salt.states.layman module (no presets)."""
    return {layman: {}}
def test_present():
    """
    Test to verify that the overlay is present.
    """
    overlay = "sunrise"
    expected = {"name": overlay, "result": True, "comment": "", "changes": {}}
    # First call reports the overlay installed, second call reports it missing.
    list_local = MagicMock(side_effect=[[overlay], []])
    with patch.dict(layman.__salt__, {"layman.list_local": list_local}):
        expected["comment"] = "Overlay {} already present".format(overlay)
        assert layman.present(overlay) == expected

        with patch.dict(layman.__opts__, {"test": True}):
            expected["comment"] = "Overlay {} is set to be added".format(overlay)
            expected["result"] = None
            assert layman.present(overlay) == expected
def test_absent():
    """
    Test to verify that the overlay is absent.
    """
    overlay = "sunrise"
    expected = {"name": overlay, "result": True, "comment": "", "changes": {}}
    # First call reports the overlay missing, second call reports it installed.
    list_local = MagicMock(side_effect=[[], [overlay]])
    with patch.dict(layman.__salt__, {"layman.list_local": list_local}):
        expected["comment"] = "Overlay {} already absent".format(overlay)
        assert layman.absent(overlay) == expected

        with patch.dict(layman.__opts__, {"test": True}):
            expected["comment"] = "Overlay {} is set to be deleted".format(overlay)
            expected["result"] = None
            assert layman.absent(overlay) == expected
|
venv/share/doc/networkx-2.6.3/examples/algorithms/plot_circuits.py | AisahAlfiyatusR/Image_Retrieval_Heroku | 10,024 | 80231 | """
========
Circuits
========
Convert a Boolean circuit to an equivalent Boolean formula.
A Boolean circuit can be exponentially more expressive than an
equivalent formula in the worst case, since the circuit can reuse
subcircuits multiple times, whereas a formula cannot reuse subformulas
more than once. Thus creating a Boolean formula from a Boolean circuit
in this way may be infeasible if the circuit is large.
"""
import matplotlib.pyplot as plt
import networkx as nx
def circuit_to_formula(circuit):
    """Expand a Boolean circuit (a DAG) into an equivalent formula (a tree)."""
    formula = nx.dag_to_branching(circuit)
    # Each branching node records its originating circuit node under
    # "source"; copy that node's operator/variable label across.
    for node, data in formula.nodes(data=True):
        data["label"] = circuit.nodes[data["source"]]["label"]
    return formula
def formula_to_string(formula):
    """Render a Boolean formula (a branching) as a parenthesized string."""

    def render(node):
        label = formula.nodes[node]["label"]
        successors = list(formula[node])
        # No children: a plain variable.
        if not successors:
            return label
        # One child: the label must be a NOT operator.
        if len(successors) == 1:
            return f"{label}({render(successors[0])})"
        # Two children: a binary operator. There is no order on a node's
        # children, but that is fine because the Boolean AND and OR
        # operators are symmetric — the operand order simply cannot be
        # predicted between invocations.
        lhs, rhs = successors
        return f"({render(lhs)} {label} {render(rhs)})"

    root = next(v for v, d in formula.in_degree() if d == 0)
    return render(root)
###############################################################################
# Create an example Boolean circuit.
# ----------------------------------
#
# This circuit has a ∧ at the output and two ∨s at the next layer.
# The third layer has a variable x that appears in the left ∨, a
# variable y that appears in both the left and right ∨s, and a
# negation for the variable z that appears as the sole node in the
# fourth layer.

circuit = nx.DiGraph()
# Layer 0
circuit.add_node(0, label="∧", layer=0)
# Layer 1
circuit.add_node(1, label="∨", layer=1)
circuit.add_node(2, label="∨", layer=1)
circuit.add_edge(0, 1)
circuit.add_edge(0, 2)
# Layer 2
circuit.add_node(3, label="x", layer=2)
circuit.add_node(4, label="y", layer=2)
circuit.add_node(5, label="¬", layer=2)
circuit.add_edge(1, 3)
# Node 4 (y) feeds both ORs: this shared subcircuit is exactly the DAG
# reuse that a tree-shaped formula cannot express without duplication.
circuit.add_edge(1, 4)
circuit.add_edge(2, 4)
circuit.add_edge(2, 5)
# Layer 3
circuit.add_node(6, label="z", layer=3)
circuit.add_edge(5, 6)

# Convert the circuit to an equivalent formula.
formula = circuit_to_formula(circuit)
print(formula_to_string(formula))

# Draw the circuit with one column per layer ("multipartite" layout),
# labeling nodes with their operator/variable symbols.
labels = nx.get_node_attributes(circuit, "label")
options = {
    "node_size": 600,
    "alpha": 0.5,
    "node_color": "blue",
    "labels": labels,
    "font_size": 22,
}
plt.figure(figsize=(8, 8))
pos = nx.multipartite_layout(circuit, subset_key="layer")
nx.draw_networkx(circuit, pos, **options)
plt.title(formula_to_string(formula))
plt.axis("equal")
plt.show()
|
ldif/inference/predict.py | trisct/ldif | 242 | 80240 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Class to do trained model inference in beam."""
import importlib
import os
import struct
import subprocess as sp
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework
# LDIF is an internal package, should be imported last.
# pylint: disable=g-bad-import-order
from ldif.datasets import preprocess
from ldif.datasets import shapenet
from ldif.inference import experiment as experiments
from ldif.inference import extract_mesh
from ldif.inference import metrics
from ldif.model import model as sdf_model
from ldif.representation import structured_implicit_function
from ldif.util import camera_util
from ldif.util import file_util
from ldif.util import gaps_util
from ldif.util import geom_util
from ldif.util import geom_util_np
from ldif.util import gpu_util
from ldif.util import path_util
from ldif.util import py_util
from ldif.util import sdf_util
from ldif.util import np_util
from ldif.util.file_util import log
# pylint: enable=g-bad-import-order
# Reload these modules at import time so interactive sessions (e.g. notebooks
# that re-import this file) pick up local edits without restarting the kernel.
importlib.reload(extract_mesh)
importlib.reload(structured_implicit_function)
importlib.reload(sdf_model)
importlib.reload(geom_util)
class TrainedNetwork(object):
    """A base class for all networks trained in XManager.

    Subclasses build their inference graph inside self.graph and then call
    restore() to load the checkpoint weights into a session.
    """

    def __init__(self, job, ckpt, use_gpu, **kwargs):  # pylint: disable=unused-argument
        """Stores the job/checkpoint pair and creates a fresh graph.

        Args:
            job: Experiment job providing the model_config subclasses use.
            ckpt: Checkpoint handle; ckpt.abspath is restored in restore().
            use_gpu: Whether restore() may create a GPU-enabled session.
            **kwargs: Ignored here; accepted for subclass signatures.
        """
        self.job = job
        self.ckpt = ckpt
        self.graph = tf.Graph()
        self.use_gpu = use_gpu

    @classmethod
    def from_experiment(cls,
                        experiment,
                        xid,
                        ckpt_idx,
                        use_temp_ckpts=None,
                        overrides=None,
                        use_gpu=True,
                        **kwargs):
        """Instantiates a TrainedNetwork from an experiment object.

        Args:
            experiment: Experiment to look the job up in.
            xid: XManager id identifying the job.
            ckpt_idx: Desired checkpoint index. -1 selects the latest; 0 is
                invalid and only logs the available indices.
            use_temp_ckpts: Optional bool forwarded to the job when not None.
            overrides: Optional dict of hparam-name -> value applied to the
                job's model_config before the graph is built.
            use_gpu: Whether the restored session may use the GPU.
            **kwargs: Forwarded to the subclass constructor.

        Returns:
            An instance of cls, or None when ckpt_idx == 0.
        """
        job = experiment.job_from_xmanager_id(xid, must_be_visible=True)
        if use_temp_ckpts is not None:
            job.set_use_temp_ckpts(use_temp_ckpts)
        if overrides is not None:
            for k, v in overrides.items():
                setattr(job.model_config.hparams, k, v)
        if ckpt_idx == 0:
            log.error('Please select a checkpoint and rerun. Valid checkpoints:')
            log.error(str(job.all_checkpoint_indices))
            return
        # A concrete index (not -1) must match an existing checkpoint exactly.
        must_equal = ckpt_idx != -1
        ckpt = job.latest_checkpoint_before(ckpt_idx, must_equal=must_equal)
        log.info(f'Loading checkpoint {ckpt.abspath}')
        return cls(job, ckpt, use_gpu, **kwargs)

    @classmethod
    def from_modeldir(cls,
                      model_directory,
                      model_name,
                      experiment_name,
                      xid,
                      ckpt_idx,
                      overrides=None,
                      use_temp_ckpts=True,
                      use_gpu=True,
                      **kwargs):
        """Creates a TrainedModel from a model directory root and name."""
        experiment = experiments.Experiment(model_directory, model_name,
                                            experiment_name)
        return cls.from_experiment(experiment, xid, ckpt_idx, use_temp_ckpts,
                                   overrides, use_gpu, **kwargs)

    @classmethod
    def from_identifiers(cls,
                         user,
                         model_name,
                         experiment_name,
                         xid,
                         ckpt_idx,
                         overrides=None,
                         use_temp_ckpts=None,
                         charged_user='viscam',
                         use_gpu=True,
                         **kwargs):
        """Creates a trained network from experiment identifiers (retired)."""
        raise ValueError('No longer supported.')

    def restore(self):
        """Creates a session with restored model variables."""
        with self.graph.as_default():
            if self.use_gpu:
                # For now these are disabled since it is difficult to work on
                # all GPUs.
                #allowable_frac = gpu_util.get_allowable_fraction_without(
                #    mem_to_reserve=1024 + 512, cuda_device_index=0)  # ~1GB
                #gpu_options = tf.GPUOptions(
                #    per_process_gpu_memory_fraction=allowable_frac)
                #config = tf.ConfigProto(gpu_options=gpu_options)
                # Grow GPU memory on demand instead of reserving it all.
                config = tf.ConfigProto()
                config.gpu_options.allow_growth = True
            else:
                # Hide all GPUs so the session runs on CPU only.
                config = tf.ConfigProto(device_count={'GPU': 0})
            self.session = tf.Session(config=config)
            saver = tf.train.Saver()
            saver.restore(self.session, self.ckpt.abspath)
def conform_prediction(vector):
    """Forces an arbitrary vector to be a valid (D)SIF."""
    vector = vector.copy()
    if vector.shape[-1] not in [10, 42]:
        raise ValueError('Unimplemented.')
    explicit, implicit = vector[..., :10], vector[..., 10:]
    consts, centers, radii_aa, radii_cov = np.split(
        explicit, [1, 4, 7], axis=-1)
    # Clamp each analytic parameter into its valid range; centers pass
    # through unchanged.
    consts = np.minimum(consts, 0.0)
    radii_aa = np.maximum(radii_aa, 1e-9)
    radii_cov = np.clip(radii_cov, -np.pi / 4., np.pi / 4.)
    pieces = [consts, centers, radii_aa, radii_cov, implicit]
    log.verbose(repr([x.shape for x in pieces]))
    return np.concatenate(pieces, axis=-1)
class SingleViewDepthEncoder(TrainedNetwork):
    """Maps from a single depth image (max-0) to a shape representation."""

    def __init__(self, job, ckpt, use_gpu, **kwargs):
        """Builds the single-view encoder graph and restores its weights."""
        super(SingleViewDepthEncoder, self).__init__(job, ckpt, use_gpu, **kwargs)
        with self.graph.as_default():
            model_config = self.job.model_config
            # Placeholder-backed dataset interface for the one-image proto.
            model_config.inputs = shapenet.build_placeholder_interface(
                model_config, proto='ShapeNetOneImXyzPC')
            training_example = preprocess.preprocess(model_config)
            self.depth_input = model_config.inputs['dataset'].depth_render
            self.xyz_input = model_config.inputs['dataset'].xyz_render
            self.points_input = model_config.inputs['dataset'].surface_point_samples
            # NOTE(review): preprocess() is called a second time here and only
            # this result is used; the call above looks redundant — confirm it
            # has no required side effects before removing either.
            training_example = preprocess.preprocess(model_config)
            observation = sdf_model.Observation(model_config, training_example)
            imp_net = sdf_model.StructuredImplicitModel(model_config, 'imp_net')
            prediction = imp_net.forward(observation)
            structured_implicit = prediction.structured_implicit
            # Predicted SIF parameters as a single packed tensor.
            self.packed_vector = structured_implicit.vector
            self.restore()

    def run(self, depth, points, xyz):
        """Runs the network on the input data, returning a (D)SIF.

        Args:
            depth: A depth image reshaped to [1, h, w, 1].
            points: 10000 surface point samples with normals (6 channels).
            xyz: An [h, w, 3] camera-space XYZ image.

        Returns:
            A numpy array with one row of parameters per shape element
            (hparams.sc rows).
        """
        h, w = np.squeeze(depth).shape
        depth = np.reshape(depth, [1, h, w, 1])
        points = np.reshape(points, [1, 10000, 6])
        xyz = np.reshape(xyz, [1, h, w, 3])
        with self.graph.as_default():
            packed_vector = self.session.run(
                self.packed_vector,
                feed_dict={
                    self.depth_input: depth,
                    self.points_input: points,
                    self.xyz_input: xyz
                })
            packed_vector = np.reshape(packed_vector,
                                       [self.job.model_config.hparams.sc, -1])
            return packed_vector

    def run_example(self, ex):
        """Encodes view 0 of an example's max-0 renders.

        The depth is scaled by 1000 — presumably meters to millimeters;
        TODO confirm against the dataset convention.
        """
        return self.run(ex.max_depth_224[0, ...] * 1000.0,
                        ex.get_max_world_pts_from_idx(0), ex.max_world_xyz_224[0,
                                                                               ...])

    def run_example_bts(self, ex):
        """Encodes view 0 of an example's BTS renders (same scaling as above)."""
        return self.run(ex.bts_depth_224[0, ...] * 1000.0,
                        ex.get_bts_world_pts_from_idx(0), ex.bts_world_xyz_224[0,
                                                                              ...])
class DepthEncoder(TrainedNetwork):
    """Maps from a dodecahedron of depth images to shape elements."""

    def __init__(self, job, ckpt, use_gpu, **kwargs):
        """Builds the dodecahedron-depth encoder graph and restores weights."""
        super(DepthEncoder, self).__init__(job, ckpt, use_gpu, **kwargs)
        with self.graph.as_default():
            model_config = self.job.model_config
            # Inference runs one example at a time.
            model_config.hparams.bs = 1
            model_config.inputs = shapenet.build_placeholder_interface(model_config)
            training_example = preprocess.preprocess(model_config)
            self.depth_input = model_config.inputs['dataset'].depth_renders
            self.points_input = model_config.inputs['dataset'].surface_point_samples
            self.nss_input = model_config.inputs['dataset'].near_surface_samples
            # NOTE(review): preprocess() is called a second time here and only
            # this result is used; the call above looks redundant — confirm it
            # has no required side effects before removing either.
            training_example = preprocess.preprocess(model_config)
            # Keep the example's _tx transform (if present) so run() can
            # return it alongside the packed vector.
            if hasattr(training_example, '_tx'):
                self.tx = training_example._tx
            else:
                self.tx = None
            observation = sdf_model.Observation(model_config, training_example)
            imp_net = sdf_model.StructuredImplicitModel(model_config, 'imp_net')
            prediction = imp_net.forward(observation)
            structured_implicit = prediction.structured_implicit
            self.packed_vector = structured_implicit.vector
            # *phew* we have set up the graph... now we need to pull the weights.
            self.restore()

    def run(self, dodeca, points, nss=None):
        """Runs the network on the input data, returning a (D)SIF.

        Args:
            dodeca: 20 depth renders, reshaped to [1, 20, 224, 224, 1].
            points: 10000 surface point samples with normals (6 channels).
            nss: Optional 100000 near-surface samples (4 channels).

        Returns:
            The packed vector ([hparams.sc, -1]); when the graph exposes a
            _tx transform, a (packed_vector, 4x4 transform) tuple instead.
        """
        dodeca = np.reshape(dodeca, [1, 20, 224, 224, 1])
        points = np.reshape(points, [1, 10000, 6])
        with self.graph.as_default():
            feed_dict = {self.depth_input: dodeca, self.points_input: points}
            if nss is not None:
                feed_dict[self.nss_input] = np.reshape(nss, [1, 100000, 4])
            if self.tx is not None:
                packed_vector, tx = self.session.run([self.packed_vector, self.tx],
                                                     feed_dict=feed_dict)
            else:
                packed_vector = self.session.run(
                    self.packed_vector, feed_dict=feed_dict)
            packed_vector = np.reshape(packed_vector,
                                       [self.job.model_config.hparams.sc, -1])
            if self.tx is not None:
                return packed_vector, np.reshape(tx, [4, 4])
            return packed_vector

    def run_example(self, ex):
        """Convenience wrapper: encodes an example's dodeca renders + samples."""
        return self.run(ex.depth_images, ex.precomputed_surface_samples_from_dodeca)
class Decoder(TrainedNetwork):
"""A SIF -> Mesh decoder."""
    def __init__(self, job, ckpt, use_gpu, **kwargs):
        """Builds the SIF decoding/evaluation graph and restores weights.

        The graph exposes, for a placeholder SIF vector (self.sif_input):
          * a block_res^3 evaluator (algebraic and classified volumes),
          * per-element RBF influence at arbitrary sample points,
          * a per-sample classification loss and its gradient w.r.t. the
            SIF, used by the optimize_* methods.
        """
        super(Decoder, self).__init__(job, ckpt, use_gpu, **kwargs)
        with self.graph.as_default():
            self.sif_input = tf.placeholder(tf.float32, self.batched_vector_shape)
            # TODO(kgenova) Maybe the net should be handled entirely by the structured
            # implicit function? Although there is a difference between the network
            # that can give a result from a vector and a simple wrapper for models
            # that don't need variables. Maybe it's just intelligent about creating
            # the net only when really needed.
            if 'silence_implicits' in kwargs and kwargs['silence_implicits']:
                # 'f' disables the implicit parameter contribution.
                self.job.model_config.hparams.ipc = 'f'
                log.info('Silencing implicits.')
            net = sdf_model.StructuredImplicitModel(
                self.job.model_config, name='imp_net')
            structured_implicit = (
                structured_implicit_function.StructuredImplicit.from_packed_vector(
                    self.job.model_config, self.sif_input, net))
            self.structured_implicit = structured_implicit

            # --- Block evaluation: classify a block_res^3 grid of samples. ---
            self.block_res = 32
            self.native_point_count = self.block_res**3
            self.sample_locations_ph = tf.placeholder(
                tf.float32, shape=[self.block_res, self.block_res, self.block_res, 3])
            samples = tf.reshape(self.sample_locations_ph, [1, self.block_res**3, 3])
            predicted_alg, predicted_locals = structured_implicit.class_at_samples(
                samples, apply_class_transfer=False)
            predicted_class = sdf_util.apply_class_transfer(
                predicted_alg,
                self.job.model_config,
                soft_transfer=True,
                offset=self.job.model_config.hparams.lset)
            vol_shape = [self.block_res, self.block_res, self.block_res]
            self.predicted_alg_grid = tf.reshape(predicted_alg, vol_shape)
            self.predicted_class_grid = tf.reshape(predicted_class, vol_shape)
            effective_element_count = (
                structured_implicit_function.get_effective_element_count(
                    self.job.model_config))
            # Per-element decisions for every grid sample.
            self.local_decisions = tf.reshape(predicted_locals[0], [
                effective_element_count, self.block_res, self.block_res,
                self.block_res
            ])
            # A reusable [-1, 1]-style base grid of sample coordinates.
            self.base_grid = np_util.make_coordinate_grid_3d(
                length=self.block_res,
                height=self.block_res,
                width=self.block_res,
                is_screen_space=False,
                is_homogeneous=False).astype(np.float32)
            self._world2local = structured_implicit.world2local
            self._use_inference_kernel = True

            # Influence samples
            self.true_sample_count = 10000
            self.generic_sample_ph = tf.placeholder(
                tf.float32, shape=[self.true_sample_count, 3])
            self.predicted_influences = structured_implicit.rbf_influence_at_samples(
                tf.expand_dims(self.generic_sample_ph, axis=0))

            # Optimizer stuff
            self.optimizer_pc = 5000
            self.optimizer_samples = tf.placeholder(
                tf.float32, shape=[self.optimizer_pc, 3])
            optimizer_samples = tf.reshape(self.optimizer_samples,
                                           [1, self.optimizer_pc, 3])
            self.predicted_class, _ = structured_implicit.class_at_samples(
                optimizer_samples)
            self.predicted_class = tf.reshape(self.predicted_class,
                                              [self.optimizer_pc, 1])
            self.target_class_ph = tf.placeholder(tf.float32, [self.optimizer_pc, 1])
            # Loss selection is hard-coded; the l1/l2 branches below are
            # currently unreachable alternatives.
            loss = 'crossentropy'
            if loss == 'crossentropy':
                # Clip to avoid log(0).
                clipped_pred = tf.clip_by_value(self.predicted_class, 1e-05, 1 - 1e-05)
                self.optimizer_elt_loss = tf.where(self.target_class_ph > 0.5,
                                                   -tf.log(clipped_pred),
                                                   -tf.log(1 - clipped_pred))
            elif loss == 'l1':
                self.optimizer_elt_loss = tf.abs(self.target_class_ph -
                                                 self.predicted_class)
            elif loss == 'l2':
                self.optimizer_elt_loss = tf.square(self.target_class_ph -
                                                    self.predicted_class)
            # Optionally zero the loss where prediction and target already
            # agree on the in/out decision (disabled).
            apply_where_agree = True
            if not apply_where_agree:
                gt_outside = self.target_class_ph > 0.5
                pred_outside = self.predicted_class > 0.5
                gt_inside = tf.logical_not(gt_outside)
                pred_inside = tf.logical_not(pred_outside)
                agree = tf.logical_or(
                    tf.logical_and(gt_outside, pred_outside),
                    tf.logical_and(gt_inside, pred_inside))
                self.optimizer_elt_loss = tf.where_v2(agree, 0.0,
                                                      self.optimizer_elt_loss)

            self.optimizer_loss = tf.reduce_mean(self.optimizer_elt_loss)
            self.ldif_gradients = tf.gradients(self.optimizer_loss, self.sif_input)

            # TODO(kgenova) Currently disabled since it's in testing and hardcodes
            # some values.
            # self.coords_ph = tf.placeholder(tf.float32, shape=[3])
            # self.am_image_ph = tf.placeholder(tf.int32, shape=[224, 224])
            # pose_cam2world, pose_eye = self._spherical_to_4x4(self.coords_ph)
            # self.pose_error = self._evaluate_pose_error(pose_cam2world, pose_eye,
            #                                             self.am_image_ph)
            # self.pose3_gradients = tf.gradients(self.pose_error, self.coords_ph)

            try:
                self.restore()
            except ValueError:
                log.warning('No variables to restore or restoration otherwise failed.')
    @property
    def unbatched_vector_shape(self):
        """[element_count, element_dof] shape of a single SIF vector."""
        shape_count = self.job.model_config.hparams.sc
        shape_size = structured_implicit_function.element_dof(self.job.model_config)
        return [shape_count, shape_size]
    @property
    def batched_vector_shape(self):
        """unbatched_vector_shape with a leading batch dimension of 1."""
        return [1] + self.unbatched_vector_shape
    @property
    def use_inference_kernel(self):
        # Flag consumed outside this view; presumably toggles the native
        # inference kernel during extraction — confirm at the call sites.
        return self._use_inference_kernel
    @use_inference_kernel.setter
    def use_inference_kernel(self, should_use):
        # Coerce to bool so any truthy value behaves consistently.
        self._use_inference_kernel = bool(should_use)
# TODO(kgenova) The intermediate vector should really be its own class...
def savetxt(self, sif_vector, path=None, version='v1'):
"""Saves a (D)SIF as ASCII text in the SIF file format.
Args:
sif_vector: A numpy array containing the ldif to write to disk. Has shape
(element_count, element_length).
path: A string containing the path to the file to write to, if provided.
If none, no file is written.
version: A string with the version identifier. Must equal 'v1'.
Returns:
A string encoding of the (D)SIF.
"""
if version == 'v0':
raise ValueError('SIF v0 files are no longer supported.')
elif version == 'v1':
s = self.encode_sif_v1(sif_vector)
else:
raise ValueError(f'Unrecognized SIF file format: {version}.')
if path is not None:
file_util.writetxt(path, s)
return s
def encode_sif_v1(self, sif_vector):
"""Encodes a ldif to a string, and optionally writes it to disk.
A description of the file format:
Line 1: SIF
Line 2: Three ints separated by spaces. In order:
1) The number of blobs.
2) The version ID for the blob types. I added this to be safe since
last time when we updated to add rotation it broke all the old txt
files. For now it will always be zero, which means the following
eleven explicit parameters will be given per blob (in order):
1 constant. float.
3 centers (XYZ). float.
3 radii (XYZ diagonals). float.
3 radii (roll-pitch-yaw rotations). float.
1 symmetry ID type. int. For now it will be either 0 or 1:
Zero: Not symmetric.
One: Left-right (XY-plane) symmetry.
3) The number of implicit parameters per blob. So it will likely
be between 0-256.
After the first two lines, there is a line for each blob.
Each line will have the explicit parameters followed by the implicit
parameters. They are space separated.
Args:
sif_vector: The SIF vector to encode as a np array. Has shape
(element_count, element_length).
Returns:
A string encoding of v in the ldif v1 file format.
"""
sif_vector = sif_vector.copy()
shape_count = sif_vector.shape[-2]
shape_len = sif_vector.shape[-1]
if shape_len == 7:
off_axis = np.zeros([shape_count, 3])
sif_vector = np.concatenate([sif_vector, off_axis], axis=1)
shape_len = 10
explicit_len = 10
implicit_len = shape_len - explicit_len
sif_vector = np.reshape(sif_vector, [shape_count, shape_len])
has_implicits = implicit_len > 0
if not has_implicits:
assert shape_len == 10
implicit_len = 0
sif_vector[:, 4:7] = np.sqrt(np.maximum(sif_vector[:, 4:7], 0))
header = 'SIF\n%i %i %i\n' % (shape_count, 0, implicit_len)
out = header
for row_idx in range(shape_count):
row = ' '.join(10 * ['%.9g']) % tuple(sif_vector[row_idx, :10].tolist())
symmetry = int(row_idx < self.job.model_config.hparams.lyr)
row += ' %i' % symmetry
if has_implicits:
implicit_params = ' '.join(implicit_len * ['%.9g']) % (
tuple(sif_vector[row_idx, 10:].tolist()))
row += ' ' + implicit_params
row += '\n'
out += row
return out
    def render_ellipsoids(self, sif_vector):
        """Renders an ellipsoid image visualizing the (D)SIF RBFs.

        Shells out to the GAPS `qview` binary under a temporary X11 server,
        so this requires the GAPS tools to be built locally.
        """
        with py_util.py2_temporary_directory() as d:
            qpath = d + '/q.txt'
            # Write the SIF to disk in the text format qview consumes.
            self.savetxt(sif_vector, qpath)
            impath = d + '/im.png'
            # Fixed camera: eye, towards, up (9 floats, GAPS convention).
            camera = ('1.0451 1.17901 0.630437 '
                      '-0.614259 -0.695319 -0.373119 '
                      '-0.547037 0.715996 -0.433705')
            with py_util.x11_server():
                cmd = '%s/qview %s -camera %s -image %s' % (path_util.gaps_path(),
                                                            qpath, camera, impath)
                sp.check_output(cmd, shell=True)
            # Read the render back before the temporary directory disappears.
            im = file_util.read_image(impath)
        return im
    def interactive_viewer(self, sif_vector, mesh=None):
        """Opens a GAPS viewer that can display the SIF blobs alongside a mesh.

        Blocks until the interactive qview window is closed.
        """
        with py_util.py2_temporary_directory() as d:
            qpath = d + '/q.txt'
            self.savetxt(sif_vector, qpath)
            # Fixed initial camera: eye, towards, up (GAPS convention).
            init_camera = ('1.0451 1.17901 0.630437 '
                           '-0.614259 -0.695319 -0.373119 '
                           '-0.547037 0.715996 -0.433705')
            mstr = ''
            if mesh is not None:
                # Also write the mesh so qview can overlay it.
                mpath = d + '/m.ply'
                file_util.write_mesh(mpath, mesh)
                mstr = f' -input_mesh {mpath}'
            cmd = f'{path_util.gaps_path()}/qview {qpath} -camera {init_camera}{mstr}'
            sp.check_output(cmd, shell=True)
    def world2local(self, sif_vector):
        """Evaluates the per-element world->local transforms for a SIF.

        Adds a leading batch dimension if the input is unbatched, then runs
        the precomputed world2local graph node.
        """
        if sif_vector.shape[0] != 1:
            sif_vector = np.expand_dims(sif_vector, axis=0)
        m = self.session.run(
            self._world2local, feed_dict={self.sif_input: sif_vector})
        return m
    def interactive_mesh_viewer(self, sif_vector, resolution):
        """Opens up an OpenGL session viewing the mesh defined by the SIF/LDIF.

        Extracts a mesh at `resolution` and blocks in GAPS' mshview until
        the window is closed.
        """
        with py_util.py2_temporary_directory() as d:
            mpath = d + '/m.ply'
            m = self.extract_mesh(sif_vector, resolution)
            file_util.write_mesh(mpath, m)
            # Fixed initial camera: eye, towards, up (GAPS convention).
            init_camera = ('1.0451 1.17901 0.630437 '
                           '-0.614259 -0.695319 -0.373119 '
                           '-0.547037 0.715996 -0.433705')
            cmd = '%s/mshview %s -camera %s' % (path_util.gaps_path(), mpath,
                                                init_camera)
            sp.check_output(cmd, shell=True)
    def interactive_gridview(self, sif_vector, resolution, extent=0.75):
        """Evaluates the SIF on a dense grid and opens GAPS' grid viewer."""
        volume = self._grid_eval(
            sif_vector, resolution, extent, extract_parts=False, world2local=None)
        return gaps_util.grdview(volume)
    def _spherical_to_4x4(self, coords):
        """Turns spherical coords into a 4x4 affine transformation matrix.

        Args:
            coords: [r, theta, phi] tensor of spherical camera coordinates.

        Returns:
            A (cam2world 4x4 matrix, eye position [1, 3]) tuple.
        """
        r = coords[0]
        theta = coords[1]
        phi = coords[2]
        # Standard spherical-to-cartesian conversion for the eye position.
        st = tf.sin(theta)
        x = r * st * tf.cos(phi)
        y = r * st * tf.sin(phi)
        z = r * tf.cos(theta)
        eye = tf.stack([x, y, z], axis=0)
        eye = tf.reshape(eye, [1, 3])
        center = tf.zeros([1, 3], dtype=tf.float32)
        world_up = tf.constant([[0., 1., 0.]], dtype=tf.float32)
        world2cam = camera_util.look_at(eye, center, world_up)
        cam2world = tf.linalg.inv(world2cam)
        # NOTE(review): the computed cam2world above is immediately discarded —
        # the hard-coded constant below overrides it, so only `eye` actually
        # depends on `coords`. Presumably a debugging/dataset-specific hack;
        # confirm before relying on this function for general poses.
        cam2world = tf.constant(
            [[-9.9398971e-01, 2.7342862e-03, -4.7837296e-03, 1.4993416e-04],
             [1.6200442e-09, 8.6298174e-01, 4.9326313e-01, 7.1943283e-01],
             [5.5100261e-03, 4.9325553e-01, -8.6296844e-01, -1.2277470e+00],
             [0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 1.0000000e+00]],
            dtype=tf.float32)
        return tf.reshape(cam2world, [4, 4]), eye
    def _evaluate_pose_error(self, cam2world, eye, am_image):
        """Evaluates the error of an estimated 4x4 pose matrix.

        Marches points along each pixel ray, gathers each pixel's assigned
        element's RBF influence at those points, and returns the negated sum
        (lower error = more influence along foreground rays).
        """
        # TODO(kgenova): This is a hack that only works for 3d-r2n2.
        ray_directions = gaps_util.gaps_depth_image_to_cam_image(
            np.ones((224, 224)), xfov=0.422204).astype(np.float32)
        # tc depth hypotheses per ray, spanning [0.75, 2.25) in steps of 0.1.
        tc = 15
        t_vals = tf.constant(np.arange(0.75, 2.25, .1), dtype=tf.float32)
        t_vals = tf.reshape(t_vals, [1, tc, 1])
        ray_count = int(np.prod(ray_directions.shape[:-1]))
        ray_directions = tf.reshape(ray_directions, [ray_count, 1, 3])
        eye = tf.reshape(eye, [1, 1, 3])
        cam_rays = ray_directions * t_vals + eye
        world_pts = geom_util.apply_4x4(
            cam_rays, cam2world, are_points=True, batch_rank=0, sample_rank=2)
        world_pts = tf.reshape(world_pts, [1, ray_count * tc, 3])
        # Exposed for debugging/visualization by optimize_3dof_pose.
        self.cam_3dof_pts = world_pts
        world_rbfs = self.structured_implicit.rbf_influence_at_samples(world_pts)
        eec = world_rbfs.get_shape().as_list()[-1]
        assert len(am_image.get_shape().as_list()) == 2
        # NOTE(review): despite its name, is_bg is True where the pixel's id
        # does NOT equal eec (i.e. it is a foreground mask).
        is_bg = tf.reshape(
            tf.logical_not(tf.equal(am_image, eec)), [1, ray_count, 1])
        am_image = tf.tile(tf.expand_dims(am_image, axis=-1), [1, 1, tc])
        flat_am = tf.reshape(am_image, [ray_count * tc, 1])
        # Remap the sentinel id 45 to element 0 so gather stays in range.
        flat_am = tf.where_v2(tf.equal(flat_am, 45), 0, flat_am)
        world_rbfs = tf.reshape(world_rbfs, [ray_count * tc, 45])
        # Per ray, keep the best influence over the tc depth hypotheses.
        max_val = tf.gather(world_rbfs, flat_am, batch_dims=1)
        max_val = tf.reshape(max_val, [1, ray_count, tc])
        max_val = tf.reduce_max(max_val, axis=-1)
        is_bg_mult = tf.cast(is_bg, dtype=tf.float32)
        # NOTE(review): is_bg_mult is [1, ray_count, 1] while max_val is
        # [1, ray_count]; numpy-style broadcasting makes this product
        # [1, ray_count, ray_count] — likely an unintended blow-up. Verify
        # whether a squeeze of is_bg_mult was intended.
        max_val = is_bg_mult * max_val
        error = -1.0 * tf.reduce_sum(max_val)
        return error
    def optimize_3dof_pose(self, sif_vector, am_image, e, step_count=10, lr=1e-6):
        """Tries to fit a pose given a SIF in 3D and a SIF segmentation image.

        NOTE(review): this depends on self.pose3_gradients, self.pose_error,
        self.coords_ph and self.am_image_ph, whose construction is currently
        commented out in __init__ — as written this will raise
        AttributeError; confirm before use.

        Args:
            sif_vector: The SIF parameters, batched if necessary.
            am_image: 224x224 per-pixel element-assignment image.
            e: A dataset example; only e.v1_gt_mesh is used (visualization).
            step_count: Number of gradient steps to take.
            lr: Learning rate for the two angular dofs.
        """
        if len(sif_vector.shape) == 2:
            sif_vector = np.expand_dims(sif_vector, axis=0)
        # Now rays is an array of shape [h, w, 3]. The origin is currently [0,0,0]
        # because the rays are in camera space (for now).
        # Radius (first dof) is frozen: its per-dof learning rate is zero.
        lr = np.array([0.0, lr, lr], dtype=np.float32)
        # Just worry about a single step for now:
        # The pose is 3-dof: distance, phi, theta.
        coords = np.array([0.812717413913 / 1.75, 0.0, 0.0], dtype=np.float32)
        # cam2world, eye = self._spherical_to_4x4(coords)
        for i in range(step_count):
            log.verbose('Step %i: (%0.4f, %0.4f, %0.4f)' %
                        (i, coords[0], coords[1], coords[2]))
            grad, err, pts = self.session.run(
                [self.pose3_gradients, self.pose_error, self.cam_3dof_pts],
                feed_dict={
                    self.am_image_ph: am_image,
                    self.sif_input: sif_vector,
                    self.coords_ph: coords
                })
            grad = grad[0]
            log.verbose('Error: %0.2f' % err)
            log.verbose('grad: %s' % repr(grad))
            log.verbose('pts.shape: ', repr(pts.shape))
            assert len(grad.shape) == 1
            assert grad.shape[0] == 3
            update = lr * grad
            log.verbose('Update: ', str(update))
            # Visualize the marched sample points against the GT mesh.
            gaps_util.ptsview(pts, mesh=e.v1_gt_mesh)
            # Plain gradient descent on the two free (angular) dofs.
            coords = coords - lr * grad
        return coords
  def optimize_to_gt(self,
                     sif_vector,
                     example,
                     step_count=1,
                     lr=0.01,
                     vis=0,
                     verbosity=0,
                     target='all',
                     samps='nss'):
    """Iteratively optimizes a SIF or LDIF to fit ground truth in/out values.

    Runs SGD with momentum directly on the representation vector, supervised
    by inside/outside labels derived from the example's sample sets.

    Args:
      sif_vector: Numpy array holding the SIF/LDIF representation to refine.
      example: Example object providing near_surface_samples, uniform_samples
        and/or depth_images (which are read depends on `samps`).
      step_count: Number of optimization steps to run.
      lr: Base learning rate; scaled per parameter group via `target`.
      vis: Visualization level. >=1 visualizes first and last step, >=2 every
        step.
      verbosity: Logging level. >=1 logs begin/end satisfaction counts, >=2
        also shows the supervision samples and logs every step.
      target: Which parameter groups receive gradient: 'all-eq', 'all',
        'centers', 'radii', 'features' or 'constants'.
      samps: Supervision sample source: 'nss' (near-surface), 'uni'
        (uniform), 'nssuni' (both), 'dodeca' or 'depth' (depth-derived).

    Returns:
      The optimized vector, reshaped to self.unbatched_vector_shape.
    """
    # Gather the supervision samples. Each row is [x, y, z, signed value].
    if samps == 'nss':
      all_samples = example.near_surface_samples.copy()
      np.random.shuffle(all_samples)
    elif samps == 'uni':
      all_samples = example.uniform_samples.copy()
    elif samps == 'nssuni':
      all_samples = np.concatenate(
          [example.near_surface_samples, example.uniform_samples], axis=0)
    elif samps == 'dodeca':
      depth_ims = example.depth_images / 1000.0
      all_samples = geom_util.depth_dodeca_to_samples(depth_ims)
    elif samps == 'depth':
      depth_idx = 1 # TODO(kgenova) Make this the one in the observation.
      depth_ims = example.depth_images / 1000.0
      depth_im = depth_ims[0, depth_idx, :, :, :]
      cam2world = geom_util.get_dodeca_camera_to_worlds()[depth_idx, :, :]
      assert depth_im.shape[0] == 224
      assert cam2world.shape[0] == 4
      log.verbose('Depth im shape: ', depth_im.shape)
      all_samples = geom_util.depth_image_to_samples(depth_im, cam2world)
    if verbosity >= 2:
      gaps_util.ptsview(all_samples[..., :], self.extract_mesh(sif_vector, 128))
    np.random.shuffle(all_samples)
    # Binarize the 4th column into class labels: negative -> 0, positive -> 1.
    cl = all_samples[:, 3]
    all_samples[cl < 0, 3] = 0
    all_samples[cl > 0, 3] = 1
    samples, gt_class = np.split(all_samples, [3], axis=-1)
    # Fixed evaluation batch used only for reporting/visualization below.
    samples = samples[:self.optimizer_pc, :]
    gt_class = gt_class[:self.optimizer_pc, :]
    def print_sat_count(vec):
      """Prints the number of constraints that are satisfied and the total."""
      pred = self.class_at_samples(vec, np.reshape(samples, [-1, 3]))
      pred_is_out = pred > 0.5
      gt_is_out = gt_class > 0.5
      log.verbose(pred_is_out.shape, gt_is_out.shape)
      agree = np.logical_or(
          np.logical_and(pred_is_out, gt_is_out),
          np.logical_and(
              np.logical_not(pred_is_out), np.logical_not(gt_is_out)))
      sat_count = np.count_nonzero(agree)
      log.info('%i/%i constraints are satisfied.' %
               (sat_count, self.optimizer_pc))
    if verbosity >= 1:
      log.info('Beginning optimization.')
      print_sat_count(sif_vector)
    assert gt_class.shape[-1] == 1
    sif_vector = sif_vector.copy()
    sif_vector = np.expand_dims(sif_vector, axis=0)
    cur_vector = sif_vector.copy()
    # Optionally track the best-seen vector; disabled by default.
    ret_best = False
    if ret_best:
      min_loss = np.inf
      best_vec = cur_vector.copy()
    momentum = 0.9
    velocity = np.zeros_like(cur_vector)
    cur_batch_idx = 0
    for i in range(step_count):
      # Draw the next minibatch, reshuffling once the pool is exhausted.
      batch_start = cur_batch_idx
      batch_end = cur_batch_idx + self.optimizer_pc
      if batch_end > all_samples.shape[0]:
        np.random.shuffle(all_samples)
        batch_start = 0
        batch_end = self.optimizer_pc
        cur_batch_idx = 0
      batch_all_samples = all_samples[batch_start:batch_end, :]
      cur_batch_idx += self.optimizer_pc
      batch_samples, batch_gt_class = np.split(batch_all_samples, [3], axis=-1)
      grad = self.session.run(
          self.ldif_gradients,
          feed_dict={
              self.target_class_ph: batch_gt_class,
              self.sif_input: cur_vector,
              self.optimizer_samples: batch_samples
          })[0]
      vis_this_time = vis >= 2 or (vis >= 1 and (i == 0 or i == step_count - 1))
      print_this_time = verbosity >= 2 or (verbosity >= 1 and not i % 1000)
      if vis_this_time or print_this_time:
        # The per-element loss is only needed for reporting/visualization.
        loss = self.session.run(
            self.optimizer_elt_loss,
            feed_dict={
                self.target_class_ph: batch_gt_class,
                self.sif_input: cur_vector,
                self.optimizer_samples: batch_samples
            })
        if ret_best:
          lsum = np.sum(loss)
          if lsum < min_loss:
            min_loss = lsum
            best_vec = cur_vector.copy()
        # Assuming the loss is zero if a constraint is satisfied:
        is_sat = self.optimizer_pc - np.count_nonzero(loss)
        if print_this_time:
          log.info('Step %i: Total loss: %s. Constraints %i/%i' %
                   (i, repr(np.sum(loss)), is_sat, self.optimizer_pc))
        if vis_this_time:
          self.vis_loss(
              cur_vector,
              gt_at_loss=gt_class,
              loss=loss,
              loss_positions=samples)
      # Per-group learning-rate multipliers over the 42-dim element layout;
      # from the target names: [1 constant] + [3 centers] + [6 radii] +
      # [32 features].
      if target == 'all-eq':
        mults = 42 * [1]
      elif target == 'all':
        mults = [0.001] + 3 * [0.001] + 6 * [0.0000001] + 32 * [50]
      elif target == 'centers':
        mults = [0.000] + 3 * [0.001] + 6 * [0.0000000] + 32 * [0]
      elif target == 'radii':
        mults = [0.000] + 3 * [0.000] + 6 * [0.0000001] + 32 * [0]
      elif target == 'features':
        mults = [0.000] + 3 * [0.000] + 6 * [0.0000000] + 32 * [50]
      elif target == 'constants':
        mults = [0.001] + 3 * [0.000] + 6 * [0.0000000] + 32 * [0]
      else:
        assert False
      mults = np.array(mults).reshape([1, 1, 42])
      # SGD-with-momentum update on the representation vector itself.
      velocity = momentum * velocity + mults * lr * grad
      cur_vector = cur_vector - velocity
    if verbosity >= 1:
      log.info('Finished optimization.')
      print_sat_count(cur_vector)
    if ret_best:
      cur_vector = best_vec
    return np.reshape(cur_vector, self.unbatched_vector_shape)
  def vis_loss(self, sif_vector, gt_at_loss, loss, loss_positions):
    """Visualizes the loss mid-optimization.

    Writes the per-sample losses (sign-flipped where the ground truth is
    'inside') to a temporary .sdf point file alongside the extracted mesh,
    then launches the external GAPS ptsview tool to display both.

    Args:
      sif_vector: The SIF/LDIF vector currently being optimized.
      gt_at_loss: Ground-truth inside/outside class for each loss sample.
      loss: Per-sample loss values.
      loss_positions: Sample positions, reshaped here to (N, 3).
    """
    loss = np.reshape(loss, [-1, 1])
    gt_at_loss = np.reshape(gt_at_loss, [-1, 1])
    assert gt_at_loss.shape[0] == loss.shape[0]
    # Negate losses at 'inside' samples so the viewer can tell inside vs.
    # outside violations apart by sign.
    loss[gt_at_loss <= 0.5] = -loss[gt_at_loss <= 0.5]
    loss_positions = np.reshape(loss_positions, [-1, 3])
    arr = np.concatenate([loss_positions, loss], axis=1)
    with py_util.py2_temporary_directory() as d:
      sdf_path = f'{d}/a.sdf'
      with file_util.open_file(sdf_path, 'wb') as f:
        arr = arr.astype(np.float32)
        arr.tofile(f)
      m = self.extract_mesh(sif_vector, resolution=128)
      m_path = f'{d}/m.ply'
      file_util.write_mesh(m_path, m)
      # Hand-picked camera pose (eye, towards, up) for the viewer.
      init_camera = ('1.0451 1.17901 0.630437 '
                     '-0.614259 -0.695319 -0.373119 '
                     '-0.547037 0.715996 -0.433705')
      cmd = '%s/ptsview %s %s -camera %s' % (path_util.gaps_path(), sdf_path,
                                             m_path, init_camera)
      sp.check_output(cmd, shell=True)
  def _grid_eval_cuda(self, sif_vector, resolution, extent):
    """Evaluates a SIF/LDIF densely on a voxel grid.

    Uses the precompiled ldif2mesh CUDA executable rather than TensorFlow:
    the representation and a serialized OccNet are written to disk, the
    kernel is invoked via the shell, and its .grd output is read back.

    Args:
      sif_vector: The SIF/LDIF representation to evaluate.
      resolution: Integer grid resolution passed to the kernel.
      extent: Half-width of the sampling cube; only 0.75 is supported here.

    Returns:
      The evaluated grid read back from the kernel's .grd output file.

    Raises:
      ValueError: If the kernel binary is missing, the extent is unsupported,
        or the kernel fails (GPU OOM, architecture mismatch, other errors).
    """
    log.verbose('Using custom CUDA kernel for evaluation.')
    # First step: Get the path where the serialized occnet should be.
    # The serialized occnet should be at whatever the checkpoint path is,
    # but replace model.ckpt-[idx] with serialized-occnet-[idx].occnet
    checkpoint_path = self.ckpt.abspath
    log.info(f'Using checkpoint {checkpoint_path} to write OccNet file.')
    assert 'model.ckpt-' in checkpoint_path
    occnet_path = checkpoint_path.replace('model.ckpt-', 'serialized-occnet-')
    occnet_path = occnet_path + '.occnet'
    # Second step: If it isn't there, write it to disk.
    if not os.path.isfile(occnet_path):
      assert os.path.isdir(os.path.dirname(occnet_path))
      # 'ipe' hparam gates serialization; otherwise a pre-extracted occnet
      # shipped with the repo is used instead.
      if self.job.model_config.hparams.ipe == 't':
        self.write_occnet_file(occnet_path)
      else:
        occnet_path = path_util.get_path_to_ldif_root(
        ) + '/ldif2mesh/extracted.occnet'
    # Third step: open a temporary directory, and write the embedding.
    # Make sure that the temp directories are deleted afterwards.
    with py_util.py2_temporary_directory() as d:
      rep_path = f'{d}/ldif.txt'
      self.savetxt(sif_vector, rep_path)
      # Pick the path to the output grd file:
      grd_path = f'{d}/grid.grd'
      # Fourth step: Get the path to the kernel
      kernel_path = os.path.join(path_util.get_path_to_ldif_root(),
                                 'ldif2mesh/ldif2mesh')
      if not os.path.isfile(kernel_path):
        raise ValueError(
            f'There is no compiled CUDA executable at {kernel_path}.')
      cmd = (f'CUDA_VISIBLE_DEVICES=0 {kernel_path} {rep_path} {occnet_path} '
             f'{grd_path} -resolution {resolution}')
      log.verbose(f'Executing command {cmd}')
      # TODO(kgenova) Support extent as a flag
      if extent != 0.75:
        raise ValueError(
            'Currently only 0.75 extent is supported on the '
            'custom kernel. Please set use_inference_kernel to false for an'
            f' extent of {extent}.')
      # Fifth step: Invoke the kernel.
      try:
        cmd_result = sp.check_output(cmd, shell=True)
        log.info(cmd_result.decode('utf-8').replace('\n', ''))
      except sp.CalledProcessError as e:
        # Translate the kernel's failure modes into actionable errors.
        if 'out of memory' in e.output.decode('utf-8'):
          raise ValueError(
              'The GPU does not have enough free memory left for the'
              ' inference kernel. Please reduce the fraction'
              ' reserved by tensorflow.')
        elif 'no kernel image is available' in e.output.decode('utf-8'):
          raise ValueError(
              'It appears that the CUDA kernel was not built to your '
              'gpu\'s architecture. Hopefully this is an easy fix. '
              'Please go to developer.nvidia.com/cuda-gpus, and find '
              'your gpu from the list. Then, modify ./build_kernel.sh '
              'by adding compute_XX and sm_XX for whatever your GPU '
              'compute capability is according to the website. For '
              'example, a 2080 Ti would use compute_75 and sm_75. '
              'Note that if your card supports below 35, it likely '
              'will fail to compile using this method. If you are '
              'seeing this error, please feel free to open up an issue '
              'and report it. We would like to support as many gpus as '
              'possible.')
        else:
          raise ValueError(f'Unrecognized error code {e.returncode} occurred'
                           f' during inference kernel evaluation: {e.output}')
      # Seventh step: Read the grid file.
      _, grd = file_util.read_grd(grd_path)
    # Eighth step: Verify the grid shape and return the grid.
    log.verbose(f'The output CUDA grid has shape {grd.shape}.')
    # gaps_util.grdview(grd)
    return grd
  def _grid_eval(self,
                 sif_vector,
                 resolution,
                 extent,
                 extract_parts,
                 world2local=None):
    """Evaluates the LDIF/SIF on a grid.

    Dispatches to the CUDA kernel when enabled; otherwise evaluates the grid
    with TensorFlow block by block (block_res^3 samples per session call) to
    bound memory use, and stitches the blocks back together.

    Args:
      sif_vector: The SIF/LDIF representation to evaluate.
      resolution: Output grid resolution; must be a multiple of
        self.block_res.
      extent: Half-width of the sampling cube, centered at the origin.
      extract_parts: If True, evaluates per-element decisions
        (self.local_decisions) instead of the combined grid.
      world2local: Optional 4x4 transform applied to the sample positions.

    Returns:
      A numpy array with the evaluated grid (with a leading element dimension
      when extract_parts is set).
    """
    log.verbose('Evaluating SDF grid for mesh.')
    if self.use_inference_kernel and not extract_parts:
      return self._grid_eval_cuda(sif_vector, resolution, extent)
    if extract_parts or world2local:
      log.warning('Part extraction and world2local are not supported with the'
                  ' custom kernel.')
    log.warning('Using pure tensorflow for grid evaluation, this will be slow.')
    t = time.time()
    sif_vector = np.reshape(sif_vector, self.batched_vector_shape)
    assert not resolution % self.block_res
    block_count = resolution // self.block_res
    block_size = (2.0 * extent) / block_count
    l_block = []
    i = 0
    # When parts are extracted the output gains a leading element axis, so
    # every concatenation axis shifts by one.
    dim_offset = 1 if extract_parts else 0
    grid = self.local_decisions if extract_parts else self.predicted_alg_grid
    for li in range(block_count):
      # The -0.5/resolution term centers samples on voxel centers.
      l_min = -extent + (li) * block_size - 0.5 / resolution
      h_block = []
      for hi in range(block_count):
        h_min = -extent + (hi) * block_size - 0.5 / resolution
        w_block = []
        for wi in range(block_count):
          w_min = -extent + (wi) * block_size - 0.5 / resolution
          offset = np.reshape(
              np.array([w_min, l_min, h_min], dtype=np.float32), [1, 1, 1, 3])
          sample_locations = block_size * self.base_grid + offset
          if world2local is not None:
            sample_locations = geom_util_np.apply_4x4(
                sample_locations, world2local, are_points=True)
          grid_out_np = self.session.run(
              grid,
              feed_dict={
                  self.sif_input: sif_vector,
                  self.sample_locations_ph: sample_locations
              })
          i += 1
          w_block.append(grid_out_np)
        h_block.append(np.concatenate(w_block, axis=2 + dim_offset))
      l_block.append(np.concatenate(h_block, axis=0 + dim_offset))
    grid_out = np.concatenate(l_block, axis=1 + dim_offset)
    # log.verbose(f'Grid extent: {np.min(grid_out)}, {np.max(grid_out)}')
    # grid_out -= 0.5
    grid_out_time = time.time()
    log.verbose(f'Grid Eval Time: {grid_out_time - t}')
    return grid_out
def extract_mesh(self,
sif_vectors,
resolution=128,
extent=0.75,
return_success=False,
world2local=None):
"""Extracts a mesh that is the sum of one or more SIF meshes."""
extract_start_time = time.time()
if isinstance(sif_vectors, list):
volumes = []
if world2local is not None:
assert isinstance(world2local, list)
for i, v in enumerate(sif_vectors):
volumes.append(
self._grid_eval(
v,
resolution,
extent,
extract_parts=False,
world2local=world2local[i]
if world2local is not None else None))
volume = np.sum(volumes, axis=0)
else:
volume = self._grid_eval(
sif_vectors,
resolution,
extent,
extract_parts=False,
world2local=world2local)
grid_out_time = time.time()
log.verbose(f'Grid eval time: {grid_out_time - extract_start_time}')
had_crossing, mesh = extract_mesh.marching_cubes(volume, extent)
if not had_crossing:
log.warning('Warning: Marching Cubes found no surface.')
mesh.marching_cubes_successful = had_crossing
done_time = time.time()
log.verbose(f'MCubes Time: {done_time - grid_out_time}')
if return_success:
return mesh, had_crossing
return mesh
def extract_part_meshes(self, sif_vector, resolution, extent=0.75):
elt_volume = self._grid_eval(
sif_vector, resolution, extent, extract_parts=True, world2local=None)
local_meshes = []
for i in range(self.job.model_config.hparams.sc):
had_crossing, mesh_i = extract_mesh.marching_cubes(
elt_volume[i, ...], extent)
mesh_i.marching_cubes_successful = had_crossing
local_meshes.append(mesh_i)
return local_meshes
def _chunk_sample_eval(self, samples, query_fun, chunk_size):
"""Evaluates a set of query locations chunk by chunk to avoid OOM issues."""
# Note- this code will have strange behavior if there is randomness during
# decoding, because it chunks the decoding up into multiple calls.
assert len(samples.shape) == 2
point_count = samples.shape[0]
if point_count == chunk_size:
chunks = [samples]
else:
pad_len = chunk_size - (point_count % chunk_size)
if pad_len:
samples = np.pad(samples, ((0, pad_len), (0, 0)), 'constant')
assert not (point_count + pad_len) % chunk_size
chunk_count = (point_count + pad_len) // chunk_size
chunks = np.split(samples, chunk_count, axis=0)
out = []
for chunk in chunks:
out_i = query_fun(chunk)
assert len(out_i.shape) == 2
assert out_i.shape[0] == chunk_size
out.append(out_i)
return np.concatenate(out, axis=0)[:point_count, :]
def iou(self, sif_vector, example):
samps = example.uniform_samples[:, :3]
gt_is_inside = example.uniform_samples[:, 3:4] < 0.0
pred_is_inside = self.class_at_samples(sif_vector, samps) < 0.5
result = metrics.point_iou(pred_is_inside, gt_is_inside)
return result
  def class_at_samples(self, sif_vector, samples):
    """Determines whether input xyz locations are inside or outside the shape.
    Evaluation happens in chunks of block_res**3 samples because the
    underlying TF op (self.predicted_class_grid) consumes a cubic grid of
    exactly that many sample locations per call.
    Args:
      sif_vector: A numpy array containing the LDIF/SIF to evaluate. Has shape
        (element_count, element_length).
      samples: A numpy array containing samples in the LDIF/SIF frame. Has shape
        (sample_count, 3).
    Returns:
      A numpy array with shape (sample_count, 1). A float that is positive
      outside the LDIF/SIF, and negative inside.
    """
    sif_vector = np.reshape(sif_vector, self.batched_vector_shape)
    def query(sample_chunk):
      # The grid op expects (block_res, block_res, block_res, 3) input; the
      # flat chunk is reshaped in and per-sample classes reshaped back out.
      chunk_grid = sample_chunk.reshape(
          [self.block_res, self.block_res, self.block_res, 3])
      classes = self.session.run(
          self.predicted_class_grid,
          feed_dict={
              self.sif_input: sif_vector,
              self.sample_locations_ph: chunk_grid
          })
      classes = classes.reshape([self.block_res**3, 1])
      return classes
    return self._chunk_sample_eval(samples, query, self.block_res**3)
  def rbf_influence_at_samples(self, sif_vector, samples):
    """Evaluates the influence of each RBF in the SIF/LDIF at each sample.
    Args:
      sif_vector: A numpy array containing the ldif to evaluate. Has shape
        (element_count, element_length).
      samples: A numpy array containing the samples in the ldif frame. Has shape
        (sample_count, 3).
    Returns:
      A numpy array with shape (sample_count, effective_element_count). The
      RBF weight of each effective element at each sample point. The 'effective'
      element count may be higher than the element count, depending on the
      symmetry settings of the ldif. In the case where a ldif is partially
      symmetric, then some elements have multiple RBF weights- their main weight
      (given first) and the weight associated with the shadow element(s)
      transformed by their symmetry matrix. See structured_implicit_function.py
      for a mapping from element indices to equivalent classes. Regardless of
      additional 'effective' elements, the first RBF weights correspond to the
      'real' elements with no symmetry transforms applied, in order.
    """
    # TODO(kgenova) It's a bit clunky to make the user refer to a different
    # python file to get symmetry equivalence classes. Maybe that mapping should
    # be returned as needed.
    sif_vector = np.reshape(sif_vector, self.batched_vector_shape)
    def query(sample_chunk):
      # The influence op consumes exactly true_sample_count points per call.
      chunk_in = sample_chunk.reshape([self.true_sample_count, 3])
      influences = self.session.run(
          self.predicted_influences,
          feed_dict={
              self.generic_sample_ph: chunk_in,
              self.sif_input: sif_vector
          })
      # Drop singleton batch dims; _chunk_sample_eval expects rank-2 output.
      return np.squeeze(influences)
    return self._chunk_sample_eval(samples, query, self.true_sample_count)
def write_occnet_file(self, path):
"""Serializes an occnet network and writes it to disk."""
f = file_util.open_file(path, 'wb')
# Get the weight tensors associated with the occnet:
with self.graph.as_default():
all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
occnet_vars = contrib_framework.filter_variables(
all_vars, include_patterns=['eval_implicit_parameters'])
# Extract all the model weights as numpy values:
model = {}
for v in occnet_vars:
value = self.session.run(v)
log.verbose(f'{v.name}: {value.shape}')
assert v.name not in model
model[v.name] = value
# Serialize them into a single file:
def write_header(base_scope):
# Write the shape so the number of occnet resnet layers and their sizes
# are known.
num_resnet_layers = 1
# Writes all arrays in row-major order.
dim = model[base_scope +
'sample_resize_fc/fully_connected/weights:0'].shape[1]
log.verbose(f'Dimensionality is {dim}')
f.write(struct.pack('ii', num_resnet_layers, dim))
def write_fc_layer(layer_scope):
weights = model[layer_scope + '/fully_connected/weights:0']
biases = model[layer_scope + '/fully_connected/biases:0']
log.verbose(f'FC layer shapes: {weights.shape}, {biases.shape}')
f.write(weights.astype('f').tostring())
f.write(biases.astype('f').tostring())
def write_cbn_layer(layer_scope):
write_fc_layer(layer_scope + '/beta_fc')
write_fc_layer(layer_scope + '/gamma_fc')
running_mean = float(model[layer_scope + '/running_mean:0'])
running_var = float(model[layer_scope + '/running_variance:0'])
log.verbose(f'Running mean, variance: {running_mean}, {running_var}')
f.write(struct.pack('ff', running_mean, running_var))
def write_input_layer(layer_scope):
weights = model[layer_scope + '/fully_connected/weights:0']
biases = model[layer_scope + '/fully_connected/biases:0']
log.verbose(f'Input FC layer shapes: {weights.shape}, {biases.shape}')
f.write(weights.astype('f').tostring())
f.write(biases.astype('f').tostring())
def write_activation_layer(layer_scope):
weights = model[layer_scope + '/fully_connected/weights:0']
bias = float(model[layer_scope + '/fully_connected/biases:0'])
log.verbose(f'Final FC layer shape and bias: {weights.shape}, {bias}')
f.write(weights.astype('f').tostring())
f.write(struct.pack('f', bias))
base = 'imp_net/eval_implicit_parameters/all_elements/OccNet/'
write_header(base)
write_input_layer(base + 'sample_resize_fc')
write_cbn_layer(base + 'fc_resnet_layer_0/cbn_1')
write_fc_layer(base + 'fc_resnet_layer_0/fc_1')
write_cbn_layer(base + 'fc_resnet_layer_0/cbn_2')
write_fc_layer(base + 'fc_resnet_layer_0/fc_2')
write_cbn_layer(base + 'final_cbn')
write_activation_layer(base + 'final_activation')
f.close()
|
test/pytest/test_answers_and_docs.py | dmulyalin/ttp | 254 | 80251 | <reponame>dmulyalin/ttp<filename>test/pytest/test_answers_and_docs.py
import sys
sys.path.insert(0, "../..")
import pprint
import pytest
import logging
logging.basicConfig(level=logging.DEBUG)
from ttp import ttp
def test_answer_1():
    """https://stackoverflow.com/questions/63522291/parsing-blocks-of-text-within-a-file-into-objects

    Parses '#'-tagged bibliography records; ORPHRASE matches either a single
    word or a space-separated phrase, so multi-word fields are captured.
    """
    data = """
#*Approximate Distance Oracles with Improved Query Time.
#@<NAME>
#t2015
#cEncyclopedia of Algorithms
#index555036b37cea80f954149ffc
#*Subset Sum Algorithm for Bin Packing.
#@<NAME>
#t2015
#cEncyclopedia of Algorithms
#index555036b37cea80f954149ffd
"""
    template = """
#*{{ info | ORPHRASE }}
#@{{ author | ORPHRASE }}
#t{{ year }}
#c{{ title | ORPHRASE }}
#index{{ index }}
"""
    parser = ttp(data, template)
    parser.parse()
    res = parser.result(structure="flat_list")
    pprint.pprint(res)
    # Each '#*'-started record becomes its own flat dictionary.
    assert res == [
        {
            "author": "<NAME>",
            "index": "555036b37cea80f954149ffc",
            "info": "Approximate Distance Oracles with Improved Query Time.",
            "title": "Encyclopedia of Algorithms",
            "year": "2015",
        },
        {
            "author": "<NAME>",
            "index": "555036b37cea80f954149ffd",
            "info": "Subset Sum Algorithm for Bin Packing.",
            "title": "Encyclopedia of Algorithms",
            "year": "2015",
        },
    ]
# test_answer_1()
def test_answer_2():
    """https://stackoverflow.com/questions/63499479/extract-value-from-text-string-using-format-string-in-python"""
    # Three one-line "name=..., age=..." records.
    data = """
name=username1, age=1001
name=username2, age=1002
name=username3, age=1003
    """
    template = "name={{ name }}, age={{ age }}"
    parser = ttp(data, template)
    parser.parse()
    parsed = parser.result(structure="flat_list")
    expected = [
        {"age": "100%i" % i, "name": "username%i" % i} for i in (1, 2, 3)
    ]
    assert parsed == expected
# test_answer_2()
def test_issue_20_answer():
    # Parses 'show ip mroute'-style output: (*, G) and (S, G) entries are
    # collected into one group keyed by the overlay source; '*' is quoted via
    # replace() so it survives as a dictionary key.
    data_to_parse = """
(*, 172.16.58.3)
  LISP0.4200, (172.16.58.3, 172.16.31.10), Forward/Sparse, 1d18h/stopped
  LISP0.4201, (172.16.58.3, 172.16.31.10), Forward/Sparse, 2d05h/stopped
(172.16.17.32, 172.16.58.3), 6d20h/00:02:23, flags: FT
  Incoming interface: Vlan1029, RPF nbr 0.0.0.0
  Outgoing interface list:
  LISP0.4100, (172.16.58.3, 172.16.31.10), Forward/Sparse, 1d18h/stopped
    """
    show_mcast1 = """
<template name="mcast" results="per_template">
<group name="mcast_entries.{{ overlay_src }}">
({{ overlay_src | _start_ | replace("*", "'*'")}}, {{ overlay_grp | IP }})
({{ overlay_src | _start_ | IP }}, {{ overlay_grp | IP }}), {{ entry_uptime }}/{{ entry_state_or_timer }}, flags: {{ entry_flags }}
  Incoming interface: {{ incoming_intf }}, RPF nbr {{ rpf_neighbor }}
<group name="oil_entries*">
  {{ outgoing_intf }}, ({{ underlay_src | IP }}, {{ underlay_grp | IP }}), Forward/Sparse, {{ oil_uptime }}/{{ oil_state_or_timer}}
</group>
</group>
</template>
"""
    parser = ttp(template=show_mcast1)
    parser.add_input(data_to_parse, template_name="mcast")
    parser.parse()
    res = parser.result(structure="dictionary")
    # pprint.pprint(res, width=100)
    assert res == {
        "mcast": {
            "mcast_entries": {
                "'*'": {
                    "oil_entries": [
                        {
                            "oil_state_or_timer": "stopped",
                            "oil_uptime": "1d18h",
                            "outgoing_intf": "LISP0.4200",
                            "underlay_grp": "172.16.31.10",
                            "underlay_src": "172.16.58.3",
                        },
                        {
                            "oil_state_or_timer": "stopped",
                            "oil_uptime": "2d05h",
                            "outgoing_intf": "LISP0.4201",
                            "underlay_grp": "172.16.31.10",
                            "underlay_src": "172.16.58.3",
                        },
                    ],
                    "overlay_grp": "172.16.58.3",
                },
                "172.16.17.32": {
                    "entry_flags": "FT",
                    "entry_state_or_timer": "00:02:23",
                    "entry_uptime": "6d20h",
                    "incoming_intf": "Vlan1029",
                    "oil_entries": [
                        {
                            "oil_state_or_timer": "stopped",
                            "oil_uptime": "1d18h",
                            "outgoing_intf": "LISP0.4100",
                            "underlay_grp": "172.16.31.10",
                            "underlay_src": "172.16.58.3",
                        }
                    ],
                    "overlay_grp": "172.16.58.3",
                    "rpf_neighbor": "0.0.0.0",
                },
            }
        }
    }
# test_issue_20_answer()
def test_answer_3():
    """
    Fixed bug with results forming - when have two _start_ matches, but
    one of them is False, TTP was selecting first match without checking
    if its False, updated decision logic to do that check.
    """
    # Alteon SLB config: two 'virt' blocks, the first with nested services.
    data = """
/c/slb/virt 12
 	dis
 	ipver v4
 	vip 1.1.1.1
 	rtsrcmac ena
 	vname "my name"
/c/slb/virt 12/service 443 https
 	group 15
 	rport 443
 	pbind clientip
 	dbind forceproxy
/c/slb/virt 12/service 443 https/http
 	xforward ena
 	httpmod hsts_insert
/c/slb/virt 12/service 443 https/ssl
 	srvrcert cert certname
 	sslpol ssl-Policy
/c/slb/virt 12/service 80 http
 	group 15
 	rport 80
 	pbind clientip
 	dbind forceproxy
/c/slb/virt 12/service 80 http/http
 	xforward ena
/c/slb/virt 14
 	dis
 	ipver v4
 	vip 1.1.4.4
 	rtsrcmac ena
 	vname "my name2"
    """
    template = """
<template name="VIP_cfg" results="per_template">
<group name="{{ vip }}">
/c/slb/virt {{ virt_seq | DIGIT }}
 	dis {{ config_state | set("dis") }}
 	ipver {{ ipver}}
 	vip {{ vip }}
 	rtsrcmac {{ rtsrcmac }}
 	vname "{{ vip_name | ORPHRASE }}"
<group name="services.{{ port }}.{{ proto }}">
/c/slb/virt 12/service {{ port | DIGIT }} {{ proto | exclude(ssl) }}
 	group {{group_seq }}
 	rport {{ real_port }}
 	pbind {{ pbind }}
 	dbind {{ dbind }}
 	xforward {{ xforward }}
 	httpmod {{ httpmod }}
</group>
<group name="ssl_profile">
/c/slb/virt {{ virt_seq }}/service 443 https/ssl
 	srvrcert cert {{ ssl_server_cert }}
 	sslpol {{ ssl_profile }}
{{ ssl | set("https/ssl") }}
</group>
</group>
</template>
    """
    parser = ttp(data, template, log_level="ERROR")
    parser.parse()
    res = parser.result(structure="dictionary")
    # pprint.pprint(res, width=50)
    # The second virt block (1.1.4.4) has no services/ssl children.
    assert res == {
        "VIP_cfg": {
            "1.1.1.1": {
                "config_state": "dis",
                "ipver": "v4",
                "rtsrcmac": "ena",
                "services": {
                    "443": {
                        "https": {
                            "dbind": "forceproxy",
                            "group_seq": "15",
                            "pbind": "clientip",
                            "real_port": "443",
                        },
                        "https/http": {"httpmod": "hsts_insert", "xforward": "ena"},
                    },
                    "80": {
                        "http": {
                            "dbind": "forceproxy",
                            "group_seq": "15",
                            "pbind": "clientip",
                            "real_port": "80",
                        },
                        "http/http": {"xforward": "ena"},
                    },
                },
                "ssl_profile": {
                    "ssl": "https/ssl",
                    "ssl_profile": "ssl-Policy",
                    "ssl_server_cert": "certname",
                    "virt_seq": "12",
                },
                "vip_name": "my name",
                "virt_seq": "12",
            },
            "1.1.4.4": {
                "config_state": "dis",
                "ipver": "v4",
                "rtsrcmac": "ena",
                "vip_name": "my name2",
                "virt_seq": "14",
            },
        }
    }
# test_answer_3()
def test_answer_4():
    # Same SLB data as test_answer_3, but the services group is keyed by port
    # only and filtered with contains="dbind, pbind", so the http-modifier
    # sub-blocks (which lack those fields) are dropped from the results.
    data = """
/c/slb/virt 12
 	dis
 	ipver v4
 	vip 1.1.1.1
 	rtsrcmac ena
 	vname "my name"
/c/slb/virt 12/service 443 https
 	group 15
 	rport 443
 	pbind clientip
 	dbind forceproxy
/c/slb/virt 12/service 443 https/http
 	xforward ena
 	httpmod hsts_insert
/c/slb/virt 12/service 443 https/ssl
 	srvrcert cert certname
 	sslpol ssl-Policy
/c/slb/virt 12/service 80 http
 	group 15
 	rport 80
 	pbind clientip
 	dbind forceproxy
/c/slb/virt 12/service 80 http/http
 	xforward ena
/c/slb/virt 14
 	dis
 	ipver v4
 	vip 1.1.4.4
 	rtsrcmac ena
 	vname "my name2"
    """
    template = """
<template name="VIP_cfg" results="per_template">
<group name="{{ vip }}">
/c/slb/virt {{ virt_seq | DIGIT }}
 	dis {{ config_state | set("dis") }}
 	ipver {{ ipver}}
 	vip {{ vip }}
 	rtsrcmac {{ rtsrcmac }}
 	vname "{{ vip_name | ORPHRASE }}"
<group name="services.{{ port }}" contains="dbind, pbind">
/c/slb/virt 12/service {{ port | DIGIT }} {{ proto | exclude(ssl) }}
 	group {{group_seq }}
 	rport {{ real_port }}
 	pbind {{ pbind }}
 	dbind {{ dbind }}
 	xforward {{ xforward }}
 	httpmod {{ httpmod }}
</group>
<group name="ssl_profile">
/c/slb/virt {{ virt_seq }}/service 443 https/ssl
 	srvrcert cert {{ ssl_server_cert }}
 	sslpol {{ ssl_profile }}
{{ ssl | set("https/ssl") }}
</group>
</group>
</template>
    """
    parser = ttp(data, template, log_level="ERROR")
    parser.parse()
    res = parser.result(structure="dictionary")
    # pprint.pprint(res, width=50)
    assert res == {
        "VIP_cfg": {
            "1.1.1.1": {
                "config_state": "dis",
                "ipver": "v4",
                "rtsrcmac": "ena",
                "services": {
                    "443": {
                        "dbind": "forceproxy",
                        "group_seq": "15",
                        "pbind": "clientip",
                        "proto": "https",
                        "real_port": "443",
                    },
                    "80": {
                        "dbind": "forceproxy",
                        "group_seq": "15",
                        "pbind": "clientip",
                        "proto": "http",
                        "real_port": "80",
                    },
                },
                "ssl_profile": {
                    "ssl": "https/ssl",
                    "ssl_profile": "ssl-Policy",
                    "ssl_server_cert": "certname",
                    "virt_seq": "12",
                },
                "vip_name": "my name",
                "virt_seq": "12",
            },
            "1.1.4.4": {
                "config_state": "dis",
                "ipver": "v4",
                "rtsrcmac": "ena",
                "vip_name": "my name2",
                "virt_seq": "14",
            },
        }
    }
# test_answer_4()
def test_issue_20_answer_2():
    # Extends test_issue_20_answer: multiple _start_ lines per group (star
    # entries with and without an RP line) and two alternative oil-entry
    # formats; '*' groups therefore collect into a list of dictionaries.
    data_to_parse = """
(*, 192.168.3.11)
  LISP0.4200, (172.16.58.3, 172.16.31.10), Forward/Sparse, 1d18h/stopped
  LISP0.4201, (172.16.58.3, 172.16.31.10), Forward/Sparse, 2d05h/stopped
(172.16.17.32, 239.100.100.100), 2d05h/00:01:19, flags: FT
  Incoming interface: Vlan1029, RPF nbr 0.0.0.0
  Outgoing interface list:
  LISP0.4100, (172.16.58.3, 172.16.31.10), Forward/Sparse, 2d05h/stopped
  LISP0.4101, (172.16.58.3, 172.16.31.10), Forward/Sparse, 2d05h/stopped
(*, 172.16.58.3), 6d20h/00:03:28, RP 172.16.17.32, flags: S
  Incoming interface: Null, RPF nbr 0.0.0.0
  Outgoing interface list:
  Vlan3014, Forward/Sparse, 1d18h/00:03:28
  LISP0.4100, (172.16.58.3, 172.16.31.10), Forward/Sparse, 1d18h/stopped
    """
    show_mcast1 = """
<template name="mcast" results="per_template">
<group name="mcast_entries.{{ overlay_src }}">
({{ overlay_src | _start_ | replace("*", "'*'") }}, {{ overlay_grp | IP }})
({{ overlay_src | _start_ | IP }}, {{ overlay_grp | IP }}), {{ entry_uptime }}/{{ entry_state_or_timer }}, flags: {{ entry_flags }}
({{ overlay_src | _start_ | replace("*", "'*'") }}, {{ overlay_grp | IP }}), {{ entry_uptime }}/{{ entry_state_or_timer }}, RP {{ rp }}, flags: {{ entry_flags }}
  Incoming interface: {{ incoming_intf }}, RPF nbr {{ rpf_neighbor }}
<group name="oil_entries*">
  {{ outgoing_intf }}, Forward/Sparse, {{ oil_uptime }}/{{ oil_state_or_timer}}
  {{ outgoing_intf }}, ({{ underlay_src | IP }}, {{ underlay_grp | IP }}), Forward/Sparse, {{ oil_uptime }}/{{ oil_state_or_timer}}
</group>
</group>
</template>
"""
    parser = ttp(template=show_mcast1)
    parser.add_input(data_to_parse, template_name="mcast")
    parser.parse()
    res = parser.result(structure="dictionary")
    # pprint.pprint(res, width=100)
    assert res == {
        "mcast": {
            "mcast_entries": {
                "'*'": [
                    {"overlay_grp": "192.168.3.11"},
                    {
                        "entry_flags": "S",
                        "entry_state_or_timer": "00:03:28",
                        "entry_uptime": "6d20h",
                        "incoming_intf": "Null",
                        "oil_entries": [
                            {
                                "oil_state_or_timer": "00:03:28",
                                "oil_uptime": "1d18h",
                                "outgoing_intf": "Vlan3014",
                                "underlay_grp": "172.16.31.10",
                                "underlay_src": "172.16.58.3",
                            }
                        ],
                        "overlay_grp": "172.16.58.3",
                        "rp": "172.16.17.32",
                        "rpf_neighbor": "0.0.0.0",
                    },
                ],
                "172.16.17.32": {
                    "entry_flags": "FT",
                    "entry_state_or_timer": "00:01:19",
                    "entry_uptime": "2d05h",
                    "incoming_intf": "Vlan1029",
                    "overlay_grp": "172.16.58.3",
                    "rpf_neighbor": "0.0.0.0",
                },
            }
        }
    }
# test_issue_20_answer_2()
def test_docs_ttp_dictionary_usage_example():
    # Mirrors the docs example: a template macro calls the built-in 'to_ip'
    # match function through the _ttp_ dictionary to compute the last usable
    # host address of each interface's prefix.
    template = """
<input load="text">
interface Lo0
 ip address 172.16.17.32/29
!
interface Lo1
 ip address 1.1.1.1/30
</input>
<group macro="add_last_host">
interface {{ interface }}
 ip address {{ ip }}
</group>
<macro>
def add_last_host(data):
    ip_obj, _ = _ttp_["match"]["to_ip"](data["ip"])
    all_ips = list(ip_obj.network.hosts())
    data["last_host"] = str(all_ips[-1])
    return data
</macro>
"""
    parser = ttp(template=template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    # Each interface dict is augmented with the macro-computed 'last_host'.
    assert res == [
        [
            [
                {
                    "interface": "Lo0",
                    "ip": "172.16.17.32/29",
                    "last_host": "172.16.58.3",
                },
                {"interface": "Lo1", "ip": "1.1.1.1/30", "last_host": "1.1.1.2"},
            ]
        ]
    ]
# test_docs_ttp_dictionary_usage_example()
def test_github_issue_21_answer():
    """GitHub issue #21: parse 'show ip nbar protocol-discovery' output.

    A chain variable (C1) joins and converts the numeric counter columns,
    then the map_to_keys macro maps the four positional values per
    direction onto descriptive IN/OUT keys.
    """
    data_to_parse = """
R1#sh ip nbar protocol-discovery protocol
GigabitEthernet1 
Last clearing of "show ip nbar protocol-discovery" counters 00:13:45
Input Output
----- ------
Protocol Packet Count Packet Count
Byte Count Byte Count
5min Bit Rate (bps) 5min Bit Rate (bps)
5min Max Bit Rate (bps) 5min Max Bit Rate (bps)
---------------------------- ------------------------ ------------------------
ssh 191 134
 24805 22072
 2000 1000
 1999 1001
unknown 172 503
 39713 31378
 0 0
 3000 0
ping 144 144
 14592 14592
 0 0
 1000 1000
dns 107 0
 21149 0
 0 0
 2000 0
vrrp 0 738
 0 39852
 0 0
 0 0
ldp 174 175
 13224 13300
 0 0
 0 0
ospf 86 87
 9460 9570
 0 0
 0 0
Total 874 1781
 122943 130764
 2000 1000
 8000 2000
"""
    show_nbar = """
<template name="nbar" results="per_template">
<vars>C1 = "DIGIT | to_int | to_list | joinmatches"</vars>
<group name="{{ interface }}">
{{ interface | re('Gig.+') | re('Ten.+') }}
<group name="{{ protocol }}" macro="map_to_keys">
{{ protocol }} {{ in | chain(C1) }} {{ out | chain(C1) }}
{{ ignore(r"\\s+") }} {{ in | chain(C1) }} {{ out | chain(C1) }}
</group>
</group>
<macro>
def map_to_keys(data):
    # uncomment to see data
    # print(data)
    inp_values = data.pop("in")
    out_values = data.pop("out")
    inp_keys = ["IN Packet Count", "IN Byte Count", "IN 5min Bit Rate (bps)", "IN 5min Max Bit Rate (bps)"]
    out_keys = ["OUT Packet Count", "OUT Byte Count", "OUT 5min Bit Rate (bps)", "OUT 5min Max Bit Rate (bps)"]
    data.update(dict(zip(inp_keys, inp_values)))
    data.update(dict(zip(out_keys, out_values)))
    return data
</macro>
</template>
"""
    parser = ttp(template=show_nbar)
    parser.add_input(data_to_parse, template_name="nbar")
    parser.parse()
    res = parser.result(structure="dictionary")
    # pprint.pprint(res, width=100)
    assert res == {
        "nbar": {
            "GigabitEthernet1 ": {
                "Total": {
                    "IN 5min Bit Rate (bps)": 2000,
                    "IN 5min Max Bit Rate (bps)": 8000,
                    "IN Byte Count": 122943,
                    "IN Packet Count": 874,
                    "OUT 5min Bit Rate (bps)": 1000,
                    "OUT 5min Max Bit Rate (bps)": 2000,
                    "OUT Byte Count": 130764,
                    "OUT Packet Count": 1781,
                },
                "dns": {
                    "IN 5min Bit Rate (bps)": 0,
                    "IN 5min Max Bit Rate (bps)": 2000,
                    "IN Byte Count": 21149,
                    "IN Packet Count": 107,
                    "OUT 5min Bit Rate (bps)": 0,
                    "OUT 5min Max Bit Rate (bps)": 0,
                    "OUT Byte Count": 0,
                    "OUT Packet Count": 0,
                },
                "ldp": {
                    "IN 5min Bit Rate (bps)": 0,
                    "IN 5min Max Bit Rate (bps)": 0,
                    "IN Byte Count": 13224,
                    "IN Packet Count": 174,
                    "OUT 5min Bit Rate (bps)": 0,
                    "OUT 5min Max Bit Rate (bps)": 0,
                    "OUT Byte Count": 13300,
                    "OUT Packet Count": 175,
                },
                "ospf": {
                    "IN 5min Bit Rate (bps)": 0,
                    "IN 5min Max Bit Rate (bps)": 0,
                    "IN Byte Count": 9460,
                    "IN Packet Count": 86,
                    "OUT 5min Bit Rate (bps)": 0,
                    "OUT 5min Max Bit Rate (bps)": 0,
                    "OUT Byte Count": 9570,
                    "OUT Packet Count": 87,
                },
                "ping": {
                    "IN 5min Bit Rate (bps)": 0,
                    "IN 5min Max Bit Rate (bps)": 1000,
                    "IN Byte Count": 14592,
                    "IN Packet Count": 144,
                    "OUT 5min Bit Rate (bps)": 0,
                    "OUT 5min Max Bit Rate (bps)": 1000,
                    "OUT Byte Count": 14592,
                    "OUT Packet Count": 144,
                },
                "ssh": {
                    "IN 5min Bit Rate (bps)": 2000,
                    "IN 5min Max Bit Rate (bps)": 1999,
                    "IN Byte Count": 24805,
                    "IN Packet Count": 191,
                    "OUT 5min Bit Rate (bps)": 1000,
                    "OUT 5min Max Bit Rate (bps)": 1001,
                    "OUT Byte Count": 22072,
                    "OUT Packet Count": 134,
                },
                "unknown": {
                    "IN 5min Bit Rate (bps)": 0,
                    "IN 5min Max Bit Rate (bps)": 3000,
                    "IN Byte Count": 39713,
                    "IN Packet Count": 172,
                    "OUT 5min Bit Rate (bps)": 0,
                    "OUT 5min Max Bit Rate (bps)": 0,
                    "OUT Byte Count": 31378,
                    "OUT Packet Count": 503,
                },
                "vrrp": {
                    "IN 5min Bit Rate (bps)": 0,
                    "IN 5min Max Bit Rate (bps)": 0,
                    "IN Byte Count": 0,
                    "IN Packet Count": 0,
                    "OUT 5min Bit Rate (bps)": 0,
                    "OUT 5min Max Bit Rate (bps)": 0,
                    "OUT Byte Count": 39852,
                    "OUT Packet Count": 738,
                },
            }
        }
    }
# test_github_issue_21_answer()
def test_github_issue_22():
    """GitHub issue #22: a leading ignore pattern should still allow the
    ip_address match to be extracted from indented config lines.
    """
    data = """
interface Loopback0
description Fabric Node Router ID
ip address 172.16.58.3 255.255.255.255
ip pim sparse-mode
ip router isis
clns mtu 1400
end
interface Loopback0
description Fabric Node Router ID
ip address 172.16.17.32 255.255.255.255
ip pim sparse-mode
ip router isis
clns mtu 1400
end
"""
    template = """{{ ignore(r"\\s+") }}ip address {{ ip_address }} 255.255.255.255"""
    ttp_parser = ttp(data, template)
    ttp_parser.parse()
    parsed = ttp_parser.result()
    # pprint.pprint(parsed, width=100)
    expected = [[[{"ip_address": "172.16.58.3"}, {"ip_address": "172.16.17.32"}]]]
    assert parsed == expected
# test_github_issue_22()
def test_github_issue_24():
    """GitHub issue #24: three-level nested groups with ignore(".+") tails.

    Parses a load-balancer VIP status dump into a per-template dictionary
    keyed by virtual-server instance, with nested "services" and real-server
    "pool" lists.
    """
    data = """
19: IP4 1.1.1.1, 00:03:b2:78:04:13, vname portal, NO SERVICES UP
Virtual Services:
http: rport http, group 11, health http (HTTP), pbind clientip
Real Servers:
22: 10.10.10.10, web1, group ena, health (runtime HTTP), 0 ms, FAILED
Reason: N/A
23: 10.11.11.11, web2, group ena, health (runtime HTTP), 0 ms, FAILED
Reason: N/A
https: rport https, group 12, health tcp (TCP), pbind clientip
Real Servers:
22: 10.10.10.10, web1, group ena, health (runtime TCP), 0 ms, FAILED
Reason: N/A
23: 10.11.11.11, web2, group ena, health (runtime TCP), 0 ms, FAILED
Reason: N/A
"""
    template = """
<template name="VIP_cfg" results="per_template">
<group name="{{ vs_instance }}" default="">
{{ vs_instance }}: IP4 {{ vs_ip }},{{ ignore(".+") }}
<group name="services*" default="">
{{ vs_service }}: rport {{ rport }},{{ ignore(".+") }}
<group name="pool*" default="">
{{ node_id }}: {{ node_ip }},{{ ignore(".+") }}
Reason: {{ reason }}
</group>
</group>
</group>
</template>
"""
    parser = ttp(data, template)
    parser.parse()
    # "dictionary" structure keys results by template name instead of the
    # default nested-list structure.
    res = parser.result(structure="dictionary")
    # pprint.pprint(res, width=100)
    assert res == {
        "VIP_cfg": {
            "19": {
                "services": [
                    {
                        "pool": [
                            {
                                "node_id": "22",
                                "node_ip": "10.10.10.10",
                                "reason": "N/A",
                            },
                            {
                                "node_id": "23",
                                "node_ip": "10.11.11.11",
                                "reason": "N/A",
                            },
                        ],
                        "rport": "http",
                        "vs_service": "http",
                    },
                    {
                        "pool": [
                            {
                                "node_id": "22",
                                "node_ip": "10.10.10.10",
                                "reason": "N/A",
                            },
                            {
                                "node_id": "23",
                                "node_ip": "10.11.11.11",
                                "reason": "N/A",
                            },
                        ],
                        "rport": "https",
                        "vs_service": "https",
                    },
                ],
                "vs_ip": "1.1.1.1",
            }
        }
    }
# test_github_issue_24()
def test_reddit_answer_1():
    """
    https://www.reddit.com/r/networking/comments/j106ot/export_custom_lists_from_the_config_aruba_switch/

    A bug was hit while writing this template - the join action was being
    overridden by the ignore indicator's add action. This test guards the fix:
    joinmatches() must accumulate all Tagged VLANs per port.
    """
    data = """
SWITCH# show vlan port 2/11 detail
Status and Counters - VLAN Information - for ports 2/11
Port name:
VLAN ID Name | Status Voice Jumbo Mode
------- -------------------- + ---------- ----- ----- --------
60 ABC | Port-based No No Tagged
70 DEF | Port-based No No Tagged
101 GHIJ | Port-based No No Untagged
105 KLMNO | Port-based No No Tagged
116 PQRS | Port-based No No Tagged
117 TVU | Port-based No No Tagged
SWITCH# show vlan port 2/12 detail
Status and Counters - VLAN Information - for ports 2/12
Port name:
VLAN ID Name | Status Voice Jumbo Mode
------- -------------------- + ---------- ----- ----- --------
61 ABC | Port-based No No Tagged
71 DEF | Port-based No No Tagged
103 GHI | Port-based No No Untagged
"""
    template = """
<vars>
hostname="gethostname"
</vars>
<group name="vlans*">
Status and Counters - VLAN Information - for ports {{ Port_Number }}
{{ Tagged_VLAN | joinmatches(" ") }} {{ ignore }} | {{ ignore }} {{ ignore }} {{ ignore }} Tagged
{{ Untagged_VLAN }} {{ ignore }} | {{ ignore }} {{ ignore }} {{ ignore }} Untagged
{{ Hostname | set(hostname) }}
</group>
<output>
format = "csv"
path = "vlans"
headers = "Hostname, Port_Number, Untagged_VLAN, Tagged_VLAN"
</output>
"""
    parser = ttp(data, template)
    parser.parse()
    res = parser.result()
    # print(res)
    # The csv output formatter collapses all matches into one string result.
    assert res == [
        '"Hostname","Port_Number","Untagged_VLAN","Tagged_VLAN"\n"SWITCH","2/11","101","60 70 105 116 117"\n"SWITCH","2/12","103","61 71"'
    ]
# test_reddit_answer_1()
def test_reddit_answer_2():
    """Parse a FortiGate-style 'config router ospf' section.

    Exercises chain variables referencing <vars>-defined filter lists,
    multiple macros (string cleanup, empty-value suppression, empty-group
    skipping), nested _start_/_end_ groups and the contains/exclude filters.
    """
    data = """
config router ospf
set abr-type standard
set auto-cost-ref-bandwidth 1000
set distance-external 110
set distance-inter-area 110
set distance-intra-area 110
set database-overflow disable
set database-overflow-max-lsas 10000
set database-overflow-time-to-recover 300
set default-information-originate disable
set default-information-metric 10
set default-information-metric-type 2
set default-information-route-map ''
set default-metric 10
set distance 110
set rfc1583-compatible disable
set router-id 10.1.1.1
set spf-timers 5 10
set bfd disable
set log-neighbour-changes enable
set distribute-list-in "OSPF_IMPORT_PREFIX"
set distribute-route-map-in ''
set restart-mode none
set restart-period 120
config area
edit 0.0.0.1
set shortcut disable
set authentication none
set default-cost 10
set nssa-translator-role candidate
set stub-type summary
set type nssa
set nssa-default-information-originate disable
set nssa-default-information-originate-metric 10
set nssa-default-information-originate-metric-type 2
set nssa-redistribution enable
next
end
config ospf-interface
edit "vlan1-int"
set interface "Vlan1"
set ip 0.0.0.0
set authentication text
set authentication-key netconanRemoved13
set prefix-length 0
set retransmit-interval 5
set transmit-delay 1
set cost 0
set priority 1
set dead-interval 40
set hello-interval 10
set hello-multiplier 0
set database-filter-out disable
set mtu 0
set mtu-ignore disable
set network-type point-to-point
set bfd global
set status enable
set resync-timeout 40
next
edit "vlan2-int"
set interface "vlan2"
set ip 0.0.0.0
set authentication text
set authentication-key netconanRemoved14
set prefix-length 0
set retransmit-interval 5
set transmit-delay 1
set cost 0
set priority 1
set dead-interval 40
set hello-interval 10
set hello-multiplier 0
set database-filter-out disable
set mtu 0
set mtu-ignore disable
set network-type point-to-point
set bfd global
set status enable
set resync-timeout 40
next
end
config network
edit 1
set prefix 10.1.1.1 255.255.255.252
set area 0.0.0.1
next
edit 2
set prefix 10.1.1.3 255.255.255.252
set area 0.0.0.1
next
end
config redistribute "connected"
set status enable
set metric 0
set routemap ''
set metric-type 2
set tag 0
end
config redistribute "static"
set status enable
set metric 0
set routemap ''
set metric-type 2
set tag 0
end
config redistribute "rip"
set status disable
set metric 0
set routemap ''
set metric-type 2
set tag 0
end
config redistribute "bgp"
set status enable
set metric 0
set routemap ''
set metric-type 2
set tag 0
end
config redistribute "isis"
set status disable
set metric 0
set routemap ''
set metric-type 2
set tag 0
end
end
"""
    template = """
<vars>
clean_phrase = [
'ORPHRASE',
'macro(\"clean_str\")'
]
clean_list = [
'ORPHRASE',
'macro(\"build_list\")'
]
</vars>
<macro>
def build_list(data):
    if "\\" \\"" in data:
        t = data.split("\\" \\"")
        for i in range(0, len(t)):
            t[i] = t[i].strip("\\"").replace(" ", "_")
            i+=1
        return t
    else:
        return [data.strip("\\"").replace(" ", "_")]
def clean_str(data):
    return data.replace("\\"","").replace(" ", "_")
def match_ip_or_any(data):
    import ipaddress
    if data == \"any\":
        return data
    elif "/" in data:
        return str(data)
    else:
        t = data.replace(" ", "/")
        return str(ipaddress.IPv4Network(t, strict=False))
def ignore_empty(data):
    if data == "\'\'":
        return bool(False)
    else:
        return data
</macro>
<macro>
def skip_empty(data):
    if data == {}:
        return False
    return data
</macro>
<group name="ospf">
config router ospf {{ _start_ }}
set auto-cost-ref-bandwidth {{ ref_bw }}
set default-information-originate {{ default_originate | contains("enable") }}
set default-information-metric {{ default_originate_metric }}
set default-information-metric-type {{ default_originate_metric_type }}
set default-information-route-map {{ default_originate_routemap | chain("clean_phrase") | macro("ignore_empty") }}
set default-metric {{ default_rt_metric }}
set rfc1583-compatible {{ rfc1583_compat | contains("enable") }}
set router-id {{ router_id }}
set distribute-list-in {{ dist_list_in | chain("clean_phrase") | macro("ignore_empty") }}
set distribute-route-map-in {{ dist_routemap_in | chain("clean_phrase") | macro("ignore_empty") }}
<group name="areas*" macro="skip_empty">
config area {{ _start_ }}
<group>
edit {{ area | _start_ }}
set stub-type {{ stub_type }}
set type {{ area_type }}
set nssa-default-information-originate {{ nssa_default_originate | contains("enable") }}
set nssa-default-information-originate-metric {{ nssa_default_metric }}
set nssa-default-information-originate-metric-type {{ nssa_default_metric_type }}
set nssa-redistribution {{ nssa_redis }}
next {{ _end_ }}
</group>
end {{ _end_ }}
</group>
<group name="interfaces*" macro="skip_empty">
config ospf-interface {{ _start_ }}
<group contains="status">
edit {{ name | chain("clean_phrase") | _start_ }}
set interface {{ interface | chain("clean_phrase")}}
set ip {{ ip | exclude("0.0.0.0") }}
set cost {{ cost | exclude("0") }}
set priority {{ priority }}
set mtu {{ mtu | exclude("0") }}
set network-type {{ network }}
set status {{ status | contains("enable") }}
next {{ _end_ }}
</group>
end {{ _end_ }}
</group>
<group name="networks*" macro="skip_empty">
config network {{ _start_ }}
<group>
edit {{ id | _start_ }}
set prefix {{ prefix | ORPHRASE | to_ip | with_prefixlen }}
set area {{ area }}
next {{ _end_ }}
</group>
end {{ _end_ }}
</group>
<group name="redistribute*" contains="status">
config redistribute {{ protocol | chain("clean_phrase") | _start_ }}
set status {{ status | contains('enable') }}
set route-map {{ route_map | chain("clean_phrase") | macro("ignore_empty") }}
set metric-type {{ metric-type }}
set metric {{ metric | exclude("0") }}
set tag {{ tag | exclude("0")}}
end {{ _end_ }}
</group>
end {{ _end_ }}
</group>
"""
    parser = ttp(data, template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    assert res == [
        [
            {
                "ospf": {
                    "areas": [
                        {
                            "area": "0.0.0.1",
                            "area_type": "nssa",
                            "nssa_default_metric": "10",
                            "nssa_default_metric_type": "2",
                            "nssa_redis": "enable",
                            "stub_type": "summary",
                        }
                    ],
                    "default_originate_metric": "10",
                    "default_originate_metric_type": "2",
                    "default_rt_metric": "10",
                    "dist_list_in": "OSPF_IMPORT_PREFIX",
                    "interfaces": [
                        {
                            "interface": "Vlan1",
                            "name": "vlan1-int",
                            "network": "point-to-point",
                            "priority": "1",
                            "status": "enable",
                        },
                        {
                            "interface": "vlan2",
                            "name": "vlan2-int",
                            "network": "point-to-point",
                            "priority": "1",
                            "status": "enable",
                        },
                    ],
                    "networks": [
                        {"area": "0.0.0.1", "id": "1", "prefix": "10.1.1.1/30"},
                        {"area": "0.0.0.1", "id": "2", "prefix": "10.1.1.3/30"},
                    ],
                    "redistribute": [
                        {
                            "metric-type": "2",
                            "protocol": "connected",
                            "status": "enable",
                        },
                        {"metric-type": "2", "protocol": "static", "status": "enable"},
                        {"metric-type": "2", "protocol": "bgp", "status": "enable"},
                    ],
                    "ref_bw": "1000",
                    "router_id": "10.1.1.1",
                }
            }
        ]
    ]
# test_reddit_answer_2()
def test_github_issue_32():
    """GitHub issue #32: method="table" group with two alternative match
    lines, one of which captures an optional comment field.
    """
    data = """
.id=*c;export-route-targets=65001:48;65001:0;import-route-targets=65001:48;interfaces=lo-ext;vlan56;route-distinguisher=65001:48;routing-mark=VRF_EXT
.id=*10;comment=;export-route-targets=65001:80;import-route-targets=65001:80;65001:0;interfaces=lo-private;route-distinguisher=65001:80;routing-mark=VRF_PRIVATE
"""
    template = """
<group method="table">
.id={{ id | exclude(";") }};export-route-targets={{ export-route-targets }};import-route-targets={{ import-route-targets }};interfaces={{ interfaces }};route-distinguisher={{ route-distinguisher }};routing-mark={{ routing-mark }}
.id={{ id }};comment{{ comment }};export-route-targets={{ export-route-targets }};import-route-targets={{ import-route-targets }};interfaces={{ interfaces }};route-distinguisher={{ route-distinguisher }};routing-mark={{ routing-mark }}
</group>
"""
    ttp_parser = ttp(data, template)
    ttp_parser.parse()
    parsed = ttp_parser.result(structure="flat_list")
    # pprint.pprint(parsed)
    expected = [
        {
            "export-route-targets": "65001:48;65001:0",
            "id": "*c",
            "import-route-targets": "65001:48",
            "interfaces": "lo-ext;vlan56",
            "route-distinguisher": "65001:48",
            "routing-mark": "VRF_EXT",
        },
        {
            "comment": "=",
            "export-route-targets": "65001:80",
            "id": "*10",
            "import-route-targets": "65001:80;65001:0",
            "interfaces": "lo-private",
            "route-distinguisher": "65001:80",
            "routing-mark": "VRF_PRIVATE",
        },
    ]
    assert parsed == expected
# test_github_issue_32()
def test_slack_answer_1():
    """Slack answer: a group with multiple _start_ lines and filtered
    version matches tagged via let() as firmware/hardware entries.
    """
    data = """
Firmware
Version
----------------
02.1.1 Build 002
Hardware
Version
----------------
V2R4
"""
    template = """
<group name="versions">
Hardware {{ _start_ }}
Firmware {{ _start_ }}
{{ version | PHRASE | let("type", "firmware") }}
{{ version | exclude("---") | exclude("Vers") | let("type", "hardware") }}
{{ _end_ }}
</group>
"""
    ttp_parser = ttp(data, template)
    ttp_parser.parse()
    parsed = ttp_parser.result(structure="flat_list")
    # pprint.pprint(parsed)
    expected = [
        {
            "versions": [
                {"type": "firmware", "version": "02.1.1 Build 002"},
                {"type": "hardware", "version": "V2R4"},
            ]
        }
    ]
    assert parsed == expected
# test_slack_answer_1()
def test_group_default_docs():
    """Docs example for group defaults: default() on individual matches and
    default="..." applied to a whole group that never matched.
    """
    template = """
<input load="text">
device-hostame uptime is 27 weeks, 3 days, 10 hours, 46 minutes, 10 seconds
</input>
<group name="uptime**">
device-hostame uptime is {{ uptime | PHRASE }}
<group name="software">
software version {{ version | default("uncknown") }}
</group>
</group>
<group name="domain" default="Uncknown">
Default domain is {{ fqdn }}
</group>
"""
    ttp_parser = ttp(template=template)
    ttp_parser.parse()
    parsed = ttp_parser.result()
    # pprint.pprint(parsed)
    expected = [
        [
            {
                "domain": {"fqdn": "Uncknown"},
                "uptime": {
                    "software": {"version": "uncknown"},
                    "uptime": "27 weeks, 3 days, 10 hours, 46 minutes, 10 seconds",
                },
            }
        ]
    ]
    assert parsed == expected
# test_group_default_docs()
def test_github_issue_34_answer():
    """GitHub issue #34: default([]) must yield an empty-list default for an
    unmatched variable inside nested groups.
    """
    template = """
<input load="text">
Hi World
</input>
<group name='demo'>
<group name='audiences*'>
Hello {{ audience | default([]) }}
</group>
</group>
"""
    ttp_parser = ttp(template=template, log_level="ERROR")
    ttp_parser.parse()
    parsed = ttp_parser.result()
    # pprint.pprint(parsed)
    expected = [[{"demo": {"audiences": [{"audience": []}]}}]]
    assert parsed == expected
# test_github_issue_34_answer()
def test_github_issue_33_answer_1():
    """GitHub issue #33: method="table" with regex alternatives of one, two
    or three server tokens, each tagged with a servers_number via let().
    """
    template = """
<input load="text">
server 1.1.1.1
server 172.16.31.10 172.16.31.10
server 172.16.17.32 172.16.17.32 172.16.31.10
</input>
<group name="servers" method="table">
server {{ server | re(r"\\S+") | let("servers_number", 1 ) }}
server {{ server | re(r"\\S+ \\S+") | let("servers_number", 2) }}
server {{ server | re(r"\\S+ \\S+ \\S+") | let("servers_number", 3) }}
</group>
"""
    ttp_parser = ttp(template=template, log_level="ERROR")
    ttp_parser.parse()
    parsed = ttp_parser.result()
    # pprint.pprint(parsed)
    expected = [
        [
            {
                "servers": [
                    {"server": "1.1.1.1", "servers_number": 1},
                    {"server": "172.16.31.10 172.16.31.10", "servers_number": 2},
                    {"server": "172.16.17.32 172.16.17.32 172.16.31.10", "servers_number": 3},
                ]
            }
        ]
    ]
    assert parsed == expected
# test_github_issue_33_answer_1()
def test_issue_36():
    """Issue #36: parse standard/extended ACLs into nested dictionaries.

    Each ACL entry id maps to a list of parsed statements; the per-line
    regex alternatives in the method="table" group disambiguate remark,
    host, host+log, protocol/host and network/wildcard forms.
    """
    template = """
<input load="text">
ip access-list standard 42
10 remark machine_A
10 permit 192.168.200.162
20 remark machine_B
20 permit 192.168.200.149
30 deny any log
ip access-list standard 98
10 permit 10.10.10.1
20 remark toto
20 permit 30.30.30.1
30 permit 30.30.30.0 0.0.0.255
ip access-list standard 99
10 permit 10.20.30.40 log
20 permit 20.30.40.1 log
30 remark DEVICE - SNMP RW
30 permit 172.16.58.3 0.0.0.127
40 permit 172.16.17.32 0.0.0.63
ip access-list extended 199
10 remark COLLECTOR - SNMP
10 permit ip 172.16.17.32 0.0.0.255 any
20 remark RETURN - Back
20 permit ip 172.16.31.10 0.0.0.127 any
30 remark VISUALIZE
30 permit ip host 172.16.58.3 any
</input>
<group name="ip.{{ acl_type }}.{{ acl_name }}">
ip access-list {{ acl_type }} {{ acl_name }}
<group name="{{ entry_id }}*" method="table">
{{ entry_id }} remark {{ remark_name | re(".+") | let("action", "remark") }}
{{ entry_id }} {{ action }} {{ src_host }}
{{ entry_id }} {{ action }} {{ src_host | let("log", "log") }} log
{{ entry_id }} {{ action }} {{ protocol }} host {{ src_host | let("dest_any", "any") }} any
{{ entry_id }} {{ action }} {{ protocol }} {{ src_ntw | let("dest_any", "any") }} {{ src_wildcard | IP }} any
{{ entry_id }} {{ action }} {{ src_ntw }} {{ src_wildcard | IP }}
</group>
</group>
"""
    parser = ttp(template=template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    assert res == [
        [
            {
                "ip": {
                    "extended": {
                        "199": {
                            "10": [
                                {"action": "remark", "remark_name": "COLLECTOR - SNMP"},
                                {
                                    "action": "permit",
                                    "dest_any": "any",
                                    "protocol": "ip",
                                    "src_ntw": "172.16.17.32",
                                    "src_wildcard": "0.0.0.255",
                                },
                            ],
                            "20": [
                                {"action": "remark", "remark_name": "RETURN - Back"},
                                {
                                    "action": "permit",
                                    "dest_any": "any",
                                    "protocol": "ip",
                                    "src_ntw": "172.16.31.10",
                                    "src_wildcard": "0.0.0.127",
                                },
                            ],
                            "30": [
                                {"action": "remark", "remark_name": "VISUALIZE"},
                                {
                                    "action": "permit",
                                    "dest_any": "any",
                                    "protocol": "ip",
                                    "src_host": "172.16.58.3",
                                },
                            ],
                        }
                    },
                    "standard": {
                        "42": {
                            "10": [
                                {"action": "remark", "remark_name": "machine_A"},
                                {"action": "permit", "src_host": "192.168.200.162"},
                            ],
                            "20": [
                                {"action": "remark", "remark_name": "machine_B"},
                                {"action": "permit", "src_host": "192.168.200.149"},
                            ],
                            "30": [{"action": "deny", "log": "log", "src_host": "any"}],
                        },
                        "98": {
                            "10": [{"action": "permit", "src_host": "10.10.10.1"}],
                            "20": [
                                {"action": "remark", "remark_name": "toto"},
                                {"action": "permit", "src_host": "30.30.30.1"},
                            ],
                            "30": [
                                {
                                    "action": "permit",
                                    "src_ntw": "30.30.30.0",
                                    "src_wildcard": "0.0.0.255",
                                }
                            ],
                        },
                        "99": {
                            "10": [
                                {
                                    "action": "permit",
                                    "log": "log",
                                    "src_host": "10.20.30.40",
                                }
                            ],
                            "20": [
                                {
                                    "action": "permit",
                                    "log": "log",
                                    "src_host": "20.30.40.1",
                                }
                            ],
                            "30": [
                                {"action": "remark", "remark_name": "DEVICE - SNMP RW"},
                                {
                                    "action": "permit",
                                    "src_ntw": "172.16.58.3",
                                    "src_wildcard": "0.0.0.127",
                                },
                            ],
                            # entry 40 expectation aligned with input data
                            # "40 permit 172.16.17.32 0.0.0.63"
                            "40": [
                                {
                                    "action": "permit",
                                    "src_ntw": "172.16.17.32",
                                    "src_wildcard": "0.0.0.63",
                                }
                            ],
                        },
                    },
                }
            }
        ]
    ]
# test_issue_36()
def test_github_issue_37_original_data_template():
    """GitHub issue #37: parse Nokia/ALU epipe service configuration.

    A macro rewrites '*' into 'qinq' in sap ids; groups rely heavily on
    explicit _start_/_end_ indicators and per-match defaults. Guards the
    fix where 'no shutdown' lines of one group leaked into sibling
    spoke-sdp groups.
    """
    template = """
<macro>
import re
def qinq(data):
    data = re.sub(r"\\*", r"qinq", data)
    return data
</macro>
<group name="service">
service {{ ignore }}
<group name="epipe.{{ service_id }}" default="none">
epipe {{ service_id | _start_ }} customer {{ customer_id }} create
description "{{ description | ORPHRASE | default("none") }}"
service-mtu {{ service_mtu | default("none") }}
service-name "{{ service_name | ORPHRASE | default("none") }}"
<group name="endpoint" default="none">
endpoint {{ endpoint | _start_ }} create
revert-time {{ revert_time | default("none") }}
exit {{ _end_ }}
</group>
<group name="sap.{{ sap_id }}" default="none">
sap {{ sap_id | macro("qinq") | _start_ | ORPHRASE }} create
description "{{ description | ORPHRASE | default("none")}}"
multi-service-site "{{ mss_name | default("none") }}"
<group name="ingress" default="default_ingress" >
ingress {{ _start_ }}
qos {{ sap_ingress | default("1") }}
scheduler-policy {{ scheduler_policy | default("none")}}
exit {{ _end_ }}
</group>
<group name="egress" default="default_egress">
egress {{ _start_ }}
scheduler-policy {{ scheduler_policy | default("none") }}
qos {{ sap_egress | default("1)") }}
exit {{ _end_ }}
</group>
no shutdown {{ state | set("enabled") | default("disabled") }}
exit {{ _end_ }}
</group>
<group name="pwr_sdp.{{pwr_spoke_sdp_id}}**" default="none">
spoke-sdp {{ pwr_spoke_sdp_id | default("none")}}:{{vc_id | _start_ | default("none") }} endpoint {{ endpoint | default("none") }} create
precedence {{ precedence | default("default_precedence") }}
no shutdown {{ state | set("enabled") | default("disabled") }}
exit {{ _end_ }}
</group>
<group name="regular_sdp.{{r_spoke_sdp_id}}**" default="none">
spoke-sdp {{ r_spoke_sdp_id }}:{{vc_id | _start_ }} create
no shutdown {{ state | set("enabled") | default("disabled") }}
exit {{ _end_ }}
</group>
no shutdown {{ state | set("enabled") | default("disabled") }}
exit {{ _end_ }}
</group>
exit {{ _end_ }}
</group>
"""
    data = """
service foo
epipe 103076 customer 160 create
description "vf=EWL:cn=TATA_COM:tl=2C02495918:st=act:"
service-mtu 1588
service-name "EPIPE service-103076 DKTN08a-D0105 (192.168.3.11)"
sap 1/2/12:20.* create
description "vf=EWL:cn=TATA_COM:tl=2C02495890:st=act:"
multi-service-site "TATA_VSNL_STRAT_A206_LAN10"
ingress
queue-override
queue 1 create
cbs default
mbs 40 kilobytes
rate 10000 cir 10000
exit
exit
exit
egress
queue-override
queue 1 create
cbs default
mbs 40 kilobytes
rate 10000 cir 10000
exit
exit
exit
accounting-policy 4
no shutdown
exit
spoke-sdp 8051:103076 create
no shutdown
exit
no shutdown
exit
epipe 103206 customer 1904 create
description "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA UK PLC Stepney Green E1 3DG'"
service-mtu 1988
service-name "EPIPE service-103206 DKTN08a-D0105 (192.168.3.11)"
sap 2/2/3:401.100 create
description "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA UK PLC Stepney Green E1 3DG'"
multi-service-site "SKANSKA_E13DG_A825_LAN1"
ingress
qos 11010
queue-override
queue 1 create
cbs default
mbs 1188 kilobytes
rate max cir 47500
exit
queue 3 create
cbs default
mbs 63 kilobytes
rate max cir 2500
exit
exit
exit
egress
qos 11010
queue-override
queue 1 create
cbs default
mbs 1188 kilobytes
rate max cir 47500
exit
queue 3 create
cbs default
mbs 63 kilobytes
rate max cir 2500
exit
exit
exit
collect-stats
accounting-policy 4
no shutdown
exit
spoke-sdp 8035:103206 create
no shutdown
exit
no shutdown
exit
epipe 103256 customer 160 create
description "vf=EWL:cn=TATA_COMM:tl=2C02490189:st=act:"
service-mtu 1988
service-name "EPIPE service-103256 DKTN08a-D0105 (192.168.3.11)"
sap 1/2/12:15.* create
description "vf=EWL:cn=TATA_COMM:tl=2C02490171:st=act:"
multi-service-site "TATA_VSNL_STRAT_A206_LAN5"
ingress
qos 11000
queue-override
queue 1 create
cbs default
mbs 391 kilobytes
rate 100000 cir 100000
exit
exit
exit
egress
qos 11000
queue-override
queue 1 create
cbs default
mbs 391 kilobytes
rate 100000 cir 100000
exit
exit
exit
accounting-policy 4
no shutdown
exit
spoke-sdp 8139:103256 create
no shutdown
exit
no shutdown
exit
epipe 103742 customer 160 create
description "vf=EWL:cn=TATA_COM:tl=2C02410363:st=act:"
service-mtu 1588
service-name "EPIPE service-103742 DKTN08a-D0105 (192.168.3.11)"
sap 5/2/50:20.* create
description "vf=EWL:cn=TATA_COM:tl=2C02410338:st=act:"
multi-service-site "TATA_STRAT_LON_A206_LANA"
ingress
qos 11000
queue-override
queue 1 create
cbs default
mbs 32 kilobytes
rate 8000 cir 8000
exit
exit
exit
egress
qos 11000
queue-override
queue 1 create
cbs default
mbs 32 kilobytes
rate 8000 cir 8000
exit
exit
exit
accounting-policy 4
no shutdown
exit
spoke-sdp 8061:103742 create
no shutdown
exit
no shutdown
exit
epipe 55513386 customer 4 vc-switching create
description "vf=EAGG:cn=Bulldog:tl=VF"
service-mtu 1526
spoke-sdp 78:55513386 create
control-word
no shutdown
exit
spoke-sdp 8245:55513386 create
control-word
no shutdown
exit
no shutdown
exit
epipe 55517673 customer 4 create
description "vf=EAGG:cn=Bulldog:tl=2C01291821:st=act:no=NGA EPIPE#BAACTQ#VLAN 901"
service-mtu 1526
service-name "epipe service-64585 DKTN08a-D0105 (192.168.3.11)"
endpoint "SDP" create
revert-time infinite
exit
sap 2/2/3:901.* create
description "2_2_3,H0505824A,Bulldog,VLAN 901"
ingress
scheduler-policy "NGA-LLU-300M"
qos 20010
exit
egress
scheduler-policy "NGA-LLU-300M"
qos 20010
exit
no shutdown
exit
spoke-sdp 8243:55517673 endpoint "SDP" create
collect-stats
precedence 1
no shutdown
exit
spoke-sdp 8245:55517673 endpoint "SDP" create
collect-stats
precedence primary
no shutdown
exit
no shutdown
exit
"""
    parser = ttp(data=data, template=template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    assert res == [
        [
            {
                "service": {
                    "epipe": {
                        "103076": {
                            "customer_id": "160",
                            "description": "vf=EWL:cn=TATA_COM:tl=2C02495918:st=act:",
                            "regular_sdp": {
                                "8051": {"state": "enabled", "vc_id": "103076"}
                            },
                            "sap": {
                                "1/2/12:20.qinq": {
                                    "description": "vf=EWL:cn=TATA_COM:tl=2C02495890:st=act:",
                                    "egress": {
                                        "sap_egress": "1)",
                                        "scheduler_policy": "none",
                                    },
                                    "ingress": {
                                        "sap_ingress": "1",
                                        "scheduler_policy": "none",
                                    },
                                    "mss_name": "TATA_VSNL_STRAT_A206_LAN10",
                                    "state": "enabled",
                                }
                            },
                            "service_mtu": "1588",
                            "service_name": "EPIPE service-103076 "
                            "DKTN08a-D0105 "
                            "(192.168.3.11)",
                            "state": "enabled",
                        },
                        "103206": {
                            "customer_id": "1904",
                            "description": "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA "
                            "UK PLC Stepney Green E1 "
                            "3DG'",
                            "regular_sdp": {
                                "8035": {"state": "enabled", "vc_id": "103206"}
                            },
                            "sap": {
                                "2/2/3:401.100": {
                                    "description": "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA "
                                    "UK "
                                    "PLC "
                                    "Stepney "
                                    "Green "
                                    "E1 "
                                    "3DG'",
                                    "egress": {
                                        "sap_egress": "11010",
                                        "scheduler_policy": "none",
                                    },
                                    "ingress": {
                                        "sap_ingress": "11010",
                                        "scheduler_policy": "none",
                                    },
                                    "mss_name": "SKANSKA_E13DG_A825_LAN1",
                                    "state": "enabled",
                                }
                            },
                            "service_mtu": "1988",
                            "service_name": "EPIPE service-103206 "
                            "DKTN08a-D0105 "
                            "(192.168.3.11)",
                            "state": "enabled",
                        },
                        "103256": {
                            "customer_id": "160",
                            "description": "vf=EWL:cn=TATA_COMM:tl=2C02490189:st=act:",
                            "regular_sdp": {
                                "8139": {"state": "enabled", "vc_id": "103256"}
                            },
                            "sap": {
                                "1/2/12:15.qinq": {
                                    "description": "vf=EWL:cn=TATA_COMM:tl=2C02490171:st=act:",
                                    "egress": {
                                        "sap_egress": "11000",
                                        "scheduler_policy": "none",
                                    },
                                    "ingress": {
                                        "sap_ingress": "11000",
                                        "scheduler_policy": "none",
                                    },
                                    "mss_name": "TATA_VSNL_STRAT_A206_LAN5",
                                    "state": "enabled",
                                }
                            },
                            "service_mtu": "1988",
                            "service_name": "EPIPE service-103256 "
                            "DKTN08a-D0105 "
                            "(192.168.3.11)",
                            "state": "enabled",
                        },
                        "103742": {
                            "customer_id": "160",
                            "description": "vf=EWL:cn=TATA_COM:tl=2C02410363:st=act:",
                            "regular_sdp": {
                                "8061": {"state": "enabled", "vc_id": "103742"}
                            },
                            "sap": {
                                "5/2/50:20.qinq": {
                                    "description": "vf=EWL:cn=TATA_COM:tl=2C02410338:st=act:",
                                    "egress": {
                                        "sap_egress": "11000",
                                        "scheduler_policy": "none",
                                    },
                                    "ingress": {
                                        "sap_ingress": "11000",
                                        "scheduler_policy": "none",
                                    },
                                    "mss_name": "TATA_STRAT_LON_A206_LANA",
                                    "state": "enabled",
                                }
                            },
                            "service_mtu": "1588",
                            "service_name": "EPIPE service-103742 "
                            "DKTN08a-D0105 "
                            "(192.168.3.11)",
                            "state": "enabled",
                        },
                        "55517673": {
                            "customer_id": "4",
                            "description": "vf=EAGG:cn=Bulldog:tl=2C01291821:st=act:no=NGA "
                            "EPIPE#BAACTQ#VLAN 901",
                            "endpoint": {
                                "endpoint": '"SDP"',
                                "revert_time": "infinite",
                            },
                            "pwr_sdp": {
                                "8243": {
                                    "endpoint": '"SDP"',
                                    "precedence": "1",
                                    "state": "enabled",
                                    "vc_id": "55517673",
                                },
                                "8245": {
                                    "endpoint": '"SDP"',
                                    "precedence": "primary",
                                    "state": "enabled",
                                    "vc_id": "55517673",
                                },
                            },
                            "sap": {
                                "2/2/3:901.qinq": {
                                    "description": "2_2_3,H0505824A,Bulldog,VLAN "
                                    "901",
                                    "egress": {
                                        "sap_egress": "20010",
                                        "scheduler_policy": '"NGA-LLU-300M"',
                                    },
                                    "ingress": {
                                        "sap_ingress": "20010",
                                        "scheduler_policy": '"NGA-LLU-300M"',
                                    },
                                    "mss_name": "none",
                                    "state": "enabled",
                                }
                            },
                            "service_mtu": "1526",
                            # expectation aligned with the input data line
                            # service-name "epipe service-64585 DKTN08a-D0105 (192.168.3.11)"
                            "service_name": "epipe service-64585 "
                            "DKTN08a-D0105 "
                            "(192.168.3.11)",
                            "state": "enabled",
                        },
                    }
                }
            }
        ]
    ]
# test_github_issue_37_original_data_template()
def test_github_issue_37_cleaned_up_data():
    """
    Problem with the below template, before the bug fix, was that the
    'no shutdown' statement for the sap group was matched by the
    spoke-sdp group as well and added to results, causing a
    false match. To fix it, added tracking of previously
    started groups in the results object, so that before adding
    match results to overall results, if PATH differs, there is a
    check that this particular item's group has been
    started before; previous logic was not checking for that.
    Have not noticed any issues with other 200+ tests or
    any performance degradation for single/multi-process
    parsing.
    """
    template = """
<group name="service">
service {{ ignore }}
<group name="epipe.{{ service_id }}">
epipe {{ service_id }} customer {{ customer_id }} create
<group name="regular_sdp.{{r_spoke_sdp_id}}**">
spoke-sdp {{ r_spoke_sdp_id }}:{{vc_id }} create
no shutdown {{ state | set("enabled") }}
</group>
</group>
</group>
"""
    data = """
service foo
epipe 103076 customer 160 create
description "vf=EWL:cn=TATA_COM:tl=2C02495918:st=act:"
service-mtu 1588
service-name "EPIPE service-103076 DKTN08a-D0105 (192.168.3.11)"
sap 1/2/12:20.* create
description "vf=EWL:cn=TATA_COM:tl=2C02495890:st=act:"
multi-service-site "TATA_VSNL_STRAT_A206_LAN10"
ingress
queue-override
queue 1 create
cbs default
mbs 40 kilobytes
rate 10000 cir 10000
exit
exit
exit
egress
queue-override
queue 1 create
cbs default
mbs 40 kilobytes
rate 10000 cir 10000
exit
exit
exit
accounting-policy 4
no shutdown
exit
spoke-sdp 8051:103076 create
no shutdown
exit
no shutdown
exit
epipe 103206 customer 1904 create
description "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA UK PLC Stepney Green E1 3DG'"
service-mtu 1988
service-name "EPIPE service-103206 DKTN08a-D0105 (192.168.3.11)"
sap 2/2/3:401.100 create
description "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA UK PLC Stepney Green E1 3DG'"
multi-service-site "SKANSKA_E13DG_A825_LAN1"
ingress
qos 11010
queue-override
queue 1 create
cbs default
mbs 1188 kilobytes
rate max cir 47500
exit
queue 3 create
cbs default
mbs 63 kilobytes
rate max cir 2500
exit
exit
exit
egress
qos 11010
queue-override
queue 1 create
cbs default
mbs 1188 kilobytes
rate max cir 47500
exit
queue 3 create
cbs default
mbs 63 kilobytes
rate max cir 2500
exit
exit
exit
collect-stats
accounting-policy 4
no shutdown
exit
spoke-sdp 8035:103206 create
no shutdown
exit
no shutdown
exit
"""
    parser = ttp(data=data, template=template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    assert res == [
        [
            {
                "service": {
                    "epipe": {
                        "103076": {
                            "customer_id": "160",
                            "regular_sdp": {
                                "8051": {"state": "enabled", "vc_id": "103076"}
                            },
                        },
                        "103206": {
                            "customer_id": "1904",
                            "regular_sdp": {
                                "8035": {"state": "enabled", "vc_id": "103206"}
                            },
                        },
                    }
                }
            }
        ]
    ]
# test_github_issue_37_cleaned_up_data()
def test_github_issue_37_cleaned_data_template():
    """Regression test for github issue #37 using the cleaned-up template.

    Parses SROS-style ``epipe`` service configuration and asserts that SAP,
    endpoint, protected (endpoint) spoke-SDP and regular spoke-SDP details are
    collected under dynamic-path groups keyed by service/sap/sdp id.
    """
    # NOTE(review): sap_egress uses default("1)") below, which looks like a
    # typo for default("1"); the expected result asserts "1)" as well, so the
    # test is self-consistent — confirm intent before changing either side.
    template = """
<group name="service">
service {{ ignore }}
<group name="epipe.{{ service_id }}" default="none">
epipe {{ service_id }} customer {{ customer_id }} create
description "{{ description | ORPHRASE }}"
service-mtu {{ service_mtu }}
service-name "{{ service_name | ORPHRASE }}"
<group name="endpoint" default="none">
endpoint {{ endpoint }} create
revert-time {{ revert_time }}
exit {{ _end_ }}
</group>
<group name="sap.{{ sap_id }}" default="none">
sap {{ sap_id | resub(r"\\*", "qinq") | ORPHRASE }} create
description "{{ description | ORPHRASE }}"
multi-service-site "{{ mss_name }}"
<group name="ingress">
ingress {{ _start_ }}
qos {{ sap_ingress | default("1") }}
scheduler-policy {{ scheduler_policy | default("none")}}
exit {{ _end_ }}
</group>
<group name="egress">
egress {{ _start_ }}
scheduler-policy {{ scheduler_policy | default("none") }}
qos {{ sap_egress | default("1)") }}
exit {{ _end_ }}
</group>
no shutdown {{ state | set("enabled") | default("disabled") }}
exit {{ _end_ }}
</group>
<group name="pwr_sdp.{{pwr_spoke_sdp_id}}**" default="none">
spoke-sdp {{ pwr_spoke_sdp_id }}:{{vc_id }} endpoint {{ endpoint }} create
precedence {{ precedence | default("default_precedence") }}
no shutdown {{ state | set("enabled") | default("disabled") }}
exit {{ _end_ }}
</group>
<group name="regular_sdp.{{r_spoke_sdp_id}}**" default="none">
spoke-sdp {{ r_spoke_sdp_id }}:{{vc_id }} create
no shutdown {{ state | set("enabled") | default("disabled") }}
exit {{ _end_ }}
</group>
no shutdown {{ state | set("enabled") | default("disabled") }}
exit {{ _end_ }}
</group>
exit {{ _end_ }}
</group>
"""
    data = """
service foo
epipe 103076 customer 160 create
description "vf=EWL:cn=TATA_COM:tl=2C02495918:st=act:"
service-mtu 1588
service-name "EPIPE service-103076 DKTN08a-D0105 (192.168.3.11)"
sap 1/2/12:20.* create
description "vf=EWL:cn=TATA_COM:tl=2C02495890:st=act:"
multi-service-site "TATA_VSNL_STRAT_A206_LAN10"
ingress
queue-override
queue 1 create
cbs default
mbs 40 kilobytes
rate 10000 cir 10000
exit
exit
exit
egress
queue-override
queue 1 create
cbs default
mbs 40 kilobytes
rate 10000 cir 10000
exit
exit
exit
accounting-policy 4
no shutdown
exit
spoke-sdp 8051:103076 create
no shutdown
exit
no shutdown
exit
epipe 103206 customer 1904 create
description "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA UK PLC Stepney Green E1 3DG'"
service-mtu 1988
service-name "EPIPE service-103206 DKTN08a-D0105 (192.168.3.11)"
sap 2/2/3:401.100 create
description "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA UK PLC Stepney Green E1 3DG'"
multi-service-site "SKANSKA_E13DG_A825_LAN1"
ingress
qos 11010
queue-override
queue 1 create
cbs default
mbs 1188 kilobytes
rate max cir 47500
exit
queue 3 create
cbs default
mbs 63 kilobytes
rate max cir 2500
exit
exit
exit
egress
qos 11010
queue-override
queue 1 create
cbs default
mbs 1188 kilobytes
rate max cir 47500
exit
queue 3 create
cbs default
mbs 63 kilobytes
rate max cir 2500
exit
exit
exit
collect-stats
accounting-policy 4
no shutdown
exit
spoke-sdp 8035:103206 create
no shutdown
exit
no shutdown
exit
epipe 103256 customer 160 create
description "vf=EWL:cn=TATA_COMM:tl=2C02490189:st=act:"
service-mtu 1988
service-name "EPIPE service-103256 DKTN08a-D0105 (192.168.3.11)"
sap 1/2/12:15.* create
description "vf=EWL:cn=TATA_COMM:tl=2C02490171:st=act:"
multi-service-site "TATA_VSNL_STRAT_A206_LAN5"
ingress
qos 11000
queue-override
queue 1 create
cbs default
mbs 391 kilobytes
rate 100000 cir 100000
exit
exit
exit
egress
qos 11000
queue-override
queue 1 create
cbs default
mbs 391 kilobytes
rate 100000 cir 100000
exit
exit
exit
accounting-policy 4
no shutdown
exit
spoke-sdp 8139:103256 create
no shutdown
exit
no shutdown
exit
epipe 103742 customer 160 create
description "vf=EWL:cn=TATA_COM:tl=2C02410363:st=act:"
service-mtu 1588
service-name "EPIPE service-103742 DKTN08a-D0105 (192.168.3.11)"
sap 5/2/50:20.* create
description "vf=EWL:cn=TATA_COM:tl=2C02410338:st=act:"
multi-service-site "TATA_STRAT_LON_A206_LANA"
ingress
qos 11000
queue-override
queue 1 create
cbs default
mbs 32 kilobytes
rate 8000 cir 8000
exit
exit
exit
egress
qos 11000
queue-override
queue 1 create
cbs default
mbs 32 kilobytes
rate 8000 cir 8000
exit
exit
exit
accounting-policy 4
no shutdown
exit
spoke-sdp 8061:103742 create
no shutdown
exit
no shutdown
exit
epipe 55513386 customer 4 vc-switching create
description "vf=EAGG:cn=Bulldog:tl=VF"
service-mtu 1526
spoke-sdp 78:55513386 create
control-word
no shutdown
exit
spoke-sdp 8245:55513386 create
control-word
no shutdown
exit
no shutdown
exit
epipe 55517673 customer 4 create
description "vf=EAGG:cn=Bulldog:tl=2C01291821:st=act:no=NGA EPIPE#BAACTQ#VLAN 901"
service-mtu 1526
service-name "epipe service-64585 DKTN08a-D0105 (63.130.108.41)"
endpoint "SDP" create
revert-time infinite
exit
sap 2/2/3:901.* create
description "2_2_3,H0505824A,Bulldog,VLAN 901"
ingress
scheduler-policy "NGA-LLU-300M"
qos 20010
exit
egress
scheduler-policy "NGA-LLU-300M"
qos 20010
exit
no shutdown
exit
spoke-sdp 8243:55517673 endpoint "SDP" create
collect-stats
precedence 1
no shutdown
exit
spoke-sdp 8245:55517673 endpoint "SDP" create
collect-stats
precedence primary
no shutdown
exit
no shutdown
exit
"""
    parser = ttp(data=data, template=template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    # NOTE: epipe 55513386 ("vc-switching create") is deliberately absent from
    # the expected result — its header line does not match the template.
    assert res == [
        [
            {
                "service": {
                    "epipe": {
                        "103076": {
                            "customer_id": "160",
                            "description": "vf=EWL:cn=TATA_COM:tl=2C02495918:st=act:",
                            "regular_sdp": {
                                "8051": {"state": "enabled", "vc_id": "103076"}
                            },
                            "sap": {
                                "1/2/12:20.qinq": {
                                    "description": "vf=EWL:cn=TATA_COM:tl=2C02495890:st=act:",
                                    "egress": {
                                        "sap_egress": "1)",
                                        "scheduler_policy": "none",
                                    },
                                    "ingress": {
                                        "sap_ingress": "1",
                                        "scheduler_policy": "none",
                                    },
                                    "mss_name": "TATA_VSNL_STRAT_A206_LAN10",
                                    "state": "enabled",
                                }
                            },
                            "service_mtu": "1588",
                            "service_name": "EPIPE service-103076 "
                            "DKTN08a-D0105 "
                            "(192.168.3.11)",
                            "state": "enabled",
                        },
                        "103206": {
                            "customer_id": "1904",
                            "description": "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA "
                            "UK PLC Stepney Green E1 "
                            "3DG'",
                            "regular_sdp": {
                                "8035": {"state": "enabled", "vc_id": "103206"}
                            },
                            "sap": {
                                "2/2/3:401.100": {
                                    "description": "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA "
                                    "UK "
                                    "PLC "
                                    "Stepney "
                                    "Green "
                                    "E1 "
                                    "3DG'",
                                    "egress": {
                                        "sap_egress": "11010",
                                        "scheduler_policy": "none",
                                    },
                                    "ingress": {
                                        "sap_ingress": "11010",
                                        "scheduler_policy": "none",
                                    },
                                    "mss_name": "SKANSKA_E13DG_A825_LAN1",
                                    "state": "enabled",
                                }
                            },
                            "service_mtu": "1988",
                            "service_name": "EPIPE service-103206 "
                            "DKTN08a-D0105 "
                            "(192.168.3.11)",
                            "state": "enabled",
                        },
                        "103256": {
                            "customer_id": "160",
                            "description": "vf=EWL:cn=TATA_COMM:tl=2C02490189:st=act:",
                            "regular_sdp": {
                                "8139": {"state": "enabled", "vc_id": "103256"}
                            },
                            "sap": {
                                "1/2/12:15.qinq": {
                                    "description": "vf=EWL:cn=TATA_COMM:tl=2C02490171:st=act:",
                                    "egress": {
                                        "sap_egress": "11000",
                                        "scheduler_policy": "none",
                                    },
                                    "ingress": {
                                        "sap_ingress": "11000",
                                        "scheduler_policy": "none",
                                    },
                                    "mss_name": "TATA_VSNL_STRAT_A206_LAN5",
                                    "state": "enabled",
                                }
                            },
                            "service_mtu": "1988",
                            "service_name": "EPIPE service-103256 "
                            "DKTN08a-D0105 "
                            "(192.168.3.11)",
                            "state": "enabled",
                        },
                        "103742": {
                            "customer_id": "160",
                            "description": "vf=EWL:cn=TATA_COM:tl=2C02410363:st=act:",
                            "regular_sdp": {
                                "8061": {"state": "enabled", "vc_id": "103742"}
                            },
                            "sap": {
                                "5/2/50:20.qinq": {
                                    "description": "vf=EWL:cn=TATA_COM:tl=2C02410338:st=act:",
                                    "egress": {
                                        "sap_egress": "11000",
                                        "scheduler_policy": "none",
                                    },
                                    "ingress": {
                                        "sap_ingress": "11000",
                                        "scheduler_policy": "none",
                                    },
                                    "mss_name": "TATA_STRAT_LON_A206_LANA",
                                    "state": "enabled",
                                }
                            },
                            "service_mtu": "1588",
                            "service_name": "EPIPE service-103742 "
                            "DKTN08a-D0105 "
                            "(192.168.3.11)",
                            "state": "enabled",
                        },
                        "55517673": {
                            "customer_id": "4",
                            "description": "vf=EAGG:cn=Bulldog:tl=2C01291821:st=act:no=NGA "
                            "EPIPE#BAACTQ#VLAN 901",
                            "endpoint": {
                                "endpoint": '"SDP"',
                                "revert_time": "infinite",
                            },
                            "pwr_sdp": {
                                "8243": {
                                    "endpoint": '"SDP"',
                                    "precedence": "1",
                                    "state": "enabled",
                                    "vc_id": "55517673",
                                },
                                "8245": {
                                    "endpoint": '"SDP"',
                                    "precedence": "primary",
                                    "state": "enabled",
                                    "vc_id": "55517673",
                                },
                            },
                            "sap": {
                                "2/2/3:901.qinq": {
                                    "description": "2_2_3,H0505824A,Bulldog,VLAN "
                                    "901",
                                    "egress": {
                                        "sap_egress": "20010",
                                        "scheduler_policy": '"NGA-LLU-300M"',
                                    },
                                    "ingress": {
                                        "sap_ingress": "20010",
                                        "scheduler_policy": '"NGA-LLU-300M"',
                                    },
                                    "mss_name": "none",
                                    "state": "enabled",
                                }
                            },
                            "service_mtu": "1526",
                            "service_name": "epipe service-64585 "
                            "DKTN08a-D0105 "
                            "(192.168.3.11)",
                            "state": "enabled",
                        },
                    }
                }
            }
        ]
    ]
# test_github_issue_37_cleaned_data_template()
def test_github_issue_42():
    """Github issue #42: accumulate route-targets via to_list + joinmatches.

    Both import and export groups share the "route-targets" path, producing a
    list of two dictionaries, each holding a list of route-target strings.
    """
    vrf_config = """
vrf xyz
address-family ipv4 unicast
import route-target
65000:3507
65000:3511
65000:5453
65000:5535
!
export route-target
65000:5453
65000:5535
!
!
!
"""
    rt_template = """
<group name="vrfs">
vrf {{name}}
<group name="route-targets">
import route-target {{ _start_ }}
{{ import | to_list | joinmatches }}
</group>
!
<group name="route-targets">
export route-target {{ _start_ }}
{{ export | to_list | joinmatches }}
</group>
</group>
"""
    ttp_parser = ttp(data=vrf_config, template=rt_template, log_level="ERROR")
    ttp_parser.parse()
    parsed = ttp_parser.result()
    # expected result named separately for readability
    expected = [
        [
            {
                "vrfs": {
                    "name": "xyz",
                    "route-targets": [
                        {
                            "import": [
                                "65000:3507",
                                "65000:3511",
                                "65000:5453",
                                "65000:5535",
                            ]
                        },
                        {"export": ["65000:5453", "65000:5535"]},
                    ],
                }
            }
        ]
    ]
    assert parsed == expected
# test_github_issue_42()
def test_github_issue_42_answer():
    """Github issue #42 suggested answer: per-RT groups instead of joinmatches.

    Each route-target line starts its own group item, yielding a list of
    single-key dictionaries under "import_rts" / "export_rts".
    """
    vrf_config = """
vrf xyz
address-family ipv4 unicast
import route-target
65000:3507
65000:3511
65000:5453
65000:5535
!
export route-target
65000:5453
65000:5535
!
!
!
"""
    rt_template = """
<group name="vrfs">
vrf {{name}}
<group name="import_rts">
import route-target {{ _start_ }}
{{ import_rt | _start_ }}
</group>
!
<group name="export_rts">
export route-target {{ _start_ }}
{{ export_rt | _start_ }}
</group>
</group>
"""
    ttp_parser = ttp(data=vrf_config, template=rt_template, log_level="ERROR")
    ttp_parser.parse()
    parsed = ttp_parser.result()
    expected = [
        [
            {
                "vrfs": {
                    "export_rts": [
                        {"export_rt": "65000:5453"},
                        {"export_rt": "65000:5535"},
                    ],
                    "import_rts": [
                        {"import_rt": "65000:3507"},
                        {"import_rt": "65000:3511"},
                        {"import_rt": "65000:5453"},
                        {"import_rt": "65000:5535"},
                    ],
                    "name": "xyz",
                }
            }
        ]
    ]
    assert parsed == expected
# test_github_issue_42_answer()
def test_issue_45():
    """Regression test for issue #45: nested anonymous groups with _end_.

    Parses Juniper-style dhcp-relay configuration and checks that "overrides"
    sections are not captured as server-group items once ended groups are
    tracked during results processing (see note above the final assert).
    """
    data = """
vrf2 {
forwarding-options {
dhcp-relay {
server-group {
IN_MEDIA_SIGNALING {
10.154.6.147;
}
DHCP-NGN-SIG {
10.154.6.147;
}
}
group group2 {
active-server-group IN_MEDIA_SIGNALING;
overrides {
trust-option-82;
}
}
group NGN-SIG {
active-server-group DHCP-NGN-SIG;
overrides {
trust-option-82;
}
}
}
}
}
"""
    template = """
<group name="vrfs*">
{{ name | _start_ }} {
<group name="forwarding_options">
forwarding-options { {{ _start_ }}
<group name="dhcp_relay">
dhcp-relay { {{ _start_ }}
<group name="server_group">
server-group { {{ _start_ }}
<group name="dhcp*">
{{ server_group_name1 | _start_ }} {
<group name="helper_addresses*">
{{ helper_address | IP }};
</group>
} {{ _end_ }}
</group>
} {{ _end_ }}
</group>
<group name="groups*">
group {{ group_name | _start_ }} {
active-server-group {{server_group_name2}};
} {{ _end_ }}
</group>
} {{ _end_ }}
</group>
} {{ _end_ }}
</group>
} {{ _end_ }}
</group>
"""
    parser = ttp(data=data, template=template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # Fix: debug output commented out for consistency with the other tests —
    # keep the test quiet on success.
    # pprint.pprint(res)
    # Result produced before the fix, kept for reference — note the spurious
    # {"server_group_name1": "overrides"} items:
    # assert res == [
    #     [
    #         {
    #             "vrfs": [
    #                 {
    #                     "forwarding_options": {
    #                         "dhcp_relay": {
    #                             "groups": [
    #                                 {
    #                                     "group_name": "group2",
    #                                     "server_group_name2": "IN_MEDIA_SIGNALING",
    #                                 },
    #                                 {
    #                                     "group_name": "NGN-SIG",
    #                                     "server_group_name2": "DHCP-NGN-SIG",
    #                                 },
    #                             ],
    #                             "server_group": {
    #                                 "dhcp": [
    #                                     {
    #                                         "helper_addresses": [
    #                                             {"helper_address": "10.154.6.147"}
    #                                         ],
    #                                         "server_group_name1": "IN_MEDIA_SIGNALING",
    #                                     },
    #                                     {
    #                                         "helper_addresses": [
    #                                             {"helper_address": "10.154.6.147"}
    #                                         ],
    #                                         "server_group_name1": "DHCP-NGN-SIG",
    #                                     },
    #                                     {"server_group_name1": "overrides"},
    #                                     {"server_group_name1": "overrides"},
    #                                 ]
    #                             },
    #                         }
    #                     },
    #                     "name": "vrf2",
    #                 }
    #             ]
    #         }
    #     ]
    # ]
    # was able to fix the issue by introducing ended_groups tracking in results
    # processing while was trying to fix issue 57
    assert res == [
        [
            {
                "vrfs": [
                    {
                        "forwarding_options": {
                            "dhcp_relay": {
                                "groups": [
                                    {
                                        "group_name": "group2",
                                        "server_group_name2": "IN_MEDIA_SIGNALING",
                                    },
                                    {
                                        "group_name": "NGN-SIG",
                                        "server_group_name2": "DHCP-NGN-SIG",
                                    },
                                ],
                                "server_group": {
                                    "dhcp": [
                                        {
                                            "helper_addresses": [
                                                {"helper_address": "10.154.6.147"}
                                            ],
                                            "server_group_name1": "IN_MEDIA_SIGNALING",
                                        },
                                        {
                                            "helper_addresses": [
                                                {"helper_address": "10.154.6.147"}
                                            ],
                                            "server_group_name1": "DHCP-NGN-SIG",
                                        },
                                    ]
                                },
                            }
                        },
                        "name": "vrf2",
                    }
                ]
            }
        ]
    ]
# test_issue_45()
def test_issue_45_1():
    """Issue #45, reduced case: _start_-only nested groups without _end_.

    The "overrides" line is still matched by the dhcp* group's start line,
    so it appears as a server_group_name item in the result.
    """
    device_output = """
vrf2 {
forwarding-options {
dhcp-relay {
server-group {
IN_MEDIA_SIGNALING {
10.154.6.147;
}
group NGN-SIG {
active-server-group DHCP-NGN-SIG;
overrides {
trust-option-82;
}
}
}
}
}
"""
    dhcp_template = """
<group name="vrfs*">
{{ name | _start_ }} {
<group name="forwarding_options">
forwarding-options { {{ _start_ }}
<group name="dhcp_relay">
dhcp-relay { {{ _start_ }}
<group name="server_group">
server-group { {{ _start_ }}
<group name="dhcp*">
{{ server_group_name | _start_ }} {
</group>
</group>
<group name="groups*">
group {{ group_name | _start_ }} {
</group>
</group>
</group>
</group>
"""
    runner = ttp(data=device_output, template=dhcp_template, log_level="ERROR")
    runner.parse()
    parsed = runner.result()
    expected = [
        [
            {
                "vrfs": [
                    {
                        "forwarding_options": {
                            "dhcp_relay": {
                                "groups": [{"group_name": "NGN-SIG"}],
                                "server_group": {
                                    "dhcp": [
                                        {"server_group_name": "IN_MEDIA_SIGNALING"},
                                        {"server_group_name": "overrides"},
                                    ]
                                },
                            }
                        },
                        "name": "vrf2",
                    }
                ]
            }
        ]
    ]
    assert parsed == expected
# test_issue_45_1()
def test_issue_45_filtering_fix():
    """Issue #45 workaround: exclude("overrides") filters the false matches.

    Same data as test_issue_45, but the dhcp* group's start line excludes the
    "overrides" token so those sections never enter the results.
    """
    data = """
vrf2 {
forwarding-options {
dhcp-relay {
server-group {
IN_MEDIA_SIGNALING {
10.154.6.147;
}
DHCP-NGN-SIG {
10.154.6.147;
}
}
group group2 {
active-server-group IN_MEDIA_SIGNALING;
overrides {
trust-option-82;
}
}
group NGN-SIG {
active-server-group DHCP-NGN-SIG;
overrides {
trust-option-82;
}
}
}
}
}
"""
    template = """
<group name="vrfs*">
{{ name | _start_ }} {
<group name="forwarding_options">
forwarding-options { {{ _start_ }}
<group name="dhcp_relay">
dhcp-relay { {{ _start_ }}
<group name="server_group">
server-group { {{ _start_ }}
<group name="dhcp*">
{{ server_group_name1 | _start_ | exclude("overrides") }} {
<group name="helper_addresses*">
{{ helper_address | IP }};
</group>
} {{ _end_ }}
</group>
} {{ _end_ }}
</group>
<group name="groups*">
group {{ group_name | _start_ }} {
active-server-group {{server_group_name2}};
} {{ _end_ }}
</group>
} {{ _end_ }}
</group>
} {{ _end_ }}
</group>
} {{ _end_ }}
</group>
"""
    parser = ttp(data=data, template=template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    assert res == [
        [
            {
                "vrfs": [
                    {
                        "forwarding_options": {
                            "dhcp_relay": {
                                "groups": [
                                    {
                                        "group_name": "group2",
                                        "server_group_name2": "IN_MEDIA_SIGNALING",
                                    },
                                    {
                                        "group_name": "NGN-SIG",
                                        "server_group_name2": "DHCP-NGN-SIG",
                                    },
                                ],
                                "server_group": {
                                    "dhcp": [
                                        {
                                            "helper_addresses": [
                                                {"helper_address": "10.154.6.147"}
                                            ],
                                            "server_group_name1": "IN_MEDIA_SIGNALING",
                                        },
                                        {
                                            "helper_addresses": [
                                                {"helper_address": "10.154.6.147"}
                                            ],
                                            "server_group_name1": "DHCP-NGN-SIG",
                                        },
                                    ]
                                },
                            }
                        },
                        "name": "vrf2",
                    }
                ]
            }
        ]
    ]
# test_issue_45_filtering_fix()
def test_issue_47_answer():
    """Issue #47 answer: several _start_ marker lines select which blocks match.

    Only interface blocks preceded by one of the three marker sentences
    (ABC/DEF/GKL) end up in the results; the Vlan778 block is skipped.
    """
    show_run = """
Some text which indicates that below block should be included in results ABC
interface Loopback0
description Router-id-loopback
ip address 192.168.0.113/24
!
Some text which indicates that below block should be included in results DEF
interface Loopback2
description Router-id-loopback 2
ip address 192.168.0.114/24
!
Some text which indicates that below block should NOT be included in results
interface Vlan778
description CPE_Acces_Vlan
ip address fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b/124
ip vrf CPE1
!
Some text which indicates that below block should be included in results GKL
interface Loopback3
description Router-id-loopback 3
ip address 192.168.0.115/24
!
"""
    interfaces_template = """
Some text which indicates that below block should be included in results ABC {{ _start_ }}
Some text which indicates that below block should be included in results DEF {{ _start_ }}
Some text which indicates that below block should be included in results GKL {{ _start_ }}
interface {{ interface }}
ip address {{ ip }}/{{ mask }}
description {{ description | re(".+") }}
ip vrf {{ vrf }}
! {{ _end_ }}
"""
    run_parser = ttp(data=show_run, template=interfaces_template, log_level="ERROR")
    run_parser.parse()
    parsed = run_parser.result()
    expected_blocks = [
        {
            "description": "Router-id-loopback",
            "interface": "Loopback0",
            "ip": "192.168.0.113",
            "mask": "24",
        },
        {
            "description": "Router-id-loopback 2",
            "interface": "Loopback2",
            "ip": "192.168.0.114",
            "mask": "24",
        },
        {
            "description": "Router-id-loopback 3",
            "interface": "Loopback3",
            "ip": "192.168.0.115",
            "mask": "24",
        },
    ]
    assert parsed == [[expected_blocks]]
# test_issue_47_answer()
def test_issue_48_answer():
    """Issue #48 answer: chain() with a <vars>-defined list of functions.

    Multi-line course descriptions are accumulated with joinmatches while
    excluding the Prerequisite(s)/Department(s) lines from the description.
    """
    data = """
ECON*3400 The Economics of Personnel Management U (3-0) [0.50]
In this course, we examine the economics of personnel management in organizations.
Using mainstream microeconomic and behavioural economic theory, we will consider
such issues as recruitment, promotion, financial and non-financial incentives,
compensation, job performance, performance evaluation, and investment in personnel.
The interplay between theoretical models and empirical evidence will be emphasized in
considering different approaches to the management of personnel.
Prerequisite(s): ECON*2310 or ECON*2200
Department(s): Department of Economics and Finance
ECON*4400 The Economics of Personnel Management U (7-1) [0.90]
In this course, we examine the economics of personnel management in organizations.
Using mainstream microeconomic and behavioural economic theory, we will consider
such issues as recruitment, promotion, financial and non-financial incentives,
compensation, job performance, performance evaluation, and investment in personnel.
Prerequisite(s): ECON*2310
Department(s): Department of Economics
"""
    template = """
<vars>
descr_chain = [
"PHRASE",
"exclude('Prerequisite(s)')",
"exclude('Department(s)')",
"joinmatches"
]
</vars>
<group>
{{ course }}*{{ code }} {{ name | PHRASE }} {{ semester }} ({{ lecture_lab_time }}) [{{ weight }}]
{{ description | chain(descr_chain) }}
Prerequisite(s): {{ prereqs | ORPHRASE }}
Department(s): {{ department | ORPHRASE }}
</group>
"""
    parser = ttp(data=data, template=template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res, width=150)
    assert res == [
        [
            [
                {
                    "code": "3400",
                    "course": "ECON",
                    "department": "Department of Economics and Finance",
                    "description": "In this course, we examine the economics of personnel management in organizations.\n"
                    "Using mainstream microeconomic and behavioural economic theory, we will consider\n"
                    "such issues as recruitment, promotion, financial and non-financial incentives,\n"
                    "compensation, job performance, performance evaluation, and investment in personnel.\n"
                    "The interplay between theoretical models and empirical evidence will be emphasized in\n"
                    "considering different approaches to the management of personnel.",
                    "lecture_lab_time": "3-0",
                    "name": "The Economics of Personnel Management",
                    "prereqs": "ECON*2310 or ECON*2200",
                    "semester": "U",
                    "weight": "0.50",
                },
                {
                    "code": "4400",
                    "course": "ECON",
                    "department": "Department of Economics",
                    "description": "In this course, we examine the economics of personnel management in organizations.\n"
                    "Using mainstream microeconomic and behavioural economic theory, we will consider\n"
                    "such issues as recruitment, promotion, financial and non-financial incentives,\n"
                    "compensation, job performance, performance evaluation, and investment in personnel.",
                    "lecture_lab_time": "7-1",
                    "name": "The Economics of Personnel Management",
                    "prereqs": "ECON*2310",
                    "semester": "U",
                    "weight": "0.90",
                },
            ]
        ]
    ]
# test_issue_48_answer()
def test_issue_48_answer_more():
    """Issue #48 follow-up: joinmatches for multi-line Restriction(s) sections.

    A null-path ("_") child group collects the restriction lines, chaining
    ORPHRASE/exclude/joinmatches so wrapped lines are joined with newlines.
    """
    data = """
IBIO*4521 Thesis in Integrative Biology F (0-12) [1.00]
This course is the first part of the two-semester course IBIO*4521/2. This course is
a two-semester (F,W) undergraduate project in which students conduct a comprehensive,
independent research project in organismal biology under the supervision of a faculty
member in the Department of Integrative Biology. Projects involve a thorough literature
review, a research proposal, original research communicated in oral and poster
presentations, and in a written, publication quality document. This two-semester course
offers students the opportunity to pursue research questions and experimental designs
that cannot be completed in the single semester research courses. Students must make
arrangements with both a faculty supervisor and the course coordinator at least one
semester in advance. A departmental registration form must be obtained from the course
coordinator and submitted no later than the second class day of the fall semester. This is
a twosemester course offered over consecutive semesters F-W. When you select this
course, you must select IBIO*4521 in the Fall semester and IBIO*4522 in the Winter
semester.A grade will not be assigned to IBIO*4521 until IBIO*4522 has been completed.
Prerequisite(s): 12.00 credits
Restriction(s): Normally a minimum cumulative average of 70%. Permission of course
coordinator.
Department(s): Department of Integrative Biology
IBIO*4533 Thesis in Integrative Biology F (0-14) [2.00]
This course is the first part of the two-semester course IBIO*4521/2. This course is
a two-semester (F,W) undergraduate project in which students conduct a comprehensive,
independent research project in organismal biology under the supervision of a faculty
member in the Department of Integrative Biology.
Restriction(s): Normally a minimum cumulative average of 80%. Permission of course
coordinator. Normally a minimum cumulative average of 90%. Permission of course
coordinator.
Department(s): Department of Integrative Biology
"""
    template = """
<vars>
chain_1 = [
"ORPHRASE",
"exclude('Prerequisite(s)')",
"exclude('Department(s)')",
"exclude('Restriction(s)')",
"joinmatches"
]
</vars>
<group>
{{ course }}*{{ code }} {{ name | PHRASE }} {{ semester }} ({{ lecture_lab_time }}) [{{ weight }}]
{{ description | chain(chain_1) }}
Prerequisite(s): {{ prereqs | ORPHRASE }}
Department(s): {{ department | ORPHRASE }}
<group name="_">
Restriction(s): {{ restrictions | PHRASE | joinmatches }}
{{ restrictions | chain(chain_1) }}
</group>
</group>
"""
    parser = ttp(data=data, template=template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # Fix: debug output commented out for consistency with the other tests —
    # keep the test quiet on success.
    # pprint.pprint(res, width=150)
    assert res == [
        [
            [
                {
                    "code": "4521",
                    "course": "IBIO",
                    "department": "Department of Integrative Biology",
                    "description": "This course is the first part of the two-semester course IBIO*4521/2. This course is\n"
                    "a two-semester (F,W) undergraduate project in which students conduct a comprehensive,\n"
                    "independent research project in organismal biology under the supervision of a faculty\n"
                    "member in the Department of Integrative Biology. Projects involve a thorough literature\n"
                    "review, a research proposal, original research communicated in oral and poster\n"
                    "presentations, and in a written, publication quality document. This two-semester course\n"
                    "offers students the opportunity to pursue research questions and experimental designs\n"
                    "that cannot be completed in the single semester research courses. Students must make\n"
                    "arrangements with both a faculty supervisor and the course coordinator at least one\n"
                    "semester in advance. A departmental registration form must be obtained from the course\n"
                    "coordinator and submitted no later than the second class day of the fall semester. This is\n"
                    "a twosemester course offered over consecutive semesters F-W. When you select this\n"
                    "course, you must select IBIO*4521 in the Fall semester and IBIO*4522 in the Winter\n"
                    "semester.A grade will not be assigned to IBIO*4521 until IBIO*4522 has been completed.",
                    "lecture_lab_time": "0-12",
                    "name": "Thesis in Integrative Biology",
                    "prereqs": "12.00 credits",
                    "restrictions": "Normally a minimum cumulative average of 70%. Permission of course\ncoordinator.",
                    "semester": "F",
                    "weight": "1.00",
                },
                {
                    "code": "4533",
                    "course": "IBIO",
                    "department": "Department of Integrative Biology",
                    "description": "This course is the first part of the two-semester course IBIO*4521/2. This course is\n"
                    "a two-semester (F,W) undergraduate project in which students conduct a comprehensive,\n"
                    "independent research project in organismal biology under the supervision of a faculty\n"
                    "member in the Department of Integrative Biology.",
                    "lecture_lab_time": "0-14",
                    "name": "Thesis in Integrative Biology",
                    "restrictions": "Normally a minimum cumulative average of 80%. Permission of course\n"
                    "coordinator. Normally a minimum cumulative average of 90%. Permission of course\n"
                    "coordinator.",
                    "semester": "F",
                    "weight": "2.00",
                },
            ]
        ]
    ]
# test_issue_48_answer_more()
def test_slack_channel_answer_for_Noif():
    """Slack channel answer: multiple _start_ lines with group-level defaults.

    Parses MikroTik "/ip address add" lines in all their variants (with or
    without comment/disabled) into a flat list, filling absent "comment" and
    "disabled" values from the <vars> default_values dictionary.
    """
    data = """
# not disabled and no comment
/ip address add address=10.4.1.245 interface=lo0 network=10.4.1.245
/ip address add address=10.4.1.246 interface=lo1 network=10.4.1.246
# not disabled and comment with no quotes
/ip address add address=10.9.48.241/29 comment=SITEMON interface=ether2 network=10.9.48.240
/ip address add address=10.9.48.233/29 comment=Camera interface=vlan205@bond1 network=10.9.48.232
/ip address add address=10.9.49.1/24 comment=SM-Management interface=vlan200@bond1 network=10.9.49.0
# not disabled and comment with quotes
/ip address add address=10.4.1.130/30 comment="to core01" interface=vlan996@bond4 network=10.4.1.128
/ip address add address=10.4.250.28/29 comment="BH 01" interface=vlan210@bond1 network=10.4.250.24
/ip address add address=10.9.50.13/30 comment="Cust: site01-PE" interface=vlan11@bond1 network=10.9.50.12
# disabled no comment
/ip address add address=10.0.0.2/30 disabled=yes interface=bridge:customer99 network=10.0.0.0
# disabled with comment
/ip address add address=169.254.1.100/24 comment=Cambium disabled=yes interface=vlan200@bond1 network=169.254.1.0
# disabled with comment with quotes
/ip address add address=10.4.248.20/29 comment="Backhaul to AGR (Test Segment)" disabled=yes interface=vlan209@bond1 network=10.4.248.16
"""
    template = """
<vars>
default_values = {
"comment": "",
"disabled": False
}
</vars>
<group default="default_values">
## not disabled and no comment
/ip address add address={{ ip | _start_ }} interface={{ interface }} network={{ network }}
## not disabled and comment with/without quotes
/ip address add address={{ ip | _start_ }}/{{ mask }} comment={{ comment | ORPHRASE | exclude("disabled=") | strip('"')}} interface={{ interface }} network={{ network }}
## disabled no comment
/ip address add address={{ ip | _start_ }}/{{ mask }} disabled={{ disabled }} interface={{ interface }} network={{ network }}
## disabled with comment with/without quotes
/ip address add address={{ ip | _start_ }}/{{ mask }} comment={{ comment | ORPHRASE | exclude("disabled=") | strip('"') }} disabled={{ disabled }} interface={{ interface }} network={{ network }}
</group>
"""
    parser = ttp(data=data, template=template, log_level="ERROR")
    parser.parse()
    # flat_list structure flattens results across templates/inputs
    res = parser.result(structure="flat_list")
    # pprint.pprint(res, width=200)
    assert res == [
        {
            "comment": "",
            "disabled": False,
            "interface": "lo0",
            "ip": "10.4.1.245",
            "network": "10.4.1.245",
        },
        {
            "comment": "",
            "disabled": False,
            "interface": "lo1",
            "ip": "10.4.1.246",
            "network": "10.4.1.246",
        },
        {
            "comment": "SITEMON",
            "disabled": False,
            "interface": "ether2",
            "ip": "10.9.48.241",
            "mask": "29",
            "network": "10.9.48.240",
        },
        {
            "comment": "Camera",
            "disabled": False,
            "interface": "vlan205@bond1",
            "ip": "10.9.48.233",
            "mask": "29",
            "network": "10.9.48.232",
        },
        {
            "comment": "SM-Management",
            "disabled": False,
            "interface": "vlan200@bond1",
            "ip": "10.9.49.1",
            "mask": "24",
            "network": "10.9.49.0",
        },
        {
            "comment": "to core01",
            "disabled": False,
            "interface": "vlan996@bond4",
            "ip": "10.4.1.130",
            "mask": "30",
            "network": "10.4.1.128",
        },
        {
            "comment": "BH 01",
            "disabled": False,
            "interface": "vlan210@bond1",
            "ip": "10.4.250.28",
            "mask": "29",
            "network": "10.4.250.24",
        },
        {
            "comment": "Cust: site01-PE",
            "disabled": False,
            "interface": "vlan11@bond1",
            "ip": "10.9.50.13",
            "mask": "30",
            "network": "10.9.50.12",
        },
        {
            "comment": "",
            "disabled": "yes",
            "interface": "bridge:customer99",
            "ip": "10.0.0.2",
            "mask": "30",
            "network": "10.0.0.0",
        },
        {
            "comment": "Cambium",
            "disabled": "yes",
            "interface": "vlan200@bond1",
            "ip": "169.254.1.100",
            "mask": "24",
            "network": "169.254.1.0",
        },
        {
            "comment": "Backhaul to AGR (Test Segment)",
            "disabled": "yes",
            "interface": "vlan209@bond1",
            "ip": "10.4.248.20",
            "mask": "29",
            "network": "10.4.248.16",
        },
    ]
# test_slack_channel_answer_for_Noif()
def test_slack_answer_2():
    """Parse Nokia SR OS port configuration into a system.ports structure.

    Regression test for nested group termination: an ``exit {{ _end_ }}``
    template line with the wrong indentation level used to close the
    "egress" group too early (see the ``##`` comments inside the template).
    """
    data_to_parse = """
port 1/1/1
description "port 1 description"
ethernet
mode hybrid
encap-type dot1q
crc-monitor
sd-threshold 5 multiplier 5
sf-threshold 3 multiplier 5
window-size 60
exit
network
queue-policy "ncq-only"
accounting-policy 12
collect-stats
egress
queue-group "qos-policy-for-router1" instance 1 create
accounting-policy 1
collect-stats
agg-rate
rate 50000
exit
exit
exit
exit
access
egress
queue-group "policer-output-queues" instance 1 create
accounting-policy 1
collect-stats
exit
exit
exit
lldp
dest-mac nearest-bridge
admin-status tx-rx
notification
tx-tlvs port-desc sys-name sys-desc sys-cap
tx-mgmt-address system
exit
exit
down-on-internal-error
exit
no shutdown
exit
port 1/1/2
description "another port to a another router"
ethernet
mode hybrid
encap-type dot1q
egress-scheduler-policy "qos-port-scheduler"
crc-monitor
sd-threshold 5 multiplier 5
sf-threshold 3 multiplier 5
window-size 60
exit
access
egress
queue-group "policer-output-queues" instance 1 create
accounting-policy 1
collect-stats
exit
exit
exit
down-on-internal-error
exit
no shutdown
exit
port 1/1/3
description "port 3 to some third router"
ethernet
mode access
encap-type dot1q
mtu 2000
egress-scheduler-policy "strict-scheduler"
network
queue-policy "ncq-only"
accounting-policy 12
collect-stats
egress
queue-group "some-shaping-policy" instance 1 create
accounting-policy 1
collect-stats
agg-rate
rate 50000
exit
exit
queue-group "another-shaping-policy" instance 1 create
accounting-policy 1
collect-stats
agg-rate
rate 50000
exit
exit
queue-group "this-shaper-is-cool" instance 1 create
agg-rate
rate 1000000
exit
exit
exit
exit
exit
no shutdown
exit
"""
    template = """
<group name="system.ports">
port {{ id }}
shutdown {{ admin_enabled | set(false) }}
description "{{ description | ORPHRASE | strip('"') }}"
<group name="ethernet">
ethernet {{ _start_ }}
mode {{ mode }}
encap-type {{ encap_type }}
mtu {{ mtu | DIGIT }}
egress-scheduler-policy {{ egress_sched_policy | strip('"') }}
loopback internal persistent {{ loop_internal | set(true) }}
<group name="network">
network {{ _start_ }}
queue-policy {{ queue_policy | ORPHRASE | strip('"') }}
accounting-policy {{ accounting_policy | DIGIT }}
collect-stats {{ collect_stats | set(true) }}
<group name="egress">
egress {{ _start_ }}
<group name="queuegroups*">
queue-group {{ name | strip('"') }} instance 1 create
rate {{ agg_rate | DIGIT }}
exit {{_end_}}
</group>
## this "exit {{ _end_ }}" had wrong indentation level, leading to
## group name="egress" finishing too early
exit {{_end_}}
</group>
exit {{_end_}}
</group>
lldp {{ lldp_enabled | set(true) }}
exit {{_end_}}
</group>
no shutdown {{admin_enabled | set(true)}}
exit {{_end_}}
</group>
"""
    parser = ttp(data=data_to_parse, template=template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # debug print disabled for consistency with the other tests in this file
    # pprint.pprint(res, width=150)
    assert res == [
        [
            {
                "system": {
                    "ports": [
                        {
                            "admin_enabled": True,
                            "description": "port 1 description",
                            "ethernet": {
                                "encap_type": "dot1q",
                                "lldp_enabled": True,
                                "mode": "hybrid",
                                "network": {
                                    "accounting_policy": "12",
                                    "collect_stats": True,
                                    "egress": {
                                        "queuegroups": [
                                            {
                                                "agg_rate": "50000",
                                                "name": "qos-policy-for-router1",
                                            }
                                        ]
                                    },
                                    "queue_policy": "ncq-only",
                                },
                            },
                            "id": "1/1/1",
                        },
                        {
                            "admin_enabled": True,
                            "description": "another port to a another router",
                            "ethernet": {
                                "egress_sched_policy": "qos-port-scheduler",
                                "encap_type": "dot1q",
                                "mode": "hybrid",
                            },
                            "id": "1/1/2",
                        },
                        {
                            "admin_enabled": True,
                            "description": "port 3 to some third router",
                            "ethernet": {
                                "egress_sched_policy": "strict-scheduler",
                                "encap_type": "dot1q",
                                "mode": "access",
                                "mtu": "2000",
                                "network": {
                                    "accounting_policy": "12",
                                    "collect_stats": True,
                                    "egress": {
                                        "queuegroups": [
                                            {
                                                "agg_rate": "50000",
                                                "name": "some-shaping-policy",
                                            },
                                            {
                                                "agg_rate": "50000",
                                                "name": "another-shaping-policy",
                                            },
                                            {
                                                "agg_rate": "1000000",
                                                "name": "this-shaper-is-cool",
                                            },
                                        ]
                                    },
                                    "queue_policy": "ncq-only",
                                },
                            },
                            "id": "1/1/3",
                        },
                    ]
                }
            }
        ]
    ]
# test_slack_answer_2()
def test_slack_answer_3():
    """
    Problem was that interfaces were matched by regexes from both ospf and ospfv3
    groups, decision logic was not able to properly work out to which group result
    should belong, changed behavior to check if match is a child of current record
    group and use it if so. Also had to change how group id encoded from string to
    tuple of two elements ("group path", "group index",)
    Here is some debug output until problem was fixed:
    self.record["GRP_ID"]: service.vprns*.{{id}}**.ospf3**::1
    re_["GROUP"].group_id: service.vprns*.{{id}}**.ospf**.interfaces*::0
    re_idex: 0
    self.record["GRP_ID"]: service.vprns*.{{id}}**.ospf3**::1
    re_["GROUP"].group_id: service.vprns*.{{id}}**.ospf3**.interfaces*::0
    re_idex: 1
    # problem was happening because logic was not able to decide that need to use this match
    self.record["GRP_ID"]: service.vprns*.{{id}}**.ospf**::0
    re_["GROUP"].group_id: service.vprns*.{{id}}**.ospf**.interfaces*::0
    re_idex: 0
    # problem was happening because logic was picking up this match
    self.record["GRP_ID"]: service.vprns*.{{id}}**.ospf**::0
    re_["GROUP"].group_id: service.vprns*.{{id}}**.ospf3**.interfaces*::0
    re_idex: 1
    Wrong results:
    [[{'service': {'vprns': [{'4': {'name': 'ospf_version3_vprn',
    'ospf': {'area': '0.0.0.0', 'interfaces': [{'name': 'interface-one'}]},
    'ospf3': {'area': '0.0.0.0', 'interfaces': [{'name': 'interface-two'}]}},
    '5': {'name': 'vprn5', 'ospf': {'area': '0.0.0.0'},
    'ospf3': {'interfaces': [{'name': 'interface-three'}]}}}]}}]]
    """
    # minimal reproduction: two vprn instances with overlapping ospf/ospf3 groups
    data = """
service
vprn 4 name "ospf_version3_vprn" customer 40 create
ospf
area 0.0.0.0
interface "interface-one"
ospf3 0
area 0.0.0.0
interface "interface-two"
vprn 5 name "vprn5" customer 50 create
ospf
area 0.0.0.0
interface "interface-three"
"""
    template = """
<group name="service.vprns*.{{id}}**">
vprn {{ id }} name {{ name | ORPHRASE | strip('"') }} customer {{ ignore }} create
<group name="ospf**">
ospf {{ _start_ }}
area {{ area }}
<group name="interfaces*">
interface {{ name | ORPHRASE | strip('"') }}
</group>
</group>
<group name="ospf3**">
ospf3 0 {{ _start_ }}
area {{ area }}
<group name="interfaces*">
interface {{ name | ORPHRASE | strip('"') }}
</group>
</group>
</group>
"""
    parser = ttp(data, template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res, width=100)
    # each interface must be attributed to its own ospf / ospf3 parent group
    assert res == [
        [
            {
                "service": {
                    "vprns": [
                        {
                            "4": {
                                "name": "ospf_version3_vprn",
                                "ospf": {
                                    "area": "0.0.0.0",
                                    "interfaces": [{"name": "interface-one"}],
                                },
                                "ospf3": {
                                    "area": "0.0.0.0",
                                    "interfaces": [{"name": "interface-two"}],
                                },
                            },
                            "5": {
                                "name": "vprn5",
                                "ospf": {
                                    "area": "0.0.0.0",
                                    "interfaces": [{"name": "interface-three"}],
                                },
                            },
                        }
                    ]
                }
            }
        ]
    ]
# test_slack_answer_3()
def test_slack_answer_3_full():
    """Full-size variant of test_slack_answer_3.

    Parses a complete SR OS "service" configuration (five vprn instances with
    interfaces, saps, bgp, ospf and ospf3 sections) and checks that matches
    from overlapping ospf / ospf3 child groups land in the correct parent.
    """
    data = """
service
vprn 1 name "vprn1" customer 10 create
interface "loopback" create
exit
interface "interface-one" create
exit
interface "interface-two" create
exit
interface "bgp-interface" create
exit
exit
vprn 2 name "vprn2" customer 20 create
interface "loopback" create
exit
interface "interface-two" create
exit
interface "bgp-interface" create
exit
exit
vprn 3 name "vprn3" customer 30 create
interface "loopback" create
exit
interface "interface-two" create
exit
exit
vprn 4 name "ospf_version3_vprn" customer 40 create
interface "loopback" create
exit
interface "interface-two" create
exit
exit
vprn 5 name "vprn5" customer 50 create
interface "loopback" create
exit
interface "interface-two" create
exit
interface "bgp-interface" create
exit
exit
vprn 1 name "vprn1" customer 10 create
interface "loopback" create
address 10.10.10.1/32
loopback
exit
interface "interface-one" create
address 10.10.10.10/30
sap 1/1/1:10 create
exit
exit
interface "interface-two" create
address 10.10.10.100/31
sap lag-5:80 create
exit
exit
interface "bgp-interface" create
address 10.10.10.200/31
sap lag-4:100 create
exit
exit
ospf
area 0.0.0.0
interface "interface-two"
passive
no shutdown
exit
exit
no shutdown
exit
no shutdown
exit
vprn 2 name "vprn2" customer 20 create
interface "interface-two" create
address 10.11.11.10/31
sap lag-1:50 create
exit
exit
ospf
area 0.0.0.0
interface "interface-two"
passive
no shutdown
exit
exit
no shutdown
exit
no shutdown
exit
vprn 3 name "vprn3" customer 30 create
interface "loopback" create
address 10.12.12.12/32
loopback
exit
interface "interface-two" create
address 10.12.12.100/31
sap lag-5:33 create
exit
exit
ospf
area 0.0.0.0
interface "interface-two"
passive
no shutdown
exit
exit
no shutdown
exit
no shutdown
exit
vprn 4 name "ospf_version3_vprn" customer 40 create
interface "loopback" create
address 10.40.40.10/32
ipv6
address fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:ae46/128
exit
loopback
exit
interface "interface-two" create
address 10.40.40.100/31
ipv6
address fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:1111/64
exit
sap lag-5:800 create
exit
exit
ospf
area 0.0.0.0
interface "interface-two"
passive
no shutdown
exit
exit
no shutdown
exit
ospf3 0
area 0.0.0.0
interface "interface-two"
passive
no shutdown
exit
exit
no shutdown
exit
no shutdown
exit
vprn 5 name "vprn5" customer 50 create
interface "loopback" create
address 10.50.50.50/32
loopback
exit
interface "interface-two" create
address 10.50.50.100/31
sap lag-5:5 create
exit
exit
interface "bgp-interface" create
address 10.50.50.200/31
sap lag-1:602 create
exit
exit
bgp
group "eBGP"
peer-as 4444
neighbor 10.50.50.201
exit
exit
no shutdown
exit
ospf
area 0.0.0.0
interface "interface-two"
passive
no shutdown
exit
exit
no shutdown
exit
no shutdown
exit
exit
"""
    template = """
#-------------------------------------------------- {{ ignore }}
echo "Service Configuration" {{ ignore }}
#-------------------------------------------------- {{ ignore }}
service {{ ignore }}
<group name="service.vprns*.{{id}}**">
vprn {{ id }} name {{ name | ORPHRASE | strip('"') }} customer {{ ignore }} create
shutdown {{ admin_enabled | set("False") }}
description {{ description | ORPHRASE | strip('"') }}
vrf-import {{ import_policy | ORPHRASE | strip('"') }}
router-id {{ router_id }}
autonomous-system {{ local_as }}
route-distinguisher {{ loopback_ip }}:{{ vrf_routedist }}
vrf-target target:{{ ignore }}:{{ vrf_routetarget }}
vrf-target {{ vrf_export }} target:{{ ignore }}:{{ vrf_routetarget }}
<group name="interfaces*.{{name}}**">
interface {{ name | ORPHRASE | strip('"') }} create
shutdown {{ admin_enabled | set("False") }}
description {{ description | ORPHRASE | strip('"') }}
address {{ address | IP }}/{{ mask | DIGIT }}
ip-mtu {{ mtu }}
bfd {{ bfd_timers }} receive {{ ignore }} multiplier {{ bfd_interval }}
<group name="vrrp">
vrrp {{ instance }}
backup {{ backup }}
priority {{ priority }}
policy {{ policy }}
ping-reply {{ pingreply | set("True") }}
traceroute-reply {{ traceroute_reply | set("True") }}
init-delay {{ initdelay }}
message-interval {{ message_int_seconds }}
message-interval milliseconds {{ message_int_milliseconds }}
bfd-enable 1 interface {{ bfd_interface | ORPHRASE | strip('"')}} dst-ip {{ bfd_dst_ip }}
exit {{ _end_ }}
</group>
<group name="ipv6">
ipv6 {{ _start_ }}
address {{ address | IPV6 }}/{{ mask | DIGIT }}
address {{ address | _start_ | IPV6 }}/{{ mask | DIGIT }} dad-disable
link-local-address {{ linklocal_address | IPV6 }} dad-disable
<group name="vrrp">
vrrp {{ instance | _start_ }}
<group name="backup*">
backup {{ ip }}
</group>
priority {{ priority }}
policy {{ policy }}
ping-reply {{ pingreplay | set("True") }}
traceroute-reply {{ traceroute_reply | set("True") }}
init-delay {{ initdelay }}
message-interval milliseconds {{ message_int_milliseconds }}
exit {{ _end_ }}
</group>
exit {{ _end_ }}
</group>
<group name="vpls">
vpls {{ vpls_name | ORPHRASE | strip('"') | _start_ }}
exit {{ _end_ }}
</group>
<group name="sap**">
sap {{ port | _start_ }}:{{ vlan | DIGIT }} create
ingress {{ _exact_ }}
qos {{ qos_sap_ingress }}
<group name="_">
egress {{ _start_ }}
qos {{ qos_sap_egress }}
</group>
collect-stats {{ collect_stats | set("True") }}
accounting-policy {{ accounting_policy }}
exit {{ _end_}}
</group>
exit {{ _end_}}
</group>
<group name="staticroutes*">
static-route-entry {{ prefix | PREFIX | _start_ }}
black-hole {{ blackhole | set("True") }}
next-hop {{ nexthop | IP }}
shutdown {{ admin_enabled | set("False") }}
no shutdown {{ admin_enabled | set("True") }}
exit {{ _end_ }}
</group>
<group name="aggregates">
aggregate {{ agg_block | PREFIX | _start_ }} summary-only
</group>
<group name="router_advertisement">
router-advertisement {{ _start_ }}
interface {{ interface | ORPHRASE | strip('"') }}
use-virtual-mac {{ use_virtualmac | set("True") }}
no shutdown {{ admin_enabled | set("True") }}
exit {{ _end_ }}
</group>
<group name="bgp**">
bgp {{ _start_ }}
min-route-advertisement {{ min_route_advertisement | DIGIT }}
<group name="peergroups*">
group {{ name | ORPHRASE | strip('"') }}
family {{ family | ORPHRASE | split(" ") }}
type {{ peer_type | ORPHRASE }}
import {{ importpolicy | ORPHRASE | strip('"') }}
export {{ exportpolicy | ORPHRASE | strip('"') }}
peer-as {{ remote_as }}
bfd-enable {{ bfd_enabled | set("True") }}
<group name="neighbors*">
neighbor {{ address | IP | _start_ }}
neighbor {{ address | IPV6 | _start_ }}
shutdown {{ admin_enabled | set("False") }}
keepalive {{ keepalive }}
hold-time {{ holdtime }}
bfd-enable {{ bfd_enabled | set("True") }}
as-override {{ as_override | set("True") }}
exit {{ _end_ }}
</group>
exit {{ _end_ }}
</group>
no shutdown {{ admin_enabled | set("True") | _start_ }}
exit {{ _end_ }}
</group>
<group name="ospf**">
ospf {{ _start_ }}{{ _exact_ }}
area {{ area }}
<group name="interfaces*">
interface {{ name | ORPHRASE | strip('"') | _start_ }}
passive {{ passive | set("True") }}
exit {{ _end_ }}
</group>
no shutdown {{ admin_enabled | set("True") }}
exit {{ _end_ }}
</group>
<group name="ospf3**">
ospf3 0 {{ _start_ }}{{ _exact_ }}
area {{ area }}
<group name="interfaces*">
interface {{ name | ORPHRASE | strip('"') | _start_ }}
passive {{ passive | set("True") }}
exit {{ _end_ }}
</group>
no shutdown {{ admin_enabled | set("True") }}
exit {{ _end_ }}
</group>
no shutdown {{ admin_enabled | set("True") }}
exit {{ _end_ }}
</group>
"""
    parser = ttp(data, template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # debug print disabled for consistency with the other tests in this file
    # pprint.pprint(res, width=100)
    assert res == [
        [
            {
                "service": {
                    "vprns": [
                        {
                            "1": {
                                "admin_enabled": "True",
                                "interfaces": [
                                    {
                                        "bgp-interface": {
                                            "address": "10.10.10.200",
                                            "mask": "31",
                                            "sap": {"port": "lag-4", "vlan": "100"},
                                        },
                                        "interface-one": {
                                            "address": "10.10.10.10",
                                            "mask": "30",
                                            "sap": {"port": "1/1/1", "vlan": "10"},
                                        },
                                        "interface-two": {
                                            "address": "10.10.10.100",
                                            "mask": "31",
                                            "sap": {"port": "lag-5", "vlan": "80"},
                                        },
                                        "loopback": {
                                            "address": "10.10.10.1",
                                            "mask": "32",
                                        },
                                    }
                                ],
                                "name": "vprn1",
                                "ospf": {
                                    "admin_enabled": "True",
                                    "area": "0.0.0.0",
                                    "interfaces": [
                                        {"name": "interface-two", "passive": "True"}
                                    ],
                                },
                            },
                            "2": {
                                "admin_enabled": "True",
                                "interfaces": [
                                    {
                                        "bgp-interface": {},
                                        "interface-two": {
                                            "address": "10.11.11.10",
                                            "mask": "31",
                                            "sap": {"port": "lag-1", "vlan": "50"},
                                        },
                                        "loopback": {},
                                    }
                                ],
                                "name": "vprn2",
                                "ospf": {
                                    "admin_enabled": "True",
                                    "area": "0.0.0.0",
                                    "interfaces": [
                                        {"name": "interface-two", "passive": "True"}
                                    ],
                                },
                            },
                            "3": {
                                "admin_enabled": "True",
                                "interfaces": [
                                    {
                                        "interface-two": {
                                            "address": "10.12.12.100",
                                            "mask": "31",
                                            "sap": {"port": "lag-5", "vlan": "33"},
                                        },
                                        "loopback": {
                                            "address": "10.12.12.12",
                                            "mask": "32",
                                        },
                                    }
                                ],
                                "name": "vprn3",
                                "ospf": {
                                    "admin_enabled": "True",
                                    "area": "0.0.0.0",
                                    "interfaces": [
                                        {"name": "interface-two", "passive": "True"}
                                    ],
                                },
                            },
                            "4": {
                                "admin_enabled": "True",
                                "interfaces": [
                                    {
                                        "interface-two": {
                                            "address": "10.40.40.100",
                                            "ipv6": {
                                                "address": "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:1111",
                                                "mask": "64",
                                            },
                                            "mask": "31",
                                            "sap": {"port": "lag-5", "vlan": "800"},
                                        },
                                        "loopback": {
                                            "address": "10.40.40.10",
                                            "ipv6": {
                                                "address": "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:ae46",
                                                "mask": "128",
                                            },
                                            "mask": "32",
                                        },
                                    }
                                ],
                                "name": "ospf_version3_vprn",
                                "ospf": {
                                    "admin_enabled": "True",
                                    "area": "0.0.0.0",
                                    "interfaces": [
                                        {"name": "interface-two", "passive": "True"}
                                    ],
                                },
                                "ospf3": {
                                    "admin_enabled": "True",
                                    "area": "0.0.0.0",
                                    "interfaces": [
                                        {"name": "interface-two", "passive": "True"}
                                    ],
                                },
                            },
                            "5": {
                                "admin_enabled": "True",
                                "bgp": {
                                    "admin_enabled": "True",
                                    "peergroups": [
                                        {
                                            "name": "eBGP",
                                            "neighbors": [{"address": "10.50.50.201"}],
                                            "remote_as": "4444",
                                        }
                                    ],
                                },
                                "interfaces": [
                                    {
                                        "bgp-interface": {
                                            "address": "10.50.50.200",
                                            "mask": "31",
                                            "sap": {"port": "lag-1", "vlan": "602"},
                                        },
                                        "interface-two": {
                                            "address": "10.50.50.100",
                                            "mask": "31",
                                            "sap": {"port": "lag-5", "vlan": "5"},
                                        },
                                        "loopback": {
                                            "address": "10.50.50.50",
                                            "mask": "32",
                                        },
                                    }
                                ],
                                "name": "vprn5",
                                "ospf": {
                                    "area": "0.0.0.0",
                                    "interfaces": [
                                        {"name": "interface-two", "passive": "True"}
                                    ],
                                },
                            },
                        }
                    ]
                }
            }
        ]
    ]
# test_slack_answer_3_full()
def test_issue_45_for_junos_cfg():
    """Parse a JunOS curly-brace config (system / services / tacplus-server).

    Exercises literal ``{`` / ``}`` anchors together with ``_start_`` /
    ``_end_`` markers and a dynamic-path group for tacacs servers.
    """
    data = """
system {
host-name LAB-MX-1;
time-zone some/time;
default-address-selection;
no-redirects;
no-ping-record-route;
no-ping-time-stamp;
tacplus-server {
1.1.1.1 {
port 49;
secret "<SECRET_HASH>"; ## SECRET-DATA
source-address 172.16.17.32;
}
172.16.31.10 {
port 49;
secret "<SECRET_HASH>"; ## SECRET-DATA
source-address 172.16.17.32;
}
172.16.17.32 {
port 49;
secret "<SECRET_HASH>"; ## SECRET-DATA
source-address 172.16.17.32;
}
}
services {
ssh {
root-login deny;
no-tcp-forwarding;
protocol-version v2;
max-sessions-per-connection 32;
client-alive-count-max 3;
client-alive-interval 10;
connection-limit 10;
rate-limit 5;
}
netconf {
ssh {
connection-limit 10;
rate-limit 4;
}
}
}
}
"""
    template = """
<group name="system_level">
system { {{ _start_ }}
host-name {{ HOSTNAME }};
time-zone {{ TZ }};
default-address-selection; {{ default_address_selection | set(True) }}
no-redirects; {{ no_redirects | set(True) }}
no-ping-record-route; {{ no_ping_record_route | set(True) }}
no-ping-time-stamp; {{ no_ping_time_stamp | set(True) }}
<group name="services">
services { {{ _start_ }}
<group name="{{ service }}">
{{ service }} {
http; {{ http | set(true) }}
https; {{ https | set(true) }}
no-tcp-forwarding; {{ no-tcp-fwding | set(true) }}
protocol-version {{ ssh-proto }};
connection-limit {{ connection-limit | DIGIT }};
rate-limit {{rate-limit | DIGIT }};
root-login deny; {{ root-login | set(false) }}
max-sessions-per-connection {{ max-sessions | DIGIT }};
client-alive-count-max {{ client-alive-count-max | DIGIT }};
client-alive-interval {{ client-alive-interval | DIGIT }};
<group name="ssh">
ssh; {{ ssh | set(true) }}
</group>
<group name="ssh">
ssh { {{ _start_ }}
connection-limit {{ connection-limit | DIGIT }};
rate-limit {{ rate-limit | DIGIT }};
} {{ _end_ }}
</group>
} {{ _end_ }}
</group>
} {{ _end_ }}
</group>
<group name="internet-options">
internet-options { {{ _start_ }}
icmpv4-rate-limit packet-rate {{ packet-rate| DIGIT }};
icmpv6-rate-limit packet-rate {{ packet-rate| DIGIT }};
no-source-quench; {{ no-source-quench | set(true) }}
tcp-drop-synfin-set; {{ tcp-drop-synfin-set | set(true) }}
no-tcp-reset {{ no-tcp-reset }};
} {{ _end_ }}
</group>
authentication-order [{{ authentication-order }}];
<group name="ports">
ports { {{ _start_ }}
auxiliary disable; {{ auxiliary | set(false) }}
} {{ _end_ }}
</group>
<group name="root-authentication">
root-authentication { {{ _start_ }}
encrypted-password "{{ <PASSWORD>-password }}"; ## SECRET-DATA
} {{ _end_ }}
</group>
<group name="dns" itemize="name_server">
name-server { {{ _start_ }}
{{ name_server | IP | _line_ | to_list }};
} {{ _end_ }}
</group>
<group name="commit">
commit { {{ _start_ }}
synchronize; {{ commit_sync | set(true) }}
persist-groups-inheritance; {{ commit_persist-groups-inherit | set(true) }}
} {{ _end_ }}
</group>
<group name="tacacs">
tacplus-server { {{ _start_ }}
<group name="tacacs-servers.{{ tac_server }}">
{{ tac_server | IP }} {
port {{ tac_port }};
secret "{{ tac_secret }}"; ## SECRET-DATA
source-address {{ tac_source | IP }};
} {{ end }}
</group>
} {{ end }}
</group>
} {{ end }}
</group>
"""
    parser = ttp(data, template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res, width=100)
    # NOTE(review): the expected tacacs server keys (1.1.1.1 / 2.2.2.2 / 4.4.4.4)
    # and tac_source (5.5.5.5) do not match the addresses in the data above —
    # the fixture looks scrubbed; verify against the upstream test suite.
    assert res == [
        [
            {
                "system_level": {
                    "HOSTNAME": "LAB-MX-1",
                    "TZ": "some/time",
                    "default_address_selection": True,
                    "no_ping_record_route": True,
                    "no_ping_time_stamp": True,
                    "no_redirects": True,
                    "services": {
                        "netconf": {
                            "ssh": {"connection-limit": "10", "rate-limit": "4"}
                        },
                        "ssh": {
                            "client-alive-count-max": "3",
                            "client-alive-interval": "10",
                            "connection-limit": "10",
                            "max-sessions": "32",
                            "no-tcp-fwding": True,
                            "rate-limit": "5",
                            "root-login": False,
                            "ssh-proto": "v2",
                        },
                    },
                    "tacacs": {
                        "tacacs-servers": {
                            "1.1.1.1": {
                                "tac_port": "49",
                                "tac_secret": "<SECRET_HASH>",
                                "tac_source": "5.5.5.5",
                            },
                            "2.2.2.2": {
                                "tac_port": "49",
                                "tac_secret": "<SECRET_HASH>",
                                "tac_source": "5.5.5.5",
                            },
                            "4.4.4.4": {
                                "tac_port": "49",
                                "tac_secret": "<SECRET_HASH>",
                                "tac_source": "5.5.5.5",
                            },
                        }
                    },
                }
            }
        ]
    ]
# test_issue_45_for_junos_cfg()
def test_faq_multiline_output_matching():
    """Match a multi-line LLDP system description using _line_ + joinmatches.

    The nested group starts at "System Description:" and joins every
    subsequent line with a space until the "Time remaining" terminator.
    """
    data = """
Local Intf: Te2/1/23
System Name: r1.lab.local
System Description:
Cisco IOS Software, Catalyst 1234 L3 Switch Software (cat1234e-ENTSERVICESK9-M), Version 1534.1(1)SG, RELEASE SOFTWARE (fc3)
Technical Support: http://www.cisco.com/techsupport
Copyright (c) 1986-2012 by Cisco Systems, Inc.
Compiled Sun 15-Apr-12 02:35 by p
Time remaining: 92 seconds
"""
    template = """
<group>
Local Intf: {{ local_intf }}
System Name: {{ peer_name }}
<group name="peer_system_description">
System Description: {{ _start_ }}
{{ sys_description | _line_ | joinmatches(" ") }}
Time remaining: {{ ignore }} seconds {{ _end_ }}
</group>
</group>
"""
    parser = ttp(data, template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res, width=100)
    assert res == [
        [
            [
                {
                    "local_intf": "Te2/1/23",
                    "peer_name": "r1.lab.local",
                    "peer_system_description": {
                        "sys_description": "Cisco IOS Software, Catalyst 1234 L3 Switch "
                        "Software (cat1234e-ENTSERVICESK9-M), Version "
                        "1534.1(1)SG, RELEASE SOFTWARE (fc3) Technical "
                        "Support: http://www.cisco.com/techsupport "
                        "Copyright (c) 1986-2012 by Cisco Systems, Inc. "
                        "Compiled Sun 15-Apr-12 02:35 by p"
                    },
                }
            ]
        ]
    ]
# test_faq_multiline_output_matching()
def test_issue_52_answer():
    """Group macro post-processing: split a joined multi-line match.

    Each "Origin:" record is collected with _line_/joinmatches, then the
    template macro splits the joined text into "name" and "address" keys.
    """
    raw_text = """
Origin:
Some random name
Example Address, example number, example city
Origin:
Some random name 2
Example Address, example number, example city 2
Origin:
Some random name 3
Example Address, example number, example city 3
One more string
"""
    tmpl = """
<macro>
def process(data):
    lines = data["match"].splitlines()
    name = lines[0]
    address = lines[1]
    return {"name": name, "address": address}
</macro>
<group name="origin*" macro="process">
Origin: {{ _start_ }}
{{ match | _line_ | joinmatches }}
</group>
"""
    ttp_parser = ttp(raw_text, tmpl, log_level="ERROR")
    ttp_parser.parse()
    parsed = ttp_parser.result()
    # pprint.pprint(parsed, width=100)
    expected = [
        [
            {
                "origin": [
                    {
                        "address": "Example Address, example number, example city",
                        "name": "Some random name",
                    },
                    {
                        "address": "Example Address, example number, example city 2",
                        "name": "Some random name 2",
                    },
                    {
                        "address": "Example Address, example number, example city 3",
                        "name": "Some random name 3",
                    },
                ]
            }
        ]
    ]
    assert parsed == expected
# test_issue_52_answer()
def test_issue_51_answer():
    """ test workaround for removing <> chars from input data """
    source_text = """
Name:Jane<br>
Name:Michael<br>
Name:July<br>
"""
    tmpl = """
<group name="people">
Name:{{ name }}<br>
</group>
"""
    # an alternative workaround that also works:
    #   template = "Name:{{ name }}br"
    #   data = data.replace("<", "").replace(">", "")
    # escaping the angle brackets did not work — it fails with an
    # xml parsing error:
    #   template = "Name:{{ name }}<br>"
    #   data = data.replace("<", "<").replace(">", ">")
    ttp_parser = ttp(source_text, tmpl, log_level="ERROR")
    ttp_parser.parse()
    parsed = ttp_parser.result()
    # pprint.pprint(parsed, width=100)
    expected = [
        [{"people": [{"name": "Jane"}, {"name": "Michael"}, {"name": "July"}]}]
    ]
    assert parsed == expected
# test_issue_51_answer()
def test_issue_50():
    """Two groups writing into the same dynamic path (ifaces.{{ name }}).

    The first group matches top-level interfaces, the second matches
    lower-level (redundant/subscriber) interfaces; both are filtered with
    contains="ipv4,ipv6" so only interfaces with addresses survive.
    """
    template = """
<input load="text">
    interface "BNG-RH201-CORE"
        address 11.11.11.11/31
        description "BNG-RH201-CORE"
        ldp-sync-timer 10
        port lag-107:709
        ipv6
            address fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/64
        exit
        bfd 150 receive 150 multiplier 3
        no shutdown
    exit
    interface "BNG-RH202-CORE"
        address 22.22.22.22/31
        description "BNG-RH201-CORE"
        ldp-sync-timer 10
        port lag-108:809
        ipv6
            address fdf8:f53e:61e4::18/64
        exit
        bfd 150 receive 150 multiplier 3
        no shutdown
    exit
    interface "system"
        address 33.33.33.33/32
        ipv6
            address fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/128
        exit
        no shutdown
    exit
    ies 97 name "OTDR-MGT" customer 1 create
        description "OTDR-MGT"
        interface "OTDR-MGT" create
            address 172.16.31.10/25
            vrrp 97
                backup 10.20.30.1
                priority 200
            exit
            vpls "OTDR-MGT-VPLS"
            exit
        exit
        no shutdown
    exit
    ies 99 name "OLT-MGT" customer 1 create
        description "OLT-INBAND-MGT"
        interface "OLT-MGT" create
            address 192.168.3.11/25
            vrrp 1
                backup 10.20.40.1
                priority 200
            exit
            vpls "OLT-MGT-VPLS"
            exit
        exit
        no shutdown
    exit
    ies 100 name "100" customer 1 create
        description "IES 100 for subscribers"
        redundant-interface "shunt" create
            address 66.66.66.66/31
            spoke-sdp 1:100 create
                no shutdown
            exit
        exit
        subscriber-interface "s100" create
            description " Subscriber interface for subscribers"
            allow-unmatching-subnets
            address 172.16.58.3/22 gw-ip-address 192.168.3.11
            address 172.16.31.10/20 gw-ip-address 192.168.3.11
            group-interface "s100-lag210-vlan101" create
                tos-marking-state trusted
                ipv6
                    router-advertisements
                        managed-configuration
                        no shutdown
                    exit
                    dhcp6
                        proxy-server
                            no shutdown
                        exit
                    exit
                exit
            exit
        exit
    exit
</input>
<group name="ifaces.{{ name }}" contains="ipv4,ipv6">
## group to match top level interfaces
interface "{{ name }}"
    description {{ description | re(".+") | strip('"') }}
    address {{ ipv4 | joinmatches('; ') }}
    address {{ ipv6 | contains(":") | joinmatches('; ') }}
exit {{ _end_ }}
</group>
<group name="ifaces.{{ name }}" contains="ipv4,ipv6">
## group to match lower level interfaces
        interface "{{ name | _start_ }}" create
        {{ iftype }}-interface "{{ name | _start_ }}" create
            description {{ description | re(".+") | strip('"') | strip }}
            address {{ ipv4 | contains(".") | joinmatches('; ') }}
            address {{ ipv4 | contains(".") | joinmatches('; ') }} gw-ip-address {{ ignore }}
        exit {{ _end_ }}
</group>
"""
    parser = ttp(template=template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    # NOTE(review): some expected ipv4 values (172.16.17.32/31 for
    # BNG-RH202-CORE, 192.168.127.12/32 for "system") do not match the
    # addresses in the embedded data above (22.22.22.22/31, 33.33.33.33/32) —
    # the fixture looks scrubbed; verify against the upstream test suite.
    assert res == [
        [
            {
                "ifaces": {
                    "BNG-RH201-CORE": {
                        "description": "BNG-RH201-CORE",
                        "ipv4": "11.11.11.11/31",
                        "ipv6": "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/64",
                    },
                    "BNG-RH202-CORE": {
                        "description": "BNG-RH201-CORE",
                        "ipv4": "172.16.17.32/31",
                        "ipv6": "fdf8:f53e:61e4::18/64",
                    },
                    "OLT-MGT": {"ipv4": "192.168.3.11/25"},
                    "OTDR-MGT": {"ipv4": "172.16.31.10/25"},
                    "s100": {
                        "description": "Subscriber interface for subscribers",
                        "iftype": "subscriber",
                        "ipv4": "172.16.58.3/22; 172.16.31.10/20",
                    },
                    "shunt": {"iftype": "redundant", "ipv4": "66.66.66.66/31"},
                    "system": {
                        "ipv4": "192.168.127.12/32",
                        "ipv6": "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/128",
                    },
                }
            }
        ]
    ]
# test_issue_50()
def test_start_with_set():
    """Both template lines carry _start_ plus set(); each input line must
    start its own match producing the corresponding boolean."""
    source = """
authentication {
inactive: authentication {
"""
    tmpl = """
authentication { {{ inactive | set(False) | _start_ }}
inactive: authentication { {{ inactive | set(True) | _start_ }}
"""
    ttp_parser = ttp(source, tmpl, log_level="ERROR")
    ttp_parser.parse()
    parsed = ttp_parser.result()
    # pprint.pprint(parsed)
    expected = [[[{"inactive": False}, {"inactive": True}]]]
    assert parsed == expected
# test_start_with_set()
def test_ios_bgp_pers_pars():
    """Parse IOS BGP neighbors with per-group defaults.

    The <vars> block supplies default values that are merged into every
    per-neighbor group via default="defaults_bgp_peers", so missing lines
    still produce empty-string keys in the result.
    """
    template = """
<vars>
defaults_bgp_peers = {
"description": "",
"remote-as": "",
"shutdown": "no",
"inherit_peer-session": "",
"update-source": "",
"password": ""
}
</vars>
<group name="bgp_peers">
<group name="{{ ASN }}">
router bgp {{ ASN }}
<group name="{{ PeerIP }}" default="defaults_bgp_peers">
neighbor {{ PeerIP }} remote-as {{ remote-as }}
neighbor {{ PeerIP }} description {{ description | ORPHRASE }}
neighbor {{ PeerIP | let("shutdown", "yes") }} shutdown
neighbor {{ PeerIP }} inherit peer-session {{ inherit_peer-session }}
neighbor {{ PeerIP }} password {{ password | ORPHRASE }}
neighbor {{ PeerIP }} update-source {{ update-source }}
</group>
</group>
</group>
"""
    data = """
router bgp 65100
neighbor 1.1.1.1 remote-as 1234
neighbor 1.1.1.1 description Some Description here
neighbor 1.1.1.1 shutdown
neighbor 1.1.1.1 inherit peer-session session_1
neighbor 1.1.1.1 password <PASSWORD>
neighbor 1.1.1.1 update-source Loopback 1
neighbor 1.1.1.2 remote-as 1234
neighbor 1.1.1.2 inherit peer-session session_1
neighbor 1.1.1.2 update-source Loopback 1
"""
    # log level lowered from DEBUG to ERROR for consistency with the
    # other tests and to keep the test run quiet
    parser = ttp(data, template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    assert res == [
        [
            {
                "bgp_peers": {
                    "65100": {
                        "1.1.1.1": {
                            "description": "Some Description here",
                            "inherit_peer-session": "session_1",
                            "password": "<PASSWORD>",
                            "remote-as": "1234",
                            "shutdown": "yes",
                            "update-source": "",
                        },
                        "1.1.1.2": {
                            "description": "",
                            "inherit_peer-session": "session_1",
                            "password": "",
                            "remote-as": "1234",
                            "shutdown": "no",
                            "update-source": "",
                        },
                    }
                }
            }
        ]
    ]
# test_ios_bgp_pers_pars()
def test_ip_address_parsing():
    """Parse interface primary and secondary IPv4 addresses.

    Secondary addresses are excluded from the primary match via
    exclude("secondary") and collected into the ipv4_secondary* child
    group with an is_secondary flag; to_ip/with_prefixlen converts
    "address mask" pairs into prefix notation.
    """
    data = """
interface Vlan99
description vlan99_interface
ip address 192.168.127.12 255.255.255.0 secondary
ip address 192.168.3.11 255.255.255.0 secondary
ip address 10.99.10.1 255.255.255.0
load-interval 60
bandwidth 10000000
!
interface Vlan100
description vlan100_interface
ip address 10.100.10.1 255.255.255.0
load-interval 60
bandwidth 10000000
!
"""
    template = """
<group name="interface">
interface {{ interface }}
description {{ description }}
ip address {{ ipv4_addr | PHRASE | exclude("secondary") | to_ip | with_prefixlen }}
load-interval {{ load-interval }}
bandwidth {{ bandwidth }}
<group name="ipv4_secondary*">
ip address {{ ipv4_addr | PHRASE | let("is_secondary", True) | to_ip | with_prefixlen }} secondary
</group>
</group>
"""
    # log_level="ERROR" added for consistency with the other tests in this file
    parser = ttp(data, template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    assert res == [
        [
            {
                "interface": [
                    {
                        "bandwidth": "10000000",
                        "description": "vlan99_interface",
                        "interface": "Vlan99",
                        "ipv4_addr": "10.99.10.1/24",
                        "ipv4_secondary": [
                            {"ipv4_addr": "192.168.127.12/24", "is_secondary": True},
                            {"ipv4_addr": "192.168.3.11/24", "is_secondary": True},
                        ],
                        "load-interval": "60",
                    },
                    {
                        "bandwidth": "10000000",
                        "description": "vlan100_interface",
                        "interface": "Vlan100",
                        "ipv4_addr": "10.100.10.1/24",
                        "load-interval": "60",
                    },
                ]
            }
        ]
    ]
# test_ip_address_parsing()
def test_vlans_parsing():
    """Parse "show interfaces trunk"-style output into vlan sections.

    Each section header acts as a _start_ anchor; vlan lists are expanded
    with unrange('-', ',') and then split(",") into a list of strings.
    """
    template = """
<group name="ports_summary*">
{{ port }} {{ mode }} {{ encap }} {{ satus }} {{ native_vlan | DIGIT }}
</group>
<group name="vlans_allowed">
Port Vlans allowed on trunk {{ _start_ }}
<group name="interfaces*">
{{ port }} {{ vlans | unrange('-', ',') | split(",") }}
</group>
{{ _end_ }}
</group>
<group name="vlans_active">
Port Vlans allowed and active in management domain {{ _start_ }}
<group name="interfaces*">
{{ port }} {{ vlans | unrange('-', ',') | split(",") }}
</group>
{{ _end_ }}
</group>
<group name="vlans_forwarding">
Port Vlans in spanning tree forwarding state and not pruned {{ _start_ }}
<group name="interfaces*">
{{ port }} {{ vlans | unrange('-', ',') | split(",") }}
</group>
{{ _end_ }}
</group>
"""
    data = """
Port Mode Encapsulation Status Native vlan
Gi0 on 802.1q trunking 1
Gi7 on 802.1q trunking 1
Port Vlans allowed on trunk
Gi0 1,8,999,1002-1005
Gi7 1,100,120,1000,1002-1005
Port Vlans allowed and active in management domain
Gi0 1,8,999
Gi7 1,100,120,1000
Port Vlans in spanning tree forwarding state and not pruned
Gi0 1,8,999
Gi7 1,100,120,1000
"""
    # log level lowered from DEBUG to ERROR for consistency with the
    # other tests and to keep the test run quiet
    parser = ttp(data, template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res, width=120)
    assert res == [
        [
            {
                "ports_summary": [
                    {
                        "encap": "802.1q",
                        "mode": "on",
                        "native_vlan": "1",
                        "port": "Gi0",
                        "satus": "trunking",
                    },
                    {
                        "encap": "802.1q",
                        "mode": "on",
                        "native_vlan": "1",
                        "port": "Gi7",
                        "satus": "trunking",
                    },
                ],
                "vlans_active": {
                    "interfaces": [
                        {"port": "Gi0", "vlans": ["1", "8", "999"]},
                        {"port": "Gi7", "vlans": ["1", "100", "120", "1000"]},
                    ]
                },
                "vlans_allowed": {
                    "interfaces": [
                        {
                            "port": "Gi0",
                            "vlans": ["1", "8", "999", "1002", "1003", "1004", "1005"],
                        },
                        {
                            "port": "Gi7",
                            "vlans": [
                                "1",
                                "100",
                                "120",
                                "1000",
                                "1002",
                                "1003",
                                "1004",
                                "1005",
                            ],
                        },
                    ]
                },
                "vlans_forwarding": {
                    "interfaces": [
                        {"port": "Gi0", "vlans": ["1", "8", "999"]},
                        {"port": "Gi7", "vlans": ["1", "100", "120", "1000"]},
                    ]
                },
            }
        ]
    ]
# test_vlans_parsing()
def test_asa_acls_issue_55_uses_itemize_with_dynamic_path():
    """Parse ASA object-groups using itemize together with a dynamic path.

    The itemize="obj_name" attribute collapses each matched child group to a
    flat list of values, while the group path itself is formed dynamically
    from {{ object_type }} / {{ object_name }} / {{ type }}.
    """
    data = """
object-group service gokuhead
service-object tcp-udp destination eq gokurpc
service-object tcp destination eq 902
service-object tcp destination eq https
service-object tcp destination eq nfs
service-object tcp destination eq 10025
object-group network gohan
network-object object gohan-01
network-object object gohan-02
network-object object vlan_944
network-object object gohan-03
network-object object gohan-05
network-object object gohan-06
object-group service sql tcp
port-object eq 1433
object-group network vegeta
group-object trunks
network-object object vegeta-01
object-group network Space-Users
network-object object ab
network-object object ac
network-object object ad
network-object object ae
network-object object af
network-object object ag
network-object object ah
network-object object ai
network-object object aj
object-group network dalmatians
network-object object dog-01
group-object trunks
network-object object vlan_950
group-object Space-Users
network-object object Darts-Summary
"""
    template = """
<vars>
SVC_PORTS = "tcp-udp|tcp|udp"
</vars>
<group name="object-{{ object_type }}-groups**.{{ object_name }}**">
object-group {{ object_type }} {{ object_name | _start_ }}
object-group {{ object_type }} {{ object_name | _start_ }} {{ protocol | re("SVC_PORTS")}}
description {{ description | re(".*") }}
<group name="{{ type }}-objects" itemize="obj_name" method="table">
network-object object {{ obj_name | let("type", "network") }}
network-object host {{ obj_name | IP | let("type", "network") }}
group-object {{ obj_name | let("type", "group") }}
service-object object {{ obj_name | let("type", "service") }}
service-object {{ obj_name | let("type", "service") }}
</group>
<group name="service-object-ports*">
service-object {{ protocol | re("SVC_PORTS") }} destination eq {{port}}
</group>
<group name="service-object-port-ranges*">
service-object {{ protocol | re("SVC_PORTS") }} destination range {{port_begin}} {{port_end}}
</group>
<group name="service-port-objects" itemize="port_obj">
port-object eq {{ port_obj }}
</group>
</group>
"""
    parser = ttp(data, template)
    parser.parse()
    res = parser.result()
    # pprint.pprint(res, width=80)
    assert res == [
        [
            {
                "object-network-groups": {
                    "Space-Users": {
                        "network-objects": [
                            "ab",
                            "ac",
                            "ad",
                            "ae",
                            "af",
                            "ag",
                            "ah",
                            "ai",
                            "aj",
                        ]
                    },
                    "dalmatians": {
                        "group-objects": ["trunks", "Space-Users"],
                        "network-objects": ["dog-01", "vlan_950", "Darts-Summary"],
                    },
                    "gohan": {
                        "network-objects": [
                            "gohan-01",
                            "gohan-02",
                            "vlan_944",
                            "gohan-03",
                            "gohan-05",
                            "gohan-06",
                        ]
                    },
                    "vegeta": {
                        "group-objects": ["trunks"],
                        "network-objects": ["vegeta-01"],
                    },
                },
                "object-service-groups": {
                    "gokuhead": {
                        "service-object-ports": [
                            {"port": "gokurpc", "protocol": "tcp-udp"},
                            {"port": "902", "protocol": "tcp"},
                            {"port": "https", "protocol": "tcp"},
                            {"port": "nfs", "protocol": "tcp"},
                            {"port": "10025", "protocol": "tcp"},
                        ]
                    },
                    "sql": {"protocol": "tcp", "service-port-objects": ["1433"]},
                },
            }
        ]
    ]
# test_asa_acls_issue_55_uses_itemize_with_dynamic_path()
def test_asa_acls_issue_55():
    """Regression test for issue 55 (Cisco ASA object-group parsing).

    Same data and expected result as the ``..._uses_itemize_with_dynamic_path``
    variant above, but here each object kind is matched by its own statically
    named ``itemize`` child group instead of one dynamic ``{{ type }}-objects``
    group.
    """
    data = """
object-group service gokuhead
 service-object tcp-udp destination eq gokurpc
 service-object tcp destination eq 902
 service-object tcp destination eq https
 service-object tcp destination eq nfs
 service-object tcp destination eq 10025
object-group network gohan
 network-object object gohan-01
 network-object object gohan-02
 network-object object vlan_944
 network-object object gohan-03
 network-object object gohan-05
 network-object object gohan-06
object-group service sql tcp
 port-object eq 1433
object-group network vegeta
 group-object trunks
 network-object object vegeta-01
object-group network Space-Users
 network-object object ab
 network-object object ac
 network-object object ad
 network-object object ae
 network-object object af
 network-object object ag
 network-object object ah
 network-object object ai
 network-object object aj
object-group network dalmatians
 network-object object dog-01
 group-object trunks
 network-object object vlan_950
 group-object Space-Users
 network-object object Darts-Summary
    """
    template = """
<vars>
SVC_PORTS = "tcp-udp|tcp|udp"
</vars>
<group name="object-{{ object_type }}-groups**.{{ object_name }}**">
object-group {{ object_type }} {{ object_name | _start_ }}
object-group {{ object_type }} {{ object_name | _start_ }} {{ protocol | re("SVC_PORTS")}}
 description {{ description | re(".*") }}
 <group name="network-objects" itemize="obj_name" method="table">
 network-object object {{ obj_name | }}
 network-object host {{ obj_name | IP }}
 </group>
 <group name="group-objects" itemize="obj_name" method="table">
 group-object {{ obj_name }}
 </group>
 <group name="group-objects" itemize="obj_name" method="table">
 service-object object {{ obj_name }}
 service-object {{ obj_name }}
 </group>
 <group name="service-object-ports*">
 service-object {{ protocol | re("SVC_PORTS") }} destination eq {{port}}
 </group>
 <group name="service-object-port-ranges*">
 service-object {{ protocol | re("SVC_PORTS") }} destination range {{port_begin}} {{port_end}}
 </group>
 <group name="service-port-objects" itemize="port_obj">
 port-object eq {{ port_obj }}
 </group>
</group>
    """
    parser = ttp(data, template)
    parser.parse()
    res = parser.result()
    # pprint.pprint(res, width=80)
    assert res == [
        [
            {
                "object-network-groups": {
                    "Space-Users": {
                        "network-objects": [
                            "ab",
                            "ac",
                            "ad",
                            "ae",
                            "af",
                            "ag",
                            "ah",
                            "ai",
                            "aj",
                        ]
                    },
                    "dalmatians": {
                        "group-objects": ["trunks", "Space-Users"],
                        "network-objects": ["dog-01", "vlan_950", "Darts-Summary"],
                    },
                    "gohan": {
                        "network-objects": [
                            "gohan-01",
                            "gohan-02",
                            "vlan_944",
                            "gohan-03",
                            "gohan-05",
                            "gohan-06",
                        ]
                    },
                    "vegeta": {
                        "group-objects": ["trunks"],
                        "network-objects": ["vegeta-01"],
                    },
                },
                "object-service-groups": {
                    "gokuhead": {
                        "service-object-ports": [
                            {"port": "gokurpc", "protocol": "tcp-udp"},
                            {"port": "902", "protocol": "tcp"},
                            {"port": "https", "protocol": "tcp"},
                            {"port": "nfs", "protocol": "tcp"},
                            {"port": "10025", "protocol": "tcp"},
                        ]
                    },
                    "sql": {"protocol": "tcp", "service-port-objects": ["1433"]},
                },
            }
        ]
    ]
# test_asa_acls_issue_55()
def test_issue_57_headers_parsing():
    """
    Issue first was with startempty match not being selected in favour
    of start match produced by headers :
    Interface Link Protocol Primary_IP Description {{ _headers_ }}
    that was fixed by adding this code to the TTP selection logic for multiple
    matches:
    # startempty RE always more preferred
    if startempty_re:
        for index in startempty_re:
            re_ = result[index][0]
            result_data = result[index][1]
            # skip results that did not pass validation check
            if result_data == False:
                continue
            # prefer result with same path as current record
            elif re_["GROUP"].group_id == self.record["GRP_ID"]:
                break
            # prefer children of current record group
            elif self.record["GRP_ID"] and re_["GROUP"].group_id[
                0
            ].startswith(self.record["GRP_ID"][0]):
                break
    # start RE preferred next
    elif start_re:
    Another problem was with
    Interface Link Protocol Primary_IP Description {{ _headers_ }}
    matching on "Duplex: (a)/A - auto; H - half; F - full" line, that was fixed
    by changing _end_ logic by introducing self.ended_groups set to _results_class
    and replacing self.GRPLOCL with logic to use self.ended_groups instead.
    All in all it resulted in better _end_ handling behavior and allowed to fix issue
    45 as well where before this one had to use filtering instead, but now _end_ also
    helps.
    """
    data = """
Brief information on interfaces in route mode:
Link: ADM - administratively down; Stby - standby
Protocol: (s) - spoofing
Interface Link Protocol Primary IP Description
InLoop0 UP UP(s) --
REG0 UP -- --
Vlan401 UP UP 10.251.147.36 HSSBC_to_inband_mgmt_r4
Brief information on interfaces in bridge mode:
Link: ADM - administratively down; Stby - standby
Speed: (a) - auto
Duplex: (a)/A - auto; H - half; F - full
Type: A - access; T - trunk; H - hybrid
Interface Link Speed Duplex Type PVID Description
BAGG1 UP 20G(a) F(a) T 1 to-KDC-R4.10-Core-1
BAGG14 UP 10G(a) F(a) T 1 KDC-R429-E1 BackUp Chassis
BAGG22 UP 20G(a) F(a) T 1 HSSBC-NS-01
FGE1/0/49 DOWN auto A A 1
XGE1/0/1 UP 10G(a) F(a) T 1 KDC-R402-E1 Backup Chassis
    """
    template = """
<group name = "interfaces">
<group name="routed">
Brief information on interfaces in route mode: {{ _start_ }}
<group name = "{{Interface}}">
Interface Link Protocol Primary_IP Description {{ _headers_ }}
</group>
{{ _end_ }}
</group>
<group name="bridged">
Brief information on interfaces in bridge mode: {{ _start_ }}
<group name = "{{Interface}}">
Interface Link Speed Duplex Type PVID Description {{ _headers_ }}
</group>
{{ _end_ }}
</group>
</group>
    """
    parser = ttp(data, template, log_level="error")
    parser.parse()
    res = parser.result()
    # debug output disabled - keep tests quiet like the sibling tests in this file
    # pprint.pprint(res, width=80)
    # NOTE: the two "Link: ADM - administr" entries are expected artifacts of
    # plain _headers_ (no columns filter) matching the legend lines as rows
    assert res == [
        [
            {
                "interfaces": {
                    "bridged": {
                        "BAGG1": {
                            "Description": "to-KDC-R4.10-Core-1",
                            "Duplex": "F(a)",
                            "Link": "UP",
                            "PVID": "1",
                            "Speed": "20G(a)",
                            "Type": "T",
                        },
                        "BAGG14": {
                            "Description": "KDC-R429-E1 BackUp " "Chassis",
                            "Duplex": "F(a)",
                            "Link": "UP",
                            "PVID": "1",
                            "Speed": "10G(a)",
                            "Type": "T",
                        },
                        "BAGG22": {
                            "Description": "HSSBC-NS-01",
                            "Duplex": "F(a)",
                            "Link": "UP",
                            "PVID": "1",
                            "Speed": "20G(a)",
                            "Type": "T",
                        },
                        "FGE1/0/49": {
                            "Description": "",
                            "Duplex": "A",
                            "Link": "DOWN",
                            "PVID": "1",
                            "Speed": "auto",
                            "Type": "A",
                        },
                        "Link: ADM - administr": {
                            "Description": "",
                            "Duplex": "Stby -",
                            "Link": "ative",
                            "PVID": "dby",
                            "Speed": "ly down;",
                            "Type": "stan",
                        },
                        "XGE1/0/1": {
                            "Description": "KDC-R402-E1 Backup " "Chassis",
                            "Duplex": "F(a)",
                            "Link": "UP",
                            "PVID": "1",
                            "Speed": "10G(a)",
                            "Type": "T",
                        },
                    },
                    "routed": {
                        "InLoop0": {
                            "Description": "",
                            "Link": "UP",
                            "Primary_IP": "--",
                            "Protocol": "UP(s)",
                        },
                        "Link: ADM - administr": {
                            "Description": "",
                            "Link": "ative",
                            "Primary_IP": "Stby - " "standby",
                            "Protocol": "ly down;",
                        },
                        "REG0": {
                            "Description": "",
                            "Link": "UP",
                            "Primary_IP": "--",
                            "Protocol": "--",
                        },
                        "Vlan401": {
                            "Description": "HSSBC_to_inband_mgmt_r4",
                            "Link": "UP",
                            "Primary_IP": "10.251.147.36",
                            "Protocol": "UP",
                        },
                    },
                }
            }
        ]
    ]
# test_issue_57_headers_parsing()
def test_issue_57_headers_parsing_using_columns():
    """
    Added columns for headers, now can adjust headers size as required
    to filter unwanted results
    """
    data = """
Brief information on interfaces in route mode:
Link: ADM - administratively down; Stby - standby
Protocol: (s) - spoofing
Interface Link Protocol Primary IP Description
InLoop0 UP UP(s) --
REG0 UP -- --
Vlan401 UP UP 10.251.147.36 HSSBC_to_inband_mgmt_r4
Brief information on interfaces in bridge mode:
Link: ADM - administratively down; Stby - standby
Speed: (a) - auto
Duplex: (a)/A - auto; H - half; F - full
Type: A - access; T - trunk; H - hybrid
Interface Link Speed Duplex Type PVID Description
BAGG1 UP 20G(a) F(a) T 1 to-KDC-R4.10-Core-1
BAGG14 UP 10G(a) F(a) T 1 KDC-R429-E1 BackUp Chassis
BAGG22 UP 20G(a) F(a) T 1 HSSBC-NS-01
FGE1/0/49 DOWN auto A A 1
XGE1/0/1 UP 10G(a) F(a) T 1 KDC-R402-E1 Backup Chassis
    """
    template = """
<group name = "interfaces">
<group name="routed">
Brief information on interfaces in route mode: {{ _start_ }}
<group name = "{{Interface}}">
Interface Link Protocol Primary_IP Description {{ _headers_ | columns(5)}}
</group>
{{ _end_ }}
</group>
<group name="bridged">
Brief information on interfaces in bridge mode: {{ _start_ }}
<group name = "{{Interface}}">
Interface Link Speed Duplex Type PVID Description {{ _headers_ | columns(7) }}
</group>
{{ _end_ }}
</group>
</group>
    """
    parser = ttp(data, template, log_level="error")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res, width=80)
    # unlike the plain _headers_ test above, the columns(N) filter drops the
    # legend ("Link: ADM - ...") lines, so only real interface rows remain
    assert res == [
        [
            {
                "interfaces": {
                    "bridged": {
                        "BAGG1": {
                            "Description": "to-KDC-R4.10-Core-1",
                            "Duplex": "F(a)",
                            "Link": "UP",
                            "PVID": "1",
                            "Speed": "20G(a)",
                            "Type": "T",
                        },
                        "BAGG14": {
                            "Description": "KDC-R429-E1 BackUp " "Chassis",
                            "Duplex": "F(a)",
                            "Link": "UP",
                            "PVID": "1",
                            "Speed": "10G(a)",
                            "Type": "T",
                        },
                        "BAGG22": {
                            "Description": "HSSBC-NS-01",
                            "Duplex": "F(a)",
                            "Link": "UP",
                            "PVID": "1",
                            "Speed": "20G(a)",
                            "Type": "T",
                        },
                        "FGE1/0/49": {
                            "Description": "",
                            "Duplex": "A",
                            "Link": "DOWN",
                            "PVID": "1",
                            "Speed": "auto",
                            "Type": "A",
                        },
                        "XGE1/0/1": {
                            "Description": "KDC-R402-E1 Backup " "Chassis",
                            "Duplex": "F(a)",
                            "Link": "UP",
                            "PVID": "1",
                            "Speed": "10G(a)",
                            "Type": "T",
                        },
                    },
                    "routed": {
                        "InLoop0": {
                            "Description": "",
                            "Link": "UP",
                            "Primary_IP": "--",
                            "Protocol": "UP(s)",
                        },
                        "REG0": {
                            "Description": "",
                            "Link": "UP",
                            "Primary_IP": "--",
                            "Protocol": "--",
                        },
                        "Vlan401": {
                            "Description": "HSSBC_to_inband_mgmt_r4",
                            "Link": "UP",
                            "Primary_IP": "10.251.147.36",
                            "Protocol": "UP",
                        },
                    },
                }
            }
        ]
    ]
# test_issue_57_headers_parsing_using_columns()
def test_interface_template_not_collecting_all_data_solution():
    """Working solution for the "not collecting all data" problem below.

    Instead of replicating the whole interface group three times (see the
    skipped ``test_interface_template_not_collecting_all_data``), a single
    group uses multiple ``_start_`` lines with ``let`` to tag the mode, a
    ``default="intf_defaults"`` dict for the optional keys, and an explicit
    ``! {{ _end_ }}`` to close each interface block.
    """
    data = """
interface Bundle-Ether10
 description Bundle-Ether10
 bfd mode ietf
 bfd address-family ipv4 multiplier 3
 bfd address-family ipv4 destination 192.168.1.7
 bfd address-family ipv4 fast-detect
 bfd address-family ipv4 minimum-interval 100
 mtu 9114
 ipv4 address 192.168.1.6 255.255.255.254
 ipv6 address fc00::1:5/127
 load-interval 30
!
interface Bundle-Ether51
 description Bundle-Ether51
 bfd mode ietf
 bfd address-family ipv4 multiplier 3
 bfd address-family ipv4 destination 192.168.1.2
 bfd address-family ipv4 fast-detect
 bfd address-family ipv4 minimum-interval 100
 mtu 9114
 ipv4 address 192.168.1.3 255.255.255.254
 ipv6 address fc00::1:3/127
 load-interval 30
!
interface Loopback0
 description Loopback0
 ipv4 address 10.1.1.1 255.255.255.255
 ipv4 address 10.2.2.2 255.255.255.255 secondary
 ipv6 address fc00::1/128
 ipv6 address fc00::101/128
!
interface Loopback1
 description Loopback1
 ipv4 address 10.100.0.1 255.255.255.0
 ipv4 address 10.100.1.1 255.255.255.0 secondary
 ipv4 address 10.100.2.1 255.255.255.0 secondary
 ipv6 address fc00:100::1/64
 ipv6 address fc00:100::101/64
 ipv6 address fc00:100::201/64
!
interface MgmtEth0/RP0/CPU0/0
 description MgmtEth0/RP0/CPU0/0
 cdp
 vrf VRF-MGMT
 ipv4 address 172.23.136.21 255.255.252.0
!
interface GigabitEthernet0/0/0/12
 description GigabitEthernet0/0/0/12
 mtu 9018
 lldp
  receive disable
  transmit disable
 !
 negotiation auto
 load-interval 30
 l2transport
 !
!
interface TenGigE0/0/0/4
 description TenGigE0/0/0/4
 bundle id 51 mode active
 cdp
 load-interval 30
!
interface TenGigE0/0/0/5
 shutdown
!
interface TenGigE0/0/0/5.100 l2transport
 description TenGigE0/0/0/5.100
!
interface TenGigE0/0/0/47
 description TenGigE0/0/0/47
 shutdown
 mac-address 201.b19.1234
!
interface BVI101
 cdp
 description BVI101
 ipv4 address 192.168.101.1 255.255.255.0
 load-interval 30
 mac-address 200.b19.4321
!
interface HundredGigE0/0/1/0
 description HundredGigE0/0/1/0
 bundle id 10 mode active
 cdp
 load-interval 30
 mac-address 200.b19.5678
!
interface preconfigure GigabitEthernet0/0/0/11
 description GigabitEthernet0/0/0/11
 shutdown
!
interface preconfigure GigabitEthernet0/0/0/16
 description GigabitEthernet0/0/0/16
 shutdown
!
interface preconfigure GigabitEthernet0/0/0/17
 description GigabitEthernet0/0/0/17
 shutdown
!
    """
    template_original = """
<doc>
Template for capturing interface configuration data from IOS-XR devices
Note: In order to different interface appearances, the interface block has been replicated.
Be sure to update all blocks accordingly when adding any new values to capture.
</doc>
<vars>
intf_defaults = {
    "description": None,
    "speed": None,
    "negotiation": None,
    "disabled": False,
    "mode": None,
}
</vars>
<macro>
## parses ipv4 addresses to determine which is primary and which are secondary
## and converts dotted-quad subnet mask into cidr format
def ipv4_macro(data):
    data_list = list(data.split(" "))
    addr = str(data_list[0])
    mask = str(data_list[1])
    mask = str(sum(bin(int(x)).count('1') for x in mask.split('.')))
    ipv4 = addr+"/"+mask
    if 'secondary' in data:
        is_secondary = True
    else:
        is_secondary = False
    result = { "ipv4" : ipv4, "is_secondary" : is_secondary }
    return result
</macro>
<group name="interfaces" default="intf_defaults">
interface {{ interface | _start_}}
interface {{ interface | let("mode", "l2transport") | _start_ }} l2transport
interface preconfigure {{ interface | let("mode", "preconfigure") | _start_ }}
 description {{ description | re(".+") }}
 speed {{ speed }}
 negotiation {{ negotiation }}
 shutdown {{ disabled | set(True) }}
 mac-address {{ mac_address }}
 <group name="ipv4*" method="table" containsall="ipv4">
 ipv4 address {{ ipv4 | PHRASE | _exact_ | macro("ipv4_macro") }}
 </group>
 <group name="ipv6*" method="table" containsall="ipv6">
 ipv6 address {{ ipv6 | ORPHRASE | _exact_ }}
 </group>
! {{ _end_ }}
</group>
    """
    parser = ttp(data, template_original, log_level="error")
    parser.parse()
    res = parser.result()
    # debug output disabled - keep tests quiet like the sibling tests in this file
    # pprint.pprint(res, width=80)
    assert res == [
        [
            {
                "interfaces": [
                    {
                        "description": "Bundle-Ether10",
                        "disabled": False,
                        "interface": "Bundle-Ether10",
                        "ipv4": [
                            {"ipv4": {"ipv4": "192.168.1.6/31", "is_secondary": False}}
                        ],
                        "ipv6": [{"ipv6": "fc00::1:5/127"}],
                        "mode": None,
                        "negotiation": None,
                        "speed": None,
                    },
                    {
                        "description": "Bundle-Ether51",
                        "disabled": False,
                        "interface": "Bundle-Ether51",
                        "ipv4": [
                            {"ipv4": {"ipv4": "192.168.1.3/31", "is_secondary": False}}
                        ],
                        "ipv6": [{"ipv6": "fc00::1:3/127"}],
                        "mode": None,
                        "negotiation": None,
                        "speed": None,
                    },
                    {
                        "description": "Loopback0",
                        "disabled": False,
                        "interface": "Loopback0",
                        "ipv4": [
                            {"ipv4": {"ipv4": "10.1.1.1/32", "is_secondary": False}},
                            {"ipv4": {"ipv4": "10.2.2.2/32", "is_secondary": True}},
                        ],
                        "ipv6": [{"ipv6": "fc00::1/128"}, {"ipv6": "fc00::101/128"}],
                        "mode": None,
                        "negotiation": None,
                        "speed": None,
                    },
                    {
                        "description": "Loopback1",
                        "disabled": False,
                        "interface": "Loopback1",
                        "ipv4": [
                            {"ipv4": {"ipv4": "10.100.0.1/24", "is_secondary": False}},
                            {"ipv4": {"ipv4": "10.100.1.1/24", "is_secondary": True}},
                            {"ipv4": {"ipv4": "10.100.2.1/24", "is_secondary": True}},
                        ],
                        "ipv6": [
                            {"ipv6": "fc00:100::1/64"},
                            {"ipv6": "fc00:100::101/64"},
                            {"ipv6": "fc00:100::201/64"},
                        ],
                        "mode": None,
                        "negotiation": None,
                        "speed": None,
                    },
                    {
                        "description": "MgmtEth0/RP0/CPU0/0",
                        "disabled": False,
                        "interface": "MgmtEth0/RP0/CPU0/0",
                        "ipv4": [
                            {
                                "ipv4": {
                                    "ipv4": "172.23.136.21/22",
                                    "is_secondary": False,
                                }
                            }
                        ],
                        "mode": None,
                        "negotiation": None,
                        "speed": None,
                    },
                    {
                        "description": "GigabitEthernet0/0/0/12",
                        "disabled": False,
                        "interface": "GigabitEthernet0/0/0/12",
                        "mode": None,
                        "negotiation": "auto",
                        "speed": None,
                    },
                    {
                        "description": "TenGigE0/0/0/4",
                        "disabled": False,
                        "interface": "TenGigE0/0/0/4",
                        "mode": None,
                        "negotiation": None,
                        "speed": None,
                    },
                    {
                        "description": None,
                        "disabled": True,
                        "interface": "TenGigE0/0/0/5",
                        "mode": None,
                        "negotiation": None,
                        "speed": None,
                    },
                    {
                        "description": "TenGigE0/0/0/5.100",
                        "disabled": False,
                        "interface": "TenGigE0/0/0/5.100",
                        "mode": "l2transport",
                        "negotiation": None,
                        "speed": None,
                    },
                    {
                        "description": "TenGigE0/0/0/47",
                        "disabled": True,
                        "interface": "TenGigE0/0/0/47",
                        "mac_address": "201.b19.1234",
                        "mode": None,
                        "negotiation": None,
                        "speed": None,
                    },
                    {
                        "description": "BVI101",
                        "disabled": False,
                        "interface": "BVI101",
                        "ipv4": [
                            {
                                "ipv4": {
                                    "ipv4": "192.168.101.1/24",
                                    "is_secondary": False,
                                }
                            }
                        ],
                        "mac_address": "200.b19.4321",
                        "mode": None,
                        "negotiation": None,
                        "speed": None,
                    },
                    {
                        "description": "HundredGigE0/0/1/0",
                        "disabled": False,
                        "interface": "HundredGigE0/0/1/0",
                        "mac_address": "200.b19.5678",
                        "mode": None,
                        "negotiation": None,
                        "speed": None,
                    },
                    {
                        "description": "GigabitEthernet0/0/0/11",
                        "disabled": True,
                        "interface": "GigabitEthernet0/0/0/11",
                        "mode": "preconfigure",
                        "negotiation": None,
                        "speed": None,
                    },
                    {
                        "description": "GigabitEthernet0/0/0/16",
                        "disabled": True,
                        "interface": "GigabitEthernet0/0/0/16",
                        "mode": "preconfigure",
                        "negotiation": None,
                        "speed": None,
                    },
                    {
                        "description": "GigabitEthernet0/0/0/17",
                        "disabled": True,
                        "interface": "GigabitEthernet0/0/0/17",
                        "mode": "preconfigure",
                        "negotiation": None,
                        "speed": None,
                    },
                ]
            }
        ]
    ]
# test_interface_template_not_collecting_all_data_solution()
@pytest.mark.skipif(True, reason="Need to fix this one")
def test_interface_template_not_collecting_all_data():
    """
    For interface BVI101 not collecting mac-address

    Skipped: demonstrates the original failing approach where the interface
    group is replicated three times (primary / preconfigure / l2transport).
    See ``test_interface_template_not_collecting_all_data_solution`` above for
    the working single-group version of this template.  No assert here - the
    pprint output is for manual inspection when un-skipped.
    """
    data = """
interface Bundle-Ether10
 description Bundle-Ether10
 bfd mode ietf
 bfd address-family ipv4 multiplier 3
 bfd address-family ipv4 destination 192.168.1.7
 bfd address-family ipv4 fast-detect
 bfd address-family ipv4 minimum-interval 100
 mtu 9114
 ipv4 address 192.168.1.6 255.255.255.254
 ipv6 address fc00::1:5/127
 load-interval 30
!
interface Bundle-Ether51
 description Bundle-Ether51
 bfd mode ietf
 bfd address-family ipv4 multiplier 3
 bfd address-family ipv4 destination 192.168.1.2
 bfd address-family ipv4 fast-detect
 bfd address-family ipv4 minimum-interval 100
 mtu 9114
 ipv4 address 192.168.1.3 255.255.255.254
 ipv6 address fc00::1:3/127
 load-interval 30
!
interface Loopback0
 description Loopback0
 ipv4 address 10.1.1.1 255.255.255.255
 ipv4 address 10.2.2.2 255.255.255.255 secondary
 ipv6 address fc00::1/128
 ipv6 address fc00::101/128
!
interface Loopback1
 description Loopback1
 ipv4 address 10.100.0.1 255.255.255.0
 ipv4 address 10.100.1.1 255.255.255.0 secondary
 ipv4 address 10.100.2.1 255.255.255.0 secondary
 ipv6 address fc00:100::1/64
 ipv6 address fc00:100::101/64
 ipv6 address fc00:100::201/64
!
interface MgmtEth0/RP0/CPU0/0
 description MgmtEth0/RP0/CPU0/0
 cdp
 vrf VRF-MGMT
 ipv4 address 172.23.136.21 255.255.252.0
!
interface GigabitEthernet0/0/0/12
 description GigabitEthernet0/0/0/12
 mtu 9018
 lldp
  receive disable
  transmit disable
 !
 negotiation auto
 load-interval 30
 l2transport
 !
!
interface TenGigE0/0/0/4
 description TenGigE0/0/0/4
 bundle id 51 mode active
 cdp
 load-interval 30
!
interface TenGigE0/0/0/5
 shutdown
!
interface TenGigE0/0/0/5.100 l2transport
 description TenGigE0/0/0/5.100
!
interface TenGigE0/0/0/47
 description TenGigE0/0/0/47
 shutdown
 mac-address 201.b19.1234
!
interface BVI101
 cdp
 description BVI101
 ipv4 address 192.168.101.1 255.255.255.0
 load-interval 30
 mac-address 200.b19.4321
!
interface HundredGigE0/0/1/0
 description HundredGigE0/0/1/0
 bundle id 10 mode active
 cdp
 load-interval 30
 mac-address 200.b19.5678
!
interface preconfigure GigabitEthernet0/0/0/11
 description GigabitEthernet0/0/0/11
 shutdown
!
interface preconfigure GigabitEthernet0/0/0/16
 description GigabitEthernet0/0/0/16
 shutdown
!
interface preconfigure GigabitEthernet0/0/0/17
 description GigabitEthernet0/0/0/17
 shutdown
!
    """
    template_original = """
<doc>
Template for capturing interface configuration data from IOS-XR devices
Note: In order to different interface appearances, the interface block has been replicated.
Be sure to update all blocks accordingly when adding any new values to capture.
</doc>
<macro>
## parses ipv4 addresses to determine which is primary and which are secondary
## and converts dotted-quad subnet mask into cidr format
def ipv4_macro(data):
    data_list = list(data.split(" "))
    addr = str(data_list[0])
    mask = str(data_list[1])
    mask = str(sum(bin(int(x)).count('1') for x in mask.split('.')))
    ipv4 = addr+"/"+mask
    if 'secondary' in data:
        is_secondary = True
    else:
        is_secondary = False
    result = { "ipv4" : ipv4, "is_secondary" : is_secondary }
    return result
</macro>
## parent group for all interface groups
<group name="interfaces">
## matches primary interfaces
<group>
{{ mode | set(None) }}
{{ description | set(None) }}
{{ speed | set(None) }}
{{ negotiation | set(None) }}
{{ disabled | set(False) }}
interface {{ interface }}
 description {{ description | re(".+") }}
 <group name="ipv4*" method="table" containsall="ipv4">
 ipv4 address {{ ipv4 | PHRASE | _exact_ | macro("ipv4_macro") }}
 </group>
 <group name="ipv6*" method="table" containsall="ipv6">
 ipv6 address {{ ipv6 | PHRASE | _exact_ }}
 </group>
 speed {{ speed }}
 negotiation {{ negotiation }}
 shutdown {{ disabled | set(True) }}
 mac-address {{ mac_address }}
</group>
## matches pre-configured interfaces
<group>
{{ mode | set('preconfigure') }}
{{ description | set(None) }}
{{ speed | set(None) }}
{{ negotiation | set(None) }}
{{ disabled | set(False) }}
interface preconfigure {{ interface }}
 description {{ description | re(".+") }}
 <group name="ipv4*" method="table" containsall="ipv4">
 ipv4 address {{ ipv4 | PHRASE | _exact_ | macro("ipv4_macro") }}
 </group>
 <group name="ipv6*" method="table" containsall="ipv6">
 ipv6 address {{ ipv6 | PHRASE | _exact_ }}
 </group>
 speed {{ speed }}
 negotiation {{ negotiation }}
 shutdown {{ disabled | set(True) }}
 mac-address {{ mac_address }}
</group>
## matches sub-interfaces
<group>
{{ mode | set('l2transport') }}
{{ description | set(None) }}
{{ speed | set(None) }}
{{ negotiation | set(None) }}
{{ disabled | set(False) }}
interface {{ interface }} l2transport
 description {{ description | re(".+") }}
 <group name="ipv4*" method="table" containsall="ipv4">
 ipv4 address {{ ipv4 | PHRASE | _exact_ | macro("ipv4_macro") }}
 </group>
 <group name="ipv6*" method="table" containsall="ipv6">
 ipv6 address {{ ipv6 | PHRASE | _exact_ }}
 </group>
 speed {{ speed }}
 negotiation {{ negotiation }}
 shutdown {{ disabled | set(True) }}
 mac-address {{ mac_address }}
</group>
</group>
    """
    parser = ttp(data, template_original, log_level="error")
    parser.parse()
    res = parser.result()
    pprint.pprint(res, width=80)
# test_interface_template_not_collecting_all_data()
def test_interface_template_not_collecting_all_data_reduced():
    """
    Below template and data were producing this result:
    [[{'interfaces': [{'interface': 'TenGigE0/0/0/5.100'},
                    {'interface': 'BVI101',
                        'ipv4': [{'ipv4': '192.168.101.1 255.255.255.0'}]}]}]]
    TTP was not collecting mac-address for BVI 101

    This reduced reproduction now passes: the mac_address for BVI101 is
    collected by the first (primary interface) anonymous group.
    """
    data = """
interface TenGigE0/0/0/5.100 l2transport
!
interface BVI101
 ipv4 address 192.168.101.1 255.255.255.0
 mac-address 200.b19.4321
!
    """
    template = """
<group name="interfaces">
## matches primary interfaces
<group>
interface {{ interface }}
 <group name="ipv4*" method="table" containsall="ipv4">
 ipv4 address {{ ipv4 | _line_ | _exact_ }}
 </group>
 mac-address {{ mac_address }}
</group>
## matches sub-interfaces
<group>
interface {{ interface }} l2transport
 mac-address {{ mac_address }}
</group>
</group>
    """
    parser = ttp(data, template, log_level="error")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res, width=80)
    assert res == [
        [
            {
                "interfaces": [
                    {"interface": "TenGigE0/0/0/5.100"},
                    {
                        "interface": "BVI101",
                        "ipv4": [{"ipv4": "192.168.101.1 255.255.255.0"}],
                        "mac_address": "200.b19.4321",
                    },
                ]
            }
        ]
    ]
# test_interface_template_not_collecting_all_data_reduced()
@pytest.mark.skipif(True, reason="Need to fix this one")
def test_interface_template_not_collecting_all_data_reduced_2():
    """
    Below template and data producing this result:
    [[{'interfaces': [{'interface': 'TenGigE0/0/0/5'},
                    {'interface': 'TenGigE0/0/0/5.100',
                        'mac_address': '200.b19.1234'},
                    {'interface': 'BVI101',
                        'ipv4': [{'ipv4': '192.168.101.1 255.255.255.0'}]},
                    {'interface': 'HundredGigE0/0/1/0',
                        'mac_address': '200.b19.5678'}]}]]
    Interface BVI should not have IPv4 address matched, but
    should have mac-address matched. Problem is due to that
    l2transport group starts and it has group for IPv4 addresses,
    next match after matching IPv4 is mac-address, but its parent
    is a different group, as a result IPv4 address saved under wrong group
    and mac-address not saved at all
    IDEA: try to implement automatic end of group tracking, to add previous
    groups to self.ended_groups if next, different group starts.
    Current solution to this problem would be to use _end_ to explicitly
    indicate end of group
    """
    data = """
interface TenGigE0/0/0/5
!
interface TenGigE0/0/0/5.100 l2transport
 mac-address 200.b19.1234
!
interface BVI101
 ipv4 address 192.168.101.1 255.255.255.0
 mac-address 200.b19.4321
!
interface HundredGigE0/0/1/0
 mac-address 200.b19.5678
!
    """
    template_original = """
<group name="interfaces">
## matches primary interfaces
<group>
interface {{ interface }}
 mac-address {{ mac_address }}
</group>
## matches sub-interfaces
<group>
interface {{ interface }} l2transport
 <group name="ipv4*" method="table" containsall="ipv4">
 ipv4 address {{ ipv4 | _line_ | _exact_ }}
 </group>
</group>
</group>
    """
    parser = ttp(data, template_original, log_level="error")
    parser.parse()
    res = parser.result()
    pprint.pprint(res, width=80)
# test_interface_template_not_collecting_all_data_reduced_2()
def test_issue_61():
    """Two-pass parsing of a banner with a dynamic delimiter (issue 61).

    First pass extracts the banner marker character ("&"); second pass feeds
    that marker back in via template vars and the ``ignore`` function to
    capture the multi-line banner body with ``_line_``/``joinmatches``.
    Note: "banner_mesage" (sic) is the match variable name used by the
    template and therefore also the expected result key.
    """
    data = """
banner motd &
BANNER MESSAGE line 1
BANNER MESSAGE line 2
BANNER MESSAGE line 3
&
some
other staff
    """
    template_to_match_marker = "banner motd {{ marker }}"
    template_to_parse_banner = """
<group name="motd">
banner motd {{ ignore(banner_marker) }} {{ _start_ }}
{{ banner_mesage | _line_ | joinmatches("\\n") }}
{{ ignore(banner_marker) }} {{ _end_ }}
</group>
    """
    # extract marker value
    parser = ttp(data, template_to_match_marker)
    parser.parse()
    marker = parser.result()[0][0]["marker"]
    # parse banner
    parser = ttp(data, template_to_parse_banner, vars={"banner_marker": marker})
    parser.parse()
    res = parser.result()
    # debug output disabled - keep tests quiet like the sibling tests in this file
    # pprint.pprint(res)
    assert res == [[{'motd': {'banner_mesage': 'BANNER MESSAGE line 1\n'
                              'BANNER MESSAGE line 2\n'
                              'BANNER MESSAGE line 3'}}]]
# test_issue_61()
def test_fortigate_intf_parsing():
    """Parse FortiGate ``config system interface`` section only.

    Verifies that ``_start_``/``_end_`` anchors confine matching to the
    ``config system interface`` block, so the surrounding ``config system
    np6`` and ``config system custom-language`` sections are ignored, and
    that the absolute path ``/interfaces*`` collects each ``edit``/``next``
    entry as a list item.
    """
    template = """
<group name="interfaces">
config system interface {{ _start_ }}
<group name="/interfaces*">
edit "{{ interface }}"
 set allowaccess {{ allowaccess }}
 set description "{{ description }}"
 set interface "{{ phy_interface }}"
 set snmp-index {{ snmp_index }}
 set type {{ fgt_int_type }}
 set vdom "{{ vdom }}"
 set vlanid {{ vlan }}
next {{ _end_ }}
</group>
end {{ _end_ }}
</group>
    """
    data = """
config system np6
 edit "np6_0"
 next
end
config system interface
 edit "mgmt1"
 set vdom "root"
 set ip 10.10.10.1 255.255.255.248
 set allowaccess ping
 set type physical
 set description "mgmt1"
 set snmp-index 1
 next
 edit "port1"
 set vdom "internal"
 set ip 20.20.20.1 255.255.255.248
 set allowaccess ping
 set type physical
 set snmp-index 2
 next
end
config system custom-language
 edit "en"
 set filename "en"
 next
 edit "fr"
 set filename "fr"
 next
end
    """
    parser = ttp(data, template)
    parser.parse()
    res = parser.result()
    # debug output disabled - keep tests quiet like the sibling tests in this file
    # pprint.pprint(res)
    assert res == [[{'interfaces': [{'allowaccess': 'ping',
                                     'description': 'mgmt1',
                                     'fgt_int_type': 'physical',
                                     'interface': 'mgmt1',
                                     'snmp_index': '1',
                                     'vdom': 'root'},
                                    {'allowaccess': 'ping',
                                     'fgt_int_type': 'physical',
                                     'interface': 'port1',
                                     'snmp_index': '2',
                                     'vdom': 'internal'}]}]]
# test_fortigate_intf_parsing()
def test_issue_57_one_more():
    """
    Without _anonymous_ group groups id formation bug fix
    below template/data were producing this result:
    [[{'portchannel': {'1': {'local_members': [{}],
                            'remote_members': [{'flag': '{EF}',
                                                'interface': 'GE6/0/1',
                                                'mac': '0000-0000-0000',
                                                'oper_key': '0',
                                                'priority': '32768',
                                                'status': '0',
                                                'sys_id': '0x8000'},
                                               {'flag': '{EF}',
                                                'interface': 'GE6/0/2',
                                                'mac': '0000-0000-0000',
                                                'oper_key': '0',
                                                'priority': '32768',
                                                'status': '0',
                                                'sys_id': '0x8000'}]},
                       '2': {'local_members': [{}],
                            'remote_members': [{'flag': '{EF}',
                                                'interface': 'GE6/0/3',
                                                'mac': '0000-0000-0000',
                                                'oper_key': '0',
                                                'priority': '32768',
                                                'status': '0',
                                                'sys_id': '0x8000'},
                                               {'flag': '{EF}',
                                                'interface': 'GE6/0/4',
                                                'mac': '0000-0000-0000',
                                                'oper_key': '0',
                                                'priority': '32768',
                                                'status': '0',
                                                'sys_id': '0x8000'}]}}}]]
    Further debugging revealed the flaw in results selection logic,
    due to exclude("Port") statements group was invalidated and anonymous group_id
    was same as parent group_id resulting in new anonymous group matches were not
    able to restart the group, fixed by changing the way how anonymous group id formed.
    Before fix:
    self.ended_groups: set()
    re_["GROUP"].group_id: ('portchannel.{{channel_number}}.local_members*', 0)
    re_["GROUP"].parent_group_id: ('portchannel.{{channel_number}}.local_members*', 0)
    self.ended_groups: {('portchannel.{{channel_number}}.local_members*', 0)}
    re_["GROUP"].group_id: ('portchannel.{{channel_number}}.local_members*', 0)
    re_["GROUP"].parent_group_id: ('portchannel.{{channel_number}}.local_members*', 0)
    self.ended_groups: {('portchannel.{{channel_number}}.local_members*', 0)}
    re_["GROUP"].group_id: ('portchannel.{{channel_number}}.local_members*', 0)
    re_["GROUP"].parent_group_id: ('portchannel.{{channel_number}}.local_members*', 0)
    After fix:
    self.ended_groups: set()
    re_["GROUP"].group_id: ('portchannel.{{channel_number}}.local_members*._anonymous_', 0)
    re_["GROUP"].parent_group_id: ('portchannel.{{channel_number}}.local_members*', 0)
    self.ended_groups: {('portchannel.{{channel_number}}.local_members*._anonymous_', 0)}
    re_["GROUP"].group_id: ('portchannel.{{channel_number}}.local_members*._anonymous_', 0)
    re_["GROUP"].parent_group_id: ('portchannel.{{channel_number}}.local_members*', 0)
    self.ended_groups: set()
    re_["GROUP"].group_id: ('portchannel.{{channel_number}}.local_members*._anonymous_', 0)
    re_["GROUP"].parent_group_id: ('portchannel.{{channel_number}}.local_members*', 0)
    """
    data = """
Loadsharing Type: Shar -- Loadsharing, NonS -- Non-Loadsharing
Port Status: S -- Selected, U -- Unselected,
I -- Individual, * -- Management port
Flags: A -- LACP_Activity, B -- LACP_Timeout, C -- Aggregation,
D -- Synchronization, E -- Collecting, F -- Distributing,
G -- Defaulted, H -- Expired
Aggregate Interface: Bridge-Aggregation1
Aggregation Mode: Dynamic
Loadsharing Type: Shar
Management VLAN : None
System ID: 0x8000, d07e-28b5-a200
Local:
Port Status Priority Oper-Key Flag
--------------------------------------------------------------------------------
GE6/0/1 U 32768 1 {ACG}
GE6/0/2 U 32768 1 {ACG}
Remote:
Actor Partner Priority Oper-Key SystemID Flag
--------------------------------------------------------------------------------
GE6/0/1 0 32768 0 0x8000, 0000-0000-0000 {EF}
GE6/0/2 0 32768 0 0x8000, 0000-0000-0000 {EF}
Aggregate Interface: Bridge-Aggregation2
Aggregation Mode: Dynamic
Loadsharing Type: Shar
Management VLAN : None
System ID: 0x8000, d07e-28b5-a200
Local:
Port Status Priority Oper-Key Flag
--------------------------------------------------------------------------------
GE6/0/3 U 32768 2 {ACG}
GE6/0/4 U 32768 2 {ACG}
Remote:
Actor Partner Priority Oper-Key SystemID Flag
--------------------------------------------------------------------------------
GE6/0/3 0 32768 0 0x8000, 0000-0000-0000 {EF}
GE6/0/4 0 32768 0 0x8000, 0000-0000-0000 {EF}
    """
    template = """
<group name = "portchannel.{{channel_number}}">
Aggregate Interface: Bridge-Aggregation{{ channel_number}}
<group name = "local_members*" void="">
Local: {{_start_}}
<group>
{{interface | exclude("Port") }} {{status}} {{priority}} {{oper_key }} {{flag}}
</group>
</group>
<group name = "remote_members*">
{{interface }} {{status}} {{priority}} {{oper_key}} {{sys_id}}, {{ mac | MAC }} {{flag}}
</group>
</group>
    """
    parser = ttp(data, template)
    parser.parse()
    res = parser.result()
    # debug output disabled - keep tests quiet like the sibling tests in this file
    # pprint.pprint(res)
    assert res == [[{'portchannel': {'1': {'local_members': [{'flag': '{ACG}',
                                                              'interface': 'GE6/0/1',
                                                              'oper_key': '1',
                                                              'priority': '32768',
                                                              'status': 'U'},
                                                             {'flag': '{ACG}',
                                                              'interface': 'GE6/0/2',
                                                              'oper_key': '1',
                                                              'priority': '32768',
                                                              'status': 'U'}],
                                           'remote_members': [{'flag': '{EF}',
                                                               'interface': 'GE6/0/1',
                                                               'mac': '0000-0000-0000',
                                                               'oper_key': '0',
                                                               'priority': '32768',
                                                               'status': '0',
                                                               'sys_id': '0x8000'},
                                                              {'flag': '{EF}',
                                                               'interface': 'GE6/0/2',
                                                               'mac': '0000-0000-0000',
                                                               'oper_key': '0',
                                                               'priority': '32768',
                                                               'status': '0',
                                                               'sys_id': '0x8000'}]},
                                     '2': {'local_members': [{'flag': '{ACG}',
                                                              'interface': 'GE6/0/3',
                                                              'oper_key': '2',
                                                              'priority': '32768',
                                                              'status': 'U'},
                                                             {'flag': '{ACG}',
                                                              'interface': 'GE6/0/4',
                                                              'oper_key': '2',
                                                              'priority': '32768',
                                                              'status': 'U'}],
                                           'remote_members': [{'flag': '{EF}',
                                                               'interface': 'GE6/0/3',
                                                               'mac': '0000-0000-0000',
                                                               'oper_key': '0',
                                                               'priority': '32768',
                                                               'status': '0',
                                                               'sys_id': '0x8000'},
                                                              {'flag': '{EF}',
                                                               'interface': 'GE6/0/4',
                                                               'mac': '0000-0000-0000',
                                                               'oper_key': '0',
                                                               'priority': '32768',
                                                               'status': '0',
                                                               'sys_id': '0x8000'}]}}}]]
# test_issue_57_one_more()
def test_issue_57_one_more_answer():
    """Parse H3C LACP member tables with flat per-row groups.
    Member rows are matched directly inside ``local_members*`` /
    ``remote_members*`` (no ``_start_`` marker), relying on ``DIGIT`` to
    anchor the numeric priority / oper-key columns; results are keyed by
    channel number under ``portchannel``.
    """
    data = """
Loadsharing Type: Shar -- Loadsharing, NonS -- Non-Loadsharing
Port Status: S -- Selected, U -- Unselected,
             I -- Individual, * -- Management port
Flags:  A -- LACP_Activity, B -- LACP_Timeout, C -- Aggregation,
        D -- Synchronization, E -- Collecting, F -- Distributing,
        G -- Defaulted, H -- Expired
Aggregate Interface: Bridge-Aggregation1
Aggregation Mode: Dynamic
Loadsharing Type: Shar
Management VLAN : None
System ID: 0x8000, d07e-28b5-a200
Local:
  Port             Status  Priority Oper-Key  Flag
--------------------------------------------------------------------------------
  GE6/0/1          U       32768    1          {ACG}
  GE6/0/2          U       32768    1          {ACG}
Remote:
  Actor            Partner Priority Oper-Key  SystemID               Flag
--------------------------------------------------------------------------------
  GE6/0/1          0       32768    0         0x8000, 0000-0000-0000 {EF}
  GE6/0/2          0       32768    0         0x8000, 0000-0000-0000 {EF}
Aggregate Interface: Bridge-Aggregation2
Aggregation Mode: Dynamic
Loadsharing Type: Shar
Management VLAN : None
System ID: 0x8000, d07e-28b5-a200
Local:
  Port             Status  Priority Oper-Key  Flag
--------------------------------------------------------------------------------
  GE6/0/3          U       32768    2          {ACG}
  GE6/0/4          U       32768    2          {ACG}
Remote:
  Actor            Partner Priority Oper-Key  SystemID               Flag
--------------------------------------------------------------------------------
  GE6/0/3          0       32768    0         0x8000, 0000-0000-0000 {EF}
  GE6/0/4          0       32768    0         0x8000, 0000-0000-0000 {EF}
"""
    template = """
<group name = "portchannel.{{channel_number}}">
Aggregate Interface: Bridge-Aggregation{{ channel_number}}
 <group name = "local_members*">
{{interface}} {{status}} {{priority | DIGIT}} {{oper_key | DIGIT}} {{flag}}
 </group>
 <group name = "remote_members*">
{{interface}} {{status}} {{priority | DIGIT}} {{oper_key | DIGIT}} {{sys_id}}, {{ mac | MAC }} {{flag}}
 </group>
</group>
"""
    parser = ttp(data, template)
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    assert res == [[{'portchannel': {'1': {'local_members': [{'flag': '{ACG}',
                                                              'interface': 'GE6/0/1',
                                                              'oper_key': '1',
                                                              'priority': '32768',
                                                              'status': 'U'},
                                                             {'flag': '{ACG}',
                                                              'interface': 'GE6/0/2',
                                                              'oper_key': '1',
                                                              'priority': '32768',
                                                              'status': 'U'}],
                                           'remote_members': [{'flag': '{EF}',
                                                               'interface': 'GE6/0/1',
                                                               'mac': '0000-0000-0000',
                                                               'oper_key': '0',
                                                               'priority': '32768',
                                                               'status': '0',
                                                               'sys_id': '0x8000'},
                                                              {'flag': '{EF}',
                                                               'interface': 'GE6/0/2',
                                                               'mac': '0000-0000-0000',
                                                               'oper_key': '0',
                                                               'priority': '32768',
                                                               'status': '0',
                                                               'sys_id': '0x8000'}]},
                                     '2': {'local_members': [{'flag': '{ACG}',
                                                              'interface': 'GE6/0/3',
                                                              'oper_key': '2',
                                                              'priority': '32768',
                                                              'status': 'U'},
                                                             {'flag': '{ACG}',
                                                              'interface': 'GE6/0/4',
                                                              'oper_key': '2',
                                                              'priority': '32768',
                                                              'status': 'U'}],
                                           'remote_members': [{'flag': '{EF}',
                                                               'interface': 'GE6/0/3',
                                                               'mac': '0000-0000-0000',
                                                               'oper_key': '0',
                                                               'priority': '32768',
                                                               'status': '0',
                                                               'sys_id': '0x8000'},
                                                              {'flag': '{EF}',
                                                               'interface': 'GE6/0/4',
                                                               'mac': '0000-0000-0000',
                                                               'oper_key': '0',
                                                               'priority': '32768',
                                                               'status': '0',
                                                               'sys_id': '0x8000'}]}}}]]
# test_issue_57_one_more_answer()
def test_issue_57_one_more_empty_dict_in_res():
    """
    Without fix this results produced:
    [[{'portchannel': {'1': {'local_members': [{},
                                               {'flag': '{ACG}',
                                                'interface': 'GE6/0/1',
                                                'oper_key': '1',
                                                'priority': '32768',
                                                'status': 'U'},
                                               {'flag': '{ACG}',
                                                'interface': 'GE6/0/2',
                                                'oper_key': '1',
                                                'priority': '32768',
                                                'status': 'U'}],
                             'remote_members': [{},
                                                {'flag': '{EF}',
                                                 'interface': 'GE6/0/1',
                                                 'mac': '0000-0000-0000',
                                                 'oper_key': '0',
                                                 'priority': '32768',
                                                 'status': '0',
                                                 'sys_id': '0x8000'},
                                                {'flag': '{EF}',
                                                 'interface': 'GE6/0/2',
                                                 'mac': '0000-0000-0000',
                                                 'oper_key': '0',
                                                 'priority': '32768',
                                                 'status': '0',
                                                 'sys_id': '0x8000'}]},
                       '2': {'local_members': [{},
                                               {'flag': '{ACG}',
                                                'interface': 'GE6/0/3',
                                                'oper_key': '2',
                                                'priority': '32768',
                                                'status': 'U'},
                                               {'flag': '{ACG}',
                                                'interface': 'GE6/0/4',
                                                'oper_key': '2',
                                                'priority': '32768',
                                                'status': 'U'}],
                             'remote_members': [{},
                                                {'flag': '{EF}',
                                                 'interface': 'GE6/0/3',
                                                 'mac': '0000-0000-0000',
                                                 'oper_key': '0',
                                                 'priority': '32768',
                                                 'status': '0',
                                                 'sys_id': '0x8000'},
                                                {'flag': '{EF}',
                                                 'interface': 'GE6/0/4',
                                                 'mac': '0000-0000-0000',
                                                 'oper_key': '0',
                                                 'priority': '32768',
                                                 'status': '0',
                                                 'sys_id': '0x8000'}]}}}]]
    The results above contain an empty dictionary as the first list item.
    Because local_members* and remote_members* use * to indicate a list,
    self.dict_by_path returned E as a list element and results were
    appended to that element even when the results were an empty
    dictionary; the saving logic was updated to skip appending empty
    results.
    """
    data = """
Loadsharing Type: Shar -- Loadsharing, NonS -- Non-Loadsharing
Port Status: S -- Selected, U -- Unselected,
             I -- Individual, * -- Management port
Flags:  A -- LACP_Activity, B -- LACP_Timeout, C -- Aggregation,
        D -- Synchronization, E -- Collecting, F -- Distributing,
        G -- Defaulted, H -- Expired
Aggregate Interface: Bridge-Aggregation1
Aggregation Mode: Dynamic
Loadsharing Type: Shar
Management VLAN : None
System ID: 0x8000, d07e-28b5-a200
Local:
  Port             Status  Priority Oper-Key  Flag
--------------------------------------------------------------------------------
  GE6/0/1          U       32768    1          {ACG}
  GE6/0/2          U       32768    1          {ACG}
Remote:
  Actor            Partner Priority Oper-Key  SystemID               Flag
--------------------------------------------------------------------------------
  GE6/0/1          0       32768    0         0x8000, 0000-0000-0000 {EF}
  GE6/0/2          0       32768    0         0x8000, 0000-0000-0000 {EF}
Aggregate Interface: Bridge-Aggregation2
Aggregation Mode: Dynamic
Loadsharing Type: Shar
Management VLAN : None
System ID: 0x8000, d07e-28b5-a200
Local:
  Port             Status  Priority Oper-Key  Flag
--------------------------------------------------------------------------------
  GE6/0/3          U       32768    2          {ACG}
  GE6/0/4          U       32768    2          {ACG}
Remote:
  Actor            Partner Priority Oper-Key  SystemID               Flag
--------------------------------------------------------------------------------
  GE6/0/3          0       32768    0         0x8000, 0000-0000-0000 {EF}
  GE6/0/4          0       32768    0         0x8000, 0000-0000-0000 {EF}
"""
    template = """
<group name = "portchannel.{{channel_number}}">
Aggregate Interface: Bridge-Aggregation{{ channel_number}}
 <group name = "local_members*">
Local: {{_start_}}
  <group>
{{interface }} {{status}} {{priority}} {{oper_key | DIGIT }} {{flag}}
  </group>
 </group>
 <group name = "remote_members*">
Remote: {{_start_}}
  <group>
{{interface }} {{status}} {{priority}} {{oper_key}} {{sys_id}}, {{ mac | MAC }} {{flag}}
  </group>
 </group>
</group>
"""
    parser = ttp(data, template)
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    assert res == [[{'portchannel': {'1': {'local_members': [{'flag': '{ACG}',
                                                              'interface': 'GE6/0/1',
                                                              'oper_key': '1',
                                                              'priority': '32768',
                                                              'status': 'U'},
                                                             {'flag': '{ACG}',
                                                              'interface': 'GE6/0/2',
                                                              'oper_key': '1',
                                                              'priority': '32768',
                                                              'status': 'U'}],
                                           'remote_members': [{'flag': '{EF}',
                                                               'interface': 'GE6/0/1',
                                                               'mac': '0000-0000-0000',
                                                               'oper_key': '0',
                                                               'priority': '32768',
                                                               'status': '0',
                                                               'sys_id': '0x8000'},
                                                              {'flag': '{EF}',
                                                               'interface': 'GE6/0/2',
                                                               'mac': '0000-0000-0000',
                                                               'oper_key': '0',
                                                               'priority': '32768',
                                                               'status': '0',
                                                               'sys_id': '0x8000'}]},
                                     '2': {'local_members': [{'flag': '{ACG}',
                                                              'interface': 'GE6/0/3',
                                                              'oper_key': '2',
                                                              'priority': '32768',
                                                              'status': 'U'},
                                                             {'flag': '{ACG}',
                                                              'interface': 'GE6/0/4',
                                                              'oper_key': '2',
                                                              'priority': '32768',
                                                              'status': 'U'}],
                                           'remote_members': [{'flag': '{EF}',
                                                               'interface': 'GE6/0/3',
                                                               'mac': '0000-0000-0000',
                                                               'oper_key': '0',
                                                               'priority': '32768',
                                                               'status': '0',
                                                               'sys_id': '0x8000'},
                                                              {'flag': '{EF}',
                                                               'interface': 'GE6/0/4',
                                                               'mac': '0000-0000-0000',
                                                               'oper_key': '0',
                                                               'priority': '32768',
                                                               'status': '0',
                                                               'sys_id': '0x8000'}]}}}]]
# test_issue_57_one_more_empty_dict_in_res() |
tests/unit_tests/test_pin.py | norberto-schmidt/openmc | 262 | 80258 | <filename>tests/unit_tests/test_pin.py
"""
Tests for constructing Pin universes
"""
import numpy as np
import pytest
import openmc
from openmc.model import pin
def get_pin_radii(pin_univ):
    """Collect every surface radius used by *pin_univ*, sorted ascending."""
    unique_radii = {
        surface.r
        for cell in pin_univ.get_all_cells().values()
        for surface in cell.region.get_surfaces().values()
    }
    return sorted(unique_radii)
@pytest.fixture
def pin_mats():
    """Fuel, cladding, and coolant materials for building test pins."""
    uo2 = openmc.Material(name="UO2")
    uo2.volume = 100
    zirc = openmc.Material(name="zirc")
    zirc.volume = 100
    coolant = openmc.Material(name="water")
    return uo2, zirc, coolant
@pytest.fixture
def good_radii():
    """Strictly increasing fuel / clad outer radii accepted by pin()."""
    return (0.4, 0.42)
def test_failure(pin_mats, good_radii):
    """Check for various failure modes.
    Each malformed call to pin() should raise, with an error message
    pointing at the offending argument or surface index.
    """
    good_surfaces = [openmc.ZCylinder(r=r) for r in good_radii]
    # Bad material type
    with pytest.raises(TypeError):
        pin(good_surfaces, [mat.name for mat in pin_mats])
    # Incorrect lengths
    with pytest.raises(ValueError, match="length"):
        pin(good_surfaces[:len(pin_mats) - 2], pin_mats)
    # Non-positive radii
    rad = [openmc.ZCylinder(r=-0.1)] + good_surfaces[1:]
    with pytest.raises(ValueError, match="index 0"):
        pin(rad, pin_mats)
    # Non-increasing radii
    surfs = tuple(reversed(good_surfaces))
    with pytest.raises(ValueError, match="index 1"):
        pin(surfs, pin_mats)
    # Bad orientation: mixing cylinder axes is rejected
    surfs = [openmc.XCylinder(r=good_surfaces[0].r)] + good_surfaces[1:]
    with pytest.raises(TypeError, match="surfaces"):
        pin(surfs, pin_mats)
    # Passing cells argument
    with pytest.raises(ValueError, match="Cells"):
        pin(surfs, pin_mats, cells=[])
def test_pins_of_universes(pin_mats, good_radii):
    """Build a pin with a Universe in one ring"""
    u1 = openmc.Universe(cells=[openmc.Cell(fill=pin_mats[1])])
    # Replace the ring-1 material with a universe wrapping that same material
    new_items = pin_mats[:1] + (u1, ) + pin_mats[2:]
    new_pin = pin(
        [openmc.ZCylinder(r=r) for r in good_radii], new_items,
        subdivisions={0: 2}, divide_vols=True)
    # Original rings plus one extra cell from splitting ring 0 in two
    assert len(new_pin.cells) == len(pin_mats) + 1
@pytest.mark.parametrize(
    "surf_type", [openmc.ZCylinder, openmc.XCylinder, openmc.YCylinder])
def test_subdivide(pin_mats, good_radii, surf_type):
    """Test ring subdivision for every cylinder orientation.

    Checks that subdividing a ring into N sub-rings adds N - 1 cells,
    splits the ring material's volume N ways, and produces sub-rings of
    equal cross-sectional area (equal differences of squared radii).
    """
    surfs = [surf_type(r=r) for r in good_radii]
    fresh = pin(surfs, pin_mats, name="fresh pin")
    assert len(fresh.cells) == len(pin_mats)
    assert fresh.name == "fresh pin"
    # subdivide inner region
    N = 5
    div0 = pin(surfs, pin_mats, {0: N})
    assert len(div0.cells) == len(pin_mats) + N - 1
    # Check volume of fuel material
    for mid, mat in div0.get_all_materials().items():
        if mat.name == "UO2":
            assert mat.volume == pytest.approx(100 / N)
    # check volumes of new rings: equal areas => equal diffs of r**2
    radii = get_pin_radii(div0)
    bounds = [0] + radii[:N]
    sqrs = np.square(bounds)
    assert np.all(sqrs[1:] - sqrs[:-1] == pytest.approx(good_radii[0] ** 2 / N))
    # subdivide non-inner most region
    new_pin = pin(surfs, pin_mats, {1: N})
    assert len(new_pin.cells) == len(pin_mats) + N - 1
    # Check volume of clad material on the newly subdivided pin.
    # Bug fix: this previously iterated div0 (a copy/paste slip) and only
    # passed because the shared fixture materials are mutated in place.
    for mid, mat in new_pin.get_all_materials().items():
        if mat.name == "zirc":
            assert mat.volume == pytest.approx(100 / N)
    # check volumes of new rings
    radii = get_pin_radii(new_pin)
    sqrs = np.square(radii[:N + 1])
    assert np.all(sqrs[1:] - sqrs[:-1] == pytest.approx(
        (good_radii[1] ** 2 - good_radii[0] ** 2) / N))
|
mayan/apps/common/exceptions.py | eshbeata/open-paperless | 2,743 | 80273 | <gh_stars>1000+
from __future__ import unicode_literals
class BaseCommonException(Exception):
    """Root of the common app's exception hierarchy.

    Catch this type to trap any error raised by the common app.
    """
class NotLatestVersion(BaseCommonException):
    """
    The installed version is not the latest available version
    """
    def __init__(self, upstream_version):
        # Pass the version to the base Exception so that .args, str()
        # and pickling of the exception carry the detail instead of
        # being empty.
        super(NotLatestVersion, self).__init__(upstream_version)
        # The newer version available upstream.
        self.upstream_version = upstream_version
|
util/cm_list.py | ValkA/audio-analyzer-for-android | 260 | 80282 | <gh_stars>100-1000
#!/usr/bin/env python
# Ref:
# Origin (mpl colormaps):
# https://bids.github.io/colormap/
# Raw data:
# https://github.com/BIDS/colormap/blob/master/colormaps.py
import csv
import numpy as np
#import matplotlib.pyplot as plt # matplotlib ver >= 1.5
# Colormap names to convert; each needs a matching <name>.csv of RGB rows.
names = ['magma', 'inferno', 'plasma', 'viridis', 'blackbody_uniform']
for c_name in names:
    # cmap = np.array(plt.get_cmap(c_name).colors) # if use matplotlib
    # Each CSV row is one RGB triple of floats in [0, 1].
    # NOTE(review): opening in 'rb' for csv.reader is a Python-2 idiom, and
    # np.float is removed in NumPy >= 1.24 — confirm the target versions.
    with open(c_name+'.csv', 'rb') as f:
        reader = csv.reader(f)
        cmap = np.array(list(reader)).astype(np.float)
    #Note: len(cmap) == 256
    # Reverse the map and scale [0, 1] floats to integer channels 0..255;
    # when the CSV is not exactly 256 rows, append a black entry instead.
    if len(cmap)==256:
        cm = np.floor(cmap[::-1] * 255.99);
    else:
        cm = np.vstack((np.floor(cmap[::-1] * 255.99),[0,0,0]))
    cm[-1] = [0, 0, 0] # make last block black
    # Pack each RGB triple into a single 0xRRGGBB integer literal.
    s = ", ".join([("0x%06x" % (c[0] * 2**16 + c[1] * 2**8 + c[2])) for c in cm])
    # Wrap the comma-separated literals at 80 characters per line.
    s2 = '\n'.join([s[0+i:80+i] for i in range(0, len(s), 80)])
    # Emit a Java-style static array declaration for copy/paste.
    print("static final int[] " + c_name + " = {\n" + s2 + "\n};\n")
|
recipes/Python/498259_Implementing_observer_pattern_yet_agathtime/recipe-498259.py | tdiprima/code | 2,023 | 80294 | <gh_stars>1000+
"""
observer module
Typical usage is as follows:
from __future__ import with_statement
from observer import consumer, observation
@consumer
def do_something_with_notification():
while True:
key, old, new = (yield)
print "%s: %s -> %s" % (key, old, new)
container = {}
# Any modification to `container`, now called `observed` in the
# body of the with statement, is sent to the coroutine
# do_something_with_notification()
with observation(observe=container,
                     notify=[do_something_with_notification()]) as observed:
modify_observed(observed)
Requires Python 2.5
Author: <NAME> (<EMAIL>)
"""
from __future__ import with_statement
from contextlib import contextmanager
import unittest
@contextmanager
def observation(observe, notify):
    """Simple boilerplate to link to the 'with' statement.
    Args:
        observe: the dictionary to watch for mutations.
        notify: sequence of primed coroutines that receive a
            (key, old, new) triple after every change.
    Yields an Observation proxy over `observe`; the proxy is closed on
    exit from the with block, detaching it from the dictionary.
    Contextlib's contextmanager decorator is a very convenient way to
    create simple context managers, specifically the __enter__ and
    __exit__ special methods.
    """
    proxy = Observation(observe, notify)
    try:
        yield proxy
    finally:
        # Invalidate the proxy even if the with body raised.
        proxy.close()
class NoneSuch(object):
    """A useful alternative to None in the case of a key being deleted or inserted.

    A falsy singleton: the class name is rebound to its sole instance
    below, and calling that instance returns itself.
    """
    def __new__(cls, *args, **kwargs):
        # Create the single shared instance lazily on first construction.
        if '_inst' not in vars(cls):
            # object.__new__ takes no extra arguments; forwarding *args
            # breaks under Python 3 and was never needed.
            cls._inst = object.__new__(cls)
        return cls._inst
    def __init__(self, *args, **kwargs): pass
    def __repr__(self): return "NoneSuch()"
    def __call__(self, *args, **kwargs): return self
    def __nonzero__(self): return False  # Python 2 truth hook
    __bool__ = __nonzero__  # same falsy behaviour under Python 3
NoneSuch = NoneSuch()
class Observation(object):
    """Enables observation of dictionaries.
    Proxies the `observe` dictionary such that any modifications to
    it are sent via `send()` to the notifiers in the `notify`
    sequence. The sent value is a triple (key, old, new).
    Notifications are sent AFTER the change.
    Other mutable containers, such as sets and lists or your custom
    container, can be readily added by supporting their interface.
    """
    def __init__(self, observe, notify):
        # _obj is the observed mapping; it is set to None by close() and
        # every method below raises ValueError once that has happened.
        self._obj = observe
        self.notify = notify
    def close(self):
        # Detach from the observed dict; the proxy is unusable afterwards.
        self._obj = None
        self.notify = None
    def __iter__(self):
        if self._obj is None: raise ValueError("Operation on closed observation")
        return iter(self._obj)
    # all mutating methods go here, this list should be comprehensive as of 2.5
    def __delitem__(self, K):
        if self._obj is None: raise ValueError("Operation on closed observation")
        old = self._obj[K]
        del self._obj[K]
        # Deletion is reported with NoneSuch as the new value.
        for notify in self.notify:
            notify.send((K, old, NoneSuch))
    def __setitem__(self, K, V):
        if self._obj is None: raise ValueError("Operation on closed observation")
        # Insertion of a new key is reported with NoneSuch as the old value.
        old = self._obj.get(K, NoneSuch)
        self._obj[K] = V
        for notify in self.notify:
            notify.send((K, old, V))
    def setdefault(self, K, default):
        if self._obj is None: raise ValueError("Operation on closed observation")
        try:
            return self._obj[K]
        except KeyError:
            self._obj[K] = default
            for notify in self.notify:
                notify.send((K, NoneSuch, default))
            # NOTE(review): unlike dict.setdefault, the insert branch falls
            # through and returns None rather than `default` — possible bug.
    def clear(self):
        if self._obj is None: raise ValueError("Operation on closed observation")
        # Snapshot the items first, then notify once per removed key.
        items = self._obj.items()
        self._obj.clear()
        for K, old in items:
            for notify in self.notify:
                notify.send((K, old, NoneSuch))
    def update(self, *seq_or_map, **kw):
        from itertools import chain
        if self._obj is None: raise ValueError("Operation on closed observation")
        # NOTE(review): iteritems() is Python-2-only, consistent with the
        # module's "Requires Python 2.5" header.
        try: seq = seq_or_map[0].iteritems()
        except IndexError: seq = ((K,None) for K in seq_or_map)
        # Keyword arguments are applied after the mapping, so they override.
        for K, V in chain(seq, kw.iteritems()):
            old = self._obj.get(K, NoneSuch)
            self._obj[K] = V
            for notify in self.notify:
                notify.send((K, old, V))
    def pop(self, K, *default):
        if self._obj is None: raise ValueError("Operation on closed observation")
        # this may be unexpected to have old be the default
        # value. what do you think?
        if default:
            old = self._obj.pop(K, default[0])
        else:
            old = self._obj.pop(K)
        for notify in self.notify:
            notify.send((K, old, NoneSuch))
        return old
    def popitem(self):
        if self._obj is None: raise ValueError("Operation on closed observation")
        K,old = self._obj.popitem()
        for notify in self.notify:
            notify.send((K, old, NoneSuch))
        # NOTE(review): returns only the value, not the (key, value) pair
        # that dict.popitem returns — confirm this asymmetry is intended.
        return old
    def __contains__(self, K):
        if self._obj is None: raise ValueError("Operation on closed observation")
        return K in self._obj
    def __getitem__(self, K):
        if self._obj is None: raise ValueError("Operation on closed observation")
        return self._obj[K]
    def __len__(self):
        if self._obj is None: raise ValueError("Operation on closed observation")
        return len(self._obj)
    # otherwise, just pass through
    def __getattr__(self, attrib):
        if self._obj is None: raise ValueError("Operation on closed observation")
        return getattr(self._obj, attrib)
def consumer(func):
    """A decorator, advances func to its first yield point when called.
    Modifed this original example code from PEP 342 to use the new
    functools.wraps decorator. This convenience function makes it look
    like the original function, which is almost always what we want,
    especially if we designed the original function to be wrapped in
    the first place!
    Maybe `consumer` should go into functools too!
    """
    from functools import wraps
    @wraps(func)
    def wrapper(*args,**kw):
        gen = func(*args, **kw)
        # Prime the coroutine so it is ready to accept send(); next(gen)
        # works on Python 2.6+ and 3.x, unlike the Python-2-only
        # gen.next() spelling used originally.
        next(gen)
        return gen
    return wrapper
class ObserverTestCase(unittest.TestCase):
    """Tests observer module, special emphasis on dictionary protocol.
    We keep the tests monolithic, just RunTest(), to keep the scope of
    the with statement visible and simple.
    """
    def runTest(self):
        from collections import deque
        changes = deque()
        # Drain a deque into a list (the coroutine below appends to it).
        def consume(X):
            def _consume(X):
                while X:
                    yield X.popleft()
            return list(_consume(X))
        # Coroutine that records every (key, old, new) notification.
        @consumer
        def observe_changes():
            while True:
                change = (yield)
                changes.append(change)
        fruits = dict(apple=1, banana=2, cherry=3)
        with observation(observe=fruits, notify=[observe_changes()]) as observed_fruits:
            # typical mutations
            observed_fruits['cherry'] *= 2
            del observed_fruits['apple']
            self.assertEquals(consume(changes), [('cherry', 3, 6), ('apple', 1, NoneSuch)])
            # .update with keyword args
            observed_fruits.update(durian=4, figs=5)
            self.assertEquals(fruits['durian'], 4)
            # .clear
            observed_fruits.clear()
            self.assertEquals(len(observed_fruits), 0)
            consume(changes) # keep it simple, just throw away
            # .update with map and keyword args, kw should override
            # NOTE(review): the expected ordering below depends on the py2
            # iteration order of the literal dict — fragile across versions.
            observed_fruits.update({'grapefruit':6, 'jackfruit':7}, jackfruit=8)
            self.assertEquals(observed_fruits['jackfruit'], 8)
            self.assertEquals(consume(changes), [('jackfruit', NoneSuch, 7), ('grapefruit', NoneSuch, 6), ('jackfruit', 7, 8)])
            # .pop, default here may be controversial
            observed_fruits.pop('durian', None)
            self.assertEquals(consume(changes), [('durian', None, NoneSuch)])
            # .setdefault: only the key that was actually inserted notifies
            observed_fruits.setdefault('jackfruit', -1)
            observed_fruits.setdefault('kiwi', 9)
            self.assertEquals(consume(changes), [('kiwi', NoneSuch, 9)])
            # .popitem
            while observed_fruits:
                observed_fruits.popitem()
            self.assertEquals(fruits, dict())
            # verify that outside of with statement scope, the observation
            # is closed
            self.assertRaises(ValueError, lambda: observed_fruits.update(foo=0, fum=1))
# Run the module's unittest suite when executed as a script.
if __name__ == "__main__":
    unittest.main()
|
tests/text/test_chrf.py | stancld/metrics | 769 | 80306 | from functools import partial
from typing import Sequence
import pytest
from torch import Tensor, tensor
from tests.text.helpers import TextTester
from tests.text.inputs import _inputs_multiple_references, _inputs_single_sentence_multiple_references
from torchmetrics.functional.text.chrf import chrf_score
from torchmetrics.text.chrf import CHRFScore
from torchmetrics.utilities.imports import _SACREBLEU_AVAILABLE
if _SACREBLEU_AVAILABLE:
from sacrebleu.metrics import CHRF
def sacrebleu_chrf_fn(
    preds: Sequence[str],
    targets: Sequence[Sequence[str]],
    char_order: int,
    word_order: int,
    lowercase: bool,
    whitespace: bool,
) -> Tensor:
    """Compute corpus chrF with sacrebleu as the reference implementation.
    Args:
        preds: predicted sentences.
        targets: one list of reference sentences per prediction.
        char_order: character n-gram order passed to sacrebleu's CHRF.
        word_order: word n-gram order passed to sacrebleu's CHRF.
        lowercase: lowercase inputs before scoring.
        whitespace: whitespace handling flag passed to sacrebleu's CHRF.
    Returns:
        The corpus chrF score rescaled from sacrebleu's 0-100 range to 0-1,
        as a tensor (eps smoothing enabled to match torchmetrics).
    """
    sacrebleu_chrf = CHRF(
        char_order=char_order, word_order=word_order, lowercase=lowercase, whitespace=whitespace, eps_smoothing=True
    )
    # Sacrebleu CHRF expects different format of input:
    # one list per reference position rather than one list per prediction.
    targets = [[target[i] for target in targets] for i in range(len(targets[0]))]
    sacrebleu_chrf = sacrebleu_chrf.corpus_score(preds, targets).score / 100
    return tensor(sacrebleu_chrf)
@pytest.mark.parametrize(
    ["char_order", "word_order", "lowercase", "whitespace"],
    [
        (6, 2, False, False),
        (6, 2, False, True),
        (4, 2, True, False),
        (6, 0, True, False),
        (6, 0, True, True),
        (4, 0, False, True),
    ],
)
@pytest.mark.parametrize(
    ["preds", "targets"],
    [(_inputs_multiple_references.preds, _inputs_multiple_references.targets)],
)
@pytest.mark.skipif(not _SACREBLEU_AVAILABLE, reason="test requires sacrebleu")
class TestCHRFScore(TextTester):
    """Checks CHRFScore / chrf_score against sacrebleu's CHRF reference."""
    @pytest.mark.parametrize("ddp", [False, True])
    @pytest.mark.parametrize("dist_sync_on_step", [False, True])
    def test_chrf_score_class(
        self, ddp, dist_sync_on_step, preds, targets, char_order, word_order, lowercase, whitespace
    ):
        # Module (class-based) API, including ddp and sync-on-step modes.
        metric_args = {
            "n_char_order": char_order,
            "n_word_order": word_order,
            "lowercase": lowercase,
            "whitespace": whitespace,
        }
        nltk_metric = partial(
            sacrebleu_chrf_fn, char_order=char_order, word_order=word_order, lowercase=lowercase, whitespace=whitespace
        )
        self.run_class_metric_test(
            ddp=ddp,
            preds=preds,
            targets=targets,
            metric_class=CHRFScore,
            sk_metric=nltk_metric,
            dist_sync_on_step=dist_sync_on_step,
            metric_args=metric_args,
        )
    def test_chrf_score_functional(self, preds, targets, char_order, word_order, lowercase, whitespace):
        # Functional API against the same sacrebleu reference.
        metric_args = {
            "n_char_order": char_order,
            "n_word_order": word_order,
            "lowercase": lowercase,
            "whitespace": whitespace,
        }
        nltk_metric = partial(
            sacrebleu_chrf_fn, char_order=char_order, word_order=word_order, lowercase=lowercase, whitespace=whitespace
        )
        self.run_functional_metric_test(
            preds,
            targets,
            metric_functional=chrf_score,
            sk_metric=nltk_metric,
            metric_args=metric_args,
        )
    def test_chrf_score_differentiability(self, preds, targets, char_order, word_order, lowercase, whitespace):
        # Differentiability harness (no reference metric needed).
        metric_args = {
            "n_char_order": char_order,
            "n_word_order": word_order,
            "lowercase": lowercase,
            "whitespace": whitespace,
        }
        self.run_differentiability_test(
            preds=preds,
            targets=targets,
            metric_module=CHRFScore,
            metric_functional=chrf_score,
            metric_args=metric_args,
        )
def test_chrf_empty_functional():
    """chrF over an empty corpus is defined to be zero (functional API)."""
    assert chrf_score([], [[]]) == tensor(0.0)
def test_chrf_empty_class():
    """chrF over an empty corpus is defined to be zero (module API)."""
    metric = CHRFScore()
    assert metric([], [[]]) == tensor(0.0)
def test_chrf_return_sentence_level_score_functional():
    """chrf_score(..., return_sentence_level_score=True) yields a Tensor of per-sentence scores."""
    hyp = _inputs_single_sentence_multiple_references.preds
    ref = _inputs_single_sentence_multiple_references.targets
    _, chrf_sentence_score = chrf_score(hyp, ref, return_sentence_level_score=True)
    # Bug fix: the bare `isinstance(...)` expression was evaluated and
    # discarded, so the check never actually asserted anything.
    assert isinstance(chrf_sentence_score, Tensor)
def test_chrf_return_sentence_level_class():
    """CHRFScore(return_sentence_level_score=True) yields a Tensor of per-sentence scores."""
    chrf = CHRFScore(return_sentence_level_score=True)
    hyp = _inputs_single_sentence_multiple_references.preds
    ref = _inputs_single_sentence_multiple_references.targets
    _, chrf_sentence_score = chrf(hyp, ref)
    # Bug fix: the bare `isinstance(...)` expression was evaluated and
    # discarded, so the check never actually asserted anything.
    assert isinstance(chrf_sentence_score, Tensor)
|
usaspending_api/references/account_helpers.py | g4brielvs/usaspending-api | 217 | 80329 | from django.db import connection
from usaspending_api.common.etl import ETLQuery, ETLTable
from usaspending_api.common.etl.operations import delete_obsolete_rows, insert_missing_rows, update_changed_rows
# This is basically the desired final state of the federal_account table. We can diff this against the
# actual federal_account table and make corrections as appropriate to bring the federal_account table
# into line. Since the treasury_appropriation_account and federal_account tables are fairly small, we
# can perform full diffs with no noticeable performance impact. This sort order is dictated by DEV-3495.
FEDERAL_ACCOUNTS_FROM_TREASURY_ACCOUNTS_SQL = """
select
distinct on (agency_id, main_account_code)
agency_id as agency_identifier,
main_account_code,
concat(agency_id, '-', main_account_code) as federal_account_code,
account_title
from
treasury_appropriation_account
order by
agency_id,
main_account_code,
beginning_period_of_availability desc nulls last,
ending_period_of_availability desc nulls last,
sub_account_code,
allocation_transfer_agency_id,
treasury_account_identifier desc
"""
# Source side of the diff: the desired federal_account rows derived from TAS.
source_federal_account_query = ETLQuery(FEDERAL_ACCOUNTS_FROM_TREASURY_ACCOUNTS_SQL)
# Destination side: the live federal_account table, keyed on
# (agency_identifier, main_account_code) rather than the surrogate id.
destination_federal_account_table = ETLTable(
    "federal_account", key_overrides=["agency_identifier", "main_account_code"]
)
def remove_empty_federal_accounts():
    """
    Removes federal accounts that are no longer attached to a TAS.

    Returns:
        Number of rows deleted
    """
    return delete_obsolete_rows(source_federal_account_query, destination_federal_account_table)
def update_federal_accounts():
    """
    Update existing federal account records based on the latest information
    from the TreasuryAppropriationAccount (TAS) table. The account title
    for each federal account should reflect the account title of a related
    TAS with the most recent beginning period of availability.

    Returns:
        Number of rows updated
    """
    return update_changed_rows(source_federal_account_query, destination_federal_account_table)
def insert_federal_accounts():
    """
    Insert new federal accounts records based on the TreasuryAppropriationAccount
    (TAS) table. Each TAS maps to a higher-level federal account, defined
    by a unique combination of TAS agency_id (AID) and TAS main account
    code (MAC).

    Returns:
        Number of rows inserted (as reported by insert_missing_rows)
    """
    return insert_missing_rows(source_federal_account_query, destination_federal_account_table)
def link_treasury_accounts_to_federal_accounts():
    """
    Federal accounts are derived from AID (agency identifier) + MAIN (main account code) in treasury accounts.
    Using this information, we can link treasury accounts to their corresponding federal account and correct
    any accounts that may be mis-linked. Since these tables are relatively small, we can simply perform full
    updates with little to no noticeable performance impact.

    Returns:
        Number of treasury account rows whose federal_account_id was
        corrected (cursor.rowcount of the update).
    """
    with connection.cursor() as cursor:
        # The `is distinct from` predicate limits the update to rows whose
        # link actually changed (and handles NULLs on either side).
        cursor.execute(
            """
            update  treasury_appropriation_account as tu
            set     federal_account_id = fa.id
            from    treasury_appropriation_account as t
            left outer join federal_account as fa on
                    t.agency_id = fa.agency_identifier and
                    t.main_account_code = fa.main_account_code
            where   tu.treasury_account_identifier = t.treasury_account_identifier and
                    tu.federal_account_id is distinct from fa.id;
            """
        )
        return cursor.rowcount
|
python/test/mapreduce/util_test.py | Batterii/appengine-mapreduce | 228 | 80331 | #!/usr/bin/env python
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=g-bad-name
import datetime
import os
import sys
import unittest
from google.appengine.api import taskqueue
from mapreduce import model
from mapreduce import parameters
from mapreduce import util
class TestHandler(object):
  """Callable handler exposing plain, static, and class methods.

  Serves as a lookup/serialization target for the tests below.
  """

  def __call__(self, entity):
    """No-op call protocol."""
    return None

  def process(self, entity):
    """No-op instance method."""
    return None

  @staticmethod
  def process2(entity):
    """No-op staticmethod."""
    return None

  @classmethod
  def process3(cls):
    """No-op classmethod."""
    return None
# pylint: disable=unused-argument
def test_handler_function(entity):
"""Empty test handler function."""
pass
class TestHandlerWithArgs(object):
  """Handler whose constructor requires one (ignored) argument."""

  def __init__(self, arg_unused):
    """Accept and discard the constructor argument."""
    pass

  def process(self, entity):
    """No-op process method."""
    return None
# pylint: disable=g-old-style-class
class TestHandlerOldStyle():
  """Old style class."""
  # NOTE(review): with empty parentheses and no base this is an old-style
  # (classic) class under Python 2; SerializeHandlerTest serializes an
  # instance of it, so do not add `object` as a base.
  def __call__(self, entity):
    pass
# pylint: disable=unused-argument
def test_handler_yield(entity):
  """Handler generator producing the fixed values 1 then 2."""
  for value in (1, 2):
    yield value
class MockMapreduceSpec:
  """Minimal MapreduceSpec stand-in exposing only ``params``."""

  def __init__(self):
    """Start with an empty parameter mapping."""
    self.params = {}
class ForNameTest(unittest.TestCase):
  """Test util.for_name function."""
  # NOTE(review): the `except ImportError, e` form below is Python-2-only
  # syntax, consistent with the rest of this test module.
  def testClassName(self):
    """Test passing fq class name."""
    self.assertEquals(TestHandler, util.for_name("__main__.TestHandler"))
  def testFunctionName(self):
    """Test passing function name."""
    self.assertEquals(test_handler_function,
                      util.for_name("__main__.test_handler_function"))
  def testMethodName(self):
    """Test passing method name."""
    self.assertEquals(TestHandler.process,
                      util.for_name("__main__.TestHandler.process"))
  def testClassWithArgs(self):
    """Test passing method name of class with constructor args."""
    self.assertEquals(TestHandlerWithArgs.process,
                      util.for_name("__main__.TestHandlerWithArgs.process"))
  def testBadModule(self):
    """Tests when the module name is bogus."""
    try:
      util.for_name("this_is_a_bad_module_name.stuff")
    except ImportError, e:
      self.assertEquals(
          "Could not find 'stuff' on path 'this_is_a_bad_module_name'",
          str(e))
    else:
      self.fail("Did not raise exception")
  def testBadFunction(self):
    """Tests when the module name is good but the function is missing."""
    try:
      util.for_name("__main__.does_not_exist")
    except ImportError, e:
      self.assertEquals(
          "Could not find 'does_not_exist' on path '__main__'",
          str(e))
    else:
      self.fail("Did not raise exception")
  def testBadClass(self):
    """Tests when the class is found but the function name is missing."""
    try:
      util.for_name("__main__.TestHandlerWithArgs.missing")
    except ImportError, e:
      self.assertEquals(
          "Could not find 'missing' on path '__main__.TestHandlerWithArgs'",
          str(e))
    else:
      self.fail("Did not raise exception")
  def testGlobalName(self):
    """Tests when the name has no dots in it."""
    try:
      util.for_name("this_is_a_bad_module_name")
    except ImportError, e:
      self.assertTrue(str(e).startswith(
          "Could not find 'this_is_a_bad_module_name' on path "))
    else:
      self.fail("Did not raise exception")
class TestGetQueueName(unittest.TestCase):
  """Tests util.get_queue_name."""
  def testGetQueueName(self):
    # An explicit queue name wins outright.
    self.assertEqual("foo", util.get_queue_name("foo"))
    # With no explicit name, fall back to the task queue request header.
    os.environ["HTTP_X_APPENGINE_QUEUENAME"] = "foo"
    self.assertEqual("foo", util.get_queue_name(None))
    # A reserved header value such as __cron falls back to the
    # configured parameters.config.QUEUE_NAME.
    os.environ["HTTP_X_APPENGINE_QUEUENAME"] = "__cron"
    self.assertEqual(parameters.config.QUEUE_NAME, util.get_queue_name(None))
    # NOTE(review): os.environ is mutated without cleanup — later tests
    # that read this header may be affected; consider a tearDown.
class SerializeHandlerTest(unittest.TestCase):
  """Test util.try_*serialize_handler works on various types."""

  def testNonSerializableTypes(self):
    # None of these handler flavors can be serialized: plain function,
    # unbound method, bound method, class method, and static method
    # (which is really just a function).
    unserializable_handlers = [
        test_handler_function,
        TestHandler.process,
        TestHandler().process,
        TestHandler.process3,
        TestHandler.process2,
    ]
    for handler in unserializable_handlers:
      self.assertEquals(None, util.try_serialize_handler(handler))

  def testSerializableTypes(self):
    # Callable instances (both new- and old-style classes) survive a
    # serialize/deserialize round trip.
    for instance in (TestHandler(), TestHandlerOldStyle()):
      serialized = util.try_serialize_handler(instance)
      self.assertNotEquals(None, util.try_deserialize_handler(serialized))
class IsGeneratorFunctionTest(unittest.TestCase):
  """Test util.is_generator function."""

  def testGenerator(self):
    # A function that yields is reported as a generator.
    result = util.is_generator(test_handler_yield)
    self.assertTrue(result)

  def testNotGenerator(self):
    # A plain function is not.
    result = util.is_generator(test_handler_function)
    self.assertFalse(result)
class GetTaskHeadersTest(unittest.TestCase):
  """Tests for util._get_task_host and util._get_task_headers."""

  def setUp(self):
    super(GetTaskHeadersTest, self).setUp()
    # Fake App Engine environment: version id "v7.1" of module
    # "foo-module" served from foo.appspot.com.
    os.environ["CURRENT_VERSION_ID"] = "v7.1"
    os.environ["CURRENT_MODULE_ID"] = "foo-module"
    os.environ["DEFAULT_VERSION_HOSTNAME"] = "foo.appspot.com"

  def testGetTaskHost(self):
    # Host is <major version>.<module>.<default hostname>; the ".1"
    # minor part of the version id is dropped (per the asserted value).
    self.assertEqual("v7.foo-module.foo.appspot.com", util._get_task_host())
    task = taskqueue.Task(url="/relative_url",
                          headers={"Host": util._get_task_host()})
    self.assertEqual("v7.foo-module.foo.appspot.com",
                     task.headers["Host"])
    self.assertEqual("v7.foo-module", task.target)

  def testGetTaskHostDefaultModule(self):
    # For the "default" module the module segment is omitted from the host.
    os.environ["CURRENT_MODULE_ID"] = "default"
    self.assertEqual("v7.foo.appspot.com", util._get_task_host())
    task = taskqueue.Task(url="/relative_url",
                          headers={"Host": util._get_task_host()})
    self.assertEqual("v7.foo.appspot.com",
                     task.headers["Host"])
    self.assertEqual("v7", task.target)

  def testGetTaskHeaders(self):
    mr_spec = model.MapreduceSpec(
        name="foo", mapreduce_id="foo_id",
        mapper_spec=model.MapperSpec("foo", "foo", {}, 8).to_json())
    task = taskqueue.Task(url="/relative_url",
                          headers=util._get_task_headers(mr_spec.mapreduce_id))
    # The headers carry the mapreduce id plus the routing Host header.
    self.assertEqual("foo_id", task.headers[util._MR_ID_TASK_HEADER])
    self.assertEqual("v7.foo-module.foo.appspot.com",
                     task.headers["Host"])
    self.assertEqual("v7.foo-module", task.target)
class GetShortNameTest(unittest.TestCase):
  """Test util.get_short_name function."""

  def testGetShortName(self):
    # Everything up to and including the last dot is stripped.
    for fq_name in ("blah", ".blah", "__mmm__.blah", "__mmm__.Krb.blah"):
      self.assertEquals("blah", util.get_short_name(fq_name))
class TotalSecondsTest(unittest.TestCase):
  """Test util.total_seconds."""

  def testTotalSeconds(self):
    one_day = 24 * 60 * 60
    self.assertEqual(
        one_day + 1,
        util.total_seconds(datetime.timedelta(days=1, seconds=1)))
    # A fractional second is rounded up to the next whole second.
    self.assertEqual(
        one_day + 2,
        util.total_seconds(datetime.timedelta(days=1, seconds=1,
                                              microseconds=1)))
class ParseBoolTest(unittest.TestCase):
  """Test util.parse_bool function."""

  def testParseBool(self):
    # Booleans pass through; strings and ints are interpreted.
    cases = [
        (True, True),
        (False, False),
        ("True", True),
        ("False", False),
        (1, True),
        (0, False),
        ("on", True),
        ("off", False),
    ]
    for value, expected in cases:
      self.assertEquals(expected, util.parse_bool(value))
class CreateConfigTest(unittest.TestCase):
  """Test create_datastore_write_config function."""

  def setUp(self):
    super(CreateConfigTest, self).setUp()
    self.spec = MockMapreduceSpec()

  def _make_config(self):
    # Build the datastore write config under test from the current spec.
    return util.create_datastore_write_config(self.spec)

  def testDefaultConfig(self):
    # By default a truthy config is returned with force_writes disabled.
    config = self._make_config()
    self.assertTrue(config)
    self.assertFalse(config.force_writes)

  def testForceWrites(self):
    # The "force_writes" spec parameter turns the flag on.
    self.spec.params["force_writes"] = "True"
    config = self._make_config()
    self.assertTrue(config)
    self.assertTrue(config.force_writes)
class FooClass(object):
  """Top-level class used as a resolution target by ObjToPathTest."""
  pass
class ObjToPathTest(unittest.TestCase):
  """Tests for util._obj_to_path."""

  def setUp(self):
    super(ObjToPathTest, self).setUp()
    # Keep a reference to sys.modules so tearDown can restore it.
    self.sys_modules = sys.modules

  def tearDown(self):
    super(ObjToPathTest, self).tearDown()
    sys.modules = self.sys_modules

  def testBasic(self):
    # None passes through; top-level classes and functions resolve to
    # their dotted module path.
    self.assertEqual(None, util._obj_to_path(None))
    self.assertEqual("__main__.FooClass", util._obj_to_path(FooClass))
    self.assertEqual("__main__.test_handler_function",
                     util._obj_to_path(test_handler_function))

  # Nested targets used by the tests below; they are deliberately NOT
  # top-level so _obj_to_path cannot resolve them.
  @staticmethod
  def foo():
    pass

  class FooClass2(object):
    pass

  def testNotTopLevel(self):
    self.assertRaises(ValueError, util._obj_to_path, self.FooClass2)

  def testNotTopLevel2(self):
    self.assertRaises(ValueError, util._obj_to_path, self.foo)

  def testUnexpectedType(self):
    # Bound methods are not an accepted type at all.
    self.assertRaises(TypeError, util._obj_to_path, self.testUnexpectedType)
class GetDescendingKeyTest(unittest.TestCase):
  """Tests the _get_descending_key function."""

  def testBasic(self):
    """Basic test of the function."""
    now = 1234567890
    # Pin the request id hash so the generated key is deterministic.
    os.environ["REQUEST_ID_HASH"] = "12345678"
    self.assertEquals(
        "159453012940012345678",
        util._get_descending_key(
            gettime=lambda: now))
class StripPrefixFromItemsTest(unittest.TestCase):
  """Tests the strip_prefix_from_items function."""

  def testBasic(self):
    """Basic test of the function."""
    # Only items that actually start with the prefix are shortened;
    # "/foos/bar2" shares characters but not the full prefix.
    stripped = util.strip_prefix_from_items(
        "/foo/", ["/foo/bar", "/foos/bar2", "/bar3"])
    self.assertEquals(["bar", "/foos/bar2", "/bar3"], stripped)
if __name__ == "__main__":
  # Run the whole test suite when this module is executed directly.
  unittest.main()
|
api/curve/db.py | QiliangFan/Baidu-Curve | 478 | 80349 | <reponame>QiliangFan/Baidu-Curve
# -*- coding: utf-8 -*-
"""
Curve
~~~~
db package
:copyright: (c) 2017-2018 by Baidu, Inc.
:license: Apache, see LICENSE for more details.
"""
import flask_sqlalchemy

# Module-level SQLAlchemy handle shared by the Curve API; presumably bound
# to the Flask application elsewhere (e.g. db.init_app(app)) — TODO confirm.
db = flask_sqlalchemy.SQLAlchemy()
|
python/hls2srt.py | AngusF22/toolbox | 182 | 80356 | <gh_stars>100-1000
#!/usr/bin/python2.7
#
# Copyright 2019 Eyevinn Technology. All rights reserved
# Use of this source code is governed by a MIT License
# that can be found in the LICENSE file.
# Author: <NAME> (Eyevinn Technology)
#
# Receive HLS and stream over SRT
#
import argparse
import subprocess
from os.path import basename
import re
import glob
parser = argparse.ArgumentParser(description='Pull HLS and restream over SRT.')
parser.add_argument('hlsurl')
parser.add_argument('address')
parser.add_argument('--srtmode', dest='srtmode', help='SRT mode [caller|listener]. Default is listener')
parser.add_argument('--with-debug', dest='debug', action='store_true')
args = parser.parse_args()
# Listener is the default SRT mode; in caller mode no mode parameter is sent.
srtmode = "&mode=listener"
if args.srtmode == "caller":
    srtmode = ""
# Repackage to MPEG-TS over SRT (pkt_size=1316 is the SRT payload size used
# here — presumably 7 x 188-byte TS packets per datagram; confirm).
srtoutput = "-f mpegts srt://%s?pkt_size=1316%s" % (args.address, srtmode)
# Copy audio/video without transcoding; -re paces reading at native rate and
# +genpts regenerates presentation timestamps.
ffmpeg = "ffmpeg -fflags +genpts -re -i %s -strict -2 -y -acodec copy -vcodec copy %s " % (args.hlsurl, srtoutput)
if args.debug:
    print "%s" % ffmpeg
    print ffmpeg.split()
# Run ffmpeg in the foreground and block until it exits.
p1 = subprocess.Popen(ffmpeg.split())
output,err = p1.communicate()
|
core/src/main/resources/tf_algos/easytransfer/run_easytransfer_train_main.py | starburst-project/Alink | 3,301 | 80391 | <filename>core/src/main/resources/tf_algos/easytransfer/run_easytransfer_train_main.py
import os
os.environ["HOME"] = os.path.expanduser('~')
from akdl.models.tf.easytransfer import easytransfer_main
from akdl.runner.config import TrainTaskConfig
def main(task_config: TrainTaskConfig) -> None:
    """Delegate EasyTransfer training to easytransfer_main with the given task config."""
    easytransfer_main.main(task_config)
|
tests/lists_tests.py | cockcrow/python-mammoth | 557 | 80400 | from nose.tools import istest, assert_equal
from mammoth.lists import unique
@istest
def unique_of_empty_list_is_empty_list():
    # An empty input yields an empty output.
    result = unique([])
    assert_equal([], result)
@istest
def unique_removes_duplicates_while_preserving_order():
    # The second "apple" is dropped; first-seen order is kept.
    result = unique(["apple", "banana", "apple"])
    assert_equal(["apple", "banana"], result)
|
djangae/tests/test_storage.py | julietkb/djangae | 467 | 80438 | <gh_stars>100-1000
# coding: utf-8
# STANDARD LIB
import os
# THIRD PARTY
import requests
from django.core.files.base import (
ContentFile,
File,
)
from django.db import models
from django.test.utils import override_settings
# DJANGAE
from djangae.contrib import sleuth
from djangae.storage import (
CloudStorage,
_get_storage_client,
)
from djangae.test import TestCase
class ModelWithTextFile(models.Model):
    """Minimal model with a bare FileField, used by the storage tests."""
    class Meta:
        app_label = "djangae"

    text_file = models.FileField()
class ModelWithUploadTo(models.Model):
    """Model whose FileField stores under a nested upload_to prefix."""
    class Meta:
        app_label = "djangae"

    text_file = models.FileField(upload_to="nested/document/")
class CloudStorageTests(TestCase):
    """Integration tests for djangae's CloudStorage backend.

    These run against a Cloud Storage emulator: setUp wipes it via the
    STORAGE_EMULATOR_HOST endpoint and recreates the 'test_bucket' bucket
    so tests do not see each other's blobs.
    """

    def setUp(self):
        # Reset the emulator state before every test.
        requests.get('{}/wipe'.format(os.environ["STORAGE_EMULATOR_HOST"]))
        client = _get_storage_client()
        client.create_bucket('test_bucket')
        return super().setUp()

    def test_no_config_raises(self):
        # Without a project id, CloudStorage cannot be constructed.
        from django.core.exceptions import ImproperlyConfigured
        with sleuth.fake("djangae.storage.project_id", return_value=None):
            with self.assertRaises(ImproperlyConfigured):
                CloudStorage()

    @override_settings(CLOUD_STORAGE_BUCKET='test_bucket')
    def test_basic_actions(self):
        # Full save / exists / size / url / open / delete round trip,
        # including a non-ASCII filename.
        content = b'content'
        storage = CloudStorage()
        name = u'tmp.ąćęłńóśźż.马铃薯.zip'
        f = ContentFile(content, name='my_file')
        filename = storage.save(name, f)
        self.assertIsInstance(filename, str)
        self.assertTrue(filename.endswith(name))
        self.assertTrue(storage.exists(filename))
        self.assertEqual(storage.size(filename), len(content))
        url = storage.url(filename)
        self.assertIsInstance(url, str)
        self.assertNotEqual(url, '')
        # The URL must be fetchable over HTTP and serve the stored bytes.
        response = requests.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, content)
        f = storage.open(filename)
        self.assertIsInstance(f, File)
        self.assertEqual(f.read(), content)
        # Delete it
        storage.delete(filename)
        self.assertFalse(storage.exists(filename))

    @override_settings(CLOUD_STORAGE_BUCKET='test_bucket')
    def test_dotslash_prefix(self):
        # A leading "./" is stripped from the stored object name.
        storage = CloudStorage()
        name = './my_file'
        f = ContentFile(b'content')
        filename = storage.save(name, f)
        self.assertEqual(filename, name.lstrip("./"))

    @override_settings(CLOUD_STORAGE_BUCKET='test_bucket')
    def test_different_bucket(self):
        # An explicit bucket_name overrides settings; saving into a bucket
        # that does not exist on the emulator raises NotFound.
        from google.cloud.exceptions import NotFound
        storage = CloudStorage(bucket_name='different_test_bucket')
        name = './my_file'
        f = ContentFile(b'content')
        with self.assertRaises(NotFound) as cm:
            storage.save(name, f)
        self.assertIn('different_test_bucket', cm.exception.message)

    @override_settings(CLOUD_STORAGE_BUCKET='different_test_bucket')
    def test_different_bucket_config(self):
        # Same as above, but the missing bucket comes from settings.
        from google.cloud.exceptions import NotFound
        storage = CloudStorage()
        name = './my_file'
        f = ContentFile(b'content')
        with self.assertRaises(NotFound) as cm:
            storage.save(name, f)
        self.assertIn('different_test_bucket', cm.exception.message)

    @override_settings(CLOUD_STORAGE_BUCKET='test_bucket')
    def test_supports_nameless_files(self):
        # A ContentFile without a name must still be saveable.
        storage = CloudStorage()
        f2 = ContentFile(b'nameless-content')
        storage.save('tmp2', f2)

    @override_settings(CLOUD_STORAGE_BUCKET='test_bucket')
    def test_new_objects_get_the_default_acl(self):
        # With no google_acl argument, no predefined_acl is passed through
        # to the underlying client upload call.
        storage = CloudStorage()
        filename = 'example.txt'
        fileobj = ContentFile(b'content')
        with sleuth.watch('google.cloud.storage.blob.Blob.upload_from_file') as upload_func:
            storage.save(filename, fileobj)
            self.assertTrue(storage.exists(filename))
        self.assertIsNone(upload_func.calls[0].kwargs['predefined_acl'])

    @override_settings(CLOUD_STORAGE_BUCKET='test_bucket')
    def test_new_objects_with_an_explicit_acl(self):
        # An explicit google_acl is forwarded as predefined_acl.
        storage = CloudStorage(google_acl='publicRead')
        filename = 'example.txt'
        fileobj = ContentFile(b'content', name=filename)
        with sleuth.watch('google.cloud.storage.blob.Blob.upload_from_file') as upload_func:
            storage.save(filename, fileobj)
            self.assertTrue(storage.exists(filename))
        self.assertEqual(
            upload_func.calls[0].kwargs['predefined_acl'],
            'publicRead',
        )

    @override_settings(
        CLOUD_STORAGE_BUCKET='test_bucket',
        DEFAULT_FILE_STORAGE='djangae.storage.CloudStorage',
    )
    def test_works_with_text_file_fields(self):
        # FileField contents round-trip when CloudStorage is the default
        # storage backend.
        content = b"content"
        instance = ModelWithTextFile(
            text_file=ContentFile(content, name="my_file")
        )
        instance.save()
        fetched = ModelWithTextFile.objects.get()
        self.assertEqual(fetched.text_file.read(), content)

    @override_settings(
        CLOUD_STORAGE_BUCKET='test_bucket',
        DEFAULT_FILE_STORAGE='djangae.storage.CloudStorage',
    )
    def test_works_with_upload_to(self):
        # Same round trip through a FileField with an upload_to prefix.
        content = b"content"
        instance = ModelWithUploadTo(
            text_file=ContentFile(content, name="my_file")
        )
        instance.save()
        fetched = ModelWithUploadTo.objects.get()
        self.assertEqual(fetched.text_file.read(), content)

    @override_settings(CLOUD_STORAGE_BUCKET='test_bucket')
    def test_open_uses_correct_bucket(self):
        # A second storage instance can open a file saved by the first.
        storage = CloudStorage()
        filename = storage.save('file1', ContentFile(b'content', name='file1'))
        storage = CloudStorage()  # new instance
        storage._open(filename)

    @override_settings(CLOUD_STORAGE_BUCKET='test_bucket')
    def test_delete_uses_correct_bucket(self):
        # A second storage instance can delete a file saved by the first.
        storage = CloudStorage()
        filename = storage.save('file1', ContentFile(b'content', name='file1'))
        storage = CloudStorage()  # new instance
        storage.delete(filename)
        self.assertFalse(storage.exists(filename))
|
alipay/aop/api/domain/KoubeiRetailWmsSupplierQueryModel.py | snowxmas/alipay-sdk-python-all | 213 | 80472 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.OperateContext import OperateContext
class KoubeiRetailWmsSupplierQueryModel(object):
    """Request model for the Koubei retail WMS supplier query API."""

    def __init__(self):
        self._operate_context = None
        self._supplier_ids = None

    @property
    def operate_context(self):
        return self._operate_context

    @operate_context.setter
    def operate_context(self, value):
        # Accept either a ready OperateContext or a plain dict to convert.
        if isinstance(value, OperateContext):
            self._operate_context = value
        else:
            self._operate_context = OperateContext.from_alipay_dict(value)

    @property
    def supplier_ids(self):
        return self._supplier_ids

    @supplier_ids.setter
    def supplier_ids(self, value):
        # Only list values are accepted; a shallow copy is stored.
        if isinstance(value, list):
            self._supplier_ids = [item for item in value]

    def to_alipay_dict(self):
        """Serialize the populated fields into a plain dict."""
        params = {}
        ctx = self.operate_context
        if ctx:
            params['operate_context'] = ctx.to_alipay_dict() if hasattr(ctx, 'to_alipay_dict') else ctx
        ids = self.supplier_ids
        if ids:
            if isinstance(ids, list):
                # Convert element objects in place, mirroring the SDK style.
                for index, element in enumerate(ids):
                    if hasattr(element, 'to_alipay_dict'):
                        ids[index] = element.to_alipay_dict()
            params['supplier_ids'] = ids.to_alipay_dict() if hasattr(ids, 'to_alipay_dict') else ids
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for empty input."""
        if not d:
            return None
        model = KoubeiRetailWmsSupplierQueryModel()
        if 'operate_context' in d:
            model.operate_context = d['operate_context']
        if 'supplier_ids' in d:
            model.supplier_ids = d['supplier_ids']
        return model
|
fexm/test/test_fuzzer.py | fgsect/fexm | 105 | 80490 | <reponame>fgsect/fexm
import json
import unittest
import unittest.mock
import sys
from unittest import mock
import sys
import os
from helpers import utils
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "configfinder/")))
sys.modules[
'configfinder.builder'] = unittest.mock.Mock() # Mocking builder like so:https://stackoverflow.com/questions/8658043/how-to-mock-an-import
sys.modules[
'builder'] = unittest.mock.Mock() # Mocking builder like so:https://stackoverflow.com/questions/8658043/how-to-mock-an-import
sys.modules["config_settings.MAX_TIMEOUT_PER_PACKAGE"] = 1 # unittest.mock.Mock(MAX_TIMEOUT_PER_PACKAGE=1)
import configfinder.fuzzer_wrapper
from configfinder import minimzer
import sh
import shutil
import os
class TestAflFuzzerWrapper(unittest.TestCase):
def setUp(self):
os.makedirs("test_data", exist_ok=True)
self.volume_path = "test_data/test_output_volume"
os.makedirs(self.volume_path, exist_ok=True)
self.jpg_binary_path = "test_data/jpg_binary_main"
aflgcc = sh.Command("afl-gcc")
aflgcc("test/mock_data/input_mock/jpg_binary/main.c", "-o", self.jpg_binary_path)
self.timeout_binary_path = "test_data/timeout_binary_main"
aflgcc("test/mock_data/input_mock/timeout_binary/main.c", "-o", self.timeout_binary_path)
def tearDown(self):
shutil.rmtree("test_data")
def test_multi_core_fuzzing(self):
package_name = "jpg_parser"
binary_path = self.jpg_binary_path
parameter = "@@"
fuzz_duration = 30
seeds_dir = "test/mock_data/mock_seeds/jpg_samples"
with mock.patch("uuid.uuid4") as uuidmock:
uuidmock.return_value = "mockuuid"
fuzzer_wrapper = configfinder.fuzzer_wrapper.AflFuzzWrapper(volume_path=self.volume_path, package=package_name, binary_path=binary_path, parameter=parameter, fuzz_duration=fuzz_duration,
seeds_dir=seeds_dir, afl_config_file_path=os.path.join(self.volume_path, package_name, os.path.basename(binary_path))+".afl_conf")
fuzzer_wrapper.start_fuzzer(cores=4)
self.assertTrue(os.path.exists(os.path.join(fuzzer_wrapper.get_afl_multi_core_config_dict()["output"], fuzzer_wrapper.session_name + "000/fuzzer_stats")))
self.assertGreater(int(utils.get_afl_stats_from_syncdir(fuzzer_wrapper.multicore_dict["output"])["execs_done"]), 0)
def test_multi_core_fuzzing_timeout(self):
package_name = "timeut_jpg_parser"
binary_path = self.timeout_binary_path
parameter = "@@"
fuzz_duration = 20
seeds_dir = "test/mock_data/mock_seeds/jpg_samples"
log_dict = {}
with mock.patch("uuid.uuid4") as uuidmock:
uuidmock.return_value = "mockuuid"
fuzzer_wrapper = configfinder.fuzzer_wrapper.AflFuzzWrapper(volume_path=self.volume_path, package=package_name, binary_path=binary_path, parameter=parameter, fuzz_duration=fuzz_duration,
seeds_dir=seeds_dir, log_dict=log_dict)
self.assertFalse(fuzzer_wrapper.start_fuzzer(cores=4))
print(log_dict)
"""
class TestFuzzingWrapper(unittest.TestCase):
def test_wrong_qemu_invocation(self, ):
if os.path.exists("afl_out"):
shutil.rmtree("afl_out")
aflgcc = sh.Command("afl-gcc")
aflgcc("test/mock_data/input_mock/jpg_binary/main.c", "-o", "test/mock_data/input_mock/jpg_binary/main")
fuzzer_args = ["-Q", "-i", "test/mock_data/mock_seeds", "-o", "afl_out", "--",
"test/mock_data/input_mock/jpg_binary/main", "@@"]
self.assertEqual(
configfinder.fuzzer_wrapper.afl_fuzz_wrapper(fuzzer_args, "test/mock_data/input_mock/jpg_binary/main",
fuzz_duration=6), True)
self.assertEqual(os.path.exists("afl_out/fuzzer_stats"), True)
shutil.rmtree("afl_out")
def test_wrong_nonqemu_invocation(self, ):
if os.path.exists("afl_out"):
shutil.rmtree("afl_out")
gcc = sh.Command("gcc")
command = gcc(
["test/mock_data/input_mock/jpg_binary/main.c", "-o", "test/mock_data/input_mock/jpg_binary/main"],
_out=sys.stdout)
fuzzer_args = ["-i", "test/mock_data/mock_seeds", "-o", "afl_out", "--",
"test/mock_data/input_mock/jpg_binary/main", "@@"]
self.assertEqual(
configfinder.fuzzer_wrapper.afl_fuzz_wrapper(fuzzer_args, "test/mock_data/input_mock/jpg_binary/main",
fuzz_duration=6), True)
self.assertEqual(os.path.exists("afl_out/fuzzer_stats"), True)
shutil.rmtree("afl_out")
def test_fuzzer_normal(self):
volume_path = "test/test_output_volume"
name = "test_package"
shutil.rmtree(volume_path, ignore_errors=True)
os.makedirs(os.path.join(os.path.join(volume_path, name), "main/"))
with mock.patch("uuid.uuid4") as uuidmock:
uuidmock.return_value = "mockuuid"
configfinder.fuzzer_wrapper.prepare_and_start_fuzzer(parameter=None,
seeds_dir="test/mock_data/mock_seeds/jpg_samples",
binary_path="test/mock_data/input_mock/jpg_binary/main",
package=name, volume_path=volume_path,
afl_config_file_name="main.afl_config",
fuzz_duration=10)
with open(os.path.join(os.path.join(volume_path, name), "main.afl_config")) as testaflfp:
aflconfigdict = json.load(testaflfp)
self.assertEqual(aflconfigdict["afl_out_dir"],
"test/test_output_volume/test_package/main/afl_fuzz_mockuuid")
self.assertTrue(os.path.exists(aflconfigdict["afl_out_dir"]))
shutil.rmtree(volume_path, ignore_errors=True)
def test_fuzzer_minimized(self):
volume_path = "test/test_output_volume"
name = "main"
shutil.rmtree(volume_path, ignore_errors=True)
os.makedirs(os.path.join(os.path.join(volume_path, name), "main/"))
with mock.patch("uuid.uuid4") as uuidmock:
uuidmock.return_value = "mockuuidmin"
m = minimzer.minize(parameter="@@", seeds_dir="test/mock_data/mock_seeds/jpg_samples",
binary_path="test/mock_data/input_mock/jpg_binary/main", package=None,
volume_path=volume_path, afl_config_file_name="main.afl_config", tmin_total_time=1000)
uuidmock.return_value = "mockuuid"
configfinder.fuzzer_wrapper.prepare_and_start_fuzzer(parameter="@@",
seeds_dir="test/mock_data/mock_seeds/jpg_samples",
binary_path="test/mock_data/input_mock/jpg_binary/main",
package=None, volume_path=volume_path,
afl_config_file_name="main.afl_config",
fuzz_duration=10)
with open(os.path.join(os.path.join(volume_path, name), "main.afl_config")) as testaflfp:
aflconfigdict = json.load(testaflfp)
self.assertEqual(aflconfigdict["afl_out_dir"],
os.path.join(volume_path, name, "main/afl_fuzz_mockuuid"))
self.assertTrue(os.path.exists(aflconfigdict["afl_out_dir"]))
shutil.rmtree(volume_path, ignore_errors=True)
def test_fuzzer_resume(self):
volume_path = "test/test_output_volume"
name = "test_package"
shutil.rmtree(volume_path, ignore_errors=True)
os.makedirs(os.path.join(os.path.join(volume_path, name), "main/"))
with mock.patch("uuid.uuid4") as uuidmock:
uuidmock.return_value = "mockuuid"
configfinder.fuzzer_wrapper.prepare_and_start_fuzzer(parameter="@@",
seeds_dir="test/mock_data/mock_seeds/jpg_samples",
binary_path="test/mock_data/input_mock/jpg_binary/main",
package=name, volume_path=volume_path,
afl_config_file_name="main.afl_config",
fuzz_duration=15, timeout=1500.0)
with open(os.path.join(os.path.join(volume_path, name), "main.afl_config")) as testaflfp:
aflconfigdict = json.load(testaflfp)
self.assertEqual(aflconfigdict["afl_out_dir"],
"test/test_output_volume/test_package/main/afl_fuzz_mockuuid")
self.assertTrue(os.path.exists(aflconfigdict["afl_out_dir"]))
with mock.patch("uuid.uuid4") as uuidmock:
uuidmock.return_value = "resume"
configfinder.fuzzer_wrapper.resume_fuzzer("test/test_output_volume/test_package/main/afl_fuzz_mockuuid",
binary_path="test/mock_data/input_mock/jpg_binary/main",
parameter="@@", timeout=1500.0, fuzz_duration=10)
shutil.rmtree(volume_path, ignore_errors=True)
def test_fuzzer_minimized_failed(self):
volume_path = "test/test_output_volume"
name = "main"
shutil.rmtree(volume_path, ignore_errors=True)
os.makedirs(os.path.join(os.path.join(volume_path, name), "main/"))
with mock.patch("uuid.uuid4") as uuidmock:
uuidmock.return_value = "mockuuidmin"
m = minimzer.minize(parameter="@@", seeds_dir="test/mock_data/mock_seeds/jpg_samples",
binary_path="test/mock_data/input_mock/jpg_binary/main", package=None,
volume_path=volume_path, afl_config_file_name="main.afl_config", tmin_total_time=1000)
uuidmock.return_value = "mockuuid"
for file in os.listdir(os.path.join(volume_path, name, "main/afl_tmin_mockuuidmin/")):
with open(os.path.join(os.path.join(volume_path, name, "main/afl_tmin_mockuuidmin/", file)),
"w"):
pass
# shutil.rmtree(os.path.join(volume_path,name,"main/afl_tmin_mockuuidmin/"))
configfinder.fuzzer_wrapper.prepare_and_start_fuzzer(parameter=None,
seeds_dir="test/mock_data/mock_seeds/jpg_samples",
binary_path="test/mock_data/input_mock/jpg_binary/main",
package=None, volume_path=volume_path,
afl_config_file_name="main.afl_config",
fuzz_duration=10)
# with open(os.path.join(os.path.join(volume_path, name), "main.afl_config")) as testaflfp:
# aflconfigdict = json.load(testaflfp)
# self.assertEqual(aflconfigdict["afl_out_dir"],
# os.path.join(volume_path, name, "main/afl_fuzz_mockuuid"))
# self.assertTrue(os.path.exists(aflconfigdict["afl_out_dir"]))
shutil.rmtree(volume_path, ignore_errors=True)
""" |
alipay/aop/api/domain/AlipayOpenOperationBizfeeAftechCancelModel.py | snowxmas/alipay-sdk-python-all | 213 | 80541 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenOperationBizfeeAftechCancelModel(object):
    """Request model for the aftech bizfee cancel operation."""

    # Serializable field names, in the order they are emitted.
    _FIELDS = ('app_name', 'gmt_service', 'order_no', 'out_biz_no',
               'tnt_inst_id')

    def __init__(self):
        self._app_name = None
        self._gmt_service = None
        self._order_no = None
        self._out_biz_no = None
        self._tnt_inst_id = None

    @property
    def app_name(self):
        return self._app_name

    @app_name.setter
    def app_name(self, value):
        self._app_name = value

    @property
    def gmt_service(self):
        return self._gmt_service

    @gmt_service.setter
    def gmt_service(self, value):
        self._gmt_service = value

    @property
    def order_no(self):
        return self._order_no

    @order_no.setter
    def order_no(self, value):
        self._order_no = value

    @property
    def out_biz_no(self):
        return self._out_biz_no

    @out_biz_no.setter
    def out_biz_no(self, value):
        self._out_biz_no = value

    @property
    def tnt_inst_id(self):
        return self._tnt_inst_id

    @tnt_inst_id.setter
    def tnt_inst_id(self, value):
        self._tnt_inst_id = value

    def to_alipay_dict(self):
        """Serialize the populated fields into a plain dict."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if value:
                # Nested SDK objects serialize themselves; scalars pass through.
                params[field] = value.to_alipay_dict() if hasattr(value, 'to_alipay_dict') else value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for empty input."""
        if not d:
            return None
        model = AlipayOpenOperationBizfeeAftechCancelModel()
        for field in AlipayOpenOperationBizfeeAftechCancelModel._FIELDS:
            if field in d:
                setattr(model, field, d[field])
        return model
|
QuickWall/feh.py | deepjyoti30/QuickWall | 191 | 80562 | <filename>QuickWall/feh.py
"""Functions related to usign feh as wallpaper setter."""
import subprocess
from pathlib import Path
from simber import Logger
# Declare the logger
logger = Logger("feh")
class feh:
    """Set and restore desktop wallpapers using the ``feh`` utility."""

    def __init__(self):
        # feh records the command for the last wallpaper in ~/.fehbg.
        self.feh_config_path = Path('~/.fehbg').expanduser()
        # Raises FileNotFoundError if ~/.fehbg does not exist yet.
        self.current = self._find_current()

    def _find_current(self):
        """Return the wallpaper path recorded in ~/.fehbg.

        The wallpaper path is the second-to-last whitespace-separated
        token of the restore script feh writes.
        """
        # read_text() opens and closes the file; the previous version
        # leaked an open file handle and read the file twice.
        current = self.feh_config_path.read_text().split(' ')[-2]
        logger.debug("{}".format(current))
        return current

    def restore(self):
        """Restore the wallpaper that was current at startup."""
        # Pass argv as a list so paths containing spaces survive
        # (str.split() on the old command string broke such paths).
        subprocess.Popen(["feh", "--bg-fill", self.current],
                         stdout=subprocess.PIPE)

    def set(self, file_path):
        """Set *file_path* as the wallpaper temporarily."""
        p = subprocess.Popen(["feh", "--bg-fill", str(file_path)],
                             stdout=subprocess.PIPE)
        # Wait for feh to finish so failures surface synchronously.
        p.communicate()

    def set_perm(self, file_path):
        """Set the wallpaper permanently.

        Currently identical to set(); presumably feh itself persists the
        choice to ~/.fehbg — TODO confirm.
        """
        self.set(file_path)
|
helm/dagster/schema/schema/charts/dagster/subschema/service_account.py | dbatten5/dagster | 4,606 | 80632 | <reponame>dbatten5/dagster
from pydantic import BaseModel # pylint: disable=no-name-in-module
from ...utils import kubernetes
class ServiceAccount(BaseModel):
    """Helm values schema for a Kubernetes service account."""
    # Whether the chart should create the service account (vs. use an
    # existing one) — presumed from the field name; confirm in templates.
    create: bool
    name: str
    annotations: kubernetes.Annotations
|
vega/report/__init__.py | jie311/vega | 724 | 80649 | from .report_server import ReportServer
from .report_client import ReportClient
from .record import ReportRecord
from .nsga_iii import NonDominatedSorting, SortAndSelectPopulation
|
plugins/lookup/apt_keys.py | manala/ansible-roles | 138 | 80662 | from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: apt_keys
author: Manala (@manala)
short_description: returns a curated keys list
description:
- Takes a keys list and returns it curated.
'''
from ansible.plugins.lookup import LookupBase
from ansible.errors import AnsibleError
from ansible.module_utils.six import string_types
class LookupModule(LookupBase):

    def run(self, terms, variables=None, **kwargs):
        """Curate an apt keys list.

        terms[0]: keys (strings naming a pattern, or dicts with an "id"),
        terms[1]: named key patterns, terms[2]: repositories whose own
        "key" entries are prepended.
        """
        results = []

        keys = self._flatten(terms[0])
        keys_patterns = terms[1]
        repositories = terms[2]

        # Repositories are defined as reversed preferences: iterate them
        # backwards so higher-priority repository keys end up first.
        for repository in repositories[::-1]:
            if 'key' in repository:
                keys.insert(0, repository.get('key'))

        for key in keys:
            item = {}

            # Short syntax: the key is the name of a predefined pattern.
            if isinstance(key, string_types):
                pattern = keys_patterns.get(key)
                if pattern is None:
                    # Previously this fell through to item.update(None) and
                    # crashed with a cryptic TypeError; fail explicitly.
                    raise AnsibleError('Unknown key pattern "%s"' % key)
                item.update(pattern)
            else:
                # Must be a dict
                if not isinstance(key, dict):
                    raise AnsibleError('Expected a dict but was a %s' % type(key))
                # Check id key
                if 'id' not in key:
                    raise AnsibleError('Missing "id" key')
                item.update(key)

            # Merge by id: a later definition replaces an earlier one.
            for i, result in enumerate(results):
                if result['id'] == item['id']:
                    results[i] = item
                    break
            else:
                results.append(item)

        return results
|
tests/test_graphviz.py | center-for-threat-informed-defense/attack-flow | 165 | 80696 | from textwrap import dedent
import attack_flow.graphviz
def test_convert_attack_flow_to_dot():
    # Two actions joined through two assets via three relationships.
    actions = [
        {"id": "action1", "name": "action-one"},
        {"id": "action2", "name": "action-two"},
    ]
    assets = [
        {"id": "asset1"},
        {"id": "asset2"},
    ]
    relationships = [
        {"source": "action1", "target": "asset1"},
        {"source": "asset1", "target": "action2"},
        {"source": "action2", "target": "asset2"},
    ]
    flow = {
        "actions": actions,
        "assets": assets,
        "relationships": relationships,
    }
    expected = dedent('''\
        digraph {
        node [shape=box,style="rounded,filled,fixedsize=true,width=2,height=1"]
        "action1" [fillcolor=pink,label="action-one"]
        "action2" [fillcolor=pink,label="action-two"]
        "asset1" [fillcolor=lightblue1]
        "asset2" [fillcolor=lightblue1]
        }''')
    assert attack_flow.graphviz.convert(flow) == expected
def test_convert_complex_attack_flow_to_dot():
    """End-to-end conversion of a realistic flow.

    Checks (against the expected dot output below) that long node labels are
    wrapped with literal \\n, asset "#description" data properties become
    node labels, "#state"/"#state-change" relationships become
    requires/provides edges, extra "#state" data properties become linked
    light green nodes, and "#flow-edge" relationships plus the flow metadata
    do not appear in the output.
    """
    flow = {
        "flow": {
            "type": "attack-flow",
            "id": "flow-1",
            "name": "Attack Flow Export",
            "author": "Unspecified",
            "created": "2022-01-14T13:59:42-05:00"
        },
        "actions": [
            {
                "id": "flow-1/action-3",
                "type": "action",
                "name": "T1133: External Remote Services",
                "description": "Kubernetes Dashboard",
                "reference": "",
                "succeeded": 1,
                "confidence": 1,
                "logic_operator_language": "",
                "logic_operator": "AND"
            },
            {
                "id": "flow-1/action-11",
                "type": "action",
                "name": "T1610: Deploy Container",
                "description": "Deploy cryptomining container",
                "reference": "",
                "succeeded": 1,
                "confidence": 1,
                "logic_operator_language": "",
                "logic_operator": "AND"
            },
            {
                "id": "flow-1/action-12",
                "type": "action",
                "name": "T1552.001: Unsecured Credentials: Credentials In Files",
                "description": "Harvest AWS service credentials.",
                "reference": "",
                "succeeded": 1,
                "confidence": 0,
                "logic_operator_language": "",
                "logic_operator": "AND"
            },
            {
                "id": "flow-1/action-17",
                "type": "action",
                "name": "T1496: Resource Highjacking",
                "description": "Run cryptomining software",
                "reference": "",
                "succeeded": 1,
                "confidence": 1,
                "logic_operator_language": "",
                "logic_operator": "AND"
            },
            {
                "id": "flow-1/action-18",
                "type": "action",
                "name": "T1078.004: Valid Accounts: Cloud Accounts",
                "description": "Use harvested AWS credentials",
                "reference": "",
                "succeeded": 1,
                "confidence": 0,
                "logic_operator_language": "",
                "logic_operator": "AND"
            },
            {
                "id": "flow-1/action-23",
                "type": "action",
                "name": "T1530: Data from Cloud Storage Object",
                "description": "Download data from storage bucket",
                "reference": "",
                "succeeded": 1,
                "confidence": 0,
                "logic_operator_language": "",
                "logic_operator": "AND"
            }
        ],
        "assets": [
            {
                "id": "flow-1/asset-1",
                "type": "asset",
                "state": "compromised"
            },
            {
                "id": "flow-1/asset-7",
                "type": "asset",
                "state": "compromised"
            },
            {
                "id": "flow-1/asset-9",
                "type": "asset",
                "state": "compromised"
            },
            {
                "id": "flow-1/asset-13",
                "type": "asset",
                "state": "compromised"
            },
            {
                "id": "flow-1/asset-15",
                "type": "asset",
                "state": "compromised"
            },
            {
                "id": "flow-1/asset-19",
                "type": "asset",
                "state": "compromised"
            },
            {
                "id": "flow-1/asset-21",
                "type": "asset",
                "state": "compromised"
            },
            {
                "id": "flow-1/asset-24",
                "type": "asset",
                "state": "compromised"
            }
        ],
        "relationships": [
            {
                "source": "flow-1/asset-1",
                "type": "flow-1#state",
                "target": "flow-1/action-3"
            },
            {
                "source": "flow-1/action-3",
                "type": "flow-1#state-change",
                "target": "flow-1/asset-7"
            },
            {
                "source": "flow-1/action-3",
                "type": "flow-1#state-change",
                "target": "flow-1/asset-9"
            },
            {
                "source": "flow-1/asset-7",
                "type": "flow-1#state",
                "target": "flow-1/action-11"
            },
            {
                "source": "flow-1/asset-9",
                "type": "flow-1#state",
                "target": "flow-1/action-12"
            },
            {
                "source": "flow-1/action-11",
                "type": "flow-1#state-change",
                "target": "flow-1/asset-13"
            },
            {
                "source": "flow-1/action-12",
                "type": "flow-1#state-change",
                "target": "flow-1/asset-15"
            },
            {
                "source": "flow-1/asset-13",
                "type": "flow-1#state",
                "target": "flow-1/action-17"
            },
            {
                "source": "flow-1/asset-15",
                "type": "flow-1#state",
                "target": "flow-1/action-18"
            },
            {
                "source": "flow-1/action-17",
                "type": "flow-1#state-change",
                "target": "flow-1/asset-19"
            },
            {
                "source": "flow-1/action-18",
                "type": "flow-1#state-change",
                "target": "flow-1/asset-21"
            },
            {
                "source": "flow-1/asset-21",
                "type": "flow-1#state",
                "target": "flow-1/action-23"
            },
            {
                "source": "flow-1/action-23",
                "type": "flow-1#state-change",
                "target": "flow-1/asset-24"
            },
            {
                "source": "flow-1",
                "type": "flow-1#flow-edge",
                "target": "flow-1/action-3"
            },
            {
                "source": "flow-1",
                "type": "flow-1#flow-edge",
                "target": "flow-1/action-11"
            },
            {
                "source": "flow-1",
                "type": "flow-1#flow-edge",
                "target": "flow-1/action-12"
            },
            {
                "source": "flow-1",
                "type": "flow-1#flow-edge",
                "target": "flow-1/action-17"
            },
            {
                "source": "flow-1",
                "type": "flow-1#flow-edge",
                "target": "flow-1/action-18"
            },
            {
                "source": "flow-1",
                "type": "flow-1#flow-edge",
                "target": "flow-1/action-23"
            },
            {
                "source": "flow-1",
                "type": "flow-1#flow-edge",
                "target": "flow-1/asset-1"
            },
            {
                "source": "flow-1",
                "type": "flow-1#flow-edge",
                "target": "flow-1/asset-7"
            },
            {
                "source": "flow-1",
                "type": "flow-1#flow-edge",
                "target": "flow-1/asset-9"
            },
            {
                "source": "flow-1",
                "type": "flow-1#flow-edge",
                "target": "flow-1/asset-13"
            },
            {
                "source": "flow-1",
                "type": "flow-1#flow-edge",
                "target": "flow-1/asset-15"
            },
            {
                "source": "flow-1",
                "type": "flow-1#flow-edge",
                "target": "flow-1/asset-19"
            },
            {
                "source": "flow-1",
                "type": "flow-1#flow-edge",
                "target": "flow-1/asset-21"
            },
            {
                "source": "flow-1",
                "type": "flow-1#flow-edge",
                "target": "flow-1/asset-24"
            }
        ],
        "object_properties": [],
        "data_properties": [
            {
                "source": "flow-1/asset-1",
                "type": "flow-1#description",
                "target": "Kubernetes Dashboard"
            },
            {
                "source": "flow-1/asset-1",
                "type": "flow-1#state",
                "target": "exposed"
            },
            {
                "source": "flow-1/asset-1",
                "type": "flow-1#state",
                "target": "unsecured"
            },
            {
                "source": "flow-1/asset-7",
                "type": "flow-1#description",
                "target": "Kubernetes Cluster"
            },
            {
                "source": "flow-1/asset-9",
                "type": "flow-1#description",
                "target": "Kubernetes Admin Priv"
            },
            {
                "source": "flow-1/asset-13",
                "type": "flow-1#description",
                "target": "Kubernetes Container"
            },
            {
                "source": "flow-1/asset-15",
                "type": "flow-1#description",
                "target": "AWS Credentials"
            },
            {
                "source": "flow-1/asset-19",
                "type": "flow-1#description",
                "target": "Cryptocurrency"
            },
            {
                "source": "flow-1/asset-21",
                "type": "flow-1#description",
                "target": "AWS Access"
            },
            {
                "source": "flow-1/asset-24",
                "type": "flow-1#description",
                "target": "Data"
            }
        ]
    }
    output = attack_flow.graphviz.convert(flow)
    assert output == dedent('''\
        digraph {
        node [shape=box,style="rounded,filled,fixedsize=true,width=2,height=1"]
        "flow-1/action-3" [fillcolor=pink,label="T1133: External\\nRemote Services"]
        "flow-1/action-11" [fillcolor=pink,label="T1610: Deploy\\nContainer"]
        "flow-1/action-12" [fillcolor=pink,label="T1552.001: Unsecured\\nCredentials:\\nCredentials In Files"]
        "flow-1/action-17" [fillcolor=pink,label="T1496: Resource\\nHighjacking"]
        "flow-1/action-18" [fillcolor=pink,label="T1078.004: Valid\\nAccounts: Cloud\\nAccounts"]
        "flow-1/action-23" [fillcolor=pink,label="T1530: Data from\\nCloud Storage Object"]
        "flow-1/asset-1" [fillcolor=lightblue1,label="Kubernetes Dashboard"]
        "flow-1/asset-7" [fillcolor=lightblue1,label="Kubernetes Cluster"]
        "flow-1/asset-9" [fillcolor=lightblue1,label="Kubernetes Admin\\nPriv"]
        "flow-1/asset-13" [fillcolor=lightblue1,label="Kubernetes Container"]
        "flow-1/asset-15" [fillcolor=lightblue1,label="AWS Credentials"]
        "flow-1/asset-19" [fillcolor=lightblue1,label="Cryptocurrency"]
        "flow-1/asset-21" [fillcolor=lightblue1,label="AWS Access"]
        "flow-1/asset-24" [fillcolor=lightblue1,label="Data"]
        "flow-1/asset-1" -> "flow-1/action-3" [label="requires"]
        "flow-1/action-3" -> "flow-1/asset-7" [label="provides"]
        "flow-1/action-3" -> "flow-1/asset-9" [label="provides"]
        "flow-1/asset-7" -> "flow-1/action-11" [label="requires"]
        "flow-1/asset-9" -> "flow-1/action-12" [label="requires"]
        "flow-1/action-11" -> "flow-1/asset-13" [label="provides"]
        "flow-1/action-12" -> "flow-1/asset-15" [label="provides"]
        "flow-1/asset-13" -> "flow-1/action-17" [label="requires"]
        "flow-1/asset-15" -> "flow-1/action-18" [label="requires"]
        "flow-1/action-17" -> "flow-1/asset-19" [label="provides"]
        "flow-1/action-18" -> "flow-1/asset-21" [label="provides"]
        "flow-1/asset-21" -> "flow-1/action-23" [label="requires"]
        "flow-1/action-23" -> "flow-1/asset-24" [label="provides"]
        "flow-1/asset-1-exposed-state" [fillcolor=lightgreen,label="exposed"]
        "flow-1/asset-1-unsecured-state" [fillcolor=lightgreen,label="unsecured"]
        "flow-1/asset-1-exposed-state" -> "flow-1/asset-1" [dir=none,style=dashed]
        "flow-1/asset-1-unsecured-state" -> "flow-1/asset-1" [dir=none,style=dashed]
        }''')  # noqa: E501
def test_align_node_label_one_liner():
    """A label that already fits on one line is returned unchanged."""
    label = "one liner"
    assert attack_flow.graphviz.align_node_label(label) == label
def test_align_node_label_multiline():
    """Labels longer than ``width`` are wrapped with a literal backslash-n."""
    wrapped = attack_flow.graphviz.align_node_label(
        "multi liner label example", width=15
    )
    assert wrapped == "multi liner\\nlabel example"
def test_align_node_label_string_escaping():
    """Double quotes inside a label are escaped for the dot language."""
    escaped = attack_flow.graphviz.align_node_label('a "tricky" example')
    assert escaped == 'a \\"tricky\\" example'
|
vue/factory.py | adamlwgriffiths/vue.py | 274 | 80698 | <reponame>adamlwgriffiths/vue.py
from .decorators.base import VueDecorator
from .decorators.prop import Prop
from .decorators.data import Data
from .decorators.lifecycle_hook import LifecycleHook
from .decorators.method import Method
from .decorators.render import Render
from .decorators.mixins import Mixins
from .decorators.template import Template
from .decorators.directive import DirectiveHook
from .decorators.extends import Extends
from .decorators.components import Components
from .decorators.state import State
from .decorators.plugin import Plugin
from .decorators.routes import Routes
def merge_templates(sub):
    """Recursively merge *sub*'s template slots into its base class templates.

    If the base class defines a ``template`` and *sub* defines truthy
    ``template_slots``, the base template chain is resolved first and then
    formatted with the combined slot mapping (sub's slots override the
    base's). Otherwise *sub*'s own ``template`` (default ``"{}"``) is
    returned unchanged.
    """
    def _slots_of(cls):
        # ``template_slots`` may be a plain string, which is shorthand for
        # the "default" slot.
        raw = getattr(cls, "template_slots", {})
        return {"default": raw} if isinstance(raw, str) else raw

    base = sub.__base__
    should_merge = hasattr(base, "template") and getattr(
        sub, "template_slots", False
    )
    if not should_merge:
        return getattr(sub, "template", "{}")

    parent_template = merge_templates(base)
    merged_slots = {**_slots_of(base), **_slots_of(sub)}
    # The "default" slot also fills the positional "{}" placeholder.
    return parent_template.format(merged_slots.get("default"), **merged_slots)
class BrythonObjectWorkarounds(type):
    """
    Fixes the following Brython bugs:
    * https://github.com/brython-dev/brython/issues/904
    """
    @property
    def __base__(cls):
        # Emulate CPython's ``__base__`` by returning the first entry of
        # ``__bases__`` (single-inheritance assumption for wrapper classes).
        return cls.__bases__[0]
class Wrapper(metaclass=BrythonObjectWorkarounds):
    """Root class for vue.py wrappers.

    Serves as the recursion sentinel in
    ``AttributeDictFactory.get_wrapper_base`` (``base is Wrapper``).
    """
    pass
class AttributeDictFactory:
    """Turn a wrapper class's attributes into a Vue options dict.

    Subclasses override ``auto_decorate`` to wrap raw attributes in the
    appropriate ``VueDecorator``; each decorator then writes itself into
    the resulting options dict via ``update``.
    """
    @classmethod
    def get_item(cls, wrapper):
        # Only classes built with the workaround metaclass are converted;
        # anything else (e.g. an already-built dict) passes through as-is.
        if isinstance(wrapper, BrythonObjectWorkarounds):
            return cls(wrapper).generate_item()
        return wrapper
    @classmethod
    def get_wrapper_base(cls, wrapper):
        """Walk up the bases to the class directly derived from ``Wrapper``."""
        base = wrapper.__base__
        if base is Wrapper:
            return wrapper
        return cls.get_wrapper_base(base)
    def __init__(self, wrapper):
        # wrapper: the user-defined class; base: its topmost non-Wrapper
        # ancestor, used to filter out inherited framework attributes.
        self.wrapper = wrapper
        self.base = self.get_wrapper_base(wrapper)
    def __attributes__(self):
        """Yield (name, value) pairs the wrapper defines beyond its base.

        Annotation-only attributes (no assigned value) yield ``None``.
        """
        all_objects = set(dir(self.wrapper))
        all_objects.update(getattr(self.wrapper, "__annotations__", {}).keys())
        own_objects = all_objects - set(dir(self.base)) - {"__annotations__"}
        for obj_name in own_objects:
            yield obj_name, getattr(self.wrapper, obj_name, None)
    def auto_decorate(self, obj_name, obj):
        # Default: no automatic decoration; subclasses refine this.
        return obj
    def generate_item(self):
        """Build the options dict by letting each decorator register itself."""
        object_map = {}
        for obj_name, obj in self.__attributes__():
            obj = self.auto_decorate(obj_name, obj)
            if isinstance(obj, VueDecorator):
                obj.update(object_map)
        return object_map
class VueComponentFactory(AttributeDictFactory):
    """Build a Vue component options dict from a wrapper class.

    ``auto_decorate`` maps attribute names/values to the matching decorator;
    the order of the elif chain matters (e.g. lifecycle hooks before the
    generic ``callable`` branch).
    """
    def _property_mixin(self, prop_name):
        # An annotated name with no assigned value becomes a required prop;
        # otherwise the assigned value is the prop's default.
        if prop_name not in dir(self.wrapper):
            return {"required": True}
        else:
            return {"default": getattr(self.wrapper, prop_name)}
    def auto_decorate(self, obj_name, obj):
        if obj_name in LifecycleHook.mapping:
            obj = LifecycleHook(obj_name, obj)
        elif obj_name == "template":
            obj = Template(merge_templates(self.wrapper))
        elif obj_name == "extends":
            if obj:
                # ``extends = True`` means "extend my own base class".
                extends = self.wrapper.__base__ if isinstance(obj, bool) else obj
                obj = Extends(VueComponentFactory.get_item(extends))
        elif obj_name == "mixins":
            obj = Mixins(*(VueComponentFactory.get_item(m) for m in obj))
        elif obj_name == "components":
            obj = Components(*(VueComponentFactory.get_item(m) for m in obj))
        elif obj_name == "render":
            obj = Render(obj)
        elif callable(obj):
            obj = Method(obj)
        elif obj_name in getattr(self.wrapper, "__annotations__", {}):
            obj = Prop(
                obj_name,
                self.wrapper.__annotations__[obj_name],
                self._property_mixin(obj_name),
            )
        elif not isinstance(obj, VueDecorator):
            # Plain values become reactive data entries.
            obj = Data(obj_name, obj)
        return super().auto_decorate(obj_name, obj)
    def generate_item(self):
        """Wrap collected data in a factory so each instance gets fresh state."""
        init_dict = super().generate_item()
        _data = init_dict.get("data", None)
        if not _data:
            return init_dict
        def get_initialized_data(this):
            # Callables are evaluated per component instance; plain values
            # are shared as-is.
            initialized_data = {}
            for name, date in _data.items():
                initialized_data[name] = date(this) if callable(date) else date
            return initialized_data
        init_dict.update(data=get_initialized_data)
        return init_dict
class VueDirectiveFactory(AttributeDictFactory):
    """Build a Vue directive definition from a wrapper class."""
    def auto_decorate(self, obj_name, obj):
        # Every callable attribute becomes a directive hook named after it.
        if callable(obj):
            obj = DirectiveHook(obj, hooks=(obj_name,), name=self.wrapper.name)
        return super().auto_decorate(obj_name, obj)
    @classmethod
    def get_item(cls, wrapper):
        # Unwrap the single {directive_name: hooks} entry; fall back to an
        # empty hook dict when the wrapper defines no hooks.
        default = {wrapper.name: {}}
        dct = super().get_item(wrapper)
        return dct.get("directives", default).popitem()[1]
class VueStoreFactory(AttributeDictFactory):
    """Build a Vuex store options dict from a wrapper class."""
    def auto_decorate(self, obj_name, obj):
        if obj_name == "plugins":
            obj = Plugin(obj)
        elif not isinstance(obj, VueDecorator):
            # Undecorated values become store state entries.
            obj = State(obj_name, obj)
        return super().auto_decorate(obj_name, obj)
class VueRouterFactory(AttributeDictFactory):
    """Build a vue-router options dict from a wrapper class."""
    def auto_decorate(self, obj_name, obj):
        # Only the "routes" attribute needs wrapping; everything else is
        # passed through (and ignored unless already a VueDecorator).
        if obj_name == "routes":
            obj = Routes(obj)
        return super().auto_decorate(obj_name, obj)
|
pytermgui/context_managers.py | jakkso/pytermgui | 771 | 80704 | <filename>pytermgui/context_managers.py
"""
Ease-of-use context-manager classes & functions.
There isn't much (or any) additional functionality provided in this module,
most things are nicer-packaged combinations to already available methods from
`pytermgui.ansi_interface`.
"""
from __future__ import annotations
from os import name
from contextlib import contextmanager
from typing import Callable, Generator, Any, Union, List
from .ansi_interface import (
is_interactive,
save_cursor,
restore_cursor,
print_to,
show_cursor,
hide_cursor,
set_echo,
unset_echo,
set_alt_buffer,
unset_alt_buffer,
cursor_up,
report_mouse,
translate_mouse,
MouseEvent,
)
# TODO: Move this absolute beast to a types submodule
MouseTranslator = Callable[[str], Union[List[Union[MouseEvent, None]], None]]
@contextmanager
def cursor_at(pos: tuple[int, int]) -> Generator[Callable[..., None], None, None]:
    """Yield a print-like callable anchored at ``pos``.

    Every call prints at the anchor column, one row further down than the
    previous call. The cursor position is saved on entry and restored on
    exit.
    """
    column, row = pos
    lines_printed = 0

    def printer(*args: tuple[Any, ...]) -> None:
        """Print ``args`` at the anchor column, advancing one row per call."""
        nonlocal lines_printed
        print_to((column, row + lines_printed), *args)
        lines_printed += 1

    try:
        save_cursor()
        yield printer
    finally:
        restore_cursor()
@contextmanager
def alt_buffer(echo: bool = False, cursor: bool = True) -> Generator[None, None, None]:
    """Create non-scrollable alt-buffer
    This is useful for retrieving original terminal state after program end."""
    try:
        set_alt_buffer()
        # Echo is only toggled on POSIX, and not inside interactive
        # interpreters where terminal state is shared with the REPL.
        if not echo and name == "posix" and not is_interactive():
            unset_echo()
        if not cursor:
            hide_cursor()
        yield
    finally:
        # Undo every mode change in reverse, restoring the primary buffer
        # even if the body raised.
        unset_alt_buffer()
        if not echo and name == "posix" and not is_interactive():
            set_echo()
            # NOTE(review): cursor_up() presumably compensates for a newline
            # emitted while restoring echo -- confirm against ansi_interface.
            cursor_up()
        if not cursor:
            show_cursor()
            cursor_up()
@contextmanager
def mouse_handler(
    events: list[str], method: str = "decimal_xterm"
) -> Generator[MouseTranslator | None, None, None]:
    """Return a mouse handler function

    Note: This method only supports `decimal_urxvt` and `decimal_xterm`, as they are the most
    universal.

    See `help(report_mouse)` for help about all of the methods.

    Example use:

    ```python3
    import pytermgui as ptg

    with ptg.mouse_handler(["press", "hover"]) as mouse:
        while True:
            event = mouse(ptg.getch())
            print(type(event))
            print(event.action)
            print(event.position)

    'pytermgui.ansi_interface.MouseEvent'
    'pytermgui.ansi_interface.MouseAction.LEFT_CLICK'
    (33, 55)
    ```
    """
    # Track every event stream we managed to start so ALL of them are
    # stopped on exit. The previous implementation only stopped the last
    # event in the list, leaving earlier streams (e.g. "press" when
    # ["press", "hover"] was requested) still reporting after exit.
    started: list[str] = []
    try:
        for event in events:
            report_mouse(event, method=method)
            started.append(event)
        yield lambda code: translate_mouse(code, method=method)
    finally:
        for event in reversed(started):
            report_mouse(event, method=method, stop=True)
|
tests/test_filtration.py | bbhunter/pyWhat | 5,049 | 80717 | <reponame>bbhunter/pyWhat<filename>tests/test_filtration.py
import pytest
from pywhat import Distribution, Filter, pywhat_tags
from pywhat.helper import CaseInsensitiveSet, InvalidTag, load_regexes
regexes = load_regexes()
@pytest.mark.skip(
    "Dist.get_regexes() returns the regex list with the default filter of 0.1:1. \
    load_regexes() returns all regex without that filter. \
    This fails because one of them is filtered and the other is not."
)
def test_distribution():
    """A default Distribution would expose every regex (currently skipped)."""
    dist = Distribution()
    assert regexes == dist.get_regexes()
def test_distribution2():
    """A filtered Distribution keeps every regex matching the filter."""
    filter = {
        "MinRarity": 0.3,
        "MaxRarity": 0.8,
        "Tags": ["Networking"],
        "ExcludeTags": ["Identifiers"],
    }
    dist = Distribution(filter)
    # Every regex satisfying the filter by hand must appear in the result.
    for regex in regexes:
        if (
            0.3 <= regex["Rarity"] <= 0.8
            and "Networking" in regex["Tags"]
            and "Identifiers" not in regex["Tags"]
        ):
            assert regex in dist.get_regexes()
def test_distribution3():
    """``&`` intersects two Distributions (tightest bounds, tag intersection)."""
    filter1 = {"MinRarity": 0.3, "Tags": ["Networking"], "ExcludeTags": ["Identifiers"]}
    filter2 = {"MinRarity": 0.4, "MaxRarity": 0.8, "ExcludeTags": ["Media"]}
    dist = Distribution(filter1) & Distribution(filter2)
    assert dist._dict["MinRarity"] == 0.4
    assert dist._dict["MaxRarity"] == 0.8
    assert dist._dict["Tags"] == CaseInsensitiveSet(["Networking"])
    assert dist._dict["ExcludeTags"] == CaseInsensitiveSet()
    for regex in regexes:
        if 0.4 <= regex["Rarity"] <= 0.8 and "Networking" in regex["Tags"]:
            assert regex in dist.get_regexes()
def test_distribution4():
    """``&=`` behaves the same as ``&`` regardless of operand order."""
    filter1 = {"MinRarity": 0.3, "Tags": ["Networking"], "ExcludeTags": ["Identifiers"]}
    filter2 = {"MinRarity": 0.4, "MaxRarity": 0.8, "ExcludeTags": ["Media"]}
    dist = Distribution(filter2)
    dist &= Distribution(filter1)
    assert dist._dict["MinRarity"] == 0.4
    assert dist._dict["MaxRarity"] == 0.8
    assert dist._dict["Tags"] == CaseInsensitiveSet(["Networking"])
    assert dist._dict["ExcludeTags"] == CaseInsensitiveSet()
    for regex in regexes:
        if 0.4 <= regex["Rarity"] <= 0.8 and "Networking" in regex["Tags"]:
            assert regex in dist.get_regexes()
def test_distribution5():
    """``|`` unions two Distributions (widest bounds, combined excludes)."""
    filter1 = {"MinRarity": 0.3, "Tags": ["Networking"], "ExcludeTags": ["Identifiers"]}
    filter2 = {"MinRarity": 0.4, "MaxRarity": 0.8, "ExcludeTags": ["Media"]}
    dist = Distribution(filter1) | Distribution(filter2)
    assert dist._dict["MinRarity"] == 0.3
    assert dist._dict["MaxRarity"] == 1
    assert dist._dict["Tags"] == CaseInsensitiveSet(pywhat_tags)
    assert dist._dict["ExcludeTags"] == CaseInsensitiveSet(["Identifiers", "Media"])
    for regex in regexes:
        if (
            0.3 <= regex["Rarity"] <= 1
            and "Identifiers" not in regex["Tags"]
            and "Media" not in regex["Tags"]
        ):
            assert regex in dist.get_regexes()
def test_distribution6():
    """``|=`` behaves the same as ``|`` regardless of operand order."""
    filter1 = {"MinRarity": 0.3, "Tags": ["Networking"], "ExcludeTags": ["Identifiers"]}
    filter2 = {"MinRarity": 0.4, "MaxRarity": 0.8, "ExcludeTags": ["Media"]}
    dist = Distribution(filter2)
    dist |= Distribution(filter1)
    assert dist._dict["MinRarity"] == 0.3
    assert dist._dict["MaxRarity"] == 1
    assert dist._dict["Tags"] == CaseInsensitiveSet(pywhat_tags)
    assert dist._dict["ExcludeTags"] == CaseInsensitiveSet(["Identifiers", "Media"])
    for regex in regexes:
        if (
            0.3 <= regex["Rarity"] <= 1
            and "Identifiers" not in regex["Tags"]
            and "Media" not in regex["Tags"]
        ):
            assert regex in dist.get_regexes()
def test_distribution7():
    """A bare string for "Tags" (rather than a list) must raise InvalidTag."""
    bad_filter = {"Tags": "Media", "MinRarity": 0.7}
    with pytest.raises(InvalidTag):
        Distribution(bad_filter)
def test_filter():
    """Filter keeps rarity bounds and stores tag sets case-insensitively."""
    filt = Filter({
        "MinRarity": 0.3,
        "MaxRarity": 0.8,
        "Tags": ["Networking"],
        "ExcludeTags": ["Identifiers"],
    })
    assert filt["MinRarity"] == 0.3
    assert filt["MaxRarity"] == 0.8
    assert filt["Tags"] == CaseInsensitiveSet(["networking"])
    assert filt["ExcludeTags"] == CaseInsensitiveSet(["identifiers"])
def test_filter2():
    """``&`` on Filters intersects bounds and tag sets."""
    filter1 = {
        "MinRarity": 0.3,
        "MaxRarity": 0.8,
        "Tags": ["Networking"],
        "ExcludeTags": ["Identifiers"],
    }
    filter2 = {"MinRarity": 0.5, "Tags": ["Networking", "Identifiers"]}
    filt = Filter(filter1) & Filter(filter2)
    assert filt["MinRarity"] == 0.5
    assert filt["MaxRarity"] == 0.8
    assert filt["Tags"] == CaseInsensitiveSet(["networking"])
    assert filt["ExcludeTags"] == CaseInsensitiveSet([])
def test_filter3():
    """A Filter instance is accepted directly by Distribution."""
    filter = {
        "MinRarity": 0.3,
        "MaxRarity": 0.8,
        "Tags": ["Networking"],
        "ExcludeTags": ["Identifiers"],
    }
    filt = Filter(filter)
    dist = Distribution(filt)
    # Every regex satisfying the filter by hand must appear in the result.
    for regex in regexes:
        if (
            0.3 <= regex["Rarity"] <= 0.8
            and "Networking" in regex["Tags"]
            and "Identifiers" not in regex["Tags"]
        ):
            assert regex in dist.get_regexes()
|
samples/break.py | akhand2222/pyicu | 140 | 80728 | <reponame>akhand2222/pyicu
# ====================================================================
# Copyright (c) 2004-2010 Open Source Applications Foundation.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ====================================================================
#
from icu import UnicodeString, BreakIterator, Locale
def printTextRange(iterator, start, end):
    """Print the iterator's text with the [start, end) slice marked by pipes."""
    text = iterator.getText().getText()
    before, inside, after = text[:start], text[start:end], text[end:]
    print("%2d %2d %s|%s|%s" % (start, end, before, inside, after))
#def printTextRange(iterator, start, end):
#
# u = iterator.getText().getText(UnicodeString())
# print "%2d %2d %s|%s|%s" %(start, end,
# UnicodeString(u, 0, start),
# UnicodeString(u, start, end-start),
# UnicodeString(u, end))
def printEachForward(boundary):
    """Walk the break iterator forward, printing each [start, end) segment."""
    previous = boundary.first()
    for position in boundary:
        printTextRange(boundary, previous, position)
        previous = position
# Print each element in reverse order:
# Print each element in reverse order:
def printEachBackward(boundary):
    """Walk the break iterator backwards, printing each segment."""
    end = boundary.last()
    start = boundary.previous()
    while start != BreakIterator.DONE:
        printTextRange(boundary, start, end)
        end = start
        start = boundary.previous()
# Print the first element
# Print the first element
def printFirst(boundary):
    """Print only the first segment of the text."""
    begin = boundary.first()
    printTextRange(boundary, begin, boundary.next())
# Print the last element
# Print the last element
def printLast(boundary):
    """Print the last segment, unless the iterator is empty."""
    end = boundary.last()
    start = boundary.previous()
    if start == BreakIterator.DONE:
        return
    printTextRange(boundary, start, end)
# Print the element at a specified position
# Print the element at a specified position
def printAt(boundary, pos):
    """Print the segment containing character position ``pos``."""
    # following() moves past the boundary after pos; previous() steps back
    # to the boundary before it, giving the enclosing segment.
    end = boundary.following(pos)
    start = boundary.previous()
    printTextRange(boundary, start, end)
def main():
    """Demonstrate sentence and word boundary iteration on a sample string."""
    print("ICU Break Iterator Sample Program")
    print("C++ Break Iteration in Python")
    stringToExamine = u"Aaa bbb ccc. Ddd eee fff."
    print("Examining: ", stringToExamine)
    # print each sentence in forward and reverse order
    boundary = BreakIterator.createSentenceInstance(Locale.getUS())
    boundary.setText(stringToExamine)
    print()
    print("Sentence Boundaries... ")
    print("----- forward: -----------")
    printEachForward(boundary)
    print("----- backward: ----------")
    printEachBackward(boundary)
    # print each word in order
    print()
    print("Word Boundaries...")
    boundary = BreakIterator.createWordInstance(Locale.getUS())
    boundary.setText(stringToExamine)
    print("----- forward: -----------")
    printEachForward(boundary)
    # print first element
    print("----- first: -------------")
    printFirst(boundary)
    # print last element
    print("----- last: --------------")
    printLast(boundary)
    # print word at charpos 10
    print("----- at pos 10: ---------")
    printAt(boundary, 10)
    print()
    print("End C++ Break Iteration in Python")
|
test/unit/optimizer/test_selection_operators.py | rozlana-g/FEDOT | 358 | 80734 | <gh_stars>100-1000
from functools import partial
from fedot.core.composer.advisor import PipelineChangeAdvisor
from fedot.core.composer.gp_composer.gp_composer import GPComposerRequirements
from fedot.core.debug.metrics import RandomMetric
from fedot.core.optimisers.adapters import PipelineAdapter
from fedot.core.optimisers.gp_comp.gp_operators import random_graph
from fedot.core.optimisers.gp_comp.gp_optimiser import GraphGenerationParams
from fedot.core.optimisers.gp_comp.individual import Individual
from fedot.core.optimisers.gp_comp.operators.selection import (
SelectionTypesEnum,
individuals_selection,
random_selection,
selection,
tournament_selection
)
def rand_population_gener_and_eval(pop_size=4):
    """Generate ``pop_size`` unique random individuals with random fitness.

    Pipelines are built from a fixed model set with max depth 1; each
    individual's fitness is assigned by ``obj_function``.
    """
    models_set = ['knn', 'logit', 'rf']
    requirements = GPComposerRequirements(primary=models_set,
                                          secondary=models_set, max_depth=1)
    pipeline_gener_params = GraphGenerationParams(advisor=PipelineChangeAdvisor(), adapter=PipelineAdapter())
    random_pipeline_function = partial(random_graph, params=pipeline_gener_params,
                                       requirements=requirements)
    population = []
    while len(population) != pop_size:
        # to ensure uniqueness
        ind = Individual(random_pipeline_function())
        if ind not in population:
            population.append(ind)
    # evaluation
    for ind in population:
        ind.fitness = obj_function()
    return population
def obj_function() -> float:
    """Return a random fitness value used as a stand-in objective."""
    return RandomMetric.get_value()
def test_tournament_selection():
    """Tournament selection returns the requested number of known individuals."""
    num_of_inds = 2
    population = rand_population_gener_and_eval(pop_size=4)
    selected_individuals = tournament_selection(individuals=population,
                                                pop_size=num_of_inds)
    assert (all([ind in population for ind in selected_individuals]) and
            len(selected_individuals) == num_of_inds)
def test_random_selection():
    """Random selection returns the requested number of known individuals."""
    num_of_inds = 2
    population = rand_population_gener_and_eval(pop_size=4)
    selected_individuals = random_selection(individuals=population,
                                            pop_size=num_of_inds)
    assert (all([ind in population for ind in selected_individuals]) and
            len(selected_individuals) == num_of_inds)
def test_selection():
    """The generic selection() dispatcher works with the tournament type."""
    num_of_inds = 2
    population = rand_population_gener_and_eval(pop_size=4)
    graph_params = GraphGenerationParams(advisor=PipelineChangeAdvisor(), adapter=PipelineAdapter())
    selected_individuals = selection(types=[SelectionTypesEnum.tournament],
                                     population=population,
                                     pop_size=num_of_inds,
                                     params=graph_params)
    assert (all([ind in population for ind in selected_individuals]) and
            len(selected_individuals) == num_of_inds)
def test_individuals_selection_random_individuals():
    """From a diverse population, individuals_selection picks distinct ones."""
    num_of_inds = 2
    population = rand_population_gener_and_eval(pop_size=4)
    types = [SelectionTypesEnum.tournament]
    graph_params = GraphGenerationParams(advisor=PipelineChangeAdvisor(), adapter=PipelineAdapter())
    selected_individuals = individuals_selection(types=types,
                                                 individuals=population,
                                                 pop_size=num_of_inds,
                                                 graph_params=graph_params)
    # String representations are compared to detect duplicates.
    selected_individuals_ref = [str(ind) for ind in selected_individuals]
    assert (len(set(selected_individuals_ref)) == len(selected_individuals) and
            len(selected_individuals) == num_of_inds)
def test_individuals_selection_equality_individuals():
    """With a population of identical clones, all selected are identical."""
    num_of_inds = 4
    population = rand_population_gener_and_eval(pop_size=1)
    types = [SelectionTypesEnum.tournament]
    # Duplicate the single individual to fill the population.
    population = [population[0] for _ in range(4)]
    graph_params = GraphGenerationParams(advisor=PipelineChangeAdvisor(), adapter=PipelineAdapter())
    selected_individuals = individuals_selection(types=types,
                                                 individuals=population,
                                                 pop_size=num_of_inds, graph_params=graph_params)
    selected_individuals_ref = [str(ind) for ind in selected_individuals]
    assert (len(selected_individuals) == num_of_inds and
            len(set(selected_individuals_ref)) == 1)
|
tools/azure-devtools/src/azure_devtools/perfstress_tests/_event_perf_test.py | vincenttran-msft/azure-sdk-for-python | 2,728 | 80746 | <reponame>vincenttran-msft/azure-sdk-for-python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import asyncio
import threading
import time
from ._repeated_timer import AtomicCounter
from ._perf_stress_base import _PerfTestBase
class EventPerfTest(_PerfTestBase):
    """Base class for event-driven performance tests.

    Subclasses implement the ``start_events_*``/``stop_events_*`` hooks to
    begin and end event delivery; their event callbacks invoke
    ``event_raised_*`` for every event and ``error_raised_*`` on a fatal
    error, which wakes the ``run_all_*`` call blocked on ``self._condition``.
    """

    def __init__(self, arguments):
        super().__init__(arguments)
        if self.args.profile:
            raise NotImplementedError("Profiler support for event tests pending.")

        # Sync tests coordinate through a threading.Condition; async tests
        # through an asyncio.Condition. Both are driven the same way below.
        if self.args.sync:
            self._condition = threading.Condition()
        else:
            self._condition = asyncio.Condition()

        self._start_time = time.time()
        self._error = None  # first error reported by an event callback
        self._processing = None  # background thread/task receiving events
        self._completed_operations = AtomicCounter()

    @property
    def completed_operations(self) -> int:
        """
        Total number of operations completed by run_all().
        Reset after warmup.
        """
        return self._completed_operations.value()

    @property
    def last_completion_time(self) -> float:
        """
        Elapsed time between start of warmup/run and last completed operation.
        Reset after warmup.
        """
        return self._last_completion_time - self._start_time

    def event_raised_sync(self):
        """Record one completed event (sync callbacks)."""
        self._completed_operations.increment()
        self._last_completion_time = time.time()

    def error_raised_sync(self, error):
        """Record a fatal error and wake the blocked run (sync callbacks)."""
        with self._condition:
            self._error = error
            self._condition.notify_all()

    async def event_raised_async(self):
        """Record one completed event (async callbacks)."""
        self._completed_operations.increment()
        self._last_completion_time = time.time()

    async def error_raised_async(self, error):
        """Record a fatal error and wake the blocked run (async callbacks)."""
        async with self._condition:
            self._error = error
            self._condition.notify_all()

    async def setup(self) -> None:
        """
        Setup called once per parallel test instance.
        Starts the background thread/task that receives events.
        """
        if self.args.sync:
            self._processing = threading.Thread(target=self.start_events_sync)
            self._processing.daemon = True
            self._processing.start()
        else:
            self._processing = asyncio.ensure_future(self.start_events_async())

    async def cleanup(self) -> None:
        """
        Cleanup called once per parallel test instance.
        Stops event delivery, then re-raises any error captured by callbacks.
        """
        if self.args.sync:
            self.stop_events_sync()
            self._processing.join()
        else:
            await self.stop_events_async()
            await self._processing
        # Surface any captured error. The previous "raise self._error" inside
        # try/except TypeError relied on "raise None" raising a TypeError,
        # and silently swallowed genuine TypeError instances raised by tests.
        if self._error is not None:
            raise self._error

    def run_all_sync(self, duration: int) -> None:
        """
        Run all sync tests, including both warmup and duration.
        Blocks until ``duration`` elapses or an error wakes the condition.
        """
        with self._condition:
            self._completed_operations.reset()
            self._last_completion_time = 0.0
            self._start_time = time.time()
            self._condition.wait(timeout=duration)

    async def run_all_async(self, duration: int) -> None:
        """
        Run all async tests, including both warmup and duration.
        Blocks until ``duration`` elapses or an error wakes the condition.
        """
        async with self._condition:
            self._completed_operations.reset()
            self._last_completion_time = 0.0
            self._start_time = time.time()
            try:
                await asyncio.wait_for(self._condition.wait(), timeout=duration)
            except asyncio.TimeoutError:
                pass

    def start_events_sync(self) -> None:
        """
        Start the process for receiving events.
        """
        raise NotImplementedError("start_events_sync must be implemented for {}".format(self.__class__.__name__))

    def stop_events_sync(self) -> None:
        """
        Stop the process for receiving events.
        """
        raise NotImplementedError("stop_events_sync must be implemented for {}".format(self.__class__.__name__))

    async def start_events_async(self) -> None:
        """
        Start the process for receiving events.
        """
        raise NotImplementedError("start_events_async must be implemented for {}".format(self.__class__.__name__))

    async def stop_events_async(self) -> None:
        """
        Stop the process for receiving events.
        """
        raise NotImplementedError("stop_events_async must be implemented for {}".format(self.__class__.__name__))
|
zeus/datasets/transforms/pytorch/Numpy2Tensor.py | shaido987/vega | 240 | 80748 | <reponame>shaido987/vega<filename>zeus/datasets/transforms/pytorch/Numpy2Tensor.py
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""This is a class for Numpy2Tensor."""
import torch
import numpy as np
from zeus.common import ClassFactory, ClassType
@ClassFactory.register(ClassType.TRANSFORM)
class Numpy2Tensor(object):
    """Transform numpy arrays (or array-like inputs) into torch tensors."""

    def __call__(self, *args):
        """Convert each positional argument to a ``torch.Tensor``.

        :param args: one or more arrays (or nested sequences convertible by
            numpy, mirroring what the multi-argument path already accepted).
        :return: a single tensor when one argument is given, otherwise a
            tuple of tensors in input order.
        """
        if len(args) == 1:
            # np.asarray is a no-op for ndarrays (preserving from_numpy's
            # zero-copy, memory-sharing behavior) while also accepting
            # lists/tuples, which previously raised a TypeError here even
            # though the multi-argument branch handled them.
            return torch.from_numpy(np.asarray(args[0]))
        return tuple(torch.from_numpy(np.array(array)) for array in args)
|
server/explorer/utils/collection.py | s-bauer/yang-explorer | 437 | 80763 | <filename>server/explorer/utils/collection.py<gh_stars>100-1000
"""
Copyright 2015, Cisco Systems, Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: <NAME>, Cisco Systems, Inc.
"""
import os
import glob
import lxml.etree as ET
import logging
from explorer.models import Collection as Col
from explorer.models import User
from explorer.utils.adapter import Adapter
class Collection(object):
    """Utility routines for managing collection entries stored as XML files
    under ``data/collections/<collection-name>/``.

    Collections are tracked in the database (``explorer.models.Collection``)
    while individual entries live on disk as ``<entry-name>.xml`` files.
    """

    @staticmethod
    def add(metadata, payload):
        """Add an entry to a collection.

        :param metadata: XML string with ``<collection>``, ``<author>`` and
            ``<name>`` children describing the entry.
        :param payload: XML string for the entry body; the metadata children
            are merged into its ``<metadata>`` element before saving.
        :return: True on success, False otherwise.
        """
        if metadata in [None, '']:
            logging.error('Invalid metadata')
            return False
        if payload in [None, '', 'null']:
            logging.error('Invalid payload')
            return False

        metadata = ET.fromstring(metadata)
        payload = ET.fromstring(payload)
        logging.debug(ET.tostring(metadata))
        logging.debug(ET.tostring(payload))

        cname = metadata.find('collection').text
        author = metadata.find('author').text
        name = metadata.find('name').text

        # Create the collection record on first use; the author must be a
        # known user.
        if not Col.objects.filter(name=cname).exists():
            if not User.objects.filter(username=author).exists():
                logging.error('User %s does not exist !!' % author)
                return False
            user = User.objects.filter(username=author)
            obj = Col(name=cname, user=user[0])
            obj.save()
            logging.debug('Created new collection ' + cname)

        path = os.path.join('data', 'collections', cname)
        if not os.path.exists(path):
            logging.error('Path to collection does not exist : %s !!' % path)
            return False

        # Merge the metadata children into the payload's <metadata> element.
        for child in payload:
            if child.tag == 'metadata':
                for elem in metadata:
                    child.append(elem)

        cfile = os.path.join(path, name + '.xml')
        # BUG FIX: lxml's ET.tostring() returns bytes; the file must be
        # opened in binary mode, otherwise the write raises TypeError.
        with open(cfile, 'wb') as f:
            f.write(ET.tostring(payload))
        logging.debug('%s was saved successfully in collection %s' % (name, cname))
        return True

    @staticmethod
    def remove(metadata):
        """Remove an entry from a collection.

        :param metadata: XML string with ``<collection>`` and ``<name>``
            children identifying the entry.
        :return: True when the entry is gone (including when the collection
            or file never existed), False on invalid input.
        """
        if metadata is None or metadata == '':
            logging.error('Invalid metadata')
            return False

        metadata = ET.fromstring(metadata)
        cname = metadata.find('collection').text
        name = metadata.find('name').text

        if name is None or not name:
            logging.error('Invalid entry %s in argument!!' % name)
            return False

        # A missing collection or file already satisfies "removed".
        if not Col.objects.filter(name=cname).exists():
            logging.debug('Collection %s does not exists !!' % cname)
            return True

        path = os.path.join('data', 'collections', cname, name + '.xml')
        if not os.path.exists(path):
            logging.debug('Path to collection does not exist : %s !!' % path)
            return True

        os.remove(path)
        logging.debug('%s was successfully removed from collection %s' % (name, cname))
        return True

    @staticmethod
    def list():
        """Return a ``<collections>`` element aggregating the ``<metadata>``
        of every entry across all collections."""
        cols_elem = ET.Element('collections')
        for col in Col.objects.all():
            path = os.path.join('data', 'collections', col.name)
            if not os.path.exists(path):
                # DB row exists but the on-disk directory is missing.
                logging.error('Collection has inconstancy : %s !!' % col.name)
                continue
            files = glob.glob(os.path.join(path, '*'))
            for _file in files:
                payload = ET.parse(_file)
                for child in payload.getroot():
                    if child.tag == 'metadata':
                        cols_elem.append(child)
        return cols_elem

    @staticmethod
    def load(username, metadata):
        """Load a collection entry.

        :param username: user on whose behalf an xpath-format entry is
            converted to an RPC via ``Adapter.gen_rpc``.
        :param metadata: XML string with ``<collection>`` and ``<name>``
            children identifying the entry.
        :return: the parsed payload element (or generated RPC), or None on
            any failure.
        """
        if metadata is None or metadata == '':
            logging.error('Invalid metadata')
            return None

        metadata = ET.fromstring(metadata)
        cname = metadata.find('collection').text
        name = metadata.find('name').text

        if not Col.objects.filter(name=cname).exists():
            logging.debug('Collection %s does not exists !!' % cname)
            return None

        _file = os.path.join('data', 'collections', cname, name + '.xml')
        if not os.path.exists(_file):
            logging.error('Collection entry not found')
            return None

        with open(_file, 'r') as f:
            data = f.read()

        # BUG FIX: the emptiness check must happen before parsing; the old
        # code called ET.fromstring(data) first, which would raise on empty
        # input before the check could ever run.
        if not data:
            logging.error('Collection entry is empty')
            return None

        # BUG FIX: restore the HTML-entity unescape (the previous
        # replace('>', '>') / replace('<', '<') calls were no-ops).
        data = data.replace('&gt;', '>')
        data = data.replace('&lt;', '<')
        payload = ET.fromstring(data)

        fmt = payload.get('format', 'raw')
        if fmt == 'xpath':
            return Adapter.gen_rpc(username, data)
        return payload
|
src/genie/libs/parser/iosxr/tests/ShowSpanningTreePvrst/cli/equal/golden_output_1_expected.py | balmasea/genieparser | 204 | 80764 |
# All three VLANs (2, 3 and 4) in this golden output share identical bridge
# parameters and an identical four-port interface table; only the VLAN id
# differs.  Build the expected structure programmatically so the shared data
# lives in one place.

_ROOT_BRIDGE = '0021.1bff.d973'   # designated root bridge for every VLAN
_LOCAL_BRIDGE = '8cb6.4fff.6588'  # this bridge's own address


def _intf(name, port_num, role, port_state, designated_bridge_address):
    """Expected parsed data for a single spanning-tree port."""
    return {
        'name': name,
        'cost': 20000,
        'role': role,
        'port_priority': 128,
        'port_num': port_num,
        'port_state': port_state,
        'designated_bridge_priority': 32768,
        'designated_bridge_address': designated_bridge_address,
        'designated_port_priority': 128,
        # In this capture the designated port number always equals the
        # local port number.
        'designated_port_num': port_num,
    }


def _vlan(vlan_id):
    """Expected parsed data for one VLAN instance of the PVRST domain."""
    return {
        'vlan_id': vlan_id,
        'designated_root_priority': 32768,
        'designated_root_address': _ROOT_BRIDGE,
        'designated_root_max_age': 20,
        'designated_root_forward_delay': 15,
        'bridge_priority': 32768,
        'sys_id_ext': 0,
        'bridge_address': _LOCAL_BRIDGE,
        'bridge_max_age': 20,
        'bridge_forward_delay': 15,
        'bridge_transmit_hold_count': 6,
        'interface': {
            'GigabitEthernet0/7/0/0':
                _intf('GigabitEthernet0/7/0/0', 1, 'DSGN', 'FWD', _LOCAL_BRIDGE),
            'GigabitEthernet0/7/0/1':
                _intf('GigabitEthernet0/7/0/1', 2, 'DSGN', 'FWD', _LOCAL_BRIDGE),
            'GigabitEthernet0/7/0/10':
                _intf('GigabitEthernet0/7/0/10', 3, 'ROOT', 'FWD', _ROOT_BRIDGE),
            'GigabitEthernet0/7/0/11':
                _intf('GigabitEthernet0/7/0/11', 4, 'ALT', 'BLK', _ROOT_BRIDGE),
        },
    }


expected_output = {
    'pvst': {
        'a': {
            'pvst_id': 'a',
            'vlans': {vlan_id: _vlan(vlan_id) for vlan_id in (2, 3, 4)},
        },
    },
}
|
pyscf/grad/test/test_casscf.py | QuESt-Calculator/pyscf | 501 | 80788 | #!/usr/bin/env python
#
# Author: <NAME> <<EMAIL>>
#
from functools import reduce
import unittest
import numpy
from pyscf import lib
from pyscf import gto
from pyscf import scf
from pyscf import mcscf
from pyscf import ao2mo
from pyscf import fci
from pyscf.tools import molden
from pyscf.grad import rhf as rhf_grad
from pyscf.grad import casscf as casscf_grad
from pyscf.grad.mp2 import _shell_prange
from pyscf.fci.addons import fix_spin_
def grad_elec(mc, mf_grad):
    """Electronic part of the CASSCF nuclear gradient, computed explicitly
    from density matrices and derivative integrals.

    This is a slow reference implementation used by the tests below to
    validate ``pyscf.grad.casscf.Gradients``.

    :param mc: a converged CASSCF object.
    :param mf_grad: the gradient object of the underlying mean field.
    :returns: (natm, 3) ndarray with the electronic gradient contribution
        (nuclear repulsion is added separately by the caller).
    """
    mf = mf_grad.base
    mol = mf_grad.mol
    mo_energy = mc.mo_energy  # NOTE(review): unused below; kept from template
    mo_coeff = mc.mo_coeff
    ncore = mc.ncore
    ncas = mc.ncas
    nocc = ncore + ncas
    nelecas = mc.nelecas
    nao, nmo = mo_coeff.shape

    hcore_deriv = mf_grad.hcore_generator(mol)
    s1 = mf_grad.get_ovlp(mol)  # overlap derivative integrals

    # MO-basis 1- and 2-particle density matrices: doubly occupied core
    # block plus the CAS-space RDMs from the FCI solver.
    casdm1, casdm2 = mc.fcisolver.make_rdm12(mc.ci, ncas, nelecas)
    dm1 = numpy.zeros((nmo,nmo))
    dm1[numpy.diag_indices(ncore)] = 2
    dm1[ncore:nocc,ncore:nocc] = casdm1
    dm2 = numpy.zeros((nmo,nmo,nmo,nmo))
    for i in range(ncore):
        for j in range(ncore):
            # core-core Coulomb (4) and exchange (-2) contributions
            dm2[i,i,j,j] += 4
            dm2[i,j,j,i] -= 2
        # core-active cross terms
        dm2[i,i,ncore:nocc,ncore:nocc] = casdm1 * 2
        dm2[ncore:nocc,ncore:nocc,i,i] = casdm1 * 2
        dm2[i,ncore:nocc,ncore:nocc,i] =-casdm1
        dm2[ncore:nocc,i,i,ncore:nocc] =-casdm1
    dm2[ncore:nocc,ncore:nocc,ncore:nocc,ncore:nocc] = casdm2

    h1 = reduce(numpy.dot, (mo_coeff.T, mc._scf.get_hcore(), mo_coeff))
    h2 = ao2mo.kernel(mf._eri, mo_coeff, compact=False).reshape([nmo]*4)

    # Generalized Fock, according to generalized Brillouin theorem
    # Adv. Chem. Phys., 69, 63
    gfock = numpy.dot(h1, dm1)
    gfock+= numpy.einsum('iqrs,qjsr->ij', h2, dm2)
    gfock = (gfock + gfock.T) * .5
    dme0 = reduce(numpy.dot, (mo_coeff[:,:nocc], gfock[:nocc,:nocc], mo_coeff[:,:nocc].T))

    # Back-transform the density matrices to the AO basis for contraction
    # with the AO derivative integrals.
    dm1 = reduce(numpy.dot, (mo_coeff, dm1, mo_coeff.T))
    dm2 = lib.einsum('ijkl,pi,qj,rk,sl->pqrs', dm2,
                     mo_coeff, mo_coeff, mo_coeff, mo_coeff)

    eri_deriv1 = mol.intor('int2e_ip1', comp=3).reshape(3,nao,nao,nao,nao)

    atmlst = range(mol.natm)
    aoslices = mol.aoslice_by_atom()
    de = numpy.zeros((len(atmlst),3))

    for k, ia in enumerate(atmlst):
        shl0, shl1, p0, p1 = aoslices[ia]
        h1ao = hcore_deriv(ia)
        de[k] += numpy.einsum('xij,ij->x', h1ao, dm1)
        # overlap-derivative (orbital connection) term; factor 2 presumably
        # from the two equivalent bra/ket derivatives -- confirm
        de[k] -= numpy.einsum('xij,ij->x', s1[:,p0:p1], dme0[p0:p1]) * 2
        # two-electron derivative term restricted to this atom's AO slice
        de[k] -= numpy.einsum('xijkl,ijkl->x', eri_deriv1[:,p0:p1], dm2[p0:p1]) * 2
    return de
# Shared module-level fixtures: an N2 molecule with two extra hydrogens and
# a tightly converged RHF reference used by the tests below.
mol = gto.Mole()
mol.atom = 'N 0 0 0; N 0 0 1.2; H 1 1 0; H 1 1 1.2'
mol.verbose = 5
mol.output = '/dev/null'  # discard the verbose SCF/MCSCF log output
mol.symmetry = False
mol.build()
mf = scf.RHF(mol).run(conv_tol=1e-12)
def tearDownModule():
    """Release the module-level fixtures after the test run."""
    global mol, mf
    mol.stdout.close()  # close the /dev/null log handle opened by mol.build()
    del mol, mf
class KnownValues(unittest.TestCase):
    """Numerical regression tests for CASSCF nuclear gradients.

    Analytic gradients are checked against stored fingerprint reference
    values (``lib.fp``) and against finite-difference derivatives of the
    corresponding energy scanners (central difference, 0.002 Angstrom step
    converted to Bohr).
    """

    def test_casscf_grad(self):
        mc = mcscf.CASSCF(mf, 4, 4).run()
        g1 = casscf_grad.Gradients(mc).kernel()
        self.assertAlmostEqual(lib.fp(g1), -0.065094188906156134, 7)

        # Cross-check against the explicit density-matrix implementation
        # grad_elec() defined above.
        g1ref = grad_elec(mc, mf.nuc_grad_method())
        g1ref += rhf_grad.grad_nuc(mol)
        self.assertAlmostEqual(abs(g1-g1ref).max(), 0, 9)

        # Finite-difference check of the z-gradient on atom 1.
        mcs = mc.as_scanner()
        pmol = mol.copy()
        e1 = mcs(pmol.set_geom_('N 0 0 0; N 0 0 1.201; H 1 1 0; H 1 1 1.2'))
        e2 = mcs(pmol.set_geom_('N 0 0 0; N 0 0 1.199; H 1 1 0; H 1 1 1.2'))
        self.assertAlmostEqual(g1[1,2], (e1-e2)/0.002*lib.param.BOHR, 4)

#    def test_frozen(self):
#        mc = mcscf.CASSCF(mf, 4, 4).set(frozen=2).run()
#        gscan = mc.nuc_grad_method().as_scanner()
#        g1 = gscan(mol)[1]
#        self.assertAlmostEqual(lib.fp(g1), -0.065094188906156134, 9)
#
#        mcs = mc.as_scanner()
#        pmol = mol.copy()
#        e1 = mcs(pmol.set_geom_('N 0 0 0; N 0 0 1.201; H 1 1 0; H 1 1 1.2'))
#        e2 = mcs(pmol.set_geom_('N 0 0 0; N 0 0 1.199; H 1 1 0; H 1 1 1.2'))
#        self.assertAlmostEqual(g1[1,2], (e1-e2)/0.002*lib.param.BOHR, 4)

    def test_scanner(self):
        # Double as_scanner() must be idempotent.
        mc = mcscf.CASSCF(mf, 4, 4)
        gs = mc.nuc_grad_method().as_scanner().as_scanner()
        e, g1 = gs(mol.atom, atmlst=range(4))
        self.assertAlmostEqual(e, -108.39289688030243, 9)
        self.assertAlmostEqual(lib.fp(g1), -0.065094188906156134, 7)

    def test_state_specific_scanner(self):
        mol = gto.M(atom='N 0 0 0; N 0 0 1.2', basis='631g', verbose=0)
        mf = scf.RHF(mol).run(conv_tol=1e-14)
        mc = mcscf.CASSCF(mf, 4, 4)
        gs = mc.state_specific_(2).nuc_grad_method().as_scanner()
        e, de = gs(mol)
        self.assertAlmostEqual(e, -108.68788613661442, 7)
        self.assertAlmostEqual(lib.fp(de), -0.10695162143777398, 5)

        mcs = gs.base
        pmol = mol.copy()
        e1 = mcs(pmol.set_geom_('N 0 0 0; N 0 0 1.201'))
        e2 = mcs(pmol.set_geom_('N 0 0 0; N 0 0 1.199'))
        self.assertAlmostEqual(de[1,2], (e1-e2)/0.002*lib.param.BOHR, 5)

    def test_state_average_scanner(self):
        mc = mcscf.CASSCF(mf, 4, 4)
        mc.conv_tol = 1e-10  # B/c high sensitivity in the numerical test
        mc.fcisolver.conv_tol = 1e-10
        gs = mc.state_average_([0.5, 0.5]).nuc_grad_method().as_scanner()
        e_avg, de_avg = gs(mol)
        e_0, de_0 = gs(mol, state=0)
        e_1, de_1 = gs(mol, state=1)
        mcs = gs.base
        pmol = mol.copy()
        mcs(pmol.set_geom_('N 0 0 0; N 0 0 1.201; H 1 1 0; H 1 1 1.2'))
        e1_avg = mcs.e_average
        e1_0 = mcs.e_states[0]
        e1_1 = mcs.e_states[1]
        mcs(pmol.set_geom_('N 0 0 0; N 0 0 1.199; H 1 1 0; H 1 1 1.2'))
        e2_avg = mcs.e_average
        e2_0 = mcs.e_states[0]
        e2_1 = mcs.e_states[1]
        self.assertAlmostEqual(e_avg, -1.083838462140703e+02, 9)
        self.assertAlmostEqual(lib.fp(de_avg), -1.034340877615413e-01, 7)
        self.assertAlmostEqual(e_0, -1.083902662192770e+02, 9)
        self.assertAlmostEqual(lib.fp(de_0), -6.398928175384316e-02, 7)
        self.assertAlmostEqual(e_1, -1.083774262088640e+02, 9)
        self.assertAlmostEqual(lib.fp(de_1), -1.428890918624837e-01, 7)
        # Finite-difference checks for the average and per-state gradients.
        self.assertAlmostEqual(de_avg[1,2], (e1_avg-e2_avg)/0.002*lib.param.BOHR, 4)
        self.assertAlmostEqual(de_0[1,2], (e1_0-e2_0)/0.002*lib.param.BOHR, 4)
        self.assertAlmostEqual(de_1[1,2], (e1_1-e2_1)/0.002*lib.param.BOHR, 4)

    def test_state_average_mix_scanner(self):
        # Mixed-spin state average: one triplet-biased and one singlet solver.
        mc = mcscf.CASSCF(mf, 4, 4)
        mc.conv_tol = 1e-10  # B/c high sensitivity in the numerical test
        fcisolvers = [fci.solver (mol, singlet=bool(i)) for i in range (2)]
        fcisolvers[0].conv_tol = fcisolvers[1].conv_tol = 1e-10
        fcisolvers[0].spin = 2
        mc = mcscf.addons.state_average_mix_(mc, fcisolvers, (.5, .5))
        gs = mc.nuc_grad_method().as_scanner()
        e_avg, de_avg = gs(mol)
        e_0, de_0 = gs(mol, state=0)
        e_1, de_1 = gs(mol, state=1)
        mcs = gs.base
        pmol = mol.copy()
        mcs(pmol.set_geom_('N 0 0 0; N 0 0 1.201; H 1 1 0; H 1 1 1.2'))
        e1_avg = mcs.e_average
        e1_0 = mcs.e_states[0]
        e1_1 = mcs.e_states[1]
        mcs(pmol.set_geom_('N 0 0 0; N 0 0 1.199; H 1 1 0; H 1 1 1.2'))
        e2_avg = mcs.e_average
        e2_0 = mcs.e_states[0]
        e2_1 = mcs.e_states[1]
        self.assertAlmostEqual(e_avg, -1.083838462141992e+02, 9)
        self.assertAlmostEqual(lib.fp(de_avg), -1.034392760319145e-01, 7)
        self.assertAlmostEqual(e_0, -1.083902661656155e+02, 9)
        self.assertAlmostEqual(lib.fp(de_0), -6.398921123988113e-02, 7)
        self.assertAlmostEqual(e_1, -1.083774262627830e+02, 9)
        self.assertAlmostEqual(lib.fp(de_1), -1.428891618903179e-01, 7)
        self.assertAlmostEqual(de_avg[1,2], (e1_avg-e2_avg)/0.002*lib.param.BOHR, 4)
        self.assertAlmostEqual(de_0[1,2], (e1_0-e2_0)/0.002*lib.param.BOHR, 4)
        self.assertAlmostEqual(de_1[1,2], (e1_1-e2_1)/0.002*lib.param.BOHR, 4)

    def test_with_x2c_scanner(self):
        # Scalar-relativistic (X2C) Hamiltonian with an artificially low
        # speed of light to amplify relativistic effects.
        with lib.light_speed(20.):
            mc = mcscf.CASSCF(mf.x2c(), 4, 4).run()
            gscan = mc.nuc_grad_method().as_scanner()
            g1 = gscan(mol)[1]
            self.assertAlmostEqual(lib.fp(g1), -0.07027493570511917, 7)

            mcs = mcscf.CASSCF(mf, 4, 4).as_scanner().x2c()
            e1 = mcs('N 0 0 0; N 0 0 1.201; H 1 1 0; H 1 1 1.2')
            e2 = mcs('N 0 0 0; N 0 0 1.199; H 1 1 0; H 1 1 1.2')
            self.assertAlmostEqual(g1[1,2], (e1-e2)/0.002*lib.param.BOHR, 5)

    def test_with_qmmm_scanner(self):
        from pyscf import qmmm
        mol = gto.Mole()
        mol.atom = ''' O 0.00000000 0.00000000 -0.11081188
                       H -0.00000000 -0.84695236 0.59109389
                       H -0.00000000 0.89830571 0.52404783 '''
        mol.verbose = 0
        mol.basis = '6-31g'
        mol.build()
        coords = [(0.5,0.6,0.1)]
        #coords = [(0.0,0.0,0.0)]
        charges = [-0.1]
        mf = qmmm.add_mm_charges(scf.RHF(mol), coords, charges)
        mc = mcscf.CASSCF(mf, 4, 4).as_scanner()
        e_tot, g = mc.nuc_grad_method().as_scanner()(mol)
        self.assertAlmostEqual(e_tot, -76.0461574155984, 7)
        self.assertAlmostEqual(lib.fp(g), 0.042835374915102364, 6)
        # Finite-difference check along the x coordinate of the oxygen.
        e1 = mc(''' O 0.00100000 0.00000000 -0.11081188
                    H -0.00000000 -0.84695236 0.59109389
                    H -0.00000000 0.89830571 0.52404783 ''')
        e2 = mc(''' O -0.00100000 0.00000000 -0.11081188
                    H -0.00000000 -0.84695236 0.59109389
                    H -0.00000000 0.89830571 0.52404783 ''')
        ref = (e1 - e2)/0.002 * lib.param.BOHR
        self.assertAlmostEqual(g[0,0], ref, 4)

        # Same result whether MM charges are attached to the SCF or CASSCF.
        mf = scf.RHF(mol)
        mc = qmmm.add_mm_charges(mcscf.CASSCF(mf, 4, 4).as_scanner(), coords, charges)
        e_tot, g = mc.nuc_grad_method().as_scanner()(mol)
        self.assertAlmostEqual(e_tot, -76.0461574155984, 7)
        self.assertAlmostEqual(lib.fp(g), 0.042835374915102364, 6)

    def test_symmetrize(self):
        # Gradient with point-group symmetry enabled on the molecule.
        mol = gto.M(atom='N 0 0 0; N 0 0 1.2', basis='631g', symmetry=True, verbose=0)
        g = mol.RHF.run().CASSCF(4, 4).run().Gradients().kernel()
        self.assertAlmostEqual(lib.fp(g), 0.12355818572359845, 7)
# Allow running this test module directly.
if __name__ == "__main__":
    print("Tests for CASSCF gradients")
    unittest.main()
|
pybotters/client.py | maruuuui/pybotters | 176 | 80807 | from __future__ import annotations
import asyncio
import json
import logging
import os
from typing import Any, Mapping, Optional, Union
import aiohttp
from aiohttp import hdrs
from aiohttp.client import _RequestContextManager
from .auth import Auth
from .request import ClientRequest
from .typedefs import WsBytesHandler, WsJsonHandler, WsStrHandler
from .ws import ClientWebSocketResponse, ws_run_forever
logger = logging.getLogger(__name__)
class Client:
    """
    HTTP request client with automatic exchange-API authentication.

    .. note::
        The *apis* argument may be omitted.

    :Example:

    .. code-block:: python

        async def main():
            async with pybotters.Client(apis={'example': ['KEY', 'SECRET']}) as client:
                r = await client.get('https://...', params={'foo': 'bar'})
                print(await r.json())

    .. code-block:: python

        async def main():
            async with pybotters.Client(apis={'example': ['KEY', 'SECRET']}) as client:
                wstask = await client.ws_connect(
                    'wss://...',
                    send_json={'foo': 'bar'},
                    hdlr_json=pybotters.print_handler
                )
                await wstask
                # Ctrl+C to break

    Basic API

    Synchronous HTTP request functions are also available at the package top
    level. They wrap pybotters.Client internally.

    :Example:

    .. code-block:: python

        r = pybotters.get(
            'https://...',
            params={'foo': 'bar'},
            apis={'example': ['KEY', 'SECRET']}
        )
        print(r.text())
        print(r.json())

    .. code-block:: python

        pybotters.ws_connect(
            'wss://...',
            send_json={'foo': 'bar'},
            hdlr_json=pybotters.print_handler,
            apis={'example': ['KEY', 'SECRET']}
        )
        # Ctrl+C to break
    """

    # Underlying aiohttp session shared by every request from this client.
    _session: aiohttp.ClientSession
    # Prefix automatically prepended to each request URL.
    _base_url: str

    def __init__(
        self,
        apis: Optional[Union[dict[str, list[str]], str]] = None,
        base_url: str = '',
        **kwargs: Any,
    ) -> None:
        """
        :param apis: API key/secret data (optional),
            e.g. ``{'exchange': ['key', 'secret']}``, or a path to a JSON
            file with the same structure.
        :param base_url: URL automatically prepended to the ``url`` argument
            of the request methods (optional).
        :param ``**kwargs``: keyword arguments passed through to
            ``aiohttp.ClientSession`` (optional).
        """
        self._session = aiohttp.ClientSession(
            request_class=ClientRequest,
            ws_response_class=ClientWebSocketResponse,
            **kwargs,
        )
        apis = self._load_apis(apis)
        # Stash the encoded credentials on the session so the Auth machinery
        # in ClientRequest can sign outgoing requests.
        self._session.__dict__['_apis'] = self._encode_apis(apis)
        self._base_url = base_url

    async def __aenter__(self) -> 'Client':
        return self

    async def __aexit__(self, *args: Any) -> None:
        await self.close()

    async def close(self) -> None:
        """Close the underlying aiohttp session."""
        await self._session.close()

    def _request(
        self,
        method: str,
        url: str,
        *,
        params: Optional[Mapping[str, Any]] = None,
        data: Optional[dict[str, Any]] = None,
        auth: Optional[type[Auth]] = Auth,
        **kwargs: Any,
    ) -> _RequestContextManager:
        # Internal helper shared by request/get/post/put/delete; prepends
        # the configured base URL.
        return self._session.request(
            method=method,
            url=self._base_url + url,
            params=params,
            data=data,
            auth=auth,
            **kwargs,
        )

    def request(
        self,
        method: str,
        url: str,
        *,
        params: Optional[Mapping[str, str]] = None,
        data: Any = None,
        **kwargs: Any,
    ) -> _RequestContextManager:
        """
        :param method: HTTP method such as GET, POST, PUT, DELETE.
        :param url: request URL (appended to *base_url*).
        :param params: URL query string (optional).
        :param data: request body (optional).
        :param headers: request headers (optional).
        :param auth: enables/disables automatic API authentication. Enabled
            by default; pass ``auth=None`` to disable (optional).
        :param ``kwargs``: keyword arguments passed through to
            ``aiohttp.ClientSession.request`` (optional).
        """
        return self._request(method, url, params=params, data=data, **kwargs)

    def get(
        self,
        url: str,
        *,
        params: Optional[Mapping[str, str]] = None,
        **kwargs: Any,
    ) -> _RequestContextManager:
        """Shortcut for :meth:`request` with the GET method."""
        return self._request(hdrs.METH_GET, url, params=params, **kwargs)

    def post(
        self,
        url: str,
        *,
        data: Any = None,
        **kwargs: Any,
    ) -> _RequestContextManager:
        """Shortcut for :meth:`request` with the POST method."""
        return self._request(hdrs.METH_POST, url, data=data, **kwargs)

    def put(
        self,
        url: str,
        *,
        data: Any = None,
        **kwargs: Any,
    ) -> _RequestContextManager:
        """Shortcut for :meth:`request` with the PUT method."""
        return self._request(hdrs.METH_PUT, url, data=data, **kwargs)

    def delete(
        self,
        url: str,
        *,
        data: Any = None,
        **kwargs: Any,
    ) -> _RequestContextManager:
        """Shortcut for :meth:`request` with the DELETE method."""
        return self._request(hdrs.METH_DELETE, url, data=data, **kwargs)

    async def ws_connect(
        self,
        url: str,
        *,
        send_str: Optional[Union[str, list[str]]] = None,
        send_bytes: Optional[Union[bytes, list[bytes]]] = None,
        send_json: Any = None,
        hdlr_str: Optional[WsStrHandler] = None,
        hdlr_bytes: Optional[WsBytesHandler] = None,
        hdlr_json: Optional[WsJsonHandler] = None,
        **kwargs: Any,
    ) -> asyncio.Task:
        """
        Open a WebSocket connection that automatically reconnects, returning
        the task that runs it.

        :param url: WebSocket URL.
        :param send_str: string message(s) to send on connect; a string or a
            list of strings (optional).
        :param send_bytes: bytes message(s) to send on connect; bytes or a
            list of bytes (optional).
        :param send_json: JSON message(s) to send on connect; a dict or a
            list of dicts (optional).
        :param hdlr_str: handler for received data. Called with the message
            as *str* and the ClientWebSocketResponse (optional).
        :param hdlr_bytes: handler for received data. Called with the message
            as *bytes* and the ClientWebSocketResponse (optional).
        :param hdlr_json: handler for received data. Called with the decoded
            JSON-like object and the ClientWebSocketResponse (optional).
        :param headers: request headers (optional).
        :param auth: enables/disables automatic API authentication. Enabled
            by default; pass ``auth=None`` to disable (optional).
        :param ``**kwargs``: keyword arguments passed through to
            ``aiohttp.ClientSession.ws_connect`` (optional).
        """
        event = asyncio.Event()
        task = asyncio.create_task(
            ws_run_forever(
                url,
                self._session,
                event,
                send_str=send_str,
                send_bytes=send_bytes,
                send_json=send_json,
                hdlr_str=hdlr_str,
                hdlr_bytes=hdlr_bytes,
                hdlr_json=hdlr_json,
                **kwargs,
            )
        )
        # Wait until the first connection is established before returning.
        await event.wait()
        return task

    @staticmethod
    def _load_apis(
        apis: Optional[Union[dict[str, list[str]], str]]
    ) -> dict[str, list[str]]:
        """Resolve the *apis* argument into a credentials dict.

        Resolution order: an explicitly passed non-empty dict, then
        ``./apis.json`` in the current working directory, then the file
        named by the ``PYBOTTERS_APIS`` environment variable; a string is
        treated as a path to a JSON file. Falls back to an empty dict.
        """
        if apis is None:
            apis = {}
        if isinstance(apis, dict):
            if apis:
                return apis
            else:
                current_apis = os.path.join(os.getcwd(), 'apis.json')
                if os.path.isfile(current_apis):
                    with open(current_apis) as fp:
                        return json.load(fp)
                else:
                    env_apis = os.getenv('PYBOTTERS_APIS')
                    if env_apis and os.path.isfile(env_apis):
                        with open(env_apis) as fp:
                            return json.load(fp)
                    else:
                        return apis
        elif isinstance(apis, str):
            with open(apis) as fp:
                return json.load(fp)
        else:
            logger.warning(f'apis must be dict or str, not {apis.__class__.__name__}')
            return {}

    @staticmethod
    def _encode_apis(
        apis: Optional[dict[str, list[str]]]
    ) -> dict[str, tuple[str, bytes]]:
        """Convert credentials to ``(key, secret-bytes)`` tuples.

        The secret is pre-encoded to bytes for the Auth signer. Entries that
        do not have exactly two items are silently dropped.
        """
        if apis is None:
            apis = {}
        encoded = {}
        for name in apis:
            if len(apis[name]) == 2:
                encoded[name] = (apis[name][0], apis[name][1].encode())
        return encoded
|
airesources/Python3/hlt/__init__.py | xinnosuke/Halite-II | 232 | 80813 | <filename>airesources/Python3/hlt/__init__.py
"""
Halite II Python 3 starter kit
See MyBot.py for a basic usage example. In short, you should initialize() at
the start, then in a loop, call get_map() to get the current game state, then
build up a list of commands and send them with send_command_queue().
"""
from . import collision, constants, entity, game_map, networking
from .networking import Game
|
pywick/losses.py | achaiah/pywick | 408 | 80837 | <filename>pywick/losses.py<gh_stars>100-1000
"""
Losses are critical to training a neural network well. The training can only make progress if you
provide a meaningful measure of loss for each training step. What the loss looks like usually depends
on your application. Pytorch has a number of `loss functions <https://pytorch.org/docs/stable/nn.html#loss-functions/>`_ that
you can use out of the box. However, some more advanced and cutting edge loss functions exist that are not (yet) part of
Pytorch. We include those below for your experimenting.\n
**Caution:** if you decide to use one of these, you will definitely want to peruse the source code first, as it has
many additional useful notes and references which will help you.
Keep in mind that losses are specific to the type of task. Classification losses are computed differently from Segmentation losses.
Within segmentation domain make sure to use BCE (Binary Cross Entropy) for any work involving binary masks (e.g. num_classes = 1)
Make sure to read the documentation and notes (in the code) for each loss to understand how it is applied.
`Read this blog post <https://gombru.github.io/2018/05/23/cross_entropy_loss/>`_
Note:
Logit is the vector of raw (non-normalized) predictions that a classification model generates, which is ordinarily then passed to a normalization function.
If the model is solving a multi-class classification problem, logits typically become an input to the softmax function. The softmax function then generates
a vector of (normalized) probabilities with one value for each possible class.
For example, BCEWithLogitsLoss is a BCE that accepts R((-inf, inf)) and automatically applies torch.sigmoid to convert it to ([0,1]) space.
However, if you use one-hot encoding or similar methods where you need to convert a tensor to pytorch from another source (e.g. numpy), you will need to
make sure to apply the correct type to the resulting tensor. E.g. If y_hot is of type long and the BCE loss expects a Tensor of type float then you
can try converting y_hot with y_hot = y_hot.type_as(output).
To convert predictions into (0,1) range you will sometimes need to use either softmax or sigmoid.
Softmax is used for multi-classification in the Logistic Regression model, whereas Sigmoid is used for binary classification in the Logistic Regression model
"""
## Various loss calculation functions ##
# Sources: https://github.com/bermanmaxim/jaccardSegment/blob/master/losses.py (?)
# https://github.com/doodledood/carvana-image-masking-challenge/blob/master/losses.py (MIT)
# https://github.com/atlab/attorch/blob/master/attorch/losses.py (MIT)
# https://github.com/EKami/carvana-challenge (MIT)
# https://github.com/DingKe/pytorch_workplace (MIT)
import numpy as np
import torch
import math
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
from torch.autograd import Variable
from torch import Tensor
from typing import Iterable, Set
__all__ = ['ActiveContourLoss', 'ActiveContourLossAlt', 'AngularPenaltySMLoss', 'AsymLoss', 'BCELoss2d', 'BCEDiceLoss',
'BCEWithLogitsViewLoss', 'BCEDiceTL1Loss', 'BCEDicePenalizeBorderLoss', 'BCEDiceFocalLoss', 'BinaryFocalLoss',
'ComboBCEDiceLoss', 'ComboSemsegLossWeighted', 'EncNetLoss', 'FocalLoss', 'FocalLoss2',
'HausdorffERLoss', 'HausdorffDTLoss', 'LovaszSoftmax', 'mIoULoss', 'MixSoftmaxCrossEntropyOHEMLoss',
'MSE3D', 'OhemCELoss', 'OhemCrossEntropy2d', 'OhemBCEDicePenalizeBorderLoss', 'PoissonLoss',
'PoissonLoss3d', 'RecallLoss', 'RMILoss', 'RMILossAlt', 'RMIBCEDicePenalizeBorderLoss', 'SoftInvDiceLoss',
'SoftDiceLoss', 'StableBCELoss', 'TverskyLoss', 'ThresholdedL1Loss', 'WeightedSoftDiceLoss', 'WeightedBCELoss2d',
'BDLoss', 'L1Loss3d', 'WingLoss', 'BoundaryLoss']
VOID_LABEL = 255
N_CLASSES = 1
class StableBCELoss(nn.Module):
def __init__(self, **_):
super(StableBCELoss, self).__init__()
@staticmethod
def forward(input_, target, **_):
neg_abs = - input_.abs()
loss = input_.clamp(min=0) - input_ * target + (1 + neg_abs.exp()).log()
return loss.mean()
# WARN: Only applicable to Binary Segmentation!
def binaryXloss(logits, label):
mask = (label.view(-1) != VOID_LABEL)
nonvoid = mask.long().sum()
if nonvoid == 0:
# only void pixels, the gradients should be 0
return logits.sum() * 0.
# if nonvoid == mask.numel():
# # no void pixel, use builtin
# return F.cross_entropy(logits, label)
target = label.contiguous().view(-1)[mask]
logits = logits.contiguous().view(-1)[mask]
# loss = F.binary_cross_entropy(logits, target.float())
loss = StableBCELoss()(logits, target.float())
return loss
def naive_single(logit, label):
# single images
mask = (label.view(-1) != 255)
num_preds = mask.long().sum()
if num_preds == 0:
# only void pixels, the gradients should be 0
return logit.sum() * 0.
target = label.contiguous().view(-1)[mask].float()
logit = logit.contiguous().view(-1)[mask]
prob = torch.sigmoid(logit)
intersect = target * prob
union = target + prob - intersect
loss = (1. - intersect / union).sum()
return loss
# WARN: Only applicable to Binary Segmentation!
def hingeloss(logits, label):
mask = (label.view(-1) != 255)
num_preds = mask.long().sum().item()
if num_preds == 0:
# only void pixels, the gradients should be 0
return logits.sum().item() * 0.
target = label.contiguous().view(-1)[mask]
target = 2. * target.float() - 1. # [target == 0] = -1
logits = logits.contiguous().view(-1)[mask]
hinge = 1. / num_preds * F.relu(1. - logits * target).sum().item()
return hinge
def gamma_fast(gt, permutation):
p = len(permutation)
gt = gt.gather(0, permutation)
gts = gt.sum()
intersection = gts - gt.float().cumsum(0)
union = gts + (1 - gt).float().cumsum(0)
jaccard = 1. - intersection / union
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
# WARN: Only applicable to Binary Segmentation right now (zip function needs to be replaced)!
def lovaszloss(logits, labels, prox=False, max_steps=20, debug=None):
"""
`The Lovasz-Softmax loss <https://arxiv.org/abs/1705.08790>`_
:param logits:
:param labels:
:param prox:
:param max_steps:
:param debug:
:return:
"""
if debug is None:
debug = {}
# image-level Lovasz hinge
if logits.size(0) == 1:
# single image case
loss = lovasz_single(logits.squeeze(0), labels.squeeze(0), prox, max_steps, debug)
else:
losses = []
# assert len(logits[0]) == len(labels[0])
for logit, label in zip(logits, labels):
loss = lovasz_single(logit, label, prox, max_steps, debug)
losses.append(loss)
loss = sum(losses) / len(losses)
return loss
def naiveloss(logits, labels):
# image-level Lovasz hinge
if logits.size(0) == 1:
# single image case
loss = naive_single(logits.squeeze(0), labels.squeeze(0))
else:
losses = []
for logit, label in zip(logits, labels):
loss = naive_single(logit, label)
losses.append(loss)
loss = sum(losses) / len(losses)
return loss
def iouloss(pred, gt):
# works for one binary pred and associated target
# make byte tensors
pred = (pred == 1)
mask = (gt != 255)
gt = (gt == 1)
union = (gt | pred)[mask].long().sum()
if not union:
return 0.
else:
intersection = (gt & pred)[mask].long().sum()
return 1. - intersection / union
def compute_step_length(x, grad, active, eps=1e-6):
    """Largest step along ``grad`` before two coordinates of ``x`` merge or hit zero.

    :param x: 1-D sorted (descending) tensor of current iterates
    :param grad: descent direction, same shape as ``x``
    :param active: index of the last active coordinate
    :param eps: tolerance below which coordinates count as already equal
    :return: (delta, ind) — step size and index of the merging pair;
        ``ind == -1`` means the active coordinate reaches the 0 border first
    """
    # compute next intersection with an edge in the direction grad
    # OR next intersection with a 0 - border
    # returns: delta in ind such that:
    # after a step delta in the direction grad, x[ind] and x[ind+1] will be equal
    delta = np.inf
    ind = -1
    if active > 0:
        numerator = (x[:active] - x[1:active + 1])  # always positive (because x is sorted)
        denominator = (grad[:active] - grad[1:active + 1])
        # indices corresponding to negative denominator won't intersect
        # also, we are not interested in indices in x that are *already equal*
        valid = (denominator > eps) & (numerator > eps)
        valid_indices = valid.nonzero()
        intersection_times = numerator[valid] / denominator[valid]
        # NOTE(review): the truthiness of .size() and the delta[0] / ind[0, 0]
        # indexing assume legacy (pre-0.4) torch min/nonzero return shapes —
        # verify against the torch version actually in use.
        if intersection_times.size():
            delta, ind = intersection_times.min(0)
            ind = valid_indices[ind]
            delta, ind = delta[0], ind[0, 0]
    if grad[active] > 0:
        # the active coordinate is shrinking: it may hit the 0 border before any merge
        intersect_zero = x[active] / grad[active]
        if intersect_zero > 0. and intersect_zero < delta:
            return intersect_zero, -1
    return delta, ind
def project(gam, active, members):
    """Average ``gam`` (in place) over each merged index group and zero the inactive tail.

    :param gam: 1-D tensor of gradient values, modified in place
    :param active: index of the last active coordinate
    :param members: dict mapping each index to the set of indices merged with it
        (merged indices share the same set object)
    """
    tovisit = set(range(active + 1))
    while tovisit:
        v = tovisit.pop()
        if len(members[v]) > 1:
            # replace each member's value by the group average; remove the rest
            # of the group from the visit set so it is processed only once
            avg = 0.
            for k in members[v]:
                if k != v: tovisit.remove(k)
                avg += gam[k] / len(members[v])
            for k in members[v]:
                gam[k] = avg
    if active + 1 < len(gam):
        # coordinates beyond the active set carry no gradient
        gam[active + 1:] = 0.
def find_proximal(x0, gam, lam, eps=1e-6, max_steps=20, debug=None):
    """Proximal-point solver used by the prox variant of the Lovasz hinge.

    Walks from ``x0`` along ``gam``, merging coordinates as they become equal
    (tracked in ``members``) and re-projecting the gradient after each merge,
    until the stopping condition involving ``lam`` is met or ``max_steps`` runs out.

    :param x0: sorted margins data (1-D tensor)
    :param gam: initial gamma_fast(target, perm); modified in place by project()
    :param lam: regularisation parameter
    :param eps: tolerance for treating coordinates as equal / active
    :param max_steps: iteration cap
    :param debug: optional dict; keys "path"/"step"/"finished" get populated if present
    :return: (x, gam) — the proximal point and the final projected gradient
    """
    if debug is None:
        debug = {}
    # x0: sorted margins data
    # gam: initial gamma_fast(target, perm)
    # regularisation parameter lam
    x = x0.clone()
    act = (x >= eps).nonzero()
    finished = False
    if not act.size():
        # nothing active: x0 is already the answer
        finished = True
    else:
        active = act[-1, 0]
        members = {i: {i} for i in range(active + 1)}
        if active > 0:
            # merge coordinates that start out (numerically) equal
            equal = (x[:active] - x[1:active + 1]) < eps
            for i, e in enumerate(equal):
                if e:
                    members[i].update(members[i + 1])
                    members[i + 1] = members[i]
            project(gam, active, members)
    step = 0
    while not finished and step < max_steps and active > -1:
        step += 1
        res = compute_step_length(x, gam, active, eps)
        delta, ind = res
        if ind == -1:
            # the active coordinate hit the 0 border: shrink the active set
            active = active - len(members[active])
        # stopping condition derived from the proximal objective
        stop = torch.dot(x - x0, gam) / torch.dot(gam, gam) + 1. / lam
        if 0 <= stop < delta:
            delta = stop
            finished = True
        x = x - delta * gam
        if not finished:
            if ind >= 0:
                # two coordinates met: merge their groups onto one representative
                repr = min(members[ind])
                members[repr].update(members[ind + 1])
                for m in members[ind]:
                    if m != repr:
                        members[m] = members[repr]
            project(gam, active, members)
        if "path" in debug:
            debug["path"].append(x.numpy())
        if "step" in debug:
            debug["step"] = step
        if "finished" in debug:
            debug["finished"] = finished
    return x, gam
def lovasz_binary(margins, label, prox=False, max_steps=20, debug=None):
    """Lovasz hinge on 1-D margin/label vectors.

    :param margins: 1-D tensor of hinge margins (1 - logit * sign)
    :param label: 1-D ground-truth tensor aligned with ``margins``
    :param prox: False, or the lambda regularisation value for the proximal variant
    :param max_steps: iteration cap for the proximal solver
    :param debug: optional dict populated with solver diagnostics
    :return: scalar loss; with ``prox`` set, also the gradient hook handle and gamma
    """
    if debug is None:
        debug = {}
    # 1d vector inputs
    # Workaround: can't sort Variable bug
    # prox: False or lambda regularization value
    _, perm = torch.sort(margins.detach(), dim=0, descending=True)
    margins_sorted = margins[perm]
    grad = gamma_fast(label, perm)
    loss = torch.dot(F.relu(margins_sorted), grad)
    if prox is not False:
        # proximal variant: backprop receives (margins_sorted - proximal point)
        # instead of the natural gradient, via the registered hook
        xp, gam = find_proximal(margins_sorted.detach(), grad, prox, max_steps=max_steps, eps=1e-6, debug=debug)
        hook = margins_sorted.register_hook(lambda grad: (margins_sorted.detach() - xp))
        return loss, hook, gam
    else:
        return loss
def lovasz_single(logit, label, prox=False, max_steps=20, debug=None):
    """Lovasz hinge for a single image; label value 255 marks void pixels."""
    debug = {} if debug is None else debug
    valid = (label.view(-1) != 255)
    n_valid = valid.long().sum()
    if n_valid == 0:
        # only void pixels: return a zero still attached to the autograd graph
        return logit.sum() * 0.
    target = label.contiguous().view(-1)[valid]
    signs = 2. * target.float() - 1.
    flat_logit = logit.contiguous().view(-1)[valid]
    margins = (1. - flat_logit * signs)
    return lovasz_binary(margins, target, prox, max_steps, debug=debug)
def dice_coefficient(logit, label, isCuda=True):
    '''
    Dice coefficient between hard-thresholded prediction and label.

    Vectorized replacement for the original per-element Python loop (which the
    original comment itself flagged as "VERY SLOW"); the strict > 0.5 threshold
    and the masked_select-based intersection are preserved exactly.
    :param logit: calculated guess (expects torch.Tensor)
    :param label: truth label (expects torch.Tensor)
    :param isCuda: kept for backward compatibility; device now follows the inputs
    :return: dice coefficient
    '''
    # hard-threshold both tensors at 0.5 (strictly greater -> 1.0, else 0.0)
    A = (label.view(-1) > 0.5).float()
    B = (logit.view(-1) > 0.5).float()
    if len(A) != len(B):
        raise AssertionError
    # select B where A is 1: sum equals |A ∩ B| since both are 0/1
    intersection = torch.masked_select(B, A.bool()).sum()
    dice = intersection * 2.0 / (B.sum() + A.sum())
    return dice
# ==================================== #
# Source: https://github.com/EKami/carvana-challenge
class WeightedSoftDiceLoss(torch.nn.Module):
    """Soft dice loss where every pixel contributes with weight ``w**2``."""
    def __init__(self, **_):
        super(WeightedSoftDiceLoss, self).__init__()
    @staticmethod
    def forward(logits, labels, weights, **_):
        batch = labels.size(0)
        probs = torch.sigmoid(logits)
        w2 = weights.view(batch, -1) ** 2
        p = probs.view(batch, -1)
        t = labels.view(batch, -1)
        overlap = (w2 * (p * t)).sum(1)
        denom = (w2 * p).sum(1) + (w2 * t).sum(1)
        # +1 smoothing in numerator and denominator avoids division by zero
        dice = 2. * (overlap + 1) / (denom + 1)
        return 1 - dice.sum() / batch
def dice_coeff(pred, target):
    """Soft dice coefficient over a batch, with +1 smoothing in both terms."""
    smooth = 1.
    batch = pred.size(0)
    flat_pred = pred.view(batch, -1)
    flat_target = target.view(batch, -1)
    overlap = (flat_pred * flat_target).sum()
    return (2. * overlap + smooth) / (flat_pred.sum() + flat_target.sum() + smooth)
def dice_coeff_hard_np(y_true, y_pred):
    """Hard dice score between numpy arrays; predictions are rounded to {0, 1}.

    Fixes the original calls to ``np.flatten``, which does not exist as a
    module-level function (flatten is an ndarray method), so the function
    always raised AttributeError. ``np.ravel`` is the equivalent free function.
    """
    smooth = 1.
    y_true_f = np.ravel(y_true)
    y_pred_f = np.round(np.ravel(y_pred))
    intersection = np.sum(y_true_f * y_pred_f)
    score = (2. * intersection + smooth) / (np.sum(y_true_f) + np.sum(y_pred_f) + smooth)
    return score
# ==================================== #
# Source: https://github.com/doodledood/carvana-image-masking-challenge/blob/master/losses.py
# TODO Replace this with nn.BCEWithLogitsLoss??
class BCELoss2d(nn.Module):
    """Binary cross entropy on flattened sigmoid probabilities (accepts N-D inputs)."""
    def __init__(self, weight=None, size_average=True, **_):
        super(BCELoss2d, self).__init__()
        self.bce_loss = nn.BCELoss(weight, size_average)
    def forward(self, logits, labels, **_):
        flat_probs = torch.sigmoid(logits).view(-1)
        flat_labels = labels.view(-1)
        return self.bce_loss(flat_probs, flat_labels)
class SoftDiceLoss(nn.Module):
    """1 - soft dice score on sigmoid probabilities, averaged over the batch."""
    def __init__(self, smooth=1.0, **_):
        super(SoftDiceLoss, self).__init__()
        self.smooth = smooth
    def forward(self, logits, labels, **_):
        batch = labels.size(0)
        p = torch.sigmoid(logits).view(batch, -1)
        t = labels.view(batch, -1)
        overlap = (p * t).sum(1)
        # smoothing term keeps the ratio defined for empty masks
        dice = 2. * (overlap + self.smooth) / (p.sum(1) + t.sum(1) + self.smooth)
        return 1 - dice.sum() / batch
class FocalLoss(nn.Module):
    """
    Weighs the contribution of each sample to the loss based in the classification error.
    If a sample is already classified correctly by the CNN, its contribution to the loss decreases.

    :eps: Focusing parameter. eps=0 is equivalent to BCE_loss
    """
    def __init__(self, l=0.5, eps=1e-6, **_):
        super(FocalLoss, self).__init__()
        self.l = l
        self.eps = eps
    def forward(self, logits, labels, **_):
        flat_labels = labels.view(-1)
        flat_probs = torch.sigmoid(logits).view(-1)
        # focal modulation: confident correct predictions are down-weighted
        pos_term = flat_labels * (1. - flat_probs).pow(self.l) * torch.log(flat_probs + self.eps)
        neg_term = (1. - flat_labels) * flat_probs.pow(self.l) * torch.log(1. - flat_probs + self.eps)
        return torch.mean(-(pos_term + neg_term))
class ThresholdedL1Loss(nn.Module):
    """Mean absolute error between labels and hard-thresholded sigmoid predictions."""
    def __init__(self, threshold=0.5, **_):
        super(ThresholdedL1Loss, self).__init__()
        self.threshold = threshold
    def forward(self, logits, labels, **_):
        hard_preds = (torch.sigmoid(logits).view(-1) > self.threshold).float()
        return torch.mean(torch.abs(labels.view(-1) - hard_preds))
class BCEDiceTL1Loss(nn.Module):
    """Unweighted sum of BCE-with-logits, soft dice, and thresholded-L1 losses."""
    def __init__(self, threshold=0.5, **_):
        super(BCEDiceTL1Loss, self).__init__()
        self.bce = nn.BCEWithLogitsLoss(weight=None, size_average=None, reduce=None, reduction='mean', pos_weight=None)
        self.dice = SoftDiceLoss()
        self.tl1 = ThresholdedL1Loss(threshold=threshold)
    def forward(self, logits, labels, **_):
        bce_part = self.bce(logits, labels)
        dice_part = self.dice(logits, labels)
        tl1_part = self.tl1(logits, labels)
        return bce_part + dice_part + tl1_part
class BCEDiceFocalLoss(nn.Module):
    '''
    Weighted sum of BCE, soft dice, and focal losses.
    :param focal_param: focusing parameter forwarded to FocalLoss
    :param weights: (list(), default = [1,1,1]) Optional weighing (0.0-1.0) of the losses in order of [bce, dice, focal]
    '''
    def __init__(self, focal_param, weights=None, **kwargs):
        super(BCEDiceFocalLoss, self).__init__()
        self.weights = [1.0, 1.0, 1.0] if weights is None else weights
        self.bce = BCEWithLogitsViewLoss(weight=None, size_average=True, **kwargs)
        self.dice = SoftDiceLoss(**kwargs)
        self.focal = FocalLoss(l=focal_param, **kwargs)
    def forward(self, logits, labels, **_):
        bce_part = self.weights[0] * self.bce(logits, labels)
        dice_part = self.weights[1] * self.dice(logits, labels)
        # the focal term expects an explicit channel dimension
        focal_part = self.weights[2] * self.focal(logits.unsqueeze(1), labels.unsqueeze(1))
        return bce_part + dice_part + focal_part
class BCEDiceLoss(nn.Module):
    """Unweighted sum of BCELoss2d and SoftDiceLoss."""
    def __init__(self, **_):
        super(BCEDiceLoss, self).__init__()
        self.bce = BCELoss2d()
        self.dice = SoftDiceLoss()
    def forward(self, logits, labels, **_):
        return self.bce(logits, labels) + self.dice(logits, labels)
class WeightedBCELoss2d(nn.Module):
    """Per-pixel weighted BCE-with-logits using the numerically stable formulation."""
    def __init__(self, **_):
        super(WeightedBCELoss2d, self).__init__()
    @staticmethod
    def forward(logits, labels, weights, **_):
        w = weights.view(-1)
        z = logits.view(-1)
        t = labels.view(-1)
        # stable BCE with logits: max(z, 0) - z*t + log(1 + exp(-|z|))
        per_pixel = z.clamp(min=0) - z * t + torch.log(1 + torch.exp(-z.abs()))
        return (w * per_pixel).sum() / w.sum()
class BCEDicePenalizeBorderLoss(nn.Module):
    """Weighted BCE + weighted soft dice, with extra weight near label borders.

    Border pixels are located by average-pooling the label map: positions whose
    pooled value is strictly between 0.01 and 0.99 lie near a 0/1 transition
    and receive triple weight (renormalised to keep total weight mass constant).
    """
    def __init__(self, kernel_size=55, **_):
        super(BCEDicePenalizeBorderLoss, self).__init__()
        self.bce = WeightedBCELoss2d()
        self.dice = WeightedSoftDiceLoss()
        self.kernel_size = kernel_size
    def to(self, device):
        # Fix: nn.Module.to returns the module; the original override returned
        # None, breaking the common `loss = loss.to(device)` idiom.
        super().to(device=device)
        self.bce.to(device=device)
        self.dice.to(device=device)
        return self
    def forward(self, logits, labels, **_):
        # pooled label strictly inside (0.01, 0.99) marks a border neighbourhood
        a = F.avg_pool2d(labels, kernel_size=self.kernel_size, padding=self.kernel_size // 2, stride=1)
        ind = (a.ge(0.01) * a.le(0.99)).float()
        weights = torch.ones(a.size()).to(device=logits.device)
        w0 = weights.sum()
        weights = weights + ind * 2  # border pixels get weight 3 instead of 1
        # renormalise so the total weight mass stays w0
        weights = weights / weights.sum() * w0
        return self.bce(logits, labels, weights) + self.dice(logits, labels, weights)
# ==== Focal Loss with extra parameters ==== #
# Source: https://github.com/Hsuxu/Loss_ToolBox-PyTorch/blob/master/FocalLoss/FocalLoss.py
# License: MIT
class FocalLoss2(nn.Module):
    """
    This is a implementation of Focal Loss with smooth label cross entropy supported which is proposed in
    'Focal Loss for Dense Object Detection. (https://arxiv.org/abs/1708.02002)'
        Focal_Loss= -1*alpha*(1-pt)*log(pt)
    Params:
        :param num_class:
        :param alpha: (tensor) 3D or 4D the scalar factor for this criterion
        :param gamma: (float,double) gamma > 0 reduces the relative loss for well-classified examples (p>0.5) putting more
                        focus on hard misclassified example
        :param smooth: (float,double) smooth value when cross entropy
        :param balance_index: (int) balance class index, should be specific when alpha is float
        :param size_average: (bool, optional) By default, the losses are averaged over each loss element in the batch.
    """
    def __init__(self, num_class, alpha=None, gamma=2, balance_index=-1, smooth=None, size_average=True, **_):
        super(FocalLoss2, self).__init__()
        self.num_class = num_class
        self.alpha = alpha
        self.gamma = gamma
        self.smooth = smooth
        self.size_average = size_average
        if self.alpha is None:
            # default: uniform per-class weighting
            self.alpha = torch.ones(self.num_class, 1)
        elif isinstance(self.alpha, (list, np.ndarray)):
            if len(self.alpha) != self.num_class:
                raise AssertionError
            # user-provided per-class weights, normalised to sum to 1
            self.alpha = torch.FloatTensor(alpha).view(self.num_class, 1)
            self.alpha = self.alpha / self.alpha.sum()
        elif isinstance(self.alpha, float):
            # scalar alpha: weight `balance_index` with alpha, all others with 1 - alpha
            alpha = torch.ones(self.num_class, 1)
            alpha = alpha * (1 - self.alpha)
            alpha[balance_index] = self.alpha
            self.alpha = alpha
        else:
            raise TypeError('Not support alpha type')
        if self.smooth is not None:
            if self.smooth < 0 or self.smooth > 1.0:
                raise ValueError('smooth value should be in [0,1]')
    def forward(self, logits, labels, **_):
        # NOTE(review): `pt` below multiplies the one-hot key with `logits`
        # directly, so callers are expected to pass probabilities (the softmax
        # line is commented out) — confirm with call sites.
        # logits = F.softmax(logits, dim=1)
        if logits.dim() > 2:
            # N,C,d1,d2 -> N,C,m (m=d1*d2*...)
            logits = logits.view(logits.size(0), logits.size(1), -1)
            logits = logits.permute(0, 2, 1).contiguous()
            logits = logits.view(-1, logits.size(-1))
        labels = labels.view(-1, 1)
        # N = input.size(0)
        # alpha = torch.ones(N, self.num_class)
        # alpha = alpha * (1 - self.alpha)
        # alpha = alpha.scatter_(1, target.long(), self.alpha)
        epsilon = 1e-10
        alpha = self.alpha.to(logits.device)
        idx = labels.cpu().long()
        # one-hot encode the labels on CPU, then move to the logits' device
        one_hot_key = torch.FloatTensor(labels.size(0), self.num_class).zero_()
        one_hot_key = one_hot_key.scatter_(1, idx, 1)
        one_hot_key = one_hot_key.to(logits.device)
        if self.smooth:
            # label smoothing: clamp the one-hot targets away from exact 0/1
            one_hot_key = torch.clamp(one_hot_key, self.smooth / (self.num_class - 1), 1.0 - self.smooth)
        # pt = probability assigned to the true class (epsilon guards log(0))
        pt = (one_hot_key * logits).sum(1) + epsilon
        logpt = pt.log()
        gamma = self.gamma
        alpha = alpha[idx]
        loss = -1 * alpha * torch.pow((1 - pt), gamma) * logpt
        if self.size_average:
            loss = loss.mean()
        else:
            loss = loss.sum()
        return loss
# -------- #
# Source: https://github.com/huaifeng1993/DFANet/blob/master/loss.py
class FocalLoss3(nn.Module):
    """
    This criterion is a implemenation of Focal Loss, which is proposed in Focal Loss for Dense Object Detection.
        Loss(x, class) = - \alpha (1-softmax(x)[class])^gamma \log(softmax(x)[class])
    The losses are averaged across observations for each minibatch.
    Params:
        :param alpha: (1D Tensor, Variable) - the scalar factor for this criterion
        :param gamma: (float, double) - gamma > 0
        :param size_average: (bool) - size_average(bool): By default, the losses are averaged over observations for each minibatch.
                                However, if the field size_average is set to False, the losses are instead summed for each minibatch.
    """
    def __init__(self, class_num, alpha=None, gamma=2, size_average=True, **_):
        super(FocalLoss3, self).__init__()
        if alpha is None:
            # one weight per class plus a spare slot (see the c+1 channel trick in forward)
            self.alpha = Variable(torch.ones(class_num+1))
        else:
            if isinstance(alpha, Variable):
                self.alpha = alpha
            else:
                self.alpha = Variable(alpha)
        self.gamma = gamma
        self.class_num = class_num
        self.size_average = size_average
    def forward(self, inputs, labels, **_):  # variables
        P = F.softmax(inputs)
        if len(inputs.size()) == 3:
            torch_out = torch.zeros(inputs.size())
        else:
            # allocate one extra channel so scatter_ can absorb labels equal to c;
            # the spare channel is sliced off again below.
            # NOTE(review): presumably the extra channel holds an "ignore" label —
            # confirm against the label convention used by callers.
            b,c,h,w = inputs.size()
            torch_out = torch.zeros([b,c+1,h,w])
        if inputs.is_cuda:
            torch_out = torch_out.cuda()
        class_mask = Variable(torch_out)
        class_mask.scatter_(1, labels.long(), 1.)
        class_mask = class_mask[:,:-1,:,:]
        if inputs.is_cuda and not self.alpha.is_cuda:
            self.alpha = self.alpha.cuda()
        # print('alpha',self.alpha.size())
        # per-pixel alpha looked up from the (flattened) label map
        alpha = self.alpha[labels.data.view(-1)].view_as(labels)
        # print (alpha.size(),class_mask.size(),P.size())
        # probability of the true class at each pixel
        probs = (P * class_mask).sum(1)  # + 1e-6#.view(-1, 1)
        log_p = probs.log()
        batch_loss = -alpha * (torch.pow((1 - probs), self.gamma)) * log_p
        if self.size_average:
            loss = batch_loss.mean()
        else:
            loss = batch_loss.sum()
        return loss
# -------- #
# -------- #
# Source: https://discuss.pytorch.org/t/is-this-a-correct-implementation-for-focal-loss-in-pytorch/43327/4
class BinaryFocalLoss(nn.Module):
    '''
    Implementation of binary focal loss. For multi-class focal loss use one of the other implementations.

    gamma = 0 is equivalent to BinaryCrossEntropy Loss
    '''
    def __init__(self, gamma=1.333, eps=1e-6, alpha=1.0, **_):
        super().__init__()
        self.gamma = gamma
        self.eps = eps
        self.alpha = alpha
    def forward(self, inputs, labels, **_):
        raw_bce = F.binary_cross_entropy_with_logits(inputs, labels, reduction='none')
        # exp(-bce) recovers p_t without NaNs when the probability is exactly 0
        p_t = torch.exp(-raw_bce)
        focal = self.alpha * (1 - p_t) ** self.gamma * raw_bce
        return focal.mean()
# -------- #
# ==== Additional Losses === #
# Source: https://github.com/atlab/attorch/blob/master/attorch/losses.py
# License: MIT
class PoissonLoss(nn.Module):
    """Poisson negative log-likelihood (up to constants): mean(output - labels*log(output + bias)).

    :param bias: small constant added inside the log for numerical stability
    """
    def __init__(self, bias=1e-12, **_):
        super().__init__()
        self.bias = bias
    def forward(self, output, labels, **_):
        # _assert_no_grad(target)
        # Fix: the original `with torch.no_grad:` used the class object instead
        # of calling it, which raises at runtime; `torch.no_grad()` is correct.
        # NOTE(review): evaluating under no_grad blocks gradients through the
        # loss — this mirrors the original author's comment but confirm it is
        # intended before using this for training.
        with torch.no_grad():  # Pytorch 0.4.0 replacement (should be ok to use like this)
            return (output - labels * torch.log(output + self.bias)).mean()
class PoissonLoss3d(nn.Module):
    """Poisson NLL for 3-D (time-lagged) outputs: the target's leading ``lag`` steps are dropped.

    :param bias: small constant added inside the log for numerical stability
    """
    def __init__(self, bias=1e-12, **_):
        super().__init__()
        self.bias = bias
    def forward(self, output, target, **_):
        # _assert_no_grad(target)
        # Fix: `with torch.no_grad:` (class object, not called) raised at
        # runtime; `torch.no_grad()` is the correct context manager.
        with torch.no_grad():  # Pytorch 0.4.0 replacement (should be ok to use like this)
            # align target to output along dim 1 by dropping the leading lag
            lag = target.size(1) - output.size(1)
            return (output - target[:, lag:, :] * torch.log(output + self.bias)).mean()
class L1Loss3d(nn.Module):
    """Mean absolute error for 3-D (time-lagged) outputs; drops the target's leading ``lag`` steps.

    :param bias: kept for interface compatibility (unused by the static forward)
    """
    def __init__(self, bias=1e-12, **_):
        super().__init__()
        self.bias = bias
    @staticmethod
    def forward(output, target, **_):
        # _assert_no_grad(target)
        # Fix: `with torch.no_grad:` (class object, not called) raised at
        # runtime; `torch.no_grad()` is the correct context manager.
        with torch.no_grad():  # Pytorch 0.4.0 replacement (should be ok to use like this)
            lag = target.size(1) - output.size(1)
            return (output - target[:, lag:, :]).abs().mean()
class MSE3D(nn.Module):
    """Mean squared error for 3-D (time-lagged) outputs; drops the target's leading ``lag`` steps."""
    def __init__(self, **_):
        super().__init__()
    @staticmethod
    def forward(output, target, **_):
        # _assert_no_grad(target)
        # Fix: `with torch.no_grad:` (class object, not called) raised at
        # runtime; `torch.no_grad()` is the correct context manager.
        with torch.no_grad():  # Pytorch 0.4.0 replacement (should be ok to use like this)
            lag = target.size(1) - output.size(1)
            return (output - target[:, lag:, :]).pow(2).mean()
# ==== Custom ==== #
class BCEWithLogitsViewLoss(nn.BCEWithLogitsLoss):
    '''
    Silly wrapper of nn.BCEWithLogitsLoss because BCEWithLogitsLoss only takes a 1-D array
    '''
    def __init__(self, weight=None, size_average=True, **_):
        super().__init__(weight=weight, size_average=size_average)
    def forward(self, input_, target, **_):
        '''
        Flatten both tensors and defer to nn.BCEWithLogitsLoss.
        :param input_: logits of any shape
        :param target: targets of the same total size
        :return: scalar BCE-with-logits loss
        '''
        flat_input = input_.view(-1)
        flat_target = target.view(-1)
        return super().forward(flat_input, flat_target)
# ===================== #
# Source: https://discuss.pytorch.org/t/one-hot-encoding-with-autograd-dice-loss/9781/5
# For calculating dice loss on images where multiple classes are present at the same time
def multi_class_dice_loss(output, target, weights=None, ignore_index=None):
    """Multi-class soft dice loss on log-probability maps.

    :param output: NxCxHxW float tensor of log-probabilities (exp() is applied here)
    :param target: NxHxW long tensor of class indices
    :param weights: optional C float tensor of per-class weights (defaults to ones)
    :param ignore_index: optional int label value excluded from the loss
    :return: scalar loss averaged over channels
    """
    smooth = 1.
    probs = output.exp()
    one_hot = probs.detach().clone().zero_()
    if ignore_index is None:
        one_hot.scatter_(1, target.unsqueeze(1), 1)
    else:
        ignored = target == ignore_index
        # scatter needs valid indices, so remap ignored positions to class 0 ...
        safe_target = target.clone()
        safe_target[ignored] = 0
        one_hot.scatter_(1, safe_target.unsqueeze(1), 1)
        # ... then wipe their one-hot entries across all channels
        one_hot[ignored.unsqueeze(1).expand_as(one_hot)] = 0
    if weights is None:
        weights = torch.ones(probs.size(1)).type_as(probs.detach())
    overlap = probs * one_hot
    numerator = 2 * overlap.sum(3).sum(2).sum(0) + smooth
    denominator = (probs + one_hot).sum(3).sum(2).sum(0) + smooth
    per_channel = weights * (1 - (numerator / denominator))
    return per_channel.sum() / probs.size(1)
# ====================== #
# Source: https://discuss.pytorch.org/t/how-to-implement-soft-iou-loss/15152
# Calculation of soft-IOU loss
def to_one_hot(tensor, nClasses):
    """One-hot encode an NxHxW long tensor into an N x nClasses x H x W float tensor."""
    batch, height, width = tensor.size()
    target = torch.zeros(batch, nClasses, height, width)
    return target.scatter_(1, tensor.view(batch, 1, height, width), 1)
# ====================== #
# Source: https://gist.github.com/jeremyjordan/9ea3032a32909f71dd2ab35fe3bacc08
# Another calculation of dice loss over multiple classes. Input is numpy matrices.
def soft_multiclass_dice_loss(y_true, y_pred, epsilon=1e-6):
    '''
    Soft dice loss calculation for arbitrary batch size, number of classes, and number of spatial dimensions.
    Assumes the `channels_last` format.

    # Arguments
        y_true: b x X x Y( x Z...) x c One hot encoding of ground truth
        y_pred: b x X x Y( x Z...) x c Network output, must sum to 1 over c channel (such as after softmax)
        epsilon: Used for numerical stability to avoid divide by zero errors
    # References
        V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation
        https://arxiv.org/abs/1606.04797
        More details on Dice loss formulation
        https://mediatum.ub.tum.de/doc/1395260/1395260.pdf (page 72)
        Adapted from https://github.com/Lasagne/Recipes/issues/99#issuecomment-347775022
    '''
    # sum over the spatial axes only, keeping batch and class axes
    spatial_axes = tuple(range(1, y_pred.ndim - 1))
    numerator = 2. * np.sum(y_pred * y_true, axis=spatial_axes)
    denominator = np.sum(np.square(y_pred) + np.square(y_true), axis=spatial_axes)
    # average over classes and batch
    return 1 - np.mean(numerator / (denominator + epsilon))
class mIoULoss(nn.Module):
    """Negative mean soft-IoU over classes and batch (inputs are raw logits)."""
    def __init__(self, weight=None, size_average=True, num_classes=2, **_):
        super(mIoULoss, self).__init__()
        self.classes = num_classes
    def forward(self, inputs, target_oneHot, **_):
        # inputs / target_oneHot: N x Classes x H x W
        batch = inputs.size()[0]
        # per-pixel class probabilities
        probs = F.softmax(inputs, dim=1)
        # soft intersection and union, summed over all pixels -> N x C
        inter = (probs * target_oneHot).view(batch, self.classes, -1).sum(2)
        union = (probs + target_oneHot - probs * target_oneHot).view(batch, self.classes, -1).sum(2)
        iou = inter / union
        # negated so that minimising the loss maximises the IoU
        return -iou.mean()
# ====================== #
# Source: https://github.com/snakers4/mnasnet-pytorch/blob/master/src/models/semseg_loss.py
# Combination Loss from BCE and Dice
class ComboBCEDiceLoss(nn.Module):
    """
    Combination BinaryCrossEntropy (BCE) and Dice Loss with an optional running mean and loss weighing.
    """
    def __init__(self, use_running_mean=False, bce_weight=1, dice_weight=1, eps=1e-6, gamma=0.9, combined_loss_only=True, **_):
        """
        :param use_running_mean: - bool (default: False) Whether to accumulate a running mean and add it to the loss with (1-gamma)
        :param bce_weight: - float (default: 1.0) Weight multiplier for the BCE loss (relative to dice)
        :param dice_weight: - float (default: 1.0) Weight multiplier for the Dice loss (relative to BCE)
        :param eps: - small constant added to the dice union to avoid division by zero
        :param gamma: - EMA decay factor for the running means
        :param combined_loss_only: - bool (default: True) whether to return a single combined loss or three separate losses
        """
        super().__init__()
        '''
        Note: BCEWithLogitsLoss already performs a torch.sigmoid(pred)
        before applying BCE!
        '''
        self.bce_logits_loss = nn.BCEWithLogitsLoss()
        self.dice_weight = dice_weight
        self.bce_weight = bce_weight
        self.eps = eps
        self.gamma = gamma
        self.combined_loss_only = combined_loss_only
        self.use_running_mean = use_running_mean
        self.bce_weight = bce_weight
        self.dice_weight = dice_weight
        if self.use_running_mean is True:
            # buffers so the running means follow the module's device and are saved in state_dict
            self.register_buffer('running_bce_loss', torch.zeros(1))
            self.register_buffer('running_dice_loss', torch.zeros(1))
            self.reset_parameters()
    def to(self, device):
        # NOTE(review): unlike nn.Module.to this returns None — callers must not
        # rely on the `m = m.to(device)` idiom with this class.
        super().to(device=device)
        self.bce_logits_loss.to(device=device)
    def reset_parameters(self):
        # zero both running-mean buffers
        self.running_bce_loss.zero_()
        self.running_dice_loss.zero_()
    def forward(self, outputs, labels, **_):
        # inputs and targets are assumed to be BxCxWxH (batch, color, width, height)
        outputs = outputs.squeeze()  # necessary in case we're dealing with binary segmentation (color dim of 1)
        if len(outputs.shape) != len(labels.shape):
            raise AssertionError
        # assert that B, W and H are the same (-0 is just 0: the batch dimension)
        if outputs.size(-0) != labels.size(-0):
            raise AssertionError
        if outputs.size(-1) != labels.size(-1):
            raise AssertionError
        if outputs.size(-2) != labels.size(-2):
            raise AssertionError
        bce_loss = self.bce_logits_loss(outputs, labels)
        dice_target = (labels == 1).float()
        dice_output = torch.sigmoid(outputs)
        intersection = (dice_output * dice_target).sum()
        union = dice_output.sum() + dice_target.sum() + self.eps
        # -log of the dice score: penalises low overlap more sharply than (1 - dice)
        dice_loss = (-torch.log(2 * intersection / union))
        if self.use_running_mean is False:
            bmw = self.bce_weight
            dmw = self.dice_weight
            # loss += torch.clamp(1 - torch.log(2 * intersection / union),0,100) * self.dice_weight
        else:
            # exponential moving average of each loss; each term is then weighted
            # by the complement of its share so the larger loss is damped
            self.running_bce_loss = self.running_bce_loss * self.gamma + bce_loss.data * (1 - self.gamma)
            self.running_dice_loss = self.running_dice_loss * self.gamma + dice_loss.data * (1 - self.gamma)
            bm = float(self.running_bce_loss)
            dm = float(self.running_dice_loss)
            bmw = 1 - bm / (bm + dm)
            dmw = 1 - dm / (bm + dm)
        loss = bce_loss * bmw + dice_loss * dmw
        if self.combined_loss_only:
            return loss
        else:
            return loss, bce_loss, dice_loss
class ComboSemsegLossWeighted(nn.Module):
    """
    Combination BCE + dice loss with an optional per-pixel weight mask for the
    BCE term and optional running-mean loss balancing.
    """
    def __init__(self,
                 use_running_mean=False,
                 bce_weight=1,
                 dice_weight=1,
                 eps=1e-6,
                 gamma=0.9,
                 use_weight_mask=False,
                 combined_loss_only=False, **_
                 ):
        super().__init__()
        self.use_weight_mask = use_weight_mask
        self.nll_loss = nn.BCEWithLogitsLoss()
        self.dice_weight = dice_weight
        self.bce_weight = bce_weight
        self.eps = eps
        self.gamma = gamma
        self.combined_loss_only = combined_loss_only
        self.use_running_mean = use_running_mean
        self.bce_weight = bce_weight
        self.dice_weight = dice_weight
        if self.use_running_mean is True:
            # buffers so the running means follow the module's device and are saved in state_dict
            self.register_buffer('running_bce_loss', torch.zeros(1))
            self.register_buffer('running_dice_loss', torch.zeros(1))
            self.reset_parameters()
    def to(self, device):
        # NOTE(review): returns None, unlike nn.Module.to — avoid `m = m.to(device)`
        super().to(device=device)
        self.nll_loss.to(device=device)
    def reset_parameters(self):
        # zero both running-mean buffers
        self.running_bce_loss.zero_()
        self.running_dice_loss.zero_()
    def forward(self, logits, labels, weights, **_):
        # logits and labels are assumed to be BxCxWxH
        if len(logits.shape) != len(labels.shape):
            raise AssertionError
        # assert that B, W and H are the same
        if logits.size(0) != labels.size(0):
            raise AssertionError
        if logits.size(2) != labels.size(2):
            raise AssertionError
        if logits.size(3) != labels.size(3):
            raise AssertionError
        # weights are assumed to be BxWxH
        # assert that B, W and H are the are the same for target and mask
        if logits.size(0) != weights.size(0):
            raise AssertionError
        if logits.size(2) != weights.size(1):
            raise AssertionError
        if logits.size(3) != weights.size(2):
            raise AssertionError
        if self.use_weight_mask:
            # per-pixel weighted BCE; weights scale each element's contribution
            bce_loss = F.binary_cross_entropy_with_logits(input=logits,
                                                          target=labels,
                                                          weight=weights)
        else:
            bce_loss = self.nll_loss(input=logits,
                                     target=labels)
        dice_target = (labels == 1).float()
        dice_output = torch.sigmoid(logits)
        intersection = (dice_output * dice_target).sum()
        union = dice_output.sum() + dice_target.sum() + self.eps
        # -log of the dice score: penalises low overlap more sharply than (1 - dice)
        dice_loss = (-torch.log(2 * intersection / union))
        if self.use_running_mean is False:
            bmw = self.bce_weight
            dmw = self.dice_weight
            # loss += torch.clamp(1 - torch.log(2 * intersection / union),0,100) * self.dice_weight
        else:
            # EMA of each loss; weight each term by the complement of its share
            self.running_bce_loss = self.running_bce_loss * self.gamma + bce_loss.data * (1 - self.gamma)
            self.running_dice_loss = self.running_dice_loss * self.gamma + dice_loss.data * (1 - self.gamma)
            bm = float(self.running_bce_loss)
            dm = float(self.running_dice_loss)
            bmw = 1 - bm / (bm + dm)
            dmw = 1 - dm / (bm + dm)
        loss = bce_loss * bmw + dice_loss * dmw
        if self.combined_loss_only:
            return loss
        else:
            return loss, bce_loss, dice_loss
# ====================== #
# Source: https://github.com/Tramac/awesome-semantic-segmentation-pytorch/blob/master/core/utils/loss.py
# Description: http://www.erogol.com/online-hard-example-mining-pytorch/
# Online Hard Example Loss
class OhemCrossEntropy2d(nn.Module):
    """Online Hard Example Mining (OHEM) loss for 2-D segmentation.

    Keeps only the "hard" pixels — those whose predicted probability for their
    ground-truth class is at most ``thresh`` (raised if needed so at least
    ``min_kept`` pixels survive) — relabels everything else as ignore, and
    evaluates the wrapped criterion on the result.
    """
    def __init__(self, thresh=0.6, min_kept=0, ignore_index=-100, is_binary=True, **kwargs):
        super().__init__()
        self.ignore_label = ignore_index
        self.is_binary = is_binary
        self.thresh = float(thresh)
        self.min_kept = int(min_kept)
        self.criterion = BCEWithLogitsViewLoss(**kwargs)
    def forward(self, logits, labels, **_):
        """
        Args:
            predict:(n, c, h, w)
            labels:(n, h, w)
        """
        if self.is_binary:
            predict = torch.sigmoid(logits)
        else:
            predict = F.softmax(logits, dim=1)
        n, c, h, w = predict.size()
        input_label = labels.detach().cpu().numpy().ravel().astype(np.int32)
        # NOTE(review): this applies a softmax in numpy on top of `predict`,
        # which was already sigmoid/softmax-ed above — confirm the double
        # normalisation is intended.
        x = np.rollaxis(predict.detach().cpu().numpy(), 1).reshape((c, -1))
        input_prob = np.exp(x - x.max(axis=0).reshape((1, -1)))
        input_prob /= input_prob.sum(axis=0).reshape((1, -1))
        valid_flag = input_label != self.ignore_label
        valid_inds = np.where(valid_flag)[0]
        label = input_label[valid_flag]
        num_valid = valid_flag.sum()
        if self.min_kept >= num_valid:
            print('Labels: {}'.format(num_valid))
        elif num_valid > 0:
            prob = input_prob[:, valid_flag]
            # probability each valid pixel assigns to its ground-truth class.
            # NOTE(review): the `label-1` row indexing implies 1-based labels —
            # verify against the dataset's label convention.
            pred = prob[label-1, np.arange(len(label), dtype=np.int32)]
            threshold = self.thresh
            if self.min_kept > 0:
                # raise the threshold if necessary so at least min_kept pixels are kept
                index = pred.argsort()
                threshold_index = index[min(len(index), self.min_kept) - 1]
                if pred[threshold_index] > self.thresh:
                    threshold = pred[threshold_index]
            kept_flag = pred <= threshold
            valid_inds = valid_inds[kept_flag]
            #print('hard ratio: {} = {} / {} '.format(round(len(valid_inds)/num_valid, 4), len(valid_inds), num_valid))
        # relabel everything except the kept hard pixels as ignore
        label = input_label[valid_inds].copy()
        input_label.fill(self.ignore_label)
        input_label[valid_inds] = label
        #print(np.sum(input_label != self.ignore_label))
        labels = torch.from_numpy(input_label.reshape(labels.size())).type_as(predict).to(labels.device)
        predict = predict.squeeze()  # in case we're dealing with B/W images instead of RGB
        return self.criterion(predict, labels)
# ====================== #
# Source: https://github.com/Tramac/awesome-semantic-segmentation-pytorch/blob/master/core/utils/loss.py
# Loss used for EncNet
class EncNetLoss(nn.CrossEntropyLoss):
    """
    2D Cross Entropy Loss with SE Loss

    Specifically used for EncNet.
    se_loss is the Semantic Encoding Loss from the paper `Context Encoding for Semantic Segmentation <https://arxiv.org/pdf/1803.08904v1>`_.
    It computes probabilities of contexts appearing together.
    Without SE_loss and Aux_loss this class simply forwards inputs to Torch's Cross Entropy Loss (nn.CrossEntropyLoss)
    """
    def __init__(self, se_loss=True, se_weight=0.2, nclass=19, aux=False, aux_weight=0.4, weight=None, ignore_index=-1, **_):
        # positional args map onto nn.CrossEntropyLoss(weight, size_average, ignore_index)
        super(EncNetLoss, self).__init__(weight, None, ignore_index)
        self.se_loss = se_loss
        self.aux = aux
        self.nclass = nclass
        self.se_weight = se_weight
        self.aux_weight = aux_weight
        self.bceloss = nn.BCELoss(weight)
    def forward(self, *inputs, **_):
        # inputs = (sequence_of_predictions, target); flatten for the per-branch unpacking below
        preds, target = tuple(inputs)
        inputs = tuple(list(preds) + [target])
        if not self.se_loss and not self.aux:
            # plain cross entropy
            return super(EncNetLoss, self).forward(*inputs)
        elif not self.se_loss:
            # main + auxiliary segmentation heads
            pred1, pred2, target = tuple(inputs)
            loss1 = super(EncNetLoss, self).forward(pred1, target)
            loss2 = super(EncNetLoss, self).forward(pred2, target)
            return dict(loss=loss1 + self.aux_weight * loss2)
        elif not self.aux:
            # main head + semantic-encoding head
            pred, se_pred, target = tuple(inputs)
            se_target = self._get_batch_label_vector(target, nclass=self.nclass).type_as(pred)
            loss1 = super(EncNetLoss, self).forward(pred, target)
            loss2 = self.bceloss(torch.sigmoid(se_pred), se_target)
            return dict(loss=loss1 + self.se_weight * loss2)
        else:
            # main + SE + auxiliary heads
            pred1, se_pred, pred2, target = tuple(inputs)
            se_target = self._get_batch_label_vector(target, nclass=self.nclass).type_as(pred1)
            loss1 = super(EncNetLoss, self).forward(pred1, target)
            loss2 = super(EncNetLoss, self).forward(pred2, target)
            loss3 = self.bceloss(torch.sigmoid(se_pred), se_target)
            return dict(loss=loss1 + self.aux_weight * loss2 + self.se_weight * loss3)
    @staticmethod
    def _get_batch_label_vector(target, nclass):
        # target is a 3D Variable BxHxW, output is 2D BxnClass
        batch = target.size(0)
        tvect = Variable(torch.zeros(batch, nclass))
        for i in range(batch):
            # histogram over class ids marks which classes appear in image i
            hist = torch.histc(target[i].cpu().data.float(),
                               bins=nclass, min=0,
                               max=nclass - 1)
            vect = hist > 0
            tvect[i] = vect
        return tvect
class MixSoftmaxCrossEntropyOHEMLoss(OhemCrossEntropy2d):
    """
    Loss taking into consideration class and segmentation targets together, as well as, using OHEM
    """
    def __init__(self, aux=False, aux_weight=0.4, weight=None, ignore_index=-1, **kwargs):
        super(MixSoftmaxCrossEntropyOHEMLoss, self).__init__(ignore_index=ignore_index)
        self.aux = aux
        self.aux_weight = aux_weight
        self.bceloss = nn.BCELoss(weight)
    def to(self, device):
        # NOTE(review): returns None, unlike nn.Module.to — avoid `m = m.to(device)`
        super().to(device=device)
        self.bceloss.to(device=device)
    def _aux_forward(self, *inputs, **_):
        # first prediction is the main head; the rest are auxiliary heads
        *preds, target = tuple(inputs)
        loss = super(MixSoftmaxCrossEntropyOHEMLoss, self).forward(preds[0], target)
        for i in range(1, len(preds)):
            aux_loss = super(MixSoftmaxCrossEntropyOHEMLoss, self).forward(preds[i], target)
            loss += self.aux_weight * aux_loss
        return loss
    def forward(self, *inputs, **_):
        preds, target = tuple(inputs)
        inputs = tuple(list(preds) + [target])
        if self.aux:
            return dict(loss=self._aux_forward(*inputs))
        else:
            # NOTE(review): `preds` here is the whole prediction sequence, not a
            # tensor — this looks like it should be preds[0]; confirm with callers.
            return dict(loss=super(MixSoftmaxCrossEntropyOHEMLoss, self).forward(preds, target))
# ====================== #
# Source: https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/encoding/nn/loss.py
# OHEM Segmentation Loss
class OHEMSegmentationLosses(OhemCrossEntropy2d):
    """
    2D OHEM cross-entropy with optional auxiliary and SE (semantic encoding)
    loss terms. Depending on the ``se_loss``/``aux`` flags, ``forward``
    expects 2-4 positional tensors whose last element is the target map.
    """
    def __init__(self, se_loss=False, se_weight=0.2, num_classes=1,
                 aux=False, aux_weight=0.4, weight=None,
                 ignore_index=-1):
        super(OHEMSegmentationLosses, self).__init__(ignore_index)
        self.se_loss = se_loss
        self.aux = aux
        self.num_classes = num_classes
        self.se_weight = se_weight
        self.aux_weight = aux_weight
        self.bceloss = nn.BCELoss(weight)
    def to(self, device):
        super().to(device=device)
        self.bceloss.to(device=device)
    def forward(self, *inputs, **_):
        base = super(OHEMSegmentationLosses, self)
        if not self.se_loss and not self.aux:
            return base.forward(*inputs)
        if self.se_loss and self.aux:
            # Main + auxiliary predictions, plus an SE class-presence head.
            pred1, se_pred, pred2, target = inputs
            se_target = self._get_batch_label_vector(target, nclass=self.num_classes).type_as(pred1)
            main_loss = base.forward(pred1, target)
            aux_loss = base.forward(pred2, target)
            se_loss = self.bceloss(torch.sigmoid(se_pred), se_target)
            return main_loss + self.aux_weight * aux_loss + self.se_weight * se_loss
        if self.aux:
            # Auxiliary prediction only.
            pred1, pred2, target = inputs
            main_loss = base.forward(pred1, target)
            aux_loss = base.forward(pred2, target)
            return main_loss + self.aux_weight * aux_loss
        # SE head only.
        pred, se_pred, target = inputs
        se_target = self._get_batch_label_vector(target, nclass=self.num_classes).type_as(pred)
        main_loss = base.forward(pred, target)
        se_loss = self.bceloss(torch.sigmoid(se_pred), se_target)
        return main_loss + self.se_weight * se_loss
    @staticmethod
    def _get_batch_label_vector(target, nclass):
        """Per-image class-presence vector: (B, H, W) labels -> (B, nclass)."""
        batch_size = target.size(0)
        presence = Variable(torch.zeros(batch_size, nclass))
        for idx in range(batch_size):
            counts = torch.histc(target[idx].cpu().data.float(),
                                 bins=nclass, min=0, max=nclass - 1)
            presence[idx] = counts > 0
        return presence
# ====================== #
# Source: https://github.com/yinmh17/DNL-Semantic-Segmentation/blob/master/model/seg/loss/ohem_ce_loss.py
# OHEM CrossEntropy Loss
class OhemCELoss(nn.Module):
    """Online Hard Example Mining (OHEM) cross-entropy loss.

    Keeps only the "hard" pixels (predicted ground-truth probability below a
    threshold), while guaranteeing at least ``min_kept`` pixels per image.
    All hyper-parameters come from ``configer`` under the
    ``loss.params.ohem_ce_loss`` namespace.
    """
    def __init__(self, configer, is_binary=False):
        super(OhemCELoss, self).__init__()
        self.configer = configer
        # Optional per-class rescaling weights for the cross-entropy term.
        weight = self.configer.get('loss.params.ohem_ce_loss.weight', default=None)
        self.weight = torch.FloatTensor(weight) if weight is not None else weight
        self.reduction = self.configer.get('loss.params.ohem_ce_loss.reduction', default='mean')
        self.ignore_index = self.configer.get('loss.params.ohem_ce_loss.ignore_index', default=-100)
        # Pixels whose GT-class probability is below `thresh` are "hard".
        self.thresh = self.configer.get('loss.params.ohem_ce_loss.thresh', default=0.7)
        # Keep at least this many pixels per image even when they are easy.
        self.min_kept = max(1, self.configer.get('loss.params.ohem_ce_loss.minkeep', default=5))
        self.is_binary = is_binary
    def forward(self, logits, labels, **_):
        """
        Args:
            logits:(n, c, h, w)
            labels:(n, h, w)
            weight (Tensor, optional): a manual rescaling weight given to each class.
            If given, has to be a Tensor of size "nclasses"
        """
        # Minimum number of pixels that must survive selection, batch-wide.
        batch_kept = self.min_kept * labels.size(0)
        # Resize labels to the logits' spatial resolution (nearest neighbour).
        labels = self._scale_target(labels, (logits.size(2), logits.size(3)))
        if self.is_binary:
            prob_out = torch.sigmoid(logits)
        else:
            prob_out = F.softmax(logits, dim=1)
        # Zero out ignored labels so the channel gather below stays in range.
        # NOTE(review): in the binary branch prob_out has one channel, so any
        # label other than 0 / ignore_index would index out of range — confirm callers.
        tmp_target = labels.clone()
        tmp_target[tmp_target == self.ignore_index] = 0
        # Predicted probability of the ground-truth class at each pixel.
        prob = prob_out.gather(1, tmp_target.unsqueeze(1))
        mask = labels.contiguous().view(-1, ) != self.ignore_index
        # Sort valid pixels by confidence, least confident first.
        sort_prob, sort_indices = prob.contiguous().view(-1, )[mask].contiguous().sort()
        # Raise the threshold whenever `thresh` alone would keep fewer than
        # batch_kept pixels (the probability at rank batch_kept, clamped to numel-1).
        min_threshold = sort_prob[min(batch_kept, sort_prob.numel() - 1)] if sort_prob.numel() > 0 else 0.0
        threshold = max(min_threshold, self.thresh)
        # Unreduced per-pixel cross-entropy; reduced below over selected pixels only.
        loss_matrix = F.cross_entropy(logits, labels,
                                      weight=self.weight.to(logits.device) if self.weight is not None else None,
                                      ignore_index=self.ignore_index, reduction='none')
        loss_matrix = loss_matrix.contiguous().view(-1, )
        sort_loss_matirx = loss_matrix[mask][sort_indices]
        # Keep only the hard pixels (GT probability below the final threshold).
        select_loss_matrix = sort_loss_matirx[sort_prob < threshold]
        if self.reduction == 'sum' or select_loss_matrix.numel() == 0:
            return select_loss_matrix.sum()
        elif self.reduction == 'mean':
            return select_loss_matrix.mean()
        else:
            raise NotImplementedError('Reduction Error!')
    @staticmethod
    def _scale_target(targets_, scaled_size):
        # Nearest-neighbour resize of an integer label map to `scaled_size`.
        targets = targets_.clone().unsqueeze(1).float()
        targets = F.interpolate(targets, size=scaled_size, mode='nearest')
        return targets.squeeze(1).long()
# ===================== #
# Source: https://github.com/Hsuxu/Loss_ToolBox-PyTorch/blob/master/LovaszSoftmax/lovasz_loss.py
def lovasz_grad(gt_sorted):
    """
    Computes gradient of the Lovasz extension w.r.t sorted errors
    See Alg. 1 in paper
    """
    n = len(gt_sorted)
    total_fg = gt_sorted.sum()
    cum_fg = gt_sorted.float().cumsum(0)
    cum_bg = (1 - gt_sorted).float().cumsum(0)
    # Jaccard index of the prefix up to each position.
    jaccard = 1. - (total_fg - cum_fg) / (total_fg + cum_bg)
    if n > 1:  # cover 1-pixel case
        # First differences turn cumulative values into per-position gradients.
        jaccard[1:n] = jaccard[1:n] - jaccard[:-1]
    return jaccard
class LovaszSoftmax(nn.Module):
    """Lovasz-Softmax loss for 4-D (NCHW) or 5-D (NCDHW) probability maps."""
    def __init__(self, reduction='mean', **_):
        super(LovaszSoftmax, self).__init__()
        self.reduction = reduction
    @staticmethod
    def prob_flatten(input, target):
        """Flatten probabilities to (P, C) and targets to (P,)."""
        if input.dim() not in [4, 5]:
            raise AssertionError
        num_class = input.size(1)
        if input.dim() == 4:
            flat_input = input.permute(0, 2, 3, 1).contiguous().view(-1, num_class)
        else:
            flat_input = input.permute(0, 2, 3, 4, 1).contiguous().view(-1, num_class)
        return flat_input, target.view(-1)
    def lovasz_softmax_flat(self, inputs, targets):
        num_classes = inputs.size(1)
        per_class = []
        for cls in range(num_classes):
            fg = (targets == cls).float()
            # Single-channel inputs always use channel 0.
            cls_probs = inputs[:, 0] if num_classes == 1 else inputs[:, cls]
            errors = (torch.autograd.Variable(fg) - cls_probs).abs()
            errors_sorted, order = torch.sort(errors, 0, descending=True)
            fg_sorted = fg[order]
            per_class.append(torch.dot(errors_sorted,
                                       torch.autograd.Variable(lovasz_grad(fg_sorted))))
        stacked = torch.stack(per_class)
        if self.reduction == 'none':
            return stacked
        if self.reduction == 'sum':
            return stacked.sum()
        return stacked.mean()
    def forward(self, inputs, targets, **_):
        flat_inputs, flat_targets = self.prob_flatten(inputs, targets)
        return self.lovasz_softmax_flat(flat_inputs, flat_targets)
# ===================== #
# Source: https://github.com/xuuuuuuchen/Active-Contour-Loss/blob/master/Active-Contour-Loss.py (MIT)
class ActiveContourLoss(nn.Module):
    """
    `Learning Active Contour Models for Medical Image Segmentation <http://openaccess.thecvf.com/content_CVPR_2019/papers/Chen_Learning_Active_Contour_Models_for_Medical_Image_Segmentation_CVPR_2019_paper.pdf>`_
    Note that it only works for B/W masks right now... which is kind of the point of this loss as contours in RGB should be cast to B/W
    before computing the loss.
    Params:
        :param mu: (float, default=1.0) - Scales the inner region loss relative to outer region (less or more prominent)
        :param lambdaP: (float, default=1.0) - Scales the combined region loss compared to the length loss (less or more prominent)
        :param is_binary: (bool, default=False) - Use sigmoid instead of channel softmax on the logits.
    """
    def __init__(self, lambdaP=5., mu=1., is_binary: bool = False, **_):
        super(ActiveContourLoss, self).__init__()
        self.lambdaP = lambdaP
        self.mu = mu
        self.is_binary = is_binary
    def forward(self, logits, labels, **_):
        # Convert raw logits to per-pixel probabilities.
        if self.is_binary:
            logits = torch.sigmoid(logits)
        else:
            logits = F.softmax(logits, dim=1)
        if labels.shape != logits.shape:
            # BUGFIX: the original compared torch.Size tuples lexicographically
            # (`logits.shape > labels.shape`) and then discarded the result of
            # `labels.unsqueeze(dim=1)`, so (B, H, W) labels never actually
            # gained the missing channel dim. Compare ranks and assign instead.
            if logits.dim() > labels.dim():
                labels = labels.unsqueeze(dim=1)
            if labels.shape != logits.shape:
                raise Exception(f'Non-matching shapes for logits ({logits.shape}) and labels ({labels.shape})')
        """
        length term
        """
        x = logits[:, :, 1:, :] - logits[:, :, :-1, :]  # horizontal gradient (B, C, H-1, W)
        y = logits[:, :, :, 1:] - logits[:, :, :, :-1]  # vertical gradient   (B, C, H, W-1)
        delta_x = x[:, :, 1:, :-2]**2   # (B, C, H-2, W-2)
        delta_y = y[:, :, :-2, 1:]**2   # (B, C, H-2, W-2)
        delta_u = torch.abs(delta_x + delta_y)
        epsilon = 1e-8  # avoids sqrt(0), whose gradient is infinite
        length = torch.mean(torch.sqrt(delta_u + epsilon))  # eq.(11) in the paper; mean is used instead of sum
        """
        region term
        """
        C_in = torch.ones_like(logits)
        C_out = torch.zeros_like(labels)
        region_in = torch.abs(torch.mean(logits[:, 0, :, :] * ((labels[:, 0, :, :] - C_in) ** 2)))  # eq.(12) in the paper; mean instead of sum
        region_out = torch.abs(torch.mean((1 - logits[:, 0, :, :]) * ((labels[:, 0, :, :] - C_out) ** 2)))  # eq.(12) in the paper
        return length + self.lambdaP * (self.mu * region_in + region_out)
class ActiveContourLossAlt(nn.Module):
    """
    `Learning Active Contour Models for Medical Image Segmentation <http://openaccess.thecvf.com/content_CVPR_2019/papers/Chen_Learning_Active_Contour_Models_for_Medical_Image_Segmentation_CVPR_2019_paper.pdf>`_
    Note that it only works for B/W masks right now... which is kind of the point of this loss as contours in RGB should be cast to B/W
    before computing the loss.
    Params:
        :param len_w: (float, default=1.0) - The multiplier to use when adding boundary loss.
        :param reg_w: (float, default=1.0) - The multiplier to use when adding region loss.
        :param apply_log: (bool, default=True) - Whether to combine the two terms in log space.
        :param is_binary: (bool, default=False) - Use sigmoid instead of channel softmax on the logits.
    """
    def __init__(self, len_w=1., reg_w=1., apply_log=True, is_binary: bool = False, **_):
        super(ActiveContourLossAlt, self).__init__()
        self.len_w = len_w
        self.reg_w = reg_w
        self.epsilon = 1e-8  # a parameter to avoid square root = zero issues
        self.apply_log = apply_log
        self.is_binary = is_binary
    def forward(self, logits, labels, **_):
        # must convert raw logits to predicted probabilities for each pixel along channel
        if self.is_binary:
            probs = torch.sigmoid(logits)
        else:
            probs = F.softmax(logits, dim=1)
        if labels.shape != logits.shape:
            # BUGFIX: the original compared torch.Size tuples lexicographically
            # and discarded the result of `labels.unsqueeze(dim=1)`, so a
            # missing channel dim was never actually added. Compare ranks and
            # assign instead; re-check before raising.
            if logits.dim() > labels.dim():
                labels = labels.unsqueeze(dim=1)
            if labels.shape != logits.shape:
                raise Exception(f'Non-matching shapes for logits ({logits.shape}) and labels ({labels.shape})')
        """
        length term:
          - Subtract adjacent pixels from each other in X and Y directions
          - Determine where they differ from the ground truth (targets)
          - Calculate MSE
        """
        # horizontal and vertical directions
        x = probs[:, :, 1:, :] - probs[:, :, :-1, :]  # differences in horizontal direction
        y = probs[:, :, :, 1:] - probs[:, :, :, :-1]  # differences in vertical direction
        target_x = labels[:, :, 1:, :] - labels[:, :, :-1, :]
        target_y = labels[:, :, :, 1:] - labels[:, :, :, :-1]
        # find difference between values of probs and targets
        delta_x = (target_x - x).abs()  # do we need to subtract absolute values or relative?
        delta_y = (target_y - y).abs()
        # get MSE of the differences per pixel
        # importantly because deltas are mostly < 1, a simple square of the error will actually yield LOWER results
        # so we select 0.5 as the middle ground where small error will be further minimized while large error will
        # be highlighted (pushed to be > 1 and up to 2.5 for maximum error).
        # len_error_sq = ((delta_x + 0.5) ** 2) + ((delta_y + 0.5) ** 2)
        # length = torch.sqrt(len_error_sq.sum() + self.epsilon)
        # the length loss here is simply the MSE of x and y deltas
        length_loss = torch.sqrt(delta_x.sum() ** 2 + delta_y.sum() ** 2 + self.epsilon)
        """
        region term (should this be done in log space to avoid instabilities?)
          - compute the error produced by all pixels that are not equal to 0 outside of the ground truth mask
          - compute error produced by all pixels that are not equal to 1 inside the mask
        """
        # reference code for selecting masked values from a tensor
        # t_m_bool = t_mask.type(torch.ByteTensor)
        # t_result = t_in.masked_select(t_m_bool)
        # C_1 = torch.ones((image_size, image_size), device=target.device)
        # C_2 = torch.zeros((image_size, image_size), device=target.device)
        # the sum of all pixel values that are not equal 0 outside of the ground truth mask
        error_in = probs[:, 0, :, :] * ((labels[:, 0, :, :] - 1) ** 2)  # invert the ground truth mask and multiply by probs
        # the sum of all pixel values that are not equal 1 inside of the ground truth mask
        probs_diff = (probs[:, 0, :, :] - labels[:, 0, :, :]).abs()  # subtract mask from probs giving us the errors
        error_out = (probs_diff * labels[:, 0, :, :])  # multiply mask by error, giving us the error terms inside the mask.
        if self.apply_log:
            loss = torch.log(length_loss) + torch.log(error_in.sum() + error_out.sum())
        else:
            # loss = self.len_w * length_loss
            loss = self.reg_w * (error_in.sum() + error_out.sum())
        return torch.clamp(loss, min=0.0)  # make sure we don't return negative values
# ===================== #
# Sources: https://github.com/JunMa11/SegLoss
# https://github.com/MIC-DKFZ/nnUNet/tree/master/nnunet (Apache 2.0)
def uniq(a: Tensor) -> Set:
    """Distinct scalar values of *a*, as a Python set."""
    return set(torch.unique(a.cpu()).numpy())
def sset(a: Tensor, sub: Iterable) -> bool:
    """True iff every value occurring in *a* belongs to *sub*."""
    return uniq(a).issubset(sub)
def simplex(t: Tensor, axis=1) -> bool:
    """True iff *t* sums to one along *axis* (rows form probability simplices)."""
    summed = t.sum(axis).type(torch.float32)
    return torch.allclose(summed, torch.ones_like(summed))
def one_hot(t: Tensor, axis=1) -> bool:
    """True iff *t* is a valid one-hot encoding along *axis*."""
    return simplex(t, axis) and sset(t, [0, 1])
def numpy_haussdorf(pred: np.ndarray, target: np.ndarray) -> float:
    """Symmetric Hausdorff distance between two 2-D arrays of row-points."""
    from scipy.spatial.distance import directed_hausdorff
    if pred.ndim != 2 or pred.shape != target.shape:
        raise AssertionError
    forward = directed_hausdorff(pred, target)[0]
    backward = directed_hausdorff(target, pred)[0]
    return max(forward, backward)
def haussdorf(preds: Tensor, target: Tensor) -> Tensor:
    """Batched per-class symmetric Hausdorff distance for one-hot maps.

    Returns a (B, C) float32 tensor; in the binary case (C == 2) the
    foreground-channel distance is broadcast across both class columns.
    """
    if preds.shape != target.shape:
        raise AssertionError
    if not one_hot(preds):
        raise AssertionError
    if not one_hot(target):
        raise AssertionError
    batch, n_classes, _, _ = preds.shape
    out = torch.zeros((batch, n_classes), dtype=torch.float32, device=preds.device)
    pred_np = preds.detach().cpu().numpy()
    target_np = target.detach().cpu().numpy()
    for b in range(batch):
        if n_classes == 2:
            out[b, :] = numpy_haussdorf(pred_np[b, 0], target_np[b, 0])
        else:
            for c in range(n_classes):
                out[b, c] = numpy_haussdorf(pred_np[b, c], target_np[b, c])
    return out
def softmax_helper(x):
    """Numerically-stable softmax over dim 1 for tensors of any rank."""
    repeat_dims = [1] * x.dim()
    repeat_dims[1] = x.size(1)
    # Subtract the per-position channel max before exponentiating.
    shifted = x - x.max(1, keepdim=True)[0].repeat(*repeat_dims)
    exp = torch.exp(shifted)
    return exp / exp.sum(1, keepdim=True).repeat(*repeat_dims)
def sum_tensor(inp, axes, keepdim=False):
    """Sum *inp* over every axis listed in *axes*, optionally keeping dims."""
    reduce_axes = np.unique(axes).astype(int)
    if keepdim:
        for ax in reduce_axes:
            inp = inp.sum(int(ax), keepdim=True)
    else:
        # Reduce from the highest axis down so earlier indices stay valid.
        for ax in sorted(reduce_axes, reverse=True):
            inp = inp.sum(int(ax))
    return inp
def get_tp_fp_fn(net_output, gt, axes=None, mask=None, square=False):
    """
    Soft per-class true-positive, false-positive and false-negative counts.
    net_output must be (b, c, x, y(, z)).
    gt is a label map of shape (b, 1, x, y(, z)) or (b, x, y(, z)), or a
    one-hot encoding of shape (b, c, x, y(, z)).
    :param axes: axes to reduce over; defaults to all spatial axes
    :param mask: 1 for valid pixels, 0 for invalid, shape (b, 1, x, y(, z))
    :param square: square tp/fp/fn before summation
    :return: (tp, fp, fn) tensors reduced over *axes*
    """
    if axes is None:
        axes = tuple(range(2, net_output.dim()))
    with torch.no_grad():
        if net_output.dim() != gt.dim():
            # Insert the missing channel axis: (b, x, y(, z)) -> (b, 1, x, y(, z)).
            gt = gt.view((gt.shape[0], 1, *gt.shape[1:]))
        if net_output.shape == gt.shape:
            # gt is (probably) already a one-hot encoding.
            y_onehot = gt
        else:
            gt = gt.long()
            y_onehot = torch.zeros(net_output.shape)
            if net_output.device.type == "cuda":
                y_onehot = y_onehot.cuda(net_output.device.index)
            y_onehot.scatter_(1, gt, 1)
    tp = net_output * y_onehot
    fp = net_output * (1 - y_onehot)
    fn = (1 - net_output) * y_onehot
    if mask is not None:
        keep = mask[:, 0]
        tp = torch.stack([chan * keep for chan in torch.unbind(tp, dim=1)], dim=1)
        fp = torch.stack([chan * keep for chan in torch.unbind(fp, dim=1)], dim=1)
        fn = torch.stack([chan * keep for chan in torch.unbind(fn, dim=1)], dim=1)
    if square:
        tp, fp, fn = tp ** 2, fp ** 2, fn ** 2
    return (sum_tensor(tp, axes, keepdim=False),
            sum_tensor(fp, axes, keepdim=False),
            sum_tensor(fn, axes, keepdim=False))
# ===================== #
# Boundary Loss
# Source: https://github.com/JunMa11/SegLoss/blob/71b14900e91ea9405d9705c95b451fc819f24c70/test/loss_functions/boundary_loss.py#L102
def compute_sdf(img_gt, out_shape):
    """
    compute the signed distance map of binary mask
    img_gt: segmentation, shape = (batch_size, x, y, z)
    out_shape: the Signed Distance Map (SDM)
    sdf(x) = 0; x in segmentation boundary
             -inf|x-y|; x in segmentation
             +inf|x-y|; x out of segmentation
    Channel 0 (background) is left as zeros; empty foreground channels too.
    """
    from scipy.ndimage import distance_transform_edt
    from skimage import segmentation as skimage_seg
    img_gt = img_gt.astype(np.uint8)
    gt_sdf = np.zeros(out_shape)
    for b in range(out_shape[0]):  # batch size
        for c in range(1, out_shape[1]):  # foreground channels only
            # BUGFIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
            # the builtin bool is the documented replacement.
            posmask = img_gt[b][c].astype(bool)
            if posmask.any():
                negmask = ~posmask
                posdis = distance_transform_edt(posmask)  # distance inside the mask
                negdis = distance_transform_edt(negmask)  # distance outside the mask
                boundary = skimage_seg.find_boundaries(posmask, mode='inner').astype(np.uint8)
                # Negative inside, positive outside, exactly zero on the boundary.
                sdf = negdis - posdis
                sdf[boundary == 1] = 0
                gt_sdf[b][c] = sdf
    return gt_sdf
class BDLoss(nn.Module):
    def __init__(self, is_binary: bool = False, **_):
        """
        compute boundary loss
        only compute the loss of foreground
        ref: https://github.com/LIVIAETS/surface-loss/blob/108bd9892adca476e6cdf424124bc6268707498e/losses.py#L74
        """
        # NOTE(review): attribute is assigned before nn.Module.__init__; this
        # works on current PyTorch, but super().__init__() conventionally comes first.
        self.is_binary = is_binary
        super(BDLoss, self).__init__()
        # self.do_bg = do_bg
    def forward(self, logits, labels, **_):
        """
        net_output: (batch_size, class, x,y,z)
        target: ground truth, shape: (batch_size, 1, x,y,z)
        bound: precomputed distance map, shape (batch_size, class, x,y,z)
        """
        # Convert raw logits to probabilities.
        if self.is_binary:
            logits = torch.sigmoid(logits)
        else:
            logits = F.softmax(logits, dim=1)
        # Build a one-hot ground truth without tracking gradients.
        with torch.no_grad():
            if len(logits.shape) != len(labels.shape):
                # (b, x, y, z) -> (b, 1, x, y, z): add the missing channel dim.
                labels = labels.view((labels.shape[0], 1, *labels.shape[1:]))
            if all([i == j for i, j in zip(logits.shape, labels.shape)]):
                # if this is the case then gt is probably already a one hot encoding
                y_onehot = labels
            else:
                labels = labels.long()
                y_onehot = torch.zeros(logits.shape)
                if logits.device.type == "cuda":
                    y_onehot = y_onehot.cuda(logits.device.index)
                y_onehot.scatter_(1, labels, 1)
        # Signed distance map of the GT: negative inside, positive outside, 0 on the boundary.
        gt_sdf = compute_sdf(y_onehot.cpu().numpy(), logits.shape)
        phi = torch.from_numpy(gt_sdf)
        # NOTE(review): when logits are already on CPU this branch is skipped and
        # phi keeps numpy's float64 dtype; einsum with float32 logits would then
        # fail — confirm CPU execution path.
        if phi.device != logits.device:
            phi = phi.to(logits.device).type(torch.float32)
        # pred = net_output[:, 1:, ...].type(torch.float32)
        # phi = phi[:,1:, ...].type(torch.float32)
        # Element-wise product over foreground channels only; the einsum spec
        # assumes 5-D (b, c, x, y, z) inputs — TODO confirm 4-D maps never reach here.
        multipled = torch.einsum("bcxyz,bcxyz->bcxyz", logits[:, 1:, ...], phi[:, 1:, ...])
        bd_loss = multipled.mean()
        return bd_loss
# ===================== #
# Source: https://github.com/kevinzakka/pytorch-goodies/blob/master/losses.py
class TverskyLoss(nn.Module):
    """Computes the Tversky loss [1].
    Args:
        :param alpha: controls the penalty for false positives.
        :param beta: controls the penalty for false negatives.
        :param eps: added to the denominator for numerical stability.
    Returns:
        tversky_loss: the Tversky loss.
    Notes:
        alpha = beta = 0.5 => dice coeff
        alpha = beta = 1 => tanimoto coeff
        alpha + beta = 1 => F beta coeff
    References:
        [1]: https://arxiv.org/abs/1706.05721
    """
    def __init__(self, alpha, beta, eps=1e-7, **_):
        super(TverskyLoss, self).__init__()
        self.alpha = alpha
        self.beta = beta
        self.eps = eps
    def forward(self, logits, labels, **_):
        """
        Args:
            :param logits: a tensor of shape [B, C, H, W]. Corresponds to the raw output or logits of the model.
            :param labels: a tensor of shape [B, H, W] or [B, 1, H, W].
            :return: loss
        """
        n_cls = logits.shape[1]
        if n_cls == 1:
            # Binary case: build a 2-channel one-hot with the positive class
            # first so it lines up with sigmoid(logits).
            onehot = torch.eye(n_cls + 1)[labels.squeeze(1).long()]
            onehot = onehot.permute(0, 3, 1, 2).float()
            onehot = torch.cat([onehot[:, 1:2, :, :], onehot[:, 0:1, :, :]], dim=1)
            pos_prob = torch.sigmoid(logits)
            probas = torch.cat([pos_prob, 1 - pos_prob], dim=1)
        else:
            onehot = torch.eye(n_cls)[labels.squeeze(1)]
            onehot = onehot.permute(0, 3, 1, 2).float()
            probas = F.softmax(logits, dim=1)
        onehot = onehot.type(logits.type())
        # Reduce over batch and spatial dims, keeping per-class totals.
        dims = (0,) + tuple(range(2, logits.ndimension()))
        tp = torch.sum(probas * onehot, dims)
        fp = torch.sum(probas * (1 - onehot), dims)
        fn = torch.sum((1 - probas) * onehot, dims)
        score = (tp / (tp + self.alpha * fp + self.beta * fn + self.eps)).mean()
        return 1 - score
# ===================== #
# Source: https://github.com/cvqluu/Angular-Penalty-Softmax-Losses-Pytorch
class AngularPenaltySMLoss(nn.Module):
    def __init__(self, in_features, out_features, loss_type='arcface', eps=1e-7, s=None, m=None, **_):
        '''
        Angular Penalty Softmax Loss
        Three 'loss_types' available: ['arcface', 'sphereface', 'cosface']
        These losses are described in the following papers:
        ArcFace: https://arxiv.org/abs/1801.07698
        SphereFace: https://arxiv.org/abs/1704.08063
        CosFace/Ad Margin: https://arxiv.org/abs/1801.05599
        - Example -
        criterion = AngularPenaltySMLoss(in_features, out_features, loss_type='arcface') # loss_type in ['arcface', 'sphereface', 'cosface']
        '''
        super(AngularPenaltySMLoss, self).__init__()
        loss_type = loss_type.lower()
        if loss_type not in ['arcface', 'sphereface', 'cosface']:
            raise AssertionError
        # NOTE: `not s` also treats s=0/m=0 as "unset"; kept for backward compatibility.
        if loss_type == 'arcface':
            self.s = 64.0 if not s else s
            self.m = 0.5 if not m else m
        if loss_type == 'sphereface':
            self.s = 64.0 if not s else s
            self.m = 1.35 if not m else m
        if loss_type == 'cosface':
            self.s = 30.0 if not s else s
            self.m = 0.4 if not m else m
        self.loss_type = loss_type
        self.in_features = in_features
        self.out_features = out_features
        self.fc = nn.Linear(in_features, out_features, bias=False)
        self.eps = eps
    def forward(self, x, labels, **_):
        '''
        input shape (N, in_features); labels are class indices in [0, out_features).
        '''
        if len(x) != len(labels):
            raise AssertionError
        if torch.min(labels) < 0:
            raise AssertionError
        if torch.max(labels) >= self.out_features:
            raise AssertionError
        # BUGFIX: the original `for W in self.fc.parameters(): W = F.normalize(W, ...)`
        # only rebound the loop variable and never touched the weights, so wf
        # below was not a cosine similarity as the margin formulas require.
        # Normalize the weight rows in place (no gradient through the renorm).
        with torch.no_grad():
            self.fc.weight.copy_(F.normalize(self.fc.weight, p=2, dim=1))
        x = F.normalize(x, p=2, dim=1)
        # With unit-norm rows and inputs, wf[i, j] = cos(angle(x_i, W_j)).
        wf = self.fc(x)
        if self.loss_type == 'cosface':
            numerator = self.s * (torch.diagonal(wf.transpose(0, 1)[labels]) - self.m)
        if self.loss_type == 'arcface':
            numerator = self.s * torch.cos(torch.acos(torch.clamp(torch.diagonal(wf.transpose(0, 1)[labels]), -1. + self.eps, 1 - self.eps)) + self.m)
        if self.loss_type == 'sphereface':
            numerator = self.s * torch.cos(self.m * torch.acos(torch.clamp(torch.diagonal(wf.transpose(0, 1)[labels]), -1. + self.eps, 1 - self.eps)))
        # Per-sample logits of all non-target classes.
        excl = torch.cat([torch.cat((wf[i, :y], wf[i, y + 1:])).unsqueeze(0) for i, y in enumerate(labels)], dim=0)
        denominator = torch.exp(numerator) + torch.sum(torch.exp(self.s * excl), dim=1)
        L = numerator - torch.log(denominator)
        return -torch.mean(L)
# ===================== #
# Source: https://github.com/JunMa11/SegLoss/blob/master/losses_pytorch/dice_loss.py
class AsymLoss(nn.Module):
    def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1., square=False, **_):
        """
        Asymmetric similarity loss (beta fixed at 1.5).
        paper: https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=8573779
        """
        super(AsymLoss, self).__init__()
        self.square = square
        self.do_bg = do_bg
        self.batch_dice = batch_dice
        self.apply_nonlin = apply_nonlin
        self.smooth = smooth
        self.beta = 1.5
    def forward(self, logits, labels, loss_mask=None, **_):
        shape = logits.shape
        # Include the batch axis in the reduction when batch_dice is set.
        axes = ([0] if self.batch_dice else []) + list(range(2, len(shape)))
        if self.apply_nonlin is not None:
            logits = self.apply_nonlin(logits)
        tp, fp, fn = get_tp_fp_fn(logits, labels, axes, loss_mask, self.square)  # shape: (batch size, class num)
        weight = (self.beta ** 2) / (1 + self.beta ** 2)
        score = (tp + self.smooth) / (tp + weight * fn + (1 - weight) * fp + self.smooth)
        if not self.do_bg:
            # Drop the background class (index 0) before averaging.
            score = score[1:] if self.batch_dice else score[:, 1:]
        return -score.mean()
# ===================== #
# Source: https://github.com/BloodAxe/pytorch-toolbelt
# Used to enhance facial segmentation
def wing_loss(output: torch.Tensor, target: torch.Tensor, width=5, curvature=0.5, reduction="mean"):
    """
    Wing loss: logarithmic near zero, linear (shifted by a constant C) beyond
    ``width``. https://arxiv.org/pdf/1711.06753.pdf
    :param output: predictions
    :param target: ground truth
    :param width: transition point between the log and linear regimes
    :param curvature: curvature of the log regime
    :param reduction: "mean", "sum", or anything else for element-wise
    :return: reduced (or element-wise) loss tensor
    """
    diff = (target - output).abs()
    loss = diff.clone()
    small = diff < width
    large = ~small
    loss[small] = width * torch.log(1 + diff[small] / curvature)
    # C makes the two regimes meet continuously at |diff| == width.
    offset = width - width * math.log(1 + width / curvature)
    loss[large] = loss[large] - offset
    if reduction == "sum":
        loss = loss.sum()
    elif reduction == "mean":
        loss = loss.mean()
    return loss
class WingLoss(nn.modules.loss._Loss):
    """
    Module wrapper around :func:`wing_loss`; used to enhance facial segmentation.
    """
    def __init__(self, width=5, curvature=0.5, reduction="mean", **_):
        super(WingLoss, self).__init__(reduction=reduction)
        self.width = width
        self.curvature = curvature
    def forward(self, prediction, target, **_):
        # Delegate to the functional form with this module's settings.
        return wing_loss(prediction, target,
                         width=self.width,
                         curvature=self.curvature,
                         reduction=self.reduction)
# ===================== #
# Source: https://github.com/JUNHAOYAN/FPN/tree/master/RMI
# ..which is adapted from: https://github.com/ZJULearning/RMI (MIT License)
# Segmentation loss (memory intensive)
class RMILoss(nn.Module):
    """
    Region Mutual Information (RMI) loss:
    I(A, B) = H(A) + H(B) - H(A, B)
    computed over small pixel neighbourhoods of the label map and the
    predicted probability map. This version needs a lot of memory if it does
    not downsample (see ``rmi_pool_way`` / ``rmi_pool_size`` / ``rmi_pool_stride``).
    """
    def __init__(self,
                 num_classes=1,
                 rmi_radius=3,
                 rmi_pool_way=0,
                 rmi_pool_size=3,
                 rmi_pool_stride=3,
                 loss_weight_lambda=0.5,
                 lambda_way=1,
                 device="cuda", **_):
        super(RMILoss, self).__init__()
        self._CLIP_MIN = 1e-6  # min clip value after softmax or sigmoid operations
        self._CLIP_MAX = 1.0  # max clip value after softmax or sigmoid operations
        self._POS_ALPHA = 5e-4  # add this factor to ensure the AA^T is positive definite
        self._IS_SUM = 1  # sum the loss per channel
        self.num_classes = num_classes
        # radius choices
        if rmi_radius not in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:
            raise AssertionError
        self.rmi_radius = rmi_radius
        # 0 = max pool, 1 = avg pool, 2 = interpolation (see rmi_lower_bound)
        if rmi_pool_way not in [0, 1, 2, 3]:
            raise AssertionError
        self.rmi_pool_way = rmi_pool_way
        # set the pool_size = rmi_pool_stride
        if rmi_pool_size != rmi_pool_stride:
            raise AssertionError
        self.rmi_pool_size = rmi_pool_size
        self.rmi_pool_stride = rmi_pool_stride
        # trade-off between the plain CE/BCE term and the RMI term
        self.weight_lambda = loss_weight_lambda
        self.lambda_way = lambda_way
        # dimension of the distribution
        self.half_d = self.rmi_radius * self.rmi_radius
        self.d = 2 * self.half_d
        self.kernel_padding = self.rmi_pool_size // 2
        # ignore class
        self.ignore_index = 255
        # NOTE(review): rmi_lower_bound moves tensors to this device regardless
        # of the inputs' device — confirm intended for CPU-only runs.
        self.device = device
    def forward(self, logits, labels, **_):
        """Dispatch on num_classes: sigmoid path for binary, softmax+sigmoid otherwise."""
        if self.num_classes == 1:
            loss = self.forward_sigmoid(logits, labels)
        else:
            loss = self.forward_softmax_sigmoid(logits, labels)
        return loss
    def forward_softmax_sigmoid(self, inputs, targets):
        """
        Using both softmax and sigmoid operations.
        Args:
            inputs : [N, C, H, W], dtype=float32
            targets : [N, H, W], dtype=long
        """
        # PART I -- get the normal cross entropy loss
        normal_loss = F.cross_entropy(input=inputs,
                                      target=targets.long(),
                                      ignore_index=self.ignore_index,
                                      reduction='mean')
        # PART II -- get the lower bound of the region mutual information
        # get the valid label and logits
        # valid label, [N, C, H, W]
        label_mask_3D = targets < self.num_classes
        valid_onehot_labels_4D = F.one_hot(targets.long() * label_mask_3D.long(),
                                           num_classes=self.num_classes).float()
        label_mask_3D = label_mask_3D.float()
        valid_onehot_labels_4D = valid_onehot_labels_4D * label_mask_3D.unsqueeze(dim=3)
        valid_onehot_labels_4D = valid_onehot_labels_4D.permute(0, 3, 1, 2).requires_grad_(False)
        # valid probs
        probs_4D = torch.sigmoid(inputs) * label_mask_3D.unsqueeze(dim=1)
        probs_4D = probs_4D.clamp(min=self._CLIP_MIN, max=self._CLIP_MAX)
        # get region mutual information
        rmi_loss = self.rmi_lower_bound(valid_onehot_labels_4D, probs_4D)
        # add together: lambda_way selects which term gets weight_lambda
        final_loss = (self.weight_lambda * normal_loss + rmi_loss * (1 - self.weight_lambda) if self.lambda_way
                      else normal_loss + rmi_loss * self.weight_lambda)
        return final_loss
    def forward_sigmoid(self, logits_4D, labels_4D):
        """
        Using the sigmoid operation for both the BCE and RMI terms.
        Args:
            logits_4D : [N, C, H, W], dtype=float32
            labels_4D : [N, H, W], dtype=long
        """
        # label mask -- [N, H, W, 1]
        label_mask_3D = labels_4D < self.num_classes
        # valid label
        valid_onehot_labels_4D = F.one_hot(labels_4D.long() * label_mask_3D.long(),
                                           num_classes=self.num_classes).float()
        label_mask_3D = label_mask_3D.float()
        label_mask_flat = label_mask_3D.view([-1, ])
        valid_onehot_labels_4D = valid_onehot_labels_4D * label_mask_3D.unsqueeze(dim=3)
        valid_onehot_labels_4D.requires_grad_(False)
        # PART I -- calculate the sigmoid binary cross entropy loss
        valid_onehot_label_flat = valid_onehot_labels_4D.view([-1, self.num_classes]).requires_grad_(False)
        logits_flat = logits_4D.permute(0, 2, 3, 1).contiguous().view([-1, self.num_classes])
        # binary loss, multiplied by the not_ignore_mask
        valid_pixels = torch.sum(label_mask_flat)
        binary_loss = F.binary_cross_entropy_with_logits(logits_flat,
                                                         target=valid_onehot_label_flat,
                                                         weight=label_mask_flat.unsqueeze(dim=1),
                                                         reduction='sum')
        # normalize by the number of valid pixels (+1 guards against zero)
        bce_loss = torch.div(binary_loss, valid_pixels + 1.0)
        # PART II -- get rmi loss
        # onehot_labels_4D -- [N, C, H, W]
        probs_4D = logits_4D.sigmoid() * label_mask_3D.unsqueeze(dim=1) + self._CLIP_MIN
        valid_onehot_labels_4D = valid_onehot_labels_4D.permute(0, 3, 1, 2).requires_grad_(False)
        # get region mutual information
        rmi_loss = self.rmi_lower_bound(valid_onehot_labels_4D, probs_4D)
        # add together: lambda_way selects which term gets weight_lambda
        final_loss = (self.weight_lambda * bce_loss + rmi_loss * (1 - self.weight_lambda) if self.lambda_way
                      else bce_loss + rmi_loss * self.weight_lambda)
        return final_loss
    def rmi_lower_bound(self, labels_4D, probs_4D):
        """
        calculate the lower bound of the region mutual information.
        Args:
            labels_4D : [N, C, H, W], dtype=float32
            probs_4D : [N, C, H, W], dtype=float32
        """
        if labels_4D.size() != probs_4D.size():
            raise AssertionError
        p, s = self.rmi_pool_size, self.rmi_pool_stride
        # Optionally downsample both maps first to bound memory usage.
        if self.rmi_pool_stride > 1:
            if self.rmi_pool_way == 0:
                labels_4D = F.max_pool2d(labels_4D, kernel_size=p, stride=s, padding=self.kernel_padding)
                probs_4D = F.max_pool2d(probs_4D, kernel_size=p, stride=s, padding=self.kernel_padding)
            elif self.rmi_pool_way == 1:
                labels_4D = F.avg_pool2d(labels_4D, kernel_size=p, stride=s, padding=self.kernel_padding)
                probs_4D = F.avg_pool2d(probs_4D, kernel_size=p, stride=s, padding=self.kernel_padding)
            elif self.rmi_pool_way == 2:
                # interpolation
                shape = labels_4D.size()
                new_h, new_w = shape[2] // s, shape[3] // s
                labels_4D = F.interpolate(labels_4D, size=(new_h, new_w), mode='nearest')
                probs_4D = F.interpolate(probs_4D, size=(new_h, new_w), mode='bilinear', align_corners=True)
            else:
                raise NotImplementedError("Pool way of RMI is not defined!")
        # we do not need the gradient of label.
        label_shape = labels_4D.size()
        n, c = label_shape[0], label_shape[1]
        # combine the high dimension points from label and probability map. new shape [N, C, radius * radius, H, W]
        la_vectors, pr_vectors = self.map_get_pairs(labels_4D, probs_4D, radius=self.rmi_radius, is_combine=0)
        # double precision for the covariance / inverse computations below
        la_vectors = la_vectors.view([n, c, self.half_d, -1]).type(torch.double).to(self.device).requires_grad_(False)
        pr_vectors = pr_vectors.view([n, c, self.half_d, -1]).type(torch.double).to(self.device)
        # small diagonal matrix, shape = [1, 1, radius * radius, radius * radius]
        diag_matrix = torch.eye(self.half_d).unsqueeze(dim=0).unsqueeze(dim=0)
        # the mean and covariance of these high dimension points
        # Var(X) = E(X^2) - E(X) E(X), N * Var(X) = X^2 - X E(X)
        la_vectors = la_vectors - la_vectors.mean(dim=3, keepdim=True)
        la_cov = torch.matmul(la_vectors, la_vectors.transpose(2, 3))
        pr_vectors = pr_vectors - pr_vectors.mean(dim=3, keepdim=True)
        pr_cov = torch.matmul(pr_vectors, pr_vectors.transpose(2, 3))
        # https://github.com/pytorch/pytorch/issues/7500
        # waiting for batched torch.cholesky_inverse()
        pr_cov_inv = torch.inverse(pr_cov + diag_matrix.type_as(pr_cov) * self._POS_ALPHA)
        # if the dimension of the point is less than 9, you can use the below function
        # to acceleration computational speed.
        # pr_cov_inv = utils.batch_cholesky_inverse(pr_cov + diag_matrix.type_as(pr_cov) * _POS_ALPHA)
        la_pr_cov = torch.matmul(la_vectors, pr_vectors.transpose(2, 3))
        # the approxiamation of the variance, det(c A) = c^n det(A), A is in n x n shape;
        # then log det(c A) = n log(c) + log det(A).
        # appro_var = appro_var / n_points, we do not divide the appro_var by number of points here,
        # and the purpose is to avoid underflow issue.
        # If A = A^T, A^-1 = (A^-1)^T.
        appro_var = la_cov - torch.matmul(la_pr_cov.matmul(pr_cov_inv), la_pr_cov.transpose(-2, -1))
        # appro_var = la_cov - torch.chain_matmul(la_pr_cov, pr_cov_inv, la_pr_cov.transpose(-2, -1))
        # appro_var = torch.div(appro_var, n_points.type_as(appro_var)) + diag_matrix.type_as(appro_var) * 1e-6
        # The lower bound. If A is nonsingular, ln( det(A) ) = Tr( ln(A) ).
        rmi_now = 0.5 * self.log_det_by_cholesky(appro_var + diag_matrix.type_as(appro_var) * self._POS_ALPHA)
        # rmi_now = 0.5 * torch.logdet(appro_var + diag_matrix.type_as(appro_var) * _POS_ALPHA)
        # mean over N samples. sum over classes.
        rmi_per_class = rmi_now.view([-1, self.num_classes]).mean(dim=0).float()
        # is_half = False
        # if is_half:
        #     rmi_per_class = torch.div(rmi_per_class, float(self.half_d / 2.0))
        # else:
        rmi_per_class = torch.div(rmi_per_class, float(self.half_d))
        rmi_loss = torch.sum(rmi_per_class) if self._IS_SUM else torch.mean(rmi_per_class)
        return rmi_loss
    @staticmethod
    def map_get_pairs(labels_4D, probs_4D, radius=3, is_combine=True):
        """get map pairs
        Args:
            labels_4D : labels, shape [N, C, H, W]
            probs_4D : probabilities, shape [N, C, H, W]
            radius : the square radius
        Return:
            tensor with shape [N, C, radius * radius, H - (radius - 1), W - (radius - 1)]
        """
        # pad to ensure the following slice operation is valid
        # pad_beg = int(radius // 2)
        # pad_end = radius - pad_beg
        # the original height and width
        label_shape = labels_4D.size()
        h, w = label_shape[2], label_shape[3]
        new_h, new_w = h - (radius - 1), w - (radius - 1)
        # https://pytorch.org/docs/stable/nn.html?highlight=f%20pad#torch.nn.functional.pad
        # padding = (pad_beg, pad_end, pad_beg, pad_end)
        # labels_4D, probs_4D = F.pad(labels_4D, padding), F.pad(probs_4D, padding)
        # get the neighbors: one shifted crop per offset in the radius window
        la_ns = []
        pr_ns = []
        # for x in range(0, radius, 1):
        for y in range(0, radius, 1):
            for x in range(0, radius, 1):
                la_now = labels_4D[:, :, y:y + new_h, x:x + new_w]
                pr_now = probs_4D[:, :, y:y + new_h, x:x + new_w]
                la_ns.append(la_now)
                pr_ns.append(pr_now)
        if is_combine:
            # for calculating RMI
            pair_ns = la_ns + pr_ns
            p_vectors = torch.stack(pair_ns, dim=2)
            return p_vectors
        else:
            # for other purpose
            la_vectors = torch.stack(la_ns, dim=2)
            pr_vectors = torch.stack(pr_ns, dim=2)
            return la_vectors, pr_vectors
    @staticmethod
    def log_det_by_cholesky(matrix):
        """
        Args:
            matrix: matrix must be a positive define matrix.
                    shape [N, C, D, D].
        Ref:
            https://github.com/tensorflow/tensorflow/blob/r1.13/tensorflow/python/ops/linalg/linalg_impl.py
        """
        # This uses the property that the log det(A) = 2 * sum(log(real(diag(C))))
        # where C is the cholesky decomposition of A.
        # NOTE(review): torch.cholesky is deprecated in favor of
        # torch.linalg.cholesky in newer PyTorch — confirm target torch version.
        chol = torch.cholesky(matrix)
        # return 2.0 * torch.sum(torch.log(torch.diagonal(chol, dim1=-2, dim2=-1) + 1e-6), dim=-1)
        return 2.0 * torch.sum(torch.log(torch.diagonal(chol, dim1=-2, dim2=-1) + 1e-8), dim=-1)
# ===================== #
# Source: https://github.com/RElbers/region-mutual-information-pytorch
# Segmentation loss (memory intensive)
class RMILossAlt(nn.Module):
    """
    PyTorch Module which calculates the Region Mutual Information loss (https://arxiv.org/abs/1910.12037).

    Combines a (weighted) binary cross entropy term with a lower bound of
    the region mutual information between prediction and target.
    """

    def __init__(self,
                 with_logits,
                 radius=3,
                 bce_weight=0.5,
                 downsampling_method='max',
                 stride=3,
                 use_log_trace=True,
                 use_double_precision=True,
                 epsilon=0.0005, **_):
        """
        :param with_logits:
            If True, apply the sigmoid function to the prediction before calculating loss.
        :param radius:
            RMI radius.
        :param bce_weight:
            Weight of the binary cross entropy. Must be between 0 and 1.
        :param downsampling_method:
            Downsampling method used before calculating RMI. Must be one of ['avg', 'max', 'region-extraction'].
            If 'region-extraction', then downscaling is done during the region extraction phase. Meaning that the stride is the spacing between consecutive regions.
        :param stride:
            Stride used for downsampling.
        :param use_log_trace:
            Whether to calculate the log of the trace, instead of the log of the determinant. See equation (15).
        :param use_double_precision:
            Calculate the RMI using doubles in order to fix potential numerical issues.
        :param epsilon:
            Magnitude of the entries added to the diagonal of M in order to fix potential numerical issues.
        """
        super().__init__()
        self.use_double_precision = use_double_precision
        self.with_logits = with_logits
        self.bce_weight = bce_weight
        self.stride = stride
        self.downsampling_method = downsampling_method
        self.radius = radius
        self.use_log_trace = use_log_trace
        self.epsilon = epsilon

    def forward(self, logits, labels, **_):
        # labels arrive as [N, H, W]; add the channel axis to match logits.
        labels = labels.unsqueeze(1)
        # Calculate BCE if needed
        if self.bce_weight != 0:
            if self.with_logits:
                bce = F.binary_cross_entropy_with_logits(logits, target=labels)
            else:
                bce = F.binary_cross_entropy(logits, target=labels)
            bce = bce.mean() * self.bce_weight
        else:
            bce = 0.0
        # Apply sigmoid to get probabilities. See final paragraph of section 4.
        if self.with_logits:
            logits = torch.sigmoid(logits)
        # Calculate RMI loss; the two terms are blended by bce_weight.
        rmi = self.rmi_loss(input_=logits, target=labels)
        rmi = rmi.mean() * (1.0 - self.bce_weight)
        return rmi + bce

    def rmi_loss(self, input_, target):
        """
        Calculates the RMI loss between the prediction and target.

        Both arguments must share the same shape [N, C, H, W].
        :return:
            RMI loss (negative lower bound; minimizing it maximizes the bound)
        """
        if input_.shape != target.shape:
            raise AssertionError
        vector_size = self.radius * self.radius
        # Get region vectors: [N, C, radius**2, n_regions]
        y = self.extract_region_vector(target)
        p = self.extract_region_vector(input_)
        # Convert to doubles for better precision
        if self.use_double_precision:
            y = y.double()
            p = p.double()
        # Small diagonal matrix to fix numerical issues
        eps = torch.eye(vector_size, dtype=y.dtype, device=y.device) * self.epsilon
        eps = eps.unsqueeze(dim=0).unsqueeze(dim=0)
        # Subtract mean (center the region vectors)
        y = y - y.mean(dim=3, keepdim=True)
        p = p - p.mean(dim=3, keepdim=True)
        # Covariances (un-normalized; the constant factor cancels in the bound)
        y_cov = y @ transpose(y)
        p_cov = p @ transpose(p)
        y_p_cov = y @ transpose(p)
        # Approximated posterior covariance matrix of Y given P
        m = y_cov - y_p_cov @ transpose(inverse(p_cov + eps)) @ transpose(y_p_cov)
        # Lower bound of RMI
        if self.use_log_trace:
            rmi = 0.5 * log_trace(m + eps)
        else:
            rmi = 0.5 * log_det(m + eps)
        # Normalize
        rmi = rmi / float(vector_size)
        # Sum over classes, mean over samples.
        return rmi.sum(dim=1).mean(dim=0)

    def extract_region_vector(self, x):
        """
        Downsamples and extracts square regions from x.
        Returns the flattened vectors of length radius*radius.
        """
        x = self.downsample(x)
        # In 'region-extraction' mode, downscaling happens here via the
        # unfold stride instead of in downsample().
        stride = self.stride if self.downsampling_method == 'region-extraction' else 1
        x_regions = F.unfold(x, kernel_size=self.radius, stride=stride)
        x_regions = x_regions.view((*x.shape[:2], self.radius ** 2, -1))
        return x_regions

    def downsample(self, x):
        # Skip if stride is 1
        if self.stride == 1:
            return x
        # Skip if we pool during region extraction.
        if self.downsampling_method == 'region-extraction':
            return x
        padding = self.stride // 2
        if self.downsampling_method == 'max':
            return F.max_pool2d(x, kernel_size=self.stride, stride=self.stride, padding=padding)
        if self.downsampling_method == 'avg':
            return F.avg_pool2d(x, kernel_size=self.stride, stride=self.stride, padding=padding)
        raise ValueError(self.downsampling_method)
def transpose(x):
    """Swap the last two dimensions of *x* (batched matrix transpose)."""
    return torch.transpose(x, -2, -1)
def inverse(x):
    """Batched matrix inverse of *x*."""
    return x.inverse()
def log_trace(x):
    """Log-determinant of SPD matrices via Cholesky: 2 * sum(log(diag(L))).

    Uses torch.linalg.cholesky, replacing the deprecated/removed
    torch.cholesky (same lower-triangular factor). The 1e-8 offset guards
    log() against zeros on the diagonal.
    """
    chol = torch.linalg.cholesky(x)
    diag = torch.diagonal(chol, dim1=-2, dim2=-1)
    return 2 * torch.sum(torch.log(diag + 1e-8), dim=-1)
def log_det(x):
    """Natural log of det(x) (batched)."""
    return x.logdet()
# ====================== #
# Source: https://github.com/NRCan/geo-deep-learning/blob/develop/losses/boundary_loss.py
class BoundaryLoss(nn.Module):
    """Boundary Loss proposed in:
    Bokhovkin et al., Boundary Loss for Remote Sensing Imagery Semantic Segmentation
    https://arxiv.org/abs/1905.07852

    Computes 1 - boundary F1 score between predicted and ground-truth
    boundary maps extracted with max-pooling.
    """
    # in previous implementations theta0=3, theta=5
    def __init__(self, theta0=19, theta=19, ignore_index=None, weight=None, is_binary: bool = False, **_):
        """
        :param theta0: kernel size used to extract boundary maps.
        :param theta: kernel size used to extend (dilate) boundary maps.
        :param ignore_index: stored but not used by this implementation.
        :param weight: stored but not used by this implementation.
        :param is_binary: if True, use sigmoid and treat labels as already
            channel-shaped; otherwise softmax + one-hot encoding of labels.
        """
        super().__init__()
        self.theta0 = theta0
        self.theta = theta
        self.ignore_index = ignore_index
        self.weight = weight
        self.is_binary = is_binary

    def forward(self, logits, labels, **_):
        """
        Input:
            - logits: the output from model (before softmax)
                    shape (N, C, H, W)
            - labels: ground truth map
                    shape (N, H, w)
        Return:
            - boundary loss, averaged over mini-batch
        """
        n, c, _, _ = logits.shape

        # sigmoid / softmax so that predicted map can be distributed in [0, 1]
        if self.is_binary:
            logits = torch.sigmoid(logits)
        else:
            logits = torch.softmax(logits, dim=1)

        # one-hot vector of ground truth
        if self.is_binary:
            one_hot_gt = labels
        else:
            # Pass num_classes=c explicitly: without it F.one_hot sizes the
            # channel axis from max(labels), which breaks whenever a batch
            # does not contain every class.
            one_hot_gt = F.one_hot(labels.long(), c).permute(0, 3, 1, 2).squeeze(dim=-1).contiguous().float()

        # boundary map: dilation of the inverted map minus the inverted map
        gt_b = F.max_pool2d(1 - one_hot_gt, kernel_size=self.theta0, stride=1, padding=(self.theta0 - 1) // 2)
        gt_b -= 1 - one_hot_gt

        pred_b = F.max_pool2d(1 - logits, kernel_size=self.theta0, stride=1, padding=(self.theta0 - 1) // 2)
        pred_b -= 1 - logits

        # extended boundary map (tolerance band around each boundary)
        gt_b_ext = F.max_pool2d(gt_b, kernel_size=self.theta, stride=1, padding=(self.theta - 1) // 2)
        pred_b_ext = F.max_pool2d(pred_b, kernel_size=self.theta, stride=1, padding=(self.theta - 1) // 2)

        # reshape to (N, C, H*W) for per-class reductions
        gt_b = gt_b.view(n, c, -1)
        pred_b = pred_b.view(n, c, -1)
        gt_b_ext = gt_b_ext.view(n, c, -1)
        pred_b_ext = pred_b_ext.view(n, c, -1)

        # Precision, Recall of the boundary pixels (eps avoids 0/0)
        eps = 1e-7
        P = (torch.sum(pred_b * gt_b_ext, dim=2) + eps) / (torch.sum(pred_b, dim=2) + eps)
        R = (torch.sum(pred_b_ext * gt_b, dim=2) + eps) / (torch.sum(gt_b, dim=2) + eps)

        # Boundary F1 Score
        BF1 = (2 * P * R + eps) / (P + R + eps)

        # summing BF1 Score for each class and average over mini-batch
        loss = torch.mean(1 - BF1)

        return loss
# ====================== #
"""
Hausdorff loss implementation based on paper:
https://arxiv.org/pdf/1904.10030.pdf
copy pasted from - all credit goes to original authors:
https://github.com/SilmarilBearer/HausdorffLoss
"""
from scipy.ndimage.morphology import distance_transform_edt as edt
from scipy.ndimage import convolve
import cv2
class HausdorffDTLoss(nn.Module):
    """Binary Hausdorff loss based on distance transform.

    Distance-transform variant of the Hausdorff loss
    (https://arxiv.org/pdf/1904.10030.pdf) for binary segmentation.
    """

    def __init__(self, alpha=2.0, **_):
        """
        :param alpha: exponent applied to the distance maps.
        """
        super(HausdorffDTLoss, self).__init__()
        self.alpha = alpha

    # NOTE: @staticmethod must be the OUTERMOST decorator. With the previous
    # order (@torch.no_grad() outside @staticmethod) the class attribute was a
    # plain wrapper function, so self.distance_field(arr) bound `self` as
    # `img` and broke the call.
    @staticmethod
    @torch.no_grad()
    def distance_field(img: np.ndarray) -> np.ndarray:
        """Per-sample distance field: edt(foreground) + edt(background)."""
        field = np.zeros_like(img)

        for batch in range(len(img)):
            fg_mask = img[batch] > 0.5

            # Leave the field at zero when there is no foreground at all.
            if fg_mask.any():
                bg_mask = ~fg_mask

                fg_dist = edt(fg_mask)
                bg_dist = edt(bg_mask)

                field[batch] = fg_dist + bg_dist

        return field

    def forward(self, logits: torch.Tensor, labels: torch.Tensor, debug=False, **_) -> torch.Tensor:
        """
        Uses one binary channel: 1 - fg, 0 - bg
        pred: (b, 1, x, y, z) or (b, 1, x, y)
        target: (b, 1, x, y, z) or (b, 1, x, y)
        """
        labels = labels.unsqueeze(1)
        if logits.dim() not in (4, 5):
            raise AssertionError("Only 2D and 3D supported")
        if (logits.dim() != labels.dim()):
            raise AssertionError("Prediction and target need to be of same dimension")

        # this is necessary for binary loss
        logits = torch.sigmoid(logits)

        # Distance fields are computed on CPU/NumPy; edt has no torch version.
        pred_dt = torch.from_numpy(self.distance_field(logits.detach().cpu().numpy())).float()
        target_dt = torch.from_numpy(self.distance_field(labels.detach().cpu().numpy())).float()

        pred_error = (logits - labels) ** 2
        distance = pred_dt.to(logits.device) ** self.alpha + target_dt.to(logits.device) ** self.alpha

        dt_field = pred_error * distance
        loss = dt_field.mean()

        if debug:
            return (
                loss.detach().cpu().numpy(),
                (
                    dt_field.detach().cpu().numpy()[0, 0],
                    pred_error.detach().cpu().numpy()[0, 0],
                    distance.detach().cpu().numpy()[0, 0],
                    pred_dt.detach().cpu().numpy()[0, 0],
                    target_dt.detach().cpu().numpy()[0, 0],
                ),
            )
        else:
            return loss
class HausdorffERLoss(nn.Module):
    """Binary Hausdorff loss based on morphological erosion.

    Approximates the Hausdorff distance by repeatedly eroding the squared
    prediction/target difference (https://arxiv.org/pdf/1904.10030.pdf).
    """

    def __init__(self, alpha=2.0, erosions=10, **kwargs):
        """
        :param alpha: exponent weighting later erosion steps more heavily.
        :param erosions: number of erosion iterations.
        """
        super(HausdorffERLoss, self).__init__()
        self.alpha = alpha
        self.erosions = erosions
        self.prepare_kernels()

    def prepare_kernels(self):
        # Cross-shaped structuring element for 2D; for 3D it is sandwiched
        # between single-center planes. Both kernels are normalized to sum 1.
        cross = np.array([cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))])
        bound = np.array([[[0, 0, 0], [0, 1, 0], [0, 0, 0]]])

        self.kernel2D = cross * 0.2
        self.kernel3D = np.array([bound, cross, bound]) * (1 / 7)

    @torch.no_grad()
    def perform_erosion(self, pred: np.ndarray, target: np.ndarray, debug) -> np.ndarray:
        bound = (pred - target) ** 2

        if bound.ndim == 5:
            kernel = self.kernel3D
        elif bound.ndim == 4:
            kernel = self.kernel2D
        else:
            raise ValueError(f"Dimension {bound.ndim} is not supported.")

        eroted = np.zeros_like(bound)
        erosions = []

        for batch in range(len(bound)):
            # debug
            erosions.append(np.copy(bound[batch][0]))

            for k in range(self.erosions):
                # compute convolution with kernel
                dilation = convolve(bound[batch], kernel, mode="constant", cval=0.0)

                # apply soft thresholding at 0.5 and normalize
                erosion = dilation - 0.5
                erosion[erosion < 0] = 0

                # np.ptp(...) instead of ndarray.ptp(): the method was
                # removed in NumPy 2.0.
                ptp = np.ptp(erosion)
                if ptp != 0:
                    erosion = (erosion - erosion.min()) / ptp

                # save erosion and add to loss
                bound[batch] = erosion
                eroted[batch] += erosion * (k + 1) ** self.alpha

                if debug:
                    erosions.append(np.copy(erosion[0]))

        # image visualization in debug mode
        if debug:
            return eroted, erosions
        else:
            return eroted

    def forward(self, pred: torch.Tensor, target: torch.Tensor, debug=False) -> torch.Tensor:
        """
        Uses one binary channel: 1 - fg, 0 - bg
        pred: (b, 1, x, y, z) or (b, 1, x, y)
        target: (b, 1, x, y, z) or (b, 1, x, y)
        """
        target = target.unsqueeze(1)
        if pred.dim() not in (4, 5):
            raise AssertionError("Only 2D and 3D supported")
        if (pred.dim() != target.dim()):
            raise AssertionError("Prediction and target need to be of same dimension")

        pred = torch.sigmoid(pred)

        if debug:
            eroted, erosions = self.perform_erosion(pred.detach().cpu().numpy(), target.detach().cpu().numpy(), debug)
            return eroted.mean(), erosions
        else:
            eroted = torch.from_numpy(self.perform_erosion(pred.detach().cpu().numpy(), target.detach().cpu().numpy(), debug)).float()
            loss = eroted.mean()
            return loss
# ====================== #
"""
Recall Loss
copy pasted from - all credit goes to original authors:
https://github.com/shuaizzZ/Recall-Loss-PyTorch/blob/master/recall_loss.py
"""
class RecallLoss(nn.Module):
    """ An unofficial implementation of
        <Recall Loss for Imbalanced Image Classification and Semantic Segmentation>

        recall = TP / (TP + FN)
    Args:
        weight: An array of shape [C,]; per-class weights, normalized to sum to 1
        predict: A float32 tensor of shape [N, C, *], for Semantic segmentation task is [N, C, H, W]
        target: A int64 tensor of shape [N, *], for Semantic segmentation task is [N, H, W]
    Return:
        recall loss (1 - mean recall)
    """
    def __init__(self, weight=None, **_):
        super(RecallLoss, self).__init__()
        if weight is not None:
            weight = torch.Tensor(weight)
            # Normalized weight
            weight = weight / torch.sum(weight)
        # None means "unweighted"; forward() checks for this explicitly
        # instead of the previous hasattr() dance.
        self.weight = weight
        self.smooth = 1e-5

    def forward(self, logits, labels, **_):
        N, C = logits.size()[:2]
        # Hard prediction: argmax over the class axis, (N, C, *) ==> (N, 1, *)
        _, predict = torch.max(logits, 1)
        predict = predict.view(N, 1, -1)  # (N, 1, *)
        labels = labels.view(N, 1, -1)  # (N, 1, *)
        last_size = labels.size(-1)

        ## convert predict & target (N, 1, *) into one hot vector (N, C, *).
        # Allocate on the input's device instead of hard-coding .cuda(),
        # so the loss also works on CPU.
        predict_onehot = torch.zeros((N, C, last_size), device=logits.device)
        predict_onehot.scatter_(1, predict, 1)  # (N, C, *)
        target_onehot = torch.zeros((N, C, last_size), device=logits.device)
        target_onehot.scatter_(1, labels, 1)  # (N, C, *)

        true_positive = torch.sum(predict_onehot * target_onehot, dim=2)  # (N, C)
        total_target = torch.sum(target_onehot, dim=2)  # (N, C)

        ## Recall = TP / (TP + FN); smooth avoids 0/0 for absent classes
        recall = (true_positive + self.smooth) / (total_target + self.smooth)  # (N, C)

        if self.weight is not None:
            if self.weight.type() != logits.type():
                self.weight = self.weight.type_as(logits)
            recall = recall * self.weight * C  # (N, C)
        recall_loss = 1 - torch.mean(recall)  # scalar
        return recall_loss
# ====================== #
class SoftInvDiceLoss(torch.nn.Module):
    """Soft Dice loss computed on the *inverted* maps.

    Measures Dice overlap between (1 - prediction) and (1 - target),
    i.e. how well the background is captured; a well-performing loss for
    binary segmentation.
    """

    def __init__(self, smooth=1., is_binary=True, **_):
        super(SoftInvDiceLoss, self).__init__()
        self.smooth = smooth
        self.is_binary = is_binary

    def forward(self, logits, labels, **_):
        # Map raw scores into [0, 1] (sigmoid for binary, softmax otherwise).
        probs = torch.sigmoid(logits) if self.is_binary else torch.softmax(logits, dim=1)

        # Invert foreground/background and flatten.
        inv_pred = (1 - probs).reshape(-1)
        inv_true = (1 - labels).reshape(-1)

        overlap = torch.sum(inv_pred * inv_true)
        denom = inv_pred.sum() + inv_true.sum() + self.smooth
        return 1 - (2. * overlap + self.smooth) / denom
# ======================= #
# --- COMBINED LOSSES --- #
class OhemBCEDicePenalizeBorderLoss(OhemCrossEntropy2d):
    """
    OHEM (Online Hard Example Mining) wrapper around the BCE-Dice
    penalized-border criterion.
    """

    def __init__(self, thresh=0.6, min_kept=0, ignore_index=-100, kernel_size=21, **_):
        super().__init__()
        # Per-pixel criterion applied to the mined hard examples.
        self.criterion = BCEDicePenalizeBorderLoss(kernel_size=kernel_size)
        # OHEM selection parameters.
        self.thresh = float(thresh)
        self.min_kept = int(min_kept)
        self.ignore_label = ignore_index
class RMIBCEDicePenalizeBorderLoss(RMILossAlt):
    """
    Combined RMI and BCE-Dice penalized-border loss:
    ``rmi_weight * RMI + bce_weight * BCEDicePenalizeBorder``.
    """

    def __init__(self, kernel_size=21, rmi_weight=1.0, bce_weight=1.0, **kwargs):
        super().__init__(**kwargs)
        self.bce = BCEDicePenalizeBorderLoss(kernel_size=kernel_size)
        self.bce_weight = bce_weight
        self.rmi_weight = rmi_weight

    def to(self, device):
        # Move both this module and the wrapped BCE criterion.
        super().to(device=device)
        self.bce.to(device=device)
        # Return self to match the nn.Module.to() chaining convention.
        return self

    def forward(self, logits, labels, **_):
        if labels.shape != logits.shape:
            # Labels usually arrive as [N, H, W] while logits are [N, 1, H, W]:
            # add the channel axis. (The old code compared shape tuples
            # lexicographically and discarded the unsqueeze result.)
            if logits.dim() > labels.dim():
                labels = labels.unsqueeze(dim=1)
            else:
                raise Exception(f'Non-matching shapes for logits ({logits.shape}) and labels ({labels.shape})')
        # Calculate RMI loss on probabilities; BCE term gets raw logits.
        rmi = self.rmi_loss(input_=torch.sigmoid(logits), target=labels)
        bce = self.bce(logits, labels)
        return self.rmi_weight * rmi + self.bce_weight * bce
qt__pyqt__pyside__pyqode/Is_IP__QRegExpValidator/main.py | DazEB2/SimplePyScripts | 117 | 80841 | <gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from PyQt5.QtWidgets import QLineEdit, QApplication
from IP_Validator import get_ip_validator
if __name__ == '__main__':
    # Minimal demo: a single line edit whose input is restricted to
    # IPv4 addresses.
    app = QApplication([])
    mw = QLineEdit()
    # Validator comes from the local IP_Validator module; presumably a
    # QRegExpValidator matching dotted-quad IPs -- see that module.
    mw.setValidator(get_ip_validator())
    mw.setText("0.0.0.0")
    mw.show()
    # Enter the Qt event loop (blocks until the window is closed).
    app.exec()
|
mindboggle/guts/mesh.py | cemlyn007/mindboggle | 118 | 80850 | #!/usr/bin/env python
"""
Operations on surface mesh vertices.
Authors:
- <NAME>, 2012 (<EMAIL>)
- <NAME>, 2012-2016 (<EMAIL>) http://binarybottle.com
Copyright 2016, Mindboggle team (http://mindboggle.info), Apache v2.0 License
"""
def find_neighbors_from_file(input_vtk):
    """
    List neighboring-vertex indices for every vertex of a VTK surface mesh.

    Parameters
    ----------
    input_vtk : string
        name of input VTK file containing surface mesh

    Returns
    -------
    neighbor_lists : list of lists of integers
        neighbor_lists[i] holds the unique indices of vertices sharing a
        face with vertex i

    Examples
    --------
    >>> from mindboggle.guts.mesh import find_neighbors_from_file
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> vtk_file = fetch_data(urls['left_mean_curvature'], '', '.vtk')
    >>> neighbor_lists = find_neighbors_from_file(vtk_file)
    >>> neighbor_lists[0:3]
    [[1, 4, 48, 49], [0, 4, 5, 49, 2], [1, 5, 6, 49, 50, 54]]
    """
    from mindboggle.guts.mesh import find_neighbors
    from mindboggle.mio.vtks import read_faces_points

    mesh_faces, _points, n_vertices = read_faces_points(input_vtk)

    return find_neighbors(mesh_faces, n_vertices)
def find_neighbors(faces, npoints):
    """
    Generate the list of unique, sorted indices of neighboring vertices
    for all vertices in the faces of a triangular mesh.

    Parameters
    ----------
    faces : list of lists of three integers
        the integers for each face are indices to vertices, starting from zero
    npoints: integer
        number of vertices on the mesh

    Returns
    -------
    neighbor_lists : list of lists of integers
        each list contains indices to neighboring vertices for each vertex,
        in first-seen order

    Examples
    --------
    >>> from mindboggle.guts.mesh import find_neighbors
    >>> faces = [[0,1,2],[0,2,3],[0,3,4],[0,1,4],[4,3,1]]
    >>> npoints = 5
    >>> find_neighbors(faces, npoints)
    [[1, 2, 3, 4], [0, 2, 4, 3], [0, 1, 3], [0, 2, 4, 1], [0, 3, 1]]
    """
    neighbor_lists = [[] for x in range(npoints)]
    # Parallel sets give O(1) membership tests (the old list scans were
    # O(n) per face); the lists still record first-seen insertion order,
    # which callers and the doctest depend on.
    neighbor_sets = [set() for x in range(npoints)]

    for face in faces:
        v0, v1, v2 = face
        # Register both directions of each of the three edges, in the same
        # order as the original implementation.
        for vertex, neighbor in ((v0, v1), (v0, v2),
                                 (v1, v0), (v1, v2),
                                 (v2, v0), (v2, v1)):
            if neighbor not in neighbor_sets[vertex]:
                neighbor_sets[vertex].add(neighbor)
                neighbor_lists[vertex].append(neighbor)

    return neighbor_lists
def find_neighbors_vertex(faces, index):
    """
    Find neighbors to a surface mesh vertex.

    For a set of surface mesh faces and the index of a surface vertex,
    find unique indices for neighboring vertices.

    Parameters
    ----------
    faces : list of lists of three integers
        the integers for each face are indices to vertices, starting from zero
    index : int
        index of surface vertex

    Returns
    -------
    neighbor_list : list of integers
        unique indices of vertices neighboring the given vertex

    Examples
    --------
    >>> from mindboggle.guts.mesh import find_neighbors_vertex
    >>> faces = [[0,1,2],[0,2,3],[0,3,4],[0,1,4]]
    >>> find_neighbors_vertex(faces, 1)
    [0, 2, 4]
    """
    import numpy as np

    # Make sure argument is a numpy array
    if not isinstance(faces, np.ndarray):
        faces = np.array(faces)

    neighbor_list = []
    # Scan the three vertex columns in order so the first-seen ordering
    # matches the original implementation (column 0 rows, then 1, then 2).
    for column in (0, 1, 2):
        for face in faces[faces[:, column] == index]:
            for vertex in face:
                vertex = int(vertex)
                if vertex != index and vertex not in neighbor_list:
                    neighbor_list.append(vertex)

    return neighbor_list
def find_neighborhood(neighbor_lists, indices, nedges=1):
    """
    Find neighbors in the neighborhood of given surface mesh vertices.

    Propagates up to nedges edges outward from the given vertices and
    collects every newly reached vertex (the seed vertices themselves are
    excluded).

    Parameters
    ----------
    neighbor_lists : list of lists of integers
        each list contains indices to neighboring vertices for each vertex
    indices : list of integers
        indices of surface vertices
    nedges : integer
        number of edges to propagate from indices

    Returns
    -------
    neighborhood : list of integers
        indices to vertices in neighborhood

    Examples
    --------
    >>> from mindboggle.guts.mesh import find_neighborhood
    >>> neighbor_lists = [[0,1],[0,2],[1,4,5],[2],[],[0,1,4,5]]
    >>> find_neighborhood(neighbor_lists, [1,3,4], 2)
    [0, 2, 5]
    """
    neighborhood = []
    frontier = indices[:]
    visited = indices[:]

    # Breadth-first expansion, one edge per iteration:
    for _ in range(nedges):
        if not frontier:
            break
        candidates = []
        for vertex in frontier:
            candidates.extend(neighbor_lists[vertex])
        # Keep only vertices not reached before:
        frontier = list(set(candidates).difference(visited))
        neighborhood.extend(frontier)
        visited.extend(frontier)

    return [int(x) for x in neighborhood]
def find_endpoints(indices, neighbor_lists):
    """
    Extract endpoints from a connected set of vertices.

    An endpoint is a vertex with exactly one neighbor inside the given set.

    Parameters
    ----------
    indices : list of integers
        indices to connected vertices
    neighbor_lists : list of lists of integers
        each list contains indices to neighboring vertices for each vertex

    Returns
    -------
    indices_endpoints : list of integers
        indices to endpoints of connected vertices
    """
    members = set(indices)

    endpoints = []
    for vertex in indices:
        linked = members.intersection(neighbor_lists[vertex])
        if len(linked) == 1:
            endpoints.append(vertex)

    return endpoints
def find_edges(faces):
    """
    Find all edges on a mesh

    Parameters
    ----------
    faces : list of lists of three integers
        the integers for each face are indices to vertices, starting from zero

    Returns
    -------
    edges : list of lists of integers
        each element is a 2-tuple of vertex ids representing an edge

    Examples
    --------
    >>> from mindboggle.guts.mesh import find_edges
    >>> faces=[[0,1,2], [0,1,4], [1,2,3], [0,2,5]]
    >>> find_edges(faces)
    [[0, 1], [1, 2], [0, 2], [1, 4], [0, 4], [2, 3], [1, 3], [2, 5], [0, 5]]
    """
    edges = []
    # A set of tuple keys replaces the original O(n) "edge in edges" list
    # scan while keeping identical (orientation-sensitive) semantics and
    # insertion order.
    seen = set()
    for face in faces:
        for edge in (face[0:2], face[1:3], [face[0], face[2]]):
            key = tuple(edge)
            if key not in seen:
                seen.add(key)
                edges.append(edge)
    return edges
def find_faces_at_edges(faces):
    """
    For each edge on the mesh, find the faces that share the edge.

    Parameters
    ----------
    faces : list of lists of three integers
        the integers for each face are indices to vertices, starting from zero

    Returns
    -------
    faces_at_edges : dictionary
        keys are tuples of two vertex IDs and values are lists of face IDs;
        both orientations of each edge are stored as keys

    Examples
    --------
    >>> from mindboggle.guts.mesh import find_faces_at_edges
    >>> faces=[[0,1,2], [0,1,4], [1,2,3], [0,2,5]]
    >>> faces_at_edges = find_faces_at_edges(faces)
    >>> faces_at_edges[(0,2)]
    [0, 3]

    Notes ::
        The faces are assumed to be triangular.
    """
    faces_at_edges = {}
    for face_id, (a, b, c) in enumerate(faces):
        for v1, v2 in ((a, b), (b, c), (a, c)):
            faces_at_edges.setdefault((v1, v2), []).append(face_id)
            # Store the reversed orientation too, for symmetric lookup.
            faces_at_edges.setdefault((v2, v1), []).append(face_id)
    return faces_at_edges
def find_faces_with_vertex(index, faces):
    """
    For a given vertex, find all faces containing this vertex.
    Note: faces do not have to be triangles.

    Parameters
    ----------
    index : integer
        index to a vertex
    faces : list of lists of three integers
        the integers for each face are indices to vertices, starting from zero

    Returns
    -------
    faces_with_vertex : list of lists of three integers
        the subset of faces that contain the given vertex

    Examples
    --------
    >>> from mindboggle.guts.mesh import find_faces_with_vertex
    >>> faces = [[0,1,2],[0,2,3],[0,3,4],[0,1,4],[4,3,1]]
    >>> find_faces_with_vertex(3, faces)
    [[0, 2, 3], [0, 3, 4], [4, 3, 1]]
    """
    matching = []
    for face in faces:
        if index in face:
            matching.append(face)
    return matching
def find_faces_at_vertices(faces, npoints):
    """
    For each vertex, find all faces containing this vertex.
    Note: faces do not have to be triangles.

    Parameters
    ----------
    faces : list of lists of three integers
        the integers for each face are indices to vertices, starting from zero
    npoints: integer
        number of vertices on the mesh

    Returns
    -------
    faces_at_vertices : list of lists of integers
        faces_at_vertices[i] is a list of faces that contain the i-th vertex

    Examples
    --------
    >>> from mindboggle.guts.mesh import find_faces_at_vertices
    >>> faces = [[0,1,2],[0,2,3],[0,3,4],[0,1,4],[4,3,1]]
    >>> find_faces_at_vertices(faces, 5)
    [[0, 1, 2, 3], [0, 3, 4], [0, 1], [1, 2, 4], [2, 3, 4]]
    """
    lookup = [[] for _ in range(npoints)]
    # One pass over the faces; each face registers itself at its vertices.
    for face_id, face in enumerate(faces):
        for vertex in face:
            lookup[vertex].append(face_id)
    return lookup
def find_adjacent_faces(faces):
    """
    For each face in a list of faces, find adjacent faces.

    Parameters
    ----------
    faces : list of lists of three integers
        the integers for each face are indices to vertices, starting from zero
        (-1 indicates no result for a given face or vertex)

    Returns
    -------
    adjacent_faces: list of pairs of lists of three integers
        list 1 indexes three faces adjacent to the three face's edges;
        list 2 indexes three vertices opposite the adjacent faces:
        adjacent_faces[i][0] = [face0, face1, face2], neighbors of face i
        (face0 is the neighbor of face i facing vertex0)
        adjacent_faces[i][1] = [vertex0, vertex1, vertex2] for face i
        (vertex0 is the vertex of face0 not in face i)

    Examples
    --------
    >>> from mindboggle.guts.mesh import find_adjacent_faces
    >>> faces = [[0,1,2],[0,2,3],[0,3,4],[0,1,4],[4,3,1]]
    >>> adjacent_faces = find_adjacent_faces(faces)
    >>> adjacent_faces[0:2]
    [[[-1, 1, 3], [-1, 3, 4]], [[-1, 2, 0], [-1, 4, 1]]]
    """
    #print("Calculating face neighbor list")

    n_faces = len(faces)

    # adjacent_faces[i] = [[face ids], [opposite vertex ids]], -1 = not found.
    adjacent_faces = []
    [adjacent_faces.append([[-1,-1,-1], [-1,-1,-1]]) for i in range(n_faces)]

    # Done[i] counts how many of face i's three neighbors have been found,
    # so fully-resolved faces can stop scanning early.
    Done =[]
    [Done.append(0) for i in range(n_faces)]

    # Loop through faces:
    for i1, face1 in enumerate(faces):
        # Loop through remaining faces:
        for i2 in range(i1+1, n_faces):
            face2 = faces[i2]

            # Loop through first two vertices of face:
            for ivertex in [0,1]:
                index1 = face1[ivertex]
                # Loop through remaining vertices of face:
                for index2 in face1[ivertex+1:3]:

                    # If pair of vertices in face2:
                    if index1 in face2 and index2 in face2:

                        # Determine if it is face0, face1 or face2.
                        # The slot index is the position of the vertex NOT on
                        # the shared edge: positions sum to 0+1+2 = 3, so
                        # subtracting the two edge positions leaves it.
                        NbrID1 = 3 - face1.index(index1) - face1.index(index2)
                        NbrID2 = 3 - face2.index(index1) - face2.index(index2)

                        # Record each face as the other's neighbor, plus the
                        # vertex of the neighbor opposite the shared edge.
                        adjacent_faces[i1][0][NbrID1] = i2
                        adjacent_faces[i2][0][NbrID2] = i1
                        adjacent_faces[i1][1][NbrID1] = face2[NbrID2]
                        adjacent_faces[i2][1][NbrID2] = face1[NbrID1]
                        Done[i1] += 1
                        Done[i2] += 1

            # Break if all three neighbors of face1 have been found:
            if Done[i1] == 3:
                break

    return adjacent_faces
def find_complete_faces(indices, faces):
    """
    Given a set of vertices, find the ones that make complete faces.

    Parameters
    ----------
    indices : list of integers
        indices to connected vertices
    faces : list of lists of three integers
        the integers for each face are indices to vertices, starting from zero

    Returns
    -------
    indices_complete : list of integers
        indices to vertices making up complete faces, in first-seen order

    Examples
    --------
    >>> from mindboggle.guts.mesh import find_complete_faces
    >>> faces = [[0,2,3], [2,3,7], [4,7,8], [3,2,5]]
    >>> find_complete_faces([3,7,2,5,9,4], faces)
    [2, 3, 7, 5]
    """
    index_set = frozenset(indices)

    # Gather vertices of faces whose three (distinct) vertices are all
    # in the given set, preserving face order.
    vertices_in_order = []
    for face in faces:
        if len(index_set.intersection(face)) == 3:
            vertices_in_order.extend(face)

    # Deduplicate while keeping first-seen order.
    indices_complete = []
    for vertex in vertices_in_order:
        if vertex not in indices_complete:
            indices_complete.append(vertex)
    return indices_complete
def keep_faces(faces, indices):
    """
    Remove surface mesh faces whose three vertices are not all in "indices".

    Parameters
    ----------
    faces : list of lists of three integers
        the integers for each face are indices to vertices, starting from zero
    indices : list of integers
        indices to vertices of the surface mesh that are to be retained

    Returns
    -------
    faces : list of lists of three integers
        reduced number of faces

    Examples
    --------
    >>> from mindboggle.guts.mesh import keep_faces
    >>> faces = [[1,2,3], [2,3,7], [4,7,8], [3,2,5]]
    >>> indices = [0,1,2,3,4,5]
    >>> keep_faces(faces, indices)
    [[1, 2, 3], [3, 2, 5]]
    """
    import numpy as np

    # Retain only faces whose three vertices are all in the keep set:
    keep = frozenset(indices)
    kept_faces = [face for face in faces if len(keep.intersection(face)) == 3]

    # Normalize to an (n, 3) array and back to a list of lists of ints:
    return np.reshape(np.ravel(kept_faces), (-1, 3)).tolist()
def reindex_faces_points(faces, points=[]):
    """
    Renumber indices in faces and remove points (coordinates) not in faces.

    Parameters
    ----------
    faces : list of lists of integers
        each sublist contains 3 indices of vertices that form a face
        on a surface mesh
    points : list of lists of floats (optional)
        each sublist contains 3-D coordinates of a vertex on a surface mesh

    Returns
    -------
    new_faces : list of lists of integers
        each sublist contains 3 (renumbered) indices of vertices
        that form a face on a surface mesh
    new_points : list of lists of floats
        each (new) sublist contains 3-D coordinates of a vertex on a surface mesh
        (None if no points were given)
    original_indices : list integers
        list of indices to original points

    Examples
    --------
    >>> from mindboggle.guts.mesh import reindex_faces_points
    >>> # Reindex faces:
    >>> faces = [[8,2,3], [2,3,7], [4,7,8], [3,2,5]]
    >>> new_faces, new_points, original_indices = reindex_faces_points(faces,
    ...     points=[])
    >>> new_faces
    [[5, 0, 1], [0, 1, 4], [2, 4, 5], [1, 0, 3]]
    """
    import numpy as np
    import itertools

    # Accept points as either a list or a numpy array:
    if isinstance(points, np.ndarray):
        points = points.tolist()
    elif not isinstance(points, list):
        raise IOError("points should be either a list or a numpy array.")

    # Unique vertex indices referenced by any face; set() removes repeats
    # and list() fixes an iteration order that is reused below:
    original_indices = list(set(itertools.chain(*faces)))

    # Map each old vertex index to its position in original_indices,
    # then renumber every face through that map:
    reindex = {old: new for new, old in enumerate(original_indices)}
    new_faces = [[reindex[vertex] for vertex in face] for face in faces]

    # Keep only the coordinates of referenced vertices, in the new order:
    if points:
        new_points = [points[old] for old in original_indices]
    else:
        new_points = None

    return new_faces, new_points, original_indices
def remove_neighbor_lists(neighbor_lists, indices):
    """
    Remove all but a given set of indices from surface mesh neighbor lists.

    Note :: SLOW!

    Parameters
    ----------
    neighbor_lists : list of lists of integers
        each list contains indices to neighboring vertices for each vertex
    indices : list of integers
        indices to vertices of the surface mesh

    Returns
    -------
    neighbor_lists : list of lists of integers
        each list has indices to remaining neighboring vertices for each vertex

    Examples
    --------
    >>> from mindboggle.guts.mesh import remove_neighbor_lists
    >>> neighbor_lists = [[1,2,3], [2,3,7], [12,43], [4,7,8], [3,2,5]]
    >>> indices = [0,1,2,3,4,5]
    >>> remove_neighbor_lists(neighbor_lists, indices)
    [[1, 2, 3], [2, 3], [], [4], [2, 3, 5]]
    """
    keep = frozenset(indices)

    # Intersect each vertex's neighbor list with the retained indices:
    pruned_lists = []
    for neighbors in neighbor_lists:
        pruned_lists.append(list(keep.intersection(neighbors)))

    return pruned_lists
def reindex_faces_0to1(faces):
    """
    Convert 0-indices (Python) to 1-indices (Matlab) for all face indices.

    Parameters
    ----------
    faces : list of lists of integers
        each sublist contains 3 0-indices of vertices that form a face
        on a surface mesh

    Returns
    -------
    faces : list of lists of integers
        each sublist contains 3 1-indices of vertices that form a face
        on a surface mesh

    Examples
    --------
    >>> from mindboggle.guts.mesh import reindex_faces_0to1
    >>> faces = [[0,2,3], [2,3,7], [4,7,8], [3,2,5]]
    >>> reindex_faces_0to1(faces)
    [[1, 3, 4], [3, 4, 8], [5, 8, 9], [4, 3, 6]]
    """
    # Shift every vertex index up by one:
    one_indexed = []
    for face in faces:
        one_indexed.append([vertex + 1 for vertex in face])

    return one_indexed
def decimate(points, faces, reduction=0.75, smooth_steps=25,
             scalars=[], save_vtk=False, output_vtk=''):
    """
    Decimate vtk triangular mesh with vtk.vtkDecimatePro.

    Parameters
    ----------
    points : list of lists of floats
        each element is a list of 3-D coordinates of a vertex on a surface mesh
    faces : list of lists of integers
        each element is list of 3 indices of vertices that form a face
        on a surface mesh
    reduction : float
        fraction of mesh faces to remove
    smooth_steps : integer
        number of smoothing steps (0 for no smoothing)
    scalars : list of integers or floats
        optional scalars for output VTK file
    save_vtk : bool
        output decimated vtk file?
    output_vtk : string
        output decimated vtk file name

    Returns
    -------
    points : list of lists of floats
        decimated points
    faces : list of lists of integers
        decimated faces
    scalars : list of integers or floats
        scalars for output VTK file
    output_vtk : string
        output decimated vtk file (None if save_vtk is False)

    Examples
    --------
    >>> # Example: Twins-2-1 left postcentral pial surface, 0.75 decimation:
    >>> from mindboggle.guts.mesh import decimate
    >>> from mindboggle.mio.vtks import read_vtk
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> input_vtk = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> points, f1, f2, faces, scalars, f3, f4, f5 = read_vtk(input_vtk)
    >>> reduction = 0.5
    >>> smooth_steps = 25
    >>> save_vtk = True
    >>> output_vtk = 'decimate.vtk'
    >>> points2, faces2, scalars, output_vtk = decimate(points, faces,
    ...     reduction, smooth_steps, scalars, save_vtk, output_vtk)
    >>> (len(points), len(points2))
    (145069, 72535)
    >>> (len(faces), len(faces2))
    (290134, 145066)

    View decimated surface (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
    >>> plot_surfaces('decimate.vtk') # doctest: +SKIP
    """
    import os
    import vtk

    # ------------------------------------------------------------------------
    # vtk points:
    # ------------------------------------------------------------------------
    vtk_points = vtk.vtkPoints()
    for i, x in enumerate(points):
        vtk_points.InsertPoint(i, x[0], x[1], x[2])

    # ------------------------------------------------------------------------
    # vtk faces:
    # ------------------------------------------------------------------------
    vtk_faces = vtk.vtkCellArray()
    for face in faces:
        vtk_face = vtk.vtkPolygon()
        vtk_face.GetPointIds().SetNumberOfIds(3)
        vtk_face.GetPointIds().SetId(0, face[0])
        vtk_face.GetPointIds().SetId(1, face[1])
        vtk_face.GetPointIds().SetId(2, face[2])
        vtk_faces.InsertNextCell(vtk_face)

    # ------------------------------------------------------------------------
    # vtk scalars:
    # ------------------------------------------------------------------------
    if scalars:
        vtk_scalars = vtk.vtkFloatArray()
        vtk_scalars.SetName("scalars")
        for scalar in scalars:
            vtk_scalars.InsertNextValue(scalar)

    # ------------------------------------------------------------------------
    # vtkPolyData:
    # ------------------------------------------------------------------------
    polydata = vtk.vtkPolyData()
    polydata.SetPoints(vtk_points)
    polydata.SetPolys(vtk_faces)
    if scalars:
        polydata.GetPointData().SetScalars(vtk_scalars)

    # ------------------------------------------------------------------------
    # Decimate:
    # ------------------------------------------------------------------------
    # We want to preserve topology (not let any cracks form).
    # This may limit the total reduction possible.
    decimate = vtk.vtkDecimatePro()
    # Migrate to VTK6:
    # http://www.vtk.org/Wiki/VTK/VTK_6_Migration/Replacement_of_SetInput
    # Old: decimate.SetInput(polydata)
    decimate.SetInputData(polydata)
    decimate.SetTargetReduction(reduction)
    decimate.PreserveTopologyOn()

    # ------------------------------------------------------------------------
    # Prepare writer (only when saving); otherwise no file name is returned:
    # ------------------------------------------------------------------------
    if save_vtk:
        if not output_vtk:
            output_vtk = os.path.join(os.getcwd(), 'decimated.vtk')
        exporter = vtk.vtkPolyDataWriter()
    else:
        output_vtk = None

    # ------------------------------------------------------------------------
    # Smooth:
    # ------------------------------------------------------------------------
    if smooth_steps > 0:
        smoother = vtk.vtkSmoothPolyDataFilter()
        # Migrate to VTK6:
        # http://www.vtk.org/Wiki/VTK/VTK_6_Migration/Replacement_of_SetInput
        # Old: smoother.SetInput(decimate.GetOutput())
        smoother.SetInputConnection(decimate.GetOutputPort())
        smoother.SetNumberOfIterations(smooth_steps)
        smoother.Update()
        out = smoother.GetOutput()
        # Bug fix: only touch the exporter when save_vtk is True.
        # Previously this line ran unconditionally and raised NameError
        # when save_vtk was False (exporter is only created above then).
        if save_vtk:
            exporter.SetInputConnection(smoother.GetOutputPort())
    else:
        decimate.Update()
        out = decimate.GetOutput()
        if save_vtk:
            # Migrate to VTK6:
            # http://www.vtk.org/Wiki/VTK/VTK_6_Migration/Replacement_of_SetInput
            # http://stackoverflow.com/questions/29020740/
            #        what-is-the-difference-in-setinputconnection-and-setinput
            # Old: exporter.SetInput(decimate.GetOutput())
            exporter.SetInputConnection(decimate.GetOutputPort())

    # ------------------------------------------------------------------------
    # Export output:
    # ------------------------------------------------------------------------
    if save_vtk:
        exporter.SetFileName(output_vtk)
        exporter.Write()
        if not os.path.exists(output_vtk):
            raise IOError(output_vtk + " not found")

    # ------------------------------------------------------------------------
    # Extract decimated points, faces, and scalars:
    # ------------------------------------------------------------------------
    points = [list(out.GetPoint(point_id))
              for point_id in range(out.GetNumberOfPoints())]
    if out.GetNumberOfPolys() > 0:
        polys = out.GetPolys()
        pt_data = out.GetPointData()
        # Cell data layout is [3, v0, v1, v2, 3, v0, v1, v2, ...];
        # skip the leading count entry of each 4-value record:
        faces = [[int(polys.GetData().GetValue(j))
                  for j in range(i*4 + 1, i*4 + 4)]
                 for i in range(polys.GetNumberOfCells())]
        if scalars:
            scalars = [pt_data.GetScalars().GetValue(i)
                       for i in range(len(points))]
    else:
        faces = []
        scalars = []

    return points, faces, scalars, output_vtk
def decimate_file(input_vtk, reduction=0.5, smooth_steps=100,
                  save_vtk=True, output_vtk=''):
    """
    Decimate vtk triangular mesh file with vtk.vtkDecimatePro.

    Parameters
    ----------
    input_vtk : string
        input vtk file with triangular surface mesh
    reduction : float
        fraction of mesh faces to remove
    smooth_steps : integer
        number of smoothing steps (0 for no smoothing)
    save_vtk : bool
        output decimated vtk file? (only True is supported)
    output_vtk : string
        output decimated vtk file name

    Returns
    -------
    output_vtk : string
        output decimated vtk file

    Examples
    --------
    >>> from mindboggle.guts.mesh import decimate_file
    >>> from mindboggle.mio.vtks import read_vtk
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> input_vtk = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> save_vtk = True
    >>> output_vtk = 'decimate.vtk'
    >>> reduction = 0.5
    >>> smooth_steps = 25
    >>> output_vtk = decimate_file(input_vtk, reduction, smooth_steps,
    ...     save_vtk, output_vtk)
    >>> f1, f2, f3, faces1, f4, f5, npoints1, f6 = read_vtk(input_vtk)
    >>> f1, f2, f3, faces2, f4, f5, npoints2, f6 = read_vtk('decimate.vtk')
    >>> (npoints1, npoints2)
    (145069, 72535)
    >>> (len(faces1), len(faces2))
    (290134, 145066)

    View decimated surface (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces
    >>> plot_surfaces('decimate.vtk') # doctest: +SKIP
    """
    from mindboggle.mio.vtks import read_vtk
    from mindboggle.guts.mesh import decimate

    # Only the file-producing path is implemented:
    if not save_vtk:
        raise NotImplementedError()

    # Read the triangular surface mesh from file:
    points, indices, lines, faces, scalars, scalar_names, npoints, \
        input_vtk = read_vtk(input_vtk)

    # Decimate (and optionally smooth), writing the result to output_vtk:
    points, faces, scalars, output_vtk = decimate(points, faces, reduction,
                                                  smooth_steps, scalars,
                                                  save_vtk, output_vtk)

    return output_vtk
def rescale_by_neighborhood(input_vtk, indices=[], nedges=10, p=99,
    set_max_to_1=True, save_file=False, output_filestring='rescaled_scalars',
    background_value=-1):
    """
    Rescale the scalar values of a VTK file by a percentile value
    in each vertex's surface mesh neighborhood.

    Parameters
    ----------
    input_vtk : string
        name of VTK file with a scalar value for each vertex
    indices : list of integers (optional)
        indices of scalars to normalize
    nedges : integer
        number or edges from vertex, defining the size of its neighborhood
    p : float in range of [0,100]
        percentile used to normalize each scalar
    set_max_to_1 : bool
        set all rescaled values greater than 1 to 1.0?
    save_file : bool
        save output VTK file?
    output_filestring : string (if save_file)
        name of output file
    background_value : integer
        background value

    Returns
    -------
    rescaled_scalars : list of floats
        rescaled scalar values
    rescaled_scalars_file : string (if save_file)
        name of output VTK file with rescaled scalar values

    Examples
    --------
    >>> import numpy as np
    >>> from mindboggle.guts.mesh import rescale_by_neighborhood
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> input_vtk = fetch_data(urls['left_travel_depth'], '', '.vtk')
    >>> rescaled, rescaled_file = rescale_by_neighborhood(input_vtk,
    ...     [], 10, 99, True, True, 'rescale_by_neighborhood', -1)
    >>> scalars1, name = read_scalars(input_vtk)
    >>> print('{0:0.5f}, {1:0.5f}'.format(max(scalars1), max(rescaled)))
    34.95560, 1.00000
    >>> print('{0:0.5f}, {1:0.5f}'.format(np.mean(scalars1), np.mean(rescaled)))
    7.43822, 0.44950

    View rescaled scalar values on surface (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
    >>> plot_surfaces(rescaled_file) # doctest: +SKIP
    """
    import os
    import numpy as np
    from mindboggle.mio.vtks import read_scalars, rewrite_scalars
    from mindboggle.guts.mesh import find_neighbors_from_file, find_neighborhood

    # Load the scalar values (numpy array) and the mesh neighbor lists;
    # default to every non-background vertex when no indices are given:
    scalars, name = read_scalars(input_vtk, True, True)
    if not indices:
        indices = [i for i, value in enumerate(scalars)
                   if value != background_value]
    neighbor_lists = find_neighbors_from_file(input_vtk)

    # Divide each selected vertex's value by a high percentile of the
    # scalar values within its nedges-deep neighborhood:
    rescaled = scalars.copy()
    for vertex in indices:
        hood = find_neighborhood(neighbor_lists, [vertex], nedges)
        scale = np.percentile(scalars[hood], p)
        rescaled[vertex] = scalars[vertex] / scale

    # Optionally clip rescaled values at 1:
    if set_max_to_1:
        too_high = [vertex for vertex in indices if rescaled[vertex] > 1.0]
        rescaled[too_high] = 1
    rescaled_scalars = rescaled.tolist()

    # ------------------------------------------------------------------------
    # Return rescaled scalars and file name
    # ------------------------------------------------------------------------
    if save_file:
        rescaled_scalars_file = os.path.join(os.getcwd(),
                                             output_filestring + '.vtk')
        rewrite_scalars(input_vtk, rescaled_scalars_file,
                        rescaled_scalars, 'rescaled_scalars', [],
                        background_value)
        if not os.path.exists(rescaled_scalars_file):
            raise IOError(rescaled_scalars_file + " not found")
    else:
        rescaled_scalars_file = None

    return rescaled_scalars, rescaled_scalars_file
def rescale_by_label(input_vtk, labels_or_file, save_file=False,
                     output_filestring='rescaled_scalars',
                     background_value=-1, verbose=False):
    """
    Rescale scalars for each label (such as depth values within each fold).

    Default is to normalize the scalar values of a VTK file by
    a percentile value in each vertex's surface mesh for each label.

    Parameters
    ----------
    input_vtk : string
        name of VTK file with a scalar value for each vertex
    labels_or_file : list, numpy array, or string
        label number for each vertex or name of VTK file with index scalars
    save_file : bool
        save output VTK file?
    output_filestring : string (if save_file)
        name of output file
    background_value : integer or float
        background value
    verbose : bool
        print statements?

    Returns
    -------
    rescaled_scalars : list of floats
        scalar values rescaled for each label, for label numbers not equal to -1
    rescaled_scalars_file : string (if save_file)
        name of output VTK file with rescaled scalar values for each label

    Examples
    --------
    >>> # Rescale depths by neighborhood within each label:
    >>> import numpy as np
    >>> from mindboggle.guts.mesh import rescale_by_label
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> input_vtk = fetch_data(urls['left_travel_depth'], '', '.vtk')
    >>> labels_or_file = fetch_data(urls['left_folds'], '', '.vtk')
    >>> rescaled, rescaled_label_file = rescale_by_label(input_vtk,
    ...     labels_or_file, True, 'rescale_by_label', -1, False)
    >>> scalars1, name = read_scalars(input_vtk)
    >>> print('{0:0.5f}, {1:0.5f}'.format(max(scalars1), max(rescaled)))
    34.95560, 1.00000
    >>> print('{0:0.5f}, {1:0.5f}'.format(np.mean(scalars1), np.mean(rescaled)))
    7.43822, 0.30677

    View rescaled scalar values on surface (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
    >>> plot_surfaces(rescaled_label_file) # doctest: +SKIP
    """
    import os
    import numpy as np
    from mindboggle.mio.vtks import read_scalars, rewrite_scalars

    # Load scalar values (numpy array):
    scalars, name = read_scalars(input_vtk, True, True)
    if verbose:
        print(" Rescaling scalar values within each label...")

    # Load label numbers; accept a VTK file name, a list, or a numpy array.
    # (Previously any other type left `labels` unbound, raising a confusing
    # NameError at np.unique below.)
    if isinstance(labels_or_file, str):
        labels, name = read_scalars(labels_or_file, True, True)
    elif isinstance(labels_or_file, (list, np.ndarray)):
        labels = labels_or_file
    else:
        raise IOError("labels_or_file should be a file name, a list, "
                      "or a numpy array.")
    unique_labels = np.unique(labels)

    # Rescale the scalars within each label by that label's maximum value:
    for label in unique_labels:
        if verbose:
            print(" Rescaling values within label {0} of {1} labels...".
                  format(int(label), len(unique_labels)))
        indices = [i for i, x in enumerate(labels) if x == label]
        if indices:
            scalars[indices] = scalars[indices] / np.max(scalars[indices])

    rescaled_scalars = scalars.tolist()

    # ------------------------------------------------------------------------
    # Return rescaled scalars and file name
    # ------------------------------------------------------------------------
    if save_file:
        rescaled_scalars_file = os.path.join(os.getcwd(),
                                             output_filestring + '.vtk')
        rewrite_scalars(input_vtk, rescaled_scalars_file,
                        rescaled_scalars, 'rescaled_scalars', labels,
                        background_value)
        if not os.path.exists(rescaled_scalars_file):
            raise IOError(rescaled_scalars_file + " not found")
    else:
        rescaled_scalars_file = None

    return rescaled_scalars, rescaled_scalars_file
def area_of_faces(points, faces):
    """
    Compute the areas of all triangles on the mesh.

    Uses Heron's formula on the three edge lengths of each triangle,
    computed in a single vectorized pass (the former per-triangle
    Python loop was O(n) interpreter iterations).

    Parameters
    ----------
    points : list of lists of 3 floats
        x,y,z coordinates for each vertex of the structure
    faces : list of lists of 3 integers
        3 indices to vertices that form a triangle on the mesh

    Returns
    -------
    area: 1-D numpy array
        area[i] is the area of the i-th triangle

    Examples
    --------
    >>> import numpy as np
    >>> from mindboggle.guts.mesh import area_of_faces
    >>> from mindboggle.mio.vtks import read_vtk
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> input_vtk = fetch_data(urls['left_area'], '', '.vtk')
    >>> points, f1, f2, faces, f3, f4, f5, f6 = read_vtk(input_vtk)
    >>> area = area_of_faces(points, faces)
    >>> [np.float("{0:.{1}f}".format(x, 5)) for x in area[0:5]]
    [0.21703, 0.27139, 0.29033, 0.1717, 0.36011]
    """
    import numpy as np

    points = np.asarray(points)
    faces_arr = np.asarray(faces)
    if faces_arr.size == 0:
        # No faces: return an empty array, matching np.zeros(len(faces)):
        return np.zeros(len(faces))

    # Edge lengths of every triangle (vectorized over all faces at once):
    a = np.linalg.norm(points[faces_arr[:, 0]] - points[faces_arr[:, 1]], axis=1)
    b = np.linalg.norm(points[faces_arr[:, 1]] - points[faces_arr[:, 2]], axis=1)
    c = np.linalg.norm(points[faces_arr[:, 2]] - points[faces_arr[:, 0]], axis=1)

    # Heron's formula from the semiperimeter s:
    s = (a + b + c) / 2.0
    area = np.sqrt(s * (s - a) * (s - b) * (s - c))

    return area
def dilate(indices, nedges, neighbor_lists):
    """
    Dilate region on a surface mesh.

    Parameters
    ----------
    indices : list of integers
        indices of vertices to dilate
    nedges : integer
        number of edges to dilate across
    neighbor_lists : list of lists of integers
        each list contains indices to neighboring vertices for each vertex

    Returns
    -------
    dilated_indices : list of integers
        indices of original vertices with dilated vertices

    Examples
    --------
    >>> import numpy as np
    >>> from mindboggle.guts.mesh import dilate, find_neighbors_from_file
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> vtk_file = fetch_data(urls['left_travel_depth'], '', '.vtk')
    >>> folds_file = fetch_data(urls['left_folds'], '', '.vtk')
    >>> neighbor_lists = find_neighbors_from_file(vtk_file)
    >>> # Select a single fold:
    >>> folds, name = read_scalars(folds_file, True, True)
    >>> indices = [i for i,x in enumerate(folds) if x == 4]
    >>> dilated_indices = dilate(indices, 3, neighbor_lists)
    >>> (len(indices), len(dilated_indices))
    (1151, 1545)
    >>> dilated_indices[0:10]
    [50317, 50324, 50325, 50326, 50327, 50332, 50333, 50334, 50339, 50340]
    """
    from mindboggle.guts.mesh import find_neighborhood

    # Vertices within nedges edges of the region (excludes the region itself):
    fringe = find_neighborhood(neighbor_lists, indices, nedges)

    # The dilated region is the original region plus its fringe:
    dilated_indices = list(indices)
    dilated_indices.extend(fringe)

    return dilated_indices
def erode(indices, nedges, neighbor_lists):
    """
    Erode region on a surface mesh.

    Parameters
    ----------
    indices : list of integers
        indices of vertices to erode
    nedges : integer
        number of edges to erode across
    neighbor_lists : list of lists of integers
        each list contains indices to neighboring vertices for each vertex

    Returns
    -------
    eroded_indices : list of integers
        indices of original vertices without eroded vertices

    Examples
    --------
    >>> import numpy as np
    >>> from mindboggle.guts.mesh import erode, find_neighbors_from_file
    >>> from mindboggle.mio.vtks import read_scalars, rewrite_scalars
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> vtk_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> folds_file = fetch_data(urls['left_folds'], '', '.vtk')
    >>> neighbor_lists = find_neighbors_from_file(vtk_file)
    >>> # Select a single fold:
    >>> folds, name = read_scalars(folds_file, True, True)
    >>> indices = [i for i,x in enumerate(folds) if x == 4]
    >>> eroded_indices = erode(indices, 3, neighbor_lists)
    >>> (len(indices), len(eroded_indices))
    (1151, 809)
    """
    from mindboggle.guts.mesh import find_neighborhood

    # One-edge ring just outside the region:
    outer_ring = find_neighborhood(neighbor_lists, indices, nedges=1)

    # Everything within nedges edges of that outer ring:
    near_boundary = find_neighborhood(neighbor_lists, outer_ring, nedges)

    # Keep only region vertices that are not close to the boundary:
    eroded_indices = list(frozenset(indices).difference(near_boundary))

    return eroded_indices
def extract_edge(indices, neighbor_lists):
    """
    Erode region on a surface mesh to extract the region's edge.

    Parameters
    ----------
    indices : list of integers
        indices of vertices to erode
    neighbor_lists : list of lists of integers
        each list contains indices to neighboring vertices for each vertex

    Returns
    -------
    edge_indices : list of integers
        indices of eroded vertices

    Examples
    --------
    >>> import numpy as np
    >>> from mindboggle.guts.mesh import extract_edge
    >>> from mindboggle.guts.mesh import find_neighbors_from_file
    >>> from mindboggle.mio.vtks import read_scalars, rewrite_scalars
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> vtk_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> folds_file = fetch_data(urls['left_folds'], '', '.vtk')
    >>> neighbor_lists = find_neighbors_from_file(vtk_file)
    >>> # Select a single fold:
    >>> folds, name = read_scalars(folds_file, True, True)
    >>> indices = [i for i,x in enumerate(folds) if x == 4]
    >>> edge_indices = extract_edge(indices, neighbor_lists)
    >>> (len(indices), len(edge_indices))
    (1151, 111)
    """
    from mindboggle.guts.mesh import find_neighborhood

    # One-edge ring just outside the region, then one edge back inward:
    ring_out = find_neighborhood(neighbor_lists, indices, nedges=1)
    ring_back = find_neighborhood(neighbor_lists, ring_out, nedges=1)

    # Region vertices reachable from just outside the region are its edge:
    edge_indices = list(set(ring_back).intersection(indices))

    return edge_indices
def topo_test(index, values, neighbor_lists):
    """
    Test to see if vertex is a "simple point".

    A simple point is a vertex that when added to or removed from an object
    (e.g., a curve) on a surface mesh does not alter the object's topology.
    "Simple" is not to be mistaken with the following usage:
    "A vertex is usually assigned one of five possible classifications:
    simple, complex, boundary, interior edge, or corner vertex.
    A simple vertex is surrounded by a closed fan of triangles".

    Parameters
    ----------
    index : integer
        index of vertex
    values : numpy array of integers or floats
        values for all vertices
    neighbor_lists : list of lists of integers
        each list contains indices to neighboring vertices for each vertex

    Returns
    -------
    sp : bool
        simple point or not?
    n_inside : integer
        number of neighboring vertices with a value greater than threshold

    Examples
    --------
    >>> # Square with a center vertex:
    >>> # indices [[0,1,2],[3,4,6],[7,8,9]] = 0 and indices [2,4,6] = 1:
    >>> import numpy as np
    >>> from mindboggle.guts.mesh import topo_test
    >>> values = np.array([0,0,1,0,1,0,1,0,0])
    >>> neighbor_lists = [[1,3],[0,2,3,4],[1,4,5],
    ...                   [0,1,4,6],[1,2,3,5,6,7],[2,4,7,8],
    ...                   [3,4,7],[4,5,6,8],[5,7]]
    >>> sps = []
    >>> for index in range(9):
    ...     sp, n_inside = topo_test(index, values, neighbor_lists)
    ...     sps.append(sp)
    >>> sps
    [False, True, True, True, False, True, True, True, False]

    """
    import numpy as np

    # Make sure argument is a numpy array:
    if not isinstance(values, np.ndarray):
        values = np.array(values)

    # Find neighbors to the input vertex, and binarize them
    # into those greater or less than a class boundary threshold equal to 0.5
    # ("inside" and "outside"); count inside and outside neighbors:
    I_neighbors = neighbor_lists[index]
    neighbor_values = values[I_neighbors]
    inside = [I_neighbors[i] for i,x in enumerate(neighbor_values) if x > 0.5]
    n_inside = len(inside)
    n_outside = len(I_neighbors) - n_inside

    # If the number of inside or outside neighbors is zero,
    # then the vertex IS NOT a simple point:
    if n_outside * n_inside == 0:
        sp = False
    # Or if either the number of inside or outside neighbors is one,
    # then the vertex IS a simple point:
    elif n_outside == 1 or n_inside == 1:
        sp = True
    # Otherwise, test to see if all of the inside neighbors share neighbors
    # with each other, in which case the vertex IS a simple point:
    else:
        # For each neighbor exceeding the threshold,
        # find its neighbors that also exceed the threshold,
        # then store these neighbors' indices in a sublist of "N":
        labels = list(range(1, n_inside + 1))
        N = []
        for i_in in range(n_inside):
            new_neighbors = neighbor_lists[inside[i_in]]
            new_neighbors = [x for x in new_neighbors
                             if values[x] > 0.5 if x != index]
            new_neighbors.extend([inside[i_in]])
            N.append(new_neighbors)

        # Consolidate labels of connected vertices --
        # Loop through neighbors (lists within "N"),
        # reassigning the labels for the lists until each label's
        # list(s) has a unique set of vertices:
        change = True
        while change:
            change = False

            # Loop through pairs of inside neighbors
            # and continue if their two labels are different:
            for i in range(n_inside - 1):
                for j in range(i + 1, n_inside):
                    if labels[i] != labels[j]:
                        # Assign the two subsets the same label
                        # if they share at least one vertex,
                        # and continue looping:
                        if set(N[i]).intersection(N[j]):
                            labels[i] = max([labels[i], labels[j]])
                            labels[j] = labels[i]
                            change = True

        # The vertex is a simple point if all of its neighbors
        # (if any) share neighbors with each other (one unique label).
        # (Replaces a side-effecting list comprehension that appended to a
        # list "D" solely to count distinct labels -- len(set(...)) is the
        # direct, equivalent way to count unique labels.)
        sp = len(set(labels)) == 1

    return sp, n_inside
# def fill_holes(regions, neighbor_lists, values=[], exclude_range=[],
# background_value=-1):
# """
# Fill holes in regions on a surface mesh by using region boundaries.
#
# NOTE: assumes one set of connected vertices per region
#
# Steps ::
#
# 1. Segment region vertex neighbors into connected vertices (region boundaries).
# 2. Remove the largest region boundary, presumably the
# outer contour of the region, leaving smaller boundaries,
# presumably the contours of holes within the region.
# 3. Call label_holes() to fill holes with surrounding region numbers.
#
# Parameters
# ----------
# regions : numpy array of integers
# region numbers for all vertices
# neighbor_lists : list of lists of integers
# each list contains indices to neighboring vertices for each vertex
# values : list of integers
# values for vertices, for use in determining which holes to remove
# exclude_range : list of two floats
# hole is not filled if it contains values within this range
# (prevents cases where surface connected by folds mistaken for holes)
# background_value : integer
# background value
#
# Returns
# -------
# regions : numpy array of integers
# region numbers for all vertices
#
# Examples
# --------
# >>> import numpy as np
# >>> from mindboggle.guts.mesh import fill_holes
# >>> from mindboggle.guts.mesh import find_neighbors_from_file
# >>> from mindboggle.mio.vtks import read_scalars
# >>> from mindboggle.mio.fetch_data import prep_tests
# >>> urls, fetch_data = prep_tests()
# >>> folds_file = fetch_data(urls['left_folds'], '', '.vtk')
# >>> background_value = -1
# >>> # Select one fold
# >>> folds, name = read_scalars(folds_file, True, True)
# >>> fold_number = 4
# >>> folds[folds != fold_number] = background_value
# >>> I = np.where(folds==fold_number)[0]
# >>> neighbor_lists = find_neighbors_from_file(folds_file)
# >>> ## Find vertex whose removal (with its neighbors) would create a hole:
# >>> #for index in I:
# ... # N1 = neighbor_lists[index]
# ... # stop = True
# ... # for n in N1:
# ... # if any(folds[neighbor_lists[n]] == background_value):
# ... # stop = False
# ... # break
# ... # else:
# ... # for f in neighbor_lists[n]:
# ... # if any(folds[neighbor_lists[f]] == background_value):
# ... # stop = False
# ... # break
# ... # if stop:
# ... # break
# >>> index = I[100]
# >>> N = neighbor_lists[index]
# >>> N.append(index)
# >>> N
# [36768, 37670, 36769, 37679, 38522, 38529, 37688, 37689, 37678]
# >>> folds[N] = background_value
# >>> I = [x for x in I if x not in N]
#
# View hole (skip test):
#
# >>> from mindboggle.mio.vtks import rewrite_scalars # doctest: +SKIP
# >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
# >>> rewrite_scalars(folds_file, 'hole.vtk', folds, 'hole', folds) # doctest: +SKIP
# >>> plot_surfaces('hole.vtk') # doctest: +SKIP
#
# Fill hole:
#
# >>> exclude_range = []
# >>> regions = np.copy(folds)
# >>> values = np.copy(folds)
# >>> regions = fill_holes(regions, neighbor_lists, values, exclude_range,
# ... background_value)
# >>> indices = [i for i,x in enumerate(regions) if x != background_value]
# >>> indices[0:10]
# [34148, 34149, 34150, 34151, 34152, 34153, 34154, 34155, 34157, 34158]
#
# View filled hole (skip test):
#
# >>> rewrite_scalars(folds_file, 'fill_hole.vtk', regions, 'fill_hole', regions) # doctest: +SKIP
# >>> plot_surfaces('fill_hole.vtk') # doctest: +SKIP
#
# """
# import numpy as np
# from mindboggle.guts.segment import segment
#
# # Make sure argument is a numpy array
# if not isinstance(regions, np.ndarray):
# regions = np.array(regions)
#
# def label_holes(holes, regions, neighbor_lists):
# """
# Fill holes in regions on a surface mesh.
#
# Parameters
# ----------
# holes : list or array of integers
# hole numbers for all vertices
# regions : numpy array of integers
# region numbers for all vertices
# neighbor_lists : list of lists of integers
# each list contains indices to neighboring vertices for each vertex
#
# Returns
# -------
# regions : numpy array of integers
# region numbers for all vertices
#
# """
# import numpy as np
#
# # Make sure argument is a numpy array
# if not isinstance(regions, np.ndarray):
# regions = np.array(regions)
#
# # Identify the vertices for each hole
# hole_numbers = [x for x in np.unique(holes) if x != background_value]
# for n_hole in hole_numbers:
# I = [i for i,x in enumerate(holes) if x == n_hole]
#
# # Identify neighbors to these vertices
# N=[]; [N.extend(neighbor_lists[i]) for i in I]
# if N:
#
# # Assign the hole the maximum region ID number of its neighbors
# regions[I] = max([regions[x] for x in N])
#
# return regions
#
# # ------------------------------------------------------------------------
# # Find boundaries to holes
# # ------------------------------------------------------------------------
# hole_boundaries = background_value * np.ones(len(regions))
#
# # Identify vertices for each region
# region_numbers = [x for x in np.unique(regions) if x != background_value]
# count = 0
# for n_region in region_numbers:
# region_indices = np.where(regions == n_region)[0]
#
# # Identify neighbors to these vertices and their neighbors
# N = []
# [N.extend(neighbor_lists[x]) for x in region_indices]
# N = list(frozenset(N).difference(region_indices))
# N2 = []
# [N2.extend(neighbor_lists[x]) for x in N]
# N.extend(N2)
# N = list(frozenset(N).difference(region_indices))
# if N:
#
# # Segment neighbors into connected vertices (region boundaries)
# boundaries = segment(N, neighbor_lists)
#
# # Remove the largest region boundary, presumably the
# # outer contour of the region, leaving smaller boundaries,
# # presumably the contours of holes within the region
# boundary_numbers = [x for x in np.unique(boundaries)
# if x != background_value]
# max_size = 0
# max_number = 0
# for n_boundary in boundary_numbers:
# border_indices = np.where(boundaries == n_boundary)[0]
# if len(border_indices) > max_size:
# max_size = len(border_indices)
# max_number = n_boundary
# boundaries[boundaries == max_number] = background_value
# boundary_numbers = [x for x in boundary_numbers if x != max_number]
#
# # Add remaining boundaries to holes array
# for n_boundary in boundary_numbers:
# indices = [i for i,x in enumerate(boundaries) if x == n_boundary]
# hole_boundaries[indices] = count
# count += 1
#
# # ------------------------------------------------------------------------
# # Fill holes
# # ------------------------------------------------------------------------
# # If there are any holes
# if count > 0:
# hole_numbers = [x for x in np.unique(hole_boundaries)
# if x != background_value]
# background = [i for i,x in enumerate(regions)
# if x == background_value]
#
# # Grow seeds from hole boundaries to fill holes
# for n_hole in hole_numbers:
# seed_list = np.where(hole_boundaries == n_hole)[0].tolist()
# seed_lists = [list(frozenset(background).intersection(seed_list))]
# hole = segment(background, neighbor_lists, 1, seed_lists)
#
# # Label the vertices for each hole by surrounding region number
# # if hole does not include values within exclude_range:
# if len(exclude_range) == 2:
# Ihole = np.where(hole != background_value)[0]
# #if not len(frozenset(values[Ihole]).intersection(exclude_range)):
# if not [x for x in values[Ihole]
# if x > exclude_range[0] if x < exclude_range[1]]:
# regions = label_holes(hole, regions, neighbor_lists)
# else:
# regions = label_holes(hole, regions, neighbor_lists)
#
# return regions
# def close_surface_pair(faces, points1, points2, scalars, background_value=-1):
# """
# Close a surface patch by connecting its border vertices with
# corresponding vertices in a second surface file.
#
# Assumes no lines or indices when reading VTK files.
#
# Note ::
#
# Scalar values different than background define the surface patch.
# The two sets of points have a 1-to-1 mapping; they are from
# two surfaces whose corresponding vertices are shifted in position.
# For pial vs. gray-white matter, the two surfaces are not parallel,
# so connecting the vertices leads to intersecting faces.
#
# Parameters
# ----------
# faces : list of lists of integers
# each sublist contains 3 indices of vertices that form a face
# on a surface mesh
# points1 : list of lists of floats
# each sublist contains 3-D coordinates of a vertex on a surface mesh
# points2 : list of lists of floats
# points from second surface with 1-to-1 correspondence with points1
# scalars : numpy array of integers
# labels used to find foreground vertices
# background_value : integer
# scalar value for background vertices
#
# Returns
# -------
# closed_faces : list of lists of integers
# indices of vertices that form a face on the closed surface mesh
# closed_points : list of lists of floats
# 3-D coordinates from points1 and points2
# closed_scalars : list of integers
# scalar values for points1 and points2
#
# Examples
# --------
# >>> # Build a cube by closing two parallel planes:
# >>> from mindboggle.guts.mesh import close_surface_pair
# >>> # Build plane:
# >>> background_value = -1
# >>> n = 10 # plane edge length
# >>> points1 = []
# >>> for x in range(n):
# ... for y in range(n):
# ... points1.append([x,y,0])
# >>> points2 = [[x[0],x[1],1] for x in points1]
# >>> scalars = [background_value for x in range(len(points1))]
# >>> p = int(n*(n-1)/2 - 1)
# >>> for i in [p, p+1, p+n, p+n+1]:
# ... scalars[i] = 1
# >>> faces = []
# >>> for x in range(n-1):
# ... for y in range(n-1):
# ... faces.append([x+y*n,x+n+y*n,x+n+1+y*n])
# ... faces.append([x+y*n,x+1+y*n,x+n+1+y*n])
# >>> #write_vtk('plane.vtk', points1, [], [], faces, scalars)
# >>> #plot_surfaces('plane.vtk')
# >>> closed_faces, closed_points, closed_scalars = close_surface_pair(faces,
# ... points1, points2, scalars, background_value)
# >>> closed_faces[0:4]
# [[44, 54, 55], [44, 45, 55], [144, 154, 155], [144, 145, 155]]
#
# View cube (skip test):
#
# >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
# >>> from mindboggle.mio.vtks import write_vtk # doctest: +SKIP
# >>> write_vtk('cube.vtk', closed_points, [],[], closed_faces,
# ... closed_scalars, 'int') # doctest: +SKIP
# >>> plot_surfaces('cube.vtk') # doctest: +SKIP
#
# """
# import sys
# import numpy as np
#
# from mindboggle.guts.mesh import find_neighbors, keep_faces
# from mindboggle.guts.segment import extract_borders
#
# if isinstance(scalars, list):
# scalars = np.array(scalars)
#
# N = len(points1)
# closed_points = points1 + points2
#
# # Find all vertex neighbors and surface patch border vertices:
# neighbor_lists = find_neighbors(faces, N)
# I = np.where(scalars != background_value)[0]
# scalars[scalars == background_value] = background_value + 1
# scalars[I] = background_value + 2
# scalars = scalars.tolist()
# borders, u1, u2 = extract_borders(list(range(N)), scalars, neighbor_lists)
# if not len(borders):
# sys.exit('There are no border vertices!')
# borders = [x for x in borders if x in I]
#
# # Reindex copy of faces and combine with original (both zero-index):
# indices = list(range(N))
# indices2 = list(range(N, 2 * N))
# reindex = dict([(index, indices2[i]) for i, index in enumerate(indices)])
# faces = keep_faces(faces, I)
# faces2 = [[reindex[i] for i in face] for face in faces]
# closed_faces = faces + faces2
#
# # Connect border vertices between surface patches and add new faces:
# add_faces = []
# taken_already = []
# for index in borders:
# if index not in taken_already:
# neighbors = list(set(neighbor_lists[index]).intersection(borders))
# taken_already.append(index)
# #taken_already.extend([index] + neighbors)
# for neighbor in neighbors:
# add_faces.append([index, index + N, neighbor])
# add_faces.append([index + N, neighbor, neighbor + N])
# closed_faces = closed_faces + add_faces
#
# closed_scalars = scalars * 2
#
# return closed_faces, closed_points, closed_scalars
# ============================================================================
# Doctests
# ============================================================================
if __name__ == "__main__":
    import doctest
    # Run all module-level doctests when this file is executed directly;
    # the same doctests can be run through pytest: py.test --doctest-modules
    doctest.testmod(verbose=True)
src/test/tests/hybrid/ddf_vs_dbinning.py | visit-dav/vis | 226 | 80871 | <gh_stars>100-1000
from visit_utils import *
import math
def setup_plot():
    """Open the rect3d test database and draw a Pseudocolor plot of zonal mass.

    Also defines the zonal coordinate and mass expressions that the
    ddf/data-binning comparison tests bin against.
    """
    DeleteAllPlots()
    OpenDatabase(silo_data_path("rect3d.silo"))
    # Expressions shared by all the binning strategies below.
    exprs.define("coords", "coord(quadmesh3d)", etype="vector")
    for expr_name, expr_def in (("mesh_x_zonal", "recenter(coords[0])"),
                                ("mesh_y_zonal", "recenter(coords[1])"),
                                ("mass", "d * volume(quadmesh3d)")):
        exprs.define(expr_name, expr_def)
    AddPlot("Pseudocolor", "mass")
    DrawPlots()
def ddf(opts):
    """Construct a DDF (derived data function) and post-process its output file.

    Parameters
    ----------
    opts : dict
        Keys: "name" (DDF name), "op" (reduction operator key, e.g. "sum"),
        "codomain" (variable reduced), "varnames" (binning variables),
        "ranges" (flattened min/max per variable), "samples" (bins per dim).

    Returns
    -------
    str or None
        Name of the rewritten .vtk output for multi-dimensional DDFs;
        the 1-D branch rewrites an .ult file and returns None.
    """
    # work around quirks related to the ddf pipeline expecting
    # vars to already exist
    predraw_vars = [opts["codomain"]]
    predraw_vars.extend(opts["varnames"])
    for v in predraw_vars:
        ChangeActivePlotsVar(v)
    atts = visit.ConstructDDFAttributes()
    # Map short operator keys to the ConstructDDFAttributes enum values.
    ddf_op_map = {"avg": atts.Average,
                  "min": atts.Minimum,
                  "max": atts.Maximum,
                  "stddev": atts.StandardDeviation,
                  "var": atts.Variance,
                  "sum": atts.Sum,
                  "count": atts.Count,
                  "rms": atts.RMS,
                  "pdf": atts.PDF}
    atts.ddfName = opts["name"]
    atts.codomainName = opts["codomain"]
    atts.varnames = opts["varnames"]
    atts.ranges = opts["ranges"]
    atts.numSamples = opts["samples"]
    atts.statisticalOperator = ddf_op_map[opts["op"]]
    visit.ConstructDDF(atts)
    ndims = len(atts.numSamples)
    ddf_varname = "%s_%s_%dd" % (opts["codomain"], opts["op"], ndims)
    if len(atts.numSamples) == 1:
        # 1-D DDFs land in an ultra curve file; rename it and rewrite the
        # header line to carry the derived variable name.
        src_fname = "%s.ultra" % atts.ddfName
        des_fname = "%s.ult" % (atts.ddfName)
        common.sexe("mv %s %s" % (src_fname, des_fname))
        # Use context managers so the handles are always closed (the
        # original code leaked both the read and write handles).
        with open(des_fname) as fsrc:
            lines = fsrc.readlines()
        with open(des_fname, "w") as f:
            f.write("# %s\n" % (ddf_varname))
            for l in lines[1:]:
                f.write(l)
    else:
        # Multi-dimensional DDFs land in a VTK file; rename the scalar
        # array inside it to the derived variable name.
        ofname = "%s.vtk" % atts.ddfName
        orig_vtk_var = "SCALARS %s float" % opts["codomain"]
        ddf_vtk_var = "SCALARS %s float" % ddf_varname
        with open(ofname) as fsrc:
            data = fsrc.read()
        data = data.replace(orig_vtk_var, ddf_vtk_var)
        with open(ofname, "w") as f:
            f.write(data)
        print("[ddf output: %s]" % ofname)
        return ofname
def test_orig_mass():
    """Baseline: render the input plot and return the direct "Variable Sum"."""
    setup_plot()
    Test("ddf_vs_dbinning_input_plot")
    mass_sum = query("Variable Sum")
    DeleteAllPlots()
    return mass_sum
def test_dbinning_using_coords():
    """Sum mass into a 10x10 grid with the DataBinning operator, binning
    directly on the X/Y coordinates, and return the resulting "Variable Sum".

    The binned result is exported to VTK, its scalar array renamed to match
    the DDF naming convention (mass_sum_2d), re-opened, plotted, and queried.
    """
    setup_plot()
    AddOperator("DataBinning")
    # Configure a 2-D sum of "mass" over 10x10 spatial bins.
    datts = DataBinningAttributes()
    datts.numDimensions = datts.Two
    datts.dim1BinBasedOn = datts.X
    datts.dim1SpecifyRange = 0
    datts.dim1NumBins = 10
    datts.dim2BinBasedOn = datts.Y
    datts.dim2SpecifyRange = 0
    datts.dim2NumBins = 10
    datts.outOfBoundsBehavior = datts.Clamp
    datts.reductionOperator = datts.Sum
    datts.varForReduction = "mass"
    datts.emptyVal = 0
    datts.outputType = datts.OutputOnBins
    SetOperatorOptions(datts)
    DrawPlots()
    # we have to export b/c we can't query the
    # result of the operated created expr ...
    ofname = "dbin_mass_sum_using_coords"
    eatts = ExportDBAttributes()
    eatts.db_type = "VTK"
    eatts.filename = ofname
    ExportDatabase(eatts)
    DeleteAllPlots()
    # Rename the exported scalar ("operators/DataBinning") to the DDF-style
    # name so both pipelines produce comparable files.
    dbin_varname = "%s_%s_%dd" % ("mass","sum",2)
    ofname += ".vtk"
    orig_vtk_var = "SCALARS %s float" % "operators/DataBinning"
    ddf_vtk_var = "SCALARS %s float" % dbin_varname
    data = open(ofname).read()
    f = open(ofname, "w")
    data = data.replace(orig_vtk_var,ddf_vtk_var)
    f.write(data)
    f.close()
    # Re-open the rewritten VTK file, render it, and query the total.
    OpenDatabase(ofname)
    AddPlot("Pseudocolor","mass_sum_2d")
    DrawPlots()
    Test("ddf_vs_dbinning_dbin_coords_result")
    res = query("Variable Sum")
    DeleteAllPlots()
    CloseDatabase(ofname)
    return res
def test_dbinning_using_coords_exprs():
    """Sum mass into a 10x10 grid with the DataBinning operator, binning on
    the zonal coordinate *expressions* (mesh_x_zonal / mesh_y_zonal) instead
    of raw coordinates, and return the resulting "Variable Sum".

    Mirrors test_dbinning_using_coords(); only the bin-basis differs.
    """
    setup_plot()
    AddOperator("DataBinning")
    # Same 2-D sum of "mass", but bin on the recentered coordinate exprs.
    datts = DataBinningAttributes()
    datts.numDimensions = datts.Two
    datts.dim1BinBasedOn = datts.Variable
    datts.dim1Var = "mesh_x_zonal"
    datts.dim1SpecifyRange = 0
    datts.dim1NumBins = 10
    datts.dim2BinBasedOn = datts.Variable
    datts.dim2Var = "mesh_y_zonal"
    datts.dim2SpecifyRange = 0
    datts.dim2NumBins = 10
    datts.outOfBoundsBehavior = datts.Clamp
    datts.reductionOperator = datts.Sum
    datts.varForReduction = "mass"
    datts.emptyVal = 0
    datts.outputType = datts.OutputOnBins
    SetOperatorOptions(datts)
    DrawPlots()
    # we have to export b/c we can't query the
    # result of the operated created expr ...
    ofname = "dbin_mass_sum_using_coords_exprs"
    eatts = ExportDBAttributes()
    eatts.db_type = "VTK"
    eatts.filename = ofname
    ExportDatabase(eatts)
    DeleteAllPlots()
    # Rename the exported scalar to the DDF-style name for comparability.
    dbin_varname = "%s_%s_%dd" % ("mass","sum",2)
    ofname += ".vtk"
    orig_vtk_var = "SCALARS %s float" % "operators/DataBinning"
    ddf_vtk_var = "SCALARS %s float" % dbin_varname
    data = open(ofname).read()
    f = open(ofname, "w")
    data = data.replace(orig_vtk_var,ddf_vtk_var)
    f.write(data)
    f.close()
    # Re-open the rewritten VTK file, render it, and query the total.
    OpenDatabase(ofname)
    AddPlot("Pseudocolor","mass_sum_2d")
    DrawPlots()
    Test("ddf_vs_dbinning_dbin_coords_exprs_result")
    res = query("Variable Sum")
    DeleteAllPlots()
    CloseDatabase(ofname)
    return res
def test_ddf():
    """Sum mass via the ConstructDDF pipeline and return its "Variable Sum".

    Builds a 2-D (10x10) sum of "mass" binned over the zonal coordinate
    expressions, re-opens the rewritten VTK output, renders, and queries it.
    """
    setup_plot()
    opts = dict(name="ddf_mass_sum",
                op="sum",
                codomain="mass",
                varnames=("mesh_x_zonal", "mesh_y_zonal"),
                ranges=(0, 1, 0, 1),
                samples=(10, 10))
    ddf(opts)
    DeleteAllPlots()
    OpenDatabase("ddf_mass_sum.vtk")
    AddPlot("Pseudocolor", "mass_sum_2d")
    DrawPlots()
    Test("ddf_vs_dbinning_ddf_result")
    mass_sum = query("Variable Sum")
    DeleteAllPlots()
    CloseDatabase("ddf_mass_sum.vtk")
    return mass_sum
# Run the baseline query and each of the three binning strategies, record
# their totals, then verify every strategy agrees with the baseline.
orig_val = test_orig_mass()
ddf_val = test_ddf()
dbin_coords_val = test_dbinning_using_coords()
dbin_cexprs_val = test_dbinning_using_coords_exprs()
TestText("Orig","Mass Sum = %s" % orig_val)
TestText("DDF","Mass Sum = %s" % ddf_val)
TestText("DBIN with Coords","Mass Sum = %s" % dbin_coords_val)
TestText("DBIN with Coords Exprs","Mass Sum = %s" % dbin_cexprs_val)
# All binned sums must match the direct variable sum to within 1e-4.
TestValueLT("Orig Equals DDF",abs(orig_val - ddf_val), 1e-4 )
TestValueLT("Orig Equals DBIN with Coords",abs(orig_val - dbin_coords_val), 1e-4 )
TestValueLT("Orig Equals DBIN with Coords Exprs",abs(orig_val - dbin_cexprs_val), 1e-4 )
Exit()
|
facemask_detector/model.py | anshul2807/Automation-scripts | 496 | 80873 | # Author - Abhinand --> https://github.com/abhinand5
# =====================================================================
# IMPORTS
# ======================================================================
import torch
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
import pkbar
# =====================================================================
# PyTorch class for the model
# ======================================================================
class FaceMaskDetector(object):
    """Faster R-CNN (ResNet-50 FPN) wrapper for face-mask detection.

    Wraps model construction, a simple SGD training loop, inference,
    and checkpoint save/load around a torchvision detection model.
    """

    def __init__(self, data_loader, device, pretrained=True):
        # data_loader: yields (images, annotations) batches
        #   (presumably a torch DataLoader with a detection-style collate --
        #   TODO confirm against the caller)
        # device: torch device to train/predict on
        # pretrained: load COCO-pretrained backbone weights if True
        self.pretrained = pretrained
        self.data_loader = data_loader
        self.device = device

    def build_model(self, n_classes):
        """Build the Faster R-CNN model with a box predictor sized for
        n_classes foreground classes (+1 for the implicit background class)."""
        model = torchvision.models.detection.fasterrcnn_resnet50_fpn(
            pretrained=self.pretrained
        )
        # Swap the pretrained box-predictor head for one matching our classes.
        in_features = model.roi_heads.box_predictor.cls_score.in_features
        model.roi_heads.box_predictor = FastRCNNPredictor(
            in_features,
            n_classes + 1
        )
        self.model = model

    def train(self, n_epochs, learning_rate):
        """Train for n_epochs with SGD; return the list of per-epoch losses."""
        dl_len = len(self.data_loader)
        self.model.to(self.device)
        # Only optimize parameters that require gradients.
        params = [p for p in self.model.parameters() if p.requires_grad]
        optimizer = torch.optim.SGD(
            params, lr=learning_rate, momentum=0.9, weight_decay=0.0005
        )
        losses_per_ep = []
        for epoch in range(n_epochs):
            self.model.train()
            ep_loss = 0
            # pkbar progress bar for this epoch.
            kbar = pkbar.Kbar(
                target=dl_len,
                epoch=epoch,
                num_epochs=n_epochs,
                width=20,
                always_stateful=True,
            )
            for i, (images, annotations) in enumerate(self.data_loader):
                images = list(image.to(self.device) for image in images)
                annotations = [
                    {
                        k: v.to(self.device)
                        for k, v in t.items()
                    } for t in annotations
                ]
                # NOTE(review): only the FIRST sample of each batch is fed to
                # the model; the rest of the batch is moved to device but
                # unused. Confirm whether this is an intentional memory
                # workaround or a bug.
                losses = self.model([images[0]], [annotations[0]])
                # In train mode the torchvision detector returns a dict of
                # component losses; sum them into a single scalar.
                loss = sum(loss for loss in losses.values())
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                ep_loss += loss.item()
                # Reported "loss" is the running epoch total, not the mean.
                kbar.update(i, values=[("loss", ep_loss)])
            losses_per_ep.append(ep_loss)
            kbar.add(1)
        return losses_per_ep

    def predict(self, images):
        """Run inference on a list of image tensors; return the predictions."""
        self.model.to(self.device)
        self.model.eval()
        preds = self.model(images)
        return preds

    def save_model(self, path):
        """Save the model's state_dict to path."""
        torch.save(self.model.state_dict(), path)

    def load_model(self, path):
        """Load a state_dict from path into the (already built) model."""
        self.model.load_state_dict(torch.load(path))
|
src/main/python/apache/aurora/executor/common/resource_manager.py | jeremyvdw/aurora | 479 | 80885 | <gh_stars>100-1000
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import threading
from mesos.interface import mesos_pb2
from twitter.common.metrics import LambdaGauge
from apache.aurora.executor.common.status_checker import (
StatusChecker,
StatusCheckerProvider,
StatusResult
)
from apache.aurora.executor.common.task_info import mesos_task_instance_from_assigned_task
from apache.thermos.monitoring.monitor import TaskMonitor
from apache.thermos.monitoring.resource import TaskResourceMonitor
class ResourceManager(StatusChecker):
    """ Manage resources consumed by a Task """

    def __init__(self, resources, resource_monitor):
        """
        resources: Resources object specifying cpu, ram, disk limits for the task
        resource_monitor: The ResourceMonitor to monitor resources
        """
        self._resource_monitor = resource_monitor
        # TODO(wickman) Remove cpu/ram reporting if MESOS-1458 is resolved.
        # Reserved limits, captured once; only disk is actually enforced below.
        self._max_cpu = resources.cpu().get()
        self._max_ram = resources.ram().get()
        self._max_disk = resources.disk().get()
        self._kill_reason = None
        self._kill_event = threading.Event()

    @property
    def _num_procs(self):
        """ Total number of processes the task consists of (including child processes) """
        return self._resource_monitor.sample()[1].num_procs

    @property
    def _ps_sample(self):
        """ ProcessSample representing the aggregate resource consumption of the Task's processes """
        return self._resource_monitor.sample()[1].process_sample

    @property
    def _disk_sample(self):
        """ Integer in bytes representing the disk consumption in the Task's sandbox """
        return self._resource_monitor.sample()[1].disk_usage

    @property
    def status(self):
        # Enforce only the disk limit: fail the task when sandbox usage
        # exceeds the reservation. Falls through (implicitly returning None)
        # when under the limit -- presumably None signals "healthy" per the
        # StatusChecker contract; verify against the base class.
        sample = self._disk_sample
        if sample > self._max_disk:
            self._kill_event.set()
            return StatusResult('Disk limit exceeded. Reserved %s bytes vs used %s bytes.' % (
                self._max_disk, sample), mesos_pb2.TASK_FAILED)

    def name(self):
        return 'resource_manager'

    def register_metrics(self):
        # Export used/reserved/percent gauges for disk, cpu, and ram; each
        # gauge samples lazily via the resource monitor on read.
        self.metrics.register(LambdaGauge('disk_used', lambda: self._disk_sample))
        self.metrics.register(LambdaGauge('disk_reserved', lambda: self._max_disk))
        self.metrics.register(LambdaGauge('disk_percent',
            lambda: 1.0 * self._disk_sample / self._max_disk))
        self.metrics.register(LambdaGauge('cpu_used', lambda: self._ps_sample.rate))
        self.metrics.register(LambdaGauge('cpu_reserved', lambda: self._max_cpu))
        self.metrics.register(LambdaGauge('cpu_percent',
            lambda: 1.0 * self._ps_sample.rate / self._max_cpu))
        self.metrics.register(LambdaGauge('ram_used', lambda: self._ps_sample.rss))
        self.metrics.register(LambdaGauge('ram_reserved', lambda: self._max_ram))
        self.metrics.register(LambdaGauge('ram_percent',
            lambda: 1.0 * self._ps_sample.rss / self._max_ram))

    def start(self):
        # Register metrics and start the sampling thread after the base
        # checker starts.
        super(ResourceManager, self).start()
        self.register_metrics()
        self._resource_monitor.start()
class ResourceManagerProvider(StatusCheckerProvider):
    """Builds ResourceManager status checkers for assigned tasks."""

    def __init__(self, checkpoint_root, **resource_monitor_options):
        self._checkpoint_root = checkpoint_root
        self._resource_monitor_options = resource_monitor_options

    def from_assigned_task(self, assigned_task, sandbox):
        """Create a ResourceManager that enforces the task's resource limits."""
        task_id = assigned_task.taskId
        mesos_instance = mesos_task_instance_from_assigned_task(assigned_task)
        resources = mesos_instance.task().resources()
        monitor = TaskResourceMonitor(
            task_id,
            TaskMonitor(self._checkpoint_root, task_id),
            **self._resource_monitor_options)
        return ResourceManager(resources, monitor)
|
etl/parsers/etw/Microsoft_Windows_MediaFoundation_Performance_Core.py | IMULMUL/etl-parser | 104 | 80894 | # -*- coding: utf-8 -*-
"""
Microsoft-Windows-MediaFoundation-Performance-Core
GUID : b20e65ac-c905-4014-8f78-1b6a508142eb
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
# ----------------------------------------------------------------------------
# One Etw subclass per (event id, version) of the
# Microsoft-Windows-MediaFoundation-Performance-Core provider
# (GUID b20e65ac-c905-4014-8f78-1b6a508142eb). Each class declares only a
# `pattern` -- a construct Struct describing the event's payload layout --
# and the @declare decorator registers it for ETW record dispatch.
# NOTE(review): generated-style boilerplate; field names/types presumably
# mirror the provider manifest and should not be edited by hand.
# ----------------------------------------------------------------------------
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=1, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_1_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "WorkQueueId" / Int64ul,
        "IsMultithread" / Int8ul
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=2, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_2_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "m_u32StreamingPeriodMS" / Int32ul,
        "m_u32RenderBufferSizeInFrames" / Int32ul,
        "m_ui64ClockTicksPerSecond" / Int64ul,
        "m_u32AudioClientType" / Int32ul,
        "IsEventDriven" / Int8ul,
        "FillSilenceWhenStarving" / Int8ul,
        "FillCompressedSilenceWhenStarving" / Int8ul,
        "DropLateData" / Int8ul,
        "hResult" / Int32ul
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=3, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_3_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=4, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_4_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=5, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_5_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=6, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_6_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=7, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_7_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=8, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_8_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=9, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_9_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "SystemTime" / Int64sl,
        "mfsState" / Int32ul
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=10, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_10_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=11, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_11_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=12, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_12_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul,
        "pullPlayPosition" / Int64sl,
        "pullRawPlayPosition" / Int64sl,
        "pullRawWritePosition" / Int64sl,
        "pullDevicePosition" / Int64sl,
        "phnsCorrelatedTime" / Int64sl
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=13, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_13_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "dwBytesWanted" / Int32ul,
        "u32FramesToRender" / Int32ul
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=14, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_14_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=15, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_15_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "dwBytesWanted" / Int32ul,
        "m_bEOSReceived" / Int8ul
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=16, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_16_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "dwBytesWanted" / Int32ul
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=17, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_17_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "ullEOSPosition" / Int64sl,
        "m_bIsEventDriven" / Int8ul,
        "IsOffloadedStream" / Int8ul,
        "IsOffloadedCompressedStream" / Int8ul
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=18, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_18_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=19, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_19_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=20, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_20_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=21, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_21_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=22, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_22_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=23, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_23_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "bFirstFill" / Int8ul
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=24, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_24_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "u32CurrentPadding" / Int32ul,
        "u32FramesToRender" / Int32ul,
        "u32TimeLeft" / Int32ul
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=25, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_25_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "FrameCount" / Int32ul
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=26, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_26_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "BytesInUse" / Int32ul,
        "dwBytesWanted" / Int32ul
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=27, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_27_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "dwBytesStillWanted" / Int32ul
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=28, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_28_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=29, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_29_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "bEOS" / Int8ul
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=30, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_30_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=31, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_31_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hr" / Int32sl,
        "fInserted" / Int8ul
    )


@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=32, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_32_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "fFlushed" / Int8ul
    )
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=33, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_33_0(Etw):
pattern = Struct(
"object" / Int64ul,
"bEngineStarted" / Int8ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=34, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_34_0(Etw):
pattern = Struct(
"object" / Int64ul,
"hResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=35, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_35_0(Etw):
pattern = Struct(
"object" / Int64ul,
"bReset" / Int8ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=36, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_36_0(Etw):
pattern = Struct(
"object" / Int64ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=37, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_37_0(Etw):
pattern = Struct(
"object" / Int64ul,
"hResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=38, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_38_0(Etw):
pattern = Struct(
"object" / Int64ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=39, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_39_0(Etw):
pattern = Struct(
"object" / Int64ul,
"bEngineStarted" / Int8ul,
"bIsEventDriven" / Int8ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=40, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_40_0(Etw):
pattern = Struct(
"object" / Int64ul,
"hResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=41, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_41_0(Etw):
pattern = Struct(
"object" / Int64ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=42, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_42_0(Etw):
pattern = Struct(
"object" / Int64ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=43, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_43_0(Etw):
pattern = Struct(
"object" / Int64ul,
"hResult" / Int32ul
)
# --- Microsoft-Windows-MediaFoundation-Performance provider (Core), events 44-70 ---
# Auto-generated ETW payload templates (see the group header above event 16).
# Event 53 carries the audio-stream creation parameters (flags, endpoint role,
# category, latency/offload options); events 58/60/61 carry sample timing
# fields in 100-ns units (hns* prefix) — presumably MFTIME, TODO confirm
# against the provider manifest.
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=44, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_44_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=45, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_45_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=46, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_46_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=47, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_47_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=48, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_48_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "fFillBuffer" / Int8ul,
        "bIsEventDriven" / Int8ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=49, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_49_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=50, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_50_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=51, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_51_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=52, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_52_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "DisconnectReason" / Int32ul,
        "bReacquire" / Int8ul
    )

# Stream-creation parameters for the audio path.
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=53, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_53_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "dwFlags" / Int32ul,
        "ui32EndpointRole" / Int32ul,
        "eCategory" / Int32ul,
        "bIsLowLatency" / Int8ul,
        "bBufferDurationSpecified" / Int8ul,
        "hnsBufferDuration" / Int64sl,
        "bOnlyAudio" / Int8ul,
        "bDisableOffload" / Int8ul,
        "bNonSeekableStream" / Int8ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=54, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_54_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=55, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_55_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=56, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_56_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=57, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_57_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=58, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_58_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hnsSampleTime" / Int64sl,
        "hnsSampleDuration" / Int64sl
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=59, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_59_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=60, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_60_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "mfRenderTime" / Int64sl,
        "fDiscontinuity" / Int8ul,
        "mfAudioState" / Int32ul,
        "IsRateZero" / Int8ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=61, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_61_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hnsSampleTime" / Int64sl,
        "hnsSampleDuration" / Int64sl,
        "bPrerollSample" / Int8ul,
        "bDelayedSample" / Int8ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=62, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_62_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "scenario" / Int32ul,
        "fSignalPrerolled" / Int8ul,
        "m_cSamplesPrerolled" / Int32ul,
        "m_hnsPrerollDuration" / Int64sl,
        "m_u32CurrentPrerolledBytes" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=63, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_63_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hnsSampleDuration" / Int64sl,
        "m_hnsShortSampleTolerance" / Int64sl,
        "m_cMaxPendingRequestSample" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=64, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_64_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=65, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_65_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "MarkerType" / Int32sl
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=66, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_66_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "ControlPoint" / Int32sl,
        "Type" / Int32sl,
        "Value" / Int64sl
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=67, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_67_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=68, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_68_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "bFlushPreroll" / Int32sl
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=69, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_69_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32sl,
        "fDiscontinuity" / Int8ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=70, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_70_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )
# --- Microsoft-Windows-MediaFoundation-Performance provider (Core), events 71-100 ---
# Auto-generated ETW payload templates (see the group header above event 16).
# Events 82-99 carry clock/correlated-time fields; the many single-field
# object / object+hResult pairs are presumably begin/end markers for scoped
# operations — verify against the provider manifest before relying on that.
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=71, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_71_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=72, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_72_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=73, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_73_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=74, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_74_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=75, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_75_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=76, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_76_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=77, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_77_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=78, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_78_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=79, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_79_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "IsUninitialized" / Int8ul,
        "bInvalidatingStream" / Int8ul,
        "mfaOriginalState" / Int32sl
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=80, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_80_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=81, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_81_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=82, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_82_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32sl,
        "phnsTimeNow" / Int64sl,
        "phnsCorrelatedTime" / Int64sl
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=83, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_83_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=84, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_84_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "phnsTimeNow" / Int64sl,
        "m_mftMaxTimePriorToStreamSwitch" / Int64sl
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=85, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_85_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "phnsTimeNow" / Int64sl,
        "phnsCorrelatedTime" / Int64sl,
        "m_bInvalidatingStream" / Int8ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=86, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_86_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=87, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_87_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hnsSystemTime" / Int64sl,
        "llClockStartOffset" / Int64sl
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=88, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_88_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=89, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_89_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=90, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_90_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=91, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_91_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "StartOffset" / Int64sl,
        "mftStartOffset" / Int64sl,
        "bResetGapAndStallHandling" / Int8ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=92, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_92_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32sl,
        "StartOffset" / Int64sl
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=93, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_93_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=94, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_94_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=95, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_95_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hnsSystemTime" / Int64sl
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=96, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_96_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=97, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_97_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hnsSystemTime" / Int64sl
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=98, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_98_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=99, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_99_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hnsSystemTime" / Int64sl,
        "IsRateZero" / Int8ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=100, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_100_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )
# --- Microsoft-Windows-MediaFoundation-Performance provider (Core), events 101-128 ---
# Auto-generated ETW payload templates (see the group header above event 16).
# Events 106/121/123/127 expose byte positions and correlated times of the
# render path; field names starting with "p" (pfDiscontinuity,
# pullRenderBytePosition, ...) mirror the C++ out-parameter names in the
# manifest — the payload holds the pointed-to values, not pointers.
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=101, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_101_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=102, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_102_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=103, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_103_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "dwFlags" / Int32sl,
        "bUseResampler" / Int8ul,
        "bClockRateMatchEnabled" / Int8ul,
        "bUseLightWeightConverters" / Int8ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=104, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_104_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32sl,
        "m_bIsOffloadStream" / Int8ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=105, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_105_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=106, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_106_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32sl,
        "pullBytePosition" / Int64sl,
        "phnsCorrelatedTime" / Int64sl,
        "m_bIsCompressedStream" / Int8ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=107, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_107_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "dwSamples" / Int32sl
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=108, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_108_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=109, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_109_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "MarkerType" / Int32sl
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=110, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_110_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=111, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_111_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=112, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_112_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=113, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_113_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=114, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_114_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=115, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_115_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=116, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_116_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=117, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_117_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=118, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_118_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=119, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_119_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=120, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_120_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "mfRenderTime" / Int64sl
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=121, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_121_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32sl,
        "pfDiscontinuity" / Int8ul,
        "pullRenderBytePosition" / Int64sl,
        "pdwBytesToStall" / Int32sl
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=122, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_122_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "pfDiscontinuity" / Int8ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=123, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_123_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32sl,
        "pullRenderBytePosition" / Int64sl
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=124, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_124_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=125, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_125_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=126, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_126_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "bConvertToMFPos" / Int8ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=127, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_127_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32sl,
        "pullPlayPosition" / Int64sl,
        "pullWritePosition" / Int64sl,
        "pullDevicePlayPosition" / Int64sl,
        "phnsCorrelatedTime" / Int64sl
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=128, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_128_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "mftTrimAmount" / Int64sl,
        "mftCutoff" / Int64sl,
        "bTrimFromFront" / Int8ul
    )
# --- Microsoft-Windows-MediaFoundation-Performance provider (Core), events 129-151 ---
# Auto-generated ETW payload templates (see the group header above event 16).
# Events 131-137 report pending-sample-request/allocator statistics; note that
# 131-134 share an identical payload layout (only the event_id distinguishes
# them), and 136 uses "MaxPendingRequestSample" where its neighbors use the
# "cMaxPendingRequestSample" spelling — kept as generated.
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=129, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_129_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=130, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_130_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "bEnable" / Int8ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=131, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_131_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "cPendingRequestSample" / Int32sl,
        "cMaxPendingRequestSample" / Int32sl
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=132, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_132_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "cPendingRequestSample" / Int32sl,
        "cMaxPendingRequestSample" / Int32sl
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=133, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_133_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "cPendingRequestSample" / Int32sl,
        "cMaxPendingRequestSample" / Int32sl
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=134, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_134_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "cPendingRequestSample" / Int32sl,
        "cMaxPendingRequestSample" / Int32sl
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=135, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_135_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "cMaxPendingRequestSample" / Int32sl,
        "NumContainers" / Int32sl,
        "BytesInUse" / Int32sl
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=136, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_136_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "MaxPendingRequestSample" / Int32sl,
        "NumContainers" / Int32sl,
        "DurationInUse" / Int64sl,
        "hnsMinAllocation" / Int64sl
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=137, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_137_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "cMaxPendingRequestSample" / Int32sl,
        "NumContainers" / Int32sl
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=138, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_138_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=139, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_139_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=140, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_140_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=141, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_141_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=142, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_142_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=143, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_143_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "TimeOutinMm" / Int32sl
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=144, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_144_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=145, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_145_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=146, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_146_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=147, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_147_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "bNeedFormatNegotiation" / Int8ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=148, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_148_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "mfaOriginalStreamState" / Int32sl
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=149, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_149_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=150, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_150_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=151, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_151_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hnsStreamInvalidationEventTime" / Int64sl,
        "hnsLastCorrelatedTime" / Int64sl,
        "hnsTimeElapsed" / Int64sl,
        "hnsLastTime" / Int64sl,
        "hnsNewLastTime" / Int64sl
    )
# --- Microsoft-Windows-MediaFoundation-Performance provider (Core), events 152-175 ---
# Auto-generated ETW payload templates (see the group header above event 16).
# Events 156-163 cover device-change / device-event handling (bDeviceChange,
# bReacquireDevice, eventType); the enum semantics of eventType / dwNewState
# are defined by the provider manifest, not visible here.
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=152, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_152_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=153, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_153_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "bNeedFormatNegotiation" / Int8ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=154, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_154_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=155, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_155_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=156, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_156_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "bDeviceChange" / Int8ul,
        "hnsNewStreamStartTime" / Int64sl
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=157, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_157_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "bStopped" / Int8ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=158, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_158_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=159, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_159_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=160, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_160_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "eventType" / Int32sl,
        "bReacquireDevice" / Int8ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=161, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_161_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=162, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_162_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=163, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_163_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "eventType" / Int32sl
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=164, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_164_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=165, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_165_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=166, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_166_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=167, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_167_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=168, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_168_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=169, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_169_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "dwNewState" / Int32sl
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=170, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_170_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=171, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_171_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=172, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_172_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=173, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_173_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=174, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_174_0(Etw):
    pattern = Struct(
        "object" / Int64ul
    )

@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=175, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_175_0(Etw):
    pattern = Struct(
        "object" / Int64ul,
        "hResult" / Int32ul
    )
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=176, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_176_0(Etw):
pattern = Struct(
"object" / Int64ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=177, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_177_0(Etw):
pattern = Struct(
"object" / Int64ul,
"MFTimeOfLastRenderSample" / Int64sl
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=178, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_178_0(Etw):
pattern = Struct(
"object" / Int64ul,
"u32FramesToRender" / Int32sl
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=179, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_179_0(Etw):
pattern = Struct(
"object" / Int64ul,
"ClockTime" / Int64sl,
"CorrelatedTime" / Int64sl,
"IsStreamInvalidating" / Int8ul,
"hResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=180, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_180_0(Etw):
pattern = Struct(
"object" / Int64ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=181, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_181_0(Etw):
pattern = Struct(
"object" / Int64ul,
"m_pCurrentMediaType" / Int64sl,
"IsStreamInvalidating" / Int8ul,
"hrAEFormatQuery" / Int32ul,
"hFormatResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=182, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_182_0(Etw):
pattern = Struct(
"object" / Int64ul,
"wFormatTag" / Int32ul,
"nChannels" / Int16ul,
"nSamplesPerSec" / Int32ul,
"nAvgBytesPerSec" / Int32ul,
"nBlockAlign" / Int16ul,
"wBitsPerSample" / Int16ul,
"cbSize" / Int16ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=183, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_183_0(Etw):
pattern = Struct(
"object" / Int64ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=184, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_184_0(Etw):
pattern = Struct(
"object" / Int64ul,
"clientType" / Int32ul,
"hFormatResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=185, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_185_0(Etw):
pattern = Struct(
"object" / Int64ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=186, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_186_0(Etw):
pattern = Struct(
"object" / Int64ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=187, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_187_0(Etw):
pattern = Struct(
"object" / Int64ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=188, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_188_0(Etw):
pattern = Struct(
"object" / Int64ul,
"clientType" / Int32ul,
"hFormatResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=189, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_189_0(Etw):
pattern = Struct(
"object" / Int64ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=190, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_190_0(Etw):
pattern = Struct(
"object" / Int64ul,
"bufferDuration" / Int64sl,
"hResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=191, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_191_0(Etw):
pattern = Struct(
"object" / Int64ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=192, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_192_0(Etw):
pattern = Struct(
"object" / Int64ul,
"hResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=193, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_193_0(Etw):
pattern = Struct(
"object" / Int64ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=194, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_194_0(Etw):
pattern = Struct(
"object" / Int64ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=195, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_195_0(Etw):
pattern = Struct(
"object" / Int64ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=196, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_196_0(Etw):
pattern = Struct(
"object" / Int64ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=197, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_197_0(Etw):
pattern = Struct(
"object" / Int64ul,
"Key" / Int64ul,
"Delay" / Int64ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=198, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_198_0(Etw):
pattern = Struct(
"object" / Int64ul,
"Key" / Int64ul,
"Delay" / Int64ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=199, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_199_0(Etw):
pattern = Struct(
"object" / Int64ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=200, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_200_0(Etw):
pattern = Struct(
"object" / Int64ul,
"ClockTime0_us" / Int64sl,
"QPC0_us" / Int64sl,
"SmoothedQPC0_us" / Int64sl,
"QPCDelta_us" / Int64sl,
"WindowCount" / Int32sl,
"WindowWidth_us" / Int64sl,
"Accepted" / Int8ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=500, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_500_0(Etw):
pattern = Struct(
"object" / Int64ul,
"u32FramesRead" / Int32ul,
"m_u64LastSampleTime" / Int64ul,
"u64Duration" / Int64ul,
"dwFlagsForSample" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=501, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_501_0(Etw):
pattern = Struct(
"object" / Int64ul,
"m_pParent" / Int64ul,
"eventType" / Int32sl
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=502, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_502_0(Etw):
pattern = Struct(
"object" / Int64ul,
"m_bAudioProcessingRaw" / Int8ul,
"m_bIsEventDriven" / Int8ul,
"m_bIsLowLatency" / Int8ul,
"m_hnsBufferDuration" / Int64sl,
"m_uiAudioCategory" / Int32ul,
"hResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=503, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_503_0(Etw):
pattern = Struct(
"object" / Int64ul,
"m_spAudioSessionControl" / Int64ul,
"m_spAudioSessionEvents" / Int64ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=504, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_504_0(Etw):
pattern = Struct(
"object" / Int64ul,
"hResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=505, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_505_0(Etw):
pattern = Struct(
"object" / Int64ul,
"wstrEndpointId" / WString,
"role" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=506, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_506_0(Etw):
pattern = Struct(
"object" / Int64ul,
"hResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=507, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_507_0(Etw):
pattern = Struct(
"object" / Int64ul,
"wstrEndpointId" / WString,
"role" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=508, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_508_0(Etw):
pattern = Struct(
"object" / Int64ul,
"uFailedLineNumber" / Int32ul,
"hResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=509, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_509_0(Etw):
pattern = Struct(
"object" / Int64ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=510, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_510_0(Etw):
pattern = Struct(
"object" / Int64ul,
"AudioClientProperties_bIsOffload" / Int8ul,
"AudioClientProperties_eCategory" / Int32ul,
"AudioClientProperties_Options" / Int32ul,
"hResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=511, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_511_0(Etw):
pattern = Struct(
"object" / Int64ul,
"m_spAudioClientForStreaming" / Int64ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=512, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_512_0(Etw):
pattern = Struct(
"object" / Int64ul,
"m_u32BytesPerFrame" / Int32ul,
"m_u32FramesPerSecond" / Int32ul,
"hResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=513, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_513_0(Etw):
pattern = Struct(
"object" / Int64ul,
"bFirstRead" / Int8ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=514, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_514_0(Etw):
pattern = Struct(
"object" / Int64ul,
"hResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=515, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_515_0(Etw):
pattern = Struct(
"object" / Int64ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=516, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_516_0(Etw):
pattern = Struct(
"object" / Int64ul,
"hResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=517, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_517_0(Etw):
pattern = Struct(
"object" / Int64ul,
"m_bEngineStarted" / Int8ul,
"m_bIsEventDriven" / Int8ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=518, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_518_0(Etw):
pattern = Struct(
"object" / Int64ul,
"m_bEngineStarted" / Int8ul,
"hResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=519, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_519_0(Etw):
pattern = Struct(
"object" / Int64ul,
"bReset" / Int8ul,
"m_bEngineStarted" / Int8ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=520, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_520_0(Etw):
pattern = Struct(
"object" / Int64ul,
"hResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=521, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_521_0(Etw):
pattern = Struct(
"object" / Int64ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=522, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_522_0(Etw):
pattern = Struct(
"object" / Int64ul,
"hResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=523, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_523_0(Etw):
pattern = Struct(
"object" / Int64ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=524, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_524_0(Etw):
pattern = Struct(
"object" / Int64ul,
"hResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=525, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_525_0(Etw):
pattern = Struct(
"object" / Int64ul,
"m_bEngineStarted" / Int8ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=526, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_526_0(Etw):
pattern = Struct(
"object" / Int64ul,
"hResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=527, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_527_0(Etw):
pattern = Struct(
"object" / Int64ul,
"DisconnectReason" / Int32sl
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=528, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_528_0(Etw):
pattern = Struct(
"object" / Int64ul,
"u32FramesRead" / Int32ul,
"u32ActualFramesInCurrentPacket" / Int32ul,
"dwFlags" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=529, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_529_0(Etw):
pattern = Struct(
"object" / Int64ul,
"fLevel" / Float32l
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=530, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_530_0(Etw):
pattern = Struct(
"object" / Int64ul,
"bMute" / Int8ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=531, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_531_0(Etw):
pattern = Struct(
"object" / Int64ul,
"pParentObj" / Int64ul,
"dwWorkQueueId" / Int32ul,
"lWorkQueuePriority" / Int32sl,
"hResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=532, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_532_0(Etw):
pattern = Struct(
"object" / Int64ul,
"hResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=533, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_533_0(Etw):
pattern = Struct(
"object" / Int64ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=534, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_534_0(Etw):
pattern = Struct(
"object" / Int64ul,
"hResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=535, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_535_0(Etw):
pattern = Struct(
"object" / Int64ul,
"m_u32BufferFrameCount" / Int32ul,
"m_u32BytesPerFrame" / Int32ul,
"m_u32FramesPerSecond" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=536, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_536_0(Etw):
pattern = Struct(
"object" / Int64ul,
"hResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=537, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_537_0(Etw):
pattern = Struct(
"object" / Int64ul,
"wFormatTag" / Int32ul,
"nChannels" / Int16ul,
"nSamplesPerSec" / Int32ul,
"nAvgBytesPerSec" / Int32ul,
"nBlockAlign" / Int16ul,
"wBitsPerSample" / Int16ul,
"cbSize" / Int16ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=538, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_538_0(Etw):
pattern = Struct(
"object" / Int64ul,
"wFormatTag" / Int32ul,
"nChannels" / Int16ul,
"nSamplesPerSec" / Int32ul,
"nAvgBytesPerSec" / Int32ul,
"nBlockAlign" / Int16ul,
"wBitsPerSample" / Int16ul,
"cbSize" / Int16ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=539, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_539_0(Etw):
pattern = Struct(
"object" / Int64ul,
"hResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=540, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_540_0(Etw):
pattern = Struct(
"object" / Int64ul,
"hResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=541, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_541_0(Etw):
pattern = Struct(
"object" / Int64ul,
"m_ReadySampleCount" / Int32ul,
"hResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=542, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_542_0(Etw):
pattern = Struct(
"object" / Int64ul,
"m_ReadySampleCount" / Int32ul,
"hResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=543, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_543_0(Etw):
pattern = Struct(
"object" / Int64ul,
"pMediaType" / Int64ul,
"m_spCurrentMediaType" / Int64ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=544, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_544_0(Etw):
pattern = Struct(
"object" / Int64ul,
"m_spCurrentMediaType" / Int64ul,
"hResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=545, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_545_0(Etw):
pattern = Struct(
"object" / Int64ul,
"wFormatTag" / Int32ul,
"nChannels" / Int16ul,
"nSamplesPerSec" / Int32ul,
"nAvgBytesPerSec" / Int32ul,
"nBlockAlign" / Int16ul,
"wBitsPerSample" / Int16ul,
"cbSize" / Int16ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=546, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_546_0(Etw):
pattern = Struct(
"object" / Int64ul,
"guidService" / Guid,
"riid" / Guid,
"pvObject" / Int64ul,
"hResult" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=547, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_547_0(Etw):
pattern = Struct(
"object" / Int64ul,
"u32CurrentPadding" / Int32ul,
"ulSamples" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=600, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_600_0(Etw):
pattern = Struct(
"object" / Int64ul,
"State" / Int32sl,
"ClockOffset" / Int64sl,
"QPC" / Int64sl
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=650, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_650_0(Etw):
pattern = Struct(
"object" / Int64ul,
"SrcObject" / Int64ul,
"SamplesReceived" / Int32sl,
"LateSamples" / Int64sl,
"TotalLateTime_ms" / Int64sl,
"SampleLatency_hns" / Int64sl,
"SampleTime_hns" / Int64sl
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=651, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_651_0(Etw):
pattern = Struct(
"object" / Int64ul,
"Node" / Int64ul,
"OutputIndex" / Int32sl,
"WorkQueueID" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=652, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_652_0(Etw):
pattern = Struct(
"object" / Int64ul,
"Node" / Int64ul,
"OutputIndex" / Int32sl,
"WorkQueueID" / Int32ul
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=700, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_700_0(Etw):
pattern = Struct(
"object" / Int64ul,
"Stream" / Int32sl,
"SamplePtr" / Int64ul,
"TimeStamp" / Int64sl
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=701, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_701_0(Etw):
pattern = Struct(
"object" / Int64ul,
"Stream" / Int32sl,
"ES_Stream" / Int32sl,
"TimeStamp" / Int64sl,
"PackSize" / Int32sl,
"LastPCR" / Int64sl
)
@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb"), event_id=702, version=0)
class Microsoft_Windows_MediaFoundation_Performance_Core_702_0(Etw):
pattern = Struct(
"object" / Int64ul,
"PCR" / Int64sl
)
|
tools/dbgui/build_errors.py | rakati/ppci-mirror | 161 | 80897 | <filename>tools/dbgui/build_errors.py<gh_stars>100-1000
from qtwrapper import QtGui, QtWidgets, pyqtSignal, get_icon
class BuildErrors(QtWidgets.QTreeView):
    """Tree view that lists build errors (message, row, column).

    Clicking a row emits :data:`sigErrorSelected` with the error object
    that was stored on the clicked item via ``setData``.
    """

    # Emitted with the underlying error object when a row is clicked.
    sigErrorSelected = pyqtSignal(object)

    def __init__(self, parent=None):
        super().__init__(parent)
        self.clicked.connect(self.itemSelected)
        self.errorIcon = get_icon('error.png')
        # Fix: the original created a throwaway QStandardItemModel, called
        # setModel() on it, and then immediately replaced it with this one —
        # the first model was never used. Only one model is created now.
        # NOTE(review): assigning ``self.model`` shadows QTreeView.model();
        # kept as-is because other code may read this attribute directly.
        self.model = QtGui.QStandardItemModel()
        self.model.setHorizontalHeaderLabels(['Message', 'Row', 'Column'])
        self.header().setStretchLastSection(True)
        self.setModel(self.model)

    def setErrorList(self, errorlist):
        """Replace the displayed rows with one row per error in *errorlist*.

        Each error ``e`` is expected to have ``e.msg`` and an optional
        ``e.loc`` with ``row``/``col`` attributes (blank cells when absent).
        """
        # Clear all existing rows before repopulating.
        c = self.model.rowCount()
        self.model.removeRows(0, c)
        for e in errorlist:
            item = QtGui.QStandardItem(self.errorIcon, str(e.msg))
            item.setData(e)
            row = str(e.loc.row) if e.loc else ''
            irow = QtGui.QStandardItem(row)
            irow.setData(e)
            col = str(e.loc.col) if e.loc else ''
            icol = QtGui.QStandardItem(col)
            icol.setData(e)
            # The error object is attached to every cell so itemSelected()
            # works no matter which column is clicked.
            self.model.appendRow([item, irow, icol])
        for i in range(3):
            self.resizeColumnToContents(i)

    def itemSelected(self, index):
        """Emit the error object stored on the clicked item, if any."""
        if not index.isValid():
            return
        item = self.model.itemFromIndex(index)
        err = item.data()
        self.sigErrorSelected.emit(err)
|
src/run/LPC_runcore.py | ufo2011/NXP-MCUBootUtility | 174 | 80909 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import math
import LPC_rundef
import rundef
import boot
sys.path.append(os.path.abspath(".."))
from gen import LPC_gencore
from gen import LPC_gendef
from ui import LPC_uidef
from ui import uidef
from ui import uivar
from ui import uilang
from mem import LPC_memdef
from boot import bltest
from boot import target
from utils import misc
def LPC_createTarget(device, exeBinRoot):
    """Locate and execute the blhost target config for *device*.

    Maps the MCU device to a CPU directory name, finds the per-CPU target
    directory (next to this module, or under ``<exeBinRoot>/../src``),
    executes its ``bltargetconfig.py``, and builds a ``target.Target``
    from the resulting namespace.

    Returns:
        (tgt, targetBaseDir): the constructed Target and the directory
        the config was loaded from.

    Raises:
        ValueError: if no target directory exists in either location.
        RuntimeError: if the directory lacks ``bltargetconfig.py``.
    """
    # Map the selected device to a CPU directory name; LPC55S69 is the
    # default for any device not matched below.
    cpu = "LPC55S69"
    if device in uidef.kMcuDevice_Niobe4minis:
        cpu = "LPC55S16"
    elif device in uidef.kMcuDevice_Niobe4s:
        cpu = "LPC55S69"
    else:
        pass
    # Build path to target directory and config file (preferred location:
    # the 'targets' tree beside this module).
    targetBaseDir = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'targets', cpu)
    # Check for existing target directory; fall back to the source tree
    # relative to the executable before giving up.
    if not os.path.isdir(targetBaseDir):
        targetBaseDir = os.path.join(os.path.dirname(exeBinRoot), 'src', 'targets', cpu)
        if not os.path.isdir(targetBaseDir):
            raise ValueError("Missing target directory at path %s" % targetBaseDir)
    targetConfigFile = os.path.join(targetBaseDir, 'bltargetconfig.py')
    # Check for config file existence.
    if not os.path.isfile(targetConfigFile):
        raise RuntimeError("Missing target config file at path %s" % targetConfigFile)
    # Build locals dict by copying our locals and adjusting file path and name.
    # WARNING: the *names* of every local defined above become keys of
    # targetConfig and ultimately keyword arguments to target.Target(**...),
    # so renaming locals in this function changes behavior.
    targetConfig = locals().copy()
    targetConfig['__file__'] = targetConfigFile
    targetConfig['__name__'] = 'bltargetconfig'
    # Execute the target config script.
    # NOTE(review): execfile is a Python 2 builtin — this module appears to
    # target Python 2; confirm before running under Python 3.
    execfile(targetConfigFile, globals(), targetConfig)
    # Create the target object from the merged namespace.
    tgt = target.Target(**targetConfig)
    return tgt, targetBaseDir
class secBootLpcRun(LPC_gencore.secBootLpcGen):
    """Runtime layer for LPC secure-boot provisioning.

    Drives the blhost command-line tool against an LPC device's ROM
    bootloader: connecting over UART/USB-HID, querying device and flash
    properties, and programming the bootable image into on-chip flash.
    """

    def __init__(self, parent):
        LPC_gencore.secBootLpcGen.__init__(self, parent)
        # Only initialize the run layer when the UI has an LPC-series MCU
        # selected; other series are handled by sibling classes.
        if self.mcuSeries == uidef.kMcuSeries_LPC:
            self.LPC_initRun()

    def LPC_initRun( self ):
        """Reset run-layer state and create the initial MCU target."""
        self.blhost = None          # bltest bootloader wrapper, set on connect
        self.tgt = None             # target.Target for the selected MCU
        self.cpuDir = None          # directory the target config was loaded from
        self.blhostVectorsDir = os.path.join(self.exeTopRoot, 'tools', 'blhost2_3', 'win', 'vectors')
        self.LPC_isDeviceEnabledToOperate = True
        self.bootDeviceMemId = 0
        self.bootDeviceMemBase = None
        # Write/erase/read granularity of the current boot memory, in bytes;
        # refined later from device properties (e.g. flash sector size).
        self.comMemWriteUnit = 0x1
        self.comMemEraseUnit = 0x1
        self.comMemReadUnit = 0x1
        self.LPC_createMcuTarget()

    def LPC_createMcuTarget( self ):
        """(Re)build self.tgt / self.cpuDir for the currently selected device."""
        self.tgt, self.cpuDir = LPC_createTarget(self.mcuDevice, self.exeBinRoot)

    def LPC_getUsbid( self ):
        """Return [ROM VID, ROM PID, flashloader VID, flashloader PID]."""
        self.LPC_createMcuTarget()
        return [self.tgt.romUsbVid, self.tgt.romUsbPid, self.tgt.flashloaderUsbVid, self.tgt.flashloaderUsbPid]

    def LPC_connectToDevice( self , connectStage):
        """Create (or drop) the blhost connection for the given boot stage.

        For ROM/flashloader stages a bltest bootloader object is created
        over the UI-selected peripheral (UART or USB-HID); for the Reset
        stage the target is simply discarded.
        """
        if connectStage == uidef.kConnectStage_Rom or connectStage == uidef.kConnectStage_Flashloader:
            # Create the target object.
            self.LPC_createMcuTarget()
            if self.isUartPortSelected:
                blPeripheral = 'uart'
                uartComPort = self.uartComPort
                uartBaudrate = int(self.uartBaudrate)
                usbVid = ''
                usbPid = ''
            elif self.isUsbhidPortSelected:
                blPeripheral = 'usb'
                uartComPort = ''
                uartBaudrate = ''
                # ROM and flashloader enumerate with different USB IDs.
                if connectStage == uidef.kConnectStage_Rom:
                    usbVid = self.tgt.romUsbVid
                    usbPid = self.tgt.romUsbPid
                elif connectStage == uidef.kConnectStage_Flashloader:
                    usbVid = self.tgt.flashloaderUsbVid
                    usbPid = self.tgt.flashloaderUsbPid
                else:
                    pass
            self.blhost = bltest.createBootloader(self.tgt,
                                                  self.blhostVectorsDir,
                                                  blPeripheral,
                                                  uartBaudrate, uartComPort,
                                                  usbVid, usbPid,
                                                  True)
        elif connectStage == uidef.kConnectStage_Reset:
            self.tgt = None
        else:
            pass

    def LPC_pingRom( self ):
        """Return True if the ROM answers a get-property request."""
        status, results, cmdStr = self.blhost.getProperty(boot.properties.kPropertyTag_CurrentVersion)
        self.printLog(cmdStr)
        return (status == boot.status.kStatus_Success)

    def _LPC_getMcuDeviceIds( self ):
        """Query and display the device ID and (4-word) unique ID."""
        status, results, cmdStr = self.blhost.getProperty(boot.properties.kPropertyTag_SystemDeviceIdent)
        self.printLog(cmdStr)
        if status == boot.status.kStatus_Success:
            self.printDeviceStatus("SYSCON->DEVICE_ID0 = " + self.convertLongIntHexText(str(hex(results[0]))))
        else:
            pass
        status, results, cmdStr = self.blhost.getProperty(boot.properties.kPropertyTag_UniqueDeviceIdent)
        self.printLog(cmdStr)
        if status == boot.status.kStatus_Success:
            self.printDeviceStatus("PFR UUID0 = " + self.convertLongIntHexText(str(hex(results[0]))))
            self.printDeviceStatus("PFR UUID1 = " + self.convertLongIntHexText(str(hex(results[1]))))
            self.printDeviceStatus("PFR UUID2 = " + self.convertLongIntHexText(str(hex(results[2]))))
            self.printDeviceStatus("PFR UUID3 = " + self.convertLongIntHexText(str(hex(results[3]))))
        else:
            pass

    def LPC_getMcuDeviceInfoViaRom( self ):
        """Print ROM bootloader version and device identifiers to the UI."""
        self.printDeviceStatus("----------MCU ROM info-----------")
        self.getMcuDeviceBootloaderVersion()
        self._LPC_getMcuDeviceIds()

    def _LPC_getC040hdFlashProperties( self ):
        """Query on-chip flash sector and total size; cache the erase unit."""
        status, results, cmdStr = self.blhost.getProperty(boot.properties.kPropertyTag_FlashSectorSize)
        self.printLog(cmdStr)
        if status == boot.status.kStatus_Success:
            self.printDeviceStatus("Sector Size = " + self.showAsOptimalMemoryUnit(results[0]))
            # Erase granularity equals the flash sector size.
            self.comMemEraseUnit = results[0]
        else:
            pass
        status, results, cmdStr = self.blhost.getProperty(boot.properties.kPropertyTag_FlashSizeInBytes)
        self.printLog(cmdStr)
        if status == boot.status.kStatus_Success:
            self.printDeviceStatus("Total Size = " + self.showAsOptimalMemoryUnit(results[0]))
        else:
            pass

    def LPC_getBootDeviceInfoViaRom ( self ):
        """Print boot-device (on-chip NOR) properties to the UI."""
        if self.bootDevice == LPC_uidef.kBootDevice_InternalNor:
            self.printDeviceStatus("-------On-chip NOR memory------")
            self._LPC_getC040hdFlashProperties()
        else:
            pass

    def _LPC_prepareForBootDeviceOperation ( self ):
        """Resolve the base address of the selected boot memory."""
        if self.bootDevice == LPC_uidef.kBootDevice_InternalNor:
            self.bootDeviceMemBase = self.tgt.c040hdNorMemBase
        else:
            pass

    def _eraseC040hdNorForImageLoading( self ):
        """Erase enough flash (rounded up to erase units) to hold the image."""
        imageLen = os.path.getsize(self.destAppFilename)
        memEraseLen = misc.align_up(imageLen, self.comMemEraseUnit)
        status, results, cmdStr = self.blhost.flashEraseRegion(self.tgt.c040hdNorMemBase, memEraseLen)
        self.printLog(cmdStr)
        return (status == boot.status.kStatus_Success)

    def LPC_flashBootableImage ( self ):
        """Erase and program the bootable image; return True on success."""
        self._LPC_prepareForBootDeviceOperation()
        imageLen = os.path.getsize(self.destAppFilename)
        if self.bootDevice == LPC_uidef.kBootDevice_InternalNor:
            # Flash must be erased before it can be written.
            if not self._eraseC040hdNorForImageLoading():
                return False
            if self.secureBootType == LPC_uidef.kSecureBootType_PlainUnsigned:
                pass
            imageLoadAddr = self.bootDeviceMemBase
            status, results, cmdStr = self.blhost.writeMemory(imageLoadAddr, self.destAppFilename)
            self.printLog(cmdStr)
            if status != boot.status.kStatus_Success:
                return False
        else:
            pass
        # Clean up the temporary converted image, if one was produced;
        # removal failures are deliberately ignored (best effort).
        if self.isConvertedAppUsed:
            try:
                os.remove(self.srcAppFilename)
            except:
                pass
            self.isConvertedAppUsed = False
        return True

    def LPC_resetMcuDevice( self ):
        """Issue a blhost reset command; return True on success."""
        status, results, cmdStr = self.blhost.reset()
        self.printLog(cmdStr)
        return (status == boot.status.kStatus_Success)
|
amazon/paapi5_python_sdk/search_items_request.py | frenners/python-amazon-paapi | 121 | 80941 | # coding: utf-8
"""
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
"""
"""
ProductAdvertisingAPI
https://webservices.amazon.com/paapi5/documentation/index.html # noqa: E501
"""
import pprint
import re # noqa: F401
import six
from .availability import Availability # noqa: F401,E501
from .condition import Condition # noqa: F401,E501
from .delivery_flag import DeliveryFlag # noqa: F401,E501
from .max_price import MaxPrice # noqa: F401,E501
from .merchant import Merchant # noqa: F401,E501
from .min_price import MinPrice # noqa: F401,E501
from .min_reviews_rating import MinReviewsRating # noqa: F401,E501
from .min_saving_percent import MinSavingPercent # noqa: F401,E501
from .offer_count import OfferCount # noqa: F401,E501
from .partner_type import PartnerType # noqa: F401,E501
from .properties import Properties # noqa: F401,E501
from .search_items_resource import SearchItemsResource # noqa: F401,E501
from .sort_by import SortBy # noqa: F401,E501
class SearchItemsRequest(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'actor': 'str',
        'artist': 'str',
        'author': 'str',
        'availability': 'Availability',
        'brand': 'str',
        'browse_node_id': 'str',
        'condition': 'Condition',
        'currency_of_preference': 'str',
        'delivery_flags': 'list[DeliveryFlag]',
        'item_count': 'int',
        'item_page': 'int',
        'keywords': 'str',
        'languages_of_preference': 'list[str]',
        'marketplace': 'str',
        'max_price': 'MaxPrice',
        'merchant': 'Merchant',
        'min_price': 'MinPrice',
        'min_reviews_rating': 'MinReviewsRating',
        'min_saving_percent': 'MinSavingPercent',
        'offer_count': 'OfferCount',
        'partner_tag': 'str',
        'partner_type': 'PartnerType',
        'properties': 'Properties',
        'resources': 'list[SearchItemsResource]',
        'search_index': 'str',
        'sort_by': 'SortBy',
        'title': 'str'
    }
    attribute_map = {
        'actor': 'Actor',
        'artist': 'Artist',
        'author': 'Author',
        'availability': 'Availability',
        'brand': 'Brand',
        'browse_node_id': 'BrowseNodeId',
        'condition': 'Condition',
        'currency_of_preference': 'CurrencyOfPreference',
        'delivery_flags': 'DeliveryFlags',
        'item_count': 'ItemCount',
        'item_page': 'ItemPage',
        'keywords': 'Keywords',
        'languages_of_preference': 'LanguagesOfPreference',
        'marketplace': 'Marketplace',
        'max_price': 'MaxPrice',
        'merchant': 'Merchant',
        'min_price': 'MinPrice',
        'min_reviews_rating': 'MinReviewsRating',
        'min_saving_percent': 'MinSavingPercent',
        'offer_count': 'OfferCount',
        'partner_tag': 'PartnerTag',
        'partner_type': 'PartnerType',
        'properties': 'Properties',
        'resources': 'Resources',
        'search_index': 'SearchIndex',
        'sort_by': 'SortBy',
        'title': 'Title'
    }
    def __init__(self, actor=None, artist=None, author=None, availability=None, brand=None, browse_node_id=None, condition=None, currency_of_preference=None, delivery_flags=None, item_count=None, item_page=None, keywords=None, languages_of_preference=None, marketplace=None, max_price=None, merchant=None, min_price=None, min_reviews_rating=None, min_saving_percent=None, offer_count=None, partner_tag=None, partner_type=None, properties=None, resources=None, search_index=None, sort_by=None, title=None):  # noqa: E501
        """SearchItemsRequest - a model defined in Swagger"""  # noqa: E501
        self._actor = None
        self._artist = None
        self._author = None
        self._availability = None
        self._brand = None
        self._browse_node_id = None
        self._condition = None
        self._currency_of_preference = None
        self._delivery_flags = None
        self._item_count = None
        self._item_page = None
        self._keywords = None
        self._languages_of_preference = None
        self._marketplace = None
        self._max_price = None
        self._merchant = None
        self._min_price = None
        self._min_reviews_rating = None
        self._min_saving_percent = None
        self._offer_count = None
        self._partner_tag = None
        self._partner_type = None
        self._properties = None
        self._resources = None
        self._search_index = None
        self._sort_by = None
        self._title = None
        self.discriminator = None  # no discriminator: this model has no polymorphic subtypes
        if actor is not None:
            self.actor = actor
        if artist is not None:
            self.artist = artist
        if author is not None:
            self.author = author
        if availability is not None:
            self.availability = availability
        if brand is not None:
            self.brand = brand
        if browse_node_id is not None:
            self.browse_node_id = browse_node_id
        if condition is not None:
            self.condition = condition
        if currency_of_preference is not None:
            self.currency_of_preference = currency_of_preference
        if delivery_flags is not None:
            self.delivery_flags = delivery_flags
        if item_count is not None:
            self.item_count = item_count
        if item_page is not None:
            self.item_page = item_page
        if keywords is not None:
            self.keywords = keywords
        if languages_of_preference is not None:
            self.languages_of_preference = languages_of_preference
        if marketplace is not None:
            self.marketplace = marketplace
        if max_price is not None:
            self.max_price = max_price
        if merchant is not None:
            self.merchant = merchant
        if min_price is not None:
            self.min_price = min_price
        if min_reviews_rating is not None:
            self.min_reviews_rating = min_reviews_rating
        if min_saving_percent is not None:
            self.min_saving_percent = min_saving_percent
        if offer_count is not None:
            self.offer_count = offer_count
        # partner_tag and partner_type are required fields: they are assigned
        # unconditionally so their setters can reject a None value.
        self.partner_tag = partner_tag
        self.partner_type = partner_type
        if properties is not None:
            self.properties = properties
        if resources is not None:
            self.resources = resources
        if search_index is not None:
            self.search_index = search_index
        if sort_by is not None:
            self.sort_by = sort_by
        if title is not None:
            self.title = title
    @property
    def actor(self):
        """Gets the actor of this SearchItemsRequest.  # noqa: E501
        :return: The actor of this SearchItemsRequest.  # noqa: E501
        :rtype: str
        """
        return self._actor
    @actor.setter
    def actor(self, actor):
        """Sets the actor of this SearchItemsRequest.
        :param actor: The actor of this SearchItemsRequest.  # noqa: E501
        :type: str
        """
        self._actor = actor
    @property
    def artist(self):
        """Gets the artist of this SearchItemsRequest.  # noqa: E501
        :return: The artist of this SearchItemsRequest.  # noqa: E501
        :rtype: str
        """
        return self._artist
    @artist.setter
    def artist(self, artist):
        """Sets the artist of this SearchItemsRequest.
        :param artist: The artist of this SearchItemsRequest.  # noqa: E501
        :type: str
        """
        self._artist = artist
    @property
    def author(self):
        """Gets the author of this SearchItemsRequest.  # noqa: E501
        :return: The author of this SearchItemsRequest.  # noqa: E501
        :rtype: str
        """
        return self._author
    @author.setter
    def author(self, author):
        """Sets the author of this SearchItemsRequest.
        :param author: The author of this SearchItemsRequest.  # noqa: E501
        :type: str
        """
        self._author = author
    @property
    def availability(self):
        """Gets the availability of this SearchItemsRequest.  # noqa: E501
        :return: The availability of this SearchItemsRequest.  # noqa: E501
        :rtype: Availability
        """
        return self._availability
    @availability.setter
    def availability(self, availability):
        """Sets the availability of this SearchItemsRequest.
        :param availability: The availability of this SearchItemsRequest.  # noqa: E501
        :type: Availability
        """
        self._availability = availability
    @property
    def brand(self):
        """Gets the brand of this SearchItemsRequest.  # noqa: E501
        :return: The brand of this SearchItemsRequest.  # noqa: E501
        :rtype: str
        """
        return self._brand
    @brand.setter
    def brand(self, brand):
        """Sets the brand of this SearchItemsRequest.
        :param brand: The brand of this SearchItemsRequest.  # noqa: E501
        :type: str
        """
        self._brand = brand
    @property
    def browse_node_id(self):
        """Gets the browse_node_id of this SearchItemsRequest.  # noqa: E501
        :return: The browse_node_id of this SearchItemsRequest.  # noqa: E501
        :rtype: str
        """
        return self._browse_node_id
    @browse_node_id.setter
    def browse_node_id(self, browse_node_id):
        """Sets the browse_node_id of this SearchItemsRequest.
        :param browse_node_id: The browse_node_id of this SearchItemsRequest.  # noqa: E501
        :type: str
        """
        self._browse_node_id = browse_node_id
    @property
    def condition(self):
        """Gets the condition of this SearchItemsRequest.  # noqa: E501
        :return: The condition of this SearchItemsRequest.  # noqa: E501
        :rtype: Condition
        """
        return self._condition
    @condition.setter
    def condition(self, condition):
        """Sets the condition of this SearchItemsRequest.
        :param condition: The condition of this SearchItemsRequest.  # noqa: E501
        :type: Condition
        """
        self._condition = condition
    @property
    def currency_of_preference(self):
        """Gets the currency_of_preference of this SearchItemsRequest.  # noqa: E501
        :return: The currency_of_preference of this SearchItemsRequest.  # noqa: E501
        :rtype: str
        """
        return self._currency_of_preference
    @currency_of_preference.setter
    def currency_of_preference(self, currency_of_preference):
        """Sets the currency_of_preference of this SearchItemsRequest.
        :param currency_of_preference: The currency_of_preference of this SearchItemsRequest.  # noqa: E501
        :type: str
        """
        self._currency_of_preference = currency_of_preference
    @property
    def delivery_flags(self):
        """Gets the delivery_flags of this SearchItemsRequest.  # noqa: E501
        :return: The delivery_flags of this SearchItemsRequest.  # noqa: E501
        :rtype: list[DeliveryFlag]
        """
        return self._delivery_flags
    @delivery_flags.setter
    def delivery_flags(self, delivery_flags):
        """Sets the delivery_flags of this SearchItemsRequest.
        :param delivery_flags: The delivery_flags of this SearchItemsRequest.  # noqa: E501
        :type: list[DeliveryFlag]
        """
        self._delivery_flags = delivery_flags
    @property
    def item_count(self):
        """Gets the item_count of this SearchItemsRequest.  # noqa: E501
        :return: The item_count of this SearchItemsRequest.  # noqa: E501
        :rtype: int
        """
        return self._item_count
    @item_count.setter
    def item_count(self, item_count):
        """Sets the item_count of this SearchItemsRequest.
        :param item_count: The item_count of this SearchItemsRequest.  # noqa: E501
        :type: int
        """
        self._item_count = item_count
    @property
    def item_page(self):
        """Gets the item_page of this SearchItemsRequest.  # noqa: E501
        :return: The item_page of this SearchItemsRequest.  # noqa: E501
        :rtype: int
        """
        return self._item_page
    @item_page.setter
    def item_page(self, item_page):
        """Sets the item_page of this SearchItemsRequest.
        :param item_page: The item_page of this SearchItemsRequest.  # noqa: E501
        :type: int
        """
        self._item_page = item_page
    @property
    def keywords(self):
        """Gets the keywords of this SearchItemsRequest.  # noqa: E501
        :return: The keywords of this SearchItemsRequest.  # noqa: E501
        :rtype: str
        """
        return self._keywords
    @keywords.setter
    def keywords(self, keywords):
        """Sets the keywords of this SearchItemsRequest.
        :param keywords: The keywords of this SearchItemsRequest.  # noqa: E501
        :type: str
        """
        self._keywords = keywords
    @property
    def languages_of_preference(self):
        """Gets the languages_of_preference of this SearchItemsRequest.  # noqa: E501
        :return: The languages_of_preference of this SearchItemsRequest.  # noqa: E501
        :rtype: list[str]
        """
        return self._languages_of_preference
    @languages_of_preference.setter
    def languages_of_preference(self, languages_of_preference):
        """Sets the languages_of_preference of this SearchItemsRequest.
        :param languages_of_preference: The languages_of_preference of this SearchItemsRequest.  # noqa: E501
        :type: list[str]
        """
        self._languages_of_preference = languages_of_preference
    @property
    def marketplace(self):
        """Gets the marketplace of this SearchItemsRequest.  # noqa: E501
        :return: The marketplace of this SearchItemsRequest.  # noqa: E501
        :rtype: str
        """
        return self._marketplace
    @marketplace.setter
    def marketplace(self, marketplace):
        """Sets the marketplace of this SearchItemsRequest.
        :param marketplace: The marketplace of this SearchItemsRequest.  # noqa: E501
        :type: str
        """
        self._marketplace = marketplace
    @property
    def max_price(self):
        """Gets the max_price of this SearchItemsRequest.  # noqa: E501
        :return: The max_price of this SearchItemsRequest.  # noqa: E501
        :rtype: MaxPrice
        """
        return self._max_price
    @max_price.setter
    def max_price(self, max_price):
        """Sets the max_price of this SearchItemsRequest.
        :param max_price: The max_price of this SearchItemsRequest.  # noqa: E501
        :type: MaxPrice
        """
        self._max_price = max_price
    @property
    def merchant(self):
        """Gets the merchant of this SearchItemsRequest.  # noqa: E501
        :return: The merchant of this SearchItemsRequest.  # noqa: E501
        :rtype: Merchant
        """
        return self._merchant
    @merchant.setter
    def merchant(self, merchant):
        """Sets the merchant of this SearchItemsRequest.
        :param merchant: The merchant of this SearchItemsRequest.  # noqa: E501
        :type: Merchant
        """
        self._merchant = merchant
    @property
    def min_price(self):
        """Gets the min_price of this SearchItemsRequest.  # noqa: E501
        :return: The min_price of this SearchItemsRequest.  # noqa: E501
        :rtype: MinPrice
        """
        return self._min_price
    @min_price.setter
    def min_price(self, min_price):
        """Sets the min_price of this SearchItemsRequest.
        :param min_price: The min_price of this SearchItemsRequest.  # noqa: E501
        :type: MinPrice
        """
        self._min_price = min_price
    @property
    def min_reviews_rating(self):
        """Gets the min_reviews_rating of this SearchItemsRequest.  # noqa: E501
        :return: The min_reviews_rating of this SearchItemsRequest.  # noqa: E501
        :rtype: MinReviewsRating
        """
        return self._min_reviews_rating
    @min_reviews_rating.setter
    def min_reviews_rating(self, min_reviews_rating):
        """Sets the min_reviews_rating of this SearchItemsRequest.
        :param min_reviews_rating: The min_reviews_rating of this SearchItemsRequest.  # noqa: E501
        :type: MinReviewsRating
        """
        self._min_reviews_rating = min_reviews_rating
    @property
    def min_saving_percent(self):
        """Gets the min_saving_percent of this SearchItemsRequest.  # noqa: E501
        :return: The min_saving_percent of this SearchItemsRequest.  # noqa: E501
        :rtype: MinSavingPercent
        """
        return self._min_saving_percent
    @min_saving_percent.setter
    def min_saving_percent(self, min_saving_percent):
        """Sets the min_saving_percent of this SearchItemsRequest.
        :param min_saving_percent: The min_saving_percent of this SearchItemsRequest.  # noqa: E501
        :type: MinSavingPercent
        """
        self._min_saving_percent = min_saving_percent
    @property
    def offer_count(self):
        """Gets the offer_count of this SearchItemsRequest.  # noqa: E501
        :return: The offer_count of this SearchItemsRequest.  # noqa: E501
        :rtype: OfferCount
        """
        return self._offer_count
    @offer_count.setter
    def offer_count(self, offer_count):
        """Sets the offer_count of this SearchItemsRequest.
        :param offer_count: The offer_count of this SearchItemsRequest.  # noqa: E501
        :type: OfferCount
        """
        self._offer_count = offer_count
    @property
    def partner_tag(self):
        """Gets the partner_tag of this SearchItemsRequest.  # noqa: E501
        :return: The partner_tag of this SearchItemsRequest.  # noqa: E501
        :rtype: str
        """
        return self._partner_tag
    @partner_tag.setter
    def partner_tag(self, partner_tag):
        """Sets the partner_tag of this SearchItemsRequest.
        :param partner_tag: The partner_tag of this SearchItemsRequest.  # noqa: E501
        :type: str
        """
        # Required field: reject None explicitly.
        if partner_tag is None:
            raise ValueError("Invalid value for `partner_tag`, must not be `None`")  # noqa: E501
        self._partner_tag = partner_tag
    @property
    def partner_type(self):
        """Gets the partner_type of this SearchItemsRequest.  # noqa: E501
        :return: The partner_type of this SearchItemsRequest.  # noqa: E501
        :rtype: PartnerType
        """
        return self._partner_type
    @partner_type.setter
    def partner_type(self, partner_type):
        """Sets the partner_type of this SearchItemsRequest.
        :param partner_type: The partner_type of this SearchItemsRequest.  # noqa: E501
        :type: PartnerType
        """
        # Required field: reject None explicitly.
        if partner_type is None:
            raise ValueError("Invalid value for `partner_type`, must not be `None`")  # noqa: E501
        self._partner_type = partner_type
    @property
    def properties(self):
        """Gets the properties of this SearchItemsRequest.  # noqa: E501
        :return: The properties of this SearchItemsRequest.  # noqa: E501
        :rtype: Properties
        """
        return self._properties
    @properties.setter
    def properties(self, properties):
        """Sets the properties of this SearchItemsRequest.
        :param properties: The properties of this SearchItemsRequest.  # noqa: E501
        :type: Properties
        """
        self._properties = properties
    @property
    def resources(self):
        """Gets the resources of this SearchItemsRequest.  # noqa: E501
        :return: The resources of this SearchItemsRequest.  # noqa: E501
        :rtype: list[SearchItemsResource]
        """
        return self._resources
    @resources.setter
    def resources(self, resources):
        """Sets the resources of this SearchItemsRequest.
        :param resources: The resources of this SearchItemsRequest.  # noqa: E501
        :type: list[SearchItemsResource]
        """
        self._resources = resources
    @property
    def search_index(self):
        """Gets the search_index of this SearchItemsRequest.  # noqa: E501
        :return: The search_index of this SearchItemsRequest.  # noqa: E501
        :rtype: str
        """
        return self._search_index
    @search_index.setter
    def search_index(self, search_index):
        """Sets the search_index of this SearchItemsRequest.
        :param search_index: The search_index of this SearchItemsRequest.  # noqa: E501
        :type: str
        """
        self._search_index = search_index
    @property
    def sort_by(self):
        """Gets the sort_by of this SearchItemsRequest.  # noqa: E501
        :return: The sort_by of this SearchItemsRequest.  # noqa: E501
        :rtype: SortBy
        """
        return self._sort_by
    @sort_by.setter
    def sort_by(self, sort_by):
        """Sets the sort_by of this SearchItemsRequest.
        :param sort_by: The sort_by of this SearchItemsRequest.  # noqa: E501
        :type: SortBy
        """
        self._sort_by = sort_by
    @property
    def title(self):
        """Gets the title of this SearchItemsRequest.  # noqa: E501
        :return: The title of this SearchItemsRequest.  # noqa: E501
        :rtype: str
        """
        return self._title
    @title.setter
    def title(self, title):
        """Sets the title of this SearchItemsRequest.
        :param title: The title of this SearchItemsRequest.  # noqa: E501
        :type: str
        """
        self._title = title
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Merge in plain dict items when the generated model subclasses dict.
        if issubclass(SearchItemsRequest, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, SearchItemsRequest):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
from typing import List, Optional
import databases
import sqlalchemy
from fastapi import FastAPI
import ormar
app = FastAPI()
metadata = sqlalchemy.MetaData()
database = databases.Database("sqlite:///test.db")
# Stash the database on the app so the startup/shutdown handlers can reach it
# without relying on module globals.
app.state.database = database
@app.on_event("startup")
async def startup() -> None:
    """Open the shared database connection when the application boots."""
    db = app.state.database
    if db.is_connected:
        return
    await db.connect()
@app.on_event("shutdown")
async def shutdown() -> None:
    """Close the shared database connection when the application stops."""
    db = app.state.database
    if not db.is_connected:
        return
    await db.disconnect()
class Category(ormar.Model):
    """Item category, persisted in the `categories` table."""
    class Meta:
        tablename = "categories"
        metadata = metadata
        database = database
    id: int = ormar.Integer(primary_key=True)
    name: str = ormar.String(max_length=100)
class Item(ormar.Model):
    """An item with an optional foreign-key link to a Category."""
    class Meta:
        tablename = "items"
        metadata = metadata
        database = database
    id: int = ormar.Integer(primary_key=True)
    name: str = ormar.String(max_length=100)
    # nullable=True: an item may exist without a category.
    category: Optional[Category] = ormar.ForeignKey(Category, nullable=True)
@app.get("/items/", response_model=List[Item])
async def get_items():
    """List all items, eagerly loading each item's category."""
    return await Item.objects.select_related("category").all()
@app.post("/items/", response_model=Item)
async def create_item(item: Item):
    """Persist a new item and echo it back."""
    await item.save()
    return item
@app.post("/categories/", response_model=Category)
async def create_category(category: Category):
    """Persist a new category and echo it back."""
    await category.save()
    return category
@app.put("/items/{item_id}")
async def get_item(item_id: int, item: Item):
    """Update an existing item with the payload's fields.
    NOTE(review): despite the name `get_item`, this is the PUT/update handler;
    renaming would change the public symbol, so it is only flagged here.
    """
    item_db = await Item.objects.get(pk=item_id)
    return await item_db.update(**item.dict())
@app.delete("/items/{item_id}")
async def delete_item(item_id: int, item: Item = None):
    """Delete an item: from the request payload when given, else by primary key."""
    if item:
        # A payload instance was supplied -- delete it directly.
        return {"deleted_rows": await item.delete()}
    # No payload: fetch the row by primary key first, then delete it.
    db_item = await Item.objects.get(pk=item_id)
    return {"deleted_rows": await db_item.delete()}
|
ludwig/utils/horovod_utils.py | hfurkanbozkurt/ludwig | 970 | 80988 | <gh_stars>100-1000
# Copyright (c) 2020 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from typing import Any, List, Optional
import torch
# Horovod is an optional dependency: _HVD is the horovod.torch module when
# available, otherwise None (callers must check before use).
try:
    import horovod.torch
    _HVD = horovod.torch
except (ModuleNotFoundError, ImportError):
    _HVD = None
def initialize_horovod():
    """Initialize Horovod and return the `horovod.torch` module handle.

    Raises:
        ValueError: if `horovod.torch` could not be imported at module load
            time (i.e. `_HVD` is None).
    """
    # (Removed a stale commented-out error message that referred to
    # `horovod.tensorflow`; this module uses `horovod.torch`.)
    if not _HVD:
        raise ValueError(
            "Horovod backend specified, "
            "but cannot import `horovod.torch`. "
            "Install Horovod following the instructions at: "
            "https://github.com/horovod/horovod"
        )
    _HVD.init()
    return _HVD
def has_horovodrun():
    """Returns True if running with `horovodrun` using Gloo or OpenMPI."""
    return any(marker in os.environ for marker in ("OMPI_COMM_WORLD_RANK", "HOROVOD_RANK"))
def return_first(fn):
    """Decorator: run `fn` on every rank but return its result only on rank 0.

    Non-coordinator ranks get None back, which reduces network overhead when
    the result would otherwise be shipped from every worker.
    """
    from functools import wraps

    # functools.wraps preserves fn's name/docstring on the wrapper (the
    # original decorator dropped them).
    @wraps(fn)
    def wrapped(*args, **kwargs):
        res = fn(*args, **kwargs)
        return res if _HVD.rank() == 0 else None

    return wrapped
def gather_all_tensors(result: torch.Tensor, group: Optional[Any] = None) -> List[torch.Tensor]:
    """Gather a tensor from every Horovod process into a list visible to all.

    Scalars are promoted to 1-element tensors and bool tensors are round-tripped
    through int (a Horovod limitation) before the allgather.

    :param result: the value to sync
    :param group: the process group to gather results from (not supported: always uses world)
    :return: list where entry i is the tensor contributed by process i
    """
    if group is not None:
        raise ValueError("Horovod does not support allgather using a subcommunicator at this time. Unset `group`.")
    if _HVD is None or not _HVD.is_initialized():
        # Not running under Horovod: the "world" is just this process.
        return [result]
    # Promote 0-d scalars to shape (1,) so split() below works.
    tensor = result.reshape(1) if len(result.shape) == 0 else result
    was_bool = tensor.dtype == torch.bool
    if was_bool:
        # Horovod cannot allgather bool tensors; convert to int and back.
        tensor = tensor.int()
    pieces = list(_HVD.allgather(tensor).split(1, dim=0))
    return [piece.bool() for piece in pieces] if was_bool else pieces
def is_distributed_available() -> bool:
    """Return True when Horovod is importable and has been initialized."""
    if _HVD is None:
        return False
    return _HVD.is_initialized()
|
import yaml
import pytest
from unittest import mock
import kubernetes
from kubernetes.config.config_exception import ConfigException
from mlflow.projects import kubernetes as kb
from mlflow.exceptions import ExecutionException
from mlflow.entities import RunStatus
def test_run_command_creation():
    """
    Tests command creation: _get_run_command should tokenize each entry and
    normalize double-quoted arguments to single quotes.
    """
    # (Removed stale `# pylint: disable=unused-argument` -- this test takes no
    # arguments; also stopped rebinding the input list over itself.)
    raw_command = [
        "python train.py --alpha 0.5 --l1-ratio 0.1",
        "--comment 'foo bar'",
        '--comment-bis "bar foo"',
    ]
    command = kb._get_run_command(raw_command)
    assert [
        "python",
        "train.py",
        "--alpha",
        "0.5",
        "--l1-ratio",
        "0.1",
        "--comment",
        "'foo bar'",
        "--comment-bis",
        "'bar foo'",
    ] == command
def test_valid_kubernetes_job_spec():
    """
    Tests job specification for Kubernetes: the generated job definition must
    pin the container name/image/command and merge env vars from the template
    with those passed in.
    """
    # (Removed stale `# pylint: disable=unused-argument` -- this test takes no
    # arguments.)
    custom_template = yaml.safe_load(
        "apiVersion: batch/v1\n"
        "kind: Job\n"
        "metadata:\n"
        "  name: pi-with-ttl\n"
        "spec:\n"
        "  ttlSecondsAfterFinished: 100\n"
        "  template:\n"
        "    spec:\n"
        "      containers:\n"
        "      - name: pi\n"
        "        image: perl\n"
        "        command: ['perl',  '-Mbignum=bpi', '-wle']\n"
        "        env: \n"
        "        - name: DUMMY\n"
        '          value: "test_var"\n'
        "      restartPolicy: Never\n"
    )
    project_name = "mlflow-docker-example"
    image_tag = "image_tag"
    image_digest = "5e74a5a"
    command = ["mlflow",  "run", ".", "--no-conda", "-P", "alpha=0.5"]
    env_vars = {"RUN_ID": "1"}
    job_definition = kb._get_kubernetes_job_definition(
        project_name=project_name,
        image_tag=image_tag,
        image_digest=image_digest,
        command=command,
        env_vars=env_vars,
        job_template=custom_template,
    )
    container_spec = job_definition["spec"]["template"]["spec"]["containers"][0]
    assert container_spec["name"] == project_name
    # Image is pinned by digest, not just tag.
    assert container_spec["image"] == image_tag + "@" + image_digest
    assert container_spec["command"] == command
    # Template env (DUMMY) comes first, then the injected RUN_ID.
    assert 2 == len(container_spec["env"])
    assert container_spec["env"][0]["name"] == "DUMMY"
    assert container_spec["env"][0]["value"] == "test_var"
    assert container_spec["env"][1]["name"] == "RUN_ID"
    assert container_spec["env"][1]["value"] == "1"
def test_run_kubernetes_job():
    """run_kubernetes_job with an explicit kube_context should load that
    context and create one namespaced Job via the Kubernetes batch API."""
    active_run = mock.Mock()
    project_name = "mlflow-docker-example"
    image_tag = "image_tag"
    image_digest = "5e74a5a"
    command = ["python train.py --alpha 0.5 --l1-ratio 0.1"]
    env_vars = {"RUN_ID": "1"}
    kube_context = "docker-for-desktop"
    job_template = yaml.safe_load(
        "apiVersion: batch/v1\n"
        "kind: Job\n"
        "metadata:\n"
        "  name: pi-with-ttl\n"
        "  namespace: mlflow\n"
        "spec:\n"
        "  ttlSecondsAfterFinished: 100\n"
        "  template:\n"
        "    spec:\n"
        "      containers:\n"
        "      - name: pi\n"
        "        image: perl\n"
        "        command: ['perl',  '-Mbignum=bpi', '-wle']\n"
        "      restartPolicy: Never\n"
    )
    # Mock config loading and job creation so no real cluster is needed.
    with mock.patch("kubernetes.config.load_kube_config") as kube_config_mock:
        with mock.patch("kubernetes.client.BatchV1Api.create_namespaced_job") as kube_api_mock:
            submitted_run_obj = kb.run_kubernetes_job(
                project_name=project_name,
                active_run=active_run,
                image_tag=image_tag,
                image_digest=image_digest,
                command=command,
                env_vars=env_vars,
                job_template=job_template,
                kube_context=kube_context,
            )
            assert submitted_run_obj._mlflow_run_id == active_run.info.run_id
            assert submitted_run_obj._job_name.startswith(project_name)
            # Namespace comes from the job template's metadata.
            assert submitted_run_obj._job_namespace == "mlflow"
            assert kube_api_mock.call_count == 1
            # The requested context must be passed through to load_kube_config.
            args = kube_config_mock.call_args_list
            assert args[0][1]["context"] == kube_context
def test_run_kubernetes_job_current_kubecontext():
    """When kube_context is None, the current kubeconfig context is used and
    the in-cluster config loader is never consulted."""
    active_run = mock.Mock()
    project_name = "mlflow-docker-example"
    image_tag = "image_tag"
    image_digest = "5e74a5a"
    command = ["python train.py --alpha 0.5 --l1-ratio 0.1"]
    env_vars = {"RUN_ID": "1"}
    kube_context = None
    job_template = yaml.safe_load(
        "apiVersion: batch/v1\n"
        "kind: Job\n"
        "metadata:\n"
        "  name: pi-with-ttl\n"
        "  namespace: mlflow\n"
        "spec:\n"
        "  ttlSecondsAfterFinished: 100\n"
        "  template:\n"
        "    spec:\n"
        "      containers:\n"
        "      - name: pi\n"
        "        image: perl\n"
        "        command: ['perl',  '-Mbignum=bpi', '-wle']\n"
        "      restartPolicy: Never\n"
    )
    with mock.patch("kubernetes.config.load_kube_config") as kube_config_mock:
        with mock.patch("kubernetes.config.load_incluster_config") as incluster_kube_config_mock:
            with mock.patch("kubernetes.client.BatchV1Api.create_namespaced_job") as kube_api_mock:
                submitted_run_obj = kb.run_kubernetes_job(
                    project_name=project_name,
                    active_run=active_run,
                    image_tag=image_tag,
                    image_digest=image_digest,
                    command=command,
                    env_vars=env_vars,
                    job_template=job_template,
                    kube_context=kube_context,
                )
                assert submitted_run_obj._mlflow_run_id == active_run.info.run_id
                assert submitted_run_obj._job_name.startswith(project_name)
                assert submitted_run_obj._job_namespace == "mlflow"
                assert kube_api_mock.call_count == 1
                # kubeconfig path is used; the in-cluster loader is untouched.
                assert kube_config_mock.call_count == 1
                assert incluster_kube_config_mock.call_count == 0
def test_run_kubernetes_job_in_cluster():
    """When loading the kubeconfig fails (ConfigException), run_kubernetes_job
    should fall back to the in-cluster configuration."""
    active_run = mock.Mock()
    project_name = "mlflow-docker-example"
    image_tag = "image_tag"
    image_digest = "5e74a5a"
    command = ["python train.py --alpha 0.5 --l1-ratio 0.1"]
    env_vars = {"RUN_ID": "1"}
    kube_context = None
    job_template = yaml.safe_load(
        "apiVersion: batch/v1\n"
        "kind: Job\n"
        "metadata:\n"
        "  name: pi-with-ttl\n"
        "  namespace: mlflow\n"
        "spec:\n"
        "  ttlSecondsAfterFinished: 100\n"
        "  template:\n"
        "    spec:\n"
        "      containers:\n"
        "      - name: pi\n"
        "        image: perl\n"
        "        command: ['perl',  '-Mbignum=bpi', '-wle']\n"
        "      restartPolicy: Never\n"
    )
    with mock.patch("kubernetes.config.load_kube_config") as kube_config_mock:
        # Simulate "no kubeconfig available" (e.g. running inside a pod).
        kube_config_mock.side_effect = ConfigException()
        with mock.patch("kubernetes.config.load_incluster_config") as incluster_kube_config_mock:
            with mock.patch("kubernetes.client.BatchV1Api.create_namespaced_job") as kube_api_mock:
                submitted_run_obj = kb.run_kubernetes_job(
                    project_name=project_name,
                    active_run=active_run,
                    image_tag=image_tag,
                    image_digest=image_digest,
                    command=command,
                    env_vars=env_vars,
                    job_template=job_template,
                    kube_context=kube_context,
                )
                assert submitted_run_obj._mlflow_run_id == active_run.info.run_id
                assert submitted_run_obj._job_name.startswith(project_name)
                assert submitted_run_obj._job_namespace == "mlflow"
                assert kube_api_mock.call_count == 1
                # Both loaders are attempted: kubeconfig first, then in-cluster.
                assert kube_config_mock.call_count == 1
                assert incluster_kube_config_mock.call_count == 1
def test_push_image_to_registry():
    """push_image_to_registry delegates one push call to the docker client
    with the image URI as the repository."""
    image_uri = "dockerhub_account/mlflow-kubernetes-example"
    with mock.patch("docker.from_env") as docker_mock:
        docker_client = mock.MagicMock()
        docker_mock.return_value = docker_client
        kb.push_image_to_registry(image_uri)
        assert docker_client.images.push.call_count == 1
        push_calls = docker_client.images.push.call_args_list
        assert push_calls[0][1]["repository"] == image_uri
def test_push_image_to_registry_handling_errors():
    """A failing push must surface as ExecutionException."""
    # NOTE(review): docker.from_env is deliberately NOT mocked here, so the
    # push presumably fails in the test environment -- confirm that is the
    # intended failure trigger.
    image_uri = "dockerhub_account/mlflow-kubernetes-example"
    with pytest.raises(ExecutionException):
        kb.push_image_to_registry(image_uri)
def test_submitted_run_get_status_killed():
    """Cancelling a run deletes the namespaced Job and reports KILLED."""
    mlflow_run_id = 1
    job_name = "job-name"
    job_namespace = "job-namespace"
    with mock.patch("kubernetes.client.BatchV1Api.delete_namespaced_job") as kube_api_mock:
        submitted_run = kb.KubernetesSubmittedRun(mlflow_run_id, job_name, job_namespace)
        submitted_run.cancel()
        assert RunStatus.KILLED == submitted_run.get_status()
        # Exactly one delete call, addressed to the run's job and namespace.
        assert kube_api_mock.call_count == 1
        args = kube_api_mock.call_args_list
        assert args[0][1]["name"] == job_name
        assert args[0][1]["namespace"] == job_namespace
def test_submitted_run_get_status_failed():
    """A job whose status carries a `Failed` condition maps to RunStatus.FAILED."""
    mlflow_run_id = 1
    job_name = "job-name"
    job_namespace = "job-namespace"
    condition = kubernetes.client.models.V1JobCondition(type="Failed", status="True")
    job_status = kubernetes.client.models.V1JobStatus(
        active=1,
        completion_time=None,
        conditions=[condition],
        failed=1,
        start_time=1,
        succeeded=None,
    )
    job = kubernetes.client.models.V1Job(status=job_status)
    with mock.patch("kubernetes.client.BatchV1Api.read_namespaced_job_status") as kube_api_mock:
        kube_api_mock.return_value = job
        submitted_run = kb.KubernetesSubmittedRun(mlflow_run_id, job_name, job_namespace)
        # (Removed leftover debug print of the status.)
        assert RunStatus.FAILED == submitted_run.get_status()
        assert kube_api_mock.call_count == 1
        args = kube_api_mock.call_args_list
        assert args[0][1]["name"] == job_name
        assert args[0][1]["namespace"] == job_namespace
def test_submitted_run_get_status_succeeded():
    """A job whose status carries a `Complete` condition maps to RunStatus.FINISHED."""
    mlflow_run_id = 1
    job_name = "job-name"
    job_namespace = "job-namespace"
    condition = kubernetes.client.models.V1JobCondition(type="Complete", status="True")
    job_status = kubernetes.client.models.V1JobStatus(
        active=None,
        completion_time=None,
        conditions=[condition],
        failed=None,
        start_time=None,
        succeeded=1,
    )
    job = kubernetes.client.models.V1Job(status=job_status)
    with mock.patch("kubernetes.client.BatchV1Api.read_namespaced_job_status") as kube_api_mock:
        kube_api_mock.return_value = job
        submitted_run = kb.KubernetesSubmittedRun(mlflow_run_id, job_name, job_namespace)
        # (Removed leftover debug print of the status.)
        assert RunStatus.FINISHED == submitted_run.get_status()
        assert kube_api_mock.call_count == 1
        args = kube_api_mock.call_args_list
        assert args[0][1]["name"] == job_name
        assert args[0][1]["namespace"] == job_namespace
def test_submitted_run_get_status_running():
    """An active job with no terminal condition maps to RunStatus.RUNNING."""
    mlflow_run_id = 1
    job_name = "job-name"
    job_namespace = "job-namespace"
    job_status = kubernetes.client.models.V1JobStatus(
        active=1, completion_time=None, conditions=None, failed=1, start_time=1, succeeded=1
    )
    job = kubernetes.client.models.V1Job(status=job_status)
    with mock.patch("kubernetes.client.BatchV1Api.read_namespaced_job_status") as kube_api_mock:
        kube_api_mock.return_value = job
        submitted_run = kb.KubernetesSubmittedRun(mlflow_run_id, job_name, job_namespace)
        assert RunStatus.RUNNING == submitted_run.get_status()
        assert kube_api_mock.call_count == 1
        args = kube_api_mock.call_args_list
        # (Removed leftover debug `print(args)`.)
        assert args[0][1]["name"] == job_name
        assert args[0][1]["namespace"] == job_namespace
def test_state_transitions():
    """Walks a job through SCHEDULED -> RUNNING -> FINISHED by feeding the
    status endpoint successive V1JobStatus snapshots."""
    mlflow_run_id = 1
    job_name = "job-name"
    job_namespace = "job-namespace"
    submitted_run = kb.KubernetesSubmittedRun(mlflow_run_id, job_name, job_namespace)
    with mock.patch("kubernetes.client.BatchV1Api.read_namespaced_job_status") as kube_api_mock:
        def set_return_value(**kwargs):
            # Helper: make the next poll report the given V1JobStatus fields.
            job_status = kubernetes.client.models.V1JobStatus(**kwargs)
            kube_api_mock.return_value = kubernetes.client.models.V1Job(status=job_status)
        set_return_value()
        assert RunStatus.SCHEDULED == submitted_run.get_status()
        set_return_value(start_time=1)
        assert RunStatus.RUNNING == submitted_run.get_status()
        set_return_value(start_time=1, failed=1)
        assert RunStatus.RUNNING == submitted_run.get_status()
        set_return_value(start_time=1, failed=1)
        assert RunStatus.RUNNING == submitted_run.get_status()
        # NOTE(review): the two identical checks above look duplicated --
        # possibly intended to verify stability across repeated polls; confirm.
        set_return_value(start_time=1, failed=1, active=1)
        assert RunStatus.RUNNING == submitted_run.get_status()
        set_return_value(start_time=1, failed=1, succeeded=1)
        assert RunStatus.RUNNING == submitted_run.get_status()
        set_return_value(start_time=1, failed=1, succeeded=1, completion_time=2)
        assert RunStatus.RUNNING == submitted_run.get_status()
        # Only an explicit `Complete` condition flips the run to FINISHED.
        condition = kubernetes.client.models.V1JobCondition(type="Complete", status="True")
        set_return_value(
            conditions=[condition], failed=1, start_time=1, completion_time=2, succeeded=1
        )
        assert RunStatus.FINISHED == submitted_run.get_status()
|
import argparse
import os
import windows
import windows.debug.symbols as symbols
# CLI: search for symbols matching `pattern` in a PE file loaded at an
# optional base address, optionally restricted to a symbol tag.
parser = argparse.ArgumentParser(prog=__file__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('pattern')
parser.add_argument('file', help="The PE file to load")
# int(x, 0) accepts decimal as well as 0x-prefixed hexadecimal input.
parser.add_argument('--addr', type=lambda x: int(x, 0), default=0, help="The load address of the PE")
parser.add_argument('--tag', type=lambda x: int(x, 0), default=0)
parser.add_argument('--dbghelp', help='The path of DBG help to use (default use env:PFW_DBGHELP_PATH)')
args = parser.parse_args()

if args.dbghelp:
    symbols.set_dbghelp_path(args.dbghelp)
else:
    # Without an explicit dbghelp path, the library falls back to the
    # PFW_DBGHELP_PATH environment variable; warn when neither is set.
    if "PFW_DBGHELP_PATH" not in os.environ:
        print("Not dbghelp path given and no environ var 'PFW_DBGHELP_PATH' sample may fail")

# Load the PE into a virtual symbol handler and run the pattern search.
sh = symbols.VirtualSymbolHandler()
mod = sh.load_file(path=args.file, addr=args.addr)
res = sh.search(args.pattern, mod=mod, tag=args.tag)
print("{0} symbols found:".format(len(res)))
for sym in res:
    print(" * {0!r}".format(sym))
|
tests/test_fixture.py | acpaquette/deepstate | 684 | 81002 | <reponame>acpaquette/deepstate<filename>tests/test_fixture.py
from __future__ import print_function
import logrun
import deepstate_base
class FixtureTest(deepstate_base.DeepStateTestCase):
    """Checks that DeepState fixtures (SetUp/TearDown) run around a passing test."""

    def run_deepstate(self, deepstate):
        # 1800s timeout; stdout/stderr captured to deepstate.out for assertions.
        (r, output) = logrun.logrun([deepstate, "build/examples/Fixture"],
                                    "deepstate.out", 1800)
        self.assertEqual(r, 0)
        self.assertTrue("Passed: MyTest_Something" in output)
        self.assertFalse("Failed: MyTest_Something" in output)
        # The fixture hooks must have emitted their log lines.
        self.assertTrue("Setting up!" in output)
        self.assertTrue("Tearing down!" in output)
|
coldtype/renderer/winman/webview.py | goodhertz/coldtype | 142 | 81079 | <gh_stars>100-1000
import threading, json
from pathlib import Path
from http.server import SimpleHTTPRequestHandler, HTTPServer
from coldtype.renderer.winman.passthrough import WinmanPassthrough
from coldtype.renderer.config import ConfigOption, ColdtypeConfig
from coldtype.pens.svgpen import SVGPen
from coldtype.pens.jsonpen import JSONPen
# Set when WinmanWebview is constructed; injected into the served HTML so the
# browser-side viewer connects back on the configured websocket port.
WEBSOCKET_PORT = None


class WebViewerHandler(SimpleHTTPRequestHandler):
    """Serves the single webviewer.html page, rewritten for the active websocket port."""

    def _set_headers(self):
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.end_headers()

    def do_GET(self):
        self._set_headers()
        # Serve the bundled viewer page, patching its hard-coded default
        # websocket address with the configured one.
        self.wfile.write(
            (Path(__file__).parent.parent.parent / "webserver/webviewer.html")
            .read_text()
            .replace("localhost:8007", f"localhost:{WEBSOCKET_PORT}")
            .encode("utf8"))

    def do_HEAD(self):
        self._set_headers()

    def log_message(self, format, *args):
        # Silence the default per-request logging of the HTTP server.
        pass
class WinmanWebview(WinmanPassthrough):
    """Window manager that previews renders in a browser instead of a native window.

    Starts a small HTTP server on a daemon thread to serve the viewer page, and
    pushes serialized frames to connected websocket clients on each turn_over().
    """

    def __init__(self, config:ColdtypeConfig, renderer):
        self.config = config
        self.renderer = renderer

        # The request handler is instantiated by HTTPServer and cannot take
        # constructor arguments, so it reads the port from module state.
        global WEBSOCKET_PORT
        WEBSOCKET_PORT = self.config.websocket_port

        wv_port = self.config.webviewer_port
        if wv_port != 0:
            print("WEBVIEWER>", f"localhost:{wv_port}")

            def start_server(port):
                httpd = HTTPServer(('', port), WebViewerHandler)
                httpd.serve_forever()

            daemon = threading.Thread(name='daemon_server',
                target=start_server, args=(wv_port,))
            # daemon=True so the server thread never blocks interpreter exit.
            # (Thread.setDaemon() is deprecated since Python 3.10.)
            daemon.daemon = True
            daemon.start()

    def turn_over(self):
        """Serialize pending previews and broadcast them to webviewer clients."""
        renders = []
        try:
            title = self.renderer.watchees[0][1].name
        except Exception:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit;
            # narrow it while keeping the best-effort fallback title.
            title = "coldtype"

        # (the enumerate index was unused; iterate the tuples directly)
        for render, result, rp in self.renderer.previews_waiting:
            if self.renderer.args.format == "canvas": # TODO config?
                renders.append(dict(
                    fmt="canvas",
                    jsonpen=JSONPen.Composite(result, render.rect),
                    rect=[*render.rect],
                    bg=[*render.bg]))
            else:
                renders.append(dict(
                    fmt="svg", svg=SVGPen.Composite(
                        result,
                        render.rect,
                        viewBox=render.viewBox),
                    rect=[*render.rect],
                    bg=[*render.bg]))

        if renders:
            # Only clients that identified themselves as webviewers get frames.
            for _, client in self.renderer.server.connections.items():
                if hasattr(client, "webviewer") and client.webviewer:
                    client.sendMessage(json.dumps({
                        "renders":renders,
                        "title":title
                    }))
        return []
|
build-support/migration-support/fix_deprecated_globs_usage.py | rcuza/pants | 1,806 | 81101 | #!/usr/bin/env python3
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""A script to replace deprecated uses of `globs`, `rglobs`, and `zglobs` in BUILD files with a
direct list of files and globs.
Run `python3 fix_deprecated_globs_usage.py --help`.
"""
import argparse
import ast
import itertools
import logging
import os.path
import re
from difflib import unified_diff
from enum import Enum
from functools import partial
from pathlib import Path
from typing import Dict, List, NamedTuple, Optional, Set, Union
def main() -> None:
    """Entry point: locate BUILD files under the given folders and rewrite glob usages."""
    args = create_parser().parse_args()

    build_files: Set[Path] = {
        candidate
        for folder in args.folders
        for candidate in [*folder.rglob("BUILD"), *folder.rglob("BUILD.*")]
        # Check that it really is a BUILD file
        if candidate.is_file() and candidate.stem == "BUILD"
    }

    updates: Dict[Path, List[str]] = {}
    for build in build_files:
        try:
            rewritten = generate_possibly_new_build(build)
        except Exception:
            logging.warning(f"Could not parse the BUILD file {build}. Skipping.")
            continue
        if rewritten is not None:
            updates[build] = rewritten

    for build, new_content in updates.items():
        if args.preview:
            # Preview mode: show a colored diff instead of touching the file.
            print(generate_diff(build, new_content))
        else:
            build.write_text("\n".join(new_content) + "\n")
def create_parser() -> argparse.ArgumentParser:
    """Build the CLI: one or more folders to scan plus an optional preview flag."""
    parser = argparse.ArgumentParser(
        description="Modernize BUILD files to no longer use globs, rglobs, and zglobs.")
    parser.add_argument("folders", type=Path, nargs="+",
                        help="Folders to recursively search for `BUILD` files")
    parser.add_argument("-p", "--preview", action="store_true",
                        help="Output to stdout rather than overwriting BUILD files.")
    return parser
class GlobType(Enum):
    # The three deprecated BUILD-file glob functions this script replaces.
    globs = "globs"
    rglobs = "rglobs"
    zglobs = "zglobs"
class GlobFunction(NamedTuple):
    """A parsed `globs`/`rglobs`/`zglobs` call: its type plus include/exclude patterns."""

    glob_type: GlobType
    includes: List[str]
    excludes: Optional[List[str]]

    @staticmethod
    def normalize_rglob(rglob: str) -> str:
        """We must expand rglobs for them to work properly.

        In rglobs, * at the beginning of a path component means "any number of directories, including 0".
        So every time we see ^*, we need to output "**/*whatever".

        See https://github.com/pantsbuild/pants/blob/9832c8f6d8b60648cf906775506864aad0ffdb33/src/python/pants/source/wrapped_globs.py#L303
        for the original implementation.
        """
        components = rglob.split(os.path.sep)
        out: List[str] = []
        for component in components:
            if component == "**":
                # Collapse consecutive recursive globs: **/** is equivalent to **.
                if out and out[-1].startswith("**"):
                    continue
                out.append(component)
            elif component[0] == "*":
                if out and out[-1].startswith("**"):
                    # We want to translate *.py to **/*.py, not **/**/*.py
                    out.append(component)
                else:
                    out.append("**/" + component)
            else:
                # Literal component: copied through unchanged.
                out.append(component)
        return os.path.join(*out)

    @classmethod
    def parse(cls, glob_func: ast.Call, *, build_file: Path) -> Optional["GlobFunction"]:
        """Parse one glob call AST node; return None (after warning) when unparseable."""
        # NB: technically, glob arguments can be different than `globs`, `rglobs`, and `zglobs`, such
        # as using `set()` in an `exclude` clause. We don't try to handle this edge case.
        try:
            glob_type = GlobType(glob_func.func.id)  # type: ignore[attr-defined]
        except ValueError:
            logging.warning(
                f"Could not parse the glob type `{glob_func.func.id}` in {build_file} at "  # type: ignore[attr-defined]
                f"line {glob_func.lineno}. Please manually update."
            )
            return None
        # Includes must all be string literals; variables cannot be resolved statically.
        if not all(isinstance(arg, ast.Str) for arg in glob_func.args):
            logging.warning(
                f"Could not parse the globs in {build_file} at line {glob_func.lineno}. Likely, you are "
                f"using variables instead of raw strings. Please manually update."
            )
            return None
        include_globs: List[str] = [arg.s for arg in glob_func.args]  # type: ignore[attr-defined]
        # Excludes are tricky...The optional `exclude` keyword is guaranteed to have a list as its
        # value, but that list can have any of these elements:
        #  * `str`
        #  * `glob`, `rglob`, or `zglob`
        #  * list of either of the above options
        exclude_globs: Optional[List[str]] = None
        exclude_arg: Optional[ast.keyword] = next(iter(glob_func.keywords), None)
        if exclude_arg is not None and isinstance(exclude_arg.value, ast.List):
            exclude_elements: List[Union[ast.Call, ast.Str, ast.List]] = exclude_arg.value.elts  # type: ignore[assignment]
            # Flatten one level of nested lists inside the exclude clause.
            nested_exclude_elements: List[Union[ast.Call, ast.Str]] = list(
                itertools.chain.from_iterable(
                    nested_list.elts  # type: ignore[misc]
                    for nested_list in exclude_elements
                    if isinstance(nested_list, ast.List)
                )
            )
            combined_exclude_elements: List[Union[ast.Call, ast.Str]] = [
                element
                for element in (*exclude_elements, *nested_exclude_elements)
                # Lists are already flattened, so we want to remove them from this collection.
                if not isinstance(element, ast.List)
            ]
            if not all(isinstance(arg, (ast.Call, ast.Str)) for arg in combined_exclude_elements):
                logging.warning(
                    f"Could not parse the exclude globs in {build_file} at line {glob_func.lineno}. Likely, "
                    f"you are using variables instead of raw strings. Please manually update."
                )
                return None
            exclude_globs = [arg.s for arg in combined_exclude_elements if isinstance(arg, ast.Str)]
            # Glob calls inside `exclude` are parsed recursively; their includes
            # become this call's excludes.
            exclude_glob_functions = (
                cls.parse(glob, build_file=build_file)
                for glob in combined_exclude_elements
                if isinstance(glob, ast.Call)
            )
            for exclude_glob_function in exclude_glob_functions:
                if exclude_glob_function is not None:
                    exclude_globs.extend(exclude_glob_function.includes)
            # We sort because of how we use recursion to evaluate `globs` within the `exclude` clause.
            # Without sorting, the results would appear out of order. Given this difficulty, it's not
            # worth trying to preserve the original order.
            exclude_globs.sort()
        if glob_type == GlobType.rglobs:
            include_globs = [cls.normalize_rglob(include) for include in include_globs]
        return GlobFunction(glob_type=glob_type, includes=include_globs, excludes=exclude_globs)

    def convert_to_sources_list(self, *, use_single_quotes: bool = False) -> str:
        """Render includes + `!`-prefixed excludes as a `sources = [...]` literal."""
        escaped_excludes = [f"!{exclude}" for exclude in self.excludes or ()]
        quote = "'" if use_single_quotes else '"'
        quoted_globs = (f"{quote}{glob}{quote}" for glob in (*self.includes, *escaped_excludes))
        return f"[{', '.join(quoted_globs)}]"
def use_single_quotes(line: str) -> bool:
    """True when the line leans toward single quotes (strictly more ' than ")."""
    return line.count("'") > line.count('"')
def warning_msg(
    *, build_file: Path, lineno: int, field_name: str, replacement: str, script_restriction: str
) -> str:
    """Format the standard "please update manually" warning for one field."""
    parts = (
        f"Could not update {build_file} at line {lineno}. ",
        f"This script {script_restriction}. ",
        f"Please manually update the `{field_name}` field to `{replacement}`.",
    )
    return "".join(parts)
# Human-readable reasons the script refuses to auto-edit a field, keyed by
# scenario; interpolated into warning_msg()'s `script_restriction`.
SCRIPT_RESTRICTIONS = {
    "no_comments": "cannot safely preserve comments",
    "no_bundles": "cannot safely update `bundles` fields",
    "sources_must_be_single_line": (
        "can only safely update the `sources` field when its declared on a single line"
    ),
    "sources_must_be_distinct_line": (
        "can only safely update the `sources` field when it's declared on a new distinct line, "
        "separate from the target type and other fields"
    ),
}
def generate_possibly_new_build(build_file: Path) -> Optional[List[str]]:
    """If any targets use `globs`, `rglobs`, or `zglobs`, this will return a replaced BUILD file.

    Returns the full list of (possibly rewritten) lines, or None when nothing changed.
    May raise on unparseable files; the caller handles that.
    """
    original_text = build_file.read_text()
    original_text_lines = original_text.splitlines()
    updated_text_lines = original_text_lines.copy()
    # Top-level expression-statement calls are assumed to be target definitions.
    targets: List[ast.Call] = [
        target.value
        for target in ast.parse(original_text).body
        if isinstance(target, ast.Expr) and isinstance(target.value, ast.Call)
    ]
    for target in targets:
        bundles_arg: Optional[ast.keyword] = next(
            (
                kwarg
                for kwarg in target.keywords
                if kwarg.arg == "bundles" and isinstance(kwarg.value, ast.List)
            ),
            None,
        )
        if bundles_arg is not None:
            bundle_funcs: List[ast.Call] = [
                element
                for element in bundles_arg.value.elts  # type: ignore[attr-defined]
                if isinstance(element, ast.Call) and element.func.id == "bundle"  # type: ignore[attr-defined]
            ]
            for bundle_func in bundle_funcs:
                # Every `bundle` is guaranteed to have a `fileset` defined.
                fileset_arg: [ast.keyword] = next(  # type: ignore[misc]
                    kwarg for kwarg in bundle_func.keywords if kwarg.arg == "fileset"
                )
                if not isinstance(fileset_arg.value, ast.Call):
                    continue
                parsed_glob_function = GlobFunction.parse(fileset_arg.value, build_file=build_file)
                if parsed_glob_function is None:
                    continue
                lineno = fileset_arg.value.lineno
                original_line = updated_text_lines[lineno - 1].rstrip()
                formatted_replacement = parsed_glob_function.convert_to_sources_list(
                    use_single_quotes=use_single_quotes(original_line),
                )
                # Bundles are never rewritten automatically — only warn with the
                # suggested replacement so the user can apply it by hand.
                logging.warning(
                    warning_msg(
                        build_file=build_file,
                        lineno=lineno,
                        field_name="bundle(fileset=)",
                        replacement=formatted_replacement,
                        script_restriction=SCRIPT_RESTRICTIONS["no_bundles"],
                    )
                )
        sources_arg: Optional[ast.keyword] = next(
            (kwarg for kwarg in target.keywords if kwarg.arg == "sources"), None
        )
        # Only `sources=globs(...)`-style values need rewriting.
        if not sources_arg or not isinstance(sources_arg.value, ast.Call):
            continue
        parsed_glob_function = GlobFunction.parse(sources_arg.value, build_file=build_file)
        if parsed_glob_function is None:
            continue
        lineno: int = sources_arg.value.lineno  # type: ignore[no-redef]
        original_line = updated_text_lines[lineno - 1].rstrip()
        formatted_replacement = parsed_glob_function.convert_to_sources_list(
            use_single_quotes=use_single_quotes(original_line),
        )
        sources_warning = partial(
            warning_msg,
            build_file=build_file,
            lineno=lineno,
            field_name="sources",
            replacement=formatted_replacement,
        )
        # The in-place rewrite is only safe in the simplest layout; warn and
        # skip on comments, multi-line values, or an inline `sources=`.
        if "#" in original_line:
            logging.warning(sources_warning(script_restriction=SCRIPT_RESTRICTIONS["no_comments"]))
            continue
        has_multiple_lines = not (original_line.endswith(")") or original_line[-2:] == "),")
        if has_multiple_lines:
            logging.warning(
                sources_warning(
                    script_restriction=SCRIPT_RESTRICTIONS["sources_must_be_single_line"]
                )
            )
            continue
        prefix = re.match(r"\s*sources\s*=\s*", original_line)
        if not prefix:
            logging.warning(
                sources_warning(
                    script_restriction=SCRIPT_RESTRICTIONS["sources_must_be_distinct_line"]
                )
            )
            continue
        updated_text_lines[lineno - 1] = f"{prefix[0]}{formatted_replacement},"
    return updated_text_lines if updated_text_lines != original_text_lines else None
def generate_diff(build_file: Path, new_content: List[str]) -> str:
    """Return a unified diff between the BUILD file on disk and *new_content*,
    with additions in green and removals in red (ANSI escape codes)."""

    def colored(code: str, s: str) -> str:
        return f"\x1b[{code}m{s}\x1b[0m"

    diff_lines = unified_diff(
        build_file.read_text().splitlines(),
        new_content,
        fromfile=str(build_file),
        tofile=str(build_file),
    )
    pieces: List[str] = []
    for line in diff_lines:
        is_header = line.startswith("+++") or line.startswith("---") or line.startswith("@@ ")
        if line.startswith("+") and not line.startswith("+++"):
            pieces.append(colored("32", line))
        elif line.startswith("-") and not line.startswith("---"):
            pieces.append(colored("31", line))
        else:
            pieces.append(line)
        # Header lines already carry a trailing newline from unified_diff;
        # content lines do not, so add one here.
        if not is_header:
            pieces.append("\n")
    return "".join(pieces)
if __name__ == "__main__":
logging.basicConfig(format="[%(levelname)s]: %(message)s")
try:
main()
except KeyboardInterrupt:
pass
|
workflows/pipe-templates/__COMMON/luigi/src/@.py | msleprosy/cloud-pipeline | 126 | 81112 | # Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import luigi
from luigi.util import inherits
import pipeline
from pipeline import LogEntry, TaskStatus
class DefaultPipeline(pipeline.Pipeline):
    """Minimal template pipeline: its only requirement is a single Task."""

    def requires(self):
        # clone() so Task receives this pipeline's parameters.
        yield self.clone(Task)
@inherits(DefaultPipeline)
class Task(pipeline.HelperTask):
    """Template task: logs a RUNNING event and writes a completion marker file."""

    # Run as a regular pipeline step, not a helper.
    helper = False

    def output(self):
        # Completion marker: luigi considers the task done once this file exists.
        return luigi.LocalTarget("./tmp.txt")

    def run(self):
        self.log_event(LogEntry(self.run_id,
                                TaskStatus.RUNNING, "Running luigi pipeline",
                                self.__repr__(),
                                self.uu_name))
        with open(self.output().path, "w") as result:
            result.write("Running luigi pipeline")
if __name__ == '__main__':
    # Bug fix: `sys` was used below without ever being imported, so a failed
    # run raised NameError instead of exiting with a non-zero status.
    import sys

    val = luigi.run()
    if not val:
        sys.exit(1)
|
f5/bigip/tm/sys/crypto.py | nghia-tran/f5-common-python | 272 | 81120 | # coding=utf-8
#
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""BIG-IP® system config module
REST URI
``http://localhost/mgmt/tm/sys/config``
GUI Path
N/A
REST Kind
``tm:sys:config:*``
"""
from f5.bigip.mixins import CommandExecutionMixin
from f5.bigip.resource import Collection
from f5.bigip.resource import OrganizingCollection
from f5.bigip.resource import Resource
class Crypto(OrganizingCollection):
    """BIG-IP® crypto organizing collection (exposes certs and keys)."""

    def __init__(self, sys):
        super(Crypto, self).__init__(sys)
        # Sub-collections reachable lazily as .certs / .keys
        self._meta_data['allowed_lazy_attributes'] = [
            Certs,
            Keys
        ]
class Keys(Collection, CommandExecutionMixin):
    """BIG-IP® Crypto key collection

    note::

        This collection supports install command.
        Given the fact that we will be expecting hyphen
        parameters, the function will need to utilize
        variable keyword argument syntax. In other words
        define a dictionary with the arbitrary keys and
        then pass it as in the form **foo into the method
        call. e.g.

        param_set ={'from-local-file': FOOPATH, 'name': 'FOOKEY'}
        bigip.tm.sys.crypto.keys.exec_cmd('install', **param_set)
    """

    def __init__(self, crypto):
        super(Keys, self).__init__(crypto)
        self._meta_data['allowed_lazy_attributes'] = [Key]
        # Register `install` so exec_cmd('install', ...) is permitted.
        self._meta_data['allowed_commands'].append('install')
        self._meta_data['attribute_registry'] =\
            {'tm:sys:crypto:key:keystate': Key}
class Key(Resource):
    """BIG-IP® Crypto key resource"""

    def __init__(self, keys):
        super(Key, self).__init__(keys)
        # REST kind used to validate server responses for this resource.
        self._meta_data['required_json_kind'] = 'tm:sys:crypto:key:keystate'
class Certs(Collection, CommandExecutionMixin):
    """BIG-IP® Crypto cert collection

    note::

        This collection supports install command.
        Given the fact that we will be expecting hyphen
        parameters, the function will need to utilize
        variable keyword argument syntax. In other words
        define a dictionary with the arbitrary keys and
        then pass it as in the form **foo into the method
        call. e.g.

        param_set ={'from-local-file': FOOPATH, 'name': 'FOOCERT'}
        bigip.tm.sys.crypto.certs.exec_cmd('install', **param_set)
    """

    def __init__(self, crypto):
        super(Certs, self).__init__(crypto)
        self._meta_data['allowed_lazy_attributes'] = [Cert]
        # Register `install` so exec_cmd('install', ...) is permitted.
        self._meta_data['allowed_commands'].append('install')
        self._meta_data['attribute_registry'] =\
            {'tm:sys:crypto:cert:certstate': Cert}
class Cert(Resource):
    """BIG-IP® Crypto cert resource"""

    def __init__(self, certs):
        super(Cert, self).__init__(certs)
        # REST kind used to validate server responses for this resource.
        self._meta_data['required_json_kind'] = 'tm:sys:crypto:cert:certstate'
|
tests/misc/test_utils.py | lpd6375/qlib | 8,637 | 81121 | from unittest.case import TestCase
import unittest
import pandas as pd
import numpy as np
from datetime import datetime
from qlib import init
from qlib.config import C
from qlib.log import TimeInspector
from qlib.utils.time import cal_sam_minute as cal_sam_minute_new, get_min_cal
def cal_sam_minute(x, sam_minutes):
    """
    Sample raw calendar into calendar with sam_minutes freq, shift represents the shift minute the market time
        - open time of stock market is [9:30 - shift*pd.Timedelta(minutes=1)]
        - mid close time of stock market is [11:29 - shift*pd.Timedelta(minutes=1)]
        - mid open time of stock market is [13:00 - shift*pd.Timedelta(minutes=1)]
        - close time of stock market is [14:59 - shift*pd.Timedelta(minutes=1)]
    """
    # TODO: actually, this version is much faster when no cache or optimization
    day_time = pd.Timestamp(x.date())
    shift = C.min_data_shift

    # Session boundaries (9:30-11:29 and 13:00-14:59), shifted per config.
    open_time = day_time + pd.Timedelta(hours=9, minutes=30) - shift * pd.Timedelta(minutes=1)
    mid_close_time = day_time + pd.Timedelta(hours=11, minutes=29) - shift * pd.Timedelta(minutes=1)
    mid_open_time = day_time + pd.Timedelta(hours=13, minutes=00) - shift * pd.Timedelta(minutes=1)
    close_time = day_time + pd.Timedelta(hours=14, minutes=59) - shift * pd.Timedelta(minutes=1)

    # Map the timestamp to a 0-239 minute index: 0-119 morning, 120-239 afternoon.
    if open_time <= x <= mid_close_time:
        minute_index = (x - open_time).seconds // 60
    elif mid_open_time <= x <= close_time:
        minute_index = (x - mid_open_time).seconds // 60 + 120
    else:
        raise ValueError("datetime of calendar is out of range")

    # Floor the index onto the sam_minutes sampling grid.
    minute_index = minute_index // sam_minutes * sam_minutes

    # Translate the sampled index back into an absolute timestamp.
    if 0 <= minute_index < 120:
        return open_time + minute_index * pd.Timedelta(minutes=1)
    elif 120 <= minute_index < 240:
        return mid_open_time + (minute_index - 120) * pd.Timedelta(minutes=1)
    else:
        raise ValueError("calendar minute_index error, check `min_data_shift` in qlib.config.C")
class TimeUtils(TestCase):
    """Checks cal_sam_minute_new against the reference implementation, then times both."""

    @classmethod
    def setUpClass(cls):
        # qlib must be initialized so the global config C is populated.
        init()

    def test_cal_sam_minute(self):
        # test the correctness of the code
        random_n = 1000
        cal = get_min_cal()

        def gen_args():
            # Random minutes drawn from the trading calendar, each paired with
            # a random sampling frequency.
            for time in np.random.choice(cal, size=random_n, replace=True):
                sam_minutes = np.random.choice([1, 2, 3, 4, 5, 6])
                dt = pd.Timestamp(
                    datetime(
                        2021,
                        month=3,
                        day=3,
                        hour=time.hour,
                        minute=time.minute,
                        second=time.second,
                        microsecond=time.microsecond,
                    )
                )
                args = dt, sam_minutes
                yield args

        # The optimized implementation must agree with the reference everywhere.
        for args in gen_args():
            assert cal_sam_minute(*args) == cal_sam_minute_new(*args)

        # test the performance of the code
        args_l = list(gen_args())
        with TimeInspector.logt():
            for args in args_l:
                cal_sam_minute(*args)
        with TimeInspector.logt():
            for args in args_l:
                cal_sam_minute_new(*args)
if __name__ == "__main__":
unittest.main()
|
src/bls/python-impl/hkdf.py | chasingkirkjufell/navcoin-core | 103 | 81143 | <reponame>chasingkirkjufell/navcoin-core
from math import ceil
import hmac
import hashlib
# HKDF-SHA256 (RFC 5869): SHA-256 yields 32 bytes per output block.
BLOCK_SIZE = 32


def extract(salt: bytes, ikm: bytes) -> bytes:
    """HKDF-Extract: derive a pseudorandom key from the input keying material."""
    return hmac.new(salt, ikm, hashlib.sha256).digest()


def expand(L: int, prk: bytes, info: bytes) -> bytes:
    """HKDF-Expand: stretch *prk* into L bytes of output keying material."""
    num_blocks: int = ceil(L / BLOCK_SIZE)
    okm = bytearray()
    block: bytes = b""
    for counter in range(1, num_blocks + 1):
        # T(i) = HMAC(prk, T(i-1) || info || i), with T(0) the empty string.
        block = hmac.new(prk, block + info + bytes([counter]), hashlib.sha256).digest()
        okm.extend(block[: min(L - len(okm), BLOCK_SIZE)])
    assert len(okm) == L
    return bytes(okm)


def extract_expand(L: int, key: bytes, salt: bytes, info: bytes) -> bytes:
    """Full HKDF: extract a PRK from *key*/*salt*, then expand it to L bytes."""
    return expand(L, extract(salt, key), info)
"""
Copyright 2020 Chia Network Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
|
openmc/data/nbody.py | janmalec/openmc | 262 | 81147 | from numbers import Real, Integral
import numpy as np
import openmc.checkvalue as cv
from .angle_energy import AngleEnergy
from .endf import get_cont_record
class NBodyPhaseSpace(AngleEnergy):
    """N-body phase space distribution

    Parameters
    ----------
    total_mass : float
        Total mass of product particles
    n_particles : int
        Number of product particles
    atomic_weight_ratio : float
        Atomic weight ratio of target nuclide
    q_value : float
        Q value for reaction in eV

    Attributes
    ----------
    total_mass : float
        Total mass of product particles
    n_particles : int
        Number of product particles
    atomic_weight_ratio : float
        Atomic weight ratio of target nuclide
    q_value : float
        Q value for reaction in eV

    """

    def __init__(self, total_mass, n_particles, atomic_weight_ratio, q_value):
        # All assignments go through the validating setters below.
        self.total_mass = total_mass
        self.n_particles = n_particles
        self.atomic_weight_ratio = atomic_weight_ratio
        self.q_value = q_value

    @property
    def total_mass(self):
        return self._total_mass

    @property
    def n_particles(self):
        return self._n_particles

    @property
    def atomic_weight_ratio(self):
        return self._atomic_weight_ratio

    @property
    def q_value(self):
        return self._q_value

    @total_mass.setter
    def total_mass(self, total_mass):
        # Must be a strictly positive real number.
        name = 'N-body phase space total mass'
        cv.check_type(name, total_mass, Real)
        cv.check_greater_than(name, total_mass, 0.)
        self._total_mass = total_mass

    @n_particles.setter
    def n_particles(self, n_particles):
        # Must be a strictly positive integer.
        name = 'N-body phase space number of particles'
        cv.check_type(name, n_particles, Integral)
        cv.check_greater_than(name, n_particles, 0)
        self._n_particles = n_particles

    @atomic_weight_ratio.setter
    def atomic_weight_ratio(self, atomic_weight_ratio):
        name = 'N-body phase space atomic weight ratio'
        cv.check_type(name, atomic_weight_ratio, Real)
        cv.check_greater_than(name, atomic_weight_ratio, 0.0)
        self._atomic_weight_ratio = atomic_weight_ratio

    @q_value.setter
    def q_value(self, q_value):
        # Only type-checked: a Q value may legitimately be negative.
        name = 'N-body phase space Q value'
        cv.check_type(name, q_value, Real)
        self._q_value = q_value

    def to_hdf5(self, group):
        """Write distribution to an HDF5 group

        Parameters
        ----------
        group : h5py.Group
            HDF5 group to write to

        """
        # NOTE(review): np.string_ was removed in NumPy 2.0 (np.bytes_ is the
        # replacement alias) — verify against the project's pinned NumPy.
        group.attrs['type'] = np.string_('nbody')
        group.attrs['total_mass'] = self.total_mass
        group.attrs['n_particles'] = self.n_particles
        group.attrs['atomic_weight_ratio'] = self.atomic_weight_ratio
        group.attrs['q_value'] = self.q_value

    @classmethod
    def from_hdf5(cls, group):
        """Generate N-body phase space distribution from HDF5 data

        Parameters
        ----------
        group : h5py.Group
            HDF5 group to read from

        Returns
        -------
        openmc.data.NBodyPhaseSpace
            N-body phase space distribution

        """
        total_mass = group.attrs['total_mass']
        n_particles = group.attrs['n_particles']
        awr = group.attrs['atomic_weight_ratio']
        q_value = group.attrs['q_value']
        return cls(total_mass, n_particles, awr, q_value)

    @classmethod
    def from_ace(cls, ace, idx, q_value):
        """Generate N-body phase space distribution from ACE data

        Parameters
        ----------
        ace : openmc.data.ace.Table
            ACE table to read from
        idx : int
            Index in XSS array of the start of the energy distribution data
            (LDIS + LOCC - 1)
        q_value : float
            Q-value for reaction in eV

        Returns
        -------
        openmc.data.NBodyPhaseSpace
            N-body phase space distribution

        """
        # XSS layout at idx: [n_particles, total_mass, ...]
        n_particles = int(ace.xss[idx])
        total_mass = ace.xss[idx + 1]
        return cls(total_mass, n_particles, ace.atomic_weight_ratio, q_value)

    @classmethod
    def from_endf(cls, file_obj):
        """Generate N-body phase space distribution from an ENDF evaluation

        Parameters
        ----------
        file_obj : file-like object
            ENDF file positions at the start of the N-body phase space
            distribution

        Returns
        -------
        openmc.data.NBodyPhaseSpace
            N-body phase space distribution

        """
        items = get_cont_record(file_obj)
        total_mass = items[0]
        n_particles = items[5]
        # TODO: get awr and Q value
        return cls(total_mass, n_particles, 1.0, 0.0)
|
template.py | ardovm/wxGlade | 225 | 81152 | <reponame>ardovm/wxGlade<gh_stars>100-1000
"""
Handles the template tags and description
@copyright: 2002-2007 <NAME>
@author: <NAME>
@author: <NAME>
@copyright: 2016 <NAME>
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
import glob
import logging
import os
import wx
from xml.dom import minidom
from xml.sax import saxutils
import common
import config
import misc
import templates_ui
class Template(object):
    "A class that handles the specific aspects of template files"

    def __init__(self, filename=None):
        # Defaults for a template with no backing file.
        self.author = ''
        self.description = ''
        self.instructions = ''
        self.filename = filename
        if filename is None:
            return
        filexml = minidom.parse(filename)
        # we have no use for all the xml data in the file. We only care
        # about what is between the "description" tags
        templatedata = filexml.getElementsByTagName('templatedata')
        if not templatedata:
            return
        desc_xml = templatedata[0]
        for attr in ('author', 'description', 'instructions'):
            try:
                value = saxutils.unescape(
                    desc_xml.getElementsByTagName(attr)[0].firstChild.data)
            except (IndexError, AttributeError):
                # Missing or empty tag: keep the empty-string default.
                value = ''
            setattr(self, attr, value)

    def write(self, output, tabs):
        """Append the XML form of the template metadata to the *output* list."""
        outer_tab = u' ' * tabs
        stmt = [u'%s<templatedata>\n' % outer_tab]
        for attr in (u'author', u'description', u'instructions'):
            stmt += common.format_xml_tag(attr, getattr(self, attr), tabs + 1)
        stmt.append(u'%s</templatedata>\n' % outer_tab)
        output.extend(stmt)
class TemplateListDialog(templates_ui.TemplateListDialog):
    """Dialog listing available templates alongside their metadata.

    Used both to pick a template to open (select_template) and to manage
    templates (manage_templates); callers hide the buttons they don't need.
    """

    def __init__(self):
        templates_ui.TemplateListDialog.__init__(self, None, -1, "")
        self.templates = []
        self.fill_template_list()
        self.selected_template = None

    def get_selected(self):
        # Map the listbox selection back to the template file path, or None.
        index = self.template_names.GetSelection()
        if index >= 0:
            return self.templates[index]
        else:
            return None

    def on_open(self, event):
        self.selected_template = self.get_selected()
        self.EndModal(wx.ID_OPEN)

    def on_select_template(self, event):
        # Refresh the detail pane with the metadata of the selected template.
        self.selected_template = self.get_selected()
        if self.selected_template is not None:
            t = Template(self.selected_template)
            self.set_template_name(self.template_names.GetStringSelection())
            self.author.SetValue(misc.wxstr(t.author))
            self.description.SetValue(misc.wxstr(t.description))
            self.instructions.SetValue(misc.wxstr(t.instructions))
            # Built-in templates (those in config.templates_path) are read-only.
            if os.path.dirname(self.selected_template) == config.templates_path:
                self.btn_delete.Disable()
                self.btn_edit.Disable()
            else:
                self.btn_delete.Enable()
                self.btn_edit.Enable()
        else:
            self.set_template_name("")
            self.author.SetValue("")
            self.description.SetValue("")
            self.instructions.SetValue("")
        if event:
            event.Skip()

    def set_template_name(self, name):
        self.template_name.SetLabel(_("wxGlade template:\n") + misc.wxstr(name))

    def on_edit(self, event):
        self.selected_template = self.get_selected()
        self.EndModal(wx.ID_EDIT)

    def on_delete(self, event):
        self.selected_template = self.get_selected()
        if self.selected_template is not None:
            name = self.template_names.GetStringSelection()
            # Confirm before removing the template file from disk.
            if wx.MessageBox( _("Delete template '%s'?") % misc.wxstr(name),
                              _("Are you sure?"), style=wx.YES|wx.NO|wx.CENTRE) == wx.YES:
                try:
                    os.unlink(self.selected_template)
                except Exception:
                    logging.exception(_('Internal Error'))
            # Refresh the list so a removed entry disappears.
            self.fill_template_list()
            self.selected_template = None

    def fill_template_list(self):
        self.templates = load_templates()
        self.template_names.Clear()
        if self.templates:
            # List entries are the bare template names, without path or extension.
            for n in self.templates:
                self.template_names.Append( os.path.splitext(os.path.basename(n))[0] )
            # show details of first template
            self.template_names.SetSelection(0)
            self.on_select_template(None)

# end of class TemplateListDialog
def load_templates():
    "Finds all the available templates"
    extra = []
    # Per-user templates live under appdata when it differs from the install dir.
    if config.appdata_path != config.wxglade_path:
        extra = glob.glob( os.path.join(config.appdata_path, "templates", "*.wgt") )
    builtin = glob.glob(os.path.join(config.templates_path, "*.wgt"))
    return sorted(builtin) + sorted(extra)
def select_template():
    "Returns the filename of a template to load"
    dlg = TemplateListDialog()
    # Selection-only mode: hide the management buttons.
    dlg.btn_delete.Hide()
    dlg.btn_edit.Hide()
    if dlg.ShowModal() == wx.ID_OPEN:
        ret = dlg.selected_template
    else:
        ret = None
    dlg.Destroy()
    return ret
def save_template(data=None):
    "Returns an out file name and template description for saving a template"
    dlg = templates_ui.TemplateInfoDialog(None, -1, "")
    if data is not None:
        # Pre-fill the dialog with the metadata of the template being re-saved.
        dlg.template_name.SetValue( misc.wxstr(os.path.basename(os.path.splitext(data.filename)[0])) )
        dlg.author.SetValue(misc.wxstr(data.author))
        dlg.description.SetValue(misc.wxstr(data.description))
        dlg.instructions.SetValue(misc.wxstr(data.instructions))
    ret = None
    retdata = Template()
    if dlg.ShowModal() == wx.ID_OK:
        ret = dlg.template_name.GetValue().strip()
        retdata.author = dlg.author.GetValue()
        retdata.description = dlg.description.GetValue()
        retdata.instructions = dlg.instructions.GetValue()
        if not ret:
            # An empty name aborts the save (ret stays falsy below).
            wx.MessageBox( _("Can't save a template with an empty name"), _("Error"), wx.OK|wx.ICON_ERROR )
    dlg.Destroy()
    name = ret
    if ret:
        # User templates are stored in the per-user appdata directory.
        template_directory = os.path.join(config.appdata_path, 'templates')
        if not os.path.exists(template_directory):
            try:
                os.makedirs(template_directory)
            except EnvironmentError:
                logging.exception( _('ERROR creating directory "%s"'), template_directory )
                return None, retdata
        ret = os.path.join(template_directory, ret + '.wgt')
    # Ask before overwriting an existing template of the same name.
    if ret and os.path.exists(ret) and \
       wx.MessageBox( _("A template called '%s' already exists:\ndo you want to overwrite it?") % name,
                      _("Question"), wx.YES|wx.NO|wx.ICON_QUESTION) != wx.YES:
        ret = None
    return ret, retdata
def manage_templates():
    """Show the template list dialog for management; return the template
    selected for editing, or None."""
    dlg = TemplateListDialog()
    # management mode: opening a template is not offered here
    dlg.btn_open.Hide()
    #dlg.btn_edit.Hide()
    edit_requested = dlg.ShowModal() == templates_ui.ID_EDIT
    selected = dlg.selected_template if edit_requested else None
    dlg.Destroy()
    return selected
|
tests/test_property.py | DeathGOD7/pythonnet | 3,183 | 81168 | # -*- coding: utf-8 -*-
"""Test CLR property support."""
import pytest
from Python.Test import PropertyTest
def test_public_instance_property():
    """Test public instance properties."""
    instance = PropertyTest()
    assert instance.PublicProperty == 0
    instance.PublicProperty = 1
    assert instance.PublicProperty == 1
    # CLR properties cannot be deleted from Python
    with pytest.raises(TypeError):
        delattr(PropertyTest(), 'PublicProperty')
def test_public_static_property():
    """Test public static properties."""
    instance = PropertyTest()
    # static properties are reachable through the class and instances alike
    assert PropertyTest.PublicStaticProperty == 0
    PropertyTest.PublicStaticProperty = 1
    assert PropertyTest.PublicStaticProperty == 1
    assert instance.PublicStaticProperty == 1
    instance.PublicStaticProperty = 0
    assert instance.PublicStaticProperty == 0
    # deletion is rejected whether attempted on the class or an instance
    for delete in (
        lambda: delattr(PropertyTest, 'PublicStaticProperty'),
        lambda: delattr(PropertyTest(), 'PublicStaticProperty'),
    ):
        with pytest.raises(TypeError):
            delete()
def test_protected_instance_property():
    """Test protected instance properties."""
    instance = PropertyTest()
    # protected properties are readable and writable from Python
    assert instance.ProtectedProperty == 0
    instance.ProtectedProperty = 1
    assert instance.ProtectedProperty == 1
    # but, like all CLR properties, not deletable
    with pytest.raises(TypeError):
        delattr(PropertyTest(), 'ProtectedProperty')
def test_protected_static_property():
    """Test protected static properties."""
    instance = PropertyTest()
    # accessible through the class and instances alike
    assert PropertyTest.ProtectedStaticProperty == 0
    PropertyTest.ProtectedStaticProperty = 1
    assert PropertyTest.ProtectedStaticProperty == 1
    assert instance.ProtectedStaticProperty == 1
    instance.ProtectedStaticProperty = 0
    assert instance.ProtectedStaticProperty == 0
    # deletion is rejected on both the class and an instance
    for delete in (
        lambda: delattr(PropertyTest, 'ProtectedStaticProperty'),
        lambda: delattr(PropertyTest(), 'ProtectedStaticProperty'),
    ):
        with pytest.raises(TypeError):
            delete()
def test_internal_property():
    """Test internal properties."""
    # internal members are not exposed to Python at all
    for fetch in (
        lambda: PropertyTest().InternalProperty,
        lambda: PropertyTest().InternalStaticProperty,
        lambda: PropertyTest.InternalStaticProperty,
    ):
        with pytest.raises(AttributeError):
            fetch()
def test_private_property():
    """Test private properties."""
    # private members are not exposed to Python at all
    for fetch in (
        lambda: PropertyTest().PrivateProperty,
        lambda: PropertyTest().PrivateStaticProperty,
        lambda: PropertyTest.PrivateStaticProperty,
    ):
        with pytest.raises(AttributeError):
            fetch()
def test_property_descriptor_get_set():
    """Test property descriptor get / set."""
    # This test ensures that setting an attribute implemented with
    # a descriptor actually goes through the descriptor (rather than
    # silently replacing the descriptor in the instance or type dict).
    instance = PropertyTest()
    assert PropertyTest.PublicStaticProperty == 0
    assert instance.PublicStaticProperty == 0

    def descriptor_intact():
        # if a set replaced the descriptor, the dict entry would be an int
        return type(PropertyTest.__dict__['PublicStaticProperty']) != int

    assert descriptor_intact()
    instance.PublicStaticProperty = 0
    assert descriptor_intact()
    PropertyTest.PublicStaticProperty = 0
    assert descriptor_intact()
def test_property_descriptor_wrong_type():
    """Test setting a property using a value of the wrong type."""
    # Construct the object OUTSIDE the raises block: previously the
    # constructor ran inside it, so a TypeError from construction would
    # have satisfied the test without ever exercising the assignment.
    ob = PropertyTest()
    with pytest.raises(TypeError):
        ob.PublicProperty = "spam"
def test_property_descriptor_abuse():
    """Test property descriptor abuse."""
    desc = PropertyTest.__dict__['PublicProperty']
    # calling the raw descriptor with bogus arguments must fail cleanly
    for misuse in (lambda: desc.__get__(0, 0), lambda: desc.__set__(0, 0)):
        with pytest.raises(TypeError):
            misuse()
def test_interface_property():
    """Test properties of interfaces. Added after a bug report
    that an IsAbstract check was inappropriate and prevented
    use of properties when only the interface is known."""
    from System.Collections import Hashtable, ICollection

    table = Hashtable()
    # access the Count property through the interface wrapper only
    as_collection = ICollection(table)
    assert as_collection.Count == 0
|
Filters/Hybrid/Testing/Python/TestFacetReader.py | forestGzh/VTK | 1,755 | 81190 | <reponame>forestGzh/VTK
#!/usr/bin/env python
# Regression test: read a .facet file and render it with a LOD actor.
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Renderer / render window / interactor pipeline.
ren1 = vtk.vtkRenderer()
ren1.SetBackground(0,0,0)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
renWin.SetSize(300,300)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Read the clown model from the data directory in facet format.
facet0 = vtk.vtkFacetReader()
facet0.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/clown.facet")
Mapper5 = vtk.vtkPolyDataMapper()
Mapper5.SetInputConnection(facet0.GetOutputPort())
Mapper5.UseLookupTableScalarRangeOff()
Mapper5.SetScalarVisibility(1)
Mapper5.SetScalarModeToDefault()
# Level-of-detail actor with mostly-diffuse white shading.
Actor5 = vtk.vtkLODActor()
Actor5.SetMapper(Mapper5)
Actor5.GetProperty().SetRepresentationToSurface()
Actor5.GetProperty().SetInterpolationToGouraud()
Actor5.GetProperty().SetAmbient(0.15)
Actor5.GetProperty().SetDiffuse(0.85)
Actor5.GetProperty().SetSpecular(0.1)
Actor5.GetProperty().SetSpecularPower(100)
Actor5.GetProperty().SetSpecularColor(1,1,1)
Actor5.GetProperty().SetColor(1,1,1)
Actor5.SetNumberOfCloudPoints(30000)
ren1.AddActor(Actor5)
# Fixed camera so the regression image is reproducible.
camera = vtk.vtkCamera()
camera.SetClippingRange(3,6)
camera.SetFocalPoint(.1,.03,-.5)
camera.SetPosition(4.4,-0.5,-.5)
camera.SetViewUp(0,0,-1)
ren1.SetActiveCamera(camera)
# enable user interface interactor
#iren SetUserMethod {wm deiconify .vtkInteract}
iren.Initialize()
# prevent the tk window from showing up then start the event loop
# --- end of script --
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.