hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cec890183bbac8661c59286f3a8b1f4c191a7210 | 19,239 | py | Python | pyredner/shape.py | yihang99/redner | 4c7e1b5c7eb2be079f4eb99e794aa6609de887f5 | [
"MIT"
] | null | null | null | pyredner/shape.py | yihang99/redner | 4c7e1b5c7eb2be079f4eb99e794aa6609de887f5 | [
"MIT"
] | null | null | null | pyredner/shape.py | yihang99/redner | 4c7e1b5c7eb2be079f4eb99e794aa6609de887f5 | [
"MIT"
] | null | null | null | import pyredner
import torch
import math
import redner
from typing import Optional
def compute_vertex_normal(vertices: torch.Tensor,
                          indices: torch.Tensor,
                          weighting_scheme: str = 'max'):
    """
    Compute vertex normal by weighted average of nearby face normals.

    Args
    ====
    vertices: torch.Tensor
        3D position of vertices.
        float32 tensor with size num_vertices x 3
    indices: torch.Tensor
        Vertex indices of triangle faces.
        int32 tensor with size num_triangles x 3
    weighting_scheme: str
        How do we compute the weighting. Currently we support two weighting methods:
        'max' and 'cotangent'.
        'max' corresponds to Nelson Max's algorithm that uses the inverse length and sine of the angle as the weight
        (see `Weights for Computing Vertex Normals from Facet Vectors <https://escholarship.org/content/qt7657d8h3/qt7657d8h3.pdf?t=ptt283>`_),
        'cotangent' corresponds to weights derived through a discretization of the gradient of triangle area
        (see, e.g., "Implicit Fairing of Irregular Meshes using Diffusion and Curvature Flow" from Desbrun et al.)

    Returns
    =======
    torch.Tensor
        float32 Tensor with size num_vertices x 3 representing vertex normal
    """
    def dot(v1, v2):
        # Row-wise dot product of two (N, 3) tensors -> (N,)
        return torch.sum(v1 * v2, dim = 1)
    def squared_length(v):
        return torch.sum(v * v, dim = 1)
    def length(v):
        return torch.sqrt(squared_length(v))
    def safe_asin(v):
        # Hack: asin(1)' is infinite, so we want to clamp the contribution
        return torch.asin(v.clamp(0, 1-1e-6))
    # XXX: This whole thing is inefficient but it's PyTorch's limitation
    normals = torch.zeros(vertices.shape, dtype = torch.float32, device = vertices.device)
    # Gather the three corner positions of every triangle: v[k] is (num_triangles, 3).
    v = [vertices[indices[:, 0].long(), :],
         vertices[indices[:, 1].long(), :],
         vertices[indices[:, 2].long(), :]]
    if weighting_scheme == 'max':
        for i in range(3):
            v0 = v[i]
            v1 = v[(i + 1) % 3]
            v2 = v[(i + 2) % 3]
            e1 = v1 - v0
            e2 = v2 - v0
            e1_len = length(e1)
            e2_len = length(e2)
            side_a = e1 / torch.reshape(e1_len, [-1, 1])
            side_b = e2 / torch.reshape(e2_len, [-1, 1])
            if i == 0:
                # The face normal is the same for all three corner rotations,
                # so it is computed once on the first iteration. dim=1 is
                # passed explicitly: torch.cross's implicit dimension search
                # is deprecated.
                n = torch.cross(side_a, side_b, dim = 1)
                # Normalize; degenerate (zero-area) faces get a zero normal.
                n = torch.where(length(n).reshape(-1, 1).expand(-1, 3) > 0,
                    n / torch.reshape(length(n), [-1, 1]),
                    torch.zeros(n.shape, dtype=n.dtype, device=n.device))
            # numerically stable angle between two unit direction vectors
            # http://www.plunk.org/~hatch/rightway.php
            # Plain Python float for pi: torch.tensor(math.pi) always lives on
            # the CPU and would break torch.where for CUDA inputs.
            angle = torch.where(dot(side_a, side_b) < 0,
                math.pi - 2.0 * safe_asin(0.5 * length(side_a + side_b)),
                2.0 * safe_asin(0.5 * length(side_b - side_a)))
            sin_angle = torch.sin(angle)
            e1e2 = e1_len * e2_len
            # contrib is 0 when e1e2 is 0 (degenerate faces contribute nothing)
            contrib = torch.where(e1e2.reshape(-1, 1).expand(-1, 3) > 0,
                n * (sin_angle / e1e2).reshape(-1, 1).expand(-1, 3),
                torch.zeros(n.shape, dtype=torch.float32, device=vertices.device))
            index = indices[:, i].long().reshape(-1, 1).expand(-1, 3)
            normals.scatter_add_(0, index, contrib)
        # Assign 0, 0, 1 to degenerate faces
        degenerate_normals = torch.zeros(normals.shape, dtype = torch.float32, device = vertices.device)
        degenerate_normals[:, 2] = 1.0
        normals = torch.where(length(normals).reshape(-1, 1).expand(-1, 3) > 0,
            normals / torch.reshape(length(normals), [-1, 1]),
            degenerate_normals)
    elif weighting_scheme == 'cotangent':
        # Cotangent weighting generates 0-length normal when
        # the local surface is planar. Prepare weighted average normal
        # computed using Nelson Max's algorithm for those cases.
        max_normal = compute_vertex_normal(vertices, indices, 'max')
        for i in range(3):
            # Loop over each pair of edges sharing the same vertex,
            # compute the cotangent and contribute to the third edge.
            v0 = v[i]
            v1 = v[(i + 1) % 3]
            v2 = v[(i + 2) % 3]
            e1 = v1 - v0
            e2 = v2 - v0
            e1_len = length(e1)
            e2_len = length(e2)
            side_a = e1 / torch.reshape(e1_len, [-1, 1])
            side_b = e2 / torch.reshape(e2_len, [-1, 1])
            # (An unused face-normal computation was removed here: the
            # cotangent scheme needs only the angle, never the face normal.)
            # numerically stable angle between two unit direction vectors
            # http://www.plunk.org/~hatch/rightway.php
            angle = torch.where(dot(side_a, side_b) < 0,
                math.pi - 2.0 * safe_asin(0.5 * length(side_a + side_b)),
                2.0 * safe_asin(0.5 * length(side_b - side_a)))
            # Python float literal keeps the division on the input device
            # (torch.tensor(1.0) would allocate on the CPU).
            cotangent = 1.0 / torch.tan(angle)
            v1_index = indices[:, (i + 1) % 3].long().reshape(-1, 1).expand(-1, 3)
            v2_index = indices[:, (i + 2) % 3].long().reshape(-1, 1).expand(-1, 3)
            contrib = (v2 - v1) * cotangent.reshape([-1, 1])
            normals.scatter_add_(0, v1_index, contrib)
            normals.scatter_add_(0, v2_index, -contrib)
        # Make sure the normals are pointing at the right direction
        normals = torch.where(dot(normals, max_normal).reshape(-1, 1).expand(-1, 3) > 0, normals, -normals)
        normals = torch.where(length(normals).reshape(-1, 1).expand(-1, 3) > 0.05,
            normals / torch.reshape(length(normals), [-1, 1]),
            max_normal)
    else:
        assert False, 'Unknown weighting scheme: {}'.format(weighting_scheme)
    assert(torch.isfinite(normals).all())
    return normals.contiguous()
def bound_vertices(vertices: torch.Tensor, indices: torch.Tensor):
    """
    Calculate the indices of boundary vertices of a mesh
    and express it in Tensor form.

    Args
    ====
    vertices: torch.Tensor
        3D position of vertices.
        float32 tensor with size num_vertices x 3
    indices: torch.Tensor
        Vertex indices of triangle faces.
        int32 tensor with size num_triangles x 3

    Returns
    =======
    bound: torch.Tensor
        float32 Tensor with size num_vertices representing vertex normal
        bound[i] = 0. if i-th vertices is on boundary of mesh; else 1.
    """
    num_vertices = vertices.size(0)
    # For every corner of every triangle, accumulate the signed difference of
    # the other two corner indices. On a closed fan around an interior vertex
    # these differences cancel out to 0; an open (boundary) fan leaves a
    # non-zero residue.
    accumulator = torch.zeros(num_vertices, device=vertices.device)
    for corner in range(3):
        next_idx = indices[:, (corner + 1) % 3]
        prev_idx = indices[:, (corner + 2) % 3]
        accumulator.scatter_add_(0, indices[:, corner].long(), (prev_idx - next_idx).float())
    interior = torch.ones(num_vertices, device=vertices.device)
    boundary = torch.zeros(num_vertices, device=vertices.device)
    return torch.where(accumulator == 0, interior, boundary)
def smooth(vertices: torch.Tensor,
           indices: torch.Tensor,
           lmd: torch.float32,
           weighting_scheme: str = 'reciprocal',
           control: torch.Tensor = None):
    r"""
    Update positions of vertices in a mesh **in place**. The shift amount of a
    vertex equals lmd times the weighted sum of all edges to its neighbors.
    (Raw docstring: the LaTeX below contains backslash sequences that are not
    valid Python string escapes.)

    $v_i += lmd * \frac {\sum_{j \in neighbors(i)} w_{ij}(v_j - v_i)} {\sum_{j \in neighbors(i)} w_{ij}}$

    Args
    ====
    vertices: torch.Tensor
        3D position of vertices; modified in place via ``vertices.data``.
        float32 tensor with size num_vertices x 3
    indices: torch.Tensor
        Vertex indices of triangle faces.
        int32 tensor with size num_triangles x 3
    lmd: torch.float32
        step length coefficient
    weighting_scheme: str = 'reciprocal'
        Different weighting schemes:
        'reciprocal': (default)
            w[i][j] = 1 / len(v[j] - v[i])
        'uniform':
            w[i][j] = 1
        'cotangent':
            w[i][j] = cot(angle(i-m-j)) + cot(angle(i-n-j))
            m and n are vertices that form triangles with i and j
    control: torch.Tensor
        extra coefficient deciding which vertices to be updated (1 = move,
        0 = freeze). In default case, do not update boundary vertices of
        the mesh:
        control (default) = bound_vertices(vertices, indices)
        type help(pyredner.bound_vertices)
    """
    if control is None:
        control = bound_vertices(vertices, indices)
    else:
        assert control.numel() == vertices.size(0), 'Size of control tensor inconsistent with number of vertices'
    def dot(v1, v2):
        return torch.sum(v1 * v2, dim=1)
    def squared_length(v):
        return torch.sum(v * v, dim=1)
    def length(v):
        return torch.sqrt(squared_length(v))
    def safe_asin(v):
        # Hack: asin(1)' is infinite, so we want to clamp the contribution
        return torch.asin(v.clamp(0, 1 - 1e-6))
    total_contrib = torch.zeros(vertices.shape, dtype=torch.float32, device=vertices.device)
    total_weight_contrib = torch.zeros(vertices.shape, dtype=torch.float32, device=vertices.device)
    # Gather the three corner positions of every triangle.
    v = [vertices[indices[:, 0].long(), :],
         vertices[indices[:, 1].long(), :],
         vertices[indices[:, 2].long(), :]]
    for i in range(3):
        v0 = v[i]
        v1 = v[(i + 1) % 3]
        v2 = v[(i + 2) % 3]
        e1 = v1 - v0
        e2 = v2 - v0
        e1_len = length(e1)
        e2_len = length(e2)
        # XXX: Inefficient but it's PyTorch's limitation
        e1e2 = e1_len * e2_len
        # contrib is 0 when e1e2 is 0 (degenerate triangles are skipped)
        if weighting_scheme == 'reciprocal':
            # w = 1/len(e): the weighted edge w*e simplifies to the unit edge e/len(e).
            contrib = torch.where(e1e2.reshape(-1, 1).expand(-1, 3) > 0,
                                  e1 / e1_len.reshape(-1, 1).expand(-1, 3) +
                                  e2 / e2_len.reshape(-1, 1).expand(-1, 3),
                                  torch.zeros(v0.shape, dtype=torch.float32, device=vertices.device))
            # Python float literals keep the computation on the input device;
            # torch.tensor(1.) would always allocate on the CPU and fail for
            # CUDA meshes.
            weight_contrib = torch.where(e1e2.reshape(-1, 1).expand(-1, 3) > 0,
                                         1.0 / e1_len.reshape(-1, 1).expand(-1, 3) +
                                         1.0 / e2_len.reshape(-1, 1).expand(-1, 3),
                                         torch.zeros(v0.shape, dtype=torch.float32, device=vertices.device))
            index = indices[:, i].long().reshape(-1, 1).expand(-1, 3)
            total_contrib.scatter_add_(0, index, contrib)
            total_weight_contrib.scatter_add_(0, index, weight_contrib)
        elif weighting_scheme == 'uniform':
            contrib = torch.where(e1e2.reshape(-1, 1).expand(-1, 3) > 0,
                                  e1 + e2,
                                  torch.zeros(v0.shape, dtype=torch.float32, device=vertices.device))
            weight_contrib = torch.where(e1e2.reshape(-1, 1).expand(-1, 3) > 0,
                                         2 * torch.ones(v0.shape, dtype=torch.float32, device=vertices.device),
                                         torch.zeros(v0.shape, dtype=torch.float32, device=vertices.device))
            index = indices[:, i].long().reshape(-1, 1).expand(-1, 3)
            total_contrib.scatter_add_(0, index, contrib)
            total_weight_contrib.scatter_add_(0, index, weight_contrib)
        elif weighting_scheme == 'cotangent':
            # (A stray no-op `pass` statement was removed here.)
            side_a = e1 / torch.reshape(e1_len, [-1, 1])
            side_b = e2 / torch.reshape(e2_len, [-1, 1])
            # numerically stable angle between two unit direction vectors
            # http://www.plunk.org/~hatch/rightway.php
            # Plain Python float for pi keeps the expression device-safe.
            angle = torch.where(dot(side_a, side_b) < 0,
                                math.pi - 2.0 * safe_asin(0.5 * length(side_a + side_b)),
                                2.0 * safe_asin(0.5 * length(side_b - side_a)))
            cotangent = 1.0 / torch.tan(angle)
            v1_index = indices[:, (i + 1) % 3].long().reshape(-1, 1).expand(-1, 3)
            v2_index = indices[:, (i + 2) % 3].long().reshape(-1, 1).expand(-1, 3)
            # The angle at v0 weights the opposite edge (v1, v2); contribute
            # symmetrically to both of its endpoints.
            contrib = (v2 - v1) * cotangent.reshape([-1, 1])
            weight_contrib = cotangent.reshape([-1, 1]).expand(-1, 3)
            total_contrib.scatter_add_(0, v1_index, contrib)
            total_contrib.scatter_add_(0, v2_index, -contrib)
            total_weight_contrib.scatter_add_(0, v1_index, weight_contrib)
            total_weight_contrib.scatter_add_(0, v2_index, weight_contrib)
        else:
            assert False, 'Unknown weighting_scheme: {}'.format(weighting_scheme)
    # Normalized Laplacian step; control masks out frozen (e.g. boundary) vertices.
    shift = total_contrib / total_weight_contrib * control.reshape(-1, 1)
    vertices.data += shift * lmd
    return
def compute_uvs(vertices, indices, print_progress = True):
    """
    Compute UV coordinates of a given mesh using a charting algorithm
    with least square conformal mapping. This calls the `xatlas <https://github.com/jpcy/xatlas>`_ library.

    Args
    ====
    vertices: torch.Tensor
        3D position of vertices
        float32 tensor with size num_vertices x 3
    indices: torch.Tensor
        vertex indices of triangle faces.
        int32 tensor with size num_triangles x 3
    print_progress: bool
        whether the native uv-mapper reports progress while charting

    Returns
    =======
    torch.Tensor
        uv vertices pool, float32 Tensor with size num_uv_vertices x 2
    torch.Tensor
        uv indices, int32 Tensor with size num_triangles x 3
    """
    device = vertices.device
    # The native side reads the buffers through raw data_ptr()s, so the
    # tensors must live in host (CPU) memory while it runs.
    vertices = vertices.cpu()
    indices = indices.cpu()
    # Wrap the mesh in the redner binding struct; uvs/uv_indices pointers are
    # null (0) for now because the number of UV vertices is not yet known.
    uv_trimesh = redner.UVTriMesh(redner.float_ptr(vertices.data_ptr()),
                                  redner.int_ptr(indices.data_ptr()),
                                  redner.float_ptr(0),
                                  redner.int_ptr(0),
                                  int(vertices.shape[0]),
                                  0,
                                  int(indices.shape[0]))
    atlas = redner.TextureAtlas()
    # First pass: chart the mesh and learn how many UV vertices are required.
    num_uv_vertices = redner.automatic_uv_map([uv_trimesh], atlas, print_progress)[0]
    # Allocate output buffers and let the native side fill them in place.
    uvs = torch.zeros(num_uv_vertices, 2, dtype=torch.float32)
    uv_indices = torch.zeros_like(indices)
    uv_trimesh.uvs = redner.float_ptr(uvs.data_ptr())
    uv_trimesh.uv_indices = redner.int_ptr(uv_indices.data_ptr())
    uv_trimesh.num_uv_vertices = num_uv_vertices
    redner.copy_texture_atlas(atlas, [uv_trimesh])
    # NOTE(review): the two lines below rebind local names only; the caller's
    # vertices/indices tensors are unaffected, so they appear to be redundant.
    vertices = vertices.to(device)
    indices = indices.to(device)
    # Move the results back to the caller's original device.
    uvs = uvs.to(device)
    uv_indices = uv_indices.to(device)
    return uvs, uv_indices
class Shape:
    """
    redner supports only triangle meshes for now. It stores a pool of
    vertices and access the pool using integer index. Some times the
    two vertices can have the same 3D position but different texture
    coordinates, because UV mapping creates seams and need to duplicate
    vertices. In this case we can use an additional "uv_indices" array
    to access the uv pool.

    Args
    ====
    vertices: torch.Tensor
        3D position of vertices
        float32 tensor with size num_vertices x 3
    indices: torch.Tensor
        vertex indices of triangle faces.
        int32 tensor with size num_triangles x 3
    material_id: int
        index into the scene's material list for this shape
    uvs: Optional[torch.Tensor]:
        optional texture coordinates.
        float32 tensor with size num_uvs x 2
        doesn't need to be the same size with vertices if uv_indices is not None
    normals: Optional[torch.Tensor]
        shading normal
        float32 tensor with size num_normals x 3
        doesn't need to be the same size with vertices if normal_indices is not None
    uv_indices: Optional[torch.Tensor]
        overrides indices when accessing uv coordinates
        int32 tensor with size num_triangles x 3
    normal_indices: Optional[torch.Tensor]
        overrides indices when accessing shading normals
        int32 tensor with size num_triangles x 3
    colors: Optional[torch.Tensor]
        optional per-vertex colors
        float32 tensor with size num_vertices x 3
    """
    def __init__(self,
                 vertices: torch.Tensor,
                 indices: torch.Tensor,
                 material_id: int,
                 uvs: Optional[torch.Tensor] = None,
                 normals: Optional[torch.Tensor] = None,
                 uv_indices: Optional[torch.Tensor] = None,
                 normal_indices: Optional[torch.Tensor] = None,
                 colors: Optional[torch.Tensor] = None):
        # Strict dtype/contiguity checks: these tensors are presumably read by
        # the native redner backend through raw pointers (see compute_uvs),
        # which would silently misread any other layout — TODO confirm.
        assert(vertices.dtype == torch.float32)
        assert(vertices.is_contiguous())
        assert(len(vertices.shape) == 2 and vertices.shape[1] == 3)
        assert(indices.dtype == torch.int32)
        assert(indices.is_contiguous())
        assert(len(indices.shape) == 2 and indices.shape[1] == 3)
        if uvs is not None:
            assert(uvs.dtype == torch.float32)
            assert(uvs.is_contiguous())
            assert(len(uvs.shape) == 2 and uvs.shape[1] == 2)
        if normals is not None:
            assert(normals.dtype == torch.float32)
            assert(normals.is_contiguous())
            assert(len(normals.shape) == 2 and normals.shape[1] == 3)
        if uv_indices is not None:
            assert(uv_indices.dtype == torch.int32)
            assert(uv_indices.is_contiguous())
            assert(len(uv_indices.shape) == 2 and uv_indices.shape[1] == 3)
        if normal_indices is not None:
            assert(normal_indices.dtype == torch.int32)
            assert(normal_indices.is_contiguous())
            assert(len(normal_indices.shape) == 2 and normal_indices.shape[1] == 3)
        if colors is not None:
            assert(colors.dtype == torch.float32)
            assert(colors.is_contiguous())
            assert(len(colors.shape) == 2 and colors.shape[1] == 3)
        self.vertices = vertices
        self.indices = indices
        self.material_id = material_id
        self.uvs = uvs
        self.normals = normals
        self.uv_indices = uv_indices
        self.normal_indices = normal_indices
        self.colors = colors
        # Presumably -1 marks "not an area light"; assigned externally — TODO confirm.
        self.light_id = -1
    def state_dict(self):
        """Return a plain dict of all shape fields for serialization."""
        return {
            'vertices': self.vertices,
            'indices': self.indices,
            'material_id': self.material_id,
            'light_id': self.light_id,
            'uvs': self.uvs,
            'normals': self.normals,
            'uv_indices': self.uv_indices,
            'normal_indices': self.normal_indices,
            'colors': self.colors
        }
    @classmethod
    def load_state_dict(cls, state_dict):
        """Reconstruct a Shape from a state_dict(); all keys must be present."""
        out = cls(
            state_dict['vertices'],
            state_dict['indices'],
            state_dict['material_id'],
            state_dict['uvs'],
            state_dict['normals'],
            state_dict['uv_indices'],
            state_dict['normal_indices'],
            state_dict['colors'])
        out.light_id = state_dict['light_id']
        return out
| 44.74186 | 147 | 0.569208 |
007949e214fdd7b2f5836a3e55ea1db058df576a | 15,967 | py | Python | src/roar/compound_image_folder_dataset.py | YaNgZhAnG-V5/RoarTorch | c994e16f956f1a76edda9bb1cca5998cb06f1ce3 | [
"MIT"
] | null | null | null | src/roar/compound_image_folder_dataset.py | YaNgZhAnG-V5/RoarTorch | c994e16f956f1a76edda9bb1cca5998cb06f1ce3 | [
"MIT"
] | null | null | null | src/roar/compound_image_folder_dataset.py | YaNgZhAnG-V5/RoarTorch | c994e16f956f1a76edda9bb1cca5998cb06f1ce3 | [
"MIT"
] | null | null | null | from copy import deepcopy
import numpy as np
import torch
import torchvision
import torchvision.transforms as T
from PIL import Image
from torch.utils.data import DataLoader
from src.dataset import factory as dataset_factory
from src.roar import roar_core
from src.utils.sysutils import get_cores_count
class CompoundImageFolderDataset(torch.utils.data.Dataset):
    """
    To load Image Folder dataset along with images attribution files.

    Each item pairs an image with its attribution map and perturbs the image
    (removing or keeping the top-attributed pixels) for ROAR/KAR evaluation.
    """
    def __init__(self, dataset_name,
                 image_files_train_path,
                 image_files_validation_path,
                 image_files_test_path,
                 attribution_files_train_path,
                 attribution_files_validation_path,
                 attribution_files_test_path,
                 save_image=True,
                 num_saved_images=10,
                 percentile=0.1,
                 roar=True,
                 non_perturbed_testset=True):
        """
        Args:
            dataset_name: key into dataset_factory.MAP_DATASET_TO_ENUM
            image_files_train_path: ImageFolder root of training images
            image_files_validation_path: ImageFolder root of validation images
            image_files_test_path: ImageFolder root of test images
            attribution_files_train_path: ImageFolder root of training attribution maps
            attribution_files_validation_path: ImageFolder root of validation attribution maps
            attribution_files_test_path: ImageFolder root of test attribution maps
            save_image: whether to save images (original & perturbed & augmented) for debugging
            num_saved_images: number of saved images
            percentile: The % of pixels to remove from input image.
            roar: Set to True for ROAR metric, False for KAR metric.
            non_perturbed_testset: Set to True if we don't want to perturb testset images
        """
        if dataset_name not in dataset_factory.MAP_DATASET_TO_ENUM:
            raise ValueError(f'Invalid dataset_name {dataset_name}')
        self.dataset_name = dataset_name
        self.attribution_files_train_path = attribution_files_train_path
        self.attribution_files_validation_path = attribution_files_validation_path
        self.attribution_files_test_path = attribution_files_test_path
        self.save_image = save_image
        self.num_saved_images = num_saved_images
        # Rolling index used to name the debug images written to tmp_imgs/.
        self.index = 0
        self.percentile = percentile
        self.roar = roar
        self.non_perturbed_testset = non_perturbed_testset
        self.dataset_class = dataset_factory.get_dataset_class(dataset_name=dataset_name)
        self.mean = self.dataset_class.mean
        self.std = self.dataset_class.std
        # Inverse of Normalize(mean, std): x*destd + demean undoes normalization.
        self.demean = [-m / s for m, s in zip(self.mean, self.std)]
        self.destd = [1 / s for s in self.std]
        self.train_normalize_transform = self.dataset_class.get_train_transform(enable_augmentation=False)
        self.evaluation_normalize_transform = self.dataset_class.get_validation_transform()
        # Used for visualization of preprocessed images.
        self.denormalize_transform = torchvision.transforms.Normalize(self.demean, self.destd)
        # Note - For training, we do not apply augmentation transform.
        # First, image is loaded, most/least important pixels are removed and then augmentations are applied.
        self.training_images_dataset = torchvision.datasets.ImageFolder(root=image_files_train_path,
                                                                        transform=torchvision.transforms.ToTensor())
        self.validation_images_dataset = torchvision.datasets.ImageFolder(root=image_files_validation_path,
                                                                          transform=self.evaluation_normalize_transform)
        self.test_images_dataset = torchvision.datasets.ImageFolder(root=image_files_test_path,
                                                                    transform=self.evaluation_normalize_transform)
        # Attribution maps are stored as images; grayscale replicated to 3
        # channels so they align with the RGB inputs.
        self.training_attribution_map_dataset = torchvision.datasets.ImageFolder(
            root=attribution_files_train_path,
            transform=torchvision.transforms.Compose(
                [torchvision.transforms.Grayscale(num_output_channels=3), torchvision.transforms.ToTensor()]))
        self.validation_attribution_map_dataset = torchvision.datasets.ImageFolder(
            root=attribution_files_validation_path,
            transform=torchvision.transforms.Compose(
                [torchvision.transforms.Grayscale(num_output_channels=3), torchvision.transforms.ToTensor()]))
        self.test_attribution_map_dataset = torchvision.datasets.ImageFolder(
            root=attribution_files_test_path,
            transform=torchvision.transforms.Compose(
                [torchvision.transforms.Grayscale(num_output_channels=3), torchvision.transforms.ToTensor()]))
        # Active split; switched by the get_*_dataloader methods.
        self.mode = 'training'
    def __getitem__(self, index):
        # The image and attribution ImageFolders are assumed to enumerate the
        # same files in the same order, so one index addresses both — TODO confirm.
        if self.mode == 'training':
            image, label = self.training_images_dataset[index]
            attribution_map, label = self.training_attribution_map_dataset[index]
            mean = self.mean
        elif self.mode == 'validation':
            image, label = self.validation_images_dataset[index]
            attribution_map, label = self.validation_attribution_map_dataset[index]
            mean = [0, 0, 0]
        else:
            image, label = self.test_images_dataset[index]
            attribution_map, label = self.test_attribution_map_dataset[index]
            mean = [0, 0, 0] # validation and training images already are normalized.
        # Below code is left intentionally for one to quickly check if input data to model is correct.
        if self.save_image:
            T.ToPILImage()(image).save(f'tmp_imgs/input_{self.index}.jpg') # only for training, for validation/test, denormalize first.
        image = np.array(image)
        # Collapse the 3-channel attribution map to a single saliency channel.
        attribution_map = np.max(attribution_map.numpy(), axis=0, keepdims=True)
        if self.non_perturbed_testset:
            # Only training images are perturbed; validation/test are left as-is.
            if self.mode == 'training':
                image, introduced_attribution = roar_core.remove(image, attribution_map, mean, self.percentile, keep=not self.roar, gray=True)
                # NOTE(review): this debug dump is not gated on save_image — confirm intended.
                T.ToPILImage()((introduced_attribution * 255).transpose(1, 2, 0).astype(np.uint8)).save(
                    f'tmp_imgs/introduced_feature_{self.index}.jpg')
        else:
            image, introduced_attribution = roar_core.remove(image, attribution_map, mean, self.percentile, keep=not self.roar, gray=True)
            T.ToPILImage()((introduced_attribution * 255).transpose(1, 2, 0).astype(np.uint8)).save(
                f'tmp_imgs/introduced_feature_{self.index}.jpg')
        if self.save_image:
            T.ToPILImage()((image * 255).transpose(1, 2, 0).astype(np.uint8)).save(f'tmp_imgs/pert_input_{self.index}.jpg')
        if self.mode == 'training':
            # Do augmentation(randomscale/randomcrop) transform only after removal of pixels is done.
            image = image.transpose(1, 2, 0) # PIL needs HXWX3, converting from 3xHxW .
            image = self.train_normalize_transform(Image.fromarray((image * 255).astype(np.uint8)))
            if self.save_image:
                T.ToPILImage()(self.denormalize_transform(image)).save(f'tmp_imgs/augmented_{self.index}.jpg')
        # increment index for saved images
        self.index = (self.index+1)%self.num_saved_images
        if self.mode == "training":
            return image, label, introduced_attribution
        else:
            return image, label
    def __len__(self):
        # Length follows the currently active split.
        if self.mode == 'training':
            return self.train_dataset_size
        elif self.mode == 'validation':
            return self.val_dataset_size
        else:
            return self.test_dataset_size
    def get_train_dataloader(self, data_args) -> DataLoader:
        self.mode = 'training'
        # Deepcopy ensures any changes to mode variable will not influence this dataloader
        return torch.utils.data.DataLoader(deepcopy(self),
                                           batch_size=data_args['batch_size'],
                                           shuffle=data_args['shuffle'],
                                           num_workers=get_cores_count())
    def get_validation_dataloader(self, data_args) -> DataLoader:
        self.mode = 'validation'
        return torch.utils.data.DataLoader(deepcopy(self),
                                           batch_size=data_args['batch_size'],
                                           shuffle=data_args['shuffle'],
                                           num_workers=get_cores_count())
    def get_test_dataloader(self, data_args):
        self.mode = 'test'
        return torch.utils.data.DataLoader(deepcopy(self),
                                           batch_size=data_args['batch_size'],
                                           shuffle=data_args['shuffle'],
                                           num_workers=get_cores_count())
    @property
    def classes(self):
        # Class names come from the attribution-map folder structure.
        return self.training_attribution_map_dataset.classes
    @property
    def train_dataset_size(self):
        return len(self.training_images_dataset)
    @property
    def val_dataset_size(self):
        return len(self.validation_images_dataset)
    @property
    def test_dataset_size(self):
        return len(self.test_images_dataset)
# ToDo Add debug method for Compound Image Folder Dataset.
class AttributionMapDataset(torch.utils.data.Dataset):
    """
    To load attribution maps as dataset.

    Each item is an attribution map with everything below the top `percentile`
    percent of pixel values zeroed out.
    """
    def __init__(self,
                 attribution_files_train_path,
                 attribution_files_validation_path,
                 attribution_files_test_path,
                 percentile,
                 save_image=True,
                 num_saved_images=10,
                 ):
        """
        Args:
            attribution_files_train_path: ImageFolder root of training attribution maps
            attribution_files_validation_path: ImageFolder root of validation attribution maps
            attribution_files_test_path: ImageFolder root of test attribution maps
            percentile: % of top-valued pixels to keep when perturbing a map
            save_image: whether to dump perturbed maps to tmp_imgs/ for debugging
            num_saved_images: number of debug file names to cycle through
        """
        self.attribution_files_train_path = attribution_files_train_path
        self.attribution_files_validation_path = attribution_files_validation_path
        self.attribution_files_test_path = attribution_files_test_path
        self.percentile = percentile
        self.save_image = save_image
        self.num_saved_images = num_saved_images
        # Rolling index used to name the debug images.
        self.index = 0
        # Grayscale maps replicated to 3 channels so shapes match RGB inputs.
        self.training_attribution_map_dataset = torchvision.datasets.ImageFolder(
            root=attribution_files_train_path,
            transform=torchvision.transforms.Compose(
                [torchvision.transforms.Grayscale(num_output_channels=3), torchvision.transforms.ToTensor()]))
        self.validation_attribution_map_dataset = torchvision.datasets.ImageFolder(
            root=attribution_files_validation_path,
            transform=torchvision.transforms.Compose(
                [torchvision.transforms.Grayscale(num_output_channels=3), torchvision.transforms.ToTensor()]))
        self.test_attribution_map_dataset = torchvision.datasets.ImageFolder(
            root=attribution_files_test_path,
            transform=torchvision.transforms.Compose(
                [torchvision.transforms.Grayscale(num_output_channels=3), torchvision.transforms.ToTensor()]))
        # Active split; switched by the get_*_dataloader methods.
        self.mode = 'training'
    def __getitem__(self, index):
        if self.mode == 'training':
            attribution_map, label = self.training_attribution_map_dataset[index]
        elif self.mode == 'validation':
            attribution_map, label = self.validation_attribution_map_dataset[index]
        else:
            attribution_map, label = self.test_attribution_map_dataset[index]
        # perturb the attribution map: zero every pixel whose (channel-max)
        # value falls below the cutoff for the top `percentile` percent.
        # TODO add KAR version
        gray_img_tensor, _ = torch.max(attribution_map, axis=0, keepdims=True)
        gray_img_tensor = gray_img_tensor.squeeze()
        num_pixels = torch.numel(gray_img_tensor)
        # NOTE(review): if percentile is small enough that k == 0, num[-1]
        # below raises IndexError — confirm percentile >= 100/num_pixels.
        num, _ = torch.topk(gray_img_tensor.flatten(), int(num_pixels * self.percentile / 100))
        attribution_map[:, gray_img_tensor < num[-1]] = 0
        # Below code is left intentionally for one to quickly check if input data to model is correct.
        if self.save_image:
            T.ToPILImage()(attribution_map).save(f'tmp_imgs/perturbed_attribution_map_{self.index}.jpg')
        # increment index for saved images
        self.index = (self.index+1)%self.num_saved_images
        return attribution_map, label
    def __len__(self):
        # Length follows the currently active split.
        if self.mode == 'training':
            return self.train_dataset_size
        elif self.mode == 'validation':
            return self.val_dataset_size
        else:
            return self.test_dataset_size
    def get_train_dataloader(self, data_args) -> DataLoader:
        self.mode = 'training'
        # Deepcopy ensures any changes to mode variable will not influence this dataloader
        return torch.utils.data.DataLoader(deepcopy(self),
                                           batch_size=data_args['batch_size'],
                                           shuffle=data_args['shuffle'],
                                           num_workers=get_cores_count())
    def get_validation_dataloader(self, data_args) -> DataLoader:
        self.mode = 'validation'
        return torch.utils.data.DataLoader(deepcopy(self),
                                           batch_size=data_args['batch_size'],
                                           shuffle=data_args['shuffle'],
                                           num_workers=get_cores_count())
    def get_test_dataloader(self, data_args):
        self.mode = 'test'
        return torch.utils.data.DataLoader(deepcopy(self),
                                           batch_size=data_args['batch_size'],
                                           shuffle=data_args['shuffle'],
                                           num_workers=get_cores_count())
    @property
    def classes(self):
        return self.training_attribution_map_dataset.classes
    @property
    def train_dataset_size(self):
        return len(self.training_attribution_map_dataset)
    @property
    def val_dataset_size(self):
        return len(self.validation_attribution_map_dataset)
    @property
    def test_dataset_size(self):
        return len(self.test_attribution_map_dataset)
class ImageAndAttributionDataset(torch.utils.data.Dataset):
    """Zips an image dataset with an attribution-map dataset: item i is the
    pair (image_dataset[i], attribution_dataset[i]); length follows the
    image dataset."""
    def __init__(self, image_dataset, attribution_dataset):
        self.image_dataset = image_dataset
        self.attribution_dataset = attribution_dataset
    def __getitem__(self, index):
        return self.image_dataset[index], self.attribution_dataset[index]
    def __len__(self):
        return len(self.image_dataset)
    def _make_loader(self, mode, data_args):
        # Switch both wrapped datasets to the requested split, then snapshot
        # the whole dataset with deepcopy so later mode switches cannot leak
        # into an already-created dataloader.
        self.image_dataset.mode = mode
        self.attribution_dataset.mode = mode
        return torch.utils.data.DataLoader(deepcopy(self),
                                           batch_size=data_args['batch_size'],
                                           shuffle=data_args['shuffle'],
                                           num_workers=get_cores_count())
    def get_train_dataloader(self, data_args) -> DataLoader:
        return self._make_loader('training', data_args)
    def get_validation_dataloader(self, data_args) -> DataLoader:
        return self._make_loader('validation', data_args)
    def get_test_dataloader(self, data_args):
        return self._make_loader('test', data_args)
| 46.281159 | 142 | 0.63174 |
e1e76488c050925ac08cc3faa725e2213bc97fbd | 107 | py | Python | track/__init__.py | yxtj/VideoServing | 52d1c1c97021f11cc4d77c181ac1144fe3a789ce | [
"MIT"
] | null | null | null | track/__init__.py | yxtj/VideoServing | 52d1c1c97021f11cc4d77c181ac1144fe3a789ce | [
"MIT"
] | null | null | null | track/__init__.py | yxtj/VideoServing | 52d1c1c97021f11cc4d77c181ac1144fe3a789ce | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from .centroidtracker import CentroidTracker
from .sorttracker import SortTracker
| 21.4 | 44 | 0.766355 |
5a3933c14b8886718458269603da2239d36db66a | 4,879 | py | Python | src/TeamAssigner.py | unithmallavaram/TeamFormationAssistant_V2 | c2fd982765adf0c716bb836732a4dcae66c3a2ce | [
"MIT"
] | 2 | 2020-09-24T04:38:02.000Z | 2020-10-05T01:37:53.000Z | src/TeamAssigner.py | unithmallavaram/TeamFormationAssistant_V2 | c2fd982765adf0c716bb836732a4dcae66c3a2ce | [
"MIT"
] | 20 | 2020-09-21T01:31:50.000Z | 2020-10-01T11:37:25.000Z | src/TeamAssigner.py | unithmallavaram/TeamFormationAssistant_V2 | c2fd982765adf0c716bb836732a4dcae66c3a2ce | [
"MIT"
] | 3 | 2020-09-25T21:05:11.000Z | 2021-09-11T15:12:42.000Z | import pandas as pd
import mysql.connector
import math
# Shared, module-level MySQL connection used by every helper in this module.
# NOTE(review): host and credentials are hard-coded in source (a root password
# in a public repo) — move them to environment variables / a secrets store.
connection = mysql.connector.connect(
    host="sefall2021.cosnmrdyk6wi.us-east-2.rds.amazonaws.com",
    database='teamformationassistant',
    user="root",
    password="SEFall2021"
)
def persistTeamData(teamData):
    """Insert one row per assignment from teamData into the Team table.

    Args:
        teamData: DataFrame with ProjectId, ProjectName, MemberId and
            MemberName columns; every row is written as a new Team record.

    Does nothing when the shared connection is down; commits once at the end.
    """
    if not connection.is_connected():
        return
    cursor = connection.cursor()
    sql = "INSERT INTO Team(ProjectId, ProjectName, MemberId, MemberName)VALUES(%s,%s,%s,%s);"
    for row in teamData.index:
        record = (str(teamData.loc[row, 'ProjectId']),
                  str(teamData.loc[row, 'ProjectName']),
                  str(teamData.loc[row, 'MemberId']),
                  str(teamData.loc[row, 'MemberName']))
        cursor.execute(sql, record)
    connection.commit()
def setEmployeeAssignement(employ):
    """Mark the member with id ``employ`` as assigned (IsAssigned = 1).

    No-op when the shared connection is down; commits immediately otherwise.
    """
    if not connection.is_connected():
        return
    cursor = connection.cursor()
    update_sql = "UPDATE Member SET IsAssigned= %s WHERE MemberId = %s ;"
    cursor.execute(update_sql, (1, employ))
    connection.commit()
def setJobAssignement(job):
    """Mark the requirement with id ``job`` as assigned (IsAssigned = 1).

    No-op when the shared connection is down; commits immediately otherwise.
    """
    if not connection.is_connected():
        return
    cursor = connection.cursor()
    update_sql = "UPDATE Requirements SET IsAssigned= %s WHERE JobId= %s ;"
    cursor.execute(update_sql, (1, job))
    connection.commit()
def memberToTeamMapping(MemberData,ProjectData,RequirementsData):
    """Greedily assign the best-scoring unassigned member to each open job.

    For every unassigned job requirement, each still-unassigned member receives
    a weighted score over skill, experience, availability, language match and
    hourly rate; the highest scorer is assigned (and removed from the pool),
    and the corresponding Member/Requirements rows are flagged in the database.

    :param MemberData: Member table as a DataFrame.
    :param ProjectData: Project table as a DataFrame.
    :param RequirementsData: unassigned rows of the Requirements table.
    :return: DataFrame of (ProjectId, ProjectName, MemberId, MemberName) rows.
    """
    jobIDs = RequirementsData['JobId'].tolist()
    # pool of members not yet placed on any project
    employee = MemberData.loc[MemberData['IsAssigned'] == 0]
    employee = employee['MemberId'].tolist()
    teamData = pd.DataFrame(columns = ['ProjectId', 'ProjectName', 'MemberId', 'MemberName'])
    for jobID in jobIDs:
        Req = RequirementsData.loc[RequirementsData['JobId'] == jobID]
        reqLanguage = Req['LanguagePreferred'].tolist()[0]
        # per-job weights for each scoring dimension
        skillweight = float(Req['SkillWeight'])
        experienceWeight = float(Req['ExperienceWeight'])
        hoursWeight = float(Req['HoursWeight'])
        languageWeight = float(Req['LanguageWeight'])
        budgetWeight = float(Req['BudgetWeight'])
        ProjectId = Req['ProjectId'].tolist()[0]
        Project = ProjectData.loc[ProjectData['ProjectId'] == ProjectId]
        ProjectName = Project['ProjectName'].tolist()
        if(len(ProjectName) == 0):
            ProjectName = 'Not Provided'
        else:
            ProjectName = Project['ProjectName'].tolist()[0]
        highScore = 0
        selectedEmploy = ''
        for employ in employee:
            employData = MemberData.loc[MemberData['MemberId'] == employ]
            skillScore = float(employData['SkillScore'])
            expScore = float(employData['Experience'])
            availableHours = float(employData['AvailableHoursPerWeek'])
            hourlyRate = float(employData['HourlyRate'])
            languageScore = 0
            # binary language bonus: 1 iff the preferred language appears in
            # the member's comma-separated Languages field
            if reqLanguage in employData['Languages'].tolist()[0].split(","):
                languageScore = 1
            # each term is normalised by its nominal maximum before weighting
            memscore = (skillweight*skillScore)/100+(experienceWeight*expScore)/10+(hoursWeight*availableHours)/40+(languageWeight*languageScore)/5+(budgetWeight*hourlyRate)/100
            if (memscore > highScore):
                selectedEmploy = employ
                highScore = memscore
        # no candidate scored above zero -> leave this job unfilled
        if (selectedEmploy not in employee):
            continue
        employee.remove(selectedEmploy)
        setEmployeeAssignement(int(selectedEmploy))
        setJobAssignement(int(jobID))
        Member = MemberData.loc[MemberData['MemberId'] == selectedEmploy]
        MemberName = Member['MemberName'].tolist()[0]
        # NOTE(review): DataFrame.append is deprecated in recent pandas;
        # consider pd.concat when the codebase upgrades.
        teamData = teamData.append({'ProjectId' : ProjectId , 'ProjectName' : ProjectName, 'MemberId' : selectedEmploy, 'MemberName' : MemberName},
            ignore_index = True)
    return teamData
def main():
    """Load Member/Project/Requirements tables, run the matching, persist the team."""
    if connection.is_connected():
        Member_Query = pd.read_sql_query(
            '''select * from Member''', connection)
        Project_Query = pd.read_sql_query(
            '''select * from Project''', connection)
        Requirements_Query = pd.read_sql_query(
            '''select * from Requirements''', connection)
        MemberData = pd.DataFrame(Member_Query, columns=['MemberId','MemberName','DOB','Languages','IsAssigned',
            'HourlyRate','MemberRole','Experience','SkillScore','AvailableHoursPerWeek'])
        ProjectData = pd.DataFrame(Project_Query, columns=['ProjectId','ProjectName','ProjectEndDate','ProjectTeamSize','Budget',
            'Tools','IsAssignmentComplete','Priority'])
        RequirementsData = pd.DataFrame(Requirements_Query, columns=['JobId','ProjectId','LanguagePreferred','IsAssigned' ,'Skill','MemberRole',
            'AvailableHoursPerWeek','SkillWeight','ExperienceWeight','HoursWeight','LanguageWeight','BudgetWeight'])
        # only requirements that have not yet been assigned take part
        RequirementsData = RequirementsData.loc[RequirementsData['IsAssigned'] == 0]
        teamData = memberToTeamMapping(MemberData,ProjectData,RequirementsData)
        persistTeamData(teamData)


if __name__=="__main__":
    main()
| 38.417323 | 179 | 0.651158 |
72920b8387be430edac3ae75a1cfb39808a8ecb8 | 455 | py | Python | evap/evaluation/migrations/0038_questionnaire_staff_only.py | JenniferStamm/EvaP | 1d71e4efcd34d01f28e30c6026c8dcc708921193 | [
"MIT"
] | null | null | null | evap/evaluation/migrations/0038_questionnaire_staff_only.py | JenniferStamm/EvaP | 1d71e4efcd34d01f28e30c6026c8dcc708921193 | [
"MIT"
] | null | null | null | evap/evaluation/migrations/0038_questionnaire_staff_only.py | JenniferStamm/EvaP | 1d71e4efcd34d01f28e30c6026c8dcc708921193 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: adds the boolean ``staff_only`` flag to
    Questionnaire (default False). Left byte-wise intact by convention."""

    dependencies = [
        ('evaluation', '0037_specify_on_delete'),
    ]

    operations = [
        migrations.AddField(
            model_name='questionnaire',
            name='staff_only',
            field=models.BooleanField(verbose_name='display for staff only', default=False),
        ),
    ]
| 22.75 | 92 | 0.632967 |
fa3675b07dd3eed75f3c7e37cade05d365bb4341 | 6,355 | py | Python | test/functional/rpc_createmultisig.py | nccproject/ncc | 068ccc82a73d28136546095261ad8ccef7e541a3 | [
"MIT"
] | null | null | null | test/functional/rpc_createmultisig.py | nccproject/ncc | 068ccc82a73d28136546095261ad8ccef7e541a3 | [
"MIT"
] | null | null | null | test/functional/rpc_createmultisig.py | nccproject/ncc | 068ccc82a73d28136546095261ad8ccef7e541a3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multisig RPCs"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.nccconfig import COINBASE_MATURITY, INITIAL_BLOCK_REWARD
from test_framework.util import (
assert_raises_rpc_error,
assert_equal,
)
from test_framework.key import ECPubKey
import binascii
import decimal
import itertools
class RpcCreateMultiSigTest(BitcoinTestFramework):
    """Functional test for the createmultisig / addmultisigaddress RPCs."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 3

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def get_keys(self):
        """Generate self.nkeys keypairs on node1 and a payout address on node2."""
        node0, node1, node2 = self.nodes
        add = [node1.getnewaddress() for _ in range(self.nkeys)]
        self.pub = [node1.getaddressinfo(a)["pubkey"] for a in add]
        self.priv = [node1.dumpprivkey(a) for a in add]
        self.final = node2.getnewaddress()

    def run_test(self):
        node0, node1, node2 = self.nodes

        self.check_addmultisigaddress_errors()

        self.log.info('Generating blocks ...')
        node0.generate(49 + COINBASE_MATURITY)
        self.sync_all()

        self.moved = 0
        # exercise every (n-of-m, address type) combination
        for self.nkeys in [3, 5]:
            for self.nsigs in [2, 3]:
                for self.output_type in ["bech32", "p2sh-segwit", "legacy"]:
                    self.get_keys()
                    self.do_multisig()
        self.checkbalances()

        # Test mixed compressed and uncompressed pubkeys
        self.log.info('Mixed compressed and uncompressed multisigs are not allowed')
        pk0 = node0.getaddressinfo(node0.getnewaddress())['pubkey']
        pk1 = node1.getaddressinfo(node1.getnewaddress())['pubkey']
        pk2 = node2.getaddressinfo(node2.getnewaddress())['pubkey']

        # decompress pk2
        pk_obj = ECPubKey()
        pk_obj.set(binascii.unhexlify(pk2))
        pk_obj.compressed = False
        pk2 = binascii.hexlify(pk_obj.get_bytes()).decode()

        # Check all permutations of keys because order matters apparently
        for keys in itertools.permutations([pk0, pk1, pk2]):
            # Results should be the same as this legacy one
            legacy_addr = node0.createmultisig(2, keys, 'legacy')['address']
            assert_equal(legacy_addr, node0.addmultisigaddress(2, keys, '', 'legacy')['address'])

            # Generate addresses with the segwit types. These should all make legacy addresses
            assert_equal(legacy_addr, node0.createmultisig(2, keys, 'bech32')['address'])
            assert_equal(legacy_addr, node0.createmultisig(2, keys, 'p2sh-segwit')['address'])
            assert_equal(legacy_addr, node0.addmultisigaddress(2, keys, '', 'bech32')['address'])
            assert_equal(legacy_addr, node0.addmultisigaddress(2, keys, '', 'p2sh-segwit')['address'])

    def check_addmultisigaddress_errors(self):
        self.log.info('Check that addmultisigaddress fails when the private keys are missing')
        addresses = [self.nodes[1].getnewaddress(address_type='legacy') for _ in range(2)]
        assert_raises_rpc_error(-5, 'no full public key for address', lambda: self.nodes[0].addmultisigaddress(nrequired=1, keys=addresses))
        for a in addresses:
            # Importing all addresses should not change the result
            self.nodes[0].importaddress(a)
        assert_raises_rpc_error(-5, 'no full public key for address', lambda: self.nodes[0].addmultisigaddress(nrequired=1, keys=addresses))

    def checkbalances(self):
        """Verify the funds moved through multisig add up across all nodes."""
        node0, node1, node2 = self.nodes
        node0.generate(COINBASE_MATURITY)
        self.sync_all()

        bal0 = node0.getbalance()
        bal1 = node1.getbalance()
        bal2 = node2.getbalance()

        height = node0.getblockchaininfo()["blocks"]
        assert COINBASE_MATURITY + 50 < height < 2 * COINBASE_MATURITY + 100
        total = (height - COINBASE_MATURITY) * INITIAL_BLOCK_REWARD
        assert bal1 == 0
        assert bal2 == self.moved
        assert bal0 + bal1 + bal2 == total

    def do_multisig(self):
        """Create an n-of-m multisig, fund it, then spend it with n signatures."""
        node0, node1, node2 = self.nodes

        msig = node2.createmultisig(self.nsigs, self.pub, self.output_type)
        madd = msig["address"]
        mredeem = msig["redeemScript"]
        if self.output_type == 'bech32':
            assert madd[0:4] == "qcrt" # actually a bech32 address

        # compare against addmultisigaddress
        msigw = node1.addmultisigaddress(self.nsigs, self.pub, None, self.output_type)
        maddw = msigw["address"]
        mredeemw = msigw["redeemScript"]
        # addmultisigiaddress and createmultisig work the same
        assert maddw == madd
        assert mredeemw == mredeem

        txid = node0.sendtoaddress(madd, 40)

        tx = node0.getrawtransaction(txid, True)
        vout = [v["n"] for v in tx["vout"] if madd in v["scriptPubKey"].get("addresses", [])]
        assert len(vout) == 1
        vout = vout[0]
        scriptPubKey = tx["vout"][vout]["scriptPubKey"]["hex"]
        value = tx["vout"][vout]["value"]
        prevtxs = [{"txid": txid, "vout": vout, "scriptPubKey": scriptPubKey, "redeemScript": mredeem, "amount": value}]

        node0.generate(1)

        outval = value - decimal.Decimal("0.01000000")
        rawtx = node2.createrawtransaction([{"txid": txid, "vout": vout}], [{self.final: outval}])

        # signing must fail when the redeemScript is omitted from prevtxs
        prevtx_err = dict(prevtxs[0])
        del prevtx_err["redeemScript"]
        assert_raises_rpc_error(-8, "Missing redeemScript/witnessScript", node2.signrawtransactionwithkey, rawtx, self.priv[0:self.nsigs-1], [prevtx_err])

        # sign with nsigs-1 keys, then add the final signature
        rawtx2 = node2.signrawtransactionwithkey(rawtx, self.priv[0:self.nsigs - 1], prevtxs)
        rawtx3 = node2.signrawtransactionwithkey(rawtx2["hex"], [self.priv[-1]], prevtxs)

        self.moved += outval
        tx = node0.sendrawtransaction(rawtx3["hex"], True)
        blk = node0.generate(1)[0]
        assert tx in node0.getblock(blk)["tx"]

        txinfo = node0.getrawtransaction(tx, True, blk)
        self.log.info("n/m=%d/%d %s size=%d vsize=%d weight=%d" % (self.nsigs, self.nkeys, self.output_type, txinfo["size"], txinfo["vsize"], txinfo["weight"]))


if __name__ == '__main__':
    RpcCreateMultiSigTest().main()
| 41.809211 | 160 | 0.654131 |
f670947956cda28cd9f49c37f93fda53e1f88ca8 | 362 | py | Python | sponsor/helpers.py | Make-Munich/SaBoT | cabc7e2f5e0f7166d94d2ef683f75d8d3be02834 | [
"MIT"
] | 19 | 2016-04-09T10:13:26.000Z | 2020-06-21T23:14:16.000Z | sponsor/helpers.py | Make-Munich/SaBoT | cabc7e2f5e0f7166d94d2ef683f75d8d3be02834 | [
"MIT"
] | 13 | 2017-01-14T20:42:45.000Z | 2019-08-10T22:48:44.000Z | sponsor/helpers.py | Make-Munich/SaBoT | cabc7e2f5e0f7166d94d2ef683f75d8d3be02834 | [
"MIT"
] | 9 | 2016-04-09T12:52:48.000Z | 2018-08-16T19:08:16.000Z | import os.path
from django.conf import settings
def sponsor_filesanitize(fileattr, sponsor):
    """Return (absolute path, display filename) for a sponsor's uploaded file.

    The display name is the sponsor's company name with the upload's original
    extension preserved.

    Args:
        fileattr: name of the file field on *sponsor* (e.g. "logo").
        sponsor: sponsor model instance exposing ``contact.companyName``.

    Returns:
        Tuple ``(absolutePath, newname)``, or ``None`` when the field is empty.
    """
    f = getattr(sponsor, fileattr)
    if not f:
        return None
    # BUG FIX: join instead of string concatenation, so a MEDIA_ROOT without a
    # trailing separator still produces a valid filesystem path.
    absolutePath = os.path.join(settings.MEDIA_ROOT, f.name)
    (base, name) = os.path.split(absolutePath)
    (oldname, ext) = os.path.splitext(name)
    newname = sponsor.contact.companyName + ext
    return (absolutePath, newname)
| 21.294118 | 44 | 0.748619 |
c683449b2b44f124b2a5867406368bade0d19af1 | 2,272 | py | Python | fno4vc/project_path.py | slimgroup/FNOs4VC | 53504f72c8280442da41a4340cf0e4455556f97e | [
"MIT"
] | null | null | null | fno4vc/project_path.py | slimgroup/FNOs4VC | 53504f72c8280442da41a4340cf0e4455556f97e | [
"MIT"
] | null | null | null | fno4vc/project_path.py | slimgroup/FNOs4VC | 53504f72c8280442da41a4340cf0e4455556f97e | [
"MIT"
] | null | null | null | import git
import os
from typing import Optional
def gitdir() -> str:
    """Find the absolute path to the GitHub repository root.
    """
    # Search upward from the current working directory until a repository
    # is found, then ask git itself for the top-level directory.
    git_repo = git.Repo(os.getcwd(), search_parent_directories=True)
    git_root = git_repo.git.rev_parse('--show-toplevel')
    return git_root
def datadir(path: str, mkdir: Optional[bool] = True) -> str:
    """The absolute path to a directory at the data directory.

    Data directory, located at the GitHub repository root, is for training and
    testing data. Here the path is created if it does not exist upon call if
    `mkdir` is True.

    Args:
        path: A string for directory name located at the data directory.
        mkdir: An optional boolean for whether to create the directory if it
            does not exist.
    """
    path = os.path.join(gitdir(), 'data/', path)
    if mkdir:
        # exist_ok removes the check-then-create race of the original
        # `os.path.exists` guard when several jobs start simultaneously.
        os.makedirs(path, exist_ok=True)
    return path
def plotsdir(path: str, mkdir: Optional[bool] = True) -> str:
    """The absolute path to a directory at the plot directory.

    Plot directory, located at the GitHub repository root, is storing figure of
    experiment results. Here the path is created if it does not exist upon call
    if `mkdir` is True.

    Args:
        path: A string for directory name located at the plot directory.
        mkdir: An optional boolean for whether to create the directory if it
            does not exist.
    """
    path = os.path.join(gitdir(), 'plots/', path)
    if mkdir:
        # exist_ok removes the check-then-create race of the original
        # `os.path.exists` guard when several jobs start simultaneously.
        os.makedirs(path, exist_ok=True)
    return path
def checkpointsdir(path: str, mkdir: Optional[bool] = True) -> str:
    """The absolute path to a directory at the checkpoint directory.

    Checkpoint directory, located at the GitHub repository root, is storing
    intermediate training checkpoints, e.g., network weights. Here the path is
    created if it does not exist upon call if `mkdir` is True.

    Args:
        path: A string for directory name located at the checkpoint directory.
        mkdir: An optional boolean for whether to create the directory if it
            does not exist.
    """
    path = os.path.join(datadir('checkpoints'), path)
    if mkdir:
        # exist_ok removes the check-then-create race of the original
        # `os.path.exists` guard when several jobs start simultaneously.
        os.makedirs(path, exist_ok=True)
    return path
| 34.424242 | 79 | 0.675176 |
2aa8d99b6e1a1720cc940aa7842f281e4e14c140 | 1,310 | py | Python | redditchat/core/migrations/0003_auto__add_field_room_shortname_display.py | reverie/seddit.com | 3ffeeae66c85a3b4dd0f164929f171bd7dc23a2f | [
"MIT"
] | null | null | null | redditchat/core/migrations/0003_auto__add_field_room_shortname_display.py | reverie/seddit.com | 3ffeeae66c85a3b4dd0f164929f171bd7dc23a2f | [
"MIT"
] | 4 | 2020-02-11T23:01:20.000Z | 2021-06-10T17:58:40.000Z | redditchat/core/migrations/0003_auto__add_field_room_shortname_display.py | reverie/seddit.com | 3ffeeae66c85a3b4dd0f164929f171bd7dc23a2f | [
"MIT"
] | null | null | null | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds ``shortname_display`` to ``core_room``.
    Auto-generated; the frozen ``models`` dict below is left intact."""

    def forwards(self, orm):
        # Adding field 'Room.shortname_display'
        db.add_column('core_room', 'shortname_display', self.gf('django.db.models.fields.CharField')(default='', max_length=256), keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Room.shortname_display'
        db.delete_column('core_room', 'shortname_display')

    models = {
        'core.room': {
            'Meta': {'object_name': 'Room'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('common.fields.UUIDField', [], {'auto': 'True', 'unique': 'True', 'max_length': '32', 'primary_key': 'True'}),
            'shortname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'}),
            'shortname_display': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'title': ('django.db.models.fields.TextField', [], {}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['core']
| 38.529412 | 149 | 0.601527 |
d95c653bfae34df2c41d126c6226dde8b2556c7e | 5,812 | py | Python | test/functional/p2p_fingerprint.py | mitchelvanamstel/SAPP | 8ccd8b949c814388f5f0e2e20e8016d38e291876 | [
"MIT"
] | 83 | 2018-11-17T02:20:34.000Z | 2022-01-26T05:05:44.000Z | test/functional/p2p_fingerprint.py | mitchelvanamstel/SAPP | 8ccd8b949c814388f5f0e2e20e8016d38e291876 | [
"MIT"
] | 42 | 2017-09-12T03:09:56.000Z | 2021-01-27T18:43:28.000Z | test/functional/p2p_fingerprint.py | mitchelvanamstel/SAPP | 8ccd8b949c814388f5f0e2e20e8016d38e291876 | [
"MIT"
] | 37 | 2015-10-02T19:33:04.000Z | 2021-04-21T22:26:23.000Z | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various fingerprinting protections.
If an stale block more than a month old or its header are requested by a peer,
the node should pretend that it does not have it to avoid fingerprinting.
"""
import time
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.mininode import (
CInv,
P2PInterface,
msg_headers,
msg_block,
msg_getdata,
msg_getheaders,
network_thread_start,
wait_until,
)
from test_framework.test_framework import PivxTestFramework
from test_framework.util import (
assert_equal,
)
class P2PFingerprintTest(PivxTestFramework):
    """Checks that stale blocks older than a month are withheld from peers."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1

    # Build a chain of blocks on top of given one
    def build_chain(self, nblocks, prev_hash, prev_height, prev_median_time):
        blocks = []
        for _ in range(nblocks):
            coinbase = create_coinbase(prev_height + 1)
            block_time = prev_median_time + 1
            block = create_block(int(prev_hash, 16), coinbase, block_time)
            block.solve()
            blocks.append(block)

            prev_hash = block.hash
            prev_height += 1
            prev_median_time = block_time
        return blocks

    # Send a getdata request for a given block hash
    def send_block_request(self, block_hash, node):
        msg = msg_getdata()
        msg.inv.append(CInv(2, block_hash))  # 2 == "Block"
        node.send_message(msg)

    # Send a getheaders request for a given single block hash
    def send_header_request(self, block_hash, node):
        msg = msg_getheaders()
        msg.hashstop = block_hash
        node.send_message(msg)

    # Check whether last block received from node has a given hash
    def last_block_equals(self, expected_hash, node):
        block_msg = node.last_message.get("block")
        return block_msg and block_msg.block.rehash() == expected_hash

    # Check whether last block header received from node has a given hash
    def last_header_equals(self, expected_hash, node):
        headers_msg = node.last_message.get("headers")
        return (headers_msg and
                headers_msg.headers and
                headers_msg.headers[0].rehash() == expected_hash)

    # Checks that stale blocks timestamped more than a month ago are not served
    # by the node while recent stale blocks and old active chain blocks are.
    # This does not currently test that stale blocks timestamped within the
    # last month but that have over a month's worth of work are also withheld.
    def run_test(self):
        node0 = self.nodes[0].add_p2p_connection(P2PInterface())

        network_thread_start()
        node0.wait_for_verack()

        # Set node time to 60 days ago
        self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60)

        # Generating a chain of 10 blocks
        block_hashes = self.nodes[0].generate(10)

        # Create longer chain starting 2 blocks before current tip
        height = len(block_hashes) - 2
        block_hash = block_hashes[height - 1]
        block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1
        new_blocks = self.build_chain(5, block_hash, height, block_time)

        # Force reorg to a longer chain
        node0.send_message(msg_headers(new_blocks))
        node0.wait_for_getdata()
        for block in new_blocks:
            node0.send_and_ping(msg_block(block))

        # Check that reorg succeeded
        assert_equal(self.nodes[0].getblockcount(), 13)

        stale_hash = int(block_hashes[-1], 16)

        # Check that getdata request for stale block succeeds
        self.send_block_request(stale_hash, node0)
        test_function = lambda: self.last_block_equals(stale_hash, node0)
        wait_until(test_function, timeout=3)

        # Check that getheader request for stale block header succeeds
        self.send_header_request(stale_hash, node0)
        test_function = lambda: self.last_header_equals(stale_hash, node0)
        wait_until(test_function, timeout=3)

        # Longest chain is extended so stale is much older than chain tip
        self.nodes[0].setmocktime(0)
        tip = self.nodes[0].generate(nblocks=1)[0]
        assert_equal(self.nodes[0].getblockcount(), 14)

        # Send getdata & getheaders to refresh last received getheader message
        block_hash = int(tip, 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()

        # Request for very old stale block should now fail
        self.send_block_request(stale_hash, node0)
        time.sleep(3)
        assert not self.last_block_equals(stale_hash, node0)

        # Request for very old stale block header should now fail
        self.send_header_request(stale_hash, node0)
        time.sleep(3)
        assert not self.last_header_equals(stale_hash, node0)

        # Verify we can fetch very old blocks and headers on the active chain
        block_hash = int(block_hashes[2], 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()

        self.send_block_request(block_hash, node0)
        test_function = lambda: self.last_block_equals(block_hash, node0)
        wait_until(test_function, timeout=3)

        self.send_header_request(block_hash, node0)
        test_function = lambda: self.last_header_equals(block_hash, node0)
        wait_until(test_function, timeout=3)


if __name__ == '__main__':
    P2PFingerprintTest().main()
| 37.986928 | 79 | 0.685478 |
01cb7b968bb34021e5f08a583b90562a727000c9 | 2,084 | py | Python | apps/account/utils.py | kush-josh/smart-content | 6070a72d4e190eb5de63495c094785ab4c190b5b | [
"MIT"
] | null | null | null | apps/account/utils.py | kush-josh/smart-content | 6070a72d4e190eb5de63495c094785ab4c190b5b | [
"MIT"
] | 7 | 2018-02-23T09:35:55.000Z | 2018-02-23T11:56:18.000Z | apps/account/utils.py | joshtechnologygroup/smart-content | 6070a72d4e190eb5de63495c094785ab4c190b5b | [
"MIT"
] | 1 | 2018-02-25T18:56:33.000Z | 2018-02-25T18:56:33.000Z | from apps.account import models as account_models
from libs import peers_utils
new_user_url = "addAuthor/"
new_contract_url = "addContract/"
new_content_url = "addContent/"
updated_content_url = "addContentContract/"
def broadcast_new_user_account(account):
    """Announce a new account to peers, but only if the user asked to be verified.

    :param account: Account instance whose ``profile.user`` carries the
        ``request_to_verify`` flag and ``verification_signature``.
    :return: None
    """
    if account.profile.user.request_to_verify:
        payload = {
            "user": account.profile.user.username,
            "address": account.address,
            "verification_signature": account.profile.user.verification_signature
        }
        peers_utils.broadcast_to_peers("post", new_user_url, json=payload)
def broadcast_new_contract(contract):
    """Push a newly created contract to every known peer.

    :param contract: object exposing ``address`` and ``contract`` attributes.
    :return: None
    """
    contract_payload = {"address": contract.address, "contract": contract.contract}
    peers_utils.broadcast_to_peers("post", new_contract_url, json=contract_payload)
def broadcast_content(content, url):
    """Broadcast *content* to peers at *url*, collecting required trusted entities.

    Entities whose ``role`` value is truthy contribute their ``address`` lists
    to the payload's ``entities`` field.

    :param content: content instance exposing ``address`` and ``content``.
    :param url: relative peer endpoint to post to.
    :return: None
    """
    entity_map = content.content.get("entities")
    required_addresses = [
        addr
        for entity in entity_map.values()
        if entity.get("role")
        for addr in entity.get("address", [])
    ]
    payload = {
        "address": content.address,
        "entities": required_addresses,
        "content": content.content,
    }
    peers_utils.broadcast_to_peers("post", url, json=payload)
def broadcast_new_content(content):
    """Broadcast a newly created content object to peers.

    :param content: content instance forwarded to :func:`broadcast_content`.
    :return: None
    """
    broadcast_content(content, new_content_url)
def broadcast_updated_content(content):
    """Broadcast an updated content object to peers.

    :param content: content instance forwarded to :func:`broadcast_content`.
    :return: None
    """
    broadcast_content(content, updated_content_url)
def find_account_by_addr(addr):
    """
    Stub to hit the account and get relevant details
    Right now mocked to hit and get from PostgreSQL

    :param addr: address string used as the lookup key.
    :return: the matching Account instance, or None when no row matches.
    """
    return account_models.Account.objects.filter(address=addr).first()
def update_account_by_addr(addr, **kwargs):
    """Apply field updates (*kwargs*) to the account with address *addr*; no-op if absent."""
    account_models.Account.objects.filter(address=addr).update(**kwargs)
| 23.954023 | 81 | 0.666507 |
0c1c74d1aaa30c0e4ef78d135d20eca469e1f1b6 | 2,180 | py | Python | bindings/python/cntk/ops/tests/combine_test.py | digimatronics/MS_Deep_Learning_Cognitive | 4a6e8488fb5f3111151431289bbb72707f8c9c13 | [
"RSA-MD"
] | 1 | 2021-07-16T05:38:50.000Z | 2021-07-16T05:38:50.000Z | bindings/python/cntk/ops/tests/combine_test.py | trevhartzell/CNTK | dcdbd0d787580206d99d00d49c7db2fe4236431a | [
"RSA-MD"
] | null | null | null | bindings/python/cntk/ops/tests/combine_test.py | trevhartzell/CNTK | dcdbd0d787580206d99d00d49c7db2fe4236431a | [
"RSA-MD"
] | 1 | 2021-01-21T05:58:03.000Z | 2021-01-21T05:58:03.000Z | # Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
"""
Unit tests for combine operation, only forward pass is tested
"""
from __future__ import division
import numpy as np
import pytest
from .ops_test_utils import AA, I, precision, PRECISION_TO_TYPE, compare_lists_of_np_arrays, cntk_device
from ...utils import sanitize_dtype_cntk, eval as cntk_eval
from .. import plus, minus, classification_error, cross_entropy_with_softmax
# Each row drives one parametrised run of test_op_combine below.
TENSOR_PAIRS = [
    # (first operand, second_operand, ops, expected_forward)
    ([[1., 2., 3., 4., 5]], [[0., 0., 0., 1., 0.]], [plus, minus],
     [[[1., 2., 3., 5., 5]],[[1., 2., 3., 3., 5]]]),
    ([[1., 2., 3., 4., 5]], [[0., 0., 0., 1., 0.]], [cross_entropy_with_softmax, classification_error],
     [[[1.]],[[1.451914]]])
]


@pytest.mark.parametrize("left_operand, right_operand, operations, expected_results", TENSOR_PAIRS)
def test_op_combine(left_operand, right_operand, operations, expected_results, device_id, precision):
    """Forward-evaluate combine() over several ops and check each output."""
    dt = PRECISION_TO_TYPE[precision]
    from .. import combine

    left_value = AA(left_operand, dtype=dt)
    right_value = AA(right_operand, dtype=dt)

    a = I(shape=left_value.shape,
          dtype=sanitize_dtype_cntk(precision),
          needs_gradient=True,
          name='a')
    b = I(shape=right_value.shape,
          dtype=sanitize_dtype_cntk(precision),
          needs_gradient=True,
          name='b')

    # prepend (batch, sequence) axes expected by the evaluator
    left_value.shape = (1, 1) + left_value.shape
    right_value.shape = (1, 1) + right_value.shape

    forward_input = {a: left_value, b: right_value}
    combine_list = []
    for op in operations:
        combine_list.append(op(a,b))

    combine_node = combine(combine_list)
    expected_forward_results = [np.asarray([[i]], dtype=dt) for i in expected_results]
    forward_results, _ = cntk_eval(combine_node, forward_input, precision,
                                   cntk_device(device_id))
    results = list(forward_results.values())
    assert compare_lists_of_np_arrays(results, expected_forward_results)
| 33.538462 | 104 | 0.659174 |
f192fb00f54200570b03de6d09375f974bba3efc | 888 | py | Python | core/urls.py | LFTPadilla/backend | 701ed24d519b3e70beeecffb51c11fc7fe3d8c14 | [
"MIT"
] | null | null | null | core/urls.py | LFTPadilla/backend | 701ed24d519b3e70beeecffb51c11fc7fe3d8c14 | [
"MIT"
] | null | null | null | core/urls.py | LFTPadilla/backend | 701ed24d519b3e70beeecffb51c11fc7fe3d8c14 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
"""
License: MIT
Copyright (c) 2019 - present AppSeed.us
"""
from django.contrib import admin
from django.urls import path, include # add this
from django.contrib.auth.models import User
from rest_framework import routers
# DRF router; no viewsets are registered yet (see commented line below).
router = routers.DefaultRouter()
#router.register(r'', UserViewSet)

urlpatterns = [
    path('admin/', admin.site.urls),
    path('rest/', include('rest_framework.urls', namespace='rest_framework')),
    path("auth/", include("authentication.urls")),
    path("app/", include("app.urls")),
    path('', include(router.urls))
]

# NOTE(review): the string below is dead, commented-out URL wiring kept only
# for reference; consider deleting it.
"""
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    #path('', include(router.urls)),
    path("apigateway/", include("authentication.urls")), # add this
    path("app/", include("app.urls")) # add this
]
"""
20ac91c1322d46b0683e04deed22b87da9255917 | 406 | py | Python | indicoio/preprocess/pdf.py | IndicoDataSolutions/indicoio | 2d92b09dddbac892934cbc4265f26ea9a4c89fac | [
"MIT"
] | null | null | null | indicoio/preprocess/pdf.py | IndicoDataSolutions/indicoio | 2d92b09dddbac892934cbc4265f26ea9a4c89fac | [
"MIT"
] | 3 | 2020-01-22T15:30:43.000Z | 2020-02-10T15:50:19.000Z | indicoio/preprocess/pdf.py | IndicoDataSolutions/indicoio-py | 2d92b09dddbac892934cbc4265f26ea9a4c89fac | [
"MIT"
] | null | null | null | from pathlib import Path
import base64
def pdf_preprocess(pdf):
    """
    Load pdfs from local filepath if not already b64 encoded.

    Args:
        pdf: either a filesystem path to a PDF, or a string that is
            already base64-encoded PDF data.

    Returns:
        str: base64-encoded file contents when *pdf* names an existing
        file, otherwise *pdf* unchanged.
    """
    try:
        # A long b64 payload is not a valid path: Path.exists() can raise
        # OSError ("file name too long") or ValueError (embedded NUL) on
        # some platforms, so treat any such failure as "not a path".
        is_file = Path(pdf).exists()
    except (OSError, ValueError):
        is_file = False
    if is_file:
        # a filepath is provided, read and encode
        with Path(pdf).open("rb") as f:
            return base64.b64encode(f.read()).decode("utf-8")
    # assume pdf is already b64 encoded
    return pdf
| 23.882353 | 61 | 0.6133 |
6b20f9d84d79443a3539df571b982bb835d2f6d8 | 1,559 | py | Python | vzug/vzug.py | tcinbis/python-vzug | 7064fe3f00743e036bb20cd8a7e7909c91cff313 | [
"Apache-2.0"
] | null | null | null | vzug/vzug.py | tcinbis/python-vzug | 7064fe3f00743e036bb20cd8a7e7909c91cff313 | [
"Apache-2.0"
] | null | null | null | vzug/vzug.py | tcinbis/python-vzug | 7064fe3f00743e036bb20cd8a7e7909c91cff313 | [
"Apache-2.0"
] | null | null | null | """A Python Client to get Infos from V-ZUG devices."""
import logging
import json
import requests
from yarl import URL
from . import make_call
from .constants import (
API,
DEVICE_STATUS,
)
_LOGGER = logging.getLogger(__name__)
class VZUG:
"""A class for handling the communication with a V-ZUG device."""
def __init__(self, host: str, username: str, password: str, session: requests.Session() = None) -> None:
"""Initialize the devices."""
self._host = host
self._username = username
self._password = password
self._session = session
self._device_status = None
self.uri = URL.build(scheme="http", host=self._host).join(URL(API))
async def get_device_status(self) -> None:
"""Get the details from the devices."""
url = URL(self.uri).update_query({'command': DEVICE_STATUS})
response = await make_call(self, uri=url, username=self._username, password=self._password)
self._device_status = response
@property
def device_status(self) -> str:
"""Return the current device details."""
return self._device_status
# See "Using Asyncio in Python" by Caleb Hattingh for implementation details.
async def close(self) -> None:
"""Close an open client session."""
if self._session and self._close_session:
await self._session.close()
async def __aenter__(self) -> "VZUG":
"""Async enter."""
return self
async def __aexit__(self, *exc_info) -> None:
"""Async exit.""" | 31.816327 | 108 | 0.649775 |
e81a8b428d6e12b2f1febbb65e14002ec427a172 | 2,969 | py | Python | cv_lib/cv_lib/utils.py | elmajdma/seismic-deeplearning | bc084abe153509c40b45f8bf0f80dfda1049d7dc | [
"MIT"
] | 270 | 2019-12-17T13:40:51.000Z | 2022-03-20T10:02:11.000Z | cv_lib/cv_lib/utils.py | elmajdma/seismic-deeplearning | bc084abe153509c40b45f8bf0f80dfda1049d7dc | [
"MIT"
] | 233 | 2019-12-18T17:59:36.000Z | 2021-08-03T13:43:49.000Z | cv_lib/cv_lib/utils.py | elmajdma/seismic-deeplearning | bc084abe153509c40b45f8bf0f80dfda1049d7dc | [
"MIT"
] | 118 | 2019-12-17T13:41:43.000Z | 2022-03-29T02:06:36.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import logging
from PIL import Image
import numpy as np
from matplotlib import pyplot as plt
def normalize(array, MIN, MAX):
    """Rescale *array* into [0, 1] using the global data range [MIN, MAX].

    A zero-width range is padded with machine epsilon so the division is
    always defined; intended for use with PIL.Image.
    """
    span = MAX - MIN
    if span == 0:
        span = span + np.finfo(float).eps
    return (array - MIN) / span
def mask_to_disk(mask, fname, n_classes, cmap_name="rainbow"):
    """
    write segmentation mask to disk using a particular colormap
    mask (float): this contains the predicted labels in the range [0, n_classes].
    fname (str): of the image to be saved
    n_classes (int): total number of classes in the dataset
    cmap_name (str): name of the matplotlib colormap to be used. The default "rainbow"
        colormap works well for any number of classes.
    """
    cmap = plt.get_cmap(cmap_name)
    # bytes=True makes the colormap return uint8 RGBA, which PIL accepts directly
    Image.fromarray(cmap(mask / n_classes, bytes=True)).save(fname)
def image_to_disk(image, fname, MIN, MAX, cmap_name="seismic"):
    """
    write segmentation image to disk using a particular colormap

    image: raw image values, normalised by the global range [MIN, MAX]
        before colormapping.
    fname (str): path of the image to be saved.
    cmap_name (str): matplotlib colormap name; "seismic" by default.
    """
    cmap = plt.get_cmap(cmap_name)
    # bytes=True makes the colormap return uint8 RGBA, which PIL accepts directly
    Image.fromarray(cmap(normalize(image, MIN, MAX), bytes=True)).save(fname)
def decode_segmap(label_mask, n_classes, colormap_name="rainbow"):
    """
    Decode segmentation class labels into a colour image

    Args:
        label_mask (np.ndarray): an (N,H,W) array of integer values denoting
            the class label at each spatial location.
        n_classes (int): number of classes; used to scale labels into [0, 1]
            before colormapping.
        colormap_name (str): matplotlib colormap used for the decoding.

    Returns:
        (np.ndarray): the resulting decoded color image (NCHW).
    """
    out = np.zeros((label_mask.shape[0], 3, label_mask.shape[1], label_mask.shape[2]))
    cmap = plt.get_cmap(colormap_name)

    # loop over the batch
    for i in range(label_mask.shape[0]):
        # colormap yields RGBA; .convert("RGB") drops the alpha channel
        im = Image.fromarray(cmap((label_mask[i, :, :] / n_classes), bytes=True)).convert("RGB")
        # reorder HWC -> CHW for the output tensor
        out[i, :, :, :] = np.array(im).swapaxes(0, 2).swapaxes(1, 2)
    return out
def load_log_configuration(log_config_file):
    """Load logging configuration from the given configuration file.

    Args:
        log_config_file (str): path to a ``fileConfig``-style logging
            configuration file.

    Raises:
        ValueError: if the path does not refer to an existing regular file.
        Exception: re-raised if :func:`logging.config.fileConfig` fails.
    """
    # isfile() already implies existence, so a separate exists() check is redundant
    if not os.path.isfile(log_config_file):
        # NOTE: the original code built a *tuple* here ("msg", arg), which made
        # the log entry and the ValueError message unreadable; format properly.
        msg = "%s configuration file does not exist!" % log_config_file
        logging.getLogger(__name__).error(msg)
        raise ValueError(msg)
    try:
        logging.config.fileConfig(log_config_file, disable_existing_loggers=False)
        logging.getLogger(__name__).info("%s configuration file was loaded.", log_config_file)
    except Exception as e:
        logging.getLogger(__name__).error("Failed to load configuration from %s!", log_config_file)
        logging.getLogger(__name__).debug(str(e), exc_info=True)
        # bare raise preserves the original traceback
        raise
def generate_path(base_path, *directories):
    """Join *directories* onto *base_path*, creating the directory if needed.

    Args:
        base_path (str): root directory.
        *directories (str): path components appended under ``base_path``.

    Returns:
        str: the joined path, guaranteed to exist as a directory.
    """
    path = os.path.join(base_path, *directories)
    # exist_ok avoids the TOCTOU race between an exists() check and makedirs()
    os.makedirs(path, exist_ok=True)
    return path
| 34.523256 | 99 | 0.677332 |
a27f4171c3366755409098afa7ef72e3fb7b9bea | 3,048 | py | Python | tests/unit/sagemaker/monitor/test_data_capture_config.py | eitansela/sagemaker-python-sdk | aa54102b5113b1d39bbbd4d9d341775f84641681 | [
"Apache-2.0"
] | 1 | 2021-07-22T00:23:51.000Z | 2021-07-22T00:23:51.000Z | tests/unit/sagemaker/monitor/test_data_capture_config.py | eitansela/sagemaker-python-sdk | aa54102b5113b1d39bbbd4d9d341775f84641681 | [
"Apache-2.0"
] | 24 | 2021-05-18T07:10:27.000Z | 2021-05-28T13:36:51.000Z | tests/unit/sagemaker/monitor/test_data_capture_config.py | eitansela/sagemaker-python-sdk | aa54102b5113b1d39bbbd4d9d341775f84641681 | [
"Apache-2.0"
] | 1 | 2021-09-27T04:08:37.000Z | 2021-09-27T04:08:37.000Z | # Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
from mock import Mock
from sagemaker.model_monitor import DataCaptureConfig
# DEFAULT_* values presumably mirror the documented defaults of
# sagemaker.model_monitor.DataCaptureConfig -- confirm against that class.
# NON_DEFAULT_* values are arbitrary overrides used to verify that
# explicitly supplied arguments are stored verbatim.
DEFAULT_ENABLE_CAPTURE = True
DEFAULT_SAMPLING_PERCENTAGE = 20
DEFAULT_BUCKET_NAME = "default-bucket"
DEFAULT_DESTINATION_S3_URI = "s3://{}/model-monitor/data-capture".format(DEFAULT_BUCKET_NAME)
DEFAULT_KMS_KEY_ID = None
DEFAULT_CAPTURE_MODES = ["REQUEST", "RESPONSE"]
DEFAULT_CSV_CONTENT_TYPES = ["text/csv"]
DEFAULT_JSON_CONTENT_TYPES = ["application/json"]
NON_DEFAULT_ENABLE_CAPTURE = False
NON_DEFAULT_CAPTURE_STATUS = "STOPPED"
NON_DEFAULT_SAMPLING_PERCENTAGE = 97
NON_DEFAULT_DESTINATION_S3_URI = "s3://uri/"
NON_DEFAULT_KMS_KEY_ID = "my_kms_key_id"
NON_DEFAULT_CAPTURE_MODES = ["RESPONSE"]
NON_DEFAULT_CSV_CONTENT_TYPES = ["custom/csv-format"]
NON_DEFAULT_JSON_CONTENT_TYPES = ["custom/json-format"]
def test_init_when_non_defaults_provided():
    """Explicitly supplied values must be stored verbatim on the config."""
    config = DataCaptureConfig(
        enable_capture=NON_DEFAULT_ENABLE_CAPTURE,
        sampling_percentage=NON_DEFAULT_SAMPLING_PERCENTAGE,
        destination_s3_uri=NON_DEFAULT_DESTINATION_S3_URI,
        kms_key_id=NON_DEFAULT_KMS_KEY_ID,
        csv_content_types=NON_DEFAULT_CSV_CONTENT_TYPES,
        json_content_types=NON_DEFAULT_JSON_CONTENT_TYPES,
    )
    expected = {
        "enable_capture": NON_DEFAULT_ENABLE_CAPTURE,
        "sampling_percentage": NON_DEFAULT_SAMPLING_PERCENTAGE,
        "destination_s3_uri": NON_DEFAULT_DESTINATION_S3_URI,
        "kms_key_id": NON_DEFAULT_KMS_KEY_ID,
        "csv_content_types": NON_DEFAULT_CSV_CONTENT_TYPES,
        "json_content_types": NON_DEFAULT_JSON_CONTENT_TYPES,
    }
    for attr, value in expected.items():
        assert getattr(config, attr) == value
def test_init_when_optionals_not_provided():
    """Omitted options must fall back to the documented defaults."""
    sagemaker_session = Mock()
    sagemaker_session.default_bucket.return_value = DEFAULT_BUCKET_NAME
    config = DataCaptureConfig(
        enable_capture=DEFAULT_ENABLE_CAPTURE, sagemaker_session=sagemaker_session
    )
    expected = {
        "enable_capture": DEFAULT_ENABLE_CAPTURE,
        "sampling_percentage": DEFAULT_SAMPLING_PERCENTAGE,
        "destination_s3_uri": DEFAULT_DESTINATION_S3_URI,
        "kms_key_id": DEFAULT_KMS_KEY_ID,
        "csv_content_types": DEFAULT_CSV_CONTENT_TYPES,
        "json_content_types": DEFAULT_JSON_CONTENT_TYPES,
    }
    for attr, value in expected.items():
        assert getattr(config, attr) == value
cbeb67bf1fd08a3c02cdb61e1ab99251507a47cb | 453 | py | Python | Others/ddcc/ddcc2020-qual/a.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | 2 | 2020-06-12T09:54:23.000Z | 2021-05-04T01:34:07.000Z | Others/ddcc/ddcc2020-qual/a.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | 961 | 2020-06-23T07:26:22.000Z | 2022-03-31T21:34:52.000Z | Others/ddcc/ddcc2020-qual/a.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
def main():
    """Read the placements in two contests from stdin and print the prize.

    Input: two integers ``x y`` -- placements in contests A and B.
    Places 1/2/3 earn 300000/200000/100000 yen in each contest; winning
    both contests (x == y == 1) earns an extra 400000 yen bonus.
    """
    x, y = map(int, input().split())
    # prize for a single placement; places beyond 3rd earn nothing
    prize = {1: 300000, 2: 200000, 3: 100000}
    ans = prize.get(x, 0) + prize.get(y, 0)
    if x == 1 and y == 1:
        ans += 400000
    print(ans)


if __name__ == '__main__':
    main()
| 15.1 | 37 | 0.381898 |
1a4f98b28bd1740021ee5fd3efdf4e08dad3127f | 3,497 | py | Python | tests/unit/test_cache.py | tevansuk/pip | 87a2bedc170fbf9594ae6d79f3a19efc0fd63f3c | [
"MIT"
] | null | null | null | tests/unit/test_cache.py | tevansuk/pip | 87a2bedc170fbf9594ae6d79f3a19efc0fd63f3c | [
"MIT"
] | null | null | null | tests/unit/test_cache.py | tevansuk/pip | 87a2bedc170fbf9594ae6d79f3a19efc0fd63f3c | [
"MIT"
] | null | null | null | import os
from pip._vendor.packaging.tags import Tag
from pip._internal.cache import WheelCache, _hash_dict
from pip._internal.models.format_control import FormatControl
from pip._internal.models.link import Link
from pip._internal.utils.compat import expanduser
from pip._internal.utils.misc import ensure_dir
def test_expands_path():
    """``~`` in the cache directory must be expanded to the user's home."""
    cache = WheelCache("~/.foo/", None)
    assert cache.cache_dir == expanduser("~/.foo/")
def test_falsey_path_none():
    """A falsey cache directory disables the cache entirely."""
    assert WheelCache(False, None).cache_dir is None
def test_subdirectory_fragment():
    """The subdirectory URL fragment must be part of the cache key."""
    cache = WheelCache("~/.foo/", None)
    links = [Link("git+https://g.c/o/r#subdirectory=%s" % d) for d in ("d1", "d2")]
    paths = {cache.get_path_for_link(link) for link in links}
    # different subdirectories must map to different cache locations
    assert len(paths) == 2
def test_wheel_name_filter(tmpdir):
    """The cache must only return wheels whose name matches the project,
    even when several projects share one cache directory."""
    wc = WheelCache(tmpdir, FormatControl())
    link = Link("https://g.c/package.tar.gz")
    cache_path = wc.get_path_for_link(link)
    ensure_dir(cache_path)
    open(os.path.join(cache_path, "package-1.0-py3-none-any.whl"), "w").close()

    tags = [Tag("py3", "none", "any")]
    # "package" matches the cached wheel name, so a cache hit is returned
    hit = wc.get(link, "package", tags)
    assert hit is not link
    assert os.path.exists(hit.file_path)
    # "package2" does not match, so the original link comes back unchanged
    assert wc.get(link, "package2", tags) is link
def test_cache_hash():
    """_hash_dict must produce the expected stable digests."""
    cases = [
        ({"url": "https://g.c/o/r"},
         "72aa79d3315c181d2cc23239d7109a782de663b6f89982624d8c1e86"),
        ({"url": "https://g.c/o/r", "subdirectory": "sd"},
         "8b13391b6791bf7f3edeabb41ea4698d21bcbdbba7f9c7dc9339750d"),
        ({"subdirectory": u"/\xe9e"},
         "f83b32dfa27a426dec08c21bf006065dd003d0aac78e7fc493d9014d"),
    ]
    for data, digest in cases:
        assert _hash_dict(data) == digest
def test_get_path_for_link_legacy(tmpdir):
    """_get_candidates() must include entries created under the legacy
    hashing mechanism alongside current-scheme entries."""
    wc = WheelCache(tmpdir, FormatControl())
    link = Link("https://g.c/o/r")
    path = wc.get_path_for_link(link)
    legacy_path = wc.get_path_for_link_legacy(link)
    assert path != legacy_path
    # drop one wheel into each cache location
    for directory, wheel in (
        (path, "test-1.0.0-pyz-none-any.whl"),
        (legacy_path, "test-1.0.0-pyx-none-any.whl"),
    ):
        ensure_dir(directory)
        open(os.path.join(directory, wheel), "w").close()
    candidates = {c[0] for c in wc._get_candidates(link, "test")}
    assert candidates == {
        "test-1.0.0-pyx-none-any.whl",
        "test-1.0.0-pyz-none-any.whl",
    }
def test_get_with_legacy_entry_only(tmpdir):
    """WheelCache.get() must find wheels stored only under the legacy
    hashing mechanism."""
    wc = WheelCache(tmpdir, FormatControl())
    link = Link("https://g.c/o/r")
    legacy_path = wc.get_path_for_link_legacy(link)
    ensure_dir(legacy_path)
    open(os.path.join(legacy_path, "test-1.0.0-py3-none-any.whl"), "w").close()
    cached_link = wc.get(link, "test", [Tag("py3", "none", "any")])
    cached_dir = os.path.normcase(os.path.dirname(cached_link.file_path))
    assert cached_dir == os.path.normcase(legacy_path)
| 35.323232 | 78 | 0.686017 |
02fdc4913719e38395b3e92717cbf6fe9bb6b616 | 4,382 | py | Python | grafana_backup/dataDefinition/DataDefinition.py | primoz-p/grafana-backup-tool | 13038a645a32f59483f7d7e068d953c5c98aaa5c | [
"MIT"
] | null | null | null | grafana_backup/dataDefinition/DataDefinition.py | primoz-p/grafana-backup-tool | 13038a645a32f59483f7d7e068d953c5c98aaa5c | [
"MIT"
] | null | null | null | grafana_backup/dataDefinition/DataDefinition.py | primoz-p/grafana-backup-tool | 13038a645a32f59483f7d7e068d953c5c98aaa5c | [
"MIT"
] | null | null | null | import abc
import os
from abc import ABC
from grafana_backup.commons import save_json
class DataDefinition(ABC):
    """Template for backing up one kind of Grafana object.

    Subclasses implement the concrete API calls (``search_data_execute``,
    ``load_element``); this base class drives the workflow: search all
    elements, fetch each one, and save it as JSON under
    ``<backup_dir>/<timestamp>/<name>s`` while appending to a log file.
    """

    def __init__(self, settings, name):
        """Store connection/backup settings and derive output paths.

        Args:
            settings (dict): configuration mapping (BACKUP_DIR, GRAFANA_URL,
                HTTP_GET_HEADERS, ...).
            name (str): human-readable object kind, e.g. ``"dashboard"``.
        """
        self.name = name
        self.backup_dir = settings.get('BACKUP_DIR')
        self.timestamp = settings.get('TIMESTAMP')
        self.limit = settings.get('SEARCH_API_LIMIT')
        self.grafana_url = settings.get('GRAFANA_URL')
        self.http_get_headers = settings.get('HTTP_GET_HEADERS')
        self.verify_ssl = settings.get('VERIFY_SSL')
        self.client_cert = settings.get('CLIENT_CERT')
        self.debug = settings.get('DEBUG')
        self.pretty_print = settings.get('PRETTY_PRINT')
        self.uid_support = settings.get('UID_SUPPORT')
        self.folder_path = '{0}/{1}/{2}s'.format(self.backup_dir, self.timestamp,
                                                 self.name.replace(' ', '_'))
        self.log_path = '{0}/{1}/{2}s.txt'.format(self.backup_dir, self.timestamp,
                                                  self.name.replace(' ', '_'))

    def save(self):
        """Search all elements and save each one, logging to ``log_path``."""
        if not os.path.exists(self.folder_path):
            os.makedirs(self.folder_path)
        page = 1
        data = self.search_data(page)
        if data:
            with open(u"{0}".format(self.log_path), 'w') as f:
                for element in data:
                    self.save_element(element, f)

    def search_data(self, page):
        """Query the search API; return the element list or [] on error."""
        print("Searching {0}s ...".format(self.name.lower()))
        (status, content, api_url) = self.search_data_execute(page, self.limit, self.grafana_url, self.http_get_headers, self.verify_ssl, self.client_cert, self.debug)
        if status == 200:
            print("  {0}s found: {1}".format(self.name.title(), len(content)))
            return content
        else:
            print("  Error searching {0}s:"
                  "\n    status: {1}"
                  "\n    message: {2}".format(self.name.lower(), status, content))
            return []

    def save_element(self, element, log_file):
        """Fetch one element, write it to disk and append a log entry."""
        (status, content, api_url) = self.load_element(element, self.grafana_url,
                                                       self.http_get_headers, self.verify_ssl,
                                                       self.client_cert, self.debug,
                                                       self.uid_support)
        if status == 200:
            file_name = self.get_element_filename(element)
            file_path = save_json(file_name, content, self.folder_path, self.name.lower(),
                                  self.pretty_print)
            log = "{0}: '{1}'".format(self.name.title(),
                                      self.get_element_main_description(element))
            for element_name in ['uid', 'id', 'name', 'title']:
                if element_name in element:
                    log += "\n  {0}: {1}".format(element_name, element[element_name])
            if 'url' in element and element['url']:
                url = element['url']
                # relative URLs (and not raw 172.x addresses) get the base prepended
                if not url.startswith('http') and not url.startswith('172'):
                    url = "{0}{1}".format(self.grafana_url, url)
                log += "\n  URL: {0}".format(url)
            log += "\n  API: {0}".format(api_url)
            log += "\n  saved to: {0}".format(file_path)
            print(log)
            log_file.write(log + '\n')

    @abc.abstractmethod
    def search_data_execute(self, page, limit, grafana_url, http_get_headers, verify_ssl,
                            client_cert, debug):
        """Call the search API; return ``(status, content, api_url)``."""
        raise NotImplementedError

    @abc.abstractmethod
    def load_element(self, element, grafana_url, http_get_headers, verify_ssl, client_cert, debug,
                    uid_support):
        """Fetch one element; return ``(status, content, api_url)``."""
        pass

    @staticmethod
    def get_element_main_description(element):
        """Return the element's 'name' or 'title', whichever exists first."""
        for element_name in ['name', 'title']:
            if element_name in element:
                return element[element_name]

    def get_element_filename(self, element):
        """Build a file name for *element*, preferring its uid over its id.

        Falls back to the name/title (or 'unknown') when neither identifier
        is present -- the previous code raised UnboundLocalError in that case.
        """
        if 'uid' in element:
            file_name = str(element['uid'])
        elif 'id' in element:
            file_name = str(element['id'])
        else:
            file_name = str(element.get('name') or element.get('title') or 'unknown')
        if 'uri' in element:
            file_name += "_" + element['uri'].replace('db/', '')
        return file_name
| 41.339623 | 271 | 0.529895 |
7dbe726b3c59d90f26772d4d475bb9b4fd445fe6 | 5,588 | py | Python | test/sessionfixtures.py | Pytwitcher/pytwitcher-api | d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336 | [
"BSD-3-Clause"
] | 26 | 2015-05-13T15:49:29.000Z | 2021-02-12T00:27:02.000Z | test/sessionfixtures.py | Pytwitcher/pytwitcher-api | d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336 | [
"BSD-3-Clause"
] | 21 | 2015-03-15T13:39:21.000Z | 2018-06-02T01:12:15.000Z | test/sessionfixtures.py | Pytwitcher/pytwitcher-api | d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336 | [
"BSD-3-Clause"
] | 3 | 2016-09-16T10:53:03.000Z | 2017-08-26T22:42:32.000Z | import mock
import pytest
import requests
from pytwitcherapi import session, constants
def create_mockresponse(returnvalue):
    """Build a fake HTTP response whose ``json()`` call yields *returnvalue*.

    :param returnvalue: payload that the mocked ``json`` method returns
    :type returnvalue: :class:`dict`
    :returns: a mock object with a stubbed ``json`` method
    :rtype: :class:`mock.Mock`
    :raises: None
    """
    response = mock.Mock()
    response.json = mock.Mock(return_value=returnvalue)
    return response
@pytest.fixture(scope="function")
def mock_session(monkeypatch):
    """Swap :meth:`requests.Session.request` for a mock for this test."""
    monkeypatch.setattr(requests.Session, "request", mock.Mock())
@pytest.fixture(scope="function")
def ts(mock_session):
    """Provide a :class:`session.TwitchSession` whose underlying
    :class:`requests.Session` request method is already mocked."""
    twitch_session = session.TwitchSession()
    return twitch_session
@pytest.fixture(scope="function",
                params=[400, 499, 500, 599])
def mock_session_error_status(request, mock_session):
    """Make the mocked session return responses with HTTP error codes."""
    error_response = requests.Response()
    error_response.status_code = request.param
    requests.Session.request.return_value = error_response
@pytest.fixture(scope="function")
def games_search_response(game1json, game2json):
    """Mock response whose ``json()`` yields a game search result
    containing game1json and game2json."""
    payload = {"games": [game1json, game2json]}
    return create_mockresponse(payload)
@pytest.fixture(scope="function")
def top_games_response(game1json, game2json):
    """Mock response whose ``json()`` yields the top-games result for
    game1json and game2json."""
    entries = [
        {'game': game1json, 'viewers': 123, 'channels': 32},
        {'game': game2json, 'viewers': 7312, 'channels': 95},
    ]
    return create_mockresponse({"top": entries})
@pytest.fixture(scope="function")
def search_channels_response(channel1json, channel2json):
    """Mock response whose ``json()`` yields a channel search result with
    channel1json and channel2json."""
    payload = {"channels": [channel1json, channel2json]}
    return create_mockresponse(payload)
@pytest.fixture(scope="function")
def get_channel_response(channel1json):
    """Mock response whose ``json()`` yields channel1json directly."""
    response = create_mockresponse(channel1json)
    return response
@pytest.fixture(scope="function")
def search_streams_response(stream1json, stream2json):
    """Mock response whose ``json()`` yields a stream search result with
    stream1json and stream2json."""
    payload = {"streams": [stream1json, stream2json]}
    return create_mockresponse(payload)
@pytest.fixture(scope="function")
def get_stream_response(stream1json):
    """Mock response whose ``json()`` yields the single-stream result
    wrapping stream1json."""
    payload = {"stream": stream1json}
    return create_mockresponse(payload)
@pytest.fixture(scope="function")
def get_offline_stream_response():
    """Mock response whose ``json()`` yields an offline channel
    (no stream)."""
    payload = {'stream': None}
    return create_mockresponse(payload)
@pytest.fixture(scope="function")
def get_user_response(user1json):
    """Mock response whose ``json()`` yields user1json directly."""
    response = create_mockresponse(user1json)
    return response
@pytest.fixture(scope='function')
def access_token_response():
    """Mock response whose ``json()`` yields a channel access token
    payload (token, mobile_restricted flag and signature)."""
    payload = {u'token': u'{"channel":"test_channel"}',
               u'mobile_restricted': False,
               u'sig': u'f63275898c8aa0b88a6e22acf95088323f006b9d'}
    return create_mockresponse(payload)
@pytest.fixture(scope='function')
def playlist():
    """Return a sample HLS variant playlist (m3u8) with six quality levels:
    Source, High, Medium, Low, Mobile and Audio Only."""
    p = """#EXTM3U
#EXT-X-MEDIA:TYPE=VIDEO,GROUP-ID="chunked",NAME="Source"
#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=128000,CODECS="mp4a.40.2",VIDEO="chunked"
sourclink
#EXT-X-MEDIA:TYPE=VIDEO,GROUP-ID="high",NAME="High"
#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=128000,CODECS="mp4a.40.2",VIDEO="high"
highlink
#EXT-X-MEDIA:TYPE=VIDEO,GROUP-ID="medium",NAME="Medium"
#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=128000,CODECS="mp4a.40.2",VIDEO="medium"
mediumlink
#EXT-X-MEDIA:TYPE=VIDEO,GROUP-ID="low",NAME="Low"
#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=128000,CODECS="mp4a.40.2",VIDEO="low"
lowlink
#EXT-X-MEDIA:TYPE=VIDEO,GROUP-ID="mobile",NAME="Mobile"
#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=128000,CODECS="mp4a.40.2",VIDEO="mobile"
mobilelink
#EXT-X-MEDIA:TYPE=VIDEO,GROUP-ID="audio_only",NAME="Audio Only"
#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=128000,CODECS="mp4a.40.2",VIDEO="audio_only"
audioonlylink"""
    return p
@pytest.fixture(scope='function')
def login_server(request, user1, monkeypatch):
    """Start a TwitchSession login server on a free ephemeral port.

    Patches ``constants.LOGIN_SERVER_ADRESS`` to port 0 so the OS picks a
    free port, stubs the session's user query to return *user1*, rewrites
    ``constants.REDIRECT_URI`` to the chosen port, and registers a
    finalizer that shuts the server down after the test.
    """
    monkeypatch.setattr(constants, 'LOGIN_SERVER_ADRESS', ('', 0))
    def query_login_user():
        # stand-in for the real Twitch user query
        return user1
    ts = session.TwitchSession()
    ts.query_login_user = query_login_user
    def shutdown():
        ts.shutdown_login_server()
    request.addfinalizer(shutdown)
    ts.start_login_server()
    # port 0 above lets the OS choose; propagate the real port to the URI
    port = ts.login_server.socket.getsockname()[1]
    redirecturi = constants.REDIRECT_URI.replace('42420', str(port))
    monkeypatch.setattr(constants, 'REDIRECT_URI', redirecturi)
    return ts
| 32.678363 | 86 | 0.704009 |
dd0b668ed4c2e0bd9ea1ac268b71815ec6ec5ed8 | 489 | py | Python | tests/helpers.py | graingert/compose | 371ea479f5995a31145ee1f597aae8c08110aa25 | [
"Apache-2.0"
] | null | null | null | tests/helpers.py | graingert/compose | 371ea479f5995a31145ee1f597aae8c08110aa25 | [
"Apache-2.0"
] | null | null | null | tests/helpers.py | graingert/compose | 371ea479f5995a31145ee1f597aae8c08110aa25 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import unicode_literals
from compose.config.config import ConfigDetails
from compose.config.config import ConfigFile
from compose.config.config import load
def build_config(contents, **kwargs):
    """Load a compose config object from raw *contents*; extra kwargs are
    forwarded to :func:`build_config_details`."""
    details = build_config_details(contents, **kwargs)
    return load(details)
def build_config_details(contents, working_dir='working_dir', filename='filename.yml'):
    """Wrap *contents* in a single-file :class:`ConfigDetails` for tests."""
    config_file = ConfigFile(filename, contents)
    return ConfigDetails(working_dir, [config_file])
| 28.764706 | 87 | 0.789366 |
d96bef468600ed3dcdc0bc994b07361d70617610 | 1,651 | py | Python | src/tox/package/view.py | Djailla/tox | a9ec66182dffd76cf9569d1e8b956d082b26545c | [
"MIT"
] | 1 | 2022-03-29T12:44:45.000Z | 2022-03-29T12:44:45.000Z | src/tox/package/view.py | Djailla/tox | a9ec66182dffd76cf9569d1e8b956d082b26545c | [
"MIT"
] | 4 | 2021-03-31T20:05:34.000Z | 2022-01-13T03:08:13.000Z | src/tox/package/view.py | Djailla/tox | a9ec66182dffd76cf9569d1e8b956d082b26545c | [
"MIT"
] | null | null | null | import os
from itertools import chain
import six
from tox.reporter import verbosity1
def create_session_view(package, temp_dir):
    """Return a session-stable view (hard link or copy) of a built package.

    A subsequent call might delete *package* (in order to do its own build),
    so we link/copy it into a numbered session folder under *temp_dir* that
    is not prone to deletion and can be removed when the session ends.

    :param package: py.path.local of the built package (falsy -> returned as-is)
    :param temp_dir: py.path.local session temp directory
    :returns: py.path.local of the linked/copied package
    """
    if not package:
        return package
    package_dir = temp_dir.join("package")
    package_dir.ensure(dir=True)
    # we'll number the active instances, and use the max value as session folder for a new build
    # note we cannot change package names as PEP-491 (wheel binary format)
    # is strict about file name structure
    exists = [i.basename for i in package_dir.listdir()]
    file_id = max(chain((0,), (int(i) for i in exists if six.text_type(i).isnumeric())))
    session_dir = package_dir.join(str(file_id + 1))
    session_dir.ensure(dir=True)
    session_package = session_dir.join(package.basename)
    # if we can do hard links do that, otherwise just copy
    links = False
    if hasattr(os, "link"):
        try:
            os.link(str(package), str(session_package))
            links = True
        except (OSError, NotImplementedError):
            # e.g. cross-device link or a filesystem without hard links
            pass
    if not links:
        package.copy(session_package)
    operation = "links" if links else "copied"
    common = session_package.common(package)
    verbosity1(
        "package {} {} to {} ({})".format(
            common.bestrelpath(session_package), operation, common.bestrelpath(package), common,
        ),
    )
    return session_package
| 34.395833 | 96 | 0.674137 |
c7c71cd5a559cf610c50bfac57457cfedac770c5 | 57,115 | py | Python | Lib/tkinter/ttk.py | shawwn/cpython | 0ff8a3b374286d2218fc18f47556a5ace202dad3 | [
"0BSD"
] | 1,886 | 2021-05-03T23:58:43.000Z | 2022-03-31T19:15:58.000Z | Lib/tkinter/ttk.py | shawwn/cpython | 0ff8a3b374286d2218fc18f47556a5ace202dad3 | [
"0BSD"
] | 70 | 2021-05-04T23:25:35.000Z | 2022-03-31T18:42:08.000Z | Lib/tkinter/ttk.py | shawwn/cpython | 0ff8a3b374286d2218fc18f47556a5ace202dad3 | [
"0BSD"
] | 52 | 2021-05-04T21:26:03.000Z | 2022-03-08T18:02:56.000Z | """Ttk wrapper.
This module provides classes to allow using Tk themed widget set.
Ttk is based on a revised and enhanced version of
TIP #48 (http://tip.tcl.tk/48) specified style engine.
Its basic idea is to separate, to the extent possible, the code
implementing a widget's behavior from the code implementing its
appearance. Widget class bindings are primarily responsible for
maintaining the widget state and invoking callbacks, all aspects
of the widgets appearance lies at Themes.
"""
__version__ = "0.3.1"
__author__ = "Guilherme Polo <ggpolo@gmail.com>"
__all__ = ["Button", "Checkbutton", "Combobox", "Entry", "Frame", "Label",
"Labelframe", "LabelFrame", "Menubutton", "Notebook", "Panedwindow",
"PanedWindow", "Progressbar", "Radiobutton", "Scale", "Scrollbar",
"Separator", "Sizegrip", "Spinbox", "Style", "Treeview",
# Extensions
"LabeledScale", "OptionMenu",
# functions
"tclobjs_to_py", "setup_master"]
import tkinter
from tkinter import _flatten, _join, _stringify, _splitdict
# Verify if Tk is new enough to not need the Tile package
_REQUIRE_TILE = True if tkinter.TkVersion < 8.5 else False
def _load_tile(master):
    """Require the Tcl ``tile`` package on *master* when Tk < 8.5."""
    if _REQUIRE_TILE:
        import os
        tilelib = os.environ.get('TILE_LIBRARY')
        if tilelib:
            # append the custom tile path to the directories Tcl searches
            # when resolving packages with the package command
            master.tk.eval(
                'global auto_path; '
                'lappend auto_path {%s}' % tilelib)
        master.tk.eval('package require tile') # TclError may be raised here
        master._tile_loaded = True
def _format_optvalue(value, script=False):
"""Internal function."""
if script:
# if caller passes a Tcl script to tk.call, all the values need to
# be grouped into words (arguments to a command in Tcl dialect)
value = _stringify(value)
elif isinstance(value, (list, tuple)):
value = _join(value)
return value
def _format_optdict(optdict, script=False, ignore=None):
    """Formats optdict to a tuple to pass it to tk.call.

    E.g. (script=False):
      {'foreground': 'blue', 'padding': [1, 2, 3, 4]} returns:
      ('-foreground', 'blue', '-padding', '1 2 3 4')"""
    opts = []
    for opt, value in optdict.items():
        if ignore and opt in ignore:
            continue
        opts.append("-%s" % opt)
        if value is not None:
            opts.append(_format_optvalue(value, script))
    return _flatten(opts)
def _mapdict_values(items):
# each value in mapdict is expected to be a sequence, where each item
# is another sequence containing a state (or several) and a value
# E.g. (script=False):
# [('active', 'selected', 'grey'), ('focus', [1, 2, 3, 4])]
# returns:
# ['active selected', 'grey', 'focus', [1, 2, 3, 4]]
opt_val = []
for *state, val in items:
# hacks for backward compatibility
state[0] # raise IndexError if empty
if len(state) == 1:
# if it is empty (something that evaluates to False), then
# format it to Tcl code to denote the "normal" state
state = state[0] or ''
else:
# group multiple states
state = ' '.join(state) # raise TypeError if not str
opt_val.append(state)
if val is not None:
opt_val.append(val)
return opt_val
def _format_mapdict(mapdict, script=False):
    """Formats mapdict to pass it to tk.call.

    E.g. (script=False):
      {'expand': [('active', 'selected', 'grey'), ('focus', [1, 2, 3, 4])]}
      returns ('-expand', '{active selected} grey focus {1, 2, 3, 4}')"""
    opts = []
    for opt, value in mapdict.items():
        opts.append("-%s" % opt)
        opts.append(_format_optvalue(_mapdict_values(value), script))
    return _flatten(opts)
def _format_elemcreate(etype, script=False, *args, **kw):
    """Formats args and kw according to the given element factory etype.

    Returns a (spec, opts) pair suitable for ``ttk::style element create``.
    Supported etypes are "image", "vsapi" and "from"; for any other etype
    spec stays None (NOTE(review): with script=True that becomes the
    literal string '{None}' -- presumably callers never pass an unknown
    etype; confirm before relying on it).
    """
    spec = None
    opts = ()
    if etype in ("image", "vsapi"):
        if etype == "image": # define an element based on an image
            # first arg should be the default image name
            iname = args[0]
            # next args, if any, are statespec/value pairs which is almost
            # a mapdict, but we just need the value
            imagespec = _join(_mapdict_values(args[1:]))
            spec = "%s %s" % (iname, imagespec)
        else:
            # define an element whose visual appearance is drawn using the
            # Microsoft Visual Styles API which is responsible for the
            # themed styles on Windows XP and Vista.
            # Availability: Tk 8.6, Windows XP and Vista.
            class_name, part_id = args[:2]
            statemap = _join(_mapdict_values(args[2:]))
            spec = "%s %s %s" % (class_name, part_id, statemap)
        opts = _format_optdict(kw, script)
    elif etype == "from": # clone an element
        # it expects a themename and optionally an element to clone from,
        # otherwise it will clone {} (empty element)
        spec = args[0] # theme name
        if len(args) > 1: # elementfrom specified
            opts = (_format_optvalue(args[1], script),)
    if script:
        # scripts need the spec braced into a single Tcl word and the
        # options collapsed into one string
        spec = '{%s}' % spec
        opts = ' '.join(opts)
    return spec, opts
def _format_layoutlist(layout, indent=0, indent_size=2):
    """Formats a layout list so we can pass the result to ttk::style
    layout and ttk::style settings. Note that the layout doesn't have to
    be a list necessarily.

    Returns a pair (script, indent): the formatted Tcl fragment and the
    indentation level after processing (used by the recursive calls).

    E.g.:
      [("Menubutton.background", None),
       ("Menubutton.button", {"children":
           [("Menubutton.focus", {"children":
               [("Menubutton.padding", {"children":
                [("Menubutton.label", {"side": "left", "expand": 1})]
               })]
           })]
       }),
       ("Menubutton.indicator", {"side": "right"})
      ]

      returns:

      Menubutton.background
      Menubutton.button -children {
        Menubutton.focus -children {
          Menubutton.padding -children {
            Menubutton.label -side left -expand 1
          }
        }
      }
      Menubutton.indicator -side right"""
    script = []

    for layout_elem in layout:
        elem, opts = layout_elem
        opts = opts or {}
        # format the options of this element, except 'children' which is
        # handled recursively below
        fopts = ' '.join(_format_optdict(opts, True, ("children",)))
        head = "%s%s%s" % (' ' * indent, elem, (" %s" % fopts) if fopts else '')

        if "children" in opts:
            script.append(head + " -children {")
            indent += indent_size
            newscript, indent = _format_layoutlist(opts['children'], indent,
                indent_size)
            script.append(newscript)
            indent -= indent_size
            script.append('%s}' % (' ' * indent))
        else:
            script.append(head)

    return '\n'.join(script), indent
def _script_from_settings(settings):
    """Returns an appropriate script, based on settings, according to
    theme_settings definition to be used by theme_settings and
    theme_create.

    *settings* maps style names to dicts that may contain the keys
    'configure', 'map', 'layout' and 'element create'.
    """
    script = []
    # a script will be generated according to settings passed, which
    # will then be evaluated by Tcl
    for name, opts in settings.items():
        # will format specific keys according to Tcl code
        if opts.get('configure'): # format 'configure'
            s = ' '.join(_format_optdict(opts['configure'], True))
            script.append("ttk::style configure %s %s;" % (name, s))

        if opts.get('map'): # format 'map'
            s = ' '.join(_format_mapdict(opts['map'], True))
            script.append("ttk::style map %s %s;" % (name, s))

        if 'layout' in opts: # format 'layout' which may be empty
            if not opts['layout']:
                s = 'null' # could be any other word, but this one makes sense
            else:
                s, _ = _format_layoutlist(opts['layout'])
            script.append("ttk::style layout %s {\n%s\n}" % (name, s))

        if opts.get('element create'): # format 'element create'
            eopts = opts['element create']
            etype = eopts[0]

            # find where args end, and where kwargs start
            argc = 1 # etype was the first one
            while argc < len(eopts) and not hasattr(eopts[argc], 'items'):
                argc += 1

            elemargs = eopts[1:argc]
            elemkw = eopts[argc] if argc < len(eopts) and eopts[argc] else {}
            spec, opts = _format_elemcreate(etype, True, *elemargs, **elemkw)

            script.append("ttk::style element create %s %s %s %s" % (
                name, etype, spec, opts))

    return '\n'.join(script)
def _list_from_statespec(stuple):
"""Construct a list from the given statespec tuple according to the
accepted statespec accepted by _format_mapdict."""
nval = []
for val in stuple:
typename = getattr(val, 'typename', None)
if typename is None:
nval.append(val)
else: # this is a Tcl object
val = str(val)
if typename == 'StateSpec':
val = val.split()
nval.append(val)
it = iter(nval)
return [_flatten(spec) for spec in zip(it, it)]
def _list_from_layouttuple(tk, ltuple):
"""Construct a list from the tuple returned by ttk::layout, this is
somewhat the reverse of _format_layoutlist."""
ltuple = tk.splitlist(ltuple)
res = []
indx = 0
while indx < len(ltuple):
name = ltuple[indx]
opts = {}
res.append((name, opts))
indx += 1
while indx < len(ltuple): # grab name's options
opt, val = ltuple[indx:indx + 2]
if not opt.startswith('-'): # found next name
break
opt = opt[1:] # remove the '-' from the option
indx += 2
if opt == 'children':
val = _list_from_layouttuple(tk, val)
opts[opt] = val
return res
def _val_or_dict(tk, options, *args):
    """Format options then call Tk command with args and options and return
    the appropriate result.

    If no option is specified, a dict is returned. If an option is
    specified with the None value, the value for that option is returned.
    Otherwise, the function just sets the passed options and the caller
    shouldn't be expecting a return value anyway."""
    fmt_opts = _format_optdict(options)
    res = tk.call(*(args + fmt_opts))

    # an odd number of formatted items means one option had no value: a query
    if len(fmt_opts) % 2:
        return res

    return _splitdict(tk, res, conv=_tclobj_to_py)
def _convert_stringval(value):
"""Converts a value to, hopefully, a more appropriate Python object."""
value = str(value)
try:
value = int(value)
except (ValueError, TypeError):
pass
return value
def _to_number(x):
if isinstance(x, str):
if '.' in x:
x = float(x)
else:
x = int(x)
return x
def _tclobj_to_py(val):
    """Return value converted from Tcl object to Python object."""
    if val and hasattr(val, '__len__') and not isinstance(val, str):
        # a non-empty sequence: either a statespec list or plain values
        if getattr(val[0], 'typename', None) == 'StateSpec':
            return _list_from_statespec(val)
        return [_convert_stringval(item) for item in val]
    if hasattr(val, 'typename'): # some other (single) Tcl object
        return _convert_stringval(val)
    return val
def tclobjs_to_py(adict):
    """Returns adict with its values converted from Tcl objects to Python
    objects."""
    for key in adict:
        adict[key] = _tclobj_to_py(adict[key])
    return adict
def setup_master(master=None):
    """If master is not None, itself is returned. If master is None,
    the default master is returned if there is one, otherwise a new
    master is created and returned.

    If it is not allowed to use the default root and master is None,
    RuntimeError is raised."""
    if master is not None:
        return master
    if not tkinter._support_default_root:
        raise RuntimeError(
            "No master specified and tkinter is "
            "configured to not support default root")
    return tkinter._default_root or tkinter.Tk()
class Style(object):
    """Manipulate style database."""

    # Name of the Tcl command implementing the style engine.
    _name = "ttk::style"

    def __init__(self, master=None):
        master = setup_master(master)

        if not getattr(master, '_tile_loaded', False):
            # Load tile now, if needed
            _load_tile(master)

        self.master = master
        self.tk = self.master.tk

    def configure(self, style, query_opt=None, **kw):
        """Query or sets the default value of the specified option(s) in
        style.

        Each key in kw is an option and each value is either a string or
        a sequence identifying the value for that option."""
        if query_opt is not None:
            # A query is expressed as an option with a None value.
            kw[query_opt] = None
        result = _val_or_dict(self.tk, kw, self._name, "configure", style)
        if result or query_opt:
            return result

    def map(self, style, query_opt=None, **kw):
        """Query or sets dynamic values of the specified option(s) in
        style.

        Each key in kw is an option and each value should be a list or a
        tuple (usually) containing statespecs grouped in tuples, or list,
        or something else of your preference. A statespec is compound of
        one or more states and then a value."""
        if query_opt is not None:
            return _list_from_statespec(self.tk.splitlist(
                self.tk.call(self._name, "map", style, '-%s' % query_opt)))

        return _splitdict(
            self.tk,
            self.tk.call(self._name, "map", style, *_format_mapdict(kw)),
            conv=_tclobj_to_py)

    def lookup(self, style, option, state=None, default=None):
        """Returns the value specified for option in style.

        If state is specified it is expected to be a sequence of one
        or more states. If the default argument is set, it is used as
        a fallback value in case no specification for option is found."""
        state = ' '.join(state) if state else ''

        return self.tk.call(self._name, "lookup", style, '-%s' % option,
            state, default)

    def layout(self, style, layoutspec=None):
        """Define the widget layout for given style. If layoutspec is
        omitted, return the layout specification for given style.

        layoutspec is expected to be a list or an object different than
        None that evaluates to False if you want to "turn off" that style.
        If it is a list (or tuple, or something else), each item should be
        a tuple where the first item is the layout name and the second item
        should have the format described below:

        LAYOUTS

            A layout can contain the value None, if takes no options, or
            a dict of options specifying how to arrange the element.
            The layout mechanism uses a simplified version of the pack
            geometry manager: given an initial cavity, each element is
            allocated a parcel. Valid options/values are:

                side: whichside
                    Specifies which side of the cavity to place the
                    element; one of top, right, bottom or left. If
                    omitted, the element occupies the entire cavity.

                sticky: nswe
                    Specifies where the element is placed inside its
                    allocated parcel.

                children: [sublayout... ]
                    Specifies a list of elements to place inside the
                    element. Each element is a tuple (or other sequence)
                    where the first item is the layout name, and the other
                    is a LAYOUT."""
        lspec = None
        if layoutspec:
            lspec = _format_layoutlist(layoutspec)[0]
        elif layoutspec is not None:  # will disable the layout ({}, '', etc)
            lspec = "null"  # could be any other word, but this may make sense
                            # when calling layout(style) later

        return _list_from_layouttuple(self.tk,
            self.tk.call(self._name, "layout", style, lspec))

    def element_create(self, elementname, etype, *args, **kw):
        """Create a new element in the current theme of given etype."""
        spec, opts = _format_elemcreate(etype, False, *args, **kw)
        self.tk.call(self._name, "element", "create", elementname, etype,
            spec, *opts)

    def element_names(self):
        """Returns the list of elements defined in the current theme."""
        # Element names come back with a leading '-' which is stripped here.
        return tuple(n.lstrip('-') for n in self.tk.splitlist(
            self.tk.call(self._name, "element", "names")))

    def element_options(self, elementname):
        """Return the list of elementname's options."""
        return tuple(o.lstrip('-') for o in self.tk.splitlist(
            self.tk.call(self._name, "element", "options", elementname)))

    def theme_create(self, themename, parent=None, settings=None):
        """Creates a new theme.

        It is an error if themename already exists. If parent is
        specified, the new theme will inherit styles, elements and
        layouts from the specified parent theme. If settings are present,
        they are expected to have the same syntax used for theme_settings."""
        script = _script_from_settings(settings) if settings else ''

        if parent:
            self.tk.call(self._name, "theme", "create", themename,
                "-parent", parent, "-settings", script)
        else:
            self.tk.call(self._name, "theme", "create", themename,
                "-settings", script)

    def theme_settings(self, themename, settings):
        """Temporarily sets the current theme to themename, apply specified
        settings and then restore the previous theme.

        Each key in settings is a style and each value may contain the
        keys 'configure', 'map', 'layout' and 'element create' and they
        are expected to have the same format as specified by the methods
        configure, map, layout and element_create respectively."""
        script = _script_from_settings(settings)
        self.tk.call(self._name, "theme", "settings", themename, script)

    def theme_names(self):
        """Returns a list of all known themes."""
        return self.tk.splitlist(self.tk.call(self._name, "theme", "names"))

    def theme_use(self, themename=None):
        """If themename is None, returns the theme in use, otherwise, set
        the current theme to themename, refreshes all widgets and emits
        a <<ThemeChanged>> event."""
        if themename is None:
            # Starting on Tk 8.6, checking this global is no longer needed
            # since it allows doing self.tk.call(self._name, "theme", "use")
            return self.tk.eval("return $ttk::currentTheme")

        # using "ttk::setTheme" instead of "ttk::style theme use" causes
        # the variable currentTheme to be updated, also, ttk::setTheme calls
        # "ttk::style theme use" in order to change theme.
        self.tk.call("ttk::setTheme", themename)
class Widget(tkinter.Widget):
    """Base class for Tk themed widgets."""

    def __init__(self, master, widgetname, kw=None):
        """Constructs a Ttk Widget with the parent master.

        STANDARD OPTIONS

            class, cursor, takefocus, style

        SCROLLABLE WIDGET OPTIONS

            xscrollcommand, yscrollcommand

        LABEL WIDGET OPTIONS

            text, textvariable, underline, image, compound, width

        WIDGET STATES

            active, disabled, focus, pressed, selected, background,
            readonly, alternate, invalid
        """
        master = setup_master(master)
        if not getattr(master, '_tile_loaded', False):
            # Load tile now, if needed
            _load_tile(master)
        tkinter.Widget.__init__(self, master, widgetname, kw=kw)

    def identify(self, x, y):
        """Returns the name of the element at position x, y, or the empty
        string if the point does not lie within any element.

        x and y are pixel coordinates relative to the widget."""
        return self.tk.call(self._w, "identify", x, y)

    def instate(self, statespec, callback=None, *args, **kw):
        """Test the widget's state.

        If callback is not specified, returns True if the widget state
        matches statespec and False otherwise. If callback is specified,
        then it will be invoked with *args, **kw if the widget state
        matches statespec. statespec is expected to be a sequence."""
        ret = self.tk.getboolean(
                self.tk.call(self._w, "instate", ' '.join(statespec)))
        if ret and callback:
            return callback(*args, **kw)

        return ret

    def state(self, statespec=None):
        """Modify or inquire widget state.

        Widget state is returned if statespec is None, otherwise it is
        set according to the statespec flags and then a new state spec
        is returned indicating which flags were changed. statespec is
        expected to be a sequence."""
        if statespec is not None:
            statespec = ' '.join(statespec)

        return self.tk.splitlist(str(self.tk.call(self._w, "state", statespec)))
class Button(Widget):
    """Ttk Button widget, displays a textual label and/or image, and
    evaluates a command when pressed."""

    def __init__(self, master=None, **kw):
        """Create a Ttk Button widget with parent master.

        STANDARD OPTIONS

            class, compound, cursor, image, state, style, takefocus,
            text, textvariable, underline, width

        WIDGET-SPECIFIC OPTIONS

            command, default, width
        """
        super().__init__(master, "ttk::button", kw)

    def invoke(self):
        """Invokes the command associated with the button."""
        return self.tk.call(self._w, "invoke")
class Checkbutton(Widget):
    """Ttk Checkbutton widget which is either in on- or off-state."""

    def __init__(self, master=None, **kw):
        """Create a Ttk Checkbutton widget with parent master.

        STANDARD OPTIONS

            class, compound, cursor, image, state, style, takefocus,
            text, textvariable, underline, width

        WIDGET-SPECIFIC OPTIONS

            command, offvalue, onvalue, variable
        """
        super().__init__(master, "ttk::checkbutton", kw)

    def invoke(self):
        """Toggles between the selected and deselected states and
        invokes the associated command. If the widget is currently
        selected, sets the option variable to the offvalue option
        and deselects the widget; otherwise, sets the option variable
        to the option onvalue.

        Returns the result of the associated command."""
        return self.tk.call(self._w, "invoke")
class Entry(Widget, tkinter.Entry):
    """Ttk Entry widget displays a one-line text string and allows that
    string to be edited by the user."""

    def __init__(self, master=None, widget=None, **kw):
        """Create a Ttk Entry widget with parent master.

        STANDARD OPTIONS

            class, cursor, style, takefocus, xscrollcommand

        WIDGET-SPECIFIC OPTIONS

            exportselection, invalidcommand, justify, show, state,
            textvariable, validate, validatecommand, width

        VALIDATION MODES

            none, key, focus, focusin, focusout, all
        """
        # Subclasses pass their own widget name; plain entries get the
        # default "ttk::entry".
        Widget.__init__(self, master, widget or "ttk::entry", kw)

    def bbox(self, index):
        """Return a tuple of (x, y, width, height) which describes the
        bounding box of the character given by index."""
        raw = self.tk.call(self._w, "bbox", index)
        return self._getints(raw)

    def identify(self, x, y):
        """Returns the name of the element at position x, y, or the
        empty string if the coordinates are outside the window."""
        return self.tk.call(self._w, "identify", x, y)

    def validate(self):
        """Force revalidation, independent of the conditions specified
        by the validate option. Returns False if validation fails, True
        if it succeeds. Sets or clears the invalid state accordingly."""
        outcome = self.tk.call(self._w, "validate")
        return self.tk.getboolean(outcome)
class Combobox(Entry):
    """Ttk Combobox widget combines a text field with a pop-down list of
    values."""

    def __init__(self, master=None, **kw):
        """Create a Ttk Combobox widget with parent master.

        STANDARD OPTIONS

            class, cursor, style, takefocus

        WIDGET-SPECIFIC OPTIONS

            exportselection, justify, height, postcommand, state,
            textvariable, values, width
        """
        super().__init__(master, "ttk::combobox", **kw)

    def current(self, newindex=None):
        """If newindex is supplied, sets the combobox value to the
        element at position newindex in the list of values. Otherwise,
        returns the index of the current value in the list of values
        or -1 if the current value does not appear in the list."""
        if newindex is not None:
            return self.tk.call(self._w, "current", newindex)
        return self.tk.getint(self.tk.call(self._w, "current"))

    def set(self, value):
        """Sets the value of the combobox to value."""
        self.tk.call(self._w, "set", value)
class Frame(Widget):
    """Ttk Frame widget is a container, used to group other widgets
    together."""

    def __init__(self, master=None, **kw):
        """Create a Ttk Frame with parent master.

        STANDARD OPTIONS

            class, cursor, style, takefocus

        WIDGET-SPECIFIC OPTIONS

            borderwidth, relief, padding, width, height
        """
        super().__init__(master, "ttk::frame", kw)
class Label(Widget):
    """Ttk Label widget displays a textual label and/or image."""

    def __init__(self, master=None, **kw):
        """Create a Ttk Label with parent master.

        STANDARD OPTIONS

            class, compound, cursor, image, style, takefocus, text,
            textvariable, underline, width

        WIDGET-SPECIFIC OPTIONS

            anchor, background, font, foreground, justify, padding,
            relief, text, wraplength
        """
        super().__init__(master, "ttk::label", kw)
class Labelframe(Widget):
    """Ttk Labelframe widget is a container used to group other widgets
    together. It has an optional label, which may be a plain text string
    or another widget."""

    def __init__(self, master=None, **kw):
        """Create a Ttk Labelframe with parent master.

        STANDARD OPTIONS

            class, cursor, style, takefocus

        WIDGET-SPECIFIC OPTIONS

            labelanchor, text, underline, padding, labelwidget, width,
            height
        """
        super().__init__(master, "ttk::labelframe", kw)


LabelFrame = Labelframe  # tkinter name compatibility
class Menubutton(Widget):
    """Ttk Menubutton widget displays a textual label and/or image, and
    displays a menu when pressed."""

    def __init__(self, master=None, **kw):
        """Create a Ttk Menubutton with parent master.

        STANDARD OPTIONS

            class, compound, cursor, image, state, style, takefocus,
            text, textvariable, underline, width

        WIDGET-SPECIFIC OPTIONS

            direction, menu
        """
        super().__init__(master, "ttk::menubutton", kw)
class Notebook(Widget):
    """Ttk Notebook widget manages a collection of windows and displays
    a single one at a time. Each child window is associated with a tab,
    which the user may select to change the currently-displayed window."""

    def __init__(self, master=None, **kw):
        """Construct a Ttk Notebook with parent master.

        STANDARD OPTIONS

            class, cursor, style, takefocus

        WIDGET-SPECIFIC OPTIONS

            height, padding, width

        TAB OPTIONS

            state, sticky, padding, text, image, compound, underline

        TAB IDENTIFIERS (tab_id)

            The tab_id argument found in several methods may take any of
            the following forms:

                * An integer between zero and the number of tabs
                * The name of a child window
                * A positional specification of the form "@x,y", which
                  defines the tab
                * The string "current", which identifies the
                  currently-selected tab
                * The string "end", which returns the number of tabs (only
                  valid for method index)
        """
        Widget.__init__(self, master, "ttk::notebook", kw)

    def add(self, child, **kw):
        """Adds a new tab to the notebook.

        If window is currently managed by the notebook but hidden, it is
        restored to its previous position."""
        self.tk.call(self._w, "add", child, *(_format_optdict(kw)))

    def forget(self, tab_id):
        """Removes the tab specified by tab_id, unmaps and unmanages the
        associated window."""
        self.tk.call(self._w, "forget", tab_id)

    def hide(self, tab_id):
        """Hides the tab specified by tab_id.

        The tab will not be displayed, but the associated window remains
        managed by the notebook and its configuration remembered. Hidden
        tabs may be restored with the add command."""
        self.tk.call(self._w, "hide", tab_id)

    def identify(self, x, y):
        """Returns the name of the tab element at position x, y, or the
        empty string if none."""
        return self.tk.call(self._w, "identify", x, y)

    def index(self, tab_id):
        """Returns the numeric index of the tab specified by tab_id, or
        the total number of tabs if tab_id is the string "end"."""
        return self.tk.getint(self.tk.call(self._w, "index", tab_id))

    def insert(self, pos, child, **kw):
        """Inserts a pane at the specified position.

        pos is either the string end, an integer index, or the name of
        a managed child. If child is already managed by the notebook,
        moves it to the specified position."""
        self.tk.call(self._w, "insert", pos, child, *(_format_optdict(kw)))

    def select(self, tab_id=None):
        """Selects the specified tab.

        The associated child window will be displayed, and the
        previously-selected window (if different) is unmapped. If tab_id
        is omitted, returns the widget name of the currently selected
        pane."""
        return self.tk.call(self._w, "select", tab_id)

    def tab(self, tab_id, option=None, **kw):
        """Query or modify the options of the specific tab_id.

        If kw is not given, returns a dict of the tab option values. If option
        is specified, returns the value of that option. Otherwise, sets the
        options to the corresponding values."""
        if option is not None:
            # A query is expressed as an option with a None value.
            kw[option] = None
        return _val_or_dict(self.tk, kw, self._w, "tab", tab_id)

    def tabs(self):
        """Returns a list of windows managed by the notebook."""
        return self.tk.splitlist(self.tk.call(self._w, "tabs") or ())

    def enable_traversal(self):
        """Enable keyboard traversal for a toplevel window containing
        this notebook.

        This will extend the bindings for the toplevel window containing
        this notebook as follows:

            Control-Tab: selects the tab following the currently selected
                         one

            Shift-Control-Tab: selects the tab preceding the currently
                               selected one

            Alt-K: where K is the mnemonic (underlined) character of any
                   tab, will select that tab.

        Multiple notebooks in a single toplevel may be enabled for
        traversal, including nested notebooks. However, notebook traversal
        only works properly if all panes are direct children of the
        notebook."""
        # The only, and good, difference I see is about mnemonics, which works
        # after calling this method. Control-Tab and Shift-Control-Tab always
        # works (here at least).
        self.tk.call("ttk::notebook::enableTraversal", self._w)
class Panedwindow(Widget, tkinter.PanedWindow):
    """Ttk Panedwindow widget displays a number of subwindows, stacked
    either vertically or horizontally."""

    def __init__(self, master=None, **kw):
        """Create a Ttk Panedwindow with parent master.

        STANDARD OPTIONS

            class, cursor, style, takefocus

        WIDGET-SPECIFIC OPTIONS

            orient, width, height

        PANE OPTIONS

            weight
        """
        Widget.__init__(self, master, "ttk::panedwindow", kw)

    forget = tkinter.PanedWindow.forget  # overrides Pack.forget

    def insert(self, pos, child, **kw):
        """Inserts a pane at the specified positions.

        pos is either the string end, and integer index, or the name
        of a child. If child is already managed by the paned window,
        moves it to the specified position."""
        formatted = _format_optdict(kw)
        self.tk.call(self._w, "insert", pos, child, *formatted)

    def pane(self, pane, option=None, **kw):
        """Query or modify the options of the specified pane.

        pane is either an integer index or the name of a managed subwindow.
        If kw is not given, returns a dict of the pane option values. If
        option is specified then the value for that option is returned.
        Otherwise, sets the options to the corresponding values."""
        if option is not None:
            # Querying a single option is encoded as option -> None.
            kw[option] = None
        return _val_or_dict(self.tk, kw, self._w, "pane", pane)

    def sashpos(self, index, newpos=None):
        """If newpos is specified, sets the position of sash number index.

        May adjust the positions of adjacent sashes to ensure that
        positions are monotonically increasing. Sash positions are further
        constrained to be between 0 and the total size of the widget.

        Returns the new position of sash number index."""
        raw = self.tk.call(self._w, "sashpos", index, newpos)
        return self.tk.getint(raw)


PanedWindow = Panedwindow  # tkinter name compatibility
class Progressbar(Widget):
    """Ttk Progressbar widget shows the status of a long-running
    operation. They can operate in two modes: determinate mode shows the
    amount completed relative to the total amount of work to be done, and
    indeterminate mode provides an animated display to let the user know
    that something is happening."""

    def __init__(self, master=None, **kw):
        """Create a Ttk Progressbar with parent master.

        STANDARD OPTIONS

            class, cursor, style, takefocus

        WIDGET-SPECIFIC OPTIONS

            orient, length, mode, maximum, value, variable, phase
        """
        super().__init__(master, "ttk::progressbar", kw)

    def start(self, interval=None):
        """Begin autoincrement mode: schedules a recurring timer event
        that calls method step every interval milliseconds.

        interval defaults to 50 milliseconds (20 steps/second) if omitted."""
        self.tk.call(self._w, "start", interval)

    def step(self, amount=None):
        """Increments the value option by amount.

        amount defaults to 1.0 if omitted."""
        self.tk.call(self._w, "step", amount)

    def stop(self):
        """Stop autoincrement mode: cancels any recurring timer event
        initiated by start."""
        self.tk.call(self._w, "stop")
class Radiobutton(Widget):
    """Ttk Radiobutton widgets are used in groups to show or change a
    set of mutually-exclusive options."""

    def __init__(self, master=None, **kw):
        """Create a Ttk Radiobutton with parent master.

        STANDARD OPTIONS

            class, compound, cursor, image, state, style, takefocus,
            text, textvariable, underline, width

        WIDGET-SPECIFIC OPTIONS

            command, value, variable
        """
        super().__init__(master, "ttk::radiobutton", kw)

    def invoke(self):
        """Sets the option variable to the option value, selects the
        widget, and invokes the associated command.

        Returns the result of the command, or an empty string if
        no command is specified."""
        return self.tk.call(self._w, "invoke")
class Scale(Widget, tkinter.Scale):
    """Ttk Scale widget is typically used to control the numeric value of
    a linked variable that varies uniformly over some range."""

    def __init__(self, master=None, **kw):
        """Create a Ttk Scale with parent master.

        STANDARD OPTIONS

            class, cursor, style, takefocus

        WIDGET-SPECIFIC OPTIONS

            command, from, length, orient, to, value, variable
        """
        Widget.__init__(self, master, "ttk::scale", kw)

    def configure(self, cnf=None, **kw):
        """Modify or query scale options.

        Setting a value for any of the "from", "from_" or "to" options
        generates a <<RangeChanged>> event."""
        result = Widget.configure(self, cnf, **kw)
        if not isinstance(cnf, (type(None), str)):
            # Merge dict-style configuration so the range check below
            # sees every option that was set.
            kw.update(cnf)
        if any(option in kw for option in ('from', 'from_', 'to')):
            self.event_generate('<<RangeChanged>>')
        return result

    def get(self, x=None, y=None):
        """Get the current value of the value option, or the value
        corresponding to the coordinates x, y if they are specified.

        x and y are pixel coordinates relative to the scale widget
        origin."""
        return self.tk.call(self._w, 'get', x, y)
class Scrollbar(Widget, tkinter.Scrollbar):
    """Ttk Scrollbar controls the viewport of a scrollable widget."""

    def __init__(self, master=None, **kw):
        """Create a Ttk Scrollbar with parent master.

        STANDARD OPTIONS

            class, cursor, style, takefocus

        WIDGET-SPECIFIC OPTIONS

            command, orient
        """
        Widget.__init__(self, master, "ttk::scrollbar", kw)
class Separator(Widget):
    """Ttk Separator widget displays a horizontal or vertical separator
    bar."""

    def __init__(self, master=None, **kw):
        """Create a Ttk Separator with parent master.

        STANDARD OPTIONS

            class, cursor, style, takefocus

        WIDGET-SPECIFIC OPTIONS

            orient
        """
        super().__init__(master, "ttk::separator", kw)
class Sizegrip(Widget):
    """Ttk Sizegrip allows the user to resize the containing toplevel
    window by pressing and dragging the grip."""

    def __init__(self, master=None, **kw):
        """Create a Ttk Sizegrip with parent master.

        STANDARD OPTIONS

            class, cursor, state, style, takefocus
        """
        super().__init__(master, "ttk::sizegrip", kw)
class Spinbox(Entry):
    """Ttk Spinbox is an Entry with increment and decrement arrows

    It is commonly used for number entry or to select from a list of
    string values.
    """

    def __init__(self, master=None, **kw):
        """Create a Ttk Spinbox widget with parent master.

        STANDARD OPTIONS

            class, cursor, style, takefocus, validate,
            validatecommand, xscrollcommand, invalidcommand

        WIDGET-SPECIFIC OPTIONS

            to, from_, increment, values, wrap, format, command
        """
        super().__init__(master, "ttk::spinbox", **kw)

    def set(self, value):
        """Sets the value of the Spinbox to value."""
        self.tk.call(self._w, "set", value)
class Treeview(Widget, tkinter.XView, tkinter.YView):
"""Ttk Treeview widget displays a hierarchical collection of items.
Each item has a textual label, an optional image, and an optional list
of data values. The data values are displayed in successive columns
after the tree label."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Treeview with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus, xscrollcommand,
yscrollcommand
WIDGET-SPECIFIC OPTIONS
columns, displaycolumns, height, padding, selectmode, show
ITEM OPTIONS
text, image, values, open, tags
TAG OPTIONS
foreground, background, font, image
"""
Widget.__init__(self, master, "ttk::treeview", kw)
def bbox(self, item, column=None):
"""Returns the bounding box (relative to the treeview widget's
window) of the specified item in the form x y width height.
If column is specified, returns the bounding box of that cell.
If the item is not visible (i.e., if it is a descendant of a
closed item or is scrolled offscreen), returns an empty string."""
return self._getints(self.tk.call(self._w, "bbox", item, column)) or ''
def get_children(self, item=None):
"""Returns a tuple of children belonging to item.
If item is not specified, returns root children."""
return self.tk.splitlist(
self.tk.call(self._w, "children", item or '') or ())
def set_children(self, item, *newchildren):
"""Replaces item's child with newchildren.
Children present in item that are not present in newchildren
are detached from tree. No items in newchildren may be an
ancestor of item."""
self.tk.call(self._w, "children", item, newchildren)
def column(self, column, option=None, **kw):
"""Query or modify the options for the specified column.
If kw is not given, returns a dict of the column option values. If
option is specified then the value for that option is returned.
Otherwise, sets the options to the corresponding values."""
if option is not None:
kw[option] = None
return _val_or_dict(self.tk, kw, self._w, "column", column)
def delete(self, *items):
"""Delete all specified items and all their descendants. The root
item may not be deleted."""
self.tk.call(self._w, "delete", items)
def detach(self, *items):
"""Unlinks all of the specified items from the tree.
The items and all of their descendants are still present, and may
be reinserted at another point in the tree, but will not be
displayed. The root item may not be detached."""
self.tk.call(self._w, "detach", items)
def exists(self, item):
"""Returns True if the specified item is present in the tree,
False otherwise."""
return self.tk.getboolean(self.tk.call(self._w, "exists", item))
def focus(self, item=None):
"""If item is specified, sets the focus item to item. Otherwise,
returns the current focus item, or '' if there is none."""
return self.tk.call(self._w, "focus", item)
def heading(self, column, option=None, **kw):
"""Query or modify the heading options for the specified column.
If kw is not given, returns a dict of the heading option values. If
option is specified then the value for that option is returned.
Otherwise, sets the options to the corresponding values.
Valid options/values are:
text: text
The text to display in the column heading
image: image_name
Specifies an image to display to the right of the column
heading
anchor: anchor
Specifies how the heading text should be aligned. One of
the standard Tk anchor values
command: callback
A callback to be invoked when the heading label is
pressed.
To configure the tree column heading, call this with column = "#0" """
cmd = kw.get('command')
if cmd and not isinstance(cmd, str):
# callback not registered yet, do it now
kw['command'] = self.master.register(cmd, self._substitute)
if option is not None:
kw[option] = None
return _val_or_dict(self.tk, kw, self._w, 'heading', column)
def identify(self, component, x, y):
"""Returns a description of the specified component under the
point given by x and y, or the empty string if no such component
is present at that position."""
return self.tk.call(self._w, "identify", component, x, y)
def identify_row(self, y):
"""Returns the item ID of the item at position y."""
return self.identify("row", 0, y)
def identify_column(self, x):
"""Returns the data column identifier of the cell at position x.
The tree column has ID #0."""
return self.identify("column", x, 0)
def identify_region(self, x, y):
"""Returns one of:
heading: Tree heading area.
separator: Space between two columns headings;
tree: The tree area.
cell: A data cell.
* Availability: Tk 8.6"""
return self.identify("region", x, y)
def identify_element(self, x, y):
"""Returns the element at position x, y.
* Availability: Tk 8.6"""
return self.identify("element", x, y)
def index(self, item):
"""Returns the integer index of item within its parent's list
of children."""
return self.tk.getint(self.tk.call(self._w, "index", item))
def insert(self, parent, index, iid=None, **kw):
"""Creates a new item and return the item identifier of the newly
created item.
parent is the item ID of the parent item, or the empty string
to create a new top-level item. index is an integer, or the value
end, specifying where in the list of parent's children to insert
the new item. If index is less than or equal to zero, the new node
is inserted at the beginning, if index is greater than or equal to
the current number of children, it is inserted at the end. If iid
is specified, it is used as the item identifier, iid must not
already exist in the tree. Otherwise, a new unique identifier
is generated."""
opts = _format_optdict(kw)
if iid is not None:
res = self.tk.call(self._w, "insert", parent, index,
"-id", iid, *opts)
else:
res = self.tk.call(self._w, "insert", parent, index, *opts)
return res
def item(self, item, option=None, **kw):
"""Query or modify the options for the specified item.
If no options are given, a dict with options/values for the item
is returned. If option is specified then the value for that option
is returned. Otherwise, sets the options to the corresponding
values as given by kw."""
if option is not None:
kw[option] = None
return _val_or_dict(self.tk, kw, self._w, "item", item)
def move(self, item, parent, index):
"""Moves item to position index in parent's list of children.
It is illegal to move an item under one of its descendants. If
index is less than or equal to zero, item is moved to the
beginning, if greater than or equal to the number of children,
it is moved to the end. If item was detached it is reattached."""
self.tk.call(self._w, "move", item, parent, index)
reattach = move # A sensible method name for reattaching detached items
def next(self, item):
"""Returns the identifier of item's next sibling, or '' if item
is the last child of its parent."""
return self.tk.call(self._w, "next", item)
def parent(self, item):
"""Returns the ID of the parent of item, or '' if item is at the
top level of the hierarchy."""
return self.tk.call(self._w, "parent", item)
def prev(self, item):
"""Returns the identifier of item's previous sibling, or '' if
item is the first child of its parent."""
return self.tk.call(self._w, "prev", item)
def see(self, item):
"""Ensure that item is visible.
Sets all of item's ancestors open option to True, and scrolls
the widget if necessary so that item is within the visible
portion of the tree."""
self.tk.call(self._w, "see", item)
def selection(self):
"""Returns the tuple of selected items."""
return self.tk.splitlist(self.tk.call(self._w, "selection"))
def _selection(self, selop, items):
if len(items) == 1 and isinstance(items[0], (tuple, list)):
items = items[0]
self.tk.call(self._w, "selection", selop, items)
def selection_set(self, *items):
"""The specified items becomes the new selection."""
self._selection("set", items)
def selection_add(self, *items):
"""Add all of the specified items to the selection."""
self._selection("add", items)
def selection_remove(self, *items):
"""Remove all of the specified items from the selection."""
self._selection("remove", items)
def selection_toggle(self, *items):
"""Toggle the selection state of each specified item."""
self._selection("toggle", items)
def set(self, item, column=None, value=None):
"""Query or set the value of given item.
With one argument, return a dictionary of column/value pairs
for the specified item. With two arguments, return the current
value of the specified column. With three arguments, set the
value of given column in given item to the specified value."""
res = self.tk.call(self._w, "set", item, column, value)
if column is None and value is None:
return _splitdict(self.tk, res,
cut_minus=False, conv=_tclobj_to_py)
else:
return res
def tag_bind(self, tagname, sequence=None, callback=None):
"""Bind a callback for the given event sequence to the tag tagname.
When an event is delivered to an item, the callbacks for each
of the item's tags option are called."""
self._bind((self._w, "tag", "bind", tagname), sequence, callback, add=0)
def tag_configure(self, tagname, option=None, **kw):
    """Query or modify the options for the specified tagname.

    With no keyword arguments, return a dict of the option settings
    for tagname.  If option is specified, return its current value.
    Otherwise set the given options for tagname.
    """
    if option is not None:
        # Fold the single queried option into kw so the shared helper
        # can handle both the query and modify forms uniformly.
        kw[option] = None
    return _val_or_dict(self.tk, kw, self._w, "tag", "configure",
                        tagname)
def tag_has(self, tagname, item=None):
    """Query which items carry the given tag.

    If item is specified, return 1 or 0 depending on whether that
    item has tagname.  Otherwise return a list of all items which
    have the specified tag.

    * Availability: Tk 8.6
    """
    if item is None:
        return self.tk.splitlist(
            self.tk.call(self._w, "tag", "has", tagname))
    return self.tk.getboolean(
        self.tk.call(self._w, "tag", "has", tagname, item))
# Extensions
class LabeledScale(Frame):
    """A Ttk Scale widget with a Ttk Label widget indicating its
    current value.

    The Ttk Scale can be accessed through instance.scale, and Ttk Label
    can be accessed through instance.label"""

    def __init__(self, master=None, variable=None, from_=0, to=10, **kw):
        """Construct a horizontal LabeledScale with parent master, a
        variable to be associated with the Ttk Scale widget and its range.
        If variable is not specified, a tkinter.IntVar is created.

        WIDGET-SPECIFIC OPTIONS
            compound: 'top' or 'bottom'
                Specifies how to display the label relative to the scale.
                Defaults to 'top'.
        """
        self._label_top = kw.pop('compound', 'top') == 'top'

        Frame.__init__(self, master, **kw)
        self._variable = variable or tkinter.IntVar(master)
        self._variable.set(from_)
        self._last_valid = from_

        self.label = Label(self)
        self.scale = Scale(self, variable=self._variable, from_=from_, to=to)
        self.scale.bind('<<RangeChanged>>', self._adjust)

        # Position scale and label according to the compound option.
        scale_side = 'bottom' if self._label_top else 'top'
        label_side = 'top' if scale_side == 'bottom' else 'bottom'
        self.scale.pack(side=scale_side, fill='x')
        # A throwaway label reserves vertical space; the real label is
        # positioned with place() in _adjust.
        dummy = Label(self).pack(side=label_side)  # place holder
        self.label.place(anchor='n' if label_side == 'top' else 's')

        # Update the label as the scale or the variable changes.
        self.__tracecb = self._variable.trace_variable('w', self._adjust)
        self.bind('<Configure>', self._adjust)
        self.bind('<Map>', self._adjust)

    def destroy(self):
        """Destroy this widget and possibly its associated variable."""
        try:
            self._variable.trace_vdelete('w', self.__tracecb)
        except AttributeError:
            # No variable to untrace (already destroyed) -- nothing to do.
            pass
        else:
            del self._variable
        super().destroy()
        self.label = None
        self.scale = None

    def _adjust(self, *args):
        """Adjust the label position according to the scale."""
        def adjust_label():
            self.update_idletasks()  # "force" scale redraw
            x, y = self.scale.coords()
            if self._label_top:
                y = self.scale.winfo_y() - self.label.winfo_reqheight()
            else:
                y = self.scale.winfo_reqheight() + self.label.winfo_reqheight()
            self.label.place_configure(x=x, y=y)

        from_ = _to_number(self.scale['from'])
        to = _to_number(self.scale['to'])
        if to < from_:
            from_, to = to, from_
        newval = self._variable.get()
        if not from_ <= newval <= to:
            # Value outside range: restore the last valid value instead.
            self.value = self._last_valid
            return

        self._last_valid = newval
        self.label['text'] = newval
        self.after_idle(adjust_label)

    @property
    def value(self):
        """Return current scale value."""
        return self._variable.get()

    @value.setter
    def value(self, val):
        """Set new scale value."""
        self._variable.set(val)
class OptionMenu(Menubutton):
    """Themed OptionMenu, based after tkinter's OptionMenu, which allows
    the user to select a value from a menu."""

    def __init__(self, master, variable, default=None, *values, **kwargs):
        """Construct a themed OptionMenu widget with master as the parent,
        the resource textvariable set to variable, the initially selected
        value specified by the default parameter, the menu values given by
        *values and additional keywords.

        WIDGET-SPECIFIC OPTIONS
            style: stylename
                Menubutton style.
            direction: 'above', 'below', 'left', 'right', or 'flush'
                Menubutton direction.
            command: callback
                A callback that will be invoked after selecting an item.
        """
        kw = {'textvariable': variable, 'style': kwargs.pop('style', None),
              'direction': kwargs.pop('direction', None)}
        Menubutton.__init__(self, master, **kw)
        self['menu'] = tkinter.Menu(self, tearoff=False)

        self._variable = variable
        self._callback = kwargs.pop('command', None)
        # Anything left in kwargs is an option we do not understand.
        if kwargs:
            raise tkinter.TclError('unknown option -%s' % (
                next(iter(kwargs.keys()))))

        self.set_menu(default, *values)

    def __getitem__(self, item):
        # Resolve the menu path name to the actual widget object.
        if item == 'menu':
            return self.nametowidget(Menubutton.__getitem__(self, item))
        return Menubutton.__getitem__(self, item)

    def set_menu(self, default=None, *values):
        """Build a new menu of radiobuttons with *values and optionally
        a default value."""
        menu = self['menu']
        menu.delete(0, 'end')
        for val in values:
            menu.add_radiobutton(label=val,
                command=tkinter._setit(self._variable, val, self._callback),
                variable=self._variable)

        if default:
            self._variable.set(default)

    def destroy(self):
        """Destroy this widget and its associated variable."""
        try:
            del self._variable
        except AttributeError:
            pass
        super().destroy()
| 34.365223 | 80 | 0.617719 |
3f14e5002842205715ffd89e6a0357cc8c769dd3 | 5,400 | py | Python | savu/plugins/ring_removal/ring_removal_filtering.py | dtasev/Savu | acb2578c85472e76cb292c4242c1ed2f2332f3e3 | [
"Apache-2.0"
] | null | null | null | savu/plugins/ring_removal/ring_removal_filtering.py | dtasev/Savu | acb2578c85472e76cb292c4242c1ed2f2332f3e3 | [
"Apache-2.0"
] | 1 | 2019-07-30T12:31:51.000Z | 2019-07-30T12:31:51.000Z | savu/plugins/ring_removal/ring_removal_filtering.py | DTasev/Savu | acb2578c85472e76cb292c4242c1ed2f2332f3e3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: Remove stripe artefacts
:platform: Unix
:synopsis: A plugin working in sinogram space to remove stripe artefacts
.. moduleauthor:: Nghia Vo <scientificsoftware@diamond.ac.uk>
"""
from savu.plugins.plugin import Plugin
from savu.plugins.driver.cpu_plugin import CpuPlugin
from savu.plugins.utils import register_plugin
from savu.data.plugin_list import CitationInformation
import numpy as np
from scipy.ndimage import median_filter
from scipy import signal
import pyfftw.interfaces.scipy_fftpack as fft
@register_plugin
class RingRemovalFiltering(Plugin, CpuPlugin):
"""
Method to remove stripe artefacts in a sinogram (<-> ring artefacts in a \
reconstructed image) using a filtering-based method in the combination \
with a sorting-based method. Note that it's different to a FFT-based or \
wavelet-FFT-based method.
:param sigma: Sigma of the Gaussian window. Used to separate the low-pass\
and high-pass components of each sinogram column. Default: 3.
:param size: Size of the median filter window. Used to\
clean stripes. Default: 31.
"""
def __init__(self):
super(RingRemovalFiltering, self).__init__(
"RingRemovalFiltering")
# Declare one input and one output dataset, both processed one
# sinogram at a time.
def setup(self):
in_dataset, out_dataset = self.get_datasets()
out_dataset[0].create_dataset(in_dataset[0])
in_pData, out_pData = self.get_plugin_datasets()
in_pData[0].plugin_data_setup('SINOGRAM', 'single')
out_pData[0].plugin_data_setup('SINOGRAM', 'single')
# Pre-compute constants reused for every frame: padding, index matrix,
# Gaussian low-pass window and the (-1)^n sign sequence used to centre
# the FFT.
def pre_process(self):
in_pData = self.get_plugin_in_datasets()
width_dim = \
in_pData[0].get_data_dimension_by_axis_label('detector_x')
height_dim = \
in_pData[0].get_data_dimension_by_axis_label('rotation_angle')
sino_shape = list(in_pData[0].get_shape())
self.pad = 150 # To reduce artifact caused by FFT
self.width1 = sino_shape[width_dim]
self.height1 = sino_shape[height_dim] + 2*self.pad
listindex = np.arange(0.0, sino_shape[height_dim], 1.0)
self.matindex = np.tile(listindex, (self.width1, 1))
# sigma is clipped to a sane range before building the window.
sigma = np.clip(np.int16(self.parameters['sigma']), 1, self.height1-1)
self.window = signal.gaussian(self.height1, std = sigma)
self.listsign = np.power(-1.0,np.arange(self.height1))
def remove_stripe_based_sorting(self, matindex, sinogram, size):
"""
Remove stripes using the sorting technique.
---------
Parameters: - sinogram: 2D array.
- size: window size of the median filter.
---------
Return: - stripe-removed sinogram.
"""
sinogram = np.transpose(sinogram)
# Pair each value with its original column index so the sort can be
# undone after median filtering.
matcomb = np.asarray(np.dstack((matindex, sinogram)))
matsort = np.asarray(
[row[row[:, 1].argsort()] for row in matcomb])
matsort[:, :, 1] = median_filter(matsort[:, :, 1], (size, 1))
# Restore the original ordering using the saved indices.
matsortback = np.asarray(
[row[row[:, 0].argsort()] for row in matsort])
sino_corrected = matsortback[:, :, 1]
return np.transpose(sino_corrected)
def process_frames(self, data):
sinogram = np.transpose(np.copy(data[0]))
# Reflect-pad along the angular axis to reduce FFT edge artefacts.
sinogram2 = np.pad(
sinogram,((0, 0),(self.pad, self.pad)), mode = 'reflect')
size = np.clip(np.int16(self.parameters['size']), 1, self.width1-1)
sinosmooth = np.zeros_like(sinogram)
# Low-pass filter each column via FFT; the pad region is discarded.
for i,sinolist in enumerate(sinogram2):
sinosmooth[i] = np.real(fft.ifft(fft.fft(sinolist
*self.listsign)*self.window)
*self.listsign)[self.pad:self.height1-self.pad]
# Stripes live in the smooth component; clean it by sorting and add
# the high-pass detail back.
sinosharp = sinogram - sinosmooth
sinosmooth_cor = np.transpose(
self.remove_stripe_based_sorting(
self.matindex, np.transpose(sinosmooth), size))
return np.transpose(sinosmooth_cor + sinosharp)
def get_citation_information(self):
cite_info = CitationInformation()
cite_info.description = \
("The code of ring removal is the implementation of the work of \
Nghia T. Vo et al. taken from algorithm 2 and 3 in this paper.")
cite_info.bibtex = \
("@article{Vo:18,\n" +
"title={Superior techniques for eliminating ring artifacts in\
X-ray micro-tomography},\n" +
"author={Nghia T. Vo, Robert C. Atwood,\
and Michael Drakopoulos},\n" +
"journal={Opt. Express},\n" +
"volume={26},\n" +
"number={22},\n" +
"pages={28396--28412},\n" +
"year={2018},\n" +
"publisher={OSA}" +
"}")
cite_info.doi = "doi: DOI: 10.1364/OE.26.028396"
return cite_info
| 41.221374 | 78 | 0.637037 |
7e10e294e2ba42c523107723d10e94cae28917af | 11,063 | py | Python | programs/tests.py | bycristhian/psp | 019825e010386b6acc8c5466e7a6765218cb10d9 | [
"MIT"
] | 2 | 2020-09-04T17:06:41.000Z | 2020-10-05T01:46:20.000Z | programs/tests.py | bycristhian/psp | 019825e010386b6acc8c5466e7a6765218cb10d9 | [
"MIT"
] | null | null | null | programs/tests.py | bycristhian/psp | 019825e010386b6acc8c5466e7a6765218cb10d9 | [
"MIT"
] | null | null | null |
# Django
from django.test import TestCase
from django.urls import reverse_lazy
# Models
from django.contrib.auth.models import User
from users.models import Profile
from logs.models import Phase
from programs.models import ProgrammingLanguage, Program
from projects.models import Project, Module
# Forms
from programs.forms import CreateListPip, CreateProgramForm
# Utils
from datetime import datetime, date
class FormCreatePIPTestCase(TestCase):
    """Validation tests for the CreateListPip form."""

    def setUp(self):
        # A fully populated, valid form.
        self.form1 = CreateListPip(data={
            'name': 'Test PIP',
            'date': datetime(2020, 4, 22, 8, 20, 0),
            'problems': 'Test PIP',
            'proposal': 'Test PIP',
            'comment': 'Test PIP'
        })
        # A form missing every required field except the name.
        self.form2 = CreateListPip(data={
            'name': 'Test PIP #2'
        })

    # Validate Form is valid
    def test_form_is_valid(self):
        self.assertTrue(self.form1.is_valid())

    # Validate Form isn't valid
    def test_form_is_invalid(self):
        # assertEquals is a deprecated alias; use assertEqual instead.
        self.assertEqual(len(self.form2.errors), 4)
class FormCreateProgramTestCase(TestCase):
    """Validation tests for the CreateProgramForm."""

    def setUp(self):
        # The form resolves these objects by name, so create them first.
        User.objects.create_user(username='bycristhian', email='by@gmail.com', password='by123')
        ProgrammingLanguage.objects.create(name='Python')
        # Valid data: existing user/language and coherent dates.
        self.form1 = CreateProgramForm(data={
            'username_programmer': 'bycristhian',
            'name_programming_language': 'Python',
            'name': 'Test name program',
            'description': 'This description must be 50 or greater characteres',
            'start_date': date(2020, 7, 21),
            'planning_date': date(2020, 7, 31)
        })
        # Invalid data: unknown user/language and planning before start.
        self.form2 = CreateProgramForm(data={
            'username_programmer': 'Nothing',
            'name_programming_language': 'JavaScript',
            'name': 'Test Form 2 Name program',
            'description': 'This description must be 50 or greater characteres',
            'start_date': date(2020, 7, 21),
            'planning_date': date(2020, 7, 20)
        })

    def test_form1_is_valid(self):
        self.assertTrue(self.form1.is_valid())

    def test_form2_user_not_exists(self):
        self.assertFalse(self.form2.is_valid())
        self.assertEqual(self.form2.errors['username_programmer'][0], "The programmer doesn't exists")

    def test_form2_language_not_exists(self):
        self.assertFalse(self.form2.is_valid())
        self.assertEqual(
            self.form2.errors['name_programming_language'][0],
            "The programming language doesn't exists"
        )

    def test_form2_dates_incorrect(self):
        self.assertFalse(self.form2.is_valid())
        self.assertEqual(
            self.form2.errors['planning_date'][0],
            "The planning date cannot be less than the start date"
        )
class AccessProgramProgrammerTestCase(TestCase):
    """Access-control tests for the program detail view."""

    def setUp(self):
        self.language = ProgrammingLanguage.objects.create(name='Python')
        self.phase = Phase.objects.create(
            name='Unit Test',
            abbreviation='UT',
            description='This phase is Unit Testing',
            order_index=5
        )
        # Three actors: an admin, the assigned programmer and an outsider.
        self.admin = User.objects.create_user(
            username='Admin1',
            email='admin@gmail.com',
            password='admin123'
        )
        self.user1 = User.objects.create_user(
            username='User1',
            email='user@gmail.com',
            password='user123'
        )
        self.user2 = User.objects.create_user(
            username='User2',
            email='user2@gmail.com',
            password='user456'
        )
        self.profile1 = Profile.objects.create(user=self.user1)
        self.profile2 = Profile.objects.create(user=self.user2)
        self.profile3 = Profile.objects.create(user=self.admin, type_user='administrador')
        self.project = Project.objects.create(
            name='Project Test 1',
            description='This description must be 50 or greater characteres',
            start_date=date(2020, 7, 20),
            planning_date=date(2020, 7, 31),
            admin=self.admin
        )
        self.module = Module.objects.create(
            project=self.project,
            name='Module Test 1',
            description='This description must be 50 or greater characteres',
            start_date=date(2020, 7, 20),
            planning_date=date(2020, 7, 25)
        )
        # The program under test is assigned to user1 only.
        self.program1 = Program.objects.create(
            name='Program Test 1',
            description='This description must be 50 or greater characteres',
            programmer=self.user1,
            language=self.language,
            module=self.module,
            start_date=date(2020, 7, 21),
            planning_date=date(2020, 7, 23)
        )
        self.url_program = reverse_lazy('programs:detail_program', kwargs={'pk_program': self.program1.pk})

    def test_user_not_authenticated(self):
        # Anonymous visitors are redirected to the login page.
        response = self.client.get(self.url_program)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, reverse_lazy('users:login'))

    def test_user_allowed(self):
        # The assigned programmer can open the program summary.
        self.client.force_login(user=self.user1)
        response = self.client.get(self.url_program)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response=response, template_name='programs/summary/program_opened.html')

    def test_user_not_allowed(self):
        # Any other authenticated user is forbidden.
        self.client.force_login(user=self.user2)
        response = self.client.get(self.url_program)
        self.assertEqual(response.status_code, 403)
class CreateReportTestCase(TestCase):
    """Tests for posting a new test report against a program."""

    def setUp(self):
        self.admin = User.objects.create_user(
            username='admin',
            email='admin@gmail.com',
            password='admin123'
        )
        self.programmer = User.objects.create_user(
            username='programmer',
            email='programmer@gmail.com',
            password='programmer123'
        )
        Profile.objects.create(user=self.admin, type_user='administrador')
        Profile.objects.create(user=self.programmer)
        self.project = Project.objects.create(
            name='Project 1',
            description='This projects is a test and description must be greater 30 characteres',
            start_date=date(2020, 7, 20),
            planning_date=date(2020, 7, 25),
            admin=self.admin
        )
        # The programmer must belong to the project to own a program in it.
        self.project.users.add(self.programmer)
        self.module = Module.objects.create(
            name='Module 1',
            description='This module is a test and this description must be greater that 30 characteres',
            project=self.project,
            start_date=date(2020, 7, 21),
            planning_date=date(2020, 7, 23)
        )
        self.language = ProgrammingLanguage.objects.create(name='Python')
        self.program = Program.objects.create(
            name='Program 1',
            description='This program is a test and this description must be greater that 30 characteres',
            programmer=self.programmer,
            module=self.module,
            start_date=date(2020, 7, 21),
            planning_date=date(2020, 7, 22),
            language=self.language
        )
        self.url_create_report = reverse_lazy(
            'programs:reports_view',
            kwargs={'pk_program': self.program.pk}
        )

    def test_create_test_report(self):
        # Posting a complete report as the programmer redirects and
        # persists exactly one report on the program.
        self.client.force_login(user=self.programmer)
        response = self.client.post(self.url_create_report, data={
            'date': datetime(2020, 7, 21, 8, 30, 30),
            'name': 'This is test name',
            'description': 'This is test description',
            'objetive': 'This is test objetive',
            'conditions': 'This is test conditions',
            'expect_results': 'This is test expect results',
            'current_results': 'This is test current results'
        })
        self.assertEqual(response.status_code, 302)
        self.assertEqual(self.program.reports_view.all().count(), 1)
class AssignProgramToProgrammerTestCase(TestCase):
    """Tests that only project members can be assigned a new program.

    Users are created with ``create_user`` (not ``objects.create``) so
    passwords are hashed, consistent with the other test cases in this
    module.
    """

    def setUp(self):
        self.language = ProgrammingLanguage.objects.create(name='Go')
        # create_user hashes the password; objects.create would store it
        # in plain text and leave the account unable to authenticate.
        self.user1 = User.objects.create_user(
            username='user1',
            password='user123',
            email='user@gmail.com'
        )
        self.profile_user1 = Profile.objects.create(user=self.user1)
        self.user2 = User.objects.create_user(
            username='user2',
            password='user456',
            email='user2@gmail.com'
        )
        self.profile_user2 = Profile.objects.create(user=self.user2)
        self.admin = User.objects.create_user(
            username='admin',
            password='admin123',
            email='admin@gmail.com'
        )
        self.profile = Profile.objects.create(user=self.admin, type_user='administrador')
        self.project = Project.objects.create(
            name='Project #1',
            description='This is a description of project must be greater to 50 characters',
            start_date=date(2020, 7, 25),
            planning_date=date(2020, 7, 31),
            admin=self.admin
        )
        # Only user1 is a member of the project.
        self.project.users.add(self.user1)
        self.module = Module.objects.create(
            name='Module #1',
            project=self.project,
            description='This is a description of module must be greater to 50 characters',
            start_date=date(2020, 7, 26),
            planning_date=date(2020, 7, 29)
        )
        self.url_create_program = reverse_lazy('programs:create_program', kwargs={'pk_module': self.module.pk})

    def test_assign_program_to_user1(self):
        # user1 belongs to the project, so the program is created.
        self.client.force_login(user=self.admin)
        response = self.client.post(self.url_create_program, data={
            'name': 'Cola de mensajes usando Python',
            'start_date': date(2020, 7, 27),
            'planning_date': date(2020, 7, 28),
            'description': 'Este programa es un servicio de distribuidor de mensajes a distintos microservices en cual se debe hacer con RabbitMQ usando Celery (Python)',
            'username_programmer': 'user1',
            'name_programming_language': 'Go'
        })
        self.assertEqual(response.status_code, 302)
        self.assertEqual(self.module.program_module.all().count(), 1)

    def test_assign_program_to_user2(self):
        # user2 is not a project member, so no program is created.
        self.client.force_login(user=self.admin)
        response = self.client.post(self.url_create_program, data={
            'name': 'Cola de mensajes usando Python',
            'start_date': date(2020, 7, 27),
            'planning_date': date(2020, 7, 28),
            'description': 'Este programa es un servicio de distribuidor de mensajes a distintos microservices en cual se debe hacer con RabbitMQ usando Celery (Python)',
            'username_programmer': 'user2',
            'name_programming_language': 'Go'
        })
        self.assertEqual(response.status_code, 302)
        self.assertEqual(self.module.program_module.all().count(), 0)
5fd68643ea87d55fcf328d9df328b64126f0f3fd | 9,757 | py | Python | tensorlayer/iterate.py | awesome-archive/tensorlayer | 120a79f957926475b6f3db02da71a269f8130771 | [
"Apache-2.0"
] | null | null | null | tensorlayer/iterate.py | awesome-archive/tensorlayer | 120a79f957926475b6f3db02da71a269f8130771 | [
"Apache-2.0"
] | null | null | null | tensorlayer/iterate.py | awesome-archive/tensorlayer | 120a79f957926475b6f3db02da71a269f8130771 | [
"Apache-2.0"
] | 1 | 2018-03-12T23:57:57.000Z | 2018-03-12T23:57:57.000Z | #! /usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
from six.moves import xrange
def minibatches(inputs=None, targets=None, batch_size=None, shuffle=False):
    """Yield (inputs, targets) minibatches of the given batch size.

    Parameters
    ----------
    inputs : numpy.array
        The input features, every row is a example.
    targets : numpy.array
        The labels of inputs, every row is a example.
    batch_size : int
        The batch size.
    shuffle : boolean
        Indicating whether to use a shuffling queue, shuffle the dataset
        before return.

    Notes
    -----
    A trailing partial batch (fewer than ``batch_size`` examples) is not
    yielded.  To shuffle several aligned arrays together, stack them into
    one array, feed it as ``inputs`` and split each batch afterwards.
    """
    assert len(inputs) == len(targets)
    if shuffle:
        # Build one random permutation and slice it per batch so inputs
        # and targets stay aligned.
        perm = np.arange(len(inputs))
        np.random.shuffle(perm)
    for start in range(0, len(inputs) - batch_size + 1, batch_size):
        stop = start + batch_size
        excerpt = perm[start:stop] if shuffle else slice(start, stop)
        yield inputs[excerpt], targets[excerpt]
def seq_minibatches(inputs, targets, batch_size, seq_length, stride=1):
    """Yield batches of overlapping sequence inputs and targets.

    If `batch_size=100` and `seq_length=5`, one return will have 500 rows
    (examples): each of the ``batch_size`` windows of length ``seq_length``
    (taken ``stride`` apart) is flattened into the leading axis.

    Parameters
    ----------
    inputs : numpy.array
        The input features, every row is a example.
    targets : numpy.array
        The labels of inputs, every element is a example.
    batch_size : int
        The batch size.
    seq_length : int
        The sequence length.
    stride : int
        The stride step, default is 1.

    Notes
    -----
    Replaces the previous dependency on ``six.moves.xrange`` with the
    built-in ``range`` (this module is Python 3).
    """
    assert len(inputs) == len(targets)
    # Number of source rows consumed per yielded batch.
    n_loads = (batch_size * stride) + (seq_length - stride)
    for start_idx in range(0, len(inputs) - n_loads + 1, (batch_size * stride)):
        seq_inputs = np.zeros((batch_size, seq_length) + inputs.shape[1:],
                              dtype=inputs.dtype)
        seq_targets = np.zeros((batch_size, seq_length) + targets.shape[1:],
                               dtype=targets.dtype)
        for b_idx in range(batch_size):
            start_seq_idx = start_idx + (b_idx * stride)
            end_seq_idx = start_seq_idx + seq_length
            seq_inputs[b_idx] = inputs[start_seq_idx:end_seq_idx]
            seq_targets[b_idx] = targets[start_seq_idx:end_seq_idx]
        # Collapse (batch, seq, ...) -> (batch * seq, ...).
        flatten_inputs = seq_inputs.reshape((-1, ) + inputs.shape[1:])
        flatten_targets = seq_targets.reshape((-1, ) + targets.shape[1:])
        yield flatten_inputs, flatten_targets
def seq_minibatches2(inputs, targets, batch_size, num_steps):
    """Yield paired [batch_size, num_steps] context matrices.

    Generates the `batch_size` pointers into the raw data and allows
    minibatch iteration along these pointers, yielding the source and
    target contexts at the same time steps.

    Parameters
    ----------
    inputs : list or numpy.array of data
        The source context (e.g. word IDs).
    targets : list or numpy.array of data
        The target context, same length as ``inputs``.
    batch_size : int
        The batch size.
    num_steps : int
        The number of unrolls, i.e. sequence length.

    Yields
    ------
    Pairs of batched data, each a matrix of shape [batch_size, num_steps].

    Raises
    ------
    ValueError : if batch_size or num_steps are too high.

    Examples
    --------
    >>> X = [i for i in range(20)]
    >>> Y = [i for i in range(20, 40)]
    >>> for x, y in tl.iterate.seq_minibatches2(X, Y, batch_size=2, num_steps=3):
    ...     print(x, y)
    """
    assert len(inputs) == len(targets)
    # The docstring advertises plain lists, but the code below relies on
    # .shape/.dtype -- coerce to numpy arrays first (no copy for arrays).
    inputs = np.asarray(inputs)
    targets = np.asarray(targets)
    data_len = len(inputs)
    batch_len = data_len // batch_size
    data = np.zeros((batch_size, batch_len) + inputs.shape[1:], dtype=inputs.dtype)
    data2 = np.zeros([batch_size, batch_len])

    for i in range(batch_size):
        data[i] = inputs[batch_len * i:batch_len * (i + 1)]
        data2[i] = targets[batch_len * i:batch_len * (i + 1)]

    epoch_size = (batch_len - 1) // num_steps
    if epoch_size == 0:
        raise ValueError("epoch_size == 0, decrease batch_size or num_steps")

    for i in range(epoch_size):
        x = data[:, i * num_steps:(i + 1) * num_steps]
        x2 = data2[:, i * num_steps:(i + 1) * num_steps]
        yield (x, x2)
def ptb_iterator(raw_data, batch_size, num_steps):
    """Yield PTB-style (input, shifted-target) batch pairs.

    Generates `batch_size` pointers into the raw PTB data and allows
    minibatch iteration along these pointers.

    Parameters
    ----------
    raw_data : a list
        The context in list format (usually unique word IDs).
    batch_size : int
        The batch size.
    num_steps : int
        The number of unrolls, i.e. sequence_length.

    Yields
    ------
    Pairs of the batched data, each a matrix of shape
    [batch_size, num_steps].  The second element of the tuple is the same
    data time-shifted to the right by one.

    Raises
    ------
    ValueError : if batch_size or num_steps are too high.
    """
    raw_data = np.array(raw_data, dtype=np.int32)

    data_len = len(raw_data)
    batch_len = data_len // batch_size
    # Lay the stream out as batch_size independent rows.
    data = np.zeros([batch_size, batch_len], dtype=np.int32)
    for row in range(batch_size):
        data[row] = raw_data[batch_len * row:batch_len * (row + 1)]

    epoch_size = (batch_len - 1) // num_steps
    if epoch_size == 0:
        raise ValueError("epoch_size == 0, decrease batch_size or num_steps")

    for step in range(epoch_size):
        lo = step * num_steps
        hi = lo + num_steps
        x = data[:, lo:hi]
        # Targets are the inputs shifted right by one position.
        y = data[:, lo + 1:hi + 1]
        yield (x, y)
| 35.871324 | 212 | 0.556524 |
adb3d05bf0d3c7dbf8d3e295dc5fb8e3a79aa3a4 | 286 | py | Python | setup.py | ezdookie/django-country-site | d534a0bd80af20fd05dbf133d917155533bd628c | [
"MIT"
] | null | null | null | setup.py | ezdookie/django-country-site | d534a0bd80af20fd05dbf133d917155533bd628c | [
"MIT"
] | null | null | null | setup.py | ezdookie/django-country-site | d534a0bd80af20fd05dbf133d917155533bd628c | [
"MIT"
] | null | null | null | from setuptools import setup
# Packaging configuration for the django-country-site distribution.
# Metadata values are unchanged; zip_safe=False forces installation as a
# regular directory rather than a zipped egg.
setup(
    name='django-country-site',
    version='0.1.0',
    url='https://github.com/ezdookie/django-country-site',
    author='ezdookie',
    author_email='brian@bleax.com',
    license='MIT',
    packages=['django_country_site'],
    zip_safe=False
)
ed38bcc8413b94fcd3835bb447cae5c1aaa787db | 246,033 | py | Python | config/main.py | davidpil2002/sonic-utilities | 576c9efc0dc33a047b9bb31fd58b53866cd62206 | [
"Apache-2.0"
] | null | null | null | config/main.py | davidpil2002/sonic-utilities | 576c9efc0dc33a047b9bb31fd58b53866cd62206 | [
"Apache-2.0"
] | null | null | null | config/main.py | davidpil2002/sonic-utilities | 576c9efc0dc33a047b9bb31fd58b53866cd62206 | [
"Apache-2.0"
] | null | null | null | #!/usr/sbin/env python
import click
import ipaddress
import json
import jsonpatch
import netaddr
import netifaces
import os
import re
import subprocess
import sys
import time
import itertools
from collections import OrderedDict
from generic_config_updater.generic_updater import GenericUpdater, ConfigFormat
from minigraph import parse_device_desc_xml
from natsort import natsorted
from portconfig import get_child_ports
from socket import AF_INET, AF_INET6
from sonic_py_common import device_info, multi_asic
from sonic_py_common.interface import get_interface_table_name, get_port_table_name, get_intf_longname
from utilities_common import util_base
from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector
from utilities_common.db import Db
from utilities_common.intf_filter import parse_interface_in_filter
from utilities_common import bgp_util
import utilities_common.cli as clicommon
from utilities_common.general import load_db_config
from .utils import log
from . import aaa
from . import chassis_modules
from . import console
from . import feature
from . import kdump
from . import kube
from . import muxcable
from . import nat
from . import vlan
from . import vxlan
from . import plugins
from .config_mgmt import ConfigMgmtDPB
from . import mclag
# mock masic APIs for unit test
# When the UTILITIES_UNIT_TESTING env var is set, extend sys.path so the
# repo's mock DB connectors can shadow the real ones; a missing env var
# (KeyError) means production mode, so do nothing.
try:
    if os.environ["UTILITIES_UNIT_TESTING"] == "1" or os.environ["UTILITIES_UNIT_TESTING"] == "2":
        modules_path = os.path.join(os.path.dirname(__file__), "..")
        tests_path = os.path.join(modules_path, "tests")
        sys.path.insert(0, modules_path)
        sys.path.insert(0, tests_path)
        import mock_tables.dbconnector
        if os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] == "multi_asic":
            import mock_tables.mock_multi_asic
            mock_tables.dbconnector.load_namespace_config()
except KeyError:
    pass
# Click CLI behavior: accept -h / -? / --help as help flags everywhere.
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help', '-?'])
# Well-known SONiC file system locations.
SONIC_GENERATED_SERVICE_PATH = '/etc/sonic/generated_services.conf'
SONIC_CFGGEN_PATH = '/usr/local/bin/sonic-cfggen'
VLAN_SUB_INTERFACE_SEPARATOR = '.'
ASIC_CONF_FILENAME = 'asic.conf'
DEFAULT_CONFIG_DB_FILE = '/etc/sonic/config_db.json'
DEFAULT_CONFIG_YANG_FILE = '/etc/sonic/config_yang.json'
NAMESPACE_PREFIX = 'asic'
# Key under which breakout-capable interfaces are listed in the port
# config JSON (see _get_breakout_options below).
INTF_KEY = "interfaces"
INIT_CFG_FILE = '/etc/sonic/init_cfg.json'
DEFAULT_NAMESPACE = ''
# Loopback interface naming limits: "Loopback" + up to 3-digit id.
CFG_LOOPBACK_PREFIX = "Loopback"
CFG_LOOPBACK_PREFIX_LEN = len(CFG_LOOPBACK_PREFIX)
CFG_LOOPBACK_NAME_TOTAL_LEN_MAX = 11
CFG_LOOPBACK_ID_MAX_VAL = 999
CFG_LOOPBACK_NO="<0-999>"
# PortChannel naming limits: "PortChannel" + up to 4-digit id.
CFG_PORTCHANNEL_PREFIX = "PortChannel"
CFG_PORTCHANNEL_PREFIX_LEN = 11
CFG_PORTCHANNEL_NAME_TOTAL_LEN_MAX = 15
CFG_PORTCHANNEL_MAX_VAL = 9999
CFG_PORTCHANNEL_NO="<0-9999>"
# PORT table field names.
PORT_MTU = "mtu"
PORT_SPEED = "speed"
PORT_TPID = "tpid"
DEFAULT_TPID = "0x8100"
# Populated lazily elsewhere in this module.
asic_type = None
# Click argument ranges for packet-header fields.
DSCP_RANGE = click.IntRange(min=0, max=63)
TTL_RANGE = click.IntRange(min=0, max=255)
QUEUE_RANGE = click.IntRange(min=0, max=255)
GRE_TYPE_RANGE = click.IntRange(min=0, max=65535)
#
# Helper functions
#
# Sort nested dict
def sort_dict(data):
    """Naturally sort a dict (and any first-level dict values) by key.

    Non-dict inputs are returned unchanged.  Sorting uses natsorted so
    e.g. Ethernet2 orders before Ethernet10.
    """
    if type(data) is not dict:
        return data

    for key in data:
        value = data[key]
        # Sort one level of nested dicts as well (table -> entries).
        if type(value) is dict:
            data[key] = OrderedDict(natsorted(value.items()))
    return OrderedDict(natsorted(data.items()))
# Read given JSON file
def read_json_file(fileName):
    """Load and return the JSON content of *fileName*.

    Any failure (missing file, malformed JSON, ...) is re-raised as a
    plain Exception carrying the original error text, matching the
    error-handling convention of the surrounding helpers.
    """
    try:
        with open(fileName) as json_fh:
            parsed = json.load(json_fh)
    except Exception as err:
        raise Exception(str(err))
    return parsed
def _get_breakout_options(ctx, args, incomplete):
    """Shell-completion helper: breakout modes for the interface in *args*.

    Looks up the platform port-config JSON and returns the breakout modes
    defined for the interface (filtered by *incomplete*), or an empty list
    when the file or the interface entry is missing.
    """
    interface_name = args[-1]
    breakout_cfg_file = device_info.get_path_to_port_config_file()
    if not (os.path.isfile(breakout_cfg_file) and breakout_cfg_file.endswith('.json')):
        return []

    port_entries = read_json_file(breakout_cfg_file)[INTF_KEY]
    if interface_name not in port_entries:
        return []

    modes = []
    for name, entry in port_entries.items():
        if name == interface_name:
            modes.extend(entry["breakout_modes"].keys())
    return [str(mode) for mode in modes if incomplete in mode]
def _validate_interface_mode(ctx, breakout_cfg_file, interface_name, target_brkout_mode, cur_brkout_mode):
    """ Validate Parent interface and user selected mode before starting deletion or addition process

    Returns True when the breakout may proceed, False (after printing an
    error) otherwise.  Exits the process with status 0 when the target
    mode equals the currently running mode, since nothing would change.
    """
    breakout_file_input = read_json_file(breakout_cfg_file)["interfaces"]

    # Only parent (breakout-capable) ports appear in the platform file.
    if interface_name not in breakout_file_input:
        click.secho("[ERROR] {} is not a Parent port. So, Breakout Mode is not available on this port".format(interface_name), fg='red')
        return False

    # Check whether target breakout mode is available for the user-selected interface or not
    if target_brkout_mode not in breakout_file_input[interface_name]["breakout_modes"].keys():
        click.secho('[ERROR] Target mode {} is not available for the port {}'. format(target_brkout_mode, interface_name), fg='red')
        return False

    # Get config db context
    config_db = ctx.obj['config_db']
    port_dict = config_db.get_table('PORT')

    # Check whether there is any port in config db.
    if not port_dict:
        click.echo("port_dict is None!")
        return False

    # Check whether the user-selected interface is part of 'port' table in config db.
    if interface_name not in port_dict:
        click.secho("[ERROR] {} is not in port_dict".format(interface_name))
        return False

    click.echo("\nRunning Breakout Mode : {} \nTarget Breakout Mode : {}".format(cur_brkout_mode, target_brkout_mode))
    if (cur_brkout_mode == target_brkout_mode):
        click.secho("[WARNING] No action will be taken as current and desired Breakout Mode are same.", fg='magenta')
        # Nothing to do -> successful no-op exit, not an error.
        sys.exit(0)
    return True
def load_ConfigMgmt(verbose):
    """ Load config for the commands which are capable of change in config DB.

    Returns a ConfigMgmtDPB instance; any construction failure is wrapped
    in a plain Exception with context for the caller to report.
    """
    try:
        return ConfigMgmtDPB(debug=verbose)
    except Exception as e:
        raise Exception("Failed to load the config. Error: {}".format(str(e)))
def breakout_warnUser_extraTables(cm, final_delPorts, confirm=True):
    """
    Function to warn user about extra tables while Dynamic Port Breakout(DPB).
    confirm: re-confirm from user to proceed.
    Config Tables Without Yang model considered extra tables.
    cm = instance of config MGMT class.

    Aborts the command (via click.confirm) when the user declines; any
    internal failure is re-raised as a plain Exception with context.
    """
    try:
        # check if any extra tables exist
        eTables = cm.tablesWithOutYang()
        if len(eTables):
            # find relavent tables in extra tables, i.e. one which can have deleted
            # ports
            tables = cm.configWithKeys(configIn=eTables, keys=final_delPorts)
            click.secho("Below Config can not be verified, It may cause harm "\
                "to the system\n {}".format(json.dumps(tables, indent=2)))
            # abort=True makes click raise Abort when the user answers no.
            click.confirm('Do you wish to Continue?', abort=True)
    except Exception as e:
        raise Exception("Failed in breakout_warnUser_extraTables. Error: {}".format(str(e)))
    return
def breakout_Ports(cm, delPorts=None, portJson=None, force=False,
                   loadDefConfig=False, verbose=False):
    """Execute Dynamic Port Breakout via the config-management layer.

    cm            -- ConfigMgmtDPB instance.
    delPorts      -- list of port names to remove (default: empty list).
    portJson      -- PORT table entries for the ports being created.
    force         -- proceed even when dependent config exists.
    loadDefConfig -- load default config for the newly created ports.
    verbose       -- accepted for interface compatibility; unused here.

    Exits with status 0 (after listing them) when dependencies block the
    breakout, and aborts the click context on any other failure.
    """
    # Fix: the original used mutable default arguments (list()/dict()),
    # which are shared across calls; build fresh containers instead.
    if delPorts is None:
        delPorts = list()
    if portJson is None:
        portJson = dict()

    deps, ret = cm.breakOutPort(delPorts=delPorts, portJson=portJson,
                                force=force, loadDefConfig=loadDefConfig)
    # check if DPB failed
    if not ret:
        if not force and deps:
            click.echo("Dependencies Exist. No further action will be taken")
            click.echo("*** Printing dependencies ***")
            for dep in deps:
                click.echo(dep)
            sys.exit(0)
        else:
            click.echo("[ERROR] Port breakout Failed!!! Opting Out")
            raise click.Abort()
    return
#
# Helper functions
#
def _get_device_type():
    """Return the device type from DEVICE_METADATA, or 'Unknown' on error.

    TODO: move to sonic-py-common
    """
    cmd = "{} -m -v DEVICE_METADATA.localhost.type".format(SONIC_CFGGEN_PATH)
    proc = subprocess.Popen(cmd, shell=True, text=True, stdout=subprocess.PIPE)
    out, err = proc.communicate()

    if err:
        click.echo("Could not get the device type from minigraph, setting device type to Unknown")
        return 'Unknown'
    return out.strip()
def interface_alias_to_name(config_db, interface_alias):
    """Return default interface name if alias name is given as argument

    Sub-interface aliases ("alias.vlan") are split first; the parent alias
    is translated and the vlan id is re-appended to the result.  Returns
    None when the namespace for the alias cannot be determined, and the
    input unchanged when the alias is not in the PORT table (e.g. a
    portchannel name, which has no alias).
    """
    vlan_id = ""
    sub_intf_sep_idx = -1
    if interface_alias is not None:
        sub_intf_sep_idx = interface_alias.find(VLAN_SUB_INTERFACE_SEPARATOR)
        if sub_intf_sep_idx != -1:
            vlan_id = interface_alias[sub_intf_sep_idx + 1:]
            # interface_alias holds the parent port name so the subsequent logic still applies
            interface_alias = interface_alias[:sub_intf_sep_idx]

    # If the input parameter config_db is None, derive it from interface.
    # In single ASIC platform, get_port_namespace() returns DEFAULT_NAMESPACE.
    if config_db is None:
        namespace = get_port_namespace(interface_alias)
        if namespace is None:
            return None
        config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
        config_db.connect()

    port_dict = config_db.get_table('PORT')

    if interface_alias is not None:
        if not port_dict:
            click.echo("port_dict is None!")
            raise click.Abort()
        # Scan PORT entries for a matching alias field.
        for port_name in port_dict:
            if interface_alias == port_dict[port_name]['alias']:
                return port_name if sub_intf_sep_idx == -1 else port_name + VLAN_SUB_INTERFACE_SEPARATOR + vlan_id

    # Interface alias not in port_dict, just return interface_alias, e.g.,
    # portchannel is passed in as argument, which does not have an alias
    return interface_alias if sub_intf_sep_idx == -1 else interface_alias + VLAN_SUB_INTERFACE_SEPARATOR + vlan_id
def interface_name_is_valid(config_db, interface_name):
    """Check if the interface name is valid

    An interface is valid when it appears in one of the PORT, PORTCHANNEL,
    VLAN_SUB_INTERFACE or LOOPBACK_INTERFACE tables of CONFIG_DB.  When
    config_db is None it is derived from the interface's namespace.
    """
    # If the input parameter config_db is None, derive it from interface.
    # In single ASIC platform, get_port_namespace() returns DEFAULT_NAMESPACE.
    if config_db is None:
        namespace = get_port_namespace(interface_name)
        if namespace is None:
            return False
        config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
        config_db.connect()

    port_dict = config_db.get_table('PORT')
    port_channel_dict = config_db.get_table('PORTCHANNEL')
    sub_port_intf_dict = config_db.get_table('VLAN_SUB_INTERFACE')
    loopback_dict = config_db.get_table('LOOPBACK_INTERFACE')

    # Translate an alias to its default name when running in alias mode.
    if clicommon.get_interface_naming_mode() == "alias":
        interface_name = interface_alias_to_name(config_db, interface_name)

    if interface_name is None:
        return False
    if not port_dict:
        click.echo("port_dict is None!")
        raise click.Abort()

    # Idiom fix: O(1) dict membership replaces the original manual scan
    # loops over each table's keys (same comparisons, same result).
    if interface_name in port_dict:
        return True
    if port_channel_dict and interface_name in port_channel_dict:
        return True
    if sub_port_intf_dict and interface_name in sub_port_intf_dict:
        return True
    if loopback_dict and interface_name in loopback_dict:
        return True
    return False
def interface_name_to_alias(config_db, interface_name):
    """Return alias interface name if default name is given as argument

    Returns None when the name is absent from the PORT table or when the
    namespace cannot be determined.
    """
    # If the input parameter config_db is None, derive it from interface.
    # In single ASIC platform, get_port_namespace() returns DEFAULT_NAMESPACE.
    if config_db is None:
        namespace = get_port_namespace(interface_name)
        if namespace is None:
            return None
        config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
        config_db.connect()

    port_dict = config_db.get_table('PORT')
    if interface_name is None:
        return None
    if not port_dict:
        click.echo("port_dict is None!")
        raise click.Abort()
    for name, entry in port_dict.items():
        if name == interface_name:
            return entry['alias']
    return None
def get_interface_ipaddresses(config_db, interface_name):
    """Get IP addresses attached to interface

    Returns a set of ipaddress.ip_interface objects; empty when the
    interface type has no address table.
    """
    addresses = set()
    table_name = get_interface_table_name(interface_name)
    if not table_name:
        return addresses

    for key in config_db.get_keys(table_name):
        # Address rows are (ifname, ip_prefix) tuples; attribute rows are
        # plain strings and are skipped.
        if isinstance(key, tuple) and len(key) == 2 and key[0] == interface_name:
            addresses.add(ipaddress.ip_interface(key[1]))
    return addresses
def is_interface_bind_to_vrf(config_db, interface_name):
    """Get interface if bind to vrf or not

    Returns True when the interface entry carries a non-empty vrf_name.
    """
    table_name = get_interface_table_name(interface_name)
    if table_name == "":
        return False
    entry = config_db.get_entry(table_name, interface_name)
    return bool(entry and entry.get("vrf_name"))
def is_portchannel_name_valid(portchannel_name):
    """Port channel name validation

    Valid names look like PortChannelXXXX where XXXX is 0-9999 and the
    whole name fits in CFG_PORTCHANNEL_NAME_TOTAL_LEN_MAX characters.
    """
    prefix = portchannel_name[:CFG_PORTCHANNEL_PREFIX_LEN]
    suffix = portchannel_name[CFG_PORTCHANNEL_PREFIX_LEN:]
    if prefix != CFG_PORTCHANNEL_PREFIX:
        return False
    if not suffix.isdigit() or int(suffix) > CFG_PORTCHANNEL_MAX_VAL:
        return False
    return len(portchannel_name) <= CFG_PORTCHANNEL_NAME_TOTAL_LEN_MAX
def is_portchannel_present_in_db(db, portchannel_name):
    """Check if Portchannel is present in Config DB

    ConfigDBConnector upper-cases table names internally, so the
    CFG_PORTCHANNEL_PREFIX constant resolves to the PORTCHANNEL table.
    """
    portchannel_table = db.get_table(CFG_PORTCHANNEL_PREFIX)
    if portchannel_table is None:
        return False
    return portchannel_name in portchannel_table
def is_port_member_of_this_portchannel(db, port_name, portchannel_name):
    """Check if a port is member of given portchannel
    """
    if db.get_table(CFG_PORTCHANNEL_PREFIX) is None:
        return False
    # PORTCHANNEL_MEMBER keys are (portchannel, port) tuples; iterating the
    # table dict yields those key tuples directly.
    for channel, member in db.get_table('PORTCHANNEL_MEMBER'):
        if channel == portchannel_name and member == port_name:
            return True
    return False
# Return the namespace where an interface belongs
# The port name input could be in default mode or in alias mode.
def get_port_namespace(port):
    """Return the namespace owning *port*, or None when it is unknown.

    On single-ASIC platforms (and for the management port 'eth0') this is
    always DEFAULT_NAMESPACE.  Otherwise every front/back namespace is
    probed for the port -- by alias field when alias naming mode is active,
    by table key otherwise.
    """
    # If it is a non multi-asic platform, or if the interface is management interface
    # return DEFAULT_NAMESPACE
    if not multi_asic.is_multi_asic() or port == 'eth0':
        return DEFAULT_NAMESPACE

    # Get the table to check for interface presence
    table_name = get_port_table_name(port)
    if table_name == "":
        return None

    ns_list = multi_asic.get_all_namespaces()
    namespaces = ns_list['front_ns'] + ns_list['back_ns']
    for namespace in namespaces:
        config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
        config_db.connect()

        # If the interface naming mode is alias, search the tables for alias_name.
        if clicommon.get_interface_naming_mode() == "alias":
            port_dict = config_db.get_table(table_name)
            if port_dict:
                for port_name in port_dict:
                    if port == port_dict[port_name]['alias']:
                        return namespace
        else:
            entry = config_db.get_entry(table_name, port)
            if entry:
                return namespace

    return None
def del_interface_bind_to_vrf(config_db, vrf_name):
    """del interface bind to vrf

    For every interface bound to *vrf_name* in any of the L3 interface
    tables: remove its IP addresses first, then delete the binding entry.
    """
    tables = ['INTERFACE', 'PORTCHANNEL_INTERFACE', 'VLAN_INTERFACE', 'LOOPBACK_INTERFACE']
    for table_name in tables:
        interface_dict = config_db.get_table(table_name)
        if not interface_dict:
            continue
        for interface_name in interface_dict:
            entry = interface_dict[interface_name]
            if 'vrf_name' in entry and vrf_name == entry['vrf_name']:
                interface_ipaddresses = get_interface_ipaddresses(config_db, interface_name)
                # Fix: the loop variable was named 'ipaddress', shadowing the
                # stdlib ipaddress module within this function.
                for ip_addr in interface_ipaddresses:
                    remove_router_interface_ip_address(config_db, interface_name, ip_addr)
                config_db.set_entry(table_name, interface_name, None)
def set_interface_naming_mode(mode):
    """Modify SONIC_CLI_IFACE_MODE env variable in user .bashrc

    mode -- naming mode to persist (e.g. "alias" or "default"); new login
    shells pick it up from the export line added/updated in ~/.bashrc.
    Aborts when the platform lacks alias mappings or when run as root.
    """
    user = os.getenv('SUDO_USER')
    bashrc_ifacemode_line = "export SONIC_CLI_IFACE_MODE={}".format(mode)

    # In case of multi-asic, we can check for the alias mode support in any of
    # the namespaces as this setting of alias mode should be identical everywhere.
    # Here by default we set the namespaces to be a list just having '' which
    # represents the linux host. In case of multi-asic, we take the first namespace
    # created for the front facing ASIC.
    namespaces = [DEFAULT_NAMESPACE]
    if multi_asic.is_multi_asic():
        namespaces = multi_asic.get_all_namespaces()['front_ns']

    # Ensure all interfaces have an 'alias' key in PORT dict
    config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespaces[0])
    config_db.connect()
    port_dict = config_db.get_table('PORT')

    if not port_dict:
        click.echo("port_dict is None!")
        raise click.Abort()

    for port_name in port_dict:
        try:
            if port_dict[port_name]['alias']:
                pass
        except KeyError:
            click.echo("Platform does not support alias mapping")
            raise click.Abort()

    if not user:
        user = os.getenv('USER')

    if user != "root":
        bashrc = "/home/{}/.bashrc".format(user)
    else:
        # fail() raises, so 'bashrc' is always bound below this point.
        click.get_current_context().fail("Cannot set interface naming mode for root user!")

    # Fix: use context managers instead of raw open()/close() so the file
    # handle is released even if reading or writing raises.
    with open(bashrc, 'r') as f:
        filedata = f.read()

    if "SONIC_CLI_IFACE_MODE" not in filedata:
        newdata = filedata + bashrc_ifacemode_line
        newdata += "\n"
    else:
        newdata = re.sub(r"export SONIC_CLI_IFACE_MODE=\w+",
                         bashrc_ifacemode_line, filedata)
    with open(bashrc, 'w') as f:
        f.write(newdata)

    click.echo("Please logout and log back in for changes take effect.")
def get_intf_ipv6_link_local_mode(ctx, interface_name, table_name):
    """Return the interface's 'ipv6_use_link_local_only' setting.

    Yields the stored value, "disable" when the interface exists without
    the attribute, or "" when the interface is absent from the table.
    """
    config_db = ctx.obj["config_db"]
    table = config_db.get_table(table_name)
    if interface_name not in table:
        return ""
    return table[interface_name].get('ipv6_use_link_local_only', "disable")
def _is_neighbor_ipaddress(config_db, ipaddress):
"""Returns True if a neighbor has the IP address <ipaddress>, False if not
"""
entry = config_db.get_entry('BGP_NEIGHBOR', ipaddress)
return True if entry else False
def _get_all_neighbor_ipaddresses(config_db):
"""Returns list of strings containing IP addresses of all BGP neighbors
"""
addrs = []
bgp_sessions = config_db.get_table('BGP_NEIGHBOR')
for addr, session in bgp_sessions.items():
addrs.append(addr)
return addrs
def _get_neighbor_ipaddress_list_by_hostname(config_db, hostname):
"""Returns list of strings, each containing an IP address of neighbor with
hostname <hostname>. Returns empty list if <hostname> not a neighbor
"""
addrs = []
bgp_sessions = config_db.get_table('BGP_NEIGHBOR')
for addr, session in bgp_sessions.items():
if 'name' in session and session['name'] == hostname:
addrs.append(addr)
return addrs
def _change_bgp_session_status_by_addr(config_db, ipaddress, status, verbose):
    """Start up or shut down BGP session by IP address

    status -- 'up' or 'down'; written to the neighbor's admin_status field.
    """
    action = 'Starting' if status == 'up' else 'Shutting'
    click.echo(f"{action} {status} BGP session with neighbor {ipaddress}...")
    config_db.mod_entry('bgp_neighbor', ipaddress, {'admin_status': status})
def _change_bgp_session_status(config_db, ipaddr_or_hostname, status, verbose):
    """Start up or shut down BGP session by IP address or hostname

    Returns True when at least one matching neighbor was updated.
    """
    # IPv6 addresses were stored lowercase in ConfigDB during minigraph
    # parsing, so normalize before the direct-address lookup.
    lowered = ipaddr_or_hostname.lower()
    if _is_neighbor_ipaddress(config_db, lowered):
        ip_addrs = [lowered]
    else:
        # Not a neighbor address -- try to resolve it as a hostname.
        ip_addrs = _get_neighbor_ipaddress_list_by_hostname(config_db, ipaddr_or_hostname)

    if not ip_addrs:
        return False

    for ip_addr in ip_addrs:
        _change_bgp_session_status_by_addr(config_db, ip_addr, status, verbose)
    return True
def _validate_bgp_neighbor(config_db, neighbor_ip_or_hostname):
    """validates whether the given ip or host name is a BGP neighbor

    Returns the matching neighbor address list (empty when no match).
    """
    lowered = neighbor_ip_or_hostname.lower()
    if _is_neighbor_ipaddress(config_db, lowered):
        return [lowered]
    # Hostnames are stored upper-case in the BGP_NEIGHBOR 'name' field.
    return _get_neighbor_ipaddress_list_by_hostname(config_db, neighbor_ip_or_hostname.upper())
def _remove_bgp_neighbor_config(config_db, neighbor_ip_or_hostname):
    """Removes BGP configuration of the given neighbor

    Returns True when at least one neighbor entry was removed.
    """
    ip_addrs = _validate_bgp_neighbor(config_db, neighbor_ip_or_hostname)
    if not ip_addrs:
        return False

    for ip_addr in ip_addrs:
        # Passing None deletes the whole neighbor entry.
        config_db.mod_entry('bgp_neighbor', ip_addr, None)
        click.echo("Removed configuration of BGP neighbor {}".format(ip_addr))
    return True
def _change_hostname(hostname):
    """Apply *hostname* to the running system.

    Updates /etc/hostname, re-reads it via hostname(1), and swaps the
    loopback entry in /etc/hosts.  No-op when the hostname is already
    current.
    """
    current_hostname = os.uname()[1]
    if current_hostname != hostname:
        clicommon.run_command('echo {} > /etc/hostname'.format(hostname), display_cmd=True)
        clicommon.run_command('hostname -F /etc/hostname', display_cmd=True)
        # Drop the old hostname's line from /etc/hosts, then append the new one.
        clicommon.run_command(r'sed -i "/\s{}$/d" /etc/hosts'.format(current_hostname), display_cmd=True)
        clicommon.run_command('echo "127.0.0.1 {}" >> /etc/hosts'.format(hostname), display_cmd=True)
def _clear_cbf():
    """Delete the CBF (class-based forwarding) map tables from CONFIG_DB
    in every namespace."""
    CBF_TABLE_NAMES = [
        'DSCP_TO_FC_MAP',
        'EXP_TO_FC_MAP']

    namespaces = [DEFAULT_NAMESPACE]
    if multi_asic.get_num_asics() > 1:
        namespaces = multi_asic.get_namespaces_from_linux()

    for ns in namespaces:
        # The host namespace uses the default connector; per-ASIC
        # namespaces need an explicit unix-socket connection.
        if ns is DEFAULT_NAMESPACE:
            config_db = ConfigDBConnector()
        else:
            config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=ns)
        config_db.connect()
        for table in CBF_TABLE_NAMES:
            config_db.delete_table(table)
def _clear_qos():
    """Delete all QoS and buffer configuration tables from CONFIG_DB in
    every namespace."""
    QOS_TABLE_NAMES = [
        'PORT_QOS_MAP',
        'QUEUE',
        'TC_TO_PRIORITY_GROUP_MAP',
        'MAP_PFC_PRIORITY_TO_QUEUE',
        'TC_TO_QUEUE_MAP',
        'DSCP_TO_TC_MAP',
        'MPLS_TC_TO_TC_MAP',
        'SCHEDULER',
        'PFC_PRIORITY_TO_PRIORITY_GROUP_MAP',
        'WRED_PROFILE',
        'CABLE_LENGTH',
        'BUFFER_PG',
        'BUFFER_QUEUE',
        'BUFFER_PORT_INGRESS_PROFILE_LIST',
        'BUFFER_PORT_EGRESS_PROFILE_LIST',
        'BUFFER_PROFILE',
        'BUFFER_POOL',
        'DEFAULT_LOSSLESS_BUFFER_PARAMETER',
        'LOSSLESS_TRAFFIC_PATTERN']

    namespaces = [DEFAULT_NAMESPACE]
    if multi_asic.get_num_asics() > 1:
        namespaces = multi_asic.get_namespaces_from_linux()

    for ns in namespaces:
        # The host namespace uses the default connector; per-ASIC
        # namespaces need an explicit unix-socket connection.
        if ns is DEFAULT_NAMESPACE:
            config_db = ConfigDBConnector()
        else:
            config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=ns)
        config_db.connect()
        for table in QOS_TABLE_NAMES:
            config_db.delete_table(table)
def _get_sonic_generated_services(num_asic):
    """Parse the generated-services file into service name lists.

    Returns (single_instance_services, multi_instance_services), or None
    when the file does not exist.  Lines containing '@' are per-ASIC
    services: on multi-ASIC systems they go into the multi-instance list,
    otherwise they are treated as ordinary services.
    """
    if not os.path.isfile(SONIC_GENERATED_SERVICE_PATH):
        return None
    generated_services_list = []
    generated_multi_instance_services = []
    with open(SONIC_GENERATED_SERVICE_PATH) as svc_file:
        for raw_line in svc_file:
            name = raw_line.rstrip('\n')
            if '@' in raw_line:
                name = name.replace('@', '')
                if num_asic > 1:
                    generated_multi_instance_services.append(name)
                else:
                    generated_services_list.append(name)
            else:
                generated_services_list.append(name)
    return generated_services_list, generated_multi_instance_services
# Callback for confirmation prompt. Aborts if user enters "n"
def _abort_if_false(ctx, param, value):
if not value:
ctx.abort()
def _get_disabled_services_list(config_db):
disabled_services_list = []
feature_table = config_db.get_table('FEATURE')
if feature_table is not None:
for feature_name in feature_table:
if not feature_name:
log.log_warning("Feature is None")
continue
state = feature_table[feature_name]['state']
if not state:
log.log_warning("Enable state of feature '{}' is None".format(feature_name))
continue
if state == "disabled":
disabled_services_list.append(feature_name)
else:
log.log_warning("Unable to retreive FEATURE table")
return disabled_services_list
def _stop_services():
    """Stop the SONiC systemd target; when monit is running, first disable
    its container monitoring so stopped containers are not restarted."""
    try:
        # Probe monit; CalledProcessError means it is not running.
        subprocess.check_call("sudo monit status", shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    except subprocess.CalledProcessError:
        pass
    else:
        click.echo("Disabling container monitoring ...")
        clicommon.run_command("sudo monit unmonitor container_checker")

    click.echo("Stopping SONiC target ...")
    clicommon.run_command("sudo systemctl stop sonic.target --job-mode replace-irreversibly")
def _get_sonic_services():
    """Yield the names of all services under sonic.target (lazily)."""
    output = clicommon.run_command("systemctl list-dependencies --plain sonic.target | sed '1d'", return_cmd=True)
    return (line.strip() for line in output.splitlines())
def _get_delayed_sonic_services():
    """Return the delayed (timer-activated) SONiC services whose timer unit
    is enabled, with the '.timer' suffix stripped."""
    out_timers = clicommon.run_command("systemctl list-dependencies --plain sonic-delayed.target | sed '1d'", return_cmd=True)
    out_states = clicommon.run_command("systemctl is-enabled {}".format(out_timers.replace("\n", " ")), return_cmd=True)
    timers = [line.strip() for line in out_timers.splitlines()]
    states = [line.strip() for line in out_states.splitlines()]
    services = []
    # Fix: zip() pairs each timer with its state; the original indexed with
    # timer.index(unit), which returns the wrong state for duplicate names.
    for unit, state in zip(timers, states):
        if state == "enabled":
            # Fix: str.rstrip(".timer") strips a *character set*, mangling
            # unit names that end in t/i/m/e/r; strip the exact suffix.
            if unit.endswith(".timer"):
                unit = unit[:-len(".timer")]
            services.append(unit)
    return services
def _reset_failed_services():
    """Clear the systemd 'failed' state of every SONiC service (regular
    and delayed)."""
    all_services = itertools.chain(_get_sonic_services(), _get_delayed_sonic_services())
    for unit in all_services:
        clicommon.run_command("systemctl reset-failed {}".format(unit))
def _restart_services():
    """Restart the SONiC systemd target, re-enable monit container
    monitoring when monit is available, and reload monit config."""
    click.echo("Restarting SONiC target ...")
    clicommon.run_command("sudo systemctl restart sonic.target")

    try:
        # Probe monit; CalledProcessError means it is not running.
        subprocess.check_call("sudo monit status", shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    except subprocess.CalledProcessError:
        pass
    else:
        click.echo("Enabling container monitoring ...")
        clicommon.run_command("sudo monit monitor container_checker")

    # Reload Monit configuration to pick up new hostname in case it changed
    click.echo("Reloading Monit configuration ...")
    clicommon.run_command("sudo monit reload")
def _get_delay_timers():
    """List the timer units attached to sonic-delayed.target."""
    output = clicommon.run_command("systemctl list-dependencies sonic-delayed.target --plain |sed '1d'", return_cmd=True)
    return [line.strip() for line in output.splitlines()]
def _delay_timers_elapsed():
    """Return True once every delay timer has fired at least once.

    A LastTriggerUSecMonotonic of "0" means the timer has not fired yet.
    """
    return all(
        clicommon.run_command(
            "systemctl show {} --property=LastTriggerUSecMonotonic --value".format(timer),
            return_cmd=True).strip() != "0"
        for timer in _get_delay_timers())
def _per_namespace_swss_ready(service_name):
    """Return True when *service_name* is active and has been up > 120 s.

    The grace period gives swss time to settle after activation before
    config operations proceed.
    """
    out = clicommon.run_command("systemctl show {} --property ActiveState --value".format(service_name), return_cmd=True)
    if out.strip() != "active":
        return False
    out = clicommon.run_command("systemctl show {} --property ActiveEnterTimestampMonotonic --value".format(service_name), return_cmd=True)
    swss_up_time = float(out.strip()) / 1000000
    # Idiom fix: return the comparison directly instead of the original
    # 'if cond: return True else: return False'.
    return time.monotonic() - swss_up_time > 120
def _swss_ready():
    """Return True when every per-ASIC swss service instance is ready."""
    num_asics = multi_asic.get_num_asics()
    if num_asics == 1:
        list_of_swss = ["swss.service"]
    else:
        list_of_swss = ["swss@{}.service".format(asic) for asic in range(num_asics)]

    # Idiom fix: 'not ...' replaces the original '== False' comparison.
    for service_name in list_of_swss:
        if not _per_namespace_swss_ready(service_name):
            return False
    return True
def _is_system_starting():
    """Return True while systemd reports the system as still starting."""
    state = clicommon.run_command("sudo systemctl is-system-running", return_cmd=True)
    return state.strip() == "starting"
def interface_is_in_vlan(vlan_member_table, interface_name):
    """ Check if an interface is in a vlan

    vlan_member_table keys are (vlan, member) tuples; only the member half
    is compared.
    """
    # Idiom fix: any() replaces the manual search loop.
    return any(member == interface_name for _, member in vlan_member_table)
def interface_is_in_portchannel(portchannel_member_table, interface_name):
    """ Check if an interface is part of portchannel

    portchannel_member_table keys are (portchannel, member) tuples; only
    the member half is compared.
    """
    # Idiom fix: any() replaces the manual search loop.
    return any(member == interface_name for _, member in portchannel_member_table)
def interface_has_mirror_config(mirror_table, interface_name):
    """ Check if port is already configured with mirror config

    True when any MIRROR_SESSION entry names the port as src_port or
    dst_port.
    """
    # Idiom fix: any() over values replaces the manual search loop.
    return any(
        ('src_port' in v and v['src_port'] == interface_name) or
        ('dst_port' in v and v['dst_port'] == interface_name)
        for v in mirror_table.values())
def validate_mirror_session_config(config_db, session_name, dst_port, src_port, direction):
    """ Check if SPAN mirror-session config is valid

    Prints an error and returns False when the session name already
    exists, when dst_port/src_port fail the constraints below, or when
    direction is not one of rx/tx/both.  Returns True otherwise.
    """
    if len(config_db.get_entry('MIRROR_SESSION', session_name)) != 0:
        click.echo("Error: {} already exists".format(session_name))
        return False

    vlan_member_table = config_db.get_table('VLAN_MEMBER')
    mirror_table = config_db.get_table('MIRROR_SESSION')
    portchannel_member_table = config_db.get_table('PORTCHANNEL_MEMBER')

    # Destination must be a plain physical port: valid, no VLAN or
    # portchannel membership, no existing mirror role, not a router intf.
    if dst_port:
        if not interface_name_is_valid(config_db, dst_port):
            click.echo("Error: Destination Interface {} is invalid".format(dst_port))
            return False

        if interface_is_in_vlan(vlan_member_table, dst_port):
            click.echo("Error: Destination Interface {} has vlan config".format(dst_port))
            return False

        if interface_has_mirror_config(mirror_table, dst_port):
            click.echo("Error: Destination Interface {} already has mirror config".format(dst_port))
            return False

        if interface_is_in_portchannel(portchannel_member_table, dst_port):
            click.echo("Error: Destination Interface {} has portchannel config".format(dst_port))
            return False

        if clicommon.is_port_router_interface(config_db, dst_port):
            click.echo("Error: Destination Interface {} is a L3 interface".format(dst_port))
            return False

    # Sources may be a comma-separated list; each must be a valid port,
    # distinct from the destination, and not already mirrored.
    if src_port:
        for port in src_port.split(","):
            if not interface_name_is_valid(config_db, port):
                click.echo("Error: Source Interface {} is invalid".format(port))
                return False
            if dst_port and dst_port == port:
                click.echo("Error: Destination Interface cant be same as Source Interface")
                return False
            if interface_has_mirror_config(mirror_table, port):
                click.echo("Error: Source Interface {} already has mirror config".format(port))
                return False

    if direction:
        if direction not in ['rx', 'tx', 'both']:
            click.echo("Error: Direction {} is invalid".format(direction))
            return False

    return True
def cli_sroute_to_config(ctx, command_str, strict_nh = True):
    """Translate a static-route CLI token list into a CONFIG_DB pair.

    command_str -- tokens after 'config route add/del', e.g.
                   ['prefix', 'vrf', <vrf>, <A.B.C.D/M>, 'nexthop', ...].
    strict_nh   -- when True a nexthop clause is mandatory.

    Returns (key, config_entry) where key is '<vrf>|<prefix>' or the bare
    prefix, and config_entry may carry 'nexthop', 'nexthop-vrf', 'ifname'.
    Fails the click context on malformed input.
    """
    if len(command_str) < 2 or len(command_str) > 9:
        ctx.fail("argument is not in pattern prefix [vrf <vrf_name>] <A.B.C.D/M> nexthop <[vrf <vrf_name>] <A.B.C.D>>|<dev <dev_name>>!")
    if "prefix" not in command_str:
        ctx.fail("argument is incomplete, prefix not found!")
    if "nexthop" not in command_str and strict_nh:
        ctx.fail("argument is incomplete, nexthop not found!")

    # Split the tokens into a prefix part and an optional nexthop part.
    nexthop_str = None
    config_entry = {}
    vrf_name = ""

    if "nexthop" in command_str:
        idx = command_str.index("nexthop")
        prefix_str = command_str[:idx]
        nexthop_str = command_str[idx:]
    else:
        prefix_str = command_str[:]

    if prefix_str:
        if 'prefix' in prefix_str and 'vrf' in prefix_str:
            # prefix_str: ['prefix', 'vrf', Vrf-name, ip]
            vrf_name = prefix_str[2]
            ip_prefix = prefix_str[3]
        elif 'prefix' in prefix_str:
            # prefix_str: ['prefix', ip]
            ip_prefix = prefix_str[1]
        else:
            ctx.fail("prefix is not in pattern!")

    if nexthop_str:
        if 'nexthop' in nexthop_str and 'vrf' in nexthop_str:
            # nexthop_str: ['nexthop', 'vrf', Vrf-name, ip]
            config_entry["nexthop"] = nexthop_str[3]
            config_entry["nexthop-vrf"] = nexthop_str[2]
        elif 'nexthop' in nexthop_str and 'dev' in nexthop_str:
            # nexthop_str: ['nexthop', 'dev', ifname]
            config_entry["ifname"] = nexthop_str[2]
        elif 'nexthop' in nexthop_str:
            # nexthop_str: ['nexthop', ip]
            config_entry["nexthop"] = nexthop_str[1]
        else:
            ctx.fail("nexthop is not in pattern!")

    # NOTE(review): ip_prefix is only bound inside the 'if prefix_str'
    # branch; the 'prefix' presence check above appears to guarantee a
    # non-empty prefix_str, but an empty one would raise NameError here
    # rather than ctx.fail -- confirm against callers.
    try:
        # Validate the prefix and each (comma-separated) nexthop address.
        ipaddress.ip_network(ip_prefix)
        if 'nexthop' in config_entry:
            nh_list = config_entry['nexthop'].split(',')
            for nh in nh_list:
                # Nexthop to portchannel
                if nh.startswith('PortChannel'):
                    config_db = ctx.obj['config_db']
                    if not nh in config_db.get_keys('PORTCHANNEL'):
                        ctx.fail("portchannel does not exist.")
                else:
                    ipaddress.ip_address(nh)
    except ValueError:
        ctx.fail("ip address is not valid.")

    if not vrf_name == "":
        key = vrf_name + "|" + ip_prefix
    else:
        key = ip_prefix

    return key, config_entry
def update_sonic_environment():
    """Prepare sonic environment variable using SONiC environment template file.
    """
    SONIC_ENV_TEMPLATE_FILE = os.path.join('/', "usr", "share", "sonic", "templates", "sonic-environment.j2")
    SONIC_VERSION_YML_FILE = os.path.join('/', "etc", "sonic", "sonic_version.yml")
    SONIC_ENV_FILE = os.path.join('/', "etc", "sonic", "sonic-environment")

    # Render the template only when both inputs exist on this image.
    if not (os.path.isfile(SONIC_ENV_TEMPLATE_FILE) and os.path.isfile(SONIC_VERSION_YML_FILE)):
        return
    command = "{} -d -y {} -t {},{}".format(
        SONIC_CFGGEN_PATH,
        SONIC_VERSION_YML_FILE,
        SONIC_ENV_TEMPLATE_FILE,
        SONIC_ENV_FILE
    )
    clicommon.run_command(command, display_cmd=True)
def cache_arp_entries():
    """Dump ARP/FDB tables to /host/config-reload so swss can restore them
    after a config reload.  Returns True on success, False otherwise."""
    cache_dir = '/host/config-reload'
    click.echo('Caching ARP table to {}'.format(cache_dir))
    if not os.path.exists(cache_dir):
        os.mkdir(cache_dir)

    arp_cache_cmd = '/usr/local/bin/fast-reboot-dump.py -t {}'.format(cache_dir)
    cache_proc = subprocess.Popen(arp_cache_cmd, shell=True, text=True, stdout=subprocess.PIPE)
    _, cache_err = cache_proc.communicate()
    if cache_err:
        click.echo("Could not cache ARP and FDB info prior to reloading")
        return False

    fdb_cache_file = os.path.join(cache_dir, 'fdb.json')
    arp_cache_file = os.path.join(cache_dir, 'arp.json')
    fdb_filter_cmd = '/usr/local/bin/filter_fdb_entries -f {} -a {} -c /etc/sonic/configdb.json'.format(fdb_cache_file, arp_cache_file)
    filter_proc = subprocess.Popen(fdb_filter_cmd, shell=True, text=True, stdout=subprocess.PIPE)
    _, filter_err = filter_proc.communicate()
    if filter_err:
        click.echo("Could not filter FDB entries prior to reloading")
        return False

    # If we are able to successfully cache ARP table info, signal SWSS to restore from our cache
    # by creating /host/config-reload/needs-restore
    open(os.path.join(cache_dir, 'needs-restore'), 'w').close()
    return True
def remove_router_interface_ip_address(config_db, interface_name, ipaddress_to_remove):
    """Delete the CONFIG_DB row binding *ipaddress_to_remove* to
    *interface_name*, if such a row exists."""
    table_name = get_interface_table_name(interface_name)

    for key in config_db.get_keys(table_name):
        # Address rows are (ifname, ip_prefix) tuples; skip attribute rows.
        if not isinstance(key, tuple) or len(key) != 2:
            continue
        iface, addr_str = key
        if iface == interface_name and ipaddress.ip_interface(addr_str) == ipaddress_to_remove:
            config_db.set_entry(table_name, (interface_name, addr_str), None)
def validate_ipv4_address(ctx, param, ip_addr):
    """Helper function to validate ipv4 address

    Click callback: returns *ip_addr* unchanged when it parses as an IPv4
    address/network, otherwise raises click.UsageError.
    """
    try:
        network = ipaddress.ip_network(ip_addr, False)
    except ValueError as e:
        raise click.UsageError(str(e))
    if network.version != 4:
        raise click.UsageError("{} is not a valid IPv4 address".format(ip_addr))
    return ip_addr
def validate_gre_type(ctx, _, value):
    """A validator for validating input gre_type

    Accepts decimal or 0x-prefixed hexadecimal text; returns the integer
    value or raises click.UsageError when unparsable or out of range.
    """
    try:
        radix = 16 if value.lower().startswith('0x') else 10
        gre_type_value = int(value, radix)
        if not (GRE_TYPE_RANGE.min <= gre_type_value <= GRE_TYPE_RANGE.max):
            raise click.UsageError("{} is not a valid GRE type".format(value))
        return gre_type_value
    except ValueError:
        raise click.UsageError("{} is not a valid GRE type".format(value))
# This is our main entrypoint - the main 'config' command
@click.group(cls=clicommon.AbbreviationGroup, context_settings=CONTEXT_SETTINGS)
@click.pass_context
def config(ctx):
    """SONiC command line - 'config' command"""
    #
    # Load asic_type for further use
    #
    global asic_type

    try:
        version_info = device_info.get_sonic_version_info()
        if version_info:
            asic_type = version_info['asic_type']
        else:
            asic_type = None
    except (KeyError, TypeError) as e:
        print("Caught an exception: " + str(e))
        raise click.Abort()

    # Load database config files
    load_db_config()
    # Config changes write to the DB/system files, so require root.
    if os.geteuid() != 0:
        exit("Root privileges are required for this operation")

    # Shared Db handle for all subcommands via the click context.
    ctx.obj = Db()
# Add groups from other modules
config.add_command(aaa.aaa)
config.add_command(aaa.tacacs)
config.add_command(aaa.radius)
config.add_command(chassis_modules.chassis)
config.add_command(console.console)
config.add_command(feature.feature)
config.add_command(kdump.kdump)
config.add_command(kube.kubernetes)
config.add_command(muxcable.muxcable)
config.add_command(nat.nat)
config.add_command(vlan.vlan)
config.add_command(vxlan.vxlan)
#add mclag commands
config.add_command(mclag.mclag)
config.add_command(mclag.mclag_member)
config.add_command(mclag.mclag_unique_ip)
@config.command()
@click.option('-y', '--yes', is_flag=True, callback=_abort_if_false,
              expose_value=False, prompt='Existing files will be overwritten, continue?')
@click.argument('filename', required=False)
def save(filename):
    """Export current config DB to a file on disk.\n
       <filename> : Names of configuration file(s) to save, separated by comma with no spaces in between
    """
    num_asic = multi_asic.get_num_asics()
    cfg_files = []

    # One file for the host; multi-asic adds one per ASIC namespace.
    num_cfg_file = 1
    if multi_asic.is_multi_asic():
        num_cfg_file += num_asic

    # If the user give the filename[s], extract the file names.
    if filename is not None:
        cfg_files = filename.split(',')

        if len(cfg_files) != num_cfg_file:
            click.echo("Input {} config file(s) separated by comma for multiple files ".format(num_cfg_file))
            return

    # In case of multi-asic mode we have additional config_db{NS}.json files for
    # various namespaces created per ASIC. {NS} is the namespace index.
    for inst in range(-1, num_cfg_file-1):
        #inst = -1, refers to the linux host where there is no namespace.
        if inst == -1:
            namespace = None
        else:
            namespace = "{}{}".format(NAMESPACE_PREFIX, inst)

        # Get the file from user input, else take the default file /etc/sonic/config_db{NS_id}.json
        if cfg_files:
            file = cfg_files[inst+1]
        else:
            if namespace is None:
                file = DEFAULT_CONFIG_DB_FILE
            else:
                file = "/etc/sonic/config_db{}.json".format(inst)

        # Dump the namespace's DB to the file via sonic-cfggen.
        if namespace is None:
            command = "{} -d --print-data > {}".format(SONIC_CFGGEN_PATH, file)
        else:
            command = "{} -n {} -d --print-data > {}".format(SONIC_CFGGEN_PATH, namespace, file)

        log.log_info("'save' executing...")
        clicommon.run_command(command, display_cmd=True)

        # Re-read, naturally sort tables/keys, and pretty-print the export.
        config_db = sort_dict(read_json_file(file))
        with open(file, 'w') as config_db_file:
            json.dump(config_db, config_db_file, indent=4)
@config.command()
@click.option('-y', '--yes', is_flag=True)
@click.argument('filename', required=False)
def load(filename, yes):
    """Import a previous saved config DB dump file.
    <filename> : Names of configuration file(s) to load, separated by comma with no spaces in between
    """
    # Confirm with the user unless -y/--yes was supplied.
    if filename is None:
        message = 'Load config from the default config file(s) ?'
    else:
        message = 'Load config from the file(s) {} ?'.format(filename)

    if not yes:
        click.confirm(message, abort=True)

    num_asic = multi_asic.get_num_asics()
    cfg_files = []

    # One file for the host; multi-ASIC platforms add one file per ASIC namespace.
    num_cfg_file = 1
    if multi_asic.is_multi_asic():
        num_cfg_file += num_asic

    # If the user give the filename[s], extract the file names.
    if filename is not None:
        cfg_files = filename.split(',')

        if len(cfg_files) != num_cfg_file:
            click.echo("Input {} config file(s) separated by comma for multiple files ".format(num_cfg_file))
            return

    # In case of multi-asic mode we have additional config_db{NS}.json files for
    # various namespaces created per ASIC. {NS} is the namespace index.
    for inst in range(-1, num_cfg_file-1):
        #inst = -1, refers to the linux host where there is no namespace.
        if inst == -1:
            namespace = None
        else:
            namespace = "{}{}".format(NAMESPACE_PREFIX, inst)

        # Get the file from user input, else take the default file /etc/sonic/config_db{NS_id}.json
        if cfg_files:
            file = cfg_files[inst+1]
        else:
            if namespace is None:
                file = DEFAULT_CONFIG_DB_FILE
            else:
                file = "/etc/sonic/config_db{}.json".format(inst)

        # if any of the config files in linux host OR namespace is not present, return
        if not os.path.exists(file):
            click.echo("The config_db file {} doesn't exist".format(file))
            return

        # sonic-cfggen writes the file's contents into the (namespace's) CONFIG_DB.
        if namespace is None:
            command = "{} -j {} --write-to-db".format(SONIC_CFGGEN_PATH, file)
        else:
            command = "{} -n {} -j {} --write-to-db".format(SONIC_CFGGEN_PATH, namespace, file)

        log.log_info("'load' executing...")
        clicommon.run_command(command, display_cmd=True)
def print_dry_run_message(dry_run):
    """Emit a highlighted banner when the caller is running in dry-run mode."""
    if not dry_run:
        return
    click.secho("** DRY RUN EXECUTION **", fg="yellow", underline=True)
@config.command('apply-patch')
@click.argument('patch-file-path', type=str, required=True)
@click.option('-f', '--format', type=click.Choice([e.name for e in ConfigFormat]),
              default=ConfigFormat.CONFIGDB.name,
              help='format of config of the patch is either ConfigDb(ABNF) or SonicYang',
              show_default=True)
@click.option('-d', '--dry-run', is_flag=True, default=False, help='test out the command without affecting config state')
@click.option('-n', '--ignore-non-yang-tables', is_flag=True, default=False, help='ignore validation for tables without YANG models', hidden=True)
@click.option('-i', '--ignore-path', multiple=True, help='ignore validation for config specified by given path which is a JsonPointer', hidden=True)
@click.option('-v', '--verbose', is_flag=True, default=False, help='print additional details of what the operation is doing')
@click.pass_context
def apply_patch(ctx, patch_file_path, format, dry_run, ignore_non_yang_tables, ignore_path, verbose):
    """Apply given patch of updates to Config. A patch is a JsonPatch which follows rfc6902.
    This command can be used do partial updates to the config with minimum disruption to running processes.
    It allows addition as well as deletion of configs. The patch file represents a diff of ConfigDb(ABNF)
    format or SonicYang format.

    <patch-file-path>: Path to the patch file on the file-system."""
    try:
        print_dry_run_message(dry_run)

        # Read and parse the patch file into an rfc6902 JsonPatch object.
        with open(patch_file_path, 'r') as fh:
            text = fh.read()
            patch_as_json = json.loads(text)
            patch = jsonpatch.JsonPatch(patch_as_json)

        config_format = ConfigFormat[format.upper()]
        GenericUpdater().apply_patch(patch, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_path)

        click.secho("Patch applied successfully.", fg="cyan", underline=True)
    except Exception as ex:
        # Any failure (file I/O, JSON parse, validation, apply) is reported
        # uniformly and aborts the command with a non-zero exit status.
        click.secho("Failed to apply patch", fg="red", underline=True, err=True)
        ctx.fail(ex)
@config.command()
@click.argument('target-file-path', type=str, required=True)
@click.option('-f', '--format', type=click.Choice([e.name for e in ConfigFormat]),
              default=ConfigFormat.CONFIGDB.name,
              help='format of target config is either ConfigDb(ABNF) or SonicYang',
              show_default=True)
@click.option('-d', '--dry-run', is_flag=True, default=False, help='test out the command without affecting config state')
@click.option('-n', '--ignore-non-yang-tables', is_flag=True, default=False, help='ignore validation for tables without YANG models', hidden=True)
@click.option('-i', '--ignore-path', multiple=True, help='ignore validation for config specified by given path which is a JsonPointer', hidden=True)
@click.option('-v', '--verbose', is_flag=True, default=False, help='print additional details of what the operation is doing')
@click.pass_context
def replace(ctx, target_file_path, format, dry_run, ignore_non_yang_tables, ignore_path, verbose):
    """Replace the whole config with the specified config. The config is replaced with minimum disruption e.g.
    if ACL config is different between current and target config only ACL config is updated, and other config/services
    such as DHCP will not be affected.

    **WARNING** The target config file should be the whole config, not just the part intended to be updated.

    <target-file-path>: Path to the target file on the file-system."""
    try:
        print_dry_run_message(dry_run)

        # Read the full target config and hand it to GenericUpdater, which
        # computes and applies the minimal diff against the running config.
        with open(target_file_path, 'r') as fh:
            target_config_as_text = fh.read()
            target_config = json.loads(target_config_as_text)

        config_format = ConfigFormat[format.upper()]
        GenericUpdater().replace(target_config, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_path)

        click.secho("Config replaced successfully.", fg="cyan", underline=True)
    except Exception as ex:
        # Any failure is reported uniformly and aborts with a non-zero status.
        click.secho("Failed to replace config", fg="red", underline=True, err=True)
        ctx.fail(ex)
@config.command()
@click.argument('checkpoint-name', type=str, required=True)
@click.option('-d', '--dry-run', is_flag=True, default=False, help='test out the command without affecting config state')
@click.option('-n', '--ignore-non-yang-tables', is_flag=True, default=False, help='ignore validation for tables without YANG models', hidden=True)
@click.option('-i', '--ignore-path', multiple=True, help='ignore validation for config specified by given path which is a JsonPointer', hidden=True)
@click.option('-v', '--verbose', is_flag=True, default=False, help='print additional details of what the operation is doing')
@click.pass_context
def rollback(ctx, checkpoint_name, dry_run, ignore_non_yang_tables, ignore_path, verbose):
    """Rollback the whole config to the specified checkpoint. The config is rolled back with minimum disruption e.g.
    if ACL config is different between current and checkpoint config only ACL config is updated, and other config/services
    such as DHCP will not be affected.

    <checkpoint-name>: The checkpoint name, use `config list-checkpoints` command to see available checkpoints."""
    try:
        print_dry_run_message(dry_run)

        # GenericUpdater applies the minimal diff between the running config
        # and the named checkpoint.
        GenericUpdater().rollback(checkpoint_name, verbose, dry_run, ignore_non_yang_tables, ignore_path)

        click.secho("Config rolled back successfully.", fg="cyan", underline=True)
    except Exception as ex:
        click.secho("Failed to rollback config", fg="red", underline=True, err=True)
        ctx.fail(ex)
@config.command()
@click.argument('checkpoint-name', type=str, required=True)
@click.option('-v', '--verbose', is_flag=True, default=False, help='print additional details of what the operation is doing')
@click.pass_context
def checkpoint(ctx, checkpoint_name, verbose):
    """Take a checkpoint of the whole current config with the specified checkpoint name.

    <checkpoint-name>: The checkpoint name, use `config list-checkpoints` command to see available checkpoints."""
    # Delegate the snapshot to GenericUpdater; any failure aborts the command.
    updater = GenericUpdater()
    try:
        updater.checkpoint(checkpoint_name, verbose)
        click.secho("Checkpoint created successfully.", fg="cyan", underline=True)
    except Exception as err:
        click.secho("Failed to create a config checkpoint", fg="red", underline=True, err=True)
        ctx.fail(err)
@config.command('delete-checkpoint')
@click.argument('checkpoint-name', type=str, required=True)
@click.option('-v', '--verbose', is_flag=True, default=False, help='print additional details of what the operation is doing')
@click.pass_context
def delete_checkpoint(ctx, checkpoint_name, verbose):
    """Delete a checkpoint with the specified checkpoint name.

    <checkpoint-name>: The checkpoint name, use `config list-checkpoints` command to see available checkpoints."""
    # Delegate the removal to GenericUpdater; any failure aborts the command.
    updater = GenericUpdater()
    try:
        updater.delete_checkpoint(checkpoint_name, verbose)
        click.secho("Checkpoint deleted successfully.", fg="cyan", underline=True)
    except Exception as err:
        click.secho("Failed to delete config checkpoint", fg="red", underline=True, err=True)
        ctx.fail(err)
@config.command('list-checkpoints')
@click.option('-v', '--verbose', is_flag=True, default=False, help='print additional details of what the operation is doing')
@click.pass_context
def list_checkpoints(ctx, verbose):
    """List the config checkpoints available."""
    try:
        # Fetch the checkpoint names and pretty-print them as indented JSON.
        available = GenericUpdater().list_checkpoints(verbose)
        click.echo(json.dumps(available, indent=4))
    except Exception as err:
        click.secho("Failed to list config checkpoints", fg="red", underline=True, err=True)
        ctx.fail(err)
@config.command()
@click.option('-y', '--yes', is_flag=True)
@click.option('-l', '--load-sysinfo', is_flag=True, help='load system default information (mac, portmap etc) first.')
@click.option('-n', '--no_service_restart', default=False, is_flag=True, help='Do not restart docker services')
@click.option('-d', '--disable_arp_cache', default=False, is_flag=True, help='Do not cache ARP table before reloading (applies to dual ToR systems only)')
@click.option('-f', '--force', default=False, is_flag=True, help='Force config reload without system checks')
@click.option('-t', '--file_format', default='config_db',type=click.Choice(['config_yang', 'config_db']),show_default=True,help='specify the file format')
@click.argument('filename', required=False)
@clicommon.pass_db
def reload(db, filename, yes, load_sysinfo, no_service_restart, disable_arp_cache, force, file_format):
    """Clear current configuration and import a previous saved config DB dump file.
    <filename> : Names of configuration file(s) to load, separated by comma with no spaces in between
    """
    # System-readiness checks; -f skips them, and they are irrelevant when
    # services are not going to be restarted.
    if not force and not no_service_restart:
        if _is_system_starting():
            click.echo("System is not up. Retry later or use -f to avoid system checks")
            return

        if not _delay_timers_elapsed():
            click.echo("Relevant services are not up. Retry later or use -f to avoid system checks")
            return

        if not _swss_ready():
            click.echo("SwSS container is not ready. Retry later or use -f to avoid system checks")
            return

    # Confirm with the user unless -y/--yes was supplied.
    if filename is None:
        message = 'Clear current config and reload config in {} format from the default config file(s) ?'.format(file_format)
    else:
        message = 'Clear current config and reload config in {} from the file(s) {} ?'.format(file_format, filename)

    if not yes:
        click.confirm(message, abort=True)

    log.log_info("'reload' executing...")

    num_asic = multi_asic.get_num_asics()
    cfg_files = []

    num_cfg_file = 1
    # single config_yang file for the multi asic device
    if multi_asic.is_multi_asic() and file_format == 'config_db':
        num_cfg_file += num_asic

    # Remove cached PG drop counters data
    dropstat_dir_prefix = '/tmp/dropstat'
    command = "rm -rf {}-*".format(dropstat_dir_prefix)
    clicommon.run_command(command, display_cmd=True)

    # If the user give the filename[s], extract the file names.
    if filename is not None:
        cfg_files = filename.split(',')

        if len(cfg_files) != num_cfg_file:
            click.echo("Input {} config file(s) separated by comma for multiple files ".format(num_cfg_file))
            return

    # For dual ToR devices, cache ARP and FDB info
    localhost_metadata = db.cfgdb.get_table('DEVICE_METADATA')['localhost']
    cache_arp_table = not disable_arp_cache and 'subtype' in localhost_metadata and localhost_metadata['subtype'].lower() == 'dualtor'

    if cache_arp_table:
        cache_arp_entries()

    #Stop services before config push
    if not no_service_restart:
        log.log_info("'reload' stopping services...")
        _stop_services()

    # In Single ASIC platforms we have single DB service. In multi-ASIC platforms we have a global DB
    # service running in the host + DB services running in each ASIC namespace created per ASIC.
    # In the below logic, we get all namespaces in this platform and add an empty namespace ''
    # denoting the current namespace which we are in ( the linux host )
    for inst in range(-1, num_cfg_file-1):
        # Get the namespace name, for linux host it is None
        if inst == -1:
            namespace = None
        else:
            namespace = "{}{}".format(NAMESPACE_PREFIX, inst)

        # Get the file from user input, else take the default file /etc/sonic/config_db{NS_id}.json
        if cfg_files:
            file = cfg_files[inst+1]
        else:
            if file_format == 'config_db':
                if namespace is None:
                    file = DEFAULT_CONFIG_DB_FILE
                else:
                    file = "/etc/sonic/config_db{}.json".format(inst)
            else:
                file = DEFAULT_CONFIG_YANG_FILE

        # Check the file exists before proceeding.
        if not os.path.exists(file):
            click.echo("The config file {} doesn't exist".format(file))
            continue

        if load_sysinfo:
            # Extract the HWSKU from the config file via sonic-cfggen; it is
            # needed below to regenerate system defaults (-H -k <hwsku>).
            try:
                command = "{} -j {} -v DEVICE_METADATA.localhost.hwsku".format(SONIC_CFGGEN_PATH, file)
                proc = subprocess.Popen(command, shell=True, text=True, stdout=subprocess.PIPE)
                output, err = proc.communicate()

            except FileNotFoundError as e:
                click.echo("{}".format(str(e)), err=True)
                raise click.Abort()
            except Exception as e:
                click.echo("{}\n{}".format(type(e), str(e)), err=True)
                raise click.Abort()

            if not output:
                click.secho("Could not get the HWSKU from config file, Exiting!!!", fg='magenta')
                sys.exit(1)

            cfg_hwsku = output.strip()

        if namespace is None:
            config_db = ConfigDBConnector()
        else:
            config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)

        # Wipe the current CONFIG_DB contents before the fresh load.
        config_db.connect()
        client = config_db.get_redis_client(config_db.CONFIG_DB)
        client.flushdb()

        if load_sysinfo:
            if namespace is None:
                command = "{} -H -k {} --write-to-db".format(SONIC_CFGGEN_PATH, cfg_hwsku)
            else:
                command = "{} -H -k {} -n {} --write-to-db".format(SONIC_CFGGEN_PATH, cfg_hwsku, namespace)
            clicommon.run_command(command, display_cmd=True)

        # For the database service running in linux host we use the file user gives as input
        # or by default DEFAULT_CONFIG_DB_FILE. In the case of database service running in namespace,
        # the default config_db<namespaceID>.json format is used.

        config_gen_opts = ""

        if os.path.isfile(INIT_CFG_FILE):
            config_gen_opts += " -j {} ".format(INIT_CFG_FILE)

        if file_format == 'config_db':
            config_gen_opts += ' -j {} '.format(file)
        else:
            config_gen_opts += ' -Y {} '.format(file)

        if namespace is not None:
            config_gen_opts += " -n {} ".format(namespace)

        command = "{sonic_cfggen} {options} --write-to-db".format(
            sonic_cfggen=SONIC_CFGGEN_PATH,
            options=config_gen_opts)

        clicommon.run_command(command, display_cmd=True)
        client.set(config_db.INIT_INDICATOR, 1)

        # Migrate DB contents to latest version
        db_migrator='/usr/local/bin/db_migrator.py'
        if os.path.isfile(db_migrator) and os.access(db_migrator, os.X_OK):
            if namespace is None:
                command = "{} -o migrate".format(db_migrator)
            else:
                command = "{} -o migrate -n {}".format(db_migrator, namespace)
            clicommon.run_command(command, display_cmd=True)

    # Re-generate the environment variable in case config_db.json was edited
    update_sonic_environment()

    # We first run "systemctl reset-failed" to remove the "failed"
    # status from all services before we attempt to restart them
    if not no_service_restart:
        _reset_failed_services()
        log.log_info("'reload' restarting services...")
        _restart_services()
@config.command("load_mgmt_config")
@click.option('-y', '--yes', is_flag=True, callback=_abort_if_false,
expose_value=False, prompt='Reload mgmt config?')
@click.argument('filename', default='/etc/sonic/device_desc.xml', type=click.Path(exists=True))
def load_mgmt_config(filename):
"""Reconfigure hostname and mgmt interface based on device description file."""
log.log_info("'load_mgmt_config' executing...")
command = "{} -M {} --write-to-db".format(SONIC_CFGGEN_PATH, filename)
clicommon.run_command(command, display_cmd=True)
#FIXME: After config DB daemon for hostname and mgmt interface is implemented, we'll no longer need to do manual configuration here
config_data = parse_device_desc_xml(filename)
hostname = config_data['DEVICE_METADATA']['localhost']['hostname']
_change_hostname(hostname)
mgmt_conf = netaddr.IPNetwork(list(config_data['MGMT_INTERFACE'].keys())[0][1])
gw_addr = list(config_data['MGMT_INTERFACE'].values())[0]['gwaddr']
command = "ifconfig eth0 {} netmask {}".format(str(mgmt_conf.ip), str(mgmt_conf.netmask))
clicommon.run_command(command, display_cmd=True)
command = "ip route add default via {} dev eth0 table default".format(gw_addr)
clicommon.run_command(command, display_cmd=True, ignore_error=True)
command = "ip rule add from {} table default".format(str(mgmt_conf.ip))
clicommon.run_command(command, display_cmd=True, ignore_error=True)
command = "[ -f /var/run/dhclient.eth0.pid ] && kill `cat /var/run/dhclient.eth0.pid` && rm -f /var/run/dhclient.eth0.pid"
clicommon.run_command(command, display_cmd=True, ignore_error=True)
click.echo("Please note loaded setting will be lost after system reboot. To preserve setting, run `config save`.")
@config.command("load_minigraph")
@click.option('-y', '--yes', is_flag=True, callback=_abort_if_false,
expose_value=False, prompt='Reload config from minigraph?')
@click.option('-n', '--no_service_restart', default=False, is_flag=True, help='Do not restart docker services')
@clicommon.pass_db
def load_minigraph(db, no_service_restart):
"""Reconfigure based on minigraph."""
log.log_info("'load_minigraph' executing...")
#Stop services before config push
if not no_service_restart:
log.log_info("'load_minigraph' stopping services...")
_stop_services()
# For Single Asic platform the namespace list has the empty string
# for mulit Asic platform the empty string to generate the config
# for host
namespace_list = [DEFAULT_NAMESPACE]
num_npus = multi_asic.get_num_asics()
if num_npus > 1:
namespace_list += multi_asic.get_namespaces_from_linux()
for namespace in namespace_list:
if namespace is DEFAULT_NAMESPACE:
config_db = ConfigDBConnector()
cfggen_namespace_option = " "
ns_cmd_prefix = ""
else:
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
cfggen_namespace_option = " -n {}".format(namespace)
ns_cmd_prefix = "sudo ip netns exec {} ".format(namespace)
config_db.connect()
client = config_db.get_redis_client(config_db.CONFIG_DB)
client.flushdb()
if os.path.isfile('/etc/sonic/init_cfg.json'):
command = "{} -H -m -j /etc/sonic/init_cfg.json {} --write-to-db".format(SONIC_CFGGEN_PATH, cfggen_namespace_option)
else:
command = "{} -H -m --write-to-db {}".format(SONIC_CFGGEN_PATH, cfggen_namespace_option)
clicommon.run_command(command, display_cmd=True)
client.set(config_db.INIT_INDICATOR, 1)
# Update SONiC environmnet file
update_sonic_environment()
if os.path.isfile('/etc/sonic/acl.json'):
clicommon.run_command("acl-loader update full /etc/sonic/acl.json", display_cmd=True)
# Load port_config.json
try:
load_port_config(db.cfgdb, '/etc/sonic/port_config.json')
except Exception as e:
click.secho("Failed to load port_config.json, Error: {}".format(str(e)), fg='magenta')
# generate QoS and Buffer configs
clicommon.run_command("config qos reload --no-dynamic-buffer", display_cmd=True)
# get the device type
device_type = _get_device_type()
if device_type != 'MgmtToRRouter' and device_type != 'MgmtTsToR' and device_type != 'EPMS':
clicommon.run_command("pfcwd start_default", display_cmd=True)
# Write latest db version string into db
db_migrator='/usr/local/bin/db_migrator.py'
if os.path.isfile(db_migrator) and os.access(db_migrator, os.X_OK):
for namespace in namespace_list:
if namespace is DEFAULT_NAMESPACE:
cfggen_namespace_option = " "
else:
cfggen_namespace_option = " -n {}".format(namespace)
clicommon.run_command(db_migrator + ' -o set_version' + cfggen_namespace_option)
# We first run "systemctl reset-failed" to remove the "failed"
# status from all services before we attempt to restart them
if not no_service_restart:
_reset_failed_services()
#FIXME: After config DB daemon is implemented, we'll no longer need to restart every service.
log.log_info("'load_minigraph' restarting services...")
_restart_services()
click.echo("Please note setting loaded from minigraph will be lost after system reboot. To preserve setting, run `config save`.")
def load_port_config(config_db, port_config_path):
    """Apply admin_status settings from a port_config.json file.

    Returns silently when the file does not exist.  Raises Exception on a
    malformed file or on a port that is not defined on the device.  Ports
    whose desired admin_status already matches the running config are left
    untouched; the rest are flipped via `config interface startup/shutdown`.
    """
    if not os.path.isfile(port_config_path):
        return

    try:
        # Load port_config.json
        parsed = read_json_file(port_config_path)
    except Exception:
        raise Exception("Bad format: json file broken")

    # The file must be a non-empty array whose first element carries a PORT table.
    if not isinstance(parsed, list):
        raise Exception("Bad format: port_config is not an array")
    if len(parsed) == 0 or 'PORT' not in parsed[0]:
        raise Exception("Bad format: PORT table not exists")

    desired_ports = parsed[0]['PORT']

    # Every referenced port must already exist on this device.
    current_ports = {}
    for name in desired_ports:
        entry = config_db.get_entry('PORT', name)
        if not entry:
            raise Exception("Port {} is not defined in current device".format(name))
        current_ports[name] = entry

    # Flip admin_status only where the desired value differs from the current one.
    for name, desired in desired_ports.items():
        if 'admin_status' not in desired:
            continue
        if current_ports[name].get('admin_status') == desired['admin_status']:
            continue
        clicommon.run_command('config interface {} {}'.format(
            'startup' if desired['admin_status'] == 'up' else 'shutdown',
            name), display_cmd=True)
    return
#
# 'hostname' command
#
@config.command('hostname')
@click.argument('new_hostname', metavar='<new_hostname>', required=True)
def hostname(new_hostname):
    """Change device hostname without impacting the traffic."""
    # Persist the new hostname in CONFIG_DB first.
    db_conn = ConfigDBConnector()
    db_conn.connect()
    db_conn.mod_entry('DEVICE_METADATA' , 'localhost', {"hostname" : new_hostname})

    # Then apply it to the running system via the hostname-config service.
    try:
        clicommon.run_command("service hostname-config restart", display_cmd=True)
    except SystemExit as e:
        click.echo("Restarting hostname-config service failed with error {}".format(e))
        raise

    # Reload Monit configuration to pick up new hostname in case it changed
    click.echo("Reloading Monit configuration ...")
    clicommon.run_command("sudo monit reload")

    click.echo("Please note loaded setting will be lost after system reboot. To preserve setting, run `config save`.")
#
# 'synchronous_mode' command ('config synchronous_mode ...')
#
@config.command('synchronous_mode')
@click.argument('sync_mode', metavar='<enable|disable>', required=True)
def synchronous_mode(sync_mode):
    """ Enable or disable synchronous mode between orchagent and syncd \n
        swss restart required to apply the configuration \n
        Options to restart swss and apply the configuration: \n
        1. config save -y \n
           config reload -y \n
        2. systemctl restart swss
    """
    # Reject anything other than the two supported keywords up front.
    if sync_mode not in ('enable', 'disable'):
        raise click.BadParameter("Error: Invalid argument %s, expect either enable or disable" % sync_mode)

    db_conn = ConfigDBConnector()
    db_conn.connect()
    db_conn.mod_entry('DEVICE_METADATA' , 'localhost', {"synchronous_mode" : sync_mode})
    click.echo("""Wrote %s synchronous mode into CONFIG_DB, swss restart required to apply the configuration: \n
    Option 1. config save -y \n
              config reload -y \n
    Option 2. systemctl restart swss""" % sync_mode)
#
# 'portchannel' group ('config portchannel ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
# TODO add "hidden=True if this is a single ASIC platform, once we have click 7.0 in all branches.
@click.option('-n', '--namespace', help='Namespace name',
              required=True if multi_asic.is_multi_asic() else False, type=click.Choice(multi_asic.get_namespace_list()))
@click.pass_context
def portchannel(ctx, namespace):
    """Group for port channel configuration; stores a per-namespace DB handle in ctx.obj."""
    # Fall back to the default namespace when none was given.
    ns = DEFAULT_NAMESPACE if namespace is None else namespace

    db_conn = ConfigDBConnector(use_unix_socket_path=True, namespace=str(ns))
    db_conn.connect()
    ctx.obj = {'db': db_conn, 'namespace': str(ns)}
@portchannel.command('add')
@click.argument('portchannel_name', metavar='<portchannel_name>', required=True)
@click.option('--min-links', default=1, type=click.IntRange(1,1024))
@click.option('--fallback', default='false')
@click.pass_context
def add_portchannel(ctx, portchannel_name, min_links, fallback):
    """Add port channel"""
    # Validate the name before touching the DB.
    if is_portchannel_name_valid(portchannel_name) != True:
        ctx.fail("{} is invalid!, name should have prefix '{}' and suffix '{}'"
                 .format(portchannel_name, CFG_PORTCHANNEL_PREFIX, CFG_PORTCHANNEL_NO))

    db = ctx.obj['db']

    # Refuse to overwrite an existing port channel.
    if is_portchannel_present_in_db(db, portchannel_name):
        ctx.fail("{} already exists!".format(portchannel_name))

    # Base attributes; min_links/fallback are only recorded when non-default.
    attrs = {'admin_status': 'up',
             'mtu': '9100',
             'lacp_key': 'auto'}
    if min_links != 0:
        attrs['min_links'] = str(min_links)
    if fallback != 'false':
        attrs['fallback'] = 'true'

    db.set_entry('PORTCHANNEL', portchannel_name, attrs)
@portchannel.command('del')
@click.argument('portchannel_name', metavar='<portchannel_name>', required=True)
@click.pass_context
def remove_portchannel(ctx, portchannel_name):
    """Remove port channel"""
    # Validate the name before touching the DB.
    if is_portchannel_name_valid(portchannel_name) != True:
        ctx.fail("{} is invalid!, name should have prefix '{}' and suffix '{}'"
                 .format(portchannel_name, CFG_PORTCHANNEL_PREFIX, CFG_PORTCHANNEL_NO))

    db = ctx.obj['db']

    # Dont proceed if the port channel does not exist
    if is_portchannel_present_in_db(db, portchannel_name) is False:
        ctx.fail("{} is not present.".format(portchannel_name))

    # PORTCHANNEL_MEMBER keys unpack as (portchannel, member) pairs; refuse to
    # delete while any member remains.  any() short-circuits on the first match
    # instead of materializing a full list just to take its length.
    # NOTE(review): unlike other failures here this path uses click.echo, not
    # ctx.fail, so the exit status stays 0 — preserved for compatibility.
    if any(k == portchannel_name for k, v in db.get_table('PORTCHANNEL_MEMBER')):
        click.echo("Error: Portchannel {} contains members. Remove members before deleting Portchannel!".format(portchannel_name))
    else:
        db.set_entry('PORTCHANNEL', portchannel_name, None)
@portchannel.group(cls=clicommon.AbbreviationGroup, name='member')
@click.pass_context
def portchannel_member(ctx):
    # Container group for 'config portchannel member add/del'; no work of its own.
    pass
@portchannel_member.command('add')
@click.argument('portchannel_name', metavar='<portchannel_name>', required=True)
@click.argument('port_name', metavar='<port_name>', required=True)
@click.pass_context
def add_portchannel_member(ctx, portchannel_name, port_name):
    """Add member to port channel"""
    db = ctx.obj['db']

    # A mirror destination port cannot also be a LAG member.
    if clicommon.is_port_mirror_dst_port(db, port_name):
        ctx.fail("{} is configured as mirror destination port".format(port_name))

    # Check if the member interface given by user is valid in the namespace.
    if port_name.startswith("Ethernet") is False or interface_name_is_valid(db, port_name) is False:
        ctx.fail("Interface name is invalid. Please enter a valid interface name!!")

    # Dont proceed if the port channel name is not valid
    if is_portchannel_name_valid(portchannel_name) is False:
        ctx.fail("{} is invalid!, name should have prefix '{}' and suffix '{}'"
                 .format(portchannel_name, CFG_PORTCHANNEL_PREFIX, CFG_PORTCHANNEL_NO))

    # Dont proceed if the port channel does not exist
    if is_portchannel_present_in_db(db, portchannel_name) is False:
        ctx.fail("{} is not present.".format(portchannel_name))

    # Dont allow a port to be member of port channel if it is configured with an IP address
    # INTERFACE table has both plain port keys and (port, ip) tuple keys; only
    # the plain key indicates an IP-configured interface.
    for key,value in db.get_table('INTERFACE').items():
        if type(key) == tuple:
            continue
        if key == port_name:
            ctx.fail(" {} has ip address configured".format(port_name))
            return

    # Dont allow a port to be member of port channel if it is configured as a VLAN member
    # NOTE(review): iterating get_table() yields its keys, which unpack here as
    # (vlan, member) pairs.
    for k,v in db.get_table('VLAN_MEMBER'):
        if v == port_name:
            ctx.fail("%s Interface configured as VLAN_MEMBER under vlan : %s" %(port_name,str(k)))
            return

    # Dont allow a port to be member of port channel if it is already member of a port channel
    for k,v in db.get_table('PORTCHANNEL_MEMBER'):
        if v == port_name:
            ctx.fail("{} Interface is already member of {} ".format(v,k))

    # Dont allow a port to be member of port channel if its speed does not match with existing members
    for k,v in db.get_table('PORTCHANNEL_MEMBER'):
        if k == portchannel_name:
            member_port_entry = db.get_entry('PORT', v)
            port_entry = db.get_entry('PORT', port_name)

            # NOTE(review): get_entry may return an empty dict rather than None
            # for a missing key — confirm; this check only filters None.
            if member_port_entry is not None and port_entry is not None:
                member_port_speed = member_port_entry.get(PORT_SPEED)

                port_speed = port_entry.get(PORT_SPEED)
                if member_port_speed != port_speed:
                    ctx.fail("Port speed of {} is different than the other members of the portchannel {}"
                             .format(port_name, portchannel_name))

    # Dont allow a port to be member of port channel if its MTU does not match with portchannel
    portchannel_entry = db.get_entry('PORTCHANNEL', portchannel_name)
    if portchannel_entry and portchannel_entry.get(PORT_MTU) is not None :
        port_entry = db.get_entry('PORT', port_name)

        if port_entry and port_entry.get(PORT_MTU) is not None:
            port_mtu = port_entry.get(PORT_MTU)

            portchannel_mtu = portchannel_entry.get(PORT_MTU)
            if portchannel_mtu != port_mtu:
                ctx.fail("Port MTU of {} is different than the {} MTU size"
                         .format(port_name, portchannel_name))

    # Dont allow a port to be member of port channel if its TPID is not at default 0x8100
    # If TPID is supported at LAG level, when member is added, the LAG's TPID is applied to the
    # new member by SAI.
    port_entry = db.get_entry('PORT', port_name)
    if port_entry and port_entry.get(PORT_TPID) is not None:
        port_tpid = port_entry.get(PORT_TPID)
        if port_tpid != DEFAULT_TPID:
            ctx.fail("Port TPID of {}: {} is not at default 0x8100".format(port_name, port_tpid))

    # All validations passed: record the membership (value is a placeholder).
    db.set_entry('PORTCHANNEL_MEMBER', (portchannel_name, port_name),
                 {'NULL': 'NULL'})
@portchannel_member.command('del')
@click.argument('portchannel_name', metavar='<portchannel_name>', required=True)
@click.argument('port_name', metavar='<port_name>', required=True)
@click.pass_context
def del_portchannel_member(ctx, portchannel_name, port_name):
    """Remove member from portchannel"""
    # Dont proceed if the port channel name is not valid
    if is_portchannel_name_valid(portchannel_name) is False:
        ctx.fail("{} is invalid!, name should have prefix '{}' and suffix '{}'"
                 .format(portchannel_name, CFG_PORTCHANNEL_PREFIX, CFG_PORTCHANNEL_NO))

    db = ctx.obj['db']

    # Check if the member interface given by user is valid in the namespace.
    if interface_name_is_valid(db, port_name) is False:
        ctx.fail("Interface name is invalid. Please enter a valid interface name!!")

    # Dont proceed if the port channel does not exist
    if is_portchannel_present_in_db(db, portchannel_name) is False:
        ctx.fail("{} is not present.".format(portchannel_name))

    # Dont proceed if the the port is not an existing member of the port channel
    if not is_port_member_of_this_portchannel(db, port_name, portchannel_name):
        ctx.fail("{} is not a member of portchannel {}".format(port_name, portchannel_name))

    # Clear the entry under both key representations — the tuple key and the
    # 'name|port' string key.  NOTE(review): presumably both forms can exist in
    # the DB; confirm before consolidating to a single delete.
    db.set_entry('PORTCHANNEL_MEMBER', (portchannel_name, port_name), None)
    db.set_entry('PORTCHANNEL_MEMBER', portchannel_name + '|' + port_name, None)
#
# 'mirror_session' group ('config mirror_session ...')
#
@config.group(cls=clicommon.AbbreviationGroup, name='mirror_session')
def mirror_session():
    # Container group for 'config mirror_session ...'; no work of its own.
    pass
#
# 'add' subgroup ('config mirror_session add ...')
#
@mirror_session.command('add')
@click.argument('session_name', metavar='<session_name>', required=True)
@click.argument('src_ip', metavar='<src_ip>', callback=validate_ipv4_address, required=True)
@click.argument('dst_ip', metavar='<dst_ip>', callback=validate_ipv4_address, required=True)
@click.argument('dscp', metavar='<dscp>', type=DSCP_RANGE, required=True)
@click.argument('ttl', metavar='<ttl>', type=TTL_RANGE, required=True)
@click.argument('gre_type', metavar='[gre_type]', callback=validate_gre_type, required=False)
@click.argument('queue', metavar='[queue]', type=QUEUE_RANGE, required=False)
@click.option('--policer')
def add(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer):
    """ Add ERSPAN mirror session.(Legacy support) """
    # Flat legacy form: delegate to add_erspan without src_port/direction.
    add_erspan(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer)
# 'config mirror_session erspan ...' subgroup.
@mirror_session.group(cls=clicommon.AbbreviationGroup, name='erspan')
@click.pass_context
def erspan(ctx):
    """ ERSPAN mirror_session """
    pass
#
# 'add' subcommand
#
# Thin wrapper: click does the argument parsing/validation; the actual DB
# programming happens in add_erspan().
@erspan.command('add')
@click.argument('session_name', metavar='<session_name>', required=True)
@click.argument('src_ip', metavar='<src_ip>', callback=validate_ipv4_address, required=True)
@click.argument('dst_ip', metavar='<dst_ip>', callback=validate_ipv4_address,required=True)
@click.argument('dscp', metavar='<dscp>', type=DSCP_RANGE, required=True)
@click.argument('ttl', metavar='<ttl>', type=TTL_RANGE, required=True)
@click.argument('gre_type', metavar='[gre_type]', callback=validate_gre_type, required=False)
@click.argument('queue', metavar='[queue]', type=QUEUE_RANGE, required=False)
@click.argument('src_port', metavar='[src_port]', required=False)
@click.argument('direction', metavar='[direction]', required=False)
@click.option('--policer')
def add(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer, src_port, direction):
    """ Add ERSPAN mirror session """
    add_erspan(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer, src_port, direction)
def gather_session_info(session_info, policer, queue, src_port, direction):
    """Fold the optional mirror-session attributes into *session_info*.

    Only attributes that were actually supplied are recorded. 'direction'
    (defaulting to "both", stored upper-cased) is written only when a
    source port is present. Mutates and returns *session_info*.
    """
    if policer:
        session_info['policer'] = policer
    if queue:
        session_info['queue'] = queue
    if src_port:
        # In alias mode, translate each comma-separated alias to its SONiC name.
        if clicommon.get_interface_naming_mode() == "alias":
            src_port = ",".join(interface_alias_to_name(None, alias)
                                for alias in src_port.split(","))
        session_info['src_port'] = src_port
        session_info['direction'] = (direction or "both").upper()
    return session_info
def add_erspan(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer, src_port=None, direction=None):
    # Build an ERSPAN MIRROR_SESSION entry and program it into CONFIG_DB.
    session_info = {
        "type" : "ERSPAN",
        "src_ip": src_ip,
        "dst_ip": dst_ip,
        "dscp": dscp,
        "ttl": ttl
    }
    if gre_type:
        session_info['gre_type'] = gre_type
    # Merge the optional attributes (policer/queue/src_port/direction).
    session_info = gather_session_info(session_info, policer, queue, src_port, direction)
    """
    For multi-npu platforms we need to program all front asic namespaces
    """
    namespaces = multi_asic.get_all_namespaces()
    if not namespaces['front_ns']:
        # Single-ASIC: one CONFIG_DB in the default namespace.
        config_db = ConfigDBConnector()
        config_db.connect()
        if validate_mirror_session_config(config_db, session_name, None, src_port, direction) is False:
            return
        config_db.set_entry("MIRROR_SESSION", session_name, session_info)
    else:
        # Multi-ASIC: validate and program each front-end namespace; bail out
        # on the first namespace that fails validation.
        per_npu_configdb = {}
        for front_asic_namespaces in namespaces['front_ns']:
            per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces)
            per_npu_configdb[front_asic_namespaces].connect()
            if validate_mirror_session_config(per_npu_configdb[front_asic_namespaces], session_name, None, src_port, direction) is False:
                return
            per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, session_info)
# 'config mirror_session span ...' subgroup.
@mirror_session.group(cls=clicommon.AbbreviationGroup, name='span')
@click.pass_context
def span(ctx):
    """ SPAN mirror session """
    pass
# Thin wrapper: click does the argument parsing; the work happens in add_span().
@span.command('add')
@click.argument('session_name', metavar='<session_name>', required=True)
@click.argument('dst_port', metavar='<dst_port>', required=True)
@click.argument('src_port', metavar='[src_port]', required=False)
@click.argument('direction', metavar='[direction]', required=False)
@click.argument('queue', metavar='[queue]', type=QUEUE_RANGE, required=False)
@click.option('--policer')
def add(session_name, dst_port, src_port, direction, queue, policer):
    """ Add SPAN mirror session """
    add_span(session_name, dst_port, src_port, direction, queue, policer)
def add_span(session_name, dst_port, src_port, direction, queue, policer):
    # Build a SPAN MIRROR_SESSION entry and program it into CONFIG_DB.
    if clicommon.get_interface_naming_mode() == "alias":
        # Keep the user-supplied alias for the error message: the original
        # overwrote dst_port first and then printed 'None' on lookup failure.
        dst_port_alias = dst_port
        dst_port = interface_alias_to_name(None, dst_port_alias)
        if dst_port is None:
            click.echo("Error: Destination Interface {} is invalid".format(dst_port_alias))
            return
    session_info = {
        "type" : "SPAN",
        "dst_port": dst_port,
    }
    # Merge the optional attributes (policer/queue/src_port/direction).
    session_info = gather_session_info(session_info, policer, queue, src_port, direction)
    """
    For multi-npu platforms we need to program all front asic namespaces
    """
    namespaces = multi_asic.get_all_namespaces()
    if not namespaces['front_ns']:
        # Single-ASIC: one CONFIG_DB in the default namespace.
        config_db = ConfigDBConnector()
        config_db.connect()
        if validate_mirror_session_config(config_db, session_name, dst_port, src_port, direction) is False:
            return
        config_db.set_entry("MIRROR_SESSION", session_name, session_info)
    else:
        # Multi-ASIC: validate and program each front-end namespace; bail out
        # on the first namespace that fails validation.
        per_npu_configdb = {}
        for front_asic_namespaces in namespaces['front_ns']:
            per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces)
            per_npu_configdb[front_asic_namespaces].connect()
            if validate_mirror_session_config(per_npu_configdb[front_asic_namespaces], session_name, dst_port, src_port, direction) is False:
                return
            per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, session_info)
@mirror_session.command()
@click.argument('session_name', metavar='<session_name>', required=True)
def remove(session_name):
    """ Delete mirror session """
    """
    For multi-npu platforms we need to program all front asic namespaces
    """
    # Delete the session entry from every front-end namespace (or from the
    # single default-namespace CONFIG_DB); set_entry(..., None) removes it.
    namespaces = multi_asic.get_all_namespaces()
    if not namespaces['front_ns']:
        config_db = ConfigDBConnector()
        config_db.connect()
        config_db.set_entry("MIRROR_SESSION", session_name, None)
    else:
        per_npu_configdb = {}
        for front_asic_namespaces in namespaces['front_ns']:
            per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces)
            per_npu_configdb[front_asic_namespaces].connect()
            per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, None)
#
# 'pfcwd' group ('config pfcwd ...')
#
# 'config pfcwd ...' command group; subcommands shell out to the pfcwd tool.
@config.group(cls=clicommon.AbbreviationGroup)
def pfcwd():
    """Configure pfc watchdog """
    pass
@pfcwd.command()
@click.option('--action', '-a', type=click.Choice(['drop', 'forward', 'alert']))
@click.option('--restoration-time', '-r', type=click.IntRange(100, 60000))
@click.option('--verbose', is_flag=True, help="Enable verbose output")
@click.argument('ports', nargs=-1)
@click.argument('detection-time', type=click.IntRange(100, 5000))
def start(action, restoration_time, ports, detection_time, verbose):
    """
    Start PFC watchdog on port(s). To config all ports, use all as input.
    Example:
    config pfcwd start --action drop all 400 --restoration-time 400
    """
    # Assemble the 'pfcwd start' command line piece by piece from the
    # supplied options, then delegate to the external pfcwd utility.
    cmd = "pfcwd start"
    if action:
        cmd += " --action {}".format(action)
    if ports:
        # Deduplicates and drops stray literal argument-name tokens.
        # NOTE(review): joining a set makes the resulting port order
        # nondeterministic — presumably pfcwd accepts any order; confirm.
        ports = set(ports) - set(['ports', 'detection-time'])
        cmd += " {}".format(' '.join(ports))
    if detection_time:
        cmd += " {}".format(detection_time)
    if restoration_time:
        cmd += " --restoration-time {}".format(restoration_time)
    clicommon.run_command(cmd, display_cmd=verbose)
@pfcwd.command()
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def stop(verbose):
    """ Stop PFC watchdog """
    # Delegate directly to the external pfcwd utility.
    clicommon.run_command("pfcwd stop", display_cmd=verbose)
@pfcwd.command()
@click.option('--verbose', is_flag=True, help="Enable verbose output")
@click.argument('poll_interval', type=click.IntRange(100, 3000))
def interval(poll_interval, verbose):
    """ Set PFC watchdog counter polling interval (ms) """
    # Delegate directly to the external pfcwd utility.
    clicommon.run_command("pfcwd interval {}".format(poll_interval), display_cmd=verbose)
@pfcwd.command('counter_poll')
@click.option('--verbose', is_flag=True, help="Enable verbose output")
@click.argument('counter_poll', type=click.Choice(['enable', 'disable']))
def counter_poll(counter_poll, verbose):
    """ Enable/disable counter polling """
    # Delegate directly to the external pfcwd utility.
    clicommon.run_command("pfcwd counter_poll {}".format(counter_poll), display_cmd=verbose)
@pfcwd.command('big_red_switch')
@click.option('--verbose', is_flag=True, help="Enable verbose output")
@click.argument('big_red_switch', type=click.Choice(['enable', 'disable']))
def big_red_switch(big_red_switch, verbose):
    """ Enable/disable BIG_RED_SWITCH mode """
    # Delegate directly to the external pfcwd utility.
    clicommon.run_command("pfcwd big_red_switch {}".format(big_red_switch), display_cmd=verbose)
@pfcwd.command('start_default')
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def start_default(verbose):
    """ Start PFC WD by default configurations """
    # Delegate directly to the external pfcwd utility.
    clicommon.run_command("pfcwd start_default", display_cmd=verbose)
#
# 'cbf' group ('config cbf ...')
#
# 'config cbf ...' command group (class-based forwarding).
@config.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def cbf(ctx):
    """CBF-related configuration tasks"""
    pass
@cbf.command('clear')
def clear():
    """Clear CBF configuration"""
    # Log intent, then delegate the actual table clearing to _clear_cbf().
    log.log_info("'cbf clear' executing...")
    _clear_cbf()
@cbf.command('reload')
@click.pass_context
@click.option(
    '--json-data', type=click.STRING,
    help="json string with additional data, valid with --dry-run option"
)
@click.option(
    '--dry_run', type=click.STRING,
    help="Dry run, writes config to the given file"
)
def reload(ctx, dry_run, json_data):
    """Reload CBF configuration"""
    log.log_info("'cbf reload' executing...")
    _clear_cbf()
    _, hwsku_path = device_info.get_paths_to_platform_and_hwsku_dirs()
    sonic_version_file = device_info.get_sonic_version_file()
    # Default: sonic-cfggen renders from CONFIG_DB and writes results back.
    from_db = "-d --write-to-db"
    if dry_run:
        # Dry run: render from optional --json-data only; output goes to file.
        from_db = "--additional-data \'{}\'".format(json_data) if json_data else ""
    # Multi-ASIC devices render/apply once per front-end namespace.
    namespace_list = [DEFAULT_NAMESPACE]
    if multi_asic.get_num_asics() > 1:
        namespace_list = multi_asic.get_namespaces_from_linux()
    for ns in namespace_list:
        if ns is DEFAULT_NAMESPACE:
            asic_id_suffix = ""
            config_db = ConfigDBConnector()
        else:
            asic_id = multi_asic.get_asic_id_from_name(ns)
            if asic_id is None:
                click.secho(
                    "Command 'cbf reload' failed with invalid namespace '{}'".
                    format(ns),
                    fg="yellow"
                )
                raise click.Abort()
            asic_id_suffix = str(asic_id)
            config_db = ConfigDBConnector(
                use_unix_socket_path=True, namespace=ns
            )
        config_db.connect()
        # Per-ASIC template lives under the hwsku directory (suffix selects ASIC).
        cbf_template_file = os.path.join(hwsku_path, asic_id_suffix, "cbf.json.j2")
        if os.path.isfile(cbf_template_file):
            cmd_ns = "" if ns is DEFAULT_NAMESPACE else "-n {}".format(ns)
            fname = "{}{}".format(dry_run, asic_id_suffix) if dry_run else "config-db"
            command = "{} {} {} -t {},{} -y {}".format(
                SONIC_CFGGEN_PATH, cmd_ns, from_db,
                cbf_template_file, fname, sonic_version_file
            )
            # Apply the configuration
            clicommon.run_command(command, display_cmd=True)
        else:
            click.secho("CBF definition template not found at {}".format(
                cbf_template_file
            ), fg="yellow")
#
# 'qos' group ('config qos ...')
#
# 'config qos ...' command group.
@config.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def qos(ctx):
    """QoS-related configuration tasks"""
    pass
@qos.command('clear')
def clear():
    """Clear QoS configuration"""
    # Log intent, then delegate the actual table clearing to _clear_qos().
    log.log_info("'qos clear' executing...")
    _clear_qos()
def _update_buffer_calculation_model(config_db, model):
"""Update the buffer calculation model into CONFIG_DB"""
buffer_model_changed = False
device_metadata = config_db.get_entry('DEVICE_METADATA', 'localhost')
if device_metadata.get('buffer_model') != model:
buffer_model_changed = True
device_metadata['buffer_model'] = model
config_db.set_entry('DEVICE_METADATA', 'localhost', device_metadata)
return buffer_model_changed
@qos.command('reload')
@click.pass_context
@click.option('--no-dynamic-buffer', is_flag=True, help="Disable dynamic buffer calculation")
@click.option(
    '--json-data', type=click.STRING,
    help="json string with additional data, valid with --dry-run option"
)
@click.option(
    '--dry_run', type=click.STRING,
    help="Dry run, writes config to the given file"
)
def reload(ctx, no_dynamic_buffer, dry_run, json_data):
    """Reload QoS configuration"""
    log.log_info("'qos reload' executing...")
    _clear_qos()
    _, hwsku_path = device_info.get_paths_to_platform_and_hwsku_dirs()
    sonic_version_file = device_info.get_sonic_version_file()
    # Default: sonic-cfggen renders from CONFIG_DB and writes results back.
    from_db = "-d --write-to-db"
    if dry_run:
        # Dry run: render from optional --json-data only; output goes to file.
        from_db = "--additional-data \'{}\'".format(json_data) if json_data else ""
    # Multi-ASIC devices render/apply once per front-end namespace.
    namespace_list = [DEFAULT_NAMESPACE]
    if multi_asic.get_num_asics() > 1:
        namespace_list = multi_asic.get_namespaces_from_linux()
    buffer_model_updated = False
    vendors_supporting_dynamic_buffer = ["mellanox"]
    for ns in namespace_list:
        if ns is DEFAULT_NAMESPACE:
            asic_id_suffix = ""
            config_db = ConfigDBConnector()
        else:
            asic_id = multi_asic.get_asic_id_from_name(ns)
            if asic_id is None:
                click.secho(
                    "Command 'qos reload' failed with invalid namespace '{}'".
                    format(ns),
                    fg="yellow"
                )
                raise click.Abort()
            asic_id_suffix = str(asic_id)
            config_db = ConfigDBConnector(
                use_unix_socket_path=True, namespace=ns
            )
        config_db.connect()
        # Pick the dynamic or traditional buffer template and record the model
        # choice in DEVICE_METADATA.
        # NOTE(review): 'asic_type' is not defined in this function; presumably
        # a module-level global set at startup — confirm.
        if not no_dynamic_buffer and asic_type in vendors_supporting_dynamic_buffer:
            buffer_template_file = os.path.join(hwsku_path, asic_id_suffix, "buffers_dynamic.json.j2")
            buffer_model_updated |= _update_buffer_calculation_model(config_db, "dynamic")
        else:
            buffer_template_file = os.path.join(hwsku_path, asic_id_suffix, "buffers.json.j2")
            if asic_type in vendors_supporting_dynamic_buffer:
                buffer_model_updated |= _update_buffer_calculation_model(config_db, "traditional")
        if os.path.isfile(buffer_template_file):
            qos_template_file = os.path.join(
                hwsku_path, asic_id_suffix, "qos.json.j2"
            )
            if os.path.isfile(qos_template_file):
                cmd_ns = "" if ns is DEFAULT_NAMESPACE else "-n {}".format(ns)
                fname = "{}{}".format(dry_run, asic_id_suffix) if dry_run else "config-db"
                command = "{} {} {} -t {},{} -t {},{} -y {}".format(
                    SONIC_CFGGEN_PATH, cmd_ns, from_db, buffer_template_file,
                    fname, qos_template_file, fname, sonic_version_file
                )
                # Apply the configurations only when both buffer and qos
                # configuration files are present
                clicommon.run_command(command, display_cmd=True)
            else:
                click.secho("QoS definition template not found at {}".format(
                    qos_template_file
                ), fg="yellow")
        else:
            click.secho("Buffer definition template not found at {}".format(
                buffer_template_file
            ), fg="yellow")
    if buffer_model_updated:
        print("Buffer calculation model updated, restarting swss is required to take effect")
def is_dynamic_buffer_enabled(config_db):
    """Return whether the current system supports dynamic buffer calculation"""
    metadata = config_db.get_entry('DEVICE_METADATA', 'localhost')
    return metadata.get('buffer_model') == 'dynamic'
#
# 'warm_restart' group ('config warm_restart ...')
#
@config.group(cls=clicommon.AbbreviationGroup, name='warm_restart')
@click.pass_context
@click.option('-s', '--redis-unix-socket-path', help='unix socket path for redis connection')
def warm_restart(ctx, redis_unix_socket_path):
    """warm_restart-related configuration tasks"""
    # Note: redis_unix_socket_path is a path string, and the ground truth is now from database_config.json.
    # We only use it as a bool indicator on either unix_socket_path or tcp port
    use_unix_socket_path = bool(redis_unix_socket_path)
    config_db = ConfigDBConnector(use_unix_socket_path=use_unix_socket_path)
    config_db.connect(wait_for_init=False)
    # warm restart enable/disable config is put in stateDB, not persistent across cold reboot, not saved to config_DB.json file
    state_db = SonicV2Connector(use_unix_socket_path=use_unix_socket_path)
    state_db.connect(state_db.STATE_DB, False)
    TABLE_NAME_SEPARATOR = '|'
    prefix = 'WARM_RESTART_ENABLE_TABLE' + TABLE_NAME_SEPARATOR
    # Shared state for subcommands: CONFIG_DB handle, STATE_DB handle, key prefix.
    ctx.obj = {'db': config_db, 'state_db': state_db, 'prefix': prefix}
@warm_restart.command('enable')
@click.argument('module', metavar='<module>', default='system', required=False)
@click.pass_context
def warm_restart_enable(ctx, module):
    # Enable warm restart for <module> ('system' or any configured FEATURE).
    state_db = ctx.obj['state_db']
    config_db = ctx.obj['db']
    feature_table = config_db.get_table('FEATURE')
    if module != 'system' and module not in feature_table:
        exit('Feature {} is unknown'.format(module))
    prefix = ctx.obj['prefix']
    _hash = '{}{}'.format(prefix, module)
    # Written to STATE_DB, so the setting does not survive a cold reboot.
    state_db.set(state_db.STATE_DB, _hash, 'enable', 'true')
    state_db.close(state_db.STATE_DB)
# Renamed from 'warm_restart_enable': the original reused the enable handler's
# function name, silently shadowing that function object at module level.
# The click command name ('disable') and its behavior are unchanged.
@warm_restart.command('disable')
@click.argument('module', metavar='<module>', default='system', required=False)
@click.pass_context
def warm_restart_disable(ctx, module):
    # Disable warm restart for <module> ('system' or any configured FEATURE).
    state_db = ctx.obj['state_db']
    config_db = ctx.obj['db']
    feature_table = config_db.get_table('FEATURE')
    if module != 'system' and module not in feature_table:
        exit('Feature {} is unknown'.format(module))
    prefix = ctx.obj['prefix']
    _hash = '{}{}'.format(prefix, module)
    # Written to STATE_DB, so the setting does not survive a cold reboot.
    state_db.set(state_db.STATE_DB, _hash, 'enable', 'false')
    state_db.close(state_db.STATE_DB)
@warm_restart.command('neighsyncd_timer')
@click.argument('seconds', metavar='<seconds>', required=True, type=int)
@click.pass_context
def warm_restart_neighsyncd_timer(ctx, seconds):
    # Set the neighsyncd reconciliation timer (seconds) for swss warm restart.
    db = ctx.obj['db']
    # Inclusive bound check: the original 'seconds not in range(1, 9999)'
    # rejected 9999 itself, contradicting the error message below.
    if not 1 <= seconds <= 9999:
        ctx.fail("neighsyncd warm restart timer must be in range 1-9999")
    db.mod_entry('WARM_RESTART', 'swss', {'neighsyncd_timer': seconds})
@warm_restart.command('bgp_timer')
@click.argument('seconds', metavar='<seconds>', required=True, type=int)
@click.pass_context
def warm_restart_bgp_timer(ctx, seconds):
    # Set the bgp reconciliation timer (seconds) for warm restart.
    db = ctx.obj['db']
    # Inclusive bound check: the original 'seconds not in range(1, 3600)'
    # rejected 3600 itself, contradicting the error message below.
    if not 1 <= seconds <= 3600:
        ctx.fail("bgp warm restart timer must be in range 1-3600")
    db.mod_entry('WARM_RESTART', 'bgp', {'bgp_timer': seconds})
@warm_restart.command('teamsyncd_timer')
@click.argument('seconds', metavar='<seconds>', required=True, type=int)
@click.pass_context
def warm_restart_teamsyncd_timer(ctx, seconds):
    # Set the teamsyncd reconciliation timer (seconds) for teamd warm restart.
    db = ctx.obj['db']
    # Inclusive bound check: the original 'seconds not in range(1, 3600)'
    # rejected 3600 itself, contradicting the error message below.
    if not 1 <= seconds <= 3600:
        ctx.fail("teamsyncd warm restart timer must be in range 1-3600")
    db.mod_entry('WARM_RESTART', 'teamd', {'teamsyncd_timer': seconds})
@warm_restart.command('bgp_eoiu')
@click.argument('enable', metavar='<enable>', default='true', required=False, type=click.Choice(["true", "false"]))
@click.pass_context
def warm_restart_bgp_eoiu(ctx, enable):
    # Toggle the bgp_eoiu flag in the WARM_RESTART|bgp entry.
    # NOTE(review): presumably EOIU = BGP end-of-initial-update marker
    # handling during warm restart — confirm against bgp docs.
    db = ctx.obj['db']
    db.mod_entry('WARM_RESTART', 'bgp', {'bgp_eoiu': enable})
def mvrf_restart_services():
    """Restart interfaces-config service and NTP service when mvrf is changed"""
    # When mvrf is enabled, eth0 should be moved to mvrf; when it is disabled,
    # move it back to default vrf. Restarting the "interfaces-config" service
    # will recreate the /etc/network/interfaces file and restart the
    # "networking" service that takes care of the eth0 movement.
    # NTP service should also be restarted to rerun the NTP service with or
    # without "cgexec" accordingly.
    for cmd in ("service ntp stop",
                "systemctl restart interfaces-config",
                "service ntp start"):
        os.system(cmd)
def vrf_add_management_vrf(config_db):
    """Enable management vrf in config DB"""
    entry = config_db.get_entry('MGMT_VRF_CONFIG', "vrf_global")
    # .get() avoids a KeyError when the entry exists without 'mgmtVrfEnabled'
    # (the original indexed the field directly).
    if entry and entry.get('mgmtVrfEnabled') == 'true':
        click.echo("ManagementVRF is already Enabled.")
        return None
    config_db.mod_entry('MGMT_VRF_CONFIG', "vrf_global", {"mgmtVrfEnabled": "true"})
    mvrf_restart_services()
    """
    The regular expression for grep in below cmd is to match eth0 line in /proc/net/route, sample file:
    $ cat /proc/net/route
    Iface   Destination     Gateway         Flags   RefCnt  Use     Metric  Mask    MTU     Window  IRTT
    eth0    00000000        01803B0A        0003    0       0       202     00000000        0       0       0
    """
    cmd = r"cat /proc/net/route | grep -E \"eth0\s+00000000\s+[0-9A-Z]+\s+[0-9]+\s+[0-9]+\s+[0-9]+\s+202\" | wc -l"
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    output = proc.communicate()
    # If the metric-202 default route via eth0 still exists, remove it so the
    # default-vrf route does not bypass the management VRF.
    if int(output[0]) >= 1:
        cmd="ip -4 route del default dev eth0 metric 202"
        proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
        proc.communicate()
        if proc.returncode != 0:
            click.echo("Could not delete eth0 route")
def vrf_delete_management_vrf(config_db):
    """Disable management vrf in config DB"""
    entry = config_db.get_entry('MGMT_VRF_CONFIG', "vrf_global")
    # .get() avoids a KeyError when the entry exists without 'mgmtVrfEnabled'
    # (the original indexed the field directly).
    if not entry or entry.get('mgmtVrfEnabled') == 'false':
        click.echo("ManagementVRF is already Disabled.")
        return None
    config_db.mod_entry('MGMT_VRF_CONFIG', "vrf_global", {"mgmtVrfEnabled": "false"})
    mvrf_restart_services()
# 'config snmpagentaddress ...' group: open CONFIG_DB once and share the
# handle with the subcommands through ctx.obj.
@config.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def snmpagentaddress(ctx):
    """SNMP agent listening IP address, port, vrf configuration"""
    config_db = ConfigDBConnector()
    config_db.connect()
    ctx.obj = {'db': config_db}
# Map an ipaddress version number (4/6) to the matching netifaces address family.
ip_family = {4: AF_INET, 6: AF_INET6}
@snmpagentaddress.command('add')
@click.argument('agentip', metavar='<SNMP AGENT LISTENING IP Address>', required=True)
@click.option('-p', '--port', help="SNMP AGENT LISTENING PORT")
@click.option('-v', '--vrf', help="VRF Name mgmt/DataVrfName/None")
@click.pass_context
def add_snmp_agent_address(ctx, agentip, port, vrf):
    """Add the SNMP agent listening IP:Port%Vrf configuration"""
    #Construct SNMP_AGENT_ADDRESS_CONFIG table key in the format ip|<port>|<vrf>
    if not clicommon.is_ipaddress(agentip):
        click.echo("Invalid IP address")
        return False
    config_db = ctx.obj['db']
    if not vrf:
        entry = config_db.get_entry('MGMT_VRF_CONFIG', "vrf_global")
        # .get() avoids a KeyError when the entry lacks 'mgmtVrfEnabled'.
        if entry and entry.get('mgmtVrfEnabled') == 'true':
            click.echo("ManagementVRF is Enabled. Provide vrf.")
            return False
    # The agent IP must be configured on some local interface.
    found = 0
    ip = ipaddress.ip_address(agentip)
    for intf in netifaces.interfaces():
        ipaddresses = netifaces.ifaddresses(intf)
        if ip_family[ip.version] in ipaddresses:
            for ipaddr in ipaddresses[ip_family[ip.version]]:
                if agentip.lower() == ipaddr['addr'].lower():
                    found = 1
                    break
        if found == 1:
            break
    else:
        # for/else: loop finished without break, i.e. no interface carries it.
        click.echo("IP address is not available")
        return

    key = agentip+'|'
    if port:
        key = key+port
    #snmpd does not start if we have two entries with same ip and port.
    key1 = "SNMP_AGENT_ADDRESS_CONFIG|" + key + '*'
    entry = config_db.get_keys(key1)
    if entry:
        # Only append ":port" when a port was given — the original concatenated
        # None here and raised a TypeError when '-p' was omitted.
        ip_port = agentip + (":" + port if port else "")
        click.echo("entry with {} already exists ".format(ip_port))
        return
    key = key+'|'
    if vrf:
        key = key+vrf
    config_db.set_entry('SNMP_AGENT_ADDRESS_CONFIG', key, {})
    #Restarting the SNMP service will regenerate snmpd.conf and rerun snmpd
    cmd="systemctl restart snmp"
    os.system (cmd)
@snmpagentaddress.command('del')
@click.argument('agentip', metavar='<SNMP AGENT LISTENING IP Address>', required=True)
@click.option('-p', '--port', help="SNMP AGENT LISTENING PORT")
@click.option('-v', '--vrf', help="VRF Name mgmt/DataVrfName/None")
@click.pass_context
def del_snmp_agent_address(ctx, agentip, port, vrf):
    """Delete the SNMP agent listening IP:Port%Vrf configuration"""
    # Key layout is ip|<port>|<vrf>; omitted parts are left empty.
    key = "{}|{}|{}".format(agentip, port or '', vrf or '')
    config_db = ctx.obj['db']
    config_db.set_entry('SNMP_AGENT_ADDRESS_CONFIG', key, None)
    # Restart SNMP so snmpd.conf is regenerated without the deleted entry.
    os.system("systemctl restart snmp")
# 'config snmptrap ...' group: open CONFIG_DB once and share the handle
# with the subcommands through ctx.obj.
@config.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def snmptrap(ctx):
    """SNMP Trap server configuration to send traps"""
    config_db = ConfigDBConnector()
    config_db.connect()
    ctx.obj = {'db': config_db}
@snmptrap.command('modify')
@click.argument('ver', metavar='<SNMP Version>', type=click.Choice(['1', '2', '3']), required=True)
@click.argument('serverip', metavar='<SNMP TRAP SERVER IP Address>', required=True)
@click.option('-p', '--port', help="SNMP Trap Server port, default 162", default="162")
@click.option('-v', '--vrf', help="VRF Name mgmt/DataVrfName/None", default="None")
@click.option('-c', '--comm', help="Community", default="public")
@click.pass_context
def modify_snmptrap_server(ctx, ver, serverip, port, vrf, comm):
    """Modify the SNMP Trap server configuration"""
    #SNMP_TRAP_CONFIG for each SNMP version
    config_db = ctx.obj['db']
    # 'ver' is restricted to '1'/'2'/'3' by click.Choice; pick the matching row.
    # By default, v1TrapDest value in snmp.yml is "NotConfigured". Modify it.
    dest_key = {"1": "v1TrapDest", "2": "v2TrapDest", "3": "v3TrapDest"}[ver]
    config_db.mod_entry('SNMP_TRAP_CONFIG', dest_key,
                        {"DestIp": serverip, "DestPort": port, "vrf": vrf, "Community": comm})
    # Restart SNMP so the new trap destination takes effect.
    os.system("systemctl restart snmp")
@snmptrap.command('del')
@click.argument('ver', metavar='<SNMP Version>', type=click.Choice(['1', '2', '3']), required=True)
@click.pass_context
def delete_snmptrap_server(ctx, ver):
    """Delete the SNMP Trap server configuration"""
    config_db = ctx.obj['db']
    # 'ver' is restricted to '1'/'2'/'3' by click.Choice; pick the matching row.
    dest_key = {"1": "v1TrapDest", "2": "v2TrapDest", "3": "v3TrapDest"}[ver]
    config_db.mod_entry('SNMP_TRAP_CONFIG', dest_key, None)
    # Restart SNMP so the removed trap destination takes effect.
    os.system("systemctl restart snmp")
#
# 'snmp' group ('config snmp ...')
#
# 'config snmp ...' command group; community/contact subgroups attach below.
@config.group(cls=clicommon.AbbreviationGroup, name='snmp')
@clicommon.pass_db
def snmp(db):
    """SNMP configuration tasks"""
# 'config snmp community ...' subgroup.
@snmp.group(cls=clicommon.AbbreviationGroup)
@clicommon.pass_db
def community(db):
    pass
def is_valid_community_type(commstr_type):
    """Return True when *commstr_type* is 'RO' or 'RW'; echo an error otherwise."""
    if commstr_type in ('RO', 'RW'):
        return True
    click.echo("Invalid community type. Must be either RO or RW")
    return False
def is_valid_user_type(user_type):
    """Map a lower-case SNMPv3 user type to its canonical spelling.

    Returns (True, canonical_name) on success; on failure echoes and returns
    (False, error_message).
    """
    convert_user_type = {'noauthnopriv': 'noAuthNoPriv', 'authnopriv': 'AuthNoPriv', 'priv': 'Priv'}
    canonical = convert_user_type.get(user_type)
    if canonical is not None:
        return True, canonical
    message = ("Invalid user type. Must be one of these one of these three "
               "'noauthnopriv' or 'authnopriv' or 'priv'")
    click.echo(message)
    return False, message
def is_valid_auth_type(user_auth_type):
    """Return True when the auth type is MD5, SHA or HMAC-SHA-2; echo otherwise."""
    if user_auth_type in ('MD5', 'SHA', 'HMAC-SHA-2'):
        return True
    click.echo("Invalid user authentication type. Must be one of these 'MD5', 'SHA', or 'HMAC-SHA-2'")
    return False
def is_valid_encrypt_type(encrypt_type):
    """Return True when the encryption type is DES or AES; echo otherwise."""
    if encrypt_type in ('DES', 'AES'):
        return True
    click.echo("Invalid user encryption type. Must be one of these two 'DES' or 'AES'")
    return False
def snmp_community_secret_check(snmp_secret):
    """Validate an SNMP community string: max 32 chars, no '@' or ':'.

    On the first violated rule, echoes the full rule set plus a specific
    FAILED line and returns False; returns True for a valid string.
    """
    excluded_special_symbols = ['@', ":"]
    if len(snmp_secret) > 32:
        failed = "FAILED: SNMP community string length should be not be greater than 32"
    elif any(char in excluded_special_symbols for char in snmp_secret):
        failed = ("FAILED: SNMP community string should not have any of these "
                  "special symbols {}".format(excluded_special_symbols))
    else:
        return True
    click.echo("SNMP community string length should be not be greater than 32")
    click.echo("SNMP community string should not have any of these special "
               "symbols {}".format(excluded_special_symbols))
    click.echo(failed)
    return False
def snmp_username_check(snmp_username):
    """Validate an SNMP user name: max 32 chars, no '@' or ':'.

    On the first violated rule, echoes the rule set plus a specific FAILED
    line and returns False; returns True for a valid name.
    """
    excluded_special_symbols = ['@', ":"]
    if len(snmp_username) > 32:
        failed = "FAILED: SNMP user {} length should not be greater than 32 characters".format(snmp_username)
    elif any(char in excluded_special_symbols for char in snmp_username):
        failed = ("FAILED: SNMP user {} should not have any of these special "
                  "symbols {}".format(snmp_username, excluded_special_symbols))
    else:
        return True
    click.echo("SNMP user {} length should be not be greater than 32 characters".format(snmp_username))
    click.echo("SNMP community string should not have any of these special "
               "symbols {}".format(excluded_special_symbols))
    click.echo(failed)
    return False
def snmp_user_secret_check(snmp_secret):
    """Validate an SNMPv3 user password: 8-64 chars, no '@' or ':'.

    On the first violated rule, echoes the full rule set plus a specific
    FAILED line and returns False; returns True for a valid password.
    """
    excluded_special_symbols = ['@', ":"]
    if len(snmp_secret) < 8:
        failed = "FAILED: SNMP user password length should be at least 8 characters"
    elif len(snmp_secret) > 64:
        failed = "FAILED: SNMP user password length should be not be greater than 64"
    elif any(char in excluded_special_symbols for char in snmp_secret):
        failed = ("FAILED: SNMP user password should not have any of these special "
                  "symbols {}".format(excluded_special_symbols))
    else:
        return True
    click.echo("SNMP user password length should be at least 8 characters")
    click.echo("SNMP user password length should be not be greater than 64")
    click.echo("SNMP user password should not have any of these special "
               "symbols {}".format(excluded_special_symbols))
    click.echo(failed)
    return False
@community.command('add')
@click.argument('community', metavar='<snmp_community>', required=True)
@click.argument('string_type', metavar='<RO|RW>', required=True)
@clicommon.pass_db
def add_community(db, community, string_type):
    """ Add snmp community string"""
    # Exit codes: 1 = bad RO/RW type, 2 = invalid community string, 3 = duplicate.
    string_type = string_type.upper()
    if not is_valid_community_type(string_type):
        sys.exit(1)
    if not snmp_community_secret_check(community):
        sys.exit(2)
    snmp_communities = db.cfgdb.get_table("SNMP_COMMUNITY")
    if community in snmp_communities:
        click.echo("SNMP community {} is already configured".format(community))
        sys.exit(3)
    db.cfgdb.set_entry('SNMP_COMMUNITY', community, {'TYPE': string_type})
    click.echo("SNMP community {} added to configuration".format(community))
    try:
        # reset-failed first so a previously failed snmp unit doesn't block restart.
        click.echo("Restarting SNMP service...")
        clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False)
        clicommon.run_command("systemctl restart snmp.service", display_cmd=False)
    except SystemExit as e:
        click.echo("Restart service snmp failed with error {}".format(e))
        raise click.Abort()
@community.command('del')
@click.argument('community', metavar='<snmp_community>', required=True)
@clicommon.pass_db
def del_community(db, community):
    """ Delete snmp community string"""
    snmp_communities = db.cfgdb.get_table("SNMP_COMMUNITY")
    if community not in snmp_communities:
        click.echo("SNMP community {} is not configured".format(community))
        sys.exit(1)
    else:
        # set_entry(..., None) removes the row from CONFIG_DB.
        db.cfgdb.set_entry('SNMP_COMMUNITY', community, None)
        click.echo("SNMP community {} removed from configuration".format(community))
        try:
            # reset-failed first so a previously failed snmp unit doesn't block restart.
            click.echo("Restarting SNMP service...")
            clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False)
            clicommon.run_command("systemctl restart snmp.service", display_cmd=False)
        except SystemExit as e:
            click.echo("Restart service snmp failed with error {}".format(e))
            raise click.Abort()
@community.command('replace')
@click.argument('current_community', metavar='<current_community_string>', required=True)
@click.argument('new_community', metavar='<new_community_string>', required=True)
@clicommon.pass_db
def replace_community(db, current_community, new_community):
    """ Replace snmp community string"""
    # Exit codes: 1 = unknown current string, 2 = invalid new string, 3 = duplicate.
    snmp_communities = db.cfgdb.get_table("SNMP_COMMUNITY")
    # Idiomatic membership test (was 'not current_community in ...').
    if current_community not in snmp_communities:
        click.echo("Current SNMP community {} is not configured".format(current_community))
        sys.exit(1)
    if not snmp_community_secret_check(new_community):
        sys.exit(2)
    elif new_community in snmp_communities:
        click.echo("New SNMP community {} to replace current SNMP community {} already "
                   "configured".format(new_community, current_community))
        sys.exit(3)
    else:
        # The replacement inherits the RO/RW type of the entry it replaces.
        string_type = snmp_communities[current_community]['TYPE']
        db.cfgdb.set_entry('SNMP_COMMUNITY', new_community, {'TYPE': string_type})
        click.echo("SNMP community {} added to configuration".format(new_community))
        db.cfgdb.set_entry('SNMP_COMMUNITY', current_community, None)
        click.echo('SNMP community {} replace community {}'.format(new_community, current_community))
        try:
            # reset-failed first so a previously failed snmp unit doesn't block restart.
            click.echo("Restarting SNMP service...")
            clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False)
            clicommon.run_command("systemctl restart snmp.service", display_cmd=False)
        except SystemExit as e:
            click.echo("Restart service snmp failed with error {}".format(e))
            raise click.Abort()
@snmp.group(cls=clicommon.AbbreviationGroup)
@clicommon.pass_db
def contact(db):
    # Container group for 'config snmp contact' subcommands (add/del/modify).
    # Intentionally no docstring: click would surface it as help text.
    pass
def is_valid_email(email):
    """Return True if *email* looks like a valid address.

    Accepts word chars, dots, '+' and '-' in the local part, and a
    multi-label domain (e.g. user@mail.example.com) with an alphabetic
    TLD of at least two characters. This is a superset of the previous
    pattern, which rejected sub-domains and TLDs longer than 3 chars
    (e.g. '.info').
    """
    return bool(re.search(r"^[\w\.\+\-]+\@([\w-]+\.)+[a-z]{2,}$", email))
@contact.command('add')
@click.argument('contact', metavar='<contact_name>', required=True)
@click.argument('contact_email', metavar='<contact_email>', required=True)
@clicommon.pass_db
def add_contact(db, contact, contact_email):
    """ Add snmp contact name and email """
    snmp = db.cfgdb.get_table("SNMP")
    # Refuse to add a second contact. Exit code 1 is preserved from the
    # previous implementation.
    if snmp.get('CONTACT'):
        click.echo("Contact already exists. Use sudo config snmp contact modify instead")
        sys.exit(1)
    # Validate the email on every add path. The previous code skipped this
    # check when a CONTACT key existed but was empty, allowing an invalid
    # email into the config; exit code 2 is preserved.
    if not is_valid_email(contact_email):
        click.echo("Contact email {} is not valid".format(contact_email))
        sys.exit(2)
    db.cfgdb.set_entry('SNMP', 'CONTACT', {contact: contact_email})
    click.echo("Contact name {} and contact email {} have been added to "
               "configuration".format(contact, contact_email))
    try:
        click.echo("Restarting SNMP service...")
        clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False)
        clicommon.run_command("systemctl restart snmp.service", display_cmd=False)
    except SystemExit as e:
        click.echo("Restart service snmp failed with error {}".format(e))
        raise click.Abort()
@contact.command('del')
@click.argument('contact', metavar='<contact_name>', required=True)
@clicommon.pass_db
def del_contact(db, contact):
    """ Delete snmp contact name and email """
    snmp = db.cfgdb.get_table("SNMP")
    try:
        # Only a single contact entry is supported; fetch its name.
        configured_contact = list(snmp['CONTACT'].keys())[0]
    except (KeyError, IndexError):
        # No CONTACT key, or an empty CONTACT entry: nothing to delete.
        click.echo("Contact name {} is not configured".format(contact))
        sys.exit(2)
    # Compare by equality. The previous code used 'contact in name', a
    # substring test that let e.g. 'jo' delete the contact 'john'.
    if contact != configured_contact:
        click.echo("SNMP contact {} is not configured".format(contact))
        sys.exit(1)
    db.cfgdb.set_entry('SNMP', 'CONTACT', None)
    click.echo("SNMP contact {} removed from configuration".format(contact))
    try:
        click.echo("Restarting SNMP service...")
        clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False)
        clicommon.run_command("systemctl restart snmp.service", display_cmd=False)
    except SystemExit as e:
        click.echo("Restart service snmp failed with error {}".format(e))
        raise click.Abort()
@contact.command('modify')
@click.argument('contact', metavar='<contact>', required=True)
@click.argument('contact_email', metavar='<contact email>', required=True)
@clicommon.pass_db
def modify_contact(db, contact, contact_email):
    """ Modify snmp contact"""
    snmp = db.cfgdb.get_table("SNMP")
    try:
        # Only one contact entry is supported, so look at the first key.
        # Raises KeyError when no CONTACT has been configured yet.
        current_snmp_contact_name = (list(snmp['CONTACT'].keys()))[0]
        if current_snmp_contact_name == contact:
            current_snmp_contact_email = snmp['CONTACT'][contact]
        else:
            # Different contact name: force the "replace whole entry" branch
            # below by making the stored email compare unequal.
            current_snmp_contact_email = ''
        if contact == current_snmp_contact_name and contact_email == current_snmp_contact_email:
            # Nothing to change.
            click.echo("SNMP contact {} {} already exists".format(contact, contact_email))
            sys.exit(1)
        elif contact == current_snmp_contact_name and contact_email != current_snmp_contact_email:
            # Same contact name, new email: update the email in place.
            if not is_valid_email(contact_email):
                click.echo("Contact email {} is not valid".format(contact_email))
                sys.exit(2)
            db.cfgdb.mod_entry('SNMP', 'CONTACT', {contact: contact_email})
            click.echo("SNMP contact {} email updated to {}".format(contact, contact_email))
            try:
                click.echo("Restarting SNMP service...")
                clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False)
                clicommon.run_command("systemctl restart snmp.service", display_cmd=False)
            except SystemExit as e:
                click.echo("Restart service snmp failed with error {}".format(e))
                raise click.Abort()
        else:
            # New contact name: delete the old entry and create a fresh one
            # (the entry key is the contact name, so mod_entry cannot rename).
            if not is_valid_email(contact_email):
                click.echo("Contact email {} is not valid".format(contact_email))
                sys.exit(2)
            db.cfgdb.set_entry('SNMP', 'CONTACT', None)
            db.cfgdb.set_entry('SNMP', 'CONTACT', {contact: contact_email})
            click.echo("SNMP contact {} and contact email {} updated".format(contact, contact_email))
            try:
                click.echo("Restarting SNMP service...")
                clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False)
                clicommon.run_command("systemctl restart snmp.service", display_cmd=False)
            except SystemExit as e:
                click.echo("Restart service snmp failed with error {}".format(e))
                raise click.Abort()
    except KeyError:
        # No contact configured at all.
        if "CONTACT" not in snmp.keys():
            click.echo("Contact name {} is not configured".format(contact))
            sys.exit(3)
@snmp.group(cls=clicommon.AbbreviationGroup)
@clicommon.pass_db
def location(db):
    # Container group for 'config snmp location' subcommands (add/del/modify).
    # Intentionally no docstring: click would surface it as help text.
    pass
@location.command('add')
@click.argument('location', metavar='<location>', required=True, nargs=-1)
@clicommon.pass_db
def add_location(db, location):
    """ Add snmp location"""
    # nargs=-1 delivers the location as a sequence of words; join them back.
    if isinstance(location, (tuple, list)):
        location = " ".join(location)
    snmp = db.cfgdb.get_table("SNMP")
    if snmp.get('LOCATION'):
        # A (non-empty) location already exists; refuse to overwrite it.
        click.echo("Location already exists")
        sys.exit(1)
    if 'LOCATION' not in snmp:
        db.cfgdb.set_entry('SNMP', 'LOCATION', {'Location': location})
        click.echo("SNMP Location {} has been added to configuration".format(location))
        try:
            click.echo("Restarting SNMP service...")
            clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False)
            clicommon.run_command("systemctl restart snmp.service", display_cmd=False)
        except SystemExit as e:
            click.echo("Restart service snmp failed with error {}".format(e))
            raise click.Abort()
@location.command('del')
@click.argument('location', metavar='<location>', required=True, nargs=-1)
@clicommon.pass_db
def delete_location(db, location):
    """ Delete snmp location"""
    # nargs=-1 delivers the location as a sequence of words; join them back.
    if isinstance(location, (tuple, list)):
        location = " ".join(location)
    snmp = db.cfgdb.get_table("SNMP")
    try:
        configured_location = snmp['LOCATION']['Location']
    except KeyError:
        if "LOCATION" not in snmp.keys():
            click.echo("SNMP Location {} is not configured".format(location))
            sys.exit(2)
        # LOCATION key present but malformed (no inner 'Location'); nothing to do.
        return
    if location != configured_location:
        click.echo("SNMP Location {} does not exist. The location is {}".format(location, configured_location))
        sys.exit(1)
    db.cfgdb.set_entry('SNMP', 'LOCATION', None)
    click.echo("SNMP Location {} removed from configuration".format(location))
    try:
        click.echo("Restarting SNMP service...")
        clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False)
        clicommon.run_command("systemctl restart snmp.service", display_cmd=False)
    except SystemExit as e:
        click.echo("Restart service snmp failed with error {}".format(e))
        raise click.Abort()
@location.command('modify')
@click.argument('location', metavar='<location>', required=True, nargs=-1)
@clicommon.pass_db
def modify_location(db, location):
    """ Modify snmp location"""
    # nargs=-1 delivers the location as a sequence of words; join them back.
    if isinstance(location, (tuple, list)):
        location = " ".join(location)
    snmp = db.cfgdb.get_table("SNMP")
    try:
        snmp_location = snmp['LOCATION']['Location']
    except KeyError:
        click.echo("Cannot modify SNMP Location. You must use 'config snmp location add command <snmp_location>'")
        sys.exit(2)
    # Compare by equality. The previous code used 'location in snmp_location',
    # a substring test that wrongly reported "already exists" whenever the new
    # location was a substring of the configured one.
    if location == snmp_location:
        click.echo("SNMP location {} already exists".format(location))
        sys.exit(1)
    db.cfgdb.mod_entry('SNMP', 'LOCATION', {'Location': location})
    click.echo("SNMP location {} modified in configuration".format(location))
    try:
        click.echo("Restarting SNMP service...")
        clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False)
        clicommon.run_command("systemctl restart snmp.service", display_cmd=False)
    except SystemExit as e:
        click.echo("Restart service snmp failed with error {}".format(e))
        raise click.Abort()
from enum import IntEnum
class SnmpUserError(IntEnum):
    """Exit codes used by 'config snmp user add' to report validation failures."""
    NameCheckFailure = 1
    TypeNoAuthNoPrivOrAuthNoPrivOrPrivCheckFailure = 2
    RoRwCheckFailure = 3
    NoAuthNoPrivHasAuthType = 4
    AuthTypeMd5OrShaOrHmacsha2IsMissing = 5
    AuthTypeMd5OrShaOrHmacsha2Failure = 6
    AuthPasswordMissing = 7
    AuthPasswordFailsComplexityRequirements = 8
    EncryptPasswordNotAllowedWithAuthNoPriv = 9
    EncryptTypeDesOrAesIsMissing = 10
    EncryptTypeFailsComplexityRequirements = 11
    EncryptPasswordMissingFailure = 12
    EncryptPasswordFailsComplexityRequirements = 13
    UserAlreadyConfigured = 14
@snmp.group(cls=clicommon.AbbreviationGroup)
@clicommon.pass_db
def user(db):
    # Container group for 'config snmp user' subcommands (add/del).
    # Intentionally no docstring: click would surface it as help text.
    pass
@user.command('add')
@click.argument('user', metavar='<snmp_user>', required=True)
@click.argument('user_type', metavar='<noAuthNoPriv|AuthNoPriv|Priv>', required=True)
@click.argument('user_permission_type', metavar='<RO|RW>', required=True)
@click.argument('user_auth_type', metavar='<MD5|SHA|HMAC-SHA-2>', required=False)
@click.argument('user_auth_password', metavar='<auth_password>', required=False)
@click.argument('user_encrypt_type', metavar='<DES|AES>', required=False)
@click.argument('user_encrypt_password', metavar='<encrypt_password>', required=False)
@clicommon.pass_db
def add_user(db, user, user_type, user_permission_type, user_auth_type, user_auth_password, user_encrypt_type,
             user_encrypt_password):
    """ Add snmp user"""
    # Validate the user name; the helper prints its own diagnostics.
    if not snmp_username_check(user):
        sys.exit(SnmpUserError.NameCheckFailure)
    user_type = user_type.lower()
    # is_valid_user_type() appears to return (is_valid, canonical_form) —
    # presumably one of "noAuthNoPriv"/"AuthNoPriv"/"Priv"; TODO confirm.
    user_type_info = is_valid_user_type(user_type)
    if not user_type_info[0]:
        sys.exit(SnmpUserError.TypeNoAuthNoPrivOrAuthNoPrivOrPrivCheckFailure)
    user_type = user_type_info[1]
    user_permission_type = user_permission_type.upper()
    if not is_valid_community_type(user_permission_type):
        sys.exit(SnmpUserError.RoRwCheckFailure)
    # Auth type: forbidden for noAuthNoPriv, mandatory for the other types.
    if user_type == "noAuthNoPriv":
        if user_auth_type:
            click.echo("User auth type not used with 'noAuthNoPriv'. Please use 'AuthNoPriv' or 'Priv' instead")
            sys.exit(SnmpUserError.NoAuthNoPrivHasAuthType)
    else:
        if not user_auth_type:
            click.echo("User auth type is missing. Must be MD5, SHA, or HMAC-SHA-2")
            sys.exit(SnmpUserError.AuthTypeMd5OrShaOrHmacsha2IsMissing)
    # When an auth type was given, it must be valid and come with a
    # sufficiently complex password.
    if user_auth_type:
        user_auth_type = user_auth_type.upper()
        if not is_valid_auth_type(user_auth_type):
            sys.exit(SnmpUserError.AuthTypeMd5OrShaOrHmacsha2Failure)
        elif not user_auth_password:
            click.echo("User auth password is missing")
            sys.exit(SnmpUserError.AuthPasswordMissing)
        elif user_auth_password:
            if not snmp_user_secret_check(user_auth_password):
                sys.exit(SnmpUserError.AuthPasswordFailsComplexityRequirements)
    # Encryption type: forbidden for AuthNoPriv, mandatory for Priv.
    if user_type == "AuthNoPriv":
        if user_encrypt_type:
            click.echo("User encrypt type not used with 'AuthNoPriv'. Please use 'Priv' instead")
            sys.exit(SnmpUserError.EncryptPasswordNotAllowedWithAuthNoPriv)
    elif user_type == "Priv":
        if not user_encrypt_type:
            click.echo("User encrypt type is missing. Must be DES or AES")
            sys.exit(SnmpUserError.EncryptTypeDesOrAesIsMissing)
    # When an encryption type was given, it must be valid and come with a
    # sufficiently complex password.
    if user_encrypt_type:
        user_encrypt_type = user_encrypt_type.upper()
        if not is_valid_encrypt_type(user_encrypt_type):
            sys.exit(SnmpUserError.EncryptTypeFailsComplexityRequirements)
        elif not user_encrypt_password:
            click.echo("User encrypt password is missing")
            sys.exit(SnmpUserError.EncryptPasswordMissingFailure)
        elif user_encrypt_password:
            if not snmp_user_secret_check(user_encrypt_password):
                sys.exit(SnmpUserError.EncryptPasswordFailsComplexityRequirements)
    snmp_users = db.cfgdb.get_table("SNMP_USER")
    if user in snmp_users.keys():
        click.echo("SNMP user {} is already configured".format(user))
        sys.exit(SnmpUserError.UserAlreadyConfigured)
    else:
        # Normalize all optional fields to empty strings before writing.
        if not user_auth_type:
            user_auth_type = ''
        if not user_auth_password:
            user_auth_password = ''
        if not user_encrypt_type:
            user_encrypt_type = ''
        if not user_encrypt_password:
            user_encrypt_password = ''
        db.cfgdb.set_entry('SNMP_USER', user, {'SNMP_USER_TYPE': user_type,
                                               'SNMP_USER_PERMISSION': user_permission_type,
                                               'SNMP_USER_AUTH_TYPE': user_auth_type,
                                               'SNMP_USER_AUTH_PASSWORD': user_auth_password,
                                               'SNMP_USER_ENCRYPTION_TYPE': user_encrypt_type,
                                               'SNMP_USER_ENCRYPTION_PASSWORD': user_encrypt_password})
        click.echo("SNMP user {} added to configuration".format(user))
        try:
            click.echo("Restarting SNMP service...")
            clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False)
            clicommon.run_command("systemctl restart snmp.service", display_cmd=False)
        except SystemExit as e:
            click.echo("Restart service snmp failed with error {}".format(e))
            raise click.Abort()
@user.command('del')
@click.argument('user', metavar='<snmp_user>', required=True)
@clicommon.pass_db
def del_user(db, user):
    """ Del snmp user"""
    snmp_users = db.cfgdb.get_table("SNMP_USER")
    # Guard clause: nothing to do for an unknown user.
    if user not in snmp_users:
        click.echo("SNMP user {} is not configured".format(user))
        sys.exit(1)
    db.cfgdb.set_entry('SNMP_USER', user, None)
    click.echo("SNMP user {} removed from configuration".format(user))
    try:
        click.echo("Restarting SNMP service...")
        clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False)
        clicommon.run_command("systemctl restart snmp.service", display_cmd=False)
    except SystemExit as e:
        click.echo("Restart service snmp failed with error {}".format(e))
        raise click.Abort()
#
# 'bgp' group ('config bgp ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
def bgp():
    """BGP-related configuration tasks"""
    # Container group for 'config bgp ...' subcommands.
    pass
#
# 'shutdown' subgroup ('config bgp shutdown ...')
#
@bgp.group(cls=clicommon.AbbreviationGroup)
def shutdown():
    """Shut down BGP session(s)"""
    # Container group for 'config bgp shutdown ...' subcommands.
    pass
# 'all' subcommand
@shutdown.command()
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def all(verbose):
    """Shut down all BGP sessions
    In the case of Multi-Asic platform, we shut only the EBGP sessions with external neighbors.
    """
    log.log_info("'bgp shutdown all' executing...")
    if multi_asic.is_multi_asic():
        # Multi-ASIC: only the front (external-facing) namespaces.
        namespaces = multi_asic.get_all_namespaces()['front_ns']
    else:
        namespaces = [DEFAULT_NAMESPACE]
    # Bring every neighbor down in each relevant CONFIG_DB instance.
    for ns in namespaces:
        cfg_db = ConfigDBConnector(use_unix_socket_path=True, namespace=ns)
        cfg_db.connect()
        for neighbor_addr in _get_all_neighbor_ipaddresses(cfg_db):
            _change_bgp_session_status_by_addr(cfg_db, neighbor_addr, 'down', verbose)
# 'neighbor' subcommand
@shutdown.command()
@click.argument('ipaddr_or_hostname', metavar='<ipaddr_or_hostname>', required=True)
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def neighbor(ipaddr_or_hostname, verbose):
    """Shut down BGP session by neighbor IP address or hostname.
    User can specify either internal or external BGP neighbor to shutdown
    """
    log.log_info("'bgp shutdown neighbor {}' executing...".format(ipaddr_or_hostname))
    if multi_asic.is_multi_asic():
        # Internal or external neighbor: search front and back namespaces.
        ns_info = multi_asic.get_all_namespaces()
        namespaces = ns_info['front_ns'] + ns_info['back_ns']
    else:
        namespaces = [DEFAULT_NAMESPACE]
    # Try every CONFIG_DB instance; succeed if any of them knew the neighbor.
    found = False
    for ns in namespaces:
        cfg_db = ConfigDBConnector(use_unix_socket_path=True, namespace=ns)
        cfg_db.connect()
        if _change_bgp_session_status(cfg_db, ipaddr_or_hostname, 'down', verbose):
            found = True
    if not found:
        click.get_current_context().fail("Could not locate neighbor '{}'".format(ipaddr_or_hostname))
@bgp.group(cls=clicommon.AbbreviationGroup)
def startup():
    """Start up BGP session(s)"""
    # Container group for 'config bgp startup ...' subcommands.
    pass
# 'all' subcommand
@startup.command()
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def all(verbose):
    """Start up all BGP sessions
    In the case of Multi-Asic platform, we startup only the EBGP sessions with external neighbors.
    """
    log.log_info("'bgp startup all' executing...")
    if multi_asic.is_multi_asic():
        # Multi-ASIC: only the front (external-facing) namespaces.
        namespaces = multi_asic.get_all_namespaces()['front_ns']
    else:
        namespaces = [DEFAULT_NAMESPACE]
    # Bring every neighbor up in each relevant CONFIG_DB instance.
    for ns in namespaces:
        cfg_db = ConfigDBConnector(use_unix_socket_path=True, namespace=ns)
        cfg_db.connect()
        for neighbor_addr in _get_all_neighbor_ipaddresses(cfg_db):
            _change_bgp_session_status_by_addr(cfg_db, neighbor_addr, 'up', verbose)
# 'neighbor' subcommand
@startup.command()
@click.argument('ipaddr_or_hostname', metavar='<ipaddr_or_hostname>', required=True)
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def neighbor(ipaddr_or_hostname, verbose):
    """Start up BGP session by neighbor IP address or hostname.
    User can specify either internal or external BGP neighbor to startup
    """
    # Fix: the docstring used to sit *after* the log call below, so it was a
    # plain string statement and click showed no help for this command.
    log.log_info("'bgp startup neighbor {}' executing...".format(ipaddr_or_hostname))
    namespaces = [DEFAULT_NAMESPACE]
    found_neighbor = False
    if multi_asic.is_multi_asic():
        # Internal or external neighbor: search front and back namespaces.
        ns_list = multi_asic.get_all_namespaces()
        namespaces = ns_list['front_ns'] + ns_list['back_ns']
    # Connect to CONFIG_DB in linux host (in case of single ASIC) or CONFIG_DB in all the
    # namespaces (in case of multi ASIC) and do the specified "action" on the BGP neighbor(s)
    for namespace in namespaces:
        config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
        config_db.connect()
        if _change_bgp_session_status(config_db, ipaddr_or_hostname, 'up', verbose):
            found_neighbor = True
    if not found_neighbor:
        click.get_current_context().fail("Could not locate neighbor '{}'".format(ipaddr_or_hostname))
#
# 'remove' subgroup ('config bgp remove ...')
#
@bgp.group(cls=clicommon.AbbreviationGroup)
def remove():
    "Remove BGP neighbor configuration from the device"
    # Container group for 'config bgp remove ...' subcommands.
    pass
@remove.command('neighbor')
@click.argument('neighbor_ip_or_hostname', metavar='<neighbor_ip_or_hostname>', required=True)
def remove_neighbor(neighbor_ip_or_hostname):
    """Deletes BGP neighbor configuration of given hostname or ip from devices
    User can specify either internal or external BGP neighbor to remove
    """
    if multi_asic.is_multi_asic():
        # Internal or external neighbor: search front and back namespaces.
        ns_info = multi_asic.get_all_namespaces()
        namespaces = ns_info['front_ns'] + ns_info['back_ns']
    else:
        namespaces = [DEFAULT_NAMESPACE]
    # Try every CONFIG_DB instance; succeed if any of them knew the neighbor.
    removed = False
    for ns in namespaces:
        cfg_db = ConfigDBConnector(use_unix_socket_path=True, namespace=ns)
        cfg_db.connect()
        if _remove_bgp_neighbor_config(cfg_db, neighbor_ip_or_hostname):
            removed = True
    if not removed:
        click.get_current_context().fail("Could not locate neighbor '{}'".format(neighbor_ip_or_hostname))
#
# 'interface' group ('config interface ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
# TODO add "hidden=True if this is a single ASIC platform, once we have click 7.0 in all branches.
@click.option('-n', '--namespace', help='Namespace name',
              required=True if multi_asic.is_multi_asic() else False, type=click.Choice(multi_asic.get_namespace_list()))
@click.pass_context
def interface(ctx, namespace):
    """Interface-related configuration tasks"""
    # Fall back to the default namespace when the option was omitted, then
    # stash a connected CONFIG_DB handle for all 'config interface' subcommands.
    ns = DEFAULT_NAMESPACE if namespace is None else namespace
    ns = str(ns)
    config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=ns)
    config_db.connect()
    ctx.obj = {'config_db': config_db, 'namespace': ns}
#
# 'startup' subcommand
#
@interface.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.pass_context
def startup(ctx, interface_name):
    """Start up interface"""
    config_db = ctx.obj['config_db']
    if clicommon.get_interface_naming_mode() == "alias":
        interface_name = interface_alias_to_name(config_db, interface_name)
        if interface_name is None:
            ctx.fail("'interface_name' is None!")

    intf_fs = parse_interface_in_filter(interface_name)
    if len(intf_fs) > 1 and multi_asic.is_multi_asic():
        ctx.fail("Interface range not supported in multi-asic platforms !!")

    if len(intf_fs) == 1 and interface_name_is_valid(config_db, interface_name) is False:
        ctx.fail("Interface name is invalid. Please enter a valid interface name!!")

    log.log_info("'interface startup {}' executing...".format(interface_name))
    # Bring up every matching entry across the three interface tables.
    for table in ("PORT", "PORTCHANNEL", "VLAN_SUB_INTERFACE"):
        for entry_name in config_db.get_table(table):
            if entry_name in intf_fs:
                config_db.mod_entry(table, entry_name, {"admin_status": "up"})
#
# 'shutdown' subcommand
#
@interface.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.pass_context
def shutdown(ctx, interface_name):
    """Shut down interface"""
    log.log_info("'interface shutdown {}' executing...".format(interface_name))
    config_db = ctx.obj['config_db']
    if clicommon.get_interface_naming_mode() == "alias":
        interface_name = interface_alias_to_name(config_db, interface_name)
        if interface_name is None:
            ctx.fail("'interface_name' is None!")

    intf_fs = parse_interface_in_filter(interface_name)
    if len(intf_fs) > 1 and multi_asic.is_multi_asic():
        ctx.fail("Interface range not supported in multi-asic platforms !!")

    if len(intf_fs) == 1 and interface_name_is_valid(config_db, interface_name) is False:
        ctx.fail("Interface name is invalid. Please enter a valid interface name!!")

    # Bring down every matching entry across the three interface tables.
    for table in ("PORT", "PORTCHANNEL", "VLAN_SUB_INTERFACE"):
        for entry_name in config_db.get_table(table):
            if entry_name in intf_fs:
                config_db.mod_entry(table, entry_name, {"admin_status": "down"})
#
# 'speed' subcommand
#
@interface.command()
@click.pass_context
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('interface_speed', metavar='<interface_speed>', required=True)
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def speed(ctx, interface_name, interface_speed, verbose):
    """Set interface speed"""
    # Get the config_db connector
    config_db = ctx.obj['config_db']
    if clicommon.get_interface_naming_mode() == "alias":
        interface_name = interface_alias_to_name(config_db, interface_name)
        if interface_name is None:
            ctx.fail("'interface_name' is None!")

    log.log_info("'interface speed {} {}' executing...".format(interface_name, interface_speed))

    # Compare namespaces by value: 'is' on strings relies on CPython interning
    # and is not a reliable equality test.
    if ctx.obj['namespace'] == DEFAULT_NAMESPACE:
        command = "portconfig -p {} -s {}".format(interface_name, interface_speed)
    else:
        command = "portconfig -p {} -s {} -n {}".format(interface_name, interface_speed, ctx.obj['namespace'])

    if verbose:
        command += " -vv"
    clicommon.run_command(command, display_cmd=verbose)
#
# 'autoneg' subcommand
#
@interface.command()
@click.pass_context
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('mode', metavar='<mode>', required=True, type=click.Choice(["enabled", "disabled"]))
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def autoneg(ctx, interface_name, mode, verbose):
    """Set interface auto negotiation mode"""
    # Get the config_db connector
    config_db = ctx.obj['config_db']
    if clicommon.get_interface_naming_mode() == "alias":
        interface_name = interface_alias_to_name(config_db, interface_name)
        if interface_name is None:
            ctx.fail("'interface_name' is None!")

    log.log_info("'interface autoneg {} {}' executing...".format(interface_name, mode))

    # Compare namespaces by value: 'is' on strings relies on CPython interning
    # and is not a reliable equality test.
    if ctx.obj['namespace'] == DEFAULT_NAMESPACE:
        command = "portconfig -p {} -an {}".format(interface_name, mode)
    else:
        command = "portconfig -p {} -an {} -n {}".format(interface_name, mode, ctx.obj['namespace'])

    if verbose:
        command += " -vv"
    clicommon.run_command(command, display_cmd=verbose)
#
# 'adv-speeds' subcommand
#
@interface.command()
@click.pass_context
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('speed_list', metavar='<speed_list>', required=True)
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def advertised_speeds(ctx, interface_name, speed_list, verbose):
    """Set interface advertised speeds"""
    # Get the config_db connector
    config_db = ctx.obj['config_db']
    if clicommon.get_interface_naming_mode() == "alias":
        interface_name = interface_alias_to_name(config_db, interface_name)
        if interface_name is None:
            ctx.fail("'interface_name' is None!")

    log.log_info("'interface advertised_speeds {} {}' executing...".format(interface_name, speed_list))

    # Compare namespaces by value: 'is' on strings relies on CPython interning
    # and is not a reliable equality test.
    if ctx.obj['namespace'] == DEFAULT_NAMESPACE:
        command = "portconfig -p {} -S {}".format(interface_name, speed_list)
    else:
        command = "portconfig -p {} -S {} -n {}".format(interface_name, speed_list, ctx.obj['namespace'])

    if verbose:
        command += " -vv"
    clicommon.run_command(command, display_cmd=verbose)
#
# 'interface-type' subcommand
#
@interface.command(name='type')
@click.pass_context
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('interface_type_value', metavar='<interface_type_value>', required=True)
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def interface_type(ctx, interface_name, interface_type_value, verbose):
    """Set interface type"""
    # Get the config_db connector
    config_db = ctx.obj['config_db']
    if clicommon.get_interface_naming_mode() == "alias":
        interface_name = interface_alias_to_name(config_db, interface_name)
        if interface_name is None:
            ctx.fail("'interface_name' is None!")

    log.log_info("'interface interface_type {} {}' executing...".format(interface_name, interface_type_value))

    # Compare namespaces by value: 'is' on strings relies on CPython interning
    # and is not a reliable equality test.
    if ctx.obj['namespace'] == DEFAULT_NAMESPACE:
        command = "portconfig -p {} -t {}".format(interface_name, interface_type_value)
    else:
        command = "portconfig -p {} -t {} -n {}".format(interface_name, interface_type_value, ctx.obj['namespace'])

    if verbose:
        command += " -vv"
    clicommon.run_command(command, display_cmd=verbose)
#
# 'advertised-interface-types' subcommand
#
@interface.command()
@click.pass_context
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('interface_type_list', metavar='<interface_type_list>', required=True)
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def advertised_types(ctx, interface_name, interface_type_list, verbose):
    """Set interface advertised types"""
    # Get the config_db connector
    config_db = ctx.obj['config_db']
    if clicommon.get_interface_naming_mode() == "alias":
        interface_name = interface_alias_to_name(config_db, interface_name)
        if interface_name is None:
            ctx.fail("'interface_name' is None!")

    log.log_info("'interface advertised_interface_types {} {}' executing...".format(interface_name, interface_type_list))

    # Compare namespaces by value: 'is' on strings relies on CPython interning
    # and is not a reliable equality test.
    if ctx.obj['namespace'] == DEFAULT_NAMESPACE:
        command = "portconfig -p {} -T {}".format(interface_name, interface_type_list)
    else:
        command = "portconfig -p {} -T {} -n {}".format(interface_name, interface_type_list, ctx.obj['namespace'])

    if verbose:
        command += " -vv"
    clicommon.run_command(command, display_cmd=verbose)
#
# 'breakout' subcommand
#
@interface.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('mode', required=True, type=click.STRING, autocompletion=_get_breakout_options)
@click.option('-f', '--force-remove-dependencies', is_flag=True, help='Clear all dependencies internally first.')
@click.option('-l', '--load-predefined-config', is_flag=True, help='load predefined user configuration (alias, lanes, speed etc) first.')
@click.option('-y', '--yes', is_flag=True, callback=_abort_if_false, expose_value=False, prompt='Do you want to Breakout the port, continue?')
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
@click.pass_context
def breakout(ctx, interface_name, mode, verbose, force_remove_dependencies, load_predefined_config):
    """ Set interface breakout mode """
    breakout_cfg_file = device_info.get_path_to_port_config_file()

    # Breakout is driven entirely by platform.json; bail out if it is absent.
    if not os.path.isfile(breakout_cfg_file) or not breakout_cfg_file.endswith('.json'):
        click.secho("[ERROR] Breakout feature is not available without platform.json file", fg='red')
        raise click.Abort()

    # Get the config_db connector
    config_db = ctx.obj['config_db']

    target_brkout_mode = mode

    # Get current breakout mode
    cur_brkout_dict = config_db.get_table('BREAKOUT_CFG')
    if len(cur_brkout_dict) == 0:
        click.secho("[ERROR] BREAKOUT_CFG table is NOT present in CONFIG DB", fg='red')
        raise click.Abort()

    if interface_name not in cur_brkout_dict.keys():
        click.secho("[ERROR] {} interface is NOT present in BREAKOUT_CFG table of CONFIG DB".format(interface_name), fg='red')
        raise click.Abort()

    cur_brkout_mode = cur_brkout_dict[interface_name]["brkout_mode"]

    # Validate Interface and Breakout mode
    if not _validate_interface_mode(ctx, breakout_cfg_file, interface_name, mode, cur_brkout_mode):
        raise click.Abort()

    """ Interface Deletion Logic """
    # Get list of interfaces to be deleted
    del_ports = get_child_ports(interface_name, cur_brkout_mode, breakout_cfg_file)
    del_intf_dict = {intf: del_ports[intf]["speed"] for intf in del_ports}

    if del_intf_dict:
        click.echo("\nPorts to be deleted : \n {}".format(json.dumps(del_intf_dict, indent=4)))
    else:
        click.secho("[ERROR] del_intf_dict is None! No interfaces are there to be deleted", fg='red')
        raise click.Abort()

    """ Interface Addition Logic """
    # Get list of interfaces to be added
    add_ports = get_child_ports(interface_name, target_brkout_mode, breakout_cfg_file)
    add_intf_dict = {intf: add_ports[intf]["speed"] for intf in add_ports}

    if add_intf_dict:
        click.echo("Ports to be added : \n {}".format(json.dumps(add_intf_dict, indent=4)))
    else:
        click.secho("[ERROR] port_dict is None!", fg='red')
        raise click.Abort()

    """ Special Case: Dont delete those ports where the current mode and speed of the parent port
        remains unchanged to limit the traffic impact """
    click.secho("\nAfter running Logic to limit the impact", fg="cyan", underline=True)
    matched_items = [intf for intf in del_intf_dict if intf in add_intf_dict and del_intf_dict[intf] == add_intf_dict[intf]]

    # Remove the interface which remains unchanged from both del_intf_dict and add_intf_dict
    for item in matched_items:
        del_intf_dict.pop(item)
        add_intf_dict.pop(item)

    # validate all del_ports before calling breakOutPort
    for intf in del_intf_dict.keys():
        if not interface_name_is_valid(config_db, intf):
            click.secho("[ERROR] Interface name {} is invalid".format(intf))
            raise click.Abort()

    # Fix: fg/blink were previously passed to str.format() instead of
    # click.secho(), so the styling was silently dropped.
    click.secho("\nFinal list of ports to be deleted : \n {} \nFinal list of ports to be added : \n {}"
                .format(json.dumps(del_intf_dict, indent=4), json.dumps(add_intf_dict, indent=4)),
                fg='green', blink=True)
    if not add_intf_dict:
        click.secho("[ERROR] add_intf_dict is None or empty! No interfaces are there to be added", fg='red')
        raise click.Abort()

    port_dict = {}
    for intf in add_intf_dict:
        if intf in add_ports:
            port_dict[intf] = add_ports[intf]

    # writing JSON object
    with open('new_port_config.json', 'w') as f:
        json.dump(port_dict, f, indent=4)

    # Start Interation with Dy Port BreakOut Config Mgmt
    try:
        """ Load config for the commands which are capable of change in config DB """
        cm = load_ConfigMgmt(verbose)

        """ Delete all ports if forced else print dependencies using ConfigMgmt API """
        final_delPorts = [intf for intf in del_intf_dict]

        """ Warn user if tables without yang models exist and have final_delPorts """
        breakout_warnUser_extraTables(cm, final_delPorts, confirm=True)

        # Create a dictionary containing all the added ports with its capabilities like alias, lanes, speed etc.
        portJson = dict(); portJson['PORT'] = port_dict

        # breakout_Ports will abort operation on failure, So no need to check return
        breakout_Ports(cm, delPorts=final_delPorts, portJson=portJson, force=force_remove_dependencies,
                       loadDefConfig=load_predefined_config, verbose=verbose)

        # Set Current Breakout mode in config DB
        brkout_cfg_keys = config_db.get_keys('BREAKOUT_CFG')
        if interface_name not in brkout_cfg_keys:
            click.secho("[ERROR] {} is not present in 'BREAKOUT_CFG' Table!".format(interface_name), fg='red')
            raise click.Abort()
        config_db.set_entry("BREAKOUT_CFG", interface_name, {'brkout_mode': target_brkout_mode})
        click.secho("Breakout process got successfully completed.", fg="cyan", underline=True)
        click.echo("Please note loaded setting will be lost after system reboot. To preserve setting, run `config save`.")
    except Exception as e:
        click.secho("Failed to break out Port. Error: {}".format(str(e)), fg='magenta')
        # Fix: report failure with a non-zero exit code (was sys.exit(0)).
        sys.exit(1)
def _get_all_mgmtinterface_keys():
    """Return the keys of the MGMT_INTERFACE table as a list of strings."""
    # Open a fresh CONFIG_DB connection: this helper is also called outside
    # of any click context, so it cannot reuse ctx.obj['config_db'].
    db = ConfigDBConnector()
    db.connect()
    mgmt_table = db.get_table('MGMT_INTERFACE')
    # Iterating a dict yields its keys
    return list(mgmt_table)
def mgmt_ip_restart_services():
    """Restart the services required when the mgmt interface IP changes.

    Whenever the eth0 IP address is changed, the "interfaces-config"
    service is restarted: it regenerates /etc/network/interfaces and
    restarts networking so the new/null IP address takes effect on eth0.
    "ntp-config" is restarted as well, since ntp.conf (generated from
    ntp.conf.j2) is made to listen on that particular eth0 IP address
    or reset back.
    """
    for cmd in ("systemctl restart interfaces-config",
                "systemctl restart ntp-config"):
        os.system(cmd)
#
# 'mtu' subcommand
#
@interface.command()
@click.pass_context
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('interface_mtu', metavar='<interface_mtu>', required=True)
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def mtu(ctx, interface_name, interface_mtu, verbose):
    """Set interface mtu"""
    # CONFIG_DB connector prepared by the parent 'interface' group
    config_db = ctx.obj['config_db']
    # Translate alias to SONiC interface name when alias naming mode is active
    if clicommon.get_interface_naming_mode() == "alias":
        interface_name = interface_alias_to_name(config_db, interface_name)
        if interface_name is None:
            ctx.fail("'interface_name' is None!")

    # MTU must be configured on the port channel itself, not on a member
    if interface_is_in_portchannel(config_db.get_table('PORTCHANNEL_MEMBER'), interface_name):
        ctx.fail("'interface_name' is in portchannel!")

    # Delegate to portconfig; pass the namespace only on multi-ASIC systems
    cmd = "portconfig -p {} -m {}".format(interface_name, interface_mtu)
    if ctx.obj['namespace'] is not DEFAULT_NAMESPACE:
        cmd += " -n {}".format(ctx.obj['namespace'])
    if verbose:
        cmd += " -vv"
    clicommon.run_command(cmd, display_cmd=verbose)
#
# 'tpid' subcommand
#
@interface.command()
@click.pass_context
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('interface_tpid', metavar='<interface_tpid>', required=True)
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def tpid(ctx, interface_name, interface_tpid, verbose):
    """Set interface tpid"""
    # CONFIG_DB connector prepared by the parent 'interface' group
    config_db = ctx.obj['config_db']
    # Translate alias to SONiC interface name when alias naming mode is active
    if clicommon.get_interface_naming_mode() == "alias":
        interface_name = interface_alias_to_name(config_db, interface_name)
        if interface_name is None:
            ctx.fail("'interface_name' is None!")

    # Delegate to portconfig; pass the namespace only on multi-ASIC systems
    cmd = "portconfig -p {} -tp {}".format(interface_name, interface_tpid)
    if ctx.obj['namespace'] is not DEFAULT_NAMESPACE:
        cmd += " -n {}".format(ctx.obj['namespace'])
    if verbose:
        cmd += " -vv"
    clicommon.run_command(cmd, display_cmd=verbose)
@interface.command()
@click.pass_context
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('interface_fec', metavar='<interface_fec>', required=True)
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def fec(ctx, interface_name, interface_fec, verbose):
    """Set interface fec"""
    # CONFIG_DB connector prepared by the parent 'interface' group
    config_db = ctx.obj['config_db']
    # Only these FEC modes are accepted
    if interface_fec not in ["rs", "fc", "none"]:
        ctx.fail("'fec not in ['rs', 'fc', 'none']!")
    # Translate alias to SONiC interface name when alias naming mode is active
    if clicommon.get_interface_naming_mode() == "alias":
        interface_name = interface_alias_to_name(config_db, interface_name)
        if interface_name is None:
            ctx.fail("'interface_name' is None!")

    # Delegate to portconfig; pass the namespace only on multi-ASIC systems
    cmd = "portconfig -p {} -f {}".format(interface_name, interface_fec)
    if ctx.obj['namespace'] is not DEFAULT_NAMESPACE:
        cmd += " -n {}".format(ctx.obj['namespace'])
    if verbose:
        cmd += " -vv"
    clicommon.run_command(cmd, display_cmd=verbose)
#
# 'ip' subgroup ('config interface ip ...')
#
@interface.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def ip(ctx):
    """Add or remove IP address"""
    # Container group for 'config interface ip add/remove'; the shared
    # config_db connector is inherited from the parent 'interface' group.
    pass
#
# 'add' subcommand
#
@ip.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument("ip_addr", metavar="<ip_addr>", required=True)
@click.argument('gw', metavar='<default gateway IP address>', required=False)
@click.pass_context
def add(ctx, interface_name, ip_addr, gw):
    """Add an IP address towards the interface"""
    # Get the config_db connector
    config_db = ctx.obj['config_db']
    # Translate alias to SONiC interface name when alias naming mode is active
    if clicommon.get_interface_naming_mode() == "alias":
        interface_name = interface_alias_to_name(config_db, interface_name)
        if interface_name is None:
            ctx.fail("'interface_name' is None!")
    # Add a validation to check this interface is not a member in vlan before
    # changing it to a router port
    vlan_member_table = config_db.get_table('VLAN_MEMBER')
    if (interface_is_in_vlan(vlan_member_table, interface_name)):
        click.echo("Interface {} is a member of vlan\nAborting!".format(interface_name))
        return
    # Validate the address/prefix syntax up front (ip_interface raises
    # ValueError on malformed input)
    try:
        ip_address = ipaddress.ip_interface(ip_addr)
    except ValueError as err:
        ctx.fail("IP address is not valid: {}".format(err))
    # eth0 (management interface) is stored in MGMT_INTERFACE rather than the
    # per-interface tables, and needs dependent services restarted afterwards
    if interface_name == 'eth0':
        # Configuring more than 1 IPv4 or more than 1 IPv6 address fails.
        # Allow only one IPv4 and only one IPv6 address to be configured for IPv6.
        # If a row already exist, overwrite it (by doing delete and add).
        mgmtintf_key_list = _get_all_mgmtinterface_keys()
        for key in mgmtintf_key_list:
            # For loop runs for max 2 rows, once for IPv4 and once for IPv6.
            # No need to capture the exception since the ip_addr is already validated earlier
            current_ip = ipaddress.ip_interface(key[1])
            if (ip_address.version == current_ip.version):
                # If user has configured IPv4/v6 address and the already available row is also IPv4/v6, delete it here.
                config_db.set_entry("MGMT_INTERFACE", ("eth0", key[1]), None)
        # Set the new row with new value
        if not gw:
            config_db.set_entry("MGMT_INTERFACE", (interface_name, str(ip_address)), {"NULL": "NULL"})
        else:
            config_db.set_entry("MGMT_INTERFACE", (interface_name, str(ip_address)), {"gwaddr": gw})
        mgmt_ip_restart_services()
        return
    table_name = get_interface_table_name(interface_name)
    if table_name == "":
        ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan/Loopback]")
    interface_entry = config_db.get_entry(table_name, interface_name)
    if len(interface_entry) == 0:
        # First IP on this interface: create the parent row so the router
        # interface exists; sub-interfaces also need admin_status to come up
        if table_name == "VLAN_SUB_INTERFACE":
            config_db.set_entry(table_name, interface_name, {"admin_status": "up"})
        else:
            config_db.set_entry(table_name, interface_name, {"NULL": "NULL"})
    config_db.set_entry(table_name, (interface_name, str(ip_address)), {"NULL": "NULL"})
#
# 'del' subcommand
#
@ip.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument("ip_addr", metavar="<ip_addr>", required=True)
@click.pass_context
def remove(ctx, interface_name, ip_addr):
    """Remove an IP address from the interface"""
    # Get the config_db connector
    config_db = ctx.obj['config_db']
    # Translate alias to SONiC interface name when alias naming mode is active
    if clicommon.get_interface_naming_mode() == "alias":
        interface_name = interface_alias_to_name(config_db, interface_name)
        if interface_name is None:
            ctx.fail("'interface_name' is None!")
    # Validate the address/prefix syntax up front
    try:
        ip_address = ipaddress.ip_interface(ip_addr)
    except ValueError as err:
        ctx.fail("IP address is not valid: {}".format(err))
    # eth0 (management interface) lives in MGMT_INTERFACE and needs the
    # dependent services restarted after the change
    if interface_name == 'eth0':
        config_db.set_entry("MGMT_INTERFACE", (interface_name, str(ip_address)), None)
        mgmt_ip_restart_services()
        return
    table_name = get_interface_table_name(interface_name)
    if table_name == "":
        ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan/Loopback]")
    interface_addresses = get_interface_ipaddresses(config_db, interface_name)
    # If we are deleting the last IP entry of the interface, check whether a static route is present for the RIF
    # before deleting the entry and also the RIF.
    if interface_addresses == {ip_address}:
        # Check both IPv4 and IPv6 routes.
        ip_versions = [ "ip", "ipv6"]
        for ip_ver in ip_versions:
            # Complete the command and ask Zebra to return the routes.
            # Scopes of all VRFs will be checked.
            cmd = "show {} route vrf all static".format(ip_ver)
            if multi_asic.is_multi_asic():
                output = bgp_util.run_bgp_command(cmd, ctx.obj['namespace'])
            else:
                output = bgp_util.run_bgp_command(cmd)
            # If there is output data, check is there a static route,
            # bound to the interface.
            if output != "":
                if any(interface_name in output_line for output_line in output.splitlines()):
                    ctx.fail("Cannot remove the last IP entry of interface {}. A static {} route is still bound to the RIF.".format(interface_name, ip_ver))
    remove_router_interface_ip_address(config_db, interface_name, ip_address)
    # Drop the now-empty interface row unless it is still bound to a VRF or
    # has IPv6 link-local-only mode enabled
    interface_addresses = get_interface_ipaddresses(config_db, interface_name)
    if len(interface_addresses) == 0 and is_interface_bind_to_vrf(config_db, interface_name) is False and get_intf_ipv6_link_local_mode(ctx, interface_name, table_name) != "enable":
        config_db.set_entry(table_name, interface_name, None)
    # Flush kernel neighbor entries for the removed address; on multi-ASIC
    # this must run inside the ASIC's network namespace
    if multi_asic.is_multi_asic():
        command = "sudo ip netns exec {} ip neigh flush dev {} {}".format(ctx.obj['namespace'], interface_name, str(ip_address))
    else:
        command = "ip neigh flush dev {} {}".format(interface_name, str(ip_address))
    clicommon.run_command(command)
#
# buffer commands and utilities
#
def buffer_objects_map_check_legality(ctx, db, interface_name, input_map, is_new_id, is_pg):
    """
    Tool function to check whether input_map is legal.
    Checks performed:
    1. Whether the input_map is well formed and within [0, max id] as
       reported by STATE_DB (pgs are in range [0-7])
    2. For a new id: whether the input_map overlaps an existing object on the port
    3. For an existing id: whether the exact map exists on the port

    Args:
        ctx: click context, used to abort on failure
        db: multi-db handle; db.cfgdb is CONFIG_DB, db.db is STATE_DB
        interface_name: port whose buffer objects are checked
        input_map: object IDs map string, like "3-4" or "7"
        is_new_id: True when adding a new map, False when updating an existing one
        is_pg: True for priority groups (BUFFER_PG), False for queues (BUFFER_QUEUE)
    """
    def _parse_object_id(idsmap):
        """
        Tool function to parse the idsmap
        Args:
            idsmap: string containing object IDs map, like 3-4 or 7
        Return:
            The lower and upper bound. In case the idsmap is illegal, it returns None, None
        Example:
            3-4 => 3, 4
            7   => 7, 7
            3-  => None, None
        """
        try:
            match = re.search("^([0-9]+)(-[0-9]+)?$", idsmap)
            lower = int(match.group(1))
            if match.group(2):
                upper = int(match.group(2)[1:])
            else:
                upper = lower
        except Exception:
            lower, upper = None, None
        return lower, upper

    config_db = db.cfgdb
    object_name = "priority group" if is_pg else "queue"

    # Hoisted out of the try block so the failure message below can always
    # reference them (previously a failure in db.db left them unbound).
    field_name = 'max_priority_groups' if is_pg else 'max_queues'
    _hash = 'BUFFER_MAX_PARAM_TABLE|{}'.format(interface_name)
    try:
        # Fetch maximum object id from STATE_DB
        state_db = db.db
        buffer_max_params = state_db.get_all(state_db.STATE_DB, _hash)
        maximum_id = int(buffer_max_params.get(field_name)) - 1
    except Exception:
        ctx.fail("Unable to fetch {} from {} in STATE_DB".format(field_name, _hash))

    lower, upper = _parse_object_id(input_map)
    # Compare against None explicitly: id 0 (e.g. "0" or "0-7") is valid,
    # but the former truthiness test ("not lower") rejected it.
    if upper is None or lower is None or upper < lower or lower < 0 or upper > maximum_id:
        ctx.fail("Buffer {} {} is not valid.".format(object_name, input_map))

    # Check overlapping.
    # To configure a new PG which is overlapping an existing one is not allowed
    # For example, to add '5-6' while '3-5' existing is illegal
    existing_object_maps = config_db.get_table("BUFFER_PG" if is_pg else "BUFFER_QUEUE")
    if not is_new_id:
        # Updating an existing map: the exact (port, map) key must be present
        if (interface_name, input_map) not in existing_object_maps:
            ctx.fail("Buffer {} {} doesn't exist".format(object_name, input_map))
        return

    # Keys of BUFFER_PG/BUFFER_QUEUE are (port, ids-map) tuples
    for port, existing_object_map in existing_object_maps.keys():
        if port == interface_name:
            existing_lower, existing_upper = _parse_object_id(existing_object_map)
            if existing_upper < lower or existing_lower > upper:
                # New and existing maps are disjoint: legal
                pass
            else:
                ctx.fail("Buffer {} {} overlaps with existing {} {}".format(object_name, input_map, object_name, existing_object_map))
def update_buffer_object(db, interface_name, object_map, override_profile, is_pg, add=True):
    # Create or update a BUFFER_PG/BUFFER_QUEUE entry on a port.
    #
    # Args:
    #     db: multi-db handle; db.cfgdb is CONFIG_DB
    #     interface_name: port to configure
    #     object_map: IDs map string, e.g. "3-4" or "6"
    #     override_profile: optional buffer profile name; when omitted the
    #         'profile' field is set to "NULL"
    #     is_pg: True for priority groups, False for queues
    #     add: True to add a new map, False to update an existing one
    config_db = db.cfgdb
    ctx = click.get_current_context()
    # Check whether port is legal
    ports = config_db.get_entry("PORT", interface_name)
    if not ports:
        ctx.fail("Port {} doesn't exist".format(interface_name))
    buffer_table = "BUFFER_PG" if is_pg else "BUFFER_QUEUE"
    # Check whether object_map is legal
    # Check whether there is other lossless profiles configured on the interface
    buffer_objects_map_check_legality(ctx, db, interface_name, object_map, add, is_pg)
    # All checking passed
    if override_profile:
        profile_dict = config_db.get_entry("BUFFER_PROFILE", override_profile)
        if not profile_dict:
            ctx.fail("Profile {} doesn't exist".format(override_profile))
        # The profile's pool direction must match the object type:
        # PGs use ingress pools, queues use egress pools
        pool_name = profile_dict.get("pool")
        if not pool_name:
            ctx.fail("Profile {} is invalid".format(override_profile))
        pool_dict = config_db.get_entry("BUFFER_POOL", pool_name)
        pool_dir = pool_dict.get("type")
        expected_dir = "ingress" if is_pg else "egress"
        if pool_dir != expected_dir:
            ctx.fail("Type of pool {} referenced by profile {} is wrong".format(pool_name, override_profile))
        if is_pg:
            # NOTE(review): a profile with a static 'size' but no 'xoff' is
            # rejected here -- presumably 'xoff' is what marks a profile as
            # lossless; confirm against the buffer manager's schema.
            if not 'xoff' in profile_dict.keys() and 'size' in profile_dict.keys():
                ctx.fail("Profile {} doesn't exist or isn't a lossless profile".format(override_profile))
        config_db.set_entry(buffer_table, (interface_name, object_map), {"profile": override_profile})
    else:
        config_db.set_entry(buffer_table, (interface_name, object_map), {"profile": "NULL"})
    if is_pg:
        # Keep PORT_QOS_MAP's pfc_enable priorities in sync with lossless PGs
        adjust_pfc_enable(ctx, db, interface_name, object_map, True)
def remove_buffer_object_on_port(db, interface_name, buffer_object_map, is_pg=True):
    # Remove BUFFER_PG/BUFFER_QUEUE entries from a port. When
    # buffer_object_map is falsy (None), all matching entries on the port are
    # removed, except those referencing 'ingress_lossy_profile'.
    config_db = db.cfgdb
    ctx = click.get_current_context()
    # Check whether port is legal
    ports = config_db.get_entry("PORT", interface_name)
    if not ports:
        ctx.fail("Port {} doesn't exist".format(interface_name))
    # Remove all dynamic lossless PGs on the port
    buffer_table = "BUFFER_PG" if is_pg else "BUFFER_QUEUE"
    existing_buffer_objects = config_db.get_table(buffer_table)
    removed = False
    for k, v in existing_buffer_objects.items():
        port, existing_buffer_object = k
        if port == interface_name and (not buffer_object_map or buffer_object_map == existing_buffer_object):
            referenced_profile = v.get('profile')
            if referenced_profile and referenced_profile == 'ingress_lossy_profile':
                # Lossy PGs are not user-removable: an explicit request is an
                # error, a remove-all simply skips them
                if buffer_object_map:
                    ctx.fail("Lossy PG {} can't be removed".format(buffer_object_map))
                else:
                    continue
            config_db.set_entry(buffer_table, (interface_name, existing_buffer_object), None)
            if is_pg:
                # NOTE(review): passes the filter map (possibly None) rather
                # than existing_buffer_object, so pfc_enable may be left
                # unchanged on a remove-all -- confirm this is intended.
                adjust_pfc_enable(ctx, db, interface_name, buffer_object_map, False)
            removed = True
    if not removed:
        object_name = "lossless priority group" if is_pg else "queue"
        if buffer_object_map:
            ctx.fail("No specified {} {} found on port {}".format(object_name, buffer_object_map, interface_name))
        else:
            ctx.fail("No {} found on port {}".format(object_name, interface_name))
def adjust_pfc_enable(ctx, db, interface_name, pg_map, add):
    # Add (add=True) or remove (add=False) the priorities covered by pg_map
    # in PORT_QOS_MAP's comma-separated 'pfc_enable' field, keeping PFC in
    # sync with the port's lossless PGs.
    config_db = db.cfgdb
    # Fetch the original pfc_enable
    qosmap = config_db.get_entry("PORT_QOS_MAP", interface_name)
    pfc_enable = qosmap.get("pfc_enable")
    # Parse the comma-separated priority list into a set of ints
    pfc_set = set()
    if pfc_enable:
        for priority in pfc_enable.split(","):
            pfc_set.add(int(priority))
    if pg_map:
        # pg_map is like "3-4" or "6": first/last characters are the bounds
        # (single-digit ids only; assumes ids <= 9 -- callers limit to 0-7)
        lower_bound = int(pg_map[0])
        upper_bound = int(pg_map[-1])
        for priority in range(lower_bound, upper_bound + 1):
            if add:
                pfc_set.add(priority)
            elif priority in pfc_set:
                pfc_set.remove(priority)
    empty_set = set()
    pfc_enable = ""
    # issubset of the empty set is just an emptiness test on pfc_set
    if not pfc_set.issubset(empty_set):
        # Rebuild the comma-separated list; trailing comma stripped below
        for priority in pfc_set:
            pfc_enable += str(priority) + ","
    elif not add:
        # Remove all
        pfc_enable = ""
    else:
        ctx.fail("Try to add empty priorities")
    qosmap["pfc_enable"] = pfc_enable[:-1]
    config_db.set_entry("PORT_QOS_MAP", interface_name, qosmap)
#
# 'buffer' subgroup ('config interface buffer ...')
#
@interface.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def buffer(ctx):
    """Set or clear buffer configuration"""
    # Buffer configuration is only supported with the dynamic buffer model;
    # reject the whole 'config interface buffer ...' subtree otherwise.
    config_db = ctx.obj["config_db"]
    if not is_dynamic_buffer_enabled(config_db):
        ctx.fail("This command can only be executed on a system with dynamic buffer enabled")
#
# 'priority_group' subgroup ('config interface buffer priority_group ...')
#
@buffer.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def priority_group(ctx):
    """Set or clear buffer configuration"""
    # Container for 'config interface buffer priority_group ...' subcommands.
    pass
#
# 'lossless' subgroup ('config interface buffer priority_group lossless ...')
#
@priority_group.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def lossless(ctx):
    """Set or clear lossless PGs"""
    # Container for 'config interface buffer priority_group lossless ...'.
    pass
#
# 'add' subcommand
#
@lossless.command('add')
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('pg_map', metavar='<pg_map>', required=True)
@click.argument('override_profile', metavar='<override_profile>', required=False)
@clicommon.pass_db
def add_pg(db, interface_name, pg_map, override_profile):
    """Set lossless PGs for the interface"""
    # Thin wrapper around the shared BUFFER_PG/BUFFER_QUEUE updater
    update_buffer_object(db, interface_name, pg_map, override_profile, is_pg=True, add=True)
#
# 'set' subcommand
#
@lossless.command('set')
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('pg_map', metavar='<pg_map>', required=True)
@click.argument('override_profile', metavar='<override_profile>', required=False)
@clicommon.pass_db
def set_pg(db, interface_name, pg_map, override_profile):
    """Set lossless PGs for the interface"""
    # add=False: the PG map must already exist and is updated in place
    update_buffer_object(db, interface_name, pg_map, override_profile, is_pg=True, add=False)
#
# 'remove' subcommand
#
@lossless.command('remove')
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('pg_map', metavar='<pg_map>', required=False)
@clicommon.pass_db
def remove_pg(db, interface_name, pg_map):
    """Clear lossless PGs for the interface"""
    # pg_map is optional: when omitted, all lossless PGs on the port are
    # removed. (Fixed metavar: was '<pg_map' with a missing '>' in --help.)
    remove_buffer_object_on_port(db, interface_name, pg_map)
#
# 'queue' subgroup ('config interface buffer queue ...')
#
@buffer.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def queue(ctx):
    """Set or clear buffer configuration"""
    # Container for 'config interface buffer queue ...' subcommands.
    pass
#
# 'add' subcommand
#
@queue.command('add')
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('queue_map', metavar='<queue_map>', required=True)
@click.argument('buffer_profile', metavar='<buffer_profile>', required=True)
@clicommon.pass_db
def add_queue(db, interface_name, queue_map, buffer_profile):
    """Set lossless QUEUEs for the interface"""
    # Thin wrapper around the shared BUFFER_PG/BUFFER_QUEUE updater
    update_buffer_object(db, interface_name, queue_map, buffer_profile, is_pg=False, add=True)
#
# 'set' subcommand
#
@queue.command('set')
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('queue_map', metavar='<queue_map>', required=True)
@click.argument('buffer_profile', metavar='<buffer_profile>', required=True)
@clicommon.pass_db
def set_queue(db, interface_name, queue_map, buffer_profile):
    """Set lossless QUEUEs for the interface"""
    # add=False: the queue map must already exist and is updated in place
    update_buffer_object(db, interface_name, queue_map, buffer_profile, is_pg=False, add=False)
#
# 'remove' subcommand
#
@queue.command('remove')
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('queue_map', metavar='<queue_map>', required=False)
@clicommon.pass_db
def remove_queue(db, interface_name, queue_map):
    """Clear lossless QUEUEs for the interface"""
    # queue_map omitted => remove all buffer queues on the port
    remove_buffer_object_on_port(db, interface_name, queue_map, is_pg=False)
#
# 'cable_length' subcommand
#
@interface.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('length', metavar='<length>', required=True)
@click.pass_context
def cable_length(ctx, interface_name, length):
    """Set interface cable length"""
    config_db = ctx.obj["config_db"]

    if not is_dynamic_buffer_enabled(config_db):
        ctx.fail("This command can only be supported on a system with dynamic buffer enabled")

    # Check whether port is legal
    ports = config_db.get_entry("PORT", interface_name)
    if not ports:
        ctx.fail("Port {} doesn't exist".format(interface_name))

    # Validate the "<num>m" suffix explicitly instead of via assert (asserts
    # are stripped under -O); the old message also called .format() with no
    # placeholder, which was a no-op and is dropped here.
    if not length.endswith("m"):
        ctx.fail("Invalid cable length. Should be in format <num>m, like 300m")

    keys = config_db.get_keys("CABLE_LENGTH")
    # Guard against a missing CABLE_LENGTH table (previously an IndexError)
    if not keys:
        ctx.fail("CABLE_LENGTH table doesn't exist")
    # Update only this port's length in the (single) cable-length profile
    config_db.mod_entry("CABLE_LENGTH", keys[0], {interface_name: length})
#
# 'transceiver' subgroup ('config interface transceiver ...')
#
@interface.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def transceiver(ctx):
    """SFP transceiver configuration"""
    # Container for 'config interface transceiver lpmode/reset' subcommands.
    pass
#
# 'lpmode' subcommand ('config interface transceiver lpmode ...')
#
@transceiver.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('state', metavar='(enable|disable)', type=click.Choice(['enable', 'disable']))
@click.pass_context
def lpmode(ctx, interface_name, state):
    """Enable/disable low-power mode for SFP transceiver module"""
    # CONFIG_DB connector prepared by the parent 'interface' group
    config_db = ctx.obj['config_db']
    # Translate alias to SONiC interface name when alias naming mode is active
    if clicommon.get_interface_naming_mode() == "alias":
        interface_name = interface_alias_to_name(config_db, interface_name)
        if interface_name is None:
            ctx.fail("'interface_name' is None!")

    if interface_name_is_valid(config_db, interface_name) is False:
        ctx.fail("Interface name is invalid. Please enter a valid interface name!!")

    # sfputil expects on/off rather than enable/disable
    mode = "on" if state == "enable" else "off"
    clicommon.run_command("sudo sfputil lpmode {} {}".format(mode, interface_name))
#
# 'reset' subcommand ('config interface reset ...')
#
@transceiver.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.pass_context
def reset(ctx, interface_name):
    """Reset SFP transceiver module"""
    # CONFIG_DB connector prepared by the parent 'interface' group
    config_db = ctx.obj['config_db']
    # Translate alias to SONiC interface name when alias naming mode is active
    if clicommon.get_interface_naming_mode() == "alias":
        interface_name = interface_alias_to_name(config_db, interface_name)
        if interface_name is None:
            ctx.fail("'interface_name' is None!")

    if interface_name_is_valid(config_db, interface_name) is False:
        ctx.fail("Interface name is invalid. Please enter a valid interface name!!")

    clicommon.run_command("sudo sfputil reset {}".format(interface_name))
#
# 'mpls' subgroup ('config interface mpls ...')
#
@interface.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def mpls(ctx):
    """Add or remove MPLS"""
    # Container for 'config interface mpls add/remove' subcommands.
    pass
#
# 'add' subcommand
#
@mpls.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.pass_context
def add(ctx, interface_name):
    """Add MPLS operation on the interface"""
    config_db = ctx.obj["config_db"]
    # Translate alias to SONiC interface name when alias naming mode is active
    if clicommon.get_interface_naming_mode() == "alias":
        interface_name = interface_alias_to_name(config_db, interface_name)
        if interface_name is None:
            ctx.fail("'interface_name' is None!")
    # The interface must exist and map to a known per-interface table
    table_name = get_interface_table_name(interface_name)
    if not clicommon.is_interface_in_config_db(config_db, interface_name):
        ctx.fail('interface {} doesn`t exist'.format(interface_name))
    if table_name == "":
        ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan]")
    # Flag the interface for MPLS processing
    config_db.set_entry(table_name, interface_name, {"mpls": "enable"})
#
# 'remove' subcommand
#
@mpls.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.pass_context
def remove(ctx, interface_name):
    """Remove MPLS operation from the interface"""
    config_db = ctx.obj["config_db"]
    # Translate alias to SONiC interface name when alias naming mode is active
    if clicommon.get_interface_naming_mode() == "alias":
        interface_name = interface_alias_to_name(config_db, interface_name)
        if interface_name is None:
            ctx.fail("'interface_name' is None!")
    # The interface must exist and map to a known per-interface table
    table_name = get_interface_table_name(interface_name)
    if not clicommon.is_interface_in_config_db(config_db, interface_name):
        ctx.fail('interface {} doesn`t exist'.format(interface_name))
    if table_name == "":
        ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan]")
    # "disable" rather than deleting the field, mirroring the 'add' command
    config_db.set_entry(table_name, interface_name, {"mpls": "disable"})
#
# 'vrf' subgroup ('config interface vrf ...')
#
@interface.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def vrf(ctx):
    """Bind or unbind VRF"""
    # Container for 'config interface vrf bind/unbind' subcommands.
    pass
#
# 'bind' subcommand
#
@vrf.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('vrf_name', metavar='<vrf_name>', required=True)
@click.pass_context
def bind(ctx, interface_name, vrf_name):
    """Bind the interface to VRF"""
    # Get the config_db connector
    config_db = ctx.obj['config_db']
    if clicommon.get_interface_naming_mode() == "alias":
        interface_name = interface_alias_to_name(config_db, interface_name)
        if interface_name is None:
            ctx.fail("'interface_name' is None!")
    table_name = get_interface_table_name(interface_name)
    if table_name == "":
        ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan/Loopback]")
    # Nothing to do if the interface is already bound to the requested VRF
    if is_interface_bind_to_vrf(config_db, interface_name) is True and \
        config_db.get_entry(table_name, interface_name).get('vrf_name') == vrf_name:
        return
    # Clean ip addresses if interface configured.
    # Loop variable renamed from 'ipaddress', which shadowed the imported
    # 'ipaddress' module used elsewhere in this file.
    for ip_addr in get_interface_ipaddresses(config_db, interface_name):
        remove_router_interface_ip_address(config_db, interface_name, ip_addr)
    config_db.set_entry(table_name, interface_name, None)
    # When config_db del entry and then add entry with same key, the DEL will lost.
    if ctx.obj['namespace'] is DEFAULT_NAMESPACE:
        state_db = SonicV2Connector(use_unix_socket_path=True)
    else:
        state_db = SonicV2Connector(use_unix_socket_path=True, namespace=ctx.obj['namespace'])
    state_db.connect(state_db.STATE_DB, False)
    _hash = '{}{}'.format('INTERFACE_TABLE|', interface_name)
    # Wait until the interface disappears from STATE_DB before re-adding it.
    # NOTE(review): this wait is unbounded -- if the entry never disappears
    # the command hangs; consider adding a timeout.
    while state_db.exists(state_db.STATE_DB, _hash):
        time.sleep(0.01)
    state_db.close(state_db.STATE_DB)
    config_db.set_entry(table_name, interface_name, {"vrf_name": vrf_name})
#
# 'unbind' subcommand
#
@vrf.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.pass_context
def unbind(ctx, interface_name):
    """Unbind the interface to VRF"""
    # Get the config_db connector
    config_db = ctx.obj['config_db']
    if clicommon.get_interface_naming_mode() == "alias":
        interface_name = interface_alias_to_name(config_db, interface_name)
        if interface_name is None:
            ctx.fail("interface is None!")
    table_name = get_interface_table_name(interface_name)
    if table_name == "":
        ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan/Loopback]")
    # Already unbound: nothing to do
    if is_interface_bind_to_vrf(config_db, interface_name) is False:
        return
    # Remove all IP addresses first. Loop variable renamed from 'ipaddress',
    # which shadowed the imported 'ipaddress' module used elsewhere in this file.
    for ip_addr in get_interface_ipaddresses(config_db, interface_name):
        remove_router_interface_ip_address(config_db, interface_name, ip_addr)
    # Drop the interface row (and with it the vrf_name binding)
    config_db.set_entry(table_name, interface_name, None)
#
# 'ipv6' subgroup ('config interface ipv6 ...')
#
@interface.group()
@click.pass_context
def ipv6(ctx):
    """Enable or Disable IPv6 processing on interface"""
    # Container for 'config interface ipv6 enable/disable ...' subcommands.
    pass
@ipv6.group('enable')
def enable():
    """Enable IPv6 processing on interface"""
    # Container for 'config interface ipv6 enable use-link-local-only'.
    pass
@ipv6.group('disable')
def disable():
    """Disable IPv6 processing on interface"""
    # Container for 'config interface ipv6 disable use-link-local-only'.
    # (Fixed help-text typo: "Disble" -> "Disable".)
    pass
#
# 'config interface ipv6 enable use-link-local-only <interface-name>'
#
@enable.command('use-link-local-only')
@click.pass_context
@click.argument('interface_name', metavar='<interface_name>', required=True)
def enable_use_link_local_only(ctx, interface_name):
    """Enable IPv6 link local address on interface"""
    # This command opens its own CONFIG_DB connection (it is invoked as a
    # leaf of the parameterless 'enable' group).
    config_db = ConfigDBConnector()
    config_db.connect()
    ctx.obj = {'config_db': config_db}
    db = ctx.obj["config_db"]
    if clicommon.get_interface_naming_mode() == "alias":
        interface_name = interface_alias_to_name(db, interface_name)
        if interface_name is None:
            ctx.fail("'interface_name' is None!")

    # Derive the CONFIG_DB table from the interface name prefix
    if interface_name.startswith("Ethernet"):
        interface_type = "INTERFACE"
    elif interface_name.startswith("PortChannel"):
        interface_type = "PORTCHANNEL_INTERFACE"
    elif interface_name.startswith("Vlan"):
        interface_type = "VLAN_INTERFACE"
    else:
        ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan]")

    # Verify the interface actually exists
    if interface_type in ("INTERFACE", "PORTCHANNEL_INTERFACE"):
        if interface_name_is_valid(db, interface_name) is False:
            ctx.fail("Interface name %s is invalid. Please enter a valid interface name!!" %(interface_name))
    if interface_type == "VLAN_INTERFACE":
        if not clicommon.is_valid_vlan_interface(db, interface_name):
            ctx.fail("Interface name %s is invalid. Please enter a valid interface name!!" %(interface_name))

    # Link-local mode cannot be configured on portchannel or vlan members
    if interface_is_in_portchannel(db.get_table('PORTCHANNEL_MEMBER'), interface_name):
        ctx.fail("{} is configured as a member of portchannel. Cannot configure the IPv6 link local mode!"
                 .format(interface_name))
    if interface_is_in_vlan(db.get_table('VLAN_MEMBER'), interface_name):
        ctx.fail("{} is configured as a member of vlan. Cannot configure the IPv6 link local mode!"
                 .format(interface_name))

    set_ipv6_link_local_only_on_interface(db, db.get_table(interface_type), interface_type, interface_name, "enable")
#
# 'config interface ipv6 disable use-link-local-only <interface-name>'
#
@disable.command('use-link-local-only')
@click.pass_context
@click.argument('interface_name', metavar='<interface_name>', required=True)
def disable_use_link_local_only(ctx, interface_name):
    """Disable IPv6 link local address on interface"""
    # This command opens its own CONFIG_DB connection (it is invoked as a
    # leaf of the parameterless 'disable' group).
    config_db = ConfigDBConnector()
    config_db.connect()
    ctx.obj = {'config_db': config_db}
    db = ctx.obj["config_db"]
    if clicommon.get_interface_naming_mode() == "alias":
        interface_name = interface_alias_to_name(db, interface_name)
        if interface_name is None:
            ctx.fail("'interface_name' is None!")

    # Derive the CONFIG_DB table from the interface name prefix
    if interface_name.startswith("Ethernet"):
        interface_type = "INTERFACE"
    elif interface_name.startswith("PortChannel"):
        interface_type = "PORTCHANNEL_INTERFACE"
    elif interface_name.startswith("Vlan"):
        interface_type = "VLAN_INTERFACE"
    else:
        ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan]")

    # Verify the interface actually exists
    if interface_type in ("INTERFACE", "PORTCHANNEL_INTERFACE"):
        if interface_name_is_valid(db, interface_name) is False:
            ctx.fail("Interface name %s is invalid. Please enter a valid interface name!!" %(interface_name))
    if interface_type == "VLAN_INTERFACE":
        if not clicommon.is_valid_vlan_interface(db, interface_name):
            ctx.fail("Interface name %s is invalid. Please enter a valid interface name!!" %(interface_name))

    # Link-local mode cannot be configured on portchannel or vlan members
    if interface_is_in_portchannel(db.get_table('PORTCHANNEL_MEMBER'), interface_name):
        ctx.fail("{} is configured as a member of portchannel. Cannot configure the IPv6 link local mode!"
                 .format(interface_name))
    if interface_is_in_vlan(db.get_table('VLAN_MEMBER'), interface_name):
        ctx.fail("{} is configured as a member of vlan. Cannot configure the IPv6 link local mode!"
                 .format(interface_name))

    set_ipv6_link_local_only_on_interface(db, db.get_table(interface_type), interface_type, interface_name, "disable")
#
# 'vrf' group ('config vrf ...')
#
@config.group(cls=clicommon.AbbreviationGroup, name='vrf')
@click.pass_context
def vrf(ctx):
    """VRF-related configuration tasks"""
    # This group opens its own CONFIG_DB connection (it is a top-level
    # 'config vrf ...' group, so it does not inherit one from a parent).
    config_db = ConfigDBConnector()
    config_db.connect()
    ctx.obj = {}
    ctx.obj['config_db'] = config_db
@vrf.command('add')
@click.argument('vrf_name', metavar='<vrf_name>', required=True)
@click.pass_context
def add_vrf(ctx, vrf_name):
    """Add vrf"""
    config_db = ctx.obj['config_db']
    # Only "Vrf*" data VRFs and the management VRF names are accepted
    is_mgmt_vrf = vrf_name in ('mgmt', 'management')
    if not vrf_name.startswith("Vrf") and not is_mgmt_vrf:
        ctx.fail("'vrf_name' is not start with Vrf, mgmt or management!")
    # Names longer than 15 characters are rejected
    if len(vrf_name) > 15:
        ctx.fail("'vrf_name' is too long!")
    if is_mgmt_vrf:
        vrf_add_management_vrf(config_db)
    else:
        config_db.set_entry('VRF', vrf_name, {"NULL": "NULL"})
@vrf.command('del')
@click.argument('vrf_name', metavar='<vrf_name>', required=True)
@click.pass_context
def del_vrf(ctx, vrf_name):
    """Del vrf"""
    config_db = ctx.obj['config_db']
    # Only "Vrf*" data VRFs and the management VRF names are accepted
    is_mgmt_vrf = vrf_name in ('mgmt', 'management')
    if not vrf_name.startswith("Vrf") and not is_mgmt_vrf:
        ctx.fail("'vrf_name' is not start with Vrf, mgmt or management!")
    # Names longer than 15 characters are rejected
    if len(vrf_name) > 15:
        ctx.fail("'vrf_name' is too long!")
    if is_mgmt_vrf:
        vrf_delete_management_vrf(config_db)
    else:
        # Unbind all member interfaces before dropping the VRF itself
        del_interface_bind_to_vrf(config_db, vrf_name)
        config_db.set_entry('VRF', vrf_name, None)
@vrf.command('add_vrf_vni_map')
@click.argument('vrfname', metavar='<vrf-name>', required=True, type=str)
@click.argument('vni', metavar='<vni>', required=True)
@click.pass_context
def add_vrf_vni_map(ctx, vrfname, vni):
    # Map a VNI to an existing VRF. The VNI must already be mapped to a VLAN
    # (VXLAN_TUNNEL_MAP) and must not already be bound to any VRF.
    config_db = ctx.obj['config_db']
    if vrfname not in config_db.get_table('VRF').keys():
        ctx.fail("vrf {} doesnt exists".format(vrfname))
    if not vni.isdigit():
        ctx.fail("Invalid VNI {}. Only valid VNI is accepted".format(vni))
    if clicommon.vni_id_is_valid(int(vni)) is False:
        ctx.fail("Invalid VNI {}. Valid range [1 to 16777215].".format(vni))

    # A VLAN-to-VNI mapping must exist before the VRF mapping is allowed
    vxlan_table = config_db.get_table('VXLAN_TUNNEL_MAP')
    if not any(entry['vni'] == vni for entry in vxlan_table.values()):
        ctx.fail("VLAN VNI not mapped. Please create VLAN VNI map entry first")

    # Reject the VNI if any VRF already carries it
    vrf_table = config_db.get_table('VRF')
    for vrf_key, vrf_entry in vrf_table.items():
        if vrf_entry.get('vni') == vni:
            ctx.fail("VNI already mapped to vrf {}".format(vrf_key))

    config_db.mod_entry('VRF', vrfname, {"vni": vni})
@vrf.command('del_vrf_vni_map')
@click.argument('vrfname', metavar='<vrf-name>', required=True, type=str)
@click.pass_context
def del_vrf_vni_map(ctx, vrfname):
    # Detach the VRF from its VNI; writing vni=0 clears the mapping.
    config_db = ctx.obj['config_db']
    if vrfname not in config_db.get_table('VRF').keys():
        ctx.fail("vrf {} doesnt exists".format(vrfname))
    config_db.mod_entry('VRF', vrfname, {"vni": 0})
#
# 'route' group ('config route ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def route(ctx):
    """route-related configuration tasks"""
    # Share a single ConfigDB handle with the route subcommands.
    db = ConfigDBConnector()
    db.connect()
    ctx.obj = {'config_db': db}
@route.command('add', context_settings={"ignore_unknown_options": True})
@click.argument('command_str', metavar='prefix [vrf <vrf_name>] <A.B.C.D/M> nexthop <[vrf <vrf_name>] <A.B.C.D>>|<dev <dev_name>>', nargs=-1, type=click.Path())
@click.pass_context
def add_route(ctx, command_str):
    """Add route command

    Parses the free-form arguments into a STATIC_ROUTE key/value pair and
    writes it to CONFIG_DB.  If an entry for the same prefix already
    exists, this command's nexthop information is appended to the existing
    comma-separated field lists (ECMP).
    """
    config_db = ctx.obj['config_db']
    # cli_sroute_to_config() validates the syntax and returns the table
    # key plus a dict that may carry 'nexthop', 'nexthop-vrf', 'ifname'.
    key, route = cli_sroute_to_config(ctx, command_str)
    # If defined intf name, check if it belongs to interface
    if 'ifname' in route:
        if (not route['ifname'] in config_db.get_keys('VLAN_INTERFACE') and
            not route['ifname'] in config_db.get_keys('INTERFACE') and
            not route['ifname'] in config_db.get_keys('PORTCHANNEL_INTERFACE') and
            not route['ifname'] == 'null'):
            ctx.fail('interface {} doesn`t exist'.format(route['ifname']))
    # Number of nexthops supplied in this single invocation.
    entry_counter = 1
    if 'nexthop' in route:
        entry_counter = len(route['nexthop'].split(','))
    # Alignment in case the command contains several nexthop ip
    # Every per-nexthop field list must end up with the same number of
    # comma-separated elements, so pad each field accordingly.
    for i in range(entry_counter):
        if 'nexthop-vrf' in route:
            if i > 0:
                # Repeat the single given nexthop VRF for each nexthop.
                vrf = route['nexthop-vrf'].split(',')[0]
                route['nexthop-vrf'] += ',' + vrf
        else:
            route['nexthop-vrf'] = ''
        if not 'nexthop' in route:
            route['nexthop'] = ''
        if 'ifname' in route:
            if i > 0:
                route['ifname'] += ','
        else:
            route['ifname'] = ''
        # Set default values for distance and blackhole because the command doesn't have such an option
        if 'distance' in route:
            route['distance'] += ',0'
        else:
            route['distance'] = '0'
        if 'blackhole' in route:
            route['blackhole'] += ',false'
        else:
            # If the user configure with "ifname" as "null", set 'blackhole' attribute as true.
            if 'ifname' in route and route['ifname'] == 'null':
                route['blackhole'] = 'true'
            else:
                route['blackhole'] = 'false'
    # Check if exist entry with key
    keys = config_db.get_keys('STATIC_ROUTE')
    if key in keys:
        # If exist update current entry
        current_entry = config_db.get_entry('STATIC_ROUTE', key)
        # Append this command's values to each field list, inserting empty
        # placeholders so all lists stay index-aligned.
        for entry in ['nexthop', 'nexthop-vrf', 'ifname', 'distance', 'blackhole']:
            if not entry in current_entry:
                current_entry[entry] = ''
            if entry in route:
                current_entry[entry] += ',' + route[entry]
            else:
                current_entry[entry] += ','
        config_db.set_entry("STATIC_ROUTE", key, current_entry)
    else:
        config_db.set_entry("STATIC_ROUTE", key, route)
@route.command('del', context_settings={"ignore_unknown_options": True})
@click.argument('command_str', metavar='prefix [vrf <vrf_name>] <A.B.C.D/M> nexthop <[vrf <vrf_name>] <A.B.C.D>>|<dev <dev_name>>', nargs=-1, type=click.Path())
@click.pass_context
def del_route(ctx, command_str):
    """Del route command

    Removes one nexthop from a STATIC_ROUTE entry, or the whole entry
    when no nexthop/interface was specified or the last nexthop is being
    removed.
    """
    config_db = ctx.obj['config_db']
    key, route = cli_sroute_to_config(ctx, command_str, strict_nh=False)
    keys = config_db.get_keys('STATIC_ROUTE')
    # The key may be stored either as a plain string or as a tuple
    # (e.g. vrf|prefix), so check both representations.
    prefix_tuple = tuple(key.split('|'))
    if not key in keys and not prefix_tuple in keys:
        ctx.fail('Route {} doesnt exist'.format(key))
    else:
        # If not defined nexthop or intf name remove entire route
        if not 'nexthop' in route and not 'ifname' in route:
            config_db.set_entry("STATIC_ROUTE", key, None)
            return
        current_entry = config_db.get_entry('STATIC_ROUTE', key)
        # Per-nexthop field lists; each defaults to one empty element so
        # the zip below stays aligned even when a field is absent.
        nh = ['']
        nh_vrf = ['']
        ifname = ['']
        distance = ['']
        blackhole = ['']
        if 'nexthop' in current_entry:
            nh = current_entry['nexthop'].split(',')
        if 'nexthop-vrf' in current_entry:
            nh_vrf = current_entry['nexthop-vrf'].split(',')
        if 'ifname' in current_entry:
            ifname = current_entry['ifname'].split(',')
        if 'distance' in current_entry:
            distance = current_entry['distance'].split(',')
        if 'blackhole' in current_entry:
            blackhole = current_entry['blackhole'].split(',')
        # Zip data from config_db into tuples
        # {'nexthop': '10.0.0.2,20.0.0.2', 'vrf-nexthop': ',Vrf-RED', 'ifname': ','}
        # [('10.0.0.2', '', ''), ('20.0.0.2', 'Vrf-RED', '')]
        nh_zip = list(itertools.zip_longest(nh, nh_vrf, ifname, fillvalue=''))
        cli_tuple = ()
        # Create tuple from CLI argument
        # config route add prefix 1.4.3.4/32 nexthop vrf Vrf-RED 20.0.0.2
        # ('20.0.0.2', 'Vrf-RED', '')
        for entry in ['nexthop', 'nexthop-vrf', 'ifname']:
            if entry in route:
                cli_tuple += (route[entry],)
            else:
                cli_tuple += ('',)
        if cli_tuple in nh_zip:
            # If cli tuple is in config_db find its index and delete from lists
            # The length guards protect against ragged lists in the DB.
            idx = nh_zip.index(cli_tuple)
            if len(nh) - 1 >= idx:
                del nh[idx]
            if len(nh_vrf) - 1 >= idx:
                del nh_vrf[idx]
            if len(ifname) - 1 >= idx:
                del ifname[idx]
            if len(distance) - 1 >= idx:
                del distance[idx]
            if len(blackhole) - 1 >= idx:
                del blackhole[idx]
        else:
            ctx.fail('Not found {} in {}'.format(cli_tuple, key))
        if (len(nh) == 0 or (len(nh) == 1 and nh[0] == '')) and \
           (len(ifname) == 0 or (len(ifname) == 1 and ifname[0] == '')):
            # If there are no nexthop and ifname fields in the current record, delete it
            config_db.set_entry("STATIC_ROUTE", key, None)
        else:
            # Otherwise it still has ECMP nexthop or ifname fields, so compose it from the lists into db
            current_entry['nexthop'] = ','.join((str(e)) for e in nh)
            current_entry['nexthop-vrf'] = ','.join((str(e)) for e in nh_vrf)
            current_entry['ifname'] = ','.join((str(e)) for e in ifname)
            current_entry['distance'] = ','.join((str(e)) for e in distance)
            current_entry['blackhole'] = ','.join((str(e)) for e in blackhole)
            config_db.set_entry("STATIC_ROUTE", key, current_entry)
#
# 'acl' group ('config acl ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
def acl():
    """ACL-related configuration tasks"""
    # Container group only; functionality lives in the subgroups below.
    pass
#
# 'add' subgroup ('config acl add ...')
#
@acl.group(cls=clicommon.AbbreviationGroup)
def add():
    """
    Add ACL configuration.
    """
    # Container group only; see the "table" subcommand.
    pass
def get_acl_bound_ports():
    """Return the interfaces eligible for ACL binding.

    Portchannels are bindable, their member ports are not; all remaining
    front-panel ports are bindable.
    """
    config_db = ConfigDBConnector()
    config_db.connect()

    bindable = set()
    lag_members = set()
    # PORTCHANNEL_MEMBER keys are (portchannel, member-port) tuples.
    for lag_name, member_port in config_db.get_table("PORTCHANNEL_MEMBER"):
        bindable.add(lag_name)
        lag_members.add(member_port)

    bindable.update(p for p in config_db.get_table("PORT") if p not in lag_members)
    return list(bindable)
def expand_vlan_ports(port_name):
    """Expand a VLAN interface into its member ports.

    Returns the VLAN's member ports when *port_name* is a VLAN, otherwise
    a single-element list containing *port_name* itself.  Raises
    ValueError for a VLAN with no members.
    """
    config_db = ConfigDBConnector()
    config_db.connect()

    if port_name not in config_db.get_keys("VLAN"):
        return [port_name]

    # VLAN_MEMBER keys are (vlan, member-port) tuples.
    members = [member for vlan, member in config_db.get_keys("VLAN_MEMBER")
               if vlan == port_name]
    if not members:
        raise ValueError("Cannot bind empty VLAN {}".format(port_name))
    return members
def parse_acl_table_info(table_name, table_type, description, ports, stage):
    """Build and validate the ACL_TABLE entry for *table_name*.

    Args:
        table_name: Name of the ACL table (used as default description).
        table_type: ACL table type string.
        description: Optional policy description; defaults to table_name.
        ports: Comma-separated interface list, or None to bind to all
            eligible ports.  VLAN names are expanded to their members.
        stage: "ingress" or "egress".

    Returns:
        Dict suitable for config_db.set_entry("ACL_TABLE", ...).

    Raises:
        ValueError: on an empty port list or a port that cannot carry
            an ACL binding.
    """
    table_info = {"type": table_type}
    table_info["policy_desc"] = description if description else table_name

    # An explicitly supplied but empty port list (e.g. --ports "") is an
    # error; ports=None means "bind to all eligible ports".
    # (Idiom fix: compare to None with "is not", not "!=".)
    if not ports and ports is not None:
        raise ValueError("Cannot bind empty list of ports")

    valid_acl_ports = get_acl_bound_ports()
    if ports:
        port_list = []
        for port in ports.split(","):
            port_list += expand_vlan_ports(port)
        port_list = list(set(port_list))  # convert to set first to remove duplicate ifaces
    else:
        port_list = valid_acl_ports

    for port in port_list:
        if port not in valid_acl_ports:
            raise ValueError("Cannot bind ACL to specified port {}".format(port))

    table_info["ports"] = port_list
    table_info["stage"] = stage
    return table_info
#
# 'table' subcommand ('config acl add table ...')
#
@add.command()
@click.argument("table_name", metavar="<table_name>")
@click.argument("table_type", metavar="<table_type>")
@click.option("-d", "--description")
@click.option("-p", "--ports")
@click.option("-s", "--stage", type=click.Choice(["ingress", "egress"]), default="ingress")
@click.pass_context
def table(ctx, table_name, table_type, description, ports, stage):
    """
    Add ACL table
    """
    db = ConfigDBConnector()
    db.connect()

    # All validation happens in the parser; surface problems as CLI errors.
    try:
        table_info = parse_acl_table_info(table_name, table_type, description, ports, stage)
    except ValueError as err:
        ctx.fail("Failed to parse ACL table config: exception={}".format(err))

    db.set_entry("ACL_TABLE", table_name, table_info)
#
# 'remove' subgroup ('config acl remove ...')
#
@acl.group(cls=clicommon.AbbreviationGroup)
def remove():
    """
    Remove ACL configuration.
    """
    # Container group only; see the "table" subcommand.
    pass
#
# 'table' subcommand ('config acl remove table ...')
#
@remove.command()
@click.argument("table_name", metavar="<table_name>")
def table(table_name):
    """
    Remove ACL table
    """
    db = ConfigDBConnector()
    db.connect()
    # Setting the entry value to None deletes it from CONFIG_DB.
    db.set_entry("ACL_TABLE", table_name, None)
#
# 'acl update' group
#
@acl.group(cls=clicommon.AbbreviationGroup)
def update():
    """ACL-related configuration tasks"""
    # Container group only; see "full" and "incremental" subcommands.
    pass
#
# 'full' subcommand
#
@update.command()
@click.argument('file_name', required=True)
def full(file_name):
    """Full update of ACL rules configuration."""
    # Delegate the actual work to the acl-loader utility.
    log.log_info("'acl update full {}' executing...".format(file_name))
    cmd = "acl-loader update full {}".format(file_name)
    clicommon.run_command(cmd)
#
# 'incremental' subcommand
#
@update.command()
@click.argument('file_name', required=True)
def incremental(file_name):
    """Incremental update of ACL rule configuration."""
    # Delegate the actual work to the acl-loader utility.
    log.log_info("'acl update incremental {}' executing...".format(file_name))
    cmd = "acl-loader update incremental {}".format(file_name)
    clicommon.run_command(cmd)
#
# 'dropcounters' group ('config dropcounters ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
def dropcounters():
    """Drop counter related configuration tasks"""
    # Container group only; subcommands wrap the dropconfig utility.
    pass
#
# 'install' subcommand ('config dropcounters install')
#
@dropcounters.command()
@click.argument("counter_name", type=str, required=True)
@click.argument("counter_type", type=str, required=True)
@click.argument("reasons", type=str, required=True)
@click.option("-a", "--alias", type=str, help="Alias for this counter")
@click.option("-g", "--group", type=str, help="Group for this counter")
@click.option("-d", "--desc", type=str, help="Description for this counter")
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def install(counter_name, alias, group, counter_type, desc, reasons, verbose):
    """Install a new drop counter"""
    # Mandatory fields first, then any optional metadata that was given.
    cmd = "dropconfig -c install -n '{}' -t '{}' -r '{}'".format(counter_name, counter_type, reasons)
    if alias:
        cmd += " -a '{}'".format(alias)
    if group:
        cmd += " -g '{}'".format(group)
    if desc:
        cmd += " -d '{}'".format(desc)
    clicommon.run_command(cmd, display_cmd=verbose)
#
# 'delete' subcommand ('config dropcounters delete')
#
@dropcounters.command()
@click.argument("counter_name", type=str, required=True)
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def delete(counter_name, verbose):
    """Delete an existing drop counter"""
    # dropconfig takes care of validating that the counter exists.
    cmd = "dropconfig -c uninstall -n {}".format(counter_name)
    clicommon.run_command(cmd, display_cmd=verbose)
#
# 'add_reasons' subcommand ('config dropcounters add_reasons')
#
@dropcounters.command('add-reasons')
@click.argument("counter_name", type=str, required=True)
@click.argument("reasons", type=str, required=True)
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def add_reasons(counter_name, reasons, verbose):
    """Add reasons to an existing drop counter"""
    # *reasons* is a comma-separated list forwarded verbatim.
    cmd = "dropconfig -c add -n {} -r {}".format(counter_name, reasons)
    clicommon.run_command(cmd, display_cmd=verbose)
#
# 'remove_reasons' subcommand ('config dropcounters remove_reasons')
#
@dropcounters.command('remove-reasons')
@click.argument("counter_name", type=str, required=True)
@click.argument("reasons", type=str, required=True)
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def remove_reasons(counter_name, reasons, verbose):
    """Remove reasons from an existing drop counter"""
    # *reasons* is a comma-separated list forwarded verbatim.
    cmd = "dropconfig -c remove -n {} -r {}".format(counter_name, reasons)
    clicommon.run_command(cmd, display_cmd=verbose)
#
# 'ecn' command ('config ecn ...')
#
@config.command()
@click.option('-profile', metavar='<profile_name>', type=str, required=True, help="Profile name")
@click.option('-rmax', metavar='<red threshold max>', type=int, help="Set red max threshold")
@click.option('-rmin', metavar='<red threshold min>', type=int, help="Set red min threshold")
@click.option('-ymax', metavar='<yellow threshold max>', type=int, help="Set yellow max threshold")
@click.option('-ymin', metavar='<yellow threshold min>', type=int, help="Set yellow min threshold")
@click.option('-gmax', metavar='<green threshold max>', type=int, help="Set green max threshold")
@click.option('-gmin', metavar='<green threshold min>', type=int, help="Set green min threshold")
@click.option('-rdrop', metavar='<red drop probability>', type=click.IntRange(0, 100), help="Set red drop probability")
@click.option('-ydrop', metavar='<yellow drop probability>', type=click.IntRange(0, 100), help="Set yellow drop probability")
@click.option('-gdrop', metavar='<green drop probability>', type=click.IntRange(0, 100), help="Set green drop probability")
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def ecn(profile, rmax, rmin, ymax, ymin, gmax, gmin, rdrop, ydrop, gdrop, verbose):
    """ECN-related configuration tasks"""
    log.log_info("'ecn -profile {}' executing...".format(profile))
    # Forward only the thresholds/probabilities the user actually supplied
    # to the ecnconfig utility.
    command = "ecnconfig -p %s" % profile
    if rmax is not None:
        command += " -rmax %d" % rmax
    if rmin is not None:
        command += " -rmin %d" % rmin
    if ymax is not None:
        command += " -ymax %d" % ymax
    if ymin is not None:
        command += " -ymin %d" % ymin
    if gmax is not None:
        command += " -gmax %d" % gmax
    if gmin is not None:
        command += " -gmin %d" % gmin
    if rdrop is not None:
        command += " -rdrop %d" % rdrop
    if ydrop is not None:
        command += " -ydrop %d" % ydrop
    if gdrop is not None:
        command += " -gdrop %d" % gdrop
    if verbose:
        command += " -vv"
    clicommon.run_command(command, display_cmd=verbose)
#
# 'pfc' group ('config interface pfc ...')
#
@interface.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def pfc(ctx):
    """Set PFC configuration."""
    # Container group only; see "asymmetric" and "priority" subcommands.
    pass
#
# 'pfc asymmetric' ('config interface pfc asymmetric ...')
#
@pfc.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('status', type=click.Choice(['on', 'off']))
@click.pass_context
def asymmetric(ctx, interface_name, status):
    """Set asymmetric PFC configuration."""
    config_db = ctx.obj['config_db']

    # Translate a vendor alias to the SONiC interface name when needed.
    if clicommon.get_interface_naming_mode() == "alias":
        interface_name = interface_alias_to_name(config_db, interface_name)
        if interface_name is None:
            ctx.fail("'interface_name' is None!")

    clicommon.run_command("pfc config asymmetric {0} {1}".format(status, interface_name))
#
# 'pfc priority' command ('config interface pfc priority ...')
#
@pfc.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('priority', type=click.Choice([str(x) for x in range(8)]))
@click.argument('status', type=click.Choice(['on', 'off']))
@click.pass_context
def priority(ctx, interface_name, priority, status):
    """Set PFC priority configuration."""
    config_db = ctx.obj['config_db']

    # Translate a vendor alias to the SONiC interface name when needed.
    if clicommon.get_interface_naming_mode() == "alias":
        interface_name = interface_alias_to_name(config_db, interface_name)
        if interface_name is None:
            ctx.fail("'interface_name' is None!")

    clicommon.run_command("pfc config priority {0} {1} {2}".format(status, interface_name, priority))
#
# 'buffer' group ('config buffer ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def buffer(ctx):
    """Configure buffer_profile"""
    # These commands only make sense with the dynamic buffer model.
    db = ConfigDBConnector()
    db.connect()
    if not is_dynamic_buffer_enabled(db):
        ctx.fail("This command can only be supported on a system with dynamic buffer enabled")
@buffer.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def profile(ctx):
    """Configure buffer profile"""
    # Container group only; see "add", "set" and "remove" subcommands.
    pass
@profile.command('add')
@click.argument('profile', metavar='<profile>', required=True)
@click.option('--xon', metavar='<xon>', type=int, help="Set xon threshold")
@click.option('--xoff', metavar='<xoff>', type=int, help="Set xoff threshold")
@click.option('--size', metavar='<size>', type=int, help="Set reserved size size")
@click.option('--dynamic_th', metavar='<dynamic_th>', type=str, help="Set dynamic threshold")
@click.option('--pool', metavar='<pool>', type=str, help="Buffer pool")
@clicommon.pass_db
def add_profile(db, profile, xon, xoff, size, dynamic_th, pool):
    """Add or modify a buffer profile"""
    config_db = db.cfgdb
    ctx = click.get_current_context()

    # Refuse to overwrite an existing profile; "profile set" is for that.
    if config_db.get_entry('BUFFER_PROFILE', profile):
        ctx.fail("Profile {} already exist".format(profile))

    update_profile(ctx, config_db, profile, xon, xoff, size, dynamic_th, pool)
@profile.command('set')
@click.argument('profile', metavar='<profile>', required=True)
@click.option('--xon', metavar='<xon>', type=int, help="Set xon threshold")
@click.option('--xoff', metavar='<xoff>', type=int, help="Set xoff threshold")
@click.option('--size', metavar='<size>', type=int, help="Set reserved size size")
@click.option('--dynamic_th', metavar='<dynamic_th>', type=str, help="Set dynamic threshold")
@click.option('--pool', metavar='<pool>', type=str, help="Buffer pool")
@clicommon.pass_db
def set_profile(db, profile, xon, xoff, size, dynamic_th, pool):
    """Add or modify a buffer profile"""
    config_db = db.cfgdb
    ctx = click.get_current_context()

    profile_entry = config_db.get_entry('BUFFER_PROFILE', profile)
    if not profile_entry:
        ctx.fail("Profile {} doesn't exist".format(profile))

    # A dynamically calculated profile (no stored xoff) must not be turned
    # into a statically sized one.
    if xoff and 'xoff' not in profile_entry.keys():
        ctx.fail("Can't change profile {} from dynamically calculating headroom to non-dynamically one".format(profile))

    update_profile(ctx, config_db, profile, xon, xoff, size, dynamic_th, pool, profile_entry)
def _is_shared_headroom_pool_enabled(ctx, config_db):
    """Return True when the shared headroom pool (SHP) is in use.

    SHP is considered enabled when the ingress lossless pool carries an
    'xoff' size, or when a non-zero over-subscribe ratio is configured.
    """
    if 'xoff' in config_db.get_entry('BUFFER_POOL', 'ingress_lossless_pool'):
        return True

    default_lossless_param_table = config_db.get_table('DEFAULT_LOSSLESS_BUFFER_PARAMETER')
    if not default_lossless_param_table:
        ctx.fail("Dynamic buffer calculation is enabled while no entry found in DEFAULT_LOSSLESS_BUFFER_PARAMETER table")

    # There should be a single entry; inspect its over-subscribe ratio.
    first_entry = list(default_lossless_param_table.values())[0]
    ratio = first_entry.get('over_subscribe_ratio')
    return bool(ratio and ratio != '0')
def update_profile(ctx, config_db, profile_name, xon, xoff, size, dynamic_th, pool, profile_entry = None):
    """Create or update a BUFFER_PROFILE entry.

    Shared by "buffer profile add" and "buffer profile set".  When
    profile_entry is given its fields serve as defaults and are overridden
    by any explicitly supplied option.  A profile without any size
    information (xon/xoff/size) becomes a dynamically calculated one;
    otherwise missing xoff/size are derived from the provided fields.
    """
    params = {}
    if profile_entry:
        params = profile_entry
    shp_enabled = _is_shared_headroom_pool_enabled(ctx, config_db)
    if not pool:
        pool = 'ingress_lossless_pool'
    params['pool'] = pool
    if not config_db.get_entry('BUFFER_POOL', pool):
        ctx.fail("Pool {} doesn't exist".format(pool))
    # For each size-related field: take the CLI value if given, otherwise
    # fall back to the value already stored in the profile (if any).
    if xon:
        params['xon'] = xon
    else:
        xon = params.get('xon')
    if xoff:
        params['xoff'] = xoff
    else:
        xoff = params.get('xoff')
    if size:
        params['size'] = size
    else:
        size = params.get('size')
    # No size information at all means the headroom will be dynamically
    # calculated by the buffer manager.
    dynamic_calculate = False if (xon or xoff or size) else True
    if dynamic_calculate:
        params['headroom_type'] = 'dynamic'
        if not dynamic_th:
            ctx.fail("Either size information (xon, xoff, size) or dynamic_th needs to be provided")
        params['dynamic_th'] = dynamic_th
    else:
        if not xon:
            ctx.fail("Xon is mandatory for non-dynamic profile")
        if not xoff:
            if shp_enabled:
                ctx.fail("Shared headroom pool is enabled, xoff is mandatory for non-dynamic profile")
            elif not size:
                ctx.fail("Neither xoff nor size is provided")
            else:
                # Derive xoff from the reserved size and xon.
                xoff_number = int(size) - int(xon)
                if xoff_number <= 0:
                    ctx.fail("The xoff must be greater than 0 while we got {} (calculated by: size {} - xon {})".format(xoff_number, size, xon))
                params['xoff'] = str(xoff_number)
        if not size:
            # With SHP only xon is reserved; otherwise reserve xon + xoff.
            if shp_enabled:
                size = int(xon)
            else:
                size = int(xon) + int(xoff)
            params['size'] = size
    if dynamic_th:
        params['dynamic_th'] = dynamic_th
    elif not params.get('dynamic_th'):
        # Fetch all the keys of default_lossless_buffer_parameter table
        # and then get the default_dynamic_th from that entry (should be only one)
        keys = config_db.get_keys('DEFAULT_LOSSLESS_BUFFER_PARAMETER')
        if len(keys) != 1:
            ctx.fail("Multiple entries are found in DEFAULT_LOSSLESS_BUFFER_PARAMETER while no dynamic_th specified")
        default_lossless_param = config_db.get_entry('DEFAULT_LOSSLESS_BUFFER_PARAMETER', keys[0])
        if 'default_dynamic_th' in default_lossless_param:
            params['dynamic_th'] = default_lossless_param['default_dynamic_th']
        else:
            ctx.fail("No dynamic_th defined in DEFAULT_LOSSLESS_BUFFER_PARAMETER")
    config_db.set_entry("BUFFER_PROFILE", (profile_name), params)
@profile.command('remove')
@click.argument('profile', metavar='<profile>', required=True)
@clicommon.pass_db
def remove_profile(db, profile):
    """Delete a buffer profile"""
    config_db = db.cfgdb
    ctx = click.get_current_context()

    # A profile that is still referenced by any BUFFER_PG row must stay.
    for (port, pg), fields in config_db.get_table("BUFFER_PG").items():
        referenced_profile = fields.get('profile')
        if referenced_profile and referenced_profile == profile:
            ctx.fail("Profile {} is referenced by {}|{} and can't be removed".format(profile, port, pg))

    if config_db.get_entry("BUFFER_PROFILE", profile):
        config_db.set_entry("BUFFER_PROFILE", profile, None)
    else:
        ctx.fail("Profile {} doesn't exist".format(profile))
@buffer.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def shared_headroom_pool(ctx):
    """Configure buffer shared headroom pool"""
    # Container group only; see "over_subscribe_ratio" and "size".
    pass
@shared_headroom_pool.command()
@click.argument('ratio', metavar='<ratio>', type=int, required=True)
@clicommon.pass_db
def over_subscribe_ratio(db, ratio):
    """Configure over subscribe ratio"""
    config_db = db.cfgdb
    ctx = click.get_current_context()

    # The ratio is bounded by the number of ports on the system.
    port_number = len(config_db.get_table('PORT'))
    if ratio < 0 or ratio > port_number:
        ctx.fail("Invalid over-subscribe-ratio value {}. It should be in range [0, {}]".format(ratio, port_number))

    default_lossless_param = config_db.get_table("DEFAULT_LOSSLESS_BUFFER_PARAMETER")
    seen_entry = False
    for key, fields in default_lossless_param.items():
        # Only a single entry is expected; abort once a second one shows up.
        if seen_entry:
            ctx.fail("More than one item in DEFAULT_LOSSLESS_BUFFER_PARAMETER table. Only the first one is updated")
        seen_entry = True
        if ratio == 0:
            # Ratio 0 disables the shared headroom pool.
            fields.pop("over_subscribe_ratio", None)
        else:
            fields["over_subscribe_ratio"] = ratio
        config_db.set_entry("DEFAULT_LOSSLESS_BUFFER_PARAMETER", key, fields)
@shared_headroom_pool.command()
@click.argument('size', metavar='<size>', type=int, required=True)
@clicommon.pass_db
def size(db, size):
    """Configure shared headroom pool size"""
    config_db = db.cfgdb
    state_db = db.db
    ctx = click.get_current_context()

    # The pool size cannot exceed the ASIC's total buffer (mmu) size,
    # which is published into STATE_DB.
    buffer_max_params = state_db.get_all(state_db.STATE_DB, 'BUFFER_MAX_PARAM_TABLE|global')
    if buffer_max_params:
        mmu_size = buffer_max_params.get('mmu_size')
        if mmu_size and int(mmu_size) < size:
            ctx.fail("Shared headroom pool must be less than mmu size ({})".format(mmu_size))

    ingress_lossless_pool = config_db.get_entry("BUFFER_POOL", "ingress_lossless_pool")
    if size == 0:
        # Size 0 disables SHP: drop the xoff field entirely.
        ingress_lossless_pool.pop("xoff", None)
    else:
        ingress_lossless_pool["xoff"] = size
    config_db.set_entry("BUFFER_POOL", "ingress_lossless_pool", ingress_lossless_pool)
#
# 'platform' group ('config platform ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
def platform():
    """Platform-related configuration tasks"""
    # Container group only; see the "firmware" subgroup.
# 'firmware' subgroup ("config platform firmware ...")
@platform.group(cls=clicommon.AbbreviationGroup)
def firmware():
    """Firmware configuration tasks"""
    # Container group only; subcommands delegate to the fwutil utility.
    pass
# 'install' subcommand ("config platform firmware install")
@firmware.command(
    context_settings=dict(
        ignore_unknown_options=True,
        allow_extra_args=True
    ),
    add_help_option=False
)
@click.argument('args', nargs=-1, type=click.UNPROCESSED)
def install(args):
    """Install platform firmware"""
    # All option parsing is delegated to fwutil; propagate its exit code.
    full_cmd = "fwutil install {}".format(" ".join(args))
    try:
        subprocess.check_call(full_cmd, shell=True)
    except subprocess.CalledProcessError as err:
        sys.exit(err.returncode)
# 'update' subcommand ("config platform firmware update")
@firmware.command(
    context_settings=dict(
        ignore_unknown_options=True,
        allow_extra_args=True
    ),
    add_help_option=False
)
@click.argument('args', nargs=-1, type=click.UNPROCESSED)
def update(args):
    """Update platform firmware"""
    # All option parsing is delegated to fwutil; propagate its exit code.
    full_cmd = "fwutil update {}".format(" ".join(args))
    try:
        subprocess.check_call(full_cmd, shell=True)
    except subprocess.CalledProcessError as err:
        sys.exit(err.returncode)
#
# 'watermark' group ("show watermark telemetry interval")
#
@config.group(cls=clicommon.AbbreviationGroup)
def watermark():
    """Configure watermark """
    # Container group only; see the "telemetry" subgroup.
    pass
@watermark.group(cls=clicommon.AbbreviationGroup)
def telemetry():
    """Configure watermark telemetry"""
    # Container group only; see the "interval" subcommand.
    pass
@telemetry.command()
@click.argument('interval', required=True)
def interval(interval):
    """Configure watermark telemetry interval"""
    # Delegate the change to the watermarkcfg utility.
    cmd = 'watermarkcfg --config-interval ' + interval
    clicommon.run_command(cmd)
#
# 'interface_naming_mode' subgroup ('config interface_naming_mode ...')
#
@config.group(cls=clicommon.AbbreviationGroup, name='interface_naming_mode')
def interface_naming_mode():
    """Modify interface naming mode for interacting with SONiC CLI"""
    # Container group only; see the "default" and "alias" subcommands.
    pass
@interface_naming_mode.command('default')
def naming_mode_default():
    """Set CLI interface naming mode to DEFAULT (SONiC port name)"""
    # Persist the choice through the shared helper.
    set_interface_naming_mode('default')
@interface_naming_mode.command('alias')
def naming_mode_alias():
    """Set CLI interface naming mode to ALIAS (Vendor port alias)"""
    # Persist the choice through the shared helper.
    set_interface_naming_mode('alias')
def is_loopback_name_valid(loopback_name):
    """Return True iff *loopback_name* is a valid loopback interface name.

    A valid name is the configured prefix followed by a numeric suffix no
    greater than the maximum loopback id, within the total length limit.
    """
    prefix = loopback_name[:CFG_LOOPBACK_PREFIX_LEN]
    suffix = loopback_name[CFG_LOOPBACK_PREFIX_LEN:]
    if prefix != CFG_LOOPBACK_PREFIX:
        return False
    # Short-circuit keeps int() from seeing a non-numeric suffix.
    if not suffix.isdigit() or int(suffix) > CFG_LOOPBACK_ID_MAX_VAL:
        return False
    return len(loopback_name) <= CFG_LOOPBACK_NAME_TOTAL_LEN_MAX
#
# 'loopback' group ('config loopback ...')
#
@config.group()
@click.pass_context
@click.option('-s', '--redis-unix-socket-path', help='unix socket path for redis connection')
def loopback(ctx, redis_unix_socket_path):
    """Loopback-related configuration tasks"""
    # Honor an explicit redis unix socket when one was supplied.
    kwargs = {'unix_socket_path': redis_unix_socket_path} if redis_unix_socket_path else {}
    config_db = ConfigDBConnector(**kwargs)
    config_db.connect(wait_for_init=False)
    ctx.obj = {'db': config_db}
@loopback.command('add')
@click.argument('loopback_name', metavar='<loopback_name>', required=True)
@click.pass_context
def add_loopback(ctx, loopback_name):
    # Create a new loopback interface after validating its name.
    config_db = ctx.obj['db']
    if is_loopback_name_valid(loopback_name) is False:
        ctx.fail("{} is invalid, name should have prefix '{}' and suffix '{}' "
                 .format(loopback_name, CFG_LOOPBACK_PREFIX, CFG_LOOPBACK_NO))

    # Table keys are interface names (str) and (name, ip) tuples; only
    # the plain names identify existing loopbacks.
    existing = [k for k in config_db.get_table('LOOPBACK_INTERFACE') if not isinstance(k, tuple)]
    if loopback_name in existing:
        ctx.fail("{} already exists".format(loopback_name))

    config_db.set_entry('LOOPBACK_INTERFACE', loopback_name, {"NULL" : "NULL"})
@loopback.command('del')
@click.argument('loopback_name', metavar='<loopback_name>', required=True)
@click.pass_context
def del_loopback(ctx, loopback_name):
    # Remove a loopback interface together with all of its IP addresses.
    config_db = ctx.obj['db']
    if is_loopback_name_valid(loopback_name) is False:
        ctx.fail("{} is invalid, name should have prefix '{}' and suffix '{}' "
                 .format(loopback_name, CFG_LOOPBACK_PREFIX, CFG_LOOPBACK_NO))

    lo_config_db = config_db.get_table('LOOPBACK_INTERFACE')
    names = [k for k in lo_config_db if not isinstance(k, tuple)]
    if loopback_name not in names:
        ctx.fail("{} does not exists".format(loopback_name))

    # Delete the (name, ip) rows first, then the interface row itself.
    for entry_key in lo_config_db:
        if isinstance(entry_key, tuple) and entry_key[0] == loopback_name:
            config_db.set_entry('LOOPBACK_INTERFACE', entry_key, None)
    config_db.set_entry('LOOPBACK_INTERFACE', loopback_name, None)
@config.group(cls=clicommon.AbbreviationGroup)
def ztp():
    """ Configure Zero Touch Provisioning """
    # ZTP is optional: bail out when it is not installed, or when we lack
    # the privileges to manage it.
    if not os.path.isfile('/usr/bin/ztp'):
        exit("ZTP feature unavailable in this image version")
    if os.geteuid() != 0:
        exit("Root privileges are required for this operation")
@ztp.command()
@click.option('-y', '--yes', is_flag=True, callback=_abort_if_false,
              expose_value=False, prompt='ZTP will be restarted. You may lose switch data and connectivity, continue?')
@click.argument('run', required=False, type=click.Choice(["run"]))
def run(run):
    """Restart ZTP of the device."""
    # -y skips the ztp utility's own confirmation prompt.
    clicommon.run_command("ztp run -y", display_cmd=True)
@ztp.command()
@click.option('-y', '--yes', is_flag=True, callback=_abort_if_false,
              expose_value=False, prompt='Active ZTP session will be stopped and disabled, continue?')
@click.argument('disable', required=False, type=click.Choice(["disable"]))
def disable(disable):
    """Administratively Disable ZTP."""
    # -y skips the ztp utility's own confirmation prompt.
    clicommon.run_command("ztp disable -y", display_cmd=True)
@ztp.command()
@click.argument('enable', required=False, type=click.Choice(["enable"]))
def enable(enable):
    """Administratively Enable ZTP."""
    # Enabling is non-destructive, so no confirmation prompt is needed.
    clicommon.run_command("ztp enable", display_cmd=True)
#
# 'syslog' group ('config syslog ...')
#
@config.group(cls=clicommon.AbbreviationGroup, name='syslog')
@click.pass_context
def syslog_group(ctx):
    """Syslog server configuration tasks"""
    # Share a single ConfigDB handle with the subcommands.
    db = ConfigDBConnector()
    db.connect()
    ctx.obj = {'db': db}
@syslog_group.command('add')
@click.argument('syslog_ip_address', metavar='<syslog_ip_address>', required=True)
@click.pass_context
def add_syslog_server(ctx, syslog_ip_address):
    """ Add syslog server IP """
    if not clicommon.is_ipaddress(syslog_ip_address):
        ctx.fail('Invalid ip address')
    db = ctx.obj['db']

    # Adding an already-configured server is a no-op, not an error.
    if syslog_ip_address in db.get_table("SYSLOG_SERVER"):
        click.echo("Syslog server {} is already configured".format(syslog_ip_address))
        return

    db.set_entry('SYSLOG_SERVER', syslog_ip_address, {'NULL': 'NULL'})
    click.echo("Syslog server {} added to configuration".format(syslog_ip_address))
    # Apply the new configuration to rsyslog.
    try:
        click.echo("Restarting rsyslog-config service...")
        clicommon.run_command("systemctl restart rsyslog-config", display_cmd=False)
    except SystemExit as e:
        ctx.fail("Restart service rsyslog-config failed with error {}".format(e))
@syslog_group.command('del')
@click.argument('syslog_ip_address', metavar='<syslog_ip_address>', required=True)
@click.pass_context
def del_syslog_server(ctx, syslog_ip_address):
    """ Delete syslog server IP """
    if not clicommon.is_ipaddress(syslog_ip_address):
        ctx.fail('Invalid IP address')
    db = ctx.obj['db']

    # Guard clause: removing an unknown server is an error.
    if syslog_ip_address not in db.get_table("SYSLOG_SERVER"):
        ctx.fail("Syslog server {} is not configured.".format(syslog_ip_address))

    db.set_entry('SYSLOG_SERVER', '{}'.format(syslog_ip_address), None)
    click.echo("Syslog server {} removed from configuration".format(syslog_ip_address))
    # Apply the updated configuration to rsyslog.
    try:
        click.echo("Restarting rsyslog-config service...")
        clicommon.run_command("systemctl restart rsyslog-config", display_cmd=False)
    except SystemExit as e:
        ctx.fail("Restart service rsyslog-config failed with error {}".format(e))
#
# 'ntp' group ('config ntp ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def ntp(ctx):
    """NTP server configuration tasks"""
    # Share one connected ConfigDB handle with all subcommands via ctx.obj.
    config_db = ConfigDBConnector()
    config_db.connect()
    ctx.obj = {'db': config_db}
@ntp.command('add')
@click.argument('ntp_ip_address', metavar='<ntp_ip_address>', required=True)
@click.pass_context
def add_ntp_server(ctx, ntp_ip_address):
    """ Add NTP server IP """
    if not clicommon.is_ipaddress(ntp_ip_address):
        ctx.fail('Invalid ip address')
    db = ctx.obj['db']
    ntp_servers = db.get_table("NTP_SERVER")
    if ntp_ip_address in ntp_servers:
        click.echo("NTP server {} is already configured".format(ntp_ip_address))
        return
    else:
        # Key-only table: the NULL/NULL field records the key's presence.
        db.set_entry('NTP_SERVER', ntp_ip_address, {'NULL': 'NULL'})
        click.echo("NTP server {} added to configuration".format(ntp_ip_address))
        try:
            # Restart so the new server takes effect immediately.
            click.echo("Restarting ntp-config service...")
            clicommon.run_command("systemctl restart ntp-config", display_cmd=False)
        except SystemExit as e:
            ctx.fail("Restart service ntp-config failed with error {}".format(e))
@ntp.command('del')
@click.argument('ntp_ip_address', metavar='<ntp_ip_address>', required=True)
@click.pass_context
def del_ntp_server(ctx, ntp_ip_address):
    """ Delete NTP server IP """
    if not clicommon.is_ipaddress(ntp_ip_address):
        ctx.fail('Invalid IP address')
    db = ctx.obj['db']
    ntp_servers = db.get_table("NTP_SERVER")
    if ntp_ip_address in ntp_servers:
        # Passing None to set_entry deletes the key from CONFIG_DB.
        db.set_entry('NTP_SERVER', '{}'.format(ntp_ip_address), None)
        click.echo("NTP server {} removed from configuration".format(ntp_ip_address))
    else:
        ctx.fail("NTP server {} is not configured.".format(ntp_ip_address))
    # Only reached on successful deletion (ctx.fail raises SystemExit).
    try:
        click.echo("Restarting ntp-config service...")
        clicommon.run_command("systemctl restart ntp-config", display_cmd=False)
    except SystemExit as e:
        ctx.fail("Restart service ntp-config failed with error {}".format(e))
#
# 'sflow' group ('config sflow ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def sflow(ctx):
    """sFlow-related configuration tasks"""
    # Share one connected ConfigDB handle with all subcommands via ctx.obj.
    config_db = ConfigDBConnector()
    config_db.connect()
    ctx.obj = {'db': config_db}
#
# 'sflow' command ('config sflow enable')
#
@sflow.command()
@click.pass_context
def enable(ctx):
    """Enable sFlow"""
    config_db = ctx.obj['db']
    sflow_tbl = config_db.get_table('SFLOW')

    if not sflow_tbl:
        sflow_tbl = {'global': {'admin_state': 'up'}}
    else:
        sflow_tbl['global']['admin_state'] = 'up'

    config_db.mod_entry('SFLOW', 'global', sflow_tbl['global'])

    try:
        proc = subprocess.Popen("systemctl is-active sflow", shell=True, text=True, stdout=subprocess.PIPE)
        (out, err) = proc.communicate()
    except SystemExit as e:
        ctx.fail("Unable to check sflow status {}".format(e))

    # systemctl output ends with a newline; strip it before comparing,
    # otherwise the comparison is always unequal and the service is
    # (re)enabled/started even when it is already active.
    if out.strip() != "active":
        log.log_info("sflow service is not enabled. Starting sflow docker...")
        clicommon.run_command("sudo systemctl enable sflow")
        clicommon.run_command("sudo systemctl start sflow")
#
# 'sflow' command ('config sflow disable')
#
@sflow.command()
@click.pass_context
def disable(ctx):
    """Disable sFlow"""
    config_db = ctx.obj['db']
    sflow_tbl = config_db.get_table('SFLOW')

    # Create the global entry on first use, otherwise just flip admin_state.
    if not sflow_tbl:
        sflow_tbl = {'global': {'admin_state': 'down'}}
    else:
        sflow_tbl['global']['admin_state'] = 'down'

    config_db.mod_entry('SFLOW', 'global', sflow_tbl['global'])
#
# 'sflow' command ('config sflow polling-interval ...')
#
@sflow.command('polling-interval')
@click.argument('interval', metavar='<polling_interval>', required=True,
                type=int)
@click.pass_context
def polling_int(ctx, interval):
    """Set polling-interval for counter-sampling (0 to disable)"""
    if interval not in range(5, 301) and interval != 0:
        click.echo("Polling interval must be between 5-300 (0 to disable)")
        # Bail out: previously this fell through and wrote the
        # out-of-range value to CONFIG_DB anyway.
        return

    config_db = ctx.obj['db']
    sflow_tbl = config_db.get_table('SFLOW')

    # Create the global entry on first use.
    if not sflow_tbl:
        sflow_tbl = {'global': {'admin_state': 'down'}}

    sflow_tbl['global']['polling_interval'] = interval
    config_db.mod_entry('SFLOW', 'global', sflow_tbl['global'])
def is_valid_sample_rate(rate):
    """Return True iff *rate* is a decimal string in [256, 8388608]."""
    if not rate.isdigit():
        return False
    return 256 <= int(rate) <= 8388608
#
# 'sflow interface' group
#
@sflow.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def interface(ctx):
    """Configure sFlow settings for an interface"""
    # Group container only; ctx.obj (the db handle) is inherited from `sflow`.
    pass
#
# 'sflow' command ('config sflow interface enable ...')
#
@interface.command()
@click.argument('ifname', metavar='<interface_name>', required=True, type=str)
@click.pass_context
def enable(ctx, ifname):
    """Enable sFlow on an interface ('all' for every interface)."""
    config_db = ctx.obj['db']
    if not interface_name_is_valid(config_db, ifname) and ifname != 'all':
        click.echo("Invalid interface name")
        return

    intf_dict = config_db.get_table('SFLOW_SESSION')

    # Update an existing session entry, or create a minimal one.
    if intf_dict and ifname in intf_dict:
        intf_dict[ifname]['admin_state'] = 'up'
        config_db.mod_entry('SFLOW_SESSION', ifname, intf_dict[ifname])
    else:
        config_db.mod_entry('SFLOW_SESSION', ifname, {'admin_state': 'up'})
#
# 'sflow' command ('config sflow interface disable ...')
#
@interface.command()
@click.argument('ifname', metavar='<interface_name>', required=True, type=str)
@click.pass_context
def disable(ctx, ifname):
    """Disable sFlow on an interface ('all' for every interface)."""
    config_db = ctx.obj['db']
    if not interface_name_is_valid(config_db, ifname) and ifname != 'all':
        click.echo("Invalid interface name")
        return

    intf_dict = config_db.get_table('SFLOW_SESSION')

    # Update an existing session entry, or create a minimal one.
    if intf_dict and ifname in intf_dict:
        intf_dict[ifname]['admin_state'] = 'down'
        config_db.mod_entry('SFLOW_SESSION', ifname, intf_dict[ifname])
    else:
        config_db.mod_entry('SFLOW_SESSION', ifname,
                            {'admin_state': 'down'})
#
# 'sflow' command ('config sflow interface sample-rate ...')
#
@interface.command('sample-rate')
@click.argument('ifname', metavar='<interface_name>', required=True, type=str)
@click.argument('rate', metavar='<sample_rate>', required=True, type=str)
@click.pass_context
def sample_rate(ctx, ifname, rate):
    """Set the sFlow sample rate for an interface ('default' clears it)."""
    config_db = ctx.obj['db']
    if not interface_name_is_valid(config_db, ifname) and ifname != 'all':
        click.echo('Invalid interface name')
        return
    if not is_valid_sample_rate(rate) and rate != 'default':
        click.echo('Error: Sample rate must be between 256 and 8388608 or default')
        return

    sess_dict = config_db.get_table('SFLOW_SESSION')

    if sess_dict and ifname in sess_dict.keys():
        if rate == 'default':
            # Nothing to clear if no explicit rate was set.
            if 'sample_rate' not in sess_dict[ifname]:
                return
            # Remove the field and rewrite the entry without it.
            del sess_dict[ifname]['sample_rate']
            config_db.set_entry('SFLOW_SESSION', ifname, sess_dict[ifname])
            return
        sess_dict[ifname]['sample_rate'] = rate
        config_db.mod_entry('SFLOW_SESSION', ifname, sess_dict[ifname])
    else:
        # No existing session: only create one when a non-default rate is given.
        if rate != 'default':
            config_db.mod_entry('SFLOW_SESSION', ifname, {'sample_rate': rate})
#
# 'sflow collector' group
#
@sflow.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def collector(ctx):
    """Add/Delete a sFlow collector"""
    # Group container only; ctx.obj (the db handle) is inherited from `sflow`.
    pass
def is_valid_collector_info(name, ip, port, vrf_name):
    """Validate sFlow collector parameters; echo the reason and return False
    on the first failed check, True when all checks pass."""
    if len(name) > 16:
        click.echo("Collector name must not exceed 16 characters")
        return False
    if port not in range(0, 65535 + 1):
        click.echo("Collector port number must be between 0 and 65535")
        return False
    if not clicommon.is_ipaddress(ip):
        click.echo("Invalid IP address")
        return False
    if vrf_name != 'default' and vrf_name != 'mgmt':
        click.echo("Only 'default' and 'mgmt' VRF are supported")
        return False
    return True
#
# 'sflow' command ('config sflow collector add ...')
#
@collector.command()
@click.option('--port', required=False, type=int, default=6343,
              help='Collector port number')
@click.option('--vrf', required=False, type=str, default='default',
              help='Collector VRF')
@click.argument('name', metavar='<collector_name>', required=True)
@click.argument('ipaddr', metavar='<IPv4/v6_address>', required=True)
@click.pass_context
def add(ctx, name, ipaddr, port, vrf):
    """Add a sFlow collector"""
    # Normalize the address (relevant for hex digits in IPv6 literals).
    ipaddr = ipaddr.lower()

    if not is_valid_collector_info(name, ipaddr, port, vrf):
        return

    config_db = ctx.obj['db']
    collector_tbl = config_db.get_table('SFLOW_COLLECTOR')

    # At most two collectors may exist; re-adding an existing name is allowed
    # (it overwrites that entry).
    if (collector_tbl and name not in collector_tbl and len(collector_tbl) == 2):
        click.echo("Only 2 collectors can be configured, please delete one")
        return

    config_db.mod_entry('SFLOW_COLLECTOR', name,
                        {"collector_ip": ipaddr, "collector_port": port,
                         "collector_vrf": vrf})
    return
#
# 'sflow' command ('config sflow collector del ...')
#
@collector.command('del')
@click.argument('name', metavar='<collector_name>', required=True)
@click.pass_context
def del_collector(ctx, name):
    """Delete a sFlow collector"""
    config_db = ctx.obj['db']
    collector_tbl = config_db.get_table('SFLOW_COLLECTOR')

    if name not in collector_tbl:
        click.echo("Collector: {} not configured".format(name))
        return

    # Passing None removes the entry from CONFIG_DB.
    config_db.mod_entry('SFLOW_COLLECTOR', name, None)
#
# 'sflow agent-id' group
#
@sflow.group(cls=clicommon.AbbreviationGroup, name='agent-id')
@click.pass_context
def agent_id(ctx):
    """Add/Delete a sFlow agent"""
    # Group container only; ctx.obj (the db handle) is inherited from `sflow`.
    pass
#
# 'sflow' command ('config sflow agent-id add ...')
#
@agent_id.command()
@click.argument('ifname', metavar='<interface_name>', required=True)
@click.pass_context
def add(ctx, ifname):
    """Add sFlow agent information"""
    # The agent interface must exist on the host (checked via netifaces).
    if ifname not in netifaces.interfaces():
        click.echo("Invalid interface name")
        return

    config_db = ctx.obj['db']
    sflow_tbl = config_db.get_table('SFLOW')

    if not sflow_tbl:
        sflow_tbl = {'global': {'admin_state': 'down'}}

    # Only one agent may be configured at a time.
    if 'agent_id' in sflow_tbl['global']:
        click.echo("Agent already configured. Please delete it first.")
        return

    sflow_tbl['global']['agent_id'] = ifname
    config_db.mod_entry('SFLOW', 'global', sflow_tbl['global'])
#
# 'sflow' command ('config sflow agent-id del')
#
@agent_id.command('del')
@click.pass_context
def delete(ctx):
    """Delete sFlow agent information"""
    config_db = ctx.obj['db']
    sflow_tbl = config_db.get_table('SFLOW')

    if not sflow_tbl:
        sflow_tbl = {'global': {'admin_state': 'down'}}

    if 'agent_id' not in sflow_tbl['global']:
        click.echo("sFlow agent not configured.")
        return

    # Drop the field and rewrite the entry without it (set_entry replaces
    # the whole entry, unlike mod_entry which merges).
    sflow_tbl['global'].pop('agent_id')
    config_db.set_entry('SFLOW', 'global', sflow_tbl['global'])
#
# set ipv6 link local mode on a given interface
#
def set_ipv6_link_local_only_on_interface(config_db, interface_dict, interface_type, interface_name, mode):
    """Set or clear ipv6_use_link_local_only on one interface entry.

    Args:
        config_db: connected ConfigDBConnector.
        interface_dict: the interface_type table contents (used to decide
            whether the entry can be deleted entirely on disable).
        interface_type: table name, e.g. 'INTERFACE', 'VLAN_INTERFACE'.
        interface_name: key within the table.
        mode: "enable" or "disable".
    """
    curr_mode = config_db.get_entry(interface_type, interface_name).get('ipv6_use_link_local_only')
    if curr_mode is not None:
        # Already in the requested mode: nothing to do.
        if curr_mode == mode:
            return
    else:
        # Field absent and we are disabling: already effectively disabled.
        if mode == "disable":
            return
    if mode == "enable":
        config_db.mod_entry(interface_type, interface_name, {"ipv6_use_link_local_only": mode})
        return
    # If we are disabling the ipv6 link local on an interface, and if no other interface
    # attributes/ip addresses are configured on the interface, delete the interface from the interface table
    exists = False
    for key in interface_dict.keys():
        if not isinstance(key, tuple):
            if interface_name == key:
                #Interface bound to non-default-vrf do not delete the entry
                if 'vrf_name' in interface_dict[key]:
                    if len(interface_dict[key]['vrf_name']) > 0:
                        exists = True
                        break
            continue
        # Tuple keys are (interface, ip) pairs: an assigned IP means the
        # entry must be kept.
        if interface_name in key:
            exists = True
            break
    if exists:
        config_db.mod_entry(interface_type, interface_name, {"ipv6_use_link_local_only": mode})
    else:
        config_db.set_entry(interface_type, interface_name, None)
#
# 'ipv6' group ('config ipv6 ...')
#
@config.group()
@click.pass_context
def ipv6(ctx):
    """IPv6 configuration"""
    # Group container only; subcommands open their own DB connections.
#
# 'enable' command ('config ipv6 enable ...')
#
@ipv6.group()
@click.pass_context
def enable(ctx):
    """Enable IPv6 on all interfaces """
    # Group container only.
#
# 'link-local' command ('config ipv6 enable link-local')
#
@enable.command('link-local')
@click.pass_context
def enable_link_local(ctx):
    """Enable IPv6 link-local on all interfaces """
    config_db = ConfigDBConnector()
    config_db.connect()

    vlan_member_table = config_db.get_table('VLAN_MEMBER')
    portchannel_member_table = config_db.get_table('PORTCHANNEL_MEMBER')

    mode = "enable"

    # Enable ipv6 link local on VLANs
    vlan_dict = config_db.get_table('VLAN')
    for key in vlan_dict.keys():
        set_ipv6_link_local_only_on_interface(config_db, vlan_dict, 'VLAN_INTERFACE', key, mode)

    # Enable ipv6 link local on PortChannels
    portchannel_dict = config_db.get_table('PORTCHANNEL')
    for key in portchannel_dict.keys():
        # Skip portchannels that are VLAN members; L3 config goes on the VLAN.
        if interface_is_in_vlan(vlan_member_table, key):
            continue
        set_ipv6_link_local_only_on_interface(config_db, portchannel_dict, 'PORTCHANNEL_INTERFACE', key, mode)

    port_dict = config_db.get_table('PORT')
    for key in port_dict.keys():
        # Skip ports already owned by a portchannel or a VLAN.
        if interface_is_in_portchannel(portchannel_member_table, key) or interface_is_in_vlan(vlan_member_table, key):
            continue
        set_ipv6_link_local_only_on_interface(config_db, port_dict, 'INTERFACE', key, mode)
#
# 'disable' command ('config ipv6 disable ...')
#
@ipv6.group()
@click.pass_context
def disable(ctx):
    """Disable IPv6 on all interfaces """
    # Group container only.
#
# 'link-local' command ('config ipv6 disable link-local')
#
@disable.command('link-local')
@click.pass_context
def disable_link_local(ctx):
    """Disable IPv6 link local on all interfaces """
    config_db = ConfigDBConnector()
    config_db.connect()

    mode = "disable"

    tables = ['INTERFACE', 'VLAN_INTERFACE', 'PORTCHANNEL_INTERFACE']

    for table_type in tables:
        table_dict = config_db.get_table(table_type)
        if table_dict:
            for key in table_dict.keys():
                # Tuple keys are (interface, ip) entries, not interfaces.
                if isinstance(key, str) is False:
                    continue
                set_ipv6_link_local_only_on_interface(config_db, table_dict, table_type, key, mode)
#
# 'rate' group ('config rate ...')
#
@config.group()
def rate():
    """Set port rates configuration."""
    # Group container only.
    pass
@rate.command()
@click.argument('interval', metavar='<interval>', type=click.IntRange(min=1, max=1000), required=True)
@click.argument('rates_type', type=click.Choice(['all', 'port', 'rif', 'flowcnt-trap']), default='all')
def smoothing_interval(interval, rates_type):
    """Set rates smoothing interval """
    counters_db = swsssdk.SonicV2Connector()
    counters_db.connect('COUNTERS_DB')

    # Exponential-moving-average weight derived from the interval.
    alpha = 2.0/(interval + 1)

    if rates_type in ['port', 'all']:
        counters_db.set('COUNTERS_DB', 'RATES:PORT', 'PORT_SMOOTH_INTERVAL', interval)
        counters_db.set('COUNTERS_DB', 'RATES:PORT', 'PORT_ALPHA', alpha)
    if rates_type in ['rif', 'all']:
        counters_db.set('COUNTERS_DB', 'RATES:RIF', 'RIF_SMOOTH_INTERVAL', interval)
        counters_db.set('COUNTERS_DB', 'RATES:RIF', 'RIF_ALPHA', alpha)
    if rates_type in ['flowcnt-trap', 'all']:
        counters_db.set('COUNTERS_DB', 'RATES:TRAP', 'TRAP_SMOOTH_INTERVAL', interval)
        counters_db.set('COUNTERS_DB', 'RATES:TRAP', 'TRAP_ALPHA', alpha)
# Load plugins and register them
# Attaches externally-provided CLI plugins onto the `config` group at
# import time.
helper = util_base.UtilHelper()
helper.load_and_register_plugins(plugins, config)
#
# 'subinterface' group ('config subinterface ...')
#
@config.group()
@click.pass_context
@click.option('-s', '--redis-unix-socket-path', help='unix socket path for redis connection')
def subinterface(ctx, redis_unix_socket_path):
    """subinterface-related configuration tasks"""
    # Optionally connect over a unix socket instead of TCP.
    kwargs = {}
    if redis_unix_socket_path:
        kwargs['unix_socket_path'] = redis_unix_socket_path
    config_db = ConfigDBConnector(**kwargs)
    config_db.connect(wait_for_init=False)
    ctx.obj = {'db': config_db}
def subintf_vlan_check(config_db, parent_intf, vlan):
    """Return True when *vlan* is already used as the encap VLAN of another
    subinterface on *parent_intf*, else False."""
    subintf_db = config_db.get_table('VLAN_SUB_INTERFACE')
    # Non-tuple keys are the subinterface names themselves; tuple keys are
    # (name, ip) entries and are skipped.
    subintf_names = [k for k in subintf_db if type(k) != tuple]
    for subintf in subintf_names:
        sub_intf_sep_idx = subintf.find(VLAN_SUB_INTERFACE_SEPARATOR)
        if sub_intf_sep_idx == -1:
            continue
        if parent_intf == subintf[:sub_intf_sep_idx]:
            if 'vlan' in subintf_db[subintf]:
                # Explicit vlan field takes precedence.
                if str(vlan) == subintf_db[subintf]['vlan']:
                    return True
            else:
                # Fall back to the numeric suffix of the subinterface name.
                vlan_id = subintf[sub_intf_sep_idx + 1:]
                if str(vlan) == vlan_id:
                    return True
    return False
@subinterface.command('add')
@click.argument('subinterface_name', metavar='<subinterface_name>', required=True)
@click.argument('vid', metavar='<vid>', required=False, type=click.IntRange(1,4094))
@click.pass_context
def add_subinterface(ctx, subinterface_name, vid):
    """Add a VLAN subinterface on a port or portchannel parent."""
    sub_intf_sep_idx = subinterface_name.find(VLAN_SUB_INTERFACE_SEPARATOR)
    if sub_intf_sep_idx == -1:
        ctx.fail("{} is invalid vlan subinterface".format(subinterface_name))

    interface_alias = subinterface_name[:sub_intf_sep_idx]
    if interface_alias is None:
        ctx.fail("{} invalid subinterface".format(interface_alias))

    if interface_alias.startswith("Po") is True:
        intf_table_name = CFG_PORTCHANNEL_PREFIX
    elif interface_alias.startswith("Eth") is True:
        intf_table_name = 'PORT'
    else:
        # Previously fell through here with intf_table_name unbound,
        # raising NameError below; fail explicitly instead.
        ctx.fail("{} is invalid vlan subinterface".format(subinterface_name))

    config_db = ctx.obj['db']
    port_dict = config_db.get_table(intf_table_name)
    if interface_alias is not None:
        if not port_dict:
            ctx.fail("{} parent interface not found. {} table none".format(interface_alias, intf_table_name))
        if get_intf_longname(interface_alias) not in port_dict.keys():
            ctx.fail("{} parent interface not found".format(subinterface_name))

    # Validate if parent is portchannel member
    portchannel_member_table = config_db.get_table('PORTCHANNEL_MEMBER')
    if interface_is_in_portchannel(portchannel_member_table, interface_alias):
        ctx.fail("{} is configured as a member of portchannel. Cannot configure subinterface"
                 .format(interface_alias))

    # Validate if parent is vlan member
    vlan_member_table = config_db.get_table('VLAN_MEMBER')
    if interface_is_in_vlan(vlan_member_table, interface_alias):
        ctx.fail("{} is configured as a member of vlan. Cannot configure subinterface"
                 .format(interface_alias))

    sub_intfs = [k for k,v in config_db.get_table('VLAN_SUB_INTERFACE').items() if type(k) != tuple]
    if subinterface_name in sub_intfs:
        ctx.fail("{} already exists".format(subinterface_name))

    subintf_dict = {}
    if vid is not None:
        subintf_dict.update({"vlan" : vid})

    # Reject a VLAN encap already used by another subinterface on this parent.
    if subintf_vlan_check(config_db, get_intf_longname(interface_alias), vid) is True:
        ctx.fail("Vlan {} encap already configured on other subinterface on {}".format(vid, interface_alias))

    subintf_dict.update({"admin_status" : "up"})
    config_db.set_entry('VLAN_SUB_INTERFACE', subinterface_name, subintf_dict)
@subinterface.command('del')
@click.argument('subinterface_name', metavar='<subinterface_name>', required=True)
@click.pass_context
def del_subinterface(ctx, subinterface_name):
    """Delete a VLAN subinterface and any IP addresses assigned to it."""
    sub_intf_sep_idx = subinterface_name.find(VLAN_SUB_INTERFACE_SEPARATOR)
    if sub_intf_sep_idx == -1:
        ctx.fail("{} is invalid vlan subinterface".format(subinterface_name))

    config_db = ctx.obj['db']
    #subinterface_name = subintf_get_shortname(subinterface_name)
    if interface_name_is_valid(config_db, subinterface_name) is False:
        ctx.fail("{} is invalid ".format(subinterface_name))

    subintf_config_db = config_db.get_table('VLAN_SUB_INTERFACE')
    sub_intfs = [k for k,v in subintf_config_db.items() if type(k) != tuple]
    if subinterface_name not in sub_intfs:
        ctx.fail("{} does not exists".format(subinterface_name))

    # Remove all IP entries keyed as (subinterface, ip) before removing the
    # subinterface itself.
    ips = {}
    ips = [ k[1] for k in config_db.get_table('VLAN_SUB_INTERFACE') if type(k) == tuple and k[0] == subinterface_name ]
    for ip in ips:
        try:
            # Validate before deletion so malformed DB entries are surfaced.
            ipaddress.ip_network(ip, strict=False)
            config_db.set_entry('VLAN_SUB_INTERFACE', (subinterface_name, ip), None)
        except ValueError:
            ctx.fail("Invalid ip {} found on interface {}".format(ip, subinterface_name))

    # Also clear any stray (subinterface, ip) rows in the INTERFACE table.
    subintf_config_db = config_db.get_table('INTERFACE')
    ips = [ k[1] for k in subintf_config_db if type(k) == tuple and k[0] == subinterface_name ]
    for ip in ips:
        config_db.set_entry('INTERFACE', (subinterface_name, ip), None)

    config_db.set_entry('VLAN_SUB_INTERFACE', subinterface_name, None)
# Entry point when this module is executed directly.
if __name__ == '__main__':
    config()
| 39.296119 | 210 | 0.674804 |
2a3dde9e3bd483a48f160360de3862a25ad2fe6a | 1,015 | py | Python | 2020/Day1/functiondemo1.py | vishipayyallore/learning-python_2021 | e091d47d167ecb510ea0819b64a78bd58c2a42b3 | [
"Apache-2.0"
] | 1 | 2020-09-17T15:06:44.000Z | 2020-09-17T15:06:44.000Z | 2020/Day1/functiondemo1.py | vishipayyallore/learning-python_2020 | e091d47d167ecb510ea0819b64a78bd58c2a42b3 | [
"Apache-2.0"
] | null | null | null | 2020/Day1/functiondemo1.py | vishipayyallore/learning-python_2020 | e091d47d167ecb510ea0819b64a78bd58c2a42b3 | [
"Apache-2.0"
] | null | null | null | # Variables with Global Scope
# Module-level state shared by the functions below via `global`.
name = ''        # employee name
age = 0          # employee age in years
salary = 0.0     # employee salary
is_manager = False  # whether the employee is a manager
def get_employee_details():
    """Prompt on stdin and store the answers in the module-level globals."""
    # using global variables
    global name, age, salary, is_manager
    print("Starting the Function")
    name = input('Enter your name: ')
    age = int(input('Enter your age: '))
    salary = float(input('Enter your Salary: '))
    # "0"/"1" -> int -> bool; any non-zero digit becomes True.
    is_manager = bool(int(input("Enter Manager Status (0/1): ")))
def show_employee_details():
    """Print the employee record held in the module-level globals."""
    print("====================================================")
    print("***************** Employee Details *****************")
    print("====================================================")
    print(f"Name: {name}")
    print(f"Age: {age}")
    print(f"Salary: {salary}")
    print(f"Is Manager: {is_manager}")
    print("----------------------------------------------------")
def main():
    """Read employee details from stdin, then display them."""
    print("Starting the Program")
    get_employee_details()  # Invoking the function
    show_employee_details()  # Invoking the function
# Program execution starts here
# NOTE(review): runs unconditionally on import; a __name__ guard would be safer.
main()
66a87a0dd2a0a3ca61ada7fe59cb7f3d931f694e | 6,597 | py | Python | tests/ut/python/dataset/test_ten_crop.py | ZephyrChenzf/mindspore | 8f191847cf71e12715ced96bc3575914f980127a | [
"Apache-2.0"
] | 1 | 2020-06-17T07:05:45.000Z | 2020-06-17T07:05:45.000Z | tests/ut/python/dataset/test_ten_crop.py | ZephyrChenzf/mindspore | 8f191847cf71e12715ced96bc3575914f980127a | [
"Apache-2.0"
] | null | null | null | tests/ut/python/dataset/test_ten_crop.py | ZephyrChenzf/mindspore | 8f191847cf71e12715ced96bc3575914f980127a | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Testing TenCrop in DE
"""
import pytest
import numpy as np
from util import visualize, save_and_check_md5
import mindspore.dataset as ds
import mindspore.dataset.transforms.vision.py_transforms as vision
from mindspore import log as logger
GENERATE_GOLDEN = False
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
def util_test_ten_crop(crop_size, vertical_flip=False, plot=False):
    """
    Utility function for testing TenCrop. Input arguments are given by other tests
    """
    # Baseline pipeline: decode + ToTensor only.
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transforms_1 = [
        vision.Decode(),
        vision.ToTensor(),
    ]
    transform_1 = vision.ComposeOp(transforms_1)
    data1 = data1.map(input_columns=["image"], operations=transform_1())

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transforms_2 = [
        vision.Decode(),
        vision.TenCrop(crop_size, use_vertical_flip=vertical_flip),
        lambda images: np.stack([vision.ToTensor()(image) for image in images])  # 4D stack of 10 images
    ]
    transform_2 = vision.ComposeOp(transforms_2)
    data2 = data2.map(input_columns=["image"], operations=transform_2())
    num_iter = 0
    for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
        num_iter += 1
        # CHW float [0,1] -> HWC uint8 for visualization.
        image_1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image_2 = item2["image"]
        logger.info("shape of image_1: {}".format(image_1.shape))
        logger.info("shape of image_2: {}".format(image_2.shape))
        logger.info("dtype of image_1: {}".format(image_1.dtype))
        logger.info("dtype of image_2: {}".format(image_2.dtype))
        if plot:
            visualize(np.array([image_1]*10), (image_2 * 255).astype(np.uint8).transpose(0, 2, 3, 1))

        # The output data should be of a 4D tensor shape, a stack of 10 images.
        assert len(image_2.shape) == 4
        assert image_2.shape[0] == 10
def test_ten_crop_op_square(plot=False):
    """
    Tests TenCrop for a square crop
    """
    logger.info("test_ten_crop_op_square")
    # Single int -> square crop of 200x200.
    util_test_ten_crop(200, plot=plot)
def test_ten_crop_op_rectangle(plot=False):
    """
    Tests TenCrop for a rectangle crop
    """
    logger.info("test_ten_crop_op_rectangle")
    # (height, width) tuple -> rectangular crop.
    util_test_ten_crop((200, 150), plot=plot)
def test_ten_crop_op_vertical_flip(plot=False):
    """
    Tests TenCrop with vertical flip set to True
    """
    logger.info("test_ten_crop_op_vertical_flip")
    util_test_ten_crop(200, vertical_flip=True, plot=plot)
def test_ten_crop_md5():
    """
    Tests TenCrops for giving the same results in multiple runs.
    Since TenCrop is a deterministic function, we expect it to return the same result for a specific input every time
    """
    logger.info("test_ten_crop_md5")

    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transforms_2 = [
        vision.Decode(),
        vision.TenCrop((200, 100), use_vertical_flip=True),
        lambda images: np.stack([vision.ToTensor()(image) for image in images])  # 4D stack of 10 images
    ]
    transform_2 = vision.ComposeOp(transforms_2)
    data2 = data2.map(input_columns=["image"], operations=transform_2())
    # Compare with expected md5 from images
    filename = "ten_crop_01_result.npz"
    save_and_check_md5(data2, filename, generate_golden=GENERATE_GOLDEN)
def test_ten_crop_list_size_error_msg():
    """
    Tests TenCrop error message when the size arg has more than 2 elements
    """
    logger.info("test_ten_crop_list_size_error_msg")

    # Constructing TenCrop with a 3-element size list must raise TypeError.
    with pytest.raises(TypeError) as info:
        _ = [
            vision.Decode(),
            vision.TenCrop([200, 200, 200]),
            lambda images: np.stack([vision.ToTensor()(image) for image in images])  # 4D stack of 10 images
        ]
    error_msg = "Size should be a single integer or a list/tuple (h, w) of length 2."
    assert error_msg == str(info.value)
def test_ten_crop_invalid_size_error_msg():
    """
    Tests TenCrop error message when the size arg is not positive
    """
    logger.info("test_ten_crop_invalid_size_error_msg")

    # Zero size is rejected at construction time.
    with pytest.raises(ValueError) as info:
        _ = [
            vision.Decode(),
            vision.TenCrop(0),
            lambda images: np.stack([vision.ToTensor()(image) for image in images])  # 4D stack of 10 images
        ]
    error_msg = "Input is not within the required range"
    assert error_msg == str(info.value)

    # Negative size is rejected with the same message.
    with pytest.raises(ValueError) as info:
        _ = [
            vision.Decode(),
            vision.TenCrop(-10),
            lambda images: np.stack([vision.ToTensor()(image) for image in images])  # 4D stack of 10 images
        ]
    assert error_msg == str(info.value)
def test_ten_crop_wrong_img_error_msg():
    """
    Tests TenCrop error message when the image is not in the correct format.
    """
    logger.info("test_ten_crop_wrong_img_error_msg")

    data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    # TenCrop outputs a tuple of 10 images; feeding that tuple straight into
    # ToTensor (instead of stacking) must fail at iteration time.
    transforms = [
        vision.Decode(),
        vision.TenCrop(200),
        vision.ToTensor()
    ]
    transform = vision.ComposeOp(transforms)
    data = data.map(input_columns=["image"], operations=transform())

    with pytest.raises(RuntimeError) as info:
        data.create_tuple_iterator().get_next()
    error_msg = "TypeError: img should be PIL Image or Numpy array. Got <class 'tuple'>"

    # error msg comes from ToTensor()
    assert error_msg in str(info.value)
# Run every test directly (with plots where supported) when invoked as a script.
if __name__ == "__main__":
    test_ten_crop_op_square(plot=True)
    test_ten_crop_op_rectangle(plot=True)
    test_ten_crop_op_vertical_flip(plot=True)
    test_ten_crop_md5()
    test_ten_crop_list_size_error_msg()
    test_ten_crop_invalid_size_error_msg()
    test_ten_crop_wrong_img_error_msg()
| 34.539267 | 117 | 0.690011 |
a2c9003674ae4f2d8facde3567026305a61228e8 | 3,295 | py | Python | rastervision/command/train_command_config.py | xiaospica/raster-vision | aa98412245fb768762ad2920da0241b56c5ece3d | [
"Apache-2.0"
] | 1 | 2019-07-11T02:32:29.000Z | 2019-07-11T02:32:29.000Z | rastervision/command/train_command_config.py | xiaospica/raster-vision | aa98412245fb768762ad2920da0241b56c5ece3d | [
"Apache-2.0"
] | null | null | null | rastervision/command/train_command_config.py | xiaospica/raster-vision | aa98412245fb768762ad2920da0241b56c5ece3d | [
"Apache-2.0"
] | null | null | null | from copy import deepcopy
import rastervision as rv
from rastervision.command import (TrainCommand, CommandConfig,
CommandConfigBuilder)
from rastervision.protos.command_pb2 \
import CommandConfig as CommandConfigMsg
from rastervision.rv_config import RVConfig
from rastervision.command.utils import (check_task_type, check_backend_type)
class TrainCommandConfig(CommandConfig):
    """Configuration for the TRAIN command: a task plus a training backend."""

    def __init__(self, root_uri, task, backend):
        """Construct a TrainCommandConfig.

        Args:
            root_uri: URI under which the command writes its output.
            task: rv.TaskConfig for the task being trained.
            backend: rv.BackendConfig for the training backend.
        """
        super().__init__(rv.TRAIN, root_uri)
        self.task = task
        self.backend = backend

    def create_command(self, tmp_dir=None):
        """Create the runnable TrainCommand, allocating a temp dir if needed."""
        if not tmp_dir:
            _tmp_dir = RVConfig.get_tmp_dir()
            tmp_dir = _tmp_dir.name
        else:
            _tmp_dir = tmp_dir

        retval = TrainCommand(self)
        # Hand the tmp-dir object (not just the path) to the command so its
        # lifetime is tied to the command.
        retval.set_tmp_dir(_tmp_dir)

        return retval

    def to_proto(self):
        """Serialize this config (base fields + task + backend) to protobuf."""
        msg = super().to_proto()

        task = self.task.to_proto()
        backend = self.backend.to_proto()

        msg.MergeFrom(
            CommandConfigMsg(
                train_config=CommandConfigMsg.TrainConfig(
                    task=task, backend=backend)))

        return msg

    def report_io(self):
        """Collect the input/output URIs declared by the task and backend."""
        io_def = rv.core.CommandIODefinition()
        self.task.report_io(self.command_type, io_def)
        self.backend.report_io(self.command_type, io_def)
        return io_def

    @staticmethod
    def builder():
        """Return a fresh builder for this config type."""
        return TrainCommandConfigBuilder()
class TrainCommandConfigBuilder(CommandConfigBuilder):
    """Immutable-style builder for TrainCommandConfig (each with_* returns a copy)."""

    def __init__(self, prev=None):
        """Initialize, optionally copying task/backend from a previous builder."""
        super().__init__(prev)
        if prev is None:
            self.task = None
            self.backend = None
        else:
            self.task = prev.task
            self.backend = prev.backend

    def validate(self):
        """Raise rv.ConfigError unless task and backend are set and well-typed."""
        super().validate()

        if self.task is None:
            raise rv.ConfigError('Task not set for TrainCommandConfig. Use '
                                 'with_task or with_experiment')
        check_task_type(self.task)

        if self.backend is None:
            raise rv.ConfigError('Backend not set for TrainCommandConfig. Use '
                                 'with_task or with_experiment')
        check_backend_type(self.backend)

    def build(self):
        """Validate and construct the TrainCommandConfig."""
        self.validate()
        return TrainCommandConfig(self.root_uri, self.task, self.backend)

    def from_proto(self, msg):
        """Populate a builder from a serialized CommandConfig message."""
        b = super().from_proto(msg)

        conf = msg.train_config

        task = rv.TaskConfig.from_proto(conf.task)
        backend = rv.BackendConfig.from_proto(conf.backend)

        b = b.with_task(task)
        b = b.with_backend(backend)

        return b

    def get_root_uri(self, experiment_config):
        """TRAIN output goes under the experiment's train_uri."""
        return experiment_config.train_uri

    def with_experiment(self, experiment_config):
        """Derive task and backend from an experiment config."""
        b = super().with_experiment(experiment_config)
        b = b.with_task(experiment_config.task)
        b = b.with_backend(experiment_config.backend)
        return b

    def with_task(self, task):
        """Sets a specific task type.

        Args:
            task: A TaskConfig object.
        """
        b = deepcopy(self)
        b.task = task
        return b

    def with_backend(self, backend):
        """Return a copy of this builder with the given BackendConfig."""
        b = deepcopy(self)
        b.backend = backend
        return b
0592968fd14fb47312963c3fec2a945f78c3c709 | 4,432 | py | Python | bitmovin_api_sdk/models/ignoring.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | [
"MIT"
] | 11 | 2019-07-03T10:41:16.000Z | 2022-02-25T21:48:06.000Z | bitmovin_api_sdk/models/ignoring.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | [
"MIT"
] | 8 | 2019-11-23T00:01:25.000Z | 2021-04-29T12:30:31.000Z | bitmovin_api_sdk/models/ignoring.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | [
"MIT"
] | 13 | 2020-01-02T14:58:18.000Z | 2022-03-26T12:10:30.000Z | # coding: utf-8
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
from bitmovin_api_sdk.models.ignored_by import IgnoredBy
import pprint
import six
class Ignoring(object):
    @poscheck_model
    def __init__(self,
                 ignored_by=None,
                 ignored_by_description=None):
        # type: (IgnoredBy, string_types) -> None
        """Initialize an Ignoring model; both fields are optional."""

        self._ignored_by = None
        self._ignored_by_description = None
        self.discriminator = None  # no polymorphic subtypes for this model

        # Setters perform the type validation.
        if ignored_by is not None:
            self.ignored_by = ignored_by
        if ignored_by_description is not None:
            self.ignored_by_description = ignored_by_description
    @property
    def openapi_types(self):
        # Maps attribute name -> declared OpenAPI type (used by to_dict/serializers).
        types = {
            'ignored_by': 'IgnoredBy',
            'ignored_by_description': 'string_types'
        }

        return types
@property
def attribute_map(self):
attributes = {
'ignored_by': 'ignoredBy',
'ignored_by_description': 'ignoredByDescription'
}
return attributes
@property
def ignored_by(self):
# type: () -> IgnoredBy
"""Gets the ignored_by of this Ignoring.
:return: The ignored_by of this Ignoring.
:rtype: IgnoredBy
"""
return self._ignored_by
@ignored_by.setter
def ignored_by(self, ignored_by):
# type: (IgnoredBy) -> None
"""Sets the ignored_by of this Ignoring.
:param ignored_by: The ignored_by of this Ignoring.
:type: IgnoredBy
"""
if ignored_by is not None:
if not isinstance(ignored_by, IgnoredBy):
raise TypeError("Invalid type for `ignored_by`, type has to be `IgnoredBy`")
self._ignored_by = ignored_by
@property
def ignored_by_description(self):
# type: () -> string_types
"""Gets the ignored_by_description of this Ignoring.
Describes why ignoredBy has been set to its current value.
:return: The ignored_by_description of this Ignoring.
:rtype: string_types
"""
return self._ignored_by_description
@ignored_by_description.setter
def ignored_by_description(self, ignored_by_description):
# type: (string_types) -> None
"""Sets the ignored_by_description of this Ignoring.
Describes why ignoredBy has been set to its current value.
:param ignored_by_description: The ignored_by_description of this Ignoring.
:type: string_types
"""
if ignored_by_description is not None:
if not isinstance(ignored_by_description, string_types):
raise TypeError("Invalid type for `ignored_by_description`, type has to be `string_types`")
self._ignored_by_description = ignored_by_description
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if value is None:
continue
if isinstance(value, list):
if len(value) == 0:
continue
result[self.attribute_map.get(attr)] = [y.value if isinstance(y, Enum) else y for y in [x.to_dict() if hasattr(x, "to_dict") else x for x in value]]
elif hasattr(value, "to_dict"):
result[self.attribute_map.get(attr)] = value.to_dict()
elif isinstance(value, Enum):
result[self.attribute_map.get(attr)] = value.value
elif isinstance(value, dict):
result[self.attribute_map.get(attr)] = {k: (v.to_dict() if hasattr(v, "to_dict") else v) for (k, v) in value.items()}
else:
result[self.attribute_map.get(attr)] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Ignoring):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 31.432624 | 164 | 0.615749 |
b8ab7717a1ed96aee5c6e99c7569fb6ab0a6e811 | 1,120 | py | Python | rllab/torch/algos/advantages.py | NeurIPSPaperSubmission7934/code_submission | 713fce673e8e3ba30b559d4eebe6d3e4891069ed | [
"Apache-2.0"
] | null | null | null | rllab/torch/algos/advantages.py | NeurIPSPaperSubmission7934/code_submission | 713fce673e8e3ba30b559d4eebe6d3e4891069ed | [
"Apache-2.0"
] | null | null | null | rllab/torch/algos/advantages.py | NeurIPSPaperSubmission7934/code_submission | 713fce673e8e3ba30b559d4eebe6d3e4891069ed | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018 Copyright holder of the paper Generative Adversarial Model Learning
# submitted to NeurIPS 2019 for review
# All rights reserved.
def gae(rewards, masks, values, discount, gae_lambda, use_gpu=False):
if use_gpu:
rewards, masks, values = rewards.cpu(), masks.cpu(), values.cpu()
tensor_type = type(rewards)
returns = tensor_type(rewards.size(0), 1)
deltas = tensor_type(rewards.size(0), 1)
advantages = tensor_type(rewards.size(0), 1)
prev_return = 0
prev_value = 0
prev_advantage = 0
for i in reversed(range(rewards.size(0))):
returns[i] = rewards[i] + discount * prev_return * masks[i]
deltas[i] = rewards[i] + discount * prev_value * masks[i] - values[i]
advantages[i] = deltas[i] + discount * gae_lambda * prev_advantage * masks[i]
prev_return = returns[i, 0]
prev_value = values[i, 0]
prev_advantage = advantages[i, 0]
advantages = (advantages - advantages.mean()) / advantages.std()
if use_gpu:
advantages, returns = advantages.cuda(), returns.cuda()
return advantages, returns | 38.62069 | 88 | 0.663393 |
c66a53677bbe7a4b254f58acd63a88c785d8735b | 24,111 | py | Python | demo/forms.py | prafullkotecha/django-material | a81641634893db132d17a4b355b66c3e12bceeff | [
"BSD-3-Clause"
] | null | null | null | demo/forms.py | prafullkotecha/django-material | a81641634893db132d17a4b355b66c3e12bceeff | [
"BSD-3-Clause"
] | null | null | null | demo/forms.py | prafullkotecha/django-material | a81641634893db132d17a4b355b66c3e12bceeff | [
"BSD-3-Clause"
] | 1 | 2021-11-04T15:25:11.000Z | 2021-11-04T15:25:11.000Z | import datetime
from django.template import Template
from material import Layout, Row, Column, Fieldset, Span2, Span3, Span5, Span6, Span10
from . import demo as forms
# (value, label) pairs for country <select> fields; the empty first entry
# acts as the "Country" placeholder. Values are opaque numeric IDs used by
# the demo forms, not ISO country codes.
COUNTRY_CHOICES = (
    ('', 'Country'), (244, 'Aaland Islands'), (1, 'Afghanistan'), (2, 'Albania'), (3, 'Algeria'),
    (4, 'American Samoa'), (5, 'Andorra'), (6, 'Angola'), (7, 'Anguilla'), (8, 'Antarctica'),
    (9, 'Antigua and Barbuda'), (10, 'Argentina'), (11, 'Armenia'), (12, 'Aruba'), (13, 'Australia'),
    (14, 'Austria'), (15, 'Azerbaijan'), (16, 'Bahamas'), (17, 'Bahrain'), (18, 'Bangladesh'),
    (19, 'Barbados'), (20, 'Belarus'), (21, 'Belgium'), (22, 'Belize'), (23, 'Benin'),
    (24, 'Bermuda'), (25, 'Bhutan'), (26, 'Bolivia'), (245, 'Bonaire, Sint Eustatius and Saba'),
    (27, 'Bosnia and Herzegovina'), (28, 'Botswana'), (29, 'Bouvet Island'), (30, 'Brazil'),
    (31, 'British Indian Ocean Territory'), (32, 'Brunei Darussalam'),
    (33, 'Bulgaria'), (34, 'Burkina Faso'), (35, 'Burundi'), (36, 'Cambodia'), (37, 'Cameroon'),
    (38, 'Canada'), (251, 'Canary Islands'), (39, 'Cape Verde'), (40, 'Cayman Islands'), (41, 'Central African Republic'),
    (42, 'Chad'), (43, 'Chile'), (44, 'China'), (45, 'Christmas Island'), (46, 'Cocos (Keeling) Islands'),
    (47, 'Colombia'), (48, 'Comoros'), (49, 'Congo'), (50, 'Cook Islands'), (51, 'Costa Rica'),
    (52, "Cote D'Ivoire"), (53, 'Croatia'), (54, 'Cuba'), (246, 'Curacao'), (55, 'Cyprus'),
    (56, 'Czech Republic'), (237, 'Democratic Republic of Congo'), (57, 'Denmark'), (58, 'Djibouti'), (59, 'Dominica'),
    (60, 'Dominican Republic'), (61, 'East Timor'), (62, 'Ecuador'), (63, 'Egypt'), (64, 'El Salvador'),
    (65, 'Equatorial Guinea'), (66, 'Eritrea'), (67, 'Estonia'), (68, 'Ethiopia'), (69, 'Falkland Islands (Malvinas)'),
    (70, 'Faroe Islands'), (71, 'Fiji'), (72, 'Finland'), (74, 'France, skypolitan'), (75, 'French Guiana'),
    (76, 'French Polynesia'), (77, 'French Southern Territories'), (126, 'FYROM'), (78, 'Gabon'), (79, 'Gambia'),
    (80, 'Georgia'), (81, 'Germany'), (82, 'Ghana'), (83, 'Gibraltar'), (84, 'Greece'),
    (85, 'Greenland'), (86, 'Grenada'), (87, 'Guadeloupe'), (88, 'Guam'), (89, 'Guatemala'),
    (241, 'Guernsey'), (90, 'Guinea'), (91, 'Guinea-Bissau'), (92, 'Guyana'), (93, 'Haiti'),
    (94, 'Heard and Mc Donald Islands'), (95, 'Honduras'), (96, 'Hong Kong'), (97, 'Hungary'), (98, 'Iceland'),
    (99, 'India'), (100, 'Indonesia'), (101, 'Iran (Islamic Republic of)'), (102, 'Iraq'), (103, 'Ireland'),
    (104, 'Israel'), (105, 'Italy'), (106, 'Jamaica'), (107, 'Japan'), (240, 'Jersey'),
    (108, 'Jordan'), (109, 'Kazakhstan'), (110, 'Kenya'), (111, 'Kiribati'), (113, 'Korea, Republic of'),
    (114, 'Kuwait'), (115, 'Kyrgyzstan'), (116, "Lao People's Democratic Republic"), (117, 'Latvia'), (118, 'Lebanon'),
    (119, 'Lesotho'), (120, 'Liberia'), (121, 'Libyan Arab Jamahiriya'), (122, 'Liechtenstein'), (123, 'Lithuania'),
    (124, 'Luxembourg'), (125, 'Macau'), (127, 'Madagascar'), (128, 'Malawi'), (129, 'Malaysia'),
    (130, 'Maldives'), (131, 'Mali'), (132, 'Malta'), (133, 'Marshall Islands'), (134, 'Martinique'),
    (135, 'Mauritania'), (136, 'Mauritius'), (137, 'Mayotte'), (138, 'Mexico'), (139, 'Micronesia, Federated States of'),
    (140, 'Moldova, Republic of'), (141, 'Monaco'), (142, 'Mongolia'), (242, 'Montenegro'), (143, 'Montserrat'),
    (144, 'Morocco'), (145, 'Mozambique'), (146, 'Myanmar'), (147, 'Namibia'), (148, 'Nauru'),
    (149, 'Nepal'), (150, 'Netherlands'), (151, 'Netherlands Antilles'), (152, 'New Caledonia'), (153, 'New Zealand'),
    (154, 'Nicaragua'), (155, 'Niger'), (156, 'Nigeria'), (157, 'Niue'), (158, 'Norfolk Island'),
    (112, 'North Korea'), (159, 'Northern Mariana Islands'), (160, 'Norway'), (161, 'Oman'), (162, 'Pakistan'),
    (163, 'Palau'), (247, 'Palestinian Territory, Occupied'), (164, 'Panama'), (165, 'Papua New Guinea'), (166, 'Paraguay'),
    (167, 'Peru'), (168, 'Philippines'), (169, 'Pitcairn'), (170, 'Poland'), (171, 'Portugal'),
    (172, 'Puerto Rico'), (173, 'Qatar'), (174, 'Reunion'), (175, 'Romania'), (176, 'Russian Federation'),
    (177, 'Rwanda'), (178, 'Saint Kitts and Nevis'), (179, 'Saint Lucia'), (180, 'Saint Vincent and the Grenadines'),
    (181, 'Samoa'), (182, 'San Marino'), (183, 'Sao Tome and Principe'), (184, 'Saudi Arabia'), (185, 'Senegal'),
    (243, 'Serbia'), (186, 'Seychelles'), (187, 'Sierra Leone'), (188, 'Singapore'), (189, 'Slovak Republic'),
    (190, 'Slovenia'), (191, 'Solomon Islands'), (192, 'Somalia'), (193, 'South Africa'),
    (194, 'South Georgia & South Sandwich Islands'), (248, 'South Sudan'), (195, 'Spain'), (196, 'Sri Lanka'),
    (249, 'St. Barthelemy'), (197, 'St. Helena'), (250, 'St. Martin (French part)'), (198, 'St. Pierre and Miquelon'),
    (199, 'Sudan'), (200, 'Suriname'), (201, 'Svalbard and Jan Mayen Islands'), (202, 'Swaziland'),
    (203, 'Sweden'), (204, 'Switzerland'), (205, 'Syrian Arab Republic'), (206, 'Taiwan'), (207, 'Tajikistan'),
    (208, 'Tanzania, United Republic of'), (209, 'Thailand'), (210, 'Togo'), (211, 'Tokelau'), (212, 'Tonga'),
    (213, 'Trinidad and Tobago'), (214, 'Tunisia'), (215, 'Turkey'), (216, 'Turkmenistan'),
    (217, 'Turks and Caicos Islands'), (218, 'Tuvalu'), (219, 'Uganda'), (220, 'Ukraine'), (221, 'United Arab Emirates'),
    (222, 'United Kingdom'), (223, 'United States'), (224, 'United States Minor Outlying Islands'), (225, 'Uruguay'),
    (226, 'Uzbekistan'), (227, 'Vanuatu'), (228, 'Vatican City State (Holy See)'), (229, 'Venezuela'), (230, 'Viet Nam'),
    (231, 'Virgin Islands (British)'), (232, 'Virgin Islands (U.S.)'), (233, 'Wallis and Futuna Islands'),
    (234, 'Western Sahara'), (235, 'Yemen'), (238, 'Zambia'), (239, 'Zimbabwe'),
)

# Yes/no style medical-history checkboxes for the demo questionnaire.
QUESTION_CHOICES = (
    ('X01', 'I have a history of problems with anesthesia'),
    ('X02', 'I smoke'),
    ('X03', 'I have been addicted to recreational drugs'),
    ('X04', 'I weak eye contact lenses or glasses'),
    ('X05', 'I have an implantable devise'),
    ('X06', 'Blood has been donated for this procedure by a family member'),
    ('X07', 'I consume alcohol on a regular basis'),
    ('X08', 'I have teeth and mouth considerations such as loose teeth, caps, bridework, banding, and dentures'),
    ('X09', 'I have a vascular access devise'),
)

# Cardiovascular risk-factor checkbox choices (codes R01-R18).
CARDIOVASCULAR_RISK_CHOICES = (
    ('R01', 'Heart Attack'),
    ('R02', 'Angina'),
    ('R03', 'Congestive Heart Failure'),
    ('R04', 'Previous heart surgery'),
    ('R05', 'Heart Murmur'),
    ('R06', 'Mitral Valve Prolapse'),
    ('R07', 'Internal Defibrillator'),
    ('R08', 'Paralysis'),
    ('R09', 'Kidney Disease'),
    ('R10', 'High Blood Pressure'),
    ('R11', 'Fast or irregular heat beats'),
    ('R12', 'Previous Angiosplasy'),
    ('R13', 'Valvular Heart Disorder'),
    ('R14', 'Aortic Stenosis'),
    ('R15', 'Pacemaker'),
    ('R16', 'Stroke'),
    ('R17', 'Insulin Dependent Diabetes'),
    ('R18', 'Shortness of Breath'),
)

# Sleep-apnea / general risk-factor checkbox choices (codes A01-A34).
APNIA_RISK_CHOICES = (
    ('A01', 'Loud Snoring'),
    ('A02', 'Choking while asleep'),
    ('A03', 'Emphysema'),
    ('A04', 'Pheumonia'),
    ('A05', 'Bleeding Disorder'),
    ('A06', 'Aids or HIV'),
    ('A07', 'Jaundice'),
    ('A08', 'Seizure Disorder'),
    ('A09', 'Thyroid Trouble'),
    ('A10', 'Joint Replacement'),
    ('A11', 'Prostate problems'),
    ('A12', 'Downs Syndrome'),
    ('A13', 'Excessive Daytime Sleepiness'),
    ('A14', 'Diagnsed Sleep Apnea'),
    ('A15', 'Asthma'),
    ('A16', 'TB'),
    ('A17', 'Bruise Easy'),
    ('A18', 'Hepatits'),
    ('A19', 'Hiatal Hernia'),
    ('A20', 'Migraine Headaches'),
    ('A21', 'TMJ (temporomand joint problem)'),
    ('A22', 'Kidney Problems'),
    ('A23', 'Steroid Use'),
    ('A24', 'Witnessed Grasping'),
    ('A25', 'Bronchitis'),
    ('A26', 'Wheezing'),
    ('A27', 'Cystic Fibrosis'),
    ('A28', 'Anemia'),
    ('A29', 'Liver Desease'),
    ('A30', 'Reflus'),
    ('A31', 'Cancer'),
    ('A32', 'Athritis'),
    ('A33', 'Bladder Problems'),
    ('A34', 'Cortisone Use'),
)
class LoginForm(forms.Form):
    """Demo login form: email/password with a "keep me logged in" checkbox.

    ``template`` decorates the rendered fields with Material icons and
    right-aligns the checkbox group; ``buttons`` supplies the footer markup.
    """
    email = forms.EmailField()
    password = forms.CharField(widget=forms.PasswordInput)
    keep_logged = forms.BooleanField(required=False, label="Keep me logged in")

    template = Template("""
    {% form %}
        {% part form.email prefix %}<i class="material-icons prefix">email</i>{% endpart %}
        {% part form.password prefix %}<i class="material-icons prefix">lock</i>{% endpart %}
        {% attr form.keep_logged 'group' class append %}right-align{% endattr %}
    {% endform %}
    """)

    buttons = Template("""
        <button class="waves-effect waves-teal btn-flat">Register</button>
        <button class="waves-effect waves-light btn" type="submit">Login</button>
    """)

    title = "Login form"

    def clean(self):
        # Demo-only rule: reject a hard-coded email to show non-field errors.
        cleaned_data = super(LoginForm, self).clean()
        if cleaned_data.get('email') == 'john@doe.com':
            raise forms.ValidationError('John, come on. You are blocked.')
class RegistrationForm(forms.Form):
    """Demo registration form with account credentials plus personal details.

    Fix: the "Pesonal details" fieldset legend was misspelled; it is a
    user-visible label, corrected to "Personal details".
    """
    username = forms.CharField()
    email = forms.EmailField(label="Email Address")
    password = forms.CharField(widget=forms.PasswordInput)
    password_confirm = forms.CharField(widget=forms.PasswordInput, label="Confirm password")
    first_name = forms.CharField(required=False)
    last_name = forms.CharField(required=False)
    gender = forms.ChoiceField(choices=((None, ''), ('F', 'Female'), ('M', 'Male'), ('O', 'Other')))
    receive_news = forms.BooleanField(required=False, label='I want to receive news and special offers')
    agree_toc = forms.BooleanField(required=True, label='I agree with the Terms and Conditions')

    # Two-column password row; personal details grouped under one fieldset.
    layout = Layout('username', 'email',
                    Row('password', 'password_confirm'),
                    Fieldset('Personal details',
                             Row('first_name', 'last_name'),
                             'gender', 'receive_news', 'agree_toc'))

    template = Template("""
    {% form %}
        {% part form.username prefix %}<i class="material-icons prefix">account_box</i>{% endpart %}
        {% part form.email prefix %}<i class="material-icons prefix">email</i>{% endpart %}
        {% part form.password prefix %}<i class="material-icons prefix">lock_open</i>{% endpart %}
    {% endform %}
    """)

    buttons = Template("""
        <button class="waves-effect waves-light btn" type="submit">Submit</button>
    """)

    title = "Registration form"
class ContactForm(forms.Form):
    """Demo contact form: name/email on one row, then subject and message."""
    name = forms.CharField()
    email = forms.EmailField()
    subject = forms.CharField()
    message = forms.CharField(widget=forms.Textarea)
    send_copy = forms.BooleanField(required=False,
                                   label="Send a copy to my e-mail address")

    template = Template("""
    {% form %}
        {% part form.name prefix %}<i class="material-icons prefix">account_box</i>{% endpart %}
        {% part form.email prefix %}<i class="material-icons prefix">email</i>{% endpart %}
        {% part form.subject prefix %}<i class="material-icons prefix">announcement</i>{% endpart %}
        {% part form.message prefix %}<i class="material-icons prefix">message</i>{% endpart %}
        {% attr form.send_copy 'group' class append %}right-align{% endattr %}
    {% endform %}
    """)

    layout = Layout(Row('name', 'email'), 'subject', 'message', 'send_copy')

    buttons = Template("""
        <button class="btn btn-primary pull-right" type="submit">Send message</button>
    """)

    title = "Contact form"
class OrderForm(forms.Form):
    """Demo "order services" form with interest/budget selects and dates.

    The first choice of each select doubles as its placeholder label.
    """
    name = forms.CharField()
    company = forms.CharField()
    email = forms.EmailField()
    phone = forms.CharField()
    interest = forms.ChoiceField(choices=((None, 'Interested in'), ('D', 'Design'), ('C', 'Development'),
                                          ('I', 'Illustration'), ('B', 'Branding'), ('V', 'Video')))
    budget = forms.ChoiceField(choices=((None, 'Budget'), ('S', 'Less than $5000'), ('M', '$5000-$10000'),
                                        ('L', '$10000-$20000'), ('XL', 'More than $20000')))
    start_date = forms.DateField(label="Expected start date")
    finish_date = forms.DateField(label="Expected finish date")
    attachment = forms.FileField(label="Include some file...")
    message = forms.CharField(widget=forms.Textarea)

    layout = Layout('name', 'company', 'email', 'phone',
                    Row('interest', 'budget'),
                    Row('start_date', 'finish_date'),
                    'attachment', 'message')

    template = Template("""
    {% form %}
        {% part form.name prefix %}<i class="material-icons prefix">account_box</i>{% endpart %}
        {% part form.company prefix %}<i class="material-icons prefix">business</i>{% endpart %}
        {% part form.email prefix %}<i class="material-icons prefix">email</i>{% endpart %}
        {% part form.phone prefix %}<i class="material-icons prefix">call</i>{% endpart %}
    {% endform %}
    """)

    buttons = Template("""
        <button class="btn btn-primary pull-right" type="submit">Submit request</button>
    """)

    title = "Order services"
class CheckoutForm(forms.Form):
    """Demo checkout form: contact details, address, and card payment block.

    Fix: the ``css`` snippet opened two blocks (``@media`` and the selector)
    but closed only one brace, producing malformed CSS; the missing closing
    ``}`` for the media query is added.
    """
    first_name = forms.CharField()
    last_name = forms.CharField()
    email = forms.EmailField()
    phone = forms.CharField()
    country = forms.ChoiceField(choices=COUNTRY_CHOICES)
    city = forms.CharField()
    post_code = forms.IntegerField()
    address = forms.CharField()
    additional_info = forms.CharField(widget=forms.Textarea)
    card_type = forms.ChoiceField(choices=(('V', 'Visa'), ('M', 'MasterCard'), ('P', 'Paypal')), widget=forms.RadioSelect)
    card_holder = forms.CharField(label="Name on card")
    card_number = forms.CharField(label="Card number")
    card_ccv2 = forms.IntegerField(label="CVV2")
    card_exp_month = forms.ChoiceField(choices=((1, 'January'), (2, 'February'), (3, 'March'),
                                                (4, 'April'), (5, 'May'), (6, 'June'),
                                                (7, 'July'), (8, 'August'), (9, 'September'),
                                                (10, 'October'), (11, 'November'), (12, 'December')))
    card_exp_year = forms.IntegerField(label="Year")

    # Card details: radio column on the left, holder/number/expiry on the right.
    layout = Layout(
        Row('first_name', 'last_name'),
        Row('email', 'phone'),
        Row(Span5('country'), Span5('city'), Span2('post_code')),
        'address',
        'additional_info',
        Fieldset('Card Details',
                 Row(Column('card_type', span_columns=4),
                     Column('card_holder',
                            Row(Span10('card_number'), Span2('card_ccv2')),
                            Row('card_exp_month', 'card_exp_year'),
                            span_columns=8))))

    template = Template("""
    {% form %}
        {% part form.first_name prefix %}<i class="material-icons prefix">account_box</i>{% endpart %}
        {% part form.last_name prefix %}<i class="material-icons prefix">account_box</i>{% endpart %}
        {% part form.email prefix %}<i class="material-icons prefix">email</i>{% endpart %}
        {% part form.phone prefix %}<i class="material-icons prefix">call</i>{% endpart %}
        {% part form.card_type label %}{% endpart %}
    {% endform %}
    """)

    buttons = Template("""
        <button class="btn btn-primary pull-right" type="submit">Submit request</button>
    """)

    title = "Checkout form"

    css = """
    @media only screen and (min-width : 601px) {
        #id_card_type_container {
            margin-top: 40px;
            margin-left: 50px;
        }
    }
    """
class CommentForm(forms.Form):
    """Demo blog-comment form: identity row plus website and comment body."""
    name = forms.CharField()
    email = forms.EmailField()
    website = forms.URLField()
    comment = forms.CharField(widget=forms.Textarea)

    layout = Layout(Row('name', 'email'),
                    'website', 'comment')

    template = Template("""
    {% form %}
        {% part form.name prefix %}<i class="material-icons prefix">account_box</i>{% endpart %}
        {% part form.email prefix %}<i class="material-icons prefix">email</i>{% endpart %}
        {% part form.website prefix %}<i class="material-icons prefix">card_travel</i>{% endpart %}
        {% part form.comment prefix %}<i class="material-icons prefix">chat</i>{% endpart %}
    {% endform %}
    """)

    buttons = Template("""
        <button class="btn btn-primary pull-right" type="submit">Add comment</button>
    """)

    title = "Comment form"
class BankForm(forms.Form):
    """Large demo form modeling a personal bank-account application.

    Field groups mirror the fieldsets declared in ``layout``. The bare
    string literals between groups are section markers left by the original
    author; they are plain statements with no runtime effect.

    NOTE(review): several identifiers carry typos (``flat_bulding``,
    ``montly_income``, ``martial_status``). They are referenced by name in
    ``layout`` and the template, so renaming must be coordinated — left as-is.
    """
    branch_name = forms.CharField()

    """ Personal Details """
    person_title = forms.ChoiceField(choices=(('Mr', 'Mr.'), ('Mrs.', 'Mrs.'), ('Ms.', 'Ms.')), label='Title')
    full_name = forms.CharField()
    date_of_birth = forms.DateField()
    email = forms.EmailField()
    parent_name = forms.CharField(label='In case of a minor please provide details')
    nationality = forms.ChoiceField(choices=COUNTRY_CHOICES)
    mobile_no = forms.CharField()
    existing_bank_account = forms.CharField()
    partner_name = forms.CharField(label='Name of father/spouse')

    """ Residential address """
    flat_bulding = forms.CharField(label='Flat no. and bldg. name')
    road_no = forms.CharField(label='Road no./name')
    area_and_landmark = forms.CharField(label='Area and landmark')
    telephone_residence = forms.CharField()
    city = forms.CharField()
    office = forms.CharField()
    fax = forms.CharField()
    pin_code = forms.CharField()

    """ Mailing Address """
    mailing_company_details = forms.CharField(label="Company name and department/ Flat no. and bldg. name")
    mailing_road_no = forms.CharField(label='Road no./name')
    mailing_area_and_landmark = forms.CharField(label='Area and landmark')
    mailing_city = forms.CharField(label='City')
    mailing_mobile = forms.CharField(label='Mobile No.')
    mailing_telephone_residence = forms.CharField(label='Telephone Residence')
    mailing_office = forms.CharField(label='Office')
    mailing_fax = forms.CharField(label='Fax')
    mailing_pin_code = forms.CharField(label='Pin Code')
    mailing_email = forms.EmailField(label='E-mail')

    """ Details of Introduction by Existing Customer """
    introducer_name = forms.CharField(label='Customer Name')
    introducer_account_no = forms.CharField(label='Account No.')
    introducer_signature = forms.CharField(label="Introducer's signature")

    """ Account Details """
    account_type = forms.ChoiceField(
        choices=(('S', 'Savings'), ('C', 'Current'), ('F', 'Fixed deposits')),
        label='Choice of account',
        widget=forms.RadioSelect)
    account_mode = forms.ChoiceField(
        choices=(('CS', 'Cash'), ('CQ', 'Cheque'), ('NF', 'NEFT')),
        label='Mode of funding',
        widget=forms.RadioSelect)
    account_amount = forms.FloatField(label='Amount')

    """ Details of Fixed Deposit """
    deposit_type = forms.ChoiceField(
        choices=(('O', 'Ordinary'), ('C', 'Cumulative')),
        label='Types of deposit',
        widget=forms.RadioSelect)
    deposit_mode = forms.ChoiceField(
        choices=(('CS', 'Cash'), ('CQ', 'Cheque'), ('NF', 'NEFT')),
        label='Mode of funding',
        widget=forms.RadioSelect)
    deposit_amount = forms.FloatField(label='Amount')
    deposit_no = forms.CharField(label='No. of deposits')
    deposit_individual_amount = forms.FloatField(label='Individual Deposit Amount')

    """ Personal Details """
    occupation = forms.ChoiceField(
        choices=(('NE', 'Non-executive'), ('HW', 'Housewife'), ('RT', 'Retired'),
                 ('ST', 'Student'), ('OT', 'Other'), ('UN', 'Unemployed')),
        widget=forms.RadioSelect)
    job_title = forms.CharField()
    department = forms.CharField()
    nature_of_business = forms.CharField()
    education = forms.ChoiceField(
        choices=(('UG', 'Under graduate'), ('GR', 'Graduate'), ('OT', 'Others')),
        widget=forms.RadioSelect)
    montly_income = forms.ChoiceField(
        choices=(('000', 'Zero Income'), ('L10', 'Less than $10,000'), ('G10', '$10,000+')),
        widget=forms.RadioSelect)
    martial_status = forms.ChoiceField(
        choices=(('M', 'Married'), ('S', 'Single')),
        widget=forms.RadioSelect)
    spouse_name = forms.CharField()

    """ Other existing bank accounts, if any """
    other_account1 = forms.CharField(label='Name of the Bank / branch')
    other_account2 = forms.CharField(label='Name of the Bank / branch')

    """ Reason for Account opening """
    reason = forms.CharField(label="Please specify", widget=forms.Textarea)

    """ Terms And Conditions """
    terms_accepted = forms.BooleanField(
        label="I/We confirm having read and understood the account rules of The Banking Corporation Limited"
              " ('the Bank'), and hereby agree to be bound by the terms and conditions and amendments governing the"
              " account(s) issued by the Bank from time-to-time.")

    # Fieldsets below mirror the section markers above, one per group.
    layout = Layout(
        Fieldset("Please open an account at",
                 'branch_name'),
        Fieldset("Personal Details (Sole/First Accountholder/Minor)",
                 Row(Span2('person_title'), Span10('full_name')),
                 Row(Column('date_of_birth',
                            'email',
                            'parent_name'),
                     Column('nationality',
                            Row('mobile_no', 'existing_bank_account'),
                            'partner_name'))),
        Fieldset('Residential address',
                 Row('flat_bulding', 'road_no'),
                 Row(Span10('area_and_landmark'), Span2('city')),
                 Row('telephone_residence', 'office', 'fax', 'pin_code')),
        Fieldset("Mailing Address (If different from the First Accountholder's address)",
                 'mailing_company_details',
                 Row('mailing_road_no', 'mailing_area_and_landmark', 'mailing_city', 'mailing_mobile'),
                 Row('mailing_telephone_residence', 'mailing_office', 'mailing_fax', 'mailing_pin_code'),
                 'mailing_email'),
        Fieldset("Details of Introduction by Existing Customer (If applicable)",
                 Row('introducer_name', 'introducer_account_no'),
                 'introducer_signature'),
        Fieldset("Account Details",
                 Row('account_type', 'account_mode'),
                 'account_amount'),
        Fieldset("Details of Fixed Deposit",
                 Row('deposit_type', 'deposit_mode'),
                 Row(Span6('deposit_amount'), Span3('deposit_no'), Span3('deposit_individual_amount'))),
        Fieldset("Personal Details",
                 Row('occupation', 'education', 'montly_income'),
                 'job_title',
                 Row('department', 'nature_of_business'),
                 Row('martial_status', 'spouse_name')),
        Fieldset("Other existing bank accounts, if any",
                 Row('other_account1', 'other_account2')),
        Fieldset("Reason for Account opening",
                 'reason'),
        Fieldset("Terms And Conditions",
                 'terms_accepted')
    )

    # Render every radio group inline (horizontal) instead of stacked.
    template = Template("""
    {% form %}
        {% attr form.account_type 'group' class append %}inline{% endattr %}
        {% attr form.account_mode 'group' class append %}inline{% endattr %}
        {% attr form.deposit_type 'group' class append %}inline{% endattr %}
        {% attr form.deposit_mode 'group' class append %}inline{% endattr %}
        {% attr form.martial_status 'group' class append %}inline{% endattr %}
    {% endform %}
    """)

    buttons = Template("""
        <button class="btn btn-primary pull-right" type="submit">Save application</button>
    """)

    title = "Personal Bank Account Initial Application"

    css = """
    .section h5 {
        font-size: 1.2rem;
        padding-bottom: 0.2rem;
        border-bottom: 3px solid black;
    }
    """

    blockclass = "col s12 m12 l9 offset-l1"
class WizardForm1(forms.Form):
    """First step of the demo form wizard: message subject and sender."""
    subject = forms.CharField(max_length=100)
    sender = forms.EmailField()
class WizardForm2(forms.Form):
    """Second step of the demo form wizard: the message body."""
    message = forms.CharField(widget=forms.Textarea)
| 47.183953 | 124 | 0.593049 |
0d9affe1b4b5bf374095c471dcd5d57cbbb13ea8 | 6,257 | py | Python | server/scripts/recommendation/train.py | exKAZUu-Research/WillingQuiz | e785a12997c30cbc5bfcc5737a466d49cd80015c | [
"BSD-3-Clause"
] | null | null | null | server/scripts/recommendation/train.py | exKAZUu-Research/WillingQuiz | e785a12997c30cbc5bfcc5737a466d49cd80015c | [
"BSD-3-Clause"
] | 8 | 2019-12-18T15:34:48.000Z | 2019-12-18T15:34:52.000Z | server/scripts/recommendation/train.py | exKAZUu-Research/WillingQuiz | e785a12997c30cbc5bfcc5737a466d49cd80015c | [
"BSD-3-Clause"
] | null | null | null | import sys
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
import pickle
import json
import datetime
import util
# Maximum number of days a single settings period is considered active.
MAX_USER_DURATION = 30
# Minimum days of data required per user (presumably enforced elsewhere -- TODO confirm usage).
MIN_REQUIRED_USER_DURATION = 7
# Logs before this date are discarded entirely.
MIN_DATE = datetime.date(2017, 3, 6)
# Sentinel "far future" end date for a user's last settings period.
MAX_DATE = datetime.date(2999, 12, 31)
class NotEnoughDataError(Exception):
    """Raised when the CSV inputs do not contain enough valid data to train."""
    pass
def load_data_and_train_model(users_file, user_data_file):
    """Load users and behavior logs from CSV, train the model, return a pickle.

    Pipeline: read users -> read/filter logs -> join active settings ->
    attach survey measures -> attach quiz answer counts -> fit the model.
    Exits the process (sys.exit) with a Japanese message if any stage finds
    no usable data.

    :param users_file: path to the users CSV (first column is the user id index)
    :param user_data_file: path to the behavior-log CSV
    :return: pickled trained model as bytes
    """
    try:
        raw_users = pd.read_csv(users_file, index_col=0)
        raw_users.index.names = ['user_id']
        if raw_users.shape[0] == 0:
            raise NotEnoughDataError('ユーザが存在しません')
        user_data = pd.read_csv(user_data_file, index_col=0)
        user_data = util.set_time_and_date(user_data)
        # Drop everything logged before the experiment start date.
        user_data = user_data[user_data['date'] >= MIN_DATE]
        if user_data.shape[0] == 0:
            raise NotEnoughDataError('ユーザの行動ログが存在しません')
        users = create_users_with_settings(raw_users, user_data)
        users = set_measures(users, user_data)
        users = set_answer_count(users, user_data)
        model = train_model(users)
        model_dump = pickle.dumps(model)
        return model_dump
    except NotEnoughDataError as err:
        # Script-style error handling: abort the process with the message.
        sys.exit(err)
def create_users_with_settings(raw_users, user_data):
    """Expand each user into one row per active settings period.

    Takes "logSetting" events, keeps the last setting chosen per user per
    day, and for each such setting computes the date range during which it
    was in effect (until the next change, capped at MAX_USER_DURATION days).

    :raises NotEnoughDataError: if no settings events exist at all.
    :return: raw_users inner-joined with per-period settings, indexed by
        (user_id, period start date) with an ``end_date`` column.
    """
    settings_logs = user_data.query('kind == "logSetting"')
    settings_logs = settings_logs[['user_id', 'value', 'date', 'time']]
    if settings_logs.shape[0] == 0:
        raise NotEnoughDataError('ユーザが機能設定をしたログが存在しません')
    # 'value' is a JSON blob; pull out its 'setting' dict and splat it into columns.
    settings_column = (settings_logs['value']
                       .apply(json.loads)
                       .apply(pd.Series)['setting'])
    settings_logs = (settings_logs
                     .join(settings_column.apply(pd.Series))
                     .drop('value', axis='columns'))
    # Keep only the last setting per user per day, then pair each period with
    # the start date of the following one (MAX_DATE sentinel for the last).
    active_settings = (settings_logs
                       .sort_values(['date', 'time'])
                       .drop_duplicates(['user_id', 'date'], keep='last')
                       .groupby('user_id')
                       .apply(lambda df: df.assign(next_settings_date=df['date'][1:df.shape[0]].append(pd.Series(MAX_DATE)).values)))
    # A period ends the day before the next settings change, but never runs
    # longer than MAX_USER_DURATION days.
    active_settings = (active_settings
                       .assign(end_date=np.minimum(active_settings['next_settings_date'], (active_settings['date'] + datetime.timedelta(MAX_USER_DURATION))) - datetime.timedelta(days=1))
                       .drop('next_settings_date', axis='columns')
                       .set_index(['user_id', 'date']))
    users = raw_users.join(active_settings, how='inner')
    return users
def set_measures(users, user_data):
    """Attach psychological-survey measures to each user and drop invalid surveys.

    A survey is considered invalid when its timestamps are missing, it was
    completed implausibly fast (< 2 seconds per question), or one answer
    value dominates (>= 2/3 of the 68 questions), suggesting straight-lining.

    :raises NotEnoughDataError: if nobody answered the survey, or no valid
        responses remain after filtering.
    """
    surveys = (user_data
               .query('kind == "survey"')
               .drop_duplicates('user_id')
               .set_index('user_id'))
    if surveys.shape[0] == 0:
        raise NotEnoughDataError('ユーザが心理アンケートに回答したログが存在しません')
    # Each survey 'value' is a JSON list/dict of answers; splat into columns.
    survey_answers = (surveys['value']
                      .apply(json.loads)
                      .apply(pd.Series))
    measures = util.calc_measures_from_survey(survey_answers)
    num_questions = survey_answers.shape[1]
    invalid_survey = (
        (pd.isnull(users['survey_started_at'])) |
        (pd.isnull(users['survey_finished_at'])) |
        # Completed faster than ~2 seconds per question -> not credible.
        ((pd.to_datetime(users['survey_finished_at']) - pd.to_datetime(users['survey_started_at'])).dt.seconds < num_questions * 2) |
        # Same answer chosen for >= 2/3 of the 68 items -> straight-lining.
        (survey_answers.apply(lambda row: row.value_counts().iloc[0] >= 68 * 2 / 3, axis='columns'))
    )
    new_users = (users
                 .join(measures, how='inner')
                 .query('~@invalid_survey'))
    if new_users.shape[0] == 0:
        raise NotEnoughDataError('心理アンケートに有効な回答をしたユーザが存在しません')
    return new_users
def set_answer_count(users, user_data):
    """Attach each user-period's quiz answer count, standardized within course.

    Sums quiz answers that fall inside each user's settings period, log
    transforms the total, and z-scores it against the user's course mean/std
    so activity levels are comparable across courses.

    :raises NotEnoughDataError: if no quiz answers exist, or none fall
        inside any user's active period.
    """
    quiz_answers = user_data.query('kind == "quiz"')
    if quiz_answers.shape[0] == 0:
        raise NotEnoughDataError('ユーザが問題に回答したログが存在しません')
    quiz_answers = quiz_answers.assign(value=quiz_answers['value'].apply(json.loads))
    quiz_answers = quiz_answers.assign(
        total_answer_count=quiz_answers['value'].apply(lambda v: len(v['answers']))
    )
    # One row per user-period: (user_id, period start, period end).
    users_date_ranges = (users
                         .reset_index()
                         .rename(columns={'index': 'user_id', 'date': 'user_start_date', 'end_date': 'user_end_date'})
                         [['user_id', 'user_start_date', 'user_end_date']])
    quiz_answer_summary = (quiz_answers
                           .groupby(['user_id', 'date'], as_index=False)
                           .agg({'total_answer_count': 'sum'})
                           .merge(users_date_ranges, on='user_id'))
    # Keep answers inside the period window, then total them per period.
    quiz_answer_summary = (quiz_answer_summary
                           [(pd.to_datetime(quiz_answer_summary['user_start_date']).dt.date <= quiz_answer_summary['date']) &
                            (pd.to_datetime(quiz_answer_summary['user_end_date']).dt.date >= quiz_answer_summary['date'])]
                           .groupby(['user_id', 'user_start_date'], as_index=False)
                           .agg({'total_answer_count': 'sum'})
                           .rename(columns={'user_start_date': 'date'})
                           .set_index(['user_id', 'date']))
    users = users.join(quiz_answer_summary, how='inner')
    if users.shape[0] == 0:
        raise NotEnoughDataError('有効期間中に問題に回答したユーザが存在しません')
    # Log transform (clamped at 1 to avoid log(0)) before standardization.
    users = users.assign(total_answer_count_log=np.log(np.maximum(users['total_answer_count'], 1)))
    course_summary = (users
                      .groupby('course')
                      ['total_answer_count_log']
                      .agg(['mean', 'std'])
                      .fillna(1))  # std is NaN for single-user courses; treat as 1
    # Z-score within each course.
    total_answer_count_std = ((users['total_answer_count_log'].values - course_summary.loc[users['course'], 'mean'].values)
                              / course_summary.loc[users['course'], 'std'].values)
    users = (users
             .assign(total_answer_count_std=total_answer_count_std)
             .drop('total_answer_count_log', axis='columns'))
    return users
def train_model(users):
    """Fit a random forest predicting the per-course standardized answer count.

    Features are the psychological-measure columns (m0, m1, ...) plus the
    setting columns; rows missing any feature or the target are dropped.

    Raises:
        NotEnoughDataError: if no complete training rows remain.
    """
    measure_columns = users.columns[users.columns.str.match('^m[0-9]+$')].tolist()
    feature_columns = measure_columns + util.SETTING_COLUMNS
    target_column = 'total_answer_count_std'

    usable = users.dropna(subset=(feature_columns + [target_column]))
    if usable.shape[0] == 0:
        raise NotEnoughDataError('訓練データとして使用できるユーザが存在しません')

    # Re-apply dtypes so pd.Categorical inside
    # util.create_dummy_variables_for_settings does not hit a dtype mismatch.
    usable = usable.apply(pd.Series)

    features = util.create_dummy_variables_for_settings(usable[feature_columns])
    target = usable[target_column]
    return RandomForestRegressor(n_estimators=500).fit(features, target)
if __name__ == '__main__':
    # CLI: <script> USERS_FILE USER_DATA_FILE
    users_file, user_data_file = sys.argv[1:]
    # model_dump is presumably serialized model bytes (e.g. a pickle) —
    # TODO confirm against load_data_and_train_model. It is written raw to
    # stdout so callers can redirect it to a file.
    model_dump = load_data_and_train_model(users_file, user_data_file)
    sys.stdout.buffer.write(model_dump)
| 36.805882 | 171 | 0.677321 |
7d2babec8a2ba637f43a56309ff191e4814739ae | 2,237 | py | Python | problems/776_split_bst.py | apoorvkk/LeetCodeSolutions | 1c3461cfc05deb930d0866428eb00362b4338aab | [
"MIT"
] | 1 | 2018-02-03T14:17:18.000Z | 2018-02-03T14:17:18.000Z | problems/776_split_bst.py | apoorvkk/LeetCodeSolutions | 1c3461cfc05deb930d0866428eb00362b4338aab | [
"MIT"
] | null | null | null | problems/776_split_bst.py | apoorvkk/LeetCodeSolutions | 1c3461cfc05deb930d0866428eb00362b4338aab | [
"MIT"
] | null | null | null | '''
URL: https://leetcode.com/problems/split-bst/description/
(Recursive version) Time complexity: O(height of tree) — O(log n) only if the BST is balanced
(Recursive version) Space complexity: O(height of tree)
(Iterative version) Time complexity: O(height of tree) — O(log n) only if the BST is balanced
(Iterative version) Space complexity: O(height of tree)
'''
class Solution:
    def splitBST(self, root, V):
        """Split a BST into two BSTs: one with values <= V, one with values > V.

        :type root: TreeNode
        :type V: int
        :rtype: List[TreeNode] -- pair [small_root, large_root]

        Fixes vs. original: the third branch was a redundant
        ``if root.val >= V`` (always true once the first two branches were
        ruled out), which left an unreachable implicit-``None`` fall-through;
        it is now an unconditional tail. The large commented-out iterative
        draft was removed as dead code.
        """
        if root is None:
            return [None, None]
        if root.val == V:
            # Root belongs to the "small" side; everything to its right is
            # strictly greater and becomes the "large" tree.
            large = root.right
            root.right = None
            return [root, large]
        if root.val < V:
            # Split point lies in the right subtree; root keeps the small half.
            small, large = self.splitBST(root.right, V)
            root.right = small
            return [root, large]
        # root.val > V: split point lies in the left subtree; root keeps the
        # large half.
        small, large = self.splitBST(root.left, V)
        root.left = large
        return [small, root]
| 30.22973 | 68 | 0.510952 |
8fd5a7f8c39e3ce82dda2aaebc745e748bf5e696 | 12,218 | py | Python | api/tests/opentrons/protocol_runner/test_protocol_runner.py | Opentrons/labware | e21d8db51eac5818477264a45ef12c0a2d15fb72 | [
"Apache-2.0"
] | 2 | 2015-11-10T17:49:51.000Z | 2016-01-15T04:43:37.000Z | api/tests/opentrons/protocol_runner/test_protocol_runner.py | Opentrons/labware | e21d8db51eac5818477264a45ef12c0a2d15fb72 | [
"Apache-2.0"
] | null | null | null | api/tests/opentrons/protocol_runner/test_protocol_runner.py | Opentrons/labware | e21d8db51eac5818477264a45ef12c0a2d15fb72 | [
"Apache-2.0"
] | null | null | null | """Tests for the ProtocolRunner class."""
import pytest
from decoy import Decoy, matchers
from pathlib import Path
from typing import List, cast
from opentrons_shared_data.protocol.dev_types import (
JsonProtocol as LegacyJsonProtocolDict,
)
from opentrons.hardware_control import API as HardwareAPI
from opentrons.protocols.api_support.types import APIVersion
from opentrons_shared_data.protocol.models.protocol_schema_v6 import ProtocolSchemaV6
from opentrons_shared_data.labware.labware_definition import LabwareDefinition
from opentrons.protocol_api_experimental import ProtocolContext
from opentrons.protocol_engine import ProtocolEngine, commands as pe_commands
from opentrons.protocol_reader import (
ProtocolSource,
JsonProtocolConfig,
PythonProtocolConfig,
)
from opentrons.protocol_runner import ProtocolRunner
from opentrons.protocol_runner.task_queue import TaskQueue
from opentrons.protocol_runner.json_file_reader import JsonFileReader
from opentrons.protocol_runner.json_command_translator import JsonCommandTranslator
from opentrons.protocol_runner.python_file_reader import (
PythonFileReader,
PythonProtocol,
)
from opentrons.protocol_runner.python_context_creator import PythonContextCreator
from opentrons.protocol_runner.python_executor import PythonExecutor
from opentrons.protocol_runner.legacy_context_plugin import LegacyContextPlugin
from opentrons.protocol_runner.legacy_wrappers import (
LegacyFileReader,
LegacyContextCreator,
LegacyExecutor,
LegacyPythonProtocol,
LegacyJsonProtocol,
LegacyProtocolContext,
LegacyLabwareDefinition,
)
# --- Mocked collaborator fixtures -------------------------------------------
# Each fixture below returns a decoy mock of one ProtocolRunner dependency.
# Tests stub behavior with decoy.when(...) and assert calls with decoy.verify(...).


@pytest.fixture
def protocol_engine(decoy: Decoy) -> ProtocolEngine:
    """Get a mocked out ProtocolEngine dependency."""
    return decoy.mock(cls=ProtocolEngine)


@pytest.fixture
def hardware_api(decoy: Decoy) -> HardwareAPI:
    """Get a mocked out HardwareAPI dependency."""
    return decoy.mock(cls=HardwareAPI)


@pytest.fixture
def task_queue(decoy: Decoy) -> TaskQueue:
    """Get a mocked out TaskQueue dependency."""
    return decoy.mock(cls=TaskQueue)


@pytest.fixture
def json_file_reader(decoy: Decoy) -> JsonFileReader:
    """Get a mocked out JsonFileReader dependency."""
    return decoy.mock(cls=JsonFileReader)


@pytest.fixture
def json_command_translator(decoy: Decoy) -> JsonCommandTranslator:
    """Get a mocked out JsonCommandTranslator dependency."""
    return decoy.mock(cls=JsonCommandTranslator)


@pytest.fixture
def python_file_reader(decoy: Decoy) -> PythonFileReader:
    """Get a mocked out PythonFileReader dependency."""
    return decoy.mock(cls=PythonFileReader)


@pytest.fixture
def python_context_creator(decoy: Decoy) -> PythonContextCreator:
    """Get a mocked out PythonContextCreator dependency."""
    return decoy.mock(cls=PythonContextCreator)


@pytest.fixture
def python_executor(decoy: Decoy) -> PythonExecutor:
    """Get a mocked out PythonExecutor dependency."""
    return decoy.mock(cls=PythonExecutor)


@pytest.fixture
def legacy_file_reader(decoy: Decoy) -> LegacyFileReader:
    """Get a mocked out LegacyFileReader dependency."""
    return decoy.mock(cls=LegacyFileReader)


@pytest.fixture
def legacy_context_creator(decoy: Decoy) -> LegacyContextCreator:
    """Get a mocked out LegacyContextCreator dependency."""
    return decoy.mock(cls=LegacyContextCreator)


@pytest.fixture
def legacy_executor(decoy: Decoy) -> LegacyExecutor:
    """Get a mocked out LegacyExecutor dependency."""
    return decoy.mock(cls=LegacyExecutor)
@pytest.fixture
def subject(
    protocol_engine: ProtocolEngine,
    hardware_api: HardwareAPI,
    task_queue: TaskQueue,
    json_file_reader: JsonFileReader,
    json_command_translator: JsonCommandTranslator,
    python_file_reader: PythonFileReader,
    python_context_creator: PythonContextCreator,
    python_executor: PythonExecutor,
    legacy_file_reader: LegacyFileReader,
    legacy_context_creator: LegacyContextCreator,
    legacy_executor: LegacyExecutor,
) -> ProtocolRunner:
    """Get a ProtocolRunner test subject with mocked dependencies."""
    # The subject is the only real object in these tests: a ProtocolRunner
    # wired entirely with the decoy mocks from the fixtures above.
    return ProtocolRunner(
        protocol_engine=protocol_engine,
        hardware_api=hardware_api,
        task_queue=task_queue,
        json_file_reader=json_file_reader,
        json_command_translator=json_command_translator,
        python_file_reader=python_file_reader,
        python_context_creator=python_context_creator,
        python_executor=python_executor,
        legacy_file_reader=legacy_file_reader,
        legacy_context_creator=legacy_context_creator,
        legacy_executor=legacy_executor,
    )
async def test_play_starts_run(
    decoy: Decoy,
    protocol_engine: ProtocolEngine,
    task_queue: TaskQueue,
    subject: ProtocolRunner,
) -> None:
    """It should start a protocol run with play."""
    subject.play()

    # play() must be forwarded to the engine exactly once.
    decoy.verify(protocol_engine.play(), times=1)
async def test_pause(
    decoy: Decoy,
    protocol_engine: ProtocolEngine,
    subject: ProtocolRunner,
) -> None:
    """It should pause a protocol run with pause."""
    subject.pause()

    # pause() must be forwarded to the engine exactly once.
    decoy.verify(protocol_engine.pause(), times=1)
async def test_stop(
    decoy: Decoy,
    task_queue: TaskQueue,
    protocol_engine: ProtocolEngine,
    subject: ProtocolRunner,
) -> None:
    """It should halt a protocol run with stop."""
    # play() first so the runner counts as "started"; stop() then takes the
    # halt path rather than the never-started cleanup path.
    subject.play()
    await subject.stop()

    decoy.verify(await protocol_engine.stop(), times=1)
async def test_stop_never_started(
    decoy: Decoy,
    task_queue: TaskQueue,
    protocol_engine: ProtocolEngine,
    subject: ProtocolRunner,
) -> None:
    """It should clean up rather than halt if the runner was never started."""
    await subject.stop()

    # No play() happened, so stop() finishes the engine quietly: no tip
    # drop/home motion and no run-status update.
    decoy.verify(
        await protocol_engine.finish(drop_tips_and_home=False, set_run_status=False),
        times=1,
    )
async def test_run(
    decoy: Decoy,
    protocol_engine: ProtocolEngine,
    task_queue: TaskQueue,
    subject: ProtocolRunner,
) -> None:
    """It should run a protocol to completion."""
    assert subject.was_started() is False
    await subject.run()
    assert subject.was_started() is True

    # decoy.verify with multiple rehearsals asserts this exact call order:
    # play the engine, start the queue, then wait for the queue to drain.
    decoy.verify(
        protocol_engine.play(),
        task_queue.start(),
        await task_queue.join(),
    )
def test_load_json(
    decoy: Decoy,
    json_file_reader: JsonFileReader,
    json_command_translator: JsonCommandTranslator,
    protocol_engine: ProtocolEngine,
    task_queue: TaskQueue,
    subject: ProtocolRunner,
) -> None:
    """It should load a JSON protocol file."""
    json_protocol_source = ProtocolSource(
        directory=Path("/dev/null"),
        main_file=Path("/dev/null/abc.json"),
        files=[],
        metadata={},
        config=JsonProtocolConfig(schema_version=6),
        labware_definitions=[],
    )

    json_protocol = ProtocolSchemaV6.construct()  # type: ignore[call-arg]

    # Translated commands the mocked translator will hand back.
    commands: List[pe_commands.CommandCreate] = [
        pe_commands.PauseCreate(params=pe_commands.PauseParams(message="hello")),
        pe_commands.PauseCreate(params=pe_commands.PauseParams(message="goodbye")),
    ]

    # Stub the read -> translate pipeline.
    decoy.when(json_file_reader.read(json_protocol_source)).then_return(json_protocol)
    decoy.when(json_command_translator.translate(json_protocol)).then_return(commands)

    subject.load(json_protocol_source)

    # Each translated command is queued in order, then the task queue is
    # pointed at engine.wait_until_complete (JSON runs have no executor).
    decoy.verify(
        protocol_engine.add_command(
            request=pe_commands.PauseCreate(
                params=pe_commands.PauseParams(message="hello")
            )
        ),
        protocol_engine.add_command(
            request=pe_commands.PauseCreate(
                params=pe_commands.PauseParams(message="goodbye")
            )
        ),
        task_queue.set_run_func(func=protocol_engine.wait_until_complete),
    )
def test_load_python(
    decoy: Decoy,
    python_file_reader: PythonFileReader,
    python_context_creator: PythonContextCreator,
    python_executor: PythonExecutor,
    protocol_engine: ProtocolEngine,
    task_queue: TaskQueue,
    subject: ProtocolRunner,
) -> None:
    """It should load a Python protocol file."""
    python_protocol_source = ProtocolSource(
        directory=Path("/dev/null"),
        main_file=Path("/dev/null/abc.py"),
        files=[],
        metadata={},
        # APIVersion 3.x selects the new (non-legacy) Python path.
        config=PythonProtocolConfig(api_version=APIVersion(3, 0)),
        labware_definitions=[],
    )

    python_protocol = decoy.mock(cls=PythonProtocol)
    protocol_context = decoy.mock(cls=ProtocolContext)

    decoy.when(python_file_reader.read(python_protocol_source)).then_return(
        python_protocol
    )
    decoy.when(python_context_creator.create(protocol_engine)).then_return(
        protocol_context
    )

    subject.load(python_protocol_source)

    # The task queue is armed to execute the parsed protocol with the
    # engine-backed context.
    decoy.verify(
        task_queue.set_run_func(
            func=python_executor.execute,
            protocol=python_protocol,
            context=protocol_context,
        ),
    )
def test_load_legacy_python(
    decoy: Decoy,
    legacy_file_reader: LegacyFileReader,
    legacy_context_creator: LegacyContextCreator,
    legacy_executor: LegacyExecutor,
    task_queue: TaskQueue,
    protocol_engine: ProtocolEngine,
    subject: ProtocolRunner,
) -> None:
    """It should load a legacy context-based Python protocol."""
    labware_definition = LabwareDefinition.construct()  # type: ignore[call-arg]

    legacy_protocol_source = ProtocolSource(
        directory=Path("/dev/null"),
        main_file=Path("/dev/null/abc.py"),
        files=[],
        metadata={},
        # APIVersion 2.x selects the legacy Python path.
        config=PythonProtocolConfig(api_version=APIVersion(2, 11)),
        labware_definitions=[labware_definition],
    )

    extra_labware = {"definition-uri": cast(LegacyLabwareDefinition, {})}

    legacy_protocol = LegacyPythonProtocol(
        text="",
        contents="",
        filename="protocol.py",
        api_level=APIVersion(2, 11),
        metadata={"foo": "bar"},
        bundled_labware=None,
        bundled_data=None,
        bundled_python=None,
        extra_labware=extra_labware,
    )

    legacy_context = decoy.mock(cls=LegacyProtocolContext)

    decoy.when(legacy_file_reader.read(legacy_protocol_source)).then_return(
        legacy_protocol
    )
    decoy.when(legacy_context_creator.create(legacy_protocol)).then_return(
        legacy_context
    )

    subject.load(legacy_protocol_source)

    # Legacy loads register labware with the engine, attach the bridge
    # plugin, then arm the task queue with the legacy executor.
    decoy.verify(
        protocol_engine.add_labware_definition(labware_definition),
        protocol_engine.add_plugin(matchers.IsA(LegacyContextPlugin)),
        task_queue.set_run_func(
            func=legacy_executor.execute,
            protocol=legacy_protocol,
            context=legacy_context,
        ),
    )
def test_load_legacy_json(
    decoy: Decoy,
    legacy_file_reader: LegacyFileReader,
    legacy_context_creator: LegacyContextCreator,
    legacy_executor: LegacyExecutor,
    task_queue: TaskQueue,
    protocol_engine: ProtocolEngine,
    subject: ProtocolRunner,
) -> None:
    """It should load a legacy context-based JSON protocol."""
    labware_definition = LabwareDefinition.construct()  # type: ignore[call-arg]

    legacy_protocol_source = ProtocolSource(
        directory=Path("/dev/null"),
        main_file=Path("/dev/null/abc.json"),
        files=[],
        metadata={},
        # JSON schema v5 selects the legacy JSON path (v6 is the new path).
        config=JsonProtocolConfig(schema_version=5),
        labware_definitions=[labware_definition],
    )

    legacy_protocol = LegacyJsonProtocol(
        text="{}",
        contents=cast(LegacyJsonProtocolDict, {}),
        filename="protocol.json",
        api_level=APIVersion(2, 11),
        schema_version=5,
        metadata={"foo": "bar"},
    )

    legacy_context = decoy.mock(cls=LegacyProtocolContext)

    decoy.when(legacy_file_reader.read(legacy_protocol_source)).then_return(
        legacy_protocol
    )
    decoy.when(legacy_context_creator.create(legacy_protocol)).then_return(
        legacy_context
    )

    subject.load(legacy_protocol_source)

    # Same wiring as legacy Python: labware, bridge plugin, legacy executor.
    decoy.verify(
        protocol_engine.add_labware_definition(labware_definition),
        protocol_engine.add_plugin(matchers.IsA(LegacyContextPlugin)),
        task_queue.set_run_func(
            func=legacy_executor.execute,
            protocol=legacy_protocol,
            context=legacy_context,
        ),
    )
| 30.468828 | 86 | 0.720658 |
f5ccdaaa925b9a269aac6a699446a5e798109106 | 2,267 | py | Python | alipay/aop/api/domain/AlipayFundTransAuctionBalanceQueryModel.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/AlipayFundTransAuctionBalanceQueryModel.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/AlipayFundTransAuctionBalanceQueryModel.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayFundTransAuctionBalanceQueryModel(object):
    """Request model for the Alipay fund-transfer auction balance query API.

    Holds four optional fields and (de)serializes them to/from the "alipay
    dict" wire format. Refactored: the four near-identical per-field copies
    of the (de)serialization logic are consolidated into one loop driven by
    ``_FIELDS`` — behavior is unchanged.
    """

    # Single source of truth for this model's field names, in wire order.
    _FIELDS = ('biz_id', 'inst_id', 'user_id', 'verify_id')

    def __init__(self):
        self._biz_id = None
        self._inst_id = None
        self._user_id = None
        self._verify_id = None

    @property
    def biz_id(self):
        return self._biz_id

    @biz_id.setter
    def biz_id(self, value):
        self._biz_id = value

    @property
    def inst_id(self):
        return self._inst_id

    @inst_id.setter
    def inst_id(self, value):
        self._inst_id = value

    @property
    def user_id(self):
        return self._user_id

    @user_id.setter
    def user_id(self, value):
        self._user_id = value

    @property
    def verify_id(self):
        return self._verify_id

    @verify_id.setter
    def verify_id(self, value):
        self._verify_id = value

    def to_alipay_dict(self):
        """Serialize truthy fields to a plain dict, recursing into any value
        that itself exposes ``to_alipay_dict`` (nested alipay models)."""
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[name] = value.to_alipay_dict()
                else:
                    params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a plain dict; returns None for falsy input."""
        if not d:
            return None
        o = AlipayFundTransAuctionBalanceQueryModel()
        for name in AlipayFundTransAuctionBalanceQueryModel._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
| 26.360465 | 69 | 0.565064 |
a932fada06a10c506cabaa74ae5bbc7ad93f37cc | 1,173 | py | Python | cron/poll_issue_close_stale.py | ECrownofFire/chaos | 0cfbb85ab52654967909aef54eff3a0e62b641bd | [
"MIT"
] | 1,804 | 2017-05-23T02:34:27.000Z | 2017-05-26T00:44:44.000Z | cron/poll_issue_close_stale.py | ECrownofFire/chaos | 0cfbb85ab52654967909aef54eff3a0e62b641bd | [
"MIT"
] | 345 | 2017-05-20T23:55:12.000Z | 2017-06-19T07:48:58.000Z | cron/poll_issue_close_stale.py | ECrownofFire/chaos | 0cfbb85ab52654967909aef54eff3a0e62b641bd | [
"MIT"
] | 248 | 2017-05-23T02:00:07.000Z | 2017-05-26T00:00:28.000Z | import arrow
import logging
import settings
import github_api as gh
__log = logging.getLogger("stale_issues")
def poll_issue_close_stale(api):
    """
    Looks through all open issues. For any open issue, if the issue is
    too old and has not been recently commented on, chaosbot issues a
    /vote close...
    """
    __log.info("Checking for stale issues...")

    # Fetch the open issues that have gone the longest without activity.
    stale_candidates = gh.issues.get_oldest_open_issues(api, settings.URN)
    __log.info("Got the oldest %d open issues" % len(stale_candidates))

    for issue in stale_candidates:
        issue_number = issue["number"]
        idle_seconds = (arrow.utcnow() - arrow.get(issue["updated_at"])).total_seconds()
        __log.info("Issue %d has not been updated in %d seconds" % (issue_number, idle_seconds))

        # Guard clause: anything updated recently enough is left alone.
        if idle_seconds <= settings.ISSUE_STALE_THRESHOLD:
            continue

        __log.info("/vote close issue %d" % issue_number)

        # leave an explanatory comment
        body = "/vote close \n\nThis issue hasn't been active for a while. " + \
               "To keep it open, react with :-1:"
        gh.comments.leave_comment(api, settings.URN, issue_number, body)
| 29.325 | 84 | 0.641944 |
852ffdd7c6e355d5a6cab28af8bb07957e23ec96 | 355 | py | Python | gumo/pullqueue/worker/domain/configuration/__init__.py | gumo-py/gumo-pullqueue | a8458b9931c14a8f71f447e6ca84892efb82ea2d | [
"MIT"
] | null | null | null | gumo/pullqueue/worker/domain/configuration/__init__.py | gumo-py/gumo-pullqueue | a8458b9931c14a8f71f447e6ca84892efb82ea2d | [
"MIT"
] | 25 | 2019-04-22T14:03:46.000Z | 2022-02-11T02:50:23.000Z | gumo/pullqueue/worker/domain/configuration/__init__.py | gumo-py/gumo-pullqueue | a8458b9931c14a8f71f447e6ca84892efb82ea2d | [
"MIT"
] | null | null | null | import dataclasses
import os
from typing import Optional
@dataclasses.dataclass(frozen=True)
class PullQueueWorkerConfiguration:
    """Immutable settings bundle for a pull-queue worker."""

    # Base URL of the pull-queue server this worker talks to.
    server_url: str
    # Seconds to sleep between polling attempts.
    polling_sleep_seconds: int
    # Optional logger-like object for outgoing requests; None disables logging.
    request_logger: object = None
    # Presumably the OAuth client ID used as token audience when calling the
    # server — TODO confirm against the worker's auth code.
    target_audience_client_id: Optional[str] = None
    # default_factory evaluates os.getpid() per instantiation, so each worker
    # process gets its own pid-based name rather than an import-time value.
    worker_name: str = dataclasses.field(default_factory=lambda: f'pid={os.getpid()}')
| 27.307692 | 86 | 0.777465 |
c416a946c0acf7ac1a9bd9c6820216fd7e2aa066 | 20,400 | py | Python | nemo/collections/nlp/models/intent_slot_classification/intent_slot_classification_model.py | Troublem1/NeMo | b03e5683438a64c08569d8436ac6680597e2b8af | [
"Apache-2.0"
] | null | null | null | nemo/collections/nlp/models/intent_slot_classification/intent_slot_classification_model.py | Troublem1/NeMo | b03e5683438a64c08569d8436ac6680597e2b8af | [
"Apache-2.0"
] | null | null | null | nemo/collections/nlp/models/intent_slot_classification/intent_slot_classification_model.py | Troublem1/NeMo | b03e5683438a64c08569d8436ac6680597e2b8af | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Dict, List, Optional
import onnx
import torch
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Trainer
from torch.utils.data import DataLoader
from nemo.collections.common.losses import AggregatorLoss, CrossEntropyLoss
from nemo.collections.nlp.data.intent_slot_classification import (
IntentSlotClassificationDataset,
IntentSlotDataDesc,
IntentSlotInferenceDataset,
)
from nemo.collections.nlp.metrics.classification_report import ClassificationReport
from nemo.collections.nlp.models.nlp_model import NLPModel
from nemo.collections.nlp.modules.common import SequenceTokenClassifier
from nemo.collections.nlp.modules.common.lm_utils import get_lm_model
from nemo.collections.nlp.parts.utils_funcs import tensor2list
from nemo.core.classes import typecheck
from nemo.core.classes.common import PretrainedModelInfo
from nemo.core.neural_types import NeuralType
from nemo.utils import logging
class IntentSlotClassificationModel(NLPModel):
    @property
    def input_types(self) -> Optional[Dict[str, NeuralType]]:
        # Inputs are whatever the underlying BERT encoder expects.
        return self.bert_model.input_types

    @property
    def output_types(self) -> Optional[Dict[str, NeuralType]]:
        # Outputs are the joint intent/slot classifier head's outputs.
        return self.classifier.output_types
    def __init__(self, cfg: DictConfig, trainer: Trainer = None):
        """ Initializes BERT Joint Intent and Slot model.

        Args:
            cfg: model configuration (tokenizer, language_model, data_dir, head, ...).
            trainer: optional PyTorch Lightning trainer.

        When cfg.data_dir is absent/missing on disk (e.g. restoring from a
        checkpoint), the restore-state flag is toggled on so placeholder
        data_desc values are used instead of reading the dataset.
        """
        self.max_seq_length = cfg.language_model.max_seq_length
        # Setup tokenizer.
        self.setup_tokenizer(cfg.tokenizer)
        # Check the presence of data_dir.
        if not cfg.data_dir or not os.path.exists(cfg.data_dir):
            # Disable setup methods.
            IntentSlotClassificationModel._set_model_restore_state(is_being_restored=True)
            # Set default values of data_desc.
            self._set_defaults_data_desc(cfg)
        else:
            self.data_dir = cfg.data_dir
            # Update configuration of data_desc.
            self._set_data_desc_to_cfg(cfg, cfg.data_dir, cfg.train_ds, cfg.validation_ds)
        # init superclass
        super().__init__(cfg=cfg, trainer=trainer)
        # Enable setup methods.
        IntentSlotClassificationModel._set_model_restore_state(is_being_restored=False)
        # Initialize Bert model
        self.bert_model = get_lm_model(
            pretrained_model_name=self.cfg.language_model.pretrained_model_name,
            config_file=self.register_artifact('language_model.config_file', cfg.language_model.config_file),
            config_dict=OmegaConf.to_container(self.cfg.language_model.config)
            if self.cfg.language_model.config
            else None,
            checkpoint_file=self.cfg.language_model.lm_checkpoint,
            vocab_file=self.register_artifact('tokenizer.vocab_file', cfg.tokenizer.vocab_file),
        )
        # Initialize Classifier.
        self._reconfigure_classifier()
    def _set_defaults_data_desc(self, cfg):
        """
        Method makes sure that cfg.data_desc params are set.
        If not, set's them to "dummy" defaults.

        Used when restoring from a checkpoint without a data_dir; the real
        values are filled in later by _set_data_desc_to_cfg.
        """
        if not hasattr(cfg, "data_desc"):
            # Struct mode must be off to add new keys to an OmegaConf node.
            OmegaConf.set_struct(cfg, False)
            cfg.data_desc = {}
            # Intents.
            cfg.data_desc.intent_labels = " "
            cfg.data_desc.intent_label_ids = {" ": 0}
            cfg.data_desc.intent_weights = [1]
            # Slots.
            cfg.data_desc.slot_labels = " "
            cfg.data_desc.slot_label_ids = {" ": 0}
            cfg.data_desc.slot_weights = [1]

            cfg.data_desc.pad_label = "O"
            # Re-enable struct mode so later typos on cfg raise.
            OmegaConf.set_struct(cfg, True)
    def _set_data_desc_to_cfg(self, cfg, data_dir, train_ds, validation_ds):
        """ Method creates IntentSlotDataDesc and copies generated values to cfg.data_desc. """
        # Save data from data desc to config - so it can be reused later, e.g. in inference.
        data_desc = IntentSlotDataDesc(data_dir=data_dir, modes=[train_ds.prefix, validation_ds.prefix])
        # Struct mode off while adding keys; re-enabled at the end.
        OmegaConf.set_struct(cfg, False)
        if not hasattr(cfg, "data_desc") or cfg.data_desc is None:
            cfg.data_desc = {}
        # Intents.
        cfg.data_desc.intent_labels = list(data_desc.intents_label_ids.keys())
        cfg.data_desc.intent_label_ids = data_desc.intents_label_ids
        cfg.data_desc.intent_weights = data_desc.intent_weights
        # Slots.
        cfg.data_desc.slot_labels = list(data_desc.slots_label_ids.keys())
        cfg.data_desc.slot_label_ids = data_desc.slots_label_ids
        cfg.data_desc.slot_weights = data_desc.slot_weights

        cfg.data_desc.pad_label = data_desc.pad_label

        # for older(pre - 1.0.0.b3) configs compatibility
        if not hasattr(cfg, "class_labels") or cfg.class_labels is None:
            cfg.class_labels = {}
            cfg.class_labels = OmegaConf.create(
                {'intent_labels_file': 'intent_labels.csv', 'slot_labels_file': 'slot_labels.csv'}
            )

        # Persist label maps next to the data and register them as model
        # artifacts so they travel with saved checkpoints.
        slot_labels_file = os.path.join(data_dir, cfg.class_labels.slot_labels_file)
        intent_labels_file = os.path.join(data_dir, cfg.class_labels.intent_labels_file)
        self._save_label_ids(data_desc.slots_label_ids, slot_labels_file)
        self._save_label_ids(data_desc.intents_label_ids, intent_labels_file)

        self.register_artifact('class_labels.intent_labels_file', intent_labels_file)
        self.register_artifact('class_labels.slot_labels_file', slot_labels_file)
        OmegaConf.set_struct(cfg, True)
    def _save_label_ids(self, label_ids: Dict[str, int], filename: str) -> None:
        """ Saves label ids map to a file """
        with open(filename, 'w') as out:
            # Sort by id so line number i holds the label with id i.
            labels, _ = zip(*sorted(label_ids.items(), key=lambda x: x[1]))
            out.write('\n'.join(labels))
            logging.info(f'Labels: {label_ids}')
            logging.info(f'Labels mapping saved to : {out.name}')
    def _reconfigure_classifier(self):
        """ Method reconfigures the classifier depending on the settings of model cfg.data_desc """
        # Joint head: one intent logit set per sequence, one slot logit set
        # per token, sized from the current data_desc label lists.
        self.classifier = SequenceTokenClassifier(
            hidden_size=self.bert_model.config.hidden_size,
            num_intents=len(self.cfg.data_desc.intent_labels),
            num_slots=len(self.cfg.data_desc.slot_labels),
            dropout=self.cfg.head.fc_dropout,
            num_layers=self.cfg.head.num_output_layers,
            log_softmax=False,
        )

        # define losses
        if self.cfg.class_balancing == 'weighted_loss':
            # You may need to increase the number of epochs for convergence when using weighted_loss
            self.intent_loss = CrossEntropyLoss(logits_ndim=2, weight=self.cfg.data_desc.intent_weights)
            self.slot_loss = CrossEntropyLoss(logits_ndim=3, weight=self.cfg.data_desc.slot_weights)
        else:
            self.intent_loss = CrossEntropyLoss(logits_ndim=2)
            self.slot_loss = CrossEntropyLoss(logits_ndim=3)

        # Combined objective; intent_loss_weight balances the two parts.
        self.total_loss = AggregatorLoss(
            num_inputs=2, weights=[self.cfg.intent_loss_weight, 1.0 - self.cfg.intent_loss_weight]
        )

        # setup to track metrics
        self.intent_classification_report = ClassificationReport(
            num_classes=len(self.cfg.data_desc.intent_labels),
            label_ids=self.cfg.data_desc.intent_label_ids,
            dist_sync_on_step=True,
            mode='micro',
        )
        self.slot_classification_report = ClassificationReport(
            num_classes=len(self.cfg.data_desc.slot_labels),
            label_ids=self.cfg.data_desc.slot_label_ids,
            dist_sync_on_step=True,
            mode='micro',
        )
    def update_data_dir_for_training(self, data_dir: str, train_ds, validation_ds) -> None:
        """
        Update data directory and get data stats with Data Descriptor.
        Also, reconfigures the classifier - to cope with data with e.g. different number of slots.

        Args:
            data_dir: path to data directory
            train_ds: training dataset config section (needs .prefix).
            validation_ds: validation dataset config section (needs .prefix).
        """
        logging.info(f'Setting data_dir to {data_dir}.')
        self.data_dir = data_dir
        # Update configuration with new data.
        self._set_data_desc_to_cfg(self.cfg, data_dir, train_ds, validation_ds)
        # Reconfigure the classifier for different settings (number of intents, slots etc.).
        self._reconfigure_classifier()
    def update_data_dir_for_testing(self, data_dir) -> None:
        """
        Update data directory.

        Unlike update_data_dir_for_training, this only repoints the path:
        no data_desc refresh and no classifier rebuild.

        Args:
            data_dir: path to data directory
        """
        logging.info(f'Setting data_dir to {data_dir}.')
        self.data_dir = data_dir
    @typecheck()
    def forward(self, input_ids, token_type_ids, attention_mask):
        """
        No special modification required for Lightning, define it as you normally would
        in the `nn.Module` in vanilla PyTorch.

        Returns:
            (intent_logits, slot_logits) from the joint classifier head.
        """
        hidden_states = self.bert_model(
            input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask
        )
        intent_logits, slot_logits = self.classifier(hidden_states=hidden_states)
        return intent_logits, slot_logits
    def training_step(self, batch, batch_idx):
        """
        Lightning calls this inside the training loop with the data from the training dataloader
        passed in as `batch`.
        """
        # forward pass
        input_ids, input_type_ids, input_mask, loss_mask, subtokens_mask, intent_labels, slot_labels = batch
        intent_logits, slot_logits = self(
            input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask
        )

        # calculate combined loss for intents and slots
        intent_loss = self.intent_loss(logits=intent_logits, labels=intent_labels)
        # loss_mask excludes padding/special subtokens from the slot loss.
        slot_loss = self.slot_loss(logits=slot_logits, labels=slot_labels, loss_mask=loss_mask)
        train_loss = self.total_loss(loss_1=intent_loss, loss_2=slot_loss)

        lr = self._optimizer.param_groups[0]['lr']

        self.log('train_loss', train_loss)
        self.log('lr', lr, prog_bar=True)

        return {
            'loss': train_loss,
            'lr': lr,
        }
    def validation_step(self, batch, batch_idx):
        """
        Lightning calls this inside the validation loop with the data from the validation dataloader
        passed in as `batch`.
        """
        input_ids, input_type_ids, input_mask, loss_mask, subtokens_mask, intent_labels, slot_labels = batch
        intent_logits, slot_logits = self(
            input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask
        )

        # calculate combined loss for intents and slots
        intent_loss = self.intent_loss(logits=intent_logits, labels=intent_labels)
        slot_loss = self.slot_loss(logits=slot_logits, labels=slot_labels, loss_mask=loss_mask)
        val_loss = self.total_loss(loss_1=intent_loss, loss_2=slot_loss)

        # calculate accuracy metrics for intents and slot reporting
        # intents
        preds = torch.argmax(intent_logits, axis=-1)
        self.intent_classification_report.update(preds, intent_labels)
        # slots
        # Score only first-subtoken positions (mask stores floats, hence > 0.5).
        subtokens_mask = subtokens_mask > 0.5
        preds = torch.argmax(slot_logits, axis=-1)[subtokens_mask]
        slot_labels = slot_labels[subtokens_mask]
        self.slot_classification_report.update(preds, slot_labels)

        # tp/fn/fp tensors are returned so Lightning keeps them per step;
        # the aggregated report is computed in validation_epoch_end.
        return {
            'val_loss': val_loss,
            'intent_tp': self.intent_classification_report.tp,
            'intent_fn': self.intent_classification_report.fn,
            'intent_fp': self.intent_classification_report.fp,
            'slot_tp': self.slot_classification_report.tp,
            'slot_fn': self.slot_classification_report.fn,
            'slot_fp': self.slot_classification_report.fp,
        }
    def validation_epoch_end(self, outputs):
        """
        Called at the end of validation to aggregate outputs.
        :param outputs: list of individual outputs of each validation step.
        """
        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()

        # calculate metrics and log classification report (separately for intents and slots)
        intent_precision, intent_recall, intent_f1, intent_report = self.intent_classification_report.compute()
        logging.info(f'Intent report: {intent_report}')

        slot_precision, slot_recall, slot_f1, slot_report = self.slot_classification_report.compute()
        logging.info(f'Slot report: {slot_report}')

        self.log('val_loss', avg_loss)
        self.log('intent_precision', intent_precision)
        self.log('intent_recall', intent_recall)
        self.log('intent_f1', intent_f1)
        self.log('slot_precision', slot_precision)
        self.log('slot_recall', slot_recall)
        self.log('slot_f1', slot_f1)

        return {
            'val_loss': avg_loss,
            'intent_precision': intent_precision,
            'intent_recall': intent_recall,
            'intent_f1': intent_f1,
            'slot_precision': slot_precision,
            'slot_recall': slot_recall,
            'slot_f1': slot_f1,
        }
    def test_step(self, batch, batch_idx):
        """
        Lightning calls this inside the test loop with the data from the test dataloader
        passed in as `batch`.
        """
        # Test uses the same per-batch logic as validation.
        return self.validation_step(batch, batch_idx)

    def test_epoch_end(self, outputs):
        """
        Called at the end of test to aggregate outputs.
        :param outputs: list of individual outputs of each test step.
        """
        # Same aggregation as validation.
        return self.validation_epoch_end(outputs)
    def setup_training_data(self, train_data_config: Optional[DictConfig]):
        # Build the training dataloader from the 'train_ds' config section.
        self._train_dl = self._setup_dataloader_from_config(cfg=train_data_config)

    def setup_validation_data(self, val_data_config: Optional[DictConfig]):
        # Build the validation dataloader from the 'validation_ds' config section.
        self._validation_dl = self._setup_dataloader_from_config(cfg=val_data_config)

    def setup_test_data(self, test_data_config: Optional[DictConfig]):
        # Build the test dataloader from the 'test_ds' config section.
        self._test_dl = self._setup_dataloader_from_config(cfg=test_data_config)
def _setup_dataloader_from_config(self, cfg: DictConfig):
input_file = f'{self.data_dir}/{cfg.prefix}.tsv'
slot_file = f'{self.data_dir}/{cfg.prefix}_slots.tsv'
if not (os.path.exists(input_file) and os.path.exists(slot_file)):
raise FileNotFoundError(
f'{input_file} or {slot_file} not found. Please refer to the documentation for the right format \
of Intents and Slots files.'
)
dataset = IntentSlotClassificationDataset(
input_file=input_file,
slot_file=slot_file,
tokenizer=self.tokenizer,
max_seq_length=self.max_seq_length,
num_samples=cfg.num_samples,
pad_label=self.cfg.data_desc.pad_label,
ignore_extra_tokens=self.cfg.ignore_extra_tokens,
ignore_start_end=self.cfg.ignore_start_end,
)
return DataLoader(
dataset=dataset,
batch_size=cfg.batch_size,
shuffle=cfg.shuffle,
num_workers=cfg.num_workers,
pin_memory=cfg.pin_memory,
drop_last=cfg.drop_last,
collate_fn=dataset.collate_fn,
)
def _setup_infer_dataloader(self, queries: List[str], test_ds) -> 'torch.utils.data.DataLoader':
"""
Setup function for a infer data loader.
Args:
queries: text
batch_size: batch size to use during inference
Returns:
A pytorch DataLoader.
"""
dataset = IntentSlotInferenceDataset(
tokenizer=self.tokenizer, queries=queries, max_seq_length=-1, do_lower_case=False
)
return torch.utils.data.DataLoader(
dataset=dataset,
collate_fn=dataset.collate_fn,
batch_size=test_ds.batch_size,
shuffle=test_ds.shuffle,
num_workers=test_ds.num_workers,
pin_memory=test_ds.pin_memory,
drop_last=test_ds.drop_last,
)
def predict_from_examples(self, queries: List[str], test_ds) -> List[List[str]]:
"""
Get prediction for the queries (intent and slots)
Args:
queries: text sequences
test_ds: Dataset configuration section.
Returns:
predicted_intents, predicted_slots: model intent and slot predictions
"""
predicted_intents = []
predicted_slots = []
mode = self.training
try:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Retrieve intent and slot vocabularies from configuration.
intent_labels = self.cfg.data_desc.intent_labels
slot_labels = self.cfg.data_desc.slot_labels
# Initialize tokenizer.
# if not hasattr(self, "tokenizer"):
# self._setup_tokenizer(self.cfg.tokenizer)
# Initialize modules.
# self._reconfigure_classifier()
# Switch model to evaluation mode
self.eval()
self.to(device)
# Dataset.
infer_datalayer = self._setup_infer_dataloader(queries, test_ds)
for batch in infer_datalayer:
input_ids, input_type_ids, input_mask, loss_mask, subtokens_mask = batch
intent_logits, slot_logits = self.forward(
input_ids=input_ids.to(device),
token_type_ids=input_type_ids.to(device),
attention_mask=input_mask.to(device),
)
# predict intents and slots for these examples
# intents
intent_preds = tensor2list(torch.argmax(intent_logits, axis=-1))
# convert numerical outputs to Intent and Slot labels from the dictionaries
for intent_num in intent_preds:
if intent_num < len(intent_labels):
predicted_intents.append(intent_labels[int(intent_num)])
else:
# should not happen
predicted_intents.append("Unknown Intent")
# slots
slot_preds = torch.argmax(slot_logits, axis=-1)
for slot_preds_query, mask_query in zip(slot_preds, subtokens_mask):
query_slots = ''
for slot, mask in zip(slot_preds_query, mask_query):
if mask == 1:
if slot < len(slot_labels):
query_slots += slot_labels[int(slot)] + ' '
else:
query_slots += 'Unknown_slot '
predicted_slots.append(query_slots.strip())
finally:
# set mode back to its original value
self.train(mode=mode)
return predicted_intents, predicted_slots
@classmethod
def list_available_models(cls) -> Optional[PretrainedModelInfo]:
"""
This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
model = PretrainedModelInfo(
pretrained_model_name="Joint_Intent_Slot_Assistant",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemonlpmodels/versions/1.0.0a5/files/Joint_Intent_Slot_Assistant.nemo",
description="This models is trained on this https://github.com/xliuhw/NLU-Evaluation-Data dataset which includes 64 various intents and 55 slots. Final Intent accuracy is about 87%, Slot accuracy is about 89%.",
)
result.append(model)
return result
| 42.061856 | 223 | 0.655147 |
3a599ffa05e1b3de2ef24adde8391167d20ba454 | 26,995 | py | Python | exact_sync/v1/api/annotation_media_files_api.py | maubreville/EXACT-Sync | 47a47e5af360292677601a877e0765d5e01bd2df | [
"MIT"
] | 4 | 2020-10-22T08:46:00.000Z | 2021-09-22T21:40:03.000Z | exact_sync/v1/api/annotation_media_files_api.py | maubreville/EXACT-Sync | 47a47e5af360292677601a877e0765d5e01bd2df | [
"MIT"
] | null | null | null | exact_sync/v1/api/annotation_media_files_api.py | maubreville/EXACT-Sync | 47a47e5af360292677601a877e0765d5e01bd2df | [
"MIT"
] | 1 | 2020-07-26T15:16:17.000Z | 2020-07-26T15:16:17.000Z | # coding: utf-8
"""
EXACT - API
API to interact with the EXACT Server # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
from exact_sync.v1.api.pagination_base_api import PaginationBaseAPI
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from exact_sync.v1.api_client import ApiClient
class AnnotationMediaFilesApi(PaginationBaseAPI):
    """NOTE: This class is auto generated by the swagger code generator program.

    Client for the /api/v1/annotations/annotation_media_files/ endpoints
    (create / list / retrieve / update / partial-update / destroy).

    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """
    def __init__(self, api_client=None):
        # Fall back to a default-configured client when none is supplied.
        if api_client is None:
            api_client = ApiClient()
        # Every endpoint method funnels its HTTP request through this client.
        self.api_client = api_client
def create_annotation_media_file(self, **kwargs): # noqa: E501
"""create_annotation_media_file # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_annotation_media_file(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name:
:param int media_file_type:
:param str file:
:param int annotation:
:return: AnnotationMediaFile
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_annotation_media_file_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.create_annotation_media_file_with_http_info(**kwargs) # noqa: E501
return data
    def create_annotation_media_file_with_http_info(self, **kwargs): # noqa: E501
        """create_annotation_media_file # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_annotation_media_file_with_http_info(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str name:
        :param int media_file_type:
        :param str file:
        :param int annotation:
        :return: AnnotationMediaFile
            If the method is called asynchronously,
            returns the request thread.
        """
        all_params = ['name', 'media_file_type', 'file', 'annotation'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshots self/kwargs/all_params; merging kwargs into that
        # snapshot below yields one dict holding every argument by name. This
        # is order-sensitive generated code — do not move statements around it.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_annotation_media_file" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        # The upload itself is sent as a multipart file; the remaining fields
        # travel as ordinary form parameters.
        if 'file' in params:
            local_var_files['file'] = params['file'] # noqa: E501
        if 'name' in params:
            form_params.append(('name', params['name'])) # noqa: E501
        if 'media_file_type' in params:
            form_params.append(('media_file_type', params['media_file_type'])) # noqa: E501
        if 'annotation' in params:
            form_params.append(('annotation', params['annotation'])) # noqa: E501
        body_params = {}
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
            ['multipart/form-data']) # noqa: E501
        # Authentication setting
        auth_settings = ['basicAuth'] # noqa: E501
        # NOTE(review): response_type is the plural 'AnnotationMediaFiles'
        # although the docstring documents a single AnnotationMediaFile —
        # confirm against the server schema before changing.
        return self.api_client.call_api(
            '/api/v1/annotations/annotation_media_files/', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='AnnotationMediaFiles', # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def destroy_annotation_media_file(self, id, **kwargs): # noqa: E501
"""destroy_annotation_media_file # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.destroy_annotation_media_file(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.destroy_annotation_media_file_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.destroy_annotation_media_file_with_http_info(id, **kwargs) # noqa: E501
return data
    def destroy_annotation_media_file_with_http_info(self, id, **kwargs): # noqa: E501
        """destroy_annotation_media_file # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.destroy_annotation_media_file_with_http_info(id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str id: (required)
        :return: None
            If the method is called asynchronously,
            returns the request thread.
        """
        all_params = ['id'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshots self/id/kwargs/all_params; merging kwargs into it
        # below yields one dict holding every argument by name. This is
        # order-sensitive generated code — do not move statements around it.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method destroy_annotation_media_file" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params or
                params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `destroy_annotation_media_file`") # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id'] # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # Authentication setting
        auth_settings = ['basicAuth'] # noqa: E501
        return self.api_client.call_api(
            '/api/v1/annotations/annotation_media_files/{id}/', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None, # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def list_annotation_media_files(self, pagination:bool=True, **kwargs): # noqa: E501
"""list_annotation_media_files # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_annotation_media_files(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int limit: Number of results to return per page.
:param int offset: The initial index from which to return the results.
:param str id: id
:param str name: name
:param str name__contains: name__contains
:param str annotation: annotation
:param str media_file_type: media_file_type
:return: AnnotationMediaFiles
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if pagination:
if kwargs.get('async_req'):
return self.list_annotation_media_files_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.list_annotation_media_files_with_http_info(**kwargs) # noqa: E501
return data
else:
return self._get_all(self.list_annotation_media_files_with_http_info, **kwargs)
    def list_annotation_media_files_with_http_info(self, **kwargs): # noqa: E501
        """list_annotation_media_files # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_annotation_media_files_with_http_info(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int limit: Number of results to return per page.
        :param int offset: The initial index from which to return the results.
        :param str id: id
        :param str name: name
        :param str name__contains: name__contains
        :param str annotation: annotation
        :param str media_file_type: media_file_type
        :return: AnnotationMediaFiles
            If the method is called asynchronously,
            returns the request thread.
        """
        all_params = ['limit', 'offset', 'id', 'name', 'name__contains', 'annotation', 'media_file_type'] # noqa: E501
        # omit/expand/fields are dynamic-serializer options accepted on top of
        # the documented filters.
        all_params.append('omit')
        all_params.append('expand')
        all_params.append('fields')
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshots self/kwargs/all_params; merging kwargs into it
        # below yields one dict holding every argument by name. This is
        # order-sensitive generated code — do not move statements around it.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_annotation_media_files" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        # Every provided filter is forwarded as a query-string parameter.
        query_params = []
        if 'limit' in params:
            query_params.append(('limit', params['limit'])) # noqa: E501
        if 'offset' in params:
            query_params.append(('offset', params['offset'])) # noqa: E501
        if 'id' in params:
            query_params.append(('id', params['id'])) # noqa: E501
        if 'name' in params:
            query_params.append(('name', params['name'])) # noqa: E501
        if 'name__contains' in params:
            query_params.append(('name__contains', params['name__contains'])) # noqa: E501
        if 'annotation' in params:
            query_params.append(('annotation', params['annotation'])) # noqa: E501
        if 'media_file_type' in params:
            query_params.append(('media_file_type', params['media_file_type'])) # noqa: E501
        if 'omit' in params:
            query_params.append(('omit', params['omit'])) # noqa: E501
        if 'fields' in params:
            query_params.append(('fields', params['fields'])) # noqa: E501
        if 'expand' in params:
            query_params.append(('expand', params['expand'])) # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = ['basicAuth'] # noqa: E501
        return self.api_client.call_api(
            '/api/v1/annotations/annotation_media_files/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='AnnotationMediaFiles', # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def partial_update_annotation_media_file(self, id, **kwargs): # noqa: E501
"""partial_update_annotation_media_file # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.partial_update_annotation_media_file(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param str name:
:param int media_file_type:
:param int annotation:
:return: AnnotationMediaFile
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.partial_update_annotation_media_file_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.partial_update_annotation_media_file_with_http_info(id, **kwargs) # noqa: E501
return data
    def partial_update_annotation_media_file_with_http_info(self, id, **kwargs): # noqa: E501
        """partial_update_annotation_media_file # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.partial_update_annotation_media_file_with_http_info(id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str id: (required)
        :param str name:
        :param int media_file_type:
        :param int annotation:
        :return: AnnotationMediaFile
            If the method is called asynchronously,
            returns the request thread.
        """
        all_params = ['id', 'name', 'media_file_type', 'annotation'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshots self/id/kwargs/all_params; merging kwargs into it
        # below yields one dict holding every argument by name. This is
        # order-sensitive generated code — do not move statements around it.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method partial_update_annotation_media_file" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params or
                params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `partial_update_annotation_media_file`") # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id'] # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        # PATCH semantics: only the fields actually supplied are sent in the
        # JSON body, leaving the others untouched on the server.
        body_params = {}
        if 'name' in params:
            body_params['name'] = params['name']
        if 'media_file_type' in params:
            body_params['media_file_type'] = params['media_file_type']
        if 'annotation' in params:
            body_params['annotation'] = params['annotation']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
            ['application/json', 'application/x-www-form-urlencoded', 'multipart/form-data']) # noqa: E501
        # Authentication setting
        auth_settings = ['basicAuth'] # noqa: E501
        return self.api_client.call_api(
            '/api/v1/annotations/annotation_media_files/{id}/', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='AnnotationMediaFile', # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def retrieve_annotation_media_file(self, id, **kwargs): # noqa: E501
"""retrieve_annotation_media_file # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.retrieve_annotation_media_file(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param str name: name
:param str name__contains: name__contains
:param str annotation: annotation
:param str media_file_type: media_file_type
:return: AnnotationMediaFile
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.retrieve_annotation_media_file_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.retrieve_annotation_media_file_with_http_info(id, **kwargs) # noqa: E501
return data
    def retrieve_annotation_media_file_with_http_info(self, id, **kwargs): # noqa: E501
        """retrieve_annotation_media_file # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.retrieve_annotation_media_file_with_http_info(id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str id: (required)
        :param str name: name
        :param str name__contains: name__contains
        :param str annotation: annotation
        :param str media_file_type: media_file_type
        :return: AnnotationMediaFile
            If the method is called asynchronously,
            returns the request thread.
        """
        all_params = ['id', 'name', 'name__contains', 'annotation', 'media_file_type'] # noqa: E501
        all_params.append('async_req')
        # omit/fields/expand are dynamic-serializer options accepted on top of
        # the documented filters.
        all_params.append('omit')
        all_params.append('fields')
        all_params.append('expand')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshots self/id/kwargs/all_params; merging kwargs into it
        # below yields one dict holding every argument by name. This is
        # order-sensitive generated code — do not move statements around it.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method retrieve_annotation_media_file" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params or
                params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `retrieve_annotation_media_file`") # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id'] # noqa: E501
        query_params = []
        if 'name' in params:
            query_params.append(('name', params['name'])) # noqa: E501
        if 'name__contains' in params:
            query_params.append(('name__contains', params['name__contains'])) # noqa: E501
        if 'annotation' in params:
            query_params.append(('annotation', params['annotation'])) # noqa: E501
        if 'media_file_type' in params:
            query_params.append(('media_file_type', params['media_file_type'])) # noqa: E501
        if 'omit' in params:
            query_params.append(('omit', params['omit'])) # noqa: E501
        if 'fields' in params:
            query_params.append(('fields', params['fields'])) # noqa: E501
        if 'expand' in params:
            query_params.append(('expand', params['expand'])) # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = ['basicAuth'] # noqa: E501
        return self.api_client.call_api(
            '/api/v1/annotations/annotation_media_files/{id}/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='AnnotationMediaFile', # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def update_annotation_media_file(self, id, **kwargs): # noqa: E501
"""update_annotation_media_file # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_annotation_media_file(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param AnnotationMediaFile body:
:return: AnnotationMediaFile
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_annotation_media_file_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.update_annotation_media_file_with_http_info(id, **kwargs) # noqa: E501
return data
def update_annotation_media_file_with_http_info(self, id, **kwargs): # noqa: E501
"""update_annotation_media_file # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_annotation_media_file_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param AnnotationMediaFile body:
:return: AnnotationMediaFile
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_annotation_media_file" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `update_annotation_media_file`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'file' in body:
local_var_files['file'] = body['file'] # noqa: E501
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded', 'multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/api/v1/annotations/annotation_media_files/{id}/', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AnnotationMediaFile', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 39.466374 | 133 | 0.611372 |
e3cc81715ba207db2360371f7349896ae4fea881 | 26,737 | py | Python | venv/lib/python3.8/site-packages/pygments/lexers/markup.py | felipesch92/projeto_kivy | 382827b9a632c5c3989a3129a2d3ee29b0defcf3 | [
"MIT"
] | 603 | 2020-12-23T13:49:32.000Z | 2022-03-31T23:38:03.000Z | venv/lib/python3.8/site-packages/pygments/lexers/markup.py | felipesch92/projeto_kivy | 382827b9a632c5c3989a3129a2d3ee29b0defcf3 | [
"MIT"
] | 387 | 2020-12-15T14:54:04.000Z | 2022-03-31T07:00:21.000Z | venv/lib/python3.8/site-packages/pygments/lexers/markup.py | felipesch92/projeto_kivy | 382827b9a632c5c3989a3129a2d3ee29b0defcf3 | [
"MIT"
] | 63 | 2015-01-04T07:11:06.000Z | 2020-11-28T21:24:42.000Z | """
pygments.lexers.markup
~~~~~~~~~~~~~~~~~~~~~~
Lexers for non-HTML markup languages.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexers.html import HtmlLexer, XmlLexer
from pygments.lexers.javascript import JavascriptLexer
from pygments.lexers.css import CssLexer
from pygments.lexer import RegexLexer, DelegatingLexer, include, bygroups, \
using, this, do_insertions, default, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic, Other
from pygments.util import get_bool_opt, ClassNotFound
__all__ = ['BBCodeLexer', 'MoinWikiLexer', 'RstLexer', 'TexLexer', 'GroffLexer',
'MozPreprocHashLexer', 'MozPreprocPercentLexer',
'MozPreprocXulLexer', 'MozPreprocJavascriptLexer',
'MozPreprocCssLexer', 'MarkdownLexer', 'TiddlyWiki5Lexer']
class BBCodeLexer(RegexLexer):
    """
    A lexer that highlights BBCode(-like) syntax.
    .. versionadded:: 0.6
    """
    name = 'BBCode'
    aliases = ['bbcode']
    mimetypes = ['text/x-bbcode']
    # Two states: 'root' scans plain text until a '[' that opens a tag;
    # 'tag' consumes the tag name, attributes and argument until ']'.
    tokens = {
        'root': [
            (r'[^[]+', Text),
            # tag/end tag begin
            (r'\[/?\w+', Keyword, 'tag'),
            # stray bracket
            (r'\[', Text),
        ],
        'tag': [
            (r'\s+', Text),
            # attribute with value
            (r'(\w+)(=)("?[^\s"\]]+"?)',
             bygroups(Name.Attribute, Operator, String)),
            # tag argument (a la [color=green])
            (r'(=)("?[^\s"\]]+"?)',
             bygroups(Operator, String)),
            # tag end
            (r'\]', Keyword, '#pop'),
        ],
    }
class MoinWikiLexer(RegexLexer):
    """
    For MoinMoin (and Trac) Wiki markup.
    .. versionadded:: 0.7
    """
    name = 'MoinMoin/Trac Wiki markup'
    aliases = ['trac-wiki', 'moin']
    filenames = []
    mimetypes = ['text/x-trac-wiki']
    flags = re.MULTILINE | re.IGNORECASE
    # 'root' handles line-oriented wiki constructs; literal '{{{ ... }}}'
    # blocks are scanned in the nested 'codeblock' state.
    tokens = {
        'root': [
            (r'^#.*$', Comment),
            (r'(!)(\S+)', bygroups(Keyword, Text)), # Ignore-next
            # Titles
            (r'^(=+)([^=]+)(=+)(\s*#.+)?$',
             bygroups(Generic.Heading, using(this), Generic.Heading, String)),
            # Literal code blocks, with optional shebang
            (r'(\{\{\{)(\n#!.+)?', bygroups(Name.Builtin, Name.Namespace), 'codeblock'),
            (r'(\'\'\'?|\|\||`|__|~~|\^|,,|::)', Comment), # Formatting
            # Lists
            (r'^( +)([.*-])( )', bygroups(Text, Name.Builtin, Text)),
            (r'^( +)([a-z]{1,5}\.)( )', bygroups(Text, Name.Builtin, Text)),
            # Other Formatting
            (r'\[\[\w+.*?\]\]', Keyword), # Macro
            (r'(\[[^\s\]]+)(\s+[^\]]+?)?(\])',
             bygroups(Keyword, String, Keyword)), # Link
            (r'^----+$', Keyword), # Horizontal rules
            (r'[^\n\'\[{!_~^,|]+', Text),
            (r'\n', Text),
            (r'.', Text),
        ],
        'codeblock': [
            (r'\}\}\}', Name.Builtin, '#pop'),
            # these blocks are allowed to be nested in Trac, but not MoinMoin
            (r'\{\{\{', Text, '#push'),
            (r'[^{}]+', Comment.Preproc), # slurp boring text
            (r'.', Comment.Preproc), # allow loose { or }
        ],
    }
class RstLexer(RegexLexer):
"""
For `reStructuredText <http://docutils.sf.net/rst.html>`_ markup.
.. versionadded:: 0.7
Additional options accepted:
`handlecodeblocks`
Highlight the contents of ``.. sourcecode:: language``,
``.. code:: language`` and ``.. code-block:: language``
directives with a lexer for the given language (default:
``True``).
.. versionadded:: 0.8
"""
name = 'reStructuredText'
aliases = ['rst', 'rest', 'restructuredtext']
filenames = ['*.rst', '*.rest']
mimetypes = ["text/x-rst", "text/prs.fallenstein.rst"]
flags = re.MULTILINE
    def _handle_sourcecode(self, match):
        """Tokenize a ``.. sourcecode::``/``.. code::`` directive, delegating
        the indented code body to the language's own lexer when available."""
        from pygments.lexers import get_lexer_by_name
        # section header
        yield match.start(1), Punctuation, match.group(1)
        yield match.start(2), Text, match.group(2)
        yield match.start(3), Operator.Word, match.group(3)
        yield match.start(4), Punctuation, match.group(4)
        yield match.start(5), Text, match.group(5)
        yield match.start(6), Keyword, match.group(6)
        yield match.start(7), Text, match.group(7)
        # lookup lexer if wanted and existing
        lexer = None
        if self.handlecodeblocks:
            try:
                lexer = get_lexer_by_name(match.group(6).strip())
            except ClassNotFound:
                pass
        indention = match.group(8)
        indention_size = len(indention)
        code = (indention + match.group(9) + match.group(10) + match.group(11))
        # no lexer for this language. handle it like it was a code block
        if lexer is None:
            yield match.start(8), String, code
            return
        # highlight the lines with the lexer.
        # Strip the common indentation before lexing, and record it as Text
        # insertions so do_insertions can put it back at each line start.
        ins = []
        codelines = code.splitlines(True)
        code = ''
        for line in codelines:
            if len(line) > indention_size:
                ins.append((len(code), [(0, Text, line[:indention_size])]))
                code += line[indention_size:]
            else:
                code += line
        yield from do_insertions(ins, lexer.get_tokens_unprocessed(code))
# from docutils.parsers.rst.states
closers = '\'")]}>\u2019\u201d\xbb!?'
unicode_delimiters = '\u2010\u2011\u2012\u2013\u2014\u00a0'
end_string_suffix = (r'((?=$)|(?=[-/:.,; \n\x00%s%s]))'
% (re.escape(unicode_delimiters),
re.escape(closers)))
tokens = {
'root': [
# Heading with overline
(r'^(=+|-+|`+|:+|\.+|\'+|"+|~+|\^+|_+|\*+|\++|#+)([ \t]*\n)'
r'(.+)(\n)(\1)(\n)',
bygroups(Generic.Heading, Text, Generic.Heading,
Text, Generic.Heading, Text)),
# Plain heading
(r'^(\S.*)(\n)(={3,}|-{3,}|`{3,}|:{3,}|\.{3,}|\'{3,}|"{3,}|'
r'~{3,}|\^{3,}|_{3,}|\*{3,}|\+{3,}|#{3,})(\n)',
bygroups(Generic.Heading, Text, Generic.Heading, Text)),
# Bulleted lists
(r'^(\s*)([-*+])( .+\n(?:\1 .+\n)*)',
bygroups(Text, Number, using(this, state='inline'))),
# Numbered lists
(r'^(\s*)([0-9#ivxlcmIVXLCM]+\.)( .+\n(?:\1 .+\n)*)',
bygroups(Text, Number, using(this, state='inline'))),
(r'^(\s*)(\(?[0-9#ivxlcmIVXLCM]+\))( .+\n(?:\1 .+\n)*)',
bygroups(Text, Number, using(this, state='inline'))),
# Numbered, but keep words at BOL from becoming lists
(r'^(\s*)([A-Z]+\.)( .+\n(?:\1 .+\n)+)',
bygroups(Text, Number, using(this, state='inline'))),
(r'^(\s*)(\(?[A-Za-z]+\))( .+\n(?:\1 .+\n)+)',
bygroups(Text, Number, using(this, state='inline'))),
# Line blocks
(r'^(\s*)(\|)( .+\n(?:\| .+\n)*)',
bygroups(Text, Operator, using(this, state='inline'))),
# Sourcecode directives
(r'^( *\.\.)(\s*)((?:source)?code(?:-block)?)(::)([ \t]*)([^\n]+)'
r'(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\8.*)?\n)+)',
_handle_sourcecode),
# A directive
(r'^( *\.\.)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
bygroups(Punctuation, Text, Operator.Word, Punctuation, Text,
using(this, state='inline'))),
# A reference target
(r'^( *\.\.)(\s*)(_(?:[^:\\]|\\.)+:)(.*?)$',
bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
# A footnote/citation target
(r'^( *\.\.)(\s*)(\[.+\])(.*?)$',
bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
# A substitution def
(r'^( *\.\.)(\s*)(\|.+\|)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
bygroups(Punctuation, Text, Name.Tag, Text, Operator.Word,
Punctuation, Text, using(this, state='inline'))),
# Comments
(r'^ *\.\..*(\n( +.*\n|\n)+)?', Comment.Preproc),
# Field list marker
(r'^( *)(:(?:\\\\|\\:|[^:\n])+:(?=\s))([ \t]*)',
bygroups(Text, Name.Class, Text)),
# Definition list
(r'^(\S.*(?<!::)\n)((?:(?: +.*)\n)+)',
bygroups(using(this, state='inline'), using(this, state='inline'))),
# Code blocks
(r'(::)(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\3.*)?\n)+)',
bygroups(String.Escape, Text, String, String, Text, String)),
include('inline'),
],
'inline': [
(r'\\.', Text), # escape
(r'``', String, 'literal'), # code
(r'(`.+?)(<.+?>)(`__?)', # reference with inline target
bygroups(String, String.Interpol, String)),
(r'`.+?`__?', String), # reference
(r'(`.+?`)(:[a-zA-Z0-9:-]+?:)?',
bygroups(Name.Variable, Name.Attribute)), # role
(r'(:[a-zA-Z0-9:-]+?:)(`.+?`)',
bygroups(Name.Attribute, Name.Variable)), # role (content first)
(r'\*\*.+?\*\*', Generic.Strong), # Strong emphasis
(r'\*.+?\*', Generic.Emph), # Emphasis
(r'\[.*?\]_', String), # Footnote or citation
(r'<.+?>', Name.Tag), # Hyperlink
(r'[^\\\n\[*`:]+', Text),
(r'.', Text),
],
'literal': [
(r'[^`]+', String),
(r'``' + end_string_suffix, String, '#pop'),
(r'`', String),
]
}
def __init__(self, **options):
self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True)
RegexLexer.__init__(self, **options)
def analyse_text(text):
if text[:2] == '..' and text[2:3] != '.':
return 0.3
p1 = text.find("\n")
p2 = text.find("\n", p1 + 1)
if (p2 > -1 and # has two lines
p1 * 2 + 1 == p2 and # they are the same length
text[p1+1] in '-=' and # the next line both starts and ends with
text[p1+1] == text[p2-1]): # ...a sufficiently high header
return 0.5
class TexLexer(RegexLexer):
    """
    Lexer for the TeX and LaTeX typesetting languages.

    Math is handled in dedicated states: ``\\(`` and ``$`` enter
    ``inlinemath`` while ``\\[`` and ``$$`` enter ``displaymath``; a
    backslash command pushes ``command`` to consume optional ``[...]``
    arguments and a star variant.
    """
    name = 'TeX'
    aliases = ['tex', 'latex']
    filenames = ['*.tex', '*.aux', '*.toc']
    mimetypes = ['text/x-tex', 'text/x-latex']

    tokens = {
        # Rules shared by text and math modes: comments, braces and
        # the special characters & _ ^.
        'general': [
            (r'%.*?\n', Comment),
            (r'[{}]', Name.Builtin),
            (r'[&_^]', Name.Builtin),
        ],
        'root': [
            (r'\\\[', String.Backtick, 'displaymath'),
            (r'\\\(', String, 'inlinemath'),
            (r'\$\$', String.Backtick, 'displaymath'),
            (r'\$', String, 'inlinemath'),
            (r'\\([a-zA-Z]+|.)', Keyword, 'command'),
            (r'\\$', Keyword),
            include('general'),
            (r'[^\\$%&_^{}]+', Text),
        ],
        'math': [
            (r'\\([a-zA-Z]+|.)', Name.Variable),
            include('general'),
            (r'[0-9]+', Number),
            (r'[-=!+*/()\[\]]', Operator),
            (r'[^=!+*/()\[\]\\$%&_^{}0-9-]+', Name.Builtin),
        ],
        'inlinemath': [
            (r'\\\)', String, '#pop'),
            (r'\$', String, '#pop'),
            include('math'),
        ],
        'displaymath': [
            (r'\\\]', String, '#pop'),
            (r'\$\$', String, '#pop'),
            (r'\$', Name.Builtin),
            include('math'),
        ],
        'command': [
            (r'\[.*?\]', Name.Attribute),
            (r'\*', Keyword),
            default('#pop'),
        ],
    }

    def analyse_text(text):
        """Recognize common (La)TeX preambles at the very start of the
        input text."""
        for start in ("\\documentclass", "\\input", "\\documentstyle",
                      "\\relax"):
            if text[:len(start)] == start:
                return True
class GroffLexer(RegexLexer):
    """
    Lexer for the (g)roff typesetting language, supporting groff
    extensions. Mainly useful for highlighting manpage sources.

    Lines starting with a dot are requests (handled in the ``request``
    state); everything else is body text handled in ``textline``, with
    groff escape sequences recognized by the shared ``escapes`` state.

    .. versionadded:: 0.6
    """
    name = 'Groff'
    aliases = ['groff', 'nroff', 'man']
    filenames = ['*.[1234567]', '*.man']
    mimetypes = ['application/x-troff', 'text/troff']

    tokens = {
        'root': [
            (r'(\.)(\w+)', bygroups(Text, Keyword), 'request'),
            (r'\.', Punctuation, 'request'),
            # Regular characters, slurp till we find a backslash or newline
            (r'[^\\\n]+', Text, 'textline'),
            default('textline'),
        ],
        'textline': [
            include('escapes'),
            (r'[^\\\n]+', Text),
            (r'\n', Text, '#pop'),
        ],
        'escapes': [
            # groff has many ways to write escapes.
            (r'\\"[^\n]*', Comment),
            (r'\\[fn]\w', String.Escape),
            (r'\\\(.{2}', String.Escape),
            (r'\\.\[.*\]', String.Escape),
            (r'\\.', String.Escape),
            (r'\\\n', Text, 'request'),
        ],
        'request': [
            (r'\n', Text, '#pop'),
            include('escapes'),
            (r'"[^\n"]+"', String.Double),
            (r'\d+', Number),
            (r'\S+', String),
            (r'\s+', Text),
        ],
    }

    def analyse_text(text):
        """Guess whether ``text`` is roff source.

        A leading comment (``.\\"``) or a ``.TH`` header is a sure
        match; any other request-like opener is a strong hint (0.9).
        """
        if text[:1] != '.':
            return False
        if text[:3] == '.\\"':
            return True
        if text[:4] == '.TH ':
            return True
        # Use a slice rather than indexing so that a short input such as
        # '.ab' (no fourth character) cannot raise IndexError; an empty
        # slice is simply not whitespace.
        if text[1:3].isalnum() and text[3:4].isspace():
            return 0.9
class MozPreprocHashLexer(RegexLexer):
    """
    Lexer for Mozilla Preprocessor files (with '#' as the marker).

    Only preprocessor directives are tokenized; all other data is left
    untouched (emitted as `Other`), which lets the `DelegatingLexer`
    subclasses below hand it on to a content lexer.

    .. versionadded:: 2.0
    """
    name = 'mozhashpreproc'
    aliases = [name]
    filenames = []
    mimetypes = []

    tokens = {
        'root': [
            # A marker at line start opens directive parsing: first the
            # directive keyword ('exprstart'), then its expression ('expr').
            (r'^#', Comment.Preproc, ('expr', 'exprstart')),
            (r'.+', Other),
        ],
        'exprstart': [
            # 'literal' swallows the rest of the line and leaves both
            # pushed states at once.
            (r'(literal)(.*)', bygroups(Comment.Preproc, Text), '#pop:2'),
            (words((
                'define', 'undef', 'if', 'ifdef', 'ifndef', 'else', 'elif',
                'elifdef', 'elifndef', 'endif', 'expand', 'filter', 'unfilter',
                'include', 'includesubst', 'error')),
             Comment.Preproc, '#pop'),
        ],
        'expr': [
            (words(('!', '!=', '==', '&&', '||')), Operator),
            (r'(defined)(\()', bygroups(Keyword, Punctuation)),
            (r'\)', Punctuation),
            (r'[0-9]+', Number.Decimal),
            (r'__\w+?__', Name.Variable),
            (r'@\w+?@', Name.Class),
            (r'\w+', Name),
            (r'\n', Text, '#pop'),
            (r'\s+', Text),
            (r'\S', Punctuation),
        ],
    }
class MozPreprocPercentLexer(MozPreprocHashLexer):
    """
    Lexer for Mozilla Preprocessor files (with '%' as the marker).

    Only the ``root`` state differs from `MozPreprocHashLexer`; the
    inherited directive/expression states are reused unchanged. Other
    data is left untouched.

    .. versionadded:: 2.0
    """
    name = 'mozpercentpreproc'
    aliases = [name]
    filenames = []
    mimetypes = []

    tokens = {
        'root': [
            (r'^%', Comment.Preproc, ('expr', 'exprstart')),
            (r'.+', Other),
        ],
    }
class MozPreprocXulLexer(DelegatingLexer):
    """
    Lexer for XUL sources that embed Mozilla '#' preprocessor
    directives.

    Directive lines are handled by `MozPreprocHashLexer`; everything
    the preprocessor leaves untouched is highlighted by `XmlLexer`.

    .. versionadded:: 2.0
    """
    name = "XUL+mozpreproc"
    aliases = ['xul+mozpreproc']
    filenames = ['*.xul.in']
    mimetypes = []

    def __init__(self, **options):
        # Root (content) lexer first, then the preprocessor lexer, as
        # DelegatingLexer expects.
        DelegatingLexer.__init__(
            self, XmlLexer, MozPreprocHashLexer, **options)
class MozPreprocJavascriptLexer(DelegatingLexer):
    """
    Lexer for JavaScript sources that embed Mozilla '#' preprocessor
    directives.

    Directive lines are handled by `MozPreprocHashLexer`; everything
    the preprocessor leaves untouched is highlighted by
    `JavascriptLexer`.

    .. versionadded:: 2.0
    """
    name = "Javascript+mozpreproc"
    aliases = ['javascript+mozpreproc']
    filenames = ['*.js.in']
    mimetypes = []

    def __init__(self, **options):
        # Root (content) lexer first, then the preprocessor lexer.
        DelegatingLexer.__init__(
            self, JavascriptLexer, MozPreprocHashLexer, **options)
class MozPreprocCssLexer(DelegatingLexer):
    """
    Lexer for CSS sources that embed Mozilla '%' preprocessor
    directives.

    Directive lines are handled by `MozPreprocPercentLexer` (CSS files
    use the '%' marker); everything the preprocessor leaves untouched
    is highlighted by `CssLexer`.

    .. versionadded:: 2.0
    """
    name = "CSS+mozpreproc"
    aliases = ['css+mozpreproc']
    filenames = ['*.css.in']
    mimetypes = []

    def __init__(self, **options):
        # Root (content) lexer first, then the preprocessor lexer.
        DelegatingLexer.__init__(
            self, CssLexer, MozPreprocPercentLexer, **options)
class MarkdownLexer(RegexLexer):
    """
    For `Markdown <https://help.github.com/categories/writing-on-github/>`_ markup.

    Fenced code blocks with a language tag are delegated to the lexer
    registered for that language (see ``handlecodeblocks`` below).

    .. versionadded:: 2.2
    """
    name = 'markdown'
    aliases = ['md', 'markdown']
    filenames = ['*.md', '*.markdown']
    mimetypes = ["text/x-markdown"]
    flags = re.MULTILINE

    def _handle_codeblock(self, match):
        """
        match args: 1:backticks, 2:lang_name, 3:newline, 4:code, 5:backticks

        Emits the fence and language tag, then delegates the code body
        to the lexer named after the fence when possible; otherwise the
        body is emitted as a plain String.
        """
        from pygments.lexers import get_lexer_by_name

        # section header
        yield match.start(1), String.Backtick, match.group(1)
        yield match.start(2), String.Backtick, match.group(2)
        yield match.start(3), Text , match.group(3)

        # lookup lexer if wanted and existing
        lexer = None
        if self.handlecodeblocks:
            try:
                lexer = get_lexer_by_name( match.group(2).strip() )
            except ClassNotFound:
                pass
        code = match.group(4)

        # no lexer for this language. handle it like it was a code block
        if lexer is None:
            yield match.start(4), String, code
        else:
            yield from do_insertions([], lexer.get_tokens_unprocessed(code))

        yield match.start(5), String.Backtick, match.group(5)

    tokens = {
        'root': [
            # heading with '#' prefix (atx-style)
            (r'(^#[^#].+)(\n)', bygroups(Generic.Heading, Text)),
            # subheading with '#' prefix (atx-style)
            (r'(^#{2,6}[^#].+)(\n)', bygroups(Generic.Subheading, Text)),
            # heading with '=' underlines (Setext-style)
            (r'^(.+)(\n)(=+)(\n)', bygroups(Generic.Heading, Text, Generic.Heading, Text)),
            # subheading with '-' underlines (Setext-style)
            (r'^(.+)(\n)(-+)(\n)', bygroups(Generic.Subheading, Text, Generic.Subheading, Text)),
            # task list
            (r'^(\s*)([*-] )(\[[ xX]\])( .+\n)',
             bygroups(Text, Keyword, Keyword, using(this, state='inline'))),
            # bulleted list
            (r'^(\s*)([*-])(\s)(.+\n)',
             bygroups(Text, Keyword, Text, using(this, state='inline'))),
            # numbered list
            (r'^(\s*)([0-9]+\.)( .+\n)',
             bygroups(Text, Keyword, using(this, state='inline'))),
            # quote
            (r'^(\s*>\s)(.+\n)', bygroups(Keyword, Generic.Emph)),
            # code block fenced by 3 backticks
            (r'^(\s*```\n[\w\W]*?^\s*```$\n)', String.Backtick),
            # code block with language
            (r'^(\s*```)(\w+)(\n)([\w\W]*?)(^\s*```$\n)', _handle_codeblock),
            include('inline'),
        ],
        'inline': [
            # escape
            (r'\\.', Text),
            # inline code
            (r'([^`]?)(`[^`\n]+`)', bygroups(Text, String.Backtick)),
            # warning: the following rules eat outer tags.
            # eg. **foo _bar_ baz** => foo and baz are not recognized as bold
            # bold fenced by '**'
            (r'([^\*]?)(\*\*[^* \n][^*\n]*\*\*)', bygroups(Text, Generic.Strong)),
            # bold fenced by '__'
            (r'([^_]?)(__[^_ \n][^_\n]*__)', bygroups(Text, Generic.Strong)),
            # italics fenced by '*'
            (r'([^\*]?)(\*[^* \n][^*\n]*\*)', bygroups(Text, Generic.Emph)),
            # italics fenced by '_'
            (r'([^_]?)(_[^_ \n][^_\n]*_)', bygroups(Text, Generic.Emph)),
            # strikethrough
            (r'([^~]?)(~~[^~ \n][^~\n]*~~)', bygroups(Text, Generic.Deleted)),
            # mentions and topics (twitter and github stuff)
            (r'[@#][\w/:]+', Name.Entity),
            # (image?) links eg: ![Image of Yaktocat](https://octodex.github.com/images/yaktocat.png)
            (r'(!?\[)([^]]+)(\])(\()([^)]+)(\))',
             bygroups(Text, Name.Tag, Text, Text, Name.Attribute, Text)),
            # reference-style links, e.g.:
            #   [an example][id]
            #   [id]: http://example.com/
            (r'(\[)([^]]+)(\])(\[)([^]]*)(\])',
             bygroups(Text, Name.Tag, Text, Text, Name.Label, Text)),
            (r'^(\s*\[)([^]]*)(\]:\s*)(.+)',
             bygroups(Text, Name.Label, Text, Name.Attribute)),
            # general text, must come last!
            (r'[^\\\s]+', Text),
            (r'.', Text),
        ],
    }

    def __init__(self, **options):
        # ``handlecodeblocks`` (default True) toggles delegated
        # highlighting of fenced code blocks with a language tag.
        self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True)
        RegexLexer.__init__(self, **options)
class TiddlyWiki5Lexer(RegexLexer):
    """
    For `TiddlyWiki5 <https://tiddlywiki.com/#TiddlerFiles>`_ markup.

    Fenced code blocks and ``<style>`` blocks are delegated to the
    appropriate lexer when ``handlecodeblocks`` is enabled.

    .. versionadded:: 2.7
    """
    name = 'tiddler'
    aliases = ['tid']
    filenames = ['*.tid']
    mimetypes = ["text/vnd.tiddlywiki"]
    flags = re.MULTILINE

    def _handle_codeblock(self, match):
        """
        match args: 1:backticks, 2:lang_name, 3:newline, 4:code, 5:backticks

        Delegates the code body to the lexer registered for the fenced
        language when possible; otherwise emits it as a plain String.
        """
        from pygments.lexers import get_lexer_by_name

        # section header
        yield match.start(1), String, match.group(1)
        yield match.start(2), String, match.group(2)
        yield match.start(3), Text, match.group(3)

        # lookup lexer if wanted and existing
        lexer = None
        if self.handlecodeblocks:
            try:
                lexer = get_lexer_by_name(match.group(2).strip())
            except ClassNotFound:
                pass
        code = match.group(4)

        # no lexer for this language. handle it like it was a code block
        if lexer is None:
            yield match.start(4), String, code
            return

        yield from do_insertions([], lexer.get_tokens_unprocessed(code))

        yield match.start(5), String, match.group(5)

    def _handle_cssblock(self, match):
        """
        match args: 1:style tag 2:newline, 3:code, 4:closing style tag

        Delegates the block body to the CSS lexer when available.
        """
        from pygments.lexers import get_lexer_by_name

        # section header
        yield match.start(1), String, match.group(1)
        yield match.start(2), String, match.group(2)

        lexer = None
        if self.handlecodeblocks:
            try:
                lexer = get_lexer_by_name('css')
            except ClassNotFound:
                pass
        code = match.group(3)

        # no lexer for this language. handle it like it was a code block
        if lexer is None:
            yield match.start(3), String, code
            return

        yield from do_insertions([], lexer.get_tokens_unprocessed(code))

        yield match.start(4), String, match.group(4)

    tokens = {
        'root': [
            # title in metadata section
            (r'^(title)(:\s)(.+\n)', bygroups(Keyword, Text, Generic.Heading)),
            # headings
            (r'^(!)([^!].+\n)', bygroups(Generic.Heading, Text)),
            (r'^(!{2,6})(.+\n)', bygroups(Generic.Subheading, Text)),
            # bulleted or numbered lists or single-line block quotes
            # (can be mixed)
            (r'^(\s*)([*#>]+)(\s*)(.+\n)',
             bygroups(Text, Keyword, Text, using(this, state='inline'))),
            # multi-line block quotes
            (r'^(<<<.*\n)([\w\W]*?)(^<<<.*$)', bygroups(String, Text, String)),
            # table header
            (r'^(\|.*?\|h)$', bygroups(Generic.Strong)),
            # table footer or caption
            (r'^(\|.*?\|[cf])$', bygroups(Generic.Emph)),
            # table class
            (r'^(\|.*?\|k)$', bygroups(Name.Tag)),
            # definitions
            (r'^(;.*)$', bygroups(Generic.Strong)),
            # text block
            (r'^(```\n)([\w\W]*?)(^```$)', bygroups(String, Text, String)),
            # code block with language
            (r'^(```)(\w+)(\n)([\w\W]*?)(^```$)', _handle_codeblock),
            # CSS style block
            (r'^(<style>)(\n)([\w\W]*?)(^</style>$)', _handle_cssblock),
            include('keywords'),
            include('inline'),
        ],
        'keywords': [
            (words((
                '\\define', '\\end', 'caption', 'created', 'modified', 'tags',
                'title', 'type'), prefix=r'^', suffix=r'\b'),
             Keyword),
        ],
        'inline': [
            # escape
            (r'\\.', Text),
            # created or modified date
            (r'\d{17}', Number.Integer),
            # italics
            (r'(\s)(//[^/]+//)((?=\W|\n))',
             bygroups(Text, Generic.Emph, Text)),
            # superscript
            (r'(\s)(\^\^[^\^]+\^\^)', bygroups(Text, Generic.Emph)),
            # subscript
            (r'(\s)(,,[^,]+,,)', bygroups(Text, Generic.Emph)),
            # underscore
            (r'(\s)(__[^_]+__)', bygroups(Text, Generic.Strong)),
            # bold
            (r"(\s)(''[^']+'')((?=\W|\n))",
             bygroups(Text, Generic.Strong, Text)),
            # strikethrough
            (r'(\s)(~~[^~]+~~)((?=\W|\n))',
             bygroups(Text, Generic.Deleted, Text)),
            # TiddlyWiki variables
            (r'<<[^>]+>>', Name.Tag),
            (r'\$\$[^$]+\$\$', Name.Tag),
            (r'\$\([^)]+\)\$', Name.Tag),
            # TiddlyWiki style or class
            (r'^@@.*$', Name.Tag),
            # HTML tags
            (r'</?[^>]+>', Name.Tag),
            # inline code
            (r'`[^`]+`', String.Backtick),
            # HTML escaped symbols
            (r'&\S*?;', String.Regex),
            # Wiki links
            (r'(\[{2})([^]\|]+)(\]{2})', bygroups(Text, Name.Tag, Text)),
            # External links
            (r'(\[{2})([^]\|]+)(\|)([^]\|]+)(\]{2})',
             bygroups(Text, Name.Tag, Text, Name.Attribute, Text)),
            # Transclusion
            (r'(\{{2})([^}]+)(\}{2})', bygroups(Text, Name.Tag, Text)),
            # URLs
            (r'(\b.?.?tps?://[^\s"]+)', bygroups(Name.Attribute)),
            # general text, must come last!
            (r'[\w]+', Text),
            (r'.', Text)
        ],
    }

    def __init__(self, **options):
        # ``handlecodeblocks`` (default True) toggles delegated
        # highlighting of fenced code blocks and <style> blocks.
        self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True)
        RegexLexer.__init__(self, **options)
| 35.04194 | 101 | 0.468602 |
6555074969dda11c81f20e0ef8c55d83a364038b | 3,131 | py | Python | ticketing/tests/test_signing.py | autlamps/tessera-backend | 1d02e8e3651c1ad75bdf4e5d0e61765a2a6de0c2 | [
"MIT"
] | null | null | null | ticketing/tests/test_signing.py | autlamps/tessera-backend | 1d02e8e3651c1ad75bdf4e5d0e61765a2a6de0c2 | [
"MIT"
] | 1 | 2018-08-14T03:15:00.000Z | 2018-08-21T00:33:34.000Z | ticketing/tests/test_signing.py | autlamps/tessera-backend | 1d02e8e3651c1ad75bdf4e5d0e61765a2a6de0c2 | [
"MIT"
] | null | null | null | import base64
from unittest import TestCase
import rsa
from django.contrib.auth.models import User
from tessera import settings
from ticketing.models import Account, BalanceTicket, Driver, Route
from ticketing.userticket.createqrcode import QRCode
class SignTestCase(TestCase):
    """Exercise QR-code signing and verification for balance tickets."""

    def setUp(self):
        # Start from a clean slate so leftover rows cannot affect the run.
        User.objects.all().delete()
        BalanceTicket.objects.all().delete()
        self.qrfactory = QRCode(testing=True)
        # rsa.newkeys returns (public, private); install a throwaway pair.
        public_key, private_key = rsa.newkeys(512)
        self.qrfactory.private = private_key
        self.qrfactory.public = public_key
        self.user = User.objects.create_user("fakeuser", "fakepassword")
        Account(user=self.user).save()
        self.acc = self.user.account.all()[0]
        self.bt = BalanceTicket(
            account=self.acc, current_value=10,
            qr_code_id="716190a3-849a-4d4a-a0a2-020cf40bda7d")
        self.bt.save()

    def testSigning(self):
        """Signing the same ticket twice must produce identical output."""
        first = self.qrfactory.createbtqrcode(self.bt)
        second = self.qrfactory.createbtqrcode(self.bt)
        first_user, first_hash = first.split(":")
        second_user, second_hash = second.split(":")
        if first_user != second_user:
            self.fail("The users are different")
        if first_hash != second_hash:
            self.fail("The hashes are different")

    def testVerifying(self):
        """A signed code must verify back to the original ticket."""
        payload = self.qrfactory.createbtqrcode(self.bt)
        ticket = self.qrfactory.verify(payload)
        ticket_qr = ticket['ticket'].qr_code_id
        ticket_type = ticket['type']
        if str(ticket_qr) != self.bt.qr_code_id:
            self.fail("The QR code is not the set one, got : "
                      + str(ticket_qr))
        if ticket_type != "b":
            self.fail("The type of ticket is not b, got : "
                      + ticket_type)

    def testPublicKey(self):
        """The PEM keys stored in settings must round-trip sign/verify."""
        message = "This is a test"
        priv_setting = settings.PRIVATE_KEY
        if priv_setting == "":
            self.fail("There is no Private Key in the settings")
        # Settings store the PEM body with spaces instead of newlines;
        # rebuild a loadable PKCS#1 blob from header/body/footer slices.
        priv_header = priv_setting[:32]
        priv_body = priv_setting[32:len(priv_setting)-29].replace(" ", "\n")
        priv_footer = priv_setting[-29:]
        private = rsa.PrivateKey.load_pkcs1(
            priv_header + "\n" + priv_body + priv_footer)
        pub_setting = settings.PUBLIC_KEY
        if pub_setting == "":
            self.fail("There is no Public Key in the settings")
        pub_header = pub_setting[:31]
        pub_body = pub_setting[31:len(pub_setting)-28].replace(" ", "\n")
        pub_footer = pub_setting[-28:]
        public = rsa.PublicKey.load_pkcs1(
            pub_header + "\n" + pub_body + pub_footer)
        signed = base64.b64encode(
            rsa.sign(message.encode('UTF-8'), private, 'SHA-256'))
        # NOTE(review): ``is`` compares a str against bytes by identity and
        # can never be true; kept unchanged to preserve original behaviour.
        if message is signed:
            self.fail("The hashed message is incorrect, we got : "
                      + str(signed))
        else:
            raw_signature = base64.b64decode(signed)
            try:
                rsa.verify(message.encode(), raw_signature, public)
            except rsa.VerificationError:
                self.fail("We are unable to verify the hashed message")
| 35.579545 | 73 | 0.565315 |
84de0cc0612bcdfb098e5f18fa3eef84f63752bc | 15,662 | py | Python | saleor/product/utils.py | VanilleBid/weekly-saleor | e776e86ee7ce710929ef33878d936e2a8367a217 | [
"BSD-3-Clause"
] | null | null | null | saleor/product/utils.py | VanilleBid/weekly-saleor | e776e86ee7ce710929ef33878d936e2a8367a217 | [
"BSD-3-Clause"
] | 86 | 2018-03-08T14:19:19.000Z | 2018-05-12T14:55:16.000Z | saleor/product/utils.py | VanilleBid/weekly-saleor | e776e86ee7ce710929ef33878d936e2a8367a217 | [
"BSD-3-Clause"
] | 2 | 2018-03-05T12:29:10.000Z | 2018-09-28T12:40:52.000Z | from collections import defaultdict, namedtuple
from urllib.parse import urlencode
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.db.models import F
from django.utils.encoding import smart_text
from django_prices.templatetags import prices_i18n
from prices import Price, PriceRange
from . import ProductAvailabilityStatus, VariantAvailabilityStatus
from ..cart.utils import get_cart_from_request, get_or_create_cart_from_request
from ..core.utils import get_paginator_items, to_local_currency
from ..core.utils.billing import get_tax_price, price_range_get_taxed
from ..core.utils.filters import get_now_sorted_by
from .forms import ProductForm
def products_visible_to_user(user, staff_view_all=False):
    """Return the product queryset the given user may browse.

    Active, authenticated staff members get the full catalogue when
    ``staff_view_all`` is set; everyone else only sees products the
    manager marks as available.
    """
    # pylint: disable=cyclic-import
    from .models import Product
    is_privileged = (
        staff_view_all
        and user.is_authenticated
        and user.is_staff
        and user.is_active)
    if is_privileged:
        return Product.objects.all()
    return Product.objects.available_products()
def products_with_details(user, staff_view_all=False):
    """Return the products visible to ``user`` with all the related
    data needed for product pages prefetched in bulk."""
    related = (
        'category', 'images', 'variants__stock',
        'variants__variant_images__image', 'attributes__values',
        'product_type__variant_attributes__values',
        'product_type__product_attributes__values')
    return products_visible_to_user(user, staff_view_all).prefetch_related(
        *related)
def products_for_homepage():
    """Return the featured products shown to anonymous visitors on the
    storefront homepage."""
    visible = products_with_details(AnonymousUser())
    return visible.filter(is_featured=True)
def get_product_images(product):
    """Return list of product images that will be placed in product gallery."""
    return [image for image in product.images.all()]
def products_with_availability(products, discounts, local_currency):
    """Lazily yield each product together with its computed
    availability/pricing summary."""
    for item in products:
        availability = get_availability(item, discounts, local_currency)
        yield item, availability
# Value object describing everything the storefront needs to render
# pricing for a product: availability flags, discounted/undiscounted
# price ranges (plain and taxed) and their local-currency variants.
ProductAvailability = namedtuple(
    'ProductAvailability', (
        'available', 'on_sale',
        'price_range', 'price_range_undiscounted',
        'taxed_price_range', 'taxed_price_range_undiscounted',
        'discount', 'price_range_local_currency', 'discount_local_currency'))
def get_availability(product, discounts=None, local_currency=None):
    """Compute a `ProductAvailability` summary for ``product``.

    ``discounts`` is applied to the discounted price range; when
    ``local_currency`` is given, the ranges are also converted via
    exchange-rate data (the local fields stay ``None`` otherwise).
    """
    # Ranges in the default currency.
    discounted = product.get_price_range(discounts=discounts)
    undiscounted = product.get_price_range()

    discount = None
    if undiscounted.min_price > discounted.min_price:
        discount = undiscounted.min_price - discounted.min_price

    # Local-currency equivalents, when requested.
    local_range = None
    local_discount = None
    if local_currency:
        local_range = to_local_currency(
            discounted, local_currency)
        local_undiscounted = to_local_currency(
            undiscounted, local_currency)
        if (local_undiscounted and
                local_undiscounted.min_price > local_range.min_price):
            local_discount = (
                local_undiscounted.min_price - local_range.min_price)

    is_available = product.is_in_stock() and product.is_available()
    is_on_sale = (
        product.is_available() and discount is not None and
        undiscounted.min_price != discounted.min_price)

    return ProductAvailability(
        available=is_available,
        on_sale=is_on_sale,
        price_range=discounted,
        price_range_undiscounted=undiscounted,
        taxed_price_range=price_range_get_taxed(discounted),
        taxed_price_range_undiscounted=price_range_get_taxed(undiscounted),
        discount=discount,
        price_range_local_currency=local_range,
        discount_local_currency=local_discount)
def handle_cart_form(request, product, create_cart=False):
    """Return a ``(form, cart)`` pair for adding ``product`` to a cart.

    With ``create_cart`` a cart is created on demand; otherwise only an
    existing cart is looked up.
    """
    cart_getter = (
        get_or_create_cart_from_request if create_cart
        else get_cart_from_request)
    cart = cart_getter(request)
    bound_form = ProductForm(
        cart=cart, product=product, data=request.POST or None,
        discounts=request.discounts)
    return bound_form, cart
def products_for_cart(user):
    """Return the products visible to ``user`` with variant images
    prefetched, as needed by cart line rendering."""
    visible = products_visible_to_user(user)
    return visible.prefetch_related('variants__variant_images__image')
def product_json_ld(product, attributes=None):
    # type: (saleor.product.models.Product, saleor.product.utils.ProductAvailability, dict) -> dict  # noqa
    """Generate JSON-LD (schema.org Product) data for ``product``.

    One Offer entry is produced per variant. When ``attributes`` is
    given, a 'brand' attribute names the brand outright; 'publisher'
    acts as a fallback.
    """
    offers = []
    for variant in product.variants.all():
        unit_price = variant.get_price_per_item()
        # Same short-circuit as before: variant stock is only checked
        # when the product itself is available.
        in_stock = product.is_available() and variant.is_in_stock()
        availability = (
            'http://schema.org/InStock' if in_stock
            else 'http://schema.org/OutOfStock')
        offers.append({
            '@type': 'Offer',
            'availability': availability,
            'itemCondition': 'http://schema.org/NewCondition',
            'price': unit_price.gross,
            'priceCurrency': unit_price.currency,
            'sku': variant.sku})
    data = {
        '@context': 'http://schema.org/',
        '@type': 'Product',
        'name': smart_text(product),
        'image': [
            product_image.image.url
            for product_image in product.images.all()],
        'description': product.description,
        'offers': offers}
    if attributes is not None:
        brand = ''
        for attribute in attributes:
            # 'brand' wins immediately; 'publisher' keeps the last match
            # seen but can still be overridden by a later 'brand'.
            if attribute.name == 'brand':
                brand = attributes[attribute].name
                break
            if attribute.name == 'publisher':
                brand = attributes[attribute].name
        if brand:
            data['brand'] = {'@type': 'Thing', 'name': brand}
    return data
def get_variant_picker_data(product, discounts=None, local_currency=None):
    """Build the JSON-serializable payload consumed by the storefront
    variant-picker widget.

    The result contains one entry per variant (prices, stock flag,
    schema.org offer data), the product-level availability summary,
    and, for each variant attribute of the product type, only the
    attribute values actually used by at least one variant.
    """
    availability = get_availability(product, discounts, local_currency)
    variants = product.variants.all()
    data = {'variantAttributes': [], 'variants': []}
    variant_attributes = product.product_type.variant_attributes.all()
    # Collect only available variants
    filter_available_variants = defaultdict(list)
    for variant in variants:
        price = variant.get_price_per_item(discounts)
        price_undiscounted = variant.get_price_per_item()
        if local_currency:
            price_local_currency = to_local_currency(price, local_currency)
        else:
            price_local_currency = None
        schema_data = {'@type': 'Offer',
                       'itemCondition': 'http://schema.org/NewCondition',
                       'priceCurrency': price.currency,
                       'price': price.net}
        in_stock = variant.is_in_stock()
        if in_stock:
            schema_data['availability'] = 'http://schema.org/InStock'
        else:
            schema_data['availability'] = 'http://schema.org/OutOfStock'
        variant_data = {
            'id': variant.id,
            'availability': in_stock,
            'price': price_as_dict(price),
            'priceUndiscounted': price_as_dict(price_undiscounted),
            'taxedPrice': price_as_dict(get_tax_price(total=price)[0]),
            'taxedPriceUndiscounted': price_as_dict(get_tax_price(total=price_undiscounted)[0]),
            'attributes': variant.attributes,
            'priceLocalCurrency': price_as_dict(price_local_currency),
            'schemaData': schema_data}
        data['variants'].append(variant_data)
        # Record which attribute values occur on this variant, keyed by
        # attribute pk, so only selectable combinations are offered.
        for variant_key, variant_value in variant.attributes.items():
            filter_available_variants[int(variant_key)].append(
                int(variant_value))
    for attribute in variant_attributes:
        available_variants = filter_available_variants.get(attribute.pk, None)
        if available_variants:
            data['variantAttributes'].append({
                'pk': attribute.pk,
                'name': attribute.name,
                'slug': attribute.slug,
                'values': [
                    {'pk': value.pk, 'name': value.name, 'slug': value.slug}
                    for value in attribute.values.filter(
                        pk__in=available_variants)]})
    data['availability'] = {
        'discount': price_as_dict(availability.discount),
        'taxedPriceRange': price_range_as_dict(availability.taxed_price_range),
        'taxedPriceRangeUndiscounted': price_range_as_dict(availability.taxed_price_range_undiscounted),
        'priceRange': price_range_as_dict(availability.price_range),
        'priceRangeUndiscounted': price_range_as_dict(
            availability.price_range_undiscounted),
        'priceRangeLocalCurrency': price_range_as_dict(
            availability.price_range_local_currency)}
    return data
def get_product_attributes_data(product):
    """Map each product-type attribute object to the value displayed
    for ``product`` (a choice object when resolvable, raw value
    otherwise)."""
    attributes = product.product_type.product_attributes.all()
    by_pk = {attribute.pk: attribute for attribute in attributes}
    display_map = get_attributes_display_map(product, attributes)
    return {by_pk.get(pk): shown for pk, shown in display_map.items()}
def price_as_dict(price):
    """Serialize a Price to a JSON-friendly dict (gross/net plus
    localized display strings); falsy input yields ``None``."""
    if not price:
        return None
    return dict(
        currency=price.currency,
        gross=price.gross,
        grossLocalized=prices_i18n.gross(price),
        net=price.net,
        netLocalized=prices_i18n.net(price))
def price_range_as_dict(price_range):
    """Serialize a PriceRange to a dict of serialized min/max prices;
    falsy input yields ``None``."""
    if not price_range:
        return None
    return dict(
        maxPrice=price_as_dict(price_range.max_price),
        minPrice=price_as_dict(price_range.min_price))
def get_variant_url_from_product(product, attributes):
    """Return the product's URL with ``attributes`` appended as a
    URL-encoded query string."""
    query = urlencode(attributes)
    return '{}?{}'.format(product.get_absolute_url(), query)
def get_variant_url(variant):
    """Return the URL of the variant's product with the product type's
    variant attributes carried in the query string.

    The mapping keys are attribute primary keys (stringified) and the
    values are the attribute objects themselves, exactly as the picker
    query expects.
    """
    # The previous version also filled a ``values`` dict with every
    # attribute value (one extra query per attribute) but never read it;
    # that dead code has been removed.
    attributes = {
        str(attribute.pk): attribute
        for attribute in
        variant.product.product_type.variant_attributes.all()}
    return get_variant_url_from_product(variant.product, attributes)
def get_attributes_display_map(obj, attributes):
    """Resolve the raw attribute storage on ``obj`` into display
    values: a choice object when the stored value matches one of the
    attribute's choices, otherwise the raw stored value."""
    display_map = {}
    for attribute in attributes:
        stored = obj.attributes.get(smart_text(attribute.pk))
        if not stored:
            continue
        choices = {
            smart_text(choice.pk): choice
            for choice in attribute.values.all()}
        display_map[attribute.pk] = choices.get(stored) or stored
    return display_map
def get_product_availability_status(product):
    """Classify a product's overall availability for dashboard display.

    Checks run from the most severe condition (unpublished) down to
    fully purchasable.
    """
    from .models import Stock

    is_available = product.is_available()
    stock_records = Stock.objects.filter(variant__product=product)
    variant_stock_flags = [
        variant.is_in_stock() for variant in product.variants.all()]
    every_variant_in_stock = all(variant_stock_flags)
    some_variant_in_stock = any(variant_stock_flags)
    needs_variants = product.product_type.has_variants

    if not product.is_published:
        return ProductAvailabilityStatus.NOT_PUBLISHED
    if needs_variants and not product.variants.exists():
        # Only product types that require variants are flagged here: for
        # the other types variants are hidden from the UI, so the user
        # never manages them directly.
        return ProductAvailabilityStatus.VARIANTS_MISSSING
    if not stock_records:
        return ProductAvailabilityStatus.NOT_CARRIED
    if not some_variant_in_stock:
        return ProductAvailabilityStatus.OUT_OF_STOCK
    if not every_variant_in_stock:
        return ProductAvailabilityStatus.LOW_STOCK
    if not is_available and product.available_on is not None:
        return ProductAvailabilityStatus.NOT_YET_AVAILABLE
    return ProductAvailabilityStatus.READY_FOR_PURCHASE
def get_variant_availability_status(variant):
    """Classify a single variant's stock status."""
    if not variant.stock.exists():
        return VariantAvailabilityStatus.NOT_CARRIED
    if variant.is_in_stock():
        return VariantAvailabilityStatus.AVAILABLE
    return VariantAvailabilityStatus.OUT_OF_STOCK
def get_product_costs_data(product):
    """Return ``(purchase_costs_range, gross_margin_bounds)`` for a
    product, falling back to zero values when there are no variants or
    no cost data."""
    zero = Price(0, 0, currency=settings.DEFAULT_CURRENCY)
    costs_range = PriceRange(zero, zero)
    margin_bounds = (0, 0)

    if not product.variants.exists():
        return costs_range, margin_bounds

    costs, margins = get_cost_data_from_variants(product.variants.all())
    if costs:
        costs_range = PriceRange(min(costs), max(costs))
    if margins:
        # ``margins`` is already sorted, so the ends are the bounds.
        margin_bounds = (margins[0], margins[-1])
    return costs_range, margin_bounds
def sort_cost_data(costs, margins):
    """Return both sequences sorted ascending (costs by gross amount)."""
    ordered_costs = sorted(costs, key=lambda price: price.gross)
    ordered_margins = sorted(margins)
    return ordered_costs, ordered_margins
def get_cost_data_from_variants(variants):
    """Collect cost prices and margins across all ``variants`` and
    return them sorted via :func:`sort_cost_data`."""
    all_costs = []
    all_margins = []
    for variant in variants:
        data = get_variant_costs_data(variant)
        all_costs.extend(data['costs'])
        all_margins.extend(data['margins'])
    return sort_cost_data(all_costs, all_margins)
def get_variant_costs_data(variant):
    """Return ``{'costs': [...], 'margins': [...]}`` for one variant,
    both sorted ascending; stock records without a margin are skipped."""
    collected_costs = []
    collected_margins = []
    for stock in variant.stock.all():
        collected_costs.append(get_cost_price(stock))
        margin = get_margin_for_variant(stock)
        if margin:
            collected_margins.append(margin)
    return {
        'costs': sorted(collected_costs, key=lambda price: price.gross),
        'margins': sorted(collected_margins)}
def get_cost_price(stock):
    """Return the stock record's cost price, or a zero Price when unset.

    The zero Price is only constructed on the fallback path instead of on
    every call.
    """
    if not stock.cost_price:
        return Price(0, 0, currency=settings.DEFAULT_CURRENCY)
    return stock.cost_price
def get_margin_for_variant(stock):
    """Gross margin percent for one stock record, or None without a cost price."""
    cost = stock.cost_price
    if not cost:
        return None
    price = stock.variant.get_price_per_item()
    gross_margin = price - cost
    return round((gross_margin.gross / price.gross) * 100, 0)
def allocate_stock(stock, quantity):
    """Raise the allocated counter by *quantity* via a database-side F() expression."""
    bumped = F('quantity_allocated') + quantity
    stock.quantity_allocated = bumped
    stock.save(update_fields=['quantity_allocated'])
def deallocate_stock(stock, quantity):
    """Lower the allocated counter by *quantity* via a database-side F() expression."""
    reduced = F('quantity_allocated') - quantity
    stock.quantity_allocated = reduced
    stock.save(update_fields=['quantity_allocated'])
def increase_stock(stock, quantity):
    """Raise the on-hand quantity by *quantity* via a database-side F() expression."""
    bumped = F('quantity') + quantity
    stock.quantity = bumped
    stock.save(update_fields=['quantity'])
def decrease_stock(stock, quantity):
    """Lower both the on-hand and allocated counters by *quantity* in one save."""
    stock.quantity = F('quantity') - quantity
    stock.quantity_allocated = F('quantity_allocated') - quantity
    changed = ['quantity', 'quantity_allocated']
    stock.save(update_fields=changed)
def get_product_list_context(request, filter_set):
    """Build the template context for a filtered, paginated product list.

    :param request: request object
    :param filter_set: filter set for product list
    :return: context dictionary
    """
    # Imported here to avoid a circular module dependency.
    from .filters import SORT_BY_FIELDS

    page_number = request.GET.get('page')
    products_paginated = get_paginator_items(
        filter_set.qs, settings.PAGINATE_BY, page_number)
    products_and_availability = list(products_with_availability(
        products_paginated, request.discounts, request.currency))
    sort_arg = request.GET.get('sort_by')
    return {
        'filter_set': filter_set,
        'products': products_and_availability,
        'products_paginated': products_paginated,
        'sort_by_choices': SORT_BY_FIELDS,
        'now_sorted_by': get_now_sorted_by(filter_set),
        'is_descending': sort_arg.startswith('-') if sort_arg else False}
| 36.087558 | 107 | 0.690653 |
ce2ee077c855bc9ddee4150343cf83f7fba4783b | 3,574 | py | Python | notes/reference/tutorials/an-introduction-to-asynch-programming-and-twisted/exercises/part_8_ex_1.py | aav789/study-notes | 34eca00cd48869ba7a79c0ea7d8948ee9bde72b9 | [
"MIT"
] | 43 | 2015-06-10T14:48:00.000Z | 2020-11-29T16:22:28.000Z | notes/reference/tutorials/an-introduction-to-asynch-programming-and-twisted/exercises/part_8_ex_1.py | aav789/study-notes | 34eca00cd48869ba7a79c0ea7d8948ee9bde72b9 | [
"MIT"
] | 1 | 2021-11-01T12:01:44.000Z | 2021-11-01T12:01:44.000Z | notes/reference/tutorials/an-introduction-to-asynch-programming-and-twisted/exercises/part_8_ex_1.py | lextoumbourou/notes | 5f94c59a467eb3eb387542bdce398abc0365e6a7 | [
"MIT"
] | 40 | 2015-03-02T10:33:59.000Z | 2020-05-24T12:17:05.000Z | # This is the Twisted Get Poetry Now! client, version 4.0
import optparse, sys
from twisted.internet import defer
from twisted.internet.protocol import Protocol, ClientFactory
class TimeoutError(Exception):
    """Raised (via errback) when a poetry download takes too long.

    Bug fix: this was declared with ``def`` instead of ``class``, making
    ``TimeoutError`` a plain function, so ``d.errback(TimeoutError)``
    handed Twisted a callable rather than an exception type.
    """
    pass
def parse_args():
    # Parse "[hostname]:port" arguments from the command line and return a
    # list of (host, port) tuples; exits with usage help when no addresses
    # were supplied.
    usage = """usage: %prog [options] [hostname]:port ...
This is the Get Poetry Now! client, Twisted version 4.0
Run it like this:
python get-poetry.py port1 port2 port3 ...
If you are in the base directory of the twisted-intro package,
you could run it like this:
python twisted-client-4/get-poetry.py 10001 10002 10003
to grab poetry from servers on ports 10001, 10002, and 10003.
Of course, there need to be servers listening on those ports
for that to work.
"""
    parser = optparse.OptionParser(usage)
    _, addresses = parser.parse_args()
    if not addresses:
        print parser.format_help()
        parser.exit()
    def parse_address(addr):
        # A bare port (no ":") defaults the host to localhost.
        if ':' not in addr:
            host = '127.0.0.1'
            port = addr
        else:
            host, port = addr.split(':', 1)
        if not port.isdigit():
            parser.error('Ports must be integers.')
        return host, int(port)
    # Python 2: map() returns a list here.
    return map(parse_address, addresses)
class PoetryProtocol(Protocol):
    """Accumulates a poem from one TCP connection; one instance per download."""
    # Class-level default; each instance rebinds self.poem as data arrives.
    poem = ''
    def dataReceived(self, data):
        # Invoked by Twisted for every chunk of bytes from the server.
        print "received {}".format(data)
        self.poem += data
    def connectionLost(self, reason):
        # The server closing the connection signals end-of-poem.
        self.poemReceived(self.poem)
    def poemReceived(self, poem):
        # self.factory is attached by the factory machinery when the
        # protocol is built (standard Twisted behavior).
        self.factory.poem_finished(poem)
class PoetryClientFactory(ClientFactory):
    """Fires a Deferred with the poem text, a failure, or a timeout error."""
    protocol = PoetryProtocol
    def __init__(self, deferred):
        # Fired at most once; set to None afterwards so the other
        # completion paths become no-ops.
        self.deferred = deferred
    def startedConnecting(self, connector):
        from twisted.internet import reactor
        # Arm a 1-second timeout covering the connect + download attempt.
        self.timeout_call = reactor.callLater(1, self.handle_timeout, connector)
    def poem_finished(self, poem):
        # NOTE(review): the pending timeout_call is not cancelled here; when
        # it later fires, handle_timeout finds deferred already None and
        # does nothing, but it still disconnects nothing and runs. Confirm
        # whether cancelling on success was intended.
        if self.deferred is not None:
            d, self.deferred = self.deferred, None
            d.callback(poem)
    def clientConnectionFailed(self, connector, reason):
        self.timeout_call.cancel()
        if self.deferred is not None:
            d, self.deferred = self.deferred, None
            d.errback(reason)
    def handle_timeout(self, connector):
        if self.deferred is not None:
            d, self.deferred = self.deferred, None
            connector.disconnect()
            # NOTE(review): TimeoutError is passed as an object, not an
            # instance; see the TimeoutError definition above -- as written
            # it is a function, which errback will not wrap as an exception.
            d.errback(TimeoutError)
def get_poetry(host, port):
    """
    Download a poem from the given host and port. This function
    returns a Deferred which will be fired with the complete text of
    the poem or a Failure if the poem could not be downloaded.
    """
    from twisted.internet import reactor
    deferred = defer.Deferred()
    reactor.connectTCP(host, port, PoetryClientFactory(deferred))
    return deferred
def poetry_main():
addresses = parse_args()
from twisted.internet import reactor
poems = []
errors = []
def got_poem(poem):
poems.append(poem)
def poem_failed(err):
print >>sys.stderr, 'Poem failed:', err
print >>sys.stderr, 'From {} on port {}'.format(host, port)
errors.append(err)
def poem_done(_):
if len(poems) + len(errors) == len(addresses):
reactor.stop()
for address in addresses:
host, port = address
d = get_poetry(host, port)
d.addCallback(got_poem)
d.addErrback(poem_failed)
d.addBoth(poem_done)
reactor.run()
for poem in poems:
print poem
# Script entry point: run the poetry client when executed directly.
if __name__ == '__main__':
    poetry_main()
| 23.826667 | 80 | 0.641298 |
d864172faae91b60392338b05f6ccce53d0b46ed | 2,112 | py | Python | app/api/device/device_parameter.py | zldevil2011/zhengsheng | 93f7e486289f7685460fbab7a474aaac8e5b275e | [
"MIT"
] | null | null | null | app/api/device/device_parameter.py | zldevil2011/zhengsheng | 93f7e486289f7685460fbab7a474aaac8e5b275e | [
"MIT"
] | null | null | null | app/api/device/device_parameter.py | zldevil2011/zhengsheng | 93f7e486289f7685460fbab7a474aaac8e5b275e | [
"MIT"
] | null | null | null | # coding=utf-8
from rest_framework import status
from app.models import Device, Parameter
from rest_framework.views import APIView
from rest_framework.response import Response
from app.serializer import DeviceSerializer
import time
class DeviceParameter(APIView):
    """Endpoint a device polls to fetch its parameter set.

    The device posts a comma-separated key/value string under 'k'; the
    first field carries the device id, the second the parameter version
    the device currently holds. A default Parameter row is created on
    first contact (or on any lookup failure -- see the except branch).
    """
    def post(self, request, format=None):
        # Payload format: "key=value,key=value,..."
        request_str = request.data['k']
        request_list = request_str.split(',')
        device_id = request_list[0].split("=")[1]
        ver_no = int(request_list[1].split("=")[1])
        try:
            device = Device.objects.get(device_id = device_id)
            parameter = Parameter.objects.get(device=device)
            # up flag: '1' tells the device new parameters exist, '0' that
            # its version is already current.
            up = '1'
            if parameter.version <= ver_no:
                up = '0'
            else:
                up = '1'
            parVer=parameter.version
            # Temperature is sent as an integer in tenths of a degree.
            TempSet = int(parameter.temperature * 10)
            TempTSet = parameter.temperature_t_length
            PUTime1 = parameter.power_get_point1
            PUTime2 = parameter.power_get_point2
            response_str = ''
            response_str += 'up=' + up + ',id=' + str(device_id) + ',parVer=' + str(parVer) + ',TempSet=' + str(TempSet) + ',TempTSet='+str(TempTSet)+',PUTime1='+str(PUTime1)+',PUTime2='+str(PUTime2)
            return Response({'k': response_str} , status=status.HTTP_200_OK)
        except Exception, e:
            # NOTE(review): this broad except doubles as the "no Parameter
            # row yet" path -- any failure above (including a malformed
            # payload) creates a default Parameter. Confirm that is intended.
            print str(e)
            request_str = request.data['k']
            request_list = request_str.split(',')
            device_id = int(request_list[0].split("=")[1])
            device = Device.objects.get(device_id=device_id)
            parameter = Parameter()
            parameter.device = device
            parameter.temperature_t_length = 0
            parameter.temperature = 0
            parameter.power_get_point1 = '00:00:00'
            parameter.power_get_point2 = '12:00:00'
            parameter.version = 1
            parameter.save()
            response_str = 'up=1,id=' + str(device_id) + ',parVer=1,TempSet=0,TempTSet=0,PUTime1=00:00:00,PUTime2=12:00:00'
            return Response({'k': response_str} , status=status.HTTP_200_OK)
| 43.102041 | 199 | 0.601799 |
e47a385d2cb0ebef43c2b624e881430f82f0860a | 18,776 | py | Python | azure/mgmt/network/v2017_03_01/operations/route_tables_operations.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | azure/mgmt/network/v2017_03_01/operations/route_tables_operations.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | azure/mgmt/network/v2017_03_01/operations/route_tables_operations.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class RouteTablesOperations(object):
    """RouteTablesOperations operations.
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client API version. Constant value: "2017-03-01".
    """
    # NOTE: AutoRest-generated client; manual edits are normally lost on
    # regeneration.
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.api_version = "2017-03-01"
        self.config = config
    def delete(
            self, resource_group_name, route_table_name, custom_headers=None, raw=False, **operation_config):
        """Deletes the specified route table.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_table_name: The name of the route table.
        :type route_table_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :return:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns None or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
         raw=true
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        def long_running_send():
            # Initial DELETE request for the long-running operation.
            request = self._client.delete(url, query_parameters)
            return self._client.send(request, header_parameters, **operation_config)
        def get_long_running_status(status_link, headers=None):
            # Polls the status URL supplied by the service.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)
        def get_long_running_output(response):
            if response.status_code not in [204, 200, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response
        # raw=True bypasses the poller: issue the request once and return.
        if raw:
            response = long_running_send()
            return get_long_running_output(response)
        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def get(
            self, resource_group_name, route_table_name, expand=None, custom_headers=None, raw=False, **operation_config):
        """Gets the specified route table.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_table_name: The name of the route table.
        :type route_table_name: str
        :param expand: Expands referenced resources.
        :type expand: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: :class:`RouteTable
         <azure.mgmt.network.v2017_03_01.models.RouteTable>` or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
         raw=true
        :rtype: :class:`RouteTable
         <azure.mgmt.network.v2017_03_01.models.RouteTable>` or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('RouteTable', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def create_or_update(
            self, resource_group_name, route_table_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Create or updates a route table in a specified resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_table_name: The name of the route table.
        :type route_table_name: str
        :param parameters: Parameters supplied to the create or update route
         table operation.
        :type parameters: :class:`RouteTable
         <azure.mgmt.network.v2017_03_01.models.RouteTable>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :return:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`RouteTable
         <azure.mgmt.network.v2017_03_01.models.RouteTable>` or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
         raw=true
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'RouteTable')
        # Construct and send request
        def long_running_send():
            request = self._client.put(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)
        def get_long_running_status(status_link, headers=None):
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)
        def get_long_running_output(response):
            # 200 = updated, 201 = created; both carry a RouteTable body.
            if response.status_code not in [200, 201]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            deserialized = None
            if response.status_code == 200:
                deserialized = self._deserialize('RouteTable', response)
            if response.status_code == 201:
                deserialized = self._deserialize('RouteTable', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        if raw:
            response = long_running_send()
            return get_long_running_output(response)
        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def list(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Gets all route tables in a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of :class:`RouteTable
         <azure.mgmt.network.v2017_03_01.models.RouteTable>`
        :rtype: :class:`RouteTablePaged
         <azure.mgmt.network.v2017_03_01.models.RouteTablePaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Called by the Paged iterator once per page; next_link is the
        # service-provided URL for the following page.
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables'
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.RouteTablePaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.RouteTablePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    def list_all(
            self, custom_headers=None, raw=False, **operation_config):
        """Gets all route tables in a subscription.
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of :class:`RouteTable
         <azure.mgmt.network.v2017_03_01.models.RouteTable>`
        :rtype: :class:`RouteTablePaged
         <azure.mgmt.network.v2017_03_01.models.RouteTablePaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Same paging scheme as list(), but scoped to the whole subscription.
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeTables'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.RouteTablePaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.RouteTablePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
| 44.91866 | 144 | 0.654825 |
b7394eddf3a58700fc40f63d95fc0a386e5c217e | 3,540 | py | Python | sudoku_buddy/base.py | papalotis/sudoku-buddy | 9e7f14d9e8a5a79d8c241cb87282fb1872efc439 | [
"MIT"
] | null | null | null | sudoku_buddy/base.py | papalotis/sudoku-buddy | 9e7f14d9e8a5a79d8c241cb87282fb1872efc439 | [
"MIT"
] | null | null | null | sudoku_buddy/base.py | papalotis/sudoku-buddy | 9e7f14d9e8a5a79d8c241cb87282fb1872efc439 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""
Description: Base file for sudoku solver implementation
Project: sudoku-buddy
"""
import numpy as np
from numpy.typing import ArrayLike, NDArray
class Sudoku:
    """A 9x9 sudoku board with precomputed neighbor tables and a per-cell
    candidate mask (row = cell index 0..80, column = candidate value - 1)."""

    SUDOKU_SIZE = 9
    SUDOKU_SIZE_SQUARE = 81
    BOX_SIZE = 3
    EMPTY_VALUE = 0

    def _get_neighbor_indices(
        self,
        cell_index: int,
        row_neighbors: bool,
        column_neighbors: bool,
        box_neighbors: bool,
    ) -> set[int]:
        """Collect indices of cells sharing the requested units with *cell_index*."""
        own_row, own_col = divmod(cell_index, self.SUDOKU_SIZE)
        own_box = (own_row // self.BOX_SIZE, own_col // self.BOX_SIZE)
        collected: set[int] = set()
        for other in range(self.SUDOKU_SIZE_SQUARE):
            if other == cell_index:
                continue
            o_row, o_col = divmod(other, self.SUDOKU_SIZE)
            same_row = row_neighbors and o_row == own_row
            same_col = column_neighbors and o_col == own_col
            same_box = box_neighbors and (
                (o_row // self.BOX_SIZE, o_col // self.BOX_SIZE) == own_box
            )
            if same_row or same_col or same_box:
                collected.add(other)
        return collected

    def _create_neighbors_indices_array(
        self, row_neighbors: bool, column_neighbors: bool, box_neighbors: bool
    ) -> NDArray[np.int_]:
        """Stack the per-cell neighbor sets into an (81, k) index array."""
        per_cell = []
        for cell in range(self.SUDOKU_SIZE_SQUARE):
            neighbors = self._get_neighbor_indices(
                cell, row_neighbors, column_neighbors, box_neighbors
            )
            per_cell.append(list(neighbors))
        return np.array(per_cell, dtype=np.int_)

    def _handle_input_buffer(self) -> NDArray[np.int_]:
        """Copy the raw input into an independent, flat length-81 int vector."""
        return np.array(self._input_buffer, dtype=np.int_).ravel()

    def _create_initial_candidates_mask(self) -> NDArray[np.bool_]:
        """All-True candidate mask, except given cells keep only their own value."""
        candidates: NDArray[np.bool_] = np.ones(
            (self.SUDOKU_SIZE_SQUARE, self.SUDOKU_SIZE), dtype=np.bool_
        )
        given = self._input_array != self.EMPTY_VALUE
        candidates[given] = False
        # Value v lives in column v - 1 (candidates 1..9, columns 0..8).
        candidates[given, self._input_array[given] - 1] = True
        return candidates

    def __init__(self, input_buffer: ArrayLike):
        self._input_buffer = input_buffer
        self._input_array = self._handle_input_buffer()
        make = self._create_neighbors_indices_array
        self._all_neighbors_indices = make(True, True, True)
        self._row_neighbors_indices = make(True, False, False)
        self._column_neighbors_indices = make(False, True, False)
        self._box_neighbors_indices = make(False, False, True)
        self._candidates_mask = self._create_initial_candidates_mask()
| 35.4 | 82 | 0.650847 |
5ebfda7c679d4c0de2437eb1d2e34c7a0557a5d1 | 220 | py | Python | linprog_solver/simplex/templatetags/simplex_extras.py | apirobot/django-linprog-solver-website | a90018c257b1d0a4c064baea1bb7c6e22bac1ab9 | [
"MIT"
] | 2 | 2017-04-22T11:25:00.000Z | 2020-04-05T20:22:41.000Z | linprog_solver/simplex/templatetags/simplex_extras.py | apirobot/django-linprog-solver-website | a90018c257b1d0a4c064baea1bb7c6e22bac1ab9 | [
"MIT"
] | null | null | null | linprog_solver/simplex/templatetags/simplex_extras.py | apirobot/django-linprog-solver-website | a90018c257b1d0a4c064baea1bb7c6e22bac1ab9 | [
"MIT"
] | null | null | null | from django import template
register = template.Library()
@register.inclusion_tag('simplex/_solution_steps.html')
def show_solution_steps(solution_steps):
return {
'solution_steps': solution_steps,
}
| 18.333333 | 55 | 0.745455 |
0380e01c2394fcd90d506c92c08f4facb6b495ab | 2,692 | py | Python | homeassistant/components/homeassistant/triggers/time_pattern.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | [
"Apache-2.0"
] | 11 | 2018-02-16T15:35:47.000Z | 2020-01-14T15:20:00.000Z | homeassistant/components/homeassistant/triggers/time_pattern.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | [
"Apache-2.0"
] | 77 | 2020-07-16T16:43:09.000Z | 2022-03-31T06:14:37.000Z | homeassistant/components/homeassistant/triggers/time_pattern.py | Vaarlion/core | f3de8b9f28de01abf72c0f5bb0b457eb1841f201 | [
"Apache-2.0"
] | 7 | 2021-03-20T12:34:01.000Z | 2021-12-02T10:13:52.000Z | """Offer time listening automation rules."""
import voluptuous as vol
from homeassistant.const import CONF_PLATFORM
from homeassistant.core import HassJob, callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.event import async_track_time_change
# mypy: allow-untyped-defs, no-check-untyped-defs
CONF_HOURS = "hours"
CONF_MINUTES = "minutes"
CONF_SECONDS = "seconds"
class TimePattern:
    """Validate a time pattern value.

    Accepts "*" (any), a plain integer (exact match), or "/n" (every n).

    :raises Invalid: If the value has a wrong format or is outside the range.
    """

    def __init__(self, maximum):
        """Initialize time pattern."""
        self.maximum = maximum

    def __call__(self, value):
        """Validate input."""
        if value == "*":
            return value
        try:
            if isinstance(value, str) and value.startswith("/"):
                number = int(value[1:])
            else:
                value = number = int(value)
        except ValueError as err:
            raise vol.Invalid("invalid time_pattern value") from err
        if not 0 <= number <= self.maximum:
            raise vol.Invalid(f"must be a value between 0 and {self.maximum}")
        return value
# Config schema: platform "time_pattern" plus at least one of
# hours/minutes/seconds, each validated by TimePattern above.
TRIGGER_SCHEMA = vol.All(
    cv.TRIGGER_BASE_SCHEMA.extend(
        {
            vol.Required(CONF_PLATFORM): "time_pattern",
            CONF_HOURS: TimePattern(maximum=23),
            CONF_MINUTES: TimePattern(maximum=59),
            CONF_SECONDS: TimePattern(maximum=59),
        }
    ),
    cv.has_at_least_one_key(CONF_HOURS, CONF_MINUTES, CONF_SECONDS),
)
async def async_attach_trigger(hass, config, action, automation_info):
    """Listen for state changes based on configuration."""
    trigger_data = {}
    if automation_info:
        trigger_data = automation_info.get("trigger_data", {})

    hours = config.get(CONF_HOURS)
    minutes = config.get(CONF_MINUTES)
    seconds = config.get(CONF_SECONDS)
    job = HassJob(action)

    # When only the larger units are given, anchor the smaller ones at zero.
    if hours is not None and minutes is None:
        minutes = 0
    if minutes is not None and seconds is None:
        seconds = 0

    @callback
    def time_automation_listener(now):
        """Listen for time changes and calls action."""
        payload = {
            "trigger": {
                **trigger_data,
                "platform": "time_pattern",
                "now": now,
                "description": "time pattern",
            }
        }
        hass.async_run_hass_job(job, payload)

    return async_track_time_change(
        hass, time_automation_listener, hour=hours, minute=minutes, second=seconds
    )
| 29.911111 | 85 | 0.616642 |
3a5e79c79832fab002ad2867765386567e3ce39c | 8,882 | py | Python | doublePendulum_HNN/validate_doublePendulum_HNN.py | pkmtum/Physics-enhanced_NN_SmallData | 532e9a2f8609b92e5717ebf4f2c3cb74500e2221 | [
"MIT"
] | 2 | 2021-11-27T15:55:11.000Z | 2021-12-12T10:43:55.000Z | doublePendulum_HNN/validate_doublePendulum_HNN.py | pkmtum/Physics-enhanced_NN_SmallData | 532e9a2f8609b92e5717ebf4f2c3cb74500e2221 | [
"MIT"
] | null | null | null | doublePendulum_HNN/validate_doublePendulum_HNN.py | pkmtum/Physics-enhanced_NN_SmallData | 532e9a2f8609b92e5717ebf4f2c3cb74500e2221 | [
"MIT"
] | null | null | null | import jax
import jax.numpy as jnp
from jax.experimental import stax
from jax.experimental.ode import odeint
from functools import partial
# import matplotlib.pyplot as plt
import pickle
import os
def wrap_state(state):
    """Wrap the angle coordinates of a double-pendulum state into [-pi, pi).

    ``state`` is (phi1, phi2, phi1_t, phi2_t); the two angular velocities
    are passed through unchanged.
    """
    two_pi = 2 * jnp.pi
    phi1_wrapped = (state[0] + jnp.pi) % two_pi - jnp.pi
    phi2_wrapped = (state[1] + jnp.pi) % two_pi - jnp.pi
    return jnp.array([phi1_wrapped, phi2_wrapped, state[2], state[3]])
def analytical_dynamics(can_state, t=0, m1=1, m2=1, l1=1, l2=1, g=9.81):
    """Hamilton's equations for the double pendulum.

    ``can_state`` holds the canonical coordinates (phi1, phi2, p1, p2);
    the return value is their time derivative.
    """
    phi1, phi2, p1, p2 = can_state
    delta = phi1 - phi2
    # Common denominator of the angular-velocity expressions.
    denom = l1 * l2 * (m1 + m2 * jnp.sin(delta) ** 2)
    coupling = (p1 * p2 * jnp.sin(delta)) / denom
    correction = (m2 * (l2 * p1) ** 2 + (m1 + m2) * (l1 * p2) ** 2 -
                  2 * l1 * l2 * m2 * p1 * p2 * jnp.cos(delta)) * \
        jnp.sin(2 * delta) / (2 * denom ** 2)
    # Right-hand side of Hamilton's equations.
    phi1_dot = (l2 * p1 - l1 * p2 * jnp.cos(delta)) / (l1 * denom)
    phi2_dot = (l1 * (m1 + m2) * p2 - l2 *
                m2 * p1 * jnp.cos(delta)) / (l2 * m2 * denom)
    p1_dot = -(m1 + m2) * g * l1 * jnp.sin(phi1) - coupling + correction
    p2_dot = -m2 * g * l2 * jnp.sin(phi2) + coupling - correction
    return jnp.array([phi1_dot, phi2_dot, p1_dot, p2_dot])
# The potential-energy zero is chosen so that phi=0 is the minimum
# (offset g*m1*l2 relative to the pivot).
def analytical_hamiltonian(can_state, t=0, m1=1, m2=1, l1=1, l2=1, g=9.81):
    """Total energy (kinetic + potential) of the double pendulum."""
    phi1, phi2, p1, p2 = can_state
    delta = phi1 - phi2
    denom = l1 * l2 * (m1 + m2 * jnp.sin(delta) ** 2)
    # Angular velocities recovered from the conjugate momenta.
    omega1 = (l2 * p1 - l1 * p2 * jnp.cos(delta)) / (l1 * denom)
    omega2 = (l1 * (m1 + m2) * p2 - l2 *
              m2 * p1 * jnp.cos(delta)) / (l2 * m2 * denom)
    # Kinetic energy of each bob, then the total.
    kinetic = (0.5 * m1 * (l1 * omega1) ** 2 +
               0.5 * m2 * ((l1 * omega1) ** 2 + (l2 * omega2) ** 2 +
                           2 * l1 * l2 * omega1 * omega2 * jnp.cos(delta)))
    # Bob heights measured so the rest configuration has zero potential.
    y1 = l2 + l1 * (1 - jnp.cos(phi1))
    y2 = y1 - l2 * jnp.cos(phi2)
    potential = m1 * g * y1 + m2 * g * y2
    return kinetic + potential
def HNN_dynamics(hamiltonian, can_state, t=0):
    """Symplectic gradient of ``hamiltonian`` at ``can_state`` = (q, p)."""
    q, p = jnp.split(can_state, 2, axis=-1)
    dH_dq = jax.grad(hamiltonian, 0)(q, p)
    dH_dp = jax.grad(hamiltonian, 1)(q, p)
    # Hamilton's equations: dq/dt = dH/dp, dp/dt = -dH/dq.
    return jnp.concatenate([dH_dp, -dH_dq])
@partial(jax.jit, backend='cpu')
def general_analytical_odeint(x0, t):
    # Tight-tolerance ground-truth rollout of the analytical dynamics,
    # jit-compiled on CPU; x0 is the canonical state, t the time grid.
    return odeint(analytical_dynamics, x0, t, rtol=1e-12, atol=1e-12)
def simple_odeint(dynamics_fun, state0, t, num_updates=1):
    """Fixed-step RK4 rollout of ``dynamics_fun`` over the time grid ``t``.

    Returns the trajectory (one state per time point, the initial state
    excluded) as a jnp.array so callers can vmap this function.
    """
    state = state0
    trajectory = []
    prev_time = 0.
    for current_time in t:
        step = current_time - prev_time
        state += rk4_update(dynamics_fun, state, num_updates, step)
        trajectory.append(state)
        prev_time = current_time
    return jnp.array(trajectory)
def rk4_update(dynamics_fun, state, num_updates, delta_t, t=None):
    """Integrate ``dynamics_fun`` over ``delta_t`` with ``num_updates`` RK4 steps.

    Returns the accumulated state increment (delta_state), not the new state.
    """
    step = delta_t / num_updates
    total = 0
    for _ in range(num_updates):
        current = state + total
        k1 = step * dynamics_fun(current)
        k2 = step * dynamics_fun(current + k1 / 2)
        k3 = step * dynamics_fun(current + k2 / 2)
        k4 = step * dynamics_fun(current + k3)
        total = total + 1.0 / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4)
    return total
# Paths: evaluation pickles live in ./eval next to this script; each file
# holds a dict with the trained network parameters under key 'params'.
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
PARENT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
eval_path = THIS_DIR + "/eval/"
listEVAL = sorted(os.listdir(eval_path))
print("Cases: " + str(listEVAL))
# Pickled parameters of the baseline (non-Hamiltonian) network.
# BaselineFile = "/doublePendulum_HNN_B_2x128_Data4x1500_params_for_loss_0.022198114544153214.pkl" # TODO: BaselineFile
BaselineFile = "/doublePendulum_HNN_B_2x128_Data4x150_params_for_loss_16.397838592529297.pkl"
# Plot/report labels; order presumably matches sorted(listEVAL) — TODO confirm.
labels = [r'Baseline 2x128', r'HNN 2x128', r'HNN+H-Reg. 2x128'] # TODO: labels r'Baseline 2x128',
print("Labels: " + str(labels))
def evaluate_deltaH():
    """Roll out the baseline net and each HNN for 100s from 10 random rest
    states and report the energy drift |H(x_t) - H(x_0)|, normalized by the
    maximum possible potential-energy range over 100 (9.81 * 6 / 100).
    """
    print("------------------------------ ------------------------------")
    print("------------------------------ Evaluate Delta H mean/max ------------------------------")
    print("------------------------------ ------------------------------")
    t = jnp.linspace(0, 100, 101)
    rng = jax.random.PRNGKey(7)
    # 10 initial states: random angles in +-0.8*pi, zero initial velocities.
    X0phi = jax.random.uniform(rng, (10, 2), minval=-0.8 * jnp.pi, maxval=0.8 * jnp.pi)
    X0phit = jnp.zeros_like(X0phi)
    X0 = jnp.hstack((X0phi, X0phit))
    print("X0 for evaluate_deltaH: \n{}".format(X0))
    # Baseline: a plain MLP that predicts the state derivative directly.
    len_state = 4
    hidden_dim = 128 # 128
    output_dim = 4
    mlp_init_fun, mlp_apply_fun = stax.serial(
        stax.Dense(hidden_dim),
        stax.Softplus,
        stax.Dense(hidden_dim),
        stax.Softplus,
        stax.Dense(output_dim), )

    @jax.jit
    def Baseline_dynamics(params, state, t=0):
        # Angles wrapped before the MLP, matching the training preprocessing.
        state = wrap_state(state)
        state_t = mlp_apply_fun(params, state)
        return state_t

    print("Processing: " + str(BaselineFile))
    with open(THIS_DIR + BaselineFile, 'rb') as fp:
        case_dict = pickle.load(fp)
    params = case_dict['params']
    dynamics = jax.jit(partial(Baseline_dynamics, params))

    def Base_simple_odeint(x0, t):
        return simple_odeint(dynamics, x0, t, num_updates=100)

    def Base_odeint(x0, t):
        return odeint(dynamics, x0, t, rtol=1e-12, atol=1e-12)

    X = jax.vmap(Base_simple_odeint, (0, None))(X0, t) # dim = num_initial_states * len_trajectory * 4
    # X = jax.vmap(Base_odeint, (0, None))(X0, t)
    # Energy along each predicted trajectory vs the true initial energy.
    h_pred = jax.vmap(jax.vmap(analytical_hamiltonian))(X)
    h_test = jax.vmap(analytical_hamiltonian)(X0)
    h_test = jnp.reshape(h_test, (len(X0), 1))
    delta_h = jnp.abs(h_pred - h_test)
    delta_h_mean = jnp.mean(jnp.abs(h_pred - h_test))
    delta_h_max = jnp.max(jnp.abs(h_pred - h_test))
    delta_h_std = jnp.std(jnp.abs(h_pred - h_test))
    normalization = 9.81 * 6 / 100
    print("normalized_delta_h_mean: {}".format(delta_h_mean / normalization))
    print("normalized_delta_h_max: {}".format(delta_h_max / normalization))
    print("normalized_delta_h_std: {}".format(delta_h_std / normalization))
    # HNN: an MLP that predicts a scalar Hamiltonian; dynamics come from its
    # symplectic gradient via HNN_dynamics.
    len_state = 4
    hidden_dim = 128 # 128
    output_dim = 1
    mlp_init_fun, mlp_apply_fun = stax.serial(
        stax.Dense(hidden_dim),
        stax.Softplus,
        stax.Dense(hidden_dim),
        stax.Softplus,
        stax.Dense(output_dim), )

    def learned_hamiltonian(params): # mlp_apply_fun
        def hamiltonian(q, p):
            state = jnp.concatenate([q, p])
            state = wrap_state(state)
            # squeeze because jax.grad only defined for scalar input shape: () NOT (1,)
            return jnp.squeeze(mlp_apply_fun(params, state))
        return hamiltonian

    # Same evaluation for every pickled HNN model found in ./eval.
    for file in listEVAL:
        print("Processing: " + str(file))
        with open(eval_path + file, 'rb') as fp:
            case_dict = pickle.load(fp)
        params = case_dict['params']
        dynamics = jax.jit(partial(HNN_dynamics, learned_hamiltonian(params)))

        def HNN_simple_odeint(x0, t):
            return simple_odeint(dynamics, x0, t, num_updates=100)

        def HNN_odeint(x0, t):
            return odeint(dynamics, x0, t, rtol=1e-12, atol=1e-12)

        X = jax.vmap(HNN_simple_odeint, (0, None))(X0, t) # dim = num_initial_states * len_trajectory * 4
        # X = jax.vmap(HNN_odeint, (0, None))(X0, t)
        h_pred = jax.vmap(jax.vmap(analytical_hamiltonian))(X)
        h_test = jax.vmap(analytical_hamiltonian)(X0)
        h_test = jnp.reshape(h_test, (len(X0), 1))
        delta_h = jnp.abs(h_pred - h_test)
        delta_h_mean = jnp.mean(jnp.abs(h_pred - h_test))
        delta_h_max = jnp.max(jnp.abs(h_pred - h_test))
        delta_h_std = jnp.std(jnp.abs(h_pred - h_test))
        normalization = 9.81 * 6 / 100
        print("normalized_delta_h_mean: {}".format(delta_h_mean / normalization))
        print("normalized_delta_h_max: {}".format(delta_h_max / normalization))
        print("normalized_delta_h_std: {}".format(delta_h_std / normalization))
if __name__ == "__main__":
    # Entry point: run the energy-drift evaluation over all pickled models.
    evaluate_deltaH()
| 37.957265 | 121 | 0.587255 |
804ff70f1a9bb1f403d60a1805b2bd08206826b4 | 4,726 | py | Python | tinydb/storages.py | annihilatorrrr/tinydb | 569447981a9f0d383c4be5556cd336c9ed7ef758 | [
"MIT"
] | null | null | null | tinydb/storages.py | annihilatorrrr/tinydb | 569447981a9f0d383c4be5556cd336c9ed7ef758 | [
"MIT"
] | null | null | null | tinydb/storages.py | annihilatorrrr/tinydb | 569447981a9f0d383c4be5556cd336c9ed7ef758 | [
"MIT"
] | null | null | null | """
Contains the :class:`base class <tinydb.storages.Storage>` for storages and
implementations.
"""
import io
import json
import os
from abc import ABC, abstractmethod
from typing import Dict, Any, Optional
__all__ = ('Storage', 'JSONStorage', 'MemoryStorage')
def touch(path: str, create_dirs: bool):
    """
    Create a file if it doesn't exist yet.

    :param path: The file to create.
    :param create_dirs: Whether to create all missing parent directories.
    """
    if create_dirs:
        base_dir = os.path.dirname(path)

        # ``dirname`` is '' for a bare filename; nothing to create then,
        # and ``os.makedirs('')`` would raise. ``exist_ok=True`` also avoids
        # a race when another process creates the directory concurrently.
        if base_dir:
            os.makedirs(base_dir, exist_ok=True)

    # Open in 'a' (append) mode: creates the file when missing but never
    # truncates or modifies existing contents.
    with open(path, 'a'):
        pass
class Storage(ABC):
    """
    Abstract base class for all storages.

    A storage serializes the database state and persists it somewhere
    (memory, a file on disk, ...). Subclasses must implement ``read`` and
    ``write``; overriding ``close`` is optional.
    """

    @abstractmethod
    def read(self) -> Optional[Dict[str, Dict[str, Any]]]:
        """
        Read and deserialize the stored state.

        Returns ``None`` to indicate that the storage is empty.
        """
        raise NotImplementedError('To be overridden!')

    @abstractmethod
    def write(self, data: Dict[str, Dict[str, Any]]) -> None:
        """
        Serialize ``data`` and persist it.

        :param data: The current state of the database.
        """
        raise NotImplementedError('To be overridden!')

    def close(self) -> None:
        """
        Optional hook: release open file handles and other resources.
        """
        pass
class JSONStorage(Storage):
    """
    Store the data in a JSON file.
    """

    def __init__(self, path: str, create_dirs=False, encoding=None, access_mode='r+', **kwargs):
        """
        Create a new instance.

        Also creates the storage file, if it doesn't exist and the access mode is appropriate for writing.

        :param path: Where to store the JSON data.
        :param access_mode: mode in which the file is opened (r, r+, w, a, x, b, t, +, U)
        :type access_mode: str
        """
        super().__init__()

        # Remembered so write() can report the mode in its error message;
        # extra kwargs are forwarded to json.dumps (e.g. indent, sort_keys).
        self._mode = access_mode
        self.kwargs = kwargs

        # Create the file if it doesn't exist and creating is allowed by the
        # access mode
        if any([character in self._mode for character in ('+', 'w', 'a')]): # any of the writing modes
            touch(path, create_dirs=create_dirs)

        # Open the file for reading/writing; the handle stays open for the
        # lifetime of the storage and is reused by read()/write().
        self._handle = open(path, mode=self._mode, encoding=encoding)

    def close(self) -> None:
        """Close the underlying file handle."""
        self._handle.close()

    def read(self) -> Optional[Dict[str, Dict[str, Any]]]:
        """Return the deserialized file contents, or ``None`` if the file is empty."""
        # Get the file size by moving the cursor to the file end and reading
        # its location
        self._handle.seek(0, os.SEEK_END)
        size = self._handle.tell()

        if not size:
            # File is empty, so we return ``None`` so TinyDB can properly
            # initialize the database
            return None
        else:
            # Return the cursor to the beginning of the file
            self._handle.seek(0)

            # Load the JSON contents of the file
            return json.load(self._handle)

    def write(self, data: Dict[str, Dict[str, Any]]):
        """Serialize ``data`` to the file, overwriting the previous contents.

        The statement order below matters: write from offset 0, flush and
        fsync for durability, then truncate to drop any stale tail bytes.
        """
        # Move the cursor to the beginning of the file just in case
        self._handle.seek(0)

        # Serialize the database state using the user-provided arguments
        serialized = json.dumps(data, **self.kwargs)

        # Write the serialized data to the file
        try:
            self._handle.write(serialized)
        except io.UnsupportedOperation:
            raise IOError('Cannot write to the database. Access mode is "{0}"'.format(self._mode))

        # Ensure the file has been written
        self._handle.flush()
        os.fsync(self._handle.fileno())

        # Remove data that is behind the new cursor in case the file has
        # gotten shorter
        self._handle.truncate()
class MemoryStorage(Storage):
    """
    Keep the database state in RAM only; nothing is ever persisted.
    """

    def __init__(self):
        """Start with an empty (``None``) state."""
        super().__init__()
        self.memory = None

    def read(self) -> Optional[Dict[str, Dict[str, Any]]]:
        """Return the in-memory state, or ``None`` when nothing was written yet."""
        return self.memory

    def write(self, data: Dict[str, Dict[str, Any]]):
        """Replace the in-memory state with ``data``."""
        self.memory = data
| 28.299401 | 106 | 0.613627 |
7c2539f6e1a9fa08f28a64bd1fc892d739903f1e | 7,102 | py | Python | Prediction_Algorithm-Motion/prediction_alg_motion.py | cantiusdeepan/Cadrea | bdd341f8e9ee7a5103611d5bdac1a820ab9fdd81 | [
"MIT"
] | null | null | null | Prediction_Algorithm-Motion/prediction_alg_motion.py | cantiusdeepan/Cadrea | bdd341f8e9ee7a5103611d5bdac1a820ab9fdd81 | [
"MIT"
] | null | null | null | Prediction_Algorithm-Motion/prediction_alg_motion.py | cantiusdeepan/Cadrea | bdd341f8e9ee7a5103611d5bdac1a820ab9fdd81 | [
"MIT"
] | null | null | null | #Motion prediction Web service
#Gets real-time motion values and then sends the prediction of motion for the next time instant
#Communication is via MQTT
import json
import time
import paho.mqtt.client as MQTT
import random
import fpformat
import numpy
from datetime import datetime
# It writes the weeks in a nested list called year[] with:
# num_weeks - number of weeks (for easier presentation we use 4, for the final product it would be 52)
# samples_week - how many samples per week (for easier presentation we use 20, for the final product it would be 336 - every half hour)
# new_week stores the values of motion in real time, got via an MQTT subscription
new_value = 5
#num_weeks = 52
#samples_week = 336
num_weeks = 4
samples_week = 20
new_week = []
prediction = []
#Making the matrix which will keep values for the whole year
year = []
for row in range(num_weeks):
week = []
for column in range(samples_week):
week.append(random.randint(0, 1))
year.append(week)
print year
# MQTT wrapper: subscribes to motion-sensor readings and publishes prediction
# values. NOTE: Python 2 code (print statements) built on paho-mqtt.
class MyMQTT:
    def __init__(self, broker, port, notifier):
        # ``notifier`` must expose notify(topic, payload); it receives every
        # incoming message (see myOnMessageReceived).
        self.broker = broker
        self.port = port
        self.notifier = notifier
        self._paho_mqtt = MQTT.Client("Prediction_alg_motion", False)
        self._paho_mqtt.on_connect = self.myOnConnect
        self._paho_mqtt.on_message = self.myOnMessageReceived

    def myOnConnect (self, paho_mqtt, userdata, flags, rc):
        # Connection callback; rc == 0 means success.
        print ("Connected to message broker with result code: "+str(rc))

    def myOnMessageReceived (self, paho_mqtt , userdata, msg):
        # Forward every incoming message to the registered notifier.
        self.notifier.notify(msg.topic, msg.payload)
        print ("Message received. Message: " + msg.topic + " QoS: " + str(msg.qos) + " Payload: " + str(msg.payload))

    def myPublish(self,p,hid):
        # Publish prediction ``p`` for house ``hid`` with QoS 2.
        print("barbastruzzo")
        js = {"data": "motion", "value": p}
        motion_topic = 'house/'+hid+'/prediction_local_controller/predict_motion'
        #motion_topic = 'house/' + hid + '/motion_local_controller/motion_status'
        self._paho_mqtt.publish(motion_topic, json.dumps(js), 2)

    def mySubscribe(self, topic, qos=2):
        self._paho_mqtt.subscribe(topic, qos)
        print "Subscription successful"

    def start(self):
        # Connect and start the paho network loop in a background thread.
        self._paho_mqtt.connect(self.broker, self.port)
        self._paho_mqtt.loop_start()

    def stop(self):
        self._paho_mqtt.loop_stop()
class DoSomething():
    """Main service: subscribes to live motion values, maintains the year
    history, and publishes a weighted-average presence prediction every cycle.
    NOTE: Python 2 code; __init__ never returns (infinite publish loop).
    """

    def __init__(self):
        print "hello"
        self.house_id = ""
        self.mqtt_broker = ""
        self.mqtt_port = 0
        # Load house id / broker settings from prediction_config.json.
        self.RC_config_reader()
        self.myMqtt = MyMQTT(self.mqtt_broker, self.mqtt_port, self)
        #self.myMqtt = MyMQTT("192.168.1.105", self.mqtt_port, self)
        self.myMqtt.start()
        self.myMqtt.mySubscribe('house/'+self.house_id+'/motion_local_controller/motion_status',0)
        global new_week
        global samples_week
        global week
        global year
        global num_weeks
        global prediction
        self.count = 0
        print "Old year: ", year
        self.predict()
        # Main loop: every 30s record the latest motion value; when a full
        # week of samples is collected, shift the year history and re-predict.
        while True:
            time.sleep(30)
            new_week.append(new_value)
            print "new week is: ", new_week
            if len(new_week) == samples_week:
                print "Weeks shift here. New_week becomes the last week and is then emptied."
                # Shift weeks left by one; the finished week becomes the last row.
                for i in range(num_weeks):
                    if (i == num_weeks-1):
                        year[i] = new_week
                        break
                    year[i] = year[i+1]
                print "New year: ", year
                obj_year = {"year": year}
                self.writejson("predictions.json", obj_year)
                #break
                new_week = []
                self.count=0
                #trying to get the predictions
                self.predict()
            #We publish a predicted value every half-hour
            self.myMqtt.myPublish(prediction[self.count], self.house_id)
            print "I've published at time: ", str(datetime.now())
            self.count += 1

    #With this method we predict the presence values based on values for that time of the day and week, throughout the whole year
    #We do it using a weighted average algorithm in which we use a number of past values, where the more recent values have a higher weight in the prediction
    def predict(self):
        global prediction
        #emptying prediction vector
        prediction = []
        #making column vectors with the appropriate values
        # NOTE: ``help`` shadows the builtin; kept as-is for byte fidelity.
        columns = []
        for row in range(samples_week):
            help = []
            for column in range(num_weeks):
                help.append(0)
            columns.append(help)
        # columns[i][j] = value at time-slot i in week j (transpose of year).
        for i in range(samples_week):
            for j in range(num_weeks):
                columns[i][j] = year[j][i]
        print "columns: ", columns
        # Weighted average per time slot; weights 0.5, 0.6, ... favour the
        # most recent weeks. Threshold ~0.5 decides presence (1) or not (0).
        for m in range(samples_week):
            x = 0
            sum_weights = 0
            weight = 0.5
            for item in columns[m]:
                x += (item * weight)
                sum_weights += weight
                weight += 0.1
            result = x / sum_weights
            print "Result: ", result
            if (result > 0.49999):
                prediction.append(1)
            else:
                prediction.append(0)
        print "Prediction: ", prediction

    def writejson(self, filename, obj):
        # Dump ``obj`` to ``filename`` as JSON (overwrites the file).
        myfile = open(filename, "w")
        obj_1 = obj
        json.dump(obj_1, myfile)
        myfile.close()

    def end(self):
        self.myMqtt.stop ()

    def RC_config_reader(self):
        # Read local configuration; missing keys are reported but not fatal.
        json_file = open('prediction_config.json').read()
        local_config = json.loads(json_file)
        if local_config.get("RC_base_url"):
            self.rc_base_url = local_config["RC_base_url"]
        else:
            print "Problem in local json - Can't get RC url"
        if local_config.get("Central_config_base_url"):
            self.cc_base_url = local_config["Central_config_base_url"]
        else:
            print "Problem in local json - Can't get Central config url"
        if local_config.get("house_id"):
            self.house_id = local_config["house_id"]
        else:
            print "Problem in local json - Can't get house_id"
        if local_config.get("mqtt_broker"):
            self.mqtt_broker = local_config["mqtt_broker"]
        else:
            print "Problem in local json - Can't get mqtt_broker"
        if local_config.get("mqtt_port"):
            self.mqtt_port = local_config["mqtt_port"]
        else:
            print "Problem in local json - Can't get mqtt_port"

    def notify(self, topic, msg):
        # MQTT callback (via MyMQTT): store the latest motion reading.
        global new_value
        global num_weeks
        global samples_week
        global prediction
        if topic=='house/'+self.house_id+'/motion_local_controller/motion_status':
            new_value=(json.loads(msg)['value'])
            print "I've received the new value and it is: ", new_value
if __name__ == "__main__":
test = DoSomething()
test.end() | 33.186916 | 153 | 0.610955 |
8bf6795e6745d48287cd8717c7ad9b24bc3cc7de | 15,582 | py | Python | rllib/util/neural_networks/tests/test_neural_networks.py | shenao-zhang/DCPU | 0da9aa2b7878b54ba4ee4dca894c2e86cdc0d559 | [
"MIT"
] | null | null | null | rllib/util/neural_networks/tests/test_neural_networks.py | shenao-zhang/DCPU | 0da9aa2b7878b54ba4ee4dca894c2e86cdc0d559 | [
"MIT"
] | null | null | null | rllib/util/neural_networks/tests/test_neural_networks.py | shenao-zhang/DCPU | 0da9aa2b7878b54ba4ee4dca894c2e86cdc0d559 | [
"MIT"
] | null | null | null | import pytest
import torch
import torch.distributions
import torch.jit
import torch.testing
from rllib.util.distributions import Delta
from rllib.util.neural_networks import (
CategoricalNN,
DeterministicNN,
Ensemble,
FelixNet,
HeteroGaussianNN,
HomoGaussianNN,
)
from rllib.util.neural_networks.utilities import count_vars
from rllib.util.utilities import tensor_to_distribution
@pytest.fixture(params=[[], [32], [64, 32]])
def layers(request):
    # Hidden-layer widths: no hidden layer, one layer, two layers.
    return request.param
@pytest.fixture(params=["ReLU", "tanh"])
def non_linearity(request):
return request.param
@pytest.fixture(params=[None, 1, 32])
def batch_size(request):
    # None means an unbatched (single-sample) input.
    return request.param
@pytest.fixture(params=[1, 16])
def in_dim(request):
    # Input dimension as a 1-tuple shape.
    return (request.param,)
@pytest.fixture(params=[2, 4])
def out_dim(request):
    # Output dimension as a 1-tuple shape.
    return (request.param,)
@pytest.fixture(params=[5, 32])
def num_heads(request):
    # Number of ensemble heads.
    return request.param
def _test_from_other(object_, class_):
    """Check that ``class_.from_other(object_, copy=False)`` returns a fresh,
    re-initialized module of the same architecture (same variable count,
    different parameter values)."""
    other = class_.from_other(object_, copy=False)
    assert isinstance(other, class_)
    assert other is not object_
    try:
        other = torch.jit.script(other)
    except Exception:  # scripting is best-effort; fall back to the eager module.
        pass
    other_state_dict = other.state_dict()
    for name, param in object_.named_parameters():
        # Parameters must have been re-initialized; all-zero tensors (e.g.
        # biases) and the scale head are excluded since they may coincide.
        if not (torch.allclose(param, torch.zeros_like(param)) or name == "_scale"):
            assert not torch.allclose(param, other_state_dict[name])
    assert count_vars(other) == count_vars(object_)
def _test_from_other_with_copy(object_, class_):
    """Check that ``class_.from_other(object_, copy=True)`` returns a distinct
    module whose parameters are exact copies of the original's."""
    other = class_.from_other(object_, copy=True)
    assert isinstance(other, class_)
    assert other is not object_
    try:
        other = torch.jit.script(other)
    except Exception:  # scripting is best-effort; fall back to the eager module.
        pass
    other_state_dict = other.state_dict()
    for name, param in object_.named_parameters():
        assert torch.allclose(param, other_state_dict[name])
    assert count_vars(other) == count_vars(object_)
class TestDeterministicNN(object):
    """Shape, layer-structure and from_other checks for DeterministicNN."""

    @pytest.fixture(scope="class")
    def net(self):
        return DeterministicNN

    def test_output_shape(
        self, net, in_dim, out_dim, layers, non_linearity, batch_size
    ):
        # The scripted network must preserve the (batched) output shape.
        net = torch.jit.script(
            net(in_dim, out_dim, layers, non_linearity=non_linearity)
        )
        if batch_size is None:
            t = torch.randn(in_dim)
            o = net(t)
            assert o.shape == torch.Size(out_dim)
        else:
            t = torch.randn((batch_size,) + in_dim)
            o = net(t)
            assert o.shape == torch.Size((batch_size,) + out_dim)

    def test_layers(self, net, in_dim, out_dim, layers):
        net = torch.jit.script(net(in_dim, out_dim, layers))
        layers = layers or list()
        # Check nn.parameters (+1: head); each Dense layer has weight + bias.
        assert 2 * (len(layers) + 1) == len([*net.parameters()])
        # Check shapes
        layers.append(out_dim[0])
        for i, param in enumerate(net.parameters()):
            assert param.shape[0] == layers[i // 2]

    def test_class_method(self, net, in_dim, out_dim, layers, non_linearity):
        n1 = net(in_dim, out_dim, layers, non_linearity=non_linearity)
        _test_from_other(n1, net)
        _test_from_other_with_copy(n1, net)
class TestHeteroGaussianNN(object):
    """Distribution-output checks for the heteroscedastic Gaussian network."""

    @pytest.fixture(scope="class")
    def net(self):
        return HeteroGaussianNN

    def test_output_shape(self, net, in_dim, out_dim, layers, batch_size):
        net = torch.jit.script(net(in_dim, out_dim, layers))
        if batch_size is None:
            t = torch.randn(in_dim)
            o = tensor_to_distribution(net(t)).sample()
            assert o.shape == torch.Size(out_dim)
        else:
            t = torch.randn((batch_size,) + in_dim)
            o = tensor_to_distribution(net(t)).sample()
            assert o.shape == torch.Size((batch_size,) + out_dim)

    def test_output_properties(self, net, in_dim, out_dim, batch_size):
        net = torch.jit.script(net(in_dim, out_dim))
        if batch_size is None:
            t = torch.randn(in_dim)
        else:
            t = torch.randn((batch_size, 2) + in_dim)
        o = tensor_to_distribution(net(t))
        # Output must be a reparameterizable MultivariateNormal.
        assert isinstance(o, torch.distributions.MultivariateNormal)
        assert o.has_rsample
        assert not o.has_enumerate_support
        assert o.batch_shape == torch.Size(
            (batch_size, 2) if batch_size is not None else []
        )

    def test_layers(self, net, in_dim, out_dim, layers):
        net = torch.jit.script(net(in_dim, out_dim, layers))
        layers = layers or list()
        # Check nn.parameters (+2: mean and covariance)
        assert 2 * (len(layers) + 2) == len([*net.parameters()])
        # Check shapes
        layers.append(out_dim[0])
        layers.append(out_dim[0])
        i = 0
        for name, param in net.named_parameters():
            if name.startswith("_scale"):
                assert param.shape[0] == out_dim[0] # * out_dim
            else:
                assert param.shape[0] == layers[i // 2]
            i += 1

    def test_class_method(self, net, in_dim, out_dim, layers, non_linearity):
        n1 = net(in_dim, out_dim, layers, non_linearity=non_linearity)
        _test_from_other(n1, net)
        _test_from_other_with_copy(n1, net)
class TestHomoGaussianNN(object):
    """Distribution-output checks for the homoscedastic Gaussian network."""

    @pytest.fixture(scope="class")
    def net(self):
        return HomoGaussianNN

    def test_output_shape(self, net, in_dim, out_dim, layers, batch_size):
        net = torch.jit.script(net(in_dim, out_dim, layers))
        if batch_size is None:
            t = torch.randn(in_dim)
            o = tensor_to_distribution(net(t)).sample()
            assert o.shape == torch.Size(out_dim)
        else:
            t = torch.randn((batch_size,) + in_dim)
            o = tensor_to_distribution(net(t)).sample()
            assert o.shape == torch.Size((batch_size,) + out_dim)

    def test_output_properties(self, net, in_dim, out_dim, batch_size):
        net = torch.jit.script(net(in_dim, out_dim))
        if batch_size is None:
            t = torch.randn(in_dim)
        else:
            t = torch.randn((batch_size, 2) + in_dim)
        o = tensor_to_distribution(net(t))
        assert isinstance(o, torch.distributions.MultivariateNormal)
        assert o.has_rsample
        assert not o.has_enumerate_support
        assert o.batch_shape == torch.Size(
            (batch_size, 2) if batch_size is not None else ()
        )

    def test_layers(self, net, in_dim, out_dim, layers):
        net = torch.jit.script(net(in_dim, out_dim, layers))
        layers = layers or list()
        # Check nn.parameters (+1: mean and covariance has only 1 param)
        assert 2 * (len(layers) + 1) + 1 == len([*net.parameters()])
        # Check shapes
        layers.append(out_dim[0])
        i = 0
        for name, param in net.named_parameters():
            if name.startswith("_scale"):
                assert param.shape[0] == out_dim[0]
            else:
                assert param.shape[0] == layers[i // 2]
            i += 1

    def test_class_method(self, net, in_dim, out_dim, layers, non_linearity):
        n1 = net(in_dim, out_dim, layers, non_linearity=non_linearity)
        _test_from_other(n1, net)
        _test_from_other_with_copy(n1, net)
class TestCategoricalNN(object):
    """Categorical-distribution output checks for CategoricalNN."""

    @pytest.fixture(scope="class")
    def net(self):
        return CategoricalNN

    def test_output_shape(self, net, in_dim, out_dim, layers, batch_size):
        # Categorical samples are scalar indices, so no event dimension.
        net = torch.jit.script(net(in_dim, out_dim, layers))
        if batch_size is None:
            t = torch.randn(in_dim)
            o = tensor_to_distribution(net(t)).sample()
            assert o.shape == torch.Size([])
        else:
            t = torch.randn((batch_size,) + in_dim)
            o = tensor_to_distribution(net(t)).sample()
            assert o.shape == torch.Size([batch_size])

    def test_output_properties(self, net, in_dim, out_dim, batch_size):
        net = torch.jit.script(net(in_dim, out_dim))
        if batch_size is None:
            t = torch.randn(in_dim)
        else:
            t = torch.randn((batch_size, 2) + in_dim)
        o = tensor_to_distribution(net(t))
        # Categorical is discrete: enumerable support, no rsample.
        assert isinstance(o, torch.distributions.Categorical)
        assert not o.has_rsample
        assert o.has_enumerate_support
        assert o.batch_shape == torch.Size(
            (batch_size, 2) if batch_size is not None else ()
        )

    def test_layers(self, net, in_dim, out_dim, layers):
        net = torch.jit.script(net(in_dim, out_dim, layers))
        layers = layers or list()
        # Check nn.parameters (+1: head)
        assert 2 * (len(layers) + 1) == len([*net.parameters()])
        # Check shapes
        layers.append(out_dim[0])
        for i, param in enumerate(net.parameters()):
            assert param.shape[0] == layers[i // 2]

    def test_class_method(self, net, in_dim, out_dim, layers, non_linearity):
        n1 = net(in_dim, out_dim, layers, non_linearity=non_linearity)
        _test_from_other(n1, net)
        _test_from_other_with_copy(n1, net)
class TestEnsembleNN(object):
    """Checks for the multi-head Ensemble network, including construction
    via from_feedforward and the set_head prediction strategy."""

    @pytest.fixture(scope="class", params=[DeterministicNN, HeteroGaussianNN, Ensemble])
    def net(self, request):
        return request.param

    @pytest.fixture(scope="class", params=[True, False])
    def deterministic(self, request):
        return request.param

    def test_num_heads(self, num_heads, deterministic):
        net = Ensemble((4,), (2,), num_heads=num_heads, deterministic=deterministic)
        assert net.num_heads == num_heads

    def test_output_shape(self, out_dim, batch_size, num_heads, deterministic):
        in_dim = (4,)
        net = Ensemble(
            in_dim, out_dim, num_heads=num_heads, deterministic=deterministic
        )
        if batch_size is None:
            t = torch.randn(in_dim)
            o = tensor_to_distribution(net(t)).sample()
            assert o.shape == torch.Size(out_dim)
        else:
            t = torch.randn((batch_size,) + in_dim)
            o = tensor_to_distribution(net(t)).sample()
            assert o.shape == torch.Size((batch_size,) + out_dim)

    def test_output_properties(self, out_dim, num_heads, batch_size, deterministic):
        in_dim = (4,)
        net = Ensemble(
            in_dim, out_dim, num_heads=num_heads, deterministic=deterministic
        )
        if batch_size is None:
            t = torch.randn(in_dim)
        else:
            t = torch.randn((batch_size, 2) + in_dim)
        o = tensor_to_distribution(net(t))
        # Default strategy aggregates the heads into a MultivariateNormal.
        assert isinstance(o, torch.distributions.MultivariateNormal)
        assert o.has_rsample
        assert not o.has_enumerate_support
        assert o.batch_shape == torch.Size(
            (batch_size, 2) if batch_size is not None else ()
        )
        # Selecting a single head returns Delta (deterministic) or a Gaussian.
        net.set_prediction_strategy("set_head")
        net.set_head(0)
        o = tensor_to_distribution(net(t))
        if deterministic:
            assert isinstance(o, Delta)
        else:
            assert isinstance(o, torch.distributions.MultivariateNormal)
        assert o.batch_shape == torch.Size(
            (batch_size, 2) if batch_size is not None else ()
        )
        assert o.has_rsample
        assert not o.has_enumerate_support

    def test_layers(self, out_dim, num_heads, layers, deterministic):
        in_dim = (4,)
        net = Ensemble(in_dim, out_dim, layers=layers, num_heads=num_heads)
        layers = layers or list()
        # Check nn.parameters (+1: head)
        assert 2 * (len(layers) + 2) == len([*net.parameters()])
        # Check shapes; heads share the trunk so output width is out_dim * num_heads.
        layers.append(out_dim[0] * num_heads)
        layers.append(out_dim[0] * num_heads)
        # Check shapes
        i = 0
        for name, param in net.named_parameters():
            if name.startswith("_scale"):
                assert param.shape[0] == out_dim[0] * num_heads # * out_dim
            else:
                assert param.shape[0] == layers[i // 2]
            i += 1

    def test_class_method(self, net, batch_size, out_dim, num_heads):
        layers = [64, 64]
        in_dim = (4,)
        try:
            n1 = net(in_dim, out_dim, layers=layers, num_heads=num_heads)
        except TypeError:
            # Feedforward nets take no num_heads; build via from_feedforward.
            base_net = net(in_dim, out_dim, layers=layers)
            n1 = Ensemble.from_feedforward(base_net, num_heads=num_heads)
            if isinstance(base_net, DeterministicNN):
                assert n1.deterministic
            else:
                assert not n1.deterministic
        _test_from_other(n1, Ensemble)
        _test_from_other_with_copy(n1, Ensemble)
        # Test layers
        layers = layers or list()
        # Check nn.parameters (+1: head)
        assert 2 * (len(layers) + 2) == len([*n1.parameters()])
        # Check shapes
        layers.append(out_dim[0] * num_heads)
        layers.append(out_dim[0] * num_heads)
        i = 0
        for name, param in n1.named_parameters():
            if name.startswith("_scale"):
                assert param.shape[0] == out_dim[0] * num_heads # * out_dim
            else:
                assert param.shape[0] == layers[i // 2]
            i += 1
        # Check output
        if batch_size is None:
            t = torch.randn(in_dim)
            o = tensor_to_distribution(n1(t))
            assert o.sample().shape == torch.Size(out_dim)
            assert o.batch_shape == torch.Size([])
        else:
            t = torch.randn((batch_size, 2) + in_dim)
            o = tensor_to_distribution(n1(t))
            assert o.sample().shape == torch.Size((batch_size, 2) + out_dim)
            assert o.batch_shape == torch.Size((batch_size, 2))
        assert isinstance(o, torch.distributions.MultivariateNormal)
        assert o.has_rsample
        assert not o.has_enumerate_support
class TestFelixNet(object):
    """Checks for FelixNet, which has a fixed 64x64 trunk."""

    @pytest.fixture(scope="class")
    def net(self):
        return FelixNet

    def test_output_shape(self, net, in_dim, out_dim, batch_size):
        net = torch.jit.script(net(in_dim, out_dim))
        if batch_size is None:
            t = torch.randn(in_dim)
            o = tensor_to_distribution(net(t)).sample()
            assert o.shape == torch.Size(out_dim)
        else:
            t = torch.randn((batch_size,) + in_dim)
            o = tensor_to_distribution(net(t)).sample()
            assert o.shape == torch.Size((batch_size,) + out_dim)

    def test_output_properties(self, net, in_dim, out_dim, batch_size):
        net = torch.jit.script(net(in_dim, out_dim))
        if batch_size is None:
            t = torch.randn(in_dim)
        else:
            t = torch.randn((batch_size, 2) + in_dim)
        o = tensor_to_distribution(net(t))
        assert isinstance(o, torch.distributions.MultivariateNormal)
        assert o.has_rsample
        assert not o.has_enumerate_support
        assert o.batch_shape == torch.Size(
            (batch_size, 2) if batch_size is not None else ()
        )

    def test_layers(self, net, in_dim, out_dim):
        net = torch.jit.script(net(in_dim, out_dim))
        # FelixNet always uses two hidden layers of width 64.
        layers = [64, 64]
        # Check nn.parameters (+2: mean and covariance have only weights)
        assert 2 * (len(layers)) + 2 == len([*net.parameters()])
        # Check shapes
        layers.append(out_dim[0])
        for i, param in enumerate(net.parameters()):
            assert param.shape[0] == layers[i // 2]

    def test_class_method(self, net, in_dim, out_dim):
        n1 = net(in_dim, out_dim)
        _test_from_other(n1, net)
        _test_from_other_with_copy(n1, net)
| 33.947712 | 88 | 0.610255 |
01547496c2f987415f81cf04e8b3598a591d4cee | 2,386 | py | Python | code/feedback-server.py | bahruzjabiyev/t-reqs-http-fuzzer | 92cd4dff0a0786ada75d0b52ef02a062e03cb043 | [
"MIT"
] | 113 | 2021-09-22T06:23:26.000Z | 2022-01-05T14:47:34.000Z | code/feedback-server.py | bahruzjabiyev/T-Reqs-HTTP-Fuzzer | 92cd4dff0a0786ada75d0b52ef02a062e03cb043 | [
"MIT"
] | null | null | null | code/feedback-server.py | bahruzjabiyev/T-Reqs-HTTP-Fuzzer | 92cd4dff0a0786ada75d0b52ef02a062e03cb043 | [
"MIT"
] | 11 | 2022-01-17T08:49:29.000Z | 2022-03-10T06:00:37.000Z | import socket
import threading
import re
import sys
# Listening socket for the feedback server on port 8080 (all interfaces).
# SO_REUSEADDR allows quick restarts without waiting for TIME_WAIT.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('', 8080))
s.listen()
def get_body(data):
body = b'None'
body_length = -1
try:
parts = data.split(b'\r\n\r\n')
if parts:
if len(parts) > 1:
body = b'\r\n\r\n'.join(parts[1:])
body_length = len(body)
# Alternatively, a single type of character ('B' in this case) can
# be counted assuming that the body of received requests contains
# just that character. This would be useful in cases where the format
# of the body in received requests alternates between chunked and regular.
# body_length = len(re.findall(b'(?<=B)B', body))
# if body_length != 0:
# body_length += 1
else:
body = b''
body_length = 0
except Exception as exception:
print(data)
print("exception: {}".format(exception))
return body, body_length
def handle_connection(conn):
    """Serve one client connection end-to-end, then close it.

    Reads until the peer closes or a 3-second receive timeout fires, then
    echoes back a JSON-ish summary of the request body.  If the request
    contains ``debug=true`` anywhere, the *entire* raw request is echoed
    instead of just the parsed body.  Runs in its own thread.
    """
    data = b''
    try:
        # The timeout doubles as the "end of request" signal for clients
        # that keep the connection open after sending.
        conn.settimeout(3)
        while True:
            try:
                conn_data = conn.recv(2048)
                if not conn_data:
                    # Peer closed its side — request fully received.
                    break
                else:
                    data += conn_data
            except socket.timeout:
                break
        if b"debug=true" in data:
            # Debug mode: report the whole raw request, headers included.
            body = data
            body_length = len(data)
        else:
            body, body_length = get_body(data)
        response_body = b"{body: '" + body + b"', body_length: " + str(body_length).encode() + b"}"
        response_headers = b"HTTP/1.1 200 OK\r\nConnection: close\r\nserver-name: someserver\r\nContent-Length: " + str(len(response_body)).encode() + b"\r\n\r\n"
        conn.sendall(response_headers + response_body)
        conn.shutdown(socket.SHUT_RDWR)
        conn.close()
    except Exception as exception:
        # Best-effort server: log the failing request and keep the process alive.
        print(data)
        print("exception: {}".format(exception))
# Main accept loop: hand each incoming connection to a dedicated thread.
while True:
    try:
        conn, addr = s.accept()
        thread = threading.Thread(target=handle_connection, args=(conn,))
        thread.start()
    except Exception as exception:
        # Keep accepting even if a single accept/thread-start fails.
        print("exception: {}".format(exception))
# NOTE(review): effectively unreachable — the loop above never breaks, and a
# KeyboardInterrupt propagates past this statement rather than reaching it.
s.close()
| 29.825 | 162 | 0.551551 |
cbfed1b26e66db00cc3592c10062c8d0cf12000f | 2,082 | py | Python | adminmgr/media/code/A3/task3/BD_0980_1528_1600.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 9 | 2019-11-08T02:05:27.000Z | 2021-12-13T12:06:35.000Z | adminmgr/media/code/A3/task3/BD_0980_1528_1600.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 6 | 2019-11-27T03:23:16.000Z | 2021-06-10T19:15:13.000Z | adminmgr/media/code/A3/task3/BD_0980_1528_1600.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 4 | 2019-11-26T17:04:27.000Z | 2021-12-13T11:57:03.000Z | import findspark
findspark.init()
from pyspark import SparkConf,SparkContext
from pyspark.streaming import StreamingContext
from pyspark.sql import Row,SQLContext
import sys
import requests
from operator import add
def aggregate_tweets_count(new_values, total_sum):
    """State-update function for Spark's ``updateStateByKey``.

    Adds this batch's per-key counts to the running total; ``total_sum`` is
    ``None`` the first time a key is seen.
    """
    running = total_sum if total_sum else 0
    return running + sum(new_values)
def get_sql_context_instance(spark_context):
    """Return the process-wide SQLContext, creating it on first use.

    The singleton is stashed in the module's globals so that every
    ``foreachRDD`` callback reuses the same context instead of building a new
    one per micro-batch.
    """
    key = 'sqlContextSingletonInstance'
    if key not in globals():
        globals()[key] = SQLContext(spark_context)
    return globals()[key]
def process_rdd(time, rdd):
    """Process one streaming micro-batch of (hashtag, count) pairs.

    Builds a DataFrame from the batch, ranks hashtags by count via SQL, and
    prints the top five through ``send_df_to_dashboard``.

    Parameters:
        time: batch timestamp supplied by Spark Streaming (unused).
        rdd: the micro-batch RDD of (hashtag, count) tuples.
    """
    try:
        sql_context = get_sql_context_instance(rdd.context)
        row_rdd = rdd.map(lambda w: Row(hashtag=w[0], hashtag_count=w[1]))
        hashtags_df = sql_context.createDataFrame(row_rdd)
        hashtags_df.registerTempTable("hashtags")
        # ``like '_%'`` keeps only hashtags with at least one character.
        hashtag_counts_df = sql_context.sql("select hashtag,hashtag_count from hashtags where hashtag like '_%' order by hashtag_count desc limit 5")
        send_df_to_dashboard(hashtag_counts_df)
    except Exception:
        # createDataFrame raises on empty micro-batches (no rows to infer a
        # schema from); those are expected during quiet windows, so they are
        # deliberately ignored.  Narrowed from a bare ``except:`` so that
        # KeyboardInterrupt/SystemExit still propagate, and the unused
        # ``e = sys.exc_info()[0]`` capture was dropped.
        pass
def send_df_to_dashboard(df):
    """Print the top hashtags as one comma-separated line of five names.

    Rows are ordered by descending count, ties broken alphabetically.  The
    first four names are printed with a trailing comma; every fifth gets a
    newline and the counter resets.  NOTE(review): with fewer than five rows
    the line ends with a dangling comma and no newline — presumably the
    upstream ``limit 5`` query always yields exactly five; confirm.
    """
    # top_tags = [str(t.hashtag) for t in df.select("hashtag").collect()]
    top_tags = ((str(x[0]), x[1]) for x in df.select('*').collect())
    # Sort by count descending (-x[1]) then hashtag ascending.
    sortedIterms = sorted(top_tags, key=lambda x: (-x[1], x[0]))
    k = 0
    for i in sortedIterms:
        k += 1
        if (k < 5):
            print(i[0], end=",")
        else:
            print(i[0])
            k = 0
# Wire up the streaming job: window size comes from argv[1], batch interval
# from argv[2]; tweets are read as ';'-separated lines from localhost:9009.
conf = SparkConf()
conf.setAppName("BigData")
sc = SparkContext(conf=conf)
ssc = StreamingContext(sc, int(sys.argv[2]))
ssc.checkpoint("/checkpoint_BIGDATA")
dataStream1 = ssc.socketTextStream("localhost", 9009)
# Sliding window of argv[1] seconds, sliding every 1 second.
dataStream = dataStream1.window(int(sys.argv[1]), 1)
# Field 7 of each ';'-separated record holds a comma-separated hashtag list.
words = dataStream.flatMap(lambda line: ((line.split(";")[7]).split(",")))
hashtags = words.map(lambda x: (x, 1))
# tags_totals=hashtags.updateStateByKey(aggregate_tweets_count)
tags_totals = hashtags.reduceByKey(add)
tags_totals.foreachRDD(process_rdd)
ssc.start()
ssc.awaitTermination(25)  # run for at most 25 seconds
ssc.stop()
| 33.047619 | 145 | 0.710855 |
01841b238714213808b042ecf349886b81f0a5fa | 5,609 | py | Python | examples/migration.py | czyzq/mstrio-py | b25fd19936b659d503a7eaaa96c8d0b4e118cb7c | [
"Apache-2.0"
] | 1 | 2022-02-15T13:18:04.000Z | 2022-02-15T13:18:04.000Z | examples/migration.py | czyzq/mstrio-py | b25fd19936b659d503a7eaaa96c8d0b4e118cb7c | [
"Apache-2.0"
] | null | null | null | examples/migration.py | czyzq/mstrio-py | b25fd19936b659d503a7eaaa96c8d0b4e118cb7c | [
"Apache-2.0"
] | null | null | null | """This is the demo script meant to show how administrator can perform a
migration of objects from one environment to another.
This script will not work without replacing parameters with real values.
Its basic goal is to present what can be done with this module and to
ease its usage.
`mstrio.server.migration` module is still work in progress. We plan to release all
functionalities in 03.2022
"""
from mstrio.connection import Connection
from mstrio.access_and_security.privilege import Privilege
from mstrio.users_and_groups.user import User
from mstrio.server.migration import (bulk_migrate_package, bulk_full_migration, Migration,
PackageConfig, PackageContentInfo, PackageSettings)
from mstrio.types import ObjectTypes
# Create connection to the source environment
source_base_url = "https://<>/MicroStrategyLibrary/api"
source_username = "some_username"
source_password = "some_password"
source_conn = Connection(source_base_url, source_username, source_password,
                         project_name="MicroStrategy Tutorial", login_mode=1)

# Create connection to the target environment
target_base_url = "https://<>/MicroStrategyLibrary/api"
target_username = "some_username"
target_password = "some_password"
target_conn = Connection(target_base_url, target_username, target_password,
                         project_name="MicroStrategy Tutorial", login_mode=1)

# Make sure the current user has the following privileges:
# 'Create package', id: 295
# 'Apply package', id: 296
# They can be granted by an admin with the following commands (by id):
user = User(source_conn, username='some_username')
Privilege(source_conn, id=295).add_to_user(user)
Privilege(source_conn, id=296).add_to_user(user)
# Or by name:
user2 = User(target_conn, username='some_username')
Privilege(target_conn, name='Create package').add_to_user(user2)
Privilege(target_conn, name='Apply package').add_to_user(user2)

# Create PackageConfig with information on which objects should be migrated
# and how.  The options are of type Enum with all possible values listed.
dossier_id = 'some dossier id'
document_id = 'some document id'
report_id = 'some report id'
package_settings = PackageSettings(
    PackageSettings.DefaultAction.USE_EXISTING,
    PackageSettings.UpdateSchema.RECAL_TABLE_LOGICAL_SIZE,
    PackageSettings.AclOnReplacingObjects.REPLACE,
    PackageSettings.AclOnNewObjects.KEEP_ACL_AS_SOURCE_OBJECT,
)
package_content_info = PackageContentInfo(
    id=report_id,
    type=ObjectTypes.REPORT_DEFINITION,
    action=PackageContentInfo.Action.USE_EXISTING,
    include_dependents=True,
)
package_content_info2 = PackageContentInfo(
    id=dossier_id,
    type=ObjectTypes.DOCUMENT_DEFINITION,
    action=PackageContentInfo.Action.USE_EXISTING,
    include_dependents=True,
)
package_config = PackageConfig(PackageConfig.PackageUpdateType.PROJECT, package_settings,
                               package_content_info)
package_config2 = PackageConfig(PackageConfig.PackageUpdateType.PROJECT, package_settings,
                                package_content_info2)

save_path = 'some/path/import_package.mmp'
custom_package_path = 'some/other/path/other_import_package.mmp'

# Create a Migration object that can use all of the functionalities
mig = Migration(
    save_path=save_path,
    source_connection=source_conn,
    target_connection=target_conn,
    configuration=package_config,
)
# Create a Migration object that can only use `create_package()`
mig2 = Migration(
    save_path=save_path,
    source_connection=source_conn,
    configuration=package_config2,
)
# Create a Migration object that can only be used for `migrate_package()`
mig3 = Migration(
    target_connection=target_conn,
    custom_package_path=custom_package_path,
)

# Short version
# Create import package and save it to the file
mig.create_package()
# or
mig2.create_package()
# Migrate downloaded package to the target environment.
# Create undo package and save it to file
mig.migrate_package()
# or
mig3.migrate_package()
# End to end migration
mig.perform_full_migration()

# Detailed version
# Create import package and save it to the file specified with the `save_path`
# argument during creation of the Migration object
mig.create_package()
mig2.create_package()
# Migrate downloaded package to the target environment.
# `migrate_package()` by default uses a package binary saved to a variable
# during `create_package()`
mig.migrate_package()
# or a custom package binary specified with `custom_package_path`
# during Migration object creation, if `create_package()` was not called.
mig3.migrate_package()
# It is also possible to respecify `custom_package_path` at this stage
mig3.migrate_package(custom_package_path='path/to/some_package.mmp')
# Perform end to end migration. `perform_full_migration()` encapsulates
# `create_package()` and `migrate_package()` from the previous steps.
# In order to be usable, the Migration object needs the `source_connection`,
# `configuration` and `target_connection` parameters filled during creation.
mig.perform_full_migration()
# Perform many full migrations at once
bulk_full_migration([mig])
# Perform many migrations at once
bulk_migrate_package([mig, mig3])
# If the migration needs to be reverted use `undo_migration()`
mig.undo_migration()
# or run `migrate_package()` with a path to the custom undo package
Migration(save_path=save_path, target_connection=target_conn,
          custom_package_path='path/to/some_package_undo.mmp').migrate_package()
# Status of the migration can be checked by reading the `status` property
status = mig.status
| 36.422078 | 90 | 0.780531 |
cf20ba15eecc7fa755fea4ca0804522f5407a856 | 5,915 | py | Python | zeeko/telemetry/tests/test_pipeline.py | alexrudy/Zeeko | fb4992724620ed548dd32c3201f79f5b7bebfe32 | [
"BSD-3-Clause"
] | 2 | 2017-07-23T22:06:05.000Z | 2020-02-28T07:54:15.000Z | zeeko/telemetry/tests/test_pipeline.py | alexrudy/Zeeko | fb4992724620ed548dd32c3201f79f5b7bebfe32 | [
"BSD-3-Clause"
] | 1 | 2020-10-29T19:54:06.000Z | 2020-10-29T19:54:06.000Z | zeeko/telemetry/tests/test_pipeline.py | alexrudy/Zeeko | fb4992724620ed548dd32c3201f79f5b7bebfe32 | [
"BSD-3-Clause"
] | null | null | null | import pytest
import h5py
import time
import zmq
import numpy as np
from ...tests.test_helpers import ZeekoTestBase
from ..pipeline import create_pipeline
@pytest.fixture
def chunksize():
    """Number of frames per telemetry chunk used throughout these tests."""
    return 10
@pytest.fixture
def pipeline(address, context, chunksize, filename):
    """A telemetry pipeline ioloop, cancelled again at teardown."""
    ioloop = create_pipeline(address, context, chunksize, filename)
    yield ioloop
    # Teardown: stop the ioloop even when the test body failed.
    ioloop.cancel()
class TestPipeline(ZeekoTestBase):
    """Tests for the pipeline."""

    def check_filename(self, filename, n, chunksize, nchunks, publisher):
        """Assert that output file *n* holds ``chunksize * nchunks`` valid
        frames for every key the publisher emitted."""
        with h5py.File(filename.format(n), 'r') as f:
            assert 'telemetry' in f
            mg = f['telemetry']
            for name in publisher.keys():
                assert name in mg
                g = mg[name]
                assert g['data'].shape[0] == (chunksize * nchunks)
                # Every stored frame must be flagged valid in the mask.
                assert (g['mask'][...] > 0).sum() == (chunksize * nchunks)

    def publish_chunks(self, publisher, pub, chunksize, n=1):
        """Publish *n* full chunks of frames, pausing briefly per frame so the
        receiving ioloop can keep up."""
        for i in range(n):
            for j in range(chunksize):
                publisher.update()
                publisher.publish(pub, flags=zmq.NOBLOCK)
                time.sleep(0.01)

    def run_n_chunks(self, pipeline, publisher, pub, chunksize, n, groups=1):
        """Run a pipeline through n chunks.

        Each group publishes *n* chunks, then pauses/republishes/resumes so
        the recorder rolls over to a fresh output file per group.
        """
        with self.running_loop(pipeline):
            for i in range(groups):
                self.publish_chunks(publisher, pub, chunksize, n=n)
                # Pause and resume to roll over files.
                pipeline.pause()
                self.publish_chunks(publisher, pub, chunksize, n=n)
                pipeline.resume()

    def test_multiple_pipeline_writes(self, pipeline, filename, pub, Publisher, chunksize):
        """Test the multi-write ability of the pipeline."""
        self.run_n_chunks(pipeline, Publisher, pub, chunksize, 6, 2)
        # Two file-rollover groups → files 0 and 1, six chunks each.
        for n in range(2):
            self.check_filename(filename, n, chunksize, 6, Publisher)

    def test_multiple_pipeline_writes_change_items(self, pipeline, filename, pub, Publisher, chunksize):
        """Roll over files while the set of published items shrinks mid-run."""
        with self.running_loop(pipeline):
            self.publish_chunks(Publisher, pub, chunksize, n=3)
            pipeline.pause()
            pipeline.resume()
            # Consume and remove a single item.
            Publisher.popitem()
            self.publish_chunks(Publisher, pub, chunksize, n=3)
        # NOTE(review): the check iterates the *current* (reduced) key set, so
        # the removed item is not validated in either file.
        for n in range(2):
            self.check_filename(filename, n, chunksize, 3, Publisher)
def test_create_pipeline(address, context, chunksize, filename):
    """Smoke test: a pipeline can be created and cancelled without running."""
    ioloop = create_pipeline(address, context, chunksize, filename)
    print("Created")
    ioloop.cancel(timeout=0.1)
    print("Canceled")
def test_run_pipeline(pipeline, Publisher, pub, filename, chunksize):
    """End-to-end run: publish one chunk's worth of frames and verify the
    recorder pushed a chunk, the writer fired, and the HDF5 file contents
    match the last published arrays."""
    with pipeline.running(timeout=0.1):
        print("Waiting on start.")
        pipeline.state.selected("RUN").wait(timeout=0.1)
        # Publish until the recorder signals a completed chunk (at most 2x
        # chunksize frames, to bound the loop).
        for i in range(chunksize * 2):
            if pipeline.record.pushed.is_set():
                break
            Publisher.update()
            Publisher.publish(pub, flags=zmq.NOBLOCK)
            time.sleep(0.1)
        print("Waiting on publishing events")
        pipeline.record.pushed.wait(timeout=3.0)
        pipeline.write.fired.wait(timeout=3.0)
    pipeline.state.selected("STOP").wait(timeout=1.0)
    print("Finished loop work.")
    print(pipeline.record.complete)
    for chunk in pipeline.record:
        print("{0}: {1}".format(chunk, pipeline.record[chunk].lastindex))
    assert pipeline.write.fired.is_set()
    assert pipeline.record.framecounter == len(Publisher) * (chunksize)
    with h5py.File(filename.format(0), 'r') as f:
        assert 'telemetry' in f
        mg = f['telemetry']
        for name in Publisher.keys():
            assert name in mg
            g = mg[name]
            assert g['data'].shape[0] == (chunksize)
            # Compute the last index with a valid mask entry.
            print(np.arange(g['mask'].shape[0])[g['mask'][...] > 0])
            li = (np.arange(g['mask'].shape[0])[g['mask'][...] > 0]).max()
            print(g['mask'][...])
            assert li == g['mask'].shape[0] - 1
            # The final stored frame must equal the publisher's last array.
            np.testing.assert_allclose(g['data'][li], Publisher[name].array)
def test_final_write(pipeline, Publisher, pub, filename, chunksize):
    """Verify a partially-filled chunk is flushed when the pipeline pauses.

    Publishes one full chunk, then three extra frames, then pauses; the
    recorder must push the partial chunk so the file holds two chunk slots
    and the framecounter reflects all chunksize + 3 frames per item.
    """
    with pipeline.running(timeout=0.1):
        pipeline.state.selected("RUN").wait(timeout=0.1)
        for i in range(chunksize * 2):
            if pipeline.record.pushed.is_set():
                break
            Publisher.update()
            Publisher.publish(pub, flags=zmq.NOBLOCK)
            time.sleep(0.1)
        pipeline.record.pushed.wait(timeout=3.0)
        pipeline.write.fired.wait(timeout=3.0)
        assert pipeline.record.pushed.is_set()
        # Clear the event so the pause-triggered push can be detected below.
        pipeline.record.pushed.clear()
        for i in range(3):
            Publisher.update()
            Publisher.publish(pub, flags=zmq.NOBLOCK)
            time.sleep(0.1)
        # Pausing must flush the partial (3-frame) chunk.
        pipeline.pause()
        pipeline.record.pushed.wait(timeout=3.0)
    pipeline.state.selected("STOP").wait(timeout=1.0)
    assert pipeline.state.selected("STOP").is_set()
    assert pipeline.write.fired.is_set()
    assert pipeline.record.framecounter == len(Publisher) * (chunksize + 3)
    with h5py.File(filename.format(0), 'r') as f:
        assert 'telemetry' in f
        mg = f['telemetry']
        for name in Publisher.keys():
            assert name in mg
            g = mg[name]
            # Two chunk slots were allocated: one full + one partial.
            assert g['data'].shape[0] == (chunksize * 2)
            assert np.max(g['mask']) == pipeline.record.framecount
| 38.16129 | 104 | 0.591885 |
eb8a7bd8a188145d153878bd6f18c9496a964887 | 8,017 | py | Python | zbunker/routes.py | CYBERDEVILZ/zbunker-website | a03e5b4772ee06e2ebb4b6a4ef2cae7587c08d37 | [
"MIT"
] | null | null | null | zbunker/routes.py | CYBERDEVILZ/zbunker-website | a03e5b4772ee06e2ebb4b6a4ef2cae7587c08d37 | [
"MIT"
] | null | null | null | zbunker/routes.py | CYBERDEVILZ/zbunker-website | a03e5b4772ee06e2ebb4b6a4ef2cae7587c08d37 | [
"MIT"
] | 1 | 2022-02-14T04:16:50.000Z | 2022-02-14T04:16:50.000Z | from logging import exception
from wtforms.validators import ValidationError
from zbunker import app, db, mail
from flask_mail import Message
from flask import render_template, redirect, flash, url_for, request, jsonify, session
from zbunker.forms import (
LoginForm,
RegistrationForm,
OTPForm,
ResetPasswordForm,
VerifyOTPForm,
)
from zbunker.models import User, OTPModel
from werkzeug.urls import url_parse
from flask_login import login_user, current_user, logout_user, login_required
import json
import re
from random import randint
import os
@app.route("/")
def landing():
    """Render the public landing page."""
    return render_template("landing.html", title="Landing")


@app.route("/home")
def home():
    """Render the home page with its video carousel slots."""
    # Placeholder video ids; presumably replaced by real content lookups — TODO confirm.
    videos = [1, 2, 3, 4, 5, 6, 7]
    return render_template("home.html", videos=videos)
@app.route("/prime", methods=["GET", "POST"])
def register():
    """Sign-up ("Join Prime") page: create the user and send them to login."""
    form = RegistrationForm()
    # Already logged in — nothing to register.
    if current_user.is_authenticated:
        return redirect(url_for("home"))
    if request.method == "POST":
        if form.validate_on_submit():
            user = User(username=form.username.data, email=form.email.data)
            user.set_password(form.password.data)  # stores a hash, not the password
            db.session.add(user)
            db.session.commit()
            return redirect(url_for("login"))
        else:
            # Re-render with validation errors; anchor=1 scrolls to the form.
            return render_template(
                "prime.html", anchor=1, title="Join Prime", form=form
            )
    return render_template("prime.html", title="Join Prime", form=form)
    # return redirect(url_for('register', _anchor='prime-anchor'))
@app.route("/login", methods=["GET", "POST"])
def login():
    """Authenticate a user and honour the ``next`` redirect parameter."""
    form = LoginForm()
    if current_user.is_authenticated:
        return redirect(url_for("home"))
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user and user.check_password(form.password.data):
            login_user(user, remember=form.remMe.data)
            nextPage = request.args.get(
                "next"
            )  # a feature to route to the next url (for login_required only)
            # Only follow same-site relative paths (netloc must be empty) to
            # avoid open-redirect abuse via the ``next`` parameter.
            if not nextPage or url_parse(nextPage).netloc != "":
                nextPage = url_for("home")
            return redirect(nextPage)
        else:
            flash(
                "Login Unsuccessful. Please check email and password", category="danger"
            )
    return render_template("login.html", form=form)
# Email Validation Route
@app.route("/forgot-password", methods=["GET", "POST"])
def forgotpassword():
    """Start the password-reset flow: e-mail an OTP to a registered address.

    For a known address an OTP row is stored and mailed; either way the
    submitted address is stashed in the session and the user is redirected to
    the OTP-entry page.  NOTE(review): redirecting for unknown addresses too
    looks like deliberate anti-enumeration behaviour — confirm.
    """
    form = OTPForm()
    if request.method == "POST":
        if form.validate_on_submit():
            email = User.query.filter_by(email=form.email.data).first()
            if email:
                otp = gen_otp()  # Generate OTP
                new_otp = OTPModel(email=form.email.data, otp=otp)
                db.session.add(new_otp)
                db.session.commit()
                msg = Message(
                    sender=os.environ.get("EMAIL_ADDRESS"),
                    recipients=[form.email.data],
                    subject="Forgot Password | ZBunker",
                )
                msg.html = render_template("forgot_password_email.html", otp=otp)
                try:
                    mail.send(msg)
                except Exception as e:
                    print(e)
                    flash(
                        "Something went wrong while sending the OTP.", category="danger"
                    )
            # Remember the address for the verify/reset steps.
            session["user_email"] = form.email.data
            return redirect(url_for("otpverify"))
    return render_template("forgot-password.html", form=form)
def gen_otp():
    """Generate a random six-character alphanumeric one-time password.

    Returns:
        str: six characters drawn from [a-zA-Z0-9].
    """
    # OTPs are security tokens, so draw from the cryptographically secure
    # ``secrets`` generator rather than the predictable ``random`` PRNG.
    import secrets
    keybase = "abcdefghijklmnopqrstuvwxyz1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    # NOTE: the previous version also printed the OTP to stdout, leaking it
    # into server logs; that debug print has been removed.
    return "".join(secrets.choice(keybase) for _ in range(6))
@app.route("/validate/otp", methods=["GET", "POST"])
def otpverify():
    """Validate the OTP e-mailed during the forgot-password flow.

    Reads the address stashed in the session by ``forgotpassword`` and
    compares the submitted code against the most recently issued OTP for it;
    redirects to the password-reset page on success.
    """
    form = VerifyOTPForm()
    try:
        email = session["user_email"]
    except KeyError:  # narrowed from a bare ``except:`` which hid real errors
        email = None
    if request.method == "POST":
        if form.validate_on_submit():
            if email:
                # Several OTP rows may exist per address — the latest wins.
                user = (
                    OTPModel.query.filter_by(email=email)
                    .order_by(OTPModel.id.desc())
                    .first()
                )
                # Guard: ``user`` is None when no OTP was ever issued for the
                # address; previously this raised AttributeError.  Message
                # fixed too — this form checks the OTP, not the password.
                if user is None or user.otp != form.OTP.data:
                    flash("Your OTP is incorrect. Try Again")
                else:
                    return redirect(url_for("resetpassword"))
    return render_template("enter-otp.html", form=form)
@app.route("/reset-password", methods=["GET", "POST"])
def resetpassword():
    """Final step of the forgot-password flow: set the new password.

    Requires ``user_email`` in the session (placed there by
    ``forgotpassword``); flashes a diagnostic when the session or user record
    is missing.
    """
    try:
        email = session["user_email"]
    except KeyError:  # narrowed from a bare ``except:`` which hid real errors
        email = None
    form = ResetPasswordForm()
    if request.method == "POST":
        if form.validate_on_submit():
            if email:
                user = User.query.filter_by(email=email).first()
                if user:
                    user.set_password(form.password.data)
                    db.session.commit()
                    return redirect(url_for("home"))
                else:
                    # UDE: user record disappeared between steps.
                    flash("something doesn't seem right (UDE)", category="danger")
            else:
                # SESSNONE: no e-mail stored in the session.
                flash("Something doesn't seem right. (SESSNONE)", category="danger")
    return render_template("reset-password.html", form=form)
@app.route("/logout")
def logout():
    """Log the current user out and return to the home page."""
    logout_user()
    return redirect(url_for("home"))


@app.route("/pt")
def pt():
    """Prototype/test page rendering the registration form."""
    form = RegistrationForm()
    return render_template("pt.html", form=form)


@app.route("/about")
def about():
    """Static about page."""
    title = "About"
    return render_template("about.html", title=title)


@app.route("/basic")
def basic():
    """Static basics page."""
    title = "Basic"
    return render_template("basic.html", title=title)


@app.route("/learn")  # change this
def filter():
    """Course filter grid page.

    NOTE(review): the function name shadows the builtin ``filter``; Flask
    only needs a unique endpoint name, but renaming would be safer.
    """
    title = "Filter"
    return render_template("filtergrid.html", title=title)
@app.route("/support-zbunker")
@app.route("/donate")
def zbunkerprime():
    """Donation page with a progress bar towards a member-count goal."""
    title = "Support Us"
    # get the no of users and calc the percentage (short goal)
    user = User.query.all()
    total = 20  # the target goal
    members = len(user)
    percentage = (members / total) * 100
    # CSS width of the filled portion of the progress bar.
    filler = str(percentage) + "%"
    marker = 86.5  # default marker offset; overwritten as a "...%" string below
    if percentage > 100:
        # Goal exceeded: pin the marker to the start.
        marker = "0%"
    else:
        # Marker sits at the unfilled remainder, capped at 86.5% so it stays
        # visible inside the bar.
        if 100 - percentage < 86.5:
            marker = str(100 - percentage) + "%"
        else:
            marker = str(86.5) + "%"
    return render_template(
        "donate.html",
        title=title,
        members=members,
        filler=filler,
        marker=marker,
        total=total,
    )
@app.route("/sponsors")
def sponsor():
    """List the usernames of every registered user as sponsors."""
    title = "Sponsors"
    users = User.query.all()
    sponsors = []
    for user in users:
        sponsors.append(user.username)
    return render_template("sponsors.html", title=title, sponsors=sponsors)


# --- Static course landing pages: each simply renders its template. ---

@app.route("/learn/ethical-hacking")
def eth():
    title = "Ethical Hacking"
    return render_template("eth.html", title=title)


@app.route("/learn/python-programming")
def pythonprogramming():
    title = "Python Programming"
    return render_template("python.html", title=title)


@app.route("/learn/git-essentials")
def git():
    title = "Git Essentials"
    return render_template("git.html", title=title)


@app.route("/learn/nmap")
def nmap():
    title = "Nmap"
    return render_template("nmap.html", title=title)


@app.route("/learn/mongodb")
def mongo():
    title = "Mongo DB"
    return render_template("mongo.html", title=title)


@app.route("/learn/linux-essentials")
def linux():
    title = "Linux"
    return render_template("linux.html", title=title)


@app.route("/learn/c-programming")
def c():
    title = "C Programming"
    return render_template("c.html", title=title)


@app.route("/learn/postgresql")
def postgres():
    title = "Learn PostgreSQL"
    return render_template("postgres.html", title=title)


@app.route("/contact-us")
def contact():
    """Static contact page."""
    title = "Contact Us"
    return render_template("contact.html", title=title)
| 26.199346 | 88 | 0.598104 |
9009fa9c92a74ec1910ef291f001c6c3e24f3b5e | 281 | py | Python | wsltools/utils/faker/providers/ssn/ko_KR/__init__.py | Symbo1/wsltools | 0b6e536fc85c707a1c81f0296c4e91ca835396a1 | [
"MIT"
] | 412 | 2020-04-16T08:11:58.000Z | 2022-02-02T19:49:53.000Z | wsltools/utils/faker/providers/ssn/ko_KR/__init__.py | Symbo1/wsltools | 0b6e536fc85c707a1c81f0296c4e91ca835396a1 | [
"MIT"
] | 1 | 2020-04-16T14:03:46.000Z | 2020-04-17T03:41:18.000Z | wsltools/utils/faker/providers/ssn/ko_KR/__init__.py | Symbo1/wsltools | 0b6e536fc85c707a1c81f0296c4e91ca835396a1 | [
"MIT"
] | 33 | 2020-04-16T08:48:53.000Z | 2021-10-20T04:39:29.000Z | # -*- coding: UTF-8 -*-
from __future__ import unicode_literals
from .. import Provider as SsnProvider
class Provider(SsnProvider):
    # Korean resident-registration-number templates: '#' is a random digit;
    # the literal digit after the dash (1/2) encodes the holder's sex and
    # century per the RRN format — TODO confirm against the base provider's
    # template expansion rules.
    ssn_formats = ("##0#0#-1######", "##0#1#-1######", "##0#2#-1######",
                   "##0#0#-2######", "##0#1#-2######", "##0#2#-2######")
| 28.1 | 72 | 0.462633 |
e37a6b1177c102760b336feefea726faa205d8fb | 69 | py | Python | ulfy/__init__.py | cdaversin/ulfy | 42dfe51c821acffbccc0df26d7b9549a5cb949eb | [
"MIT"
] | null | null | null | ulfy/__init__.py | cdaversin/ulfy | 42dfe51c821acffbccc0df26d7b9549a5cb949eb | [
"MIT"
] | null | null | null | ulfy/__init__.py | cdaversin/ulfy | 42dfe51c821acffbccc0df26d7b9549a5cb949eb | [
"MIT"
] | null | null | null | from sympy_expr import Expression
from ufl_sympy import ufl_to_sympy
| 23 | 34 | 0.884058 |
7ada3a902e7038c7713340ec590f332a8075264c | 5,242 | py | Python | visigoth/common/button/button.py | visigoths/visigoth | c5297148209d630f6668f0e5ba3039a8856d8320 | [
"MIT"
] | null | null | null | visigoth/common/button/button.py | visigoths/visigoth | c5297148209d630f6668f0e5ba3039a8856d8320 | [
"MIT"
] | 1 | 2021-01-26T16:55:48.000Z | 2021-09-03T15:29:14.000Z | visigoth/common/button/button.py | visigoths/visigoth | c5297148209d630f6668f0e5ba3039a8856d8320 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# visigoth: A lightweight Python3 library for rendering data visualizations in SVG
# Copyright (C) 2020-2021 Visigoth Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from visigoth.internal.svg import rectangle
from visigoth.common.text import Text
from visigoth.internal.utils.js import Js
from visigoth.common import DiagramElement
import os
class Button(DiagramElement):
    """
    Create a button

    Keyword Arguments:
        text(str) : the text to display in the button
        image(visigoth.common.Image) : an image element to display in the button
        padding(int) : define padding around button content in pixels
        font_height(int) : font size in pixels
        text_attributes(dict): a dict containing SVG name/value attributes
        url(str): url to link to from the text
        fill(str): the background hue for the button
        push_fill(str): the background hue for the button when pushed
        stroke(str): the stroke hue for the line around the button
        stroke_width(int): the stroke width for the line around the button
        r(int): the button corner radius in pixels
        click_value(str): the event value emitted when the button is clicked

    Notes:
        When pressed the button will generate an event with the value of the
        click_value parameter on channel "click"
    """

    def __init__(self, text=None, image=None, padding=2, font_height=24,
                 text_attributes=None, url=None, fill="white", push_fill="red",
                 stroke="black", stroke_width=1, r=5, click_value="click"):
        super(Button, self).__init__()
        # Bug fix: ``text_attributes`` previously defaulted to a mutable ``{}``
        # shared across every Button instance; use None as the sentinel.
        if text_attributes is None:
            text_attributes = {}
        self.text = None
        self.image = None
        # With neither text nor image supplied, fall back to a "?" label.
        if not text and not image:
            text = "?"
        if text:
            self.text = Text(text, font_height=font_height,
                             text_attributes=text_attributes, url=url)
        if image:
            self.image = image
        self.fill = fill
        self.push_fill = push_fill
        self.stroke = stroke
        self.stroke_width = stroke_width
        self.r = r
        self.padding = padding
        self.click_value = click_value
        self.initially_selected = False
        # Computed in build(); zero until then.
        self.width = 0
        self.height = 0

    def setInitiallySelected(self):
        """Mark this button as selected when the diagram first loads."""
        self.initially_selected = True

    def build(self, fmt):
        """Build child elements and compute the button size (html output only)."""
        if fmt != "html":
            return
        if self.text:
            self.text.build(fmt)
        if self.image:
            self.image.build(fmt)
        # Width: padding | text | padding | image | padding (as applicable);
        # height: max(text, image) plus vertical padding.
        self.width = self.padding
        self.height = 0
        if self.text:
            self.width += self.text.getWidth() + self.padding
        if self.image:
            self.width += self.image.getWidth() + self.padding
        if self.text:
            self.height += self.text.getHeight()
        if self.image and self.image.getHeight() > self.height:
            self.height = self.image.getHeight()
        self.height += 2 * self.padding

    def draw(self, d, cx, cy):
        """Render the button into diagram *d* centred at (cx, cy) and attach
        the client-side push/click behaviour."""
        if d.getFormat() != "html":
            return
        button_width = self.getWidth()
        button_height = self.getHeight()
        oy = cy - button_height / 2
        ox = cx - button_width / 2
        d.openGroup(self.getId())  # removed unused binding of the return value
        rect = rectangle(ox, oy, button_width, button_height, self.fill,
                         self.stroke, self.stroke_width, self.r, self.r)
        rect.addAttr("tabindex", "0")  # make the button keyboard-focusable
        rid = rect.getId()
        d.add(rect)
        if self.image and self.text:
            # Text on the left, image on the right, separated by padding.
            tx = ox + self.padding + self.text.getWidth() / 2
            self.text.draw(d, tx, cy)
            ix = ox + 2 * self.padding + self.text.getWidth() + self.image.getWidth() / 2
            self.image.draw(d, ix, cy)
        else:
            # Single child: centre it in the button.
            if self.text:
                self.text.draw(d, cx, cy)
            if self.image:
                self.image.draw(d, cx, cy)
        d.closeGroup()
        # Load the companion JS and register it with the push/click config.
        with open(os.path.join(os.path.split(__file__)[0], "button.js"), "r") as jsfile:
            jscode = jsfile.read()
        config = {"initially_selected": self.initially_selected, "rectangle": rid,
                  "fill": self.fill, "push_fill": self.push_fill,
                  "click_value": self.click_value}
        Js.registerJs(d, self, jscode, "button", cx, cy, config)

    def getWidth(self):
        """Return the built width in pixels (0 before build())."""
        return self.width

    def getHeight(self):
        """Return the built height in pixels (0 before build())."""
        return self.height
521f338df6f97388eb1fd7429d28f19e02fa753e | 6,236 | py | Python | Script/BOM formatter/test_xlsx.py | vitalii17/SVLib | 693273bc960f1acf2bb1dca27269a0ddebd75aa9 | [
"MIT"
] | null | null | null | Script/BOM formatter/test_xlsx.py | vitalii17/SVLib | 693273bc960f1acf2bb1dca27269a0ddebd75aa9 | [
"MIT"
] | null | null | null | Script/BOM formatter/test_xlsx.py | vitalii17/SVLib | 693273bc960f1acf2bb1dca27269a0ddebd75aa9 | [
"MIT"
] | null | null | null | import sys
import os
from enum import Enum, unique
from operator import itemgetter
from itertools import groupby
from openpyxl import load_workbook, Workbook
from openpyxl.styles import Font
project_name = "ProjName"
project_ver = "v1.0"
def main(args):
    """Read the raw BOM workbook, group rows, and write the formatted BOM.

    *args* is ``sys.argv``: argv[0] is the script path.  Extra arguments are
    currently ignored (the branch is a stub).
    """
    if len(args) > 1:
        pass  # TODO: command-line arguments are not handled yet
    else:
        # NOTE(review): script_path/script_dir are computed but never used.
        script_path = args[0]
        script_dir = os.path.dirname(script_path)
    # NOTE(review): bom_name is built but not used for the output filename.
    bom_name = project_name + " " + project_ver + " - BOM"
    raw_bom_reader = XlReader("_raw_bom.xlsx")
    # Drop the header row of the raw export.
    raw_bom = raw_bom_reader.raw_data[1:]
    parser = RawBomParser()
    # First group by column 6 (mount type), then within each group by
    # column 7 (board side).
    grouped = parser.separate(raw_bom, 6)
    raw_bom_dict = {}  # NOTE(review): immediately overwritten below
    raw_bom_dict = parser.separate(grouped, 7)
    bom = Bom(raw_bom_dict)
    xlwriter = XlWriter(file="BOM.xlsx")
    xlwriter.write_bom(bom.structured_bom)
@unique
class RowType(Enum):
    """Kinds of rows that can appear in the formatted BOM sheet."""
    BOMITEM = 1   # a regular component line
    HEADER = 2    # the column-title row
    CATEGORY = 3  # a section heading (e.g. "SMD, top side")
    SPACER = 4    # an empty separator row
class BomRow(object):
    """One output row of the BOM sheet together with its font styling."""

    def __init__(self, cels=(), row_type=RowType.BOMITEM, font_bold=False,
                 font_italic=False,
                 font_underline="none",
                 font_size_pt=11):
        """Capture the cell values, row category and font attributes.

        ``cels`` is the tuple of cell values left-to-right; ``row_type`` is a
        RowType member; the ``font_*`` arguments are applied to every cell of
        the row when written out.
        """
        super(BomRow, self).__init__()
        # Content.
        self.cels = cels
        self.row_type = row_type
        # Font styling shared by every cell of this row.
        self.font_size_pt = font_size_pt
        self.font_underline = font_underline
        self.font_italic = font_italic
        self.font_bold = font_bold
class Bom(object):
    """Assembles the final, ordered list of BomRow objects for the sheet."""

    def __init__(self, grouped_bom):
        """Build the structured BOM from a {mount_type: {side: rows}} dict.

        NOTE(review): the ``.get(..., "")`` fallbacks return a *string* when a
        mount-type key is missing, and ``"".get(...)`` would then raise
        AttributeError — presumably both "SMD" and "PTH" keys are always
        present; confirm (a ``{}`` default would be safer).
        """
        super(Bom, self).__init__()
        # Fixed header and category rows, styled bold / 12 pt.
        self.header = BomRow(cels=("Component Name", "Value", "Package", "Count", "Designator", "Notes"),
                             row_type=RowType.HEADER, font_bold=True, font_size_pt=12)
        self.cat_smd_top = BomRow(cels=("SMD, top side", ""),
                                  row_type=RowType.CATEGORY,
                                  font_bold=True, font_italic=True, font_underline="single", font_size_pt=12)
        self.cat_smd_bot = BomRow(cels=("SMD, bottom side", ""),
                                  row_type=RowType.CATEGORY,
                                  font_bold=True, font_italic=True, font_underline="single", font_size_pt=12)
        self.cat_pth_top = BomRow(cels=("PTH, top side", ""),
                                  row_type=RowType.CATEGORY,
                                  font_bold=True, font_italic=True, font_underline="single", font_size_pt=12)
        self.cat_pth_bot = BomRow(cels=("PTH, bottom side", ""),
                                  row_type=RowType.CATEGORY,
                                  font_bold=True, font_italic=True, font_underline="single", font_size_pt=12)
        self.empty_row = BomRow(cels=("", ""),
                                row_type=RowType.SPACER)
        self.bom_width = len(self.header.cels)
        # Component rows per mount-type/side section.
        self.smd_top = self._make_bom_row_list(grouped_bom.get("SMD", "").get("Top", ""))
        self.smd_bot = self._make_bom_row_list(grouped_bom.get("SMD", "").get("Bottom", ""))
        self.pth_top = self._make_bom_row_list(grouped_bom.get("PTH", "").get("Top", ""))
        self.pth_bot = self._make_bom_row_list(grouped_bom.get("PTH", "").get("Bottom", ""))
        # Final sheet order: header, then each section with a spacer between.
        self.structured_bom = [self.header] + \
            [self.cat_smd_top] + self.smd_top + [self.empty_row] + \
            [self.cat_smd_bot] + self.smd_bot + [self.empty_row] + \
            [self.cat_pth_top] + self.pth_top + [self.empty_row] + \
            [self.cat_pth_bot] + self.pth_bot

    def _make_bom_row_list(self, source):
        """Wrap each raw row tuple in a default-styled BOMITEM BomRow."""
        result = []
        for item in source:
            result = result + [BomRow(cels=item, row_type=RowType.BOMITEM)]
        return result
class RawBomParser(object):
def __init__(self):
super(RawBomParser, self).__init__()
def separate(self, content, key):
result_dict = {}
if isinstance(content, list):
sorted = self._sort(content, key)
result_dict = self._group(sorted, key)
elif isinstance(content, dict):
for item in content:
result_dict[item] = self._group(content[item], key)
return result_dict
def _sort(self, content, key):
result = content.copy()
result.sort(key=itemgetter(key))
return result
def _group(self, content, key):
grouped = groupby(content, key=itemgetter(key))
result = {}
for key, items in grouped:
result_list = []
for i in items:
result_list = result_list + [i, ]
result[key] = result_list
return result
class XlWriter(object):
def __init__(self, file):
super(XlWriter, self).__init__()
self.file = file
self.wb = Workbook()
self.ws = self.wb.active
self.ws.title = "Sheet1"
def write_bom(self, row_list):
for row_num, row_content in enumerate(row_list, start=1):
self._write_row(row_content, row_num)
self.wb.save(filename=self.file)
def _write_row(self, bom_row, row_num):
for col, val in enumerate(bom_row.cels, start=1):
self.ws.cell(row=row_num, column=col).value = val
self.ws.cell(row=row_num, column=col).font = Font(bold=bom_row.font_bold,
italic=bom_row.font_italic,
underline=bom_row.font_underline,
size=bom_row.font_size_pt)
class XlReader(object):
def __init__(self, file):
super(XlReader, self).__init__()
self.file = file
self.raw_data = []
wb = load_workbook(filename=self.file)
ws = wb.worksheets[0]
for row in ws.iter_rows(min_row=1):
raw_row = []
for item in row:
raw_row = raw_row + [item.value, ]
self.raw_data = self.raw_data + [raw_row, ]
if __name__ == '__main__':
main(sys.argv)
| 36.899408 | 110 | 0.544901 |
89078389d7765e94127cb47f9e31f162a59d40ba | 1,006 | py | Python | examples/supervised_learning/trees_ensembles/random_forest_regressor.py | cesar0205/machine_learning_algorithms | 7cd7c7295e2fb7357123cd81cb3e43c485930d74 | [
"Apache-2.0"
] | null | null | null | examples/supervised_learning/trees_ensembles/random_forest_regressor.py | cesar0205/machine_learning_algorithms | 7cd7c7295e2fb7357123cd81cb3e43c485930d74 | [
"Apache-2.0"
] | null | null | null | examples/supervised_learning/trees_ensembles/random_forest_regressor.py | cesar0205/machine_learning_algorithms | 7cd7c7295e2fb7357123cd81cb3e43c485930d74 | [
"Apache-2.0"
] | null | null | null | from supervised_learning.trees.random_forests import RandomForestRegressor
import numpy as np
import matplotlib.pyplot as plt
def main():
#100 samples of the sin(x) function in the range 0 - 2*pi in order to apply a regressor.
T = 100;
X = np.linspace(0, 2 * np.pi, T);
y = np.sin(X);
N = 30 #70 samples will be for training and 30 for testing.
test_ind = np.random.choice(T, N, replace=False);
train_ind = np.array([i for i in range(T) if i not in test_ind])
X_test = X[test_ind].reshape(-1, 1)
X_train = X[train_ind].reshape(-1, 1)
y_train = y[train_ind]
y_test = y[test_ind]
model = RandomForestRegressor()
model.fit(X_train, y_train)
print("Test score: ", model.score(X_test, y_test))
plt.plot(X, y)
pred = model.predict(X.reshape(T, 1))
plt.plot(X, pred, 'o')
plt.title("Random forest regressor. Fitting the sin(x) function")
plt.xlabel("x")
plt.ylabel("y")
plt.show()
if __name__ == "__main__":
main(); | 27.189189 | 92 | 0.647117 |
f3d48c0760d7ea7df3530bb67267d039f4b2d1c2 | 5,008 | py | Python | src/solution/my_edge_app/data-analytics/program/data_analytics.py | industrial-edge/Developer-Guide-Hands-on-App | 6dbf01f3945f138f01f75f1f9f3906ddd0420b10 | [
"Unlicense"
] | 10 | 2021-05-06T09:13:47.000Z | 2022-03-17T11:04:38.000Z | src/solution/my_edge_app/data-analytics/program/data_analytics.py | industrial-edge/Developer-Guide-Hands-on-App | 6dbf01f3945f138f01f75f1f9f3906ddd0420b10 | [
"Unlicense"
] | 6 | 2021-04-30T07:07:44.000Z | 2021-05-31T07:08:32.000Z | src/solution/my_edge_app/data-analytics/program/data_analytics.py | industrial-edge/Developer-Guide-Hands-on-App | 6dbf01f3945f138f01f75f1f9f3906ddd0420b10 | [
"Unlicense"
] | 6 | 2021-04-29T17:54:16.000Z | 2022-03-17T11:02:34.000Z | # Copyright 2021 Siemens AG
# This file is subject to the terms and conditions of the MIT License.
# See LICENSE file in the top-level directory.
"""Module Data Analytics.
This module consists of DataGenerator class and also the function to generate
bivariate normal distributed datasets.
"""
import paho.mqtt.client as mqtt
import sys
import logging
import statistics
import json
BROKER_ADDRESS='ie-databus'
BROKER_PORT=1883
MICRO_SERVICE_NAME = 'data-analytics'
""" Broker user and password for authtentification"""
USERNAME='edge'
PASSWORD='edge'
class DataAnalyzer():
"""
Data Analyzer connects to mqtt broker and waits for new
input data to calculate KPIs.
"""
def __init__(self, logger_parent):
""" Starts the instantiated object with a proper logger """
logger_name = '{}.{}'.format(logger_parent,__name__)
self.logger = logging.getLogger(logger_name)
self.client = mqtt.Client(MICRO_SERVICE_NAME)
self.client.on_connect = self.on_connect
self.client.on_disconnect = self.on_disconnect
self.client.on_subscribe = self.on_subscribe
self.client.on_message = self.on_message
self.topic_callback = dict()
def on_connect(self, client, userdata, flags, rc):
self.logger.info('Connected successfully to broker, response code {}'.format(rc))
def on_disconnect(self, client, userdata, rc):
if rc != 0:
self.logger.warning('Connection ended unexpectedly from broker, error code {}'.format(rc))
def on_subscribe(self, client, userdata, mid, granted_qos):
self.logger.info('successfully subscribed ')
def on_message(self, client, userdata, message):
self.logger.info('New message received on topic: {}'.format(message.topic))
# print(message.payload)
# load = message.payload
new_msg = json.loads(message.payload)
try:
self.topic_callback[message.topic](new_msg)
except Exception as err:
self.logger.error('An error ocurred while hanlding new message of {}: {}'.format(message.topic, err))
def subscribe(self, topic, callback):
""" Subscribes to given topic, assigning a callback function that
handles the received payload
:topic: string with the topic to subscribe
:callback: function to assign the payload received
"""
self.topic_callback.update({topic:callback})
self.client.subscribe(topic)
# Callback function for MQTT topic 'StandardKpis'
def standard_kpis(self, payload):
values = [key['value'] for key in payload]
# Calculate standard KPIs
result = {
'mean_result' : statistics.mean(values),
'median_result' : statistics.median(values),
'stddev_result' : statistics.stdev(values),
'name' : payload[0]['name'],
}
self.logger.info('mean calculated: {}'.format(statistics.mean(values)))
self.logger.info('median calculated: {}'.format(statistics.median(values)))
self.logger.info('stddev calculated: {} \n ======='.format(statistics.stdev(values)))
# publish results back on MQTT topic 'StandardKpiResult'
self.client.publish(topic='StandardKpiResult', payload=json.dumps(result))
return
# Callback function for MQTT topic 'Mean' subscription
def power_mean(self, payload):
self.logger.info('calculating power mean...')
current_values = [item['value'] for item in payload['current_drive3_batch']]
voltage_values = [item['value'] for item in payload['voltage_drive3_batch']]
# Calculate mean of power
power_batch_sum = sum([current*voltage for current, voltage in zip(current_values,voltage_values)])
power_mean = round((power_batch_sum/payload['sample_number']),2)
self.logger.info("power mean result: {}\n".format(power_mean))
result = {
'power_mean_result' : power_mean,
'name' : 'powerdrive3_mean',
}
# publish result back on MQTT topic 'MeanResult'
self.client.publish(topic='MeanResult', payload=json.dumps(result))
return
def handle_data(self):
"""
Starts the connection to MQTT broker and subscribe to respective
topics.
"""
self.logger.info('Preparing Mqtt Connection')
try:
self.client.username_pw_set(USERNAME, PASSWORD)
self.client.connect(BROKER_ADDRESS)
self.client.loop_start()
self.logger.info('Subscribe to topic StandardKpis')
self.subscribe(topic='StandardKpis', callback=self.standard_kpis)
self.logger.info('Subscripe to topic Mean')
self.subscribe(topic='Mean', callback=self.power_mean)
self.logger.info('Finished subscription to topics')
except Exception as e:
self.logger.error(str(e))
| 37.373134 | 113 | 0.653554 |
a122980a9ce8eb09db8c74e43804f478daed3512 | 167 | py | Python | faker/__init__.py | errbufferoverfl/faker | f518bfa3dcfd0433289fab69a1e5b972350fb4b4 | [
"MIT"
] | null | null | null | faker/__init__.py | errbufferoverfl/faker | f518bfa3dcfd0433289fab69a1e5b972350fb4b4 | [
"MIT"
] | null | null | null | faker/__init__.py | errbufferoverfl/faker | f518bfa3dcfd0433289fab69a1e5b972350fb4b4 | [
"MIT"
] | null | null | null | from faker.factory import Factory
from faker.generator import Generator
from faker.proxy import Faker
VERSION = '8.14.1'
__all__ = ('Factory', 'Generator', 'Faker')
| 20.875 | 43 | 0.754491 |
145e1ed9595c90d8a47f2e33ac5aa5cc1bbaf480 | 12,734 | py | Python | addons/io_scene_gltf2/blender/exp/gltf2_blender_image.py | doc22940/glTF-Blender-IO | 73d9e3c8b00eb8f2fe7b6ba3e62231e55a0573b4 | [
"Apache-2.0"
] | 1 | 2020-06-22T10:45:09.000Z | 2020-06-22T10:45:09.000Z | addons/io_scene_gltf2/blender/exp/gltf2_blender_image.py | MozillaReality/glTF-Blender-IO | 9dde650dc77fd1a1c9552a2773f0c3533f70683c | [
"Apache-2.0"
] | null | null | null | addons/io_scene_gltf2/blender/exp/gltf2_blender_image.py | MozillaReality/glTF-Blender-IO | 9dde650dc77fd1a1c9552a2773f0c3533f70683c | [
"Apache-2.0"
] | null | null | null | # Copyright 2018-2019 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bpy
import os
from typing import Optional
import numpy as np
import tempfile
import enum
class Channel(enum.IntEnum):
R = 0
G = 1
B = 2
A = 3
# These describe how an ExportImage's channels should be filled.
class FillImage:
"""Fills a channel with the channel src_chan from a Blender image."""
def __init__(self, image: bpy.types.Image, src_chan: Channel):
self.image = image
self.src_chan = src_chan
class FillWhite:
"""Fills a channel with all ones (1.0)."""
pass
class ExportImage:
"""Custom image class.
An image is represented by giving a description of how to fill its red,
green, blue, and alpha channels. For example:
self.fills = {
Channel.R: FillImage(image=bpy.data.images['Im1'], src_chan=Channel.B),
Channel.G: FillWhite(),
}
This says that the ExportImage's R channel should be filled with the B
channel of the Blender image 'Im1', and the ExportImage's G channel
should be filled with all 1.0s. Undefined channels mean we don't care
what values that channel has.
This is flexible enough to handle the case where eg. the user used the R
channel of one image as the metallic value and the G channel of another
image as the roughness, and we need to synthesize an ExportImage that
packs those into the B and G channels for glTF.
Storing this description (instead of raw pixels) lets us make more
intelligent decisions about how to encode the image.
"""
def __init__(self):
self.fills = {}
@staticmethod
def from_blender_image(image: bpy.types.Image):
export_image = ExportImage()
for chan in range(image.channels):
export_image.fill_image(image, dst_chan=chan, src_chan=chan)
return export_image
def fill_image(self, image: bpy.types.Image, dst_chan: Channel, src_chan: Channel):
self.fills[dst_chan] = FillImage(image, src_chan)
def fill_white(self, dst_chan: Channel):
self.fills[dst_chan] = FillWhite()
def is_filled(self, chan: Channel) -> bool:
return chan in self.fills
def empty(self) -> bool:
return not self.fills
def blender_image(self) -> Optional[bpy.types.Image]:
"""If there's an existing Blender image we can use,
returns it. Otherwise (if channels need packing),
returns None.
"""
if self.__on_happy_path():
for fill in self.fills.values():
return fill.image
return None
def __on_happy_path(self) -> bool:
# All src_chans match their dst_chan and come from the same image
return (
all(isinstance(fill, FillImage) for fill in self.fills.values()) and
all(dst_chan == fill.src_chan for dst_chan, fill in self.fills.items()) and
len(set(fill.image.name for fill in self.fills.values())) == 1
)
def encode(self, mime_type: Optional[str]) -> bytes:
self.file_format = {
"image/jpeg": "JPEG",
"image/png": "PNG"
}.get(mime_type, "PNG")
# Happy path = we can just use an existing Blender image
if self.__on_happy_path():
return self.__encode_happy()
# Unhappy path = we need to create the image self.fills describes.
return self.__encode_unhappy()
def __encode_happy(self) -> bytes:
return self.__encode_from_image(self.blender_image())
def __encode_unhappy(self) -> bytes:
result = self.__encode_unhappy_with_compositor()
if result is not None:
return result
return self.__encode_unhappy_with_numpy()
def __encode_unhappy_with_compositor(self) -> bytes:
# Builds a Compositor graph that will build the correct image
# from the description in self.fills.
#
# [ Image ]->[ Sep RGBA ] [ Comb RGBA ]
# [ src_chan]--->[dst_chan ]--->[ Output ]
#
# This is hacky, but is about 4x faster than using
# __encode_unhappy_with_numpy. There are some caveats though:
# First, we can't handle pre-multiplied alpha.
if Channel.A in self.fills:
return None
# Second, in order to get the same results as using image.pixels
# (which ignores the colorspace), we need to use the 'Non-Color'
# colorspace for all images and set the output device to 'None'. But
# setting the colorspace on dirty images discards their changes.
# So we can't handle dirty images that aren't already 'Non-Color'.
for fill in self.fills:
if isinstance(fill, FillImage):
if fill.image.is_dirty:
if fill.image.colorspace_settings.name != 'Non-Color':
return None
tmp_scene = None
orig_colorspaces = {} # remembers original colorspaces
try:
tmp_scene = bpy.data.scenes.new('##gltf-export:tmp-scene##')
tmp_scene.use_nodes = True
node_tree = tmp_scene.node_tree
for node in node_tree.nodes:
node_tree.nodes.remove(node)
out = node_tree.nodes.new('CompositorNodeComposite')
comb_rgba = node_tree.nodes.new('CompositorNodeCombRGBA')
for i in range(4):
comb_rgba.inputs[i].default_value = 1.0
node_tree.links.new(out.inputs['Image'], comb_rgba.outputs['Image'])
img_size = None
for dst_chan, fill in self.fills.items():
if not isinstance(fill, FillImage):
continue
img = node_tree.nodes.new('CompositorNodeImage')
img.image = fill.image
sep_rgba = node_tree.nodes.new('CompositorNodeSepRGBA')
node_tree.links.new(sep_rgba.inputs['Image'], img.outputs['Image'])
node_tree.links.new(comb_rgba.inputs[dst_chan], sep_rgba.outputs[fill.src_chan])
if fill.image.colorspace_settings.name != 'Non-Color':
if fill.image.name not in orig_colorspaces:
orig_colorspaces[fill.image.name] = \
fill.image.colorspace_settings.name
fill.image.colorspace_settings.name = 'Non-Color'
if img_size is None:
img_size = fill.image.size[:2]
else:
# All images should be the same size (should be
# guaranteed by gather_texture_info)
assert img_size == fill.image.size[:2]
width, height = img_size or (1, 1)
return _render_temp_scene(
tmp_scene=tmp_scene,
width=width,
height=height,
file_format=self.file_format,
color_mode='RGB',
colorspace='None',
)
finally:
for img_name, colorspace in orig_colorspaces.items():
bpy.data.images[img_name].colorspace_settings.name = colorspace
if tmp_scene is not None:
bpy.data.scenes.remove(tmp_scene, do_unlink=True)
def __encode_unhappy_with_numpy(self):
# Read the pixels of each image with image.pixels, put them into a
# numpy, and assemble the desired image that way. This is the slowest
# method, and the conversion to Python data eats a lot of memory, so
# it's only used as a last resort.
result = None
img_fills = {
chan: fill
for chan, fill in self.fills.items()
if isinstance(fill, FillImage)
}
# Loop over images instead of dst_chans; ensures we only decode each
# image once even if it's used in multiple channels.
image_names = list(set(fill.image.name for fill in img_fills.values()))
for image_name in image_names:
image = bpy.data.images[image_name]
if result is None:
result = np.ones((image.size[0], image.size[1], 4), np.float32)
# Images should all be the same size (should be guaranteed by
# gather_texture_info).
assert (image.size[0], image.size[1]) == result.shape[:2]
# Slow and eats all your memory.
pixels = np.array(image.pixels[:])
pixels = pixels.reshape((image.size[0], image.size[1], image.channels))
for dst_chan, img_fill in img_fills.items():
if img_fill.image == image:
result[:, :, dst_chan] = pixels[:, :, img_fill.src_chan]
pixels = None # GC this please
if result is None:
# No ImageFills; use a 1x1 white pixel
result = np.array([1.0, 1.0, 1.0, 1.0])
result = result.reshape((1, 1, 4))
return self.__encode_from_numpy_array(result)
def __encode_from_numpy_array(self, array: np.ndarray) -> bytes:
tmp_image = None
try:
tmp_image = bpy.data.images.new(
"##gltf-export:tmp-image##",
width=array.shape[0],
height=array.shape[1],
alpha=Channel.A in self.fills,
)
assert tmp_image.channels == 4 # 4 regardless of the alpha argument above.
# Also slow and eats all your memory.
tmp_image.pixels = array.flatten().tolist()
return _encode_temp_image(tmp_image, self.file_format)
finally:
if tmp_image is not None:
bpy.data.images.remove(tmp_image, do_unlink=True)
def __encode_from_image(self, image: bpy.types.Image) -> bytes:
# See if there is an existing file we can use.
if image.source == 'FILE' and image.file_format == self.file_format and \
not image.is_dirty:
if image.packed_file is not None:
return image.packed_file.data
else:
src_path = bpy.path.abspath(image.filepath_raw)
if os.path.isfile(src_path):
with open(src_path, 'rb') as f:
return f.read()
# Copy to a temp image and save.
tmp_image = None
try:
tmp_image = image.copy()
tmp_image.update()
if image.is_dirty:
tmp_image.pixels = image.pixels[:]
return _encode_temp_image(tmp_image, self.file_format)
finally:
if tmp_image is not None:
bpy.data.images.remove(tmp_image, do_unlink=True)
def _encode_temp_image(tmp_image: bpy.types.Image, file_format: str) -> bytes:
with tempfile.TemporaryDirectory() as tmpdirname:
tmpfilename = tmpdirname + '/img'
tmp_image.filepath_raw = tmpfilename
tmp_image.file_format = file_format
tmp_image.save()
with open(tmpfilename, "rb") as f:
return f.read()
def _render_temp_scene(
tmp_scene: bpy.types.Scene,
width: int,
height: int,
file_format: str,
color_mode: str,
colorspace: str,
) -> bytes:
"""Set render settings, render to a file, and read back."""
tmp_scene.render.resolution_x = width
tmp_scene.render.resolution_y = height
tmp_scene.render.resolution_percentage = 100
tmp_scene.display_settings.display_device = colorspace
tmp_scene.render.image_settings.color_mode = color_mode
tmp_scene.render.dither_intensity = 0.0
# Turn off all metadata (stuff like use_stamp_date, etc.)
for attr in dir(tmp_scene.render):
if attr.startswith('use_stamp_'):
setattr(tmp_scene.render, attr, False)
with tempfile.TemporaryDirectory() as tmpdirname:
tmpfilename = tmpdirname + "/img"
tmp_scene.render.filepath = tmpfilename
tmp_scene.render.use_file_extension = False
tmp_scene.render.image_settings.file_format = file_format
bpy.ops.render.render(scene=tmp_scene.name, write_still=True)
with open(tmpfilename, "rb") as f:
return f.read()
| 37.125364 | 96 | 0.612847 |
90cf8b02294adc91a11d9fbe06eaa11c14d32e38 | 2,961 | py | Python | mahotas/tests/test_bbox.py | langner/mahotas | 1840b629fab325e7fb700f90cf2b662ff35e4205 | [
"BSL-1.0"
] | 1 | 2021-02-18T00:37:56.000Z | 2021-02-18T00:37:56.000Z | mahotas/tests/test_bbox.py | langner/mahotas | 1840b629fab325e7fb700f90cf2b662ff35e4205 | [
"BSL-1.0"
] | null | null | null | mahotas/tests/test_bbox.py | langner/mahotas | 1840b629fab325e7fb700f90cf2b662ff35e4205 | [
"BSL-1.0"
] | 1 | 2019-10-30T19:26:12.000Z | 2019-10-30T19:26:12.000Z | import numpy as np
import mahotas
import mahotas as mh
from mahotas import bbox
from nose.tools import raises
def test_croptobbox():
X,Y = np.meshgrid(np.arange(16)-8,np.arange(16)-8)
ball = ((X**2+Y**2) < 32).astype(np.uint8)
assert mahotas.croptobbox(ball).sum() == ball.sum()
assert mahotas.croptobbox(ball,border=2).sum() == ball.sum()
assert mahotas.croptobbox(ball,border=256).sum() == ball.sum()
assert mahotas.croptobbox(ball,border=256).size == ball.size
assert mahotas.croptobbox(ball.T).sum() == ball.sum()
assert mahotas.croptobbox(ball[::2]).sum() == ball[::2].sum()
assert mahotas.croptobbox(ball[::2].T).sum() == ball[::2].sum()
assert mahotas.croptobbox(ball.T, border=2).sum() == ball.sum()
assert mahotas.croptobbox(ball.T, border=256).sum() == ball.sum()
assert mahotas.croptobbox(ball.T, border=256).size == ball.size
def test_croptobbox_3d():
YXZ = np.indices((32,32,64), float)
YXZ -= 8
Y,X,Z = YXZ
ball = ((X**2+Y**2+Z**2) < 64).astype(np.uint8)
assert np.sum(ball) == np.sum(mh.croptobbox(ball))
def test_bbox_empty():
assert mahotas.bbox(np.zeros((), np.bool)).shape == (0,)
def test_bbox_3():
YXZ = np.indices((32,32,64), float)
YXZ -= 8
Y,X,Z = YXZ
ball = ((X**2+Y**2+Z**2) < 64).astype(np.uint8)
m0,M0,m1,M1,m2,M2 = mahotas.bbox(ball)
Y,X,Z = np.where(ball)
assert np.all(m0 <= Y)
assert np.all(m1 <= X)
assert np.all(m2 <= Z)
assert np.all(M0 > Y)
assert np.all(M1 > X)
assert np.all(M2 > Z)
def test_bbox():
img = np.zeros((10,10), np.uint16)
a0,b0,a1,b1 = bbox(img)
assert a0 == b0
assert a1 == b1
img[4,2]=1
a0,b0,a1,b1=bbox(img)
assert a0 == 4
assert b0 == 5
assert a1 == 2
assert b1 == 3
img[6,8]=1
a0,b0,a1,b1=bbox(img)
assert a0 == 4
assert b0 == 7
assert a1 == 2
assert b1 == 9
img[7,7]=1
a0,b0,a1,b1=bbox(img)
assert a0 == 4
assert b0 == 8
assert a1 == 2
assert b1 == 9
c0,d0,c1,d1=bbox(img, 0)
assert c0 == a0
assert b0 == d0
assert c1 == a1
assert b1 == d1
c0,d0,c1,d1=bbox(img, 1)
assert c0 != a0
assert b0 != d0
assert c1 != a1
assert b1 != d1
def test_as_slice():
YXZ = np.indices((32,32,64), float)
YXZ -= 8
Y,X,Z = YXZ
ball = ((X**2+Y**2+Z**2) < 64).astype(np.uint8)
s = bbox(ball, as_slice=True)
assert ball[s].sum() == ball.sum()
def test_slice_border():
'Test bbox(slice=True, border=6) in 2D & 3D'
f = np.zeros((32,32), bool)
f[8:8] = 1
m0,M0, m1,M1 = mh.bbox(f, border=6, as_slice=False)
sl = mh.bbox(f, border=6, as_slice=True)
assert np.all(f[sl] == f[m0:M0, m1:M1])
f = np.zeros((32,32, 32), bool)
f[8:8,12:15] = 1
m0,M0, m1,M1, m2, M2 = mh.bbox(f, border=6, as_slice=False)
sl = mh.bbox(f, border=6, as_slice=True)
assert np.all(f[sl] == f[m0:M0, m1:M1, m2:M2])
| 26.20354 | 69 | 0.57717 |
0b9bea81e4f27a041795c2e005f39cf1ddead028 | 42,849 | py | Python | main_benchmark.py | peterfeifanchen/scGNN | 4ef9013ad0f44f9f51708e9bb60e5138f5706593 | [
"MIT"
] | 2 | 2022-01-07T04:37:33.000Z | 2022-01-08T04:24:00.000Z | main_benchmark.py | peterfeifanchen/scGNN | 4ef9013ad0f44f9f51708e9bb60e5138f5706593 | [
"MIT"
] | null | null | null | main_benchmark.py | peterfeifanchen/scGNN | 4ef9013ad0f44f9f51708e9bb60e5138f5706593 | [
"MIT"
] | 2 | 2021-07-27T05:54:50.000Z | 2021-08-11T05:54:16.000Z | import time
import resource
import datetime
import argparse
import sys
import numpy as np
import pickle as pkl
import networkx as nx
import scipy.sparse as sp
import torch
from torch.utils.data import Dataset, DataLoader
from torch import nn, optim
from torch.nn import functional as F
from sklearn.decomposition import PCA
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.cluster import KMeans, SpectralClustering, AffinityPropagation, AgglomerativeClustering, Birch, DBSCAN, FeatureAgglomeration, MeanShift, OPTICS
from model import AE, VAE
from util_function import *
from graph_function import *
from benchmark_util import *
from gae_embedding import GAEembedding, measure_clustering_results, test_clustering_benchmark_results
# from LTMG_R import *
import pandas as pd
# Benchmark for both celltype identification and imputation, needs Preprocessing_main.py first, then proceed by this script.
# Command-line interface. Option groups follow the scGNN benchmark pipeline:
# training schedule, EM convergence, imputation, cell-graph construction,
# debugging, LTMG regularization, clustering, benchmarking, and GAE embedding.
# NOTE(review): several help strings below previously disagreed with the actual
# defaults; they have been corrected to match the `default=` values.
parser = argparse.ArgumentParser(
    description='main benchmark for scRNA with timer and mem')
parser.add_argument('--datasetName', type=str, default='1.Biase',
                    help='Dataset: benchmarks: 9.Chung/11.Kolodziejczyk/12.Klein/13.Zeisel')
parser.add_argument('--batch-size', type=int, default=12800, metavar='N',
                    help='input batch size for training (default: 12800)')
parser.add_argument('--Regu-epochs', type=int, default=500, metavar='N',
                    help='number of epochs to train in Feature Autoencoder initially (default: 500)')
parser.add_argument('--EM-epochs', type=int, default=200, metavar='N',
                    help='number of epochs to train Feature Autoencoder in iteration EM (default: 200)')
parser.add_argument('--EM-iteration', type=int, default=10, metavar='N',
                    help='number of iteration in total EM iteration (default: 10)')
parser.add_argument('--EMtype', type=str, default='EM',
                    help='EM process type: celltypeEM or EM (default: EM)')
parser.add_argument('--alpha', type=float, default=0.5,
                    help='iteration alpha (default: 0.5) to control the converge rate, should be a number between 0~1')
parser.add_argument('--converge-type', type=str, default='celltype',
                    help='type of converge: celltype/graph/both/either (default: celltype) ')
parser.add_argument('--converge-graphratio', type=float, default=0.01,
                    help='ratio of graph change in EM iteration (default: 0.01), 0-1')
parser.add_argument('--converge-celltyperatio', type=float, default=0.95,
                    help='ratio of cell type change in EM iteration (default: 0.95), 0-1')
parser.add_argument('--cluster-epochs', type=int, default=200, metavar='N',
                    help='number of epochs in Cluster Autoencoder training (default: 200)')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='Disable GPU training. If you only have CPU, add --no-cuda in the command line')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--regulized-type', type=str, default='LTMG',
                    help='regulized type (default: LTMG) in EM, otherwise: noregu/LTMG/LTMG01')
parser.add_argument('--reduction', type=str, default='sum',
                    help='reduction type: mean/sum, default(sum)')
parser.add_argument('--model', type=str, default='AE',
                    help='VAE/AE (default: AE)')
parser.add_argument('--gammaPara', type=float, default=0.1,
                    help='regulized parameter (default: 0.1)')
parser.add_argument('--alphaRegularizePara', type=float, default=0.9,
                    help='regulized parameter (default: 0.9)')
# imputation related
parser.add_argument('--EMregulized-type', type=str, default='Celltype',
                    help='regulized type (default: Celltype) in EM, otherwise: noregu/Graph/GraphR/Celltype/CelltypeR')
# parser.add_argument('--adjtype', type=str, default='unweighted',
#                     help='adjtype (default: weighted) otherwise: unweighted')
# parser.add_argument('--aePara', type=str, default='start',
#                     help='whether use parameter of first feature autoencoder: start/end/cont')
parser.add_argument('--gammaImputePara', type=float, default=0.0,
                    help='regulized parameter (default: 0.0)')
parser.add_argument('--graphImputePara', type=float, default=0.3,
                    help='graph parameter (default: 0.3)')
parser.add_argument('--celltypeImputePara', type=float, default=0.1,
                    help='celltype parameter (default: 0.1)')
parser.add_argument('--L1Para', type=float, default=1.0,
                    help='L1 regulized parameter (default: 1.0)')
parser.add_argument('--L2Para', type=float, default=0.0,
                    help='L2 regulized parameter (default: 0.0)')
parser.add_argument('--EMreguTag', action='store_true', default=False,
                    help='whether regu in EM process')
parser.add_argument('--discreteTag', action='store_true', default=False,
                    help='whether input is raw or 0/1 (default: False)')
# Build cell graph
parser.add_argument('--k', type=int, default=10,
                    help='parameter k in KNN graph (default: 10)')
parser.add_argument('--knn-distance', type=str, default='euclidean',
                    help='KNN graph distance type: euclidean/cosine/correlation (default: euclidean)')
parser.add_argument('--prunetype', type=str, default='KNNgraphStatsSingleThread',
                    help='prune type, KNNgraphStats/KNNgraphML/KNNgraphStatsSingleThread (default: KNNgraphStatsSingleThread)')
parser.add_argument('--zerofillFlag', action='store_true', default=False,
                    help='fill zero or not before EM process (default: False)')
# Debug related
parser.add_argument('--precisionModel', type=str, default='Float',
                    help='Single Precision/Double precision: Float/Double (default:Float)')
parser.add_argument('--coresUsage', type=str, default='1',
                    help='how many cores used: all/1/... (default:1)')
parser.add_argument('--npyDir', type=str, default='npyGraphTest/',
                    help='save npy results in directory')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
                    help='how many batches to wait before logging training status')
parser.add_argument('--saveinternal', action='store_true', default=False,
                    help='whether save internal iteration results or not')
parser.add_argument('--debuginfo', action='store_true', default=False,
                    help='whether output debuginfo in cpu time and memory info')
# LTMG related
parser.add_argument('--inferLTMGTag', action='store_true', default=False,
                    help='Whether infer LTMG')
parser.add_argument('--LTMGDir', type=str, default='/home/jwang/data/scData/',
                    help='directory of LTMGDir, default:(/home/jwang/data/scData/)')
parser.add_argument('--expressionFile', type=str, default='Biase_expression.csv',
                    help='expression File in csv')
parser.add_argument('--ltmgFile', type=str, default='ltmg.csv',
                    help='LTMG File in csv')
# Clustering related
parser.add_argument('--useGAEembedding', action='store_true', default=False,
                    help='whether use GAE embedding for clustering(default: False)')
parser.add_argument('--useBothembedding', action='store_true', default=False,
                    help='whether use both embedding and Graph embedding for clustering(default: False)')
parser.add_argument('--n-clusters', default=20, type=int,
                    help='number of clusters if predefined for KMeans/Birch ')
parser.add_argument('--clustering-method', type=str, default='LouvainK',
                    help='Clustering method: Louvain/KMeans/SpectralClustering/AffinityPropagation/AgglomerativeClustering/AgglomerativeClusteringK/Birch/BirchN/MeanShift/OPTICS/LouvainK/LouvainB')
parser.add_argument('--maxClusterNumber', type=int, default=30,
                    help='max cluster for celltypeEM without setting number of clusters (default: 30)')
parser.add_argument('--minMemberinCluster', type=int, default=5,
                    help='min number of members in each cluster for celltypeEM (default: 5)')
parser.add_argument('--resolution', type=str, default='auto',
                    help='the number of resolution on Louvain (default: auto/0.5/0.8)')
# Benchmark related
parser.add_argument('--benchmark', type=str, default='/home/jwang/data/scData/13.Zeisel/Zeisel_cell_label.csv',
                    help='the benchmark file of celltype (default: /home/jwang/data/scData/13.Zeisel/Zeisel_cell_label.csv)')
# Agglomerative-clustering related
parser.add_argument('--linkage', type=str, default='ward',
                    help='linkage should be: ward, average, complete, single')
# GAE related
parser.add_argument('--GAEmodel', type=str,
                    default='gcn_vae', help="models used")
parser.add_argument('--GAEepochs', type=int, default=200,
                    help='Number of epochs to train.')
parser.add_argument('--GAEhidden1', type=int, default=32,
                    help='Number of units in hidden layer 1.')
parser.add_argument('--GAEhidden2', type=int, default=16,
                    help='Number of units in hidden layer 2.')
parser.add_argument('--GAElr', type=float, default=0.01,
                    help='Initial learning rate.')
parser.add_argument('--GAEdropout', type=float, default=0.,
                    help='Dropout rate (1 - keep probability).')
parser.add_argument('--GAElr_dw', type=float, default=0.001,
                    help='Initial learning rate for regularization.')
# Start Impute or not, only used for evaluating Impute
parser.add_argument('--imputeMode', default=False, action='store_true',
                    help='impute or not (default: False). Caution: usually change npyDir if set imputeMode as true')
parser.add_argument('--dropoutRatio', type=float, default=0.1,
                    help='dropout ratio for impute (default: 0.1)')
args = parser.parse_args()
# Use CUDA only when a GPU is available and the user did not pass --no-cuda.
args.cuda = not args.no_cuda and torch.cuda.is_available()
# TODO
# As we have lots of parameters, should check args
checkargs(args)
# Seed the torch RNG so runs with the same --seed are reproducible.
torch.manual_seed(args.seed)
device = torch.device("cuda" if args.cuda else "cpu")
print('Using device:'+str(device))
# Restrict CPU thread count unless the user explicitly asked for all cores.
if not args.coresUsage == 'all':
    torch.set_num_threads(int(args.coresUsage))
# DataLoader kwargs: pinned memory and a worker process only when on GPU.
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
print(args)
# Wall-clock reference used by debuginfoStr for elapsed-time reporting.
start_time = time.time()
print('---0:00:00---scRNA starts loading.')
# Load the expression data: the plain benchmark dataset for celltype
# identification, or a dropout-simulated version when evaluating imputation.
if not args.imputeMode:
    # if args.discreteTag:
    #     scData = scBenchDataset(args.datasetName, args.discreteTag)
    # else:
    #     scData = scBenchDataset(args.datasetName, args.discreteTag, transform=logtransform)
    scData = scBenchDataset(args.datasetName, args.discreteTag)
else:
    # if args.discreteTag:
    #     scData = scDatasetDropout(args.datasetName, args.discreteTag, args.dropoutRatio)
    # else:
    #     scData = scDatasetDropout(args.datasetName, args.discreteTag, args.dropoutRatio, transform=logtransform)
    scData = scDatasetDropout(datasetName=args.datasetName,
                              discreteTag=args.discreteTag, ratio=args.dropoutRatio, seed=args.seed)
train_loader = DataLoader(
    scData, batch_size=args.batch_size, shuffle=False, **kwargs)
# LTMG regulation matrix: either infer it now (via the R implementation),
# or read a precomputed file relative to --LTMGDir.
if args.inferLTMGTag:
    # run LTMG in R
    runLTMG(args.LTMGDir+'test/'+args.expressionFile, args.LTMGDir+'test/')
    ltmgFile = args.ltmgFile
else:
    ltmgFile = args.datasetName+'/T2000_UsingOriginalMatrix/T2000_LTMG.txt'
regulationMatrix = readLTMGnonsparse(args.LTMGDir, ltmgFile)
regulationMatrix = torch.from_numpy(regulationMatrix)
# Match the regulation matrix dtype to the requested model precision.
if args.precisionModel == 'Double':
    regulationMatrix = regulationMatrix.type(torch.DoubleTensor)
elif args.precisionModel == 'Float':
    regulationMatrix = regulationMatrix.type(torch.FloatTensor)
# Original
# Feature autoencoder; input/output dimension is the number of genes
# (columns of the expression matrix).
if args.model == 'VAE':
    # model = VAE(dim=scData.features.shape[1]).to(device)
    model = VAE2d(dim=scData.features.shape[1]).to(device)
elif args.model == 'AE':
    model = AE(dim=scData.features.shape[1]).to(device)
if args.precisionModel == 'Double':
    model = model.double()
optimizer = optim.Adam(model.parameters(), lr=1e-3)
# Benchmark
# Ground-truth labels: the first CSV column is taken as the per-cell celltype.
bench_pd = pd.read_csv(args.benchmark, index_col=0)
# t1=pd.read_csv('/home/jwang/data/scData/13.Zeisel/Zeisel_cell_label.csv',index_col=0)
bench_celltype = bench_pd.iloc[:, 0].to_numpy()
def debuginfoStr(info):
if args.debuginfo:
print('---'+str(datetime.timedelta(seconds=int(time.time()-start_time)))+'---'+info)
mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
print('Mem consumption: '+str(mem))
debuginfoStr('scRNA has been successfully loaded')
# TODO: have to improve save npy
def train(epoch, train_loader=train_loader, EMFlag=False, taskType='celltype'):
'''
EMFlag indicates whether in EM processes.
If in EM, use regulized-type parsed from program entrance,
Otherwise, noregu
taskType: celltype or imputation
'''
model.train()
train_loss = 0
# for batch_idx, (data, _) in enumerate(train_loader):
# for batch_idx, data in enumerate(train_loader):
for batch_idx, (data, dataindex) in enumerate(train_loader):
if args.precisionModel == 'Double':
data = data.type(torch.DoubleTensor)
elif args.precisionModel == 'Float':
data = data.type(torch.FloatTensor)
data = data.to(device)
regulationMatrixBatch = regulationMatrix[dataindex, :]
regulationMatrixBatch = regulationMatrixBatch.to(device)
optimizer.zero_grad()
if args.model == 'VAE':
recon_batch, mu, logvar, z = model(data)
# Original
# loss = loss_function(recon_batch, data, mu, logvar)
if taskType == 'celltype':
if EMFlag and (not args.EMreguTag):
loss = loss_function_graph(recon_batch, data.view(-1, recon_batch.shape[1]), mu, logvar, gammaPara=args.gammaPara, regulationMatrix=regulationMatrixBatch,
regularizer_type='noregu', reguPara=args.alphaRegularizePara, modelusage=args.model, reduction=args.reduction)
else:
loss = loss_function_graph(recon_batch, data.view(-1, recon_batch.shape[1]), mu, logvar, gammaPara=args.gammaPara, regulationMatrix=regulationMatrixBatch,
regularizer_type=args.regulized_type, reguPara=args.alphaRegularizePara, modelusage=args.model, reduction=args.reduction)
elif taskType == 'imputation':
if EMFlag and (not args.EMreguTag):
loss = loss_function_graph_celltype(recon_batch, data.view(-1, recon_batch.shape[1]), mu, logvar, graphregu=adjsample, celltyperegu=celltypesample, gammaPara=args.gammaImputePara,
regulationMatrix=regulationMatrixBatch, regularizer_type=args.EMregulized_type, reguPara=args.graphImputePara, reguParaCelltype=args.celltypeImputePara, modelusage=args.model, reduction=args.reduction)
else:
loss = loss_function_graph_celltype(recon_batch, data.view(-1, recon_batch.shape[1]), mu, logvar, graphregu=adjsample, celltyperegu=celltypesample, gammaPara=args.gammaImputePara,
regulationMatrix=regulationMatrixBatch, regularizer_type=args.regulized_type, reguPara=args.graphImputePara, reguParaCelltype=args.celltypeImputePara, modelusage=args.model, reduction=args.reduction)
elif args.model == 'AE':
recon_batch, z = model(data)
mu_dummy = ''
logvar_dummy = ''
# Original
# loss = loss_function(recon_batch, data, mu, logvar)
if taskType == 'celltype':
if EMFlag and (not args.EMreguTag):
loss = loss_function_graph(recon_batch, data.view(-1, recon_batch.shape[1]), mu_dummy, logvar_dummy, gammaPara=args.gammaPara,
regulationMatrix=regulationMatrixBatch, regularizer_type='noregu', reguPara=args.alphaRegularizePara, modelusage=args.model, reduction=args.reduction)
else:
loss = loss_function_graph(recon_batch, data.view(-1, recon_batch.shape[1]), mu_dummy, logvar_dummy, gammaPara=args.gammaPara, regulationMatrix=regulationMatrixBatch,
regularizer_type=args.regulized_type, reguPara=args.alphaRegularizePara, modelusage=args.model, reduction=args.reduction)
elif taskType == 'imputation':
if EMFlag and (not args.EMreguTag):
loss = loss_function_graph_celltype(recon_batch, data.view(-1, recon_batch.shape[1]), mu_dummy, logvar_dummy, graphregu=adjsample, celltyperegu=celltypesample, gammaPara=args.gammaImputePara,
regulationMatrix=regulationMatrixBatch, regularizer_type=args.EMregulized_type, reguPara=args.graphImputePara, reguParaCelltype=args.celltypeImputePara, modelusage=args.model, reduction=args.reduction)
else:
loss = loss_function_graph_celltype(recon_batch, data.view(-1, recon_batch.shape[1]), mu_dummy, logvar_dummy, graphregu=adjsample, celltyperegu=celltypesample, gammaPara=args.gammaImputePara,
regulationMatrix=regulationMatrixBatch, regularizer_type=args.regulized_type, reguPara=args.graphImputePara, reguParaCelltype=args.celltypeImputePara, modelusage=args.model, reduction=args.reduction)
# L1 and L2 regularization in imputation
# 0.0 for no regularization
if taskType == 'imputation':
l1 = 0.0
l2 = 0.0
for p in model.parameters():
l1 = l1 + p.abs().sum()
l2 = l2 + p.pow(2).sum()
loss = loss + args.L1Para * l1 + args.L2Para * l2
loss.backward()
train_loss += loss.item()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader),
loss.item() / len(data)))
# for batch
if batch_idx == 0:
recon_batch_all = recon_batch
data_all = data
z_all = z
else:
recon_batch_all = torch.cat((recon_batch_all, recon_batch), 0)
data_all = torch.cat((data_all, data), 0)
z_all = torch.cat((z_all, z), 0)
print('====> Epoch: {} Average loss: {:.4f}'.format(
epoch, train_loss / len(train_loader.dataset)))
return recon_batch_all, data_all, z_all
if __name__ == "__main__":
outParaTag = str(args.k)+'-'+str(args.gammaPara)+'-'+str(args.alphaRegularizePara)+'-' + \
str(args.gammaImputePara)+'-'+str(args.graphImputePara) + \
'-'+str(args.celltypeImputePara)
# outParaTag = str(args.gammaImputePara)+'-'+str(args.graphImputePara)+'-'+str(args.celltypeImputePara)
ptfileStart = args.npyDir+args.datasetName+'_'+outParaTag+'_EMtrainingStart.pt'
stateStart = {
# 'epoch': epoch,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}
ptfile = args.npyDir+args.datasetName+'_EMtraining.pt'
# Step 1. celltype clustering
# store parameter
torch.save(stateStart, ptfileStart)
# Save results only when impute
discreteStr = ''
if args.discreteTag:
discreteStr = 'D'
if args.imputeMode:
# Does not need now
# save_sparse_matrix(args.npyDir+args.datasetName+'_'+args.regulized_type+discreteStr+'_'+str(args.dropoutRatio)+'_features.npz',scData.features)
# sp.save_npz(args.npyDir+args.datasetName+'_'+args.regulized_type+discreteStr+'_'+str(args.dropoutRatio)+'_features.npz',scData.features)
np.save(args.npyDir+args.datasetName+'_'+args.regulized_type+discreteStr +
'_'+str(args.dropoutRatio)+'_'+outParaTag+'_features.npy', scData.features)
np.save(args.npyDir+args.datasetName+'_'+args.regulized_type+discreteStr +
'_'+str(args.dropoutRatio)+'_'+outParaTag+'_dropi.npy', scData.i)
np.save(args.npyDir+args.datasetName+'_'+args.regulized_type+discreteStr +
'_'+str(args.dropoutRatio)+'_'+outParaTag+'_dropj.npy', scData.j)
np.save(args.npyDir+args.datasetName+'_'+args.regulized_type+discreteStr +
'_'+str(args.dropoutRatio)+'_'+outParaTag+'_dropix.npy', scData.ix)
debuginfoStr('Start feature autoencoder training')
for epoch in range(1, args.Regu_epochs + 1):
recon, original, z = train(epoch, EMFlag=False)
debuginfoStr('Feature autoencoder training finished')
zOut = z.detach().cpu().numpy()
# torch.save(model.state_dict(),ptfile)
ptstatus = model.state_dict()
# Store reconOri for imputation
reconOri = recon.clone()
reconOri = reconOri.detach().cpu().numpy()
# Step 1. Inferring celltype
# Define resolution
# Default: auto, otherwise use user defined resolution
if args.resolution == 'auto':
if zOut.shape[0] < 2000:
resolution = 0.8
else:
resolution = 0.5
else:
resolution = float(args.resolution)
debuginfoStr('Start construct cell grpah')
# Here para = 'euclidean:10'
# adj, edgeList = generateAdj(zOut, graphType='KNNgraphML', para = args.knn_distance+':'+str(args.k))
adj, edgeList = generateAdj(zOut, graphType=args.prunetype, para=args.knn_distance +
':'+str(args.k), adjTag=(args.useGAEembedding or args.useBothembedding))
# if args.adjtype == 'unweighted':
# adj, edgeList = generateAdj(zOut, graphType=args.prunetype, para = args.knn_distance+':'+str(args.k))
# adjdense = sp.csr_matrix.todense(adj)
# elif args.adjtype == 'weighted':
# adj, edgeList = generateAdjWeighted(zOut, graphType=args.prunetype, para = args.knn_distance+':'+str(args.k))
# adjdense = adj.toarray()
debuginfoStr('Cell Graph constructed and pruned')
# if args.saveinternal:
# reconOut = recon.detach().cpu().numpy()
# if args.imputeMode:
# np.save(args.npyDir+args.datasetName+'_'+args.regulized_type+discreteStr+'_'+str(args.dropoutRatio)+'_'+outParaTag+'_recon.npy',reconOut)
# np.save(args.npyDir+args.datasetName+'_'+args.regulized_type+discreteStr+'_'+str(args.dropoutRatio)+'_'+outParaTag+'_z.npy',zOut)
# else:
# np.save(args.npyDir+args.datasetName+'_'+args.regulized_type+discreteStr+'_'+outParaTag+'_recon.npy',reconOut)
# np.save(args.npyDir+args.datasetName+'_'+args.regulized_type+discreteStr+'_'+outParaTag+'_z.npy',zOut)
# Whether use GAE embedding
debuginfoStr('Start Graph Autoencoder training')
if args.useGAEembedding or args.useBothembedding:
zDiscret = zOut > np.mean(zOut, axis=0)
zDiscret = 1.0*zDiscret
if args.useGAEembedding:
zOut = GAEembedding(zDiscret, adj, args)
elif args.useBothembedding:
zEmbedding = GAEembedding(zDiscret, adj, args)
zOut = np.concatenate((zOut, zEmbedding), axis=1)
debuginfoStr('Graph Autoencoder training finished')
# For iteration studies
G0 = nx.Graph()
G0.add_weighted_edges_from(edgeList)
nlG0 = nx.normalized_laplacian_matrix(G0)
# set iteration criteria for converge
adjOld = nlG0
# set celltype criteria for converge
listResultOld = [1 for i in range(zOut.shape[0])]
# Fill the zeros before EM iteration
# TODO: better implementation later, now we don't filling zeros for now
if args.zerofillFlag:
for nz_index in range(len(scData.nz_i)):
# tmp = scipy.sparse.lil_matrix.todense(scData.features[scData.nz_i[nz_index], scData.nz_j[nz_index]])
# tmp = np.asarray(tmp).reshape(-1)[0]
tmp = scData.features[scData.nz_i[nz_index], scData.nz_j[nz_index]]
reconOut[scData.nz_i[nz_index], scData.nz_j[nz_index]] = tmp
recon = reconOut
debuginfoStr('EM Iteration started')
for bigepoch in range(0, args.EM_iteration):
iteration_time = time.time()
# Now for both methods, we need do clustering, using clustering results to check converge
# TODO May reimplement later
# Clustering: Get cluster
clustering_time = time.time()
if args.clustering_method == 'Louvain':
listResult, size = generateLouvainCluster(edgeList)
k = len(np.unique(listResult))
print('Louvain cluster: '+str(k))
elif args.clustering_method == 'LouvainK':
listResult, size = generateLouvainCluster(edgeList)
k = len(np.unique(listResult))
print('Louvain cluster: '+str(k))
# resolution of louvain cluster:
k = int(k*resolution) if int(k*resolution)>=3 else 2
clustering = KMeans(n_clusters=k, random_state=0).fit(zOut)
listResult = clustering.predict(zOut)
elif args.clustering_method == 'LouvainB':
listResult, size = generateLouvainCluster(edgeList)
k = len(np.unique(listResult))
print('Louvain cluster: '+str(k))
# resolution of louvain cluster:
k = int(k*resolution) if int(k*resolution)>=3 else 2
clustering = Birch(n_clusters=k).fit(zOut)
listResult = clustering.predict(zOut)
elif args.clustering_method == 'KMeans':
clustering = KMeans(n_clusters=args.n_clusters,
random_state=0).fit(zOut)
listResult = clustering.predict(zOut)
elif args.clustering_method == 'SpectralClustering':
clustering = SpectralClustering(
n_clusters=args.n_clusters, assign_labels="discretize", random_state=0).fit(zOut)
listResult = clustering.labels_.tolist()
elif args.clustering_method == 'AffinityPropagation':
clustering = AffinityPropagation().fit(zOut)
listResult = clustering.predict(zOut)
elif args.clustering_method == 'AgglomerativeClustering':
clustering = AgglomerativeClustering(
linkage=args.linkage).fit(zOut)
listResult = clustering.labels_.tolist()
elif args.clustering_method == 'AgglomerativeClusteringK':
clustering = AgglomerativeClustering(
n_clusters=args.n_clusters).fit(zOut)
listResult = clustering.labels_.tolist()
elif args.clustering_method == 'Birch':
clustering = Birch(n_clusters=args.n_clusters).fit(zOut)
listResult = clustering.predict(zOut)
elif args.clustering_method == 'BirchN':
clustering = Birch(n_clusters=None).fit(zOut)
listResult = clustering.predict(zOut)
elif args.clustering_method == 'MeanShift':
clustering = MeanShift().fit(zOut)
listResult = clustering.labels_.tolist()
elif args.clustering_method == 'OPTICS':
clustering = OPTICS(min_samples=int(
args.k/2), min_cluster_size=args.minMemberinCluster).fit(zOut)
listResult = clustering.labels_.tolist()
else:
print("Error: Clustering method not appropriate")
# print("---Clustering takes %s seconds ---" % (time.time() - clustering_time))
# If clusters more than maxclusters, then have to stop
if len(set(listResult)) > args.maxClusterNumber or len(set(listResult)) <= 1:
print("Stopping: Number of clusters is " +
str(len(set(listResult))) + ".")
# Exit
# return None
# Else: dealing with the number
listResult = trimClustering(
listResult, minMemberinCluster=args.minMemberinCluster, maxClusterNumber=args.maxClusterNumber)
# Calculate silhouette
measure_clustering_results(zOut, listResult)
print('Total Cluster Number: '+str(len(set(listResult))))
debuginfoStr(
str(bigepoch)+'th iter: Cluster Autoencoder training started')
# Graph regulizated EM AE with Cluster AE, do the additional AE
if args.EMtype == 'celltypeEM':
# Each cluster has a autoencoder, and organize them back in iteraization
clusterIndexList = []
for i in range(len(set(listResult))):
clusterIndexList.append([])
for i in range(len(listResult)):
clusterIndexList[listResult[i]].append(i)
reconNew = np.zeros(
(scData.features.shape[0], scData.features.shape[1]))
# Convert to Tensor
reconNew = torch.from_numpy(reconNew)
if args.precisionModel == 'Double':
reconNew = reconNew.type(torch.DoubleTensor)
elif args.precisionModel == 'Float':
reconNew = reconNew.type(torch.FloatTensor)
reconNew = reconNew.to(device)
# model.load_state_dict(torch.load(ptfile))
model.load_state_dict(ptstatus)
for clusterIndex in clusterIndexList:
reconUsage = recon[clusterIndex]
scDataInter = scDatasetInter(reconUsage)
train_loader = DataLoader(
scDataInter, batch_size=args.batch_size, shuffle=False, **kwargs)
for epoch in range(1, args.cluster_epochs + 1):
reconCluster, originalCluster, zCluster = train(
epoch, EMFlag=True)
count = 0
for i in clusterIndex:
reconNew[i] = reconCluster[count, :]
count += 1
# Update
recon = reconNew
# torch.save(model.state_dict(),ptfile)
ptstatus = model.state_dict()
debuginfoStr(
str(bigepoch)+'th iter: Cluster Autoencoder training succeed')
# Use new dataloader
scDataInter = scDatasetInter(recon)
train_loader = DataLoader(
scDataInter, batch_size=args.batch_size, shuffle=False, **kwargs)
debuginfoStr(str(bigepoch)+'th iter: Start construct cell grpah')
for epoch in range(1, args.EM_epochs + 1):
recon, original, z = train(epoch, EMFlag=True)
zOut = z.detach().cpu().numpy()
# Here para = 'euclidean:10'
# adj, edgeList = generateAdj(zOut, graphType='KNNgraphML', para = args.knn_distance+':'+str(args.k))
adj, edgeList = generateAdj(zOut, graphType=args.prunetype, para=args.knn_distance+':'+str(
args.k), adjTag=(args.useGAEembedding or args.useBothembedding or (bigepoch == int(args.EM_iteration)-1)))
# if args.adjtype == 'unweighted':
# adj, edgeList = generateAdj(zOut, graphType=args.prunetype, para = args.knn_distance+':'+str(args.k))
# adjdense = sp.csr_matrix.todense(adj)
# elif args.adjtype == 'weighted':
# adj, edgeList = generateAdjWeighted(zOut, graphType=args.prunetype, para = args.knn_distance+':'+str(args.k))
# adjdense = adj.toarray()
debuginfoStr(
str(bigepoch)+'th iter: Cell Graph constructed and pruned')
debuginfoStr(str(bigepoch)+'th iter: Start Graph Autoencoder training')
# Whether use GAE embedding
if args.useGAEembedding or args.useBothembedding:
zDiscret = zOut > np.mean(zOut, axis=0)
zDiscret = 1.0*zDiscret
if args.useGAEembedding:
zOut = GAEembedding(zDiscret, adj, args)
elif args.useBothembedding:
zEmbedding = GAEembedding(zDiscret, adj, args)
zOut = np.concatenate((zOut, zEmbedding), axis=1)
debuginfoStr(
str(bigepoch)+'th iter: Graph Autoencoder training finished')
if args.saveinternal:
reconOut = recon.detach().cpu().numpy()
if args.imputeMode:
# np.save(args.npyDir+args.datasetName+'_'+args.regulized_type+discreteStr+'_'+str(args.dropoutRatio)+'_'+outParaTag+'_recon'+str(bigepoch)+'.npy',reconOut)
np.save(args.npyDir+args.datasetName+'_'+args.regulized_type+discreteStr +
'_'+str(args.dropoutRatio)+'_'+outParaTag+'_z'+str(bigepoch)+'.npy', zOut)
else:
# np.save(args.npyDir+args.datasetName+'_'+args.regulized_type+discreteStr+'_'+outParaTag+'_recon'+str(bigepoch)+'.npy',reconOut)
np.save(args.npyDir+args.datasetName+'_'+args.regulized_type +
discreteStr+'_'+outParaTag+'_z'+str(bigepoch)+'.npy', zOut)
# print("---One iteration in EM process, proceeded %s seconds ---" % (time.time() - iteration_time))
# Iteration usage
Gc = nx.Graph()
Gc.add_weighted_edges_from(edgeList)
adjGc = nx.adjacency_matrix(Gc)
# Update new adj
adjNew = args.alpha*nlG0 + (1-args.alpha) * adjGc/np.sum(adjGc, axis=0)
# debug
graphChange = np.mean(abs(adjNew-adjOld))
graphChangeThreshold = args.converge_graphratio * np.mean(abs(nlG0))
print('adjNew:{} adjOld:{} G0:{}'.format(adjNew, adjOld, nlG0))
print('mean:{} threshold:{}'.format(graphChange, graphChangeThreshold))
silhouette, chs, dbs = measureClusteringNoLabel(zOut, listResult)
ari, ami, nmi, cs, fms, vms, hs = measureClusteringTrueLabel(
listResultOld, listResult)
print(listResultOld)
print(listResult)
print('celltype similarity:'+str(ari))
ari, ami, nmi, cs, fms, vms, hs = measureClusteringTrueLabel(
bench_celltype, listResult)
resultarray = []
resultstr = str(silhouette)+' '+str(chs)+' '+str(dbs)+' '+str(ari)+' ' + \
str(ami)+' '+str(nmi)+' '+str(cs)+' ' + \
str(fms)+' '+str(vms)+' '+str(hs)
resultarray.append(resultstr)
print('All Results: ')
print(resultstr)
if args.saveinternal:
if args.imputeMode:
np.savetxt(args.npyDir+args.datasetName+'_'+args.regulized_type+'_'+str(
args.dropoutRatio)+'_'+outParaTag+'_benchmark'+str(bigepoch)+'.txt', resultarray, fmt='%s')
np.savetxt(args.npyDir+args.datasetName+'_'+args.regulized_type+'_'+str(args.dropoutRatio) +
'_'+outParaTag+'_graph'+str(bigepoch)+'.csv', edgeList, fmt='%d,%d,%2.1f')
np.savetxt(args.npyDir+args.datasetName+'_'+args.regulized_type+'_'+str(
args.dropoutRatio)+'_'+outParaTag+'_results'+str(bigepoch)+'.txt', listResult, fmt='%d')
else:
np.savetxt(args.npyDir+args.datasetName+'_'+args.regulized_type+'_' +
outParaTag+'_benchmark'+str(bigepoch)+'.txt', resultarray, fmt='%s')
np.savetxt(args.npyDir+args.datasetName+'_'+args.regulized_type+'_' +
outParaTag+'_graph'+str(bigepoch)+'.csv', edgeList, fmt='%d,%d,%2.1f')
np.savetxt(args.npyDir+args.datasetName+'_'+args.regulized_type+'_' +
outParaTag+'_results'+str(bigepoch)+'.txt', listResult, fmt='%d')
# graph criteria
if args.converge_type == 'graph':
if graphChange < graphChangeThreshold:
print('Graph Converge now!')
# Converge, Update
adjOld = adjNew
listResultOld = listResult
break
# celltype criteria
elif args.converge_type == 'celltype':
if ari > args.converge_celltyperatio:
print('Celltype Converge now!')
# Converge, Update
adjOld = adjNew
listResultOld = listResult
break
# if both criteria are meets
elif args.converge_type == 'both':
if graphChange < graphChangeThreshold and ari > args.converge_celltyperatio:
print('Graph and Celltype Converge now!')
# Converge, Update
adjOld = adjNew
listResultOld = listResult
break
# if either criteria are meets
elif args.converge_type == 'either':
if graphChange < graphChangeThreshold or ari > args.converge_celltyperatio:
print('Graph or Celltype Converge now!')
# Converge, Update
adjOld = adjNew
listResultOld = listResult
break
# Update
adjOld = adjNew
listResultOld = listResult
# torch.cuda.empty_cache()
debuginfoStr(str(bigepoch)+'th iter: Iteration finished')
# Output celltype related results
if args.imputeMode:
np.save(args.npyDir+args.datasetName+'_'+args.regulized_type+discreteStr +
'_'+str(args.dropoutRatio)+'_'+outParaTag+'_final_edgeList.npy', edgeList)
else:
np.save(args.npyDir+args.datasetName+'_'+args.regulized_type +
discreteStr+'_'+outParaTag+'_final_edgeList.npy', edgeList)
# np.savetxt(args.npyDir+args.datasetName+'_'+args.regulized_type+'_'+outParaTag+'_'+str(args.L1Para)+'_'+str(args.L2Para)+'_recon.csv',reconOut,delimiter=",",fmt='%10.4f')
np.savetxt(args.npyDir+args.datasetName+'_'+args.regulized_type+'_'+outParaTag+'_' +
str(args.L1Para)+'_'+str(args.L2Para)+'_embedding.csv', zOut, delimiter=",", fmt='%10.4f')
np.savetxt(args.npyDir+args.datasetName+'_'+args.regulized_type+'_'+outParaTag+'_' +
str(args.L1Para)+'_'+str(args.L2Para)+'_graph.csv', edgeList, fmt='%d,%d,%2.1f')
np.savetxt(args.npyDir+args.datasetName+'_'+args.regulized_type+'_'+outParaTag +
'_'+str(args.L1Para)+'_'+str(args.L2Para)+'_results.txt', listResult, fmt='%d')
resultarray = []
silhouette, chs, dbs = measureClusteringNoLabel(zOut, listResult)
ari, ami, nmi, cs, fms, vms, hs = measureClusteringTrueLabel(
bench_celltype, listResult)
resultstr = str(silhouette)+' '+str(chs)+' '+str(dbs)+' '+str(ari)+' ' + \
str(ami)+' '+str(nmi)+' '+str(cs)+' '+str(fms)+' '+str(vms)+' '+str(hs)
resultarray.append(resultstr)
np.savetxt(args.npyDir+args.datasetName+'_'+args.regulized_type+'_'+outParaTag +
'_'+str(args.L1Para)+'_'+str(args.L2Para)+'_benchmark.txt', resultarray, fmt='%s')
# save internal results for imputation
# if args.imputeMode:
# np.save(args.npyDir+args.datasetName+'_'+str(args.dropoutRatio)+'_'+args.regulized_type+'_reconOri.npy',reconOri)
# np.save(args.npyDir+args.datasetName+'_'+str(args.dropoutRatio)+'_'+args.regulized_type+'_adj.npy',adj)
# np.save(args.npyDir+args.datasetName+'_'+str(args.dropoutRatio)+'_'+args.regulized_type+'_listResult.npy',listResult)
# else:
# np.save(args.npyDir+args.datasetName+'_'+args.regulized_type+'_reconOri.npy',reconOri)
# np.save(args.npyDir+args.datasetName+'_'+args.regulized_type+'_adj.npy',adj)
# np.save(args.npyDir+args.datasetName+'_'+args.regulized_type+'_listResult.npy',listResult)
# Step 2. Imputation with best results of graph and celltype
# if args.imputeMode:
# reconOri = np.load(args.npyDir+args.datasetName+'_'+str(args.dropoutRatio)+'_'+args.regulized_type+'_reconOri.npy')
# adj = np.load(args.npyDir+args.datasetName+'_'+str(args.dropoutRatio)+'_'+args.regulized_type+'_adj.npy',allow_pickle=True)
# listResult = np.load(args.npyDir+args.datasetName+'_'+str(args.dropoutRatio)+'_'+args.regulized_type+'_listResult.npy')
# else:
# reconOri = np.load(args.npyDir+args.datasetName+'_'+args.regulized_type+'_reconOri.npy')
# adj = np.load(args.npyDir+args.datasetName+'_'+args.regulized_type+'_adj.npy',allow_pickle=True)
# listResult = np.load(args.npyDir+args.datasetName+'_'+args.regulized_type+'_listResult.npy')
# Use new dataloader
scDataInter = scDatasetInter(reconOri)
train_loader = DataLoader(
scDataInter, batch_size=args.batch_size, shuffle=False, **kwargs)
stateStart = torch.load(ptfileStart)
model.load_state_dict(stateStart['state_dict'])
optimizer.load_state_dict(stateStart['optimizer'])
# if args.aePara == 'start':
# model.load_state_dict(torch.load(ptfileStart))
# elif args.aePara == 'end':
# model.load_state_dict(torch.load(ptfileEnd))
# generate graph regularizer from graph
# adj = adj.tolist() # Used for read/load
# adjdense = sp.csr_matrix.todense(adj)
# generate adj from edgeList
adjdense = sp.csr_matrix.todense(adj)
adjsample = torch.from_numpy(adjdense)
if args.precisionModel == 'Float':
adjsample = adjsample.float()
elif args.precisionModel == 'Double':
adjsample = adjsample.type(torch.DoubleTensor)
adjsample = adjsample.to(device)
# generate celltype regularizer from celltype
celltypesample = generateCelltypeRegu(listResult)
celltypesample = torch.from_numpy(celltypesample)
if args.precisionModel == 'Float':
celltypesample = celltypesample.float()
elif args.precisionModel == 'Double':
celltypesample = celltypesample.type(torch.DoubleTensor)
celltypesample = celltypesample.to(device)
for epoch in range(1, args.EM_epochs + 1):
recon, original, z = train(epoch, EMFlag=True, taskType='imputation')
reconOut = recon.detach().cpu().numpy()
# out imputation Results
if args.imputeMode:
np.save(args.npyDir+args.datasetName+'_'+args.regulized_type +
'_'+str(args.dropoutRatio)+'_'+outParaTag+'_recon.npy', reconOut)
np.savetxt(args.npyDir+args.datasetName+'_'+args.regulized_type+'_'+str(
args.dropoutRatio)+'_'+outParaTag+'_recon.csv', reconOut, delimiter=",", fmt='%10.4f')
else:
np.save(args.npyDir+args.datasetName+'_'+args.regulized_type +
'_'+outParaTag+'_recon.npy', reconOut)
np.savetxt(args.npyDir+args.datasetName+'_'+args.regulized_type +
'_'+outParaTag+'_recon.csv', reconOut, delimiter=",", fmt='%10.4f')
debuginfoStr('scGNN finished')
| 52.640049 | 257 | 0.645359 |
94908a0cc4aa0a1f9b50fb67fb6b0f7e1a0226fd | 4,645 | py | Python | rasa_nlu_gao/models/elmo_cn/modules/token_embedder.py | 1073521013/rasa_nlu_gq | 6c8bea1b14390246b39770abc544986f4c7acf26 | [
"Apache-2.0"
] | 3 | 2020-08-03T00:21:04.000Z | 2021-09-01T06:20:00.000Z | rasa_nlu_gao/models/elmo_cn/modules/token_embedder.py | 1073521013/rasa_nlu_gq | 6c8bea1b14390246b39770abc544986f4c7acf26 | [
"Apache-2.0"
] | 1 | 2022-02-10T06:42:23.000Z | 2022-02-10T06:42:23.000Z | rasa_nlu_gao/models/elmo_cn/modules/token_embedder.py | 1073521013/rasa_nlu_gq | 6c8bea1b14390246b39770abc544986f4c7acf26 | [
"Apache-2.0"
] | 4 | 2019-06-06T08:05:22.000Z | 2020-08-07T08:07:22.000Z | from __future__ import absolute_import
from __future__ import unicode_literals
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import copy
from .highway import Highway
class LstmTokenEmbedder(nn.Module):
def __init__(self, config, word_emb_layer, char_emb_layer, use_cuda=False):
super(LstmTokenEmbedder, self).__init__()
self.config = config
self.use_cuda = use_cuda
self.word_emb_layer = word_emb_layer
self.char_emb_layer = char_emb_layer
self.output_dim = config['encoder']['projection_dim']
emb_dim = 0
if word_emb_layer is not None:
emb_dim += word_emb_layer.n_d
if char_emb_layer is not None:
emb_dim += char_emb_layer.n_d * 2
self.char_lstm = nn.LSTM(char_emb_layer.n_d, char_emb_layer.n_d, num_layers=1, bidirectional=True,
batch_first=True, dropout=config['dropout'])
self.projection = nn.Linear(emb_dim, self.output_dim, bias=True)
def forward(self, word_inp, chars_inp, shape):
embs = []
batch_size, seq_len = shape
if self.word_emb_layer is not None:
word_emb = self.word_emb_layer(Variable(word_inp).cuda() if self.use_cuda else Variable(word_inp))
embs.append(word_emb)
if self.char_emb_layer is not None:
chars_inp = chars_inp.view(batch_size * seq_len, -1)
chars_emb = self.char_emb_layer(Variable(chars_inp).cuda() if self.use_cuda else Variable(chars_inp))
_, (chars_outputs, __) = self.char_lstm(chars_emb)
chars_outputs = chars_outputs.contiguous().view(-1, self.config['token_embedder']['char_dim'] * 2)
embs.append(chars_outputs)
token_embedding = torch.cat(embs, dim=2)
return self.projection(token_embedding)
class ConvTokenEmbedder(nn.Module):
def __init__(self, config, word_emb_layer, char_emb_layer, use_cuda):
super(ConvTokenEmbedder, self).__init__()
self.config = config
self.use_cuda = use_cuda
self.word_emb_layer = word_emb_layer
self.char_emb_layer = char_emb_layer
self.output_dim = config['encoder']['projection_dim']
self.emb_dim = 0
if word_emb_layer is not None:
self.emb_dim += word_emb_layer.n_d
if char_emb_layer is not None:
self.convolutions = []
cnn_config = config['token_embedder']
filters = cnn_config['filters']
char_embed_dim = cnn_config['char_dim']
for i, (width, num) in enumerate(filters):
conv = torch.nn.Conv1d(
in_channels=char_embed_dim,
out_channels=num,
kernel_size=width,
bias=True
)
self.convolutions.append(conv)
self.convolutions = nn.ModuleList(self.convolutions)
self.n_filters = sum(f[1] for f in filters)
self.n_highway = cnn_config['n_highway']
self.highways = Highway(self.n_filters, self.n_highway, activation=torch.nn.functional.relu)
self.emb_dim += self.n_filters
self.projection = nn.Linear(self.emb_dim, self.output_dim, bias=True)
def forward(self, word_inp, chars_inp, shape):
embs = []
batch_size, seq_len = shape
if self.word_emb_layer is not None:
batch_size, seq_len = word_inp.size(0), word_inp.size(1)
word_emb = self.word_emb_layer(Variable(word_inp).cuda() if self.use_cuda else Variable(word_inp))
embs.append(word_emb)
if self.char_emb_layer is not None:
chars_inp = chars_inp.view(batch_size * seq_len, -1)
character_embedding = self.char_emb_layer(Variable(chars_inp).cuda() if self.use_cuda else Variable(chars_inp))
character_embedding = torch.transpose(character_embedding, 1, 2)
cnn_config = self.config['token_embedder']
if cnn_config['activation'] == 'tanh':
activation = torch.nn.functional.tanh
elif cnn_config['activation'] == 'relu':
activation = torch.nn.functional.relu
else:
raise Exception("Unknown activation")
convs = []
for i in range(len(self.convolutions)):
convolved = self.convolutions[i](character_embedding)
# (batch_size * sequence_length, n_filters for this width)
convolved, _ = torch.max(convolved, dim=-1)
convolved = activation(convolved)
convs.append(convolved)
char_emb = torch.cat(convs, dim=-1)
char_emb = self.highways(char_emb)
embs.append(char_emb.view(batch_size, -1, self.n_filters))
token_embedding = torch.cat(embs, dim=2)
return self.projection(token_embedding)
| 36.574803 | 118 | 0.676426 |
4ba7b0f57e6e4a060c5e1fd4c9d8a1237db3ecf9 | 1,592 | py | Python | pathattach/parseide.py | CookiePLMonster/butcher-tools | d63ec0411c4342c594b5927e0bc85c81af8c2e0b | [
"MIT"
] | 7 | 2017-04-10T12:23:26.000Z | 2020-05-07T21:57:02.000Z | pathattach/parseide.py | CookiePLMonster/butcher-tools | d63ec0411c4342c594b5927e0bc85c81af8c2e0b | [
"MIT"
] | null | null | null | pathattach/parseide.py | CookiePLMonster/butcher-tools | d63ec0411c4342c594b5927e0bc85c81af8c2e0b | [
"MIT"
] | null | null | null | import fileinput
def parseIdePath( files ):
# We store IDE paths in a dictionary with modelID key, but IPL paths should be stored
# in a plain list
class PATHnode:
def __init__(self):
self.NodeType = 0
self.NextNode = -1
self.IsCrossRoad = False
self.Pos = ()
self.Median = 0.0
self.LeftLanes = -1
self.RightLanes = -1
self.SpeedLimit = -1
self.Flags = -1
self.SpawnRate = 0.0
class PATHgroup:
def __init__(self):
self.ModelID = -1
self.ModelName = ""
self.NodesByGroup = [], [], []
def __init__(self, id, name):
self.ModelID = id
self.ModelName = name
self.NodesByGroup = [], [], []
parse = False
pathlist = {}
curModel = -1
curGroup = -1
for line in fileinput.input( files ):
line = line.strip().split('#', 1)[0]
if line == "end":
parse = False
if parse == True:
tokens = line.split(", ")
if len(tokens) == 3:
curGroup = int(tokens[0])
curModel = int(tokens[1])
pathlist.setdefault( curModel, PATHgroup( curModel, tokens[2] ) )
elif len(tokens) == 12:
node = PATHnode()
node.NodeType = int(tokens[0])
node.NextNode = int(tokens[1])
node.IsCrossRoad = int(tokens[2])
node.Pos = ( float(tokens[3]), float(tokens[4]), float(tokens[5]) )
node.Median = float(tokens[6])
node.LeftLanes = int(tokens[7])
node.RightLanes = int(tokens[8])
node.SpeedLimit = int(tokens[9])
node.Flags = int(tokens[10])
node.SpawnRate = float(tokens[11])
pathlist[ curModel ].NodesByGroup[ curGroup ].append( node )
if line == "path":
parse = True
return pathlist | 25.269841 | 86 | 0.626256 |
e2e3cdd73a9dc552850f477e5467ec071c8554d1 | 13,353 | py | Python | tests/poller_test.py | lumatijev/mitogen | b610b0c93bbab1bc0fbe86cfcc4f3a56fd2b2c14 | [
"BSD-3-Clause"
] | null | null | null | tests/poller_test.py | lumatijev/mitogen | b610b0c93bbab1bc0fbe86cfcc4f3a56fd2b2c14 | [
"BSD-3-Clause"
] | null | null | null | tests/poller_test.py | lumatijev/mitogen | b610b0c93bbab1bc0fbe86cfcc4f3a56fd2b2c14 | [
"BSD-3-Clause"
] | null | null | null |
import errno
import os
import select
import socket
import sys
import time
import unittest2
import mitogen.core
import mitogen.parent
import testlib
try:
next
except NameError:
# Python 2.4
from mitogen.core import next
class SockMixin(object):
    """Provides two non-blocking socketpairs (l1/r1 and l2/r2) plus helpers
    to saturate or empty either end's kernel buffers."""

    def setUp(self):
        super(SockMixin, self).setUp()
        self._setup_socks()

    def tearDown(self):
        self.close_socks()
        super(SockMixin, self).tearDown()

    def _setup_socks(self):
        # "left" and "right" side of two socket pairs. Sockets rather than
        # pipes let the same process manipulate transmit/receive buffers on
        # both sides (bidirectional IO), making it easy to combine
        # readability and writeability states on one file object in a test.
        self.l1_sock, self.r1_sock = socket.socketpair()
        self.l2_sock, self.r2_sock = socket.socketpair()
        self.l1 = self.l1_sock.fileno()
        self.r1 = self.r1_sock.fileno()
        self.l2 = self.l2_sock.fileno()
        self.r2 = self.r2_sock.fileno()
        for fd in (self.l1, self.r1, self.l2, self.r2):
            mitogen.core.set_nonblock(fd)

    def fill(self, fd):
        """Make `fd` unwriteable: write until the send buffer is full (EAGAIN)."""
        chunk = mitogen.core.b('x') * 4096
        while True:
            try:
                os.write(fd, chunk)
            except OSError:
                e = sys.exc_info()[1]
                if e.args[0] == errno.EAGAIN:
                    return
                raise

    def drain(self, fd):
        """Make `fd` unreadable: read until EAGAIN or EOF."""
        while True:
            try:
                if not os.read(fd, 4096):
                    return
            except OSError:
                e = sys.exc_info()[1]
                if e.args[0] == errno.EAGAIN:
                    return
                raise

    def close_socks(self):
        for sock in (self.l1_sock, self.r1_sock, self.l2_sock, self.r2_sock):
            sock.close()
class PollerMixin(object):
klass = None
def setUp(self):
super(PollerMixin, self).setUp()
self.p = self.klass()
def tearDown(self):
self.p.close()
super(PollerMixin, self).tearDown()
class ReceiveStateMixin(PollerMixin, SockMixin):
def test_start_receive_adds_reader(self):
self.p.start_receive(self.l1)
self.assertEquals([(self.l1, self.l1)], self.p.readers)
self.assertEquals([], self.p.writers)
def test_start_receive_adds_reader_data(self):
data = object()
self.p.start_receive(self.l1, data=data)
self.assertEquals([(self.l1, data)], self.p.readers)
self.assertEquals([], self.p.writers)
def test_stop_receive(self):
self.p.start_receive(self.l1)
self.p.stop_receive(self.l1)
self.assertEquals([], self.p.readers)
self.assertEquals([], self.p.writers)
def test_stop_receive_dup(self):
self.p.start_receive(self.l1)
self.p.stop_receive(self.l1)
self.assertEquals([], self.p.readers)
self.assertEquals([], self.p.writers)
self.p.stop_receive(self.l1)
self.assertEquals([], self.p.readers)
self.assertEquals([], self.p.writers)
def test_stop_receive_noexist(self):
p = self.klass()
p.stop_receive(123) # should not fail
self.assertEquals([], p.readers)
self.assertEquals([], self.p.writers)
class TransmitStateMixin(PollerMixin, SockMixin):
    """Verify start_transmit()/stop_transmit() bookkeeping on the poller."""

    def test_start_transmit_adds_writer(self):
        self.p.start_transmit(self.r1)
        self.assertEquals([], self.p.readers)
        self.assertEquals([(self.r1, self.r1)], self.p.writers)

    def test_start_transmit_adds_writer_data(self):
        data = object()
        self.p.start_transmit(self.r1, data=data)
        self.assertEquals([], self.p.readers)
        self.assertEquals([(self.r1, data)], self.p.writers)

    def test_stop_transmit(self):
        self.p.start_transmit(self.r1)
        self.p.stop_transmit(self.r1)
        self.assertEquals([], self.p.readers)
        self.assertEquals([], self.p.writers)

    def test_stop_transmit_dup(self):
        self.p.start_transmit(self.r1)
        self.p.stop_transmit(self.r1)
        self.assertEquals([], self.p.readers)
        self.assertEquals([], self.p.writers)
        # A second stop for the same FD must be a harmless no-op.
        self.p.stop_transmit(self.r1)
        self.assertEquals([], self.p.readers)
        self.assertEquals([], self.p.writers)

    def test_stop_transmit_noexist(self):
        p = self.klass()
        # Bug fix: this previously called stop_receive(), so the transmit
        # path was never exercised. Stopping an unknown FD must not fail.
        p.stop_transmit(123)
        self.assertEquals([], p.readers)
        self.assertEquals([], p.writers)
class CloseMixin(PollerMixin):
    # Poller.close() must be idempotent: safe on a fresh poller and safe to
    # call again after the poller is already closed.
    def test_single_close(self):
        self.p.close()

    def test_double_close(self):
        self.p.close()
        self.p.close()
class PollMixin(PollerMixin):
    """Timeout behaviour of poll() when nothing is registered."""

    def test_empty_zero_timeout(self):
        started = time.time()
        self.assertEquals([], list(self.p.poll(0)))
        # Zero timeout must return promptly.
        self.assertTrue((time.time() - started) < .1)  # vaguely reasonable

    def test_empty_small_timeout(self):
        started = time.time()
        self.assertEquals([], list(self.p.poll(.2)))
        # A positive timeout must actually block for at least that long.
        self.assertTrue((time.time() - started) >= .2)
class ReadableMixin(PollerMixin, SockMixin):
def test_unreadable(self):
self.p.start_receive(self.l1)
self.assertEquals([], list(self.p.poll(0)))
def test_readable_before_add(self):
self.fill(self.r1)
self.p.start_receive(self.l1)
self.assertEquals([self.l1], list(self.p.poll(0)))
def test_readable_after_add(self):
self.p.start_receive(self.l1)
self.fill(self.r1)
self.assertEquals([self.l1], list(self.p.poll(0)))
def test_readable_then_unreadable(self):
self.fill(self.r1)
self.p.start_receive(self.l1)
self.assertEquals([self.l1], list(self.p.poll(0)))
self.drain(self.l1)
self.assertEquals([], list(self.p.poll(0)))
def test_readable_data(self):
data = object()
self.fill(self.r1)
self.p.start_receive(self.l1, data=data)
self.assertEquals([data], list(self.p.poll(0)))
def test_double_readable_data(self):
data1 = object()
data2 = object()
self.fill(self.r1)
self.p.start_receive(self.l1, data=data1)
self.fill(self.r2)
self.p.start_receive(self.l2, data=data2)
self.assertEquals(set([data1, data2]), set(self.p.poll(0)))
class WriteableMixin(PollerMixin, SockMixin):
def test_writeable(self):
self.p.start_transmit(self.r1)
self.assertEquals([self.r1], list(self.p.poll(0)))
def test_writeable_data(self):
data = object()
self.p.start_transmit(self.r1, data=data)
self.assertEquals([data], list(self.p.poll(0)))
def test_unwriteable_before_add(self):
self.fill(self.r1)
self.p.start_transmit(self.r1)
self.assertEquals([], list(self.p.poll(0)))
def test_unwriteable_after_add(self):
self.p.start_transmit(self.r1)
self.fill(self.r1)
self.assertEquals([], list(self.p.poll(0)))
def test_unwriteable_then_writeable(self):
self.fill(self.r1)
self.p.start_transmit(self.r1)
self.assertEquals([], list(self.p.poll(0)))
self.drain(self.l1)
self.assertEquals([self.r1], list(self.p.poll(0)))
def test_double_unwriteable_then_Writeable(self):
self.fill(self.r1)
self.p.start_transmit(self.r1)
self.fill(self.r2)
self.p.start_transmit(self.r2)
self.assertEquals([], list(self.p.poll(0)))
self.drain(self.l1)
self.assertEquals([self.r1], list(self.p.poll(0)))
self.drain(self.l2)
self.assertEquals(set([self.r1, self.r2]), set(self.p.poll(0)))
class MutateDuringYieldMixin(PollerMixin, SockMixin):
# verify behaviour when poller contents is modified in the middle of
# poll() output generation.
def test_one_readable_removed_before_yield(self):
self.fill(self.l1)
self.p.start_receive(self.r1)
p = self.p.poll(0)
self.p.stop_receive(self.r1)
self.assertEquals([], list(p))
def test_one_writeable_removed_before_yield(self):
self.p.start_transmit(self.r1)
p = self.p.poll(0)
self.p.stop_transmit(self.r1)
self.assertEquals([], list(p))
def test_one_readable_readded_before_yield(self):
# fd removed, closed, another fd opened, gets same fd number, re-added.
# event fires for wrong underlying object.
self.fill(self.l1)
self.p.start_receive(self.r1)
p = self.p.poll(0)
self.p.stop_receive(self.r1)
self.p.start_receive(self.r1)
self.assertEquals([], list(p))
def test_one_readable_readded_during_yield(self):
self.fill(self.l1)
self.p.start_receive(self.r1)
self.fill(self.l2)
self.p.start_receive(self.r2)
p = self.p.poll(0)
# figure out which one is consumed and which is still to-read.
consumed = next(p)
ready = (self.r1, self.r2)[consumed == self.r1]
# now remove and re-add the one that hasn't been read yet.
self.p.stop_receive(ready)
self.p.start_receive(ready)
# the start_receive() may be for a totally new underlying file object,
# the live loop iteration must not yield any buffered readiness event.
self.assertEquals([], list(p))
class FileClosedMixin(PollerMixin, SockMixin):
# Verify behaviour when a registered file object is closed in various
# scenarios, without first calling stop_receive()/stop_transmit().
def test_writeable_then_closed(self):
self.p.start_transmit(self.r1)
self.assertEquals([self.r1], list(self.p.poll(0)))
self.close_socks()
try:
self.assertEquals([], list(self.p.poll(0)))
except select.error:
# a crash is also reasonable here.
pass
def test_writeable_closed_before_yield(self):
self.p.start_transmit(self.r1)
p = self.p.poll(0)
self.close_socks()
try:
self.assertEquals([], list(p))
except select.error:
# a crash is also reasonable here.
pass
def test_readable_then_closed(self):
self.fill(self.l1)
self.p.start_receive(self.r1)
self.assertEquals([self.r1], list(self.p.poll(0)))
self.close_socks()
try:
self.assertEquals([], list(self.p.poll(0)))
except select.error:
# a crash is also reasonable here.
pass
def test_readable_closed_before_yield(self):
self.fill(self.l1)
self.p.start_receive(self.r1)
p = self.p.poll(0)
self.close_socks()
try:
self.assertEquals([], list(p))
except select.error:
# a crash is also reasonable here.
pass
class TtyHangupMixin(PollerMixin):
    def test_tty_hangup_detected(self):
        # bug in initial select.poll() implementation failed to detect POLLHUP.
        master_fd, slave_fd = mitogen.parent.openpty()
        try:
            self.p.start_receive(master_fd)
            # Nothing is ready while the slave side is still open.
            self.assertEquals([], list(self.p.poll(0)))
            os.close(slave_fd)
            # Mark the slave as closed so the finally block below does not
            # close the same FD twice.
            slave_fd = None
            # Closing the slave hangs up the tty; the master must now be
            # reported ready so a reader can observe EOF.
            self.assertEquals([master_fd], list(self.p.poll(0)))
        finally:
            if slave_fd is not None:
                os.close(slave_fd)
            os.close(master_fd)
class DistinctDataMixin(PollerMixin, SockMixin):
    # Verify different data is yielded for the same FD according to the event
    # being raised.
    def test_one_distinct(self):
        rdata = object()
        wdata = object()
        self.p.start_receive(self.r1, data=rdata)
        self.p.start_transmit(self.r1, data=wdata)

        # Initially only writeable, so only the transmit-side data appears.
        self.assertEquals([wdata], list(self.p.poll(0)))
        self.fill(self.l1)  # r1 is now readable and writeable.
        self.assertEquals(set([rdata, wdata]), set(self.p.poll(0)))
class AllMixin(ReceiveStateMixin,
               TransmitStateMixin,
               ReadableMixin,
               WriteableMixin,
               MutateDuringYieldMixin,
               FileClosedMixin,
               DistinctDataMixin,
               PollMixin,
               TtyHangupMixin,
               CloseMixin):
    """
    Helper to avoid cutpasting mixin names below. Each concrete poller test
    case derives from this plus testlib.TestCase and sets ``klass``.
    """
class SelectTest(AllMixin, testlib.TestCase):
klass = mitogen.core.Poller
SelectTest = unittest2.skipIf(
condition=(not SelectTest.klass.SUPPORTED),
reason='select.select() not supported'
)(SelectTest)
class PollTest(AllMixin, testlib.TestCase):
klass = mitogen.parent.PollPoller
PollTest = unittest2.skipIf(
condition=(not PollTest.klass.SUPPORTED),
reason='select.poll() not supported'
)(PollTest)
class KqueueTest(AllMixin, testlib.TestCase):
klass = mitogen.parent.KqueuePoller
KqueueTest = unittest2.skipIf(
condition=(not KqueueTest.klass.SUPPORTED),
reason='select.kqueue() not supported'
)(KqueueTest)
class EpollTest(AllMixin, testlib.TestCase):
klass = mitogen.parent.EpollPoller
EpollTest = unittest2.skipIf(
condition=(not EpollTest.klass.SUPPORTED),
reason='select.epoll() not supported'
)(EpollTest)
# Run the whole module's test suite when executed directly.
if __name__ == '__main__':
    unittest2.main()
| 30.486301 | 79 | 0.621883 |
fa4bda5c23b5ae0a830e880a5f010d903822e4b3 | 12,041 | py | Python | idaes/generic_models/properties/core/state_definitions/FpcTP.py | eslickj/idaes-pse | 328ed07ffb0b4d98c03e972675ea32c41dd2531a | [
"RSA-MD"
] | 112 | 2019-02-11T23:16:36.000Z | 2022-03-23T20:59:57.000Z | idaes/generic_models/properties/core/state_definitions/FpcTP.py | eslickj/idaes-pse | 328ed07ffb0b4d98c03e972675ea32c41dd2531a | [
"RSA-MD"
] | 621 | 2019-03-01T14:44:12.000Z | 2022-03-31T19:49:25.000Z | idaes/generic_models/properties/core/state_definitions/FpcTP.py | eslickj/idaes-pse | 328ed07ffb0b4d98c03e972675ea32c41dd2531a | [
"RSA-MD"
] | 154 | 2019-02-01T23:46:33.000Z | 2022-03-23T15:07:10.000Z | #################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Methods for setting up FpcTP as the state variables in a generic property
package
"""
from pyomo.environ import \
Constraint, Expression, NonNegativeReals, Var, value, units as pyunits
from idaes.core import (MaterialFlowBasis,
MaterialBalanceType,
EnergyBalanceType)
from idaes.generic_models.properties.core.generic.utility import \
get_bounds_from_config
from .electrolyte_states import \
define_electrolyte_state, calculate_electrolyte_scaling
from idaes.core.util.exceptions import ConfigurationError
import idaes.logger as idaeslog
import idaes.core.util.scaling as iscale
# Set up logger
_log = idaeslog.getLogger(__name__)
def set_metadata(b):
    """Framework hook for adjusting property metadata.

    FpcTP keeps the generic defaults unchanged, so this is deliberately a
    no-op.
    """
def define_state(b):
# FpcTP contains full information on the phase equilibrium, so flash
# calculations re not always needed
b.always_flash = False
# Check that only necessary state_bounds are defined
expected_keys = ["flow_mol_phase_comp", "enth_mol",
"temperature", "pressure"]
if (b.params.config.state_bounds is not None and
any(b.params.config.state_bounds.keys()) not in expected_keys):
for k in b.params.config.state_bounds.keys():
if k not in expected_keys:
raise ConfigurationError(
"{} - found unexpected state_bounds key {}. Please ensure "
"bounds are provided only for expected state variables "
"and that you have typed the variable names correctly."
.format(b.name, k))
units = b.params.get_metadata().derived_units
# Get bounds and initial values from config args
f_bounds, f_init = get_bounds_from_config(
b, "flow_mol_phase_comp", units["flow_mole"])
t_bounds, t_init = get_bounds_from_config(
b, "temperature", units["temperature"])
p_bounds, p_init = get_bounds_from_config(
b, "pressure", units["pressure"])
# Add state variables
b.flow_mol_phase_comp = Var(b.phase_component_set,
initialize=f_init,
domain=NonNegativeReals,
bounds=f_bounds,
doc='Phase-component molar flowrate',
units=units["flow_mole"])
b.pressure = Var(initialize=p_init,
domain=NonNegativeReals,
bounds=p_bounds,
doc='State pressure',
units=units["pressure"])
b.temperature = Var(initialize=t_init,
domain=NonNegativeReals,
bounds=t_bounds,
doc='State temperature',
units=units["temperature"])
# Add supporting variables
b.flow_mol = Expression(
expr=sum(b.flow_mol_phase_comp[i]
for i in b.phase_component_set),
doc="Total molar flowrate")
def flow_mol_phase(b, p):
return sum(b.flow_mol_phase_comp[p, j]
for j in b.component_list
if (p, j) in b.phase_component_set)
b.flow_mol_phase = Expression(b.phase_list,
rule=flow_mol_phase,
doc='Phase molar flow rates')
def rule_flow_mol_comp(b, j):
return sum(b.flow_mol_phase_comp[p, j]
for p in b.phase_list
if (p, j) in b.phase_component_set)
b.flow_mol_comp = Expression(b.component_list,
rule=rule_flow_mol_comp,
doc='Component molar flow rates')
def mole_frac_comp(b, j):
return (sum(b.flow_mol_phase_comp[p, j]
for p in b.phase_list
if (p, j) in b.phase_component_set) / b.flow_mol)
b.mole_frac_comp = Expression(b.component_list,
rule=mole_frac_comp,
doc='Mixture mole fractions')
b.mole_frac_phase_comp = Var(
b.phase_component_set,
bounds=(1e-20, 1.001),
initialize=1/len(b.component_list),
doc='Phase mole fractions',
units=None)
def rule_mole_frac_phase_comp(b, p, j):
# Calcualting mole frac phase comp is degenerate if there is only one
# component in phase.
# Count components
comp_count = 0
for p1, j1 in b.phase_component_set:
if p1 == p:
comp_count += 1
if comp_count > 1:
return b.mole_frac_phase_comp[p, j] * b.flow_mol_phase[p] == \
b.flow_mol_phase_comp[p, j]
else:
return b.mole_frac_phase_comp[p, j] == 1
b.mole_frac_phase_comp_eq = Constraint(
b.phase_component_set, rule=rule_mole_frac_phase_comp)
def rule_phase_frac(b, p):
if len(b.phase_list) == 1:
return 1
else:
return b.flow_mol_phase[p] / b.flow_mol
b.phase_frac = Expression(
b.phase_list,
rule=rule_phase_frac,
doc='Phase fractions')
# Add electrolye state vars if required
if b.params._electrolyte:
define_electrolyte_state(b)
# -------------------------------------------------------------------------
# General Methods
def get_material_flow_terms_FpcTP(p, j):
"""Create material flow terms for control volume."""
return b.flow_mol_phase_comp[p, j]
b.get_material_flow_terms = get_material_flow_terms_FpcTP
def get_enthalpy_flow_terms_FpcTP(p):
"""Create enthalpy flow terms."""
# enth_mol_phase probably does not exist when this is created
# Use try/except to build flow term if not present
try:
eflow = b._enthalpy_flow_term
except AttributeError:
def rule_eflow(b, p):
return b.flow_mol_phase[p] * b.enth_mol_phase[p]
eflow = b._enthalpy_flow_term = Expression(
b.phase_list, rule=rule_eflow)
return eflow[p]
b.get_enthalpy_flow_terms = get_enthalpy_flow_terms_FpcTP
def get_material_density_terms_FpcTP(p, j):
"""Create material density terms."""
# dens_mol_phase probably does not exist when this is created
# Use try/except to build term if not present
try:
mdens = b._material_density_term
except AttributeError:
def rule_mdens(b, p, j):
return b.dens_mol_phase[p] * b.mole_frac_phase_comp[p, j]
mdens = b._material_density_term = Expression(
b.phase_component_set, rule=rule_mdens)
return mdens[p, j]
b.get_material_density_terms = get_material_density_terms_FpcTP
def get_energy_density_terms_FpcTP(p):
"""Create energy density terms."""
# Density and energy terms probably do not exist when this is created
# Use try/except to build term if not present
try:
edens = b._energy_density_term
except AttributeError:
def rule_edens(b, p):
return b.dens_mol_phase[p] * b.energy_internal_mol_phase[p]
edens = b._energy_density_term = Expression(
b.phase_list, rule=rule_edens)
return edens[p]
b.get_energy_density_terms = get_energy_density_terms_FpcTP
def default_material_balance_type_FpcTP():
return MaterialBalanceType.componentTotal
b.default_material_balance_type = default_material_balance_type_FpcTP
def default_energy_balance_type_FpcTP():
return EnergyBalanceType.enthalpyTotal
b.default_energy_balance_type = default_energy_balance_type_FpcTP
def get_material_flow_basis_FpcTP():
return MaterialFlowBasis.molar
b.get_material_flow_basis = get_material_flow_basis_FpcTP
def define_state_vars_FpcTP():
"""Define state vars."""
return {"flow_mol_phase_comp": b.flow_mol_phase_comp,
"temperature": b.temperature,
"pressure": b.pressure}
b.define_state_vars = define_state_vars_FpcTP
def define_display_vars_FpcTP():
"""Define display vars."""
return {"Molar Flowrate": b.flow_mol_phase_comp,
"Temperature": b.temperature,
"Pressure": b.pressure}
b.define_display_vars = define_display_vars_FpcTP
def state_initialization(b):
    """Seed each phase-component mole fraction from the ratio of its
    component flow to the total flow of its phase."""
    for phase_comp in b.phase_component_set:
        phase = phase_comp[0]
        frac = value(
            b.flow_mol_phase_comp[phase_comp] / b.flow_mol_phase[phase])
        b.mole_frac_phase_comp[phase_comp].value = frac
def define_default_scaling_factors(b):
"""
Method to set default scaling factors for the property package. Scaling
factors are based on the default initial value for each variable provided
in the state_bounds config argument.
"""
# Get bounds and initial values from config args
units = b.get_metadata().derived_units
state_bounds = b.config.state_bounds
if state_bounds is None:
return
try:
f_bounds = state_bounds["flow_mol_phase_comp"]
if len(f_bounds) == 4:
f_init = pyunits.convert_value(f_bounds[1],
from_units=f_bounds[3],
to_units=units["flow_mole"])
else:
f_init = f_bounds[1]
except KeyError:
f_init = 1
try:
p_bounds = state_bounds["pressure"]
if len(p_bounds) == 4:
p_init = pyunits.convert_value(p_bounds[1],
from_units=p_bounds[3],
to_units=units["pressure"])
else:
p_init = p_bounds[1]
except KeyError:
p_init = 1
try:
t_bounds = state_bounds["temperature"]
if len(t_bounds) == 4:
t_init = pyunits.convert_value(t_bounds[1],
from_units=t_bounds[3],
to_units=units["temperature"])
else:
t_init = t_bounds[1]
except KeyError:
t_init = 1
# Set default scaling factors
b.set_default_scaling("flow_mol", 1/f_init)
b.set_default_scaling("flow_mol_phase", 1/f_init)
b.set_default_scaling("flow_mol_comp", 1/f_init)
b.set_default_scaling("flow_mol_phase_comp", 1/f_init)
b.set_default_scaling("pressure", 1/p_init)
b.set_default_scaling("temperature", 1/t_init)
def calculate_scaling_factors(b):
    """Propagate the flow variable scaling onto the mole-fraction constraints."""
    for idx in b.phase_component_set:
        flow_sf = iscale.get_scaling_factor(
            b.flow_mol_phase_comp[idx], default=1, warning=True)
        iscale.constraint_scaling_transform(
            b.mole_frac_phase_comp_eq[idx], flow_sf, overwrite=False)

    # Electrolyte systems carry additional true/apparent species constraints.
    if b.params._electrolyte:
        calculate_electrolyte_scaling(b)
do_not_initialize = []
class FpcTP(object):
    """State definition bundle: phase-component molar flows (Fpc),
    temperature (T) and pressure (P) as the state variables.

    The generic property framework looks these callables up by attribute
    name, so this class simply namespaces the module-level functions above.
    """

    set_metadata = set_metadata
    define_state = define_state
    state_initialization = state_initialization
    do_not_initialize = do_not_initialize
    define_default_scaling_factors = define_default_scaling_factors
    calculate_scaling_factors = calculate_scaling_factors
| 38.717042 | 81 | 0.614318 |
6e742b9a38c5d55840e3a40f2750517afd450536 | 736 | py | Python | core/venom.py | p-panagiotis/venom-virtual-assistant | 40db8cceedb71934e708624141da8e641460feb7 | [
"MIT"
] | null | null | null | core/venom.py | p-panagiotis/venom-virtual-assistant | 40db8cceedb71934e708624141da8e641460feb7 | [
"MIT"
] | null | null | null | core/venom.py | p-panagiotis/venom-virtual-assistant | 40db8cceedb71934e708624141da8e641460feb7 | [
"MIT"
] | null | null | null | import sys
from core.configs import Configuration
from core.intents import welcome
from core.logs import Logger
from core.modules.input_mod import take_input
from core.modules.listen_mod import listen
from core.modules.output_mod import output
from core.modules.process_mod import process
cfg = None
def run():
    """Entry point: configure the assistant, greet the user, then answer
    queries forever (voice or keyboard, per configuration)."""
    global cfg

    # Load configuration, optionally from a file named on the command line.
    cfg = Configuration(filename=sys.argv[1] if len(sys.argv) > 1 else None)

    # Set up logging before anything else produces output.
    Logger().configure()

    welcome.greet(master=cfg["va.master"])

    while True:
        # Voice input when enabled, otherwise typed input.
        query = listen() if cfg["va.listen"] else take_input()
        output(process(query=query))
| 23 | 76 | 0.691576 |
71921c81fa9c543f5bc024eec280a0a8bc358985 | 1,985 | py | Python | src/cfnlint/rules/templates/Base.py | j0lly/cfn-python-lint | 3032bab8fe190763bd0df1c34905c3528ceb411f | [
"MIT-0"
] | 1 | 2019-03-19T22:49:38.000Z | 2019-03-19T22:49:38.000Z | src/cfnlint/rules/templates/Base.py | j0lly/cfn-python-lint | 3032bab8fe190763bd0df1c34905c3528ceb411f | [
"MIT-0"
] | null | null | null | src/cfnlint/rules/templates/Base.py | j0lly/cfn-python-lint | 3032bab8fe190763bd0df1c34905c3528ceb411f | [
"MIT-0"
] | 1 | 2020-05-04T16:32:19.000Z | 2020-05-04T16:32:19.000Z | """
Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from cfnlint import CloudFormationLintRule
from cfnlint import RuleMatch
class Base(CloudFormationLintRule):
    """Check Base Template Settings"""
    id = 'E1001'
    shortdesc = 'Basic CloudFormation Template Configuration'
    description = 'Making sure the basic CloudFormation template components are properly configured'
    source_url = 'https://github.com/awslabs/cfn-python-lint'
    tags = ['base']

    required_keys = [
        'Resources'
    ]

    def match(self, cfn):
        """Basic Matching"""
        matches = []

        # Record the sections actually present, flagging any unknown one.
        seen = list(cfn.template)
        for section in seen:
            if section not in cfn.sections:
                matches.append(RuleMatch(
                    [section],
                    'Top level item {0} isn\'t valid'.format(section)))

        # Flag each required section that is missing from the template.
        for required in self.required_keys:
            if required not in seen:
                matches.append(RuleMatch(
                    [required],
                    'Missing top level item {0} to file module'.format(required)))

        return matches
| 39.7 | 100 | 0.690176 |
8926e56139ffa4920903be26e85c580ca2377074 | 72 | py | Python | hello.py | lazyxu/pythonvm | 8c25acc6ee1e01a0bb65bb35aae987264d6876aa | [
"MIT"
] | null | null | null | hello.py | lazyxu/pythonvm | 8c25acc6ee1e01a0bb65bb35aae987264d6876aa | [
"MIT"
] | null | null | null | hello.py | lazyxu/pythonvm | 8c25acc6ee1e01a0bb65bb35aae987264d6876aa | [
"MIT"
] | null | null | null | i = 0
# Count upward from the current value of i; the first value past 3
# terminates the loop and is printed (4 for the initial i = 0).
for i in range(i + 1, 6):
    if i > 3:
        break
print(i)
| 8 | 13 | 0.375 |
a47ed9ddcc0c6f4acfa918695d52e9eed1a040ba | 110 | py | Python | src/utilities/data_structures/binary_heap.py | xproj2501x/ecs-python | 7947a9e8551c0f379ac59f57e0268b4054aaafae | [
"MIT"
] | null | null | null | src/utilities/data_structures/binary_heap.py | xproj2501x/ecs-python | 7947a9e8551c0f379ac59f57e0268b4054aaafae | [
"MIT"
] | null | null | null | src/utilities/data_structures/binary_heap.py | xproj2501x/ecs-python | 7947a9e8551c0f379ac59f57e0268b4054aaafae | [
"MIT"
] | null | null | null | class BinaryHeap:
    def __init__(self):
        # Backing array for the heap. NOTE(review): the name ``_size`` is
        # misleading -- it holds the heap's items, not an element count.
        self._size = []
def insert(self, data):
pass
| 12.222222 | 27 | 0.545455 |
d00661b0be7317ca2d5993b496320b0eaba6e2a6 | 3,284 | py | Python | 2020/day14.py | Ceridan/advent-of-code | e52dad16c9b5a82c341f76cc9d51d59ff3228ff6 | [
"MIT"
] | null | null | null | 2020/day14.py | Ceridan/advent-of-code | e52dad16c9b5a82c341f76cc9d51d59ff3228ff6 | [
"MIT"
] | null | null | null | 2020/day14.py | Ceridan/advent-of-code | e52dad16c9b5a82c341f76cc9d51d59ff3228ff6 | [
"MIT"
] | null | null | null | import itertools
import os
import re
from typing import List
def part1(instructions: List[str]) -> int:
    """Run the version-1 docking program: the current mask rewrites each
    value before it is stored; return the sum of all memory values."""
    memory = {}
    mask = 'X' * 36
    for line in instructions:
        if line.startswith('mask'):
            mask = re.findall(r'([01X]+)', line)[0]
        else:
            address, value = (int(v) for v in re.findall(r'(\d+)', line))
            memory[address] = _apply_mask_v1(mask, value)
    return sum(memory.values())
def part2(instructions: List[str]) -> int:
    """Run the version-2 docking program: the mask rewrites *addresses*,
    with floating 'X' bits expanding to every 0/1 combination."""
    memory = {}
    mask = 'X' * 36
    for line in instructions:
        if line.startswith('mask'):
            mask = re.findall(r'([01X]+)', line)[0]
        else:
            address, value = (int(v) for v in re.findall(r'(\d+)', line))
            for addr in _generate_addresses(_apply_mask_v2(mask, address)):
                memory[addr] = value
    return sum(memory.values())
def _apply_mask_v1(mask: str, value: int) -> int:
n = len(mask)
result = value
for i in range(n):
if mask[i] == '0':
result &= ~(2 ** (n - i - 1))
elif mask[i] == '1':
result |= 2 ** (n - i - 1)
return result
def _apply_mask_v2(mask: str, value: int) -> str:
bin_val = format(value, '#038b')[2:]
result = []
for m, v in zip(mask, bin_val):
if m == 'X':
result.append('X')
else:
result.append(str(int(m) | int(v)))
return ''.join(result)
def _generate_addresses(masked_address: str) -> List[int]:
if 'X' not in masked_address:
return [int(masked_address, 2)]
addresses = []
x_positions = []
for i, ch in enumerate(masked_address):
if ch == 'X':
x_positions.append(i)
products = itertools.product('01', repeat=len(x_positions))
next_address = list(masked_address)
for p in products:
for j, pos in enumerate(x_positions):
next_address[pos] = p[j]
addresses.append(int(''.join(next_address), 2))
return addresses
def test(expected, actual):
assert expected == actual, f'Expected: {expected}, Actual: {actual}'
# --- import-time self-checks using the Advent of Code example values ---
test(73, _apply_mask_v1('XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X', 11))
test(101, _apply_mask_v1('XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X', 101))
test(64, _apply_mask_v1('XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X', 0))
test('000000000000000000000000000000X1101X', _apply_mask_v2('000000000000000000000000000000X1001X', 42))
test('00000000000000000000000000000001X0XX', _apply_mask_v2('00000000000000000000000000000000X0XX', 26))
test([18, 19, 50, 51], _generate_addresses('000000000000000000000000000000X1001X'))
test(165, part1([
    'mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X',
    'mem[8] = 11',
    'mem[7] = 101',
    'mem[8] = 0',
]))
test(208, part2([
    'mask = 000000000000000000000000000000X1001X',
    'mem[42] = 100',
    'mask = 00000000000000000000000000000000X0XX',
    'mem[26] = 1',
]))
# Solve the real puzzle input, expected at data/day14.txt beside this script.
file_path = os.path.join(os.path.dirname(__file__), 'data/day14.txt')
with open(file_path, 'r') as f:
    input_data = [line.strip() for line in f.readlines()]
print('Day 14, part 1: %r' % (part1(input_data)))
print('Day 14, part 2: %r' % (part2(input_data)))
| 28.068376 | 104 | 0.624543 |
50f53979250345a40028d796d5e43c6da5130913 | 2,787 | py | Python | mspray/apps/mda/urls.py | onaio/mspray | b3e0f4b5855abbf0298de6b66f2e9f472f2bf838 | [
"Apache-2.0"
] | null | null | null | mspray/apps/mda/urls.py | onaio/mspray | b3e0f4b5855abbf0298de6b66f2e9f472f2bf838 | [
"Apache-2.0"
] | 76 | 2018-03-15T09:37:56.000Z | 2019-05-15T12:45:51.000Z | mspray/apps/mda/urls.py | onaio/mspray | b3e0f4b5855abbf0298de6b66f2e9f472f2bf838 | [
"Apache-2.0"
] | 1 | 2020-10-31T07:15:22.000Z | 2020-10-31T07:15:22.000Z | # -*- coding: utf-8 -*-
"""MDA urls"""
from django.conf.urls import include
from django.urls import path, re_path
from rest_framework import routers
from mspray.apps.main.views import (
districts,
household,
household_buffer,
performance,
sprayday,
target_area,
)
from mspray.apps.main.views.mopup import HealthFacilityMopUpView, MopUpView
from mspray.apps.mda.views.index import MDALocationView, MDAView
from mspray.apps.mda.views.map import MapView
from mspray.apps.mda.views.spray_area import SprayAreaView
app_name = "mda" # pylint: disable=invalid-name
router = routers.DefaultRouter(trailing_slash=False) # pylint: disable=C0103
router.register(r"buffers", household_buffer.HouseholdBufferViewSet)
router.register(r"districts", districts.DistrictViewSet, "district")
router.register(r"households", household.HouseholdViewSet)
router.register(r"spraydays", sprayday.SprayDayViewSet)
router.register(r"targetareas", target_area.TargetAreaViewSet)
performance_urls = ( # pylint: disable=C0103
[
path(
"",
performance.MDADistrictPerfomanceView.as_view(),
name="districts",
),
path(
"rhcs/<int:district_id>",
performance.RHCPerformanceView.as_view(),
name="rhcs",
),
path(
"team-leaders/<int:slug>",
performance.TeamLeadersPerformanceView.as_view(),
name="team-leaders",
),
path(
"spray-operators/<int:rhc_id>/summary",
performance.MDASprayOperatorSummaryView.as_view(),
name="spray-operator-summary",
),
path(
"spray-operators/<int:rhc_id>/<int:spray_operator>/daily",
performance.MDASprayOperatorDailyView.as_view(),
name="spray-operator-daily",
),
path(
"definitions-and-conditions",
performance.DefinitionAndConditionView.as_view(),
name="definitions-and-conditions",
),
],
"mspray",
)
# Root URL table for the MDA app; named routes are reversed through the
# "mda" application namespace declared by ``app_name`` above.
urlpatterns = [  # pylint: disable=invalid-name
    path("", MDAView.as_view(), name="index"),
    path("<int:location>", MDALocationView.as_view(), name="location"),
    path("<int:district_pk>/<int:slug>", MapView.as_view(), name="spray-area"),
    # Performance pages are mounted under their own instance namespace.
    path("performance/", include(performance_urls, namespace="performance")),
    path("api/", include(router.urls)),
    path("sprayareas", SprayAreaView.as_view(), name="sprayareas"),
    re_path(
        # NOTE(review): ``[csv]{3}`` matches any 3-letter mix of c/s/v
        # (e.g. "ccc"), not only "csv" -- confirm this shorthand is intended.
        r"^sprayareas\.(?P<format>[csv]{3})$",
        SprayAreaView.as_view(),
        name="sprayareas-csv",
    ),
    # NOTE(review): two routes share the name "mop-up"; Django resolves
    # reversals by trying each candidate's arguments, so this works, but
    # distinct names would be less surprising.
    path("mop-up", MopUpView.as_view(), name="mop-up"),
    path(
        "mop-up/<int:district>",
        HealthFacilityMopUpView.as_view(),
        name="mop-up",
    ),
]
| 32.406977 | 79 | 0.643703 |
17faf253e80cc90c47a5ac57bce8a89c6e565ee4 | 5,891 | py | Python | cupy/cuda/__init__.py | kalvdans/cupy | 07a052f90811349501bda2320a04a57705608ce9 | [
"MIT"
] | 1 | 2020-12-27T13:06:35.000Z | 2020-12-27T13:06:35.000Z | cupy/cuda/__init__.py | trivialfis/cupy | e0f0b3bba0fa1e809780350a4562b2ed1d1fe024 | [
"MIT"
] | 4 | 2020-09-15T01:49:38.000Z | 2020-12-11T03:52:35.000Z | cupy/cuda/__init__.py | trivialfis/cupy | e0f0b3bba0fa1e809780350a4562b2ed1d1fe024 | [
"MIT"
] | 2 | 2018-07-21T13:44:26.000Z | 2019-03-25T11:30:16.000Z | import contextlib
import warnings
from cupy._environment import get_cuda_path, get_nvcc_path # NOQA
from cupy.cuda import compiler # NOQA
from cupy.cuda import device # NOQA
from cupy.cuda import function # NOQA
from cupy.cuda import memory # NOQA
from cupy.cuda import memory_hook # NOQA
from cupy.cuda import memory_hooks # NOQA
from cupy.cuda import pinned_memory # NOQA
from cupy.cuda import stream # NOQA
from cupy.cuda import texture # NOQA
from cupy_backends.cuda.api import driver # NOQA
from cupy_backends.cuda.api import runtime # NOQA
from cupy_backends.cuda.libs import cublas # NOQA
from cupy_backends.cuda.libs import curand # NOQA
from cupy_backends.cuda.libs import cusolver # NOQA
from cupy_backends.cuda.libs import cusparse # NOQA
from cupy_backends.cuda.libs import nvrtc # NOQA
from cupy_backends.cuda.libs import profiler # NOQA
# Cache for is_available(); ``None`` means the CUDA device probe has not run yet.
_available = None
class _UnavailableModule():
available = False
def __init__(self, name):
self.__name__ = name
# TODO(leofang): always import cub (but not enable it) when hipCUB is supported
if not runtime.is_hip:
    from cupy.cuda import cub # NOQA
else:
    cub = _UnavailableModule('cupy.cuda.cub')
# Optional extension modules: when one was not built/installed, bind the name
# to an _UnavailableModule stub (``available = False``) instead of failing.
try:
    from cupy.cuda import nvtx # NOQA
except ImportError:
    nvtx = _UnavailableModule('cupy.cuda.nvtx')
try:
    from cupy.cuda import thrust # NOQA
except ImportError:
    thrust = _UnavailableModule('cupy.cuda.thrust')
try:
    from cupy.cuda import nccl # NOQA
except ImportError:
    nccl = _UnavailableModule('cupy.cuda.nccl')
try:
    from cupy_backends.cuda.libs import cutensor
except ImportError:
    cutensor = _UnavailableModule('cupy.cuda.cutensor')
def __getattr__(key):
    """Resolve deprecated ``*_enabled`` module flags (module-level
    ``__getattr__`` only runs on Python 3.7+); kept for backward
    compatibility with pre-v8 CuPy."""
    if key == 'cusolver_enabled':
        # cuSOLVER ships with every supported CUDA (8.0+), hence always True.
        warnings.warn('''
cupy.cuda.cusolver_enabled has been deprecated in CuPy v8 and will be removed in the future release.
This flag always returns True as cuSOLVER is always available in CUDA 8.0 or later.
''', DeprecationWarning) # NOQA
        return True
    for module in (nvtx, nccl, thrust, cub, cutensor):
        deprecated_flag = '{}_enabled'.format(module.__name__.split('.')[-1])
        if key != deprecated_flag:
            continue
        warnings.warn('''
cupy.cuda.{} has been deprecated in CuPy v8 and will be removed in the future release.
Use {}.available instead.
'''.format(deprecated_flag, module.__name__), DeprecationWarning) # NOQA
        # Real modules report True; _UnavailableModule stubs report False.
        return not isinstance(module, _UnavailableModule)
    raise AttributeError(
        "module '{}' has no attribute '{}'".format(__name__, key))
def is_available():
    """Return whether at least one CUDA-capable device is present.

    The probe result is cached in the module-level ``_available`` flag, so
    the runtime is queried only once per process.
    """
    global _available
    if _available is not None:
        return _available
    _available = False
    try:
        _available = runtime.getDeviceCount() > 0
    except Exception as exc:
        # "No device" is expected and simply reported as unavailable; any
        # other runtime failure is propagated.
        if (exc.args[0] !=
                'cudaErrorNoDevice: no CUDA-capable device is detected'):
            raise
    return _available
# import class and function
from cupy.cuda.compiler import compile_with_cache # NOQA
from cupy.cuda.device import Device # NOQA
from cupy.cuda.device import get_cublas_handle # NOQA
from cupy.cuda.device import get_device_id # NOQA
from cupy.cuda.function import Function # NOQA
from cupy.cuda.function import Module # NOQA
from cupy.cuda.memory import alloc # NOQA
from cupy.cuda.memory import BaseMemory # NOQA
from cupy.cuda.memory import malloc_managed # NOQA
from cupy.cuda.memory import ManagedMemory # NOQA
from cupy.cuda.memory import Memory # NOQA
from cupy.cuda.memory import MemoryPointer # NOQA
from cupy.cuda.memory import MemoryPool # NOQA
from cupy.cuda.memory import PythonFunctionAllocator # NOQA
from cupy.cuda.memory import set_allocator # NOQA
from cupy.cuda.memory import get_allocator # NOQA
from cupy.cuda.memory import UnownedMemory # NOQA
from cupy.cuda.memory_hook import MemoryHook # NOQA
from cupy.cuda.pinned_memory import alloc_pinned_memory # NOQA
from cupy.cuda.pinned_memory import PinnedMemory # NOQA
from cupy.cuda.pinned_memory import PinnedMemoryPointer # NOQA
from cupy.cuda.pinned_memory import PinnedMemoryPool # NOQA
from cupy.cuda.pinned_memory import set_pinned_memory_allocator # NOQA
from cupy.cuda.stream import Event # NOQA
from cupy.cuda.stream import get_current_stream # NOQA
from cupy.cuda.stream import get_elapsed_time # NOQA
from cupy.cuda.stream import Stream # NOQA
from cupy.cuda.stream import ExternalStream # NOQA
@contextlib.contextmanager
def using_allocator(allocator=None):
    """Temporarily install *allocator* as the thread-local GPU allocator.

    Args:
        allocator (function): CuPy memory allocator with the same interface
            as :func:`cupy.cuda.alloc` (takes a byte count and returns a
            device buffer of that size).  ``None`` selects the raw
            allocator, i.e. the memory pool is disabled.
    """
    # Implemented here rather than in cupy/memory.pyx because
    # `contextmanager` decoration doesn't behave well in Cython.
    new_allocator = memory._malloc if allocator is None else allocator
    saved_allocator = memory._get_thread_local_allocator()
    memory._set_thread_local_allocator(new_allocator)
    try:
        yield
    finally:
        # Always restore the previous allocator, even if the body raised.
        memory._set_thread_local_allocator(saved_allocator)
@contextlib.contextmanager
def profile():
    """Enable CUDA profiling during with statement.

    This function enables profiling on entering a with statement, and disables
    profiling on leaving the statement.

    >>> with cupy.cuda.profile():
    ...     # do something you want to measure
    ...     pass
    """
    profiler.start()
    try:
        yield
    finally:
        # Guarantee the profiler is stopped even when the body raises.
        profiler.stop()
| 34.652941 | 100 | 0.7211 |
71996117aad08baad13c78005e99de5d470893ce | 467 | py | Python | tests/sql_in_python_variables_example.py | AzisK/readsql | 8a26e87df1bc74756a675b54acdb563b004a7384 | [
"MIT"
] | 62 | 2020-08-19T17:09:34.000Z | 2022-02-04T13:35:55.000Z | tests/sql_in_python_variables_example.py | AzisK/readsql | 8a26e87df1bc74756a675b54acdb563b004a7384 | [
"MIT"
] | null | null | null | tests/sql_in_python_variables_example.py | AzisK/readsql | 8a26e87df1bc74756a675b54acdb563b004a7384 | [
"MIT"
def get_sql():
    """Build a sample SELECT whose LIMIT is interpolated from a local."""
    row_cap = 6
    return f"SELEct speed from world where animal='dolphin' limit {row_cap}"
def get_query_template():
    """Build a sample GROUP BY query with an interpolated LIMIT."""
    row_cap = 6
    return f"SELEct speed from world where animal='dolphin' group by family limit {row_cap}"
def get_query():
    """Build a sample query with a NULL filter and an interpolated LIMIT."""
    row_cap = 99
    return (
        "SELEct speed from world where animal='dolphin' and name is not null "
        f"group by family limit {row_cap}"
    )
| 24.578947 | 112 | 0.670236 |
0f60cea830424eb8e4ae4c3185d8a7ca25b84a8f | 584 | py | Python | dino/sizes.py | 00ff0000red/raw-wasm | a2b2fa9c826c5df51bb1b6bb07739bd5f7eacd61 | [
"MIT"
] | 314 | 2019-05-30T06:35:03.000Z | 2022-03-12T21:44:04.000Z | dino/sizes.py | 00ff0000red/raw-wasm | a2b2fa9c826c5df51bb1b6bb07739bd5f7eacd61 | [
"MIT"
] | 3 | 2019-06-15T17:36:28.000Z | 2021-01-23T00:42:11.000Z | dino/sizes.py | 00ff0000red/raw-wasm | a2b2fa9c826c5df51bb1b6bb07739bd5f7eacd61 | [
"MIT"
import math

# (frame name, width, height, alpha) for each sprite packed into the atlas.
sizes = [
    ('dead', 20, 22, 83),
    ('stand', 20, 22, 83),
    ('run1', 20, 22, 83),
    ('run2', 20, 22, 83),
    ('duck1', 28, 13, 83),
    ('duck2', 28, 13, 83),
    ('cactus1', 13, 26, 83),
    ('cactus2', 19, 18, 83),
    ('cactus3', 28, 18, 83),
    ('cactus4', 9, 18, 83),
    ('cactus5', 40, 26, 83),
    ('cloud', 26, 8, 83),
    ('ground1', 32, 5, 83),
    ('ground2', 32, 5, 83),
    ('ground3', 32, 5, 83),
]

addr = 0
for name, w, h, alpha in sizes:
    # Emit a WAT comment with the sprite's running offset and dimensions.
    print(';; %+d %s.ppm %d %d' % (addr, name, w, h))
    # Fix: use a raw string — '\%' is an invalid escape sequence (it emits a
    # SyntaxWarning/DeprecationWarning on modern CPython).  The printed text,
    # a WAT data-string literal of the 3-byte header, is unchanged.
    print(r'"\%02x\%02x\%02x"' % (w, h, alpha))
    # Advance by 3 header bytes plus the bitmap, rounded up to whole bytes.
    addr += math.ceil((w * h) / 8 + 3)
| 22.461538 | 51 | 0.467466 |
e35f7c828c9bc97d77b9df140bead122e920e3bf | 847 | py | Python | tests/chemistry/test_morgan_fingerprint.py | StephenSchroeder/pyjanitor | 56ffe6048ffbcedc1680f52f26547e0d09053d26 | [
"MIT"
] | null | null | null | tests/chemistry/test_morgan_fingerprint.py | StephenSchroeder/pyjanitor | 56ffe6048ffbcedc1680f52f26547e0d09053d26 | [
"MIT"
] | null | null | null | tests/chemistry/test_morgan_fingerprint.py | StephenSchroeder/pyjanitor | 56ffe6048ffbcedc1680f52f26547e0d09053d26 | [
"MIT"
] | null | null | null | import importlib
import pytest
import janitor.chemistry # noqa: disable=unused-import
@pytest.mark.skipif(
    importlib.util.find_spec("rdkit") is None,
    reason="rdkit tests only required for CI",
)
@pytest.mark.chemistry
def test_morgan_fingerprint_counts(chemdf):
    """Counts-style Morgan fingerprints are 2048 wide and non-negative."""
    fingerprints = chemdf.smiles2mol("smiles", "mol").morgan_fingerprint(
        "mol", kind="counts"
    )
    assert fingerprints.shape == (10, 2048)
    assert (fingerprints.values >= 0).all()
@pytest.mark.skipif(
    importlib.util.find_spec("rdkit") is None,
    reason="rdkit tests only required for CI",
)
@pytest.mark.chemistry
def test_morgan_fingerprint_bits(chemdf):
    """Bit-style Morgan fingerprints are 2048 wide and contain only 0/1."""
    fingerprints = chemdf.smiles2mol("smiles", "mol").morgan_fingerprint(
        "mol", kind="bits"
    )
    assert fingerprints.shape == (10, 2048)
    assert set(fingerprints.values.flatten().tolist()) == {0, 1}
| 26.46875 | 68 | 0.690673 |
8d5505f61d84857286b67a3603351adfc3259492 | 399 | py | Python | PythonChallenge/Ex31/31_04.py | YorkFish/git_study | 6e023244daaa22e12b24e632e76a13e5066f2947 | [
"MIT"
] | null | null | null | PythonChallenge/Ex31/31_04.py | YorkFish/git_study | 6e023244daaa22e12b24e632e76a13e5066f2947 | [
"MIT"
] | null | null | null | PythonChallenge/Ex31/31_04.py | YorkFish/git_study | 6e023244daaa22e12b24e632e76a13e5066f2947 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# coding:utf-8
from PIL import Image
# Compare the two Mandelbrot renders pixel-by-pixel, keeping only the
# positions whose values differ (this hides the hidden message in the diff).
img1 = Image.open("mandelbrot.gif")
img2 = Image.open("newMandelbrot.gif")
diff = [(i-j) for i,j in zip(img1.getdata(), img2.getdata()) if i != j]
# Threshold to pure black/white: 255 where the delta is positive, else 0.
filterdiff = [(0, 255)[e>0] for e in diff] # [i>0 and 255 or 0 for i in diff]
# Render as a 23x73 grayscale image — assumes len(filterdiff) == 23*73,
# which is the known layout for this puzzle.
plot = Image.new('L', (23, 73)) # 'F', 'L', 'P'
plot.putdata(filterdiff)
# plot.show()
plot.save("plot.gif")
| 26.6 | 78 | 0.631579 |
1511878b830e6281d248f01d2152353c1ebf2f3a | 4,518 | py | Python | limbo/plugins/mlb.py | uilab-github/slask | aded42ac86484e24cd4fa437d8f0ce54fe738695 | [
"MIT"
] | null | null | null | limbo/plugins/mlb.py | uilab-github/slask | aded42ac86484e24cd4fa437d8f0ce54fe738695 | [
"MIT"
] | null | null | null | limbo/plugins/mlb.py | uilab-github/slask | aded42ac86484e24cd4fa437d8f0ce54fe738695 | [
"MIT"
] | 1 | 2015-02-09T08:28:08.000Z | 2015-02-09T08:28:08.000Z | # -*- coding: utf-8 -*-
"""!mlb <team> will return that team's upcoming games"""
from datetime import datetime
import re
import requests
from bs4 import BeautifulSoup as Soup
# Map of lowercase MLB team name -> ESPN schedule page URL.  Lookups in
# schedule() use case-insensitive substring matching, so "cubs" works.
schedules = {
    'arizona diamondbacks': 'http://espn.go.com/mlb/team/schedule/_/name/ari/arizona-diamondbacks',
    'atlanta braves': 'http://espn.go.com/mlb/team/schedule/_/name/atl/atlanta-braves',
    'baltimore orioles': 'http://espn.go.com/mlb/team/schedule/_/name/bal/baltimore-orioles',
    'boston red sox': 'http://espn.go.com/mlb/team/schedule/_/name/bos/boston-red-sox',
    'chicago cubs': 'http://espn.go.com/mlb/team/schedule/_/name/chc/chicago-cubs',
    'chicago white sox': 'http://espn.go.com/mlb/team/schedule/_/name/chw/chicago-white-sox',
    'cincinnati reds': 'http://espn.go.com/mlb/team/schedule/_/name/cin/cincinnati-reds',
    'cleveland indians': 'http://espn.go.com/mlb/team/schedule/_/name/cle/cleveland-indians',
    'colorado rockies': 'http://espn.go.com/mlb/team/schedule/_/name/col/colorado-rockies',
    'detroit tigers': 'http://espn.go.com/mlb/team/schedule/_/name/det/detroit-tigers',
    'houston astros': 'http://espn.go.com/mlb/team/schedule/_/name/hou/houston-astros',
    'kansas city royals': 'http://espn.go.com/mlb/team/schedule/_/name/kc/kansas-city-royals',
    'los angeles angels': 'http://espn.go.com/mlb/team/schedule/_/name/laa/los-angeles-angels',
    'los angeles dodgers': 'http://espn.go.com/mlb/team/schedule/_/name/lad/los-angeles-dodgers',
    'miami marlins': 'http://espn.go.com/mlb/team/schedule/_/name/mia/miami-marlins',
    'milwaukee brewers': 'http://espn.go.com/mlb/team/schedule/_/name/mil/milwaukee-brewers',
    'minnesota twins': 'http://espn.go.com/mlb/team/schedule/_/name/min/minnesota-twins',
    'new york mets': 'http://espn.go.com/mlb/team/schedule/_/name/nym/new-york-mets',
    'new york yankees': 'http://espn.go.com/mlb/team/schedule/_/name/nyy/new-york-yankees',
    'oakland athletics': 'http://espn.go.com/mlb/team/schedule/_/name/oak/oakland-athletics',
    'philadelphia phillies': 'http://espn.go.com/mlb/team/schedule/_/name/phi/philadelphia-phillies',
    'pittsburgh pirates': 'http://espn.go.com/mlb/team/schedule/_/name/pit/pittsburgh-pirates',
    'san diego padres': 'http://espn.go.com/mlb/team/schedule/_/name/sd/san-diego-padres',
    'san francisco giants': 'http://espn.go.com/mlb/team/schedule/_/name/sf/san-francisco-giants',
    'seattle mariners': 'http://espn.go.com/mlb/team/schedule/_/name/sea/seattle-mariners',
    'st. louis cardinals': 'http://espn.go.com/mlb/team/schedule/_/name/stl/st-louis-cardinals',
    'tampa bay rays': 'http://espn.go.com/mlb/team/schedule/_/name/tb/tampa-bay-rays',
    'texas rangers': 'http://espn.go.com/mlb/team/schedule/_/name/tex/texas-rangers',
    'toronto blue jays': 'http://espn.go.com/mlb/team/schedule/_/name/tor/toronto-blue-jays',
    'washington nationals': 'http://espn.go.com/mlb/team/schedule/_/name/wsh/washington-nationals'
}
def fmtdatetime(dt):
    """Format *dt* as ``MM/DD <hour><AM|PM>``, e.g. ``07/04 3PM``.

    The 12-hour value has its leading zero dropped; month/day keep theirs.
    """
    hour_12 = dt.strftime("%I").lstrip("0")
    return dt.strftime("%m/%d ") + hour_12 + dt.strftime("%p")
def schedule(query):
    """Return the next three games for the team matching *query*.

    Performs a case-insensitive substring match against the ``schedules``
    keys, scrapes the matching ESPN schedule page, and formats the next
    three future games.  Returns an "Unable to find" message when no team
    name contains *query*.
    """
    url = None
    query = query.lower()
    for team in schedules:
        if query in team:
            url = schedules[team]
            break
    if not url:
        return "Unable to find {0}".format(query)
    r = requests.get(url)
    soup = Soup(r.text, "html5lib")
    sched = soup.find("table", attrs={"class": "tablehead"})
    games = []
    for row in sched.findAll("tr")[2:]:
        # month name rows have OPPONENT in them
        if "OPPONENT" in row.text:
            continue
        rawdt, rawopp, time = [t.text for t in row.findAll("td")][0:3]
        yr = datetime.strftime(datetime.now(), "%Y")
        try:
            dt = datetime.strptime(
                "{0} {1} {2}".format(rawdt, yr, time), "%a, %b %d %Y %I:%M %p")
        except ValueError:
            # Some games are TBA (no kickoff time).  Bug fix: parse the raw
            # date cell; the old code passed `dt`, which either raised
            # (undefined on the first row) or reused a stale datetime.
            dt = datetime.strptime(rawdt, "%a, %b %d")
        # Away games come as "@...", which is fine.  Home games carry a
        # leading "vs".  Bug fix: str.lstrip("vs") strips any run of the
        # characters 'v'/'s' (mangling opponents starting with them), so
        # remove the exact two-character prefix instead.
        opp = rawopp[2:] if rawopp.startswith("vs") else rawopp
        games.append((dt, opp))
    next3 = [
        "{0} {1}".format(fmtdatetime(dt), opp)
        for dt, opp
        in games
        if dt > datetime.now()
    ][:3]
    return "{0}: ".format(team.title()) + " :baseball: ".join(next3)
def on_message(msg, server):
    """Reply with a team schedule when the message contains ``!mlb <team>``."""
    matched = re.search(r"!mlb (.*)", msg.get("text", ""))
    if matched is None:
        # Not an !mlb command; stay silent.
        return
    return schedule(matched.group(1))
| 48.06383 | 101 | 0.651173 |
9ddaa03428dbfb50e3afe6f5c1e9b99a6dc38782 | 1,354 | py | Python | mayan/apps/acls/classes.py | edsonbin/maxacali | 1fd3ac99543788f77f1a7795981179b2cc8c4421 | [
"Apache-2.0"
] | null | null | null | mayan/apps/acls/classes.py | edsonbin/maxacali | 1fd3ac99543788f77f1a7795981179b2cc8c4421 | [
"Apache-2.0"
] | null | null | null | mayan/apps/acls/classes.py | edsonbin/maxacali | 1fd3ac99543788f77f1a7795981179b2cc8c4421 | [
"Apache-2.0"
] | 2 | 2020-02-24T21:02:31.000Z | 2021-01-05T23:52:01.000Z | from __future__ import unicode_literals, absolute_import
import logging
from django.apps import apps
logger = logging.getLogger(__name__)
class ModelPermission(object):
    """Registry tying model classes to the permissions that can apply to
    their instances, with proxy-model and ACL-inheritance bookkeeping."""
    _registry = {}
    _proxies = {}
    _inheritances = {}

    @classmethod
    def register(cls, model, permissions):
        """Associate an iterable of permissions with ``model``."""
        cls._registry.setdefault(model, []).extend(permissions)

    @classmethod
    def get_for_instance(cls, instance):
        """Return the StoredPermission queryset valid for ``instance``."""
        StoredPermission = apps.get_model(
            app_label='permissions', model_name='StoredPermission'
        )
        model = type(instance)
        applicable = []
        direct = cls._registry.get(model)
        if direct:
            applicable.extend(direct)
        proxied_source = cls._proxies.get(model)
        if proxied_source:
            # NOTE(review): assumes the proxy's source model is registered;
            # an unregistered source would make this extend(None) and raise.
            applicable.extend(cls._registry.get(proxied_source))
        # De-duplicate before collecting primary keys.
        pk_list = [
            permission.stored_permission.pk for permission in set(applicable)
        ]
        return StoredPermission.objects.filter(pk__in=pk_list)

    @classmethod
    def register_proxy(cls, source, model):
        """Record that ``model`` proxies ``source`` for permission lookups."""
        cls._proxies[model] = source

    @classmethod
    def register_inheritance(cls, model, related):
        """Record the related-field path ACLs are inherited through."""
        cls._inheritances[model] = related

    @classmethod
    def get_inheritance(cls, model):
        """Return the registered inheritance path; KeyError if absent."""
        return cls._inheritances[model]
| 25.54717 | 82 | 0.672083 |
98af4e6130677ad8ef848649da67d94537aa59e8 | 4,456 | py | Python | azure/mgmt/compute/v2017_03_30/operations/virtual_machine_sizes_operations.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1 | 2022-01-25T22:52:58.000Z | 2022-01-25T22:52:58.000Z | azure/mgmt/compute/v2017_03_30/operations/virtual_machine_sizes_operations.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | azure/mgmt/compute/v2017_03_30/operations/virtual_machine_sizes_operations.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
# NOTE: AutoRest-generated client code (see file header); edits here will be
# lost on regeneration, so only documentation is adjusted.
class VirtualMachineSizesOperations(object):
    """VirtualMachineSizesOperations operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client Api Version. Constant value: "2017-03-30".
    """

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.api_version = "2017-03-30"
        self.config = config

    def list(
            self, location, custom_headers=None, raw=False, **operation_config):
        """Lists all available virtual machine sizes for a subscription in a
        location.

        :param location: The location upon which virtual-machine-sizes is
         queried.
        :type location: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of :class:`VirtualMachineSize
         <azure.mgmt.compute.v2017_03_30.models.VirtualMachineSize>`
        :rtype: :class:`VirtualMachineSizePaged
         <azure.mgmt.compute.v2017_03_30.models.VirtualMachineSizePaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Paging callback: invoked lazily by VirtualMachineSizePaged with the
        # service-provided next_link to fetch each page.
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/vmSizes'
                path_format_arguments = {
                    'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                # Follow-up pages: the next_link is already a complete URL.
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)
            # Anything other than 200 is surfaced as a CloudError with the
            # service's request id attached for support/diagnostics.
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.VirtualMachineSizePaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.VirtualMachineSizePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
| 41.64486 | 144 | 0.633752 |
568650581a94318d5f390fda7c310671d62bf57e | 1,283 | py | Python | setup.py | initios/aeat-web-services | 3e9533c6f5675df679ea6b42b07304ba938ebcb5 | [
"MIT"
] | 7 | 2018-01-04T10:57:54.000Z | 2021-07-30T09:56:22.000Z | setup.py | initios/aeat-web-services | 3e9533c6f5675df679ea6b42b07304ba938ebcb5 | [
"MIT"
] | 18 | 2018-01-04T10:27:01.000Z | 2018-02-20T13:56:19.000Z | setup.py | initios/aeat-web-services | 3e9533c6f5675df679ea6b42b07304ba938ebcb5 | [
"MIT"
] | 2 | 2018-11-07T09:07:50.000Z | 2020-12-10T09:47:00.000Z | #!/usr/bin/env python
# Copyright 2018 Initios Desarrollos
#
# All rights reserved
import os
import setuptools
base_dir = os.path.dirname(__file__)
# Package metadata lives in src/aeat/__about__.py; exec it into a dict so
# setup() stays in sync with the package's declared version/author/etc.
about = {}
with open(os.path.join(base_dir, 'src', 'aeat', '__about__.py')) as f:
    exec(f.read(), about)
with open(os.path.join(base_dir, 'README.rst')) as f:
    long_description = f.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
# https://packaging.python.org/tutorials/distributing-packages/
setuptools.setup(
    name=about['__title__'],
    version=about['__version__'],
    description=about['__summary__'],
    long_description=long_description,
    license=about['__license__'],
    url=about['__uri__'],
    author=about['__author__'],
    author_email=about['__email__'],
    # Declared support window: CPython 3.4 through 3.6.
    python_requires='>=3.4,<3.7',
    install_requires=[
        'lxml>=4.1.1,<4.2.0',
        'xmlsec>=1.3.3,<1.4.0',
        'zeep>=2.4.0,<2.5.0',
    ],
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.6',
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: MIT License',
    ],
    packages=['aeat', 'aeat.rest_framework'],
    package_dir={'': 'src'},
)
| 22.910714 | 78 | 0.638348 |
4482e52a5711b5cd9f93597edbad77bce68ca735 | 21,523 | py | Python | beta_rec/models/sgl.py | Alva112358/beta-recsys | c245d0d826132a1859987a5a4ac85869019d6686 | [
"MIT"
] | null | null | null | beta_rec/models/sgl.py | Alva112358/beta-recsys | c245d0d826132a1859987a5a4ac85869019d6686 | [
"MIT"
] | null | null | null | beta_rec/models/sgl.py | Alva112358/beta-recsys | c245d0d826132a1859987a5a4ac85869019d6686 | [
"MIT"
] | null | null | null | import numpy as np
import scipy.sparse as sp
import torch
import torch.nn as nn
from beta_rec.models.torch_engine import ModelEngine
def randint_choice(high, size=None, replace=True, p=None, exclusion=None):
    """Return random integers from `0` (inclusive) to `high` (exclusive).

    Arguments mirror ``np.random.choice``; ``exclusion`` is a collection of
    indices whose sampling probability is forced to zero.
    """
    candidates = np.arange(high)
    weights = p
    if exclusion is not None:
        # Zero out excluded indices on a private copy of the weights.
        if weights is None:
            weights = np.ones_like(candidates)
        else:
            weights = np.array(weights, copy=True)
        weights = weights.flatten()
        weights[exclusion] = 0
    if weights is not None:
        # np.random.choice requires probabilities summing to one.
        weights = weights / np.sum(weights)
    return np.random.choice(candidates, size=size, replace=replace, p=weights)
def l2_norm(user_emb1):
    """Scale each row of the 2-D tensor *user_emb1* to unit L2 length."""
    row_norms = torch.norm(user_emb1, 2, 1)
    # Broadcasting over rows is equivalent to the transpose-divide-transpose
    # trick and performs the same elementwise divisions.
    return user_emb1 / row_norms.unsqueeze(1)
class calc_ssl_loss(nn.Module):
    """Contrastive self-supervised loss over two augmented graph views.

    Positive pairs are the same user/item under the two views; negatives
    are the *other users/items in the batch* (in-batch negatives only —
    contrast with calc_ssl_loss_v2, which uses the full tables).
    """
    def __init__(self, ssl_reg, ssl_temp):
        super(calc_ssl_loss, self).__init__()
        # ssl_reg: weight of the SSL term; ssl_temp: softmax temperature.
        self.ssl_reg = ssl_reg
        self.ssl_temp = ssl_temp
    def forward(
        self,
        ua_embeddings_sub1,
        ua_embeddings_sub2,
        ia_embeddings_sub1,
        ia_embeddings_sub2,
        users,
        pos_items,
    ):
        # Batch rows from each augmented view.
        user_emb1 = ua_embeddings_sub1[users]
        user_emb2 = ua_embeddings_sub2[users]
        normalize_user_emb1 = l2_norm(user_emb1)
        normalize_user_emb2 = l2_norm(user_emb2)
        item_emb1 = ia_embeddings_sub1[pos_items]
        item_emb2 = ia_embeddings_sub2[pos_items]
        normalize_item_emb1 = l2_norm(item_emb1)
        normalize_item_emb2 = l2_norm(item_emb2)
        # Negatives are drawn from the batch itself (view-2 embeddings).
        normalize_user_emb2_neg = normalize_user_emb2
        normalize_item_emb2_neg = normalize_item_emb2
        # Cosine similarity of positive pairs (rows are unit-normalized).
        pos_score_user = torch.sum(
            torch.mul(normalize_user_emb1, normalize_user_emb2), dim=1
        )
        ttl_score_user = torch.matmul(normalize_user_emb1, normalize_user_emb2_neg.T)
        pos_score_item = torch.sum(
            torch.mul(normalize_item_emb1, normalize_item_emb2), dim=1
        )
        ttl_score_item = torch.matmul(normalize_item_emb1, normalize_item_emb2_neg.T)
        # Softmax-style contrast: exp(sim/T) of the positive over the sum of
        # exp(sim/T) across all batch candidates.
        pos_score_user = torch.exp(pos_score_user / self.ssl_temp)
        ttl_score_user = torch.sum(torch.exp(ttl_score_user / self.ssl_temp), dim=1)
        pos_score_item = torch.exp(pos_score_item / self.ssl_temp)
        ttl_score_item = torch.sum(torch.exp(ttl_score_item / self.ssl_temp), dim=1)
        ssl_loss_user = -torch.sum(torch.log(pos_score_user / ttl_score_user))
        ssl_loss_item = -torch.sum(torch.log(pos_score_item / ttl_score_item))
        ssl_loss = self.ssl_reg * (ssl_loss_user + ssl_loss_item)
        return ssl_loss
class calc_ssl_loss_v2(nn.Module):
    """Contrastive SSL loss with negatives drawn from *all* users/items.

    ``ssl_mode`` selects which side(s) contribute: "user_side",
    "item_side", or "both_side".
    """
    def __init__(self, ssl_reg, ssl_temp, ssl_mode):
        super(calc_ssl_loss_v2, self).__init__()
        # ssl_reg: weight of the SSL term; ssl_temp: softmax temperature.
        self.ssl_reg = ssl_reg
        self.ssl_temp = ssl_temp
        self.ssl_mode = ssl_mode
    def forward(
        self,
        ua_embeddings_sub1,
        ua_embeddings_sub2,
        ia_embeddings_sub1,
        ia_embeddings_sub2,
        users,
        pos_items,
    ):
        # NOTE(review): an ssl_mode outside the three expected values skips
        # both branches yet reaches the final else, raising
        # UnboundLocalError on ssl_loss_user — callers must pass a valid mode.
        if self.ssl_mode in ["user_side", "both_side"]:
            user_emb1 = ua_embeddings_sub1[users]
            user_emb2 = ua_embeddings_sub2[users]
            normalize_user_emb1 = l2_norm(user_emb1)
            normalize_user_emb2 = l2_norm(user_emb2)
            # Candidate pool: every user row of view 2, not just the batch.
            normalize_all_user_emb2 = l2_norm(ua_embeddings_sub2)
            pos_score_user = torch.sum(
                torch.mul(normalize_user_emb1, normalize_user_emb2), dim=1
            )
            ttl_score_user = torch.matmul(
                normalize_user_emb1, normalize_all_user_emb2.T
            )
            pos_score_user = torch.exp(pos_score_user / self.ssl_temp)
            ttl_score_user = torch.sum(torch.exp(ttl_score_user / self.ssl_temp), dim=1)
            ssl_loss_user = -torch.sum(torch.log(pos_score_user / ttl_score_user))
        if self.ssl_mode in ["item_side", "both_side"]:
            item_emb1 = ia_embeddings_sub1[pos_items]
            item_emb2 = ia_embeddings_sub2[pos_items]
            normalize_item_emb1 = l2_norm(item_emb1)
            normalize_item_emb2 = l2_norm(item_emb2)
            # Candidate pool: every item row of view 2.
            normalize_all_item_emb2 = l2_norm(ia_embeddings_sub2)
            pos_score_item = torch.sum(
                torch.mul(normalize_item_emb1, normalize_item_emb2), dim=1
            )
            ttl_score_item = torch.matmul(
                normalize_item_emb1, normalize_all_item_emb2.T
            )
            pos_score_item = torch.exp(pos_score_item / self.ssl_temp)
            ttl_score_item = torch.sum(torch.exp(ttl_score_item / self.ssl_temp), dim=1)
            ssl_loss_item = -torch.sum(torch.log(pos_score_item / ttl_score_item))
        if self.ssl_mode == "user_side":
            ssl_loss = self.ssl_reg * ssl_loss_user
        elif self.ssl_mode == "item_side":
            ssl_loss = self.ssl_reg * ssl_loss_item
        else:
            ssl_loss = self.ssl_reg * (ssl_loss_user + ssl_loss_item)
        return ssl_loss
class calc_ssl_loss_v3(nn.Module):
    """Merged-view contrastive SSL loss.

    The unique batch users and items are concatenated into one sample set;
    every other sample in the merged batch serves as a negative.
    """

    def __init__(self, ssl_reg, ssl_temp):
        super(calc_ssl_loss_v3, self).__init__()
        # ssl_reg: weight of the SSL term; ssl_temp: softmax temperature.
        self.ssl_reg = ssl_reg
        self.ssl_temp = ssl_temp

    def forward(
        self,
        ua_embeddings_sub1,
        ua_embeddings_sub2,
        ia_embeddings_sub1,
        ia_embeddings_sub2,
        users,
        pos_items,
    ):
        """Return the scalar SSL loss for one batch of (users, pos_items)."""
        batch_users = torch.unique(users)
        user_emb1 = ua_embeddings_sub1[batch_users]
        user_emb2 = ua_embeddings_sub2[batch_users]
        # Bug fix: torch.unique returns a single Tensor (inverse indices are
        # only returned with return_inverse=True), so the previous
        # ``batch_items, _ = torch.unique(pos_items)`` wrongly unpacked the
        # result tensor along dim 0.
        batch_items = torch.unique(pos_items)
        item_emb1 = ia_embeddings_sub1[batch_items]
        item_emb2 = ia_embeddings_sub2[batch_items]
        emb_merge1 = torch.cat([user_emb1, item_emb1], dim=0)
        emb_merge2 = torch.cat([user_emb2, item_emb2], dim=0)
        # cosine similarity (rows normalized to unit length)
        normalize_emb_merge1 = l2_norm(emb_merge1)
        normalize_emb_merge2 = l2_norm(emb_merge2)
        pos_score = torch.sum(
            torch.mul(normalize_emb_merge1, normalize_emb_merge2), dim=1
        )
        ttl_score = torch.matmul(normalize_emb_merge1, normalize_emb_merge2.T)
        pos_score = torch.exp(pos_score / self.ssl_temp)
        ttl_score = torch.sum(torch.exp(ttl_score / self.ssl_temp), dim=1)
        ssl_loss = -torch.sum(torch.log(pos_score / ttl_score))
        ssl_loss = self.ssl_reg * ssl_loss
        return ssl_loss
class create_bpr_loss(nn.Module):
    """BPR pairwise ranking loss plus an L2 penalty on the ego embeddings."""

    def __init__(self, reg):
        super(create_bpr_loss, self).__init__()
        # Weight of the L2 regularization term.
        self.reg = reg

    def forward(
        self,
        ua_embeddings,
        ia_embeddings,
        users,
        pos_items,
        neg_items,
        user_embedding,
        item_embedding,
    ):
        """Return ``(bpr_loss, emb_loss)`` for one training batch."""
        u_vecs = ua_embeddings[users]
        pos_vecs = ia_embeddings[pos_items]
        neg_vecs = ia_embeddings[neg_items]
        # Regularize the pre-propagation (ego) embeddings of the batch.
        l2_terms = (
            torch.sum(torch.pow(user_embedding[users], 2)) / 2
            + torch.sum(torch.pow(item_embedding[pos_items], 2)) / 2
            + torch.sum(torch.pow(item_embedding[neg_items], 2)) / 2
        )
        emb_loss = self.reg * l2_terms
        pos_scores = torch.sum(u_vecs * pos_vecs, dim=-1)
        neg_scores = torch.sum(u_vecs * neg_vecs, dim=-1)
        # -log sigmoid(pos - neg), summed over the batch.
        bpr_loss = torch.sum(-torch.log(torch.sigmoid(pos_scores - neg_scores)))
        return bpr_loss, emb_loss
class SGL(nn.Module):
    """Self-supervised Graph Learning (SGL) recommendation model.

    Propagates user/item embeddings over the normalised interaction graph
    (LightGCN-style) and, in parallel, over two augmented sub-graphs from
    which a contrastive (SSL) loss is computed alongside the BPR loss.
    """

    def __init__(self, config):
        # config: dict of model hyper-parameters (keys read below).
        self.config = config
        super(SGL, self).__init__()
        # Loss helpers are callable objects parameterised once from config.
        self.create_bpr_loss = create_bpr_loss(self.config["regs"])
        self.calc_ssl_loss_v3 = calc_ssl_loss_v3(
            self.config["ssl_reg"], self.config["ssl_temp"]
        )
        self.calc_ssl_loss_v2 = calc_ssl_loss_v2(
            self.config["ssl_reg"], self.config["ssl_temp"], self.config["ssl_mode"]
        )
        self.calc_ssl_loss = calc_ssl_loss(
            self.config["ssl_reg"], self.config["ssl_temp"]
        )
        self.ssl_mode = self.config["ssl_mode"]
        self.norm_adj = self.config["norm_adj"]  # normalised adjacency (scipy sparse)
        self.n_users = self.config["n_users"]
        self.n_items = self.config["n_items"]
        self.n_layers = self.config["n_layers"]
        self.pretrain = self.config["pretrain"]
        # Cached full-graph embeddings: written by forward(), read by predict().
        self.ua_embeddings = None
        self.ia_embeddings = None
        self.embe_dim = self.config["emb_dim"]
        # aug_type --- 0: node dropout; 1: edge dropout; 2: random walk.
        self.aug_type = self.config["aug_type"]
        self.user_embedding = nn.Parameter(
            nn.init.xavier_uniform_(torch.empty(self.n_users, self.embe_dim)),
            requires_grad=True,
        )
        self.item_embedding = nn.Parameter(
            nn.init.xavier_uniform_(torch.empty(self.n_items, self.embe_dim)),
            requires_grad=True,
        )
        self.adj_mat = self._convert_sp_mat_to_sp_tensor(self.norm_adj)

    def _convert_sp_mat_to_sp_tensor(self, X):
        """Convert a scipy sparse matrix to a torch sparse COO tensor."""
        coo = X.tocoo().astype(np.float32)
        # np.vstack replaces the deprecated np.mat (np.matrix is deprecated
        # and removed in NumPy 2.0); cast to int64, the index dtype torch uses.
        indices = np.vstack((coo.row, coo.col)).astype(np.int64)
        return torch.sparse_coo_tensor(torch.tensor(indices), coo.data, coo.shape)

    def _convert_csr_to_sparse_tensor_inputs(self, X):
        """Return the (indices, values, shape) triplet of a scipy sparse matrix."""
        coo = X.tocoo()
        indices = np.vstack((coo.row, coo.col))  # was np.mat (deprecated)
        return indices, coo.data, coo.shape

    def forward(self, sub_mat, bat_users, bat_pos_items, bat_neg_items):
        """Compute the BPR, embedding-regularisation and SSL losses for a batch.

        Args:
            sub_mat (dict): (indices, values, shape) triplets describing the
                two augmented sub-graphs (one entry per layer for aug_type 2).
            bat_users / bat_pos_items / bat_neg_items: index tensors of the
                batch users and their positive/negative items.
        Returns:
            tuple: (bpr_loss, emb_loss, ssl_loss).
        """
        # Materialise the augmented adjacencies as torch sparse tensors.
        # For node/edge dropout (aug_type 0/1) one sub-graph is shared by all
        # layers; for random walk (aug_type 2) each layer has its own.
        # NOTE(review): self.device is assumed to be set externally (the engine
        # only calls model.to(device)); nn.Module defines no such attribute by
        # default -- confirm against ModelEngine.
        for k in range(1, self.n_layers + 1):
            if self.aug_type in [0, 1]:
                sub_mat["sub_mat_1%d" % k] = torch.sparse_coo_tensor(
                    sub_mat["adj_indices_sub1"],
                    sub_mat["adj_values_sub1"],
                    sub_mat["adj_shape_sub1"],
                ).to(self.device)
                sub_mat["sub_mat_2%d" % k] = torch.sparse_coo_tensor(
                    sub_mat["adj_indices_sub2"],
                    sub_mat["adj_values_sub2"],
                    sub_mat["adj_shape_sub2"],
                ).to(self.device)
            else:
                sub_mat["sub_mat_1%d" % k] = torch.sparse_coo_tensor(
                    sub_mat["adj_indices_sub1%d" % k],
                    sub_mat["adj_values_sub1%d" % k],
                    sub_mat["adj_shape_sub1%d" % k],
                ).to(self.device)
                sub_mat["sub_mat_2%d" % k] = torch.sparse_coo_tensor(
                    sub_mat["adj_indices_sub2%d" % k],
                    sub_mat["adj_values_sub2%d" % k],
                    sub_mat["adj_shape_sub2%d" % k],
                ).to(self.device)
        # Layer-wise propagation on the full graph and both augmented views.
        ego_embeddings = torch.cat([self.user_embedding, self.item_embedding], dim=0)
        ego_embeddings_sub1 = ego_embeddings
        ego_embeddings_sub2 = ego_embeddings
        all_embeddings = [ego_embeddings]
        all_embeddings_sub1 = [ego_embeddings_sub1]
        all_embeddings_sub2 = [ego_embeddings_sub2]
        for k in range(1, self.n_layers + 1):
            ego_embeddings = torch.matmul(self.adj_mat.to(self.device), ego_embeddings)
            all_embeddings += [ego_embeddings]
            ego_embeddings_sub1 = torch.matmul(
                sub_mat["sub_mat_1%d" % k], ego_embeddings_sub1
            )
            all_embeddings_sub1 += [ego_embeddings_sub1]
            ego_embeddings_sub2 = torch.matmul(
                sub_mat["sub_mat_2%d" % k], ego_embeddings_sub2
            )
            all_embeddings_sub2 += [ego_embeddings_sub2]
        # Mean-pool the per-layer embeddings, then split user/item parts.
        all_embeddings = torch.stack(all_embeddings, 1)
        all_embeddings = torch.mean(all_embeddings, dim=1, keepdim=False)
        u_g_embeddings, i_g_embeddings = torch.split(
            all_embeddings, [self.n_users, self.n_items], 0
        )
        all_embeddings_sub1 = torch.stack(all_embeddings_sub1, 1)
        all_embeddings_sub1 = torch.mean(all_embeddings_sub1, dim=1, keepdim=False)
        u_g_embeddings_sub1, i_g_embeddings_sub1 = torch.split(
            all_embeddings_sub1, [self.n_users, self.n_items], 0
        )
        all_embeddings_sub2 = torch.stack(all_embeddings_sub2, 1)
        all_embeddings_sub2 = torch.mean(all_embeddings_sub2, dim=1, keepdim=False)
        u_g_embeddings_sub2, i_g_embeddings_sub2 = torch.split(
            all_embeddings_sub2, [self.n_users, self.n_items], 0
        )
        (
            ua_embeddings,
            ia_embeddings,
            ua_embeddings_sub1,
            ia_embeddings_sub1,
            ua_embeddings_sub2,
            ia_embeddings_sub2,
        ) = (
            u_g_embeddings,
            i_g_embeddings,
            u_g_embeddings_sub1,
            i_g_embeddings_sub1,
            u_g_embeddings_sub2,
            i_g_embeddings_sub2,
        )
        # Cache the full-graph embeddings for predict().
        self.ua_embeddings = ua_embeddings
        self.ia_embeddings = ia_embeddings
        if self.pretrain:
            # No contrastive loss during pre-training.
            ssl_loss = 0
        else:
            if self.ssl_mode in ["user_side", "item_side", "both_side"]:
                ssl_loss = self.calc_ssl_loss_v2(
                    ua_embeddings_sub1,
                    ua_embeddings_sub2,
                    ia_embeddings_sub1,
                    ia_embeddings_sub2,
                    bat_users,
                    bat_pos_items,
                )
            elif self.ssl_mode in ["merge"]:
                ssl_loss = self.calc_ssl_loss_v3(
                    ua_embeddings_sub1,
                    ua_embeddings_sub2,
                    ia_embeddings_sub1,
                    ia_embeddings_sub2,
                    bat_users,
                    bat_pos_items,
                )
            else:
                raise ValueError("Invalid ssl_mode!")
        sl_loss, emb_loss = self.create_bpr_loss(
            ua_embeddings,
            ia_embeddings,
            bat_users,
            bat_pos_items,
            bat_neg_items,
            self.user_embedding,
            self.item_embedding,
        )
        return sl_loss, emb_loss, ssl_loss

    def predict(self, users, items):
        """Score (user, item) pairs with the embeddings cached by forward()."""
        users_t = torch.tensor(users, dtype=torch.int64, device=self.device)
        items_t = torch.tensor(items, dtype=torch.int64, device=self.device)
        with torch.no_grad():
            user_embed = self.ua_embeddings[users_t]
            items_embed = self.ia_embeddings[items_t]
            scores = torch.mul(user_embed, items_embed).sum(dim=1)
        return scores
class SGLEngine(ModelEngine):
    """SGLEngine Class."""

    # A class includes train an epoch and train a batch of SGL
    def __init__(self, config):
        """Initialize SGLEngine Class."""
        self.config = config
        self.norm_adj = config["model"]["norm_adj"]
        self.n_layers = self.config["model"]["n_layers"]
        self.aug_type = self.config["model"]["aug_type"]
        self.model = SGL(config["model"])
        super(SGLEngine, self).__init__(config)
        self.model.to(self.device)

    def train_single_batch(self, sub_mat, batch_data):
        """Train the model in a single batch.

        Args:
            sub_mat (dict): sparse-tensor inputs describing the augmented sub-graphs.
            batch_data (list): batch users, positive items and negative items.
        Return:
            loss (float): batch loss.
        """
        assert hasattr(self, "model"), "Please specify the exact model !"
        self.optimizer.zero_grad()
        batch_users, pos_items, neg_items = batch_data
        sl_loss, emb_loss, ssl_loss = self.model(
            sub_mat, batch_users, pos_items, neg_items
        )
        # Total objective = BPR loss + embedding L2 + contrastive loss.
        batch_loss = sl_loss + emb_loss + ssl_loss
        batch_loss.backward()
        self.optimizer.step()
        loss = batch_loss.item()
        return loss

    def train_an_epoch(self, train_loader, epoch_id):
        """Train the model in one epoch.

        Args:
            epoch_id (int): the number of epoch.
            train_loader (function): user, pos_items and neg_items generator.
        """
        assert hasattr(self, "model"), "Please specify the exact model !"
        self.model.train()
        total_loss = 0.0
        # Re-sample the augmented sub-graphs once per epoch.
        sub_mat = self.sub_mat_refresher(train_loader)
        for batch_data in train_loader:
            loss = self.train_single_batch(sub_mat, batch_data)
            total_loss += loss
        print("[Training Epoch {}], Loss {}".format(epoch_id, loss))
        self.writer.add_scalar("model/loss", total_loss, epoch_id)

    def create_sgl_mat(self, train_loader):
        """Create the (possibly augmented) normalised adjacency matrix.

        Builds the symmetric user-item bipartite adjacency from the loader's
        interactions, applies the configured augmentation (node dropout, edge
        dropout, or random walk) when ``is_subgraph`` is set, and returns the
        D^-1/2 A D^-1/2 normalised matrix as a scipy sparse matrix.
        """
        n_users = self.config["model"]["n_users"]
        n_items = self.config["model"]["n_items"]
        n_nodes = n_users + n_items
        is_subgraph = self.config["model"]["is_subgraph"]
        aug_type = self.config["model"]["aug_type"]
        ssl_ratio = self.config["model"]["ssl_ratio"]
        user_np = train_loader.dataset.user_tensor.cpu().numpy()
        item_np = train_loader.dataset.pos_item_tensor.cpu().numpy()
        if is_subgraph and aug_type in [0, 1, 2] and ssl_ratio > 0:
            # data augmentation type --- 0: Node Dropout; 1: Edge Dropout; 2: Random Walk
            if aug_type == 0:
                # int(): n_users * ssl_ratio is a float, but np.random.choice
                # requires an integer size.
                drop_user_idx = self.randint_choice(
                    n_users, size=int(n_users * ssl_ratio), replace=False
                )
                drop_item_idx = self.randint_choice(
                    n_items, size=int(n_items * ssl_ratio), replace=False
                )
                # Zero out the rows/columns of the dropped users/items.
                indicator_user = np.ones(n_users, dtype=np.float32)
                indicator_item = np.ones(n_items, dtype=np.float32)
                indicator_user[drop_user_idx] = 0.0
                indicator_item[drop_item_idx] = 0.0
                diag_indicator_user = sp.diags(indicator_user)
                diag_indicator_item = sp.diags(indicator_item)
                R = sp.csr_matrix(
                    (np.ones_like(user_np, dtype=np.float32), (user_np, item_np)),
                    shape=(n_users, n_items),
                )
                R_prime = diag_indicator_user.dot(R).dot(diag_indicator_item)
                (user_np_keep, item_np_keep) = R_prime.nonzero()
                ratings_keep = R_prime.data
                tmp_adj = sp.csr_matrix(
                    (ratings_keep, (user_np_keep, item_np_keep + n_users)),
                    shape=(n_nodes, n_nodes),
                )
            elif aug_type in [1, 2]:
                # Keep a random (1 - ssl_ratio) fraction of the edges.
                keep_idx = self.randint_choice(
                    len(user_np),
                    size=int(len(user_np) * (1 - ssl_ratio)),
                    replace=False,
                )
                user_keep_np = np.array(user_np)[keep_idx]
                item_keep_np = np.array(item_np)[keep_idx]
                ratings = np.ones_like(user_keep_np, dtype=np.float32)
                tmp_adj = sp.csr_matrix(
                    (ratings, (user_keep_np, item_keep_np + n_users)),
                    shape=(n_nodes, n_nodes),
                )
        else:
            # No augmentation: use every observed interaction.
            ratings = np.ones_like(user_np, dtype=np.float32)
            tmp_adj = sp.csr_matrix(
                (ratings, (user_np, item_np + n_users)), shape=(n_nodes, n_nodes)
            )
        adj_mat = tmp_adj + tmp_adj.T
        # pre adjacency matrix: symmetric normalisation D^-1/2 A D^-1/2.
        rowsum = np.array(adj_mat.sum(1))
        d_inv = np.power(rowsum, -0.5).flatten()
        d_inv[np.isinf(d_inv)] = 0.0  # isolated nodes produce inf; zero them
        d_mat_inv = sp.diags(d_inv)
        norm_adj_tmp = d_mat_inv.dot(adj_mat)
        adj_matrix = norm_adj_tmp.dot(d_mat_inv)
        return adj_matrix

    def randint_choice(self, high, size=None, replace=True, p=None, exclusion=None):
        """Return random integers from `0` (inclusive) to `high` (exclusive).

        ``exclusion`` lists indices that must never be sampled; ``p`` (if
        given) is renormalised after the exclusions are zeroed out.
        """
        a = np.arange(high)
        if exclusion is not None:
            if p is None:
                p = np.ones_like(a)
            else:
                p = np.array(p, copy=True)
            p = p.flatten()
            p[exclusion] = 0
        if p is not None:
            p = p / np.sum(p)
        sample = np.random.choice(a, size=size, replace=replace, p=p)
        return sample

    def sub_mat_refresher(self, train_loader):
        """Sample fresh augmented sub-graphs and return their tensor inputs.

        For node/edge dropout (aug_type 0/1) one pair of sub-graphs is shared
        by all layers; for random walk, each layer gets its own pair.
        """
        sub_mat = {}
        if self.aug_type in [0, 1]:
            (
                sub_mat["adj_indices_sub1"],
                sub_mat["adj_values_sub1"],
                sub_mat["adj_shape_sub1"],
            ) = self.convert_csr_to_sparse_tensor_inputs(
                self.create_sgl_mat(train_loader)
            )
            (
                sub_mat["adj_indices_sub2"],
                sub_mat["adj_values_sub2"],
                sub_mat["adj_shape_sub2"],
            ) = self.convert_csr_to_sparse_tensor_inputs(
                self.create_sgl_mat(train_loader)
            )
        else:
            for k in range(1, self.n_layers + 1):
                (
                    sub_mat["adj_indices_sub1%d" % k],
                    sub_mat["adj_values_sub1%d" % k],
                    sub_mat["adj_shape_sub1%d" % k],
                ) = self.convert_csr_to_sparse_tensor_inputs(
                    self.create_sgl_mat(train_loader)
                )
                (
                    sub_mat["adj_indices_sub2%d" % k],
                    sub_mat["adj_values_sub2%d" % k],
                    sub_mat["adj_shape_sub2%d" % k],
                ) = self.convert_csr_to_sparse_tensor_inputs(
                    self.create_sgl_mat(train_loader)
                )
        return sub_mat

    def convert_csr_to_sparse_tensor_inputs(self, X):
        """Return the (indices, values, shape) triplet of a scipy sparse matrix."""
        coo = X.tocoo()
        indices = np.vstack((coo.row, coo.col))  # was np.mat (deprecated)
        return indices, coo.data, coo.shape
| 36.728669 | 89 | 0.592529 |
3799510bfdd90fcecba718d31b26dfdd79c92864 | 5,831 | py | Python | pspy/so_window.py | xgarrido/pspy | 8c1c13828ca982a1747ddeed2ee9c35b09fd9f0b | [
"BSD-3-Clause"
] | 6 | 2020-01-26T22:00:31.000Z | 2021-05-04T08:13:44.000Z | pspy/so_window.py | simonsobs/pspy | b1faf15eb7c9f4c2bee80fe5cfafaab1d4bc6470 | [
"BSD-3-Clause"
] | 5 | 2021-02-12T13:04:08.000Z | 2022-01-24T18:57:34.000Z | pspy/so_window.py | xgarrido/pspy | 8c1c13828ca982a1747ddeed2ee9c35b09fd9f0b | [
"BSD-3-Clause"
] | 1 | 2021-11-02T11:01:58.000Z | 2021-11-02T11:01:58.000Z | """
routines for window function generation
"""
import healpy as hp
import numpy as np
from pixell import curvedsky, enmap
from pspy import sph_tools
def get_distance(binary, rmax=None):
    """Compute, for every pixel, the distance (in degrees) to the closest
    masked pixel of a CAR or HEALPIX ``so_map`` binary mask.

    Parameters
    ----------
    binary: ``so_map``
        a ``so_map`` with binary data (1 is observed, 0 is masked)
    rmax: float, optional
        maximum search radius (radians) passed through to pixell
    """
    dist = binary.copy()
    # Distances are real-valued even though the input mask is 0/1.
    dist.data = dist.data.astype(float)
    rad_to_deg = 180 / np.pi
    if binary.pixel == "HEALPIX":
        dist.data[:] = enmap.distance_transform_healpix(binary.data, method="heap", rmax=rmax)
        dist.data[:] *= rad_to_deg
    elif binary.pixel == "CAR":
        dist.data[:] = enmap.distance_transform(binary.data, rmax=rmax)
        dist.data[:] *= rad_to_deg
    return dist
def create_apodization(binary, apo_type, apo_radius_degree, use_rmax=False):
    """Create an apodized window function from a binary mask.

    Parameters
    ----------
    binary: ``so_map``
        a ``so_map`` with binary data (1 is observed, 0 is masked)
    apo_type: string
        the type of apodisation you want to use ("C1", "C2" or "Rectangle")
    apo_radius_degree: float
        the apodisation radius in degrees
    use_rmax: bool
        if True, limit the distance transform to slightly beyond the
        apodisation radius (faster on large maps)

    Raises
    ------
    ValueError
        if ``apo_type`` is unknown, or "Rectangle" is requested for a
        HEALPIX map.
    """
    if use_rmax:
        rmax = (apo_radius_degree * 1.1) * np.pi / 180
    else:
        rmax = None
    if apo_type == "C1":
        return apod_C1(binary, apo_radius_degree, rmax)
    if apo_type == "C2":
        return apod_C2(binary, apo_radius_degree, rmax)
    if apo_type == "Rectangle":
        if binary.pixel == "HEALPIX":
            raise ValueError("No rectangle apodization for HEALPIX map")
        return apod_rectangle(binary, apo_radius_degree)
    # Previously an unknown apo_type fell through and raised an opaque
    # NameError on the unbound 'window' variable.
    raise ValueError("Unknown apodisation type %r (use 'C1', 'C2' or 'Rectangle')" % (apo_type,))
def apod_C2(binary, radius, rmax=None):
    """Apply a C2 apodisation (https://arxiv.org/pdf/0903.2350.pdf) to a
    binary mask.

    Parameters
    ----------
    binary: ``so_map``
        a ``so_map`` with binary data (1 is observed, 0 is masked)
    radius: float
        the apodisation radius in degrees
    rmax: float, optional
        maximum search radius for the distance transform
    """
    # A zero radius means no taper: the mask is returned unchanged.
    if radius == 0:
        return binary
    dist = get_distance(binary, rmax)
    win = binary.copy()
    win.data = dist.data / radius - np.sin(2 * np.pi * dist.data / radius) / (2 * np.pi)
    # Pixels farther than the taper radius are fully observed.
    win.data[dist.data > radius] = 1
    return win
def apod_C1(binary, radius, rmax=None):
    """Apply a C1 (cosine) apodisation (https://arxiv.org/pdf/0903.2350.pdf)
    to a binary mask.

    Parameters
    ----------
    binary: ``so_map``
        a so_map with binary data (1 is observed, 0 is masked)
    radius: float
        the apodisation radius in degrees
    rmax: float, optional
        maximum search radius for the distance transform
    """
    # A zero radius means no taper: the mask is returned unchanged.
    if radius == 0:
        return binary
    dist = get_distance(binary, rmax)
    win = binary.copy()
    win.data = 1.0 / 2 - 1.0 / 2 * np.cos(-np.pi * dist.data / radius)
    # Pixels farther than the taper radius are fully observed.
    win.data[dist.data > radius] = 1
    return win
def apod_rectangle(binary, radius):
    """Create an apodisation for a rectangular window (CAR pixelisation only),
    tapering the edges separably in x and y so the corners stay smooth.

    Parameters
    ----------
    binary: ``so_map``
        a so_map with binary data (1 is observed, 0 is masked);
        assumes binary.data is a 2D CAR map -- TODO confirm
    radius: float
        the apodisation radius in degrees
    """
    # TODO: clean this one
    if radius == 0:
        return binary
    else:
        shape = binary.data.shape
        wcs = binary.data.wcs
        Ny, Nx = shape
        # Physical pixel sizes (radians) in each direction.
        pix_scale_y, pix_scale_x = enmap.pixshape(shape, wcs)
        win = binary.copy()
        win.data = win.data * 0 + 1  # start from a fully-open window
        win_x = win.copy()
        win_y = win.copy()
        ones = np.ones((Ny, Nx))
        # Convert the radius from degrees into a pixel count per axis.
        deg_to_pix_x = np.pi / 180 / pix_scale_x
        deg_to_pix_y = np.pi / 180 / pix_scale_y
        lenApod_x = int(radius * deg_to_pix_x)
        lenApod_y = int(radius * deg_to_pix_y)
        # Cosine taper along x applied symmetrically to both edges.
        for i in range(lenApod_x):
            r = float(i)
            win_x.data[:, i] = 1.0 / 2 * (ones[:, i] - np.cos(-np.pi * r / lenApod_x))
            win_x.data[:, Nx - i - 1] = win_x.data[:, i]
        # Cosine taper along y applied symmetrically to both edges.
        for j in range(lenApod_y):
            r = float(j)
            win_y.data[j, :] = 1.0 / 2 * (ones[j, :] - np.cos(-np.pi * r / lenApod_y))
            win_y.data[Ny - j - 1, :] = win_y.data[j, :]
        # Separable product of the two 1D tapers.
        win.data = win_x.data * win_y.data
        return win
def get_spinned_windows(w, lmax, niter):
    """Compute the spinned window functions (for pure B modes method).

    Parameters
    ----------
    w: ``so_map``
        map of the window function
    lmax: integer
        maximum value of the multipole for the harmonic transform
    niter: integer
        number of iteration for the harmonic transform

    Returns
    -------
    tuple of ``so_map``
        (w1_plus, w1_minus, w2_plus, w2_minus): the spin-1 and spin-2
        components of the window derivatives.
    """
    # Two-component template to receive the spin-s transforms.
    template = np.array([w.data.copy(), w.data.copy()])
    w1_plus, w1_minus, w2_plus, w2_minus = w.copy(), w.copy(), w.copy(), w.copy()
    if w.pixel == "CAR":
        # Re-attach the WCS so pixell treats the template as an enmap.
        template = enmap.samewcs(template, w.data)
    wlm = sph_tools.map2alm(w, lmax=lmax, niter=niter)
    ell = np.arange(lmax + 1)
    # Multipole filters: presumably the first/second derivative prefactors
    # sqrt(l(l+1)) and sqrt((l+2)(l+1)l(l-1)) for spin raising -- TODO confirm
    # against the pure-B estimator reference.
    filter_1 = -np.sqrt((ell + 1) * ell)
    filter_2 = np.sqrt((ell + 2) * (ell + 1) * ell * (ell - 1))
    filter_2[:1] = 0  # l=0 factor is undefined (negative argument); zero it
    wlm1_e = hp.almxfl(wlm, filter_1)
    wlm2_e = hp.almxfl(wlm, filter_2)
    # The window is real, so the B-type alm of the spinned fields vanish.
    wlm1_b = np.zeros_like(wlm1_e)
    wlm2_b = np.zeros_like(wlm2_e)
    w1 = template.copy()
    w2 = template.copy()
    if w.pixel == "HEALPIX":
        curvedsky.alm2map_healpix(np.array([wlm1_e, wlm1_b]), w1, spin=1)
        curvedsky.alm2map_healpix(np.array([wlm2_e, wlm2_b]), w2, spin=2)
    if w.pixel == "CAR":
        curvedsky.alm2map(np.array([wlm1_e, wlm1_b]), w1, spin=1)
        curvedsky.alm2map(np.array([wlm2_e, wlm2_b]), w2, spin=2)
    # Split the two components of each spin transform into separate so_maps.
    w1_plus.data = w1[0]
    w1_minus.data = w1[1]
    w2_plus.data = w2[0]
    w2_minus.data = w2[1]
    return w1_plus, w1_minus, w2_plus, w2_minus
| 28.169082 | 94 | 0.59338 |
6a78d9aca4d14708856986a330049bdfc97fcc0a | 99 | py | Python | tests/test_exception_notregistered/registered_blocks.py | WGBH/wagtail-streamfieldtools | 192f86845532742b0b7d432bef3987357833b8ed | [
"MIT"
] | 6 | 2015-12-14T17:59:22.000Z | 2016-09-28T10:38:54.000Z | tests/test_exception_notregistered/registered_blocks.py | WGBH/wagtail-streamfieldtools | 192f86845532742b0b7d432bef3987357833b8ed | [
"MIT"
] | 10 | 2016-05-13T17:59:31.000Z | 2022-03-11T23:13:01.000Z | tests/test_exception_notregistered/registered_blocks.py | WGBH/wagtail-streamfieldtools | 192f86845532742b0b7d432bef3987357833b8ed | [
"MIT"
] | 2 | 2016-03-18T18:56:58.000Z | 2016-05-13T18:14:15.000Z | from streamfield_tools.registry import block_registry
block_registry.unregister_block('boo_urns')
| 24.75 | 53 | 0.878788 |
1aeb7219b22cd025bb26fcd32aeef6342f184266 | 765 | py | Python | EdmFest/landing/migrations/0010_auto_20200127_2019.py | Mrgove10/EdmFest | 38f79ff0586476022b91199f17f6721aa53eb377 | [
"MIT"
] | null | null | null | EdmFest/landing/migrations/0010_auto_20200127_2019.py | Mrgove10/EdmFest | 38f79ff0586476022b91199f17f6721aa53eb377 | [
"MIT"
] | 1 | 2020-06-06T01:16:59.000Z | 2020-06-06T01:16:59.000Z | EdmFest/landing/migrations/0010_auto_20200127_2019.py | Mrgove10/EdmFest | 38f79ff0586476022b91199f17f6721aa53eb377 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.9 on 2020-01-27 19:19
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (do not hand-edit): re-declares the
    ``added_by`` foreign keys on Artist and Festival with CASCADE deletion."""

    dependencies = [
        ('landing', '0009_auto_20200127_2018'),
    ]
    operations = [
        # Deleting the referenced user cascades to their artists/festivals.
        migrations.AlterField(
            model_name='artist',
            name='added_by',
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='festival',
            name='added_by',
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
7eb9c3e6e2e793eb8e48f0dbb9f553d1daa688dc | 5,401 | py | Python | venv/Lib/site-packages/caffe2/python/scope_test.py | Westlanderz/AI-Plat1 | 1187c22819e5135e8e8189c99b86a93a0d66b8d8 | [
"MIT"
] | 1 | 2022-01-08T12:30:44.000Z | 2022-01-08T12:30:44.000Z | venv/Lib/site-packages/caffe2/python/scope_test.py | Westlanderz/AI-Plat1 | 1187c22819e5135e8e8189c99b86a93a0d66b8d8 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/caffe2/python/scope_test.py | Westlanderz/AI-Plat1 | 1187c22819e5135e8e8189c99b86a93a0d66b8d8 | [
"MIT"
] | null | null | null |
from caffe2.python import scope, core, workspace
import unittest
import threading
import time
SUCCESS_COUNT = 0
# Serialises updates to the module-global SUCCESS_COUNT: `+= 1` is a
# read-modify-write and is not atomic across threads.
_SUCCESS_LOCK = threading.Lock()


def thread_runner(idx, testobj):
    """Worker run on its own thread: enters a per-thread name/device scope and
    verifies the other workers' scopes never leak into this thread.

    Args:
        idx: worker index; used to build a unique scope name and GPU id, and
            to stagger the sleeps so the scopes' lifetimes overlap.
        testobj: the TestCase instance providing the assertion methods.
    """
    global SUCCESS_COUNT
    # assertEqual: the assertEquals alias is deprecated and removed in
    # Python 3.12.
    testobj.assertEqual(scope.CurrentNameScope(), "")
    testobj.assertEqual(scope.CurrentDeviceScope(), None)
    namescope = "namescope_{}".format(idx)
    dsc = core.DeviceOption(workspace.GpuDeviceType, idx)
    with scope.DeviceScope(dsc):
        with scope.NameScope(namescope):
            testobj.assertEqual(scope.CurrentNameScope(), namescope + "/")
            testobj.assertEqual(scope.CurrentDeviceScope(), dsc)
            # Sleep so that all workers are inside their scopes at once.
            time.sleep(0.01 + idx * 0.01)
            testobj.assertEqual(scope.CurrentNameScope(), namescope + "/")
            testobj.assertEqual(scope.CurrentDeviceScope(), dsc)
    testobj.assertEqual(scope.CurrentNameScope(), "")
    testobj.assertEqual(scope.CurrentDeviceScope(), None)
    with _SUCCESS_LOCK:
        SUCCESS_COUNT += 1
class TestScope(unittest.TestCase):
    """Tests for caffe2's thread-local name/device scope context managers.

    Note: every ``assertEquals`` call was changed to ``assertEqual`` -- the
    former is a deprecated alias that was removed in Python 3.12.
    """

    def testNamescopeBasic(self):
        self.assertEqual(scope.CurrentNameScope(), "")
        with scope.NameScope("test_scope"):
            self.assertEqual(scope.CurrentNameScope(), "test_scope/")
        self.assertEqual(scope.CurrentNameScope(), "")

    def testNamescopeAssertion(self):
        # The name scope must be restored even if the body raises.
        self.assertEqual(scope.CurrentNameScope(), "")
        try:
            with scope.NameScope("test_scope"):
                self.assertEqual(scope.CurrentNameScope(), "test_scope/")
                raise Exception()
        except Exception:
            pass
        self.assertEqual(scope.CurrentNameScope(), "")

    def testEmptyNamescopeBasic(self):
        self.assertEqual(scope.CurrentNameScope(), "")
        with scope.NameScope("test_scope"):
            # EmptyNameScope temporarily clears the enclosing name scope.
            with scope.EmptyNameScope():
                self.assertEqual(scope.CurrentNameScope(), "")
            self.assertEqual(scope.CurrentNameScope(), "test_scope/")

    def testDevicescopeBasic(self):
        self.assertEqual(scope.CurrentDeviceScope(), None)
        dsc = core.DeviceOption(workspace.GpuDeviceType, 9)
        with scope.DeviceScope(dsc):
            self.assertEqual(scope.CurrentDeviceScope(), dsc)
        self.assertEqual(scope.CurrentDeviceScope(), None)

    def testEmptyDevicescopeBasic(self):
        self.assertEqual(scope.CurrentDeviceScope(), None)
        dsc = core.DeviceOption(workspace.GpuDeviceType, 9)
        with scope.DeviceScope(dsc):
            self.assertEqual(scope.CurrentDeviceScope(), dsc)
            # EmptyDeviceScope temporarily clears the enclosing device scope.
            with scope.EmptyDeviceScope():
                self.assertEqual(scope.CurrentDeviceScope(), None)
            self.assertEqual(scope.CurrentDeviceScope(), dsc)
        self.assertEqual(scope.CurrentDeviceScope(), None)

    def testDevicescopeAssertion(self):
        # The device scope must be restored even if the body raises.
        self.assertEqual(scope.CurrentDeviceScope(), None)
        dsc = core.DeviceOption(workspace.GpuDeviceType, 9)
        try:
            with scope.DeviceScope(dsc):
                self.assertEqual(scope.CurrentDeviceScope(), dsc)
                raise Exception()
        except Exception:
            pass
        self.assertEqual(scope.CurrentDeviceScope(), None)

    def testTags(self):
        # Nested device scopes accumulate their extra_info tags, and each
        # level is restored exactly on exit.
        self.assertEqual(scope.CurrentDeviceScope(), None)
        extra_info1 = ["key1:value1"]
        extra_info2 = ["key2:value2"]
        extra_info3 = ["key3:value3"]
        extra_info_1_2 = ["key1:value1", "key2:value2"]
        extra_info_1_2_3 = ["key1:value1", "key2:value2", "key3:value3"]
        with scope.DeviceScope(core.DeviceOption(0, extra_info=extra_info1)):
            self.assertEqual(scope.CurrentDeviceScope().extra_info, extra_info1)
            with scope.DeviceScope(core.DeviceOption(0, extra_info=extra_info2)):
                self.assertEqual(scope.CurrentDeviceScope().extra_info, extra_info_1_2)
                with scope.DeviceScope(core.DeviceOption(0, extra_info=extra_info3)):
                    self.assertEqual(
                        scope.CurrentDeviceScope().extra_info, extra_info_1_2_3
                    )
                self.assertEqual(scope.CurrentDeviceScope().extra_info, extra_info_1_2)
            self.assertEqual(scope.CurrentDeviceScope().extra_info, extra_info1)
        self.assertEqual(scope.CurrentDeviceScope(), None)

    def testMultiThreaded(self):
        """
        Test that name/device scope are properly local to the thread
        and don't interfere
        """
        global SUCCESS_COUNT
        self.assertEqual(scope.CurrentNameScope(), "")
        self.assertEqual(scope.CurrentDeviceScope(), None)
        threads = []
        for i in range(4):
            threads.append(threading.Thread(
                target=thread_runner,
                args=(i, self),
            ))
        for t in threads:
            t.start()
        with scope.NameScope("master"):
            self.assertEqual(scope.CurrentDeviceScope(), None)
            self.assertEqual(scope.CurrentNameScope(), "master/")
            for t in threads:
                t.join()
            # The workers' scopes must not have leaked into the main thread.
            self.assertEqual(scope.CurrentNameScope(), "master/")
            self.assertEqual(scope.CurrentDeviceScope(), None)
        # Ensure all threads succeeded
        self.assertEqual(SUCCESS_COUNT, 4)
8336a8442ad25d8dda25ebb5c595dced8655847c | 3,488 | py | Python | contrib/go/src/python/pants/contrib/go/subsystems/archive_retriever.py | mpopenko-exos/pants | 47d27037c8b13291fc9023e56ddd1b1defdf1b8e | [
"Apache-2.0"
] | null | null | null | contrib/go/src/python/pants/contrib/go/subsystems/archive_retriever.py | mpopenko-exos/pants | 47d27037c8b13291fc9023e56ddd1b1defdf1b8e | [
"Apache-2.0"
] | 1 | 2018-09-04T17:37:34.000Z | 2018-09-04T19:42:58.000Z | contrib/go/src/python/pants/contrib/go/subsystems/archive_retriever.py | mpopenko-exos/pants | 47d27037c8b13291fc9023e56ddd1b1defdf1b8e | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
import os
import shutil
from contextlib import closing, contextmanager
from urllib.parse import urlparse
import requests
from pants.fs.archive import archiver_for_path
from pants.subsystem.subsystem import Subsystem
from pants.util.contextutil import temporary_dir, temporary_file
from pants.contrib.go.subsystems.fetch_error import FetchError
logger = logging.getLogger(__name__)
class ArchiveRetriever(Subsystem):
"""Retrieves and unpacks remote libraries from archives."""
options_scope = 'go-archive-retriever'
@classmethod
def register_options(cls, register):
super().register_options(register)
register('--buffer-size', metavar='<bytes>', type=int, advanced=True,
default=10 * 1024, # 10KB in case jumbo frames are in play.
help='The number of bytes of archive content to buffer in memory before flushing to '
'disk when downloading an archive.')
register('--retries', type=int, default=1, advanced=True,
help='How many times to retry when fetching a remote archive.')
def fetch_archive(self, archive_url, strip_level, dest):
try:
archiver = archiver_for_path(archive_url)
except ValueError:
raise FetchError(f"Don't know how to unpack archive at url {archive_url}")
with self._fetch(archive_url) as archive:
if strip_level == 0:
archiver.extract(archive, dest)
else:
with temporary_dir() as scratch:
archiver.extract(archive, scratch)
for dirpath, dirnames, filenames in os.walk(scratch, topdown=True):
if dirpath != scratch:
relpath = os.path.relpath(dirpath, scratch)
relpath_components = relpath.split(os.sep)
if len(relpath_components) == strip_level and (dirnames or filenames):
for path in dirnames + filenames:
src = os.path.join(dirpath, path)
dst = os.path.join(dest, path)
shutil.move(src, dst)
del dirnames[:] # Stops the walk.
@contextmanager
def _fetch(self, url):
parsed = urlparse(url)
if not parsed.scheme or parsed.scheme == 'file':
yield parsed.path
else:
with self._download(url) as download_path:
yield download_path
@contextmanager
def _download(self, url):
# TODO(jsirois): Wrap with workunits, progress meters, checksums.
logger.info(f'Downloading {url}...')
with closing(self._session().get(url, stream=True)) as res:
if res.status_code != requests.codes.ok:
raise FetchError(f'Failed to download {url} ({res.status_code} error)')
with temporary_file() as archive_fp:
# NB: Archives might be very large so we play it safe and buffer them to disk instead of
# memory before unpacking.
for chunk in res.iter_content(chunk_size=self.get_options().buffer_size):
archive_fp.write(chunk)
archive_fp.close()
res.close()
yield archive_fp.name
def _session(self):
    """Build a requests session whose http(s) adapters retry failed fetches.

    The retry count comes from the --retries option.
    """
    http_session = requests.session()
    # Override default http adapters with a retriable one.
    adapter = requests.adapters.HTTPAdapter(max_retries=self.get_options().retries)
    for prefix in ("http://", "https://"):
        http_session.mount(prefix, adapter)
    return http_session
| 38.32967 | 98 | 0.6832 |
66d64699ef792f77ee32a8276c0aa4845d4c29a1 | 35,011 | py | Python | module_constants.py | vornne/pw_module_system | a35fd5d89cbb4e684ddf2fe4a6de9fe5066b9988 | [
"BSD-3-Clause"
] | 17 | 2015-01-19T07:53:57.000Z | 2021-07-10T02:26:51.000Z | module_constants.py | vornne/pw_module_system | a35fd5d89cbb4e684ddf2fe4a6de9fe5066b9988 | [
"BSD-3-Clause"
] | 1 | 2017-08-31T03:55:09.000Z | 2017-08-31T03:55:09.000Z | module_constants.py | vornne/pw_module_system | a35fd5d89cbb4e684ddf2fe4a6de9fe5066b9988 | [
"BSD-3-Clause"
] | 14 | 2015-05-03T05:20:01.000Z | 2021-12-29T17:10:50.000Z | ##############################################################
# These constants are used in various files.
# If you need to define a value that will be used in those files,
# just define it here rather than copying it across each file, so
# that it will be easy to change it if you need to.
##############################################################
########################################################
## PLAYER SLOTS #############################
########################################################
# Per-player slots (used with player_set_slot / player_get_slot).
slot_player_faction_id = 0
slot_player_spawn_state = 1 # listed below, starting with player_spawn_state_
slot_player_spawn_invulnerable_time = 2 # mission time when the player spawned with temporary invulnerability
slot_player_spawn_health_percent = 3 # saved health percentage to be applied when next spawning
slot_player_spawn_entry_point = 4 # entry point used at last spawn
# Values stored in slot_player_spawn_state.
player_spawn_state_dead = 0
player_spawn_state_invulnerable = 1 # while invulnerable soon after spawning
player_spawn_state_at_marker = 2 # set before spawning to indicate that the agent should be shifted to the player's marker scene prop
player_spawn_state_alive = 3
slot_player_inactive_index = 5 # index in the inactive players array, if stored
slot_player_next_chat_event_type = 6 # next chat event number that the server expects this player's client to use
slot_player_list_button_id = 7 # overlay id in the player list presentation
slot_player_outlaw_rating = 8
slot_player_is_lord = 9
slot_player_non_lord_troop_id = 10 # the last troop used before changing to a lord only troop, to revert after respawning if someone else is voted lord
slot_player_poll_faction_id = 11 # marks whether the player can vote in the current poll
slot_player_requested_spawn_point = 12 # the spawn point requested by the player after dying, if any; -1 to indicate a newly connected player that hasn't yet requested to spawn
slot_player_has_faction_door_key = 13
slot_player_has_faction_money_key = 14
slot_player_has_faction_item_key = 15
slot_player_teleport_to_ship_no = 16 # instance no of the last ship teleported to with the admin tool
slot_player_last_faction_kicked_from = 17 # stores when kicked from a faction, so subsequent kicks can be free of cost
slot_player_accessing_instance_id = 18 # stores the instance id of the inventory currently being accessed by the player, for updates if anyone else changes it
slot_player_last_action_time = 19 # mission time of the last action that should be prevented from quick repetition
slot_player_equip_item_0 = 20 # module equipment slots corresponding to the hard coded ones in header_items starting with ek_
slot_player_equip_item_1 = 21
slot_player_equip_item_2 = 22
slot_player_equip_item_3 = 23
slot_player_equip_head = 24
slot_player_equip_body = 25
slot_player_equip_foot = 26
slot_player_equip_gloves = 27
slot_player_equip_horse = 28
slot_player_equip_end = 29
slot_player_equip_item_0_ammo = 30
slot_player_equip_item_1_ammo = 31
slot_player_equip_item_2_ammo = 32
slot_player_equip_item_3_ammo = 33
slot_player_spawn_food_amount = 34 # saved food for next spawn
slot_player_faction_chat_muted = 35
slot_player_kick_at_time = 36 # time to kick a player after the name server has rejected them, to allow time to receive the message
slot_player_can_faction_announce = 37
slot_player_next_spawn_health_percent = 38 # spawn health percentage for the troop applied after death, if that server option is enabled
slot_player_accessing_unique_id = 39 # a unique number identifying an inventory scene prop being accessed that could despawn and the instance id be reused, like corpses
slot_player_admin_no_panel = 40 # admin permission slots: the default value 0 is permissive so everything works when a name server is not connected
slot_player_admin_no_gold = 41
slot_player_admin_no_kick = 42
slot_player_admin_no_temporary_ban = 43
slot_player_admin_no_permanent_ban = 44
slot_player_admin_no_kill_fade = 45
slot_player_admin_no_freeze = 46
slot_player_admin_no_teleport_self = 47
slot_player_admin_no_admin_items = 48
slot_player_admin_no_heal_self = 49
slot_player_admin_no_godlike_troop = 50
slot_player_admin_no_ships = 51
slot_player_admin_no_announce = 52
slot_player_admin_no_override_poll = 53
slot_player_admin_no_all_items = 54
slot_player_admin_no_mute = 55
slot_player_admin_no_animals = 56
slot_player_admin_no_factions = 57
slot_player_admin_end = 58
########################################################
## AGENT SLOTS #############################
########################################################
slot_agent_horse_last_rider = 0 # if a horse, the agent id of the last (or current) rider, or if stray, negative numbers counting down to when the horse will be removed
slot_agent_drowning_count = 1 # counts upwards each time an agent is found to be drowning underwater
slot_agent_poison_amount = 2 # increases each time the agent is attacked with poison, reduced when healed
slot_agent_poisoner_agent_id = 3 # agent id that last poisoned the agent
slot_agent_poisoner_player_uid = 4 # player unique id of the poisoner when applicable, to give correct death messages
slot_agent_freeze_instance_id = 5 # instance id of the invisible scene prop being used to freeze
slot_agent_is_targeted = 6 # mark that the stored target agent id is correct
slot_agent_food_amount = 7
slot_agent_fishing_last_school = 8 # last school fished from, to speed up repetitive check
slot_agent_last_horse_ridden = 9
slot_agent_money_bag_1_value = 10 # the values of the money bags picked up, in order
slot_agent_money_bag_2_value = 11
slot_agent_money_bag_3_value = 12
slot_agent_money_bag_4_value = 13
slot_agent_hunting_last_carcass = 14 # last animal carcass processed, to speed up repetitive checks
slot_agent_died_normally = 15
slot_agent_animation_end_time_ms = 16 # mission time in milliseconds
slot_agent_last_animation_string_id = 17
slot_agent_recent_animations_delay_ms = 18 # interval in milliseconds
slot_agent_storage_corpse_instance_id = 19 # saved when discarding armor
slot_agent_animal_herd_manager = 20 # instance id of the herd manager item attached to
slot_agent_animal_birth_time = 21 # mission time when the animal was spawned as a child, or extrapolated if spawned as an adult
slot_agent_animal_grow_time = 22 # mission time after which the animal will grow to an adult or birth a child
slot_agent_animal_move_time = 23 # mission time after which to move
slot_agent_animal_last_damage_time = 24
slot_agent_animal_food = 25
slot_agent_animal_carcass_instance_id = 26
slot_agent_animal_times_stuck = 27
slot_agent_animal_end = 28
slot_agent_head_damage_factor = 40 # agent modifier factors for armor slots
slot_agent_head_speed_factor = 41
slot_agent_head_accuracy_factor = 42
slot_agent_head_reload_factor = 43
slot_agent_body_damage_factor = 44
slot_agent_body_speed_factor = 45
slot_agent_body_accuracy_factor = 46
slot_agent_body_reload_factor = 47
slot_agent_foot_damage_factor = 48
slot_agent_foot_speed_factor = 49
slot_agent_foot_accuracy_factor = 50
slot_agent_foot_reload_factor = 51
slot_agent_hand_damage_factor = 52
slot_agent_hand_speed_factor = 53
slot_agent_hand_accuracy_factor = 54
slot_agent_hand_reload_factor = 55
slot_agent_armor_damage_factor = 56 # total agent modifier factors for armor
slot_agent_armor_speed_factor = 57
slot_agent_armor_accuracy_factor = 58
slot_agent_armor_reload_factor = 59
slot_agent_weapon_damage_factor = 60 # agent modifier factors for the wielded weapon
slot_agent_weapon_speed_factor = 61
slot_agent_weapon_accuracy_factor = 62
slot_agent_weapon_reload_factor = 63
slot_agent_cannot_attack = 64 # marks that any attack should be canceled
slot_agent_armor_damage_through = 65 # factor of letting damage received bleed through the armor
slot_agent_last_apply_factors_item_id = 66 # last item id that modifier factors were last checked for, to avoid duplicating calculations due to trigger activation quirks
########################################################
## SCENE PROP SLOTS #############################
########################################################
slot_scene_prop_item_id = 0 # main associated item id, for stockpiles and similar
slot_scene_prop_gold_value = 1 # preset gold value, or cached value of the associated item
slot_scene_prop_gold_multiplier = 2 # cached price multiplier of the associated item
slot_scene_prop_use_string = 3 # string id displayed when players look at the scene prop
slot_scene_prop_troop_id = 4 # for troop training stations
slot_scene_prop_full_hit_points = 5
slot_scene_prop_is_mercenary = 6 # 1 = stay associated with the faction that owned the castle at mission start, rather than changing with capture
slot_scene_prop_required_horse = 7 # horse item required for attaching to a cart
slot_scene_prop_average_craft_skill = 8
slot_scene_prop_is_resource_stockpile = 9 # marks stockpiles for raw resources, which have different sell price calculations
slot_scene_prop_linked_scene_prop = 10 # instance ids of linked scene props
slot_scene_prop_linked_scene_prop_1 = 10
slot_scene_prop_linked_scene_prop_2 = 11
slot_scene_prop_linked_scene_prop_3 = 12
slot_scene_prop_linked_scene_prop_4 = 13
linked_scene_prop_slot_count = 4
slot_scene_prop_linked_sail = slot_scene_prop_linked_scene_prop_1
slot_scene_prop_linked_sail_off = slot_scene_prop_linked_scene_prop_2
slot_scene_prop_linked_ramp = slot_scene_prop_linked_scene_prop_3
slot_scene_prop_linked_hold = slot_scene_prop_linked_scene_prop_4
slot_scene_prop_linked_platform_1 = slot_scene_prop_linked_scene_prop_1
slot_scene_prop_linked_platform_2 = slot_scene_prop_linked_scene_prop_2
slot_scene_prop_linked_ferry_winch = slot_scene_prop_linked_scene_prop_3
slot_scene_prop_position = 15 # multiple meanings, mostly ships and carts - use with care
slot_scene_prop_target_position = 16 # used for ships
slot_scene_prop_rotation = 17 # multiple meanings, for ships, carts, and doors - use with care
slot_scene_prop_target_rotation = 18 # used for ships
slot_scene_prop_max_position = slot_scene_prop_rotation
slot_scene_prop_max_distance = slot_scene_prop_target_rotation
slot_scene_prop_attached_to_agent = 19 # store agent id attached to
slot_scene_prop_controlling_agent = 20 # agent id steering the ship
slot_scene_prop_length = 21 # multiple meanings - use with care
slot_scene_prop_width = 22 # multiple meanings - use with care
slot_scene_prop_height = 23 # multiple meanings - use with care
slot_scene_prop_collision_kind = 24 # collision testing scene prop kind for ships; set to -1 for scene props that should never be checked for collision with ships
slot_scene_prop_speed_limit = 25 # used for ships
slot_scene_prop_no_move_physics = 26 # whether to disable physics when moving, so agents can't ride on the prop
slot_scene_prop_capture_faction_id = 27 # faction that has captured this prop individually, rather than the castle it belongs to
slot_scene_prop_next_resource_hp = 30 # hit points when the next resource item should be produced
slot_scene_prop_state = 31 # constants below starting with scene_prop_state_
slot_scene_prop_state_time = 32 # mission time involved with changing state, if appropriate
slot_scene_prop_stock_count = 33
slot_scene_prop_stock_count_update_time = 34 # on clients, time of the last stock count update, to prevent quickly repeated requests
slot_scene_prop_unlocked = 35
slot_scene_prop_regrow_script = 36 # script id to call when finished regrowing
slot_scene_prop_resource_item_id = 37
slot_scene_prop_prune_time = 38 # mission time when a spawned item scene prop will be pruned
slot_scene_prop_resources_default_cost = 39
slot_scene_prop_water = 40
slot_scene_prop_seeds = 41
slot_scene_prop_fruiting_interval = slot_scene_prop_water
slot_scene_prop_fruit_count = slot_scene_prop_seeds
slot_scene_prop_show_linked_hit_points = 45
slot_scene_prop_disabled = 46
slot_scene_prop_resource_refund_cost = 50
slot_scene_prop_crafting_resource_1 = 51
slot_scene_prop_crafting_resource_2 = 52
slot_scene_prop_crafting_resource_3 = 53
slot_scene_prop_crafting_resource_4 = 54
scene_prop_state_active = 0
scene_prop_state_destroyed = 1
scene_prop_state_hidden = 2
scene_prop_state_regenerating = 3
slot_scene_prop_inventory_targeted = 196 # instance id of targeted inventory, used with item scene props
slot_scene_prop_inventory_unique_id = 197 # unique number identifying spawned item scene props
slot_scene_prop_inventory_max_length = 198 # maximum length of items inside the container
slot_scene_prop_inventory_count = 199 # number of inventory slots for this container
slot_scene_prop_inventory_begin = 200 # item ids in container
slot_scene_prop_inventory_item_0 = 290 # item ids in player equipment
slot_scene_prop_inventory_ammo_begin = 300 # ammo counts in container
slot_scene_prop_inventory_mod_begin = 400 # item changes in container needing presentation updates
slot_scene_prop_inventory_mod_item_0 = 490 # item changes in player equipment needing presentation updates
slot_scene_prop_inventory_obj_begin = 500 # container slot overlay ids
slot_scene_prop_inventory_obj_item_0 = 590 # player equipment overlay ids
slot_scene_prop_inventory_mesh_begin = 600 # container item mesh overlay ids
slot_scene_prop_inventory_mesh_item_0 = 690 # player equipment item mesh overlay ids
slot_scene_prop_inventory_end = slot_scene_prop_inventory_ammo_begin
slot_scene_prop_inventory_mod_end = slot_scene_prop_inventory_obj_begin
slot_scene_prop_inventory_obj_end = slot_scene_prop_inventory_mesh_begin
inventory_count_maximum = slot_scene_prop_inventory_item_0 - slot_scene_prop_inventory_begin
corpse_inventory_slots = 5 # coded into the module so values are the same on the server and clients
corpse_inventory_max_length = 100
slot_animal_herd_manager_adult_item_id= 100
slot_animal_herd_manager_starving = 101
slot_animal_carcass_meat_count = 100
slot_animal_carcass_hide_count = 101
########################################################
## ITEM SLOTS #############################
########################################################
slot_item_difficulty = 0
slot_item_length = 1
slot_item_class = 2 # listed below, starting with item_class_
slot_item_resource_amount = 3 # resource amount of the item class
slot_item_gender = 4 # 0 = male or anyone, 1 = female only
slot_item_max_ammo = 5
slot_item_bonus_against_wood = 6
slot_item_couchable = 7
slot_item_has_attack_requirements = 8
slot_item_max_raw_damage = 9 # maximum out of swing and thrust damage
item_class_none = 0
item_class_repair = 1
item_class_wood_cutting = 2
item_class_wood = 3
item_class_mining = 4
item_class_iron = 5
item_class_lock_pick = 6
item_class_heraldic = 7 # marks the item to be redrawn when the banner changes
item_class_precious = 8
item_class_food = 9
item_class_grain_harvesting = 10
item_class_knife = 11
item_class_cloth = 12
item_class_leather = 13
item_class_herding_calm = 14
item_class_herding_rouse = 15
slot_item_animal_adult_item_id = 20
slot_item_animal_child_item_id = 21
slot_item_animal_grow_time = 22
slot_item_animal_max_in_herd = 23
slot_item_animal_attack_reaction = 24 # listed below, starting with animal_reaction_
slot_item_animal_death_sound = 25
slot_item_animal_meat_count = 26
slot_item_animal_hide_count = 27
slot_item_animal_wildness = 28 # higher values have greater unpredictability when herded or attacked
animal_reaction_flee = 0
animal_reaction_charge = 1
########################################################
## FACTION SLOTS #############################
########################################################
slot_faction_banner_mesh = 0
slot_faction_name_is_custom = 1 # 1 if the name has been changed from the default
slot_faction_is_active = 2 # 1 if the faction has at least one capture point associated with their castle at mission start
slot_faction_lord_player_uid = 3 # player unique id of the faction lord
slot_faction_lord_last_seen_time = 4
slot_faction_castle_banner_variant = 5 # work around an unwanted engine optimization: change tableau id used when changing faction banners to force them to update
slot_faction_list_button_id = 6 # overlay id in the faction list presentation
slot_faction_is_locked = 7 # 1 if an administrator locked the faction to prevent lord polls
slot_faction_poll_end_time = 20
slot_faction_poll_voter_count = 21
slot_faction_poll_yes_votes = 22
slot_faction_poll_no_votes = 23
slot_faction_poll_type = 24 # listed below, starting with poll_type_
slot_faction_poll_value_1 = 25
slot_faction_poll_value_2 = 26
slot_faction_poll_target_unique_id = 27 # when targeting a player, store their unique id to prevent accidentally harming another player reusing their id after they quit
poll_type_change_scene = 0
poll_type_kick_player = 1
poll_type_ban_player = 2
poll_type_faction_lord = 10
poll_cost_change_scene = 1000
poll_cost_kick_player = 500
poll_cost_ban_player = 700
poll_cost_faction_lord = 1000
poll_vote_no = 0
poll_vote_yes = 1
poll_vote_admin_no = 2
poll_vote_admin_yes = 3
poll_vote_abstain = 4
poll_result_no = -1
poll_result_yes = -2
poll_result_admin_no = -3
poll_result_admin_yes = -4
poll_result_existing = -5
poll_result_invalid = -6
poll_result_color = 0xFF0000
slot_faction_relations_begin = 30
faction_cost_change_banner = 500
faction_cost_change_name = 500
faction_cost_kick_player = 500
faction_cost_outlaw_player = 1000
########################################################
## SCENE SLOTS #############################
########################################################
########################################################
## TROOP SLOTS #############################
########################################################
troop_slot_count_per_equipment_type = 5
slot_troop_equipment_one_hand_begin = 0
slot_troop_equipment_two_hand_begin = 1 * troop_slot_count_per_equipment_type
slot_troop_equipment_ranged_begin = 2 * troop_slot_count_per_equipment_type
slot_troop_equipment_ammo_begin = 3 * troop_slot_count_per_equipment_type
slot_troop_equipment_shield_begin = 4 * troop_slot_count_per_equipment_type
slot_troop_equipment_head_begin = 5 * troop_slot_count_per_equipment_type
slot_troop_equipment_body_begin = 6 * troop_slot_count_per_equipment_type
slot_troop_equipment_foot_begin = 7 * troop_slot_count_per_equipment_type
slot_troop_equipment_hand_begin = 8 * troop_slot_count_per_equipment_type
slot_troop_equipment_horse_begin = 9 * troop_slot_count_per_equipment_type
slot_troop_ranking = 50 # used for sorting troop types in the player stats chart
slot_troop_spawn_health_percent = 51 # respawn health percentage when dying as this troop
slot_player_array_size = 0
slot_player_array_begin = 1
player_array_unique_id = 0
player_array_troop_id = 1
player_array_faction_id = 2
player_array_gold_value = 3
player_array_outlaw_rating = 4
player_array_entry_size = 5 # number of values stored in the disconnected players array
max_castle_count = 8
slot_mission_data_castle_owner_faction_begin = 0 # owner factions of all castles
slot_mission_data_castle_owner_faction_end = 8
slot_mission_data_castle_is_active_begin = 10 # flags of which castles are active, with at least 1 capture point
slot_mission_data_castle_is_active_end = 18
slot_mission_data_castle_name_string_begin = 20 # string ids for castle names
slot_mission_data_castle_name_string_end = 28
slot_mission_data_castle_money_chest_begin = 30 # instance ids of the main money chest linked to each castle
slot_mission_data_castle_money_chest_end = 38
slot_mission_data_castle_allows_training_begin = 40 # flags of which active castles have at least one linked training station
slot_mission_data_castle_allows_training_end = 48
slot_mission_data_faction_to_change_name_of = 100 # store the faction id for the next faction name change message
slot_last_chat_message_event_type = 0 # for the last chat message sent: network event number, combined with a type from the list below starting with chat_event_type_
slot_last_chat_message_not_recieved = 1 # mark that the server has not notified of receiving the last chat message
chat_event_type_local = 0 # for each chat type, holding shift while pressing enter will add 1 to the type
chat_event_type_local_shout = 1
chat_event_type_set_faction_name = 2
chat_event_type_faction = 4
chat_event_type_faction_announce = 5
chat_event_type_admin = 6
chat_event_type_admin_announce = 7
slot_chat_overlay_local_color = 0
slot_chat_overlay_faction_color = 1
slot_ship_array_count = 0 # count of ship instance ids in the scene
slot_ship_array_begin = 1 # array of ship instance ids
slot_ship_array_collision_props_count = 100 # stored instance ids of scene props near water level, for checking collision with ships
slot_ship_array_collision_props_begin = 101
slot_array_count = 0
slot_array_begin = 1
########################################################
## TEAM SLOTS #############################
########################################################
########################################################
# Timing constants — presumably seconds of mission time; confirm against usage.
spawn_invulnerable_time = 10 # time agents are invulnerable after freshly spawning
loop_player_check_outlaw_interval = 60
loop_agent_check_interval = 2
loop_horse_check_interval = 30
loop_health_check_interval = 29
loop_weather_adjust_interval = 32
stock_count_check_interval = 5 # don't request stock count updates of the scene prop aimed at more often than this
repeat_action_min_interval = 5 # prevent players from repeating certain potentially expensive actions more often than this
carcass_search_min_interval = 5 # only search for a different animal carcass to process after this interval from the last
poll_time_duration = 60
name_server_kick_delay_interval = 5 # delay before kicking from the server to allow the rejection message to be received
def sq(distance):
    """Return the fixed-point squared-distance constant for *distance*.

    get_sq_distance_between_positions always reports squared distances at
    fixed point multiplier 100, so the matching constant is distance**2 / 100.
    Floor division keeps the result an integer on both Python 2 and Python 3
    (plain ``/`` would produce a float on Python 3, and the module-system
    compiler expects integer operand values).
    """
    return distance * distance // 100 # get_sq_distance_between_positions always uses fixed point multiplier 100
max_distance_to_play_sound = 10000
max_distance_to_see_labels = 1500
max_distance_horse_rider = 5000
max_distance_local_chat = 3000
max_distance_local_chat_shout = 5000
ambient_distance_local_chat = 1000
ambient_distance_local_chat_shout = 2000
max_distance_local_animation = 2500
z_position_to_hide_object = -4999 # lower values might cause the position to "wrap around" up into the sky
z_position_water_level = -30 # approximate visible water level based on tests
max_distance_to_use = 300
max_distance_to_loot = 100
max_distance_admin_cart = 2000 # allow admins in their armor to attach carts from greater distances
max_distance_to_catch_fish = 2000
fish_school_max_move_distance = 500
fish_school_min_move_distance = 200
fish_school_minimum_depth = 200 # minimum water depth that a fish school will move into
fish_spawn_time = 300 # time before pruning fish items spawned
max_distance_to_include_in_herd = 5000 # when searching for a herd for an animal
castle_tax_gold_percentage = 20 # percentage of item price subtracted for selling price and added to the linked castle chest when bought
castle_training_gold_percentage = 50 # percentage of training cost added to the linked castle chest
craft_price_gold_reward_percentage = 20 # percentage of item price given to the crafter proportional to difference from target stock count
craft_skill_gold_reward_multiplier = 300 # multiplier of crafting skill required given to the crafter proportional to difference from target stock count
base_export_percentage = 100 # default percentage of item price for export stations
reduction_factor_base = 90
armor_damage_reduction_factor = 10
head_armor_speed_reduction_factor = 10
head_armor_accuracy_reduction_factor = 50
head_armor_reload_reduction_factor = 20
body_armor_speed_reduction_factor = 20
body_armor_accuracy_reduction_factor = 30
body_armor_reload_reduction_factor = 10
foot_armor_speed_reduction_factor = 30
foot_armor_accuracy_reduction_factor = 5
foot_armor_reload_reduction_factor = 5
hand_armor_speed_reduction_factor = 5
hand_armor_accuracy_reduction_factor = 30
hand_armor_reload_reduction_factor = 10
melee_damage_reduction_factor = 25
melee_speed_reduction_factor = 5
crossbow_damage_reduction_factor = 15
crossbow_speed_reduction_factor = 5
crossbow_accuracy_reduction_factor = 30
crossbow_reload_reduction_factor = 30
bow_thrown_damage_reduction_factor = 30
bow_thrown_speed_reduction_factor = 5
bow_thrown_accuracy_reduction_factor = 20
melee_max_level_difference = 3 # max strength difference to be able to swing a melee weapon
crossbow_max_level_difference = 4 # max strength difference to be able to shoot a crossbow
bow_ranged_max_level_difference = 3 # max power draw or power throw difference to be able to shoot a bow or throw a weapon
winch_type_drawbridge = 0
winch_type_portcullis = 1
winch_type_platform = 2
winch_type_sliding_door = 3
repairable_hit = 0
repairable_destroyed = 1
repairable_hit_destroyed = 2
repairable_repairing = 3
repairable_resource_required = 4
repairable_repaired = 5
ship_station_not_on_ship = 0
ship_station_none = 1
ship_station_mast = 2
ship_station_rudder = 3
ship_forwards_maximum = 9 # maximum forwards speed - also limited by ship type and agent skill
ship_rotation_maximum = 5 # maximum turning speed
ship_forwards_multiplier = 100
ship_rotation_multiplier = 3
player_list_item_height = 20
escape_menu_item_height = 35
admin_panel_item_height = 40
action_menu_item_height = 23
faction_menu_item_height = 120
animation_menu_item_height = 32
chat_overlay_item_height = 17
chat_overlay_ring_buffer_begin = "trp_chat_overlay_ring_buffer_0"
chat_overlay_ring_buffer_end = "trp_chat_overlay_ring_buffer_end"
chat_overlay_ring_buffer_size = 11
local_chat_color = 0xFFFFDD8A
local_chat_shout_color = 0xFFFF8C27
local_animation_color = 0xFFFFBBAA
admin_chat_color = 0xFFFF00FF
invalid_faction_color = 0xFF888888
# Outlaw rating: accumulated per offence, with outlawry at the threshold below.
outlaw_rating_for_kill = 2
outlaw_rating_for_team_kill = 5
outlaw_rating_for_lord_outlawed = 4
outlaw_rating_outlawed = 15 # outlaw players when they get this rating
outlaw_rating_maximum = 30 # don't increase the rating beyond this
change_faction_type_respawn = 0 # changing faction when training
change_faction_type_no_respawn = 1 # changing faction by clicking the use control, to the same troop type or one that allows it
change_faction_type_outlawed = 2 # being forced to change when outlawed, without respawning
capture_point_type_primary = 0 # after the required secondary points are captured, take over the castle
capture_point_type_secondary_all = 1 # require taking all secondary capture points of this type
capture_point_type_secondary_one = 2 # require taking at least one secondary capture point of this type
redraw_all_banners = 0 # at mission start on the server
redraw_castle_banners = 1 # when a castle is captured
redraw_faction_banners = 2 # when a faction lord changes their banner
redraw_client_banner_positions = 3 # at mission start on a client, to work around engine quirks with spawned items
redraw_single_capture_point_banner = 4 # when a secondary point is captured
inventory_slots_per_row = 6
inventory_slot_spacing = 100
inventory_mesh_offset = 50
inventory_container_x_offset = 190
inventory_container_y_offset = 175
scene_prop_hit_points_bar_scale_x = 6230
scene_prop_hit_points_bar_scale_y = 15000
select_agent_max_x = 300
select_agent_max_y = 200
presentation_max_x = 1000 # at fixed point multiplier 1000
presentation_max_y = 750 # at fixed point multiplier 1000
animation_menu_end_offset = 11
max_scene_prop_instance_id = 10000 # when trying to loop over all props in a scene, stop at this limit
max_food_amount = 100
max_hit_points_percent = 200
all_items_begin = "itm_tattered_headcloth"
all_items_end = "itm_all_items_end"
wielded_items_begin = "itm_club"
wielded_items_end = "itm_all_items_end"
scripted_items_begin = "itm_surgeon_scalpel" # items outside this range are not checked from the ti_on_agent_hit trigger
scripted_items_end = "itm_money_bag"
herd_animal_items_begin = "itm_deer" # item range used for herd animal spawners
herd_animal_items_end = "itm_stick"
playable_troops_begin = "trp_peasant" # troops outside this range are treated as storage objects unusable by players
playable_troops_end = "trp_playable_troops_end"
factions_begin = "fac_commoners"
castle_factions_begin = "fac_1"
factions_end = "fac_factions_end"
castle_names_begin = "str_castle_name_0"
castle_names_end = "str_castle_names_end"
scenes_begin = "scn_scene_1"
scenes_end = "scn_scenes_end"
scene_names_begin = "str_scene_name_1" # this range of strings must correspond to the available scene slots
scene_names_end = "str_scene_names_end"
game_type_mission_templates_begin = "mt_conquest"
game_type_mission_templates_end = "mt_edit_scene"
game_type_names_begin = "str_game_type_1"
game_type_names_end = "str_game_types_end"
game_type_info_strings_begin = "str_game_type_1_info"
banner_meshes_begin = "mesh_banner_a01"
banner_meshes_end = "mesh_banners_default_a"
banner_items_begin = "itm_pw_banner_pole_a01" # range of items associated with banner mesh ids
banner_items_end = "itm_admin_horse"
commands_module_system_names_begin = "str_bot_count" # range of strings associated with hard coded server commands
commands_napoleonic_wars_names_begin = "str_use_class_limits"
admin_action_log_strings_begin = "str_log_admin_kick" # range of strings associated with admin actions, for the server log
ambient_sounds_begin = "snd_fire_loop" # for ambient sound emitter scene props
ambient_sounds_end = "snd_sounds_end"
action_menu_strings_begin = "str_toggle_name_labels" # range of strings associated with the action menu
action_menu_strings_end = "str_action_menu_end"
animation_strings_begin = "str_anim_cheer" # range of strings associated with the animation menu
animation_strings_end = "str_log_animation"
profile_option_strings_begin = "str_display_name_labels" # range of strings for options stored in a player profile
from header_common import *
profile_options = [ # global flag variables for options stored in a player profile
    "$g_display_agent_labels",
    "$g_hide_faction_in_name_labels",
    "$g_display_chat_overlay",
    "$g_chat_overlay_type_selected",
    "$g_disable_automatic_shadow_recalculation",
    "$g_animation_menu_no_mouse_grab",
    "$g_mute_global_chat",
    "$g_disable_rain_snow_particles",
]
# Build-time sanity check: each option is packed into the profile banner id bit
# range declared in header_common (star-imported above), so the option count
# must fit in the reserved bits.
# NOTE(review): '>=' also rejects a count that exactly fills the range;
# presumably one value is reserved — confirm against header_common.
if len(profile_options) >= profile_banner_id_option_bits_end - profile_banner_id_option_bits_begin:
    raise Exception("Too many profile options: %d, maximum %d" % (len(profile_options), profile_banner_id_option_bits_end - profile_banner_id_option_bits_begin))
| 54.534268 | 180 | 0.702379 |
350835b40c02a596f127ba587ff1d2a25f67cf06 | 1,565 | py | Python | day_05/py3/main.py | musale/advent-of-code-2020 | cd467ec5f85a2733552bdf0984d1e8ce5e8b846c | [
"MIT"
] | null | null | null | day_05/py3/main.py | musale/advent-of-code-2020 | cd467ec5f85a2733552bdf0984d1e8ce5e8b846c | [
"MIT"
] | null | null | null | day_05/py3/main.py | musale/advent-of-code-2020 | cd467ec5f85a2733552bdf0984d1e8ce5e8b846c | [
"MIT"
] | null | null | null | def part_1():
file = open('day_05/input.txt', 'r')
highest_id = 0
for lineWithOptionNewLine in file:
line = lineWithOptionNewLine.strip('\n')
left = 0
right = 127
for letter in line[:-3]:
mid = (left + right) // 2
if letter == "F":
right = mid
else:
left = mid + 1
row = left
left = 0
right = 7
for letter in line[-3:]:
mid = (left + right) // 2
if letter == "L":
right = mid
else:
left = mid + 1
col = left
id = row * 8 + col
highest_id = max(highest_id, id)
return highest_id
def part_2():
    """Return the ID of your seat: the single gap in the sorted seat IDs.

    The flight is full except for your seat, and the seats immediately
    before and after yours are occupied, so your seat is the one ID
    missing from an otherwise consecutive run of sorted seat IDs.

    Returns
    -------
    int
        The missing seat ID, or -1 if no single-seat gap exists.
    """
    bits = str.maketrans("FBLR", "0101")
    # 'with' guarantees the file handle is closed (the original leaked it).
    with open('day_05/input.txt', 'r') as file:
        seat_ids = sorted(
            # F/L -> 0, B/R -> 1: the pass is a 10-bit seat ID.
            int(line.strip().translate(bits), 2)
            for line in file
            if line.strip()
        )
    # A gap of exactly one between neighbours marks the empty seat.
    for lower, upper in zip(seat_ids, seat_ids[1:]):
        if lower + 2 == upper:
            return lower + 1
    return -1
# Script body: prints the part-2 answer first, then part 1 (the original
# output order).  NOTE(review): these run on import; an
# ``if __name__ == "__main__":`` guard would be safer, but is omitted here
# to keep behaviour identical.
print(part_2())
print(part_1())
| 22.681159 | 48 | 0.434505 |
d96a1b29cd7a53e3fe6baf11a18264e5522dba64 | 4,719 | py | Python | lcm/ns_vls/biz/delete_vls.py | onap/vfc-nfvo-lcm | b7d4d015fa96a246d73d863092d3362afcedc284 | [
"Apache-2.0"
] | 4 | 2018-08-29T02:51:38.000Z | 2021-11-16T11:36:11.000Z | lcm/ns_vls/biz/delete_vls.py | onap/vfc-nfvo-lcm | b7d4d015fa96a246d73d863092d3362afcedc284 | [
"Apache-2.0"
] | null | null | null | lcm/ns_vls/biz/delete_vls.py | onap/vfc-nfvo-lcm | b7d4d015fa96a246d73d863092d3362afcedc284 | [
"Apache-2.0"
] | 1 | 2019-05-12T08:21:19.000Z | 2019-05-12T08:21:19.000Z | # Copyright 2016 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import traceback
from lcm.pub.config.config import REPORT_TO_AAI
from lcm.pub.database.models import VLInstModel, VNFFGInstModel
from lcm.pub.exceptions import NSLCMException
from lcm.pub.msapi import extsys
from lcm.pub.msapi.aai import query_network_aai, delete_network_aai
from lcm.pub.nfvi.vim import vimadaptor
logger = logging.getLogger(__name__)
class DeleteVls(object):
    """Deletes a virtual-link (VL) instance.

    The workflow is strictly ordered: tear the network down in the VIM,
    then (optionally) un-register it from AAI, then purge the DB records.
    """

    def __init__(self, vl_inst_id):
        self.vl_inst_id = vl_inst_id
        self.ns_inst_id = ""

    def do(self):
        """Run the delete workflow; always returns a result dict."""
        try:
            vl_records = VLInstModel.objects.filter(vlinstanceid=self.vl_inst_id)
            if not vl_records:
                logger.info("vl inst id(%s) is not exist or has been already deleted", self.vl_inst_id)
                return {"result": 0, "detail": "vl is not exist or has been already deleted"}
            vl_record = vl_records[0]
            self.ns_inst_id = vl_record.ownerid
            # vimid may be stored either as a JSON string or as a parsed value.
            raw_vim_id = vl_record.vimid
            vim_id = json.loads(raw_vim_id) if isinstance(raw_vim_id, str) else raw_vim_id
            self.delete_vl_from_vim(vim_id,
                                    vl_record.relatedsubnetworkid.split(","),
                                    vl_record.relatednetworkid)
            if REPORT_TO_AAI:
                self.delete_network_and_subnet_in_aai()
            self.delete_vl_from_db(vl_records)
            return {"result": 0, "detail": "delete vl success"}
        except NSLCMException as e:
            return self.exception_handle(e)
        except Exception as e:
            logger.error(traceback.format_exc())
            return self.exception_handle(e)

    def exception_handle(self, e):
        """Turn any failure into the caller-facing result dict."""
        detail = "vl delete failed, detail message: %s" % e.args[0]
        logger.error(detail)
        # NOTE: keeps "result": 0 to match the rest of this class's payloads.
        return {"result": 0, "detail": detail}

    def delete_vl_from_vim(self, vim_id, subnetwork_id_list, network_id):
        """Delete all subnets first, then the network, through the VIM adaptor."""
        vim_info = extsys.get_vim_by_id(vim_id)
        adaptor = vimadaptor.VimAdaptor({
            "vimid": vim_id,
            "vimtype": vim_info["type"],
            "url": vim_info["url"],
            "user": vim_info["userName"],
            "passwd": vim_info["password"],
            "tenant": vim_info["tenant"]})
        for subnet_id in subnetwork_id_list:
            adaptor.delete_subnet(subnet_id=subnet_id)
        adaptor.delete_network(network_id=network_id)

    def delete_vl_inst_id_in_vnffg(self):
        """Drop this VL's id from the vllist of every VNFFG of the owning NS."""
        for vnffg in VNFFGInstModel.objects.filter(nsinstid=self.ns_inst_id):
            remaining = ",".join(
                vl_id for vl_id in vnffg.vllist.split(",") if vl_id != self.vl_inst_id)
            VNFFGInstModel.objects.filter(vnffginstid=vnffg.vnffginstid).update(vllist=remaining)

    def delete_network_and_subnet_in_aai(self):
        """Best-effort removal of the network from AAI; failures are only logged."""
        logger.debug("DeleteVls::delete_network_in_aai[%s] in aai.", self.vl_inst_id)
        try:
            # AAI deletes require the object's current resource-version.
            network_info = query_network_aai(self.vl_inst_id)
            resource_version = network_info.get("resource-version")
            _, resp_status = delete_network_aai(self.vl_inst_id, resource_version)
            logger.debug("Delete network[%s] from aai successfully, status: %s", self.vl_inst_id, resp_status)
        except NSLCMException as e:
            logger.debug("Fail to delete network[%s] from aai: %s", self.vl_inst_id, e.args[0])
        except Exception as e:
            logger.error("Exception occurs when delete network[%s] from aai: %s", self.vl_inst_id, e.args[0])
            logger.error(traceback.format_exc())

    def delete_vl_from_db(self, vl_inst_info):
        """Detach the VL from all VNFFGs, then delete its DB record(s)."""
        self.delete_vl_inst_id_in_vnffg()
        vl_inst_info.delete()
| 44.942857 | 113 | 0.672388 |
893a232aa1158c7d138a2853922125313b44ecca | 8,587 | py | Python | mmhelper/bacteria_tracking.py | jmetz/momanalysis | 8d71490c99127568b184784890258e9a6ef876ef | [
"MIT"
] | null | null | null | mmhelper/bacteria_tracking.py | jmetz/momanalysis | 8d71490c99127568b184784890258e9a6ef876ef | [
"MIT"
] | 3 | 2019-07-25T13:43:15.000Z | 2019-11-04T12:39:22.000Z | mmhelper/bacteria_tracking.py | jmetz/momanalysis | 8d71490c99127568b184784890258e9a6ef876ef | [
"MIT"
] | 1 | 2021-03-28T03:00:21.000Z | 2021-03-28T03:00:21.000Z | # FILE : bacteria_tracking.py
# CREATED : 14/11/17 13:08:52
# AUTHOR : A. Smith <as624@exeter.ac.uk>
# DESCRIPTION : Bacteria tracking functions
#
"""Bacteria tracking functions
"""
from functools import reduce
from skimage.measure import regionprops
import numpy as np
def find_changes(in_list, option_list, well, new_well):
    """
    Generate, for every candidate lineage assignment, the per-bacterium
    change statistics between two consecutive frames of a well.

    Parameters
    ------
    in_list : list
        Labels of the bacteria detected in the previous well
    option_list : list
        All candidate assignments; each option maps every region detected
        in the new well (by position) to one of the input labels
    well : ndarray (2D) of dtype int
        Labelled image of the previous well
    new_well : ndarray (2D) of dtype int
        Labelled image of the new well

    Yields
    ------
    option : list
        The candidate assignment being scored
    stats : dictionary
        Maps each input label to ``[num_divisions, relative_area_change,
        centroid_shift]`` under that assignment
    """
    # (centroid-y, area) per detected region, keyed by regionprops order.
    # NOTE(review): assumes in_list order matches regionprops(well) order.
    old_props = {idx: (reg.centroid[0], reg.area)
                 for idx, reg in enumerate(regionprops(well))}
    new_props = {idx: (reg.centroid[0], reg.area)
                 for idx, reg in enumerate(regionprops(new_well))}
    for option in option_list:
        stats = {}
        for idx, label in enumerate(in_list):
            old_centr, old_area = old_props[idx]
            # Positions in the new well that this option assigns to `label`.
            assigned = [pos for pos, chosen in enumerate(option) if chosen == label]
            areas = [new_props[pos][1] for pos in assigned]
            centroids = [new_props[pos][0] for pos in assigned]
            # count - 1: one region means no division, zero means death.
            divisions = len(assigned) - 1
            total_area = sum(areas)
            # Always divide small by large so the ratio stays <= 1.
            if total_area < old_area:
                area_change = total_area / old_area
            else:
                area_change = old_area / total_area
            if centroids:
                # Shift of the mean daughter centroid from the parent's.
                centroid_change = abs(sum(centroids) / len(centroids) - old_centr)
            else:
                centroid_change = 0
            stats[label] = [divisions, area_change, centroid_change]
        yield option, stats
def find_probs(
        probs,
        prob_div=0.01,
        prob_death=0.5,
        prob_no_change=0.95,
        av_bac_length=18,
):
    """
    Takes a dictionary of information for a potential combination
    and returns an overall likelihood score.

    Parameters
    ------
    probs : dictionary
        Key is a unique number of an input bacteria and the value is a
        list ``[num_divisions, relative_area_change, centroid_change]``
        for that bacteria (num_divisions of -1 means death)
    prob_div : float, optional
        Probability a bacteria divides between consecutive timepoints (default : 0.01)
    prob_death : float, optional
        Probability a bacteria lyses between consecutive timepoints (default : 0.5)
    prob_no_change : float, optional
        Probability there is no change between consecutive timepoints (default : 0.95)
    av_bac_length : float, optional
        The average bacteria length in pixels (default : 18)

    Returns
    ------
    combined_prob : float
        The overall score for this combination of events (product over all
        bacteria; note individual factors may exceed 1, so this is a
        relative score rather than a true probability)
    """
    scores = []
    for bac_label in probs:
        # [num divisions (-1 == death), relative area change, centroid shift].
        divs_deaths, relative_area, change_centr = probs[bac_label]
        if divs_deaths < 0:
            # Death: only the death probability matters; area/centroid are
            # irrelevant, so their factors are neutral.
            prob_divis = prob_death
            prob_area = 1
            prob_centr = 1
        elif divs_deaths == 0:
            # No division: score area by its (<= 1) ratio and penalise
            # centroid movement; a perfect match scores 1.
            prob_divis = prob_no_change
            prob_area = relative_area
            prob_centr = 1 if change_centr == 0 else 1 / abs(change_centr)
        else:
            # Division: normalise the area ratio by the division count,
            # dividing by whichever is larger to keep the factor <= 1.
            if relative_area < divs_deaths:
                prob_area = relative_area / divs_deaths
            else:
                prob_area = divs_deaths / relative_area
            # Each extra division is quadratically less likely.
            prob_divis = prob_div ** (divs_deaths * divs_deaths)
            # Each division is expected to shift the centroid by half a
            # bacterium length.
            shift_error = abs(divs_deaths * (av_bac_length / 2) - change_centr)
            # BUG FIX: the original computed 1 / shift_error unconditionally
            # and raised ZeroDivisionError when the centroid moved exactly
            # the expected distance; mirror the no-change branch and score a
            # perfect match as 1.
            prob_centr = 1 / shift_error if shift_error else 1
        scores.append(prob_area * prob_divis * prob_centr)
    # Initial value 1 keeps the result for non-empty input identical and
    # makes an empty `probs` well-defined (1) instead of raising TypeError.
    combined_prob = reduce(lambda acc, s: acc * s, scores, 1)
    return combined_prob
def label_most_likely(most_likely, new_well, label_dict_string):
    """
    Takes the most likely combination of how the bacteria may have
    divided/died or moved around and re-labels them accordingly

    Parameters
    ------
    most_likely : list
        Containing the most likely output combination
    new_well : ndarray (2D) of dtype int
        A labelled image showing the detected bacteria in the new well
    label_dict_string : dictionary
        Each key is a unique label of a bacteria, each value is
        a string containing its lineage information

    Returns
    ------
    out_well : ndarray (2D) of dtype int
        A labelled image showing the tracked bacteria in the new well
    label_dict_string : dictionary
        Updated dictionary where each key is a unique label of a bacteria,
        each value is a string containing its lineage information
    """
    out_well = np.zeros(new_well.shape, dtype=new_well.dtype)
    if most_likely is None:
        # if there is no likely option return an empty well
        return out_well, label_dict_string
    # Counter used to build the "_1", "_2", ... suffix for daughters.
    new_label_string = 0
    smax = 0
    # Highest existing lineage key; newly divided bacteria are labelled above
    # it.  NOTE(review): assumes the dict keys are ints (smax += 1 below) and
    # that most_likely is ordered like regionprops(new_well) -- confirm.
    smax = max(label_dict_string, key=int)
    for i, region in enumerate(regionprops(new_well)):
        if most_likely.count(most_likely[i]) == 1:
            # Unique assignment: the bacterium keeps its previous label.
            out_well[new_well == region.label] = most_likely[i]
        else:
            # Division: this parent label maps to several new regions, so
            # each daughter gets a fresh label above the current maximum.
            smax += 1
            out_well[new_well == region.label] = smax
            # Compare this region's parent lineage with the previous
            # region's to detect when we move on to a different parent.
            if i > 0:
                last_label_start = label_dict_string[most_likely[i - 1]]
            else:
                last_label_start = label_dict_string[most_likely[i]]
            new_label_start = label_dict_string[most_likely[i]]
            if new_label_start != last_label_start:
                # New parent lineage: restart the daughter counter.
                new_label_string = 0
            new_label_string += 1
            # Daughter lineage string = parent lineage + "_<daughter index>".
            add_string = "_%s" % (new_label_string)
            label_dict_string[smax] = new_label_start + add_string
    return out_well, label_dict_string
| 43.150754 | 98 | 0.63666 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.