hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4962f14d7aebf6b2527a227274f71ac4af8c4389 | 584 | py | Python | test/test_utils.py | ColCarroll/pete | 066bc5c6e5706bc215038f835202d8fb5162033f | [
"MIT"
] | 1 | 2020-10-28T03:39:56.000Z | 2020-10-28T03:39:56.000Z | test/test_utils.py | ColCarroll/pete | 066bc5c6e5706bc215038f835202d8fb5162033f | [
"MIT"
] | 2 | 2016-07-10T17:09:05.000Z | 2021-04-20T17:58:12.000Z | test/test_utils.py | ColCarroll/pete | 066bc5c6e5706bc215038f835202d8fb5162033f | [
"MIT"
] | 1 | 2016-08-15T13:33:15.000Z | 2016-08-15T13:33:15.000Z | import os
from pete import Broadcaster, Task
# Absolute path of the directory that contains this test module.
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
class GatheringBroadcaster(Broadcaster):
    """Broadcaster test double that records every message it is asked to send."""
    name = 'gathering broadcaster'

    def __init__(self):
        # All messages passed to send(), in order.
        self.messages = []

    def send(self, messages):
        """Record *messages* instead of delivering them anywhere."""
        self.messages.extend(messages)
class NamedTask(Task):
    """Task test double whose run() counts how often it was invoked."""
    name = 'named task'

    def __init__(self):
        # Toggle for should_run(); tests flip this to simulate skipped tasks.
        self.will_run = True
        self.run_count = 0

    def should_run(self):
        """Report whether the task wants to run on this cycle."""
        return self.will_run

    def run(self):
        """Increment the invocation counter and return a status message."""
        self.run_count += 1
        return ["{0.name} {0.run_count}".format(self)]
| 19.466667 | 54 | 0.635274 |
1aad3969f3eb5889dfc07bcc14450b230526dff2 | 1,852 | py | Python | pref/webapp/urls.py | ahampt/Pref | 6a6b44c751da4358d97c7f170237b8fc0a4bc3d0 | [
"MIT"
] | null | null | null | pref/webapp/urls.py | ahampt/Pref | 6a6b44c751da4358d97c7f170237b8fc0a4bc3d0 | [
"MIT"
] | 7 | 2015-08-02T20:58:23.000Z | 2016-05-02T03:25:21.000Z | pref/webapp/urls.py | ahampt/Pref | 6a6b44c751da4358d97c7f170237b8fc0a4bc3d0 | [
"MIT"
] | null | null | null | from django.conf.urls import patterns, include, url
from django.conf import settings
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# URL routing table for the whole site.  All entries are anchored under
# settings.PREFIX_URL and use string dotted paths to the view functions.
# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8 and
# removed in Django 1.10; on newer Django versions this must become a plain
# list of url()/path() entries with imported view callables.  Entry order
# matters for URL resolution, so the order below is preserved.
urlpatterns = patterns('',
    # Landing / home pages
    ('^' + settings.PREFIX_URL + r'$', 'webapp.views.site.access'),
    ('^' + settings.PREFIX_URL + r'home/$', 'webapp.views.site.home'),
    # Account management (register/login/logout) and profile pages
    ('^' + settings.PREFIX_URL + r'register/$', 'webapp.views.profile.register'),
    ('^' + settings.PREFIX_URL + r'profiles/$', 'webapp.views.profile.view_list'),
    ('^' + settings.PREFIX_URL + r'profiles/(?P<username>\w{0,30})/$', 'webapp.views.profile.view'),
    ('^' + settings.PREFIX_URL + r'login/$', 'webapp.views.profile.login'),
    ('^' + settings.PREFIX_URL + r'logout/$', 'webapp.views.profile.logout'),
    # Movie catalogue: listing plus detail by URL-safe title
    ('^' + settings.PREFIX_URL + r'movies/$', 'webapp.views.movie.view_list'),
    ('^' + settings.PREFIX_URL + r'movies/(?P<urltitle>[a-zA-Z0-9_~]{0,100})/$', 'webapp.views.movie.view'),
    # Movie properties: people and genres, each with list + detail routes
    ('^' + settings.PREFIX_URL + r'people/$', 'webapp.views.property.people'),
    ('^' + settings.PREFIX_URL + r'people/(?P<urlname>[a-zA-Z0-9_~]{0,100})/$', 'webapp.views.property.person'),
    ('^' + settings.PREFIX_URL + r'genres/$', 'webapp.views.property.genres'),
    ('^' + settings.PREFIX_URL + r'genres/(?P<description>[a-zA-Z0-9_~-]{0,50})/$', 'webapp.views.property.genre'),
    # Discovery helpers
    ('^' + settings.PREFIX_URL + r'search/$', 'webapp.views.movie.search'),
    ('^' + settings.PREFIX_URL + r'random/$', 'webapp.views.movie.random'),
    ('^' + settings.PREFIX_URL + r'discovery/$', 'webapp.views.site.discovery'),
    # Static informational pages
    ('^' + settings.PREFIX_URL + r'about/$', 'webapp.views.site.about'),
    ('^' + settings.PREFIX_URL + r'disclaimers/$', 'webapp.views.site.disclaimers'),
    ('^' + settings.PREFIX_URL + r'privacy_policy/$', 'webapp.views.site.privacy'),
    ('^' + settings.PREFIX_URL + r'channel/$', 'webapp.views.site.channel'),
)
| 61.733333 | 112 | 0.661447 |
b637401353d6971f7d1ee31be54324f0b91a027b | 21,534 | py | Python | neutorch/dataset/transform.py | brain-map/neutorch | 0fc0c2845e8d513b0a130b6d001a26833c89a63a | [
"Apache-2.0"
] | null | null | null | neutorch/dataset/transform.py | brain-map/neutorch | 0fc0c2845e8d513b0a130b6d001a26833c89a63a | [
"Apache-2.0"
] | 1 | 2022-03-07T15:47:53.000Z | 2022-03-07T15:47:53.000Z | neutorch/dataset/transform.py | brain-map/neutorch | 0fc0c2845e8d513b0a130b6d001a26833c89a63a | [
"Apache-2.0"
] | null | null | null | from abc import ABC, abstractmethod
import random
from functools import lru_cache
from chunkflow.lib.bounding_boxes import Cartesian
# from copy import deepcopy
import numpy as np
from scipy.ndimage.filters import gaussian_filter
# from scipy.ndimage import affine_transform
import cv2
from skimage.util import random_noise
from skimage.transform import swirl
from .patch import Patch
# Default chance that a transform fires, used when none is given explicitly.
DEFAULT_PROBABILITY = .5
class AbstractTransform(ABC):
    """Common interface for all patch augmentations.

    Each transform fires with a configurable probability.  When it does not
    fire, any spatial shrinkage it would have caused is still recorded on the
    patch so the final patch size stays consistent.
    """

    def __init__(self, probability: float = DEFAULT_PROBABILITY):
        assert 0. < probability <= 1.
        self.probability = probability

    @property
    def name(self):
        """Human-readable transform name (the concrete class name)."""
        return type(self).__name__

    def is_invertible(self):
        """A transform is invertible when it defines an ``invert`` method."""
        return hasattr(self, 'invert')

    def __call__(self, patch: Patch):
        if random.random() >= self.probability:
            # Not triggered this round.  Spatial transforms must still have
            # their shrink size accounted for, otherwise the output size
            # would depend on which augmentations happened to fire.
            if hasattr(self, 'shrink_size'):
                patch.accumulate_delayed_shrink_size(self.shrink_size)
            return
        self.transform(patch)

    @abstractmethod
    def transform(self, patch: Patch):
        """Apply the augmentation to ``patch`` in place.

        Args:
            patch (Patch): image and target pair.
        """
        pass
class SpatialTransform(AbstractTransform):
    """Transforms that move voxels around; image and target are warped together."""

    def __init__(self, probability: float = DEFAULT_PROBABILITY):
        super().__init__(probability=probability)

    @abstractmethod
    def transform(self, patch: Patch):
        """Warp image and target in lockstep.

        Args:
            patch (tuple): image and target pair.
        """
        pass

    @property
    def shrink_size(self):
        """Patch shrinkage caused by this transform as (z0, y0, x0, z1, y1, x1).

        For example, dropping a section shrinks the z axis.  The default is
        no shrinkage at all.
        """
        return (0,) * 6
class IntensityTransform(AbstractTransform):
    """change image intensity only"""
    def __init__(self, probability: float = DEFAULT_PROBABILITY):
        super().__init__(probability=probability)

    @abstractmethod
    def transform(self, patch: Patch):
        # Intensity transforms only modify patch.image; the target (and the
        # voxel positions) stay untouched.
        pass
class SectionTransform(AbstractTransform):
    """change a random section only."""
    def __init__(self, probability: float = DEFAULT_PROBABILITY):
        super().__init__(probability=probability)

    def transform(self, patch: Patch):
        # Pick a random axis and a random section index along that axis, then
        # delegate the actual modification to the subclass.
        # NOTE(review): randrange(3) selects from axes 0-2, but patches are
        # handled as 5D (batch, channel, z, y, x) elsewhere in this file, so
        # axes 0-1 would be batch/channel rather than spatial axes — confirm
        # whether axes 2-4 were intended here.
        self.selected_axis = random.randrange(3)
        self.selected_idx = random.randrange(
            patch.image.shape[self.selected_axis]
        )
        patch = self.transform_section(patch)
        return patch

    @abstractmethod
    def transform_section(self, patch: Patch):
        # Subclasses rewrite the section at (selected_axis, selected_idx).
        pass
class Compose(object):
    """Chain several transforms and apply them to a patch in order."""

    def __init__(self, transforms: list):
        """
        Args:
            transforms (list): transform instances applied sequentially.
        """
        self.transforms = transforms
        # Total worst-case shrinkage is the sum over all spatial transforms.
        total = np.zeros((6,), dtype=np.int64)
        for candidate in transforms:
            if isinstance(candidate, SpatialTransform):
                total += np.asarray(candidate.shrink_size)
        self.shrink_size = tuple(total)

    def __call__(self, patch: Patch):
        for candidate in self.transforms:
            candidate(patch)
        # Flips/transposes can leave arrays with negative strides, which
        # pytorch cannot convert to tensors; copying normalizes the layout.
        patch.image = patch.image.copy()
        patch.target = patch.target.copy()
class OneOf(AbstractTransform):
    """Apply exactly one transform, picked uniformly from a candidate list."""

    def __init__(self, transforms: list,
            probability: float = DEFAULT_PROBABILITY) -> None:
        super().__init__(probability=probability)
        assert len(transforms) > 1
        self.transforms = transforms
        # Accumulate the shrinkage of every spatial candidate so the caller
        # can budget for the worst case.
        total = np.zeros((6,), dtype=np.int64)
        for candidate in transforms:
            if isinstance(candidate, SpatialTransform):
                total += np.asarray(candidate.shrink_size)
        self.shrink_size = tuple(total)

    def transform(self, patch: Patch):
        """Pick one candidate at random and run it on the patch."""
        chosen = random.choice(self.transforms)
        chosen(patch)
class DropSection(SpatialTransform):
    """Remove one random interior z section, shifting deeper sections up by one.

    Because information is truly removed, the z-axis shrink is applied
    immediately instead of being delayed.
    """

    def __init__(self, probability: float = DEFAULT_PROBABILITY):
        super().__init__(probability=probability)

    def transform(self, patch: Patch):
        b0, c0, z0, y0, x0 = patch.shape
        # Dropping the first or last section is meaningless (stated in the
        # original comment).  BUG FIX: randint(1, z0-1) could still select
        # the last section; randint(1, z0-2) keeps the choice interior, and
        # max(...) guards tiny patches (z0 == 2) from a randint ValueError.
        z = random.randint(1, max(1, z0 - 2))
        image = np.zeros((b0, c0, z0-1, y0, x0), dtype=patch.image.dtype)
        target = np.zeros((b0, c0, z0-1, y0, x0), dtype=patch.target.dtype)
        # Copy everything above the dropped section, then splice the part
        # below it one slot earlier.
        image[..., :z, :, :] = patch.image[..., :z, :, :]
        target[..., :z, :, :] = patch.target[..., :z, :, :]
        image[..., z:, :, :] = patch.image[..., z+1:, :, :]
        target[..., z:, :, :] = patch.target[..., z+1:, :, :]
        patch.image = image
        patch.target = target

    @property
    def shrink_size(self):
        # One section is removed along z (reported on the far side).
        return (0, 0, 0, 1, 0, 0)
class BlackBox(IntensityTransform):
    """Zero out a few randomly placed cuboids inside the image patch."""

    def __init__(self,
            probability: float = DEFAULT_PROBABILITY,
            max_box_size: tuple = (8,8,8),
            max_box_num: int = 3):
        """
        Args:
            probability (float, optional): chance of triggering this augmentation.
            max_box_size (tuple, optional): largest allowed box extent per axis (z, y, x).
            max_box_num (int, optional): most boxes blanked out per call.
        """
        super().__init__(probability=probability)
        assert len(max_box_size) == 3
        self.max_box_size = max_box_size
        self.max_box_num = max_box_num

    def transform(self, patch: Patch):
        n_boxes = random.randint(1, self.max_box_num)
        for _ in range(n_boxes):
            # Sample a box extent per axis, then an origin that keeps the
            # whole box strictly inside the spatial volume (randint is
            # inclusive on both ends).
            dims = tuple(random.randint(1, cap) for cap in self.max_box_size)
            origin = tuple(
                random.randint(1, extent - dim - 1)
                for extent, dim in zip(patch.shape[-3:], dims)
            )
            z0, y0, x0 = origin
            dz, dy, dx = dims
            patch.image[..., z0:z0 + dz, y0:y0 + dy, x0:x0 + dx] = 0
class NormalizeTo01(IntensityTransform):
    """Rescale uint8 images into the float32 [0, 1] range."""

    def __init__(self, probability: float = 1.):
        super().__init__(probability=probability)

    def transform(self, patch: Patch):
        # Images that are already floating point are left untouched.
        if not np.issubdtype(patch.image.dtype, np.uint8):
            return
        patch.image = patch.image.astype(np.float32) / 255.
class AdjustBrightness(IntensityTransform):
    """Shift the whole image by a random brightness offset, then clip to [0, 1]."""

    def __init__(self, probability: float = DEFAULT_PROBABILITY,
            min_factor: float = 0.05,
            max_factor: float = 0.3):
        super().__init__(probability=probability)
        self.min_factor = min_factor
        # Keep the factor in a sane range regardless of caller input.
        self.max_factor = np.clip(max_factor, 0, 2)

    def transform(self, patch: Patch):
        # Random sign/magnitude in [-0.5, 0.5], scaled by a random factor.
        offset = random.uniform(-0.5, 0.5) * random.uniform(
            self.min_factor, self.max_factor)
        patch.image += offset
        np.clip(patch.image, 0., 1., out=patch.image)
class AdjustContrast(IntensityTransform):
    """Multiply the image by a random contrast factor, then clip to [0, 1]."""

    def __init__(self, probability: float = DEFAULT_PROBABILITY,
            factor_range: tuple = (0.05, 2.)):
        """
        Args:
            probability (float, optional): chance of applying the adjustment.
            factor_range (tuple, optional): (low, high) bounds of the factor.
        """
        super().__init__(probability=probability)
        self.factor_range = factor_range

    def transform(self, patch: Patch):
        scale = random.uniform(*self.factor_range)
        patch.image *= scale
        np.clip(patch.image, 0., 1., out=patch.image)
class Gamma(IntensityTransform):
    """Apply a random gamma correction with exponent 2**u, u ~ U(-1, 1)."""

    def __init__(self, probability: float = DEFAULT_PROBABILITY):
        super().__init__(probability=probability)

    def transform(self, patch: Patch):
        # Exponent in [0.5, 2]; assumes the image already lives in [0, 1].
        exponent = 2. ** random.uniform(-1., 1.)
        patch.image **= exponent
class GaussianBlur2D(IntensityTransform):
    """Blur each 2D (y, x) section of the image with a random gaussian kernel."""

    def __init__(self, probability: float=DEFAULT_PROBABILITY,
            sigma: float = 1.5):
        """
        Args:
            probability (float, optional): chance of applying the blur.
            sigma (float, optional): upper bound of the random gaussian sigma.
        """
        super().__init__(probability=probability)
        self.sigma = sigma

    def transform(self, patch: Patch):
        sigma = random.uniform(0.2, self.sigma)
        # BUG FIX: a scalar sigma makes scipy blur along *every* axis of the
        # 5D (batch, channel, z, y, x) array — i.e. across z, channels and
        # even the batch — contradicting the "2D" intent of this transform.
        # Restrict the blur to the two trailing (y, x) axes by zeroing the
        # sigma for all leading axes.
        sigmas = (0.,) * (patch.image.ndim - 2) + (sigma, sigma)
        gaussian_filter(patch.image, sigma=sigmas, output=patch.image)
class GaussianBlur3D(IntensityTransform):
    """Blur the image with a random anisotropic 3D (z, y, x) gaussian kernel."""

    def __init__(self, probability: float = DEFAULT_PROBABILITY,
            max_sigma: tuple = (1.5, 1.5, 1.5)):
        """
        Args:
            probability (float, optional): chance of applying the blur.
            max_sigma (tuple, optional): per-axis (z, y, x) sigma upper bounds.
        """
        super().__init__(probability=probability)
        self.max_sigma = max_sigma

    def transform(self, patch: Patch):
        sigma = tuple(random.uniform(0.2, s) for s in self.max_sigma)
        # BUG FIX: scipy requires `sigma` to be a scalar or one value per
        # array axis; passing this 3-tuple for the 5D (batch, channel, z, y, x)
        # image raised "sequence argument must have length equal to input
        # rank".  Pad with zeros so batch and channel axes are not blurred.
        sigmas = (0.,) * (patch.image.ndim - len(sigma)) + sigma
        gaussian_filter(patch.image, sigma=sigmas, output=patch.image)
class Noise(IntensityTransform):
    """Add random noise (gaussian by default) to the image."""

    def __init__(self, probability: float = DEFAULT_PROBABILITY,
            mode: str='gaussian', max_variance: float = 0.02):
        """
        Args:
            probability (float, optional): chance of applying this augmentation.
            mode (str, optional): noise model passed to skimage.util.random_noise.
            max_variance (float, optional): upper bound of the sampled variance.
        """
        super().__init__(probability=probability)
        self.mode = mode
        self.max_variance = max_variance

    def transform(self, patch: Patch):
        variance = random.uniform(0.01, self.max_variance)
        # BUG FIX: skimage.util.random_noise returns a NEW float array and
        # never modifies its input, so the previous bare call
        # `random_noise(patch.image, ...)` silently did nothing.
        noisy = random_noise(patch.image, mode=self.mode, var=variance)
        # random_noise promotes to float64; restore the patch's dtype.
        patch.image = noisy.astype(patch.image.dtype, copy=False)
        np.clip(patch.image, 0., 1., out=patch.image)
class Flip(SpatialTransform):
    """Mirror the patch along 1-3 randomly chosen spatial axes."""

    def __init__(self, probability: float = DEFAULT_PROBABILITY):
        super().__init__(probability=probability)

    def transform(self, patch: Patch):
        count = random.randint(1, 3)
        spatial_axes = random.sample(range(3), count)
        # Arrays are 5D; the first two axes are batch and channel, so the
        # spatial axes are offset by 2.
        flip_axes = tuple(ax + 2 for ax in spatial_axes)
        patch.image = np.flip(patch.image, axis=flip_axes)
        patch.target = np.flip(patch.target, axis=flip_axes)
        # Mirroring an axis swaps its near/far pending shrink amounts.
        pending = list(patch.delayed_shrink_size)
        for ax in spatial_axes:
            pending[ax], pending[3 + ax] = pending[3 + ax], pending[ax]
        patch.delayed_shrink_size = tuple(pending)
class Transpose(SpatialTransform):
    """Randomly permute the three spatial axes of the patch."""

    def __init__(self, probability: float = DEFAULT_PROBABILITY):
        super().__init__(probability=probability)

    def transform(self, patch: Patch):
        spatial = [2, 3, 4]
        random.shuffle(spatial)
        # Batch and channel axes stay in front of the shuffled spatial axes.
        order = (0, 1) + tuple(spatial)
        patch.image = np.transpose(patch.image, order)
        patch.target = np.transpose(patch.target, order)
        # Permute the pending shrink amounts to follow their axes: position
        # `dst` now holds whatever axis `src` carried before the transpose.
        before = patch.delayed_shrink_size
        pending = list(before)
        for dst, moved_axis in enumerate(spatial):
            src = moved_axis - 2
            pending[dst] = before[src]
            pending[3 + dst] = before[3 + src]
        patch.delayed_shrink_size = tuple(pending)
class MissAlignment(SpatialTransform):
    def __init__(self, probability: float=DEFAULT_PROBABILITY,
            max_displacement: int=2):
        """Shift one side of the volume along an axis to mimic section misalignment.

        A split position is chosen along one axis and everything on one side of
        it is displaced.  Combined with transpose/flip/rotation this covers all
        displacement directions, so only a positive offset is sampled.

        Args:
            probability (float, optional): probability of this augmentation.
                Defaults to DEFAULT_PROBABILITY.
            max_displacement (int, optional): maximum displacement in voxels.
                Defaults to 2.
        """
        super().__init__(probability=probability)
        assert max_displacement > 0
        assert max_displacement < 8
        self.max_displacement = max_displacement

    def transform(self, patch: Patch):
        """Displace one side of a random split plane by a random offset."""
        axis = random.randint(2, 4)
        displacement = random.randint(1, self.max_displacement)
        # No random sign needed: combining with rotation/flipping reaches
        # both directions anyway.
        _, _, sz, sy, sx = patch.shape
        if axis == 2:
            # Split along z: shift everything below zloc within the other axes.
            zloc = random.randint(1, sz-1)
            patch.image[..., zloc:,
                self.max_displacement : sy-self.max_displacement,
                self.max_displacement : sx-self.max_displacement,
                ] = patch.image[..., zloc:,
                    self.max_displacement+displacement : sy+displacement-self.max_displacement,
                    self.max_displacement+displacement : sx+displacement-self.max_displacement,
                ]
            patch.target[..., zloc:,
                self.max_displacement : sy-self.max_displacement,
                self.max_displacement : sx-self.max_displacement,
                ] = patch.target[..., zloc:,
                    self.max_displacement+displacement : sy+displacement-self.max_displacement,
                    self.max_displacement+displacement : sx+displacement-self.max_displacement,
                ]
        elif axis == 3:
            # Split along y.
            yloc = random.randint(1, sy-1)
            patch.image[...,
                self.max_displacement : sz-self.max_displacement,
                yloc:,
                self.max_displacement : sx-self.max_displacement,
                ] = patch.image[...,
                    self.max_displacement+displacement : sz+displacement-self.max_displacement,
                    yloc:,
                    self.max_displacement+displacement : sx+displacement-self.max_displacement,
                ]
            patch.target[...,
                self.max_displacement : sz-self.max_displacement,
                yloc:,
                self.max_displacement : sx-self.max_displacement,
                ] = patch.target[...,
                    self.max_displacement+displacement : sz+displacement-self.max_displacement,
                    yloc:,
                    self.max_displacement+displacement : sx+displacement-self.max_displacement,
                ]
        elif axis == 4:
            # Split along x.
            xloc = random.randint(1, sx-1)
            patch.image[...,
                self.max_displacement : sz-self.max_displacement,
                self.max_displacement : sy-self.max_displacement,
                xloc:,
                ] = patch.image[...,
                    self.max_displacement+displacement : sz+displacement-self.max_displacement,
                    self.max_displacement+displacement : sy+displacement-self.max_displacement,
                    xloc:
                ]
            patch.target[...,
                self.max_displacement : sz-self.max_displacement,
                self.max_displacement : sy-self.max_displacement,
                xloc:,
                ] = patch.target[...,
                    self.max_displacement+displacement : sz+displacement-self.max_displacement,
                    self.max_displacement+displacement : sy+displacement-self.max_displacement,
                    xloc:,
                ]
        # Only keep the central region: the displaced border contains data
        # shifted in from outside the valid area.
        patch.shrink(self.shrink_size)

    @property
    def shrink_size(self):
        # max_displacement voxels are cropped from every face.
        # BUG FIX: the previous bare `@lru_cache` on this property keyed the
        # cache on `self`, keeping every instance alive for the process
        # lifetime (flake8-bugbear B019); the tuple below is trivial to
        # rebuild, so no caching is needed at all.
        return (self.max_displacement,) * 6
class Perspective2D(SpatialTransform):
    def __init__(self, probability: float=DEFAULT_PROBABILITY,
            corner_ratio: float=0.2):
        """Warp image using Perspective transform

        Args:
            probability (float, optional): probability of this transformation. Defaults to DEFAULT_PROBABILITY.
            corner_ratio (float, optional): We split the 2D image to four equal size rectangles.
                For each axis in rectangle, we further divid it to four rectangles using this ratio.
                The rectangle containing the image corner was used as a sampling point region.
                This idea is inspired by this example:
                https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_geometric_transformations/py_geometric_transformations.html#perspective-transformation
                Defaults to 0.5.
        """
        super().__init__(probability=probability)
        self.corner_ratio = corner_ratio

    def transformation_matrix(self, sy: int, sx: int):
        """Build a random 3x3 perspective matrix for a (sy, sx) section."""
        # Sample an effective corner ratio so the warp strength varies.
        corner_ratio = random.uniform(0.02, self.corner_ratio)
        # corner_ratio = self.corner_ratio
        # One random source point inside each corner rectangle; each
        # rectangle spans corner_ratio/2 of the axis length.
        upper_left_point = [
            random.randint(0, round(sy*corner_ratio/2)),
            random.randint(0, round(sx*corner_ratio/2))
        ]
        upper_right_point = [
            random.randint(0, round(sy*corner_ratio/2)),
            random.randint(sx-round(sx*corner_ratio/2), sx-1)
        ]
        lower_left_point = [
            random.randint(sy-round(sy*corner_ratio/2), sy-1),
            random.randint(0, round(sx*corner_ratio/2))
        ]
        lower_right_point = [
            random.randint(sy-round(sy*corner_ratio/2), sy-1),
            random.randint(sx-round(sx*corner_ratio/2), sx-1)
        ]
        pts1 = [
            upper_left_point,
            upper_right_point,
            lower_left_point,
            lower_right_point
        ]
        # push the list order to get rotation effect
        # for example, push one position will rotate about 90 degrees
        # push_index = random.randint(0, 3)
        # if push_index > 0:
        #     tmp = deepcopy(pts1)
        #     pts1[push_index:] = tmp[:4-push_index]
        #     # the pushed out elements should be reversed
        #     pts1[:push_index] = tmp[4-push_index:][::-1]
        pts1 = np.asarray(pts1, dtype=np.float32)
        # NOTE(review): the points above are built as (row, col) = (y, x),
        # while OpenCV's getPerspectiveTransform and warpPerspective expect
        # (x, y) coordinates; likewise the dsize passed to warpPerspective in
        # _transform2d is (sy, sx) although OpenCV expects (width, height).
        # This is only self-consistent for square sections (sy == sx) —
        # confirm the intent before using non-square patches.
        pts2 = np.float32([[0, 0], [0, sx], [sy, 0], [sy, sx]])
        M = cv2.getPerspectiveTransform(pts1, pts2)
        return M

    def transform(self, patch: Patch):
        # A single random matrix is shared by every section so the warp is
        # coherent across the volume.
        sy, sx = patch.shape[-2:]
        M = self.transformation_matrix(sy, sx)
        for batch in range(patch.shape[0]):
            for channel in range(patch.shape[1]):
                for z in range(patch.shape[2]):
                    # Linear interpolation for the image, nearest neighbour
                    # for the target so label values are not blended.
                    patch.image[batch,channel,z,...] = self._transform2d(
                        patch.image[batch, channel, z, ...], cv2.INTER_LINEAR, M, sy, sx
                    )
                    patch.target[batch,channel,z,...] = self._transform2d(
                        patch.target[batch, channel, z, ...], cv2.INTER_NEAREST, M, sy, sx
                    )
        patch.shrink(self.shrink_size)

    def _transform2d(self, arr: np.ndarray, interpolation: int, M: np.ndarray, sy: int, sx: int):
        # Warp one 2D section with the shared perspective matrix.
        dst = cv2.warpPerspective(arr, M, (sy, sx), flags=interpolation)
        return dst
# class RotateScale(SpatialTransform):
# def __init__(self, probability: float=DEFAULT_PROBABILITY,
# max_scaling: float=1.3):
# super().__init__(probability=probability)
# raise NotImplementedError('this augmentation is not working correctly yet. The image and target could have patchy effect.We are not sure why.')
# self.max_scaling = max_scaling
# def transform(self, patch: Patch):
# # because we do not know the rotation angle
# # we should apply the shrinking first
# patch.apply_delayed_shrink_size()
# # if the rotation is close to diagnal, for example 45 degree
# # the target could be outside the volume and be black!
# # angle = random.choice([0, 90, 180, -90, -180]) + random.randint(-5, 5)
# angle = random.randint(0, 180)
# scale = random.uniform(1.1, self.max_scaling)
# center = patch.center[-2:]
# mat = cv2.getRotationMatrix2D( center, angle, scale )
# for batch in range(patch.shape[0]):
# for channel in range(patch.shape[1]):
# for z in range(patch.shape[2]):
# patch.image[batch, channel, z, ...] = cv2.warpAffine(
# patch.image[batch, channel, z, ...],
# mat, patch.shape[-2:], flags=cv2.INTER_LINEAR
# )
# patch.target[batch, channel, z, ...] = cv2.warpAffine(
# patch.target[batch, channel, z, ...],
# mat, patch.shape[-2:], flags=cv2.INTER_NEAREST
# )
class Swirl(SpatialTransform):
    """Apply a random skimage swirl distortion to each z section of the image.

    NOTE(review): only ``patch.image`` is warped — ``patch.target`` is left
    untouched, so image and target become spatially inconsistent.  Confirm
    this is intentional (e.g. for denoising-style targets) before using it
    with segmentation targets.  Also note that ``patch.image[..., z, :, :]``
    on a 5D patch yields a 4D slice, while skimage's ``swirl`` expects a 2D
    image — verify the patch rank this transform actually receives.
    """
    def __init__(self, max_rotation: int = 5, max_strength: int = 3, probability: float = DEFAULT_PROBABILITY):
        super().__init__(probability=probability)
        self.max_strength = max_strength
        self.max_rotation = max_rotation

    def transform(self, patch: Patch):
        # Swirl each section independently with a random rotation/strength;
        # the radius covers roughly half of the mean lateral extent.
        for z in range(patch.shape[-3]):
            patch.image[..., z, :, :] = swirl(
                patch.image[..., z, :, :],
                rotation=random.randint(1, self.max_rotation),
                strength=random.randint(1, self.max_strength),
                radius = (patch.shape[-1] + patch.shape[-2]) // 4,
            )
| 39.43956 | 224 | 0.605322 |
e9bd07bef97a0cb7c8b9b8613104005b7b5a6165 | 13,717 | py | Python | packaging/wheel/relocate.py | Licht-T/vision | 052edcecef3eb0ae9fe9e4b256fa2a488f9f395b | [
"BSD-3-Clause"
] | 1 | 2020-10-20T12:31:36.000Z | 2020-10-20T12:31:36.000Z | packaging/wheel/relocate.py | Licht-T/vision | 052edcecef3eb0ae9fe9e4b256fa2a488f9f395b | [
"BSD-3-Clause"
] | 3 | 2021-11-20T09:20:18.000Z | 2021-12-13T09:19:25.000Z | packaging/wheel/relocate.py | Licht-T/vision | 052edcecef3eb0ae9fe9e4b256fa2a488f9f395b | [
"BSD-3-Clause"
] | 1 | 2021-12-17T18:13:39.000Z | 2021-12-17T18:13:39.000Z | # -*- coding: utf-8 -*-
"""Helper script to package wheels and relocate binaries."""
# Standard library imports
import os
import io
import sys
import glob
import shutil
import zipfile
import hashlib
import platform
import subprocess
import os.path as osp
from base64 import urlsafe_b64encode
# Third party imports
if sys.platform == 'linux':
from auditwheel.lddtree import lddtree
from wheel.bdist_wheel import get_abi_tag
# Shared libraries that must NOT be vendored into a Linux wheel: glibc/gcc
# runtime pieces plus the X11/GL stack, all of which are expected to come
# from the host system (same policy auditwheel applies).
WHITELIST = {
    'libgcc_s.so.1', 'libstdc++.so.6', 'libm.so.6',
    'libdl.so.2', 'librt.so.1', 'libc.so.6',
    'libnsl.so.1', 'libutil.so.1', 'libpthread.so.0',
    'libresolv.so.2', 'libX11.so.6', 'libXext.so.6',
    'libXrender.so.1', 'libICE.so.6', 'libSM.so.6',
    'libGL.so.1', 'libgobject-2.0.so.0', 'libgthread-2.0.so.0',
    'libglib-2.0.so.0', 'ld-linux-x86-64.so.2', 'ld-2.17.so'
}

# Windows system and MSVC runtime DLLs that are likewise left to the host.
WINDOWS_WHITELIST = {
    'MSVCP140.dll', 'KERNEL32.dll',
    'VCRUNTIME140_1.dll', 'VCRUNTIME140.dll',
    'api-ms-win-crt-heap-l1-1-0.dll',
    'api-ms-win-crt-runtime-l1-1-0.dll',
    'api-ms-win-crt-stdio-l1-1-0.dll',
    'api-ms-win-crt-filesystem-l1-1-0.dll',
    'api-ms-win-crt-string-l1-1-0.dll',
    'api-ms-win-crt-environment-l1-1-0.dll',
    'api-ms-win-crt-math-l1-1-0.dll',
    'api-ms-win-crt-convert-l1-1-0.dll'
}

# Repository layout anchors: this file lives in <repo>/packaging/wheel/,
# so PACKAGE_ROOT is the repository root (where dist/ is produced).
HERE = osp.dirname(osp.abspath(__file__))
PACKAGE_ROOT = osp.dirname(osp.dirname(HERE))

# Host architecture and interpreter version.
PLATFORM_ARCH = platform.machine()
PYTHON_VERSION = sys.version_info
def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE):
    """Yield successive chunks of at most *size* from a file-like object until EOF."""
    chunk = file.read(size)
    while chunk:
        yield chunk
        chunk = file.read(size)
def rehash(path, blocksize=1 << 20):
    """Return (hash, length) for path using hashlib.sha256()

    The digest is formatted the way wheel RECORD files expect:
    'sha256=' followed by an unpadded urlsafe base64 string.
    """
    hasher = hashlib.sha256()
    total = 0
    with open(path, 'rb') as fh:
        while True:
            block = fh.read(blocksize)
            if not block:
                break
            total += len(block)
            hasher.update(block)
    digest = 'sha256=' + urlsafe_b64encode(
        hasher.digest()
    ).decode('latin1').rstrip('=')
    # str() keeps the historical unicode/str python2 behaviour.
    return (digest, str(total))  # type: ignore
def unzip_file(file, dest):
    """Decompress zip `file` into directory `dest`."""
    archive = zipfile.ZipFile(file, 'r')
    try:
        archive.extractall(dest)
    finally:
        archive.close()
def is_program_installed(basename):
    """
    Return program absolute path if installed in PATH.
    Otherwise, return None

    On macOS systems, a .app is considered installed if
    it exists.
    """
    # macOS application bundles are "installed" if the path simply exists.
    if (sys.platform == 'darwin' and basename.endswith('.app')
            and osp.exists(basename)):
        return basename

    for directory in os.environ["PATH"].split(os.pathsep):
        candidate = osp.join(directory, basename)
        if osp.isfile(candidate):
            return candidate
def find_program(basename):
    """
    Find program in PATH and return absolute path

    Try adding .exe or .bat to basename on Windows platforms
    (return None if not found)
    """
    candidates = [basename]
    if os.name == 'nt':
        # On Windows, try the executable extensions first, bare name last.
        extensions = ('.exe', '.bat', '.cmd', '.dll')
        if not basename.endswith(extensions):
            candidates = [basename + ext for ext in extensions] + [basename]
    for candidate in candidates:
        found = is_program_installed(candidate)
        if found:
            return found
def patch_new_path(library_path, new_dir):
    """Build a collision-free destination path for *library_path* inside *new_dir*.

    A short sha256 tag of the source path is spliced between the library stem
    and its suffix so two same-named libraries from different locations do
    not clash.
    """
    filename = osp.basename(library_path)
    stem, *suffix_parts = filename.split('.')
    suffix = '.'.join(suffix_parts)
    tag = hashlib.sha256(library_path.encode('utf-8')).hexdigest()[:8]
    return osp.join(new_dir, '.'.join([stem, tag, suffix]))
def find_dll_dependencies(dumpbin, binary):
    """Return the DLL names *binary* links against, per `dumpbin /dependents`."""
    proc = subprocess.run([dumpbin, "/dependents", binary],
                          stdout=subprocess.PIPE)
    text = proc.stdout.strip().decode('utf-8')
    # The DLL list sits between the 'dependencies:' header and the
    # 'Summary' footer of dumpbin's report.
    begin = text.find('dependencies:') + len('dependencies:')
    end = text.find('Summary')
    section = text[begin:end].strip()
    return [line.strip() for line in section.split(os.linesep)]
def relocate_elf_library(patchelf, output_dir, output_library, binary):
    """
    Relocate an ELF shared library to be packaged on a wheel.

    Given a shared library, find the transitive closure of its dependencies,
    rename and copy them into the wheel while updating their respective rpaths.

    Args:
        patchelf: absolute path of the patchelf executable.
        output_dir: root of the unpacked wheel.
        output_library: directory inside the wheel that holds ``binary``.
        binary: filename of the extension module to relocate.
    """
    print('Relocating {0}'.format(binary))
    binary_path = osp.join(output_library, binary)

    # lddtree (from auditwheel) resolves the full dependency tree of the ELF.
    ld_tree = lddtree(binary_path)
    tree_libs = ld_tree['libs']

    # BFS over the dependency graph; queue entries are (library, dependent).
    binary_queue = [(n, binary) for n in ld_tree['needed']]
    binary_paths = {binary: binary_path}
    binary_dependencies = {}

    while binary_queue != []:
        library, parent = binary_queue.pop(0)
        library_info = tree_libs[library]
        print(library)

        if library_info['path'] is None:
            # Unresolvable dependency; leave it for the host system.
            print('Omitting {0}'.format(library))
            continue

        if library in WHITELIST:
            # Omit glibc/gcc/system libraries
            print('Omitting {0}'.format(library))
            continue

        # Record the edge parent -> library; omitted libraries above never
        # make it into these lists, so new_names lookups below cannot miss.
        parent_dependencies = binary_dependencies.get(parent, [])
        parent_dependencies.append(library)
        binary_dependencies[parent] = parent_dependencies

        if library in binary_paths:
            # Already visited via another parent.
            continue

        binary_paths[library] = library_info['path']
        binary_queue += [(n, library) for n in library_info['needed']]

    print('Copying dependencies to wheel directory')
    new_libraries_path = osp.join(output_dir, 'torchvision.libs')
    os.makedirs(new_libraries_path)

    # Copy every vendored dependency under a hash-tagged name so wheels with
    # same-named libraries from different locations do not collide.
    new_names = {binary: binary_path}
    for library in binary_paths:
        if library != binary:
            library_path = binary_paths[library]
            new_library_path = patch_new_path(library_path, new_libraries_path)
            print('{0} -> {1}'.format(library, new_library_path))
            shutil.copyfile(library_path, new_library_path)
            new_names[library] = new_library_path

    print('Updating dependency names by new files')
    for library in binary_paths:
        if library != binary:
            if library not in binary_dependencies:
                continue
            library_dependencies = binary_dependencies[library]
            new_library_name = new_names[library]
            # Rewrite each DT_NEEDED entry to point at the renamed copies.
            for dep in library_dependencies:
                new_dep = osp.basename(new_names[dep])
                print('{0}: {1} -> {2}'.format(library, dep, new_dep))
                subprocess.check_output(
                    [
                        patchelf,
                        '--replace-needed',
                        dep,
                        new_dep,
                        new_library_name
                    ],
                    cwd=new_libraries_path)

            print('Updating library rpath')
            # Vendored libraries resolve siblings from their own directory.
            subprocess.check_output(
                [
                    patchelf,
                    '--set-rpath',
                    "$ORIGIN",
                    new_library_name
                ],
                cwd=new_libraries_path)

            subprocess.check_output(
                [
                    patchelf,
                    '--print-rpath',
                    new_library_name
                ],
                cwd=new_libraries_path)

    print("Update library dependencies")
    # NOTE(review): this raises KeyError when the binary has no vendored
    # dependencies at all (every dep whitelisted) — confirm that case
    # cannot occur for torchvision's extension modules.
    library_dependencies = binary_dependencies[binary]
    for dep in library_dependencies:
        new_dep = osp.basename(new_names[dep])
        print('{0}: {1} -> {2}'.format(binary, dep, new_dep))
        subprocess.check_output(
            [
                patchelf,
                '--replace-needed',
                dep,
                new_dep,
                binary
            ],
            cwd=output_library)

    print('Update library rpath')
    # The extension module looks next to itself first, then in the
    # torchvision.libs directory created above.
    subprocess.check_output(
        [
            patchelf,
            '--set-rpath',
            "$ORIGIN:$ORIGIN/../torchvision.libs",
            binary_path
        ],
        cwd=output_library
    )
def relocate_dll_library(dumpbin, output_dir, output_library, binary):
    """
    Relocate a DLL/PE shared library to be packaged on a wheel.

    Given a shared library, find the transitive closure of its dependencies,
    rename and copy them into the wheel.

    Args:
        dumpbin: absolute path of the MSVC dumpbin executable.
        output_dir: root of the unpacked wheel.
        output_library: directory inside the wheel that holds ``binary``.
        binary: filename of the extension module to relocate.
    """
    print('Relocating {0}'.format(binary))
    binary_path = osp.join(output_library, binary)

    library_dlls = find_dll_dependencies(dumpbin, binary_path)
    # BFS over the dependency graph; queue entries are (dll, dependent).
    binary_queue = [(dll, binary) for dll in library_dlls]
    binary_paths = {binary: binary_path}
    binary_dependencies = {}

    while binary_queue != []:
        library, parent = binary_queue.pop(0)
        if library in WINDOWS_WHITELIST or library.startswith('api-ms-win'):
            # MSVC runtime / Windows API set DLLs come from the host.
            print('Omitting {0}'.format(library))
            continue

        library_path = find_program(library)
        if library_path is None:
            print('{0} not found'.format(library))
            continue

        if osp.basename(osp.dirname(library_path)) == 'system32':
            # Resolved to a system DLL; do not vendor it.
            continue

        print('{0}: {1}'.format(library, library_path))
        parent_dependencies = binary_dependencies.get(parent, [])
        parent_dependencies.append(library)
        binary_dependencies[parent] = parent_dependencies

        if library in binary_paths:
            # Already visited via another parent.
            continue

        binary_paths[library] = library_path
        downstream_dlls = find_dll_dependencies(dumpbin, library_path)
        binary_queue += [(n, library) for n in downstream_dlls]

    print('Copying dependencies to wheel directory')
    # Windows resolves DLLs from the importing module's directory, so the
    # copies keep their original names and sit next to the extension module;
    # no import-table rewriting is needed (unlike the ELF case).
    package_dir = osp.join(output_dir, 'torchvision')
    for library in binary_paths:
        if library != binary:
            library_path = binary_paths[library]
            new_library_path = osp.join(package_dir, library)
            print('{0} -> {1}'.format(library, new_library_path))
            shutil.copyfile(library_path, new_library_path)
def compress_wheel(output_dir, wheel, wheel_dir, wheel_name):
    """Create RECORD file and compress wheel distribution.

    Rebuilds the RECORD manifest (path, sha256, size for every file; the
    RECORD entry itself has no hash) and replaces the original wheel with a
    zip of *output_dir*.
    """
    print('Update RECORD file in wheel')
    dist_info = glob.glob(osp.join(output_dir, '*.dist-info'))[0]
    record_file = osp.join(dist_info, 'RECORD')

    with open(record_file, 'w') as record:
        for root, _, filenames in os.walk(output_dir):
            for filename in filenames:
                absolute = osp.join(root, filename)
                relative = osp.relpath(absolute, output_dir)
                if absolute == record_file:
                    # RECORD never lists its own hash/size.
                    record.write('{0},,\n'.format(relative))
                else:
                    digest, size = rehash(absolute)
                    record.write('{0},{1},{2}\n'.format(relative, digest, size))

    print('Compressing wheel')
    archive_base = osp.join(wheel_dir, wheel_name)
    shutil.make_archive(archive_base, 'zip', output_dir)
    os.remove(wheel)
    shutil.move('{0}.zip'.format(archive_base), wheel)
    shutil.rmtree(output_dir)
def patch_linux():
    """Relocate ELF dependencies into every Linux wheel found in dist/."""
    # Get patchelf location
    patchelf = find_program('patchelf')
    if patchelf is None:
        raise FileNotFoundError('Patchelf was not found in the system, please'
                                ' make sure that is available on the PATH.')
    # Find wheel
    print('Finding wheels...')
    all_wheels = glob.glob(osp.join(PACKAGE_ROOT, 'dist', '*.whl'))
    work_dir = osp.join(PACKAGE_ROOT, 'dist', '.wheel-process')
    # Extension binaries that may need their dependencies relocated.
    target_binaries = ['image.so', 'video_reader.so']
    for current_wheel in all_wheels:
        # Start from a clean working directory for each wheel.
        if osp.exists(work_dir):
            shutil.rmtree(work_dir)
        os.makedirs(work_dir)
        print('Unzipping wheel...')
        wheel_file = osp.basename(current_wheel)
        wheel_dir = osp.dirname(current_wheel)
        print('{0}'.format(wheel_file))
        wheel_name, _ = osp.splitext(wheel_file)
        unzip_file(current_wheel, work_dir)
        print('Finding ELF dependencies...')
        package_dir = osp.join(work_dir, 'torchvision')
        for so_name in target_binaries:
            if osp.exists(osp.join(package_dir, so_name)):
                relocate_elf_library(patchelf, work_dir, package_dir, so_name)
        compress_wheel(work_dir, current_wheel, wheel_dir, wheel_name)
def patch_win():
    """Relocate DLL dependencies into every Windows wheel found in dist/."""
    # Get dumpbin location
    dumpbin = find_program('dumpbin')
    if dumpbin is None:
        raise FileNotFoundError('Dumpbin was not found in the system, please'
                                ' make sure that is available on the PATH.')
    # Find wheel
    print('Finding wheels...')
    all_wheels = glob.glob(osp.join(PACKAGE_ROOT, 'dist', '*.whl'))
    work_dir = osp.join(PACKAGE_ROOT, 'dist', '.wheel-process')
    # Extension binaries that may need their dependencies relocated.
    target_binaries = ['image.pyd', 'video_reader.pyd']
    for current_wheel in all_wheels:
        # Start from a clean working directory for each wheel.
        if osp.exists(work_dir):
            shutil.rmtree(work_dir)
        os.makedirs(work_dir)
        print('Unzipping wheel...')
        wheel_file = osp.basename(current_wheel)
        wheel_dir = osp.dirname(current_wheel)
        print('{0}'.format(wheel_file))
        wheel_name, _ = osp.splitext(wheel_file)
        unzip_file(current_wheel, work_dir)
        print('Finding DLL/PE dependencies...')
        package_dir = osp.join(work_dir, 'torchvision')
        for pyd_name in target_binaries:
            if osp.exists(osp.join(package_dir, pyd_name)):
                relocate_dll_library(dumpbin, work_dir, package_dir, pyd_name)
        compress_wheel(work_dir, current_wheel, wheel_dir, wheel_name)
if __name__ == '__main__':
    # Dispatch to the platform-specific wheel patcher.
    current_platform = sys.platform
    if current_platform == 'linux':
        patch_linux()
    elif current_platform == 'win32':
        patch_win()
| 32.815789 | 79 | 0.61267 |
cae3fab06f0357e4c9c7481724cbd2eadce13603 | 1,514 | py | Python | car/iot_connection.py | akesiraju/raspberrypi | e8ae5e535a9953631ffa2d1e7de926c9dc19f961 | [
"MIT"
] | 2 | 2019-03-26T23:47:40.000Z | 2020-03-28T03:23:31.000Z | car/iot_connection.py | akesiraju/raspberrypi | e8ae5e535a9953631ffa2d1e7de926c9dc19f961 | [
"MIT"
] | 1 | 2019-03-27T10:59:14.000Z | 2019-03-27T10:59:14.000Z | car/iot_connection.py | akesiraju/raspberrypi | e8ae5e535a9953631ffa2d1e7de926c9dc19f961 | [
"MIT"
] | 1 | 2018-07-14T23:55:14.000Z | 2018-07-14T23:55:14.000Z | import json
# from flask_cors import CORS, cross_origin
import logging
# Import SDK packages
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient
import os
class IotConnection:
    """Thin wrapper around AWSIoTMQTTClient configured from a JSON file.

    The configuration file must provide: ``thingName``, ``hostUrl``,
    ``caFilePath``, ``privateKeyFilePath`` and ``certFilePath``.
    """

    def __init__(self, config_file_path='.vscode/config.json'):
        """Load the configuration and open the MQTT connection.

        Args:
            config_file_path: path to the JSON configuration file. The
                default keeps backward compatibility with the original
                hard-coded location.
        """
        self.config = self._load_config(config_file_path)
        self.logger = logging.getLogger('IotConnection')
        self.logger.setLevel(logging.DEBUG)
        self.myMQTTClient = AWSIoTMQTTClient(self.config['thingName'])
        # AWS IoT MQTT over TLS uses port 8883.
        self.myMQTTClient.configureEndpoint(self.config['hostUrl'], 8883)
        self.myMQTTClient.configureCredentials(
            self.config['caFilePath'], self.config['privateKeyFilePath'], self.config['certFilePath'])
        # -1: queue offline publish requests without bound.
        self.myMQTTClient.configureOfflinePublishQueueing(-1)
        self.myMQTTClient.configureDrainingFrequency(2)  # Draining: 2 Hz
        self.myMQTTClient.configureConnectDisconnectTimeout(10)  # 10 sec
        self.myMQTTClient.configureMQTTOperationTimeout(5)  # 5 sec
        self.myMQTTClient.connect()
        self.logger.debug('connection success')

    def publish(self, message, topic='iotCarTopic'):
        """Publish *message* (JSON-serialized) to *topic* at QoS 0.

        Args:
            message: any JSON-serializable object.
            topic: MQTT topic; default kept for backward compatibility.
        """
        self.myMQTTClient.publish(topic, json.dumps(message), 0)
        self.logger.debug('publish complete')

    def _load_config(self, config_file_path):
        """Read *config_file_path* and return the parsed JSON as a dict."""
        with open(config_file_path) as f:
            config = json.load(f)
        return config

    def clean(self):
        """Disconnect the MQTT client."""
        self.logger.debug('clean START')
        self.myMQTTClient.disconnect()
        self.logger.debug('clean END')
8899160c25ce54488ae99a342978ec42890413fc | 49,104 | py | Python | stretchSenseLibrary.py | jeremyLabrado/Rasbian-Library | 7b0789ca687e837f768f922887cfc84e573b6c14 | [
"MIT"
] | null | null | null | stretchSenseLibrary.py | jeremyLabrado/Rasbian-Library | 7b0789ca687e837f768f922887cfc84e573b6c14 | [
"MIT"
] | null | null | null | stretchSenseLibrary.py | jeremyLabrado/Rasbian-Library | 7b0789ca687e837f768f922887cfc84e573b6c14 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
- Website : https://www.stretchsense.com
- Important : This StretchSense Library has been designed to enable the connection of one or more "StretchSense Sensor" and "StretchSense IC Boards" to your Raspberry Pi
- Author : Louis Germain
- Copyright : 2017 StretchSense
- Date : 26/07/2017
- Version : 1.0.0
"""
from __future__ import print_function
import argparse
import binascii
import time
import os
import sys
import RPi.GPIO as GPIO
import spidev
from threading import Timer, Lock
from bluepy import btle
class RepeatedTimer(object):
    """Invoke *function* every *interval* seconds until stop() is called.

    Pass ``autostart=False`` as a keyword argument to create the timer
    without scheduling the first call immediately. All other positional
    and keyword arguments are forwarded to *function* on each tick.
    """

    def __init__(self, interval, function, *args, **kwargs):
        self._lock = Lock()
        self._timer = None
        self.function = function
        self.interval = interval
        self.args = args
        self.kwargs = kwargs
        self._stopped = True
        # 'autostart' is popped so it is never forwarded to function().
        if kwargs.pop('autostart', True):
            self.start()

    def _run(self):
        # Re-arm first so the schedule is not delayed by function's runtime.
        self.start(from_run=True)
        self.function(*self.args, **self.kwargs)

    def start(self, from_run=False):
        with self._lock:
            if from_run or self._stopped:
                self._stopped = False
                self._timer = Timer(self.interval, self._run)
                self._timer.start()

    def stop(self):
        with self._lock:
            self._stopped = True
            self._timer.cancel()
"""
Board Configuration Raspberry Pi 3 B+ SPI0:
NSS = 24
SCK = 23
MISO = 21
MOSI = 19
Board Configuration Raspberry Pi 3 B+ SPI1:
NSS = 26
SCK = 40
MISO = 35
MOSI = 38
"""
# GPIO Layout configuration
GPIOLAYOUT = GPIO.BOARD
#GPIOLAYOUT = GPIO.BCM
# Set up the SPI pattern from the GPIO LAYOUT
if (GPIOLAYOUT == GPIO.BOARD):
CE_PIN0 = 24
CE_PIN1 = 26
INTERRUPT_PIN = 13
TRIGGER_PIN = 15
elif (GPIOLAYOUT == GPIO.BCM):
CE_PIN0 = 10
CE_PIN1 = 11
INTERRUPT_PIN = 2
TRIGGER_PIN = 3
# GPIO & SPI Pin Configuration
GPIO.setmode(GPIOLAYOUT)
GPIO.setwarnings(False)
SPI0 = 0
SPI1 = 1 # SPI1 still in developpment
# Data Package options
DATA = 0x00
CONFIG = 0x01
# Output Data Rate
RATE_OFF = 0x00
RATE_25HZ = 0x01
RATE_50HZ = 0x02
RATE_100HZ = 0x03
RATE_166HZ = 0x04
RATE_200HZ = 0x05
RATE_250HZ = 0x06
RATE_500HZ = 0x07
RATE_1KHZ = 0x08
# Interrupt Mode
INTERRUPT_DISABLED = 0x00
INTERRUPT_ENABLED = 0x01
# Trigger Mode
TRIGGER_DISABLED = 0x00
TRIGGER_ENABLED = 0x01
# Filter Mode
FILTER_0PT = 0x00
FILTER_1PT = 0x01
FILTER_3PT = 0x03
FILTER_7PT = 0x07
FILTER_15PT = 0x0F
FILTER_31PT = 0x1F
FILTER_63PT = 0x3F
FILTER_127PT = 0x7F
FILTER_255PT = 0xFF
# Resolution Mode
RESOLUTION_1pF = 0x00
RESOLUTION_100fF = 0x01
RESOLUTION_10fF = 0x02
RESOLUTION_1fF = 0x03
# Config Transfer
PADDING = 0x00
# Configuration Setup
ODR_MODE = RATE_50HZ
INTERRUPT_MODE = INTERRUPT_DISABLED
TRIGGER_MODE = TRIGGER_DISABLED
FILTER_MODE = FILTER_1PT
RESOLUTION_MODE = RESOLUTION_100fF
"""
StretchSense Classes & generators for the different type of sensors.
"""
class StretchSensePeripheral:
    """Plain data holder describing one StretchSense sensor channel.

    Instances start with neutral defaults and are filled in by the
    generator helpers when a circuit is connected over SPI or BLE.
    """

    def __init__(self):
        # MAC address of the device ("xx:xx:xx:xx:xx").
        self.addr = ''
        # Service UUID ("xxxxxxxx-7374-7265-7563-6873656e7365").
        self.uuid = ''
        # Latest capacitance value read from the sensor (hex raw -> decimal).
        self.value = 0x0000
        # Channel index, unique per sensor on multi-channel circuits.
        self.channelNumber = 0
        # Hardware generation of the circuit.
        self.gen = ''
        # Background colour used when the circuit is displayed.
        self.color = ''
class StretchSenseAPI():
    """Drives StretchSense circuits over SPI (16FGV1.0) and BLE (bluepy).

    NOTE(review): the lists below are class-level attributes, shared by
    all instances, and each is seeded with one placeholder object; most
    length checks in this class subtract 1 to skip that placeholder.
    """

    # This is the list of peripherals we are using for the SPI
    listPeripheralSpi = [StretchSensePeripheral()]

    # This is the list of peripherals we are using to connect to the BLE
    listPeripheralInUse = [btle.Peripheral()]

    # This is the list of StretchSense Bluetooth peripherals detected by the Raspberry Pi during a scan event
    listPeripheralAvailable = [StretchSensePeripheral()]

    # This is the list of StretchSense Bluetooth peripherals which are connected to the Raspberry Pi after being scanned
    listPeripheralIsConnected = [StretchSensePeripheral()]

    # This is the list of StretchSense Bluetooth peripherals which are saved to the Raspberry Pi after being connected once
    listPeripheralIsOnceConnected = [StretchSensePeripheral()]

    """
    Variables : Services & Characteristics UUID
    """

    # The name we use to filter BLE scan results and find only StretchSense's sensors
    deviceName = 'StretchSense'
    # The UUID used to filter Bluetooth scan results and find the services from StretchSense sensors Gen 3
    serviceUUID3 = '00001701-7374-7265-7563-6873656e7365'
    # The UUID used to filter the device characteristics and find the "data characteristic" from StretchSense sensors Gen 3
    dataUUID3 = '00001702-7374-7265-7563-6873656e7365'
    # The UUID used to filter Bluetooth scan results and find the services from StretchSense sensors Gen 2
    serviceUUID2 = '00001501-7374-7265-7563-6873656e7365'
    # The UUID used to filter the device characteristics and find the "data characteristic" from StretchSense sensors Gen 2
    dataUUID2 = '00001502-7374-7265-7563-6873656e7365'
    # The UUID used to filter Bluetooth scan results and find the services from StretchSense circuit 10TT
    serviceUUID10TT = '00601001-7374-7265-7563-6873656e7365'
    # The UUID used to filter the device characteristics and find the "data characteristic" from StretchSense circuit 10TT
    dataUUID10TT = '00601002-7374-7265-7563-6873656e7365'

    """
    Variables : Set sensors & info
    """

    # Number of data samples within the filtering array
    numberOfSample = 30
    # Initialisation value of the sampling time value (SamplingTime = (value +1)*40ms)
    samplingTimeNumber = 0
    # Sized of the filter based on the number of samples
    filteringNumber = 0

    """
    Bluepy buffer Scanning class.
    """
class ScanPrint(btle.DefaultDelegate):
    """bluepy scan delegate used by ble_scanning().

    `opts` is the argparse namespace built in ble_scanning(); its
    new/all/sensitivity flags decide which advertisements are reported.
    """

    def __init__(self, opts):
        btle.DefaultDelegate.__init__(self)
        self.opts = opts

    def handleDiscovery(self, dev, isNewDev, isNewData):
        """Called by bluepy for every advertising report received."""
        # NOTE(review): `status` is computed but never used afterwards.
        if isNewDev:
            status = "new"
        elif isNewData:
            if self.opts.new:
                return
            status = "update"
        else:
            if not self.opts.all:
                return
            status = "old"
        # Ignore devices below the RSSI sensitivity threshold.
        if dev.rssi < self.opts.sensitivity:
            return
        if not dev.scanData:
            print('\t(no data)')
        print()
"""
Serial Peripheral Interface Functions
"""
def spi_generateTenChannel(self):
    """Populate listPeripheralSpi with the ten channels of a SPI circuit.

    Creates one StretchSensePeripheral per channel (0-9), all bound to
    the "SPI0" bus and the gen-3 service UUID, and appends them to
    self.listPeripheralSpi. Replaces ten copy-pasted blocks with a loop;
    the resulting list contents are identical.
    """
    for channel in range(10):
        sensor = StretchSensePeripheral()
        sensor.addr = "SPI0"
        sensor.uuid = self.serviceUUID3
        sensor.value = 0
        sensor.gen = 3
        sensor.channelNumber = channel
        self.listPeripheralSpi.append(sensor)
def spi_setup(self):
    """Initialise SPI bus 0 and GPIO pins for a 16FGV1.0 circuit.

    Opens /dev/spidev0.0 at 2 MHz, SPI mode 1, MSB first; regenerates
    the ten-channel peripheral list; configures the interrupt, chip
    enable and trigger pins; then writes the circuit configuration and
    caches the capacitance scaling factor.
    """
    # Creating a new setup requires getting rid of the old one.
    del self.listPeripheralSpi[0:]
    # Initialise SPI & GPIO ports
    self.myDevice = spidev.SpiDev()
    self.myDevice.close()
    self.myDevice.open(0, SPI0)
    self.myDevice.max_speed_hz = 2000000
    self.myDevice.mode = 1
    self.myDevice.lsbfirst = False
    self.capacitanceScalingFactor = 100
    self.rawData = [0] * 20
    self.spi_generateTenChannel()
    # Initialise the data ready and chip enable pins
    GPIO.setup(INTERRUPT_PIN, GPIO.IN)
    GPIO.setup(CE_PIN0, GPIO.OUT, initial=GPIO.HIGH)
    GPIO.setup(TRIGGER_PIN, GPIO.OUT)
    # Configure 16FGV1.0
    self.spi_writeConfiguration()
    # Give the circuit the time to set up
    time.sleep(0.01)
    # Get capacitance scaling factor
    self.capacitanceScalingFactor = self.spi_getCapacitanceScalingFactor(RESOLUTION_MODE)
def spi_mode(self):
    """Read one sample using the mode selected by the module configuration.

    Dispatches on INTERRUPT_MODE / TRIGGER_MODE: continuous sampling
    (with or without interrupt) or trigger mode. Any other combination
    is a no-op.
    """
    # FIX: the original wrote `a == X & b == Y`, which Python parses as a
    # chained comparison with a bitwise AND (`a == (X & b) == Y`), not as
    # a logical conjunction. Use `and` with plain comparisons instead.
    if INTERRUPT_MODE == INTERRUPT_DISABLED and TRIGGER_MODE == TRIGGER_DISABLED:
        self.spi_continuousModeCapacitance()
    elif INTERRUPT_MODE == INTERRUPT_ENABLED and TRIGGER_MODE == TRIGGER_DISABLED:
        self.spi_continuousModeCapacitance()
    elif INTERRUPT_MODE == INTERRUPT_DISABLED and TRIGGER_MODE == TRIGGER_ENABLED:
        self.spi_triggerModeCapacitance()
    else:
        pass
def spi_triggerModeCapacitance(self):
    """Read one sample in trigger mode (TRIGGER_MODE enabled).

    Pulses the trigger pin, waits for the circuit to take a sample,
    then reads and converts all ten channels.
    """
    # Trigger a sample to begin
    GPIO.output(TRIGGER_PIN, GPIO.HIGH)
    GPIO.output(TRIGGER_PIN, GPIO.LOW)
    # Allow the circuit to start a sample
    time.sleep(0.1)
    # Read the sensor data
    self.readData = self.spi_readCapacitance()
    # Convert the raw data to capacitance
    for i in range(10):
        self.spi_extractCapacitance(self.readData, i)
def spi_continuousModeCapacitance(self):
    """Read one sample in continuous mode (with or without interrupt).

    When INTERRUPT_MODE is enabled, waits for the data-ready (interrupt)
    pin to go low before reading, and for it to go high again afterwards
    so the next packet can start cleanly.
    """
    # Check if the interrupt mode is enabled (in configuration)
    if (INTERRUPT_MODE == INTERRUPT_ENABLED):
        # Don't do anything until the interrupt goes low
        while(GPIO.input(INTERRUPT_PIN) == GPIO.HIGH):
            pass
    self.readData = self.spi_readCapacitance()
    # Convert the raw data to capacitance
    for i in range(10):
        self.spi_extractCapacitance(self.readData, i)
    # Wait for the next data packet to start sampling
    if(INTERRUPT_MODE == INTERRUPT_ENABLED):
        # Don't do anything until the interrupt goes high
        while(GPIO.input(INTERRUPT_PIN) == GPIO.LOW):
            pass
def spi_writeConfiguration(self):
    """Send the configuration packet to the 16FGV1.0 over SPI.

    The 16FGV1.0 requires a configuration package (ODR, interrupt,
    trigger, filter and resolution modes) before it starts streaming
    data.
    """
    # Set the chip select to low to select device
    GPIO.output(CE_PIN0, GPIO.LOW)
    # Select configure package and sets it
    self.myDevice.xfer2([CONFIG, ODR_MODE, INTERRUPT_MODE, TRIGGER_MODE, FILTER_MODE, RESOLUTION_MODE, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    # Take the chip select to high to de-select
    GPIO.output(CE_PIN0, GPIO.HIGH)
def spi_readCapacitance(self):
    """Read one data packet (ten 16-bit raw capacitance values) over SPI.

    Returns:
        list of 20 ints: raw sensing data from the 16FGV1.0, two bytes
        per channel (MSB first), with the two header bytes stripped.
    """
    # 16FGV1.0 transmits data in the form of 10, 16bit capacitance values
    # Set the chip select to low to select the device
    GPIO.output(CE_PIN0, GPIO.LOW)
    # Select Data package to return values
    raw = self.myDevice.xfer2([DATA, PADDING, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    del raw[:2]
    # FIX: de-select the chip before returning. In the original this line
    # was placed after `return` and never executed, leaving the chip
    # selected between reads.
    GPIO.output(CE_PIN0, GPIO.HIGH)
    return raw
def spi_getCapacitanceScalingFactor(self, resolutionConfig):
    """Return the divisor that converts raw counts for a resolution mode.

    The 16FGV1.0 has an adjustable LSB resolution; raw values are
    divided by this factor to obtain capacitance. Unknown settings map
    to 1.

    :param resolutionConfig: int :
        Resolution mode written during spi_writeConfiguration.
    :returns: int :
        The scale of the resolution.
    """
    scaling = {
        RESOLUTION_1pF: 1,
        RESOLUTION_100fF: 10,
        RESOLUTION_10fF: 100,
        RESOLUTION_1fF: 1000,
    }
    return scaling.get(resolutionConfig, 1)
def spi_extractCapacitance(self, raw, channel):
    """Decode one channel's raw bytes and store the value on its peripheral.

    :param raw: list of ints :
        Raw bytes read on the SPI bus (two bytes per channel, MSB first).
    :param channel: int :
        Channel number (0-9) whose raw value should be converted.
    """
    capacitance = 0.0
    numberOfSpiPeripheral = len(self.listPeripheralSpi)
    if numberOfSpiPeripheral > 0:
        for myPeripheral in self.listPeripheralSpi:
            if channel == myPeripheral.channelNumber:
                # Big-endian 16-bit value, scaled by the resolution factor.
                capacitance = raw[2 * channel] * 256 + raw[2 * channel + 1]
                capacitance /= self.capacitanceScalingFactor
                myPeripheral.value = capacitance
def spi_listToCsv(self):
    """Print the current SPI channel values as one CSV-formatted line."""
    connectedCount = len(self.listPeripheralSpi) - 1
    if connectedCount > 0:
        row = "".join("%s ," % sensor.value for sensor in self.listPeripheralSpi)
        print("\n")
        print(row)
def spi_getValuesCsv(self):
    """Return the current SPI channel values as one CSV-formatted string.

    :returns: string :
        16FGV1.0 values in a csv format (empty when no peripherals).
    """
    row = ""
    connectedCount = len(self.listPeripheralSpi) - 1
    if connectedCount > 0:
        row = "".join("%s ," % sensor.value for sensor in self.listPeripheralSpi)
    return row
def spi_getListPeripheral(self):
    """Return the list of StretchSensePeripheral objects used over SPI.

    :returns: list :
        Every SPI peripheral generated by spi_generateTenChannel().
    """
    return self.listPeripheralSpi
def spi_close(self):
    """Close the spidev handle and release the SPI bus."""
    self.myDevice.close()
"""
Functions : Scan, Print, Connect/Disconnect & Update
"""
def ble_printAllPeripheralsAvailable(self):
    """Print the address of every StretchSense device found by a scan."""
    availableCount = len(self.listPeripheralAvailable) - 1
    if availableCount == 0:
        return print("No devices available")
    if availableCount >= 1:
        print("Number of StretchSense devices detected : ", availableCount)
        for candidate in self.listPeripheralAvailable:
            # Skip the placeholder entry at index 0 (empty address).
            if candidate.addr != '':
                print("StretchSense device : %s\n" % (candidate.addr))
def ble_printAllPeripheralsConnected(self):
    """Print the address of every connected StretchSense sensor."""
    connectedCount = len(self.listPeripheralIsConnected) - 1
    if connectedCount == 0:
        return print("No sensors connected")
    if connectedCount >= 1:
        for sensor in self.listPeripheralIsConnected:
            # Skip the placeholder entry at index 0 (empty address).
            if sensor.addr != '':
                print("StretchSense Sensor Connected : %s\n" % (sensor.addr))
def ble_scanning(self, scanTime):
    """Scan for StretchSense BLE devices and record new ones.

    Devices advertising the name 'StretchSense' that are not already in
    listPeripheralAvailable are appended to it. Also resets
    listPeripheralInUse.

    :param scanTime: int :
        Duration of the BLE scan, in seconds.

    NOTE(review): argparse reads sys.argv here, so command-line flags of
    the host script (hci/timeout/sensitivity/...) configure the scan.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--hci', action='store', type=int, default=0,
                        help='Interface number for scan')
    parser.add_argument('-t', '--timeout', action='store', type=int, default=4,
                        help='Scan delay, 0 for continuous')
    parser.add_argument('-s', '--sensitivity', action='store', type=int, default=-128,
                        help='dBm value for filtering far devices')
    parser.add_argument('-d', '--discover', action='store_true',
                        help='Connect and discover service to scanned devices')
    parser.add_argument('-a', '--all', action='store_true',
                        help='Display duplicate adv responses, by default show new + updated')
    parser.add_argument('-n', '--new', action='store_true',
                        help='Display only new adv responses, by default show new + updated')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Increase output verbosity')
    arg = parser.parse_args(sys.argv[1:])
    self.Debugging = arg.verbose
    scanner = btle.Scanner(arg.hci).withDelegate(self.ScanPrint(arg))
    devicesAvailable = scanner.scan(scanTime)
    self.listPeripheralInUse = []
    for devices in devicesAvailable:
        deviceAlreadyInTheList = False
        for (sdid, desc, val) in devices.getScanData():
            if (val == StretchSenseAPI.deviceName):
                # Only add a device once, keyed by its MAC address.
                for myDeviceInTheList in self.listPeripheralAvailable:
                    if (myDeviceInTheList.addr == devices.addr):
                        deviceAlreadyInTheList = True
                if deviceAlreadyInTheList is False:
                    self.listPeripheralAvailable.append(devices)
def ble_connectOnePeripheral(self, myDeviceAddr):
    """Connect one available StretchSense device by MAC address.

    Creates the bluepy Peripheral, registers the notification delegate,
    appends it to listPeripheralInUse, and generates one (gen 2) or ten
    (gen 3 / 10TT) StretchSensePeripheral channel entries depending on
    the service UUID discovered. Notifications are enabled by writing
    0x0001 to the characteristic's CCC descriptor (valHandle + 1).

    :param myDeviceAddr: string :
        Address of the device that you want to connect.
    """
    numberOfPeripheralAvailable = len(self.listPeripheralAvailable) - 1
    # Reset the connected list only when it holds nothing beyond the
    # placeholder entry.
    if (len(self.listPeripheralIsConnected) - 1) > 0:
        pass
    else:
        self.listPeripheralIsConnected = []
    if numberOfPeripheralAvailable > 0:
        for myPeripheralAvailable in self.listPeripheralAvailable:
            if (myPeripheralAvailable.addr == myDeviceAddr):
                myPeripheralConnected = btle.Peripheral(myPeripheralAvailable)
                myPeripheralConnected.setDelegate(StretchSenseDelegate(myPeripheralConnected))
                myPeripheralConnected.deviceAddr = myDeviceAddr
                self.listPeripheralInUse.append(myPeripheralConnected)
                listOfServices = sorted(myPeripheralConnected.services, key=lambda services: services.hndStart)
                for services in listOfServices:
                    # Empty service declarations carry no characteristics.
                    if services.hndStart == services.hndEnd:
                        continue
                    if services.uuid == self.serviceUUID2:
                        myPeripheralConnected.gen = "2"
                        myPeripheralConnected.uuid = self.serviceUUID2
                        self.ble_generateOneChannel(myPeripheralConnected)
                        characteristics = services.getCharacteristics()[0]
                        # Enable notifications (CCC descriptor = 0x0001).
                        myPeripheralConnected.writeCharacteristic(characteristics.valHandle + 1, b"\x01\x00")
                        continue
                    if services.uuid == self.serviceUUID3:
                        myPeripheralConnected.gen = "3"
                        myPeripheralConnected.uuid = self.serviceUUID3
                        self.ble_generateTenChannel(myPeripheralConnected)
                        characteristics = services.getCharacteristics()[0]
                        myPeripheralConnected.writeCharacteristic(characteristics.valHandle + 1, b"\x01\x00")
                        continue
                    if services.uuid == self.serviceUUID10TT:
                        myPeripheralConnected.gen = "10TT"
                        myPeripheralConnected.uuid = self.serviceUUID10TT
                        self.ble_generateTenChannel(myPeripheralConnected)
                        characteristics = services.getCharacteristics()
                        # 10TT: find the data characteristic before enabling
                        # notifications.
                        for char in characteristics:
                            if char.uuid == self.dataUUID10TT:
                                myPeripheralConnected.writeCharacteristic(char.valHandle + 1, b"\x01\x00")
                        continue
def ble_connectAllPeripheral(self):
    """Connect every available StretchSense device found by the last scan.

    Resets listPeripheralIsConnected and listPeripheralInUse, then for
    each available device creates the bluepy Peripheral, registers the
    notification delegate and generates one (gen 2) or ten (gen 3 /
    10TT) StretchSensePeripheral channel entries, enabling notifications
    on the data characteristic.
    """
    numberOfPeripheralAvailable = len(self.listPeripheralAvailable) - 1
    self.listPeripheralIsConnected = []
    self.listPeripheralInUse = []
    if numberOfPeripheralAvailable > 0:
        for myPeripheralAvailable in self.listPeripheralAvailable:
            # Skip the placeholder entry at index 0 (empty address).
            if myPeripheralAvailable.addr != '':
                myPeripheralConnected = btle.Peripheral(myPeripheralAvailable)
                myPeripheralConnected.setDelegate(StretchSenseDelegate(myPeripheralConnected))
                myPeripheralConnected.deviceAddr = myPeripheralAvailable.addr
                self.listPeripheralInUse.append(myPeripheralConnected)
                listOfServices = sorted(myPeripheralConnected.services, key=lambda services: services.hndStart)
                for services in listOfServices:
                    # Empty service declarations carry no characteristics.
                    if services.hndStart == services.hndEnd:
                        continue
                    if services.uuid == self.serviceUUID2:
                        myPeripheralConnected.gen = '2'
                        myPeripheralConnected.uuid = self.serviceUUID2
                        self.ble_generateOneChannel(myPeripheralConnected)
                        characteristics = services.getCharacteristics()[0]
                        # Enable notifications (CCC descriptor = 0x0001).
                        myPeripheralConnected.writeCharacteristic(characteristics.valHandle + 1, b"\x01\x00")
                        continue
                    if services.uuid == self.serviceUUID3:
                        myPeripheralConnected.gen = '3'
                        myPeripheralConnected.uuid = self.serviceUUID3
                        self.ble_generateTenChannel(myPeripheralConnected)
                        characteristics = services.getCharacteristics()[0]
                        myPeripheralConnected.writeCharacteristic(characteristics.valHandle + 1, b"\x01\x00")
                        continue
                    if services.uuid == self.serviceUUID10TT:
                        myPeripheralConnected.gen = '10TT'
                        myPeripheralConnected.uuid = self.serviceUUID10TT
                        self.ble_generateTenChannel(myPeripheralConnected)
                        characteristics = services.getCharacteristics()[0]
                        myPeripheralConnected.writeCharacteristic(characteristics.valHandle + 1, b"\x01\x00")
                        continue
def ble_disconnectOnePeripheral(self, myDeviceAddr):
    """Disconnect one connected StretchSense device by MAC address.

    Removes its channel entries from listPeripheralIsConnected and the
    bluepy Peripheral from listPeripheralInUse, then disconnects.

    :param myDeviceAddr: string :
        Address of the device that you want to disconnect.

    NOTE(review): the slice delete removes up to 10 consecutive entries
    starting at the first match — correct for ten-channel circuits, but
    a gen-2 device followed by other sensors would lose neighbouring
    entries; confirm intended behaviour.
    """
    numberOfPeripheralConnected = len(self.listPeripheralIsConnected)
    if numberOfPeripheralConnected > 0:
        i = 0
        for myPeripheralConnected in self.listPeripheralIsConnected:
            if myPeripheralConnected.addr == myDeviceAddr:
                del self.listPeripheralIsConnected[i:i + 10]
                break
            i += 1
        for myPeripheralInUse in self.listPeripheralInUse:
            if myPeripheralInUse.addr == myDeviceAddr:
                self.listPeripheralInUse.remove(myPeripheralInUse)
                myPeripheralInUse.disconnect()
def ble_disconnectAllPeripherals(self):
    """Disconnect every BLE peripheral and reset the bookkeeping lists.

    Index 0 of each list is a placeholder object, so only entries from
    index 1 onward are deleted.
    """
    for myPeripheralInUse in self.listPeripheralInUse:
        myPeripheralInUse.disconnect()
    del self.listPeripheralAvailable[1:]
    del self.listPeripheralIsConnected[1:]
    del self.listPeripheralInUse[1:]
"""
Functions : Discover/Generate/Update Services & Characteristics, and wait for notifications
"""
def ble_generateOneChannel(self, peripheral):
    """Append one StretchSensePeripheral channel for a gen-2 device.

    :param peripheral: Peripheral :
        Connected bluepy peripheral whose addr/uuid/gen are copied onto
        the new single-channel entry (channel 0).
    """
    sensor = StretchSensePeripheral()
    sensor.addr = peripheral.addr
    sensor.uuid = peripheral.uuid
    sensor.value = 0
    sensor.gen = peripheral.gen
    sensor.channelNumber = 0
    self.listPeripheralIsConnected.append(sensor)
def ble_generateTenChannel(self, peripheral):
    """Append ten StretchSensePeripheral channels for a connected device.

    Used for gen-3 and 10TT circuits: creates channels 0-9, each
    copying the device's addr/uuid/gen, and appends them to
    listPeripheralIsConnected. Replaces ten copy-pasted blocks with a
    loop; the resulting list contents are identical.

    :param peripheral: Peripheral :
        Connected bluepy peripheral providing addr, uuid and gen.
    """
    for channel in range(10):
        sensor = StretchSensePeripheral()
        sensor.addr = peripheral.addr
        sensor.uuid = peripheral.uuid
        sensor.value = 0
        sensor.gen = peripheral.gen
        sensor.channelNumber = channel
        self.listPeripheralIsConnected.append(sensor)
def ble_discoverServices(self):
#print("\033[0;35;40m ble_discoverServices()\033[0m")
"""
Display on the terminal all the services for each StretchSense devices connected.
"""
for myPeripheral in self.listPeripheralInUse:
listOfServices = sorted(myPeripheral.services, key=lambda services: services.hndStart)
for services in listOfServices:
print(services)
def ble_discoverCharacteristics(self):
#print("\033[0;35;40m ble_discoverCharacteristics()\033[0m")
"""
Display on the terminal all the characteristics for each StretchSense devices connected.
"""
for myPeripheral in self.listPeripheralInUse:
listOfServices = sorted(myPeripheral.services, key=lambda services: services.hndStart)
for services in listOfServices:
characteristics = services.getCharacteristics()
for chars in characteristics:
print(chars)
def ble_updateOneChannelWithNotifications(self, data, addr):
#print("\033[0;35;40m ble_updateOneChannelWithNotifications()\033[0m")
"""
Update data from one or more StretchSense gen2 devices connected using BLE notifications
and stores its value.
:param data: UTF-8 characters :
data transmitted by the device once a notification is detected.
:param addr: string :
Address of the gen2 we want to update.
"""
numberOfPeripheralConnected = len(globalSensor)
if numberOfPeripheralConnected >= 1:
for myPeripheral in globalSensor:
if myPeripheral.addr == addr:
decimalValue = int(binascii.b2a_hex(data), 16) / 10.0
myPeripheral.value = decimalValue
#print("myPeripheral.value = ", myPeripheral.value)
def ble_updateOneChannel(self):
#print("\033[0;35;40m ble_updateOneChannel()\033[0m")
"""
Update once the value of every StretchSense gen2 devices connected without using BLE notifications.
"""
numberOfPeripheralConnected = len(self.listPeripheralIsConnected)
if numberOfPeripheralConnected >= 1:
for myPeripheral in self.listPeripheralInUse:
for myPeripheralConnected in self.listPeripheralIsConnected:
if (myPeripheralConnected.gen == 2) & (myPeripheralConnected.addr == myPeripheral.addr):
characteristics = myPeripheral.getCharacteristics()
for chars in characteristics:
if chars.uuid == self.dataUUID2:
handler = chars.getHandle()
value = myPeripheral.readCharacteristic(handler)
decimalValue = int(binascii.b2a_hex(value), 16) / 10.0
myPeripheralConnected.value = decimalValue
print("myPeripheralConnected.value = ", myPeripheralConnected.value)
def ble_updateTenChannelWithNotifications(self, data, addr):
#print("\033[0;35;40m ble_updateTenChannelWithNotifications()\033[0m")
"""
Update data from one or more StretchSense gen3 devices connected using BLE notifications
and stores its value.
:param data: UTF-8 characters :
data transmitted by the device once a notification is detected.
:param addr: string :
Address of the gen3 we want to update.
"""
numberOfPeripheralConnected = len(globalSensor)
if numberOfPeripheralConnected >= 10:
for myPeripheral in globalSensor:
if myPeripheral.addr == addr:
index = globalSensor.index(myPeripheral)
decimalValue = (binascii.b2a_hex(data))
splitted = [decimalValue[i:i + 4] for i in range(0, len(decimalValue), 4)]
globalSensor[index + 0].value = int((splitted[0]), 16) / 10.0
globalSensor[index + 1].value = int((splitted[1]), 16) / 10.0
globalSensor[index + 2].value = int((splitted[2]), 16) / 10.0
globalSensor[index + 3].value = int((splitted[3]), 16) / 10.0
globalSensor[index + 4].value = int((splitted[4]), 16) / 10.0
globalSensor[index + 5].value = int((splitted[5]), 16) / 10.0
globalSensor[index + 6].value = int((splitted[6]), 16) / 10.0
globalSensor[index + 7].value = int((splitted[7]), 16) / 10.0
globalSensor[index + 8].value = int((splitted[8]), 16) / 10.0
globalSensor[index + 9].value = int((splitted[9]), 16) / 10.0
break
def ble_updateTenChannel(self):
#print("\033[0;35;40m ble_updateTenChannel()\033[0m")
"""
Update once the value of every StretchSense gen3 devices connected without BLE using notifications.
"""
numberOfPeripheralConnected = len(self.listPeripheralIsConnected)
if numberOfPeripheralConnected >= 10:
for myPeripheral in self.listPeripheralInUse:
for myPeripheralConnected in self.listPeripheralIsConnected:
if (myPeripheralConnected.gen == 3) & (myPeripheralConnected.addr == myPeripheral.deviceAddr):
characteristics = myPeripheral.getCharacteristics()
for chars in characteristics:
if chars.uuid == (self.dataUUID3 or self.dataUUID10TT):
handler = chars.getHandle()
value = myPeripheral.readCharacteristic(handler)
decimalValue = (binascii.b2a_hex(value))
splitted = [decimalValue[i:i + 4] for i in range(0, len(decimalValue), 4)]
for channel in range(0, 10, 1):
if channel == myPeripheralConnected.channelNumber:
myPeripheralConnected.value = int((splitted[channel]), 16) / 10.0
print("myPeripheralConnected.value = ", myPeripheralConnected.value)
break
break
def ble_updateAllPeripherals(self):
#print("\033[0;35;40m ble_updateAllPeripherals()\033[0m")
"""
Update the value of the capacitance of each StretchSense devices which are connected.
"""
listPeripheralUpdated = []
numberOfPeripheralInUse = len(self.listPeripheralInUse)
if numberOfPeripheralInUse > 0:
for myPeripheral in self.listPeripheralInUse:
if myPeripheral.deviceAddr != '':
skipThisPeripheral = False
for myPeripheralUpdated in listPeripheralUpdated:
if myPeripheral.addr == myPeripheralUpdated.deviceAddr:
skipThisPeripheral = True
if skipThisPeripheral is False:
listOfServices = sorted(myPeripheral.services, key=lambda services: services.hndStart)
for services in listOfServices:
if services.hndStart == services.hndEnd:
continue
if services.uuid == self.serviceUUID2:
self.ble_updateOneChannel()
listPeripheralUpdated.append(myPeripheral)
continue
if services.uuid == self.serviceUUID3:
self.ble_updateTenChannel()
listPeripheralUpdated.append(myPeripheral)
continue
def ble_waitNotifications(self):
#print("\033[0;35;40m ble_waitNotifications()\033[0m")
"""
When called, run into all connected devices waiting for notification from each of them
and store the new data in their value slot.
"""
numberOfPeripheralInUse = len(self.listPeripheralInUse)
if numberOfPeripheralInUse > 0:
global globalSensor
globalSensor = self.listPeripheralIsConnected
for myPeripheral in self.listPeripheralInUse:
if myPeripheral.waitForNotifications(0.001):
continue
self.listPeripheralIsConnected = globalSensor
"""
Functions : Lists of Peripherals
"""
def ble_getListPeripheralAvailable(self):
#print("\033[0;35;40m ble_getListPeripheralAvailable()\033[0m")
"""
Returns the list of all devices available in the area.
:returns: [Peripheral] : List of all the devices available.
"""
return self.listPeripheralAvailable
def ble_getListAddrPeripheralAvailable(self):
#print("\033[0;35;40m ble_getListAddrPeripheralAvailable()\033[0m")
"""
Returns the list of all devices address available in the area.
:returns: [addr] : List of all devices available addresses.
"""
listAddr = []
numberOfPeripheralAvailable = len(self.listPeripheralAvailable)
if (self.listPeripheralAvailable[0].addr == ''):
return 0
elif numberOfPeripheralAvailable != 0:
for i in [numberOfPeripheralAvailable - 1]:
listAddr.append(self.listPeripheralAvailable[i].addr)
print(listAddr)
return listAddr
def ble_getListPeripheralIsConnected(self):
#print("\033[0;35;40m ble_getListPeripheralIsConnected()\033[0m")
"""
Returns the list of all devices connected.
:returns: [StretchSensePeripheral] : List of all devices connected.
"""
return self.listPeripheralIsConnected
def ble_getListPeripheralOnceConnected(self):
#print("\033[0;35;40m ble_getListPeripheralOnceConnected()\033[0m")
"""
Returns the list of all devices once connected.
:returns: [StretchSensePeripheral] : List of all devices once connected.
"""
return self.listPeripheralIsOnceConnected
def ble_getListPeripheralInUse(self):
#print("\033[0;35;40m ble_getListPeripheralInUse()\033[0m")
"""
Returns the list of all devices currently in use.
:returns: [Peripheral] : List of all devices we are using.
"""
return self.listPeripheralInUse
def ble_listToCsv(self):
#print("\033[0;35;40m ble_listToCsv()\033[0m")
"""
Displays on the terminal the values of each connected devices in a csv format.
"""
listToCsv = ""
numberOfPeripheralConnected = len(self.listPeripheralIsConnected)
if numberOfPeripheralConnected >= 1:
for myPeripheral in self.listPeripheralIsConnected:
listToCsv += ("%s ," % myPeripheral.value)
listToCsv += ("\n")
print(listToCsv)
def ble_getValuesCsv(self):
#print("\033[0;35;40m ble_getValuesCsv()\033[0m")
"""
Returns the values of each connected devices in a csv format.
:returns: string : Values of each connected devices.
"""
listToReturn = ""
numberOfPeripheralConnected = len(self.listPeripheralIsConnected)
if numberOfPeripheralConnected > 0:
for myPeripheral in self.listPeripheralIsConnected:
listToReturn += ("%s , " % myPeripheral.value)
return listToReturn
"""
Class StretchSenseDelegate : Class to handle the BLE notifications
"""
class StretchSenseDelegate(btle.DefaultDelegate):
#print("\033[0;35;40m StretchSenseDelegate()\033[0m")
def __init__(self, peripheral):
#print("\033[0;35;40m __init__().StretchSenseDelegate()\033[0m")
btle.DefaultDelegate.__init__(self)
self.peripheral = peripheral
self.addr = self.peripheral.addr
def handleNotification(self, cHandle, data):
#print("\033[0;35;40m StretchSenseDelegateHandleNotification()\033[0m")
for myPeripheral in globalSensor:
if myPeripheral.addr == self.addr:
if myPeripheral.uuid == StretchSenseAPI.serviceUUID2:
StretchSenseAPI.ble_updateOneChannelWithNotifications(self, data, self.addr)
if myPeripheral.uuid == StretchSenseAPI.serviceUUID3:
StretchSenseAPI.ble_updateTenChannelWithNotifications(self, data, self.addr)
if myPeripheral.uuid == StretchSenseAPI.serviceUUID10TT:
StretchSenseAPI.ble_updateTenChannelWithNotifications(self, data, self.addr)
break
"""
Global lists of values
"""
globalSensor = [StretchSensePeripheral()]
""" Main initialisation
Main initial declaration to compile examples and files that you are using.
"""
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit("Usage:\n %s <mac-address> [random]" % sys.argv[0])
if not os.path.isfile(btle.helperExe):
raise ImportError("Cannot find required executable '%s'" % btle.helperExe)
devAddr = sys.argv[1]
if len(sys.argv) == 3:
addrType = sys.argv[2]
else:
addrType = btle.ADDR_TYPE_PUBLIC
print("Connecting to: {}, address type: {}".format(devAddr, addrType))
conn = btle.Peripheral(devAddr, addrType)
try:
for svc in conn.services:
print(str(svc), ":")
for ch in svc.getCharacteristics():
print(" {}, hnd={}, supports {}".format(ch, hex(ch.handle), ch.propertiesToString()))
chName = btle.AssignedNumbers.getCommonName(ch.uuid)
if (ch.supportsRead()):
try:
print(" ->", repr(ch.read()))
except btle.BTLEException as e:
print(" ->", e)
finally:
conn.disconnect()
| 32.262812 | 173 | 0.617832 |
d9ce3930a37d21c590c0e8b622357f85a4085151 | 3,306 | py | Python | GitSvnServer/vcs/git/db.py | cespedes/git-svnserver | 97741e064f9473423e7a80260d2e09a4596384b9 | [
"BSD-3-Clause"
] | 1 | 2016-01-24T11:59:25.000Z | 2016-01-24T11:59:25.000Z | GitSvnServer/vcs/git/db.py | slonopotamus/git_svn_server | 80889d9557c99873ceaa58e5260b8edfa30bcffb | [
"BSD-3-Clause"
] | null | null | null | GitSvnServer/vcs/git/db.py | slonopotamus/git_svn_server | 80889d9557c99873ceaa58e5260b8edfa30bcffb | [
"BSD-3-Clause"
] | null | null | null | import os
import sqlite3
class GitDb(object):
def __init__(self, git, location):
self.git = git
self.map_file = os.path.join(location, 'svnserver', 'db')
def connect(self):
conn = sqlite3.connect(self.map_file)
conn.row_factory = sqlite3.Row
return conn
def execute(self, sql, *args):
conn = self.connect()
results = conn.execute(sql, args).fetchall()
conn.close()
return results
class GitMap(GitDb):
def get_latest_rev(self):
conn = self.connect()
sql = 'SELECT revision FROM transactions ORDER BY revision DESC'
row = conn.execute(sql).fetchone()
conn.close()
if row is None:
return 0
return int(row['revision'])
def find_commit(self, ref, rev=None, tag_sha1=False):
if rev is None:
rev = self.get_latest_rev()
conn = self.connect()
sql = 'SELECT revision, action, sha1, origin FROM transactions WHERE ' \
'ref = ? AND revision <= ? ORDER BY revision DESC'
row = conn.execute(sql, (ref, rev)).fetchone()
conn.close()
if row is None:
return None
if row['action'] in ['commit', 'branch', 'merge']:
return row['sha1']
if row['action'] in ['tag']:
if tag_sha1:
return row['sha1']
return row['origin']
return None
def get_commit_by_rev(self, rev, tag_sha1=False):
conn = self.connect()
sql = 'SELECT revision, action, sha1, origin FROM transactions WHERE ' \
'revision = ?'
row = conn.execute(sql, (rev,)).fetchone()
conn.close()
if row is None:
return None
if row['action'] in ['commit', 'branch', 'merge']:
return row['sha1']
if row['action'] in ['tag']:
if tag_sha1:
return row['sha1']
return row['origin']
return None
def get_commit_by_pattern(self, pattern, rev=None, tag_sha1=False):
conn = self.connect()
sql = 'SELECT revision, action, sha1, origin FROM transactions WHERE ' \
'ref like ? AND revision <= ? ORDER BY revision DESC'
row = conn.execute(sql, (pattern, rev)).fetchone()
conn.close()
if row is None:
return None
if row['action'] in ['commit', 'branch', 'merge']:
return row['sha1']
if row['action'] in ['tag']:
if tag_sha1:
return row['sha1']
return row['origin']
return None
def get_commits(self, ref, frm, to, order='ASC'):
conn = self.connect()
sql = 'SELECT revision, action, sha1, origin FROM transactions WHERE ' \
'ref = ? AND revision >= ? AND revision <= ? ORDER BY revision ' \
'%s' % order
rows = conn.execute(sql, (ref, frm, to)).fetchall()
conn.close()
return rows
def get_ref_rev(self, sha1):
conn = self.connect()
sql = 'SELECT revision, ref FROM transactions WHERE sha1 = ?'
row = conn.execute(sql, (sha1,)).fetchone()
conn.close()
if row is None:
return None, None
return row['ref'], int(row['revision'])
| 29 | 80 | 0.541742 |
39dca290842160b320878c8db27f1c3a43c3b468 | 6,419 | py | Python | 3.3.1/gdal-utils/osgeo_utils/auxiliary/raster_creation.py | PROgram52bc/pygdal | ea3722f8b887d74877c267ad9a1b722ec203f93c | [
"MIT"
] | 1 | 2020-11-13T09:22:12.000Z | 2020-11-13T09:22:12.000Z | 3.3.1/gdal-utils/osgeo_utils/auxiliary/raster_creation.py | PROgram52bc/pygdal | ea3722f8b887d74877c267ad9a1b722ec203f93c | [
"MIT"
] | null | null | null | 3.3.1/gdal-utils/osgeo_utils/auxiliary/raster_creation.py | PROgram52bc/pygdal | ea3722f8b887d74877c267ad9a1b722ec203f93c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ******************************************************************************
#
# Project: GDAL utils.auxiliary
# Purpose: raster creation utility functions
# Author: Idan Miara <idan@miara.com>
#
# ******************************************************************************
# Copyright (c) 2021, Idan Miara <idan@miara.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ******************************************************************************
import os
import shutil
import tempfile
from numbers import Real
from typing import Sequence, Optional
from osgeo import gdal, osr
from osgeo_utils.auxiliary.base import PathLikeOrStr, MaybeSequence, is_true
from osgeo_utils.auxiliary.util import get_bigtiff_creation_option_value, get_data_type, DataTypeOrStr, CreationOptions, \
open_ds
def create_flat_raster(filename: Optional[PathLikeOrStr],
driver: Optional[str] = None, dt: DataTypeOrStr = gdal.GDT_Byte,
size: MaybeSequence[int] = 128, band_count: int = 1, creation_options: CreationOptions = None,
fill_value: Optional[Real] = None, nodata_value: Optional[Real] = None,
origin: Optional[Sequence[int]] = (500_000, 0), pixel_size: MaybeSequence[int] = 10,
epsg: Optional[int] = 32636,
overview_alg: Optional[str] = 'NEAR', overview_list: Optional[Sequence[int]] = None) -> gdal.Dataset:
if filename is None:
filename = tempfile.mktemp()
elif not filename:
filename = ''
if driver is None:
driver = 'GTiff' if filename else 'MEM'
if not isinstance(size, Sequence):
size = (size, size)
drv = gdal.GetDriverByName(driver)
dt = get_data_type(dt)
creation_options_list = get_creation_options(creation_options, driver=driver)
ds = drv.Create(os.fspath(filename), *size, band_count, dt, creation_options_list)
if pixel_size and origin:
if not isinstance(pixel_size, Sequence):
pixel_size = (pixel_size, -pixel_size)
ds.SetGeoTransform([origin[0], pixel_size[0], 0, origin[1], 0, pixel_size[1]])
if epsg is not None:
srs = osr.SpatialReference()
srs.ImportFromEPSG(epsg)
ds.SetSpatialRef(srs)
for bnd_idx in range(band_count):
bnd: gdal.Band = ds.GetRasterBand(bnd_idx+1)
if fill_value is not None:
bnd.Fill(fill_value)
if nodata_value is not None:
bnd.SetNoDataValue(nodata_value)
if overview_alg and overview_list:
ds.BuildOverviews(overview_alg, overviewlist=overview_list)
return ds
def get_creation_options(creation_options: CreationOptions = None,
driver: str = 'GTiff',
sparse_ok: bool = None,
tiled: bool = None,
block_size: Optional[int] = None,
big_tiff: Optional[str] = None,
comp: str = None):
creation_options = dict(creation_options or dict())
driver = driver.lower()
if comp is None:
comp = creation_options.get("COMPRESS", "DEFLATE")
creation_options["BIGTIFF"] = get_bigtiff_creation_option_value(big_tiff)
creation_options["COMPRESS"] = comp
if sparse_ok is None:
sparse_ok = creation_options.get("SPARSE_OK", True)
sparse_ok = is_true(sparse_ok)
creation_options["SPARSE_OK"] = str(sparse_ok)
if tiled is None:
tiled = creation_options.get("TILED", True)
tiled = is_true(tiled)
creation_options["TILED"] = str(tiled)
if tiled and block_size is not None:
if driver == 'gtiff':
creation_options["BLOCKXSIZE"] = block_size
creation_options["BLOCKYSIZE"] = block_size
elif driver == 'cog':
creation_options["BLOCKSIZE"] = block_size
creation_options_list = []
for k, v in creation_options.items():
creation_options_list.append("{}={}".format(k, v))
return creation_options_list
def copy_raster_and_add_overviews(
filename_src: PathLikeOrStr, output_filename_template: str, overview_list: Sequence[int],
overview_alg='bilinear', create_file_per_ovr: bool = True, driver_name: str = 'GTiff'):
files_list = []
ds_with_ovrs = output_filename_template.format('')
shutil.copy(filename_src, ds_with_ovrs)
files_list.append(ds_with_ovrs)
ds_base = output_filename_template.format(0)
shutil.copy(filename_src, ds_base)
files_list.append(ds_base)
ds = open_ds(ds_with_ovrs, gdal.GA_Update)
size = (ds.RasterXSize, ds.RasterYSize)
ds.BuildOverviews(overview_alg, overviewlist=overview_list)
driver = gdal.GetDriverByName(driver_name)
all_ovrs = [1]
all_ovrs.extend(overview_list)
for ovr_idx, f in enumerate(all_ovrs):
filename_i = output_filename_template.format(ovr_idx)
if ovr_idx == 0:
ds1 = open_ds(filename_i)
else:
ds1 = open_ds(ds_with_ovrs, ovr_idx=ovr_idx, ovr_only=True)
if create_file_per_ovr:
driver.CreateCopy(filename_i, ds1)
files_list.append(filename_i)
assert ds1.RasterXSize == int(size[0]/f) and ds1.RasterYSize == int(size[1]/f) and ds1.RasterCount == 1
return all_ovrs, files_list
| 42.230263 | 124 | 0.649478 |
7e0f203c1fefb14718a67fc54db60c6486dc6f13 | 2,412 | py | Python | chatBOT-Final/chat_app/venv/lib/python3.6/site-packages/py2neo/packages/httpstream/packages/urimagic/util.py | ashokupd81/Django-Chatbot | 0d199390b22b294830c1a68ad270c688517e7b11 | [
"MIT"
] | 9 | 2017-07-15T08:45:22.000Z | 2021-07-06T08:32:03.000Z | chatBOT-Final/chat_app/venv/lib/python3.6/site-packages/py2neo/packages/httpstream/packages/urimagic/util.py | ashokupd81/Django-Chatbot | 0d199390b22b294830c1a68ad270c688517e7b11 | [
"MIT"
] | 6 | 2017-05-05T13:11:51.000Z | 2019-01-25T22:46:30.000Z | chatBOT-Final/chat_app/venv/lib/python3.6/site-packages/py2neo/packages/httpstream/packages/urimagic/util.py | ashokupd81/Django-Chatbot | 0d199390b22b294830c1a68ad270c688517e7b11 | [
"MIT"
] | 3 | 2018-03-16T14:11:52.000Z | 2020-03-04T02:08:31.000Z | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2013-2014, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
if sys.version_info >= (3,):
is_integer = lambda x: isinstance(x, int)
is_numeric = lambda x: isinstance(x, (int, float, complex))
is_string = lambda x: isinstance(x, str)
def bstr(s, encoding="utf-8"):
if isinstance(s, bytes):
return s
elif isinstance(s, bytearray):
return bytes(s)
elif isinstance(s, str):
return bytes(s, encoding)
else:
return bytes(str(s), encoding)
def ustr(s, encoding="utf-8"):
""" Convert argument to unicode string.
"""
if isinstance(s, str):
return s
try:
return s.decode(encoding)
except AttributeError:
return str(s)
def xstr(s, encoding="utf-8"):
""" Convert argument to string type returned by __str__.
"""
return ustr(s, encoding)
else:
is_integer = lambda x: isinstance(x, (int, long))
is_numeric = lambda x: isinstance(x, (int, float, long, complex))
is_string = lambda x: isinstance(x, (str, unicode))
def bstr(s, encoding="utf-8"):
if isinstance(s, bytes):
return s
elif isinstance(s, bytearray):
return bytes(s)
elif isinstance(s, unicode):
return s.encode(encoding)
else:
return str(s)
def ustr(s, encoding="utf-8"):
""" Convert argument to unicode string.
"""
if isinstance(s, str):
return s.decode(encoding)
else:
return unicode(s)
def xstr(s, encoding="utf-8"):
""" Convert argument to string type returned by __str__.
"""
if isinstance(s, str):
return s
else:
return unicode(s).encode(encoding)
| 28.714286 | 74 | 0.597844 |
e9b529eb2618abc6fff4ccd5e4f5f32ffb365532 | 1,092 | py | Python | setup.py | keflavich/pyavm | 9fe8081466bafdc4e5fe3e42439535eb209ec466 | [
"MIT"
] | null | null | null | setup.py | keflavich/pyavm | 9fe8081466bafdc4e5fe3e42439535eb209ec466 | [
"MIT"
] | null | null | null | setup.py | keflavich/pyavm | 9fe8081466bafdc4e5fe3e42439535eb209ec466 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from distutils.core import setup
try: # Python 3.x
from distutils.command.build_py import build_py_2to3 as build_py
except ImportError: # Python 2.x
from distutils.command.build_py import build_py
version = '0.9.3.dev'
setup(name='PyAVM',
version=version,
description='Simple pure-python AVM meta-data handling',
author='Thomas Robitaille',
author_email='thomas.robitaille@gmail.com',
license='MIT',
url='http://astrofrog.github.io/pyavm/',
packages=['pyavm', 'pyavm.tests'],
package_data={'pyavm.tests':['data/*.xml', 'data/*.hdr']},
provides=['pyavm'],
cmdclass={'build_py': build_py},
keywords=['Scientific/Engineering'],
long_description="PyAVM is a module to represent, read, and write metadata following the `Astronomy Visualization Metadata <http://www.virtualastronomy.org/avm_metadata.php>`_ (AVM) standard.",
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python",
"License :: OSI Approved :: MIT License",
],
)
| 35.225806 | 199 | 0.667582 |
0e14366f95ea858a96daf37a143c5f7d39c7c53d | 13,868 | py | Python | amino/socket.py | Domasgtuz/Amino.py | fbfb981e8a28b49dde0ed8ef4caff99417f022a4 | [
"MIT"
] | null | null | null | amino/socket.py | Domasgtuz/Amino.py | fbfb981e8a28b49dde0ed8ef4caff99417f022a4 | [
"MIT"
] | null | null | null | amino/socket.py | Domasgtuz/Amino.py | fbfb981e8a28b49dde0ed8ef4caff99417f022a4 | [
"MIT"
] | null | null | null | import time
import json
import websocket
import concurrent.futures
import contextlib
import ssl
from random import randint
from sys import _getframe as getframe
from .lib.util import objects, helpers
class SocketHandler:
def __init__(self, client, socket_trace = False, debug = False, security = True):
self.socket_url = "wss://ws1.narvii.com"
self.client = client
self.debug = debug
self.active = False
self.headers = None
self.security = security
self.socket = None
self.socket_thread = None
self.reconnect = True
self.socket_stop = False
self.socketDelay = 0
self.minReconnect = 480
self.maxReconnect = 540
self.background = concurrent.futures.ThreadPoolExecutor(max_workers=50)
self.socket_handler = self.background.submit(self.reconnect_handler)
websocket.enableTrace(socket_trace)
def reconnect_handler(self):
# Made by enchart#3410 thx
# Fixed by The_Phoenix#3967
while True:
temp = randint(self.minReconnect, self.maxReconnect)
time.sleep(temp)
if self.active:
if self.debug is True:
print(f"[socket][reconnect_handler] Random refresh time = {temp} seconds, Reconnecting Socket")
self.close()
self.run_amino_socket()
def on_open(self):
if self.debug is True:
print("[socket][on_open] Socket Opened")
def on_close(self):
if self.debug is True:
print("[socket][on_close] Socket Closed")
#self.active = False
if self.reconnect:
if self.debug is True:
print("[socket][on_close] reconnect is True, Opening Socket")
#self.run_amino_socket()
def on_ping(self, data):
if self.debug is True:
print("[socket][on_ping] Socket Pinged")
contextlib.suppress(self.socket.sock.pong(data))
def handle_message(self, data):
self.client.handle_socket_message(data)
return
def send(self, data):
if self.debug is True:
print(f"[socket][send] Sending Data : {data}")
self.socket.send(data)
def run_amino_socket(self):
if self.debug is True:
print(f"[socket][start] Starting Socket")
if self.client.sid is None:
return
final = f"{self.client.device_id}|{int(time.time() * 1000)}"
self.headers = {
"NDCDEVICEID": self.client.device_id,
"NDCAUTH": f"sid={self.client.sid}",
"NDC-MSG-SIG": helpers.generate_signature(final)
}
self.socket = websocket.WebSocketApp(
f"{self.socket_url}/?signbody={final.replace('|', '%7C')}",
on_message = self.handle_message,
on_open = self.on_open,
on_close = self.on_close,
on_ping = self.on_ping,
header = self.headers
)
socket_settings = {
"ping_interval": 60
}
if not self.security:
socket_settings.update({
'sslopt': {
"cert_reqs": ssl.CERT_NONE,
"check_hostname": False
}
})
self.socket_thread = self.background.submit(self.socket.run_forever)
#self.socket_thread = threading.Thread(target = self.socket.run_forever, kwargs = socket_settings)
#self.socket_thread.start()
self.active = True
if self.debug is True:
print(f"[socket][start] Socket Started")
def close(self):
if self.debug is True:
print(f"[socket][close] Closing Socket")
self.reconnect = False
self.active = False
self.socket_stop = True
try:
self.socket.close()
except Exception as closeError:
if self.debug is True:
print(f"[socket][close] Error while closing Socket : {closeError}")
return
class Callbacks:
def __init__(self, client):
self.client = client
self.handlers = {}
self.methods = {
304: self._resolve_chat_action_start,
306: self._resolve_chat_action_end,
1000: self._resolve_chat_message
}
self.chat_methods = {
"0:0": self.on_text_message,
"0:100": self.on_image_message,
"0:103": self.on_youtube_message,
"1:0": self.on_strike_message,
"2:110": self.on_voice_message,
"3:113": self.on_sticker_message,
"52:0": self.on_voice_chat_not_answered,
"53:0": self.on_voice_chat_not_cancelled,
"54:0": self.on_voice_chat_not_declined,
"55:0": self.on_video_chat_not_answered,
"56:0": self.on_video_chat_not_cancelled,
"57:0": self.on_video_chat_not_declined,
"58:0": self.on_avatar_chat_not_answered,
"59:0": self.on_avatar_chat_not_cancelled,
"60:0": self.on_avatar_chat_not_declined,
"100:0": self.on_delete_message,
"101:0": self.on_group_member_join,
"102:0": self.on_group_member_leave,
"103:0": self.on_chat_invite,
"104:0": self.on_chat_background_changed,
"105:0": self.on_chat_title_changed,
"106:0": self.on_chat_icon_changed,
"107:0": self.on_voice_chat_start,
"108:0": self.on_video_chat_start,
"109:0": self.on_avatar_chat_start,
"110:0": self.on_voice_chat_end,
"111:0": self.on_video_chat_end,
"112:0": self.on_avatar_chat_end,
"113:0": self.on_chat_content_changed,
"114:0": self.on_screen_room_start,
"115:0": self.on_screen_room_end,
"116:0": self.on_chat_host_transfered,
"117:0": self.on_text_message_force_removed,
"118:0": self.on_chat_removed_message,
"119:0": self.on_text_message_removed_by_admin,
"120:0": self.on_chat_tip,
"121:0": self.on_chat_pin_announcement,
"122:0": self.on_voice_chat_permission_open_to_everyone,
"123:0": self.on_voice_chat_permission_invited_and_requested,
"124:0": self.on_voice_chat_permission_invite_only,
"125:0": self.on_chat_view_only_enabled,
"126:0": self.on_chat_view_only_disabled,
"127:0": self.on_chat_unpin_announcement,
"128:0": self.on_chat_tipping_enabled,
"129:0": self.on_chat_tipping_disabled,
"65281:0": self.on_timestamp_message,
"65282:0": self.on_welcome_message,
"65283:0": self.on_invite_message
}
self.chat_actions_start = {
"Typing": self.on_user_typing_start,
}
self.chat_actions_end = {
"Typing": self.on_user_typing_end,
}
def _resolve_chat_message(self, data):
key = f"{data['o']['chatMessage']['type']}:{data['o']['chatMessage'].get('mediaType', 0)}"
return self.chat_methods.get(key, self.default)(data)
def _resolve_chat_action_start(self, data):
key = data['o'].get('actions', 0)
return self.chat_actions_start.get(key, self.default)(data)
def _resolve_chat_action_end(self, data):
key = data['o'].get('actions', 0)
return self.chat_actions_end.get(key, self.default)(data)
def resolve(self, data):
data = json.loads(data)
return self.methods.get(data["t"], self.default)(data)
def call(self, type, data):
if type in self.handlers:
for handler in self.handlers[type]:
handler(data)
def event(self, type):
def registerHandler(handler):
if type in self.handlers:
self.handlers[type].append(handler)
else:
self.handlers[type] = [handler]
return handler
return registerHandler
# Per-event-type dispatchers. Each one forwards the frame's "o" payload,
# wrapped as objects.Event(...).Event (presumably a typed event wrapper —
# confirm in the objects module), to every handler registered under the
# method's own name: getframe(0).f_code.co_name evaluates to the current
# function's name, e.g. 'on_text_message'. `default` is the catch-all and
# forwards the raw frame unwrapped.
def on_text_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_image_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_youtube_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_strike_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_voice_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_sticker_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_voice_chat_not_answered(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_voice_chat_not_cancelled(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_voice_chat_not_declined(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_video_chat_not_answered(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_video_chat_not_cancelled(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_video_chat_not_declined(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_avatar_chat_not_answered(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_avatar_chat_not_cancelled(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_avatar_chat_not_declined(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_delete_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_group_member_join(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_group_member_leave(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_invite(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_background_changed(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_title_changed(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_icon_changed(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_voice_chat_start(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_video_chat_start(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_avatar_chat_start(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_voice_chat_end(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_video_chat_end(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_avatar_chat_end(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_content_changed(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_screen_room_start(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_screen_room_end(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_host_transfered(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_text_message_force_removed(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_removed_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_text_message_removed_by_admin(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_tip(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_pin_announcement(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_voice_chat_permission_open_to_everyone(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_voice_chat_permission_invited_and_requested(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_voice_chat_permission_invite_only(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_view_only_enabled(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_view_only_disabled(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_unpin_announcement(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_tipping_enabled(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_tipping_disabled(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_timestamp_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_welcome_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_invite_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_user_typing_start(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_user_typing_end(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def default(self, data): self.call(getframe(0).f_code.co_name, data)
adfedb8cedb10ea3ec6091e5cce510c8b041f87d | 27,294 | py | Python | scripts/release-notes.py | jmininger/cockroach | 97dbd6cccf01256c36cd1b61c8ff983a65f29b95 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | scripts/release-notes.py | jmininger/cockroach | 97dbd6cccf01256c36cd1b61c8ff983a65f29b95 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2018-09-28T20:11:45.000Z | 2018-10-01T09:05:49.000Z | scripts/release-notes.py | jmininger/cockroach | 97dbd6cccf01256c36cd1b61c8ff983a65f29b95 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python3
#
# Show a compact release note summary of a range of Git commits.
#
# Example use: release-notes.py --help
#
# Note: the first commit in the range is excluded!
#
# Requires:
# - GitPython https://pypi.python.org/pypi/GitPython/
# - You need to configure your local repo to pull the PR refs from
# GitHub. To do this, add a line like:
# fetch = +refs/pull/*/head:refs/pull/origin/*
# to the GitHub remote section of .git/config.
#
# Disclaimer: this program is provided without warranties of any kind,
# including suitability for any purpose. The author(s) will not be
# responsible if this script eats your left sock.
#
# Known limitations:
#
# - if different people with the same name contribute, this script
# will be confused. (it will merge their work under one entry).
# - the list of aliases below must be manually modified when
# contributors change their git name and/or email address.
#
# Note: there are unit tests in the release-notes subdirectory!
import sys
import itertools
import re
import os
import datetime, time
import subprocess
from git import Repo
from optparse import OptionParser
from git.repo.fun import name_to_object
from git.util import Stats
### Global behavior constants ###

# minimum sha length to disambiguate
# (commit hashes are truncated to this many hex digits throughout the report)
shamin = 9

# Mapping of git author-name aliases to one canonical display name.
# FIXME(knz): This probably needs to use the .mailmap.
author_aliases = {
    'dianasaur323': "Diana Hsieh",
    'kena': "Raphael 'kena' Poss",
    'vivekmenezes': "Vivek Menezes",
    'RaduBerinde': "Radu Berinde",
    'Andy Kimball': "Andrew Kimball",
    'marc': "Marc Berhault",
    'Lauren': "Lauren Hirata",
    'lhirata' : "Lauren Hirata",
    'Emmanuel': "Emmanuel Sales",
    'MBerhault': "Marc Berhault",
    'Nate': "Nathaniel Stewart",
    'a6802739': "Song Hao",
    'Abhemailk abhi.madan01@gmail.com': "Abhishek Madan",
    'rytaft': "Rebecca Taft",
    'songhao': "Song Hao",
    'solongordon': "Solon Gordon",
    'tim-o': "Tim O'Brien",
    'Amruta': "Amruta Ranade",
    'yuzefovich': "Yahor Yuzefovich",
    'madhavsuresh': "Madhav Suresh",
}

# Canonical names of company-internal contributors; used to single out
# community (external) contributors in the "Contributors" section.
# FIXME(knz): This too.
crdb_folk = set([
    "Abhishek Madan",
    "Alex Robinson",
    "Alfonso Subiotto Marqués",
    "Amruta Ranade",
    "Andrei Matei",
    "Andrew Couch",
    "Andrew Kimball",
    "Andy Woods",
    "Arjun Narayan",
    "Ben Darnell",
    "Bob Vawter",
    "Bram Gruneir",
    "Daniel Harrison",
    "David Taylor",
    "Diana Hsieh",
    "Emmanuel Sales",
    "Jesse Seldess",
    "Jessica Edwards",
    "Joseph Lowinske",
    "Joey Pereira",
    "Jordan Lewis",
    "Justin Jaffray",
    "Kuan Luo",
    "Lauren Hirata",
    "Madhav Suresh",
    "Marc Berhault",
    "Masha Schneider",
    "Matt Jibson",
    "Matt Tracy",
    "Nathan VanBenschoten",
    "Nathaniel Stewart",
    "Nikhil Benesch",
    "Paul Bardea",
    "Pete Vilter",
    "Peter Mattis",
    "Radu Berinde",
    "Raphael 'kena' Poss",
    "Rebecca Taft",
    "Rich Loveland",
    "Richard Wu",
    "Sean Loiselle",
    "Solon Gordon",
    "Spencer Kimball",
    "Tamir Duberstein",
    "Tim O'Brien",
    "Tobias Schottdorf",
    "Victor Chen",
    "Vivek Menezes",
    "Yahor Yuzefovich",
])

# Section titles for release notes.
relnotetitles = {
    'cli change': "Command-Line Changes",
    'sql change': "SQL Language Changes",
    'admin ui change': "Admin UI Changes",
    'general change': "General Changes",
    'build change': "Build Changes",
    'enterprise change': "Enterprise Edition Changes",
    'backward-incompatible change': "Backward-Incompatible Changes",
    'performance improvement': "Performance Improvements",
    'bug fix': "Bug Fixes",
}

# Order in which to show the sections.
relnote_sec_order = [
    'backward-incompatible change',
    'general change',
    'enterprise change',
    'sql change',
    'cli change',
    'admin ui change',
    'bug fix',
    'performance improvement',
    'build change',
]

# Release note category common misspellings.
# (keys are normalized, lower-cased category names seen in commit messages)
cat_misspells = {
    'sql' : 'sql change',
    'general': 'general change',
    'core change': 'general change',
    'bugfix': 'bug fix',
    'performance change' : 'performance improvement',
    'performance' : 'performance improvement',
    'ui' : 'admin ui change',
    'backwards-incompatible change': 'backward-incompatible change',
    'enterprise': 'enterprise change'
}

## Release note format ##
# The following release note formats have been seen in the wild:
#
# Release note (xxx): yyy <- canonical
# Release Notes: None
# Release note (xxx): yyy
# Release note (xxx) : yyy
# Release note: (xxx): yyy
# Release note: xxx: yyy
# Release note: (xxx) yyy
# Release note: yyy (no category)
# Release note (xxx, zzz): yyy
norelnote = re.compile(r'^[rR]elease [nN]otes?: *[Nn]one', flags=re.M)
# Captures :? (xxx) ?: yyy
form1 = r':? *\((?P<cat1>[^)]*)\) *:?'
# Captures : xxx: yyy - this must be careful not to capture too much, we just accept one or two words
form2 = r': *(?P<cat2>[^ ]+(?: +[^ ]+)?) *:'
# Captures : yyy - no category
form3 = r':(?P<cat3>)'
relnote = re.compile(r'(?:^|[\n\r])[rR]elease [nN]otes? *(?:' + form1 + '|' + form2 + '|' + form3 + r') *(?P<note>.*)$', flags=re.S)
coauthor = re.compile(r'^Co-authored-by: (?P<name>[^<]*) <(?P<email>.*)>', flags=re.M)
fixannot = re.compile(r'^([fF]ix(es|ed)?|[cC]lose(d|s)?) #', flags=re.M)

## Merge commit format ##
# The following merge commits have been seen in the wild:
#
# Merge pull request #XXXXX from ... <- GitHub merges
# Merge #XXXXX #XXXXX #XXXXX <- Bors merges
merge_numbers = re.compile(r'^Merge( pull request)?(?P<numbers>( #[0-9]+)+)')
### Initialization / option parsing ###

parser = OptionParser()
parser.add_option("-k", "--sort-key", dest="sort_key", default="title",
                  help="sort by KEY (pr, title, insertions, deletions, files, sha, date; default: title)", metavar="KEY")
parser.add_option("-r", "--reverse", action="store_true", dest="reverse_sort", default=False,
                  help="reverse sort")
parser.add_option("-f", "--from", dest="from_commit",
                  help="list history from COMMIT. Note: the first commit is excluded.", metavar="COMMIT")
parser.add_option("-t", "--until", dest="until_commit", default="HEAD",
                  help="list history up and until COMMIT (default: HEAD)", metavar="COMMIT")
parser.add_option("-p", "--pull-ref", dest="pull_ref_prefix", default="refs/pull/origin",
                  help="prefix for pull request refs (default: refs/pull/origin)", metavar="PREFIX")
parser.add_option("--hide-unambiguous-shas", action="store_true", dest="hide_shas", default=False,
                  help="omit commit SHAs from the release notes and per-contributor sections")
parser.add_option("--hide-per-contributor-section", action="store_true", dest="hide_per_contributor", default=False,
                  help="omit the per-contributor section")
parser.add_option("--hide-downloads-section", action="store_true", dest="hide_downloads", default=False,
                  help="omit the email sign-up and downloads section")
parser.add_option("--hide-header", action="store_true", dest="hide_header", default=False,
                  help="omit the title and date header")
(options, args) = parser.parse_args()
# Shorthands for the parsed options, used throughout the rest of the script.
sortkey = options.sort_key
revsort = options.reverse_sort
pull_ref_prefix = options.pull_ref_prefix
hideshas = options.hide_shas
hidepercontributor = options.hide_per_contributor
hidedownloads = options.hide_downloads
hideheader = options.hide_header
# The script operates on the git repository in the current directory.
repo = Repo('.')
heads = repo.heads
# Resolve the two ends of the commit range; exit(0) with a message on stderr
# when either ref cannot be found.
try:
    firstCommit = repo.commit(options.from_commit)
except:
    print("Unable to find the first commit of the range.", file=sys.stderr)
    print("No ref named %s." % options.from_commit, file=sys.stderr)
    exit(0)
try:
    commit = repo.commit(options.until_commit)
except:
    print("Unable to find the last commit of the range.", file=sys.stderr)
    print("No ref named %s." % options.until_commit, file=sys.stderr)
    exit(0)
if commit == firstCommit:
    print("Commit range is empty!", file=sys.stderr)
    print(parser.get_usage(), file=sys.stderr)
    print("Example use:", file=sys.stderr)
    print("  %s --help" % sys.argv[0], file=sys.stderr)
    print("  %s --from xxx >output.md" % sys.argv[0], file=sys.stderr)
    print("  %s --from xxx --until yyy >output.md" % sys.argv[0], file=sys.stderr)
    print("Note: the first commit is excluded. Use e.g.: --from <prev-release-tag> --until <new-release-candidate-sha>", file=sys.stderr)
    exit(0)
# Check that pull_ref_prefix is valid
testrefname = "%s/1" % pull_ref_prefix
try:
    repo.commit(testrefname)
except:
    print("Unable to find pull request refs at %s." % pull_ref_prefix, file=sys.stderr)
    print("Is your repo set up to fetch them? Try adding", file=sys.stderr)
    print("  fetch = +refs/pull/*/head:%s/*" % pull_ref_prefix, file=sys.stderr)
    print("to the GitHub remote section of .git/config.", file=sys.stderr)
    exit(0)
### Reading data from repository ###

def identify_commit(commit):
    """Format a commit as ``sha ("subject", ctime)`` for human-readable logs."""
    subject = commit.message.split('\n', 1)[0]
    when = datetime.datetime.fromtimestamp(commit.committed_date).ctime()
    return '%s ("%s", %s)' % (commit.hexsha, subject, when)
# Is the first commit reachable from the current one?
base = repo.merge_base(firstCommit, commit)
if len(base) == 0:
    print("error: %s:%s\nand %s:%s\nhave no common ancestor" % (
        options.from_commit, identify_commit(firstCommit),
        options.until_commit, identify_commit(commit)), file=sys.stderr)
    exit(1)
commonParent = base[0]
# If --from is not an ancestor of --until, fall back to their merge base
# as the start of the range and warn the user about the substitution.
if firstCommit != commonParent:
    print("warning: %s:%s\nis not an ancestor of %s:%s!" % (
        options.from_commit, identify_commit(firstCommit),
        options.until_commit, identify_commit(commit)), file=sys.stderr)
    print(file=sys.stderr)
    ageindays = int((firstCommit.committed_date - commonParent.committed_date)/86400)
    prevlen = sum((1 for x in repo.iter_commits(commonParent.hexsha + '...' + firstCommit.hexsha)))
    print("The first common ancestor is %s" % identify_commit(commonParent), file=sys.stderr)
    print("which is %d commits older than %s:%s\nand %d days older. Using that as origin." %\
          (prevlen, options.from_commit, identify_commit(firstCommit), ageindays), file=sys.stderr)
    print(file=sys.stderr)
    firstCommit = commonParent
    options.from_commit = commonParent.hexsha
print("Changes from\n%s\nuntil\n%s" % (identify_commit(firstCommit), identify_commit(commit)), file=sys.stderr)
# release_notes: {category: [item, ...]} filled by completenote().
# missing_release_notes: items for changes that carried no release note.
release_notes = {}
missing_release_notes = []
def collect_authors(commit):
    """Return the set of canonical author names credited on *commit*.

    Credits the commit author, the committer (each skipped when recorded
    as the 'GitHub' web-UI user), and any Co-authored-by trailers, all
    normalized through author_aliases.
    """
    names = set()
    for raw in (commit.author.name, commit.committer.name):
        canonical = author_aliases.get(raw, raw)
        if canonical != 'GitHub':
            names.add(canonical)
    for match in coauthor.finditer(commit.message):
        raw = match.group('name').strip()
        names.add(author_aliases.get(raw, raw))
    return names
def extract_release_notes(pr, title, commit):
    """Parse 'Release note (category): ...' annotations from a commit message.

    Returns a pair (missing_item, authors): *missing_item* is a makeitem()
    dict when the commit carries no release note at all (None otherwise),
    and *authors* is the set of canonical names credited on the commit.
    Side effect: every note found is filed under its category into the
    global release_notes via completenote().
    """
    authors = collect_authors(commit)
    if norelnote.search(commit.message) is not None:
        # Explicitly no release note. Nothing to do.
        # Just report the author(s).
        return None, authors
    msglines = commit.message.split('\n')
    curnote = []      # lines of the note currently being collected
    innote = False    # are we inside a note right now?
    foundnote = False # did this commit yield at least one note?
    cat = None
    notes = []        # accumulated (category, lines) pairs
    for line in msglines:
        m = coauthor.search(line)
        if m is not None:
            # A Co-authored-line finishes the parsing of the commit message,
            # because it's included at the end only.
            break
        m = fixannot.search(line)
        if m is not None:
            # Fix/Close etc. Ignore.
            continue
        m = relnote.search(line)
        if m is None:
            # Current line does not contain a release note separator.
            # If we were already collecting a note, continue collecting it.
            if innote:
                curnote.append(line)
            continue
        # We have a release note boundary. If we were collecting a
        # note already, complete it.
        if innote:
            notes.append((cat, curnote))
            curnote = []
            innote = False
        # Start a new release note.
        firstline = m.group('note').strip()
        if firstline.lower() == 'none':
            # Release note: none - there's no note yet.
            continue
        foundnote = True
        innote = True
        # Capitalize the first line.
        if firstline != "":
            firstline = firstline[0].upper() + firstline[1:]
        curnote = [firstline]
        cat = m.group('cat1')
        if cat is None:
            cat = m.group('cat2')
        if cat is None:
            cat = 'missing category'
        # Normalize to tolerate various capitalizations.
        cat = cat.lower()
        # If there are multiple categories separated by commas or slashes, use the first as grouping key.
        cat = cat.split(',', 1)[0]
        cat = cat.split('/', 1)[0]
        # If there is any misspell, correct it.
        if cat in cat_misspells:
            cat = cat_misspells[cat]
    # Flush a note still open at the end of the message.
    if innote:
        notes.append((cat, curnote))
    # At the end the notes will be presented in reverse order, because
    # we explore the commits in reverse order. However within 1 commit
    # the notes are in the correct order. So reverse them upfront here,
    # so that the 2nd reverse gets them in the right order again.
    for cat, note in reversed(notes):
        completenote(commit, cat, note, authors, pr, title)
    missing_item = None
    if not foundnote:
        # Missing release note. Keep track for later.
        missing_item = makeitem(pr, title, commit.hexsha[:shamin], authors)
    return missing_item, authors
def makeitem(pr, prtitle, sha, authors):
    """Build the canonical per-change record used throughout the report.

    'authors' is rendered as a sorted, comma-separated string; 'note'
    starts as None and is filled in later for annotated changes.
    """
    item = dict.fromkeys(('authors', 'sha', 'pr', 'title', 'note'))
    item['authors'] = ', '.join(sorted(authors))
    item['sha'] = sha
    item['pr'] = pr
    item['title'] = prtitle
    return item
def completenote(commit, cat, curnote, authors, pr, title):
    """File one parsed release note under category *cat* in release_notes.

    *curnote* is the list of note lines; they are joined and stripped
    before being stored on the item.
    """
    item = makeitem(pr, title, commit.hexsha[:shamin], authors)
    item['note'] = '\n'.join(curnote).strip()
    # Now collect per category.
    release_notes.setdefault(cat, []).append(item)
# Accumulators filled while walking the commit range:
# - per_group_history: {comma-joined author names: [item, ...]}, keyed by
#   makeitem()'s 'authors' string (see collect_item)
# - individual_authors: every canonical author name seen
# - allprs: every PR number analyzed
per_group_history = {}
individual_authors = set()
allprs = set()
# Progress spinner state for stderr (see spin()).
spinner = itertools.cycle(['/', '-', '\\', '|'])
counter = 0
def spin():
    """Advance the stderr progress spinner by one tick.

    Only every 10th tick redraws the spinner glyph; every 100th tick
    additionally emits '..' as a coarse progress marker.
    """
    global counter
    counter += 1
    if counter % 10 != 0:
        return
    if counter % 100 == 0:
        print("\b..", end='', file=sys.stderr)
    print("\b", end='', file=sys.stderr)
    print(next(spinner), end='', file=sys.stderr)
    sys.stderr.flush()
# This function groups and counts all the commits that belong to a particular PR.
# Some description is in order regarding the logic here: it should visit all
# commits that are on the PR and only on the PR. If there's some secondary
# branch merge included on the PR, as long as those commits don't otherwise end
# up reachable from the target branch, they'll be included. If there's a back-
# merge from the target branch, that should be excluded.
#
# Examples:
#
# ### secondary branch merged into PR
#
# Dev branched off of K, made a commit J, made a commit G while someone else
# committed H, merged H from the secondary branch to the topic branch in E,
# made a final commit in C, then merged to master in A.
#
# A <-- master
# |\
# | \
# B C <-- PR tip
# | |
# | |
# D E <-- secondary merge
# | |\
# | | \
# F G H <-- secondary branch
# | | /
# | |/
# I J
# | /
# |/
# K <-- merge base
#
# C, E, G, H, and J will each be checked. None of them are ancestors of B,
# so they will all be visited. E will be not be counted because the message
# starts with "Merge", so in the end C, G, H, and J will be included.
#
# ### back-merge from target branch
#
# Dev branched off H, made one commit G, merged the latest F from master in E,
# made one final commit in C, then merged the PR.
#
# A <-- master
# |\
# | \
# B C <-- PR tip
# | |
# | |
# D E <-- back-merge
# | /|
# |/ |
# F G
# | /
# |/
# H <-- merge base
#
# C, E, F, and G will each be checked. F is an ancestor of B, so it will be
# excluded. E starts with "Merge", so it will not be counted. Only C and G will
# have statistics included.
def analyze_pr(merge, pr):
    """Analyze one merged PR given its merge commit and '#NNN' number.

    Walks every commit reachable from the PR's pull ref that is not on the
    target branch (see the diagrams above), extracts release notes from
    each non-merge commit, computes diff stats against the merge base, and
    records the whole PR via collect_item(). PRs where no commit carried a
    release note contribute their commits to missing_release_notes.
    """
    allprs.add(pr)
    refname = pull_ref_prefix + "/" + pr[1:]
    tip = name_to_object(repo, refname)
    # Recover the PR title from the merge commit message.
    noteexpr = re.compile("^%s: (?P<message>.*) r=.* a=.*" % pr[1:], flags=re.M)
    m = noteexpr.search(merge.message)
    note = ''
    if m is None:
        # GitHub merge
        note = merge.message.split('\n',3)[2]
    else:
        # Bors merge
        note = m.group('message')
    note = note.strip()
    merge_base_result = repo.merge_base(merge.parents[0], tip)
    if len(merge_base_result) == 0:
        print("uh-oh! can't find merge base! pr", pr, file=sys.stderr)
        exit(-1)
    merge_base = merge_base_result[0]
    # Breadth-first walk from the PR tip back towards the target branch.
    commits_to_analyze = [tip]
    seen_commits = set()
    missing_items = []
    authors = set()
    ncommits = 0
    while len(commits_to_analyze) > 0:
        spin()
        commit = commits_to_analyze.pop(0)
        if commit in seen_commits:
            # We may be seeing the same commit twice if a feature branch has
            # been forked in sub-branches. Just skip over what we've seen
            # already.
            continue
        seen_commits.add(commit)
        if not commit.message.startswith("Merge"):
            missing_item, prauthors = extract_release_notes(pr, note, commit)
            authors.update(prauthors)
            ncommits += 1
            if missing_item is not None:
                missing_items.append(missing_item)
        for parent in commit.parents:
            if not repo.is_ancestor(parent, merge.parents[0]):
                # We're not yet back on the main branch. Just continue digging.
                commits_to_analyze.append(parent)
            else:
                # The parent is on the main branch. We're done digging.
                # print("found merge parent, stopping. final authors", authors)
                pass
    if ncommits == len(missing_items):
        # None of the commits found had a release note. List them.
        for item in missing_items:
            missing_release_notes.append(item)
    # Diff stats for the whole PR: merge base vs. PR tip.
    text = repo.git.diff(merge_base.hexsha, tip.hexsha, '--', numstat=True)
    stats = Stats._list_from_string(repo, text)
    collect_item(pr, note, merge.hexsha[:shamin], ncommits, authors, stats.total, merge.committed_date)
def collect_item(pr, prtitle, sha, ncommits, authors, stats, prts):
    """Record one merged change in the per-contributor history tables.

    *stats* is a mapping with 'insertions'/'deletions'/'files'/'lines';
    *prts* is the merge commit timestamp. Changes with no credited author
    are filed under "Unknown Author".
    """
    individual_authors.update(authors)
    if not authors:
        authors.add("Unknown Author")
    item = makeitem(pr, prtitle, sha, authors)
    item['ncommits'] = ncommits
    item['insertions'] = stats['insertions']
    item['deletions'] = stats['deletions']
    item['files'] = stats['files']
    item['lines'] = stats['lines']
    item['date'] = datetime.date.fromtimestamp(prts).isoformat()
    per_group_history.setdefault(item['authors'], []).append(item)
def analyze_standalone_commit(commit):
    """Account for a commit merged outside any PR, filed under '#unknown'.

    Such commits never carry a PR-level release note, so they are always
    listed among the missing release notes.
    """
    authors = collect_authors(commit)
    subject = commit.message.split('\n', 1)[0].strip()
    short_sha = commit.hexsha[:shamin]
    missing_release_notes.append(makeitem('#unknown', subject, short_sha, authors))
    collect_item('#unknown', subject, short_sha, 1, authors, commit.stats.total, commit.committed_date)
# Walk the range from the --until commit back to the --from commit along
# first parents, dispatching each commit to analyze_pr (when its message
# matches a GitHub/Bors merge) or analyze_standalone_commit.
while commit != firstCommit:
    spin()
    ctime = datetime.datetime.fromtimestamp(commit.committed_date).ctime()
    numbermatch = merge_numbers.search(commit.message)
    # Analyze the commit
    if numbermatch is not None:
        prs = numbermatch.group("numbers").strip().split(" ")
        for pr in prs:
            print("  \r%s (%s) " % (pr, ctime), end='', file=sys.stderr)
            analyze_pr(commit, pr)
    else:
        print("  \r%s (%s) " % (commit.hexsha[:shamin], ctime), end='', file=sys.stderr)
        analyze_standalone_commit(commit)
    if len(commit.parents) == 0:
        break
    commit = commit.parents[0]
# Contributor groups, sorted case-insensitively for the per-author section.
allgroups = list(per_group_history.keys())
allgroups.sort(key=lambda x:x.lower())
print("\b\nComputing first-time contributors...", end='', file=sys.stderr)
ext_contributors = individual_authors - crdb_folk
firsttime_contributors = []
for a in individual_authors:
    # Find all aliases known for this person
    aliases = [a]
    for alias, name in author_aliases.items():
        if name == a:
            aliases.append(alias)
    # Collect the history for every alias
    hist = b''
    for al in aliases:
        spin()
        c = subprocess.run(["git", "log", "--author=%s" % al, options.from_commit, '-n', '1'], stdout=subprocess.PIPE, check=True)
        hist += c.stdout
    if len(hist) == 0:
        # No commit from that author older than the first commit
        # selected, so that's a first-time author.
        firsttime_contributors.append(a)
print("\b\n", file=sys.stderr)
sys.stderr.flush()
### Presentation of results ###

## Print the release notes.
# Start with known sections.
# The two versions bracketing the range, as produced by `git describe`.
current_version = subprocess.check_output(["git", "describe", "--tags", options.until_commit], universal_newlines=True).strip()
previous_version = subprocess.check_output(["git", "describe", "--tags", options.from_commit], universal_newlines=True).strip()
if not hideheader:
    print("---")
    print("title: What's New in", current_version)
    print("toc: false")
    print("summary: Additions and changes in CockroachDB version", current_version, "since version", previous_version)
    print("---")
    print()
    print("## " + time.strftime("%B %d, %Y"))
    print()

## Print the release notes sign-up and Downloads section.
if not hidedownloads:
    print("""Get future release notes emailed to you:
<div class="hubspot-install-form install-form-1 clearfix">
<script>
hbspt.forms.create({
css: '',
cssClass: 'install-form',
portalId: '1753393',
formId: '39686297-81d2-45e7-a73f-55a596a8d5ff',
formInstanceId: 1,
target: '.install-form-1'
});
</script>
</div>""")
    print()
    print("""### Downloads
<div id="os-tabs" class="clearfix">
<a href="https://binaries.cockroachdb.com/cockroach-""" + current_version + """.darwin-10.9-amd64.tgz"><button id="mac" data-eventcategory="mac-binary-release-notes">Mac</button></a>
<a href="https://binaries.cockroachdb.com/cockroach-""" + current_version + """.linux-amd64.tgz"><button id="linux" data-eventcategory="linux-binary-release-notes">Linux</button></a>
<a href="https://binaries.cockroachdb.com/cockroach-""" + current_version + """.windows-6.2-amd64.zip"><button id="windows" data-eventcategory="windows-binary-release-notes">Windows</button></a>
<a href="https://binaries.cockroachdb.com/cockroach-""" + current_version + """.src.tgz"><button id="source" data-eventcategory="source-release-notes">Source</button></a>
</div>""")
    print()
# PRs and SHAs actually mentioned in the output; used at the end to emit
# the markdown link reference definitions.
seenshas = set()
seenprs = set()
def renderlinks(item):
    """Render markdown link references '[#pr][#pr] [sha][sha]' for *item*.

    The SHA part is omitted under --hide-unambiguous-shas. Every rendered
    PR/SHA is remembered in seenprs/seenshas so the link definitions can
    be emitted at the end of the report.
    """
    seenprs.add(item['pr'])
    rendered = '[%(pr)s][%(pr)s]' % item
    if not hideshas:
        seenshas.add(item['sha'])
        rendered += ' [%(sha)s][%(sha)s]' % item
    return rendered
# Emit the known sections in their canonical order; items are reversed
# because the commit walk collected them newest-first.
for sec in relnote_sec_order:
    r = release_notes.get(sec, None)
    if r is None:
        # No change in this section, nothing to print.
        continue
    sectitle = relnotetitles[sec]
    print("###", sectitle)
    print()
    for item in reversed(r):
        print("-", item['note'].replace('\n', '\n  '), renderlinks(item))
    print()
# Any categories not covered by relnote_sec_order go into "Miscellaneous".
extrasec = set()
for sec in release_notes:
    if sec in relnote_sec_order:
        # already handled above, don't do anything.
        continue
    extrasec.add(sec)
if len(extrasec) > 0 or len(missing_release_notes) > 0:
    print("### Miscellaneous")
    print()
    if len(extrasec) > 0:
        extrasec_sorted = sorted(list(extrasec))
        for extrasec in extrasec_sorted:
            print("#### %s" % extrasec.title())
            print()
            for item in release_notes[extrasec]:
                print("-", item['note'].replace('\n', '\n  '), renderlinks(item))
            print()
    if len(missing_release_notes) > 0:
        print("#### Changes without release note annotation")
        print()
        for item in missing_release_notes:
            authors = item['authors']
            print("- [%(pr)s][%(pr)s] [%(sha)s][%(sha)s] %(title)s" % item, "(%s)" % authors)
            seenshas.add(item['sha'])
            seenprs.add(item['pr'])
        print()
## Print the Doc Updates section.
print("### Doc Updates")
print()
print("Docs team: Please add these manually.")
print()
## Print the Contributors section.
print("### Contributors")
print()
print("This release includes %d merged PR%s by %s author%s." %
      (len(allprs), len(allprs) != 1 and "s" or "",
       len(individual_authors), (len(individual_authors) != 1 and "s" or ""),
      ))
ext_contributors = individual_authors - crdb_folk
notified_authors = sorted(set(ext_contributors) | set(firsttime_contributors))
if len(notified_authors) > 0:
    print("We would like to thank the following contributors from the CockroachDB community:")
    print()
    for person in notified_authors:
        print("-", person, end='')
        if person in firsttime_contributors:
            annot = ""
            if person in crdb_folk:
                annot = ", CockroachDB team member"
            print(" (first-time contributor%s)" % annot, end='')
        print()
    print()
## Print the per-author contribution list.
if not hidepercontributor:
    print("### PRs merged by contributors")
    print()
    if not hideshas:
        fmt = "  - %(date)s [%(pr)-6s][%(pr)-6s] [%(sha)s][%(sha)s] (+%(insertions)4d -%(deletions)4d ~%(lines)4d/%(files)2d) %(title)s"
    else:
        fmt = "  - %(date)s [%(pr)-6s][%(pr)-6s] (+%(insertions)4d -%(deletions)4d ~%(lines)4d/%(files)2d) %(title)s"
    for group in allgroups:
        items = per_group_history[group]
        print("- %s:" % group)
        # Sort each contributor's PRs by the user-selected --sort-key.
        items.sort(key=lambda x:x[sortkey],reverse=not revsort)
        for item in items:
            print(fmt % item, end='')
            if not hideshas:
                seenshas.add(item['sha'])
            seenprs.add(item['pr'])
            ncommits = item['ncommits']
            if ncommits > 1:
                print(" (", end='')
                print("%d commits" % ncommits, end='')
                print(")", end='')
            print()
        print()
    print()
# Link the PRs and SHAs
for pr in sorted(seenprs):
    print("[%s]: https://github.com/cockroachdb/cockroach/pull/%s" % (pr, pr[1:]))
for sha in sorted(seenshas):
    print("[%s]: https://github.com/cockroachdb/cockroach/commit/%s" % (sha, sha))
print()
751675da127fe79df625591e58b137a9405d6ac6 | 2,414 | py | Python | utils/test_zensie/test_check.py | alan-turing-institute/CROP | 467956ba8e273daa6afbfafd89bd2c3462a8156e | [
"MIT"
] | 9 | 2020-02-11T17:57:47.000Z | 2022-03-22T14:24:55.000Z | utils/test_zensie/test_check.py | alan-turing-institute/CROP | 467956ba8e273daa6afbfafd89bd2c3462a8156e | [
"MIT"
] | 64 | 2020-02-11T17:35:36.000Z | 2022-03-31T13:19:08.000Z | utils/test_zensie/test_check.py | alan-turing-institute/CROP | 467956ba8e273daa6afbfafd89bd2c3462a8156e | [
"MIT"
] | 2 | 2020-08-16T06:10:24.000Z | 2021-04-15T10:11:51.000Z | import os
import requests
import json
import pandas as pd
from datetime import datetime, timedelta
# 30MHz API key and the sensor check id under test, injected via the
# environment. os.environ[...] raises KeyError at import time when unset.
CONST_CROP_30MHZ_APIKEY = os.environ["CROP_30MHZ_APIKEY"].strip()
CONST_CROP_30MHZ_TEST_T_RH_CHECKID = os.environ["CROP_30MHZ_TEST_T_RH_CHECKID"].strip()
def main():
    """Entry point: run the 30MHz sensor-data check test."""
    test_check()
def get_sensor_data(api_key, check_id, dt_from, dt_to):
    """
    Makes a request to download sensor data for a specified period of time.

    Arguments:
        api_key: api key for authentication
        check_id: sensor identifier
        dt_from: date range from (datetime, formatted as ISO + 'Z' suffix)
        dt_to: date range to (datetime, formatted as ISO + 'Z' suffix)
    Return:
        success: whether data request was succesful
        error: error message ("" on success)
        data_df: sensor data as pandas dataframe (None when the request
            itself failed; an empty frame when the API returned no data)
    """
    success = True
    error = ""
    data_df = None
    headers = {
        'Content-Type': 'application/json',
        'Authorization': api_key,
    }
    url_path = 'https://api.30mhz.com/api/stats/check'
    params = 'statisticType=averages&intervalSize=5m'
    dt_from_iso = dt_from.strftime('%Y-%m-%dT%H:%M:%S') + 'Z'
    dt_to_iso = dt_to.strftime('%Y-%m-%dT%H:%M:%S') + 'Z'
    url = '{}/{}/from/{}/until/{}?{}'.format(url_path, check_id, dt_from_iso, dt_to_iso, params)
    response = requests.get(url, headers=headers)
    if response.status_code == 200:
        # The API appears to return a JSON object keyed by timestamp;
        # transposing yields one row per timestamp — confirm against the
        # 30MHz stats API docs.
        data_df = pd.read_json(response.content).T
        if data_df.empty:
            error = "Request [%s]: no data" % (url)
            success = False
    else:
        error = "Request's [%s] status code: %d" % (url, response.status_code)
        success = False
    if success:
        # Promote the timestamp index to a column and normalize the
        # measurement column names.
        data_df.reset_index(inplace=True, drop=False)
        data_df.rename(columns={'index': 'Timestamp'}, inplace=True)
        for col_name in data_df.columns:
            if ".temperature" in col_name:
                data_df.rename(columns={col_name: 'Temperature'}, inplace=True)
            elif ".humidity" in col_name:
                data_df.rename(columns={col_name: 'Humidity'}, inplace=True)
    return success, error, data_df
def test_check():
    """Pull the last 24h of T/RH readings and assert the request succeeded."""
    check_id = CONST_CROP_30MHZ_TEST_T_RH_CHECKID
    window_start = datetime.now() + timedelta(days=-1)
    window_end = datetime.now()
    ok, error, _ = get_sensor_data(CONST_CROP_30MHZ_APIKEY, check_id, window_start, window_end)
    assert ok, error
# Allow running this test module directly as a script.
if __name__ == "__main__":
    main()
e53c48ce163d123b8687ba601d36605e645ac897 | 44,211 | py | Python | lib/network.py | quietnan/electrum-ftc | 5f72cadd777d80a7235e4860589c425287a67fe9 | [
"MIT"
] | null | null | null | lib/network.py | quietnan/electrum-ftc | 5f72cadd777d80a7235e4860589c425287a67fe9 | [
"MIT"
] | null | null | null | lib/network.py | quietnan/electrum-ftc | 5f72cadd777d80a7235e4860589c425287a67fe9 | [
"MIT"
] | null | null | null | # Electrum - Lightweight Bitcoin Client
# Copyright (c) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
import queue
import os
import stat
import errno
import random
import re
import select
from collections import defaultdict
import threading
import socket
import json
import socks
from . import util
from . import bitcoin
from .bitcoin import *
from . import constants
from .interface import Connection, Interface
from . import blockchain
from .version import ELECTRUM_VERSION, PROTOCOL_VERSION
from .i18n import _
NODES_RETRY_INTERVAL = 60
SERVER_RETRY_INTERVAL = 10
def parse_servers(result):
    """Parse a 'server.peers.subscribe' result into a {host: features} dict.

    Each item looks like [ip, host, [feature, ...]] where a feature is one
    of 's<port>'/'t<port>' (SSL/TCP port, empty port means the default),
    'v<version>' (protocol version) or 'p<level>' (pruning level).
    """
    # NOTE: the old body did an unused `from .version import PROTOCOL_VERSION`
    # on every call; PROTOCOL_VERSION is never referenced here and is already
    # imported at module level, so the dead import is dropped.
    servers = {}
    for item in result:
        host = item[1]
        out = {}
        version = None
        pruning_level = '-'
        if len(item) > 2:
            for v in item[2]:
                if re.match(r"[st]\d*", v):
                    protocol, port = v[0], v[1:]
                    if port == '':
                        # empty port means "use the default for this transport"
                        port = constants.net.DEFAULT_PORTS[protocol]
                    out[protocol] = port
                elif re.match(r"v(.?)+", v):
                    version = v[1:]
                elif re.match(r"p\d*", v):
                    pruning_level = v[1:]
                    if pruning_level == '':
                        pruning_level = '0'
        if out:
            out['pruning'] = pruning_level
            out['version'] = version
        servers[host] = out
    return servers
def filter_version(servers):
    """Keep only servers whose announced version is at least ours."""
    def is_recent(version):
        try:
            return util.normalize_version(version) >= util.normalize_version(PROTOCOL_VERSION)
        except Exception:
            return False
    recent = {}
    for name, info in servers.items():
        if is_recent(info.get('version')):
            recent[name] = info
    return recent
def filter_protocol(hostmap, protocol = 's'):
    """Return serialized server strings for hosts exposing *protocol*."""
    return [
        serialize_server(host, ports[protocol], protocol)
        for host, ports in hostmap.items()
        if ports.get(protocol)
    ]
def pick_random_server(hostmap = None, protocol = 's', exclude_set = set()):
    """Pick one eligible server at random; None when no candidate remains."""
    if hostmap is None:
        hostmap = constants.net.DEFAULT_SERVERS
    candidates = list(set(filter_protocol(hostmap, protocol)) - exclude_set)
    if not candidates:
        return None
    return random.choice(candidates)
from .simple_config import SimpleConfig
# Supported proxy transports; the index (plus one) maps onto the socks
# module's PROXY_TYPE_* constants.
proxy_modes = ['socks4', 'socks5', 'http']

def serialize_proxy(p):
    """Encode a proxy dict as 'mode:host:port:user:password', None otherwise."""
    if not isinstance(p, dict):
        return None
    fields = [p.get('mode'), p.get('host'), p.get('port'),
              p.get('user', ''), p.get('password', '')]
    return ':'.join(fields)

def deserialize_proxy(s):
    """Decode 'mode:host:port:user:password' into a proxy dict.

    Non-strings and the literal (case-insensitive) string 'none' yield
    None.  Missing fields fall back to socks5/localhost and the default
    port for the chosen mode.
    """
    if not isinstance(s, str):
        return None
    if s.lower() == 'none':
        return None
    proxy = {"mode": "socks5", "host": "localhost"}
    tokens = s.split(':')
    idx = 0
    if tokens[idx] in proxy_modes:
        proxy["mode"] = tokens[idx]
        idx += 1
    if idx < len(tokens):
        proxy["host"] = tokens[idx]
        idx += 1
    if idx < len(tokens):
        proxy["port"] = tokens[idx]
        idx += 1
    else:
        proxy["port"] = "8080" if proxy["mode"] == "http" else "1080"
    if idx < len(tokens):
        proxy["user"] = tokens[idx]
        idx += 1
    if idx < len(tokens):
        proxy["password"] = tokens[idx]
    return proxy
def deserialize_server(server_str):
    """Split 'host:port:protocol' into (host, port, protocol).

    rsplit is used so hosts containing colons survive.  Raises ValueError
    when the protocol is not exactly 's' or 't', or the port is not an
    integer.
    """
    host, port, protocol = str(server_str).rsplit(':', 2)
    # Membership in a tuple, not the string 'st': the old substring check
    # wrongly accepted '' and 'st' as protocols.
    if protocol not in ('s', 't'):
        raise ValueError('invalid network protocol: {}'.format(protocol))
    int(port)    # Throw if cannot be converted to int
    return host, port, protocol
def serialize_server(host, port, protocol):
    """Join host/port/protocol into canonical 'host:port:protocol' form.

    Generalized to coerce each part with str(), so integer ports no
    longer raise TypeError inside ':'.join.
    """
    return ':'.join([str(host), str(port), str(protocol)])
class Network(util.DaemonThread):
"""The Network class manages a set of connections to remote electrum
servers, each connected socket is handled by an Interface() object.
Connections are initiated by a Connection() thread which stops once
the connection succeeds or fails.
Our external API:
- Member functions get_header(), get_interfaces(), get_local_height(),
get_parameters(), get_server_height(), get_status_value(),
is_connected(), set_parameters(), stop()
"""
    def __init__(self, config=None):
        """Initialize network state and start connecting.

        config may be a plain dict of overrides or a SimpleConfig; server,
        proxy, blockchain index and recent-server settings are read from it.
        """
        if config is None:
            config = {}  # Do not use mutables as default values!
        util.DaemonThread.__init__(self)
        self.config = SimpleConfig(config) if isinstance(config, dict) else config
        # 'oneserver' mode disables the pool of redundant connections.
        self.num_server = 10 if not self.config.get('oneserver') else 0
        self.blockchains = blockchain.read_blockchains(self.config)
        self.print_error("blockchains", self.blockchains.keys())
        self.blockchain_index = config.get('blockchain_index', 0)
        if self.blockchain_index not in self.blockchains.keys():
            self.blockchain_index = 0
        # Server for addresses and transactions
        self.default_server = self.config.get('server', None)
        # Sanitize default server
        if self.default_server:
            try:
                deserialize_server(self.default_server)
            except:
                self.print_error('Warning: failed to parse server-string; falling back to random.')
                self.default_server = None
        if not self.default_server:
            self.default_server = pick_random_server()
        self.lock = threading.Lock()
        # Client sends queued until the network thread processes them.
        self.pending_sends = []
        self.message_id = 0
        self.debug = False
        self.irc_servers = {} # returned by interface (list from irc)
        self.recent_servers = self.read_recent_servers()
        self.banner = ''
        self.donation_address = ''
        self.relay_fee = None
        # callbacks passed with subscriptions
        self.subscriptions = defaultdict(list)
        self.sub_cache = {}
        # callbacks set by the GUI
        self.callbacks = defaultdict(list)
        dir_path = os.path.join( self.config.path, 'certs')
        if not os.path.exists(dir_path):
            os.mkdir(dir_path)
            os.chmod(dir_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
        # subscriptions and requests
        self.subscribed_addresses = set()
        # scripthash -> address reverse map (see addr_to_scripthash)
        self.h2addr = {}
        # Requests from client we've not seen a response to
        self.unanswered_requests = {}
        # retry times
        self.server_retry_time = time.time()
        self.nodes_retry_time = time.time()
        # kick off the network. interface is the main server we are currently
        # communicating with. interfaces is the set of servers we are connecting
        # to or have an ongoing connection with
        self.interface = None
        self.interfaces = {}
        self.auto_connect = self.config.get('auto_connect', True)
        self.connecting = set()
        self.requested_chunks = set()
        self.socket_queue = queue.Queue()
        self.start_network(deserialize_server(self.default_server)[2],
                           deserialize_proxy(self.config.get('proxy')))
def register_callback(self, callback, events):
with self.lock:
for event in events:
self.callbacks[event].append(callback)
def unregister_callback(self, callback):
with self.lock:
for callbacks in self.callbacks.values():
if callback in callbacks:
callbacks.remove(callback)
def trigger_callback(self, event, *args):
with self.lock:
callbacks = self.callbacks[event][:]
[callback(event, *args) for callback in callbacks]
def read_recent_servers(self):
if not self.config.path:
return []
path = os.path.join(self.config.path, "recent_servers")
try:
with open(path, "r", encoding='utf-8') as f:
data = f.read()
return json.loads(data)
except:
return []
def save_recent_servers(self):
if not self.config.path:
return
path = os.path.join(self.config.path, "recent_servers")
s = json.dumps(self.recent_servers, indent=4, sort_keys=True)
try:
with open(path, "w", encoding='utf-8') as f:
f.write(s)
except:
pass
    def get_server_height(self):
        # Tip height reported by the main interface, 0 while disconnected.
        return self.interface.tip if self.interface else 0
def server_is_lagging(self):
sh = self.get_server_height()
if not sh:
self.print_error('no height for main interface')
return True
lh = self.get_local_height()
result = (lh - sh) > 1
if result:
self.print_error('%s is lagging (%d vs %d)' % (self.default_server, sh, lh))
return result
    def set_status(self, status):
        # Record the connection status and broadcast it to GUI callbacks.
        self.connection_status = status
        self.notify('status')
    def is_connected(self):
        # Connected iff a main interface has been established.
        return self.interface is not None
    def is_connecting(self):
        # True while the main server is still being dialled.
        return self.connection_status == 'connecting'
    def is_up_to_date(self):
        # Up to date once every client request has been answered.
        return self.unanswered_requests == {}
    def queue_request(self, method, params, interface=None):
        # If you want to queue a request on any interface it must go
        # through this function so message ids are properly tracked.
        # Returns the allocated message id so callers can match the reply.
        if interface is None:
            interface = self.interface
        message_id = self.message_id
        self.message_id += 1
        if self.debug:
            self.print_error(interface.host, "-->", method, params, message_id)
        interface.queue_request(method, params, message_id)
        return message_id
    def send_subscriptions(self):
        # Called after switching interface: replay still-unanswered client
        # requests and re-establish every server/address subscription on
        # the new main server.
        self.print_error('sending subscriptions to', self.interface.server, len(self.unanswered_requests), len(self.subscribed_addresses))
        self.sub_cache.clear()
        # Resend unanswered requests (they get fresh message ids)
        requests = self.unanswered_requests.values()
        self.unanswered_requests = {}
        if self.interface.ping_required():
            params = [ELECTRUM_VERSION, PROTOCOL_VERSION]
            self.queue_request('server.version', params, self.interface)
        for request in requests:
            message_id = self.queue_request(request[0], request[1])
            self.unanswered_requests[message_id] = request
        self.queue_request('server.banner', [])
        self.queue_request('server.donation_address', [])
        self.queue_request('server.peers.subscribe', [])
        self.request_fee_estimates()
        self.queue_request('blockchain.relayfee', [])
        for h in list(self.subscribed_addresses):
            self.queue_request('blockchain.scripthash.subscribe', [h])
def request_fee_estimates(self):
from .simple_config import FEE_ETA_TARGETS
self.config.requested_fee_estimates()
self.queue_request('mempool.get_fee_histogram', [])
for i in FEE_ETA_TARGETS:
self.queue_request('blockchain.estimatefee', [i])
def get_status_value(self, key):
if key == 'status':
value = self.connection_status
elif key == 'banner':
value = self.banner
elif key == 'fee':
value = self.config.fee_estimates
elif key == 'fee_histogram':
value = self.config.mempool_fees
elif key == 'updated':
value = (self.get_local_height(), self.get_server_height())
elif key == 'servers':
value = self.get_servers()
elif key == 'interfaces':
value = self.get_interfaces()
return value
def notify(self, key):
if key in ['status', 'updated']:
self.trigger_callback(key)
else:
self.trigger_callback(key, self.get_status_value(key))
    def get_parameters(self):
        # (host, port, protocol, proxy, auto_connect) as used by the GUI.
        host, port, protocol = deserialize_server(self.default_server)
        return host, port, protocol, self.proxy, self.auto_connect
    def get_donation_address(self):
        # Server-announced donation address; None while disconnected.
        if self.is_connected():
            return self.donation_address
    def get_interfaces(self):
        '''Server names of the interfaces that are in connected state.'''
        return list(self.interfaces.keys())
def get_servers(self):
out = constants.net.DEFAULT_SERVERS
if self.irc_servers:
out.update(filter_version(self.irc_servers.copy()))
else:
for s in self.recent_servers:
try:
host, port, protocol = deserialize_server(s)
except:
continue
if host not in out:
out[host] = { protocol:port }
return out
def start_interface(self, server):
if (not server in self.interfaces and not server in self.connecting):
if server == self.default_server:
self.print_error("connecting to %s as new interface" % server)
self.set_status('connecting')
self.connecting.add(server)
c = Connection(server, self.socket_queue, self.config.path)
def start_random_interface(self):
exclude_set = self.disconnected_servers.union(set(self.interfaces))
server = pick_random_server(self.get_servers(), self.protocol, exclude_set)
if server:
self.start_interface(server)
    def start_interfaces(self):
        # Connect the main server first, then fill the pool up to num_server.
        self.start_interface(self.default_server)
        for i in range(self.num_server - 1):
            self.start_random_interface()
    def set_proxy(self, proxy):
        # Install (or remove) a SOCKS/HTTP proxy by monkey-patching the
        # socket module; affects every socket created afterwards.
        self.proxy = proxy
        # Store these somewhere so we can un-monkey-patch
        if not hasattr(socket, "_socketobject"):
            socket._socketobject = socket.socket
            socket._getaddrinfo = socket.getaddrinfo
        if proxy:
            self.print_error('setting proxy', proxy)
            # socks PROXY_TYPE_* constants are 1-based in proxy_modes order.
            proxy_mode = proxy_modes.index(proxy["mode"]) + 1
            socks.setdefaultproxy(proxy_mode,
                                  proxy["host"],
                                  int(proxy["port"]),
                                  # socks.py seems to want either None or a non-empty string
                                  username=(proxy.get("user", "") or None),
                                  password=(proxy.get("password", "") or None))
            socket.socket = socks.socksocket
            # prevent dns leaks, see http://stackoverflow.com/questions/13184205/dns-over-proxy
            socket.getaddrinfo = lambda *args: [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
        else:
            # Restore the pristine socket implementation.
            socket.socket = socket._socketobject
            socket.getaddrinfo = socket._getaddrinfo
    def start_network(self, protocol, proxy):
        # Fresh start: no interface may survive from a previous session.
        assert not self.interface and not self.interfaces
        assert not self.connecting and self.socket_queue.empty()
        self.print_error('starting network')
        self.disconnected_servers = set([])
        self.protocol = protocol
        self.set_proxy(proxy)
        self.start_interfaces()
    def stop_network(self):
        # Tear down every interface (main one included) and discard any
        # connection attempts still in flight.
        self.print_error("stopping network")
        for interface in list(self.interfaces.values()):
            self.close_interface(interface)
        if self.interface:
            self.close_interface(self.interface)
        assert self.interface is None
        assert not self.interfaces
        self.connecting = set()
        # Get a new queue - no old pending connections thanks!
        self.socket_queue = queue.Queue()
    def set_parameters(self, host, port, protocol, proxy, auto_connect):
        """Apply new connection settings; restart or switch as needed."""
        proxy_str = serialize_proxy(proxy)
        server = serialize_server(host, port, protocol)
        # sanitize parameters
        try:
            deserialize_server(serialize_server(host, port, protocol))
            if proxy:
                proxy_modes.index(proxy["mode"]) + 1
                int(proxy['port'])
        except:
            return
        self.config.set_key('auto_connect', auto_connect, False)
        self.config.set_key("proxy", proxy_str, False)
        self.config.set_key("server", server, True)
        # abort if changes were not allowed by config
        if self.config.get('server') != server or self.config.get('proxy') != proxy_str:
            return
        self.auto_connect = auto_connect
        if self.proxy != proxy or self.protocol != protocol:
            # Restart the network defaulting to the given server
            self.stop_network()
            self.default_server = server
            self.start_network(protocol, proxy)
        elif self.default_server != server:
            # Same transport/proxy, different server: just switch over.
            self.switch_to_interface(server)
        else:
            # Nothing structural changed; re-check for lag.
            self.switch_lagging_interface()
            self.notify('updated')
def switch_to_random_interface(self):
'''Switch to a random connected server other than the current one'''
servers = self.get_interfaces() # Those in connected state
if self.default_server in servers:
servers.remove(self.default_server)
if servers:
self.switch_to_interface(random.choice(servers))
def switch_lagging_interface(self):
'''If auto_connect and lagging, switch interface'''
if self.server_is_lagging() and self.auto_connect:
# switch to one that has the correct header (not height)
header = self.blockchain().read_header(self.get_local_height())
filtered = list(map(lambda x:x[0], filter(lambda x: x[1].tip_header==header, self.interfaces.items())))
if filtered:
choice = random.choice(filtered)
self.switch_to_interface(choice)
    def switch_to_interface(self, server):
        '''Switch to server as our interface. If no connection exists nor
        being opened, start a thread to connect. The actual switch will
        happen on receipt of the connection notification. Do nothing
        if server already is our interface.'''
        self.default_server = server
        if server not in self.interfaces:
            # Not connected yet: drop the main interface and dial out; the
            # switch completes in new_interface() once the socket is up.
            self.interface = None
            self.start_interface(server)
            return
        i = self.interfaces[server]
        if self.interface != i:
            self.print_error("switching to", server)
            # stop any current interface in order to terminate subscriptions
            # fixme: we don't want to close headers sub
            #self.close_interface(self.interface)
            self.interface = i
            self.send_subscriptions()
            self.set_status('connected')
            self.notify('updated')
def close_interface(self, interface):
if interface:
if interface.server in self.interfaces:
self.interfaces.pop(interface.server)
if interface.server == self.default_server:
self.interface = None
interface.close()
def add_recent_server(self, server):
# list is ordered
if server in self.recent_servers:
self.recent_servers.remove(server)
self.recent_servers.insert(0, server)
self.recent_servers = self.recent_servers[0:20]
self.save_recent_servers()
def process_response(self, interface, response, callbacks):
if self.debug:
self.print_error("<--", response)
error = response.get('error')
result = response.get('result')
method = response.get('method')
params = response.get('params')
# We handle some responses; return the rest to the client.
if method == 'server.version':
interface.server_version = result
elif method == 'blockchain.headers.subscribe':
if error is None:
self.on_notify_header(interface, result)
elif method == 'server.peers.subscribe':
if error is None:
self.irc_servers = parse_servers(result)
self.notify('servers')
elif method == 'server.banner':
if error is None:
self.banner = result
self.notify('banner')
elif method == 'server.donation_address':
if error is None:
self.donation_address = result
elif method == 'mempool.get_fee_histogram':
if error is None:
self.print_error('fee_histogram', result)
self.config.mempool_fees = result
self.notify('fee_histogram')
elif method == 'blockchain.estimatefee':
if result <= 0:
result = 0.01
if error is None and result > 0:
i = params[0]
fee = int(result*COIN)
self.config.update_fee_estimates(i, fee)
self.print_error("fee_estimates[%d]" % i, fee)
self.notify('fee')
elif method == 'blockchain.relayfee':
if error is None:
self.relay_fee = int(result * COIN) if result is not None else None
self.print_error("relayfee", self.relay_fee)
elif method == 'blockchain.block.get_chunk':
self.on_get_chunk(interface, response)
elif method == 'blockchain.block.get_header':
self.on_get_header(interface, response)
for callback in callbacks:
callback(response)
    def get_index(self, method, params):
        """Hashable key ('method' or 'method:first-param') used for the
        subscriptions map and the subscription cache."""
        return str(method) + (':' + str(params[0]) if params else '')
    def process_responses(self, interface):
        # Drain responses from *interface*, normalise them into canonical
        # request shape and dispatch.  Two cases: replies to requests we
        # issued (request is not None, matched by message id) and
        # unsolicited subscription notifications (request is None).
        responses = interface.get_responses()
        for request, response in responses:
            if request:
                method, params, message_id = request
                k = self.get_index(method, params)
                # client requests go through self.send() with a
                # callback, are only sent to the current interface,
                # and are placed in the unanswered_requests dictionary
                client_req = self.unanswered_requests.pop(message_id, None)
                if client_req:
                    assert interface == self.interface
                    callbacks = [client_req[2]]
                else:
                    # fixme: will only work for subscriptions
                    k = self.get_index(method, params)
                    callbacks = self.subscriptions.get(k, [])
                # Copy the request method and params to the response
                response['method'] = method
                response['params'] = params
                # Only once we've received a response to an addr subscription
                # add it to the list; avoids double-sends on reconnection
                if method == 'blockchain.scripthash.subscribe':
                    self.subscribed_addresses.add(params[0])
            else:
                if not response:  # Closed remotely / misbehaving
                    self.connection_down(interface.server)
                    break
                # Rewrite response shape to match subscription request response
                method = response.get('method')
                params = response.get('params')
                k = self.get_index(method, params)
                if method == 'blockchain.headers.subscribe':
                    response['result'] = params[0]
                    response['params'] = []
                elif method == 'blockchain.scripthash.subscribe':
                    response['params'] = [params[0]]  # addr
                    response['result'] = params[1]
                callbacks = self.subscriptions.get(k, [])
            # update cache if it's a subscription
            if method.endswith('.subscribe'):
                self.sub_cache[k] = response
            # Response is now in canonical form
            self.process_response(interface, response, callbacks)
def addr_to_scripthash(self, addr):
h = bitcoin.address_to_scripthash(addr)
if h not in self.h2addr:
self.h2addr[h] = addr
return h
def overload_cb(self, callback):
def cb2(x):
x2 = x.copy()
p = x2.pop('params')
addr = self.h2addr[p[0]]
x2['params'] = [addr]
callback(x2)
return cb2
def subscribe_to_addresses(self, addresses, callback):
hashes = [self.addr_to_scripthash(addr) for addr in addresses]
msgs = [('blockchain.scripthash.subscribe', [x]) for x in hashes]
self.send(msgs, self.overload_cb(callback))
def request_address_history(self, address, callback):
h = self.addr_to_scripthash(address)
self.send([('blockchain.scripthash.get_history', [h])], self.overload_cb(callback))
    def send(self, messages, callback):
        '''Messages is a list of (method, params) tuples'''
        # Only queued here; actual transmission happens on the network
        # thread in process_pending_sends().
        messages = list(messages)
        with self.lock:
            self.pending_sends.append((messages, callback))
    def process_pending_sends(self):
        # Flush queued client sends: subscriptions get their callback
        # registered (and may be answered from the cache); everything else
        # is queued on the main interface and tracked in
        # unanswered_requests.
        # Requests needs connectivity. If we don't have an interface,
        # we cannot process them.
        if not self.interface:
            return
        with self.lock:
            sends = self.pending_sends
            self.pending_sends = []
        for messages, callback in sends:
            for method, params in messages:
                r = None
                if method.endswith('.subscribe'):
                    k = self.get_index(method, params)
                    # add callback to list
                    l = self.subscriptions.get(k, [])
                    if callback not in l:
                        l.append(callback)
                    self.subscriptions[k] = l
                    # check cached response for subscriptions
                    r = self.sub_cache.get(k)
                if r is not None:
                    self.print_error("cache hit", k)
                    callback(r)
                else:
                    message_id = self.queue_request(method, params)
                    self.unanswered_requests[message_id] = method, params, callback
def unsubscribe(self, callback):
'''Unsubscribe a callback to free object references to enable GC.'''
# Note: we can't unsubscribe from the server, so if we receive
# subsequent notifications process_response() will emit a harmless
# "received unexpected notification" warning
with self.lock:
for v in self.subscriptions.values():
if callback in v:
v.remove(callback)
def connection_down(self, server):
'''A connection to server either went down, or was never made.
We distinguish by whether it is in self.interfaces.'''
self.disconnected_servers.add(server)
if server == self.default_server:
self.set_status('disconnected')
if server in self.interfaces:
self.close_interface(self.interfaces[server])
self.notify('interfaces')
for b in self.blockchains.values():
if b.catch_up == server:
b.catch_up = None
    def new_interface(self, server, socket):
        # A Connection delivered an open socket: wrap it in an Interface,
        # start the headers subscription and, if it is our main server,
        # complete the pending switch.
        # todo: get tip first, then decide which checkpoint to use.
        self.add_recent_server(server)
        interface = Interface(server, socket)
        interface.blockchain = None
        interface.tip_header = None
        interface.tip = 0
        interface.mode = 'default'
        interface.request = None
        self.interfaces[server] = interface
        self.queue_request('blockchain.headers.subscribe', [], interface)
        if server == self.default_server:
            self.switch_to_interface(server)
        #self.notify('interfaces')
    def maintain_sockets(self):
        '''Socket maintenance.'''
        # Responses to connection attempts?
        while not self.socket_queue.empty():
            server, socket = self.socket_queue.get()
            if server in self.connecting:
                self.connecting.remove(server)
            if socket:
                self.new_interface(server, socket)
            else:
                self.connection_down(server)
        # Send pings and shut down stale interfaces
        # must use copy of values
        for interface in list(self.interfaces.values()):
            if interface.has_timed_out():
                self.connection_down(interface.server)
            elif interface.ping_required():
                params = [ELECTRUM_VERSION, PROTOCOL_VERSION]
                self.queue_request('server.version', params, interface)
        now = time.time()
        # nodes: top up the pool and periodically forget failures
        if len(self.interfaces) + len(self.connecting) < self.num_server:
            self.start_random_interface()
        if now - self.nodes_retry_time > NODES_RETRY_INTERVAL:
            self.print_error('network: retrying connections')
            self.disconnected_servers = set([])
            self.nodes_retry_time = now
        # main interface: reconnect (or retry the preferred server)
        if not self.is_connected():
            if self.auto_connect:
                if not self.is_connecting():
                    self.switch_to_random_interface()
            else:
                if self.default_server in self.disconnected_servers:
                    if now - self.server_retry_time > SERVER_RETRY_INTERVAL:
                        self.disconnected_servers.remove(self.default_server)
                        self.server_retry_time = now
                else:
                    self.switch_to_interface(self.default_server)
        else:
            if self.config.is_fee_estimates_update_required():
                self.request_fee_estimates()
    def request_chunk(self, interface, index):
        # Ask *interface* for header chunk *index*, deduplicating requests
        # across interfaces via self.requested_chunks.
        if index in self.requested_chunks:
            return
        interface.print_error("requesting chunk %d" % index)
        self.requested_chunks.add(index)
        self.queue_request('blockchain.block.get_chunk', [index], interface)
    def on_get_chunk(self, interface, response):
        '''Handle receiving a chunk of block headers'''
        error = response.get('error')
        result = response.get('result')
        params = response.get('params')
        blockchain = interface.blockchain
        if result is None or params is None or error is not None:
            interface.print_error(error or 'bad response')
            return
        index = params[0]
        # Ignore unsolicited chunks
        if index not in self.requested_chunks:
            interface.print_error("received chunk %d (unsolicited)" % index)
            return
        else:
            interface.print_error("received chunk %d" % index)
        self.requested_chunks.remove(index)
        connect = blockchain.connect_chunk(index, result)
        if not connect:
            # Chunk did not verify against our chain: drop the server.
            self.connection_down(interface.server)
            return
        # If not finished, get the next chunk
        if index >= len(blockchain.checkpoints) and blockchain.height() < interface.tip:
            self.request_chunk(interface, index+1)
        else:
            interface.mode = 'default'
            interface.print_error('catch up done', blockchain.height())
            blockchain.catch_up = None
        self.notify('updated')
    def request_header(self, interface, height):
        #interface.print_error("requesting header %d" % height)
        self.queue_request('blockchain.block.get_header', [height], interface)
        # Remember what was asked for and when, so timeouts can be detected.
        interface.request = height
        interface.req_time = time.time()
    def on_get_header(self, interface, response):
        '''Handle receiving a single block header.

        Drives the per-interface sync state machine (interface.mode):
        'backward' walks back to bracket the fork point, 'binary' bisects
        between the last good and first bad heights, 'catch_up' downloads
        forward to the server tip.  next_height is the next header to
        request, or None when this phase is finished.
        '''
        header = response.get('result')
        if not header:
            interface.print_error(response)
            self.connection_down(interface.server)
            return
        height = header.get('block_height')
        if interface.request != height:
            interface.print_error("unsolicited header",interface.request, height)
            self.connection_down(interface.server)
            return
        chain = blockchain.check_header(header)
        if interface.mode == 'backward':
            can_connect = blockchain.can_connect(header)
            if can_connect and can_connect.catch_up is None:
                interface.mode = 'catch_up'
                interface.blockchain = can_connect
                interface.blockchain.save_header(header)
                next_height = height + 1
                interface.blockchain.catch_up = interface.server
            elif chain:
                # Found a header on a known chain: bisect for the fork point.
                interface.print_error("binary search")
                interface.mode = 'binary'
                interface.blockchain = chain
                interface.good = height
                next_height = (interface.bad + interface.good) // 2
                assert next_height >= self.max_checkpoint(), (interface.bad, interface.good)
            else:
                if height == 0:
                    self.connection_down(interface.server)
                    next_height = None
                else:
                    # Keep stepping back, doubling the stride each time.
                    interface.bad = height
                    interface.bad_header = header
                    delta = interface.tip - height
                    next_height = max(self.max_checkpoint(), interface.tip - 2 * delta)
        elif interface.mode == 'binary':
            if chain:
                interface.good = height
                interface.blockchain = chain
            else:
                interface.bad = height
                interface.bad_header = header
            if interface.bad != interface.good + 1:
                next_height = (interface.bad + interface.good) // 2
                assert next_height >= self.max_checkpoint()
            elif not interface.blockchain.can_connect(interface.bad_header, check_height=False):
                self.connection_down(interface.server)
                next_height = None
            else:
                # Bisection converged: bad == good + 1 is the fork point.
                branch = self.blockchains.get(interface.bad)
                if branch is not None:
                    if branch.check_header(interface.bad_header):
                        interface.print_error('joining chain', interface.bad)
                        next_height = None
                    elif branch.parent().check_header(header):
                        interface.print_error('reorg', interface.bad, interface.tip)
                        interface.blockchain = branch.parent()
                        next_height = None
                    else:
                        # Existing fork is wrong: truncate and re-seed it.
                        interface.print_error('checkpoint conflicts with existing fork', branch.path())
                        branch.write('', 0)
                        branch.save_header(interface.bad_header)
                        interface.mode = 'catch_up'
                        interface.blockchain = branch
                        next_height = interface.bad + 1
                        interface.blockchain.catch_up = interface.server
                else:
                    bh = interface.blockchain.height()
                    next_height = None
                    if bh > interface.good:
                        if not interface.blockchain.check_header(interface.bad_header):
                            # Genuine divergence: create a new fork.
                            b = interface.blockchain.fork(interface.bad_header)
                            self.blockchains[interface.bad] = b
                            interface.blockchain = b
                            interface.print_error("new chain", b.checkpoint)
                            interface.mode = 'catch_up'
                            next_height = interface.bad + 1
                            interface.blockchain.catch_up = interface.server
                    else:
                        assert bh == interface.good
                        if interface.blockchain.catch_up is None and bh < interface.tip:
                            interface.print_error("catching up from %d"% (bh + 1))
                            interface.mode = 'catch_up'
                            next_height = bh + 1
                            interface.blockchain.catch_up = interface.server
                    self.notify('updated')
        elif interface.mode == 'catch_up':
            can_connect = interface.blockchain.can_connect(header)
            if can_connect:
                interface.blockchain.save_header(header)
                next_height = height + 1 if height < interface.tip else None
            else:
                # go back
                interface.print_error("cannot connect", height)
                interface.mode = 'backward'
                interface.bad = height
                interface.bad_header = header
                next_height = height - 1
            if next_height is None:
                # exit catch_up state
                interface.print_error('catch up done', interface.blockchain.height())
                interface.blockchain.catch_up = None
                self.switch_lagging_interface()
                self.notify('updated')
        else:
            raise Exception(interface.mode)
        # If not finished, get the next header
        if next_height:
            if interface.mode == 'catch_up' and interface.tip > next_height + 50:
                # Far behind: fetch whole 2016-header chunks instead.
                self.request_chunk(interface, next_height // 2016)
            else:
                self.request_header(interface, next_height)
        else:
            interface.mode = 'default'
            interface.request = None
            self.notify('updated')
        # refresh network dialog
        self.notify('interfaces')
def maintain_requests(self):
for interface in list(self.interfaces.values()):
if interface.request and time.time() - interface.request_time > 20:
interface.print_error("blockchain request timed out")
self.connection_down(interface.server)
continue
def wait_on_sockets(self):
# Python docs say Windows doesn't like empty selects.
# Sleep to prevent busy looping
if not self.interfaces:
time.sleep(0.1)
return
rin = [i for i in self.interfaces.values()]
win = [i for i in self.interfaces.values() if i.num_requests()]
try:
rout, wout, xout = select.select(rin, win, [], 0.1)
except socket.error as e:
# TODO: py3, get code from e
code = None
if code == errno.EINTR:
return
raise
assert not xout
for interface in wout:
interface.send_requests()
for interface in rout:
self.process_responses(interface)
    def init_headers_file(self):
        # Pre-allocate the headers file up to the last checkpointed chunk
        # (80 bytes per header, 2016 headers per chunk) so chunk writes
        # can seek anywhere inside it.
        b = self.blockchains[0]
        filename = b.path()
        length = 80 * len(constants.net.CHECKPOINTS) * 2016
        if not os.path.exists(filename) or os.path.getsize(filename) < length:
            with open(filename, 'wb') as f:
                if length>0:
                    # Write one byte at the end to extend the file.
                    f.seek(length-1)
                    f.write(b'\x00')
        with b.lock:
            b.update_size()
    def run(self):
        # Network thread main loop: socket upkeep, select-based I/O,
        # request timeouts, registered jobs and queued client sends.
        self.init_headers_file()
        while self.is_running():
            self.maintain_sockets()
            self.wait_on_sockets()
            self.maintain_requests()
            self.run_jobs()    # Synchronizer and Verifier
            self.process_pending_sends()
        self.stop_network()
        self.on_stop()
    def on_notify_header(self, interface, header):
        # A server announced a new tip header.  If it fits a known chain we
        # may just switch; otherwise start backward search or catch-up.
        height = header.get('block_height')
        if not height:
            return
        if height < self.max_checkpoint():
            # Server claims a tip below our checkpoints: useless/hostile.
            self.connection_down(interface.server)
            return
        interface.tip_header = header
        interface.tip = height
        if interface.mode != 'default':
            return
        b = blockchain.check_header(header)
        if b:
            interface.blockchain = b
            self.switch_lagging_interface()
            self.notify('updated')
            self.notify('interfaces')
            return
        b = blockchain.can_connect(header)
        if b:
            interface.blockchain = b
            b.save_header(header)
            self.switch_lagging_interface()
            self.notify('updated')
            self.notify('interfaces')
            return
        tip = max([x.height() for x in self.blockchains.values()])
        if tip >=0:
            interface.mode = 'backward'
            interface.bad = height
            interface.bad_header = header
            self.request_header(interface, min(tip +1, height - 1))
        else:
            chain = self.blockchains[0]
            if chain.catch_up is None:
                # NOTE(review): catch_up is set to the Interface object here
                # but to a server *string* elsewhere (e.g. on_get_header),
                # while connection_down compares it against a string —
                # confirm the mixed types are intended.
                chain.catch_up = interface
                interface.mode = 'catch_up'
                interface.blockchain = chain
                self.print_error("switching to catchup mode", tip, self.blockchains)
                self.request_header(interface, 0)
            else:
                self.print_error("chain already catching up with", chain.catch_up.server)
    def blockchain(self):
        # The chain the main interface follows; tracks its checkpoint index.
        if self.interface and self.interface.blockchain is not None:
            self.blockchain_index = self.interface.blockchain.checkpoint
        return self.blockchains[self.blockchain_index]
def get_blockchains(self):
out = {}
for k, b in self.blockchains.items():
r = list(filter(lambda i: i.blockchain==b, list(self.interfaces.values())))
if r:
out[k] = r
return out
def follow_chain(self, index):
blockchain = self.blockchains.get(index)
if blockchain:
self.blockchain_index = index
self.config.set_key('blockchain_index', index)
for i in self.interfaces.values():
if i.blockchain == blockchain:
self.switch_to_interface(i.server)
break
else:
raise Exception('blockchain not found', index)
if self.interface:
server = self.interface.server
host, port, protocol, proxy, auto_connect = self.get_parameters()
host, port, protocol = server.split(':')
self.set_parameters(host, port, protocol, proxy, auto_connect)
    def get_local_height(self):
        # Height of the chain we are locally following.
        return self.blockchain().height()
def synchronous_get(self, request, timeout=30):
q = queue.Queue()
self.send([request], q.put)
try:
r = q.get(True, timeout)
except queue.Empty:
raise util.TimeoutException(_('Server did not answer'))
if r.get('error'):
raise Exception(r.get('error'))
return r.get('result')
def broadcast(self, tx, timeout=30):
tx_hash = tx.txid()
try:
out = self.synchronous_get(('blockchain.transaction.broadcast', [str(tx)]), timeout)
except BaseException as e:
return False, "error: " + str(e)
if out != tx_hash:
return False, "error: " + out
return True, out
def export_checkpoints(self, path):
# run manually from the console to generate checkpoints
cp = self.blockchain().get_checkpoints()
with open(path, 'w', encoding='utf-8') as f:
f.write(json.dumps(cp, indent=4))
    def max_checkpoint(self):
        """Return the height of the last checkpointed block.

        Each checkpoint entry covers a 2016-block period; the result is
        clamped to 0 when no checkpoints are configured.
        """
        return max(0, len(constants.net.CHECKPOINTS) * 2016 - 1)
| 40.191818 | 138 | 0.588428 |
a8316f1f480ef7df9d30247922c839c4e6561390 | 6,256 | py | Python | dsl_parser/import_resolver/default_import_resolver.py | mistio/cloudify-dsl-parser | 212864f77591a91ea401c4cfcf99f260d8e41ab3 | [
"Apache-2.0"
] | null | null | null | dsl_parser/import_resolver/default_import_resolver.py | mistio/cloudify-dsl-parser | 212864f77591a91ea401c4cfcf99f260d8e41ab3 | [
"Apache-2.0"
] | null | null | null | dsl_parser/import_resolver/default_import_resolver.py | mistio/cloudify-dsl-parser | 212864f77591a91ea401c4cfcf99f260d8e41ab3 | [
"Apache-2.0"
] | null | null | null | #########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dsl_parser.exceptions import DSLParsingLogicException
from dsl_parser.import_resolver.abstract_import_resolver \
import AbstractImportResolver, read_import
# No prefix-rewrite rules by default: the resolver then simply opens the
# original import URL untouched.
DEFAULT_RULES = []
# Config key under which the rules list is supplied to the resolver.
DEFAULT_RESLOVER_RULES_KEY = 'rules'
class DefaultResolverValidationException(Exception):
    """Raised when the rules supplied to DefaultImportResolver are malformed."""
    pass
class DefaultImportResolver(AbstractImportResolver):
    """
    This class is a default implementation of an import resolver.
    This resolver uses the rules to replace URL's prefix with another prefix
    and tries to resolve the new URL (after the prefix has been replaced).
    If there aren't any rules, none of the rules matches or
    none of the prefix replacements works,
    the resolver will try to use the original URL.
    Each rule in the ``rules`` list is expected to be
    a dictionary with one (key, value) pair which represents
    a prefix and its replacement which can be used to resolve the import url.
    The resolver will go over the rules and for each matching rule
    (its key is a prefix of the url) it will replace the prefix
    with the value and will try to resolve the new url.
    For example:
        The rules list: [
            {'http://prefix1': 'http://prefix1_replacement'},
            {'http://prefix2': 'http://prefix2_replacement1'},
            {'http://prefix2': 'http://prefix2_replacement2'}
        ]
        contains three rules that can be used for resolve URLs that
        starts with 'http://prefix1' and 'http://prefix2'.
        If the url is 'http://prefix2.suffix2.org' than the resolve method
        will find a match in both the second and the third rules.
        It will first try to apply the second rule by replacing the url's
        prefix with the second rule value ('http://prefix2_replacement1')
        and will try to resolve the new url:
        'http://prefix2_replacement1.suffix2.org'.
        In case this url cannot be resolved, it will try to apply
        the third rule by replacing the url's prefix with
        the third rule value ('http://prefix2_replacement2')
        and will try to resolve the url:
        'http://prefix2_replacement2.suffix2.org'.
        If this url, also, cannot be resolved,
        it will try to resolve the original url,
        i.e. http://prefix2.suffix2.org'
        In case that all the resolve attempts will fail,
        a DSLParsingLogicException will be raise.
    """
    def __init__(self, rules=None):
        """Create a resolver.

        :param rules: optional list of single-entry dicts, each mapping a
            URL prefix to its replacement.  Defaults to ``DEFAULT_RULES``
            (no rewriting).
        :raises DefaultResolverValidationException: if *rules* is malformed.
        """
        # set the rules
        self.rules = rules
        if self.rules is None:
            self.rules = DEFAULT_RULES
        self._validate_rules()
    def resolve(self, import_url):
        """Resolve *import_url*, trying every matching prefix-rewrite rule
        first and falling back to the original URL.

        :raises DSLParsingLogicException: if every resolution attempt fails.
        """
        failed_urls = {}
        # trying to find a matching rule that can resolve this url
        for rule in self.rules:
            # the validate method checks that the dict has exactly 1 element
            prefix, value = list(rule.items())[0]
            if import_url.startswith(prefix):
                # found a matching rule
                url_to_resolve = value + import_url[len(prefix):]
                # trying to resolve the resolved_url
                if url_to_resolve not in failed_urls:
                    # there is no point to try to resolve the same url twice
                    try:
                        return read_import(url_to_resolve)
                    except DSLParsingLogicException as ex:
                        # failed to resolve current rule,
                        # continue to the next one
                        failed_urls[url_to_resolve] = str(ex)
        # failed to resolve the url using the rules
        # trying to open the original url
        try:
            return read_import(import_url)
        except DSLParsingLogicException as ex:
            if not self.rules:
                raise
            if not failed_urls:
                # no matching rules
                msg = 'None of the resolver rules {0} was applicable, ' \
                      'failed to resolve the original import url: {1} '\
                    .format(self.rules, ex)
            else:
                # all urls failed to be resolved
                msg = 'Failed to resolve the following urls: {0}. ' \
                      'In addition, failed to resolve the original ' \
                      'import url - {1}'.format(failed_urls, ex)
            ex = DSLParsingLogicException(13, msg)
            ex.failed_import = import_url
            raise ex
    def _validate_rules(self):
        """Check that ``self.rules`` is a list of one-entry dicts.

        :raises DefaultResolverValidationException: on any structural problem.
        """
        if not isinstance(self.rules, list):
            raise DefaultResolverValidationException(
                'Invalid parameters supplied for the default resolver: '
                'The `{0}` parameter must be a list but it is of type {1}.'
                .format(
                    DEFAULT_RESLOVER_RULES_KEY,
                    type(self.rules).__name__))
        for rule in self.rules:
            if not isinstance(rule, dict):
                raise DefaultResolverValidationException(
                    'Invalid parameters supplied for the default resolver: '
                    'Each rule must be a dictionary but the rule '
                    '[{0}] is of type {1}.'
                    .format(rule, type(rule).__name__))
            keys = list(rule.keys())
            if len(keys) != 1:
                raise DefaultResolverValidationException(
                    'Invalid parameters supplied for the default resolver: '
                    'Each rule must be a dictionary with one (key,value) pair '
                    'but the rule [{0}] has {1} keys.'
                    .format(rule, len(keys)))
4b3ba6f2a634e28fb4083437308feb8eddaefbd8 | 394 | py | Python | your_project_name/config/wsgi.py | peadejay/initial-django | d32f6ff8d65be6835369dbe88cf471f3a023a5bb | [
"MIT"
] | null | null | null | your_project_name/config/wsgi.py | peadejay/initial-django | d32f6ff8d65be6835369dbe88cf471f3a023a5bb | [
"MIT"
] | null | null | null | your_project_name/config/wsgi.py | peadejay/initial-django | d32f6ff8d65be6835369dbe88cf471f3a023a5bb | [
"MIT"
] | null | null | null | """
WSGI config for config project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's base settings unless the environment
# already specifies a settings module.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.base")
# Module-level WSGI callable picked up by WSGI servers (gunicorn, uWSGI, ...).
application = get_wsgi_application()
| 23.176471 | 78 | 0.784264 |
6fb9c67ca3207ae9835c1184740a0f544c41779f | 492 | py | Python | play.py | wallinslax/stable-baselines3 | 511221d5a85ecf657f5ed1654a6995977c4913cf | [
"MIT"
] | null | null | null | play.py | wallinslax/stable-baselines3 | 511221d5a85ecf657f5ed1654a6995977c4913cf | [
"MIT"
] | null | null | null | play.py | wallinslax/stable-baselines3 | 511221d5a85ecf657f5ed1654a6995977c4913cf | [
"MIT"
] | null | null | null | import gym
from stable_baselines3 import PPO
from stable_baselines3 import DDPG
ENV_NAME = 'Pendulum-v0'
#ENV_NAME = 'CartPole-v1'
env = gym.make(ENV_NAME)
#model = PPO('MlpPolicy', env, verbose=1)
model = DDPG('MlpPolicy', env, verbose=1)
model.learn(total_timesteps=10000)
obs = env.reset()
for i in range(1000):
action, _states = model.predict(obs, deterministic=True)
obs, reward, done, info = env.step(action)
env.render()
if done:
obs = env.reset()
env.close() | 23.428571 | 60 | 0.703252 |
64cef9b6c910e179bb0f9fa3b512192bfcefddc9 | 3,108 | py | Python | blackbox/gen_combos.py | yangzhou6666/error_recovery_experiment | 24af528524629eef30bf185def9f0e0b194d4953 | [
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | blackbox/gen_combos.py | yangzhou6666/error_recovery_experiment | 24af528524629eef30bf185def9f0e0b194d4953 | [
"Apache-2.0",
"MIT-0",
"MIT"
] | 18 | 2018-04-09T09:50:17.000Z | 2020-05-18T19:45:23.000Z | blackbox/gen_combos.py | yangzhou6666/error_recovery_experiment | 24af528524629eef30bf185def9f0e0b194d4953 | [
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-12-24T14:07:57.000Z | 2020-01-12T15:41:35.000Z | #! /usr/bin/env python2.7
import mysql.connector, os, subprocess, sys, tempfile
from Queue import Full, Queue
from threading import Thread
from atomicint import AtomicInt
GENERATE = 200000 # How many combos to generate?
cnx = mysql.connector.connect(user="",
password="",
host="127.0.0.1",
db="blackbox_production")
q = Queue(GENERATE * 10)
with open("combos", "w") as f:
generated = AtomicInt(0)
class Worker(Thread):
def run(self):
tff, tfp = tempfile.mkstemp()
os.close(tff)
while True:
master_event_id, source_file_id = q.get()
rtn = os.system("timeout -s KILL 10 /tools/nccb/bin/print-compile-input /data/compile-inputs %d %d > %s" % (source_file_id, master_event_id, tfp))
if rtn != 0:
q.task_done()
continue
with open(tfp) as f2:
if f2.read().strip() == "Searching rest":
q.task_done()
continue
rtn = os.system("grmtools/target/release/lrlex grammars/java7/java.l %s > /dev/null 2> /dev/null" % tfp)
if rtn != 0:
q.task_done()
continue
out = subprocess.check_output(["../runner/java_parser_none", tfp])
if "Parsed successfully" in out:
q.task_done()
continue
q.task_done()
i = generated.add(1)
if i >= GENERATE:
return
f.write("%d %d\n" % (source_file_id, master_event_id))
f.flush()
if i % 100 == 0:
sys.stdout.write(".")
sys.stdout.flush()
    c1 = cnx.cursor()
    # Randomly sample failed, user-initiated CompileEvents inside the id
    # window and fetch each one's source file id.
    c1.execute("""SELECT master_events.id, compile_inputs.source_file_id
              FROM master_events straight_join compile_events straight_join compile_inputs
              WHERE compile_events.success=0
              AND master_events.event_id=compile_events.id
              AND (compile_events.reason IS null
              OR compile_events.reason="user")
              AND master_events.event_type="CompileEvent"
              AND master_events.id >= 1000000
              AND master_events.id < 1900000000
              AND compile_inputs.compile_event_id = compile_events.id
              ORDER BY RAND()
              """)
    workers = []
    for _ in range(12):
        w = Worker()
        w.daemon = True
        workers.append(w)
        w.start()
    try:
        # Feed candidates until the workers have written enough combos.
        for r1 in c1:
            if generated.val() == GENERATE:
                break
            q.put(r1, block=True)
    except Exception, e:
        print e
    for w in workers:
        w.join()
    if generated.val() < GENERATE:
        sys.stderr.write("WARNING: exception happened before combos file is complete")
    print
| 34.153846 | 162 | 0.498713 |
7cf3d2123ea95257fce184d7365802fa7419fcc2 | 672 | py | Python | Chapter 06/Chap06_Example6.11.py | bpbpublications/Programming-Techniques-using-Python | 49b785f37e95a3aad1d36cef51e219ac56e5e9f0 | [
"MIT"
] | null | null | null | Chapter 06/Chap06_Example6.11.py | bpbpublications/Programming-Techniques-using-Python | 49b785f37e95a3aad1d36cef51e219ac56e5e9f0 | [
"MIT"
] | null | null | null | Chapter 06/Chap06_Example6.11.py | bpbpublications/Programming-Techniques-using-Python | 49b785f37e95a3aad1d36cef51e219ac56e5e9f0 | [
"MIT"
] | null | null | null | import re
mytxt = '123 \n123 gfh'
mypattern = '\s'
myreplace = ''
mystr = re.sub(mypattern, myreplace, mytxt)
print(mystr) # SUB1
print("-------------")
mystr = re.sub(mypattern, myreplace, mytxt,count =1)
print(mystr) # SUB2
print("-------------")
print(re.sub('st', '*#' , 'Stay safe, stay healthy', flags = re.IGNORECASE)) # SUB3
print("-------------")
print(re.sub('st', '*#' , 'Stay safe, stay healthy')) # SUB4
print("-------------")
print(re.sub('st', '*#' , 'Stay safe, stay healthy', count = 1, flags = re.IGNORECASE))
# SUB5
print("-------------")
print(re.sub(r'\sAND\s', ' & ' , 'The prince and the pauper', flags = re.IGNORECASE))
# SUB6 | 35.368421 | 88 | 0.550595 |
d572a9fc50d28b6a6a5a90bfe7a0a6dfa84b315b | 7,014 | py | Python | model/train_tune_cv.py | demattox/glyBERT | c44ca9dcc6223cc6d92b532d803af818f521adba | [
"MIT"
] | null | null | null | model/train_tune_cv.py | demattox/glyBERT | c44ca9dcc6223cc6d92b532d803af818f521adba | [
"MIT"
] | null | null | null | model/train_tune_cv.py | demattox/glyBERT | c44ca9dcc6223cc6d92b532d803af818f521adba | [
"MIT"
] | null | null | null | import os
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, random_split
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import CSVLogger
import numpy as np
from fairseq.modules import LayerNorm
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
import sys
sys.path.append('/home/bowen/glycan/galy')
from gly_encoder import GlyEncoder,GlyEncoder_el
from galy_dataset import GlyDataset_cfg_cv
from sklearn.metrics import f1_score,roc_curve,auc,roc_auc_score
# Hardware / run configuration.
num_gpu=1
# num_gpu=[1]
# 1 -> GlyEncoder, 2 -> GlyEncoder_el (see glycanbert.__init__).
encode_mode=1
batchsize=128
# Transformer encoder hyper-parameters.
emb_dim=384
ffn_dim=1536
num_head=12
num_layer=12
# Input vocabulary / feature dimensions.
vocab_dim=417
anomeric_dim=3
conn_dim=9
# Hidden sizes of the pretraining classification heads.
dense_dim=128
dense_dim_k=256
# Output sizes of the six out_k* heads.
ddk0=4
ddk1=6
ddk2=28
ddk3=84
ddk4=206
ddk5=408
# NOTE(review): mask_prob is unused in this script -- presumably the
# pretraining masking rate; confirm before removing.
mask_prob=0.35
# Fine-tuning MLP head sizes (single-logit binary output).
densetune0=256
densetune1=128
densetuneout=1
# CLI argument names the cross-validation split; reused for log/checkpoint paths.
file=sys.argv[1]
logfolder='cfg3_logs'
log_name=file
save_path='/home/bowen/glycan/cfgcv3/'+log_name
# Passed as sync_dist to self.log (sic: "sycn").
sycn=False
class glycanbert(pl.LightningModule):
    """Pretrained glycan BERT encoder fine-tuned for binary classification.

    Loads a pretraining checkpoint (encoder plus masked-token / anomeric /
    connection and multi-task heads, all kept so the state dict matches),
    then adds a small randomly-initialised MLP (``tune0..tune2``) whose
    single logit is trained with class-weighted BCE on the CFG CV splits.
    """
    def __init__(self):
        super().__init__()
        # Backbone: transformer encoder over glycan tree encodings.
        if encode_mode == 1:
            self.encoder = GlyEncoder(
                padding_idx=0,
                vocab_size=vocab_dim,
                num_branch=2**11,
                max_branch=50,
                connections=2**9,
                aromatic=4,
                num_encoder_layers=num_layer,
                embedding_dim=emb_dim,
                ffn_embedding_dim=ffn_dim,
                num_attention_heads=num_head,
                dropout=0.1,
                attention_dropout=0.1,
                activation_dropout=0.0,
                layerdrop=0.1,
                max_seq_len=64,
                encoder_normalize_before=True,
                apply_bert_init=True,
                activation_fn="gelu"
            )
        elif encode_mode == 2:
            self.encoder = GlyEncoder_el(
                padding_idx=0,
                vocab_size=vocab_dim,
                num_branch=11,
                max_dep=50,
                connections=9,
                aromatic=3,
                parent=10,
                num_encoder_layers=num_layer,
                embedding_dim=emb_dim,
                ffn_embedding_dim=ffn_dim,
                num_attention_heads=num_head,
                dropout=0.1,
                attention_dropout=0.1,
                activation_dropout=0.0,
                layerdrop=0.1,
                max_seq_len=64,
                encoder_normalize_before=True,
                apply_bert_init=True,
                activation_fn="gelu"
            )
        self.activation_fn = nn.GELU()
        # Masked-language-model head (token prediction); kept only so the
        # pretraining checkpoint's state dict loads cleanly.
        self.mlmproj0 = torch.nn.Conv1d(emb_dim, emb_dim, 1)
        self.mlmproj1 = torch.nn.Conv1d(emb_dim, vocab_dim, 1)
        self.layer_norm = LayerNorm(emb_dim)
        # Masked anomeric-state head (checkpoint compatibility).
        self.mamproj0 = torch.nn.Conv1d(emb_dim, emb_dim, 1)
        self.mamproj1 = torch.nn.Conv1d(emb_dim, anomeric_dim, 1)
        # Masked connection head (checkpoint compatibility).
        self.mcmproj0 = torch.nn.Conv1d(emb_dim, emb_dim, 1)
        self.mcmproj1 = torch.nn.Conv1d(emb_dim, conn_dim, 1)
        # Pretraining classification heads (link / kingdom / immunogenicity).
        self.denselink = nn.Linear(emb_dim, dense_dim)
        self.densekingdom = nn.Linear(emb_dim, dense_dim_k)
        self.denseimmu = nn.Linear(emb_dim, dense_dim)
        self.dropout = nn.Dropout(p=0.0)
        self.out_link = nn.Linear(dense_dim, 3)
        self.out_immu = nn.Linear(dense_dim, 2)
        self.out_k0 = nn.Linear(dense_dim_k, ddk0)
        self.out_k1 = nn.Linear(dense_dim_k, ddk1)
        self.out_k2 = nn.Linear(dense_dim_k, ddk2)
        self.out_k3 = nn.Linear(dense_dim_k, ddk3)
        self.out_k4 = nn.Linear(dense_dim_k, ddk4)
        self.out_k5 = nn.Linear(dense_dim_k, ddk5)
        # Load pretrained weights BEFORE adding the tune layers, so only the
        # fine-tuning head starts from random initialisation.
        cp = torch.load('checkpoint/glycp_encode_1_s28_12_384_12_b512_ml_mlmamc_15_new_nb/glybert-val-epoch=184-val_total=2.986.ckpt')
        self.load_state_dict(cp['state_dict'])
        self.tune0 = nn.Linear(emb_dim, densetune0)
        self.tune1 = nn.Linear(densetune0, densetune1)
        self.tune2 = nn.Linear(densetune1, densetuneout)
    def forward(self, x):
        """Encode *x* and return the fine-tuning head's logits."""
        embedding, rep = self.encoder(x)
        cl = self.dropout(rep)
        cl = self.tune0(cl)
        cl = self.activation_fn(cl)
        cl = self.dropout(cl)
        cl = self.tune1(cl)
        cl = self.activation_fn(cl)
        cl = self.dropout(cl)
        cl = self.tune2(cl)
        return cl
    def training_step(self, batch, batch_idx):
        """One training step: class-weighted BCE loss plus AUC logging."""
        x, y = batch
        cy = self(x)
        # pos_weight = #negatives / #positives in the batch, to counter
        # class imbalance.
        loss = F.binary_cross_entropy_with_logits(
            cy, y.type_as(cy).unsqueeze(1),
            pos_weight=(torch.numel(y) - y.sum()) / y.sum())
        try:
            accuracy = roc_auc_score(
                y.cpu().numpy(), torch.sigmoid(cy).detach().cpu().numpy())
            self.log('auc', accuracy, sync_dist=sycn)
        except ValueError:
            # roc_auc_score raises ValueError when the batch contains only
            # one class; skip AUC logging then.  (Previously a bare except
            # that silently swallowed every exception.)
            pass
        self.log('total', loss, sync_dist=sycn)
        return loss
    def validation_step(self, batch, batch_idx):
        """One validation step: same loss/AUC computation as training."""
        x, y = batch
        cy = self(x)
        loss = F.binary_cross_entropy_with_logits(
            cy, y.type_as(cy).unsqueeze(1),
            pos_weight=(torch.numel(y) - y.sum()) / y.sum())
        try:
            accuracy = roc_auc_score(
                y.cpu().numpy(), torch.sigmoid(cy).detach().cpu().numpy())
            self.log('val_auc', accuracy, sync_dist=sycn)
        except ValueError:
            # Single-class validation batch; AUC undefined.
            pass
        self.log('val_total', loss)
    def configure_optimizers(self):
        """Adam with BERT-style betas/eps and weight decay."""
        optimizer = torch.optim.Adam(
            self.parameters(),
            betas=(0.9, 0.98),
            lr=5e-4,
            eps=1e-06,
            weight_decay=0.01
        )
        return optimizer
    def train_dataloader(self):
        """Training split loader for the selected CV fold.

        NOTE(review): ``train`` is only assigned when encode_mode == 1; any
        other mode raises NameError here (encode_mode is 1 in this script).
        """
        if encode_mode == 1:
            train = GlyDataset_cfg_cv('cfg_cv2/train_' + file + '.pkl')
        return DataLoader(train, batch_size=batchsize, collate_fn=train.collater, num_workers=2)
    def val_dataloader(self):
        """Held-out split loader for the selected CV fold (same caveat as
        train_dataloader for encode_mode != 1)."""
        if encode_mode == 1:
            valid = GlyDataset_cfg_cv('cfg_cv2/test_' + file + '.pkl')
        return DataLoader(valid, batch_size=batchsize, collate_fn=valid.collater, num_workers=2)
gbt = glycanbert()
# Keep the three best checkpoints by validation loss.
checkpoint_val = ModelCheckpoint(
    monitor='val_total',
    dirpath=save_path,
    filename='glybert-val-{epoch:02d}-{val_total:.3f}',
    save_top_k=3,
    mode='min',
)
# Keep the three best checkpoints by training loss.
checkpoint_train = ModelCheckpoint(
    monitor='total',
    dirpath=save_path,
    filename='glybert-train-{epoch:02d}-{total:.3f}',
    save_top_k=3,
    mode='min',
)
logger = CSVLogger(logfolder, name=log_name)
# NOTE(review): neither checkpoint callback is passed to the Trainer
# (e.g. callbacks=[checkpoint_val, checkpoint_train]) -- confirm whether
# checkpointing is actually intended to run.
trainer = pl.Trainer(
    max_epochs=100,
    gpus=num_gpu,
    precision=16,
    accumulate_grad_batches=1,
    gradient_clip_val=0,
    check_val_every_n_epoch=5,
    logger=logger
)
trainer.fit(gbt)
9244ea4206cdcabe1d803fd6edffbfe646855512 | 5,796 | py | Python | thecodebase/thecodebase/settings.py | elmeriniemela/thecodebase | 6ab55231acee78c861baf78b1db3162b16a7316b | [
"MIT"
] | null | null | null | thecodebase/thecodebase/settings.py | elmeriniemela/thecodebase | 6ab55231acee78c861baf78b1db3162b16a7316b | [
"MIT"
] | null | null | null | thecodebase/thecodebase/settings.py | elmeriniemela/thecodebase | 6ab55231acee78c861baf78b1db3162b16a7316b | [
"MIT"
] | null | null | null | """
Django settings for thecodebase project.
Generated by 'django-admin startproject' using Django 2.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# CELERY_BROKER_URL = 'amqp://rabbitmq//' # TODO: This works without network_mode: "host"
# RabbitMQ broker on localhost -- presumably reachable via host networking,
# per the TODO above; verify against the compose setup.
CELERY_BROKER_URL = 'amqp://guest:guest@localhost:5672//'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Read from the environment; startup fails fast (KeyError) if it is missing.
SECRET_KEY = os.environ['SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG env var is parsed as an integer: "1" -> True, "0"/unset -> False.
DEBUG = bool(int(os.environ.get('DEBUG', 0)))
if not DEBUG:
    # Production settings only
    ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', 'localhost,127.0.0.1').split(',')
    # https://docs.djangoproject.com/en/2.2/ref/middleware/#http-strict-transport-security
    SECURE_HSTS_SECONDS = 600
    # https://docs.djangoproject.com/en/2.2/ref/settings/#std:setting-SECURE_CONTENT_TYPE_NOSNIFF
    SECURE_CONTENT_TYPE_NOSNIFF = True
    # https://docs.djangoproject.com/en/2.2/ref/settings/#std:setting-SECURE_BROWSER_XSS_FILTER
    SECURE_BROWSER_XSS_FILTER = True
    # https://docs.djangoproject.com/en/2.2/ref/settings/#std:setting-SECURE_SSL_REDIRECT
    SECURE_SSL_REDIRECT = True
    # https://docs.djangoproject.com/en/2.2/ref/settings/#std:setting-SESSION_COOKIE_SECURE
    SESSION_COOKIE_SECURE = True
    # https://docs.djangoproject.com/en/2.2/ref/settings/#std:setting-CSRF_COOKIE_SECURE
    CSRF_COOKIE_SECURE = True
    # https://docs.djangoproject.com/en/2.2/ref/clickjacking/
    X_FRAME_OPTIONS = 'DENY'
    # https://docs.djangoproject.com/en/2.2/ref/settings/#std:setting-SECURE_HSTS_PRELOAD
    SECURE_HSTS_PRELOAD = True
    # https://docs.djangoproject.com/en/2.2/ref/settings/#std:setting-SECURE_HSTS_INCLUDE_SUBDOMAINS
    # WARNING: Setting this incorrectly can irreversibly (for the value of SECURE_HSTS_SECONDS) break your site.
    SECURE_HSTS_INCLUDE_SUBDOMAINS = True
    # https://docs.djangoproject.com/en/3.0/ref/middleware/#referrer-policy
    SECURE_REFERRER_POLICY = 'same-origin'
else:
    # Development settings
    ALLOWED_HOSTS = [
        '127.0.0.1',
        'localhost',
    ]
    # Simple console logging at INFO level for local development.
    LOGGING = {
        'version': 1,
        'disable_existing_loggers': False,
        'handlers': {
            'console': {
                'level': 'INFO',
                'class': 'logging.StreamHandler',
            },
        },
        'loggers': {
            'django': {
                'handlers': ['console'],
                'level': 'INFO',
                'propagate': True,
            },
        },
    }
# Application definition
INSTALLED_APPS = [
    'main',  # the project's own application
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'thecodebase.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'main', 'templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                # Project-specific processors injecting shared context.
                'main.context_processors.default_context',
                'main.context_processors.topics',
            ],
        },
    },
]
WSGI_APPLICATION = 'thecodebase.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'thecodebase',
        'USER': 'elmeri',
        # NOTE(review): no PASSWORD configured -- presumably relies on
        # peer/trust auth; confirm this is intended for production.
        # 'PASSWORD': 'postgres',
        'HOST': 'localhost',
        'PORT': 5432,
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
# collectstatic gathers everything under build/static.
STATIC_ROOT = os.path.join(BASE_DIR, 'build', 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'main', "static"),
]
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# NOTE(review): these redirect targets are relative paths ('main/home');
# Django usually expects a URL ('/...') or a URL pattern name -- confirm
# they resolve as intended.
LOGIN_REDIRECT_URL = 'main/home'
LOGOUT_REDIRECT_URL = 'main/home'
9322f1100c5ffe7c6cc1d70d8cefd768b0aea117 | 4,022 | py | Python | engine.py | teamoptimusai/homeio-nlu | 920052320df74f2ae6f66d6e88e7443cd99617ef | [
"MIT"
] | null | null | null | engine.py | teamoptimusai/homeio-nlu | 920052320df74f2ae6f66d6e88e7443cd99617ef | [
"MIT"
] | null | null | null | engine.py | teamoptimusai/homeio-nlu | 920052320df74f2ae6f66d6e88e7443cd99617ef | [
"MIT"
] | null | null | null | import torch
import joblib
import argparse
from flask import Flask, request, jsonify
import utils.config as config
from utils.inference import entity_extraction, classification, simplify_entities, simplify_intent, simplify_scenario
class NLUEngine:
    """Joint NLU wrapper around a TorchScript model producing entity,
    intent and scenario predictions for a single sentence."""
    def __init__(self, weights):
        """Load the TorchScript model at *weights* plus the label encoders
        serialized in ``metadata.bin``."""
        self.tokenizer = config.TOKENIZER
        self.max_len = config.MAX_LEN
        self.device = config.DEVICE
        self.model = torch.jit.load(weights).to(self.device).eval()
        # Label encoders fitted during training.
        self.metadata = joblib.load('metadata.bin')
        self.enc_entity = self.metadata['enc_entity']
        self.enc_intent = self.metadata['enc_intent']
        self.enc_scenario = self.metadata['enc_scenario']
        self.num_entity = len(self.enc_entity.classes_)
        self.num_intent = len(self.enc_intent.classes_)
        self.num_scenario = len(self.enc_scenario.classes_)
    def process_sentence(self, text):
        """Tokenize and pad *text* into model-ready tensors.

        Returns (ids, mask, token_type_ids, tokenized_ids, word_pieces),
        the first three as 1 x max_len long tensors on the model's device.
        """
        # Collapse all whitespace runs to single spaces.
        sentence = " ".join(str(text).split())
        inputs = self.tokenizer.encode_plus(
            sentence,
            None,
            add_special_tokens=True,
            truncation=True,
            max_length=self.max_len
        )
        tokenized_ids = inputs['input_ids']
        mask = inputs['attention_mask']
        token_type_ids = inputs['token_type_ids']
        # Drop the special tokens at both ends ([1:-1]) to keep only the
        # sentence's own word pieces.
        word_pieces = self.tokenizer.decode(inputs['input_ids']).split()[1:-1]
        # Right-pad every sequence with zeros up to max_len.
        padding_len = self.max_len - len(tokenized_ids)
        ids = tokenized_ids + ([0] * padding_len)
        mask = mask + ([0] * padding_len)
        token_type_ids = token_type_ids + ([0] * padding_len)
        ids = torch.tensor(ids, dtype=torch.long).unsqueeze(0).to(self.device)
        mask = torch.tensor(mask, dtype=torch.long).unsqueeze(
            0).to(self.device)
        token_type_ids = torch.tensor(
            token_type_ids, dtype=torch.long).unsqueeze(0).to(self.device)
        return ids, mask, token_type_ids, tokenized_ids, word_pieces
    def predict_sentence(self, ids, mask, token_type_ids):
        """Run the model; returns the three heads' raw outputs."""
        entity_hs, intent_hs, scenario_hs = self.model(
            ids, mask, token_type_ids)
        return entity_hs, intent_hs, scenario_hs
    def predict(self, sentence):
        """Full pipeline: tokenize, run the model and decode all three
        heads into a JSON-serializable prediction dict."""
        ids, mask, token_type_ids, tokenized_ids, word_pieces = self.process_sentence(
            sentence)
        entity_hs, intent_hs, scenario_hs = self.predict_sentence(
            ids, mask, token_type_ids)
        words_labels_json, words_scores_json = entity_extraction(
            self.enc_entity, entity_hs, word_pieces, tokenized_ids)
        intent_sentence_labels_json, intent_class_scores_json = classification(
            self.enc_intent, self.enc_scenario, intent_hs, task='intent')
        scenario_sentence_labels_json, scenario_class_scores_json = classification(
            self.enc_intent, self.enc_scenario, scenario_hs, task='scenario')
        prediction = {'sentence': sentence}
        prediction['entities'] = simplify_entities(
            words_labels_json, words_scores_json)
        prediction['intent'] = simplify_intent(
            intent_sentence_labels_json, intent_class_scores_json)
        prediction['scenario'] = simplify_scenario(
            scenario_sentence_labels_json, scenario_class_scores_json)
        return prediction
app = Flask(__name__)
# Populated in __main__ before the server starts handling requests.
nlu_engine: NLUEngine = None
@app.route("/nlu_engine")
def predict():
    """HTTP endpoint: run NLU over the ``sentence`` query parameter."""
    sentence = request.args.get('sentence')
    predictions = nlu_engine.predict(sentence)
    print(predictions)
    return jsonify(predictions)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="HomeIO NLU Engine")
    # NOTE(review): default= has no effect here because required=True
    # forces the caller to supply --weights; confirm which was intended.
    parser.add_argument('--weights', type=str, default=config.MODEL_PATH, required=True,
                        help='Optimized Weights for Model. use optimize_weights.py')
    args = parser.parse_args()
    nlu_engine = NLUEngine(args.weights)
    # test_sentence = 'wake me up at 5 am please'
    # prediction = nlu_engine.predict(test_sentence)
    # print(prediction)
    app.run(port=5000, host='localhost', debug=True)
| 36.899083 | 116 | 0.676778 |
efe02c8458d1e055df6f74b03127c9b2915d9452 | 3,565 | py | Python | segregation/multigroup/multi_relative_diversity.py | noahbouchier/segregation | 88bd9608251b8bc42eae9265adb7941279b9868c | [
"BSD-3-Clause"
] | null | null | null | segregation/multigroup/multi_relative_diversity.py | noahbouchier/segregation | 88bd9608251b8bc42eae9265adb7941279b9868c | [
"BSD-3-Clause"
] | null | null | null | segregation/multigroup/multi_relative_diversity.py | noahbouchier/segregation | 88bd9608251b8bc42eae9265adb7941279b9868c | [
"BSD-3-Clause"
] | null | null | null | """Multigroup Relative Diversity index"""
__author__ = "Renan X. Cortes <renanc@ucr.edu>, Sergio J. Rey <sergio.rey@ucr.edu> and Elijah Knaap <elijah.knaap@ucr.edu>"
import numpy as np
from geopandas import GeoDataFrame
from .._base import MultiGroupIndex, SpatialImplicitIndex
np.seterr(divide="ignore", invalid="ignore")
def _multi_relative_diversity(data, groups):
    """
    Compute the Multigroup Relative Diversity index.

    Parameters
    ----------
    data : pandas DataFrame (or GeoDataFrame for spatial data)
    groups : list of str
        Column names holding population totals for each group.

    Returns
    -------
    statistic : float
        Multigroup Relative Diversity Index (higher diversity means
        less segregation).
    core_data : pandas DataFrame
        Columns used for the estimate (with geometry re-attached when the
        input is a GeoDataFrame).

    Notes
    -----
    Based on Reardon, Sean F. "Measures of racial diversity and segregation
    in multigroup and hierarchically structured populations." (1998).
    Reference: :cite:`reardon1998measures`.
    """
    core_data = data[groups]
    counts = np.array(core_data)
    total_pop = counts.sum()
    unit_totals = counts.sum(axis=1)
    # Within-unit group proportions; units with zero population produce
    # NaN from the 0/0 division and are zeroed out.
    proportions = np.nan_to_num(counts / unit_totals[:, None])
    overall_shares = counts.sum(axis=0) / counts.sum()
    # Simpson-style interaction term over the overall group shares.
    interaction = (overall_shares * (1 - overall_shares)).sum()
    statistic = (
        unit_totals[:, None] * (proportions - overall_shares) ** 2
    ).sum() / (total_pop * interaction)
    if isinstance(data, GeoDataFrame):
        core_data = data[[data.geometry.name]].join(core_data)
    return statistic, core_data, groups
class MultiRelativeDiversity(MultiGroupIndex, SpatialImplicitIndex):
    """Multigroup Relative Diversity Index.

    Parameters
    ----------
    data : pandas.DataFrame or geopandas.GeoDataFrame, required
        dataframe or geodataframe if spatial index holding data for location of interest
    groups : list, required
        list of columns on dataframe holding population totals for each group
    w : libpysal.weights.KernelW, optional
        lipysal spatial kernel weights object used to define an egohood
    network : pandana.Network
        pandana Network object representing the study area
    distance : int
        Maximum distance (in units of geodataframe CRS) to consider the extent of the egohood
    decay : str
        type of decay function to apply. Options include
    precompute : bool
        Whether to precompute the pandana Network object
    function : str
        kernel function used by the spatial implicit index (default 'triangular')

    Attributes
    ----------
    statistic : float
        Multigroup Dissimilarity Index value
    core_data : a pandas DataFrame
        DataFrame that contains the columns used to perform the estimate.

    Notes
    -----
    Based on Reardon, Sean F., and Glenn Firebaugh. "Measures of multigroup segregation." Sociological methodology 32.1 (2002): 33-67.

    Reference: :cite:`reardon2002measures`.
    """
    def __init__(
        self,
        data,
        groups,
        w=None,
        network=None,
        distance=None,
        decay=None,
        precompute=None,
        function='triangular'
    ):
        """Init."""
        MultiGroupIndex.__init__(self, data, groups)
        # Only treat the index as spatially implicit when any spatial
        # argument was provided.
        if any([w, network, distance]):
            SpatialImplicitIndex.__init__(self, w, network, distance, decay, function, precompute)
        aux = _multi_relative_diversity(self.data, self.groups)
        self.statistic = aux[0]
        self.data = aux[1]
        self.groups = aux[2]
        self._function = _multi_relative_diversity
| 30.732759 | 209 | 0.661711 |
633373b60ec48b45ef318dbd97117b441a7314a7 | 2,687 | py | Python | tests/contrib/operators/test_wasb_delete_blob_operator.py | ankit-shrivastava/airflow | 77b1bdc12ca5ddf043d4550d36948766b59f60ce | [
"Apache-2.0"
] | 2 | 2021-07-30T17:37:15.000Z | 2021-08-03T13:50:56.000Z | tests/contrib/operators/test_wasb_delete_blob_operator.py | larryzhu2018/airflow-1 | 1bb12f31585c36661fe30c11c9b3e0f67586a93a | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | tests/contrib/operators/test_wasb_delete_blob_operator.py | larryzhu2018/airflow-1 | 1bb12f31585c36661fe30c11c9b3e0f67586a93a | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import unittest
import mock
from airflow import DAG
from airflow.contrib.operators.wasb_delete_blob_operator import WasbDeleteBlobOperator
class TestWasbDeleteBlobOperator(unittest.TestCase):
_config = {
'container_name': 'container',
'blob_name': 'blob',
}
def setUp(self):
args = {
'owner': 'airflow',
'start_date': datetime.datetime(2017, 1, 1)
}
self.dag = DAG('test_dag_id', default_args=args)
def test_init(self):
operator = WasbDeleteBlobOperator(
task_id='wasb_operator',
dag=self.dag,
**self._config
)
self.assertEqual(operator.container_name,
self._config['container_name'])
self.assertEqual(operator.blob_name, self._config['blob_name'])
self.assertEqual(operator.is_prefix, False)
self.assertEqual(operator.ignore_if_missing, False)
operator = WasbDeleteBlobOperator(
task_id='wasb_operator',
dag=self.dag,
is_prefix=True,
ignore_if_missing=True,
**self._config
)
self.assertEqual(operator.is_prefix, True)
self.assertEqual(operator.ignore_if_missing, True)
@mock.patch('airflow.contrib.operators.wasb_delete_blob_operator.WasbHook',
autospec=True)
def test_execute(self, mock_hook):
mock_instance = mock_hook.return_value
operator = WasbDeleteBlobOperator(
task_id='wasb_operator',
dag=self.dag,
is_prefix=True,
ignore_if_missing=True,
**self._config
)
operator.execute(None)
mock_instance.delete_file.assert_called_once_with(
'container', 'blob', True, True
)
if __name__ == '__main__':
unittest.main()
| 31.611765 | 86 | 0.659099 |
79de40edad99bc82c14cbf36bdf99042ce708b5e | 2,279 | py | Python | demos/QCBP_TV/demo_stab_re_nesta_tv_fourier.py | mneyrane/AS-NESTA-net | 0142097b4d9dd0daadd94d876fb4bf73e9984921 | [
"MIT"
] | null | null | null | demos/QCBP_TV/demo_stab_re_nesta_tv_fourier.py | mneyrane/AS-NESTA-net | 0142097b4d9dd0daadd94d876fb4bf73e9984921 | [
"MIT"
] | null | null | null | demos/QCBP_TV/demo_stab_re_nesta_tv_fourier.py | mneyrane/AS-NESTA-net | 0142097b4d9dd0daadd94d876fb4bf73e9984921 | [
"MIT"
] | null | null | null | import math
import torch
import numpy as np
import operators as op
import stability
import nn
from PIL import Image
with Image.open("../demo_images/test_image.png") as im:
im.save("stab-ground-truth.png")
X = np.asarray(im).astype(np.float64) / 255
# parameters
eta = 1e-1
N, _ = X.shape
inner_iters = 5
outer_iters = 8
mu0 = 1e-1
delta = 1e-12
r = 0.5
# generate sampling mask
mask = torch.zeros((N,N), dtype=bool)
for i in range(-N//2+1,N//2+1):
for j in range(-N//2+1,N//2+1):
if (max(abs(i),abs(j)) <= 0.25*(N/2)):
mask[i+N//2,j+N//2] = True
mask = mask | (torch.rand((N,N)) < 0.1)
n_mask = mask.numpy()
(Image.fromarray(n_mask)).save("stab-mask.png")
m = torch.sum(mask)
print('Image size (number of pixels):', N*N)
print('Number of measurements:', m)
print('Sample rate:', m/(N*N))
# generate functions for measurement and weight operators
subsampled_ft = lambda x, mode: op.fourier_2d(x,mode,N,mask)*(N/math.sqrt(m))
discrete_grad = lambda x, mode: op.discrete_gradient_2d(x,mode,N,N)
L_grad = 2.0
# define the inverse problem
noise = (eta/math.sqrt(m))*torch.randn(m)
T = torch.from_numpy(np.reshape(X.transpose(),N*N))
y = subsampled_ft(T,1) + noise
# compute worst-case perturbation
# (see Sec. 19.4 of Adcock & Hansen)
lam = 100.0
x = torch.from_numpy(np.reshape(np.transpose(X),(N*N)))
z0 = torch.zeros(N*N,dtype=y.dtype)
def recon_fn(y):
rec, _ = nn.restarted_nesta_wqcbp(
y, z0,
subsampled_ft, discrete_grad, L_grad,
inner_iters, outer_iters,
eta, mu0, delta, r, False)
return rec
adv_noise, adv_rec = stability.projected_adversarial_perturbation(
x, subsampled_ft, recon_fn, lam, 10, 1e-1, 0.9)
#assert len(adv_noise.shape) == 1
#print('l2 norm of perturbation:', torch.linalg.norm(adv_noise,2))
print(adv_noise.dtype)
print(adv_noise)
X_pert = np.transpose(np.reshape(np.real(adv_rec.numpy()),(N,N)))
X_pert = np.clip((X_pert*255),0,255).astype('uint8')
#pert_im = np.transpose(np.reshape(np.abs(adv_noise.numpy()),(N,N)))
#pert_im = np.clip(255*(pert_im-np.min(pert_im))/(np.max(pert_im)-np.min(pert_im)),0,255).astype('uint8')
Image.fromarray(X_pert).save("stab-worst-pert.png")
#Image.fromarray(pert_im).save("stab-worst-pert.png")
| 27.457831 | 105 | 0.67047 |
e01ec87fe739acf44899a91ea43263bd0fb5fa84 | 21,525 | py | Python | packages/python/plotly/plotly/express/_doc.py | santapresent/plotly.py | 96967d7937fed1777f737f8c3302af48252b4e7a | [
"MIT"
] | 2 | 2019-11-08T07:01:11.000Z | 2019-11-17T10:10:49.000Z | packages/python/plotly/plotly/express/_doc.py | santapresent/plotly.py | 96967d7937fed1777f737f8c3302af48252b4e7a | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/express/_doc.py | santapresent/plotly.py | 96967d7937fed1777f737f8c3302af48252b4e7a | [
"MIT"
] | null | null | null | import inspect
from textwrap import TextWrapper
# TODO contents of columns
# TODO explain categorical
# TODO handle color
# TODO handle details of box/violin/histogram
# TODO handle details of column selection with `dimensions`
# TODO document "or `None`, default `None`" in various places
# TODO standardize positioning and casing of 'default'
colref_type = "str or int or Series or array-like"
colref_desc = "Either a name of a column in `data_frame`, or a pandas Series or array_like object."
colref_list_type = "list of str or int, or Series or array-like"
colref_list_desc = (
"Either names of columns in `data_frame`, or pandas Series, or array_like objects"
)
docs = dict(
data_frame=[
"DataFrame or array-like or dict",
"This argument needs to be passed for column names (and not keyword names) to be used.",
"Array-like and dict are tranformed internally to a pandas DataFrame.",
],
x=[
colref_type,
colref_desc,
"Values from this column or array_like are used to position marks along the x axis in cartesian coordinates.",
"For horizontal `histogram`s, these values are used as inputs to `histfunc`.",
],
y=[
colref_type,
colref_desc,
"Values from this column or array_like are used to position marks along the y axis in cartesian coordinates.",
"For vertical `histogram`s, these values are used as inputs to `histfunc`.",
],
z=[
colref_type,
colref_desc,
"Values from this column or array_like are used to position marks along the z axis in cartesian coordinates.",
"For `density_heatmap` and `density_contour` these values are used as the inputs to `histfunc`.",
],
a=[
colref_type,
colref_desc,
"Values from this column or array_like are used to position marks along the a axis in ternary coordinates.",
],
b=[
colref_type,
colref_desc,
"Values from this column or array_like are used to position marks along the b axis in ternary coordinates.",
],
c=[
colref_type,
colref_desc,
"Values from this column or array_like are used to position marks along the c axis in ternary coordinates.",
],
r=[
colref_type,
colref_desc,
"Values from this column or array_like are used to position marks along the radial axis in polar coordinates.",
],
theta=[
colref_type,
colref_desc,
"Values from this column or array_like are used to position marks along the angular axis in polar coordinates.",
],
lat=[
colref_type,
colref_desc,
"Values from this column or array_like are used to position marks according to latitude on a map.",
],
lon=[
colref_type,
colref_desc,
"Values from this column or array_like are used to position marks according to longitude on a map.",
],
locations=[
colref_type,
colref_desc,
"Values from this column or array_like are to be interpreted according to `locationmode` and mapped to longitude/latitude.",
],
dimensions=[
colref_list_type,
colref_list_desc,
"Values from these columns are used for multidimensional visualization.",
],
error_x=[
colref_type,
colref_desc,
"Values from this column or array_like are used to size x-axis error bars.",
"If `error_x_minus` is `None`, error bars will be symmetrical, otherwise `error_x` is used for the positive direction only.",
],
error_x_minus=[
colref_type,
colref_desc,
"Values from this column or array_like are used to size x-axis error bars in the negative direction.",
"Ignored if `error_x` is `None`.",
],
error_y=[
colref_type,
colref_desc,
"Values from this column or array_like are used to size y-axis error bars.",
"If `error_y_minus` is `None`, error bars will be symmetrical, otherwise `error_y` is used for the positive direction only.",
],
error_y_minus=[
colref_type,
colref_desc,
"Values from this column or array_like are used to size y-axis error bars in the negative direction.",
"Ignored if `error_y` is `None`.",
],
error_z=[
colref_type,
colref_desc,
"Values from this column or array_like are used to size z-axis error bars.",
"If `error_z_minus` is `None`, error bars will be symmetrical, otherwise `error_z` is used for the positive direction only.",
],
error_z_minus=[
colref_type,
colref_desc,
"Values from this column or array_like are used to size z-axis error bars in the negative direction.",
"Ignored if `error_z` is `None`.",
],
color=[
colref_type,
colref_desc,
"Values from this column or array_like are used to assign color to marks.",
],
opacity=["float", "Value between 0 and 1. Sets the opacity for markers."],
line_dash=[
colref_type,
colref_desc,
"Values from this column or array_like are used to assign dash-patterns to lines.",
],
line_group=[
colref_type,
colref_desc,
"Values from this column or array_like are used to group rows of `data_frame` into lines.",
],
symbol=[
colref_type,
colref_desc,
"Values from this column or array_like are used to assign symbols to marks.",
],
size=[
colref_type,
colref_desc,
"Values from this column or array_like are used to assign mark sizes.",
],
hover_name=[
colref_type,
colref_desc,
"Values from this column or array_like appear in bold in the hover tooltip.",
],
hover_data=[
colref_list_type,
colref_list_desc,
"Values from these columns appear as extra data in the hover tooltip.",
],
custom_data=[
colref_list_type,
colref_list_desc,
"Values from these columns are extra data, to be used in widgets or Dash callbacks for example. This data is not user-visible but is included in events emitted by the figure (lasso selection etc.)",
],
text=[
colref_type,
colref_desc,
"Values from this column or array_like appear in the figure as text labels.",
],
locationmode=[
"str",
"One of 'ISO-3', 'USA-states', or 'country names'",
"Determines the set of locations used to match entries in `locations` to regions on the map.",
],
facet_row=[
colref_type,
colref_desc,
"Values from this column or array_like are used to assign marks to facetted subplots in the vertical direction.",
],
facet_col=[
colref_type,
colref_desc,
"Values from this column or array_like are used to assign marks to facetted subplots in the horizontal direction.",
],
facet_col_wrap=[
"int",
"Maximum number of facet columns.",
"Wraps the column variable at this width, so that the column facets span multiple rows.",
"Ignored if 0, and forced to 0 if `facet_row` or a `marginal` is set.",
],
animation_frame=[
colref_type,
colref_desc,
"Values from this column or array_like are used to assign marks to animation frames.",
],
animation_group=[
colref_type,
colref_desc,
"Values from this column or array_like are used to provide object-constancy across animation frames: rows with matching `animation_group`s will be treated as if they describe the same object in each frame.",
],
symbol_sequence=[
"list of str",
"Strings should define valid plotly.js symbols.",
"When `symbol` is set, values in that column are assigned symbols by cycling through `symbol_sequence` in the order described in `category_orders`, unless the value of `symbol` is a key in `symbol_map`.",
],
symbol_map=[
"dict with str keys and str values (default `{}`)",
"String values should define plotly.js symbols",
"Used to override `symbol_sequence` to assign a specific symbols to marks corresponding with specific values.",
"Keys in `symbol_map` should be values in the column denoted by `symbol`.",
],
line_dash_map=[
"dict with str keys and str values (default `{}`)",
"Strings values define plotly.js dash-patterns.",
"Used to override `line_dash_sequences` to assign a specific dash-patterns to lines corresponding with specific values.",
"Keys in `line_dash_map` should be values in the column denoted by `line_dash`.",
],
line_dash_sequence=[
"list of str",
"Strings should define valid plotly.js dash-patterns.",
"When `line_dash` is set, values in that column are assigned dash-patterns by cycling through `line_dash_sequence` in the order described in `category_orders`, unless the value of `line_dash` is a key in `line_dash_map`.",
],
color_discrete_sequence=[
"list of str",
"Strings should define valid CSS-colors.",
"When `color` is set and the values in the corresponding column are not numeric, values in that column are assigned colors by cycling through `color_discrete_sequence` in the order described in `category_orders`, unless the value of `color` is a key in `color_discrete_map`.",
"Various useful color sequences are available in the `plotly_express.colors` submodules, specifically `plotly_express.colors.qualitative`.",
],
color_discrete_map=[
"dict with str keys and str values (default `{}`)",
"String values should define valid CSS-colors",
"Used to override `color_discrete_sequence` to assign a specific colors to marks corresponding with specific values.",
"Keys in `color_discrete_map` should be values in the column denoted by `color`.",
],
color_continuous_scale=[
"list of str",
"Strings should define valid CSS-colors",
"This list is used to build a continuous color scale when the column denoted by `color` contains numeric data.",
"Various useful color scales are available in the `plotly_express.colors` submodules, specifically `plotly_express.colors.sequential`, `plotly_express.colors.diverging` and `plotly_express.colors.cyclical`.",
],
color_continuous_midpoint=[
"number (default `None`)",
"If set, computes the bounds of the continuous color scale to have the desired midpoint.",
"Setting this value is recommended when using `plotly_express.colors.diverging` color scales as the inputs to `color_continuous_scale`.",
],
size_max=["int (default `20`)", "Set the maximum mark size when using `size`."],
log_x=[
"boolean (default `False`)",
"If `True`, the x-axis is log-scaled in cartesian coordinates.",
],
log_y=[
"boolean (default `False`)",
"If `True`, the y-axis is log-scaled in cartesian coordinates.",
],
log_z=[
"boolean (default `False`)",
"If `True`, the z-axis is log-scaled in cartesian coordinates.",
],
log_r=[
"boolean (default `False`)",
"If `True`, the radial axis is log-scaled in polar coordinates.",
],
range_x=[
"list of two numbers",
"If provided, overrides auto-scaling on the x-axis in cartesian coordinates.",
],
range_y=[
"list of two numbers",
"If provided, overrides auto-scaling on the y-axis in cartesian coordinates.",
],
range_z=[
"list of two numbers",
"If provided, overrides auto-scaling on the z-axis in cartesian coordinates.",
],
range_color=[
"list of two numbers",
"If provided, overrides auto-scaling on the continuous color scale.",
],
range_r=[
"list of two numbers",
"If provided, overrides auto-scaling on the radial axis in polar coordinates.",
],
title=["str", "The figure title."],
template=[
"str or Plotly.py template object",
"The figure template name or definition.",
],
width=["int (default `None`)", "The figure width in pixels."],
height=["int (default `600`)", "The figure height in pixels."],
labels=[
"dict with str keys and str values (default `{}`)",
"By default, column names are used in the figure for axis titles, legend entries and hovers.",
"This parameter allows this to be overridden.",
"The keys of this dict should correspond to column names, and the values should correspond to the desired label to be displayed.",
],
category_orders=[
"dict with str keys and list of str values (default `{}`)",
"By default, in Python 3.6+, the order of categorical values in axes, legends and facets depends on the order in which these values are first encountered in `data_frame` (and no order is guaranteed by default in Python below 3.6).",
"This parameter is used to force a specific ordering of values per column.",
"The keys of this dict should correspond to column names, and the values should be lists of strings corresponding to the specific display order desired.",
],
marginal=[
"str",
"One of `'rug'`, `'box'`, `'violin'`, or `'histogram'`.",
"If set, a subplot is drawn alongside the main plot, visualizing the distribution.",
],
marginal_x=[
"str",
"One of `'rug'`, `'box'`, `'violin'`, or `'histogram'`.",
"If set, a horizontal subplot is drawn above the main plot, visualizing the x-distribution.",
],
marginal_y=[
"str",
"One of `'rug'`, `'box'`, `'violin'`, or `'histogram'`.",
"If set, a vertical subplot is drawn to the right of the main plot, visualizing the y-distribution.",
],
trendline=[
"str",
"One of `'ols'` or `'lowess'`.",
"If `'ols'`, an Ordinary Least Squares regression line will be drawn for each discrete-color/symbol group.",
"If `'lowess`', a Locally Weighted Scatterplot Smoothing line will be drawn for each discrete-color/symbol group.",
],
trendline_color_override=[
"str",
"Valid CSS color.",
"If provided, and if `trendline` is set, all trendlines will be drawn in this color.",
],
render_mode=[
"str",
"One of `'auto'`, `'svg'` or `'webgl'`, default `'auto'`",
"Controls the browser API used to draw marks.",
"`'svg`' is appropriate for figures of less than 1000 data points, and will allow for fully-vectorized output.",
"`'webgl'` is likely necessary for acceptable performance above 1000 points but rasterizes part of the output. ",
"`'auto'` uses heuristics to choose the mode.",
],
direction=[
"str",
"One of '`counterclockwise'` or `'clockwise'`. Default is `'clockwise'`",
"Sets the direction in which increasing values of the angular axis are drawn.",
],
start_angle=[
"int (default `90`)",
"Sets start angle for the angular axis, with 0 being due east and 90 being due north.",
],
histfunc=[
"str (default `'count'`)",
"One of `'count'`, `'sum'`, `'avg'`, `'min'`, or `'max'`."
"Function used to aggregate values for summarization (note: can be normalized with `histnorm`).",
"The arguments to this function for `histogram` are the values of `y` if `orientation` is `'v'`,",
"otherwise the arguements are the values of `x`.",
"The arguments to this function for `density_heatmap` and `density_contour` are the values of `z`.",
],
histnorm=[
"str (default `None`)",
"One of `'percent'`, `'probability'`, `'density'`, or `'probability density'`",
"If `None`, the output of `histfunc` is used as is.",
"If `'probability'`, the output of `histfunc` for a given bin is divided by the sum of the output of `histfunc` for all bins.",
"If `'percent'`, the output of `histfunc` for a given bin is divided by the sum of the output of `histfunc` for all bins and multiplied by 100.",
"If `'density'`, the output of `histfunc` for a given bin is divided by the size of the bin.",
"If `'probability density'`, the output of `histfunc` for a given bin is normalized such that it corresponds to the probability that a random event whose distribution is described by the output of `histfunc` will fall into that bin.",
],
barnorm=[
"str (default `None`)",
"One of `'fraction'` or `'percent'`.",
"If `'fraction'`, the value of each bar is divided by the sum of all values at that location coordinate.",
"`'percent'` is the same but multiplied by 100 to show percentages.",
"`None` will stack up all values at each location coordinate.",
],
groupnorm=[
"str (default `None`)",
"One of `'fraction'` or `'percent'`.",
"If `'fraction'`, the value of each point is divided by the sum of all values at that location coordinate.",
"`'percent'` is the same but multiplied by 100 to show percentages.",
"`None` will stack up all values at each location coordinate.",
],
barmode=[
"str (default `'relative'`)",
"One of `'group'`, `'overlay'` or `'relative'`",
"In `'relative'` mode, bars are stacked above zero for positive values and below zero for negative values.",
"In `'overlay'` mode, bars are drawn on top of one another.",
"In `'group'` mode, bars are placed beside each other.",
],
boxmode=[
"str (default `'group'`)",
"One of `'group'` or `'overlay'`",
"In `'overlay'` mode, boxes are on drawn top of one another.",
"In `'group'` mode, baxes are placed beside each other.",
],
violinmode=[
"str (default `'group'`)",
"One of `'group'` or `'overlay'`",
"In `'overlay'` mode, violins are on drawn top of one another.",
"In `'group'` mode, violins are placed beside each other.",
],
stripmode=[
"str (default `'group'`)",
"One of `'group'` or `'overlay'`",
"In `'overlay'` mode, strips are on drawn top of one another.",
"In `'group'` mode, strips are placed beside each other.",
],
zoom=["int (default `8`)", "Between 0 and 20.", "Sets map zoom level."],
orientation=[
"str (default `'v'`)",
"One of `'h'` for horizontal or `'v'` for vertical)",
],
line_close=[
"boolean (default `False`)",
"If `True`, an extra line segment is drawn between the first and last point.",
],
line_shape=["str (default `'linear'`)", "One of `'linear'` or `'spline'`."],
scope=[
"str (default `'world'`).",
"One of `'world'`, `'usa'`, `'europe'`, `'asia'`, `'africa'`, `'north america'`, or `'south america'`)"
"Default is `'world'` unless `projection` is set to `'albers usa'`, which forces `'usa'`.",
],
projection=[
"str ",
"One of `'equirectangular'`, `'mercator'`, `'orthographic'`, `'natural earth'`, `'kavrayskiy7'`, `'miller'`, `'robinson'`, `'eckert4'`, `'azimuthal equal area'`, `'azimuthal equidistant'`, `'conic equal area'`, `'conic conformal'`, `'conic equidistant'`, `'gnomonic'`, `'stereographic'`, `'mollweide'`, `'hammer'`, `'transverse mercator'`, `'albers usa'`, `'winkel tripel'`, `'aitoff'`, or `'sinusoidal'`"
"Default depends on `scope`.",
],
center=[
"dict",
"Dict keys are `'lat'` and `'lon'`",
"Sets the center point of the map.",
],
points=[
"str or boolean (default `'outliers'`)",
"One of `'outliers'`, `'suspectedoutliers'`, `'all'`, or `False`.",
"If `'outliers'`, only the sample points lying outside the whiskers are shown.",
"If `'suspectedoutliers'`, all outlier points are shown and those less than 4*Q1-3*Q3 or greater than 4*Q3-3*Q1 are highlighted with the marker's `'outliercolor'`.",
"If `'outliers'`, only the sample points lying outside the whiskers are shown.",
"If `'all'`, all sample points are shown.",
"If `False`, no sample points are shown and the whiskers extend to the full range of the sample.",
],
box=["boolean (default `False`)", "If `True`, boxes are drawn inside the violins."],
notched=["boolean (default `False`)", "If `True`, boxes are drawn with notches."],
cumulative=[
"boolean (default `False`)",
"If `True`, histogram values are cumulative.",
],
nbins=["int", "Positive integer.", "Sets the number of bins."],
nbinsx=["int", "Positive integer.", "Sets the number of bins along the x axis."],
nbinsy=["int", "Positive integer.", "Sets the number of bins along the y axis."],
)
def make_docstring(fn):
tw = TextWrapper(width=77, initial_indent=" ", subsequent_indent=" ")
result = (fn.__doc__ or "") + "\nParameters\n----------\n"
for param in inspect.getargspec(fn)[0]:
param_desc_list = docs[param][1:]
param_desc = (
tw.fill(" ".join(param_desc_list or ""))
if param in docs
else "(documentation missing from map)"
)
param_type = docs[param][0]
result += "%s: %s\n%s\n" % (param, param_type, param_desc)
result += "\nReturns\n-------\n"
result += " A `Figure` object."
return result
| 46.490281 | 413 | 0.632985 |
fd230cb8d8bfa5c27c769b9a60cd451dbd9abfa9 | 12,818 | py | Python | server/.vim/plugged/python-mode/submodules/pylint/tests/test_functional.py | hkdb/sysconf | 99d334f7309657647059c4b37f25e33dffc81fc3 | [
"MIT"
] | 10 | 2020-07-21T21:59:54.000Z | 2021-07-19T11:01:47.000Z | server/.vim/plugged/python-mode/submodules/pylint/tests/test_functional.py | hkdb/sysconf | 99d334f7309657647059c4b37f25e33dffc81fc3 | [
"MIT"
] | null | null | null | server/.vim/plugged/python-mode/submodules/pylint/tests/test_functional.py | hkdb/sysconf | 99d334f7309657647059c4b37f25e33dffc81fc3 | [
"MIT"
] | 1 | 2021-01-30T18:17:01.000Z | 2021-01-30T18:17:01.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2014-2017 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Google, Inc.
# Copyright (c) 2014 Michal Nowikowski <godfryd@gmail.com>
# Copyright (c) 2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2016 Derek Gustafson <degustaf@gmail.com>
# Copyright (c) 2018 Ville Skyttä <ville.skytta@upcloud.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""Functional full-module tests for PyLint."""
import collections
import csv
import io
import operator
import os
import platform
import re
import sys
import pytest
import six
from six.moves import configparser
from pylint import checkers, interfaces, lint, reporters
class test_dialect(csv.excel):
delimiter = ":"
lineterminator = "\n"
csv.register_dialect("test", test_dialect)
class NoFileError(Exception):
pass
# Notes:
# - for the purpose of this test, the confidence levels HIGH and UNDEFINED
# are treated as the same.
# TODOs
# - implement exhaustivity tests
# If message files should be updated instead of checked.
UPDATE = False
class OutputLine(
collections.namedtuple(
"OutputLine", ["symbol", "lineno", "object", "msg", "confidence"]
)
):
@classmethod
def from_msg(cls, msg):
return cls(
msg.symbol,
msg.line,
msg.obj or "",
msg.msg.replace("\r\n", "\n"),
msg.confidence.name
if msg.confidence != interfaces.UNDEFINED
else interfaces.HIGH.name,
)
@classmethod
def from_csv(cls, row):
confidence = row[4] if len(row) == 5 else interfaces.HIGH.name
return cls(row[0], int(row[1]), row[2], row[3], confidence)
def to_csv(self):
if self.confidence == interfaces.HIGH.name:
return self[:-1]
else:
return self
# Common sub-expressions.
_MESSAGE = {"msg": r"[a-z][a-z\-]+"}
# Matches a #,
# - followed by a comparison operator and a Python version (optional),
# - followed by a line number with a +/- (optional),
# - followed by a list of bracketed message symbols.
# Used to extract expected messages from testdata files.
_EXPECTED_RE = re.compile(
r"\s*#\s*(?:(?P<line>[+-]?[0-9]+):)?"
r"(?:(?P<op>[><=]+) *(?P<version>[0-9.]+):)?"
r"\s*\[(?P<msgs>%(msg)s(?:,\s*%(msg)s)*)\]" % _MESSAGE
)
def parse_python_version(str):
return tuple(int(digit) for digit in str.split("."))
class FunctionalTestReporter(reporters.BaseReporter):
def handle_message(self, msg):
self.messages.append(msg)
def on_set_current_module(self, module, filepath):
self.messages = []
def display_reports(self, layout):
"""Ignore layouts."""
class FunctionalTestFile(object):
"""A single functional test case file with options."""
_CONVERTERS = {
"min_pyver": parse_python_version,
"max_pyver": parse_python_version,
"requires": lambda s: s.split(","),
}
def __init__(self, directory, filename):
self._directory = directory
self.base = filename.replace(".py", "")
self.options = {
"min_pyver": (2, 5),
"max_pyver": (4, 0),
"requires": [],
"except_implementations": [],
}
self._parse_options()
def __repr__(self):
return "FunctionalTest:{}".format(self.base)
def _parse_options(self):
cp = configparser.ConfigParser()
cp.add_section("testoptions")
try:
cp.read(self.option_file)
except NoFileError:
pass
for name, value in cp.items("testoptions"):
conv = self._CONVERTERS.get(name, lambda v: v)
self.options[name] = conv(value)
@property
def option_file(self):
return self._file_type(".rc")
@property
def module(self):
package = os.path.basename(self._directory)
return ".".join([package, self.base])
@property
def expected_output(self):
return self._file_type(".txt", check_exists=False)
@property
def source(self):
return self._file_type(".py")
def _file_type(self, ext, check_exists=True):
name = os.path.join(self._directory, self.base + ext)
if not check_exists or os.path.exists(name):
return name
raise NoFileError("Cannot find '{}'.".format(name))
_OPERATORS = {">": operator.gt, "<": operator.lt, ">=": operator.ge, "<=": operator.le}
def parse_expected_output(stream):
return [OutputLine.from_csv(row) for row in csv.reader(stream, "test")]
def get_expected_messages(stream):
"""Parses a file and get expected messages.
:param stream: File-like input stream.
:returns: A dict mapping line,msg-symbol tuples to the count on this line.
"""
messages = collections.Counter()
for i, line in enumerate(stream):
match = _EXPECTED_RE.search(line)
if match is None:
continue
line = match.group("line")
if line is None:
line = i + 1
elif line.startswith("+") or line.startswith("-"):
line = i + 1 + int(line)
else:
line = int(line)
version = match.group("version")
op = match.group("op")
if version:
required = parse_python_version(version)
if not _OPERATORS[op](sys.version_info, required):
continue
for msg_id in match.group("msgs").split(","):
messages[line, msg_id.strip()] += 1
return messages
def multiset_difference(left_op, right_op):
"""Takes two multisets and compares them.
A multiset is a dict with the cardinality of the key as the value.
:param left_op: The expected entries.
:param right_op: Actual entries.
:returns: The two multisets of missing and unexpected messages.
"""
missing = left_op.copy()
missing.subtract(right_op)
unexpected = {}
for key, value in list(six.iteritems(missing)):
if value <= 0:
missing.pop(key)
if value < 0:
unexpected[key] = -value
return missing, unexpected
class LintModuleTest(object):
maxDiff = None
def __init__(self, test_file):
test_reporter = FunctionalTestReporter()
self._linter = lint.PyLinter()
self._linter.set_reporter(test_reporter)
self._linter.config.persistent = 0
checkers.initialize(self._linter)
self._linter.disable("I")
try:
self._linter.read_config_file(test_file.option_file)
self._linter.load_config_file()
except NoFileError:
pass
self._test_file = test_file
def setUp(self):
if self._should_be_skipped_due_to_version():
pytest.skip(
"Test cannot run with Python %s." % (sys.version.split(" ")[0],)
)
missing = []
for req in self._test_file.options["requires"]:
try:
__import__(req)
except ImportError:
missing.append(req)
if missing:
pytest.skip("Requires %s to be present." % (",".join(missing),))
if self._test_file.options["except_implementations"]:
implementations = [
item.strip()
for item in self._test_file.options["except_implementations"].split(",")
]
implementation = platform.python_implementation()
if implementation in implementations:
pytest.skip(
"Test cannot run with Python implementation %r" % (implementation,)
)
def _should_be_skipped_due_to_version(self):
return (
sys.version_info < self._test_file.options["min_pyver"]
or sys.version_info > self._test_file.options["max_pyver"]
)
def __str__(self):
return "%s (%s.%s)" % (
self._test_file.base,
self.__class__.__module__,
self.__class__.__name__,
)
def _open_expected_file(self):
return open(self._test_file.expected_output)
def _open_source_file(self):
if self._test_file.base == "invalid_encoded_data":
return open(self._test_file.source)
if "latin1" in self._test_file.base:
return io.open(self._test_file.source, encoding="latin1")
return io.open(self._test_file.source, encoding="utf8")
def _get_expected(self):
with self._open_source_file() as fobj:
expected_msgs = get_expected_messages(fobj)
if expected_msgs:
with self._open_expected_file() as fobj:
expected_output_lines = parse_expected_output(fobj)
else:
expected_output_lines = []
return expected_msgs, expected_output_lines
def _get_received(self):
messages = self._linter.reporter.messages
messages.sort(key=lambda m: (m.line, m.symbol, m.msg))
received_msgs = collections.Counter()
received_output_lines = []
for msg in messages:
assert (
msg.symbol != "fatal"
), "Pylint analysis failed because of '{}'".format(msg.msg)
received_msgs[msg.line, msg.symbol] += 1
received_output_lines.append(OutputLine.from_msg(msg))
return received_msgs, received_output_lines
def _runTest(self):
modules_to_check = [self._test_file.source]
self._linter.check(modules_to_check)
expected_messages, expected_text = self._get_expected()
received_messages, received_text = self._get_received()
if expected_messages != received_messages:
msg = ['Wrong results for file "%s":' % (self._test_file.base)]
missing, unexpected = multiset_difference(
expected_messages, received_messages
)
if missing:
msg.append("\nExpected in testdata:")
msg.extend(" %3d: %s" % msg for msg in sorted(missing))
if unexpected:
msg.append("\nUnexpected in testdata:")
msg.extend(" %3d: %s" % msg for msg in sorted(unexpected))
pytest.fail("\n".join(msg))
self._check_output_text(expected_messages, expected_text, received_text)
def _split_lines(self, expected_messages, lines):
emitted, omitted = [], []
for msg in lines:
if (msg[1], msg[0]) in expected_messages:
emitted.append(msg)
else:
omitted.append(msg)
return emitted, omitted
def _check_output_text(self, expected_messages, expected_lines, received_lines):
assert (
self._split_lines(expected_messages, expected_lines)[0] == received_lines
), "Error with the following functional test: {}".format(self._test_file.base)
class LintModuleOutputUpdate(LintModuleTest):
    """Variant of LintModuleTest that rewrites the expected-output file
    in place instead of failing when the received output differs."""

    def _open_expected_file(self):
        # Fall back to an empty in-memory buffer when the expected-output
        # file does not exist yet.
        try:
            return super(LintModuleOutputUpdate, self)._open_expected_file()
        except IOError:
            return io.StringIO()

    def _check_output_text(self, expected_messages, expected_lines, received_lines):
        if not expected_messages:
            return
        emitted, remaining = self._split_lines(expected_messages, expected_lines)
        if emitted == received_lines:
            return
        # Merge the freshly received lines with the non-emitted ones, sort
        # deterministically, and persist the result as the new expectation.
        remaining.extend(received_lines)
        remaining.sort(key=lambda m: (m[1], m[0], m[3]))
        with open(self._test_file.expected_output, "w") as out:
            writer = csv.writer(out, dialect="test")
            for row in remaining:
                writer.writerow(row.to_csv())
def get_tests():
    """Discover every functional test file under the 'functional' directory.

    Walks the tree next to this module, skipping __pycache__ directories and
    __init__.py files, and wraps each remaining .py file in a
    FunctionalTestFile.
    """
    input_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "functional")
    suite = []
    for dirpath, _dirnames, filenames in os.walk(input_dir):
        if dirpath.endswith("__pycache__"):
            continue
        suite.extend(
            FunctionalTestFile(dirpath, name)
            for name in filenames
            if name.endswith(".py") and name != "__init__.py"
        )
    return suite
# Collect the functional test corpus once at import time; the parametrized
# test below iterates over it, using each file's base name as the pytest id.
TESTS = get_tests()
TESTS_NAMES = [t.base for t in TESTS]
@pytest.mark.parametrize("test_file", TESTS, ids=TESTS_NAMES)
def test_functional(test_file):
    """Run one functional test file through the appropriate runner.

    Uses LintModuleOutputUpdate (which rewrites expected output) when the
    module-level UPDATE flag is set, LintModuleTest otherwise.
    """
    runner_class = LintModuleOutputUpdate if UPDATE else LintModuleTest
    runner = runner_class(test_file)
    runner.setUp()
    runner._runTest()
if __name__ == "__main__":
    # "-u" switches the runner into update mode: expected-output files are
    # rewritten in place instead of compared against. The flag is stripped
    # before the remaining arguments are handed to pytest.
    if "-u" in sys.argv:
        UPDATE = True
        sys.argv.remove("-u")
    pytest.main(sys.argv)
| 31.727723 | 88 | 0.616477 |
0226e1684fe2c18bf97609f5d0a5986baa69f904 | 1,515 | py | Python | orders/models.py | guilhermelopeseng/django-ecommerce | 2f0f3ccd325aef124b27bc86494f546eed2c3cf3 | [
"MIT"
] | null | null | null | orders/models.py | guilhermelopeseng/django-ecommerce | 2f0f3ccd325aef124b27bc86494f546eed2c3cf3 | [
"MIT"
] | null | null | null | orders/models.py | guilhermelopeseng/django-ecommerce | 2f0f3ccd325aef124b27bc86494f546eed2c3cf3 | [
"MIT"
] | null | null | null | from django.conf import settings
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from localflavor.br.models import BRCPFField, BRPostalCodeField, BRStateField
from model_utils.models import TimeStampedModel
from products.models import Product
class Order(TimeStampedModel):
cpf = BRCPFField("CPF")
name = models.CharField("Nome Completo", max_length=250)
email = models.EmailField()
postal_code = BRPostalCodeField("CEP")
address = models.CharField("Endereço", max_length=250)
number = models.CharField("Número", max_length=250)
complement = models.CharField("Complemento", max_length=250, blank=True)
district = models.CharField("Bairro", max_length=250)
state = BRStateField("Estado")
city = models.CharField("Cidade", max_length=250)
paid = models.BooleanField(default=False)
class Meta:
ordering = ("-created",)
def __str__(self):
return f"Pedido {self.id}"
class Item(models.Model):
order = models.ForeignKey(
Order, related_name="items", on_delete=models.CASCADE)
product = models.ForeignKey(
Product, related_name="order_items", on_delete=models.CASCADE
)
price = models.DecimalField(max_digits=10, decimal_places=2)
quantity = models.PositiveIntegerField(
validators=[
MinValueValidator(1),
MaxValueValidator(settings.CART_ITEM_MAX_QUANTITY),
]
)
def __str__(self):
return str(self.id)
| 32.934783 | 77 | 0.712211 |
539c3cc6fc76096f77846383b9682f39cdbed61b | 1,032 | py | Python | Menta/Profiles/migrations/0003_remove_skill_gender_skill_level_skill_profile_and_more.py | reembot/Menta | 7da11e28d6d9f61075ab835548956168671b509d | [
"MIT"
] | 2 | 2022-03-26T03:04:45.000Z | 2022-03-26T08:49:32.000Z | Menta/Profiles/migrations/0003_remove_skill_gender_skill_level_skill_profile_and_more.py | reembot/Menta | 7da11e28d6d9f61075ab835548956168671b509d | [
"MIT"
] | null | null | null | Menta/Profiles/migrations/0003_remove_skill_gender_skill_level_skill_profile_and_more.py | reembot/Menta | 7da11e28d6d9f61075ab835548956168671b509d | [
"MIT"
] | null | null | null | # Generated by Django 4.0.2 on 2022-03-26 19:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('Profiles', '0002_alter_profile_website_skill'),
]
operations = [
migrations.RemoveField(
model_name='skill',
name='gender',
),
migrations.AddField(
model_name='skill',
name='level',
field=models.CharField(choices=[('B', 'Beginner'), ('I', 'Intermediate'), ('E', 'Expert')], default='B', max_length=100, verbose_name='Proficiency'),
),
migrations.AddField(
model_name='skill',
name='profile',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='Profiles.profile'),
),
migrations.AlterField(
model_name='skill',
name='skillName',
field=models.CharField(max_length=100, verbose_name='Skill'),
),
]
| 30.352941 | 161 | 0.58624 |
e52f60b5e5dab57c1af10b6bef990b221ad2ac9a | 2,107 | py | Python | common/middleware/keystone.py | escudocloud/encswift_server | 90a5a999c06b40da89d5f785795f80bd9d990640 | [
"Apache-2.0"
] | null | null | null | common/middleware/keystone.py | escudocloud/encswift_server | 90a5a999c06b40da89d5f785795f80bd9d990640 | [
"Apache-2.0"
] | null | null | null | common/middleware/keystone.py | escudocloud/encswift_server | 90a5a999c06b40da89d5f785795f80bd9d990640 | [
"Apache-2.0"
] | 1 | 2016-09-27T09:08:24.000Z | 2016-09-27T09:08:24.000Z | #!/usr/bin/env python
from keystoneclient.exceptions import NotFound, Conflict
import keystoneclient.v2_0.client as kc
import logging
# set logger info to INFO
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
logger.addHandler(ch)
class SimpleKeystoneClient:
def __init__(self, admin_user, admin_pass, admin_tenant, auth_url):
self.ks_client = kc.Client(username=admin_user, password=admin_pass,
tenant_name=admin_tenant, auth_url=auth_url)
def create_tenant(self, name, **kwargs):
try:
tenant = self.ks_client.tenants.find(name=name)
logger.info('Tenant %s exists [id: %s].' % (name, tenant.id))
except NotFound:
tenant = self.ks_client.tenants.create(tenant_name=name, **kwargs)
logger.info('Tenant %s created [id: %s].' % (name, tenant.id))
return tenant
def create_user(self, name, password, tenant_name, **kwargs):
try:
user = self.ks_client.users.find(name=name)
logger.warning('User %s exists (password unchanged).' % name)
except NotFound:
tenant = self.create_tenant(tenant_name)
user = self.ks_client.users.create(name=name, password=password,
tenant_id=tenant.id, **kwargs)
logger.info('User %s created.' % name)
return user
def create_role(self, role_name, **kwargs):
try:
role = self.ks_client.roles.find(name=role_name)
logger.info('Role %s exists.' % role_name)
except NotFound:
role = self.ks_client.roles.create(role_name, **kwargs)
logger.info('Role %s created.' % role_name)
return role
def add_user_role(self, user, role, tenant, **kwargs):
try:
self.ks_client.roles.add_user_role(user, role, tenant, **kwargs)
logger.info('Role given to user.')
except Conflict:
logger.info('User already has the requested role.')
| 37.625 | 79 | 0.623636 |
dfe79e902c7fa3433fd2c3f19ad615675e7b3756 | 1,068 | py | Python | databricks_cli/oauth/__init__.py | sweisdb/databricks-cli | 5444cb8b94ef481e1656845f588d8d118bc352db | [
"Apache-2.0"
] | null | null | null | databricks_cli/oauth/__init__.py | sweisdb/databricks-cli | 5444cb8b94ef481e1656845f588d8d118bc352db | [
"Apache-2.0"
] | null | null | null | databricks_cli/oauth/__init__.py | sweisdb/databricks-cli | 5444cb8b94ef481e1656845f588d8d118bc352db | [
"Apache-2.0"
] | null | null | null | # Databricks CLI
# Copyright 2021 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"), except
# that the use of services to which certain application programming
# interfaces (each, an "API") connect requires that the user first obtain
# a license for the use of the APIs from Databricks, Inc. ("Databricks"),
# by creating an account at www.databricks.com and agreeing to either (a)
# the Community Edition Terms of Service, (b) the Databricks Terms of
# Service, or (c) another written agreement between Licensee and Databricks
# for the use of the APIs.
#
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 46.434783 | 75 | 0.764045 |
76b9af6500748d912b85c6751b6b954d2abac287 | 2,482 | py | Python | util/scale.py | kaidisn/carvana-challenge | cba536657714df7c1c33150b92e3e152195b68db | [
"MIT"
] | 24 | 2017-10-09T03:05:10.000Z | 2021-11-28T14:36:34.000Z | util/scale.py | kaidisn/carvana-challenge | cba536657714df7c1c33150b92e3e152195b68db | [
"MIT"
] | null | null | null | util/scale.py | kaidisn/carvana-challenge | cba536657714df7c1c33150b92e3e152195b68db | [
"MIT"
] | 3 | 2018-05-20T17:07:16.000Z | 2021-09-08T15:40:13.000Z | import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import cv2
def resize_image(im, sz):
'''
input:
im: numpy array of shape (num of channels, height, width)
output:
I_out: (num of channels, height, width)
'''
im = np.swapaxes(im, 0, 2)
h = im.shape[0]
w = im.shape[1]
c = im.shape[2]
# identify if im is a color image or a mask
if c == 3:
I_out = np.zeros((h, w, 3), dtype = np.float)
else :
I_out = np.zeros((h, w, 1), dtype = np.float)
# resize
I = cv2.resize(im, None, None, fx = np.float(sz), fy = np.float(sz), interpolation=cv2.INTER_LINEAR)
h_out = min(im.shape[0],I.shape[0])
w_out = min(im.shape[1],I.shape[1])
out_start=(int((h-h_out)/2), int((w-w_out)/2))
in_start=(int((I.shape[0]-h_out)/2), int((I.shape[1]-w_out)/2))
# set resized image in the center of output
if c==3:
I_out[out_start[0]:out_start[0] + h_out, out_start[1]:out_start[1] + w_out, :] = I[in_start[0]:in_start[0] + h_out, in_start[1]:in_start[1] + w_out, :]
else :
I_out[out_start[0]:out_start[0] + h_out, out_start[1]:out_start[1] + w_out, 0] = I[in_start[0]:in_start[0] + h_out, in_start[1]:in_start[1] + w_out]
I_out = np.swapaxes(I_out, 0, 2).astype('uint8')
#del im, I
#assert I_out.shape == np.swapaxes(im, 0, 2).shape
return I_out
def resize_TTA(im, sz):
im = np.swapaxes(im, 0, 2)
h = im.shape[0]
w = im.shape[1]
c = im.shape[2]
# identify if im is a color image or a mask
if c == 3:
I_out = np.zeros((h, w, 3), dtype=np.float)
else :
I_out = np.zeros((h, w), dtype=np.float)
# resize
I = cv2.resize(im, None, None, fx = np.float(sz), fy = np.float(sz), interpolation=cv2.INTER_LINEAR)
h_out = min(im.shape[0],I.shape[0])
w_out = min(im.shape[1],I.shape[1])
out_start=(int((h-h_out)/2), int((w-w_out)/2))
in_start=(int((I.shape[0]-h_out)/2), int((I.shape[1]-w_out)/2))
# set resized image in the center of output
if c==3:
I_out[out_start[0]:out_start[0] + h_out, out_start[1]:out_start[1] + w_out, :] = I[in_start[0]:in_start[0] + h_out, in_start[1]:in_start[1] + w_out, :]
I_out = np.swapaxes(I_out, 0, 2).astype('uint8')
else :
I_out[out_start[0]:out_start[0] + h_out, out_start[1]:out_start[1] + w_out] = I[in_start[0]:in_start[0] + h_out, in_start[1]:in_start[1] + w_out]
I_out=np.swapaxes(I_out, 0, 1)
#plt.imshow(I_out)
#plt.show()
#del im, I
#assert I_out.shape == np.swapaxes(im, 0, 2).shape
return I_out
| 34.957746 | 155 | 0.624093 |
c601e77acff5ff210c2ded468e06f32f86d6fb52 | 327 | py | Python | tests/EagerPendulumTest.py | dinies/SpikingCNN | 3ab4df25da77de2719f1b0f0f813b7d71d28c132 | [
"MIT"
] | 12 | 2019-03-09T02:36:05.000Z | 2021-07-05T18:50:02.000Z | tests/EagerPendulumTest.py | dinies/SpikingCNN | 3ab4df25da77de2719f1b0f0f813b7d71d28c132 | [
"MIT"
] | null | null | null | tests/EagerPendulumTest.py | dinies/SpikingCNN | 3ab4df25da77de2719f1b0f0f813b7d71d28c132 | [
"MIT"
] | 3 | 2019-04-11T14:03:09.000Z | 2020-02-26T00:09:14.000Z | import unittest
from context import models
from models.EagerPendulum import EagerPendulum
class EagerPendulumTest( unittest.TestCase):
def setUp(self):
self.p = EagerPendulum()
self.epsilon = 0.0001
def test_dummy(self):
self.assertTrue( True)
if __name__ == '__main__':
unittest.main()
| 20.4375 | 46 | 0.697248 |
4ed1898733268fb1dd7561f47af9fca1d1087b95 | 80 | py | Python | insert.py | joshavenue/python_notebook | 8d46ba88ef4f05dea6801364bc134edb981df02e | [
"Unlicense"
] | null | null | null | insert.py | joshavenue/python_notebook | 8d46ba88ef4f05dea6801364bc134edb981df02e | [
"Unlicense"
] | null | null | null | insert.py | joshavenue/python_notebook | 8d46ba88ef4f05dea6801364bc134edb981df02e | [
"Unlicense"
] | null | null | null | x = [1,2,3]
x.insert(1, 1) // Insert 1 into the list 1 //
x = [1,1,2,3]
| 16 | 52 | 0.4625 |
abdd65c0515dbcd41b962df28321b5781b863f49 | 944 | py | Python | DesignerInterface/manageDesign.py | a-b-h-i-97/BlockchainForAM | a204cab4e07e68301f10874b34d0f6c6eb5c3fab | [
"BSD-3-Clause"
] | null | null | null | DesignerInterface/manageDesign.py | a-b-h-i-97/BlockchainForAM | a204cab4e07e68301f10874b34d0f6c6eb5c3fab | [
"BSD-3-Clause"
] | null | null | null | DesignerInterface/manageDesign.py | a-b-h-i-97/BlockchainForAM | a204cab4e07e68301f10874b34d0f6c6eb5c3fab | [
"BSD-3-Clause"
] | null | null | null | from web3 import Web3
import os
import sys
sys.path.append(os.path.abspath('../DesignDatabase'))
import designHelpers as dh
print("\nWelcome to the manage design interface\n")
provider = Web3.IPCProvider(os.path.join(os.path.dirname(__file__), '../DesignNode/geth.ipc'))
w3 = Web3(provider)
choice = 0
while(choice != 5):
print("\n\nEnter 1 to upload a new design.")
print("Enter 2 to get no of design files stored")
print("Enter 3 to view design file details")
print("Enter 4 to update the design")
print("Enter 5 to exit")
choice = int(input("\nEnter your choice : "))
if (choice == 1):
dh.upload_design(w3)
elif (choice == 2):
dh.get_files_length(w3)
elif (choice == 3):
dh.get_design(w3)
elif (choice == 4):
dh.update_design(w3)
elif (choice == 5):
print("Exiting")
elif (choice == 6):
dh.kill(w3)
else:
print("Invalid option") | 26.971429 | 94 | 0.628178 |
c68bb8ace29742a3af1a243af2e207f2350a6d83 | 474 | py | Python | vqa/feature_extractor/_init_feat_paths.py | zjuchenlong/faster-rcnn.pytorch | 71555e8c9306a19c7e2f8a49c14027a3c056aaf8 | [
"MIT"
] | 11 | 2018-09-26T06:39:45.000Z | 2021-02-22T11:09:54.000Z | vqa/feature_extractor/_init_feat_paths.py | zjuchenlong/faster-rcnn.pytorch | 71555e8c9306a19c7e2f8a49c14027a3c056aaf8 | [
"MIT"
] | null | null | null | vqa/feature_extractor/_init_feat_paths.py | zjuchenlong/faster-rcnn.pytorch | 71555e8c9306a19c7e2f8a49c14027a3c056aaf8 | [
"MIT"
] | 3 | 2019-05-13T16:29:46.000Z | 2020-04-10T04:41:01.000Z | import os.path as osp
import sys
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
this_dir = osp.dirname(__file__)
# Add lib to PYTHONPATH
faster_rcnn_path = osp.join(this_dir, '..', '..')
# add_path(faster_rcnn_path)
tools_path = osp.join(faster_rcnn_path, 'tools')
add_path(tools_path)
lib_path = osp.join(faster_rcnn_path, 'lib')
add_path(lib_path)
vqa_path = osp.join(faster_rcnn_path, 'vqa')
add_path(vqa_path) | 22.571429 | 50 | 0.700422 |
1c5f0beb22042e6f0bde5a5c7aa6836d7c918ec1 | 638 | py | Python | qcloudsdkmonitor/BindAlarmRuleObjectsRequest.py | f3n9/qcloudcli | b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19 | [
"Apache-2.0"
] | null | null | null | qcloudsdkmonitor/BindAlarmRuleObjectsRequest.py | f3n9/qcloudcli | b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19 | [
"Apache-2.0"
] | null | null | null | qcloudsdkmonitor/BindAlarmRuleObjectsRequest.py | f3n9/qcloudcli | b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
class BindAlarmRuleObjectsRequest(Request):
def __init__(self):
super(BindAlarmRuleObjectsRequest, self).__init__(
'monitor', 'qcloudcliV1', 'BindAlarmRuleObjects', 'monitor.api.qcloud.com')
def get_alarmRuleId(self):
return self.get_params().get('alarmRuleId')
def set_alarmRuleId(self, alarmRuleId):
self.add_param('alarmRuleId', alarmRuleId)
def get_dimensions(self):
return self.get_params().get('dimensions')
def set_dimensions(self, dimensions):
self.add_param('dimensions', dimensions)
| 29 | 87 | 0.695925 |
db35bf555c1c5b322e390aee09cb1f957ade2ee6 | 23,202 | py | Python | ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/dhcpv4server_a0d174266d4c9a35774364ccc4dfccb3.py | rfrye-github/ixnetwork_restpy | 23eeb24b21568a23d3f31bbd72814ff55eb1af44 | [
"MIT"
] | null | null | null | ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/dhcpv4server_a0d174266d4c9a35774364ccc4dfccb3.py | rfrye-github/ixnetwork_restpy | 23eeb24b21568a23d3f31bbd72814ff55eb1af44 | [
"MIT"
] | null | null | null | ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/dhcpv4server_a0d174266d4c9a35774364ccc4dfccb3.py | rfrye-github/ixnetwork_restpy | 23eeb24b21568a23d3f31bbd72814ff55eb1af44 | [
"MIT"
] | null | null | null | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Dhcpv4server(Base):
"""DHCPv4 Server protocol.
The Dhcpv4server class encapsulates a list of dhcpv4server resources that are managed by the user.
A list of resources can be retrieved from the server using the Dhcpv4server.find() method.
The list can be managed by using the Dhcpv4server.add() and Dhcpv4server.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'dhcpv4server'
_SDM_ATT_MAP = {
'ConnectedVia': 'connectedVia',
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'EnableIgnoreOpt': 'enableIgnoreOpt',
'Errors': 'errors',
'IgnoreOpt': 'ignoreOpt',
'Multiplier': 'multiplier',
'Name': 'name',
'PoolCount': 'poolCount',
'SessionStatus': 'sessionStatus',
'StackedLayers': 'stackedLayers',
'StateCounts': 'stateCounts',
'Status': 'status',
'Subnet': 'subnet',
'SubnetAddrAssign': 'subnetAddrAssign',
'UseRapidCommit': 'useRapidCommit',
}
def __init__(self, parent):
super(Dhcpv4server, self).__init__(parent)
@property
def Dhcp4ServerSessions(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.dhcp4serversessions_ee4473f2c9917b461897bbf5dd13f9d0.Dhcp4ServerSessions): An instance of the Dhcp4ServerSessions class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.dhcp4serversessions_ee4473f2c9917b461897bbf5dd13f9d0 import Dhcp4ServerSessions
return Dhcp4ServerSessions(self)._select()
@property
def TlvProfile(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tlvprofile.tlvprofile_69db000d3ef3b060f5edc387b878736c.TlvProfile): An instance of the TlvProfile class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tlvprofile.tlvprofile_69db000d3ef3b060f5edc387b878736c import TlvProfile
return TlvProfile(self)
@property
def ConnectedVia(self):
"""DEPRECATED
Returns
-------
- list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*]): List of layers this layer is used to connect with to the wire.
"""
return self._get_attribute(self._SDM_ATT_MAP['ConnectedVia'])
@ConnectedVia.setter
def ConnectedVia(self, value):
self._set_attribute(self._SDM_ATT_MAP['ConnectedVia'], value)
@property
def Count(self):
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def EnableIgnoreOpt(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Enables DHCP Server to ignore options provided in the Ignore Options field
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableIgnoreOpt']))
@property
def Errors(self):
"""
Returns
-------
- list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork//.../*],arg2:list[str])): A list of errors that have occurred
"""
return self._get_attribute(self._SDM_ATT_MAP['Errors'])
@property
def IgnoreOpt(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Choose which Option needs to get ignored by the server (can be given multiple by comma separated)
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IgnoreOpt']))
@property
def Multiplier(self):
"""
Returns
-------
- number: Number of layer instances per parent instance (multiplier)
"""
return self._get_attribute(self._SDM_ATT_MAP['Multiplier'])
@Multiplier.setter
def Multiplier(self, value):
self._set_attribute(self._SDM_ATT_MAP['Multiplier'], value)
@property
def Name(self):
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def PoolCount(self):
"""
Returns
-------
- number: number of DHCP pools a single server has
"""
return self._get_attribute(self._SDM_ATT_MAP['PoolCount'])
@PoolCount.setter
def PoolCount(self, value):
self._set_attribute(self._SDM_ATT_MAP['PoolCount'], value)
@property
def SessionStatus(self):
"""
Returns
-------
- list(str[down | notStarted | up]): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation is didn't successfully complete (yet). Up - session came up successfully.
"""
return self._get_attribute(self._SDM_ATT_MAP['SessionStatus'])
@property
def StackedLayers(self):
"""
Returns
-------
- list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*]): List of secondary (many to one) child layer protocols
"""
return self._get_attribute(self._SDM_ATT_MAP['StackedLayers'])
@StackedLayers.setter
def StackedLayers(self, value):
self._set_attribute(self._SDM_ATT_MAP['StackedLayers'], value)
@property
def StateCounts(self):
"""
Returns
-------
- dict(total:number,notStarted:number,down:number,up:number): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
"""
return self._get_attribute(self._SDM_ATT_MAP['StateCounts'])
@property
def Status(self):
"""
Returns
-------
- str(configured | error | mixed | notStarted | started | starting | stopping): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
"""
return self._get_attribute(self._SDM_ATT_MAP['Status'])
@property
def Subnet(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Choose which subnet to be used for address assignment.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Subnet']))
@property
def SubnetAddrAssign(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Enables DHCP Server to assign addresses based on subnet. The leased address is created dynamically by overwriting the subnet portion defined in the Address Pool with the subnet option present in the requests from the clients behind relays.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SubnetAddrAssign']))
@property
def UseRapidCommit(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Enables DHCP Server to negotiate leases with rapid commit for DHCP Clients that request it.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UseRapidCommit']))
def update(self, ConnectedVia=None, Multiplier=None, Name=None, PoolCount=None, StackedLayers=None):
"""Updates dhcpv4server resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- PoolCount (number): number of DHCP pools a single server has
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, ConnectedVia=None, Multiplier=None, Name=None, PoolCount=None, StackedLayers=None):
"""Adds a new dhcpv4server resource on the server and adds it to the container.
Args
----
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- PoolCount (number): number of DHCP pools a single server has
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
Returns
-------
- self: This instance with all currently retrieved dhcpv4server resources using find and the newly added dhcpv4server resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained dhcpv4server resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, ConnectedVia=None, Count=None, DescriptiveName=None, Errors=None, Multiplier=None, Name=None, PoolCount=None, SessionStatus=None, StackedLayers=None, StateCounts=None, Status=None):
"""Finds and retrieves dhcpv4server resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve dhcpv4server resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all dhcpv4server resources from the server.
Args
----
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- Errors (list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork//.../*],arg2:list[str]))): A list of errors that have occurred
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- PoolCount (number): number of DHCP pools a single server has
- SessionStatus (list(str[down | notStarted | up])): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation is didn't successfully complete (yet). Up - session came up successfully.
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
- StateCounts (dict(total:number,notStarted:number,down:number,up:number)): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
- Status (str(configured | error | mixed | notStarted | started | starting | stopping)): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
Returns
-------
- self: This instance with matching dhcpv4server resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of dhcpv4server data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the dhcpv4server resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def get_device_ids(self, PortNames=None, EnableIgnoreOpt=None, IgnoreOpt=None, Subnet=None, SubnetAddrAssign=None, UseRapidCommit=None):
    """Base class infrastructure that gets a list of dhcpv4server device ids encapsulated by this object.

    Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.

    Args
    ----
    - PortNames (str): optional regex of port names
    - EnableIgnoreOpt (str): optional regex of enableIgnoreOpt
    - IgnoreOpt (str): optional regex of ignoreOpt
    - Subnet (str): optional regex of subnet
    - SubnetAddrAssign (str): optional regex of subnetAddrAssign
    - UseRapidCommit (str): optional regex of useRapidCommit

    Returns
    -------
    - list(int): A list of device ids that meets the regex criteria provided in the method parameters

    Raises
    ------
    - ServerError: The server has encountered an uncategorized error condition
    """
    # locals() deliberately snapshots every filter argument by name; do not
    # introduce extra local variables before this call or they would be
    # passed along as filters too.
    return self._get_ngpf_device_ids(locals())
def Abort(self, *args, **kwargs):
    """Executes the abort operation on the server.

    Abort CPF control plane (equals to demote to kUnconfigured state).

    The IxNetwork model overloads this method; python does not, so both
    forms are accepted through keyword arguments:
        abort(SessionIndices=list) - e.g. [1, 2, 3]
        abort(SessionIndices=str)  - e.g. '1-4;6;7-12'

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self}
    # Positional arguments map onto Arg2, Arg3, ... of the exec payload.
    for index, value in enumerate(args, start=2):
        payload['Arg%s' % index] = value
    # Keyword arguments are forwarded under their own names.
    payload.update(kwargs)
    return self._execute('abort', payload=payload, response_object=None)
def ForceRenew(self, *args, **kwargs):
    """Executes the forceRenew operation on the server.

    Send Force Renew for selected DHCPv4 Server items.

    The IxNetwork model overloads this method; python does not:
        forceRenew(SessionIndices=list) -> list
        forceRenew(SessionIndices=str)  -> list
    Each returned element is dict(port, isSuccess, data) per /vport; this
    exec is not asynchronous.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # Unlike the session-control execs, this operation is addressed by href.
    payload = {'Arg1': self.href}
    for index, value in enumerate(args, start=2):
        payload['Arg%s' % index] = value
    payload.update(kwargs)
    return self._execute('forceRenew', payload=payload, response_object=None)
def RestartDown(self, *args, **kwargs):
    """Executes the restartDown operation on the server.

    Stop and start interfaces and sessions that are in Down state.

    The IxNetwork model overloads this method; python does not:
        restartDown(SessionIndices=list) - e.g. [1, 2, 3]
        restartDown(SessionIndices=str)  - e.g. '1-4;6;7-12'

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self}
    for index, value in enumerate(args, start=2):
        payload['Arg%s' % index] = value
    payload.update(kwargs)
    return self._execute('restartDown', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
    """Executes the start operation on the server.

    Start CPF control plane (equals to promote to negotiated state).

    The IxNetwork model overloads this method; python does not:
        start(SessionIndices=list) - e.g. [1, 2, 3]
        start(SessionIndices=str)  - e.g. '1-4;6;7-12'

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self}
    for index, value in enumerate(args, start=2):
        payload['Arg%s' % index] = value
    payload.update(kwargs)
    return self._execute('start', payload=payload, response_object=None)
def Stop(self, *args, **kwargs):
    """Executes the stop operation on the server.

    Stop CPF control plane (equals to demote to PreValidated-DoDDone state).

    The IxNetwork model overloads this method; python does not:
        stop(SessionIndices=list) - e.g. [1, 2, 3]
        stop(SessionIndices=str)  - e.g. '1-4;6;7-12'

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self}
    for index, value in enumerate(args, start=2):
        payload['Arg%s' % index] = value
    payload.update(kwargs)
    return self._execute('stop', payload=payload, response_object=None)
| 46.496994 | 326 | 0.645979 |
6638f9069fb1429e4daa1bcc01a914de9f803837 | 552 | py | Python | adaptoctree/types.py | Excalibur-SLE/AdapOctree | 27e49e142463eb0114fb37aad013f51680aa0c0f | [
"BSD-3-Clause"
] | 4 | 2020-12-22T17:34:56.000Z | 2021-04-27T10:58:52.000Z | adaptoctree/types.py | Excalibur-SLE/AdapOctree | 27e49e142463eb0114fb37aad013f51680aa0c0f | [
"BSD-3-Clause"
] | 6 | 2020-12-21T17:10:28.000Z | 2021-06-08T15:32:22.000Z | adaptoctree/types.py | Excalibur-SLE/AdaptOctree | 27e49e142463eb0114fb37aad013f51680aa0c0f | [
"BSD-3-Clause"
] | null | null | null | """
Type aliases.
"""
import numba
# --- Key types -------------------------------------------------------------
Key = numba.int64                       # single 64-bit key
Keys = numba.int64[:]                   # 1-D array of keys
KeySet = numba.types.Set(Key)           # typed set of keys
KeyList = numba.types.ListType(Key)     # typed list of keys

# --- Anchor types ----------------------------------------------------------
Anchor = numba.int64[:]                 # one anchor as a 1-D int64 vector
# NOTE(review): Anchors has the same 1-D signature as Anchor; a 2-D alias
# (numba.int64[:, :]) may have been intended — confirm against callers.
Anchors = numba.int64[:]

# --- Scalar numeric types --------------------------------------------------
Single = numba.float32                  # single-precision float
Double = numba.float64                  # double-precision float
Int = numba.int32                       # 32-bit integer
Long = numba.int64                      # 64-bit integer

# --- Integer containers ----------------------------------------------------
LongArray = numba.int64[:]
LongArray2D = numba.int64[:,:]
IntArray = numba.int32[:]
IntList = numba.types.ListType(Int)
LongIntList = numba.types.ListType(Long)

# --- Coordinate types ------------------------------------------------------
Coord = numba.float64[:]                # one coordinate as a float vector
Coords = numba.float64[:,:]             # 2-D array: one coordinate per row
Bounds = numba.types.UniTuple(Coord, 2) # (lower, upper) coordinate pair
Void = numba.types.void | 22.08 | 40 | 0.719203 |
15485780d55d6a56f551ff052991b2007a98791d | 1,446 | py | Python | river/stats/summing.py | Styren/river | 128a5ffe9f80df85e23d9ae871e02bea6dc9c100 | [
"BSD-3-Clause"
] | 1 | 2021-04-19T10:47:11.000Z | 2021-04-19T10:47:11.000Z | river/stats/summing.py | Styren/river | 128a5ffe9f80df85e23d9ae871e02bea6dc9c100 | [
"BSD-3-Clause"
] | null | null | null | river/stats/summing.py | Styren/river | 128a5ffe9f80df85e23d9ae871e02bea6dc9c100 | [
"BSD-3-Clause"
] | 1 | 2021-01-22T15:18:39.000Z | 2021-01-22T15:18:39.000Z | from .. import utils
from . import base
class Sum(base.Univariate):
    """Running sum.

    Attributes
    ----------
    sum : float
        The running sum.

    Examples
    --------

    >>> from river import stats

    >>> X = [-5, -3, -1, 1, 3, 5]

    >>> running_sum = stats.Sum()
    >>> for x in X:
    ...     print(running_sum.update(x).get())
    -5.0
    -8.0
    -9.0
    -8.0
    -5.0
    0.0

    """

    def __init__(self):
        # Start from 0.0 so the sum is always a float.
        self.sum = 0.0

    def update(self, x):
        self.sum += x
        return self

    def get(self):
        return self.sum
class RollingSum(base.RollingUnivariate, utils.Window):
    """Running sum over a fixed-size window.

    Parameters
    ----------
    window_size
        Size of the rolling window.

    Attributes
    ----------
    sum : int
        The running rolling sum.

    Examples
    --------

    >>> from river import stats

    >>> X = [1, -4, 3, -2, 2, 1]

    >>> rolling_sum = stats.RollingSum(2)
    >>> for x in X:
    ...     print(rolling_sum.update(x).get())
    1
    -3
    -1
    1
    0
    3

    """

    def __init__(self, window_size: int):
        super().__init__(size=window_size)
        self.sum = 0

    @property
    def window_size(self):
        return self.size

    def update(self, x):
        # When the window is already full, the value about to be evicted
        # leaves the running sum before the new value enters it.
        window_full = len(self) == self.size
        if window_full:
            self.sum -= self[0]
        self.sum += x
        self.append(x)
        return self

    def get(self):
        return self.sum
| 15.89011 | 55 | 0.48686 |
b24038e262bb7ad802706cd7f06a002259b747f3 | 3,875 | py | Python | test/test_basic.py | letmerecall/nameko-grpc | 4fa602831aac92218fad975a51e4f41f3a34699e | [
"Apache-2.0"
] | 44 | 2019-02-28T15:07:11.000Z | 2021-11-09T07:17:19.000Z | test/test_basic.py | letmerecall/nameko-grpc | 4fa602831aac92218fad975a51e4f41f3a34699e | [
"Apache-2.0"
] | 10 | 2019-06-10T08:56:41.000Z | 2021-08-04T14:28:54.000Z | test/test_basic.py | letmerecall/nameko-grpc | 4fa602831aac92218fad975a51e4f41f3a34699e | [
"Apache-2.0"
] | 8 | 2019-02-11T16:42:30.000Z | 2020-07-29T06:46:12.000Z | # -*- coding: utf-8 -*-
import pytest
@pytest.mark.equivalence
class TestStandard:
    """Happy-path checks for the four gRPC method cardinalities."""

    def test_unary_unary(self, client, protobufs):
        reply = client.unary_unary(protobufs.ExampleRequest(value="A"))
        assert reply.message == "A"

    def test_unary_stream(self, client, protobufs):
        request = protobufs.ExampleRequest(value="A", response_count=2)
        received = [(reply.message, reply.seqno) for reply in client.unary_stream(request)]
        assert received == [("A", 1), ("A", 2)]

    def test_stream_unary(self, client, protobufs):
        requests = (protobufs.ExampleRequest(value=value) for value in ["A", "B"])
        reply = client.stream_unary(requests)
        assert reply.message == "A,B"

    def test_stream_stream(self, client, protobufs):
        requests = (protobufs.ExampleRequest(value=value) for value in ["A", "B"])
        received = [(reply.message, reply.seqno) for reply in client.stream_stream(requests)]
        assert received == [("A", 1), ("B", 2)]
@pytest.mark.equivalence
class TestLarge:
    """Verify requests and responses survive large (20k-character) payloads."""

    def test_large_unary_request(self, client, protobufs):
        reply = client.unary_unary(protobufs.ExampleRequest(value="A", blob="X" * 20000))
        assert reply.message == "A"

    def test_large_unary_response(self, client, protobufs):
        multiplier = 20000
        reply = client.unary_unary(protobufs.ExampleRequest(value="A", multiplier=multiplier))
        assert reply.message == "A" * multiplier

    def test_large_streaming_request(self, client, protobufs):
        requests = (
            protobufs.ExampleRequest(value=value, blob="X" * 20000) for value in ["A", "B"]
        )
        reply = client.stream_unary(requests)
        assert reply.message == "A,B"

    def test_large_streaming_response(self, client, protobufs):
        multiplier = 20000
        request = protobufs.ExampleRequest(value="A", multiplier=multiplier, response_count=2)
        received = [(reply.message, reply.seqno) for reply in client.unary_stream(request)]
        assert received == [("A" * multiplier, 1), ("A" * multiplier, 2)]
@pytest.mark.equivalence
class TestFuture:
    """Same coverage as TestStandard, but through the .future() call style."""

    def test_unary_unary(self, client, protobufs):
        future = client.unary_unary.future(protobufs.ExampleRequest(value="A"))
        assert future.result().message == "A"

    def test_unary_stream(self, client, protobufs):
        future = client.unary_stream.future(
            protobufs.ExampleRequest(value="A", response_count=2)
        )
        received = [(reply.message, reply.seqno) for reply in future.result()]
        assert received == [("A", 1), ("A", 2)]

    def test_stream_unary(self, client, protobufs):
        requests = (protobufs.ExampleRequest(value=value) for value in ["A", "B"])
        future = client.stream_unary.future(requests)
        assert future.result().message == "A,B"

    def test_stream_stream(self, client, protobufs):
        requests = (protobufs.ExampleRequest(value=value) for value in ["A", "B"])
        future = client.stream_stream.future(requests)
        received = [(reply.message, reply.seqno) for reply in future.result()]
        assert received == [("A", 1), ("B", 2)]
| 34.598214 | 88 | 0.615226 |
4c440eeeb86e0f73f7c98c4666c241c537c03bd7 | 64 | py | Python | test_hello.py | DJprime127/cs4843 | ec20ca2042f981761936f5659fa44a8c3aee9a5e | [
"Apache-2.0"
] | null | null | null | test_hello.py | DJprime127/cs4843 | ec20ca2042f981761936f5659fa44a8c3aee9a5e | [
"Apache-2.0"
] | null | null | null | test_hello.py | DJprime127/cs4843 | ec20ca2042f981761936f5659fa44a8c3aee9a5e | [
"Apache-2.0"
] | null | null | null | from hello import add
def test_add():
assert add(1,2) == 3
| 12.8 | 24 | 0.640625 |
9189f468dcd32bc0eedd62bccdc8a3c807704c76 | 255 | py | Python | weather_api/urls.py | Josephchinedu/python-rest-weather-api | 27986db99d0b7af0c2778cbbd650c7bfcd8db03f | [
"MIT"
] | null | null | null | weather_api/urls.py | Josephchinedu/python-rest-weather-api | 27986db99d0b7af0c2778cbbd650c7bfcd8db03f | [
"MIT"
] | null | null | null | weather_api/urls.py | Josephchinedu/python-rest-weather-api | 27986db99d0b7af0c2778cbbd650c7bfcd8db03f | [
"MIT"
] | null | null | null | from django.urls import path, include
from weather_api import views
from rest_framework.routers import DefaultRouter
# DRF router generates the route set for the registered viewset.
router = DefaultRouter()
router.register('descriptions', views.DescriptionViewSet)

# Mount every router-generated URL at the application root.
urlpatterns = [
    path('', include(router.urls))
]
| 23.181818 | 57 | 0.788235 |
3034bf230219be6e85114fbbe8e600d01a843c42 | 591 | py | Python | main.py | stevommmm/runningwheel | f905a682da23ec0f93c5e54b10292dd5bb03d5fc | [
"MIT"
] | null | null | null | main.py | stevommmm/runningwheel | f905a682da23ec0f93c5e54b10292dd5bb03d5fc | [
"MIT"
] | null | null | null | main.py | stevommmm/runningwheel | f905a682da23ec0f93c5e54b10292dd5bb03d5fc | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from aiohttp import web
from pathlib import Path
import aiodocker
import logging
import runningwheel
logging.basicConfig(level=logging.DEBUG)

if __name__ == '__main__':
    app = web.Application()
    # Absolute directory containing this file, shared with the handlers.
    app['root'] = Path(__file__).parent.absolute()
    # Shared aiodocker client stored on the application object.
    app['docker'] = aiodocker.Docker()
    app.router.add_get('/', runningwheel.upload_wheel)
    app.router.add_get('/run/{runtime}', runningwheel.run)
    app.router.add_post('/store', runningwheel.store_wheel)
    app.on_shutdown.append(runningwheel.on_shutdown)
    # Ensure the wheel storage directory exists before serving requests.
    Path(app['root'], 'wheels/').mkdir(exist_ok=True)
    web.run_app(app)
| 24.625 | 56 | 0.754653 |
b3f93c27c5f9c9c92683171840192ad7a3d18d9d | 14,108 | py | Python | tests/unit/states/test_boto_s3_bucket.py | nizD/salt | bbe135d62d8d8b4e4a7d0362097e1b3a3b092bed | [
"Apache-2.0"
] | 5 | 2017-02-07T05:39:29.000Z | 2020-06-13T02:07:33.000Z | tests/unit/states/test_boto_s3_bucket.py | nizD/salt | bbe135d62d8d8b4e4a7d0362097e1b3a3b092bed | [
"Apache-2.0"
] | 86 | 2017-01-27T11:54:46.000Z | 2020-05-20T06:25:26.000Z | tests/unit/states/test_boto_s3_bucket.py | nizD/salt | bbe135d62d8d8b4e4a7d0362097e1b3a3b092bed | [
"Apache-2.0"
] | 11 | 2017-01-26T19:36:29.000Z | 2021-12-11T07:54:16.000Z | # -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
from copy import deepcopy
import logging
import random
import string
# Import Salt Testing libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import skipIf, TestCase
from tests.support.mock import MagicMock, NO_MOCK, NO_MOCK_REASON, patch
# Import Salt libs
from salt.ext import six
import salt.loader
from salt.utils.versions import LooseVersion
import salt.states.boto_s3_bucket as boto_s3_bucket
# pylint: disable=import-error,no-name-in-module,unused-import
from tests.unit.modules.test_boto_s3_bucket import BotoS3BucketTestCaseMixin
# Import 3rd-party libs
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
try:
import boto
import boto3
from botocore.exceptions import ClientError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
# pylint: enable=import-error,no-name-in-module,unused-import
# the boto_s3_bucket module relies on the connect_to_region() method
# which was added in boto 2.8.0
# https://github.com/boto/boto/commit/33ac26b416fbb48a60602542b4ce15dcc7029f12
# Minimum boto3 release accepted by _has_required_boto() below.
required_boto3_version = '1.2.1'

log = logging.getLogger(__name__)
def _has_required_boto():
    '''
    Return True when boto is importable and the installed boto3 meets the
    minimum version pinned in ``required_boto3_version``, False otherwise.
    '''
    if not HAS_BOTO:
        return False
    return LooseVersion(boto3.__version__) >= LooseVersion(required_boto3_version)
# All fixture data below is only defined when a usable boto3 is present,
# because building the canned errors requires botocore's ClientError.
if _has_required_boto():
    # Dummy connection settings shared by every test.
    region = 'us-east-1'
    access_key = 'GKTADJGHEIQSXMKKRBJ08H'
    secret_key = 'askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs'
    conn_parameters = {'region': region, 'key': access_key, 'keyid': secret_key, 'profile': {}}
    error_message = 'An error occurred (101) when calling the {0} operation: Test-defined error'
    # Canned botocore errors used as mock side effects.
    not_found_error = ClientError({
        'Error': {
            'Code': '404',
            'Message': "Test-defined error"
        }
    }, 'msg')
    error_content = {
        'Error': {
            'Code': 101,
            'Message': "Test-defined error"
        }
    }
    # Canned list_buckets() response.
    list_ret = {
        'Buckets': [{
            'Name': 'mybucket',
            'CreationDate': None
        }],
        'Owner': {
            'Type': 'CanonicalUser',
            'DisplayName': 'testuser',
            'ID': '111111222222'
        },
        'ResponseMetadata': {'Key': 'Value'}
    }
    # Arguments passed into the boto_s3_bucket.present state under test.
    config_in = {
        'LocationConstraint': 'EU',
        'ACL': {
            'ACL': 'public-read'
        },
        'CORSRules': [{
            'AllowedMethods': ["GET"],
            'AllowedOrigins': ["*"],
        }],
        'LifecycleConfiguration': [{
            'Expiration': {
                'Days': 1
            },
            'Prefix': 'prefix',
            'Status': 'Enabled',
            'ID': 'asdfghjklpoiuytrewq'
        }],
        'Logging': {
            'TargetBucket': 'my-bucket',
            'TargetPrefix': 'prefix'
        },
        'NotificationConfiguration': {
            'LambdaFunctionConfigurations': [{
                'LambdaFunctionArn': 'arn:aws:lambda:us-east-1:111111222222:function:my-function',
                'Id': 'zxcvbnmlkjhgfdsa',
                'Events': ["s3:ObjectCreated:*"],
                'Filter': {
                    'Key': {
                        'FilterRules': [{
                            'Name': 'prefix',
                            'Value': 'string'
                        }]
                    }
                }
            }]
        },
        'Policy': {
            'Version': "2012-10-17",
            'Statement': [{
                'Sid': "",
                'Effect': "Allow",
                'Principal': {
                    'AWS': "arn:aws:iam::111111222222:root"
                },
                'Action': "s3:PutObject",
                'Resource': "arn:aws:s3:::my-bucket/*"
            }]
        },
        'Replication': {
            'Role': 'arn:aws:iam::11111222222:my-role',
            'Rules': [{
                'ID': "r1",
                'Prefix': "prefix",
                'Status': "Enabled",
                'Destination': {
                    'Bucket': "arn:aws:s3:::my-bucket"
                }
            }]
        },
        'RequestPayment': {
            'Payer': 'Requester'
        },
        'Tagging': {
            'a': 'b',
            'c': 'd'
        },
        'Versioning': {
            'Status': 'Enabled'
        },
        'Website': {
            'ErrorDocument': {
                'Key': 'error.html'
            },
            'IndexDocument': {
                'Suffix': 'index.html'
            }
        }
    }
    # Canned per-API return values; keys are the boto3 client method names
    # that the mocked connection answers with the corresponding value.
    config_ret = {
        'get_bucket_acl': {
            'Grants': [{
                'Grantee': {
                    'Type': 'Group',
                    'URI': 'http://acs.amazonaws.com/groups/global/AllUsers'
                },
                'Permission': 'READ'
            }],
            'Owner': {
                'DisplayName': 'testuser',
                'ID': '111111222222'
            }
        },
        'get_bucket_cors': {
            'CORSRules': [{
                'AllowedMethods': ["GET"],
                'AllowedOrigins': ["*"],
            }]
        },
        'get_bucket_lifecycle_configuration': {
            'Rules': [{
                'Expiration': {
                    'Days': 1
                },
                'Prefix': 'prefix',
                'Status': 'Enabled',
                'ID': 'asdfghjklpoiuytrewq'
            }]
        },
        'get_bucket_location': {
            'LocationConstraint': 'EU'
        },
        'get_bucket_logging': {
            'LoggingEnabled': {
                'TargetBucket': 'my-bucket',
                'TargetPrefix': 'prefix'
            }
        },
        'get_bucket_notification_configuration': {
            'LambdaFunctionConfigurations': [{
                'LambdaFunctionArn': 'arn:aws:lambda:us-east-1:111111222222:function:my-function',
                'Id': 'zxcvbnmlkjhgfdsa',
                'Events': ["s3:ObjectCreated:*"],
                'Filter': {
                    'Key': {
                        'FilterRules': [{
                            'Name': 'prefix',
                            'Value': 'string'
                        }]
                    }
                }
            }]
        },
        'get_bucket_policy': {
            'Policy':
                '{"Version":"2012-10-17","Statement":[{"Sid":"","Effect":"Allow","Principal":{"AWS":"arn:aws:iam::111111222222:root"},"Action":"s3:PutObject","Resource":"arn:aws:s3:::my-bucket/*"}]}'
        },
        'get_bucket_replication': {
            'ReplicationConfiguration': {
                'Role': 'arn:aws:iam::11111222222:my-role',
                'Rules': [{
                    'ID': "r1",
                    'Prefix': "prefix",
                    'Status': "Enabled",
                    'Destination': {
                        'Bucket': "arn:aws:s3:::my-bucket"
                    }
                }]
            }
        },
        'get_bucket_request_payment': {'Payer': 'Requester'},
        'get_bucket_tagging': {
            'TagSet': [{
                'Key': 'c',
                'Value': 'd'
            }, {
                'Key': 'a',
                'Value': 'b',
            }]
        },
        'get_bucket_versioning': {
            'Status': 'Enabled'
        },
        'get_bucket_website': {
            'ErrorDocument': {
                'Key': 'error.html'
            },
            'IndexDocument': {
                'Suffix': 'index.html'
            }
        }
    }
    # Canned create_bucket() response.
    bucket_ret = {
        'Location': 'EU'
    }
# The whole suite is skipped unless boto, a sufficiently recent boto3 and
# mock support are all available.
@skipIf(HAS_BOTO is False, 'The boto module must be installed.')
@skipIf(_has_required_boto() is False, 'The boto3 module must be greater than'
        ' or equal to version {0}'
        .format(required_boto3_version))
@skipIf(NO_MOCK, NO_MOCK_REASON)
class BotoS3BucketStateTestCaseBase(TestCase, LoaderModuleMockMixin):
    # Mocked boto3 client; recreated for every test in setUp().
    conn = None

    def setup_loader_modules(self):
        """Build the salt loader dunders the boto_s3_bucket state module needs."""
        ctx = {}
        utils = salt.loader.utils(
            self.opts,
            whitelist=['boto', 'boto3', 'args', 'systemd', 'path', 'platform', 'reg'],
            context=ctx)
        serializers = salt.loader.serializers(self.opts)
        self.funcs = funcs = salt.loader.minion_mods(self.opts, context=ctx, utils=utils, whitelist=['boto_s3_bucket'])
        self.salt_states = salt.loader.states(opts=self.opts, functions=funcs, utils=utils, whitelist=['boto_s3_bucket'],
                                              serializers=serializers)
        return {
            boto_s3_bucket: {
                '__opts__': self.opts,
                '__salt__': funcs,
                '__utils__': utils,
                '__states__': self.salt_states,
                '__serializers__': serializers,
            }
        }

    @classmethod
    def setUpClass(cls):
        cls.opts = salt.config.DEFAULT_MINION_OPTS
        cls.opts['grains'] = salt.loader.grains(cls.opts)

    @classmethod
    def tearDownClass(cls):
        del cls.opts

    def setUp(self):
        self.addCleanup(delattr, self, 'funcs')
        self.addCleanup(delattr, self, 'salt_states')
        # connections keep getting cached from prior tests, can't find the
        # correct context object to clear it. So randomize the cache key, to
        # prevent any cache hits
        conn_parameters['key'] = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(50))
        self.patcher = patch('boto3.session.Session')
        self.addCleanup(self.patcher.stop)
        self.addCleanup(delattr, self, 'patcher')
        mock_session = self.patcher.start()
        session_instance = mock_session.return_value
        # Any client created from a boto3 Session now resolves to this mock.
        self.conn = MagicMock()
        self.addCleanup(delattr, self, 'conn')
        session_instance.client.return_value = self.conn
class BotoS3BucketTestCase(BotoS3BucketStateTestCaseBase, BotoS3BucketTestCaseMixin):
    '''
    TestCase for salt.modules.boto_s3_bucket state.module
    '''

    def test_present_when_bucket_does_not_exist(self):
        '''
        Tests present on a bucket that does not exist.
        '''
        # First head_bucket call raises 404 (bucket absent); the second call,
        # made after creation, succeeds.
        self.conn.head_bucket.side_effect = [not_found_error, None]
        self.conn.list_buckets.return_value = deepcopy(list_ret)
        self.conn.create_bucket.return_value = bucket_ret
        # Every describe-style API answers with its canned config_ret value.
        for key, value in six.iteritems(config_ret):
            getattr(self.conn, key).return_value = deepcopy(value)
        with patch.dict(self.funcs, {'boto_iam.get_account_id': MagicMock(return_value='111111222222')}):
            result = self.salt_states['boto_s3_bucket.present'](
                'bucket present',
                Bucket='testbucket',
                **config_in
            )
        self.assertTrue(result['result'])
        self.assertEqual(result['changes']['new']['bucket']['Location'], config_ret['get_bucket_location'])

    def test_present_when_bucket_exists_no_mods(self):
        # Bucket already exists and config_in matches config_ret exactly,
        # so present must report success with no changes.
        self.conn.list_buckets.return_value = deepcopy(list_ret)
        for key, value in six.iteritems(config_ret):
            getattr(self.conn, key).return_value = deepcopy(value)
        with patch.dict(self.funcs, {'boto_iam.get_account_id': MagicMock(return_value='111111222222')}):
            result = self.salt_states['boto_s3_bucket.present'](
                'bucket present',
                Bucket='testbucket',
                **config_in
            )
        self.assertTrue(result['result'])
        self.assertEqual(result['changes'], {})

    def test_present_when_bucket_exists_all_mods(self):
        # Only LocationConstraint is requested, so every other configured
        # attribute differs from the canned state and must show up as changes.
        self.conn.list_buckets.return_value = deepcopy(list_ret)
        for key, value in six.iteritems(config_ret):
            getattr(self.conn, key).return_value = deepcopy(value)
        with patch.dict(self.funcs, {'boto_iam.get_account_id': MagicMock(return_value='111111222222')}):
            result = self.salt_states['boto_s3_bucket.present'](
                'bucket present',
                Bucket='testbucket',
                LocationConstraint=config_in['LocationConstraint']
            )
        self.assertTrue(result['result'])
        self.assertNotEqual(result['changes'], {})

    def test_present_with_failure(self):
        # create_bucket blows up -> state must fail with a helpful comment.
        self.conn.head_bucket.side_effect = [not_found_error, None]
        self.conn.list_buckets.return_value = deepcopy(list_ret)
        self.conn.create_bucket.side_effect = ClientError(error_content, 'create_bucket')
        with patch.dict(self.funcs, {'boto_iam.get_account_id': MagicMock(return_value='111111222222')}):
            result = self.salt_states['boto_s3_bucket.present'](
                'bucket present',
                Bucket='testbucket',
                **config_in
            )
        self.assertFalse(result['result'])
        self.assertTrue('Failed to create bucket' in result['comment'])

    def test_absent_when_bucket_does_not_exist(self):
        '''
        Tests absent on a bucket that does not exist.
        '''
        self.conn.head_bucket.side_effect = [not_found_error, None]
        result = self.salt_states['boto_s3_bucket.absent']('test', 'mybucket')
        self.assertTrue(result['result'])
        self.assertEqual(result['changes'], {})

    def test_absent_when_bucket_exists(self):
        result = self.salt_states['boto_s3_bucket.absent']('test', 'testbucket')
        self.assertTrue(result['result'])
        self.assertEqual(result['changes']['new']['bucket'], None)

    def test_absent_with_failure(self):
        # delete_bucket blows up -> state must fail with a helpful comment.
        self.conn.delete_bucket.side_effect = ClientError(error_content, 'delete_bucket')
        result = self.salt_states['boto_s3_bucket.absent']('test', 'testbucket')
        self.assertFalse(result['result'])
        self.assertTrue('Failed to delete bucket' in result['comment'])
| 35.007444 | 199 | 0.532606 |
a4e1b64a9d4427367e163de31572a970658eff90 | 567 | py | Python | src/data/Note.py | A-fume/A.fume.Analysis | 064eb453aceccca435d361d890b3f3cc7449e10d | [
"MIT"
] | null | null | null | src/data/Note.py | A-fume/A.fume.Analysis | 064eb453aceccca435d361d890b3f3cc7449e10d | [
"MIT"
] | null | null | null | src/data/Note.py | A-fume/A.fume.Analysis | 064eb453aceccca435d361d890b3f3cc7449e10d | [
"MIT"
class Note:
    """Links a perfume index to an ingredient index, tagged with a note type."""

    # Note-type constants.
    TYPE_TOP = 1
    TYPE_MIDDLE = 2
    TYPE_BASE = 3
    TYPE_SINGLE = 4

    def __init__(self, perfume_idx, ingredient_idx, type):
        self.perfume_idx = perfume_idx
        self.ingredient_idx = ingredient_idx
        self.type = type

    def __str__(self):
        return f'Note({self.perfume_idx}, {self.ingredient_idx}, {self.type})'

    @staticmethod
    def get_json(note):
        """Return a plain-dict representation of *note*."""
        return {
            'perfume_idx': note.perfume_idx,
            'ingredient_idx': note.ingredient_idx,
            'type': note.type
        }
| 25.772727 | 90 | 0.599647 |
b5b1e11585f418df79341715001d4f9f73978fa6 | 3,291 | py | Python | apps/tinyosGW/sensor_val_tx.py | jeonghoonkang/BerePi | e04283a94a6a0487ab0049dc3e514d6c5dda39cc | [
"BSD-2-Clause"
] | 22 | 2015-06-03T06:28:27.000Z | 2022-03-18T08:02:45.000Z | apps/tinyosGW/sensor_val_tx.py | jeonghoonkang/BerePi | e04283a94a6a0487ab0049dc3e514d6c5dda39cc | [
"BSD-2-Clause"
] | 14 | 2015-06-08T01:31:53.000Z | 2020-08-30T02:19:15.000Z | apps/tinyosGW/sensor_val_tx.py | jeonghoonkang/BerePi | e04283a94a6a0487ab0049dc3e514d6c5dda39cc | [
"BSD-2-Clause"
] | 26 | 2015-05-12T09:33:55.000Z | 2021-08-30T05:41:00.000Z | #-*- coding: utf-8 -*-
#!/usr/bin/python
# Author : jeonghoonkang, https://github.com/jeonghoonkang
from __future__ import print_function
from subprocess import *
from types import *
import platform
import sys
import os
import datetime
import tailer
def run_cmd(cmd):
    """Run *cmd* through the shell and return its captured stdout as bytes.

    NOTE(review): shell=True hands the string to the shell; only pass
    trusted, locally-built command strings to this helper.
    """
    proc = Popen(cmd, shell=True, stdout=PIPE)
    stdout, _ = proc.communicate()
    return stdout
def hostname():
    """Return the machine's hostname as reported by the `hostname` command (bytes)."""
    return run_cmd("hostname")
def get_measure():
    """Pick the interface-listing command for this platform, run it, and
    return (command output bytes, detected os-type string)."""
    cmd = "/sbin/ifconfig"
    _os_type = platform.system()
    _os_ver = os.uname()
    # os.uname()[0] is the kernel name, [-1] the machine architecture.
    if (_os_ver[0] == 'Linux') :
        if (_os_ver[-1] == 'x86_64') :
            _os_type = 'Linux'
            cmd = "/sbin/ifconfig"
    print ('os-type', _os_type)
    # NOTE(review): find() > 0 misses a match at index 0; '>= 0' or
    # "'Cygwin' in _os_type" was probably intended — confirm on Cygwin.
    if _os_type.find('Cygwin') > 0:
        cmd = "ipconfig"
    iip = run_cmd(cmd)
    return iip, _os_type
def checkifexist(fname):
    """Print the `ls` listing for *fname* as a quick existence check."""
    listing = run_cmd('ls ' + fname)
    print(listing)
def writefile_list(_in, fn="ip.txt"):
    """Join the strings in *_in* with newlines, echo the text to stdout,
    and write it to the file *fn*.

    Args:
        _in: iterable of str lines to write.
        fn: destination path (default "ip.txt").
    """
    tin = "\n".join(_in)
    print(tin)
    # Context manager guarantees the handle is flushed and closed even when
    # write() raises; the original open()/close() pair leaked the handle on
    # any exception.
    with open(fn, 'w') as f:
        f.write(tin)
    return
def args_proc():
    """Read the server ip, port, id and password from the command line.

    Prints a usage hint, exits when no argument was supplied, and returns
    the tuple (ip, port, id, passwd) taken from argv[1]..argv[4].
    """
    msg = "usage : python %s {server_IP_ADD} {server_PORT} {server_id} {passwd_for_server}" % __file__
    msg += " => user should input arguments {} "
    print(msg, '\n')
    if len(sys.argv) < 2:
        exit("[bye] you need to input args, ip / port / id")
    ip = sys.argv[1]
    port = sys.argv[2]
    id = sys.argv[3]
    passwd = sys.argv[4]
    print("... start running, inputs are ", ip, port, id, passwd)
    return ip, port, id, passwd
if __name__ == '__main__':
    print ('\n', datetime.datetime.now(), '\n')
    ip, port, id, passwd = args_proc()
    m, os_type = get_measure()
    info = m
    hostn = hostname()
    # getlogin() fails without a controlling terminal (e.g. under cron).
    try : name = os.getlogin()
    except :
        print ('[exception] get log-in user name')
        name = 'pi' # On Raspberry Pi enter 'pi'; on Ubuntu use the user account name
        # When run from crontab, getlogin() raises an exception, so the exact
        # account id must be hard-coded here.
        # If the id is wrong, the run terminates with an error.
        # Must check: https://github.com/jeonghoonkang/BerePi/blob/master/apps/tinyosGW/debug/debug.log
    print ("using local id : ", name)
    sshpass = ''
    if os_type == "Linux":
        fname = '/home/%s' %name
    elif os_type == 'Win' :
        fname = '/home/tinyos' # must be configured manually
    elif os_type == "Darwin":
        fname = '/Users/%s' %name
        sshpass = '/usr/local/bin/'
    log_file_loc = '%s/devel/BerePi/logs/berelogger_%s.log' %(fname,hostn[:-1])
    checkifexist(log_file_loc)
    # Take the last 40 lines of the logger file.
    lines = tailer.tail(open(str(log_file_loc)),40)
    print(lines, type(lines))
    writefile_list (lines, './tmp.log')
    # Push tmp.log to the server via scp, password supplied through sshpass.
    cmd = sshpass + 'sshpass -p' + passwd + ' ' + 'scp' + ' -P%s'%port + ' -o' + ' StrictHostKeyChecking=no'
    cmd += " tmp.log " + '%s@%s:' %(id,ip) + '/var/www/html/sensor/sensor_dust_%s.log ' %hostn[:-1]
    print (cmd)
    print ( 'return of os.system = ', os.system(cmd) )
    print ("finish ")
# Key-based login setup (avoids sshpass):
# ssh-keygen
# cat ~/.ssh/id_rsa.pub | ssh -p xxxx pi@xxx.xxx.xxx 'cat >>
# .ssh/authorized_keys'
| 25.315385 | 108 | 0.576117 |
9ab1a2a67e899324228115982ea3369492d20fa8 | 11,743 | py | Python | next_builder_sdk/model/easy_flow/target_info_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | 5 | 2019-07-31T04:11:05.000Z | 2021-01-07T03:23:20.000Z | next_builder_sdk/model/easy_flow/target_info_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | next_builder_sdk/model/easy_flow/target_info_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: target_info.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from next_builder_sdk.model.cmdb import cluster_info_pb2 as next__builder__sdk_dot_model_dot_cmdb_dot_cluster__info__pb2
from next_builder_sdk.model.easy_flow import version_info_pb2 as next__builder__sdk_dot_model_dot_easy__flow_dot_version__info__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='target_info.proto',
package='easy_flow',
syntax='proto3',
serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/easy_flow'),
serialized_pb=_b('\n\x11target_info.proto\x12\teasy_flow\x1a.next_builder_sdk/model/cmdb/cluster_info.proto\x1a\x33next_builder_sdk/model/easy_flow/version_info.proto\"\x9b\x04\n\nTargetInfo\x12\x10\n\x08targetId\x18\x01 \x01(\t\x12\x12\n\ntargetName\x18\x02 \x01(\t\x12\x12\n\ninstanceId\x18\x03 \x01(\t\x12\"\n\x07\x63luster\x18\x04 \x01(\x0b\x32\x11.cmdb.ClusterInfo\x12\x38\n\x0cinstanceInfo\x18\x05 \x03(\x0b\x32\".easy_flow.TargetInfo.InstanceInfo\x12:\n\roperationInfo\x18\x06 \x03(\x0b\x32#.easy_flow.TargetInfo.OperationInfo\x1a\x8b\x01\n\x0cInstanceInfo\x12\x13\n\x0bversionName\x18\x01 \x01(\t\x12+\n\x0bversionInfo\x18\x02 \x01(\x0b\x32\x16.easy_flow.VersionInfo\x12\x11\n\tpackageId\x18\x03 \x01(\t\x12\x13\n\x0binstallPath\x18\x04 \x01(\t\x12\x11\n\tversionId\x18\x05 \x01(\t\x1a\xaa\x01\n\rOperationInfo\x12\x11\n\toperation\x18\x01 \x01(\t\x12-\n\rversionToInfo\x18\x02 \x01(\x0b\x32\x16.easy_flow.VersionInfo\x12/\n\x0fversionFromInfo\x18\x03 \x01(\x0b\x32\x16.easy_flow.VersionInfo\x12\x13\n\x0binstallPath\x18\x04 \x01(\t\x12\x11\n\tpackageId\x18\x05 \x01(\tBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/easy_flowb\x06proto3')
,
dependencies=[next__builder__sdk_dot_model_dot_cmdb_dot_cluster__info__pb2.DESCRIPTOR,next__builder__sdk_dot_model_dot_easy__flow_dot_version__info__pb2.DESCRIPTOR,])
_TARGETINFO_INSTANCEINFO = _descriptor.Descriptor(
name='InstanceInfo',
full_name='easy_flow.TargetInfo.InstanceInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='versionName', full_name='easy_flow.TargetInfo.InstanceInfo.versionName', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='versionInfo', full_name='easy_flow.TargetInfo.InstanceInfo.versionInfo', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='packageId', full_name='easy_flow.TargetInfo.InstanceInfo.packageId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='installPath', full_name='easy_flow.TargetInfo.InstanceInfo.installPath', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='versionId', full_name='easy_flow.TargetInfo.InstanceInfo.versionId', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=361,
serialized_end=500,
)
_TARGETINFO_OPERATIONINFO = _descriptor.Descriptor(
name='OperationInfo',
full_name='easy_flow.TargetInfo.OperationInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='operation', full_name='easy_flow.TargetInfo.OperationInfo.operation', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='versionToInfo', full_name='easy_flow.TargetInfo.OperationInfo.versionToInfo', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='versionFromInfo', full_name='easy_flow.TargetInfo.OperationInfo.versionFromInfo', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='installPath', full_name='easy_flow.TargetInfo.OperationInfo.installPath', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='packageId', full_name='easy_flow.TargetInfo.OperationInfo.packageId', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=503,
serialized_end=673,
)
_TARGETINFO = _descriptor.Descriptor(
name='TargetInfo',
full_name='easy_flow.TargetInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='targetId', full_name='easy_flow.TargetInfo.targetId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='targetName', full_name='easy_flow.TargetInfo.targetName', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instanceId', full_name='easy_flow.TargetInfo.instanceId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster', full_name='easy_flow.TargetInfo.cluster', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instanceInfo', full_name='easy_flow.TargetInfo.instanceInfo', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='operationInfo', full_name='easy_flow.TargetInfo.operationInfo', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_TARGETINFO_INSTANCEINFO, _TARGETINFO_OPERATIONINFO, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=134,
serialized_end=673,
)
_TARGETINFO_INSTANCEINFO.fields_by_name['versionInfo'].message_type = next__builder__sdk_dot_model_dot_easy__flow_dot_version__info__pb2._VERSIONINFO
_TARGETINFO_INSTANCEINFO.containing_type = _TARGETINFO
_TARGETINFO_OPERATIONINFO.fields_by_name['versionToInfo'].message_type = next__builder__sdk_dot_model_dot_easy__flow_dot_version__info__pb2._VERSIONINFO
_TARGETINFO_OPERATIONINFO.fields_by_name['versionFromInfo'].message_type = next__builder__sdk_dot_model_dot_easy__flow_dot_version__info__pb2._VERSIONINFO
_TARGETINFO_OPERATIONINFO.containing_type = _TARGETINFO
_TARGETINFO.fields_by_name['cluster'].message_type = next__builder__sdk_dot_model_dot_cmdb_dot_cluster__info__pb2._CLUSTERINFO
_TARGETINFO.fields_by_name['instanceInfo'].message_type = _TARGETINFO_INSTANCEINFO
_TARGETINFO.fields_by_name['operationInfo'].message_type = _TARGETINFO_OPERATIONINFO
DESCRIPTOR.message_types_by_name['TargetInfo'] = _TARGETINFO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TargetInfo = _reflection.GeneratedProtocolMessageType('TargetInfo', (_message.Message,), {
'InstanceInfo' : _reflection.GeneratedProtocolMessageType('InstanceInfo', (_message.Message,), {
'DESCRIPTOR' : _TARGETINFO_INSTANCEINFO,
'__module__' : 'target_info_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.TargetInfo.InstanceInfo)
})
,
'OperationInfo' : _reflection.GeneratedProtocolMessageType('OperationInfo', (_message.Message,), {
'DESCRIPTOR' : _TARGETINFO_OPERATIONINFO,
'__module__' : 'target_info_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.TargetInfo.OperationInfo)
})
,
'DESCRIPTOR' : _TARGETINFO,
'__module__' : 'target_info_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.TargetInfo)
})
_sym_db.RegisterMessage(TargetInfo)
_sym_db.RegisterMessage(TargetInfo.InstanceInfo)
_sym_db.RegisterMessage(TargetInfo.OperationInfo)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 46.972 | 1,163 | 0.763093 |
436c4f947fde6da4d0af51a690f6aa7105c9bad1 | 8,636 | py | Python | scales/loadbalancer/aperture.py | steveniemitz/scales | 153fff192a11fbda18afdea12652b090e33b3c59 | [
"MIT"
] | 48 | 2015-06-04T00:08:44.000Z | 2021-02-27T16:00:50.000Z | scales/loadbalancer/aperture.py | steveniemitz/pynagle | 153fff192a11fbda18afdea12652b090e33b3c59 | [
"MIT"
] | 10 | 2015-07-30T20:54:02.000Z | 2020-02-24T15:39:49.000Z | scales/loadbalancer/aperture.py | steveniemitz/pynagle | 153fff192a11fbda18afdea12652b090e33b3c59 | [
"MIT"
] | 11 | 2015-06-10T19:44:34.000Z | 2020-02-14T02:02:40.000Z | """Aperture Load Balancer.
Based on work from finagle's aperture load balancer.
See https://github.com/twitter/finagle/blob/master/finagle-core/src/main/scala/com/twitter/finagle/loadbalancer/Aperture.scala
The aperture balancer attempts to keep the average load going into the underlying
server set between a load band (by default .5 <= load <= 2.
Load is determined via an ema of load over a smoothing window (5 seconds).
The load average is essentially the average number of concurrent requests each
node in the balancer is handling.
"""
import random
from .heap import HeapBalancerSink
from ..asynchronous import AsyncResult
from ..constants import (ChannelState, SinkProperties, SinkRole)
from ..sink import SinkProvider
from ..timer_queue import LOW_RESOLUTION_TIMER_QUEUE, LOW_RESOLUTION_TIME_SOURCE
from ..varz import (
Ema,
Gauge,
MonoClock,
Source,
VarzBase
)
class ApertureBalancerSink(HeapBalancerSink):
  """A load balancer that keeps an aperture adjusted by a load average."""

  class ApertureVarz(VarzBase):
    """
    idle - The number of nodes idle in the pool (not in the aperture)
    active - The number of nodes active in the pool (in the aperture)
    load_average - The most recently calculated load average.
    """
    _VARZ_BASE_NAME = 'scales.loadbalancer.Aperture'
    _VARZ = {
      'idle': Gauge,
      'active': Gauge,
      'load_average': Gauge
    }

  def __init__(self, next_provider, sink_properties, global_properties):
    """Initialize the aperture balancer.

    Args:
      next_provider - Provider used by the parent HeapBalancerSink to build
          per-endpoint sinks.
      sink_properties - Carries min_size/max_size (aperture bounds),
          min_load/max_load (the load band) and jitter_min_sec/jitter_max_sec.
      global_properties - Global sink properties; SinkProperties.Label is used
          as the varz service name.
    """
    self._idle_endpoints = set()
    # Current number of in-flight requests across the aperture.
    self._total = 0
    # EMA of concurrent load over a 5 (second) smoothing window.
    self._ema = Ema(5)
    self._time = MonoClock()
    self._min_size = sink_properties.min_size
    self._max_size = sink_properties.max_size
    self._min_load = sink_properties.min_load
    self._max_load = sink_properties.max_load
    self._jitter_min = sink_properties.jitter_min_sec
    self._jitter_max = sink_properties.jitter_max_sec
    service_name = global_properties[SinkProperties.Label]
    self.__varz = self.ApertureVarz(Source(service=service_name))
    # Endpoints mid-transition into/out of the aperture; excluded from
    # contraction scans until their transition completes.
    self._pending_endpoints = set()
    super(ApertureBalancerSink, self).__init__(next_provider, sink_properties, global_properties)
    if self._jitter_min > 0:
      self._ScheduleNextJitter()

  def _UpdateSizeVarz(self):
    """Update active and idle varz"""
    # NOTE(review): self._size presumably comes from HeapBalancerSink and
    # tracks the aperture (heap) size -- confirm against the parent class.
    self.__varz.active(self._size)
    self.__varz.idle(len(self._idle_endpoints))

  def _AddSink(self, endpoint, sink_factory):
    """Invoked when a node is added to the underlying server set.

    If the number of healthy nodes is < the minimum aperture size, the node
    will be added to the aperture, otherwise it will be added to the idle channel
    list.

    Args:
      endpoint - The endpoint being added to the server set.
      sink_factory - A callable used to create a sink for the endpoint.
    """
    # self._heap[0] is skipped; presumably a sentinel slot of the parent's heap.
    num_healthy = len([c for c in self._heap[1:] if c.channel.is_open])
    if num_healthy < self._min_size:
      super(ApertureBalancerSink, self)._AddSink(endpoint, sink_factory)
    else:
      self._idle_endpoints.add(endpoint)
    self._UpdateSizeVarz()

  def _RemoveSink(self, endpoint):
    """Invoked when a node is removed from the underlying server set.

    If the node is currently active, it is removed from the aperture and replaced
    by an idle node (if one is available). Otherwise, it is simply discarded.

    Args:
      endpoint - The endpoint being removed from the server set.
    """
    removed = super(ApertureBalancerSink, self)._RemoveSink(endpoint)
    if removed:
      # An active node left the aperture; backfill it from the idle set.
      self._TryExpandAperture()
    if endpoint in self._idle_endpoints:
      self._idle_endpoints.discard(endpoint)
    self._UpdateSizeVarz()

  def _TryExpandAperture(self, leave_pending=False):
    """Attempt to expand the aperture. By calling this it's assumed the aperture
    needs to be expanded.

    The aperture can be expanded if there are idle sinks available.
    """
    endpoints = list(self._idle_endpoints)
    added_node = None
    new_endpoint = None
    if endpoints:
      # Pick a random idle endpoint to promote into the aperture.
      new_endpoint = random.choice(endpoints)
      self._idle_endpoints.discard(new_endpoint)
      self._log.debug('Expanding aperture to include %s.' % str(new_endpoint))
      new_sink = self._servers[new_endpoint]
      self._pending_endpoints.add(new_endpoint)
      added_node = super(ApertureBalancerSink, self)._AddSink(new_endpoint, new_sink)
      self._UpdateSizeVarz()
    if added_node:
      if not leave_pending:
        # Clear the pending marker once the endpoint's open completes.
        added_node.ContinueWith(
            lambda ar: self._pending_endpoints.discard(new_endpoint))
      return added_node, new_endpoint
    else:
      return AsyncResult.Complete(), None

  def _ContractAperture(self, force=False):
    """Attempt to contract the aperture. By calling this it's assume the aperture
    needs to be contracted.

    The aperture can be contracted if it's current size is larger than the
    min size.
    """
    # Skip contraction while endpoint transitions are in flight, unless forced.
    if self._pending_endpoints and not force:
      return

    num_healthy = len([c for c in self._heap[1:] if c.channel.is_open])
    if num_healthy > self._min_size:
      least_loaded_endpoint = None
      # Scan the heap for any closed endpoints.
      for n in self._heap[1:]:
        if n.channel.is_closed and n.endpoint not in self._pending_endpoints:
          least_loaded_endpoint = n.endpoint
          break
      if not least_loaded_endpoint:
        # Scan the heap for the least-loaded node. This isn't exactly in-order,
        # but "close enough"
        for n in self._heap[1:]:
          if n.endpoint not in self._pending_endpoints:
            least_loaded_endpoint = n.endpoint
            break
      if least_loaded_endpoint:
        # Demote the chosen endpoint back to the idle set.
        self._idle_endpoints.add(least_loaded_endpoint)
        super(ApertureBalancerSink, self)._RemoveSink(least_loaded_endpoint)
        self._log.debug('Contracting aperture to remove %s' % str(least_loaded_endpoint))
        self._UpdateSizeVarz()

  def _OnNodeDown(self, node):
    """Invoked by the base class when a node is marked down.

    In this case, if the downed node is currently in the aperture, we want to
    remove if, and then attempt to adjust the aperture.
    """
    if node.channel.state != ChannelState.Idle:
      ar, _ = self._TryExpandAperture()
      return ar
    else:
      return AsyncResult.Complete()

  def _OnGet(self, node):
    """Invoked by the parent class when a node has been retrieved from the pool
    and is about to be used.

    Increases the load average of the pool, and adjust the aperture if needed.
    """
    self._AdjustAperture(1)

  def _OnPut(self, node):
    """Invoked by the parent class when a node is being returned to the pool.

    Decreases the load average and adjust the aperture if needed.
    """
    self._AdjustAperture(-1)

  def _ScheduleNextJitter(self):
    """Schedule the aperture to jitter in a random amount of time between
    _jitter_min and _jitter_max.
    """
    next_jitter = random.randint(self._jitter_min, self._jitter_max)
    now = LOW_RESOLUTION_TIME_SOURCE.now
    self._next_jitter = LOW_RESOLUTION_TIMER_QUEUE.Schedule(
        now + next_jitter, self._Jitter)

  def _Jitter(self):
    """Attempt to expand the aperture by one node, and if successful,
    contract it by a node (excluding the one that was just added).  This is
    done asynchronously.
    """
    try:
      ar, endpoint = self._TryExpandAperture(True)
      if endpoint:
        try:
          ar.wait()
          if not ar.exception:
            self._ContractAperture(True)
        finally:
          # Always release the pending marker, even if the expand failed.
          self._pending_endpoints.discard(endpoint)
    finally:
      # Re-arm the jitter timer regardless of outcome.
      self._ScheduleNextJitter()

  def _AdjustAperture(self, amount):
    """Adjusts the load average of the pool, and adjusts the aperture size
    if required by the new load average.

    Args:
      amount - The amount to change the load by.  May be +/-1
    """
    self._total += amount
    avg = self._ema.Update(self._time.Sample(), self._total)
    aperture_size = self._size
    if aperture_size == 0:
      # Essentially infinite load.
      aperture_load = self._max_load
    else:
      aperture_load = avg / aperture_size
    self.__varz.load_average(aperture_load)
    # Expand when over the load band (and capacity remains), contract when
    # under it (and above the minimum size).
    if (aperture_load >= self._max_load
        and self._idle_endpoints
        and aperture_size < self._max_size):
      self._TryExpandAperture()
    elif aperture_load <= self._min_load and aperture_size > self._min_size:
      self._ContractAperture()
# Default builder: registers the aperture balancer in the LoadBalancer role
# with its default tuning (load band [0.5, 2.0], jitter every 120-240s).
ApertureBalancerSink.Builder = SinkProvider(
  ApertureBalancerSink,
  SinkRole.LoadBalancer,
  smoothing_window = 5,
  min_size = 1,
  max_size = 2**31,
  min_load = 0.5,
  max_load = 2.0,
  server_set_provider = None,
  jitter_min_sec = 120,
  jitter_max_sec = 240)
86d485dc916b65bdef1c9224da6e5f65256c6a10 | 2,179 | py | Python | zcrmsdk/src/com/zoho/crm/api/related_lists/response_wrapper.py | zoho/zohocrm-python-sdk-2.0 | 3a93eb3b57fed4e08f26bd5b311e101cb2995411 | [
"Apache-2.0"
] | null | null | null | zcrmsdk/src/com/zoho/crm/api/related_lists/response_wrapper.py | zoho/zohocrm-python-sdk-2.0 | 3a93eb3b57fed4e08f26bd5b311e101cb2995411 | [
"Apache-2.0"
] | null | null | null | zcrmsdk/src/com/zoho/crm/api/related_lists/response_wrapper.py | zoho/zohocrm-python-sdk-2.0 | 3a93eb3b57fed4e08f26bd5b311e101cb2995411 | [
"Apache-2.0"
] | null | null | null | try:
from zcrmsdk.src.com.zoho.crm.api.exception import SDKException
from zcrmsdk.src.com.zoho.crm.api.util import Constants
from zcrmsdk.src.com.zoho.crm.api.related_lists.response_handler import ResponseHandler
except Exception:
from ..exception import SDKException
from ..util import Constants
from .response_handler import ResponseHandler
class ResponseWrapper(ResponseHandler):

    def __init__(self):
        """Creates an instance of ResponseWrapper"""
        super().__init__()
        self.__related_lists = None
        self.__key_modified = {}

    def get_related_lists(self):
        """
        The method to get the related_lists

        Returns:
            list: An instance of list
        """
        return self.__related_lists

    def set_related_lists(self, related_lists):
        """
        The method to set the value to related_lists

        Parameters:
            related_lists (list) : An instance of list
        """
        # Reject any non-None value that is not a list.
        if not (related_lists is None or isinstance(related_lists, list)):
            raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: related_lists EXPECTED TYPE: list', None, None)
        self.__related_lists = related_lists
        self.__key_modified['related_lists'] = 1

    def is_key_modified(self, key):
        """
        The method to check if the user has modified the given key

        Parameters:
            key (string) : A string representing the key

        Returns:
            int: An int representing the modification
        """
        if not (key is None or isinstance(key, str)):
            raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: key EXPECTED TYPE: str', None, None)
        # dict.get returns None for unknown keys, matching the documented contract.
        return self.__key_modified.get(key)

    def set_key_modified(self, key, modification):
        """
        The method to mark the given key as modified

        Parameters:
            key (string) : A string representing the key
            modification (int) : An int representing the modification
        """
        if not (key is None or isinstance(key, str)):
            raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: key EXPECTED TYPE: str', None, None)
        if not (modification is None or isinstance(modification, int)):
            raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: modification EXPECTED TYPE: int', None, None)
        self.__key_modified[key] = modification
cc9bab1bc843fe49b77671f96954b0e08769d3e2 | 2,688 | py | Python | SVR_test.py | jakobhaggstrom/JCA-21-1579 | 30ce251b2caf2f23066980e839f39470bfa4ed3f | [
"MIT"
] | null | null | null | SVR_test.py | jakobhaggstrom/JCA-21-1579 | 30ce251b2caf2f23066980e839f39470bfa4ed3f | [
"MIT"
] | null | null | null | SVR_test.py | jakobhaggstrom/JCA-21-1579 | 30ce251b2caf2f23066980e839f39470bfa4ed3f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 19 22:10:37 2021
@author: Jakob Häggström
For more information see:
Journal of Chromatography A, xxx (2022) xxxx–xxxx
doi:xxx
The following program is more or less a user interface that uses the trained
models to predict retentiontimes for a given oligonucleotide sequence and dataset
(PM (C18 method) or IPM (Ion pair method)) for three different gradients.
The gradients in the dataset were:
PM:
[G1, G2, G3] = [2.22, 1.23, 0.81] v%MeCN min^-1
IPM:
[G1, G2, G3] = [0.32, 0.16, 0.08] mM TEA min^-1
The output from the model will follow the same order.
"""
import tRpredictor as tR_pre
import os
def get_model_path(MAIN_DIR, dataset):
    """Return the saved-model paths for the three gradients of a dataset.

    Parameters:
        MAIN_DIR: Root directory containing the 'SVR_models' folder.
        dataset: 'PM' or 'IPM'.

    Returns:
        list of str, paths [G1, G2, G3] to the pickled SVR models.
    """
    return [
        os.path.join(MAIN_DIR, 'SVR_models', f"SVR_{dataset}_G{gradient}.sav")
        for gradient in range(1, 4)
    ]
def load_models(MAIN_DIR, dataset):
    """Build a tR_predictor backed by the three saved SVR gradient models.

    Parameters:
        MAIN_DIR: Root directory containing the 'SVR_models' folder.
        dataset: 'PM' or 'IPM'.
    """
    model_paths = get_model_path(MAIN_DIR, dataset)
    return tR_pre.tR_predictor(model_paths, ['count'])
def predict(dataset, seq):
    """Predict retention times for oligonucleotide sequence(s).

    Parameters
    ----------
    dataset : str
        Either 'PM' or 'IPM'.
    seq : str, or list of str
        Sequence(s) built from combinations of A, T, G and C.

    Returns
    -------
    Per input sequence, a 1x3 list of floats, one entry per gradient
    dataset, i.e. [[time gradient 1, time gradient 2, time gradient 3]].
    """
    script_dir = os.path.dirname(os.path.abspath(__file__))
    return load_models(script_dir, dataset).predict_tR(seq)
def main():
    """Demo entry point: predict retention times for two example sequences.

    The sequences are samples S12A and S16C (Supplementary Material
    Table S1).  Pass a single list of sequences to `predict`.

    Expected output:
        PM:  [[ 7.21372445  8.74126582  9.93287348]
              [ 8.44104151 11.15892085 13.67501449]]
        IPM: [[ 6.42627625  7.09895575  7.6196043 ]
              [ 8.96175395 11.58446108 14.66242965]]
    """
    sequences = ['CCCACACCCAAC', 'ATTTTTGTGCGCTCTA']
    # Run both methods (PM first, then IPM) and print each result.
    for dataset in ('PM', 'IPM'):
        print(f"{predict(dataset, sequences)}")


if __name__ == '__main__':
    main()
046aab0c037a7cb22979f0a3a0d6da8724ddb4c4 | 16,186 | py | Python | trestle/core/models/elements.py | jrdoran/compliance-trestle | 0b0b6c4a3dad51bdce921099906347abc0a4d7df | [
"Apache-2.0"
] | 1 | 2021-03-10T13:29:22.000Z | 2021-03-10T13:29:22.000Z | trestle/core/models/elements.py | aNebula/compliance-trestle | a2949e15b79bfc8ee13dd9822aa49e708548c62d | [
"Apache-2.0"
] | null | null | null | trestle/core/models/elements.py | aNebula/compliance-trestle | a2949e15b79bfc8ee13dd9822aa49e708548c62d | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Element wrapper of an OSCAL model element."""
import json
import pathlib
from typing import Dict, List, Optional, Union
from pydantic import Field, create_model
from pydantic.error_wrappers import ValidationError
import trestle.core.const as const
from trestle.core import utils
from trestle.core.base_model import OscalBaseModel
from trestle.core.err import TrestleError, TrestleNotFoundError
from trestle.core.models.file_content_type import FileContentType
import yaml
class ElementPath:
    """Element path wrapper of an element.

    This only allows a single wildcard '*' at the end to denote elements of an array of dict
    """

    PATH_SEPARATOR: str = const.ALIAS_PATH_SEPARATOR

    WILDCARD: str = '*'

    def __init__(self, element_path: str, parent_path: Optional['ElementPath'] = None) -> None:
        """Initialize an element wrapper.

        It assumes the element path contains oscal field alias with hyphens only.

        Args:
            element_path: Dot-separated path of OSCAL field aliases; may end in '*'.
            parent_path: Optional parent path (ElementPath or str).
        """
        if isinstance(parent_path, str):
            parent_path = ElementPath(parent_path)
        self._parent_path = parent_path

        self._path: List[str] = self._parse(element_path)

        # Initialize private variables for lazy processing and caching
        self._element_name: Optional[str] = None
        self._preceding_path: Optional['ElementPath'] = None

    def _parse(self, element_path: str) -> List[str]:
        """Parse the element path and validate.

        Raises TrestleError for empty parts, a misplaced wildcard, a bare
        wildcard, or a single-part path.
        """
        parts: List[str] = element_path.split(self.PATH_SEPARATOR)
        for i, part in enumerate(parts):
            if part == '':
                raise TrestleError(
                    f'Invalid path "{element_path}" because having empty path parts between "{self.PATH_SEPARATOR}" \
or in the beginning'
                )
            elif part == self.WILDCARD and i != len(parts) - 1:
                raise TrestleError(f'Invalid path. Wildcard "{self.WILDCARD}" can only be at the end')

        if parts[-1] == self.WILDCARD:
            if len(parts) == 1:
                raise TrestleError(f'Invalid path {element_path} with wildcard.')

        if len(parts) <= 1:
            raise TrestleError(
                'Element path must have at least two parts with the first part being the model root name \
like "target-definition.metadata"'
            )

        return parts

    def get(self) -> List[str]:
        """Return the path parts as a list."""
        return self._path

    def to_string(self) -> str:
        """Return the path parts as a dot-separated string."""
        return self.PATH_SEPARATOR.join(self.get())

    def get_parent(self) -> 'ElementPath':
        """Return the parent path.

        It can be None or a valid ElementPath
        """
        return self._parent_path

    def get_first(self) -> str:
        """Return the first part of the path."""
        return self._path[0]

    def get_last(self) -> str:
        """Return the last part of the path."""
        return self._path[-1]

    def get_full(self) -> str:
        """Return the full path including parent path parts as a dot separated str."""
        all_parts = self.get_full_path_parts()
        return self.PATH_SEPARATOR.join(all_parts)

    def get_element_name(self) -> str:
        """Return the element alias name from the path.

        Essentially this is the last part of the element path, or the part
        before a trailing wildcard.
        """
        # if it is available then return otherwise compute
        if self._element_name is None:
            element_name = self.get_last()
            if element_name == self.WILDCARD:
                element_name = self._path[-2]

            self._element_name = element_name

        return self._element_name

    def get_full_path_parts(self) -> List[str]:
        """Get full path parts to the element including parent path parts as a list."""
        path_parts = []
        if self.get_parent() is not None:
            parent_path_parts = self.get_parent().get_full_path_parts()
            path_parts.extend(parent_path_parts)
            path_parts.extend(self.get()[1:])  # don't use the first part
        else:
            path_parts.extend(self.get())

        return path_parts

    def get_preceding_path(self) -> 'ElementPath':
        """Return the element path to the preceding element in the path."""
        # if it is available then return otherwise compute
        if self._preceding_path is None:
            path_parts = self.get_full_path_parts()
            if len(path_parts) > 1:
                prec_path_parts = path_parts[:-1]

                # prec_path_parts must have at least two parts
                if len(prec_path_parts) > 1:
                    self._preceding_path = ElementPath(self.PATH_SEPARATOR.join(prec_path_parts))

        return self._preceding_path

    def to_file_path(self, content_type: FileContentType = None, root_dir: str = '') -> pathlib.Path:
        """Convert to a file or directory path for the element path.

        if content_type is not passed, it will return a path for directory
        """
        # BUG FIX: operate on a copy. self.get() returns the internal list, and
        # the previous 'path_parts[0] = root_dir' assignment mutated self._path
        # whenever root_dir was set and the path had no trailing wildcard.
        path_parts = list(self.get())

        # skip wildcard
        if path_parts[-1] == ElementPath.WILDCARD:
            path_parts = path_parts[:-1]

        if root_dir != '':
            path_parts[0] = root_dir

        path_str = '/'.join(path_parts)

        # add file extension if required
        # this will be omitted if it is a dir path
        if content_type is not None:
            file_extension = FileContentType.to_file_extension(content_type)
            path_str = path_str + file_extension

        # prepare the path
        file_path: pathlib.Path = pathlib.Path(f'./{path_str}')
        return file_path

    def to_root_path(self, content_type: FileContentType = None) -> pathlib.Path:
        """Convert to a file path for the element root."""
        path_str = f'./{self.get_first()}'
        if content_type is not None:
            file_extension = FileContentType.to_file_extension(content_type)
            path_str = path_str + file_extension

        file_path: pathlib.Path = pathlib.Path(path_str)
        return file_path

    def __str__(self) -> str:
        """Return string representation of element path."""
        return self.to_string()

    def __eq__(self, other) -> bool:
        """Override equality method."""
        if not isinstance(other, ElementPath):
            return False

        return self.get() == other.get()

    def __hash__(self) -> int:
        """Hash consistent with __eq__ (paths compare by their own parts).

        Defining __eq__ alone made instances unhashable; this restores use
        in sets and as dict keys.
        """
        return hash(tuple(self._path))
class Element:
    """Element wrapper of an OSCAL model."""

    # Sentinel alias: skip wrapping entirely during to_json().
    IGNORE_WRAPPER_ALIAS = '__'

    # Class names (as strings) accepted as sub-elements by set_at().
    _allowed_sub_element_types: List[str] = ['Element', 'OscalBaseModel', 'list', 'None', 'dict']

    def __init__(self, elem: OscalBaseModel, wrapper_alias: str = ''):
        """Initialize an element wrapper.
        wrapper_alias is the OSCAL alias for the given elem object and used for serialization in to_json() method.
        For example,
        - List[Catalog.Group] element should have wrapper alias 'groups'
        - Catalog element should have wrapper alias 'catalog'
        wrapper_alias is mandatory for collection type object
        if wrapper_alias = IGNORE_WRAPPER_ALIAS, then it is ignored and assumed to be json-serializable during to_json()
        """
        self._elem: OscalBaseModel = elem
        if wrapper_alias == '' and wrapper_alias != self.IGNORE_WRAPPER_ALIAS:
            # NOTE(review): the second clause is always True when the first is;
            # it only documents that IGNORE_WRAPPER_ALIAS bypasses this block.
            if utils.is_collection_field_type(elem):
                raise TrestleError('wrapper_alias is required for a collection type object')
            else:
                # Derive the alias from the model class name, e.g. Catalog -> 'catalog'.
                wrapper_alias = utils.classname_to_alias(elem.__class__.__name__, 'json')
        self._wrapper_alias: str = wrapper_alias

    def get(self) -> OscalBaseModel:
        """Return the model object."""
        return self._elem

    def _split_element_path(self, element_path: ElementPath):
        """Split the element path into root_model and remaining attr names."""
        path_parts = element_path.get()
        root_model = path_parts[0]
        path_parts = path_parts[1:]
        return root_model, path_parts

    def get_at(self,
               element_path: ElementPath = None,
               check_parent: bool = True) -> Union[OscalBaseModel, List[OscalBaseModel], Dict[str, OscalBaseModel]]:
        """Get the element at the specified element path.
        it will return the sub-model object at the path. Sub-model object
        can be of type OscalBaseModel or List
        """
        if element_path is None:
            return self._elem
        # find the root-model and element path parts
        _, path_parts = self._split_element_path(element_path)
        # TODO validate that self._elem is of same type as root_model
        # initialize the starting element for search
        elm = self._elem
        # Unwrap pydantic custom-root collections so indexing works directly.
        if hasattr(elm, '__root__') and (isinstance(elm.__root__, dict) or isinstance(elm.__root__, list)):
            elm = elm.__root__
        # if parent exists and does not end with wildcard, use the parent as the starting element for search
        if check_parent and element_path.get_parent(
        ) is not None and element_path.get_parent().get_last() != ElementPath.WILDCARD:
            elm_at = self.get_at(element_path.get_parent())
            if elm_at is None:
                raise TrestleNotFoundError(f'Invalid parent path {element_path.get_parent()}')
            elm = elm_at
        # return the sub-element at the specified path
        for attr in path_parts:
            if elm is None:
                break
            # process for wildcard and array indexes
            if attr == ElementPath.WILDCARD:
                break
            elif attr.isnumeric():
                if isinstance(elm, list):
                    elm = elm[int(attr)]
                else:
                    # index to a non list type should return None
                    return None
            elif isinstance(elm, dict):
                elm = elm.get(attr, None)
            else:
                elm = elm.get_field_value_by_alias(attr)
        return elm

    def get_preceding_element(self, element_path: ElementPath) -> Optional[OscalBaseModel]:
        """Get the preceding element in the path."""
        preceding_path = element_path.get_preceding_path()
        preceding_elm: Optional[OscalBaseModel] = self.get_at(preceding_path)
        return preceding_elm

    def _get_sub_element_obj(self, sub_element):
        """Convert sub element into allowed model obj."""
        if not self.is_allowed_sub_element_type(sub_element):
            raise TrestleError(
                f'Sub element must be one of "{self.get_allowed_sub_element_types()}", found "{sub_element.__class__}"'
            )
        model_obj = sub_element
        if isinstance(sub_element, Element):
            # Unwrap a nested Element down to its underlying model object.
            model_obj = sub_element.get()
        return model_obj

    def set_at(self, element_path: ElementPath, sub_element: OscalBaseModel) -> 'Element':
        """Set a sub_element at the path in the current element.
        Sub element can be Element, OscalBaseModel, list or None type
        It returns the element itself so that chaining operation can be done such as
        `element.set_at(path, sub-element).get()`.
        """
        # convert the element_path to ElementPath if needed
        if isinstance(element_path, str):
            element_path = ElementPath(element_path)
        # convert sub-element to OscalBaseModel if needed
        model_obj = self._get_sub_element_obj(sub_element)
        # find the root-model and element path parts
        _, path_parts = self._split_element_path(element_path)
        # TODO validate that self._elem is of same type as root_model
        # If wildcard is present, check the input type and determine the preceding element
        if element_path.get_last() == ElementPath.WILDCARD:
            # validate the type is either list or OscalBaseModel
            if not isinstance(model_obj, list) and not isinstance(model_obj, OscalBaseModel):
                raise TrestleError(
                    f'The model object needs to be a List or OscalBaseModel for path with "{ElementPath.WILDCARD}"'
                )
            # since wildcard * is there, we need to go one level up for preceding element in the path
            preceding_elm = self.get_preceding_element(element_path.get_preceding_path())
        else:
            # get the preceding element in the path
            preceding_elm = self.get_preceding_element(element_path)
        if preceding_elm is None:
            raise TrestleError(f'Invalid sub element path {element_path} with no valid preceding element')
        # check if it can be a valid sub_element of the parent
        sub_element_name = element_path.get_element_name().replace('-', '_')
        if hasattr(preceding_elm, sub_element_name) is False:
            raise TrestleError(
                f'Element "{preceding_elm.__class__}" does not have the attribute "{sub_element_name}" \
of type "{model_obj.__class__}"'
            )
        # set the sub-element
        try:
            setattr(preceding_elm, sub_element_name, model_obj)
        except ValidationError:
            sub_element_class = self.get_sub_element_class(preceding_elm, sub_element_name)
            raise TrestleError(
                f'Validation error: {sub_element_name} is expected to be "{sub_element_class}", \
but found "{model_obj.__class__}"'
            )
        # returning self will allow to do 'chaining' of commands after set
        return self

    def to_yaml(self) -> str:
        """Convert into YAML string."""
        # Round-trip through JSON so alias/exclusion rules match to_json().
        yaml_data = yaml.dump(yaml.safe_load(self.to_json()))
        return yaml_data

    def to_json(self) -> str:
        """Convert into JSON string."""
        if self._wrapper_alias == self.IGNORE_WRAPPER_ALIAS:
            json_data = json.dumps(self._elem, sort_keys=False, indent=4)
        else:
            # Wrap the model in a transient single-field model so that the
            # serialized output is keyed by the OSCAL wrapper alias.
            dynamic_passer = {}
            dynamic_passer['TransientField'] = (self._elem.__class__, Field(self, alias=self._wrapper_alias))
            wrapper_model = create_model('TransientModel', __base__=OscalBaseModel, **dynamic_passer)  # type: ignore
            wrapped_model = wrapper_model(**{self._wrapper_alias: self._elem})
            json_data = wrapped_model.json(exclude_none=True, by_alias=True, indent=4)
        return json_data

    @classmethod
    def get_sub_element_class(cls, parent_elm: OscalBaseModel, sub_element_name: str):
        """Get the class of the sub-element."""
        sub_element_class = parent_elm.__fields__[sub_element_name].outer_type_
        return sub_element_class

    @classmethod
    def get_allowed_sub_element_types(cls) -> List[str]:
        """Get the list of allowed sub element types."""
        return cls._allowed_sub_element_types

    @classmethod
    def is_allowed_sub_element_type(cls, elm) -> bool:
        """Check if is of allowed sub element type."""
        # FIXME: The following logic does not use the _allowed_sub_element_types being defined for the class
        if (isinstance(elm, Element) or isinstance(elm, OscalBaseModel) or isinstance(elm, list)
                or isinstance(elm, dict) or elm is None):
            return True
        return False

    def __str__(self) -> str:
        """Return string representation of element."""
        return type(self._elem).__name__

    def __eq__(self, other: object) -> bool:
        """Check that two elements are equal."""
        if not isinstance(other, Element):
            return False
        return self.get() == other.get()
| 38.630072 | 120 | 0.64043 |
fccdccc6d634227d047895181f016bc7e617b149 | 669 | py | Python | scripts/find_car.py | ahou8288/self-driving-car-1 | ea436a563ed6fe7668533ad77039e0580a912a31 | [
"MIT"
] | 5 | 2018-09-24T13:05:16.000Z | 2019-03-05T18:15:36.000Z | scripts/find_car.py | ahou8288/self-driving-car-1 | ea436a563ed6fe7668533ad77039e0580a912a31 | [
"MIT"
] | 1 | 2019-05-22T11:02:45.000Z | 2019-05-22T11:02:45.000Z | scripts/find_car.py | ahou8288/self-driving-car-1 | ea436a563ed6fe7668533ad77039e0580a912a31 | [
"MIT"
] | 4 | 2018-02-17T05:30:34.000Z | 2019-03-26T11:51:36.000Z | """
Convenience script to find your Raspberry Pi on a local network.
Usage:
find_car.py [--ip=<ip>]
Options:
--ip=<ip> Base ip address of your network [default: 192.168.1.0]
"""
# Parse the command-line options documented in the module docstring.
from docopt import docopt
import os
import socket

args = docopt(__doc__)
ip = args['--ip']  # base address of the network to scan, e.g. 192.168.1.0

# Discover this machine's LAN address by "connecting" a UDP socket to a public
# DNS server; no packets are sent, the OS just selects a local source address.
print('Looking up your computer IP address...')
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8",80))
print('Your IP address: %s ' %s.getsockname()[0])
s.close()

import subprocess  # NOTE(review): imported but unused; the scan runs via os.system below

# Ping-scan the /24 subnet and print hosts whose MAC prefix is B8:27:EB
# (a Raspberry Pi Foundation OUI).
# NOTE(review): `ip` is interpolated into a shell command executed with sudo --
# acceptable for a trusted local tool, unsafe for untrusted input.
print("Finding your car's IP address...")
cmd = "sudo nmap -sP " + ip + "/24 | awk '/^Nmap/{ip=$NF}/B8:27:EB/{print ip}'"
print("Your car's ip address is:" )
os.system(cmd)
87334f0dd237df4acfd5f41b8bca159512c9245c | 3,333 | py | Python | examples/visualize/animate.py | cg31/cule | 6cd8e06059c3c3a193a4b2e0821dc1b9daeb726c | [
"BSD-3-Clause"
] | 208 | 2019-05-25T21:35:35.000Z | 2022-03-28T17:33:13.000Z | examples/visualize/animate.py | cg31/cule | 6cd8e06059c3c3a193a4b2e0821dc1b9daeb726c | [
"BSD-3-Clause"
] | 30 | 2019-07-27T08:23:54.000Z | 2022-03-24T18:17:36.000Z | examples/visualize/animate.py | cg31/cule | 6cd8e06059c3c3a193a4b2e0821dc1b9daeb726c | [
"BSD-3-Clause"
] | 27 | 2019-07-27T05:42:23.000Z | 2022-03-05T03:08:52.000Z | import os
import sys
_path = os.path.abspath(os.path.pardir)
if not _path in sys.path:
sys.path = [_path] + sys.path
import argparse
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import torch
from torchcule.atari import Env, Rom
from utils.openai.envs import create_vectorize_atari_env
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='CuLE')
    parser.add_argument('--color', type=str, default='rgb', help='Color mode (rgb or gray)')
    parser.add_argument('--debug', action='store_true', help='Single step through frames for debugging')
    parser.add_argument('--env-name', type=str, help='Atari Game')
    parser.add_argument('--gpu', type=int, default=0, help='GPU ID (default: 0)')
    parser.add_argument('--initial-steps', type=int, default=1000, help='Number of steps used to initialize the environment')
    parser.add_argument('--num-envs', type=int, default=5, help='Number of atari environments')
    parser.add_argument('--rescale', action='store_true', help='Resize output frames to 84x84 using bilinear interpolation')
    parser.add_argument('--training', action='store_true', help='Set environment to training mode')
    parser.add_argument('--use-cuda', action='store_true', help='Execute ALEs on GPU')
    parser.add_argument('--use-openai', action='store_true', default=False, help='Use OpenAI Gym environment')
    args = parser.parse_args()

    cmap = None if args.color == 'rgb' else 'gray'
    device = torch.device('cuda:{}'.format(args.gpu) if args.use_cuda else 'cpu')
    debug = args.debug
    num_actions = 4  # only used by the commented-out random-action line below
    num_envs = args.num_envs

    # Build either an OpenAI Gym vectorized environment or a CuLE Env.
    if args.use_openai:
        env = create_vectorize_atari_env(args.env_name, seed=0, num_envs=args.num_envs,
                                         episode_life=False, clip_rewards=False)
        observations = env.reset()
    else:
        env = Env(args.env_name, args.num_envs, args.color, device=device,
                  rescale=args.rescale, episodic_life=True, repeat_prob=0.0)
        print(env.cart)
        if args.training:
            env.train()
        observations = env.reset(initial_steps=args.initial_steps, verbose=True).cpu().numpy()

    # Show all environments' frames tiled horizontally in one matplotlib image.
    fig = plt.figure()
    img = plt.imshow(np.squeeze(np.hstack(observations)), animated=True, cmap=cmap)
    ax = fig.add_subplot(111)
    frame = 0
    if debug:
        ax.set_title('frame: {}, rewards: {}, done: {}'.format(frame, [], []))
    else:
        fig.suptitle(frame)

    def updatefig(*args):
        # Animation callback: advance every environment by one random action
        # and refresh the tiled image.
        global ax, debug, env, frame, img, num_envs
        if debug:
            # Block until the user confirms, for frame-by-frame inspection.
            input('Press Enter to continue...')
        actions = env.sample_random_actions()
        # actions = np.random.randint(0, num_actions, (num_envs,))
        observations, reward, done, info = env.step(actions)
        observations = observations.cpu().numpy()
        reward = reward.cpu().numpy()
        done = done.cpu().numpy()
        img.set_array(np.squeeze(np.hstack(observations)))
        if debug:
            ax.title.set_text('{}) rewards: {}, done: {}'.format(frame, reward, done))
        else:
            fig.suptitle(frame)
        frame += 1
        return img,

    ani = animation.FuncAnimation(fig, updatefig, interval=10, blit=False)
    plt.axis('off')
    plt.tight_layout()
    plt.show()
e5c5174fa09d7007f7d459f98a169f144cb18b4a | 436 | py | Python | fitapp/migrations/0003_add_refresh_token_field.py | thesignalcenter/django-fitbit | aa17ee5dacbbf4ad1edea85f480829185e6f39f9 | [
"Apache-2.0"
] | null | null | null | fitapp/migrations/0003_add_refresh_token_field.py | thesignalcenter/django-fitbit | aa17ee5dacbbf4ad1edea85f480829185e6f39f9 | [
"Apache-2.0"
] | null | null | null | fitapp/migrations/0003_add_refresh_token_field.py | thesignalcenter/django-fitbit | aa17ee5dacbbf4ad1edea85f480829185e6f39f9 | [
"Apache-2.0"
] | 2 | 2018-06-21T20:12:01.000Z | 2019-06-11T23:32:07.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the refresh_token column to the UserFitbit model.

    dependencies = [
        ('fitapp', '0002_initial_data'),
    ]

    operations = [
        migrations.AddField(
            model_name='userfitbit',
            name='refresh_token',
            # default='' lets the column be added to existing rows; the default
            # is not kept on the model afterwards (preserve_default=False).
            field=models.TextField(default=''),
            preserve_default=False,
        ),
    ]
bcbc5e0252a9bf13b45ca42b9ec85156aae3c752 | 11,185 | py | Python | models.py | kevinddchen/FCN-PascalVOC | 2b7bfbcc0d8d237388a7ea70ee062069e3c32af6 | [
"MIT"
] | 5 | 2021-04-30T16:54:16.000Z | 2022-01-14T06:54:39.000Z | models.py | kevinddchen/FCN-PascalVOC | 2b7bfbcc0d8d237388a7ea70ee062069e3c32af6 | [
"MIT"
] | null | null | null | models.py | kevinddchen/FCN-PascalVOC | 2b7bfbcc0d8d237388a7ea70ee062069e3c32af6 | [
"MIT"
] | 3 | 2021-05-28T03:27:23.000Z | 2022-03-27T05:56:34.000Z | '''
This file contains the FCN models.
'''
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
def vgg16(l2=0, dropout=0):
    '''Convolutionized VGG16 network.

    Args:
        l2 (float): L2 regularization strength
        dropout (float): Dropout rate

    Returns:
        (keras Model)
    '''
    ## Input + preprocessing
    input_layer = keras.Input(shape=(None, None, 3), name='input')
    x = keras.layers.Lambda(tf.keras.applications.vgg16.preprocess_input, name='preprocessing')(input_layer)

    ## Five convolutional blocks, each ending in 2x2 max pooling.
    block_filters = ((64, 64), (128, 128), (256, 256, 256), (512, 512, 512), (512, 512, 512))
    for block_idx, filter_counts in enumerate(block_filters, start=1):
        for conv_idx, n_filters in enumerate(filter_counts, start=1):
            x = keras.layers.Conv2D(filters=n_filters, kernel_size=(3, 3), strides=(1, 1),
                                    padding='same', activation='relu',
                                    kernel_regularizer=keras.regularizers.L2(l2=l2),
                                    name='block{}_conv{}'.format(block_idx, conv_idx))(x)
        x = keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='valid',
                                   name='block{}_pool'.format(block_idx))(x)

    ## Convolutionized fully-connected layers (fc6/fc7 of the original VGG16).
    x = keras.layers.Conv2D(filters=4096, kernel_size=(7, 7), strides=(1, 1), padding='same',
                            activation='relu', kernel_regularizer=keras.regularizers.L2(l2=l2),
                            name='conv6')(x)
    x = keras.layers.Dropout(rate=dropout, name='drop6')(x)
    x = keras.layers.Conv2D(filters=4096, kernel_size=(1, 1), strides=(1, 1), padding='same',
                            activation='relu', kernel_regularizer=keras.regularizers.L2(l2=l2),
                            name='conv7')(x)
    x = keras.layers.Dropout(rate=dropout, name='drop7')(x)

    ## 1000-way inference layer (ImageNet classes).
    x = keras.layers.Conv2D(filters=1000, kernel_size=(1, 1), strides=(1, 1), padding='same',
                            activation='softmax', name='pred')(x)
    return keras.Model(input_layer, x)
def fcn32(vgg16, l2=0):
    '''32x upsampled FCN.

    Args:
        vgg16 (keras Model): VGG16 model to build upon
        l2 (float): L2 regularization strength

    Returns:
        (keras Model)
    '''
    # 1x1 convolution scores the 21 classes from the fc7 features.
    score = keras.layers.Conv2D(filters=21, kernel_size=(1, 1), strides=(1, 1),
                                padding='same', activation='linear',
                                kernel_regularizer=keras.regularizers.L2(l2=l2),
                                name='score7')(vgg16.get_layer('drop7').output)
    # Single 32x upsampling (bilinear-initialized transpose conv) back to input resolution.
    upsampled = keras.layers.Conv2DTranspose(filters=21, kernel_size=(64, 64), strides=(32, 32),
                                             padding='same', use_bias=False, activation='softmax',
                                             kernel_initializer=BilinearInitializer(),
                                             kernel_regularizer=keras.regularizers.L2(l2=l2),
                                             name='fcn32')(score)
    return keras.Model(vgg16.input, upsampled)
def fcn16(vgg16, fcn32, l2=0):
    '''16x upsampled FCN.

    Args:
        vgg16 (keras Model): VGG16 model to build upon
        fcn32 (keras Model): FCN32 model to build upon
        l2 (float): L2 regularization strength

    Returns:
        (keras Model)
    '''
    # 2x upsample the coarse class scores produced by FCN32's score7 layer.
    x = keras.layers.Conv2DTranspose(filters=21, kernel_size=(4,4), strides=(2,2),
                                     padding='same', use_bias=False, activation='linear',
                                     kernel_initializer=BilinearInitializer(),
                                     kernel_regularizer=keras.regularizers.L2(l2=l2),
                                     name='score7_upsample')(fcn32.get_layer('score7').output)
    # Zero-initialized 1x1 scoring of the finer block4_pool features.
    y = keras.layers.Conv2D(filters=21, kernel_size=(1,1), strides=(1,1), padding='same', activation='linear',
                            kernel_initializer=keras.initializers.Zeros(),
                            kernel_regularizer=keras.regularizers.L2(l2=l2),
                            name='score4')(vgg16.get_layer('block4_pool').output)
    # Skip connection: fuse coarse (upsampled) and fine predictions.
    x = keras.layers.Add(name='skip4')([x, y])
    # Remaining 16x upsampling back to input resolution.
    x = keras.layers.Conv2DTranspose(filters=21, kernel_size=(32,32), strides=(16, 16),
                                     padding='same', use_bias=False, activation='softmax',
                                     kernel_initializer=BilinearInitializer(),
                                     kernel_regularizer=keras.regularizers.L2(l2=l2),
                                     name='fcn16')(x)
    return keras.Model(fcn32.input, x)
def fcn8(vgg16, fcn16, l2=0):
    '''8x upsampled FCN.

    Args:
        vgg16 (keras Model): VGG16 model to build upon
        fcn16 (keras Model): FCN16 model to build upon
        l2 (float): L2 regularization strength

    Returns:
        (keras Model)
    '''
    # 2x upsample the fused scores from FCN16's skip4 layer.
    x = keras.layers.Conv2DTranspose(filters=21, kernel_size=(4,4), strides=(2,2),
                                     padding='same', use_bias=False, activation='linear',
                                     kernel_initializer=BilinearInitializer(),
                                     kernel_regularizer=keras.regularizers.L2(l2=l2),
                                     name='skip4_upsample')(fcn16.get_layer('skip4').output)
    # Zero-initialized 1x1 scoring of the even finer block3_pool features.
    y = keras.layers.Conv2D(filters=21, kernel_size=(1,1), strides=(1,1), padding='same', activation='linear',
                            kernel_initializer=keras.initializers.Zeros(),
                            kernel_regularizer=keras.regularizers.L2(l2=l2),
                            name='score3')(vgg16.get_layer('block3_pool').output)
    # Skip connection: fuse the two prediction streams.
    x = keras.layers.Add(name='skip3')([x, y])
    # Remaining 8x upsampling back to input resolution.
    x = keras.layers.Conv2DTranspose(filters=21, kernel_size=(16,16), strides=(8,8),
                                     padding='same', use_bias=False, activation='softmax',
                                     kernel_initializer=BilinearInitializer(),
                                     kernel_regularizer=keras.regularizers.L2(l2=l2),
                                     name='fcn8')(x)
    return keras.Model(fcn16.input, x)
## ================
## Misc functions for training
## ================
class BilinearInitializer(keras.initializers.Initializer):
    '''Initializer for Conv2DTranspose to perform bilinear interpolation on each channel.'''

    def __call__(self, shape, dtype=None, **kwargs):
        kernel_size, _, filters, _ = shape
        upscale_factor = (kernel_size + 1) // 2
        # Kernel center lies on a grid point for odd sizes, between points for even.
        center = upscale_factor - 1 if kernel_size % 2 == 1 else upscale_factor - 0.5
        # 1-D triangular (hat) profile; its outer product is the 2-D bilinear kernel.
        ramp = 1 - np.abs(np.arange(kernel_size) - center) / upscale_factor
        kernel = np.outer(ramp, ramp)
        # Put the same spatial kernel on each (in, out) channel pair and zero
        # elsewhere, so every channel is upsampled independently.
        arr = np.zeros((kernel_size, kernel_size, filters, filters))
        for ch in range(filters):
            arr[..., ch, ch] = kernel
        return tf.convert_to_tensor(arr, dtype=dtype)
def crossentropy(y_true, y_pred_onehot):
    '''Custom cross-entropy to handle borders (class = -1).'''
    # Count non-border pixels; border pixels are masked with value 255 here.
    # NOTE(review): docstring says class = -1 -- presumably labels are uint8
    # where -1 wraps to 255. Confirm against the data pipeline.
    n_valid = tf.math.reduce_sum(tf.cast(y_true != 255, tf.float32))
    # One-hot encode classes 0..20; the border value yields an all-zero row,
    # so it contributes nothing to the loss sum.
    y_true_onehot = tf.cast(np.arange(21) == y_true, tf.float32)
    # 1e-7 guards log(0); average over valid (non-border) pixels only.
    return tf.reduce_sum(-y_true_onehot * tf.math.log(y_pred_onehot + 1e-7)) / n_valid
def pixelacc(y_true, y_pred_onehot):
    '''Custom pixel accuracy to handle borders (class = -1).'''
    # Denominator counts only non-border pixels (border mask value is 255).
    n_valid = tf.math.reduce_sum(tf.cast(y_true != 255, tf.float32))
    # Drop the trailing channel axis from the label map.
    y_true = tf.cast(y_true, tf.int32)[..., 0]
    y_pred = tf.argmax(y_pred_onehot, axis=-1, output_type=tf.int32)
    # Border pixels (255) can never equal a predicted class in 0..20, so they
    # add nothing to the numerator.
    return tf.reduce_sum(tf.cast(y_true == y_pred, tf.float32)) / n_valid
class MyMeanIoU(keras.metrics.MeanIoU):
    '''Custom meanIoU to handle borders (class = -1).'''

    def update_state(self, y_true, y_pred_onehot, sample_weight=None):
        # Collapse one-hot predictions to class indices.
        y_pred = tf.argmax(y_pred_onehot, axis=-1)
        ## add 1 so boundary class=0
        y_true = tf.cast(y_true+1, self._dtype)
        y_pred = tf.cast(y_pred+1, self._dtype)
        ## Flatten the input if its rank > 1.
        if y_pred.shape.ndims > 1:
            y_pred = tf.reshape(y_pred, [-1])
        if y_true.shape.ndims > 1:
            y_true = tf.reshape(y_true, [-1])
        ## calculate confusion matrix with one extra class
        current_cm = tf.math.confusion_matrix(
            y_true,
            y_pred,
            self.num_classes+1,
            weights=sample_weight,
            dtype=self._dtype)
        # Drop row/column 0 (the shifted boundary class) before accumulating.
        return self.total_cm.assign_add(current_cm[1:, 1:])  # remove boundary
| 51.307339 | 114 | 0.598391 |
fa1f7c8eb32f18c39216e57bb12a222820092595 | 1,803 | py | Python | src/transformers/models/led/tokenization_led.py | kct22aws/transformers | 04cddaf402591e9f5bdb5f116a111d829a0ce4f4 | [
"Apache-2.0"
] | 31 | 2022-02-02T13:13:41.000Z | 2022-03-29T08:37:20.000Z | src/transformers/models/led/tokenization_led.py | guang7400613/transformers | 28e091430eea9e0d40839e56fd0d57aec262f5f9 | [
"Apache-2.0"
] | 2 | 2022-03-14T10:13:16.000Z | 2022-03-14T11:50:27.000Z | src/transformers/models/led/tokenization_led.py | guang7400613/transformers | 28e091430eea9e0d40839e56fd0d57aec262f5f9 | [
"Apache-2.0"
] | 2 | 2022-03-21T04:32:39.000Z | 2022-03-22T01:02:49.000Z | # coding=utf-8
# Copyright 2021 Iz Beltagy, Matthew E. Peters, Arman Cohan and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for LED."""
from ...utils import logging
from ..bart.tokenization_bart import BartTokenizer
# Module-level logger following the transformers logging convention.
logger = logging.get_logger(__name__)

# Hub URLs of the vocabulary, merges and tokenizer files per checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

# Maximum model input size (in tokens) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
class LEDTokenizer(BartTokenizer):
    """
    Construct a LED tokenizer.

    [`LEDTokenizer`] is identical to [`BartTokenizer`] and runs end-to-end tokenization: punctuation splitting and
    wordpiece.

    Refer to superclass [`BartTokenizer`] for usage examples and documentation concerning parameters.
    """

    # Hub locations of the vocab/merges/tokenizer files for each checkpoint.
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    # Per-checkpoint maximum sequence lengths.
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
242d1803e77888bc97f6c659ba1d77eac0440c9b | 74 | py | Python | core/manager.py | xreaad/GetSiteScript | f40b72941c7e50392e9f1b655faea40dc2918cb5 | [
"MIT"
] | null | null | null | core/manager.py | xreaad/GetSiteScript | f40b72941c7e50392e9f1b655faea40dc2918cb5 | [
"MIT"
] | null | null | null | core/manager.py | xreaad/GetSiteScript | f40b72941c7e50392e9f1b655faea40dc2918cb5 | [
"MIT"
] | null | null | null | import os
import sys
class Manager:
    """Placeholder manager class; no behavior is implemented yet."""

    def __init__(self):
        # Intentionally empty -- initialization logic has not been written.
        pass
| 10.571429 | 23 | 0.648649 |
aa78424a710f3e046752f4fbea9b7ddbe57f3e69 | 7,693 | py | Python | monailabel/interfaces/datastore.py | IntroAI-termproject/MONAILabel | 6a0fcc797e24aff1a1582088bae71973b2b6582e | [
"Apache-2.0"
] | 214 | 2021-04-30T15:37:47.000Z | 2022-03-27T12:38:58.000Z | monailabel/interfaces/datastore.py | IntroAI-termproject/MONAILabel | 6a0fcc797e24aff1a1582088bae71973b2b6582e | [
"Apache-2.0"
] | 325 | 2021-04-30T15:59:16.000Z | 2022-03-31T19:39:38.000Z | monailabel/interfaces/datastore.py | IntroAI-termproject/MONAILabel | 6a0fcc797e24aff1a1582088bae71973b2b6582e | [
"Apache-2.0"
] | 50 | 2021-05-05T13:57:45.000Z | 2022-03-16T21:01:25.000Z | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta, abstractmethod
from enum import Enum
from typing import Any, Dict, List
class DefaultLabelTag(str, Enum):
    """Default tags used to distinguish labels stored in a datastore."""

    ORIGINAL = "original"
    FINAL = "final"
class Datastore(metaclass=ABCMeta):
    """Abstract interface for an image/label datastore.

    Concrete implementations provide storage and retrieval of images, their
    labels (identified by a tag such as DefaultLabelTag values), and
    associated metadata.
    """

    @abstractmethod
    def name(self) -> str:
        """
        Return the human-readable name of the datastore

        :return: the name of the dataset
        """
        pass

    @abstractmethod
    def set_name(self, name: str):
        """
        Set the name of the datastore

        :param name: a human-readable name for the datastore
        """
        pass

    @abstractmethod
    def description(self) -> str:
        """
        Return the user-set description of the dataset

        :return: the user-set description of the dataset
        """
        pass

    @abstractmethod
    def set_description(self, description: str):
        """
        A human-readable description of the datastore

        :param description: string for description
        """
        pass

    @abstractmethod
    def datalist(self) -> List[Dict[str, Any]]:
        """
        Return a dictionary of image and label pairs corresponding to the 'image' and 'label'
        keys respectively

        :return: the {'image': image, 'label': label} pairs for training
        """
        pass

    @abstractmethod
    def get_labels_by_image_id(self, image_id: str) -> Dict[str, str]:
        """
        Retrieve all label ids for the given image id

        :param image_id: the desired image's id
        :return: label ids mapped to the appropriate `LabelTag` as Dict[LabelTag, str]
        """
        pass

    @abstractmethod
    def get_label_by_image_id(self, image_id: str, tag: str) -> str:
        """
        Retrieve label id for the given image id and tag

        :param image_id: the desired image's id
        :param tag: matching tag name
        :return: label id
        """
        pass

    @abstractmethod
    def get_image(self, image_id: str) -> Any:
        """
        Retrieve image object based on image id

        :param image_id: the desired image's id
        :return: return the "image"
        """
        pass

    @abstractmethod
    def get_image_uri(self, image_id: str) -> str:
        """
        Retrieve image uri based on image id

        :param image_id: the desired image's id
        :return: return the image uri
        """
        pass

    @abstractmethod
    def get_label(self, label_id: str, label_tag: str) -> Any:
        """
        Retrieve image object based on label id

        :param label_id: the desired label's id
        :param label_tag: the matching label's tag
        :return: return the "label"
        """
        pass

    @abstractmethod
    def get_label_uri(self, label_id: str, label_tag: str) -> str:
        """
        Retrieve label uri based on image id

        :param label_id: the desired label's id
        :param label_tag: the matching label's tag
        :return: return the label uri
        """
        pass

    @abstractmethod
    def get_image_info(self, image_id: str) -> Dict[str, Any]:
        """
        Get the image information for the given image id

        :param image_id: the desired image id
        :return: image info as a list of dictionaries Dict[str, Any]
        """
        pass

    @abstractmethod
    def get_label_info(self, label_id: str, label_tag: str) -> Dict[str, Any]:
        """
        Get the label information for the given label id

        :param label_id: the desired label id
        :param label_tag: the matching label tag
        :return: label info as a list of dictionaries Dict[str, Any]
        """
        pass

    @abstractmethod
    def get_labeled_images(self) -> List[str]:
        """
        Get all images that have a corresponding final label

        :return: list of image ids List[str]
        """
        pass

    @abstractmethod
    def get_unlabeled_images(self) -> List[str]:
        """
        Get all images that have no corresponding final label

        :return: list of image ids List[str]
        """
        pass

    @abstractmethod
    def list_images(self) -> List[str]:
        """
        Return list of image ids available in the datastore

        :return: list of image ids List[str]
        """
        pass

    @abstractmethod
    def refresh(self) -> None:
        """
        Refresh the datastore
        """
        pass

    @abstractmethod
    def add_image(self, image_id: str, image_filename: str, image_info: Dict[str, Any]) -> str:
        """
        Save a image for the given image id and return the newly saved image's id

        :param image_id: the image id for the image; If None then base filename will be used
        :param image_filename: the path to the image file
        :param image_info: additional info for the image
        :return: the image id for the saved image filename
        """
        pass

    @abstractmethod
    def remove_image(self, image_id: str) -> None:
        """
        Remove image for the datastore.  This will also remove all associated labels.

        :param image_id: the image id for the image to be removed from datastore
        """
        pass

    @abstractmethod
    def save_label(self, image_id: str, label_filename: str, label_tag: str, label_info: Dict[str, Any]) -> str:
        """
        Save a label for the given image id and return the newly saved label's id

        :param image_id: the image id for the label
        :param label_filename: the path to the label file
        :param label_tag: the user-provided tag for the label
        :param label_info: additional info for the label
        :return: the label id for the given label filename
        """
        pass

    @abstractmethod
    def remove_label(self, label_id: str, label_tag: str) -> None:
        """
        Remove label from the datastore

        :param label_id: the label id for the label to be removed from datastore
        :param label_tag: the label tag for the label to be removed from datastore
        """
        pass

    @abstractmethod
    def update_image_info(self, image_id: str, info: Dict[str, Any]) -> None:
        """
        Update (or create a new) info tag for the desired image

        :param image_id: the id of the image we want to add/update info
        :param info: a dictionary of custom image information Dict[str, Any]
        """
        pass

    @abstractmethod
    def update_label_info(self, label_id: str, label_tag: str, info: Dict[str, Any]) -> None:
        """
        Update (or create a new) info tag for the desired label

        :param label_id: the id of the label we want to add/update info
        :param label_tag: the matching label tag
        :param info: a dictionary of custom label information Dict[str, Any]
        """
        pass

    @abstractmethod
    def status(self) -> Dict[str, Any]:
        """
        Return current statistics of datastore
        """
        pass

    @abstractmethod
    def json(self):
        """
        Return json representation of datastore
        """
        pass
253b6de596da4b2a054c70b2f0d60a9634fa1e77 | 2,552 | py | Python | tests/parser/dictionary/encoder/record/test_interested_party_agreement.py | orenyodfat/CWR-DataApi | f3b6ba8308c901b6ab87073c155c08e30692333c | [
"MIT"
] | 37 | 2015-04-21T15:33:53.000Z | 2022-02-07T00:02:29.000Z | tests/parser/dictionary/encoder/record/test_interested_party_agreement.py | orenyodfat/CWR-DataApi | f3b6ba8308c901b6ab87073c155c08e30692333c | [
"MIT"
] | 86 | 2015-02-01T22:26:02.000Z | 2021-07-09T08:49:36.000Z | tests/parser/dictionary/encoder/record/test_interested_party_agreement.py | orenyodfat/CWR-DataApi | f3b6ba8308c901b6ab87073c155c08e30692333c | [
"MIT"
] | 27 | 2015-01-26T16:01:09.000Z | 2021-11-08T23:53:55.000Z | # -*- coding: utf-8 -*-
import unittest
from cwr.parser.encoder.dictionary import \
InterestedPartyForAgreementDictionaryEncoder
from cwr.agreement import InterestedPartyForAgreementRecord
"""
InterestedPartyForAgreementRecord to dictionary encoding tests.
The following cases are tested:
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
class TestAgreementInterestedPartyRecordDictionaryEncoding(unittest.TestCase):
    """Checks that every field of an InterestedPartyForAgreementRecord
    survives the round trip into a dictionary."""

    def setUp(self):
        # Encoder under test; it is stateless, so one per test case suffices.
        self._encoder = InterestedPartyForAgreementDictionaryEncoder()

    def test_encoded(self):
        """Encoding a fully populated record preserves all fields."""
        data = InterestedPartyForAgreementRecord(record_type='ACK',
                                                 transaction_sequence_n=3,
                                                 record_sequence_n=15,
                                                 ip_n='AB12',
                                                 ip_last_name='LAST NAME',
                                                 agreement_role_code='AS',
                                                 ip_writer_first_name='FIRST NAME',
                                                 ipi_name_n='00014107338',
                                                 ipi_base_n='I-000000229-7',
                                                 pr_society=12,
                                                 pr_share=50.5,
                                                 mr_society=13,
                                                 mr_share=60.5,
                                                 sr_society=14,
                                                 sr_share=70.5)

        encoded = self._encoder.encode(data)

        # One assertion per encoded field, mirroring the constructor above.
        self.assertEqual('ACK', encoded['record_type'])
        self.assertEqual(3, encoded['transaction_sequence_n'])
        self.assertEqual(15, encoded['record_sequence_n'])
        self.assertEqual('AB12', encoded['ip_n'])
        self.assertEqual('LAST NAME', encoded['ip_last_name'])
        self.assertEqual('AS', encoded['agreement_role_code'])
        self.assertEqual('FIRST NAME', encoded['ip_writer_first_name'])
        self.assertEqual('00014107338', encoded['ipi_name_n'])
        self.assertEqual(12, encoded['pr_society'])
        self.assertEqual(50.5, encoded['pr_share'])
        self.assertEqual(13, encoded['mr_society'])
        self.assertEqual(60.5, encoded['mr_share'])
        self.assertEqual(14, encoded['sr_society'])
        self.assertEqual(70.5, encoded['sr_share'])
        self.assertEqual('I-000000229-7', encoded['ipi_base_n'])
| 43.254237 | 83 | 0.53174 |
916fbd0eb1115bc296798fc2698bfb4b39566a56 | 9,025 | py | Python | src/Summary/get_possible_diplotypes.py | Genomic-Medicine-Linkoping/pgx_module | 0254071a2e2d9fdb74a5ad70fcec2845a53fb087 | [
"MIT"
] | null | null | null | src/Summary/get_possible_diplotypes.py | Genomic-Medicine-Linkoping/pgx_module | 0254071a2e2d9fdb74a5ad70fcec2845a53fb087 | [
"MIT"
] | null | null | null | src/Summary/get_possible_diplotypes.py | Genomic-Medicine-Linkoping/pgx_module | 0254071a2e2d9fdb74a5ad70fcec2845a53fb087 | [
"MIT"
] | null | null | null | import pandas as pd
import re
import argparse
import sys
import numpy as np
class ArrangeHaplotype:
"""
Get possible haplotypes from variant combinations detected in file.
Add clinical guidelines based on the haplotypes detected
"""
def __init__(self,
detected_variants,
haplotype_definitions,
activity_scores,
clinical_guidelines):
self.detected_variants = pd.read_csv(detected_variants, sep="\t")
self.haplotype_definitions = pd.read_csv(haplotype_definitions, sep="\t")
self.activity_scores = pd.read_csv(activity_scores, sep="\t")
self.clinical_guidelines = pd.read_csv(clinical_guidelines, sep="\t")
if not self.detected_variants.empty:
self.merge_data()
def merge_data(self):
"""
Join possible haplotypes containing ids
:return:
"""
self.detected_variants["multival_haplotype"] = \
self.detected_variants.ID.apply(
lambda x: list(self.haplotype_definitions["HAPLOTYPE"][
self.haplotype_definitions.ID == x
])
)
self.detected_variants["CN"] = self.detected_variants["GT"].apply(
lambda x: sum(map(int, re.split('[/|]+', x)))
)
def get_haplotypes(self):
"""
Return tree-structure of possible combinations of haplotypes explaning seen variants.
Assumptions: Haplotype explaning most variants goes first, if any of variants in haplotype
is zero the all haplotypes containing that variant is removed for futher chocies.
:return: Gene - haplotype-tree dict
"""
def _get_haplotypes(variant_subdf, current_haplotype, depth=2):
idx = variant_subdf["multival_haplotype"].apply(lambda x: current_haplotype in x)
variant_subdf.loc[idx, "CN"] -= 1
if any(variant_subdf["CN"] == 0):
remove_hap = lambda x, y: x.remove(y) if y in x else x
variant_subdf["multival_haplotype"] = variant_subdf["multival_haplotype"].apply(
lambda x: remove_hap(x, current_haplotype)
)
variant_subdf = variant_subdf[variant_subdf["CN"] != 0]
if depth == 1:
if len(variant_subdf) == 0:
return [current_haplotype, True]
else:
return [current_haplotype, False]
if len(variant_subdf) == 0 or not any(variant_subdf["multival_haplotype"].apply(lambda x: bool(x))):
wt_haplotype = "WT"
return [current_haplotype, [_get_haplotypes(variant_subdf.copy(), wt_haplotype, depth - 1)]]
remaining_haplo = set([element for halpolist in variant_subdf["multival_haplotype"]
for element in halpolist])
return [current_haplotype, [
_get_haplotypes(variant_subdf.copy(), hap, depth - 1) for hap in remaining_haplo
]]
genes = set(self.detected_variants["GENE"])
full_mat = {}
for gene in genes:
gene_subset = self.detected_variants[self.detected_variants.GENE == gene]
print(gene_subset)
candidate_haplotypes = np.array(list(set(
[element for halpolist in gene_subset["multival_haplotype"] for element in halpolist]
)))
order = list(reversed(np.argsort(
[sum(gene_subset["multival_haplotype"].apply(lambda y: x in y)) for x in candidate_haplotypes]
)))
candidate_haplotypes = candidate_haplotypes[order]
gene_haps = []
for current_haplotype in candidate_haplotypes:
gene_haps.append(_get_haplotypes(gene_subset.copy(), current_haplotype))
idx = gene_subset["multival_haplotype"].apply(lambda x: current_haplotype in x)
cn = gene_subset.loc[idx, "CN"]
if any([(c - 1) == 0 for c in cn]):
remove_hap = lambda x, y: x.remove(y) if y in x else x
gene_subset["multival_haplotype"] = gene_subset["multival_haplotype"].apply(
lambda x: remove_hap(x, current_haplotype)
)
full_mat.update({gene: gene_haps})
return full_mat
def get_haplotype_dataframe(self): # Wow what a mess
hap_mat = self.get_haplotypes()
def _haplot_to_row(hap, gene):
def prim_haplot_to_row(hap, gene):
current_hap = (f"{gene}-1" if hap[0] == "WT" else hap[0])
if not type(hap[1]) is list:
return [current_hap, gene, hap[1]]
else:
next_hap = [prim_haplot_to_row(c_hap, gene) for c_hap in hap[1]]
return [[current_hap] + c_hap for c_hap in next_hap][0]
return [prim_haplot_to_row(c_hap, gene) for c_hap in hap]
out = []
for gene, hap in hap_mat.items():
if len(hap) != 0:
out += _haplot_to_row(hap, gene)
hap_df = pd.DataFrame(out, columns=["Haplotype1", "Haplotype2", "gene", "pass"])
hap_df = hap_df[hap_df["pass"]]
return hap_df[["gene", "Haplotype1", "Haplotype2"]]
def get_clinical_guidelines_table(self):
if self.detected_variants.empty:
columns = [
"gene", "Haplotype1", "Haplotype2",
"HAPLOTYPE1", "ACTIVITY_SCORE1", "HAPLOTYPE2",
"ACTIVITY_SCORE2", "Genotype_activity",
"Gene", "Activity", "Guideline"
]
return pd.DataFrame(columns=columns)
hap_df = self.get_haplotype_dataframe()
hap_df = hap_df.merge(self.activity_scores, how="left",
left_on="Haplotype1", right_on="HAPLOTYPE")
hap_df = hap_df.merge(self.activity_scores, how="left",
left_on="Haplotype2", right_on="HAPLOTYPE", suffixes=("1", "2"))
hap_df["Genotype_activity"] = hap_df["ACTIVITY_SCORE1"] + hap_df["ACTIVITY_SCORE2"]
hap_df = hap_df.merge(self.clinical_guidelines, how="left",
left_on=["gene", "Genotype_activity"], right_on=["Gene", "Activity"])
return hap_df
def get_wildtypes(self, hap_df):
hap_genes = list(hap_df.gene.values)
for gene in set(self.clinical_guidelines.Gene):
if hap_df.empty or gene not in hap_genes:
gene_df = pd.DataFrame(
{"gene": [gene], "Haplotype1": [gene + "-1"], "Haplotype2": [gene + "-1"],
"HAPLOTYPE1": [gene + "-1"], "ACTIVITY_SCORE1": [1], "HAPLOTYPE2": [gene + "-1"],
"ACTIVITY_SCORE2": [1], "Genotype_activity": [2.0]})
gene_df = gene_df.merge(self.clinical_guidelines, how="left",
left_on=["gene", "Genotype_activity"], right_on=["Gene", "Activity"])
hap_df = hap_df.append(gene_df, ignore_index=True)
return hap_df
def main():
    """Read detected variants, enumerate haplotype pairs with clinical
    guidelines, drop hidden haplotype combinations, and write a TSV."""
    parser = argparse.ArgumentParser(
        description="Finds selected RSIDs form bed file in input VCF"
    )
    parser.add_argument("--variant_csv", type=str)
    parser.add_argument("--haplotype_definitions", type=str)
    parser.add_argument("--clinical_guidelines", type=str)
    parser.add_argument("--haplotype_activity", type=str)
    parser.add_argument("--hidden_haplotypes", type=str)
    parser.add_argument("--output", type=str, help="Location of output")

    args = parser.parse_args(sys.argv[1:])
    variant_csv = args.variant_csv
    haplotype_definitions = args.haplotype_definitions
    clinical_guidelines = args.clinical_guidelines
    haplotype_activity = args.haplotype_activity
    output = args.output
    hidden_haplotypes = args.hidden_haplotypes

    ah = ArrangeHaplotype(
        variant_csv,
        haplotype_definitions,
        haplotype_activity,
        clinical_guidelines
    )

    df = ah.get_clinical_guidelines_table()
    df = ah.get_wildtypes(df)

    columns = [
        "gene", "Haplotype1", "Haplotype2",
        "HAPLOTYPE1", "ACTIVITY_SCORE1", "HAPLOTYPE2",
        "ACTIVITY_SCORE2", "Genotype_activity",
        "Gene", "Activity", "Guideline"
    ]
    if not df.empty:
        hidden_haplotypes = pd.read_csv(
            hidden_haplotypes, sep="\t"
        )
        # Build an order-independent key per haplotype pair so that
        # (A, B) and (B, A) compare equal when filtering hidden pairs.
        hidden_haplotypes["comb"] = hidden_haplotypes[["Haplotype1", "Haplotype2"]].apply(
            lambda x: "".join(sorted(x.tolist())), axis=1
        )
        df["comb"] = df[["Haplotype1", "Haplotype2"]].apply(
            lambda x: "".join(sorted(x.tolist())), axis=1
        )
        print(df["comb"])
        print(hidden_haplotypes["comb"])
        df = df[~df["comb"].isin(hidden_haplotypes["comb"])]
    df.to_csv(output, sep="\t", index=False, columns=columns)


if __name__ == '__main__':
    main()
| 41.022727 | 112 | 0.58903 |
4e4fdcd6310f0c1150f45896ccd1d5a6b6cd2e49 | 2,627 | py | Python | site_scons/site_tools/abilink.py | MartinNeupauer/mongo | 6cc2dfe7edd312b8596355edef454e15988e350e | [
"Apache-2.0"
] | 1 | 2015-11-08T17:16:08.000Z | 2015-11-08T17:16:08.000Z | site_scons/site_tools/abilink.py | MartinNeupauer/mongo | 6cc2dfe7edd312b8596355edef454e15988e350e | [
"Apache-2.0"
] | 2 | 2021-03-26T00:01:11.000Z | 2021-03-26T00:02:19.000Z | site_scons/site_tools/abilink.py | MartinNeupauer/mongo | 6cc2dfe7edd312b8596355edef454e15988e350e | [
"Apache-2.0"
] | 1 | 2021-06-18T05:00:06.000Z | 2021-06-18T05:00:06.000Z | # Copyright 2015 MongoDB Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import SCons
import subprocess
# TODO: Make a SUFF variable for the suffix to write to
# TODO: Prevent using abilink when -gsplit-dwarf is in play, since it doesn't work
# TODO: Make a variable for the md5sum utility (allow any hasher)
# TODO: Add an ABILINKCOM variable to the Action, so it can be silenced.
def _detect(env):
try:
abidw = env['ABIDW']
if not abidw:
return None
return abidw
except KeyError:
pass
return env.WhereIs('abidw')
def _add_emitter(builder):
    """Extend the builder's emitter so every target also produces a
    ``<target>.abidw`` checksum file, remembered via a node attribute."""
    base_emitter = builder.emitter

    def new_emitter(target, source, env):
        new_targets = []
        for t in target:
            abidw = str(t) + ".abidw"
            abidw = (t.builder.target_factory or env.File)(abidw)
            new_targets.append(abidw)
            # Record the checksum node so the scanner can later substitute
            # it for the library node itself.
            setattr(t.attributes, "abidw", abidw)
        targets = target + new_targets
        return (targets, source)

    new_emitter = SCons.Builder.ListEmitter([base_emitter, new_emitter])
    builder.emitter = new_emitter
def _add_scanner(builder):
    """Wrap the builder's target scanner so dependencies on shared libraries
    are redirected to their ``.abidw`` checksum files; consumers then only
    rebuild when the ABI checksum changes, not on every relink."""
    old_scanner = builder.target_scanner
    path_function = old_scanner.path_function

    def new_scanner(node, env, path):
        old_results = old_scanner(node, env, path)
        new_results = []
        for base in old_results:
            # Substitute the checksum node recorded by the emitter, when
            # one exists; otherwise keep the original dependency.
            abidw = getattr(env.Entry(base).attributes, "abidw", None)
            new_results.append(abidw if abidw else base)
        return new_results

    builder.target_scanner = SCons.Scanner.Scanner(function=new_scanner, path_function=path_function)
def _add_action(builder):
    """Append an action hashing the ABI description into ``<target>.abidw``."""
    actions = builder.action
    builder.action = actions + SCons.Action.Action("$ABIDW $TARGET | md5sum > ${TARGET}.abidw")
def exists(env):
    """Return True when the abidw tool can be found for *env*."""
    # PEP 8 (E711): compare against None with ``is``/``is not``, not ``!=``.
    return _detect(env) is not None
def generate(env):
    """Hook abidw-based ABI checksumming into the build, when the tool exists."""
    if not exists(env):
        return

    builder = env['BUILDERS']['SharedLibrary']
    _add_emitter(builder)
    _add_action(builder)
    _add_scanner(builder)
    # Consumers of shared libraries should depend on the ABI checksum
    # rather than the library node itself.
    _add_scanner(env['BUILDERS']['Program'])
    _add_scanner(env['BUILDERS']['LoadableModule'])
| 31.650602 | 101 | 0.687096 |
2003e1b11126e1f0bbd412d6d4196770feec97d6 | 1,804 | py | Python | modules/action/apt2_whois.py | Marx314/apt2 | f0782ec121057bd5d8c0c5fa438d0a13dc4dd448 | [
"MIT"
] | 2 | 2017-05-11T22:08:47.000Z | 2020-04-07T06:31:26.000Z | modules/action/apt2_whois.py | Marx314/apt2 | f0782ec121057bd5d8c0c5fa438d0a13dc4dd448 | [
"MIT"
] | null | null | null | modules/action/apt2_whois.py | Marx314/apt2 | f0782ec121057bd5d8c0c5fa438d0a13dc4dd448 | [
"MIT"
] | 1 | 2020-04-07T06:31:27.000Z | 2020-04-07T06:31:27.000Z | import re
import sys
try:
import whois
except ImportError:
raise ImportError('Missing whois library. To install run: pip install whois')
from core.actionModule import actionModule
from core.keystore import KeyStore as kb
from core.utils import Utils
class apt2_whois(actionModule):
    """Run a WHOIS lookup on every discovered hostname/domain and store the
    harvested address and e-mail contacts in the knowledge base."""

    def __init__(self, config, display, lock):
        super(apt2_whois, self).__init__(config, display, lock)
        self.title = "run whois"
        self.shortName = "Whois"
        self.description = "execute [whois] on each target"
        self.types = ["osint"]
        self.requirements = []
        self.triggers = ["newHostname", "newDomain"]
        self.safeLevel = 5

    def getTargets(self):
        """Collect the hostnames and domains gathered so far from the KB."""
        self.targets = kb.get(['osint/hostname', 'osint/domain'])

    def process(self):
        """WHOIS each unseen target, record findings, save the raw proof."""
        # early out if the osint depth is reached
        if (int(self.getVectorDepth()) > int(self.config['max_osint_depth'])):
            return

        # load any targets we are interested in
        self.getTargets()

        # loop over each target
        for t in self.targets:
            # verify we have not tested this host before
            if not self.seentarget(t):
                # add the to the already seen list
                self.addseentarget(t)

                # make outfile
                temp_file = self.config["proofsDir"] + self.shortName + "_" + t + "_" + Utils.getRandStr(10)

                result = whois.whois(t)
                address = result['address']
                if address:
                    # BUG FIX: both kb.add() calls below were missing their
                    # closing parenthesis (a syntax error as written).
                    kb.add("osint/address/" + address)
                emails = result['emails']
                if emails:
                    for email in emails:
                        kb.add("osint/email/" + email)
                Utils.writeFile(str(result), temp_file)
        return
| 32.8 | 108 | 0.576497 |
6f6eb2d3cb0743e8458276c48c132c8a69fe7c76 | 29 | py | Python | anubis-management-api/anubis/version.py | orchestracities/anubis | 34cfeeef4485f2e011da6e6216d8c67041366c63 | [
"Apache-2.0"
] | null | null | null | anubis-management-api/anubis/version.py | orchestracities/anubis | 34cfeeef4485f2e011da6e6216d8c67041366c63 | [
"Apache-2.0"
] | 21 | 2022-02-21T14:07:04.000Z | 2022-03-29T14:19:37.000Z | anubis-management-api/anubis/version.py | orchestracities/anubis | 34cfeeef4485f2e011da6e6216d8c67041366c63 | [
"Apache-2.0"
] | null | null | null | ANUBIS_VERSION = '0.3.0-dev'
| 14.5 | 28 | 0.689655 |
607b8097713bcea13b07f082bceeebcc2fb0a7eb | 1,100 | py | Python | setup.py | violafanfani/github_settings_tool | 64143112ee99266dea0c250880e3324313570169 | [
"MIT"
] | null | null | null | setup.py | violafanfani/github_settings_tool | 64143112ee99266dea0c250880e3324313570169 | [
"MIT"
] | null | null | null | setup.py | violafanfani/github_settings_tool | 64143112ee99266dea0c250880e3324313570169 | [
"MIT"
] | null | null | null | import os
from setuptools import find_packages, setup

# determining the directory containing setup.py
setup_path = os.path.abspath(os.path.dirname(__file__))

# The long description shown on PyPI is taken verbatim from the README.
with open(os.path.join(setup_path, 'README.rst'), encoding='utf-8') as f:
    readme = f.read()

setup(
    # package information
    name = 'github_settings_tool',
    packages = find_packages(),
    version = '0.0.3',
    description = 'tool to test how to publish a package',
    long_description = readme,
    license = 'MIT',
    url='https://github.com/violafanfani/github_settings_tool.git',
    keywords='',
    # author information
    author = 'Viola Fanfani',
    author_email = 'viola.fanfani@gmail.com',
    # installation info and requirements
    install_requires=[],
    setup_requires=[],
    # test info and requirements
    test_suite='tests',
    tests_require=[],
    # package deployment info
    include_package_data=True,
    zip_safe=False,
    # all tools have cli interface
    entry_points={
        'console_scripts': [
            'github_settings_tool=github_settings_tool.cli:main',
        ],
    },
)
| 25 | 73 | 0.671818 |
899f10ff91da824ee133cb54a85fc2654652f249 | 5,737 | py | Python | docs/conf.py | cffbots/flamingo | 647df63b75443d3a56643e2bfdc623ebb203d850 | [
"Apache-2.0"
] | 1 | 2021-01-12T21:52:06.000Z | 2021-01-12T21:52:06.000Z | docs/conf.py | cffbots/flamingo | 647df63b75443d3a56643e2bfdc623ebb203d850 | [
"Apache-2.0"
] | 61 | 2020-11-04T17:17:35.000Z | 2022-02-03T14:02:56.000Z | docs/conf.py | cffbots/flamingo | 647df63b75443d3a56643e2bfdc623ebb203d850 | [
"Apache-2.0"
] | 1 | 2022-02-03T13:43:50.000Z | 2022-02-03T13:43:50.000Z | # -*- coding: utf-8 -*-
#
# flamingo documentation build configuration file, created by
# sphinx-quickstart on Fri Oct 02 09:45:25 2020.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
here = os.path.dirname(__file__)
sys.path.insert(0, os.path.abspath(os.path.join(here, '..')))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# BUG FIX: 'sphinx.ext.intersphinx' must be enabled for the
# intersphinx_mapping defined below to have any effect.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon',
              'sphinx.ext.intersphinx']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'flamingo'
copyright = u'2020, Netherlands eScience Center'
author = u"Felipe Zapata"

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# (The project root was already added to sys.path at the top of this file;
# the duplicated here/sys.path.insert block that used to live here has been
# removed.)
vers = {}
with open(os.path.join(here, '..', 'flamingo', '__version__.py')) as f:
    exec(f.read(), vers)
version = vers["__version__"]
# The full version, including alpha/beta/rc tags
release = vers["__version__"]

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

intersphinx_mapping = {
    'python': ('https://docs.python.org/3/', None),
    'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
    'numpy': ('https://numpy.org/doc/stable/', None)
}

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

html_context = {
    'css_files': [
        '_static/theme_overrides.css',  # override wide tables in RTD theme
    ],
}

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
    '**': [
        'relations.html',  # needs 'show_related': True theme option to display
        'searchbox.html',
    ]
}

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'flamingo_doc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'flamingo.tex', u'flamingo Documentation',
     u"Felipe Zapata", 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'flamingo', u'flamingo Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'flamingo', u'flamingo Documentation',
     author, 'flamingo', "Compute and filter molecular properties",
     'Miscellaneous'),
]
| 30.844086 | 79 | 0.678926 |
67b910dfd27a7747893b089964174ac061399519 | 587 | py | Python | keyvalue.py | nanspro/nuBox-CLI | 5e4d906a49378f3a9968cc627668dec1ae5d8e32 | [
"MIT"
] | null | null | null | keyvalue.py | nanspro/nuBox-CLI | 5e4d906a49378f3a9968cc627668dec1ae5d8e32 | [
"MIT"
] | null | null | null | keyvalue.py | nanspro/nuBox-CLI | 5e4d906a49378f3a9968cc627668dec1ae5d8e32 | [
"MIT"
] | null | null | null | import requests
import urllib.parse
api_key = '52f33b0158c319fd8afe53610f55289965993970164ea71212fe63'
def set_value(key, value):
    """Store *value* under *key* in the meeiot key/value service.

    :param key: key name; URL-quoted before being placed in the path.
    :param value: JSON-serializable payload sent as the request body.
    :return: True when the service reports success (response starting
        with "0"), False otherwise — including an empty response body,
        which previously raised IndexError.
    """
    key = urllib.parse.quote(key)
    url = f"https://meeiot.org/put/{api_key}/{key}"
    # NOTE(review): verify=False disables TLS certificate verification and
    # should be removed unless the endpoint genuinely lacks a valid cert.
    x = requests.post(url, json=value, verify=False)
    # startswith() is robust to an empty body, unlike indexing res[0].
    return x.text.startswith("0")
def get_value(key):
    """Fetch the JSON value stored under *key*, or None if unavailable.

    :param key: key name; URL-quoted before being placed in the path.
    :return: decoded JSON payload, or None when the body is not valid JSON.
    """
    key = urllib.parse.quote(key)
    url = f"https://meeiot.org/get/{api_key}/{key}"
    # NOTE(review): verify=False disables TLS certificate verification.
    x = requests.get(url, verify=False)
    try:
        return x.json()
    except ValueError:
        # Response body was not valid JSON (Response.json raises a
        # JSONDecodeError, a ValueError subclass); the previous broad
        # ``except Exception`` also hid unrelated bugs.
        return None
| 21.740741 | 66 | 0.635434 |
82741c6fbf304bc99295d7bdcc1e7ac965b0493b | 1,856 | py | Python | core/domain/platform_parameter_list.py | swyuan27/oppia | da4c733659b8813eccf738ff8be19123ebcdeb15 | [
"Apache-2.0"
] | 5,422 | 2015-08-14T01:56:44.000Z | 2022-03-31T23:31:56.000Z | core/domain/platform_parameter_list.py | omprakash1999mina/oppia | 00282e533b5832cb763100de1a5cc727644d64ef | [
"Apache-2.0"
] | 14,178 | 2015-08-14T05:21:45.000Z | 2022-03-31T23:54:10.000Z | core/domain/platform_parameter_list.py | omprakash1999mina/oppia | 00282e533b5832cb763100de1a5cc727644d64ef | [
"Apache-2.0"
] | 3,574 | 2015-08-14T04:20:06.000Z | 2022-03-29T01:52:37.000Z | # coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of platform parameters."""
from __future__ import annotations
import enum
from core.domain import platform_parameter_domain
from core.domain import platform_parameter_registry as registry
# Short module-level aliases; the pylint pragmas acknowledge the
# intentionally constant-style (non-snake_case) names.
Registry = registry.Registry
FEATURE_STAGES = platform_parameter_domain.FEATURE_STAGES # pylint: disable=invalid-name
DATA_TYPES = platform_parameter_domain.DATA_TYPES # pylint: disable=invalid-name
# TODO(#14419): Change naming style of Enum class from SCREAMING_SNAKE_CASE
# to PascalCase and its values to UPPER_CASE. Because we want to be consistent
# throughout the codebase according to the coding style guide.
# https://github.com/oppia/oppia/wiki/Coding-style-guide
class PARAM_NAMES(enum.Enum): # pylint: disable=invalid-name
    """Enum for parameter names."""

    # Each value mirrors its member name, so ``member.value`` is the
    # registered platform-parameter name.
    dummy_feature = 'dummy_feature' # pylint: disable=invalid-name
    dummy_parameter = 'dummy_parameter' # pylint: disable=invalid-name
# Platform parameters should all be defined below.

# Feature flag gated to the development stage.
Registry.create_feature_flag(
    PARAM_NAMES.dummy_feature,
    'This is a dummy feature flag.',
    FEATURE_STAGES.dev,
)

# String-typed platform parameter.
Registry.create_platform_parameter(
    PARAM_NAMES.dummy_parameter,
    'This is a dummy platform parameter.',
    DATA_TYPES.string
)
| 33.745455 | 88 | 0.77694 |
bc364f14cceed2bc41fadf8a479fe0a72249abde | 3,078 | py | Python | test/test_docinit.py | mesca/docinit | b2d1c3bc4b2fc510497530d645e81b52d10a56c9 | [
"MIT"
] | null | null | null | test/test_docinit.py | mesca/docinit | b2d1c3bc4b2fc510497530d645e81b52d10a56c9 | [
"MIT"
] | null | null | null | test/test_docinit.py | mesca/docinit | b2d1c3bc4b2fc510497530d645e81b52d10a56c9 | [
"MIT"
] | null | null | null | import os
import pytest
from datetime import datetime
from docinit.docinit import Parse, Config, Git
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'setup.cfg')
def test_parse_bool():
    """Truthy/falsy option strings parse to booleans, case-insensitively."""
    assert Parse.option('tRUe') == True
    assert Parse.option('1') == True
    assert Parse.option('yEs') == True
    assert Parse.option('FalSE') == False
    assert Parse.option('0') == False
    assert Parse.option('No') == False
def test_parse_int():
    """Integer-looking strings parse to int."""
    assert Parse.option('42') == 42
def test_parse_float():
    """Float-looking strings parse to float."""
    assert Parse.option('1.337') == 1.337
def test_parse_none():
    """The literal 'none' (any case) parses to None."""
    assert Parse.option('noNe') == None
def test_parse_list():
    """Comma- or newline-separated values parse to a list of parsed items."""
    assert Parse.option('trUE,42, 7.8, hello, none') == [True, 42, 7.8, 'hello', None]
    assert Parse.option('\n42 \n hello') == [42, 'hello']
def test_parse_dict():
    """key=value pairs parse to a dict with parsed values."""
    assert Parse.option('\nfoo=bar\nbaz = 42') == {'foo': 'bar', 'baz': 42}
    assert Parse.option('a=b,b=yes') == {'a': 'b', 'b': True}
def test_config_default():
    """Defaults are filled in for options absent from the fixture config."""
    config = Config(path).config['docinit']
    assert config['doc_dir'] == 'doc'
    assert config['name'] == 'docinit'
    assert config['parent_url'] == None
def test_config_doc_dir():
    """[build_sphinx] source-dir overrides the default doc_dir."""
    config = Config(path)
    assert config.config['docinit']['doc_dir'] == 'doc'
    config.config['build_sphinx']['source-dir'] = 'foo'
    config._set_doc_dir()
    assert config.config['docinit']['doc_dir'] == 'foo'
def test_config_name():
    """Name precedence: build_sphinx project > metadata name > default."""
    config = Config(path)
    assert config.config['docinit']['name'] == 'docinit'
    config.config['metadata']['name'] = 'Foo'
    config._set_name()
    assert config.config['docinit']['name'] == 'Foo'
    config.config['build_sphinx']['project'] = 'Bar'
    config._set_name()
    assert config.config['docinit']['name'] == 'Bar'
def test_config_version():
    """The version is taken from [metadata]."""
    config = Config(path)
    config.config['metadata']['version'] = '42.0.0'
    config._set_version()
    assert config.config['docinit']['version'] == '42.0.0'
def test_config_author():
    """[metadata] author overrides the git-derived author."""
    config = Config(path)
    assert config.config['docinit']['author'] == 'mesca'
    config.config['metadata']['author'] = 'Me'
    config._set_author()
    assert config.config['docinit']['author'] == 'Me'
def test_config_packages():
    """Packages are taken from [options]."""
    config = Config(path)
    config.config['options']['packages'] = ['foo']
    config._set_packages()
    assert config.config['docinit']['packages'] == ['foo']
def test_config_copyright():
    """Copyright formats a year or year range, unless explicitly set."""
    # NOTE(review): the literal '2022' below pins the repository's
    # first-commit year; this assertion is brittle if the git history or
    # checkout changes — confirm the fixture's expected start year.
    year = str(datetime.now().year)
    config = Config(path)
    assert config.config['docinit']['copyright'].startswith('2022')
    config.config['git']['year'] = '2000'
    config._set_copyright()
    assert config.config['docinit']['copyright'] == f'2000-{year}, mesca'
    config.config['git']['year'] = year
    config._set_copyright()
    assert config.config['docinit']['copyright'] == f'{year}, mesca'
    config.config['build_sphinx']['copyright'] = 'foobar'
    config._set_copyright()
    assert config.config['docinit']['copyright'] == 'foobar'
def test_git():
    """Git metadata is derived from the enclosing repository."""
    # NOTE(review): hard-codes this repo's name and first-commit year
    # ('2022'); brittle if the repository the tests run in changes.
    info = Git().info
    assert info['name'] == 'docinit'
    assert info['year'] == '2022'
| 33.096774 | 89 | 0.64165 |
88fd4af0a125097cd4f2dda0b9c90b0a7d1ba326 | 988 | py | Python | src/param_scan/fns/calc_full_scan.py | nt409/HRHR | 62ab397650f4e2a1b1d0e6ef289b4e73790c777e | [
"MIT"
] | null | null | null | src/param_scan/fns/calc_full_scan.py | nt409/HRHR | 62ab397650f4e2a1b1d0e6ef289b4e73790c777e | [
"MIT"
] | null | null | null | src/param_scan/fns/calc_full_scan.py | nt409/HRHR | 62ab397650f4e2a1b1d0e6ef289b4e73790c777e | [
"MIT"
] | null | null | null | from tqdm import tqdm
import numpy as np
from param_scan.fns.calc_sing_ps_run import SinglePSRun, ScanOutput
class ParameterScan:
    """
    Inputs:
    - config: param scan config detailing runs to do and bounds on params
    - seed: random seed for these runs
    Outputs:
    - ScanOutput object
    """

    def __init__(self, config, seed) -> None:
        self.config = config
        self.seed = seed

    def run(self):
        """
        Run random scan over uniform dists and save output
        """
        output = self._get_scan_output()
        output.save(self.config, self.seed)

    def _get_scan_output(self):
        # Seeding numpy here makes each ParameterScan(config, seed)
        # reproducible run-to-run.
        np.random.seed(self.seed)

        scan_output = ScanOutput()

        N_ITS = self.config["n_iterations"]

        for r_ind in tqdm(range(N_ITS)):
            # Globally unique index: each seed owns a consecutive chunk of
            # N_ITS run indices.
            run_index = self.seed*N_ITS + r_ind

            this_run = SinglePSRun(self.config, run_index)

            scan_output.add_new_output(this_run.output)

        return scan_output
| 21.021277 | 73 | 0.62753 |
68a44b72c551b7b2e240efdfc52defb199d0905d | 432 | py | Python | game-watch-api/games/migrations/0011_alter_game_game_engines.py | fouadsan/game_watch | ca38d283ef8f55499ea520eb52a78ebfac8a77a4 | [
"MIT"
] | null | null | null | game-watch-api/games/migrations/0011_alter_game_game_engines.py | fouadsan/game_watch | ca38d283ef8f55499ea520eb52a78ebfac8a77a4 | [
"MIT"
] | null | null | null | game-watch-api/games/migrations/0011_alter_game_game_engines.py | fouadsan/game_watch | ca38d283ef8f55499ea520eb52a78ebfac8a77a4 | [
"MIT"
] | null | null | null | # Generated by Django 4.0 on 2022-03-21 09:45
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: makes Game.game_engines optional
    # (blank=True) on the many-to-many relation to games.Engine.

    dependencies = [
        ('games', '0010_alter_engine_name_alter_mode_name_and_more'),
    ]

    operations = [
        migrations.AlterField(
            model_name='game',
            name='game_engines',
            field=models.ManyToManyField(blank=True, to='games.Engine'),
        ),
    ]
| 22.736842 | 72 | 0.62963 |
f59778064500ae4744ea6ea2ca9758744eee39f1 | 5,247 | py | Python | pytest_bdd/cucumber_json.py | Prisma-Saas-Dev/public-repo1 | f7850807e7302effe6b654e2b3fecf2851d8dfca | [
"MIT"
] | 1 | 2015-11-21T17:46:49.000Z | 2015-11-21T17:46:49.000Z | pytest_bdd/cucumber_json.py | The-Compiler/pytest-bdd | 30076d478c34afc67c41460a2956b430f15e7f90 | [
"MIT"
] | null | null | null | pytest_bdd/cucumber_json.py | The-Compiler/pytest-bdd | 30076d478c34afc67c41460a2956b430f15e7f90 | [
"MIT"
] | null | null | null | """Cucumber json output formatter."""
import json
import math
import os
import time
import py
import six
from .feature import force_unicode
# Python 2/3 compatibility shim: Python 3 has no separate ``long`` type.
if six.PY3:
    long = int
def add_options(parser):
    """Add pytest-bdd options."""
    # Grouped under "bdd" so the flag appears with the other pytest-bdd
    # options in ``pytest --help``.
    group = parser.getgroup("bdd", "Cucumber JSON")
    group.addoption(
        "--cucumberjson",
        "--cucumber-json",
        action="store",
        dest="cucumber_json_path",
        metavar="path",
        default=None,
        help="create cucumber json style report file at given path.",
    )
def configure(config):
    """Activate the cucumber json plugin when a report path was requested."""
    path = config.option.cucumber_json_path
    # prevent opening json log on slave nodes (xdist)
    if not path or hasattr(config, "slaveinput"):
        return
    config._bddcucumberjson = LogBDDCucumberJSON(path)
    config.pluginmanager.register(config._bddcucumberjson)
def unconfigure(config):
    """Unregister and drop the cucumber json plugin if it was installed."""
    plugin = getattr(config, "_bddcucumberjson", None)
    if plugin is None:
        return
    del config._bddcucumberjson
    config.pluginmanager.unregister(plugin)
class LogBDDCucumberJSON(object):
    """Logging plugin for cucumber like json output.

    Accumulates scenario results during the test run, keyed by feature file
    name, and serializes everything to a cucumber-style JSON report when the
    pytest session finishes.
    """

    def __init__(self, logfile):
        # Normalize the user-supplied path (~, env vars) to an absolute path.
        logfile = os.path.expanduser(os.path.expandvars(logfile))
        self.logfile = os.path.normpath(os.path.abspath(logfile))
        # Maps feature filename -> serialized feature dict (see logreport hook).
        self.features = {}

    def append(self, obj):
        # NOTE(review): `self.features` is a dict, so `[-1]` is a key lookup,
        # not "last element" — this looks like a leftover from when `features`
        # was a list; confirm before relying on this method.
        self.features[-1].append(obj)

    def _get_result(self, step, report, error_message=False):
        """Get scenario test run result.

        :param step: `Step` step we get result for
        :param report: pytest `Report` object
        :param error_message: whether to include the full failure text

        :return: `dict` in form {"status": "<passed|failed|skipped>", ["error_message": "<error_message>"]}
        """
        result = {}
        if report.passed or not step["failed"]:  # ignore setup/teardown
            result = {"status": "passed"}
        elif report.failed and step["failed"]:
            result = {
                "status": "failed",
                # Only the first failing step carries the full error text.
                "error_message": force_unicode(report.longrepr) if error_message else "",
            }
        elif report.skipped:
            result = {"status": "skipped"}
        # Cucumber JSON expects the step duration in nanoseconds.
        result['duration'] = long(math.floor((10 ** 9) * step["duration"]))  # nanosec
        return result

    def _serialize_tags(self, item):
        """Serialize item's tags.

        :param item: json-serialized `Scenario` or `Feature`.

        :return: `list` of `dict` in the form of:
            [{"name": "<tag>", "line": 2}]
        """
        return [
            {
                "name": tag,
                "line": item["line_number"] - 1
            }
            for tag in item["tags"]
        ]

    def pytest_runtest_logreport(self, report):
        """Record one scenario's outcome into the per-feature structure."""
        try:
            scenario = report.scenario
        except AttributeError:
            # skip reporting for non-bdd tests
            return

        if not scenario["steps"] or report.when != "call":
            # skip if there isn't a result or scenario has no steps
            return

        def stepmap(step):
            # Mark only the first failing step so its error message is kept.
            error_message = False
            if step['failed'] and not scenario.setdefault('failed', False):
                scenario['failed'] = True
                error_message = True
            return {
                "keyword": step['keyword'],
                "name": step['name'],
                "line": step['line_number'],
                "match": {
                    "location": "",
                },
                "result": self._get_result(step, report, error_message),
            }

        # Lazily create the feature entry on this feature's first scenario.
        if scenario["feature"]["filename"] not in self.features:
            self.features[scenario["feature"]["filename"]] = {
                "keyword": "Feature",
                "uri": scenario["feature"]["rel_filename"],
                "name": scenario["feature"]["name"] or scenario["feature"]["rel_filename"],
                "id": scenario["feature"]["rel_filename"].lower().replace(" ", "-"),
                "line": scenario['feature']["line_number"],
                "description": scenario["feature"]["description"],
                "tags": self._serialize_tags(scenario["feature"]),
                "elements": [],
            }
        self.features[scenario["feature"]["filename"]]["elements"].append({
            "keyword": "Scenario",
            "id": report.item["name"],
            "name": scenario["name"],
            "line": scenario["line_number"],
            "description": "",
            "tags": self._serialize_tags(scenario),
            "type": "scenario",
            "steps": [stepmap(step) for step in scenario["steps"]],
        })

    def pytest_sessionstart(self):
        # Remember when the suite started (not currently used in the report).
        self.suite_start_time = time.time()

    def pytest_sessionfinish(self):
        # Python 2 needs codecs.open for an encoding-aware file handle.
        if py.std.sys.version_info[0] < 3:
            logfile_open = py.std.codecs.open
        else:
            logfile_open = open
        with logfile_open(self.logfile, "w", encoding="utf-8") as logfile:
            logfile.write(json.dumps(list(self.features.values())))

    def pytest_terminal_summary(self, terminalreporter):
        terminalreporter.write_sep("-", "generated json file: %s" % (self.logfile))
| 32.590062 | 107 | 0.554031 |
eafa493c7a8f949da66341d4ea1dd7380f645dbf | 318 | py | Python | py/orbit/py_linac/linac_parsers/__init__.py | LeoRya/py-orbit | 340b14b6fd041ed8ec2cc25b0821b85742aabe0c | [
"MIT"
] | 17 | 2018-02-09T23:39:06.000Z | 2022-03-04T16:27:04.000Z | py/orbit/py_linac/linac_parsers/__init__.py | LeoRya/py-orbit | 340b14b6fd041ed8ec2cc25b0821b85742aabe0c | [
"MIT"
] | 22 | 2017-05-31T19:40:14.000Z | 2021-09-24T22:07:47.000Z | py/orbit/py_linac/linac_parsers/__init__.py | LeoRya/py-orbit | 340b14b6fd041ed8ec2cc25b0821b85742aabe0c | [
"MIT"
] | 37 | 2016-12-08T19:39:35.000Z | 2022-02-11T19:59:34.000Z | ## \namespace orbit::py_linac::linac_parsers
## \Classes and packages of ORBIT Linac.
##
from sns_linac_lattice_factory import SNS_LinacLatticeFactory
from jparc_linac_lattice_factory import JPARC_LinacLatticeFactory
# Public API of the linac_parsers package.
__all__ = [
    "SNS_LinacLatticeFactory",
    "JPARC_LinacLatticeFactory",
]
| 28.909091 | 65 | 0.836478 |
0a37ce52a1774ee36652cf5b531f6bf94f74d271 | 915 | py | Python | caluma/core/jexl.py | czosel/caluma | 4a3e81b2000961ab934bfc1c6840ec00f0ba2c19 | [
"MIT"
] | null | null | null | caluma/core/jexl.py | czosel/caluma | 4a3e81b2000961ab934bfc1c6840ec00f0ba2c19 | [
"MIT"
] | null | null | null | caluma/core/jexl.py | czosel/caluma | 4a3e81b2000961ab934bfc1c6840ec00f0ba2c19 | [
"MIT"
] | null | null | null | import pyjexl
from pyjexl.analysis import ValidatingAnalyzer
from pyjexl.exceptions import ParseError
class JEXL(pyjexl.JEXL):
    """JEXL evaluator whose validation reports parse failures as strings."""

    def validate(self, expression, ValidatingAnalyzerClass=ValidatingAnalyzer):
        """Yield validation problems for *expression*; a ParseError is turned
        into its string representation instead of propagating."""
        try:
            yield from self.analyze(expression, ValidatingAnalyzerClass)
        except ParseError as err:
            yield str(err)
class ExtractTransformSubjectAnalyzer(ValidatingAnalyzer):
    """
    Extract all subject values of given transforms.

    If no transforms are given all subjects of all transforms will be extracted.

    :param config: analyzer configuration forwarded to ValidatingAnalyzer.
    :param transforms: optional collection of transform names to filter on.
    """

    def __init__(self, config, transforms=None):
        # Bug fix: the previous signature used `transforms=[]`, a shared
        # mutable default argument. `None` plus an empty tuple preserves the
        # original falsy "no filter" semantics without the shared-state hazard.
        self.transforms = transforms if transforms is not None else ()
        super().__init__(config)

    def visit_Transform(self, transform):
        # Yield the subject when no filter is set or the name matches.
        if not self.transforms or transform.name in self.transforms:
            yield transform.subject.value
        # Continue walking nested nodes.
        yield from self.generic_visit(transform)
| 30.5 | 80 | 0.702732 |
f05334c5e7e4e88c6659366d392314df39af0590 | 474 | py | Python | app/urls.py | AbdulAhadSiddiqui011/covidTracker | b8a1d2ba40c4e271db3b2e1814a88afd9b35ba1f | [
"MIT"
] | null | null | null | app/urls.py | AbdulAhadSiddiqui011/covidTracker | b8a1d2ba40c4e271db3b2e1814a88afd9b35ba1f | [
"MIT"
] | 2 | 2021-06-04T23:43:52.000Z | 2021-09-22T19:29:13.000Z | app/urls.py | AbdulAhadSiddiqui011/covidTracker | b8a1d2ba40c4e271db3b2e1814a88afd9b35ba1f | [
"MIT"
] | 1 | 2020-10-01T06:04:55.000Z | 2020-10-01T06:04:55.000Z | # -*- encoding: utf-8 -*-
"""
License: MIT
Copyright (c) 2019 - present AppSeed.us
"""
from django.urls import path, re_path
from app import views
# URL routing table for the app; names are used for reverse() lookups.
urlpatterns = [
    # Matches any html file - to be used for gentella
    # Avoid using your .html in your resources.
    # Or create a separate django app.
    re_path(r'^.*\.html', views.pages, name='pages'),
    # The home page
    path('', views.index, name='home'),
    # Statistics page, rendered by views.stats
    path('stats.html', views.stats, name='stats'),
]
| 23.7 | 53 | 0.64135 |
235fa5da3fd2a097c39a861cab2a6c7b3962db14 | 1,248 | py | Python | utils/email_helper.py | bobjiangps/python3_test_framework | cfcdc10f5bcf96bb31e417b800d829c61fbba07f | [
"MIT"
] | 2 | 2019-06-28T07:33:36.000Z | 2020-01-12T12:47:20.000Z | utils/email_helper.py | bobjiangps/python3_test_framework | cfcdc10f5bcf96bb31e417b800d829c61fbba07f | [
"MIT"
] | null | null | null | utils/email_helper.py | bobjiangps/python3_test_framework | cfcdc10f5bcf96bb31e417b800d829c61fbba07f | [
"MIT"
] | null | null | null | import smtplib
from email import encoders
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
def send_email(content, subject, email_setting, filename=None):
    """Send an HTML email through SMTP over SSL, optionally with an attachment.

    :param content: HTML body of the message.
    :param subject: subject line.
    :param email_setting: dict with keys "sender", "receiver", "cc"
        (";"-separated address lists; "cc" may be None), "smtp_server",
        "sender_username" and "sender_password".
    :param filename: optional path of a file to attach.
    """
    msg = MIMEMultipart("alternative")
    msg["Subject"] = subject
    msg["From"] = email_setting["sender"]
    msg["To"] = email_setting["receiver"]
    msg["Cc"] = email_setting["cc"]
    if filename:
        # Bug fix: the original wrapped every attachment in MIMEImage, which
        # mislabels non-image files; use a generic MIMEBase payload instead.
        maintype, subtype = "application/octet-stream".split("/", 1)
        attachment = MIMEBase(maintype, subtype)
        with open(filename, "rb") as attachment_file:
            attachment.set_payload(attachment_file.read())
        encoders.encode_base64(attachment)
        # Expose only the basename as the attachment's filename.
        attachment.add_header("Content-Disposition", "attachment",
                              filename=filename.replace("\\", "/").split("/")[-1])
        msg.attach(attachment)
    msg.attach(MIMEText(content, "html", _charset="utf-8"))
    recipients = email_setting["receiver"].split(";")
    if email_setting["cc"] is not None:
        recipients += email_setting["cc"].split(";")
    smtp = smtplib.SMTP_SSL(email_setting["smtp_server"])
    try:
        smtp.login(email_setting["sender_username"], email_setting["sender_password"])
        smtp.sendmail(email_setting["sender"], recipients, msg.as_string())
    finally:
        # Bug fix: close the connection even when login/sendmail raises.
        smtp.quit()
| 49.92 | 178 | 0.660256 |
e44b6d5d718b305e33a1f2532215181c1c048738 | 4,140 | py | Python | app/api/install_endpoint.py | ryomahan/read-tracardi-api | d0a012fb097ca81daf046b314000301eb54bfad8 | [
"MIT"
] | null | null | null | app/api/install_endpoint.py | ryomahan/read-tracardi-api | d0a012fb097ca81daf046b314000301eb54bfad8 | [
"MIT"
] | null | null | null | app/api/install_endpoint.py | ryomahan/read-tracardi-api | d0a012fb097ca81daf046b314000301eb54bfad8 | [
"MIT"
] | null | null | null | import logging
from typing import Optional
from fastapi import APIRouter, Depends, HTTPException
from app.config import server
from elasticsearch import ElasticsearchException
from tracardi.config import tracardi
from tracardi.domain.credentials import Credentials
from tracardi.domain.user import User
from tracardi.exceptions.log_handler import log_handler
from tracardi.service.setup.setup_indices import create_indices
from tracardi.service.setup.setup_plugins import add_plugins
from tracardi.service.storage.driver import storage
from tracardi.service.storage.index import resources
from tracardi.service.storage.indices_manager import get_indices_status, remove_index
router = APIRouter()
logger = logging.getLogger(__name__)
logger.setLevel(tracardi.logging_level)
logger.addHandler(log_handler)
@router.get("/install", tags=["installation"], include_in_schema=server.expose_gui_api, response_model=dict)
async def check_if_installation_complete():
    """
    Returns list of missing and updated indices
    """
    try:
        # Missing indices
        # get_indices_status() is an async generator; each item looks like a
        # (status, name) pair — bucket the names by status below.
        _indices = [item async for item in get_indices_status()]
        missing_indices = [idx[1] for idx in _indices if idx[0] == 'missing_index']
        existing_indices = [idx[1] for idx in _indices if idx[0] == 'existing_index']
        missing_templates = [idx[1] for idx in _indices if idx[0] == 'missing_template']
        missing_aliases = [idx[1] for idx in _indices if idx[0] == 'missing_alias']
        # existing_templates = [idx[1] for idx in _indices if idx[0] == 'existing_template']

        # Missing admin
        # Only query for admin users when the user index actually exists;
        # otherwise report an empty result set.
        index = resources.get_index('user')
        if index.get_aliased_data_index() in existing_indices:
            admins = await storage.driver.user.search_by_role('admin')
            admins = admins.dict()
        else:
            admins = {
                "total": 0,
                "result": []
            }

        return {
            "missing": missing_indices,
            "admins": admins,
            "missing_template": missing_templates,
            "missing_alias": missing_aliases,
        }

    except ElasticsearchException as e:
        # Surface storage failures as HTTP 500 with the original message.
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/install/plugins", tags=["installation"], include_in_schema=server.expose_gui_api, response_model=dict)
async def install_plugins():
    """Install/refresh plugins; storage failures become HTTP 500 responses."""
    try:
        result = await add_plugins()
    except ElasticsearchException as e:
        raise HTTPException(status_code=500, detail=str(e))
    return result
@router.post("/install", tags=["installation"], include_in_schema=server.expose_gui_api, response_model=dict)
async def install(credentials: Optional[Credentials]):
    """Create missing indices and, when no admin exists yet, a default admin
    account from the supplied credentials; optionally refresh plugins."""
    try:
        if server.reset_plugins is True:
            # Drop the plugin index so plugins are re-registered from scratch.
            await remove_index('action')

        result = {"created": await create_indices(), 'admin': False}

        # Add admin
        admins = await storage.driver.user.search_by_role('admin')

        if admins.total == 0:
            # No admin yet: create one from the provided credentials
            # (username must be a valid e-mail address).
            if credentials.not_empty() and credentials.username_as_email():
                user = User(
                    id=credentials.username,
                    password=credentials.password,
                    roles=['admin'],
                    email=credentials.username,
                    full_name="Default Admin"
                )
                if not await storage.driver.user.check_if_exists(credentials.username):
                    await storage.driver.user.add_user(user)
                    logger.info("Default admin account created.")
                result['admin'] = True
        else:
            logger.warning("There is at least one admin account. New admin account not created.")
            result['admin'] = True

        # With an admin in place, optionally (re)install plugins on start-up.
        if result['admin'] is True and server.update_plugins_on_start_up is not False:
            logger.info(f"Updating plugins on startup due to: UPDATE_PLUGINS_ON_STARTUP={server.update_plugins_on_start_up}")
            result['plugins'] = await add_plugins()

        return result

    except Exception as e:
        logger.error(f"Error on install. Reason: {str(e)}.")
        raise HTTPException(status_code=500, detail=str(e))
| 36.637168 | 125 | 0.665942 |
a1a681c626cb39382777f5aee07c9228b47ca12b | 12,487 | py | Python | Codigos/NYT FQ/NYT 2007 FQ.py | rafgui12/Newspaper-Library | 96de4f91a25fa06e7bfb8b8f5981b032fb7b8bf8 | [
"Unlicense"
] | null | null | null | Codigos/NYT FQ/NYT 2007 FQ.py | rafgui12/Newspaper-Library | 96de4f91a25fa06e7bfb8b8f5981b032fb7b8bf8 | [
"Unlicense"
] | null | null | null | Codigos/NYT FQ/NYT 2007 FQ.py | rafgui12/Newspaper-Library | 96de4f91a25fa06e7bfb8b8f5981b032fb7b8bf8 | [
"Unlicense"
] | null | null | null | # importing the requests library
import time

import numpy as np
import requests

# Query the NYT Article Search API: for every country, count the articles in
# the configured section and date range, then dump the results to .txt/.csv.
#
# Improvements over the original script:
# * the 9 copy-pasted per-group loops are collapsed into one loop;
# * bug fix: the first country group built its query as '"<country>""' (an
#   extra trailing quote) while the others used '"<country>"' — all groups now
#   use the consistent '"<country>"' form;
# * the CPU busy-wait "timer" (`while t < 160000000: t += 1`) between groups
#   is replaced by time.sleep(), keeping the API rate-limit pause intent.

# Country groups; the API quota forces a pause between consecutive groups.
COUNTRY_GROUPS = [
    ["Colombia", "Canada", "Cuba", "China", "Cameroon", "Cambodia", "Costa Rica", "Croatia", "Czech Republic", "Argentina"],
    ["Afghanistan", "Australia", "Algeria", "Austria", "Brazil", "Bolivia", "Belgium", "Bangladesh", "Denmark", "Dominican Republic"],
    ["Egypt", "Ethiopia", "Finland", "Ghana", "Germany", "Greece", "Guatemala", "Hungary", "Iceland", "India"],
    ["Indonesia", "Iran", "Iraq", "Ireland", "Israel", "Italy", "Jamaica", "Japan", "Kenya", "Lithuania"],
    ["Luxembourg", "Malaysia", "Morocco", "Netherlands", "New Zealand", "Namibia", "Norway", "Nicaragua", "Pakistan", "Panama"],
    ["Portugal", "Peru", "Poland", "Philippines", "Russia", "Singapore", "South Africa", "South Korea", "Sweden", "Switzerland"],
    ["Thailand", "Turkey", "United Arab Emirates", "United Kingdom", "United States", "Vietnam", "Mexico", "Ecuador", "Venezuela", "Spain"],
    ["France", "Estonia", "Slovakia", "Slovenia", "Uruguay", "Paraguay", "Chile", "Sri Lanka", "Romania", "Tanzania"],
    ["Tunisia", "Bulgaria", "Nigeria", "Latvia", "Saudi Arabia", "Belarus", "Serbia", "Senegal", "Scotland"],
]

KEYWORDS = ['section_name:("Arts")']
# KEYWORDS = ['section_name:("Science") AND ("Technology")']
API_KEY = "zfGlCSdzjVTQielCCrGlNNXbUnkoWO6W"
# apikeySecond = "g3uH0lOVGucjdU8oKTF7evGQ7AwBjtV3"
START_DATE = 20070101
END_DATE = 20071231
# Pause between country groups; replaces the original busy-wait counter.
PAUSE_SECONDS = 90

# api-endpoint
URL = "https://api.nytimes.com/svc/search/v2/articlesearch.json"

country_total = []
hit_counts = []
years = []


def fetch_hits(country, keyword):
    """Return the article hit count for *country* filtered by *keyword*."""
    params = {
        'q': '"' + country + '"',
        'fq': keyword,
        'begin_date': START_DATE,
        'end_date': END_DATE,
        'api-key': API_KEY,
    }
    data = requests.get(url=URL, params=params).json()
    print(country)
    print(data["response"]["meta"]["hits"])
    print(data["status"])
    return data["response"]["meta"]["hits"]


for group_index, group in enumerate(COUNTRY_GROUPS):
    for country in group:
        for keyword in KEYWORDS:
            hit_counts.append(fetch_hits(country, keyword))
            years.append(START_DATE)
            country_total.append(country)
    # Rate-limit pause between groups (none needed after the last group).
    if group_index < len(COUNTRY_GROUPS) - 1:
        time.sleep(PAUSE_SECONDS)
        print("El Timer ha finalizado")

# Export the (country, hits, year) table both as .txt and as .csv.
table = np.asarray([country_total, hit_counts, years])
for extension in ("txt", "csv"):
    np.savetxt(
        "output_" + str(START_DATE) + "_NYT_FQ." + extension,
        table.T,          # transpose so each row is one country record
        fmt="%s",
        delimiter=",",
    )
print("Los archivos se han exportado satisfactoriamente")
| 33.932065 | 137 | 0.509009 |
e87cfd05afa018c844278a2e0406b0a215b3292b | 1,801 | py | Python | ai4good/webapp/cm_admin_page.py | kariso2000/model-server | 2a7cbc433d5bd71cfc334450ddc227babfb0dc1c | [
"MIT"
] | 1 | 2020-07-17T11:37:33.000Z | 2020-07-17T11:37:33.000Z | ai4good/webapp/cm_admin_page.py | kariso2000/model-server | 2a7cbc433d5bd71cfc334450ddc227babfb0dc1c | [
"MIT"
] | 2 | 2020-09-24T06:57:43.000Z | 2020-09-24T08:19:52.000Z | ai4good/webapp/cm_admin_page.py | kariso2000/model-server | 2a7cbc433d5bd71cfc334450ddc227babfb0dc1c | [
"MIT"
] | 1 | 2020-09-24T03:56:48.000Z | 2020-09-24T03:56:48.000Z | import dash_html_components as html
from ai4good.webapp.apps import dash_app, facade, _redis, cache, local_cache
from ai4good.models.cm.cm_model import CompartmentalModel
from dash.dependencies import Input, Output
import dash
def layout():
    """Render the admin page with its three cache-management sections."""
    cm_cache_section = html.Div([
        html.B("CM Model Cache"),
        html.Pre(f'{facade.rs.list(CompartmentalModel.ID)}', id='cache_contents'),
        html.Button('Clear', id='clear_button'),
    ])
    redis_section = html.Div([
        html.Button('Clear redis', id='clear_redis_button'),
        html.Div(id='notification_div1')
    ])
    cache_section = html.Div([
        html.Button('Clear cache', id='clear_cache_button'),
        html.Div(id='notification_div2'),
    ])
    return html.Div(
        [
            html.H3('Admin page'),
            cm_cache_section,
            redis_section,
            cache_section,
        ], style={'margin': 10}
    )
@dash_app.callback(
    Output("cache_contents", "children"),
    [Input("clear_button", "n_clicks")],
)
def update_output(n_clicks):
    """Clear the CM model result cache on click and show what remains."""
    if n_clicks is None or n_clicks <= 0:
        return dash.no_update
    facade.rs.remove_all(CompartmentalModel.ID)
    return f'{facade.rs.list(CompartmentalModel.ID)}'
@dash_app.callback(
    Output("notification_div1", "children"),
    [Input("clear_redis_button", "n_clicks")],
)
def handle_clear_redis(n_clicks):
    """Flush the redis database when the button has actually been clicked."""
    if n_clicks is None or n_clicks <= 0:
        return dash.no_update
    _redis.flushdb()
    return 'async clear called'
@dash_app.callback(
    Output("notification_div2", "children"),
    [Input("clear_cache_button", "n_clicks")],
)
def handle_clear_cache(n_clicks):
    """Clear both the shared and the local cache on click."""
    if n_clicks is None or n_clicks <= 0:
        return dash.no_update
    cache.clear()
    local_cache.clear()
    return 'cache cleared'
| 28.140625 | 90 | 0.608551 |
737ae5392afe84f7d30aa0c0d3dff2f2a9ad0c81 | 6,861 | py | Python | lm_perplexity/models.py | EleutherAI/lm_perplexity | 24c5a04beb2f073b2230c7c7420bd1ec7776f513 | [
"MIT"
] | 42 | 2021-01-02T00:15:59.000Z | 2022-03-04T10:59:58.000Z | lm_perplexity/models.py | EleutherAI/lm_perplexity | 24c5a04beb2f073b2230c7c7420bd1ec7776f513 | [
"MIT"
] | null | null | null | lm_perplexity/models.py | EleutherAI/lm_perplexity | 24c5a04beb2f073b2230c7c7420bd1ec7776f513 | [
"MIT"
] | 7 | 2021-01-02T12:57:19.000Z | 2022-03-01T15:23:59.000Z | import numpy as np
import os
from typing import Optional
import openai
import torch
import torch.nn as nn
import transformers
import lm_perplexity.utils as utils
class LM:
    """Abstract interface for language models that score text perplexity."""

    def get_perplexity_data(self, text) -> Optional[dict]:
        """Return per-token log-probability data for *text*, or None."""
        raise NotImplementedError

    @classmethod
    def create_from_config(cls, path):
        """Build a model instance from a configuration."""
        raise NotImplementedError
class GPT3LM(LM):
    """Scores text through the OpenAI completions API with rolling windows."""

    def __init__(self, engine, context_len=1024, max_seq_len=2048, verbose=False):
        import openai
        self.engine = engine
        self.context_len = context_len
        self.max_seq_len = max_seq_len
        # Gates each API request (used as a context manager in get_token_logprobs).
        self.wb = utils.WaitBlocker()
        self.verbose = verbose
        # NOTE(review): the GPT-2 tokenizer is used for GPT-3 prompts —
        # presumably the token ids are compatible with the API; confirm.
        self.tokenizer = transformers.GPT2Tokenizer.from_pretrained('gpt2-xl')
        self.end_of_text_token_id = self.tokenizer.convert_tokens_to_ids(["<|endoftext|>"])[0]
        # Read from environment variable OPENAI_API_SECRET_KEY
        openai.api_key = os.environ["OPENAI_API_SECRET_KEY"]

    # noinspection DuplicatedCode
    def get_perplexity_data(self, text) -> Optional[dict]:
        """Tokenize *text* and score every token using rolling context windows.

        Returns None for empty input; otherwise a dict with per-token
        logprobs, their within-window positions, and length metadata.
        """
        input_ids = self.tokenizer.encode_plus(text)["input_ids"]
        rolling_token_windows = utils.get_rolling_token_windows(
            token_list=input_ids,
            prefix_token=self.end_of_text_token_id,
            max_seq_len=self.max_seq_len,
            context_len=self.context_len,
        )

        # noinspection PyListCreation
        all_logprobs = []
        all_positions = []

        # Remaining windows
        for input_tokens, pred_tokens in rolling_token_windows:
            block_output = self.get_token_logprobs(
                input_tokens=input_tokens,
                pred_tokens=pred_tokens,
            )
            all_logprobs.append(block_output["logprobs"])
            all_positions.append(block_output["positions"])

        if not all_logprobs:
            return None

        # Gather
        all_logprobs = np.concatenate(all_logprobs)
        all_positions = np.concatenate(all_positions)

        # Every input token must have received exactly one logprob.
        assert len(all_logprobs) == len(input_ids)
        return {
            "logprobs": all_logprobs,
            "positions": all_positions,
            "length": len(all_logprobs),
            "utf8_length": len(text.encode('utf-8')),
        }

    def get_token_logprobs(self, input_tokens, pred_tokens):
        """Issue one API call and return logprobs/positions for *pred_tokens*."""
        pred_start = len(input_tokens) - len(pred_tokens) + 1
        # We're going to stitch together the input_tokens and pred_tokens
        # In the longest case, this gets us to length = max_seq_len+1 (which the API works with)
        assert input_tokens[pred_start:] == pred_tokens[:-1]
        token_ids = input_tokens + [pred_tokens[-1]]

        with self.wb.check_valid():
            # max_tokens=0 + echo=True: generate nothing, just echo back the
            # prompt with its per-token logprobs.
            response = openai.Completion.create(
                engine=self.engine,
                prompt=token_ids,
                max_tokens=0,
                temperature=0.0,
                logprobs=0,
                echo=True,
            )
        logprobs = np.array(response["choices"][0]["logprobs"]["token_logprobs"][pred_start:])
        if self.verbose:
            print("Context:", self.tokenizer.convert_ids_to_tokens(token_ids))
            print("Predicting:", self.tokenizer.convert_ids_to_tokens(token_ids)[pred_start:])
            print("Perplexity:", np.exp(-logprobs.mean()))
            print()

        positions = np.arange(pred_start-1, pred_start-1 + len(token_ids[pred_start:]))
        return {
            "logprobs": logprobs,
            "positions": positions,
        }

    @classmethod
    def create_from_config(cls, config):
        # Config keys map 1:1 onto __init__ keyword arguments.
        return cls(**config)
class GPT2LM(LM):
    """Scores text locally with a HuggingFace GPT-2 model."""

    def __init__(self, model_name, device="cuda:0", context_len=512, max_seq_len=1024, verbose=False):
        self.model_name = model_name
        self.device = torch.device(device)
        self.context_len = context_len
        self.max_seq_len = max_seq_len
        self.verbose = verbose
        # Inference only: disable autograd globally for this process.
        torch.set_grad_enabled(False)
        self.model = transformers.GPT2LMHeadModel.from_pretrained(model_name).eval().to(self.device)
        self.tokenizer = transformers.GPT2TokenizerFast.from_pretrained(model_name)
        self.end_of_text_token_id = self.tokenizer.convert_tokens_to_ids(["<|endoftext|>"])[0]

    # noinspection DuplicatedCode
    def get_perplexity_data(self, text) -> Optional[dict]:
        """Tokenize *text* and score every token using rolling context windows.

        Returns None for empty input; otherwise a dict with per-token
        logprobs, their within-window positions, and length metadata.
        """
        input_ids = self.tokenizer.encode_plus(text)["input_ids"]
        rolling_token_windows = utils.get_rolling_token_windows(
            token_list=input_ids,
            prefix_token=self.end_of_text_token_id,
            max_seq_len=self.max_seq_len,
            context_len=self.context_len,
        )

        # noinspection PyListCreation
        all_logprobs = []
        all_positions = []

        # Remaining windows
        for input_tokens, pred_tokens in rolling_token_windows:
            block_output = self.get_token_logprobs(
                input_tokens=input_tokens,
                pred_tokens=pred_tokens,
            )
            all_logprobs.append(block_output["logprobs"])
            all_positions.append(block_output["positions"])

        if not all_logprobs:
            return None

        # Gather
        all_logprobs = np.concatenate(all_logprobs)
        all_positions = np.concatenate(all_positions)

        # Every input token must have received exactly one logprob.
        assert len(all_logprobs) == len(input_ids)
        return {
            "logprobs": all_logprobs,
            "positions": all_positions,
            "length": len(all_logprobs),
            "utf8_length": len(text.encode('utf-8')),
        }

    def get_token_logprobs(self, input_tokens, pred_tokens):
        """Run one forward pass and return logprobs/positions for *pred_tokens*."""
        input_tokens = torch.tensor(input_tokens).long().to(self.device)
        pred_tokens = torch.tensor(pred_tokens).long().to(self.device)
        output = self.model(input_tokens, return_dict=True)
        # Unreduced cross-entropy of the logits vs. the target tokens equals
        # the negative log-probability of each predicted token.
        loss_fct = nn.CrossEntropyLoss(reduction="none")
        neg_logprobs = loss_fct(
            output.logits[-len(pred_tokens):],
            pred_tokens,
        ).detach().cpu().numpy()
        if self.verbose:
            print("Context:", self.tokenizer.convert_ids_to_tokens(input_tokens))
            print("Predicting:", self.tokenizer.convert_ids_to_tokens(pred_tokens))
            print("Perplexity:", np.exp(neg_logprobs.mean()))
            print()

        positions = np.arange(len(input_tokens) - len(pred_tokens), len(input_tokens))
        return {
            "logprobs": - neg_logprobs,
            "positions": positions,
        }

    @classmethod
    def create_from_config(cls, config):
        # Config keys map 1:1 onto __init__ keyword arguments.
        return cls(**config)
def create_model(json_path):
    """Build a language model from the JSON config at `json_path`.

    The config must contain a "model_type" key ("gpt2" or "gpt3"); the
    remaining keys are handed to the model's constructor.  Raises KeyError
    for an unknown model type.
    """
    config = utils.read_json(json_path)
    model_type = config.pop("model_type")
    factories = {
        "gpt3": GPT3LM.create_from_config,
        "gpt2": GPT2LM.create_from_config,
    }
    if model_type not in factories:
        raise KeyError(model_type)
    return factories[model_type](config)
| 34.827411 | 102 | 0.63533 |
ccf3e84481404af3c4b3cd4070a56149409c72b0 | 326 | py | Python | ReadingGauges/ComputerSide/GrabberGaugesOutput_python3/ScriptRunGrabber.py | jerabaul29/PaddleAndUltrasonicGauges | 5c6ba80ddfd44190eb21d5c61979ac802a54cb99 | [
"MIT"
] | 2 | 2021-02-03T12:55:57.000Z | 2021-02-11T07:07:06.000Z | ReadingGauges/ComputerSide/GrabberGaugesOutput_python3/ScriptRunGrabber.py | jerabaul29/PaddleAndUltrasonicGauges | 5c6ba80ddfd44190eb21d5c61979ac802a54cb99 | [
"MIT"
] | null | null | null | ReadingGauges/ComputerSide/GrabberGaugesOutput_python3/ScriptRunGrabber.py | jerabaul29/PaddleAndUltrasonicGauges | 5c6ba80ddfd44190eb21d5c61979ac802a54cb99 | [
"MIT"
] | null | null | null | import Grabber
# End-to-end run of the gauge grabber: capture serial data, convert it to
# numpy, plot it, clean it, then archive in both .npy and CSV form.
gauge_grabber = Grabber.grabb_serial_values()
gauge_grabber.init()
# The argument is presumably a duration or sample count -- confirm against
# Grabber.grabb's definition.
gauge_grabber.grabb(10)
gauge_grabber.convert_grabbed_to_numpy()
gauge_grabber.plot_grabbed_data()
# NOTE(review): clean_numpy_dict's behavior inferred from its name (drops
# invalid entries before saving) -- verify in the Grabber module.
gauge_grabber.clean_numpy_dict()
gauge_grabber.save_cleaned_dict_numpy('test_saving')
gauge_grabber.save_cleaned_dict_numpy_csv('test_saving')
| 27.166667 | 56 | 0.868098 |
727426a462f51e7b311ba94555308f578f95ef1b | 492 | py | Python | Bugscan_exploits-master/exp_list/exp-1770.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 11 | 2020-05-30T13:53:49.000Z | 2021-03-17T03:20:59.000Z | Bugscan_exploits-master/exp_list/exp-1770.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 6 | 2020-05-13T03:25:18.000Z | 2020-07-21T06:24:16.000Z | Bugscan_exploits-master/exp_list/exp-1770.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 6 | 2020-05-30T13:53:51.000Z | 2020-12-01T21:44:26.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#refer: http://www.wooyun.org/bugs/wooyun-2010-0121875
def assign(service, arg):
    """Claim the target for auditing, but only for the "ewebs" service."""
    if service != "ewebs":
        return None
    return True, arg
def audit(arg):
    """Probe `arg` + 'testweb.php' and report if it exposes the access log."""
    vul_url = arg + 'testweb.php'
    code, _, body, _, _ = curl.curl2(vul_url)
    # Vulnerable installs list their log files in an HTML table, so the
    # presence of the access.log cell marks the leak.
    if code == 200 and '<td>access.log</td>' in body:
        security_warning(vul_url)
if __name__ == '__main__':
from dummy import *
audit(assign('ewebs', 'http://60.190.163.51:8888/')[1]) | 28.941176 | 59 | 0.595528 |
6e6cccd5189caffb66ec8db09e94af5e6bcaf47f | 45,600 | py | Python | lib/python2.7/site-packages/coverage/parser.py | DPNT-Sourcecode/CHK-uimw01 | 87144ae10115d7a8df565f5109666f00bc001ce4 | [
"Apache-2.0"
] | 15 | 2018-04-09T00:44:22.000Z | 2021-08-03T08:08:07.000Z | lib/python2.7/site-packages/coverage/parser.py | DPNT-Sourcecode/CHK-uimw01 | 87144ae10115d7a8df565f5109666f00bc001ce4 | [
"Apache-2.0"
] | 22 | 2018-04-23T13:52:20.000Z | 2019-09-20T15:11:32.000Z | lib/python2.7/site-packages/coverage/parser.py | DPNT-Sourcecode/CHK-uimw01 | 87144ae10115d7a8df565f5109666f00bc001ce4 | [
"Apache-2.0"
] | 4 | 2018-07-07T16:45:56.000Z | 2019-06-09T13:30:13.000Z | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Code parsing for coverage.py."""
import ast
import collections
import os
import re
import token
import tokenize
from coverage import env
from coverage.backward import range # pylint: disable=redefined-builtin
from coverage.backward import bytes_to_ints, string_class
from coverage.bytecode import CodeObjects
from coverage.debug import short_stack
from coverage.misc import contract, join_regex, new_contract, nice_pair, one_of
from coverage.misc import NoSource, NotPython, StopEverything
from coverage.phystokens import compile_unicode, generate_tokens, neuter_encoding_declaration
class PythonParser(object):
    """Parse code to find executable lines, excluded lines, etc.

    This information is all based on static analysis: no code execution is
    involved.

    """
    @contract(text='unicode|None')
    def __init__(self, text=None, filename=None, exclude=None):
        """
        Source can be provided as `text`, the text itself, or `filename`, from
        which the text will be read. Excluded lines are those that match
        `exclude`, a regex.

        """
        assert text or filename, "PythonParser needs either text or filename"
        self.filename = filename or "<code>"
        self.text = text
        if not self.text:
            # Imported here lazily to avoid a circular import at module load.
            from coverage.python import get_python_source
            try:
                self.text = get_python_source(self.filename)
            except IOError as err:
                raise NoSource(
                    "No source for code: '%s': %s" % (self.filename, err)
                )

        self.exclude = exclude

        # The text lines of the parsed code.
        self.lines = self.text.split('\n')

        # The normalized line numbers of the statements in the code. Exclusions
        # are taken into account, and statements are adjusted to their first
        # lines.
        self.statements = set()

        # The normalized line numbers of the excluded lines in the code,
        # adjusted to their first lines.
        self.excluded = set()

        # The raw_* attributes are only used in this class, and in
        # lab/parser.py to show how this class is working.

        # The line numbers that start statements, as reported by the line
        # number table in the bytecode.
        self.raw_statements = set()

        # The raw line numbers of excluded lines of code, as marked by pragmas.
        self.raw_excluded = set()

        # The line numbers of class and function definitions.
        self.raw_classdefs = set()

        # The line numbers of docstring lines.
        self.raw_docstrings = set()

        # Internal detail, used by lab/parser.py.
        self.show_tokens = False

        # A dict mapping line numbers to lexical statement starts for
        # multi-line statements.
        self._multiline = {}

        # Lazily-created ByteParser, arc data, and missing arc descriptions.
        self._byte_parser = None
        self._all_arcs = None
        self._missing_arc_fragments = None

    @property
    def byte_parser(self):
        """Create a ByteParser on demand."""
        if not self._byte_parser:
            self._byte_parser = ByteParser(self.text, filename=self.filename)
        return self._byte_parser

    def lines_matching(self, *regexes):
        """Find the lines matching one of a list of regexes.

        Returns a set of line numbers, the lines that contain a match for one
        of the regexes in `regexes`.  The entire line needn't match, just a
        part of it.

        """
        combined = join_regex(regexes)
        if env.PY2:
            combined = combined.decode("utf8")
        regex_c = re.compile(combined)
        matches = set()
        for i, ltext in enumerate(self.lines, start=1):
            if regex_c.search(ltext):
                matches.add(i)
        return matches

    def _raw_parse(self):
        """Parse the source to find the interesting facts about its lines.

        A handful of attributes are updated.

        """
        # Find lines which match an exclusion pattern.
        if self.exclude:
            self.raw_excluded = self.lines_matching(self.exclude)

        # Tokenize, to find excluded suites, to find docstrings, and to find
        # multi-line statements.
        indent = 0
        exclude_indent = 0
        excluding = False
        excluding_decorators = False
        prev_toktype = token.INDENT
        first_line = None
        empty = True
        # True while no non-whitespace token has been seen yet on the
        # current line; decorators are only recognized in that position.
        first_on_line = True

        tokgen = generate_tokens(self.text)
        for toktype, ttext, (slineno, _), (elineno, _), ltext in tokgen:
            if self.show_tokens:                # pragma: debugging
                print("%10s %5s %-20r %r" % (
                    tokenize.tok_name.get(toktype, toktype),
                    nice_pair((slineno, elineno)), ttext, ltext
                ))
            if toktype == token.INDENT:
                indent += 1
            elif toktype == token.DEDENT:
                indent -= 1
            elif toktype == token.NAME:
                if ttext == 'class':
                    # Class definitions look like branches in the bytecode, so
                    # we need to exclude them.  The simplest way is to note the
                    # lines with the 'class' keyword.
                    self.raw_classdefs.add(slineno)
            elif toktype == token.OP:
                if ttext == ':':
                    should_exclude = (elineno in self.raw_excluded) or excluding_decorators
                    if not excluding and should_exclude:
                        # Start excluding a suite.  We trigger off of the colon
                        # token so that the #pragma comment will be recognized on
                        # the same line as the colon.
                        self.raw_excluded.add(elineno)
                        exclude_indent = indent
                        excluding = True
                        excluding_decorators = False
                elif ttext == '@' and first_on_line:
                    # A decorator.
                    if elineno in self.raw_excluded:
                        excluding_decorators = True
                    if excluding_decorators:
                        self.raw_excluded.add(elineno)
            elif toktype == token.STRING and prev_toktype == token.INDENT:
                # Strings that are first on an indented line are docstrings.
                # (a trick from trace.py in the stdlib.) This works for
                # 99.9999% of cases.  For the rest (!) see:
                # http://stackoverflow.com/questions/1769332/x/1769794#1769794
                self.raw_docstrings.update(range(slineno, elineno+1))
            elif toktype == token.NEWLINE:
                if first_line is not None and elineno != first_line:
                    # We're at the end of a line, and we've ended on a
                    # different line than the first line of the statement,
                    # so record a multi-line range.
                    for l in range(first_line, elineno+1):
                        self._multiline[l] = first_line
                first_line = None
                first_on_line = True

            if ttext.strip() and toktype != tokenize.COMMENT:
                # A non-whitespace token.
                empty = False
                if first_line is None:
                    # The token is not whitespace, and is the first in a
                    # statement.
                    first_line = slineno
                    # Check whether to end an excluded suite.
                    if excluding and indent <= exclude_indent:
                        excluding = False
                    if excluding:
                        self.raw_excluded.add(elineno)
                first_on_line = False

            prev_toktype = toktype

        # Find the starts of the executable statements.
        if not empty:
            self.raw_statements.update(self.byte_parser._find_statements())

    def first_line(self, line):
        """Return the first line number of the statement including `line`."""
        return self._multiline.get(line, line)

    def first_lines(self, lines):
        """Map the line numbers in `lines` to the correct first line of the
        statement.

        Returns a set of the first lines.

        """
        return set(self.first_line(l) for l in lines)

    def translate_lines(self, lines):
        """Implement `FileReporter.translate_lines`."""
        return self.first_lines(lines)

    def translate_arcs(self, arcs):
        """Implement `FileReporter.translate_arcs`."""
        return [(self.first_line(a), self.first_line(b)) for (a, b) in arcs]

    def parse_source(self):
        """Parse source text to find executable lines, excluded lines, etc.

        Sets the .excluded and .statements attributes, normalized to the first
        line of multi-line statements.

        """
        try:
            self._raw_parse()
        except (tokenize.TokenError, IndentationError) as err:
            if hasattr(err, "lineno"):
                lineno = err.lineno         # IndentationError
            else:
                lineno = err.args[1][0]     # TokenError
            raise NotPython(
                u"Couldn't parse '%s' as Python source: '%s' at line %d" % (
                    self.filename, err.args[0], lineno
                )
            )

        self.excluded = self.first_lines(self.raw_excluded)

        # Docstring lines are never reported as statements.
        ignore = self.excluded | self.raw_docstrings
        starts = self.raw_statements - ignore
        self.statements = self.first_lines(starts) - ignore

    def arcs(self):
        """Get information about the arcs available in the code.

        Returns a set of line number pairs.  Line numbers have been normalized
        to the first line of multi-line statements.

        """
        if self._all_arcs is None:
            self._analyze_ast()
        return self._all_arcs

    def _analyze_ast(self):
        """Run the AstArcAnalyzer and save its results.

        `_all_arcs` is the set of arcs in the code.

        """
        aaa = AstArcAnalyzer(self.text, self.raw_statements, self._multiline)
        aaa.analyze()

        self._all_arcs = set()
        for l1, l2 in aaa.arcs:
            fl1 = self.first_line(l1)
            fl2 = self.first_line(l2)
            # Self-arcs within a single statement carry no information.
            if fl1 != fl2:
                self._all_arcs.add((fl1, fl2))

        self._missing_arc_fragments = aaa.missing_arc_fragments

    def exit_counts(self):
        """Get a count of exits from that each line.

        Excluded lines are excluded.

        """
        exit_counts = collections.defaultdict(int)
        for l1, l2 in self.arcs():
            if l1 < 0:
                # Don't ever report -1 as a line number
                continue
            if l1 in self.excluded:
                # Don't report excluded lines as line numbers.
                continue
            if l2 in self.excluded:
                # Arcs to excluded lines shouldn't count.
                continue
            exit_counts[l1] += 1

        # Class definitions have one extra exit, so remove one for each:
        for l in self.raw_classdefs:
            # Ensure key is there: class definitions can include excluded lines.
            if l in exit_counts:
                exit_counts[l] -= 1

        return exit_counts

    def missing_arc_description(self, start, end, executed_arcs=None):
        """Provide an English sentence describing a missing arc."""
        if self._missing_arc_fragments is None:
            self._analyze_ast()

        actual_start = start

        if (
            executed_arcs and
            end < 0 and end == -start and
            (end, start) not in executed_arcs and
            (end, start) in self._missing_arc_fragments
        ):
            # It's a one-line callable, and we never even started it,
            # and we have a message about not starting it.
            start, end = end, start

        fragment_pairs = self._missing_arc_fragments.get((start, end), [(None, None)])

        msgs = []
        for fragment_pair in fragment_pairs:
            smsg, emsg = fragment_pair

            if emsg is None:
                if end < 0:
                    # Hmm, maybe we have a one-line callable, let's check.
                    if (-end, end) in self._missing_arc_fragments:
                        return self.missing_arc_description(-end, end)
                    emsg = "didn't jump to the function exit"
                else:
                    emsg = "didn't jump to line {lineno}"
            emsg = emsg.format(lineno=end)

            msg = "line {start} {emsg}".format(start=actual_start, emsg=emsg)
            if smsg is not None:
                msg += ", because {smsg}".format(smsg=smsg.format(lineno=actual_start))

            msgs.append(msg)

        return " or ".join(msgs)
class ByteParser(object):
    """Parse bytecode to understand the structure of code."""

    @contract(text='unicode')
    def __init__(self, text, code=None, filename=None):
        """`text` is the source; `code` is an optional pre-compiled code
        object for it; `filename` is used only for error messages."""
        self.text = text
        if code:
            self.code = code
        else:
            try:
                self.code = compile_unicode(text, filename, "exec")
            except SyntaxError as synerr:
                raise NotPython(
                    u"Couldn't parse '%s' as Python source: '%s' at line %d" % (
                        filename, synerr.msg, synerr.lineno
                    )
                )

        # Alternative Python implementations don't always provide all the
        # attributes on code objects that we need to do the analysis.
        for attr in ['co_lnotab', 'co_firstlineno']:
            if not hasattr(self.code, attr):
                raise StopEverything(                   # pragma: only jython
                    "This implementation of Python doesn't support code analysis.\n"
                    "Run coverage.py under another Python for this command."
                )

    def child_parsers(self):
        """Iterate over all the code objects nested within this one.

        The iteration includes `self` as its first value.

        """
        children = CodeObjects(self.code)
        return (ByteParser(self.text, code=c) for c in children)

    def _bytes_lines(self):
        """Map byte offsets to line numbers in `code`.

        Uses co_lnotab described in Python/compile.c to map byte offsets to
        line numbers.  Produces a sequence: (b0, l0), (b1, l1), ...

        Only byte offsets that correspond to line numbers are included in the
        results.

        """
        # Adapted from dis.py in the standard library.
        byte_increments = bytes_to_ints(self.code.co_lnotab[0::2])
        line_increments = bytes_to_ints(self.code.co_lnotab[1::2])

        last_line_num = None
        line_num = self.code.co_firstlineno
        byte_num = 0
        for byte_incr, line_incr in zip(byte_increments, line_increments):
            if byte_incr:
                # A new bytecode offset: emit the pending line if it changed.
                if line_num != last_line_num:
                    yield (byte_num, line_num)
                    last_line_num = line_num
                byte_num += byte_incr
            line_num += line_incr
        if line_num != last_line_num:
            yield (byte_num, line_num)

    def _find_statements(self):
        """Find the statements in `self.code`.

        Produce a sequence of line numbers that start statements.  Recurses
        into all code objects reachable from `self.code`.

        """
        for bp in self.child_parsers():
            # Get all of the lineno information from this code.
            for _, l in bp._bytes_lines():
                yield l
#
# AST analysis
#
class LoopBlock(object):
    """A block on the block stack representing a `for` or `while` loop.

    Collects the arcs from `break` statements so the analyzer can route
    them past the loop when the block is popped.
    """
    @contract(start=int)
    def __init__(self, start):
        # The line number where the loop starts.
        self.start = start
        # A set of ArcStarts, the arcs from break statements exiting this loop.
        self.break_exits = set()
class FunctionBlock(object):
    """A block on the block stack representing a function definition.

    `return` and uncaught `raise` arcs terminate at this block.
    """
    @contract(start=int, name=str)
    def __init__(self, start, name):
        # The line number where the function starts.
        self.start = start
        # The name of the function, used in missing-arc messages.
        self.name = name
class TryBlock(object):
    """A block on the block stack representing a `try` block.

    Tracks the jump targets (`except` / `finally`) and accumulates the
    arcs that must be routed through the `finally` clause.
    """
    @contract(handler_start='int|None', final_start='int|None')
    def __init__(self, handler_start, final_start):
        # The line number of the first "except" handler, if any.
        self.handler_start = handler_start
        # The line number of the "finally:" clause, if any.
        self.final_start = final_start

        # The ArcStarts for breaks/continues/returns/raises inside the "try:"
        # that need to route through the "finally:" clause.
        self.break_from = set()
        self.continue_from = set()
        self.return_from = set()
        self.raise_from = set()
class ArcStart(collections.namedtuple("Arc", "lineno, cause")):
    """The information needed to start an arc.

    `lineno` is the line number the arc starts from.

    `cause` is an English text fragment used as the `startmsg` for
    AstArcAnalyzer.missing_arc_fragments.  It will be used to describe why an
    arc wasn't executed, so should fit well into a sentence of the form,
    "Line 17 didn't run because {cause}."  The fragment can include "{lineno}"
    to have `lineno` interpolated into it.

    """
    def __new__(cls, lineno, cause=None):
        # `cause` defaults to None so plain line-number starts are cheap to
        # create; the py2-compatible super() form is kept deliberately.
        return super(ArcStart, cls).__new__(cls, lineno, cause)
# Define contract words that PyContract doesn't have.
# ArcStarts is for a list or set of ArcStart's.
new_contract('ArcStarts', lambda seq: all(isinstance(x, ArcStart) for x in seq))


# Turn on AST dumps with an environment variable.
# Setting COVERAGE_AST_DUMP=1 makes failing parser tests print the tree.
AST_DUMP = bool(int(os.environ.get("COVERAGE_AST_DUMP", 0)))
class NodeList(object):
    """A synthetic fictitious node, containing a sequence of nodes.

    This is used when collapsing optimized if-statements, to represent the
    unconditional execution of one of the clauses.

    """
    def __init__(self, body):
        first_child = body[0]
        self.body = body
        # The list reports the line number of its first child.
        self.lineno = first_child.lineno
class AstArcAnalyzer(object):
"""Analyze source text with an AST to find executable code paths."""
@contract(text='unicode', statements=set)
def __init__(self, text, statements, multiline):
self.root_node = ast.parse(neuter_encoding_declaration(text))
# TODO: I think this is happening in too many places.
self.statements = set(multiline.get(l, l) for l in statements)
self.multiline = multiline
if AST_DUMP: # pragma: debugging
# Dump the AST so that failing tests have helpful output.
print("Statements: {0}".format(self.statements))
print("Multiline map: {0}".format(self.multiline))
ast_dump(self.root_node)
self.arcs = set()
# A map from arc pairs to a list of pairs of sentence fragments:
# { (start, end): [(startmsg, endmsg), ...], }
#
# For an arc from line 17, they should be usable like:
# "Line 17 {endmsg}, because {startmsg}"
self.missing_arc_fragments = collections.defaultdict(list)
self.block_stack = []
self.debug = bool(int(os.environ.get("COVERAGE_TRACK_ARCS", 0)))
def analyze(self):
"""Examine the AST tree from `root_node` to determine possible arcs.
This sets the `arcs` attribute to be a set of (from, to) line number
pairs.
"""
for node in ast.walk(self.root_node):
node_name = node.__class__.__name__
code_object_handler = getattr(self, "_code_object__" + node_name, None)
if code_object_handler is not None:
code_object_handler(node)
def add_arc(self, start, end, smsg=None, emsg=None):
"""Add an arc, including message fragments to use if it is missing."""
if self.debug: # pragma: debugging
print("\nAdding arc: ({}, {}): {!r}, {!r}".format(start, end, smsg, emsg))
print(short_stack(limit=6))
self.arcs.add((start, end))
if smsg is not None or emsg is not None:
self.missing_arc_fragments[(start, end)].append((smsg, emsg))
def nearest_blocks(self):
"""Yield the blocks in nearest-to-farthest order."""
return reversed(self.block_stack)
@contract(returns=int)
def line_for_node(self, node):
"""What is the right line number to use for this node?
This dispatches to _line__Node functions where needed.
"""
node_name = node.__class__.__name__
handler = getattr(self, "_line__" + node_name, None)
if handler is not None:
return handler(node)
else:
return node.lineno
def _line__Assign(self, node):
return self.line_for_node(node.value)
def _line__Dict(self, node):
# Python 3.5 changed how dict literals are made.
if env.PYVERSION >= (3, 5) and node.keys:
if node.keys[0] is not None:
return node.keys[0].lineno
else:
# Unpacked dict literals `{**{'a':1}}` have None as the key,
# use the value in that case.
return node.values[0].lineno
else:
return node.lineno
def _line__List(self, node):
if node.elts:
return self.line_for_node(node.elts[0])
else:
return node.lineno
def _line__Module(self, node):
if node.body:
return self.line_for_node(node.body[0])
else:
# Empty modules have no line number, they always start at 1.
return 1
# The node types that just flow to the next node with no complications.
OK_TO_DEFAULT = set([
"Assign", "Assert", "AugAssign", "Delete", "Exec", "Expr", "Global",
"Import", "ImportFrom", "Nonlocal", "Pass", "Print",
])
@contract(returns='ArcStarts')
def add_arcs(self, node):
"""Add the arcs for `node`.
Return a set of ArcStarts, exits from this node to the next. Because a
node represents an entire sub-tree (including its children), the exits
from a node can be arbitrarily complex::
if something(1):
if other(2):
doit(3)
else:
doit(5)
There are two exits from line 1: they start at line 3 and line 5.
"""
node_name = node.__class__.__name__
handler = getattr(self, "_handle__" + node_name, None)
if handler is not None:
return handler(node)
else:
# No handler: either it's something that's ok to default (a simple
# statement), or it's something we overlooked. Change this 0 to 1
# to see if it's overlooked.
if 0:
if node_name not in self.OK_TO_DEFAULT:
print("*** Unhandled: {0}".format(node))
# Default for simple statements: one exit from this node.
return set([ArcStart(self.line_for_node(node))])
@one_of("from_start, prev_starts")
@contract(returns='ArcStarts')
def add_body_arcs(self, body, from_start=None, prev_starts=None):
"""Add arcs for the body of a compound statement.
`body` is the body node. `from_start` is a single `ArcStart` that can
be the previous line in flow before this body. `prev_starts` is a set
of ArcStarts that can be the previous line. Only one of them should be
given.
Returns a set of ArcStarts, the exits from this body.
"""
if prev_starts is None:
prev_starts = set([from_start])
for body_node in body:
lineno = self.line_for_node(body_node)
first_line = self.multiline.get(lineno, lineno)
if first_line not in self.statements:
body_node = self.find_non_missing_node(body_node)
if body_node is None:
continue
lineno = self.line_for_node(body_node)
for prev_start in prev_starts:
self.add_arc(prev_start.lineno, lineno, prev_start.cause)
prev_starts = self.add_arcs(body_node)
return prev_starts
def find_non_missing_node(self, node):
"""Search `node` looking for a child that has not been optimized away.
This might return the node you started with, or it will work recursively
to find a child node in self.statements.
Returns a node, or None if none of the node remains.
"""
# This repeats work just done in add_body_arcs, but this duplication
# means we can avoid a function call in the 99.9999% case of not
# optimizing away statements.
lineno = self.line_for_node(node)
first_line = self.multiline.get(lineno, lineno)
if first_line in self.statements:
return node
missing_fn = getattr(self, "_missing__" + node.__class__.__name__, None)
if missing_fn:
node = missing_fn(node)
else:
node = None
return node
def _missing__If(self, node):
# If the if-node is missing, then one of its children might still be
# here, but not both. So return the first of the two that isn't missing.
# Use a NodeList to hold the clauses as a single node.
non_missing = self.find_non_missing_node(NodeList(node.body))
if non_missing:
return non_missing
if node.orelse:
return self.find_non_missing_node(NodeList(node.orelse))
return None
def _missing__NodeList(self, node):
# A NodeList might be a mixture of missing and present nodes. Find the
# ones that are present.
non_missing_children = []
for child in node.body:
child = self.find_non_missing_node(child)
if child is not None:
non_missing_children.append(child)
# Return the simplest representation of the present children.
if not non_missing_children:
return None
if len(non_missing_children) == 1:
return non_missing_children[0]
return NodeList(non_missing_children)
def is_constant_expr(self, node):
"""Is this a compile-time constant?"""
node_name = node.__class__.__name__
if node_name in ["NameConstant", "Num"]:
return "Num"
elif node_name == "Name":
if node.id in ["True", "False", "None", "__debug__"]:
return "Name"
return None
# In the fullness of time, these might be good tests to write:
# while EXPR:
# while False:
# listcomps hidden deep in other expressions
# listcomps hidden in lists: x = [[i for i in range(10)]]
# nested function definitions
# Exit processing: process_*_exits
#
# These functions process the four kinds of jump exits: break, continue,
# raise, and return. To figure out where an exit goes, we have to look at
# the block stack context. For example, a break will jump to the nearest
# enclosing loop block, or the nearest enclosing finally block, whichever
# is nearer.
@contract(exits='ArcStarts')
def process_break_exits(self, exits):
"""Add arcs due to jumps from `exits` being breaks."""
for block in self.nearest_blocks():
if isinstance(block, LoopBlock):
block.break_exits.update(exits)
break
elif isinstance(block, TryBlock) and block.final_start is not None:
block.break_from.update(exits)
break
@contract(exits='ArcStarts')
def process_continue_exits(self, exits):
"""Add arcs due to jumps from `exits` being continues."""
for block in self.nearest_blocks():
if isinstance(block, LoopBlock):
for xit in exits:
self.add_arc(xit.lineno, block.start, xit.cause)
break
elif isinstance(block, TryBlock) and block.final_start is not None:
block.continue_from.update(exits)
break
@contract(exits='ArcStarts')
def process_raise_exits(self, exits):
"""Add arcs due to jumps from `exits` being raises."""
for block in self.nearest_blocks():
if isinstance(block, TryBlock):
if block.handler_start is not None:
for xit in exits:
self.add_arc(xit.lineno, block.handler_start, xit.cause)
break
elif block.final_start is not None:
block.raise_from.update(exits)
break
elif isinstance(block, FunctionBlock):
for xit in exits:
self.add_arc(
xit.lineno, -block.start, xit.cause,
"didn't except from function '{0}'".format(block.name),
)
break
@contract(exits='ArcStarts')
def process_return_exits(self, exits):
"""Add arcs due to jumps from `exits` being returns."""
for block in self.nearest_blocks():
if isinstance(block, TryBlock) and block.final_start is not None:
block.return_from.update(exits)
break
elif isinstance(block, FunctionBlock):
for xit in exits:
self.add_arc(
xit.lineno, -block.start, xit.cause,
"didn't return from function '{0}'".format(block.name),
)
break
# Handlers: _handle__*
#
# Each handler deals with a specific AST node type, dispatched from
# add_arcs. Each deals with a particular kind of node type, and returns
# the set of exits from that node. These functions mirror the Python
# semantics of each syntactic construct. See the docstring for add_arcs to
# understand the concept of exits from a node.
@contract(returns='ArcStarts')
def _handle__Break(self, node):
here = self.line_for_node(node)
break_start = ArcStart(here, cause="the break on line {lineno} wasn't executed")
self.process_break_exits([break_start])
return set()
@contract(returns='ArcStarts')
def _handle_decorated(self, node):
"""Add arcs for things that can be decorated (classes and functions)."""
last = self.line_for_node(node)
if node.decorator_list:
for dec_node in node.decorator_list:
dec_start = self.line_for_node(dec_node)
if dec_start != last:
self.add_arc(last, dec_start)
last = dec_start
# The definition line may have been missed, but we should have it
# in `self.statements`. For some constructs, `line_for_node` is
# not what we'd think of as the first line in the statement, so map
# it to the first one.
if node.body:
body_start = self.line_for_node(node.body[0])
body_start = self.multiline.get(body_start, body_start)
for lineno in range(last+1, body_start):
if lineno in self.statements:
self.add_arc(last, lineno)
last = lineno
# The body is handled in collect_arcs.
return set([ArcStart(last)])
_handle__ClassDef = _handle_decorated
@contract(returns='ArcStarts')
def _handle__Continue(self, node):
here = self.line_for_node(node)
continue_start = ArcStart(here, cause="the continue on line {lineno} wasn't executed")
self.process_continue_exits([continue_start])
return set()
@contract(returns='ArcStarts')
def _handle__For(self, node):
start = self.line_for_node(node.iter)
self.block_stack.append(LoopBlock(start=start))
from_start = ArcStart(start, cause="the loop on line {lineno} never started")
exits = self.add_body_arcs(node.body, from_start=from_start)
# Any exit from the body will go back to the top of the loop.
for xit in exits:
self.add_arc(xit.lineno, start, xit.cause)
my_block = self.block_stack.pop()
exits = my_block.break_exits
from_start = ArcStart(start, cause="the loop on line {lineno} didn't complete")
if node.orelse:
else_exits = self.add_body_arcs(node.orelse, from_start=from_start)
exits |= else_exits
else:
# No else clause: exit from the for line.
exits.add(from_start)
return exits
_handle__AsyncFor = _handle__For
_handle__FunctionDef = _handle_decorated
_handle__AsyncFunctionDef = _handle_decorated
@contract(returns='ArcStarts')
def _handle__If(self, node):
start = self.line_for_node(node.test)
from_start = ArcStart(start, cause="the condition on line {lineno} was never true")
exits = self.add_body_arcs(node.body, from_start=from_start)
from_start = ArcStart(start, cause="the condition on line {lineno} was never false")
exits |= self.add_body_arcs(node.orelse, from_start=from_start)
return exits
@contract(returns='ArcStarts')
def _handle__NodeList(self, node):
start = self.line_for_node(node)
exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
return exits
@contract(returns='ArcStarts')
def _handle__Raise(self, node):
here = self.line_for_node(node)
raise_start = ArcStart(here, cause="the raise on line {lineno} wasn't executed")
self.process_raise_exits([raise_start])
# `raise` statement jumps away, no exits from here.
return set()
@contract(returns='ArcStarts')
def _handle__Return(self, node):
here = self.line_for_node(node)
return_start = ArcStart(here, cause="the return on line {lineno} wasn't executed")
self.process_return_exits([return_start])
# `return` statement jumps away, no exits from here.
return set()
@contract(returns='ArcStarts')
def _handle__Try(self, node):
if node.handlers:
handler_start = self.line_for_node(node.handlers[0])
else:
handler_start = None
if node.finalbody:
final_start = self.line_for_node(node.finalbody[0])
else:
final_start = None
try_block = TryBlock(handler_start, final_start)
self.block_stack.append(try_block)
start = self.line_for_node(node)
exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
# We're done with the `try` body, so this block no longer handles
# exceptions. We keep the block so the `finally` clause can pick up
# flows from the handlers and `else` clause.
if node.finalbody:
try_block.handler_start = None
if node.handlers:
# If there are `except` clauses, then raises in the try body
# will already jump to them. Start this set over for raises in
# `except` and `else`.
try_block.raise_from = set([])
else:
self.block_stack.pop()
handler_exits = set()
if node.handlers:
last_handler_start = None
for handler_node in node.handlers:
handler_start = self.line_for_node(handler_node)
if last_handler_start is not None:
self.add_arc(last_handler_start, handler_start)
last_handler_start = handler_start
from_cause = "the exception caught by line {lineno} didn't happen"
from_start = ArcStart(handler_start, cause=from_cause)
handler_exits |= self.add_body_arcs(handler_node.body, from_start=from_start)
if node.orelse:
exits = self.add_body_arcs(node.orelse, prev_starts=exits)
exits |= handler_exits
if node.finalbody:
self.block_stack.pop()
final_from = ( # You can get to the `finally` clause from:
exits | # the exits of the body or `else` clause,
try_block.break_from | # or a `break`,
try_block.continue_from | # or a `continue`,
try_block.raise_from | # or a `raise`,
try_block.return_from # or a `return`.
)
final_exits = self.add_body_arcs(node.finalbody, prev_starts=final_from)
if try_block.break_from:
self.process_break_exits(
self._combine_finally_starts(try_block.break_from, final_exits)
)
if try_block.continue_from:
self.process_continue_exits(
self._combine_finally_starts(try_block.continue_from, final_exits)
)
if try_block.raise_from:
self.process_raise_exits(
self._combine_finally_starts(try_block.raise_from, final_exits)
)
if try_block.return_from:
self.process_return_exits(
self._combine_finally_starts(try_block.return_from, final_exits)
)
if exits:
# The finally clause's exits are only exits for the try block
# as a whole if the try block had some exits to begin with.
exits = final_exits
return exits
@contract(starts='ArcStarts', exits='ArcStarts', returns='ArcStarts')
def _combine_finally_starts(self, starts, exits):
"""Helper for building the cause of `finally` branches.
"finally" clauses might not execute their exits, and the causes could
be due to a failure to execute any of the exits in the try block. So
we use the causes from `starts` as the causes for `exits`.
"""
causes = []
for start in sorted(starts):
if start.cause is not None:
causes.append(start.cause.format(lineno=start.lineno))
cause = " or ".join(causes)
exits = set(ArcStart(xit.lineno, cause) for xit in exits)
return exits
    @contract(returns='ArcStarts')
    def _handle__TryExcept(self, node):
        """Handle a Python 2.7 `TryExcept` node by treating it as a `Try`."""
        # Python 2.7 uses separate TryExcept and TryFinally nodes. If we get
        # TryExcept, it means there was no finally, so fake it, and treat as
        # a general Try node.
        node.finalbody = []
        return self._handle__Try(node)
    @contract(returns='ArcStarts')
    def _handle__TryFinally(self, node):
        """Handle a Python 2.7 `TryFinally` node by normalizing it to a `Try`."""
        # Python 2.7 uses separate TryExcept and TryFinally nodes. If we get
        # TryFinally, see if there's a TryExcept nested inside. If so, merge
        # them. Otherwise, fake fields to complete a Try node.
        node.handlers = []
        node.orelse = []
        first = node.body[0]
        if first.__class__.__name__ == "TryExcept" and node.lineno == first.lineno:
            # NOTE(review): a matching line number presumably means both nodes
            # came from a single source-level `try/except/finally` -- confirm.
            assert len(node.body) == 1
            node.body = first.body
            node.handlers = first.handlers
            node.orelse = first.orelse
        return self._handle__Try(node)
    @contract(returns='ArcStarts')
    def _handle__While(self, node):
        """Add arcs for a `while` loop: test, body, `break`s, and `else`."""
        constant_test = self.is_constant_expr(node.test)
        start = to_top = self.line_for_node(node.test)
        if constant_test and (env.PY3 or constant_test == "Num"):
            # For a constant test, the bottom of the body arcs back to the
            # first body line rather than to the (optimized-away) test line.
            to_top = self.line_for_node(node.body[0])
        self.block_stack.append(LoopBlock(start=to_top))
        from_start = ArcStart(start, cause="the condition on line {lineno} was never true")
        exits = self.add_body_arcs(node.body, from_start=from_start)
        for xit in exits:
            # Each exit of the body loops back to the top of the loop.
            self.add_arc(xit.lineno, to_top, xit.cause)
        exits = set()
        # `break`s inside the body are exits of the whole loop.
        my_block = self.block_stack.pop()
        exits.update(my_block.break_exits)
        from_start = ArcStart(start, cause="the condition on line {lineno} was never false")
        if node.orelse:
            else_exits = self.add_body_arcs(node.orelse, from_start=from_start)
            exits |= else_exits
        else:
            # No `else` clause: you can exit from the start.
            if not constant_test:
                exits.add(from_start)
        return exits
@contract(returns='ArcStarts')
def _handle__With(self, node):
start = self.line_for_node(node)
exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
return exits
_handle__AsyncWith = _handle__With
    def _code_object__Module(self, node):
        """Add arcs for the module's code object: entry, body, and exit."""
        start = self.line_for_node(node)
        if node.body:
            # The negated line number is used as the synthetic entry/exit
            # endpoint for the module, as shown by the arcs added below.
            exits = self.add_body_arcs(node.body, from_start=ArcStart(-start))
            for xit in exits:
                self.add_arc(xit.lineno, -start, xit.cause, "didn't exit the module")
        else:
            # Empty module.
            self.add_arc(-start, start)
            self.add_arc(start, -start)
    def _code_object__FunctionDef(self, node):
        """Add arcs for a function's code object: entry, body, and returns."""
        start = self.line_for_node(node)
        # Track the function on the block stack so `return`s find their block.
        self.block_stack.append(FunctionBlock(start=start, name=node.name))
        exits = self.add_body_arcs(node.body, from_start=ArcStart(-start))
        self.process_return_exits(exits)
        self.block_stack.pop()
    _code_object__AsyncFunctionDef = _code_object__FunctionDef
    def _code_object__ClassDef(self, node):
        """Add arcs for a class body's code object: entry, body, and exit."""
        start = self.line_for_node(node)
        # Entry arc into the class body.
        self.add_arc(-start, start)
        exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
        for xit in exits:
            self.add_arc(
                xit.lineno, -start, xit.cause,
                "didn't exit the body of class '{0}'".format(node.name),
            )
    def _make_oneline_code_method(noun):        # pylint: disable=no-self-argument
        """A function to make methods for one-line callable _code_object__ methods.
        `noun` is the human-readable name used in the "missed arc" messages.
        This runs at class-definition time, which is why there is deliberately
        no `self` argument.
        """
        def _code_object__oneline_callable(self, node):
            start = self.line_for_node(node)
            self.add_arc(-start, start, None, "didn't run the {0} on line {1}".format(noun, start))
            self.add_arc(
                start, -start, None,
                "didn't finish the {0} on line {1}".format(noun, start),
            )
        return _code_object__oneline_callable
    _code_object__Lambda = _make_oneline_code_method("lambda")
    _code_object__GeneratorExp = _make_oneline_code_method("generator expression")
    _code_object__DictComp = _make_oneline_code_method("dictionary comprehension")
    _code_object__SetComp = _make_oneline_code_method("set comprehension")
    # NOTE(review): ListComp is guarded by env.PY3 -- presumably because list
    # comprehensions only get their own code object on Python 3; confirm.
    if env.PY3:
        _code_object__ListComp = _make_oneline_code_method("list comprehension")
if AST_DUMP:            # pragma: debugging
    # Code only used when dumping the AST for debugging.
    # AST fields omitted from dumps.
    SKIP_DUMP_FIELDS = ["ctx"]
    def _is_simple_value(value):
        """Is `value` simple enough to be displayed on a single line?"""
        # `in` compares by equality, so the first test matches None and any
        # empty container; strings and numbers are matched by type.
        return (
            value in [None, [], (), {}, set()] or
            isinstance(value, (string_class, int, float))
        )
    def ast_dump(node, depth=0):
        """Dump the AST for `node`.
        This recursively walks the AST, printing a readable version.
        `depth` is the current indentation level, in spaces.
        """
        indent = " " * depth
        if not isinstance(node, ast.AST):
            # Not an AST node at all: just show its repr.
            print("{0}<{1} {2!r}>".format(indent, node.__class__.__name__, node))
            return
        lineno = getattr(node, "lineno", None)
        if lineno is not None:
            linemark = " @ {0}".format(node.lineno)
        else:
            linemark = ""
        head = "{0}<{1}{2}".format(indent, node.__class__.__name__, linemark)
        named_fields = [
            (name, value)
            for name, value in ast.iter_fields(node)
            if name not in SKIP_DUMP_FIELDS
        ]
        if not named_fields:
            # No interesting fields: close the node on one line.
            print("{0}>".format(head))
        elif len(named_fields) == 1 and _is_simple_value(named_fields[0][1]):
            # A single simple field also fits on one line.
            field_name, value = named_fields[0]
            print("{0} {1}: {2!r}>".format(head, field_name, value))
        else:
            print(head)
            if 0:
                # Deliberately disabled extra debug output: the node's MRO.
                print("{0}# mro: {1}".format(
                    indent, ", ".join(c.__name__ for c in node.__class__.__mro__[1:]),
                ))
            next_indent = indent + "    "
            for field_name, value in named_fields:
                prefix = "{0}{1}:".format(next_indent, field_name)
                if _is_simple_value(value):
                    print("{0} {1!r}".format(prefix, value))
                elif isinstance(value, list):
                    print("{0} [".format(prefix))
                    for n in value:
                        ast_dump(n, depth + 8)
                    print("{0}]".format(next_indent))
                else:
                    print(prefix)
                    ast_dump(value, depth + 8)
            print("{0}>".format(indent))
| 38.742566 | 99 | 0.594846 |
aa31df183eb96b22bd3d1b4b73cf3a55aeb444af | 1,231 | py | Python | mobile_payments/defaults.py | ZendaInnocent/mobile-payments | d3e7e9321c39bde2e7e368e78c096b36cf66dd3b | [
"MIT"
] | 1 | 2020-11-19T05:22:39.000Z | 2020-11-19T05:22:39.000Z | mobile_payments/defaults.py | ZendaInnocent/mobile-payments | d3e7e9321c39bde2e7e368e78c096b36cf66dd3b | [
"MIT"
] | null | null | null | mobile_payments/defaults.py | ZendaInnocent/mobile-payments | d3e7e9321c39bde2e7e368e78c096b36cf66dd3b | [
"MIT"
] | null | null | null | MPESA_PUBLIC_KEY = 'MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEArv9yxA69XQKBo24BaF/D+fvlqmGdYjqLQ5WtNBb5tquqGvAvG3WMFETVUSow/LizQalxj2ElMVrUmzu5mGGkxK08bWEXF7a1DEvtVJs6nppIlFJc2SnrU14AOrIrB28ogm58JjAl5BOQawOXD5dfSk7MaAA82pVHoIqEu0FxA8BOKU+RGTihRU+ptw1j4bsAJYiPbSX6i71gfPvwHPYamM0bfI4CmlsUUR3KvCG24rB6FNPcRBhM3jDuv8ae2kC33w9hEq8qNB55uw51vK7hyXoAa+U7IqP1y6nBdlN25gkxEA8yrsl1678cspeXr+3ciRyqoRgj9RD/ONbJhhxFvt1cLBh+qwK2eqISfBb06eRnNeC71oBokDm3zyCnkOtMDGl7IvnMfZfEPFCfg5QgJVk1msPpRvQxmEsrX9MQRyFVzgy2CWNIb7c+jPapyrNwoUbANlN8adU1m6yOuoX7F49x+OjiG2se0EJ6nafeKUXw/+hiJZvELUYgzKUtMAZVTNZfT8jjb58j8GVtuS+6TM2AutbejaCV84ZK58E2CRJqhmjQibEUO6KPdD7oTlEkFy52Y1uOOBXgYpqMzufNPmfdqqqSM4dU70PO8ogyKGiLAIxCetMjjm6FCMEA3Kc8K0Ig7/XtFm9By6VxTJK1Mg36TlHaZKP6VzVLXMtesJECAwEAAQ =='
# Vodacom M-Pesa OpenAPI credentials and endpoint paths used by this package.
MPESA_API_KEY = ''
MPESA_BASE_URL = 'openapi.m-pesa.com'
MPESA_GET_SESSION_URL = '/sandbox/ipg/v2/vodacomTZN/getSession/'
MPESA_C2BPAYMENT_URL = '/sandbox/ipg/v2/vodacomTZN/c2bPayment/singleStage/'
MPESA_REVERSAL_URL = '/sandbox/ipg/v2/vodacomTZN/reversal/'
MPESA_B2CPAYMENT_URL = '/sandbox/ipg/v2/vodacomTZN/b2cPayment/'
# NOTE(review): the two endpoints below use the production "/openapi/" root
# while the ones above use "/sandbox/" -- confirm the mix is intentional.
MPESA_B2BPAYMENT_URL = '/openapi/ipg/v2/vodacomTZN/b2bPayment/'
MPESA_TRANSACTION_STATUS_URL = '/openapi/ipg/v2/vodacomTZN/queryTransactionStatus/'
| 102.583333 | 758 | 0.907392 |
315db687b09a63ab4808173544184decb14fb3e5 | 463 | py | Python | setup.py | OpenPeerPower/frontend | e61601f2e664ba743853bac550da8f28671ba4c0 | [
"Apache-2.0"
] | null | null | null | setup.py | OpenPeerPower/frontend | e61601f2e664ba743853bac550da8f28671ba4c0 | [
"Apache-2.0"
] | null | null | null | setup.py | OpenPeerPower/frontend | e61601f2e664ba743853bac550da8f28671ba4c0 | [
"Apache-2.0"
] | null | null | null | from setuptools import setup, find_packages
# Distribution metadata for the Open Peer Power frontend package.
setup(
    name="openpeerpower-frontend",
    version="20210523.2",
    description="The Open Peer Power frontend",
    url="https://github.com/openpeerpower/openpeerpower-polymer",
    author="The Open Peer Power Authors",
    author_email="hello@openpeerpower.io",
    license="Apache-2.0",
    # Ship the built frontend package and all of its subpackages.
    packages=find_packages(include=["opp_frontend", "opp_frontend.*"]),
    include_package_data=True,
    zip_safe=False,
)
| 30.866667 | 71 | 0.721382 |
f89f7fb66ed4f6ff815ca011b45420ea52af58ba | 4,362 | py | Python | aws_management/rds.py | mbisahaTG/aws-management | 52f8633458866778abc4bb187057a5aee6e134b4 | [
"MIT"
] | null | null | null | aws_management/rds.py | mbisahaTG/aws-management | 52f8633458866778abc4bb187057a5aee6e134b4 | [
"MIT"
] | null | null | null | aws_management/rds.py | mbisahaTG/aws-management | 52f8633458866778abc4bb187057a5aee6e134b4 | [
"MIT"
] | null | null | null | import boto3, botocore
from os import environ
import logging
import jsonschema
class AwsRdsProvisionError(Exception):
    """Raised when an RDS instance cannot be found for a managed database."""
class AwsRdsManager:
    """Provision, inspect, and deprovision a single AWS RDS instance.

    Shared infrastructure settings (instance class, engine, networking, ...)
    are registered once via `set_config`; each instance of this class then
    manages one database described by a dict with `database`, `username`
    and `password` keys.
    """

    log = logging.getLogger(__name__)

    # JSON schema for the per-database descriptor accepted by __init__.
    db_schema = dict(
        type="object",
        properties=dict(
            database=dict(type="string"),
            password=dict(type="string"),
            username=dict(type="string"),
        ),
        required=["database", "password", "username"],
    )

    # JSON schema for the shared RDS configuration accepted by set_config.
    config_schema = dict(
        type="object",
        properties=dict(
            AllocatedStorage=dict(type="integer"),
            DBInstanceClass=dict(type="string"),
            Engine=dict(type="string"),
            BackupRetentionPeriod=dict(type="integer"),
            MultiAZ=dict(type="boolean"),
            EngineVersion=dict(type="string"),
            PubliclyAccessible=dict(type="boolean"),
            VpcSecurityGroupIds=dict(type="array", items=dict(type="string")),
            DBSubnetGroupName=dict(type="string"),
        ),
        required=[
            "AllocatedStorage",
            "DBInstanceClass",
            "Engine",
            "BackupRetentionPeriod",
            "MultiAZ",
            "EngineVersion",
            "PubliclyAccessible",
            "VpcSecurityGroupIds",
            "DBSubnetGroupName",
        ],
    )

    @classmethod
    def set_config(cls, config: dict):
        """Validate `config` against `config_schema` and store it class-wide."""
        jsonschema.validate(config, cls.config_schema)
        cls.RDS_CONFIG = config

    def __init__(self, db: dict):
        """Validate and remember the database descriptor `db`."""
        jsonschema.validate(db, self.db_schema)
        self.db = db

    @property
    def boto_client(self):
        # A new boto3 RDS client is created on every access.
        return boto3.client("rds")

    def database_description(self):
        """Return the boto3 description of this database's RDS instance.

        Raises
        ------
        AwsRdsProvisionError
            If no instance exists for the configured database name.
        """
        rds = self.boto_client
        try:
            instances = rds.describe_db_instances(
                DBInstanceIdentifier=self.db["database"]
            )
            return instances["DBInstances"][0]
        except (rds.exceptions.DBInstanceNotFoundFault, KeyError, IndexError) as ex:
            # Chain the original exception so callers can see the root cause.
            raise AwsRdsProvisionError(f"No Database Provisioned for {self.db}") from ex

    def provision(self, silent=True, wait=None):
        """Create the RDS instance for this database.

        Parameters
        ----------
        silent : bool
            If True, an already-existing instance is logged and skipped
            instead of raising.
        wait : str or None
            Name of a boto3 waiter (e.g. 'db_instance_available') to block
            on until the instance is ready; None returns immediately.
        """
        assert hasattr(
            self.__class__, "RDS_CONFIG"
        ), f"No RDS Configuration Given. See {self.__class__.__name__}.set_config"
        # Merge the shared configuration with this database's identity.
        db_vars = self.RDS_CONFIG.copy()
        db_vars.update(
            {
                "DBName": self.db["database"],
                "DBInstanceIdentifier": self.db["database"],
                "MasterUsername": self.db["username"],
                "MasterUserPassword": self.db["password"],
            }
        )
        try:
            self.boto_client.create_db_instance(**db_vars)
        except botocore.exceptions.ClientError as ex:
            if silent and "DBInstanceAlreadyExists" in str(ex):
                self.log.info(f"{self.db} instance already exists. Skipping.")
                return
            else:
                # Re-raise with the original traceback intact.
                raise
        if wait:
            self.log.info(
                f"Waiting for {self.db} instance to spawn... This could take a while..."
            )
            waiter = self.boto_client.get_waiter(wait)
            waiter.wait(DBInstanceIdentifier=db_vars["DBInstanceIdentifier"])
            self.log.info(f"{self.db} instance available.")
        else:
            # `warning` replaces the deprecated `warn` alias.
            self.log.warning(
                f"Warning: did not wait for {self.db} instance to spawn. Instance may not be available."
            )

    def deprovision(self, wait=False, silent=True):
        """Delete the RDS instance, skipping final snapshot and backups.

        Parameters
        ----------
        wait : bool
            If True, block until the instance is fully deleted.
        silent : bool
            If True, an already-deleted instance is logged and skipped
            instead of raising.
        """
        try:
            self.boto_client.delete_db_instance(
                DBInstanceIdentifier=self.db["database"],
                SkipFinalSnapshot=True,
                DeleteAutomatedBackups=True,
            )
        except botocore.exceptions.ClientError as ex:
            if silent and "DBInstanceNotFound" in str(ex):
                self.log.info(f"{self.db} instance already terminated. Skipping.")
                return
            else:
                raise
        if wait:
            self.log.info(f"Waiting for {self.db} instance to terminate...")
            waiter = self.boto_client.get_waiter("db_instance_deleted")
            waiter.wait(DBInstanceIdentifier=self.db["database"])
            self.log.info(f"{self.db} instance terminated.")
        else:
            self.log.warning(f"Warning: did not wait for {self.db} instance to terminate.")
| 35.177419 | 104 | 0.574278 |
59c541071ced586b31313b7d9db4cfba9243a615 | 39,964 | py | Python | devito/types/basic.py | Geophysics-OpenSource/devito | ef96319eaab0735316c75b0825681f37a8a923f0 | [
"MIT"
] | 1 | 2020-06-08T20:44:35.000Z | 2020-06-08T20:44:35.000Z | devito/types/basic.py | Geophysics-OpenSource/devito | ef96319eaab0735316c75b0825681f37a8a923f0 | [
"MIT"
] | null | null | null | devito/types/basic.py | Geophysics-OpenSource/devito | ef96319eaab0735316c75b0825681f37a8a923f0 | [
"MIT"
] | 1 | 2021-01-05T07:27:35.000Z | 2021-01-05T07:27:35.000Z | import abc
from collections import namedtuple
from ctypes import POINTER, Structure, byref
from functools import reduce
from math import ceil
from operator import mul
import numpy as np
import sympy
from sympy.core.assumptions import _assume_rules
from cached_property import cached_property
from cgen import Struct, Value
from frozendict import frozendict
from devito.data import default_allocator
from devito.finite_differences import Evaluable
from devito.parameters import configuration
from devito.symbolics import aligned_indices
from devito.tools import Pickable, ctypes_to_cstr, dtype_to_cstr, dtype_to_ctype
from devito.types.args import ArgProvider
from devito.types.caching import Cached
from devito.types.utils import DimensionTuple
# Public API of this module.
__all__ = ['Symbol', 'Scalar', 'Array', 'Indexed', 'Object',
           'LocalObject', 'CompositeObject']
# Lightweight (left, right) pairs describing per-dimension extents and
# displacements (e.g. halo/padding sizes) throughout this module.
Size = namedtuple('Size', 'left right')
Offset = namedtuple('Offset', 'left right')
class Basic(object):
    """
    Three relevant types inherit from this class:
        * AbstractSymbol: represents a scalar; may carry data; may be used
                          to build equations.
        * AbstractFunction: represents a discrete R^n -> R function; may
                            carry data; may be used to build equations.
        * AbstractTensor: represents a discrete 2nd order tensor or vector:
                          R^n -> R^(nd x nd) tensor (nd dimensions),
                          R^n -> R^nd vector (nd dimensions),
                          may carry data; may be used to build equations.
        * AbstractObject: represents a generic object, for example a (pointer
                          to) data structure.
                                        Basic
                                          |
              --------------------------------------------------------------
              |                  |                  |                       |
        AbstractSymbol    AbstractFunction    AbstractTensor    AbstractObject
    All these subtypes must implement a number of methods/properties to enable
    code generation via the Devito compiler. These methods/properties are
    easily recognizable as their name starts with _C_.
    Notes
    -----
    The AbstractFunction sub-hierarchy is implemented in :mod:`dense.py`.
    The AbstractTensor sub-hierarchy is implemented in :mod:`tensor.py`.
    """
    # Type-dispatch flags: subclasses flip to True the ones that apply to them.
    # Top hierarchy
    is_AbstractFunction = False
    is_AbstractSymbol = False
    is_AbstractObject = False
    # Symbolic objects created internally by Devito
    is_Symbol = False
    is_Array = False
    is_Object = False
    is_LocalObject = False
    # Created by the user
    is_Input = False
    # Scalar symbolic objects created by the user
    is_Dimension = False
    is_Constant = False
    # Tensor symbolic objects created by the user
    is_DiscreteFunction = False
    is_Function = False
    is_TimeFunction = False
    is_SparseTimeFunction = False
    is_SparseFunction = False
    is_PrecomputedSparseFunction = False
    is_PrecomputedSparseTimeFunction = False
    # Time dependence
    is_TimeDependent = False
    # Tensor and Vector valued objects
    is_VectorValued = False
    is_TensorValued = False
    # Basic symbolic object properties
    is_Scalar = False
    is_Tensor = False
    # Some other properties
    is_PerfKnob = False  # Does it impact the Operator performance?
    @abc.abstractmethod
    def __init__(self, *args, **kwargs):
        return
    @abc.abstractproperty
    def _C_name(self):
        """
        The C-level name of the object.
        Returns
        -------
        str
        """
        return
    @abc.abstractproperty
    def _C_typename(self):
        """
        The C-level type of the object.
        Returns
        -------
        str
        """
        return
    @abc.abstractproperty
    def _C_typedata(self):
        """
        The C-level type of the data values.
        Returns
        -------
        str
        """
        return
    @abc.abstractproperty
    def _C_ctype(self):
        """
        The C-level type of the object, as a ctypes object, suitable for type
        checking when calling functions via ctypes.
        Returns
        -------
        ctypes type
        """
        return
    @property
    def _C_typedecl(self):
        """
        The C-level struct declaration representing the object.
        Returns
        -------
        cgen.Struct or None
            None if the object C type can be expressed with a basic C type,
            such as float or int.
        """
        return
class AbstractSymbol(sympy.Symbol, Basic, Pickable, Evaluable):
    """
    Base class for scalar symbols.
    The hierarchy is structured as follows
                 AbstractSymbol
                       |
        -------------------------------------
        |                                   |
    DataSymbol                            Symbol
        |                                   |
    ----------------                 -------------------
    |              |                 |                 |
    Constant  DefaultDimension     Scalar          Dimension
                                              <:mod:`dimension.py`>
    All symbols can be used to build equations. However, while DataSymbol
    carries data, Symbol is a pure symbolic object.
    Constant, DefaultDimension, and Dimension (and most of its subclasses) are
    part of the user API; Scalar, instead, is only used internally by Devito.
    DefaultDimension and Dimension define a problem dimension (in other words,
    an "iteration space"). They can be used to index into Functions. For more
    information, refer to :mod:`dimension.py`.
    """
    is_AbstractSymbol = True
    is_Symbol = True
    # SymPy default assumptions
    is_real = True
    is_imaginary = False
    is_commutative = True
    @classmethod
    def _filter_assumptions(cls, **kwargs):
        """Extract sympy.Symbol-specific kwargs.
        Destructively pops the assumption entries out of `kwargs` and returns
        both the extracted assumptions and the remaining kwargs.
        """
        assumptions = {}
        for i in list(kwargs):
            if i in _assume_rules.defined_facts:
                assumptions[i] = kwargs.pop(i)
        return assumptions, kwargs
    def __new__(cls, *args, **kwargs):
        # The name may come either as a keyword or as the first positional arg.
        name = kwargs.get('name') or args[0]
        assumptions, kwargs = cls._filter_assumptions(**kwargs)
        # Create the new Symbol
        # Note: use __xnew__ to bypass sympy caching
        newobj = sympy.Symbol.__xnew__(cls, name, **assumptions)
        # Initialization
        newobj._dtype = cls.__dtype_setup__(**kwargs)
        newobj.__init_finalize__(*args, **kwargs)
        return newobj
    @classmethod
    def __dtype_setup__(cls, **kwargs):
        """Extract the object data type from ``kwargs``."""
        return kwargs.get('dtype', np.int32)
    def __init__(self, *args, **kwargs):
        # no-op, the true init is performed by __init_finalize__
        pass
    def __init_finalize__(self, *args, **kwargs):
        self._is_const = kwargs.get('is_const', False)
    @property
    def dtype(self):
        """The data type of the object."""
        return self._dtype
    # A scalar has no indices, dimensions, or shape; the properties below
    # give the degenerate values expected by code written against the
    # AbstractFunction interface.
    @property
    def indices(self):
        return ()
    @property
    def dimensions(self):
        return self.indices
    @property
    def shape(self):
        return ()
    @property
    def ndim(self):
        return 0
    @property
    def symbolic_shape(self):
        return ()
    @property
    def base(self):
        return self
    @property
    def function(self):
        return self
    @property
    def evaluate(self):
        return self
    def indexify(self):
        return self
    @property
    def is_const(self):
        """
        True if the symbol value cannot be modified within an Operator (and thus
        its value is provided by the user directly from Python-land), False otherwise.
        """
        return self._is_const
    @property
    def _C_name(self):
        return self.name
    @property
    def _C_typename(self):
        return '%s%s' % ('const ' if self.is_const else '',
                         dtype_to_cstr(self.dtype))
    @property
    def _C_typedata(self):
        return dtype_to_cstr(self.dtype)
    @property
    def _C_ctype(self):
        return dtype_to_ctype(self.dtype)
    def _subs(self, old, new, **hints):
        """
        This stub allows sympy.Basic.subs to operate on an expression
        involving devito Scalars. Ordinarily the comparisons between
        devito subclasses of sympy types are quite strict.
        """
        try:
            if old.name == self.name:
                return new
        except AttributeError:
            # `old` has no `.name`: it cannot match this symbol.
            pass
        return self
    # Pickling support
    _pickle_args = []
    _pickle_kwargs = ['name', 'dtype', 'is_const']
    __reduce_ex__ = Pickable.__reduce_ex__
class Symbol(AbstractSymbol, Cached):
    """
    A scalar symbol, cached by both Devito and SymPy, which does not carry
    any data.
    Notes
    -----
    A Symbol may not be in the SymPy cache, but still be present in the
    Devito cache. This is because SymPy caches operations, rather than
    actual objects.
    """
    @classmethod
    def _cache_key(cls, *args, **kwargs):
        """Build the Devito-cache key from the class, name, args and kwargs."""
        args = list(args)
        key = {}
        # The base type is necessary, otherwise two objects such as
        # `Scalar(name='s')` and `Dimension(name='s')` would have the same key
        key['cls'] = cls
        # The name is always present, and added as if it were an arg
        key['name'] = kwargs.pop('name', None) or args.pop(0)
        # From the args
        key['args'] = tuple(args)
        # From the kwargs
        key.update(kwargs)
        return frozendict(key)
    def __new__(cls, *args, **kwargs):
        # Return the cached instance for this key, if there is one.
        key = cls._cache_key(*args, **kwargs)
        obj = cls._cache_get(key)
        if obj is not None:
            return obj
        # Not in cache. Create a new Symbol via sympy.Symbol
        name = kwargs.get('name') or args[0]
        assumptions, kwargs = cls._filter_assumptions(**kwargs)
        # Note: use __xnew__ to bypass sympy caching
        newobj = sympy.Symbol.__xnew__(cls, name, **assumptions)
        # Initialization
        newobj._dtype = cls.__dtype_setup__(**kwargs)
        newobj.__init_finalize__(*args, **kwargs)
        # Store new instance in symbol cache
        Cached.__init__(newobj, key)
        return newobj
    __hash__ = Cached.__hash__
class DataSymbol(AbstractSymbol, Cached):
    """
    A scalar symbol, cached by both Devito and SymPy, which carries data.
    """
    @classmethod
    def _cache_key(cls, *args, **kwargs):
        """A DataSymbol caches on the class type itself."""
        return cls
    def __new__(cls, *args, **kwargs):
        # Return the cached instance for this class, if there is one.
        key = cls._cache_key(*args, **kwargs)
        obj = cls._cache_get(key)
        if obj is not None:
            return obj
        # Not in cache. Create a new Symbol via sympy.Symbol
        name = kwargs.get('name') or args[0]
        assumptions, kwargs = cls._filter_assumptions(**kwargs)
        # Create new, unique type instance from cls and the symbol name
        newcls = type(name, (cls,), dict(cls.__dict__))
        # Create the new Symbol and invoke __init__
        newobj = sympy.Symbol.__new__(newcls, name, **assumptions)
        # Initialization
        newobj._dtype = cls.__dtype_setup__(**kwargs)
        newobj.__init_finalize__(*args, **kwargs)
        # Store new instance in symbol cache
        Cached.__init__(newobj, newcls)
        return newobj
    __hash__ = Cached.__hash__
    # Pickling support
    @property
    def _pickle_reconstruct(self):
        # Unpickle through the base class, since `newcls` above is synthetic.
        return self.__class__.__base__
class Scalar(Symbol, ArgProvider):
    """
    Like a Symbol, but in addition it can pass runtime values to an Operator.
    Parameters
    ----------
    name : str
        Name of the symbol.
    dtype : data-type, optional
        Any object that can be interpreted as a numpy data type. Defaults
        to ``np.float32``.
    is_const : bool, optional
        True if the symbol value cannot be modified within an Operator,
        False otherwise. Defaults to False.
    **assumptions
        Any SymPy assumptions, such as ``nonnegative=True``. Refer to the
        SymPy documentation for more information.
    """
    is_Scalar = True
    @classmethod
    def __dtype_setup__(cls, **kwargs):
        # Scalars default to single-precision floats, unlike the np.int32
        # default of AbstractSymbol.
        return kwargs.get('dtype', np.float32)
class AbstractTensor(sympy.ImmutableDenseMatrix, Basic, Cached, Pickable, Evaluable):
    """
    Base class for vector and tensor valued functions. It inherits from and
    mimics the behavior of a sympy.ImmutableDenseMatrix.
    The sub-hierarchy is as follows
                         AbstractTensor
                               |
                         TensorFunction
                               |
             ---------------------------------
             |                               |
      VectorFunction                 TensorTimeFunction
             \-------\                       |
                      \------- VectorTimeFunction
    There are four relevant AbstractTensor sub-types: ::
        * TensorFunction: A space-varying tensor valued function
        * VectorFunction: A space-varying vector valued function
        * TensorTimeFunction: A time-space-varying tensor valued function
        * VectorTimeFunction: A time-space-varying vector valued function
    """
    # Sympy attributes
    is_MatrixLike = True
    is_Matrix = True
    # Devito attributes
    is_AbstractTensor = True
    is_TensorValued = True
    is_VectorValued = False
    @classmethod
    def _cache_key(cls, *args, **kwargs):
        """An AbstractTensor caches on the class type itself."""
        return cls
    def __new__(cls, *args, **kwargs):
        options = kwargs.get('options', {})
        key = cls._cache_key(*args, **kwargs)
        obj = cls._cache_get(key)
        if obj is not None:
            # Cache hit: still build a new Matrix wrapper, but attach it to
            # the cached state.
            newobj = sympy.Matrix.__new__(cls, *args, **options)
            newobj.__init_cached__(key)
            return newobj
        name = kwargs.get('name')
        # Number of dimensions
        indices, _ = cls.__indices_setup__(**kwargs)
        # Create new, unique type instance from cls and the symbol name
        newcls = type(name, (cls,), dict(cls.__dict__))
        # Create the new Function object and invoke __init__
        comps = cls.__subfunc_setup__(*args, **kwargs)
        newobj = sympy.ImmutableDenseMatrix.__new__(newcls, comps)
        # Initialization. The following attributes must be available
        newobj._indices = indices
        newobj._name = name
        newobj._dtype = cls.__dtype_setup__(**kwargs)
        newobj.__init_finalize__(*args, **kwargs)
        # Store new instance in symbol cache
        Cached.__init__(newobj, newcls)
        return newobj
    __hash__ = Cached.__hash__
    def __init_finalize__(self, *args, **kwargs):
        # Hook for subclasses; no-op here.
        pass
    @classmethod
    def __dtype_setup__(cls, **kwargs):
        """Extract the object data type from ``kwargs``."""
        return None
    @classmethod
    def __subfunc_setup__(cls, *args, **kwargs):
        """Setup each component of the tensor as a Devito type."""
        return []
    @classmethod
    def __indices_setup__(cls, *args, **kwargs):
        """Extract the object indices from ``kwargs``."""
        return (), ()
    @property
    def name(self):
        return self._name
    @property
    def dtype(self):
        return self._dtype
    @classmethod
    def _new2(cls, *args, **kwargs):
        """Bypass sympy `_new` that hard codes `Matrix.__new__` to call our own."""
        return cls.__new__(cls, *args, **kwargs)
    def applyfunc(self, f):
        # Elementwise application of `f`, rebuilt through our own constructor.
        return self._new2(self.rows, self.cols, [f(x) for x in self])
class AbstractFunction(sympy.Function, Basic, Cached, Pickable, Evaluable):
"""
Base class for tensor symbols, cached by both SymPy and Devito. It inherits
from and mimicks the behaviour of a sympy.Function.
The hierarchy is structured as follows
AbstractFunction
|
---------------------------------
| |
DiscreteFunction Array
|
----------------------------------------
| |
| AbstractSparseFunction
| |
| -----------------------------------------------------
| | | |
Function SparseFunction AbstractSparseTimeFunction PrecomputedSparseFunction
| | | |
| | ------------------------------------ --------
| | | | |
TimeFunction SparseTimeFunction PrecomputedSparseTimeFunction
There are five relevant AbstractFunction sub-types: ::
* Array: A function that does not carry data.
* Function: A space-varying discrete function, which carries user data.
* TimeFunction: A time- and space-varying discrete function, which carries
user data.
* SparseFunction: A space-varying discrete function representing "sparse"
points, i.e. points that are not aligned with the
computational grid.
* SparseTimeFunction: A time- and space-varying function representing "sparse"
points, i.e. points that are not aligned with the
computational grid.
* PrecomputedSparseFunction: A SparseFunction that uses a custom interpolation
scheme, instead of linear interpolators.
* PrecomputedSparseTimeFunction: A SparseTimeFunction that uses a custom
interpolation scheme, instead of linear
interpolators.
"""
is_AbstractFunction = True
# SymPy default assumptions
is_real = True
is_imaginary = False
is_commutative = True
@classmethod
def _cache_key(cls, *args, **kwargs):
"""An AbstractFunction caches on the class type itself."""
return cls
def __new__(cls, *args, **kwargs):
options = kwargs.get('options', {})
key = cls._cache_key(*args, **kwargs)
obj = cls._cache_get(key)
if obj is not None:
newobj = sympy.Function.__new__(cls, *args, **options)
newobj.__init_cached__(key)
return newobj
# Not in cache. Create a new Function via sympy.Function
name = kwargs.get('name')
dimensions, indices = cls.__indices_setup__(**kwargs)
# Create new, unique type instance from cls and the symbol name
newcls = type(name, (cls,), dict(cls.__dict__))
# Create the new Function object and invoke __init__
newobj = sympy.Function.__new__(newcls, *indices, **options)
# Initialization. The following attributes must be available
# when executing __init_finalize__
newobj._name = name
newobj._dimensions = dimensions
newobj._shape = cls.__shape_setup__(**kwargs)
newobj._dtype = cls.__dtype_setup__(**kwargs)
newobj.__init_finalize__(*args, **kwargs)
# All objects cached on the AbstractFunction `newobj` keep a reference
# to `newobj` through the `function` field. Thus, all indexified
# object will point to `newobj`, the "actual Function".
newobj.function = newobj
# Store new instance in symbol cache
Cached.__init__(newobj, newcls)
return newobj
def __init__(self, *args, **kwargs):
# no-op, the true init is performed by __init_finalize__
pass
def __init_finalize__(self, *args, **kwargs):
# Setup halo and padding regions
self._is_halo_dirty = False
self._halo = self.__halo_setup__(**kwargs)
self._padding = self.__padding_setup__(**kwargs)
__hash__ = Cached.__hash__
@classmethod
def __indices_setup__(cls, **kwargs):
"""Extract the object indices from ``kwargs``."""
return (), ()
@classmethod
def __shape_setup__(cls, **kwargs):
"""Extract the object shape from ``kwargs``."""
return ()
@classmethod
def __dtype_setup__(cls, **kwargs):
"""Extract the object data type from ``kwargs``."""
return None
def __halo_setup__(self, **kwargs):
return tuple(kwargs.get('halo', [(0, 0) for i in range(self.ndim)]))
def __padding_setup__(self, **kwargs):
return tuple(kwargs.get('padding', [(0, 0) for i in range(self.ndim)]))
@cached_property
def _honors_autopadding(self):
"""
True if the actual padding is greater or equal than whatever autopadding
would produce, False otherwise.
"""
autopadding = self.__padding_setup__(autopadding=True)
return all(l0 >= l1 and r0 >= r1
for (l0, r0), (l1, r1) in zip(self.padding, autopadding))
@property
def name(self):
"""The name of the object."""
return self._name
@property
def indices(self):
"""The indices (aka dimensions) of the object."""
return self.args
@property
def indices_ref(self):
"""The reference indices of the object (indices at first creation)."""
return DimensionTuple(*self.function.indices, getters=self.dimensions)
@property
def origin(self):
"""
Origin of the AbstractFunction in term of Dimension
f(x) : origin = 0
f(x + hx/2) : origin = hx/2
"""
return tuple(r - d for d, r in zip(self.dimensions, self.indices_ref))
@property
def dimensions(self):
"""Tuple of Dimensions representing the object indices."""
return self._dimensions
@property
def _eval_deriv(self):
return self
@cached_property
def _is_on_grid(self):
"""
Check whether the object is on the grid or need averaging.
For example, if the original non-staggered function is f(x)
then f(x) is on the grid and f(x + h_x/2) is off the grid.
"""
return all([aligned_indices(i, j, d.spacing) for i, j, d in
zip(self.indices, self.indices_ref, self.dimensions)])
@property
def evaluate(self):
# Average values if at a location not on the Function's grid
if self._is_on_grid:
return self
weight = 1.0
avg_list = [self]
is_averaged = False
for i, ir, d in zip(self.indices, self.indices_ref, self.dimensions):
off = (i - ir)/d.spacing
if not isinstance(off, sympy.Number) or int(off) == off:
pass
else:
weight *= 1/2
is_averaged = True
avg_list = [(a.xreplace({i: i - d.spacing/2}) +
a.xreplace({i: i + d.spacing/2})) for a in avg_list]
if not is_averaged:
return self
return weight * sum(avg_list)
@property
def shape(self):
"""The shape of the object."""
return self._shape
@property
def dtype(self):
"""The data type of the object."""
return self._dtype
@property
def ndim(self):
"""The rank of the object."""
return len(self.indices)
@property
def symbolic_shape(self):
"""
The symbolic shape of the object. This includes the domain, halo, and
padding regions. While halo and padding are known quantities (integers),
the domain size is given as a symbol.
"""
halo = [sympy.Add(*i, evaluate=False) for i in self._size_halo]
padding = [sympy.Add(*i, evaluate=False) for i in self._size_padding]
domain = [i.symbolic_size for i in self.dimensions]
ret = tuple(sympy.Add(i, j, k, evaluate=False)
for i, j, k in zip(domain, halo, padding))
return DimensionTuple(*ret, getters=self.dimensions)
@cached_property
def indexed(self):
"""The wrapped IndexedData object."""
return IndexedData(self.name, shape=self.shape, function=self.function)
@property
def _mem_external(self):
    """
    True if the associated data was/is/will be allocated directly
    from Python (e.g., via NumPy arrays), False otherwise.
    """
    # Conservative default; subclasses override to claim external storage
    return False
@property
def _mem_stack(self):
    """
    True if the associated data should be allocated on the stack, False otherwise.
    """
    return False
@property
def _mem_heap(self):
    """
    True if the associated data was/is/will be allocated on the heap,
    False otherwise.
    """
    return False
@property
def size(self):
    """
    The number of elements this object is expected to store in memory.
    Note that this would need to be combined with self.dtype to give the actual
    size in bytes.
    """
    # Seed the reduction with 1 so that a dimension-less (scalar) object
    # reports size 1 instead of raising TypeError on an empty `shape`.
    return reduce(mul, self.shape, 1)
@property
def halo(self):
    # Halo extents, one (left, right) pair per dimension
    return self._halo
@property
def padding(self):
    # Padding extents, one (left, right) pair per dimension
    return self._padding
@property
def is_const(self):
    """
    True if the carried data values cannot be modified within an Operator,
    False otherwise.
    """
    return False
@property
def _C_name(self):
    # Name of the C-level variable wrapping this object's data
    return "%s_vec" % self.name
@property
def _C_typedata(self):
    # C string for the type of a single data element
    return dtype_to_cstr(self.dtype)
@cached_property
def _size_domain(self):
    """Number of points in the domain region."""
    # The domain size coincides with `shape`
    return DimensionTuple(*self.shape, getters=self.dimensions)
@cached_property
def _size_halo(self):
    """Number of points in the halo region."""
    # Transpose the per-dimension (left, right) pairs once, rather than
    # building and indexing the zip twice as before.
    left, right = zip(*self._halo)
    sizes = tuple(Size(i, j) for i, j in self._halo)
    return DimensionTuple(*sizes, getters=self.dimensions, left=left, right=right)
@cached_property
def _size_owned(self):
    """Number of points in the owned region."""
    # NOTE(review): the left/right swap w.r.t. `_size_halo` is deliberate
    # here -- presumably the owned region mirrors the opposite halo side;
    # confirm against the halo-exchange machinery.
    left = tuple(self._size_halo.right)
    right = tuple(self._size_halo.left)
    sizes = tuple(Size(i.right, i.left) for i in self._size_halo)
    return DimensionTuple(*sizes, getters=self.dimensions, left=left, right=right)
@cached_property
def _size_padding(self):
    """Number of points in the padding region."""
    # Transpose the per-dimension (left, right) pairs once, rather than
    # building and indexing the zip twice as before.
    left, right = zip(*self._padding)
    sizes = tuple(Size(i, j) for i, j in self._padding)
    return DimensionTuple(*sizes, getters=self.dimensions, left=left, right=right)
@cached_property
def _size_nopad(self):
    """Number of points in the domain+halo region."""
    # Per dimension: domain + left halo + right halo
    sizes = tuple(i+sum(j) for i, j in zip(self._size_domain, self._size_halo))
    return DimensionTuple(*sizes, getters=self.dimensions)
@cached_property
def _size_nodomain(self):
    """Number of points in the padding+halo region."""
    # Compute the element-wise halo+padding extents once, instead of
    # re-evaluating `np.add` three times.
    extents = np.add(self._halo, self._padding)
    left = tuple(i for i, _ in extents)
    right = tuple(i for _, i in extents)
    sizes = tuple(Size(i, j) for i, j in extents)
    return DimensionTuple(*sizes, getters=self.dimensions, left=left, right=right)
@cached_property
def _offset_domain(self):
    """Number of points before the first domain element."""
    # Per dimension: left padding + left halo
    offsets = tuple(np.add(self._size_padding.left, self._size_halo.left))
    return DimensionTuple(*offsets, getters=self.dimensions)
@cached_property
def _offset_halo(self):
    """Number of points before the first and last halo elements."""
    left = tuple(self._size_padding.left)
    # Last halo element starts after left padding + left halo + domain
    right = tuple(np.add(np.add(left, self._size_halo.left), self._size_domain))
    offsets = tuple(Offset(i, j) for i, j in zip(left, right))
    return DimensionTuple(*offsets, getters=self.dimensions, left=left, right=right)
@cached_property
def _offset_owned(self):
    """Number of points before the first and last owned elements."""
    left = tuple(self._offset_domain)
    right = tuple(np.add(self._offset_halo.left, self._size_domain))
    offsets = tuple(Offset(i, j) for i, j in zip(left, right))
    return DimensionTuple(*offsets, getters=self.dimensions, left=left, right=right)
@property
def _data_alignment(self):
    """
    The base virtual address of the data carried by the object is a multiple
    of the alignment.
    """
    # Delegated to the default memory allocator
    return default_allocator().guaranteed_alignment
def indexify(self, indices=None, lshift=False, subs=None):
    """Create a types.Indexed from the current object."""
    if indices is not None:
        # Explicit indices: no substitution or shifting applied
        return Indexed(self.indexed, *indices)
    # Substitution for each index (spacing only used in own dimension)
    subs = subs or {}
    # One substitution map per dimension: collapse +/- spacing to +/- 1,
    # merged with any caller-provided substitutions
    subs = [{**{d.spacing: 1, -d.spacing: -1}, **subs} for d in self.dimensions]
    # Add halo shift
    shift = self._size_nodomain.left if lshift else tuple([0]*len(self.dimensions))
    # Indices after substitutions
    indices = [(a - o + f).xreplace(s) for a, o, f, s in
               zip(self.args, self.origin, shift, subs)]
    return self.indexed[indices]
def __getitem__(self, index):
    """Shortcut for ``self.indexed[index]``."""
    return self.indexed[index]
# Pickling support: reconstruct from these keyword arguments
_pickle_kwargs = ['name', 'dtype', 'halo', 'padding']
__reduce_ex__ = Pickable.__reduce_ex__
@property
def _pickle_reconstruct(self):
    # Rebuild through the immediate parent class when unpickling
    return self.__class__.__base__
class Array(AbstractFunction):
    """
    Tensor symbol representing an array in symbolic equations.
    An Array is very similar to a sympy.Indexed, though it also carries
    metadata essential for code generation.
    Parameters
    ----------
    name : str
        Name of the symbol.
    dimensions : tuple of Dimension
        Dimensions associated with the object.
    dtype : data-type, optional
        Any object that can be interpreted as a numpy data type. Defaults
        to ``np.float32``.
    halo : iterable of 2-tuples, optional
        The halo region of the object.
    padding : iterable of 2-tuples, optional
        The padding region of the object.
    scope : str, optional
        Control memory allocation. Allowed values: 'heap', 'stack'. Defaults
        to 'heap'.
    Warnings
    --------
    Arrays are created and managed directly by Devito (IOW, they are not
    expected to be used directly in user code).
    """
    is_Array = True
    is_Tensor = True
    def __new__(cls, *args, **kwargs):
        # Disable sympy evaluation on construction
        kwargs.update({'options': {'evaluate': False}})
        return AbstractFunction.__new__(cls, *args, **kwargs)
    def __init_finalize__(self, *args, **kwargs):
        super(Array, self).__init_finalize__(*args, **kwargs)
        self._scope = kwargs.get('scope', 'heap')
        assert self._scope in ['heap', 'stack']
    def __padding_setup__(self, **kwargs):
        # Normalize `padding` into one (left, right) 2-tuple per dimension
        padding = kwargs.get('padding')
        if padding is None:
            padding = [(0, 0) for _ in range(self.ndim)]
            if kwargs.get('autopadding', configuration['autopadding']):
                # Heuristic 1; Arrays are typically introduced for temporaries
                # introduced during compilation, and are almost always used together
                # with loop blocking. Since the typical block size is a multiple of
                # the SIMD vector length, `vl`, padding is made such that the
                # NODOMAIN size is a multiple of `vl` too
                # Heuristic 2: the right-NODOMAIN size is not only a multiple of
                # `vl`, but also guaranteed to be *at least* greater or equal than
                # `vl`, so that the compiler can tweak loop trip counts to maximize
                # the effectiveness of SIMD vectorization
                # Let UB be a function that rounds up a value `x` to the nearest
                # multiple of the SIMD vector length
                vl = configuration['platform'].simd_items_per_reg(self.dtype)
                ub = lambda x: int(ceil(x / vl)) * vl
                fvd_halo_size = sum(self.halo[-1])
                fvd_pad_size = (ub(fvd_halo_size) - fvd_halo_size) + vl
                padding[-1] = (0, fvd_pad_size)
            return tuple(padding)
        elif isinstance(padding, int):
            # A scalar pads the right side of every dimension
            return tuple((0, padding) for _ in range(self.ndim))
        elif isinstance(padding, tuple) and len(padding) == self.ndim:
            # Bare ints in the tuple mean right-side padding only
            return tuple((0, i) if isinstance(i, int) else i for i in padding)
        else:
            raise TypeError("`padding` must be int or %d-tuple of ints" % self.ndim)
    @classmethod
    def __indices_setup__(cls, **kwargs):
        # Indices and reference indices coincide for (non-staggered) Arrays
        return tuple(kwargs['dimensions']), tuple(kwargs['dimensions'])
    @classmethod
    def __dtype_setup__(cls, **kwargs):
        return kwargs.get('dtype', np.float32)
    @property
    def shape(self):
        # Arrays span domain + halo + padding
        return self.symbolic_shape
    @property
    def scope(self):
        # Either 'heap' or 'stack' (validated in __init_finalize__)
        return self._scope
    @property
    def _mem_stack(self):
        return self._scope == 'stack'
    @property
    def _mem_heap(self):
        return self._scope == 'heap'
    @property
    def _C_typename(self):
        return ctypes_to_cstr(POINTER(dtype_to_ctype(self.dtype)))
    @cached_property
    def free_symbols(self):
        # Default Dimensions are dropped from the free symbols
        return super().free_symbols - {d for d in self.dimensions if d.is_Default}
    # Pickling support
    _pickle_kwargs = AbstractFunction._pickle_kwargs + ['dimensions', 'scope']
# Objects belonging to the Devito API not involving data, such as data structures
# that need to be passed to external libraries
class AbstractObject(Basic, sympy.Basic, Pickable):
    """
    Symbol representing a generic pointer object.
    """
    is_AbstractObject = True
    def __new__(cls, *args, **kwargs):
        # Call sympy.Basic.__new__ directly (no cache) and run __init__
        # explicitly, so each AbstractObject is a distinct symbol
        obj = sympy.Basic.__new__(cls)
        obj.__init__(*args, **kwargs)
        return obj
    def __init__(self, name, dtype):
        # name: C-level identifier; dtype: the pointer's ctype
        self.name = name
        self.dtype = dtype
    def __repr__(self):
        return self.name
    __str__ = __repr__
    def _hashable_content(self):
        # sympy equality/hashing keys on (name, dtype)
        return (self.name, self.dtype)
    @property
    def free_symbols(self):
        return {self}
    @property
    def _C_name(self):
        return self.name
    @property
    def _C_typename(self):
        return ctypes_to_cstr(self.dtype)
    @property
    def _C_ctype(self):
        return self.dtype
    @property
    def function(self):
        # An AbstractObject is its own "function"
        return self
    # Pickling support
    _pickle_args = ['name', 'dtype']
    __reduce_ex__ = Pickable.__reduce_ex__
class Object(AbstractObject, ArgProvider):
    """
    Symbol representing a generic pointer object, provided by an outer scope.
    """
    is_Object = True
    def __init__(self, name, dtype, value=None):
        # `value` may be a plain value or a callable producing one lazily
        super(Object, self).__init__(name, dtype)
        self.value = value
    @property
    def _arg_names(self):
        return (self.name,)
    def _arg_defaults(self):
        if callable(self.value):
            # Evaluate callables lazily, at argument-processing time
            return {self.name: self.value()}
        else:
            return {self.name: self.value}
    def _arg_values(self, args=None, **kwargs):
        """
        Produce runtime values for this Object after evaluating user input.
        Parameters
        ----------
        args : dict, optional
            Known argument values.
        **kwargs
            Dictionary of user-provided argument overrides.
        """
        if self.name in kwargs:
            # A user-supplied override takes precedence over the default
            return {self.name: kwargs.pop(self.name)}
        else:
            return self._arg_defaults()
class CompositeObject(Object):
    """
    Symbol representing a pointer to a composite type (e.g., a C struct),
    provided by an outer scope.
    """
    # Cache of generated ctypes Structure pointer types, keyed by
    # (struct name, field layout), so identical layouts share one type
    _dtype_cache = {}
    @classmethod
    def _generate_unique_dtype(cls, pname, pfields):
        dtype = POINTER(type(pname, (Structure,), {'_fields_': pfields}))
        key = (pname, tuple(pfields))
        return cls._dtype_cache.setdefault(key, dtype)
    def __init__(self, name, pname, pfields, value=None):
        dtype = CompositeObject._generate_unique_dtype(pname, pfields)
        value = self.__value_setup__(dtype, value)
        super(CompositeObject, self).__init__(name, dtype, value)
    def __value_setup__(self, dtype, value):
        # NOTE(review): `value or ...` replaces any falsy value, not just
        # None -- presumably only None/ctypes refs are passed here; confirm
        return value or byref(dtype._type_())
    @property
    def pfields(self):
        return tuple(self.dtype._type_._fields_)
    @property
    def pname(self):
        # Name of the underlying Structure subclass
        return self.dtype._type_.__name__
    @property
    def fields(self):
        # Field names only (types stripped)
        return [i for i, _ in self.pfields]
    def _hashable_content(self):
        return (self.name, self.pfields)
    @cached_property
    def _C_typedecl(self):
        return Struct(self.pname, [Value(ctypes_to_cstr(j), i) for i, j in self.pfields])
    # Pickling support
    _pickle_args = ['name', 'pname', 'pfields']
    _pickle_kwargs = []
class LocalObject(AbstractObject):
    """
    Symbol representing a generic pointer object, defined in the local scope.
    """
    # Type flag used throughout the compiler to identify LocalObjects
    is_LocalObject = True
# Extended SymPy hierarchy follows, for essentially two reasons:
# - To keep track of `function`
# - To override SymPy caching behaviour
class IndexedData(sympy.IndexedBase, Pickable):
    """
    Wrapper class that inserts a pointer to the symbolic data object.
    """
    def __new__(cls, label, shape=None, function=None):
        # Make sure `label` is a devito.Symbol, not a sympy.Symbol
        if isinstance(label, str):
            label = Symbol(name=label, dtype=function.dtype)
        obj = sympy.IndexedBase.__new__(cls, label, shape)
        obj.function = function
        return obj
    def func(self, *args):
        # Preserve the `function` back-reference across sympy rebuilds
        obj = super(IndexedData, self).func(*args)
        obj.function = self.function
        return obj
    def __getitem__(self, indices, **kwargs):
        """Produce a types.Indexed, rather than a sympy.Indexed."""
        indexed = super(IndexedData, self).__getitem__(indices, **kwargs)
        return Indexed(*indexed.args)
    # Pickling support
    _pickle_kwargs = ['label', 'shape', 'function']
    __reduce_ex__ = Pickable.__reduce_ex__
class Indexed(sympy.Indexed):
    # The two type flags have changed in upstream sympy as of version 1.1,
    # but the below interpretation is used throughout the compiler to
    # identify Indexed objects. With the sympy-1.1 changes a new flag
    # obj.is_Indexed was introduced which should be preferred, but the
    # required changes are cumbersome and many...
    is_Symbol = False
    is_Atom = False
    is_Dimension = False
    def _hashable_content(self):
        # Also hash the originating function, so Indexeds over different
        # functions do not hash-collide via base/indices alone
        return super(Indexed, self)._hashable_content() + (self.base.function,)
    @property
    def function(self):
        return self.base.function
    @property
    def dtype(self):
        return self.function.dtype
    @property
    def name(self):
        return self.function.name
    @property
    def origin(self):
        return self.function.origin
    @cached_property
    def free_symbols(self):
        # Make it cached, since it's relatively expensive and called often
        return super(Indexed, self).free_symbols
| 30.765204 | 89 | 0.598263 |
19bfd46142d964bb4e5bb8d10337d8a4007ff7fe | 594 | py | Python | doc/source/ReleaseNotes/generate.py | PaulDoessel/gaffer-play | 8b72dabb388e12424c230acfb0bd209049b01bd6 | [
"BSD-3-Clause"
] | null | null | null | doc/source/ReleaseNotes/generate.py | PaulDoessel/gaffer-play | 8b72dabb388e12424c230acfb0bd209049b01bd6 | [
"BSD-3-Clause"
] | null | null | null | doc/source/ReleaseNotes/generate.py | PaulDoessel/gaffer-play | 8b72dabb388e12424c230acfb0bd209049b01bd6 | [
"BSD-3-Clause"
] | 1 | 2020-02-15T16:15:54.000Z | 2020-02-15T16:15:54.000Z | import re
# Generate one release-notes page per version listed in the Changes file,
# plus an index.md linking to each of them.
#
# Fixes over the previous revision:
# - `print m.group(2)` was Python-2-only syntax; `print(...)` works on 2 and 3.
# - Every opened file (index, Changes, per-version pages) leaked; they are
#   now closed via `with` blocks / explicit close.
# - The version-header regex is compiled once instead of per line.
_version_re = re.compile(r"^(Gaffer )?(([0-9]+\.){2,3}[0-9]+)")
with open("./index.md", "w") as index:
    index.write("Release Notes\n")
    index.write("=============\n\n")
    versionFile = None
    try:
        with open("../../../Changes") as changes:
            for line in changes:
                m = _version_re.match(line)
                if m:
                    version = m.group(2)
                    print(version)
                    index.write("- [{0}]({0}.md)\n".format(version))
                    # Starting a new version section: close the previous page
                    if versionFile:
                        versionFile.close()
                    versionFile = open(version + ".md", "w")
                    versionFile.write(version + "\n")
                    continue
                # Skip preamble lines that precede the first version header
                if not versionFile:
                    continue
                # Link issue references (#123) to the GitHub issue tracker
                versionFile.write(
                    re.sub(r"#([0-9]+)",
                           r"[#\1](https://github.com/GafferHQ/gaffer/issues/\1)",
                           line)
                )
    finally:
        if versionFile:
            versionFile.close()
48c6b64d62ccc2ee9b90404725503b0d5d9faa50 | 339,338 | py | Python | manila-8.0.0/manila/tests/share/test_manager.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | null | null | null | manila-8.0.0/manila/tests/share/test_manager.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | manila-8.0.0/manila/tests/share/test_manager.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | # Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test of Share Manager for Manila."""
import datetime
import hashlib
import random
import ddt
import mock
from oslo_concurrency import lockutils
from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslo_utils import timeutils
import six
from manila.common import constants
from manila import context
from manila import coordination
from manila.data import rpcapi as data_rpc
from manila import db
from manila.db.sqlalchemy import models
from manila import exception
from manila.message import message_field
from manila import quota
from manila.share import api
from manila.share import drivers_private_data
from manila.share import manager
from manila.share import migration as migration_api
from manila.share import rpcapi
from manila.share import share_types
from manila import test
from manila.tests.api import fakes as test_fakes
from manila.tests import db_utils
from manila.tests import fake_share as fakes
from manila.tests import fake_utils
from manila.tests import utils as test_utils
from manila import utils
def fake_replica(**kwargs):
    """Build a fake share replica via fakes, with manager-style fields."""
    return fakes.fake_replica(for_manager=True, **kwargs)
class CustomTimeSleepException(Exception):
    """Raised by mocked ``time.sleep`` to break endless retry loops."""
    pass
class LockedOperationsTestCase(test.TestCase):
    """Tests for the ``locked_share_replica_operation`` decorator."""
    class FakeManager(object):
        @manager.locked_share_replica_operation
        def fake_replica_operation(self, context, replica, share_id=None):
            # No-op body: only the locking decorator itself is under test
            pass
    def setUp(self):
        super(LockedOperationsTestCase, self).setUp()
        self.manager = self.FakeManager()
        self.fake_context = test_fakes.FakeRequestContext
        # Stub the coordination lock so the decorator's call is observable
        self.lock_call = self.mock_object(
            coordination, 'synchronized', mock.Mock(return_value=lambda f: f))
    @ddt.data({'id': 'FAKE_REPLICA_ID'}, 'FAKE_REPLICA_ID')
    @ddt.unpack
    def test_locked_share_replica_operation(self, **replica):
        self.manager.fake_replica_operation(self.fake_context, replica,
                                            share_id='FAKE_SHARE_ID')
        self.assertTrue(self.lock_call.called)
@ddt.ddt
class ShareManagerTestCase(test.TestCase):
def setUp(self):
    """Build a real ShareManager with a fake driver and stubbed plumbing."""
    super(ShareManagerTestCase, self).setUp()
    self.flags(share_driver='manila.tests.fake_driver.FakeShareDriver')
    # Define class directly, because this test suite dedicated
    # to specific manager.
    self.share_manager = importutils.import_object(
        "manila.share.manager.ShareManager")
    self.mock_object(self.share_manager.driver, 'do_setup')
    self.mock_object(self.share_manager.driver, 'check_for_setup_error')
    self.share_manager.driver._stats = {
        'share_group_stats': {'consistent_snapshot_support': None},
    }
    self.mock_object(self.share_manager.message_api, 'create')
    self.context = context.get_admin_context()
    self.share_manager.driver.initialized = True
    # NOTE(review): this patcher is created but never started/entered, so
    # lockutils.lock is not actually replaced here -- confirm intent
    mock.patch.object(
        lockutils, 'lock', fake_utils.get_fake_lock_context())
    self.synchronized_lock_decorator_call = self.mock_object(
        coordination, 'synchronized', mock.Mock(return_value=lambda f: f))
def test_share_manager_instance(self):
    """ShareManager ctor wires private data, driver import and hook init."""
    fake_service_name = "fake_service"
    import_mock = mock.Mock()
    self.mock_object(importutils, "import_object", import_mock)
    private_data_mock = mock.Mock()
    self.mock_object(drivers_private_data, "DriverPrivateData",
                     private_data_mock)
    self.mock_object(manager.ShareManager, '_init_hook_drivers')
    share_manager = manager.ShareManager(service_name=fake_service_name)
    private_data_mock.assert_called_once_with(
        context=mock.ANY,
        backend_host=share_manager.host,
        config_group=fake_service_name
    )
    self.assertTrue(import_mock.called)
    self.assertTrue(manager.ShareManager._init_hook_drivers.called)
def test__init_hook_drivers(self):
    """One hook driver is imported per configured hook name."""
    fake_service_name = "fake_service"
    import_mock = mock.Mock()
    self.mock_object(importutils, "import_object", import_mock)
    self.mock_object(drivers_private_data, "DriverPrivateData")
    share_manager = manager.ShareManager(service_name=fake_service_name)
    share_manager.configuration.safe_get = mock.Mock(
        return_value=["Foo", "Bar"])
    self.assertEqual(0, len(share_manager.hooks))
    # NOTE(review): Mock has no `reset()` method; this records a child-mock
    # call rather than resetting -- `reset_mock()` was probably intended
    import_mock.reset()
    share_manager._init_hook_drivers()
    self.assertEqual(
        len(share_manager.configuration.safe_get.return_value),
        len(share_manager.hooks))
    import_mock.assert_has_calls([
        mock.call(
            hook,
            configuration=share_manager.configuration,
            host=share_manager.host
        ) for hook in share_manager.configuration.safe_get.return_value
    ], any_order=True)
def test__execute_periodic_hook(self):
    """Periodic hook data is collected once and fed to every hook."""
    share_instances_mock = mock.Mock()
    hook_data_mock = mock.Mock()
    self.mock_object(
        self.share_manager.db,
        "share_instances_get_all_by_host",
        share_instances_mock)
    self.mock_object(
        self.share_manager.driver,
        "get_periodic_hook_data",
        hook_data_mock)
    self.share_manager.hooks = [mock.Mock(return_value=i) for i in (0, 1)]
    self.share_manager._execute_periodic_hook(self.context)
    share_instances_mock.assert_called_once_with(
        context=self.context, host=self.share_manager.host)
    hook_data_mock.assert_called_once_with(
        context=self.context,
        share_instances=share_instances_mock.return_value)
    # Every registered hook must receive the same collected hook data
    for mock_hook in self.share_manager.hooks:
        mock_hook.execute_periodic_hook.assert_called_once_with(
            context=self.context,
            periodic_hook_data=hook_data_mock.return_value)
def test_init_host_with_no_shares(self):
    """init_host with an empty backend still runs driver setup checks."""
    self.mock_object(self.share_manager.db,
                     'share_instances_get_all_by_host',
                     mock.Mock(return_value=[]))
    self.share_manager.init_host()
    self.assertTrue(self.share_manager.driver.initialized)
    (self.share_manager.db.share_instances_get_all_by_host.
        assert_called_once_with(utils.IsAMatcher(context.RequestContext),
                                self.share_manager.host))
    self.share_manager.driver.do_setup.assert_called_once_with(
        utils.IsAMatcher(context.RequestContext))
    (self.share_manager.driver.check_for_setup_error.
        assert_called_once_with())
@ddt.data(
    "connection_get_info",
    "migration_cancel",
    "migration_get_progress",
    "migration_complete",
    "migration_start",
    "create_share_instance",
    "manage_share",
    "unmanage_share",
    "delete_share_instance",
    "delete_free_share_servers",
    "create_snapshot",
    "delete_snapshot",
    "update_access",
    "_report_driver_status",
    "_execute_periodic_hook",
    "publish_service_capabilities",
    "delete_share_server",
    "extend_share",
    "shrink_share",
    "create_share_group",
    "delete_share_group",
    "create_share_group_snapshot",
    "delete_share_group_snapshot",
    "create_share_replica",
    "delete_share_replica",
    "promote_share_replica",
    "periodic_share_replica_update",
    "update_share_replica",
    "create_replicated_snapshot",
    "delete_replicated_snapshot",
    "periodic_share_replica_snapshot_update",
)
def test_call_driver_when_its_init_failed(self, method_name):
    """Every manager entry point must refuse an uninitialized driver."""
    self.mock_object(self.share_manager.driver, 'do_setup',
                     mock.Mock(side_effect=Exception()))
    # break the endless retry loop
    with mock.patch("time.sleep",
                    side_effect=CustomTimeSleepException()):
        self.assertRaises(CustomTimeSleepException,
                          self.share_manager.init_host)
    self.assertRaises(
        exception.DriverNotInitialized,
        getattr(self.share_manager, method_name),
        'foo', 'bar', 'quuz'
    )
@ddt.data("do_setup", "check_for_setup_error")
def test_init_host_with_driver_failure(self, method_name):
    """A failing driver setup step is logged and leaves it uninitialized."""
    self.mock_object(self.share_manager.driver, method_name,
                     mock.Mock(side_effect=Exception()))
    self.mock_object(manager.LOG, 'exception')
    self.share_manager.driver.initialized = False
    with mock.patch("time.sleep",
                    side_effect=CustomTimeSleepException()):
        self.assertRaises(CustomTimeSleepException,
                          self.share_manager.init_host)
    manager.LOG.exception.assert_called_once_with(
        mock.ANY, "%(name)s@%(host)s" %
        {'name': self.share_manager.driver.__class__.__name__,
         'host': self.share_manager.host})
    self.assertFalse(self.share_manager.driver.initialized)
def _setup_init_mocks(self, setup_access_rules=True):
    """Create six share instances (and optionally two access rules).

    Statuses cover available, error and two migrating task states, so
    init_host tests can exercise each code path.
    """
    instances = [
        db_utils.create_share(id='fake_id_1',
                              status=constants.STATUS_AVAILABLE,
                              display_name='fake_name_1').instance,
        db_utils.create_share(id='fake_id_2',
                              status=constants.STATUS_ERROR,
                              display_name='fake_name_2').instance,
        db_utils.create_share(id='fake_id_3',
                              status=constants.STATUS_AVAILABLE,
                              display_name='fake_name_3').instance,
        db_utils.create_share(
            id='fake_id_4',
            status=constants.STATUS_MIGRATING,
            task_state=constants.TASK_STATE_MIGRATION_IN_PROGRESS,
            display_name='fake_name_4').instance,
        db_utils.create_share(id='fake_id_5',
                              status=constants.STATUS_AVAILABLE,
                              display_name='fake_name_5').instance,
        db_utils.create_share(
            id='fake_id_6',
            status=constants.STATUS_MIGRATING,
            task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS,
            display_name='fake_name_6').instance,
    ]
    # The fifth instance has out-of-sync access rules
    instances[4]['access_rules_status'] = (
        constants.SHARE_INSTANCE_RULES_SYNCING)
    if not setup_access_rules:
        return instances
    rules = [
        db_utils.create_access(share_id='fake_id_1'),
        db_utils.create_access(share_id='fake_id_3'),
    ]
    return instances, rules
@ddt.data(("some_hash", {"db_version": "test_version"}),
          ("ddd86ec90923b686597501e2f2431f3af59238c0",
           {"db_version": "test_version"}),
          (None, {"db_version": "test_version"}),
          (None, None))
@ddt.unpack
def test_init_host_with_shares_and_rules(
        self, old_backend_info_hash, new_backend_info):
    """init_host re-ensures shares only when the backend-info hash changed."""
    # initialization of test data
    def raise_share_access_exists(*args, **kwargs):
        raise exception.ShareAccessExists(
            access_type='fake_access_type', access='fake_access')
    new_backend_info_hash = (hashlib.sha1(six.text_type(
        sorted(new_backend_info.items())).encode('utf-8')).hexdigest() if
        new_backend_info else None)
    old_backend_info = {'info_hash': old_backend_info_hash}
    share_server = 'fake_share_server_does_not_matter'
    instances, rules = self._setup_init_mocks()
    fake_export_locations = ['fake/path/1', 'fake/path']
    fake_update_instances = {
        instances[0]['id']: {'export_locations': fake_export_locations},
        instances[2]['id']: {'export_locations': fake_export_locations}
    }
    instances[0]['access_rules_status'] = ''
    instances[2]['access_rules_status'] = ''
    self.mock_object(self.share_manager.db,
                     'backend_info_get',
                     mock.Mock(return_value=old_backend_info))
    mock_backend_info_update = self.mock_object(
        self.share_manager.db, 'backend_info_update')
    self.mock_object(self.share_manager.driver, 'get_backend_info',
                     mock.Mock(return_value=new_backend_info))
    mock_share_get_all_by_host = self.mock_object(
        self.share_manager.db, 'share_instances_get_all_by_host',
        mock.Mock(return_value=instances))
    self.mock_object(self.share_manager.db, 'share_instance_get',
                     mock.Mock(side_effect=[instances[0], instances[2],
                                            instances[4]]))
    self.mock_object(self.share_manager.db,
                     'share_export_locations_update')
    mock_ensure_shares = self.mock_object(
        self.share_manager.driver, 'ensure_shares',
        mock.Mock(return_value=fake_update_instances))
    self.mock_object(self.share_manager, '_ensure_share_instance_has_pool')
    self.mock_object(self.share_manager, '_get_share_server',
                     mock.Mock(return_value=share_server))
    self.mock_object(self.share_manager, 'publish_service_capabilities',
                     mock.Mock())
    self.mock_object(self.share_manager.db,
                     'share_access_get_all_for_share',
                     mock.Mock(return_value=rules))
    self.mock_object(
        self.share_manager.access_helper,
        'update_access_rules',
        mock.Mock(side_effect=raise_share_access_exists)
    )
    dict_instances = [self._get_share_replica_dict(
        instance, share_server=share_server) for instance in instances]
    # call of 'init_host' method
    self.share_manager.init_host()
    # verification of call
    exports_update = self.share_manager.db.share_export_locations_update
    self.share_manager.driver.do_setup.assert_called_once_with(
        utils.IsAMatcher(context.RequestContext))
    (self.share_manager.driver.check_for_setup_error.
        assert_called_once_with())
    if new_backend_info_hash == old_backend_info_hash:
        # Hash unchanged: no DB refresh and no ensure_shares round trip
        mock_backend_info_update.assert_not_called()
        mock_ensure_shares.assert_not_called()
        mock_share_get_all_by_host.assert_not_called()
    else:
        mock_backend_info_update.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            self.share_manager.host, new_backend_info_hash)
        self.share_manager.driver.ensure_shares.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            [dict_instances[0], dict_instances[2], dict_instances[4]])
        mock_share_get_all_by_host.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            self.share_manager.host)
        exports_update.assert_has_calls([
            mock.call(mock.ANY, instances[0]['id'], fake_export_locations),
            mock.call(mock.ANY, instances[2]['id'], fake_export_locations)
        ])
        (self.share_manager._ensure_share_instance_has_pool.
            assert_has_calls([
                mock.call(utils.IsAMatcher(context.RequestContext),
                          instances[0]),
                mock.call(utils.IsAMatcher(context.RequestContext),
                          instances[2]),
            ]))
        self.share_manager._get_share_server.assert_has_calls([
            mock.call(utils.IsAMatcher(context.RequestContext),
                      instances[0]),
            mock.call(utils.IsAMatcher(context.RequestContext),
                      instances[2]),
        ])
        (self.share_manager.publish_service_capabilities.
            assert_called_once_with(
                utils.IsAMatcher(context.RequestContext)))
        (self.share_manager.access_helper.update_access_rules.
            assert_has_calls([
                mock.call(mock.ANY, instances[0]['id'],
                          share_server=share_server),
                mock.call(mock.ANY, instances[2]['id'],
                          share_server=share_server),
            ]))
@ddt.data(("some_hash", {"db_version": "test_version"}),
          ("ddd86ec90923b686597501e2f2431f3af59238c0",
           {"db_version": "test_version"}),
          (None, {"db_version": "test_version"}),
          (None, None))
@ddt.unpack
def test_init_host_without_shares_and_rules(
        self, old_backend_info_hash, new_backend_info):
    """Backend-info hash comparison gates init_host work on an empty host."""
    old_backend_info = {'info_hash': old_backend_info_hash}
    new_backend_info_hash = (hashlib.sha1(six.text_type(
        sorted(new_backend_info.items())).encode('utf-8')).hexdigest() if
        new_backend_info else None)
    mock_backend_info_update = self.mock_object(
        self.share_manager.db, 'backend_info_update')
    self.mock_object(
        self.share_manager.db, 'backend_info_get',
        mock.Mock(return_value=old_backend_info))
    self.mock_object(self.share_manager.driver, 'get_backend_info',
                     mock.Mock(return_value=new_backend_info))
    self.mock_object(self.share_manager, 'publish_service_capabilities',
                     mock.Mock())
    mock_ensure_shares = self.mock_object(
        self.share_manager.driver, 'ensure_shares')
    mock_share_instances_get_all_by_host = self.mock_object(
        self.share_manager.db, 'share_instances_get_all_by_host',
        mock.Mock(return_value=[]))
    # call of 'init_host' method
    self.share_manager.init_host()
    if new_backend_info_hash == old_backend_info_hash:
        # Hash unchanged: neither the DB nor the instance list is touched
        mock_backend_info_update.assert_not_called()
        mock_ensure_shares.assert_not_called()
        mock_share_instances_get_all_by_host.assert_not_called()
    else:
        mock_backend_info_update.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            self.share_manager.host, new_backend_info_hash)
        self.share_manager.driver.do_setup.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext))
        self.share_manager.db.backend_info_get.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            self.share_manager.host)
        self.share_manager.driver.get_backend_info.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext))
        # No instances on this host, so ensure_shares is still skipped
        mock_ensure_shares.assert_not_called()
        mock_share_instances_get_all_by_host.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            self.share_manager.host)
@ddt.data(exception.ManilaException, ['fake/path/1', 'fake/path'])
def test_init_host_with_ensure_share(self, expected_ensure_share_result):
    """Per-share ensure_share fallback when ensure_shares is unimplemented."""
    def raise_NotImplementedError(*args, **kwargs):
        raise NotImplementedError
    instances = self._setup_init_mocks(setup_access_rules=False)
    share_server = 'fake_share_server_does_not_matter'
    self.mock_object(self.share_manager.db,
                     'share_instances_get_all_by_host',
                     mock.Mock(return_value=instances))
    self.mock_object(self.share_manager.db, 'share_instance_get',
                     mock.Mock(side_effect=[instances[0], instances[2],
                                            instances[3]]))
    # Force the fallback path from ensure_shares to per-share ensure_share
    self.mock_object(
        self.share_manager.driver, 'ensure_shares',
        mock.Mock(side_effect=raise_NotImplementedError))
    self.mock_object(self.share_manager.driver, 'ensure_share',
                     mock.Mock(side_effect=expected_ensure_share_result))
    self.mock_object(
        self.share_manager, '_ensure_share_instance_has_pool')
    self.mock_object(self.share_manager, '_get_share_server',
                     mock.Mock(return_value=share_server))
    self.mock_object(self.share_manager, 'publish_service_capabilities')
    self.mock_object(manager.LOG, 'error')
    self.mock_object(manager.LOG, 'info')
    dict_instances = [self._get_share_replica_dict(
        instance, share_server=share_server) for instance in instances]
    # call of 'init_host' method
    self.share_manager.init_host()
    # verification of call
    (self.share_manager.db.share_instances_get_all_by_host.
        assert_called_once_with(utils.IsAMatcher(context.RequestContext),
                                self.share_manager.host))
    self.share_manager.driver.do_setup.assert_called_once_with(
        utils.IsAMatcher(context.RequestContext))
    self.share_manager.driver.check_for_setup_error.assert_called_with()
    self.share_manager._ensure_share_instance_has_pool.assert_has_calls([
        mock.call(utils.IsAMatcher(context.RequestContext), instances[0]),
        mock.call(utils.IsAMatcher(context.RequestContext), instances[2]),
    ])
    self.share_manager.driver.ensure_shares.assert_called_once_with(
        utils.IsAMatcher(context.RequestContext),
        [dict_instances[0], dict_instances[2], dict_instances[3]])
    self.share_manager._get_share_server.assert_has_calls([
        mock.call(utils.IsAMatcher(context.RequestContext), instances[0]),
        mock.call(utils.IsAMatcher(context.RequestContext), instances[2]),
    ])
    self.share_manager.driver.ensure_share.assert_has_calls([
        mock.call(utils.IsAMatcher(context.RequestContext),
                  dict_instances[0],
                  share_server=share_server),
        mock.call(utils.IsAMatcher(context.RequestContext),
                  dict_instances[2],
                  share_server=share_server),
    ])
    (self.share_manager.publish_service_capabilities.
        assert_called_once_with(
            utils.IsAMatcher(context.RequestContext)))
    manager.LOG.info.assert_any_call(
        mock.ANY,
        {'task': constants.TASK_STATE_MIGRATION_IN_PROGRESS,
         'id': instances[3]['id']},
    )
    manager.LOG.info.assert_any_call(
        mock.ANY,
        {'id': instances[1]['id'], 'status': instances[1]['status']},
    )
def _get_share_replica_dict(self, share_replica, **kwargs):
# TODO(gouthamr): remove method when the db layer returns primitives
share_replica_ref = {
'id': share_replica.get('id'),
'name': share_replica.get('name'),
'share_id': share_replica.get('share_id'),
'host': share_replica.get('host'),
'status': share_replica.get('status'),
'replica_state': share_replica.get('replica_state'),
'availability_zone_id': share_replica.get('availability_zone_id'),
'export_locations': share_replica.get('export_locations') or [],
'share_network_id': share_replica.get('share_network_id'),
'share_server_id': share_replica.get('share_server_id'),
'deleted': share_replica.get('deleted'),
'terminated_at': share_replica.get('terminated_at'),
'launched_at': share_replica.get('launched_at'),
'scheduled_at': share_replica.get('scheduled_at'),
'updated_at': share_replica.get('updated_at'),
'deleted_at': share_replica.get('deleted_at'),
'created_at': share_replica.get('created_at'),
'share_server': kwargs.get('share_server'),
'access_rules_status': share_replica.get('access_rules_status'),
# Share details
'user_id': share_replica.get('user_id'),
'project_id': share_replica.get('project_id'),
'size': share_replica.get('size'),
'display_name': share_replica.get('display_name'),
'display_description': share_replica.get('display_description'),
'snapshot_id': share_replica.get('snapshot_id'),
'share_proto': share_replica.get('share_proto'),
'share_type_id': share_replica.get('share_type_id'),
'is_public': share_replica.get('is_public'),
'share_group_id': share_replica.get('share_group_id'),
'source_share_group_snapshot_member_id': share_replica.get(
'source_share_group_snapshot_member_id'),
'availability_zone': share_replica.get('availability_zone'),
}
return share_replica_ref
    def test_init_host_with_exception_on_ensure_shares(self):
        """init_host survives a failure of driver.ensure_shares().

        When the bulk ensure_shares() call raises, the per-share
        ensure_share() fallback must not be invoked.
        """
        def raise_exception(*args, **kwargs):
            # Stands in for an arbitrary driver-side failure.
            raise exception.ManilaException(message="Fake raise")
        instances = self._setup_init_mocks(setup_access_rules=False)
        mock_ensure_share = self.mock_object(
            self.share_manager.driver, 'ensure_share')
        self.mock_object(self.share_manager.db,
                         'share_instances_get_all_by_host',
                         mock.Mock(return_value=instances))
        self.mock_object(self.share_manager.db, 'share_instance_get',
                         mock.Mock(side_effect=[instances[0], instances[2],
                                                instances[3]]))
        self.mock_object(
            self.share_manager.driver, 'ensure_shares',
            mock.Mock(side_effect=raise_exception))
        self.mock_object(
            self.share_manager, '_ensure_share_instance_has_pool')
        dict_instances = [self._get_share_replica_dict(instance)
                          for instance in instances]
        # call of 'init_host' method
        self.share_manager.init_host()
        # verification of call
        (self.share_manager.db.share_instances_get_all_by_host.
            assert_called_once_with(utils.IsAMatcher(context.RequestContext),
                                    self.share_manager.host))
        self.share_manager.driver.do_setup.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext))
        self.share_manager.driver.check_for_setup_error.assert_called_with()
        self.share_manager._ensure_share_instance_has_pool.assert_has_calls([
            mock.call(utils.IsAMatcher(context.RequestContext), instances[0]),
            mock.call(utils.IsAMatcher(context.RequestContext), instances[2]),
        ])
        self.share_manager.driver.ensure_shares.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            [dict_instances[0], dict_instances[2], dict_instances[3]])
        # The bulk call failed, so the per-share fallback must not run.
        mock_ensure_share.assert_not_called()
    def test_init_host_with_exception_on_get_backend_info(self):
        """init_host re-raises when driver.get_backend_info() fails.

        Neither ensure_share() nor ensure_shares() may be attempted in
        that case.
        """
        def raise_exception(*args, **kwargs):
            raise exception.ManilaException(message="Fake raise")
        old_backend_info = {'info_hash': "test_backend_info"}
        mock_ensure_share = self.mock_object(
            self.share_manager.driver, 'ensure_share')
        mock_ensure_shares = self.mock_object(
            self.share_manager.driver, 'ensure_shares')
        self.mock_object(self.share_manager.db,
                         'backend_info_get',
                         mock.Mock(return_value=old_backend_info))
        self.mock_object(
            self.share_manager.driver, 'get_backend_info',
            mock.Mock(side_effect=raise_exception))
        # call of 'init_host' method
        self.assertRaises(
            exception.ManilaException,
            self.share_manager.init_host,
        )
        # verification of call
        self.share_manager.db.backend_info_get.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), self.share_manager.host)
        self.share_manager.driver.get_backend_info.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext))
        mock_ensure_share.assert_not_called()
        mock_ensure_shares.assert_not_called()
    def test_init_host_with_exception_on_update_access_rules(self):
        """init_host logs but survives update_access_rules() failures.

        A failure while reapplying access rules for one instance must not
        abort host initialization for the remaining instances.
        """
        def raise_exception(*args, **kwargs):
            raise exception.ManilaException(message="Fake raise")
        instances, rules = self._setup_init_mocks()
        share_server = 'fake_share_server_does_not_matter'
        fake_update_instances = {
            instances[0]['id']: {'status': 'available'},
            instances[2]['id']: {'status': 'available'},
            instances[4]['id']: {'status': 'available'}
        }
        smanager = self.share_manager
        self.mock_object(smanager.db, 'share_instances_get_all_by_host',
                         mock.Mock(return_value=instances))
        self.mock_object(self.share_manager.db, 'share_instance_get',
                         mock.Mock(side_effect=[instances[0], instances[2],
                                                instances[4]]))
        self.mock_object(self.share_manager.driver, 'ensure_share',
                         mock.Mock(return_value=None))
        self.mock_object(self.share_manager.driver, 'ensure_shares',
                         mock.Mock(return_value=fake_update_instances))
        self.mock_object(smanager, '_ensure_share_instance_has_pool')
        self.mock_object(smanager, '_get_share_server',
                         mock.Mock(return_value=share_server))
        self.mock_object(smanager, 'publish_service_capabilities')
        self.mock_object(manager.LOG, 'exception')
        self.mock_object(manager.LOG, 'info')
        self.mock_object(smanager.db, 'share_access_get_all_for_share',
                         mock.Mock(return_value=rules))
        # Reapplying access rules raises for every instance.
        self.mock_object(smanager.access_helper, 'update_access_rules',
                         mock.Mock(side_effect=raise_exception))
        dict_instances = [self._get_share_replica_dict(
            instance, share_server=share_server) for instance in instances]
        # call of 'init_host' method
        smanager.init_host()
        # verification of call
        (smanager.db.share_instances_get_all_by_host.
            assert_called_once_with(utils.IsAMatcher(context.RequestContext),
                                    smanager.host))
        smanager.driver.do_setup.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext))
        smanager.driver.check_for_setup_error.assert_called_with()
        smanager._ensure_share_instance_has_pool.assert_has_calls([
            mock.call(utils.IsAMatcher(context.RequestContext), instances[0]),
            mock.call(utils.IsAMatcher(context.RequestContext), instances[2]),
        ])
        smanager.driver.ensure_shares.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            [dict_instances[0], dict_instances[2], dict_instances[4]])
        (self.share_manager.publish_service_capabilities.
            assert_called_once_with(
                utils.IsAMatcher(context.RequestContext)))
        manager.LOG.info.assert_any_call(
            mock.ANY,
            {'task': constants.TASK_STATE_MIGRATION_IN_PROGRESS,
             'id': instances[3]['id']},
        )
        manager.LOG.info.assert_any_call(
            mock.ANY,
            {'id': instances[1]['id'], 'status': instances[1]['status']},
        )
        smanager.access_helper.update_access_rules.assert_has_calls([
            mock.call(utils.IsAMatcher(context.RequestContext),
                      instances[4]['id'], share_server=share_server),
        ])
        # The failure is logged, not propagated.
        manager.LOG.exception.assert_has_calls([
            mock.call(mock.ANY, mock.ANY),
        ])
def test_create_share_instance_from_snapshot_with_server(self):
"""Test share can be created from snapshot if server exists."""
network = db_utils.create_share_network()
server = db_utils.create_share_server(
share_network_id=network['id'], host='fake_host',
backend_details=dict(fake='fake'))
parent_share = db_utils.create_share(share_network_id='net-id',
share_server_id=server['id'])
share = db_utils.create_share()
share_id = share['id']
snapshot = db_utils.create_snapshot(share_id=parent_share['id'])
snapshot_id = snapshot['id']
self.share_manager.create_share_instance(
self.context, share.instance['id'], snapshot_id=snapshot_id)
self.assertEqual(share_id, db.share_get(context.get_admin_context(),
share_id).id)
shr = db.share_get(self.context, share_id)
self.assertEqual(constants.STATUS_AVAILABLE, shr['status'])
self.assertEqual(server['id'], shr['instance']['share_server_id'])
def test_create_share_instance_from_snapshot_with_server_not_found(self):
"""Test creation from snapshot fails if server not found."""
parent_share = db_utils.create_share(share_network_id='net-id',
share_server_id='fake-id')
share = db_utils.create_share()
share_id = share['id']
snapshot = db_utils.create_snapshot(share_id=parent_share['id'])
snapshot_id = snapshot['id']
self.assertRaises(exception.ShareServerNotFound,
self.share_manager.create_share_instance,
self.context,
share.instance['id'],
snapshot_id=snapshot_id
)
shr = db.share_get(self.context, share_id)
self.assertEqual(constants.STATUS_ERROR, shr['status'])
def test_create_share_instance_from_snapshot(self):
"""Test share can be created from snapshot."""
share = db_utils.create_share()
share_id = share['id']
snapshot = db_utils.create_snapshot(share_id=share_id)
snapshot_id = snapshot['id']
self.share_manager.create_share_instance(
self.context, share.instance['id'], snapshot_id=snapshot_id)
self.assertEqual(share_id, db.share_get(context.get_admin_context(),
share_id).id)
shr = db.share_get(self.context, share_id)
self.assertEqual(constants.STATUS_AVAILABLE, shr['status'])
self.assertGreater(len(shr['export_location']), 0)
self.assertEqual(2, len(shr['export_locations']))
def test_create_share_instance_for_share_with_replication_support(self):
"""Test update call is made to update replica_state."""
share = db_utils.create_share(replication_type='writable')
share_id = share['id']
self.share_manager.create_share_instance(self.context,
share.instance['id'])
self.assertEqual(share_id, db.share_get(context.get_admin_context(),
share_id).id)
shr = db.share_get(self.context, share_id)
shr_instance = db.share_instance_get(self.context,
share.instance['id'])
self.assertEqual(constants.STATUS_AVAILABLE, shr['status'],)
self.assertEqual(constants.REPLICA_STATE_ACTIVE,
shr_instance['replica_state'])
    @ddt.data([], None)
    def test_create_share_replica_no_active_replicas(self, active_replicas):
        """Replica creation aborts when no active replica is available.

        The replica is flipped to 'error', the driver is never invoked,
        and a user-facing message is recorded.
        """
        replica = fake_replica()
        self.mock_object(db, 'share_replicas_get_available_active_replica',
                         mock.Mock(return_value=active_replicas))
        self.mock_object(
            db, 'share_replica_get', mock.Mock(return_value=replica))
        mock_replica_update_call = self.mock_object(db, 'share_replica_update')
        mock_driver_replica_call = self.mock_object(
            self.share_manager.driver, 'create_replica')
        self.assertRaises(exception.ReplicationException,
                          self.share_manager.create_share_replica,
                          self.context, replica)
        mock_replica_update_call.assert_called_once_with(
            mock.ANY, replica['id'], {'status': constants.STATUS_ERROR,
                                      'replica_state': constants.STATUS_ERROR})
        self.assertFalse(mock_driver_replica_call.called)
        self.share_manager.message_api.create.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            message_field.Action.CREATE,
            replica['project_id'],
            resource_type=message_field.Resource.SHARE_REPLICA,
            resource_id=replica['id'],
            detail=message_field.Detail.NO_ACTIVE_REPLICA)
    def test_create_share_replica_with_share_network_id_and_not_dhss(self):
        """A replica with a share network is invalid when DHSS is False."""
        replica = fake_replica()
        manager.CONF.set_default('driver_handles_share_servers', False)
        self.mock_object(db, 'share_access_get_all_for_share',
                         mock.Mock(return_value=[]))
        self.mock_object(db, 'share_replicas_get_available_active_replica',
                         mock.Mock(return_value=fake_replica(id='fake2')))
        self.mock_object(db, 'share_replica_get',
                         mock.Mock(return_value=replica))
        mock_replica_update_call = self.mock_object(db, 'share_replica_update')
        mock_driver_replica_call = self.mock_object(
            self.share_manager.driver, 'create_replica')
        self.assertRaises(exception.InvalidDriverMode,
                          self.share_manager.create_share_replica,
                          self.context, replica)
        # The replica lands in 'error' and the driver is never called.
        mock_replica_update_call.assert_called_once_with(
            mock.ANY, replica['id'], {'status': constants.STATUS_ERROR,
                                      'replica_state': constants.STATUS_ERROR})
        self.assertFalse(mock_driver_replica_call.called)
        self.share_manager.message_api.create.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            message_field.Action.CREATE,
            replica['project_id'],
            resource_type=message_field.Resource.SHARE_REPLICA,
            resource_id=replica['id'],
            detail=message_field.Detail.UNEXPECTED_NETWORK)
    def test_create_share_replica_with_share_server_exception(self):
        """Replica creation fails when the share server is not found."""
        replica = fake_replica()
        manager.CONF.set_default('driver_handles_share_servers', True)
        self.mock_object(db, 'share_instance_access_copy',
                         mock.Mock(return_value=[]))
        self.mock_object(db, 'share_replicas_get_available_active_replica',
                         mock.Mock(return_value=fake_replica(id='fake2')))
        self.mock_object(db, 'share_replica_get',
                         mock.Mock(return_value=replica))
        mock_replica_update_call = self.mock_object(db, 'share_replica_update')
        mock_driver_replica_call = self.mock_object(
            self.share_manager.driver, 'create_replica')
        self.assertRaises(exception.NotFound,
                          self.share_manager.create_share_replica,
                          self.context, replica)
        # The replica lands in 'error' and the driver is never called.
        mock_replica_update_call.assert_called_once_with(
            mock.ANY, replica['id'], {'status': constants.STATUS_ERROR,
                                      'replica_state': constants.STATUS_ERROR})
        self.assertFalse(mock_driver_replica_call.called)
        self.share_manager.message_api.create.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            message_field.Action.CREATE,
            replica['project_id'],
            resource_type=message_field.Resource.SHARE_REPLICA,
            resource_id=replica['id'],
            detail=message_field.Detail.NO_SHARE_SERVER)
    def test_create_share_replica_driver_error_on_creation(self):
        """Driver failure during create_replica errors out the replica.

        The replica and its access-rules status are set to error, export
        locations are not updated, and a user message is recorded.
        """
        fake_access_rules = [{'id': '1'}, {'id': '2'}, {'id': '3'}]
        replica = fake_replica(share_network_id='')
        replica_2 = fake_replica(id='fake2')
        self.mock_object(db, 'share_replica_get',
                         mock.Mock(return_value=replica))
        self.mock_object(db, 'share_instance_access_copy',
                         mock.Mock(return_value=fake_access_rules))
        self.mock_object(db, 'share_replicas_get_available_active_replica',
                         mock.Mock(return_value=replica_2))
        self.mock_object(db, 'share_replicas_get_all_by_share',
                         mock.Mock(return_value=[replica, replica_2]))
        self.mock_object(self.share_manager,
                         '_provide_share_server_for_share',
                         mock.Mock(return_value=('FAKE_SERVER', replica)))
        self.mock_object(self.share_manager,
                         '_get_replica_snapshots_for_snapshot',
                         mock.Mock(return_value=[]))
        mock_replica_update_call = self.mock_object(db, 'share_replica_update')
        mock_export_locs_update_call = self.mock_object(
            db, 'share_export_locations_update')
        mock_log_error = self.mock_object(manager.LOG, 'error')
        mock_log_info = self.mock_object(manager.LOG, 'info')
        self.mock_object(db, 'share_instance_access_get',
                         mock.Mock(return_value=fake_access_rules[0]))
        mock_share_replica_access_update = self.mock_object(
            self.share_manager.access_helper,
            'get_and_update_share_instance_access_rules_status')
        self.mock_object(self.share_manager, '_get_share_server')
        # The driver blows up when asked to create the replica.
        driver_call = self.mock_object(
            self.share_manager.driver, 'create_replica',
            mock.Mock(side_effect=exception.ManilaException))
        self.assertRaises(exception.ManilaException,
                          self.share_manager.create_share_replica,
                          self.context, replica)
        mock_replica_update_call.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), replica['id'],
            {'status': constants.STATUS_ERROR,
             'replica_state': constants.STATUS_ERROR})
        mock_share_replica_access_update.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            share_instance_id=replica['id'],
            status=constants.SHARE_INSTANCE_RULES_ERROR)
        self.assertFalse(mock_export_locs_update_call.called)
        self.assertTrue(mock_log_error.called)
        self.assertFalse(mock_log_info.called)
        self.assertTrue(driver_call.called)
        self.share_manager.message_api.create.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            message_field.Action.CREATE,
            replica['project_id'],
            resource_type=message_field.Resource.SHARE_REPLICA,
            resource_id=replica['id'],
            exception=mock.ANY)
    def test_create_share_replica_invalid_locations_state(self):
        """Non-list export_locations from the driver triggers a warning.

        Neither the replica record nor the export locations are updated in
        the database in that case.
        """
        driver_retval = {
            'export_locations': 'FAKE_EXPORT_LOC',
        }
        replica = fake_replica(share_network='',
                               access_rules_status=constants.STATUS_ACTIVE)
        replica_2 = fake_replica(id='fake2')
        fake_access_rules = [{'id': '1'}, {'id': '2'}]
        self.mock_object(db, 'share_replicas_get_available_active_replica',
                         mock.Mock(return_value=replica_2))
        self.mock_object(db, 'share_replicas_get_all_by_share',
                         mock.Mock(return_value=[replica, replica_2]))
        self.mock_object(db, 'share_replica_get',
                         mock.Mock(return_value=replica))
        self.mock_object(db, 'share_instance_access_copy',
                         mock.Mock(return_value=fake_access_rules))
        self.mock_object(self.share_manager,
                         '_provide_share_server_for_share',
                         mock.Mock(return_value=('FAKE_SERVER', replica)))
        self.mock_object(self.share_manager, '_get_share_server',
                         mock.Mock(return_value=None))
        self.mock_object(self.share_manager,
                         '_get_replica_snapshots_for_snapshot',
                         mock.Mock(return_value=[]))
        mock_replica_update_call = self.mock_object(db, 'share_replica_update')
        mock_export_locs_update_call = self.mock_object(
            db, 'share_export_locations_update')
        mock_log_info = self.mock_object(manager.LOG, 'info')
        mock_log_warning = self.mock_object(manager.LOG, 'warning')
        mock_log_error = self.mock_object(manager.LOG, 'error')
        # Driver "succeeds" but returns malformed export locations.
        driver_call = self.mock_object(
            self.share_manager.driver, 'create_replica',
            mock.Mock(return_value=driver_retval))
        self.mock_object(db, 'share_instance_access_get',
                         mock.Mock(return_value=fake_access_rules[0]))
        mock_share_replica_access_update = self.mock_object(
            self.share_manager.access_helper,
            'get_and_update_share_instance_access_rules_status')
        self.share_manager.create_share_replica(self.context, replica)
        self.assertFalse(mock_replica_update_call.called)
        mock_share_replica_access_update.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            share_instance_id=replica['id'], status=constants.STATUS_ACTIVE)
        self.assertFalse(mock_export_locs_update_call.called)
        self.assertTrue(mock_log_info.called)
        self.assertTrue(mock_log_warning.called)
        self.assertFalse(mock_log_error.called)
        self.assertTrue(driver_call.called)
        # The driver receives the full replica list as its second positional
        # argument.
        call_args = driver_call.call_args_list[0][0]
        replica_list_arg = call_args[1]
        r_ids = [r['id'] for r in replica_list_arg]
        for r in (replica, replica_2):
            self.assertIn(r['id'], r_ids)
        self.assertEqual(2, len(r_ids))
def test_create_share_replica_no_availability_zone(self):
replica = fake_replica(
availability_zone=None, share_network='',
replica_state=constants.REPLICA_STATE_OUT_OF_SYNC)
replica_2 = fake_replica(id='fake2')
self.mock_object(db, 'share_replicas_get_all_by_share',
mock.Mock(return_value=[replica, replica_2]))
self.share_manager.availability_zone = 'fake_az'
fake_access_rules = [{'id': '1'}, {'id': '2'}, {'id': '3'}]
self.mock_object(db, 'share_replica_get',
mock.Mock(return_value=replica))
self.mock_object(db, 'share_instance_access_copy',
mock.Mock(return_value=fake_access_rules))
self.mock_object(db, 'share_replicas_get_available_active_replica',
mock.Mock(return_value=replica_2))
self.mock_object(self.share_manager,
'_provide_share_server_for_share',
mock.Mock(return_value=('FAKE_SERVER', replica)))
self.mock_object(self.share_manager,
'_get_replica_snapshots_for_snapshot',
mock.Mock(return_value=[]))
mock_replica_update_call = self.mock_object(
db, 'share_replica_update', mock.Mock(return_value=replica))
mock_calls = [
mock.call(mock.ANY, replica['id'],
{'availability_zone': 'fake_az'}, with_share_data=True),
mock.call(mock.ANY, replica['id'],
{'status': constants.STATUS_AVAILABLE,
'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC}),
]
mock_export_locs_update_call = self.mock_object(
db, 'share_export_locations_update')
mock_log_info = self.mock_object(manager.LOG, 'info')
mock_log_warning = self.mock_object(manager.LOG, 'warning')
mock_log_error = self.mock_object(manager.LOG, 'warning')
self.mock_object(db, 'share_instance_access_get',
mock.Mock(return_value=fake_access_rules[0]))
mock_share_replica_access_update = self.mock_object(
self.share_manager, '_update_share_replica_access_rules_state')
driver_call = self.mock_object(
self.share_manager.driver, 'create_replica',
mock.Mock(return_value=replica))
self.mock_object(self.share_manager, '_get_share_server', mock.Mock())
self.share_manager.create_share_replica(self.context, replica)
mock_replica_update_call.assert_has_calls(mock_calls, any_order=False)
mock_share_replica_access_update.assert_called_once_with(
mock.ANY, replica['id'], replica['access_rules_status'])
self.assertTrue(mock_export_locs_update_call.called)
self.assertTrue(mock_log_info.called)
self.assertFalse(mock_log_warning.called)
self.assertFalse(mock_log_error.called)
self.assertTrue(driver_call.called)
@ddt.data(True, False)
def test_create_share_replica(self, has_snapshots):
replica = fake_replica(
share_network='', replica_state=constants.REPLICA_STATE_IN_SYNC)
replica_2 = fake_replica(id='fake2')
snapshots = ([fakes.fake_snapshot(create_instance=True)]
if has_snapshots else [])
snapshot_instances = [
fakes.fake_snapshot_instance(share_instance_id=replica['id']),
fakes.fake_snapshot_instance(share_instance_id='fake2'),
]
fake_access_rules = [{'id': '1'}, {'id': '2'}, {'id': '3'}]
self.mock_object(db, 'share_replica_get',
mock.Mock(return_value=replica))
self.mock_object(db, 'share_instance_access_copy',
mock.Mock(return_value=fake_access_rules))
self.mock_object(db, 'share_replicas_get_available_active_replica',
mock.Mock(return_value=replica_2))
self.mock_object(self.share_manager,
'_provide_share_server_for_share',
mock.Mock(return_value=('FAKE_SERVER', replica)))
self.mock_object(db, 'share_replicas_get_all_by_share',
mock.Mock(return_value=[replica, replica_2]))
self.mock_object(db, 'share_snapshot_get_all_for_share', mock.Mock(
return_value=snapshots))
mock_instance_get_call = self.mock_object(
db, 'share_snapshot_instance_get_all_with_filters',
mock.Mock(return_value=snapshot_instances))
mock_replica_update_call = self.mock_object(db, 'share_replica_update')
mock_export_locs_update_call = self.mock_object(
db, 'share_export_locations_update')
mock_log_info = self.mock_object(manager.LOG, 'info')
mock_log_warning = self.mock_object(manager.LOG, 'warning')
mock_log_error = self.mock_object(manager.LOG, 'warning')
self.mock_object(db, 'share_instance_access_get',
mock.Mock(return_value=fake_access_rules[0]))
mock_share_replica_access_update = self.mock_object(
self.share_manager.access_helper,
'get_and_update_share_instance_access_rules_status')
driver_call = self.mock_object(
self.share_manager.driver, 'create_replica',
mock.Mock(return_value=replica))
self.mock_object(self.share_manager, '_get_share_server')
self.share_manager.create_share_replica(self.context, replica)
mock_replica_update_call.assert_called_once_with(
utils.IsAMatcher(context.RequestContext), replica['id'],
{'status': constants.STATUS_AVAILABLE,
'replica_state': constants.REPLICA_STATE_IN_SYNC})
mock_share_replica_access_update.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
share_instance_id=replica['id'],
status=replica['access_rules_status'])
self.assertTrue(mock_export_locs_update_call.called)
self.assertTrue(mock_log_info.called)
self.assertFalse(mock_log_warning.called)
self.assertFalse(mock_log_error.called)
self.assertTrue(driver_call.called)
call_args = driver_call.call_args_list[0][0]
replica_list_arg = call_args[1]
snapshot_list_arg = call_args[4]
r_ids = [r['id'] for r in replica_list_arg]
for r in (replica, replica_2):
self.assertIn(r['id'], r_ids)
self.assertEqual(2, len(r_ids))
if has_snapshots:
for snapshot_dict in snapshot_list_arg:
self.assertIn('active_replica_snapshot', snapshot_dict)
self.assertIn('share_replica_snapshot', snapshot_dict)
else:
self.assertFalse(mock_instance_get_call.called)
    def test_delete_share_replica_access_rules_exception(self):
        """Failure to delete access rules aborts replica deletion.

        The replica is marked 'error', the driver delete is never reached,
        and a user message is recorded.
        """
        replica = fake_replica()
        replica_2 = fake_replica(id='fake_2')
        self.mock_object(db, 'share_replicas_get_all_by_share',
                         mock.Mock(return_value=[replica, replica_2]))
        active_replica = fake_replica(
            id='Current_active_replica',
            replica_state=constants.REPLICA_STATE_ACTIVE)
        mock_exception_log = self.mock_object(manager.LOG, 'exception')
        self.mock_object(db, 'share_replica_get',
                         mock.Mock(return_value=replica))
        self.mock_object(db, 'share_replicas_get_available_active_replica',
                         mock.Mock(return_value=active_replica))
        self.mock_object(self.share_manager, '_get_share_server')
        self.mock_object(self.share_manager.access_helper,
                         'update_access_rules')
        mock_replica_update_call = self.mock_object(db, 'share_replica_update')
        mock_replica_delete_call = self.mock_object(db, 'share_replica_delete')
        mock_drv_delete_replica_call = self.mock_object(
            self.share_manager.driver, 'delete_replica')
        # This second mock wins: deleting access rules raises.
        self.mock_object(
            self.share_manager.access_helper, 'update_access_rules',
            mock.Mock(side_effect=exception.ManilaException))
        self.assertRaises(exception.ManilaException,
                          self.share_manager.delete_share_replica,
                          self.context, replica['id'],
                          share_id=replica['share_id'])
        mock_replica_update_call.assert_called_once_with(
            mock.ANY, replica['id'], {'status': constants.STATUS_ERROR})
        self.assertFalse(mock_drv_delete_replica_call.called)
        self.assertFalse(mock_replica_delete_call.called)
        self.assertFalse(mock_exception_log.called)
        self.share_manager.message_api.create.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            message_field.Action.DELETE_ACCESS_RULES,
            replica['project_id'],
            resource_type=message_field.Resource.SHARE_REPLICA,
            resource_id=replica['id'],
            exception=mock.ANY)
    def test_delete_share_replica_drv_misbehavior_ignored_with_the_force(self):
        """With force=True a driver failure is logged and delete proceeds."""
        replica = fake_replica()
        active_replica = fake_replica(id='Current_active_replica')
        mock_exception_log = self.mock_object(manager.LOG, 'exception')
        self.mock_object(db, 'share_replicas_get_all_by_share',
                         mock.Mock(return_value=[replica, active_replica]))
        self.mock_object(db, 'share_replica_get',
                         mock.Mock(return_value=replica))
        self.mock_object(db, 'share_replicas_get_available_active_replica',
                         mock.Mock(return_value=active_replica))
        self.mock_object(self.share_manager, '_get_share_server',
                         mock.Mock(return_value=None))
        self.mock_object(self.share_manager.access_helper,
                         'update_access_rules')
        self.mock_object(
            db, 'share_snapshot_instance_get_all_with_filters',
            mock.Mock(return_value=[]))
        mock_snap_instance_delete = self.mock_object(
            db, 'share_snapshot_instance_delete')
        mock_replica_update_call = self.mock_object(db, 'share_replica_update')
        mock_replica_delete_call = self.mock_object(db, 'share_replica_delete')
        # The driver misbehaves when asked to delete the replica.
        mock_drv_delete_replica_call = self.mock_object(
            self.share_manager.driver, 'delete_replica',
            mock.Mock(side_effect=exception.ManilaException))
        self.mock_object(
            self.share_manager.access_helper, 'update_access_rules')
        self.share_manager.delete_share_replica(
            self.context, replica['id'], share_id=replica['share_id'],
            force=True)
        # The DB record is removed despite the driver error being logged.
        self.assertFalse(mock_replica_update_call.called)
        self.assertTrue(mock_replica_delete_call.called)
        self.assertEqual(1, mock_exception_log.call_count)
        self.assertTrue(mock_drv_delete_replica_call.called)
        self.assertFalse(mock_snap_instance_delete.called)
    def test_delete_share_replica_driver_exception(self):
        """Without force, a driver failure re-raises and keeps the record."""
        replica = fake_replica()
        active_replica = fake_replica(id='Current_active_replica')
        self.mock_object(db, 'share_replicas_get_all_by_share',
                         mock.Mock(return_value=[replica, active_replica]))
        self.mock_object(db, 'share_replica_get',
                         mock.Mock(return_value=replica))
        self.mock_object(db, 'share_replicas_get_available_active_replica',
                         mock.Mock(return_value=active_replica))
        self.mock_object(self.share_manager, '_get_share_server',
                         mock.Mock(return_value=None))
        mock_snapshot_get_call = self.mock_object(
            db, 'share_snapshot_instance_get_all_with_filters',
            mock.Mock(return_value=[]))
        mock_replica_update_call = self.mock_object(db, 'share_replica_update')
        mock_replica_delete_call = self.mock_object(db, 'share_replica_delete')
        self.mock_object(
            self.share_manager.access_helper, 'update_access_rules')
        mock_drv_delete_replica_call = self.mock_object(
            self.share_manager.driver, 'delete_replica',
            mock.Mock(side_effect=exception.ManilaException))
        self.assertRaises(exception.ManilaException,
                          self.share_manager.delete_share_replica,
                          self.context, replica['id'],
                          share_id=replica['share_id'])
        # The replica is updated (to error) but not deleted from the DB.
        self.assertTrue(mock_replica_update_call.called)
        self.assertFalse(mock_replica_delete_call.called)
        self.assertTrue(mock_drv_delete_replica_call.called)
        self.assertTrue(mock_snapshot_get_call.called)
    def test_delete_share_replica_both_exceptions_ignored_with_the_force(self):
        """force=True swallows both access-rule and driver failures.

        Both errors are logged, the replica is flipped to 'error' once,
        and its snapshot instances plus the DB record are still deleted.
        """
        replica = fake_replica()
        active_replica = fake_replica(id='Current_active_replica')
        snapshots = [
            fakes.fake_snapshot(share_id=replica['id'],
                                status=constants.STATUS_AVAILABLE),
            fakes.fake_snapshot(share_id=replica['id'],
                                id='test_creating_to_err',
                                status=constants.STATUS_CREATING)
        ]
        self.mock_object(db, 'share_replicas_get_all_by_share',
                         mock.Mock(return_value=[replica, active_replica]))
        mock_exception_log = self.mock_object(manager.LOG, 'exception')
        self.mock_object(db, 'share_replica_get',
                         mock.Mock(return_value=replica))
        self.mock_object(db, 'share_replicas_get_available_active_replica',
                         mock.Mock(return_value=active_replica))
        self.mock_object(self.share_manager, '_get_share_server',
                         mock.Mock(return_value=None))
        self.mock_object(
            db, 'share_snapshot_instance_get_all_with_filters',
            mock.Mock(return_value=snapshots))
        mock_snapshot_instance_delete_call = self.mock_object(
            db, 'share_snapshot_instance_delete')
        mock_replica_update_call = self.mock_object(db, 'share_replica_update')
        mock_replica_delete_call = self.mock_object(db, 'share_replica_delete')
        # Both the access-rule removal and the driver delete raise.
        self.mock_object(
            self.share_manager.access_helper, 'update_access_rules',
            mock.Mock(side_effect=exception.ManilaException))
        mock_drv_delete_replica_call = self.mock_object(
            self.share_manager.driver, 'delete_replica',
            mock.Mock(side_effect=exception.ManilaException))
        self.share_manager.delete_share_replica(
            self.context, replica['id'], share_id=replica['share_id'],
            force=True)
        mock_replica_update_call.assert_called_once_with(
            mock.ANY, replica['id'], {'status': constants.STATUS_ERROR})
        self.assertTrue(mock_replica_delete_call.called)
        self.assertEqual(2, mock_exception_log.call_count)
        self.assertTrue(mock_drv_delete_replica_call.called)
        self.assertEqual(2, mock_snapshot_instance_delete_call.call_count)
    def test_delete_share_replica(self):
        """Happy-path replica delete removes snapshots and the DB record."""
        replica = fake_replica()
        active_replica = fake_replica(id='current_active_replica')
        snapshots = [
            fakes.fake_snapshot(share_id=replica['share_id'],
                                status=constants.STATUS_AVAILABLE),
            fakes.fake_snapshot(share_id=replica['share_id'],
                                id='test_creating_to_err',
                                status=constants.STATUS_CREATING)
        ]
        self.mock_object(
            db, 'share_snapshot_instance_get_all_with_filters',
            mock.Mock(return_value=snapshots))
        self.mock_object(db, 'share_replicas_get_all_by_share',
                         mock.Mock(return_value=[replica, active_replica]))
        self.mock_object(db, 'share_replica_get',
                         mock.Mock(return_value=replica))
        self.mock_object(db, 'share_replicas_get_available_active_replica',
                         mock.Mock(return_value=active_replica))
        self.mock_object(self.share_manager, '_get_share_server',
                         mock.Mock(return_value=None))
        mock_info_log = self.mock_object(manager.LOG, 'info')
        mock_snapshot_instance_delete_call = self.mock_object(
            db, 'share_snapshot_instance_delete')
        mock_replica_update_call = self.mock_object(db, 'share_replica_update')
        mock_replica_delete_call = self.mock_object(db, 'share_replica_delete')
        self.mock_object(
            self.share_manager.access_helper, 'update_access_rules')
        mock_drv_delete_replica_call = self.mock_object(
            self.share_manager.driver, 'delete_replica')
        self.share_manager.delete_share_replica(self.context, replica)
        # No status update is needed on the happy path; both snapshot
        # instances and the replica record are deleted.
        self.assertFalse(mock_replica_update_call.called)
        self.assertTrue(mock_replica_delete_call.called)
        self.assertTrue(mock_info_log.called)
        self.assertTrue(mock_drv_delete_replica_call.called)
        self.assertEqual(2, mock_snapshot_instance_delete_call.call_count)
    def test_promote_share_replica_no_active_replica(self):
        """Promote without an active replica raises ReplicationException.

        The driver must never be invoked and the replica's status must be
        reset back to 'available' after the failed attempt.
        """
        replica = fake_replica()
        replica_list = [replica]
        self.mock_object(db, 'share_replica_get',
                         mock.Mock(return_value=replica))
        self.mock_object(self.share_manager, '_get_share_server')
        self.mock_object(db, 'share_replicas_get_available_active_replica',
                         mock.Mock(return_value=replica_list))
        mock_info_log = self.mock_object(manager.LOG, 'info')
        mock_driver_call = self.mock_object(self.share_manager.driver,
                                            'promote_replica')
        mock_replica_update = self.mock_object(db, 'share_replica_update')
        # Expected rollback: status goes back to 'available'.
        expected_update_call = mock.call(
            mock.ANY, replica['id'], {'status': constants.STATUS_AVAILABLE})
        self.assertRaises(exception.ReplicationException,
                          self.share_manager.promote_share_replica,
                          self.context, replica)
        self.assertFalse(mock_info_log.called)
        self.assertFalse(mock_driver_call.called)
        mock_replica_update.assert_has_calls([expected_update_call])
def test_promote_share_replica_driver_exception(self):
replica = fake_replica()
active_replica = fake_replica(
id='current_active_replica',
replica_state=constants.REPLICA_STATE_ACTIVE)
replica_list = [replica, active_replica]
self.mock_object(db, 'share_access_get_all_for_share',
mock.Mock(return_value=[]))
self.mock_object(db, 'share_replica_get',
mock.Mock(return_value=replica))
self.mock_object(self.share_manager, '_get_share_server')
self.mock_object(db, 'share_replicas_get_all_by_share',
mock.Mock(return_value=replica_list))
self.mock_object(self.share_manager.driver, 'promote_replica',
mock.Mock(side_effect=exception.ManilaException))
mock_info_log = self.mock_object(manager.LOG, 'info')
mock_replica_update = self.mock_object(db, 'share_replica_update')
expected_update_calls = [mock.call(
mock.ANY, r['id'], {'status': constants.STATUS_ERROR})
for r in(replica, active_replica)]
self.assertRaises(exception.ManilaException,
self.share_manager.promote_share_replica,
self.context, replica)
mock_replica_update.assert_has_calls(expected_update_calls)
self.assertFalse(mock_info_log.called)
expected_message_calls = [
mock.call(
utils.IsAMatcher(context.RequestContext),
message_field.Action.PROMOTE,
r['project_id'],
resource_type=message_field.Resource.SHARE_REPLICA,
resource_id=r['id'],
exception=mock.ANY)
for r in (replica, active_replica)]
self.share_manager.message_api.create.assert_has_calls(
expected_message_calls)
    @ddt.data([], None)
    def test_promote_share_replica_driver_update_nothing_has_snaps(self,
                                                                   retval):
        """Promote succeeds when the driver returns no replica updates.

        With an empty/None driver return, the manager must still flip the
        promoted replica to active, demote the old active replica to
        out-of-sync, error out the snapshot instance stuck in 'creating',
        and never touch export locations.
        """
        replica = fake_replica(
            replication_type=constants.REPLICATION_TYPE_READABLE)
        active_replica = fake_replica(
            id='current_active_replica',
            replica_state=constants.REPLICA_STATE_ACTIVE)
        # One healthy snapshot instance, one stuck in 'creating'.
        snapshots_instances = [
            fakes.fake_snapshot(create_instance=True,
                                share_id=replica['share_id'],
                                status=constants.STATUS_AVAILABLE),
            fakes.fake_snapshot(create_instance=True,
                                share_id=replica['share_id'],
                                id='test_creating_to_err',
                                status=constants.STATUS_CREATING)
        ]
        replica_list = [replica, active_replica]
        self.mock_object(db, 'share_replica_get',
                         mock.Mock(return_value=replica))
        self.mock_object(db, 'share_access_get_all_for_share',
                         mock.Mock(return_value=[]))
        self.mock_object(self.share_manager, '_get_share_server')
        self.mock_object(db, 'share_replicas_get_all_by_share',
                         mock.Mock(return_value=replica_list))
        self.mock_object(
            db, 'share_snapshot_instance_get_all_with_filters',
            mock.Mock(return_value=snapshots_instances))
        self.mock_object(
            self.share_manager.driver, 'promote_replica',
            mock.Mock(return_value=retval))
        mock_snap_instance_update = self.mock_object(
            db, 'share_snapshot_instance_update')
        mock_info_log = self.mock_object(manager.LOG, 'info')
        mock_export_locs_update = self.mock_object(
            db, 'share_export_locations_update')
        mock_replica_update = self.mock_object(db, 'share_replica_update')
        # Promoted replica becomes active and writable...
        call_1 = mock.call(mock.ANY, replica['id'],
                           {'status': constants.STATUS_AVAILABLE,
                            'replica_state': constants.REPLICA_STATE_ACTIVE,
                            'cast_rules_to_readonly': False})
        # ...while the old active replica is demoted to read-only.
        call_2 = mock.call(
            mock.ANY, 'current_active_replica',
            {'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC,
             'cast_rules_to_readonly': True})
        expected_update_calls = [call_1, call_2]
        self.share_manager.promote_share_replica(self.context, replica)
        self.assertFalse(mock_export_locs_update.called)
        mock_replica_update.assert_has_calls(expected_update_calls,
                                             any_order=True)
        mock_snap_instance_update.assert_called_once_with(
            mock.ANY, 'test_creating_to_err',
            {'status': constants.STATUS_ERROR})
        self.assertEqual(2, mock_info_log.call_count)
    @ddt.data(constants.REPLICATION_TYPE_READABLE,
              constants.REPLICATION_TYPE_WRITABLE,
              constants.REPLICATION_TYPE_DR)
    def test_promote_share_replica_driver_updates_replica_list(self, rtype):
        """Driver-returned replica updates are applied to the DB.

        The driver returns per-replica updates (export locations and
        replica_state); the manager must persist them, apply the
        reset-replication-change updates to the promoted replica, demote the
        old active replica (read-only only for the 'readable' type), and
        ignore entries for unknown replica ids.
        """
        replica = fake_replica(replication_type=rtype)
        active_replica = fake_replica(
            id='current_active_replica',
            replica_state=constants.REPLICA_STATE_ACTIVE)
        replica_list = [
            replica, active_replica, fake_replica(id=3),
            fake_replica(id='one_more_replica'),
        ]
        # Simulated driver return value; note 'other_replica' does not exist
        # in replica_list and the active replica's export_locations entry is
        # deliberately not a list (should not be written out).
        updated_replica_list = [
            {
                'id': replica['id'],
                'export_locations': ['TEST1', 'TEST2'],
                'replica_state': constants.REPLICA_STATE_ACTIVE,
            },
            {
                'id': 'current_active_replica',
                'export_locations': 'junk_return_value',
                'replica_state': constants.REPLICA_STATE_IN_SYNC,
            },
            {
                'id': 'other_replica',
                'export_locations': ['TEST3', 'TEST4'],
            },
            {
                'id': replica_list[3]['id'],
                'export_locations': ['TEST5', 'TEST6'],
                'replica_state': constants.REPLICA_STATE_IN_SYNC,
            },
        ]
        self.mock_object(db, 'share_replica_get',
                         mock.Mock(return_value=replica))
        self.mock_object(
            db, 'share_snapshot_instance_get_all_with_filters',
            mock.Mock(return_value=[]))
        self.mock_object(db, 'share_access_get_all_for_share',
                         mock.Mock(return_value=[]))
        self.mock_object(self.share_manager, '_get_share_server')
        self.mock_object(db, 'share_replicas_get_all_by_share',
                         mock.Mock(return_value=replica_list))
        mock_snap_instance_update = self.mock_object(
            db, 'share_snapshot_instance_update')
        self.mock_object(
            self.share_manager.driver, 'promote_replica',
            mock.Mock(return_value=updated_replica_list))
        mock_info_log = self.mock_object(manager.LOG, 'info')
        mock_export_locs_update = self.mock_object(
            db, 'share_export_locations_update')
        mock_replica_update = self.mock_object(db, 'share_replica_update')
        reset_replication_change_updates = {
            'replica_state': constants.STATUS_ACTIVE,
            'status': constants.STATUS_AVAILABLE,
            'cast_rules_to_readonly': False,
        }
        demoted_replica_updates = {
            'replica_state': constants.REPLICA_STATE_IN_SYNC,
            'cast_rules_to_readonly': False,
        }
        # Only 'readable' replication casts the demoted replica to read-only.
        if rtype == constants.REPLICATION_TYPE_READABLE:
            demoted_replica_updates['cast_rules_to_readonly'] = True
        reset_replication_change_call = mock.call(
            mock.ANY, replica['id'], reset_replication_change_updates)
        demoted_replica_update_call = mock.call(
            mock.ANY, active_replica['id'], demoted_replica_updates
        )
        additional_replica_update_call = mock.call(
            mock.ANY, replica_list[3]['id'], {
                'replica_state': constants.REPLICA_STATE_IN_SYNC,
            }
        )
        self.share_manager.promote_share_replica(self.context, replica)
        # Three entries carry list-typed export_locations worth persisting.
        self.assertEqual(3, mock_export_locs_update.call_count)
        mock_replica_update.assert_has_calls([
            demoted_replica_update_call,
            additional_replica_update_call,
            reset_replication_change_call,
        ])
        self.assertTrue(mock_info_log.called)
        self.assertFalse(mock_snap_instance_update.called)
@ddt.data('openstack1@watson#_pool0', 'openstack1@newton#_pool0')
def test_periodic_share_replica_update(self, host):
mock_debug_log = self.mock_object(manager.LOG, 'debug')
replicas = [
fake_replica(host='openstack1@watson#pool4'),
fake_replica(host='openstack1@watson#pool5'),
fake_replica(host='openstack1@newton#pool5'),
fake_replica(host='openstack1@newton#pool5'),
]
self.mock_object(self.share_manager.db, 'share_replicas_get_all',
mock.Mock(return_value=replicas))
mock_update_method = self.mock_object(
self.share_manager, '_share_replica_update')
self.share_manager.host = host
self.share_manager.periodic_share_replica_update(self.context)
self.assertEqual(2, mock_update_method.call_count)
self.assertEqual(1, mock_debug_log.call_count)
    @ddt.data(constants.REPLICA_STATE_IN_SYNC,
              constants.REPLICA_STATE_OUT_OF_SYNC)
    def test__share_replica_update_driver_exception(self, replica_state):
        """update_replica_state failure errors the replica and notifies.

        For replicas in either sync state, a driver exception must set both
        replica_state and status to 'error' and create a user message.
        """
        mock_debug_log = self.mock_object(manager.LOG, 'debug')
        replica = fake_replica(replica_state=replica_state)
        active_replica = fake_replica(
            replica_state=constants.REPLICA_STATE_ACTIVE)
        self.mock_object(db, 'share_replicas_get_all_by_share',
                         mock.Mock(return_value=[replica, active_replica]))
        self.mock_object(self.share_manager.db, 'share_replica_get',
                         mock.Mock(return_value=replica))
        self.mock_object(db, 'share_server_get',
                         mock.Mock(return_value='fake_share_server'))
        self.mock_object(self.share_manager.driver, 'update_replica_state',
                         mock.Mock(side_effect=exception.ManilaException))
        mock_db_update_call = self.mock_object(
            self.share_manager.db, 'share_replica_update')
        self.share_manager._share_replica_update(
            self.context, replica, share_id=replica['share_id'])
        mock_db_update_call.assert_called_once_with(
            self.context, replica['id'],
            {'replica_state': constants.STATUS_ERROR,
             'status': constants.STATUS_ERROR}
        )
        self.assertEqual(1, mock_debug_log.call_count)
        self.share_manager.message_api.create.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            message_field.Action.UPDATE,
            replica['project_id'],
            resource_type=message_field.Resource.SHARE_REPLICA,
            resource_id=replica['id'],
            exception=mock.ANY)
    def test__share_replica_update_driver_exception_ignored(self):
        """Driver exception handling for a replica already in 'error'.

        NOTE(review): despite the '_ignored' name, this asserts the same
        error-update and message behavior as the non-ignored case — the
        distinguishing input is the replica starting in STATUS_ERROR and the
        manager host matching the replica's host; confirm intent vs. name.
        """
        mock_debug_log = self.mock_object(manager.LOG, 'debug')
        replica = fake_replica(replica_state=constants.STATUS_ERROR)
        active_replica = fake_replica(replica_state=constants.STATUS_ACTIVE)
        self.mock_object(db, 'share_replicas_get_all_by_share',
                         mock.Mock(return_value=[replica, active_replica]))
        self.mock_object(self.share_manager.db, 'share_replica_get',
                         mock.Mock(return_value=replica))
        self.mock_object(db, 'share_server_get',
                         mock.Mock(return_value='fake_share_server'))
        self.share_manager.host = replica['host']
        self.mock_object(self.share_manager.driver, 'update_replica_state',
                         mock.Mock(side_effect=exception.ManilaException))
        mock_db_update_call = self.mock_object(
            self.share_manager.db, 'share_replica_update')
        self.share_manager._share_replica_update(
            self.context, replica, share_id=replica['share_id'])
        mock_db_update_call.assert_called_once_with(
            self.context, replica['id'],
            {'replica_state': constants.STATUS_ERROR,
             'status': constants.STATUS_ERROR}
        )
        self.assertEqual(1, mock_debug_log.call_count)
        self.share_manager.message_api.create.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            message_field.Action.UPDATE,
            replica['project_id'],
            resource_type=message_field.Resource.SHARE_REPLICA,
            resource_id=replica['id'],
            exception=mock.ANY)
    @ddt.data({'status': constants.STATUS_AVAILABLE,
               'replica_state': constants.REPLICA_STATE_ACTIVE, },
              {'status': constants.STATUS_DELETING,
               'replica_state': constants.REPLICA_STATE_IN_SYNC, },
              {'status': constants.STATUS_CREATING,
               'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC, },
              {'status': constants.STATUS_MANAGING,
               'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC, },
              {'status': constants.STATUS_UNMANAGING,
               'replica_state': constants.REPLICA_STATE_ACTIVE, },
              {'status': constants.STATUS_EXTENDING,
               'replica_state': constants.REPLICA_STATE_IN_SYNC, },
              {'status': constants.STATUS_SHRINKING,
               'replica_state': constants.REPLICA_STATE_IN_SYNC, })
    def test__share_replica_update_unqualified_replica(self, state):
        """Replicas in transitional/active states are skipped entirely.

        For every status/replica_state combination above, the update helper
        must do nothing: no driver call, no DB update, no logging.
        """
        mock_debug_log = self.mock_object(manager.LOG, 'debug')
        mock_warning_log = self.mock_object(manager.LOG, 'warning')
        mock_driver_call = self.mock_object(
            self.share_manager.driver, 'update_replica_state')
        mock_db_update_call = self.mock_object(
            self.share_manager.db, 'share_replica_update')
        replica = fake_replica(**state)
        self.mock_object(db, 'share_server_get',
                         mock.Mock(return_value='fake_share_server'))
        self.mock_object(db, 'share_replica_get',
                         mock.Mock(return_value=replica))
        self.share_manager._share_replica_update(self.context, replica,
                                                 share_id=replica['share_id'])
        self.assertFalse(mock_debug_log.called)
        self.assertFalse(mock_warning_log.called)
        self.assertFalse(mock_driver_call.called)
        self.assertFalse(mock_db_update_call.called)
    @ddt.data(None, constants.REPLICA_STATE_IN_SYNC,
              constants.REPLICA_STATE_OUT_OF_SYNC,
              constants.REPLICA_STATE_ACTIVE,
              constants.STATUS_ERROR)
    def test__share_replica_update(self, retval):
        """Driver's returned replica_state drives logging and DB updates.

        A driver return of 'active' logs a warning; any other non-None value
        does not. The driver must receive the snapshot list with both the
        active-replica and the share-replica snapshot instances attached.
        """
        mock_debug_log = self.mock_object(manager.LOG, 'debug')
        mock_warning_log = self.mock_object(manager.LOG, 'warning')
        replica_states = [constants.REPLICA_STATE_IN_SYNC,
                          constants.REPLICA_STATE_OUT_OF_SYNC]
        replica = fake_replica(replica_state=random.choice(replica_states),
                               share_server='fake_share_server')
        active_replica = fake_replica(
            id='fake2', replica_state=constants.STATUS_ACTIVE)
        snapshots = [fakes.fake_snapshot(
            create_instance=True, aggregate_status=constants.STATUS_AVAILABLE)]
        # One snapshot instance per replica in play.
        snapshot_instances = [
            fakes.fake_snapshot_instance(share_instance_id=replica['id']),
            fakes.fake_snapshot_instance(share_instance_id='fake2'),
        ]
        # Exercise the path where the replica dict lacks this optional key.
        del replica['availability_zone']
        self.mock_object(db, 'share_replicas_get_all_by_share',
                         mock.Mock(return_value=[replica, active_replica]))
        self.mock_object(db, 'share_server_get',
                         mock.Mock(return_value='fake_share_server'))
        mock_db_update_calls = []
        self.mock_object(self.share_manager.db, 'share_replica_get',
                         mock.Mock(return_value=replica))
        mock_driver_call = self.mock_object(
            self.share_manager.driver, 'update_replica_state',
            mock.Mock(return_value=retval))
        mock_db_update_call = self.mock_object(
            self.share_manager.db, 'share_replica_update')
        self.mock_object(db, 'share_snapshot_get_all_for_share',
                         mock.Mock(return_value=snapshots))
        self.mock_object(db, 'share_snapshot_instance_get_all_with_filters',
                         mock.Mock(return_value=snapshot_instances))
        self.share_manager._share_replica_update(
            self.context, replica, share_id=replica['share_id'])
        if retval == constants.REPLICA_STATE_ACTIVE:
            self.assertEqual(1, mock_warning_log.call_count)
        elif retval:
            self.assertEqual(0, mock_warning_log.call_count)
        self.assertTrue(mock_driver_call.called)
        # 5th positional arg to update_replica_state is the snapshot list.
        snapshot_list_arg = mock_driver_call.call_args[0][4]
        self.assertIn('active_replica_snapshot', snapshot_list_arg[0])
        self.assertIn('share_replica_snapshot', snapshot_list_arg[0])
        mock_db_update_call.assert_has_calls(mock_db_update_calls)
        self.assertEqual(1, mock_debug_log.call_count)
def test_update_share_replica_replica_not_found(self):
replica = fake_replica()
self.mock_object(
self.share_manager.db, 'share_replica_get', mock.Mock(
side_effect=exception.ShareReplicaNotFound(replica_id='fake')))
self.mock_object(self.share_manager, '_get_share_server')
driver_call = self.mock_object(
self.share_manager, '_share_replica_update')
self.assertRaises(
exception.ShareReplicaNotFound,
self.share_manager.update_share_replica,
self.context, replica, share_id=replica['share_id'])
self.assertFalse(driver_call.called)
def test_update_share_replica_replica(self):
replica_update_call = self.mock_object(
self.share_manager, '_share_replica_update')
self.mock_object(self.share_manager.db, 'share_replica_get')
retval = self.share_manager.update_share_replica(
self.context, 'fake_replica_id', share_id='fake_share_id')
self.assertIsNone(retval)
self.assertTrue(replica_update_call.called)
def _get_snapshot_instance_dict(self, snapshot_instance, share,
snapshot=None):
expected_snapshot_instance_dict = {
'status': constants.STATUS_CREATING,
'share_id': share['id'],
'share_name': snapshot_instance['share_name'],
'deleted': snapshot_instance['deleted'],
'share': share,
'updated_at': snapshot_instance['updated_at'],
'snapshot_id': snapshot_instance['snapshot_id'],
'id': snapshot_instance['id'],
'name': snapshot_instance['name'],
'created_at': snapshot_instance['created_at'],
'share_instance_id': snapshot_instance['share_instance_id'],
'progress': snapshot_instance['progress'],
'deleted_at': snapshot_instance['deleted_at'],
'provider_location': snapshot_instance['provider_location'],
}
if snapshot:
expected_snapshot_instance_dict.update({
'size': snapshot['size'],
})
return expected_snapshot_instance_dict
    def test_create_snapshot_driver_exception(self):
        """Driver NotFound during snapshot create errors the instance.

        The snapshot instance goes to 'error', the exception is re-raised,
        and a user message is created for the failed snapshot.
        """
        # NOTE(review): 'self' here is not the test instance — as a mock
        # side_effect this function receives the driver call's first
        # positional argument (the context); the name is misleading.
        def _raise_not_found(self, *args, **kwargs):
            raise exception.NotFound()
        share_id = 'FAKE_SHARE_ID'
        share = fakes.fake_share(id=share_id, instance={'id': 'fake_id'})
        snapshot_instance = fakes.fake_snapshot_instance(
            share_id=share_id, share=share, name='fake_snapshot')
        snapshot = fakes.fake_snapshot(
            share_id=share_id, share=share, instance=snapshot_instance,
            project_id=self.context.project_id)
        snapshot_id = snapshot['id']
        self.mock_object(self.share_manager.driver, "create_snapshot",
                         mock.Mock(side_effect=_raise_not_found))
        self.mock_object(self.share_manager, '_get_share_server',
                         mock.Mock(return_value=None))
        self.mock_object(self.share_manager.db, 'share_snapshot_instance_get',
                         mock.Mock(return_value=snapshot_instance))
        self.mock_object(self.share_manager.db, 'share_snapshot_get',
                         mock.Mock(return_value=snapshot))
        db_update = self.mock_object(
            self.share_manager.db, 'share_snapshot_instance_update')
        expected_snapshot_instance_dict = self._get_snapshot_instance_dict(
            snapshot_instance, share)
        self.assertRaises(exception.NotFound,
                          self.share_manager.create_snapshot,
                          self.context, share_id, snapshot_id)
        db_update.assert_called_once_with(self.context,
                                          snapshot_instance['id'],
                                          {'status': constants.STATUS_ERROR})
        self.share_manager.driver.create_snapshot.assert_called_once_with(
            self.context, expected_snapshot_instance_dict, share_server=None)
        self.share_manager.message_api.create.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            message_field.Action.CREATE,
            snapshot['project_id'],
            resource_type=message_field.Resource.SHARE_SNAPSHOT,
            resource_id=snapshot_instance['id'],
            exception=mock.ANY)
    @ddt.data({'model_update': {}, 'mount_snapshot_support': True},
              {'model_update': {}, 'mount_snapshot_support': False},
              {'model_update': {'export_locations': [
                  {'path': '/path1', 'is_admin_only': True},
                  {'path': '/path2', 'is_admin_only': False}
              ]}, 'mount_snapshot_support': True},
              {'model_update': {'export_locations': [
                  {'path': '/path1', 'is_admin_only': True},
                  {'path': '/path2', 'is_admin_only': False}
              ]}, 'mount_snapshot_support': False})
    @ddt.unpack
    def test_create_snapshot(self, model_update, mount_snapshot_support):
        """Successful snapshot create persists driver export locations.

        Snapshot export locations returned by the driver are stored only when
        the share supports mountable snapshots; the instance always ends up
        'available' at 100% progress.
        """
        export_locations = model_update.get('export_locations')
        share_id = 'FAKE_SHARE_ID'
        share = fakes.fake_share(
            id=share_id,
            instance={'id': 'fake_id'},
            mount_snapshot_support=mount_snapshot_support)
        snapshot_instance = fakes.fake_snapshot_instance(
            share_id=share_id, share=share, name='fake_snapshot')
        snapshot = fakes.fake_snapshot(
            share_id=share_id, share=share, instance=snapshot_instance)
        snapshot_id = snapshot['id']
        self.mock_object(self.share_manager.db, 'share_snapshot_get',
                         mock.Mock(return_value=snapshot))
        self.mock_object(self.share_manager.db, 'share_snapshot_instance_get',
                         mock.Mock(return_value=snapshot_instance))
        self.mock_object(self.share_manager, '_get_share_server',
                         mock.Mock(return_value=None))
        mock_export_update = self.mock_object(
            self.share_manager.db,
            'share_snapshot_instance_export_location_create')
        expected_update_calls = [
            mock.call(self.context, snapshot_instance['id'],
                      {'status': constants.STATUS_AVAILABLE,
                       'progress': '100%'})
        ]
        expected_snapshot_instance_dict = self._get_snapshot_instance_dict(
            snapshot_instance, share)
        self.mock_object(
            self.share_manager.driver, 'create_snapshot',
            mock.Mock(return_value=model_update))
        db_update = self.mock_object(
            self.share_manager.db, 'share_snapshot_instance_update')
        return_value = self.share_manager.create_snapshot(
            self.context, share_id, snapshot_id)
        self.assertIsNone(return_value)
        self.share_manager.driver.create_snapshot.assert_called_once_with(
            self.context, expected_snapshot_instance_dict, share_server=None)
        db_update.assert_has_calls(expected_update_calls, any_order=True)
        if mount_snapshot_support and export_locations:
            # Each stored export location is tagged with the instance id.
            snap_ins_id = snapshot.instance['id']
            for i in range(0, 2):
                export_locations[i]['share_snapshot_instance_id'] = snap_ins_id
            mock_export_update.assert_has_calls([
                mock.call(utils.IsAMatcher(context.RequestContext),
                          export_locations[0]),
                mock.call(utils.IsAMatcher(context.RequestContext),
                          export_locations[1]),
            ])
        else:
            mock_export_update.assert_not_called()
    @ddt.data(exception.ShareSnapshotIsBusy(snapshot_name='fake_name'),
              exception.NotFound())
    def test_delete_snapshot_driver_exception(self, exc):
        """Driver failure during delete sets 'error_deleting' and re-raises.

        Access rules are still revoked (delete_all_rules=True), the instance
        row is NOT destroyed, and a user message is created.
        """
        share_id = 'FAKE_SHARE_ID'
        share = fakes.fake_share(id=share_id, instance={'id': 'fake_id'},
                                 mount_snapshot_support=True)
        snapshot_instance = fakes.fake_snapshot_instance(
            share_id=share_id, share=share, name='fake_snapshot')
        snapshot = fakes.fake_snapshot(
            share_id=share_id, share=share, instance=snapshot_instance,
            project_id=self.context.project_id)
        snapshot_id = snapshot['id']
        update_access = self.mock_object(
            self.share_manager.snapshot_access_helper, 'update_access_rules')
        self.mock_object(self.share_manager.driver, "delete_snapshot",
                         mock.Mock(side_effect=exc))
        self.mock_object(self.share_manager, '_get_share_server',
                         mock.Mock(return_value=None))
        self.mock_object(self.share_manager.db, 'share_snapshot_instance_get',
                         mock.Mock(return_value=snapshot_instance))
        self.mock_object(self.share_manager.db, 'share_snapshot_get',
                         mock.Mock(return_value=snapshot))
        self.mock_object(
            self.share_manager.db, 'share_get', mock.Mock(return_value=share))
        db_update = self.mock_object(
            self.share_manager.db, 'share_snapshot_instance_update')
        db_destroy_call = self.mock_object(
            self.share_manager.db, 'share_snapshot_instance_delete')
        expected_snapshot_instance_dict = self._get_snapshot_instance_dict(
            snapshot_instance, share)
        mock_exception_log = self.mock_object(manager.LOG, 'exception')
        self.assertRaises(type(exc), self.share_manager.delete_snapshot,
                          self.context, snapshot_id)
        db_update.assert_called_once_with(
            mock.ANY, snapshot_instance['id'],
            {'status': constants.STATUS_ERROR_DELETING})
        self.share_manager.driver.delete_snapshot.assert_called_once_with(
            mock.ANY, expected_snapshot_instance_dict,
            share_server=None)
        update_access.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            snapshot_instance['id'], delete_all_rules=True, share_server=None)
        self.assertFalse(db_destroy_call.called)
        self.assertFalse(mock_exception_log.called)
        self.share_manager.message_api.create.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            message_field.Action.DELETE,
            snapshot['project_id'],
            resource_type=message_field.Resource.SHARE_SNAPSHOT,
            resource_id=snapshot_instance['id'],
            exception=mock.ANY)
@ddt.data(True, False)
def test_delete_snapshot_with_quota_error(self, quota_error):
share_id = 'FAKE_SHARE_ID'
share = fakes.fake_share(id=share_id)
snapshot_instance = fakes.fake_snapshot_instance(
share_id=share_id, share=share, name='fake_snapshot')
snapshot = fakes.fake_snapshot(
share_id=share_id, share=share, instance=snapshot_instance,
project_id=self.context.project_id, size=1)
snapshot_id = snapshot['id']
self.mock_object(self.share_manager.db, 'share_snapshot_get',
mock.Mock(return_value=snapshot))
self.mock_object(self.share_manager.db, 'share_snapshot_instance_get',
mock.Mock(return_value=snapshot_instance))
self.mock_object(self.share_manager.db, 'share_get',
mock.Mock(return_value=share))
self.mock_object(self.share_manager, '_get_share_server',
mock.Mock(return_value=None))
mock_exception_log = self.mock_object(manager.LOG, 'exception')
expected_exc_count = 1 if quota_error else 0
expected_snapshot_instance_dict = self._get_snapshot_instance_dict(
snapshot_instance, share)
self.mock_object(self.share_manager.driver, 'delete_snapshot')
db_update_call = self.mock_object(
self.share_manager.db, 'share_snapshot_instance_update')
snapshot_destroy_call = self.mock_object(
self.share_manager.db, 'share_snapshot_instance_delete')
side_effect = exception.QuotaError(code=500) if quota_error else None
self.mock_object(quota.QUOTAS, 'reserve',
mock.Mock(side_effect=side_effect))
quota_commit_call = self.mock_object(quota.QUOTAS, 'commit')
retval = self.share_manager.delete_snapshot(
self.context, snapshot_id)
self.assertIsNone(retval)
self.share_manager.driver.delete_snapshot.assert_called_once_with(
mock.ANY, expected_snapshot_instance_dict, share_server=None)
self.assertFalse(db_update_call.called)
self.assertTrue(snapshot_destroy_call.called)
self.assertTrue(manager.QUOTAS.reserve.called)
quota.QUOTAS.reserve.assert_called_once_with(
mock.ANY, project_id=self.context.project_id, snapshots=-1,
snapshot_gigabytes=-snapshot['size'], user_id=snapshot['user_id'],
share_type_id=share['instance']['share_type_id'])
self.assertEqual(not quota_error, quota_commit_call.called)
self.assertEqual(quota_error, mock_exception_log.called)
self.assertEqual(expected_exc_count, mock_exception_log.call_count)
    @ddt.data(exception.ShareSnapshotIsBusy, exception.ManilaException)
    def test_delete_snapshot_ignore_exceptions_with_the_force(self, exc):
        """force=True deletes the instance despite driver and quota errors.

        Both the driver exception and the quota reserve failure are logged
        (two LOG.exception calls), the instance row is destroyed anyway, no
        quota is committed, and a user message is created.
        """
        def _raise_quota_error():
            raise exception.QuotaError(code='500')
        share_id = 'FAKE_SHARE_ID'
        share = fakes.fake_share(id=share_id)
        snapshot_instance = fakes.fake_snapshot_instance(
            share_id=share_id, share=share, name='fake_snapshot')
        snapshot = fakes.fake_snapshot(
            share_id=share_id, share=share, instance=snapshot_instance,
            project_id=self.context.project_id, size=1)
        snapshot_id = snapshot['id']
        self.mock_object(self.share_manager.db, 'share_snapshot_get',
                         mock.Mock(return_value=snapshot))
        self.mock_object(self.share_manager.db, 'share_snapshot_instance_get',
                         mock.Mock(return_value=snapshot_instance))
        self.mock_object(self.share_manager.db, 'share_get',
                         mock.Mock(return_value=share))
        self.mock_object(self.share_manager, '_get_share_server',
                         mock.Mock(return_value=None))
        mock_exception_log = self.mock_object(manager.LOG, 'exception')
        self.mock_object(self.share_manager.driver, 'delete_snapshot',
                         mock.Mock(side_effect=exc))
        db_update_call = self.mock_object(
            self.share_manager.db, 'share_snapshot_instance_update')
        snapshot_destroy_call = self.mock_object(
            self.share_manager.db, 'share_snapshot_instance_delete')
        self.mock_object(quota.QUOTAS, 'reserve',
                         mock.Mock(side_effect=_raise_quota_error))
        quota_commit_call = self.mock_object(quota.QUOTAS, 'commit')
        retval = self.share_manager.delete_snapshot(
            self.context, snapshot_id, force=True)
        self.assertIsNone(retval)
        self.assertEqual(2, mock_exception_log.call_count)
        snapshot_destroy_call.assert_called_once_with(
            mock.ANY, snapshot_instance['id'])
        self.assertFalse(quota_commit_call.called)
        self.assertFalse(db_update_call.called)
        self.share_manager.message_api.create.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            message_field.Action.DELETE,
            snapshot['project_id'],
            resource_type=message_field.Resource.SHARE_SNAPSHOT,
            resource_id=snapshot_instance['id'],
            exception=mock.ANY)
    def test_create_share_instance_with_share_network_dhss_false(self):
        """Share network with DHSS=False backend fails share creation.

        A share that carries a share_network_id cannot be scheduled on a
        driver that does not handle share servers: the instance is set to
        'error' and a user message with detail is created.
        """
        manager.CONF.set_default('driver_handles_share_servers', False)
        self.mock_object(
            self.share_manager.driver.configuration, 'safe_get',
            mock.Mock(return_value=False))
        share_network_id = 'fake_sn'
        share = db_utils.create_share(share_network_id=share_network_id)
        share_instance = share.instance
        self.mock_object(
            self.share_manager.db, 'share_instance_get',
            mock.Mock(return_value=share_instance))
        self.mock_object(self.share_manager.db, 'share_instance_update')
        # The raised message must reference the offending instance id.
        self.assertRaisesRegex(
            exception.ManilaException,
            '.*%s.*' % share_instance['id'],
            self.share_manager.create_share_instance, self.context,
            share_instance['id'])
        self.share_manager.db.share_instance_get.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            share_instance['id'],
            with_share_data=True
        )
        self.share_manager.db.share_instance_update.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), share_instance['id'],
            {'status': constants.STATUS_ERROR})
        self.share_manager.message_api.create.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            message_field.Action.CREATE,
            six.text_type(share.project_id),
            resource_type=message_field.Resource.SHARE,
            resource_id=share['id'],
            detail=mock.ANY)
    def test_create_share_instance_with_share_network_server_not_exists(self):
        """Test share can be created without share server."""
        share_net = db_utils.create_share_network()
        share = db_utils.create_share(share_network_id=share_net['id'])
        share_id = share['id']
        # Stand-in for _setup_server: provisions a fresh server row instead
        # of performing real backend setup.
        def fake_setup_server(context, share_network, *args, **kwargs):
            return db_utils.create_share_server(
                share_network_id=share_network['id'],
                host='fake_host')
        self.mock_object(manager.LOG, 'info')
        self.share_manager.driver.create_share = mock.Mock(
            return_value='fake_location')
        self.share_manager._setup_server = fake_setup_server
        self.share_manager.create_share_instance(self.context,
                                                 share.instance['id'])
        # The share row survives creation and is retrievable by id.
        self.assertEqual(share_id, db.share_get(context.get_admin_context(),
                                                share_id).id)
        manager.LOG.info.assert_called_with(mock.ANY, share.instance['id'])
    def test_create_share_instance_with_share_network_server_fail(self):
        """Server setup failure propagates and errors the share instance.

        No valid server exists (lookup raises ShareServerNotFound), a new
        server row is created, _setup_server then fails: the instance must
        be set to 'error', the failure logged, and a NO_SHARE_SERVER user
        message created.
        """
        fake_share = db_utils.create_share(share_network_id='fake_sn_id',
                                           size=1)
        fake_server = {
            'id': 'fake_srv_id',
            'status': constants.STATUS_CREATING,
        }
        self.mock_object(db, 'share_server_create',
                         mock.Mock(return_value=fake_server))
        self.mock_object(db, 'share_instance_update',
                         mock.Mock(return_value=fake_share.instance))
        self.mock_object(db, 'share_instance_get',
                         mock.Mock(return_value=fake_share.instance))
        self.mock_object(manager.LOG, 'error')
        def raise_share_server_not_found(*args, **kwargs):
            raise exception.ShareServerNotFound(
                share_server_id=fake_server['id'])
        def raise_manila_exception(*args, **kwargs):
            raise exception.ManilaException()
        self.mock_object(db,
                         'share_server_get_all_by_host_and_share_net_valid',
                         mock.Mock(side_effect=raise_share_server_not_found))
        self.mock_object(self.share_manager, '_setup_server',
                         mock.Mock(side_effect=raise_manila_exception))
        self.assertRaises(
            exception.ManilaException,
            self.share_manager.create_share_instance,
            self.context,
            fake_share.instance['id'],
        )
        (db.share_server_get_all_by_host_and_share_net_valid.
            assert_called_once_with(
                utils.IsAMatcher(context.RequestContext),
                self.share_manager.host,
                fake_share['share_network_id'],
            ))
        db.share_server_create.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), mock.ANY)
        db.share_instance_update.assert_has_calls([
            mock.call(
                utils.IsAMatcher(context.RequestContext),
                fake_share.instance['id'],
                {'status': constants.STATUS_ERROR},
            )
        ])
        self.share_manager._setup_server.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), fake_server,
            metadata={'request_host': 'fake_host'})
        manager.LOG.error.assert_called_with(mock.ANY,
                                             fake_share.instance['id'])
        self.share_manager.message_api.create.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            message_field.Action.CREATE,
            six.text_type(fake_share.project_id),
            resource_type=message_field.Resource.SHARE,
            resource_id=fake_share['id'],
            detail=message_field.Detail.NO_SHARE_SERVER)
    def test_create_share_instance_with_share_network_not_found(self):
        """Test creation fails if share network not found."""
        self.mock_object(manager.LOG, 'error')
        share = db_utils.create_share(share_network_id='fake-net-id')
        share_id = share['id']
        self.assertRaises(
            exception.ShareNetworkNotFound,
            self.share_manager.create_share_instance,
            self.context,
            share.instance['id']
        )
        manager.LOG.error.assert_called_with(mock.ANY, share.instance['id'])
        # The share row must be flipped to 'error' in the DB.
        shr = db.share_get(self.context, share_id)
        self.assertEqual(constants.STATUS_ERROR, shr['status'])
        self.share_manager.message_api.create.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            message_field.Action.CREATE,
            six.text_type(shr.project_id),
            resource_type=message_field.Resource.SHARE,
            resource_id=shr['id'],
            detail=message_field.Detail.NO_SHARE_SERVER)
def test_create_share_instance_with_share_network_server_exists(self):
    """Test share can be created with existing share server."""
    share_net = db_utils.create_share_network()
    share = db_utils.create_share(share_network_id=share_net['id'])
    # Pre-create a compatible server on this manager's host so no new
    # server setup is required.
    share_srv = db_utils.create_share_server(
        share_network_id=share_net['id'], host=self.share_manager.host)
    share_id = share['id']
    self.mock_object(manager.LOG, 'info')
    driver_mock = mock.Mock()
    driver_mock.create_share.return_value = "fake_location"
    driver_mock.choose_share_server_compatible_with_share.return_value = (
        share_srv
    )
    self.share_manager.driver = driver_mock
    self.share_manager.create_share_instance(self.context,
                                             share.instance['id'])
    self.assertFalse(self.share_manager.driver.setup_network.called)
    self.assertEqual(share_id, db.share_get(context.get_admin_context(),
                                            share_id).id)
    shr = db.share_get(self.context, share_id)
    # Use the suite's (expected, actual) assertEqual argument order.
    self.assertEqual(constants.STATUS_AVAILABLE, shr['status'])
    self.assertEqual(share_srv['id'], shr['share_server_id'])
    self.assertGreater(len(shr['export_location']), 0)
    self.assertEqual(1, len(shr['export_locations']))
    manager.LOG.info.assert_called_with(mock.ANY, share.instance['id'])
@ddt.data('export_location', 'export_locations')
def test_create_share_instance_with_error_in_driver(self, details_key):
    """Test db updates if share creation fails in driver."""
    share = db_utils.create_share()
    share_id = share['id']
    some_data = 'fake_location'
    self.share_manager.driver = mock.Mock()
    # The driver exception carries detail_data; the manager should
    # persist its export location(s) even though creation failed.
    e = exception.ManilaException(detail_data={details_key: some_data})
    self.share_manager.driver.create_share.side_effect = e
    self.assertRaises(
        exception.ManilaException,
        self.share_manager.create_share_instance,
        self.context,
        share.instance['id']
    )
    self.assertTrue(self.share_manager.driver.create_share.called)
    shr = db.share_get(self.context, share_id)
    self.assertEqual(some_data, shr['export_location'])
    # A user-visible message with the exception must be recorded.
    self.share_manager.message_api.create.assert_called_once_with(
        utils.IsAMatcher(context.RequestContext),
        message_field.Action.CREATE,
        six.text_type(share.project_id),
        resource_type=message_field.Resource.SHARE,
        resource_id=share['id'],
        exception=mock.ANY)
def test_create_share_instance_with_server_created(self):
    """Test share can be created and share server is created."""
    share_net = db_utils.create_share_network()
    share = db_utils.create_share(share_network_id=share_net['id'])
    # An existing server in ERROR state is not reusable, forcing the
    # manager to create and set up a brand new share server.
    db_utils.create_share_server(
        share_network_id=share_net['id'], host=self.share_manager.host,
        status=constants.STATUS_ERROR)
    share_id = share['id']
    fake_server = {
        'id': 'fake_srv_id',
        'status': constants.STATUS_CREATING,
    }
    self.mock_object(db, 'share_server_create',
                     mock.Mock(return_value=fake_server))
    self.mock_object(self.share_manager, '_setup_server',
                     mock.Mock(return_value=fake_server))
    self.share_manager.create_share_instance(self.context,
                                             share.instance['id'])
    self.assertEqual(share_id, db.share_get(context.get_admin_context(),
                                            share_id).id)
    shr = db.share_get(self.context, share_id)
    self.assertEqual(constants.STATUS_AVAILABLE, shr['status'])
    # The share must be bound to the newly created server.
    self.assertEqual('fake_srv_id', shr['share_server_id'])
    db.share_server_create.assert_called_once_with(
        utils.IsAMatcher(context.RequestContext), mock.ANY)
    self.share_manager._setup_server.assert_called_once_with(
        utils.IsAMatcher(context.RequestContext), fake_server,
        metadata={'request_host': 'fake_host'})
def test_create_share_instance_update_replica_state(self):
    """First instance of a replicated share becomes the active replica."""
    share_net = db_utils.create_share_network()
    share = db_utils.create_share(share_network_id=share_net['id'],
                                  replication_type='dr')
    # Existing ERROR server forces creation of a new share server.
    db_utils.create_share_server(
        share_network_id=share_net['id'], host=self.share_manager.host,
        status=constants.STATUS_ERROR)
    share_id = share['id']
    fake_server = {
        'id': 'fake_srv_id',
        'status': constants.STATUS_CREATING,
    }
    self.mock_object(db, 'share_server_create',
                     mock.Mock(return_value=fake_server))
    self.mock_object(self.share_manager, '_setup_server',
                     mock.Mock(return_value=fake_server))
    self.share_manager.create_share_instance(self.context,
                                             share.instance['id'])
    self.assertEqual(share_id, db.share_get(context.get_admin_context(),
                                            share_id).id)
    shr = db.share_get(self.context, share_id)
    shr_instances = db.share_instances_get_all_by_share(
        self.context, shr['id'])
    self.assertEqual(1, len(shr_instances))
    self.assertEqual(constants.STATUS_AVAILABLE, shr['status'])
    # The sole instance of a 'dr' share must be promoted to ACTIVE.
    self.assertEqual(
        constants.REPLICA_STATE_ACTIVE, shr_instances[0]['replica_state'])
    self.assertEqual('fake_srv_id', shr['share_server_id'])
    db.share_server_create.assert_called_once_with(
        utils.IsAMatcher(context.RequestContext), mock.ANY)
    self.share_manager._setup_server.assert_called_once_with(
        utils.IsAMatcher(context.RequestContext), fake_server,
        metadata={'request_host': 'fake_host'})
@mock.patch('manila.tests.fake_notifier.FakeNotifier._notify')
def test_create_delete_share_instance(self, mock_notify):
    """Test share can be created and deleted."""
    share = db_utils.create_share()
    mock_notify.assert_not_called()
    self.share_manager.create_share_instance(
        self.context, share.instance['id'])
    # Creation must emit start/end notifications, in order.
    self.assert_notify_called(mock_notify,
                              (['INFO', 'share.create.start'],
                               ['INFO', 'share.create.end']))
    self.share_manager.delete_share_instance(
        self.context, share.instance['id'])
    # Deletion appends its own start/end pair after the create pair.
    self.assert_notify_called(mock_notify,
                              (['INFO', 'share.create.start'],
                               ['INFO', 'share.create.end'],
                               ['INFO', 'share.delete.start'],
                               ['INFO', 'share.delete.end']))
@ddt.data(True, False)
def test_create_delete_share_instance_error(self, exception_update_access):
    """Test share can be created and deleted with error."""
    # Helper raised from the mocked driver/access-helper calls below.
    def _raise_exception(self, *args, **kwargs):
        raise exception.ManilaException('fake')
    self.mock_object(self.share_manager.driver, "create_share",
                     mock.Mock(side_effect=_raise_exception))
    self.mock_object(self.share_manager.driver, "delete_share",
                     mock.Mock(side_effect=_raise_exception))
    if exception_update_access:
        # Fail earlier, in the access-rule update preceding delete_share.
        self.mock_object(
            self.share_manager.access_helper, "update_access_rules",
            mock.Mock(side_effect=_raise_exception))
    share = db_utils.create_share()
    share_id = share['id']
    self.assertRaises(exception.ManilaException,
                      self.share_manager.create_share_instance,
                      self.context,
                      share.instance['id'])
    shr = db.share_get(self.context, share_id)
    self.assertEqual(constants.STATUS_ERROR, shr['status'])
    self.assertRaises(exception.ManilaException,
                      self.share_manager.delete_share_instance,
                      self.context,
                      share.instance['id'])
    shr = db.share_get(self.context, share_id)
    self.assertEqual(constants.STATUS_ERROR_DELETING, shr['status'])
    self.share_manager.driver.create_share.assert_called_once_with(
        utils.IsAMatcher(context.RequestContext),
        utils.IsAMatcher(models.ShareInstance),
        share_server=None)
    if not exception_update_access:
        # delete_share is only reached when access-rule update succeeded.
        self.share_manager.driver.delete_share.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            utils.IsAMatcher(models.ShareInstance),
            share_server=None)
def test_create_share_instance_update_availability_zone(self):
    """Creating an instance fills in the share's availability zone."""
    new_share = db_utils.create_share(availability_zone=None)
    self.share_manager.create_share_instance(
        self.context, new_share.instance['id'])
    refreshed = db.share_get(context.get_admin_context(), new_share['id'])
    # The manager's configured storage AZ must have been applied.
    self.assertIsNotNone(refreshed.availability_zone)
    self.assertEqual(manager.CONF.storage_availability_zone,
                     refreshed.availability_zone)
def test_provide_share_server_for_share_incompatible_servers(self):
    """Driver compatibility-check errors must propagate to the caller."""
    fake_exception = exception.ManilaException("fake")
    fake_share_server = {'id': 'fake'}
    share = db_utils.create_share()
    self.mock_object(db,
                     'share_server_get_all_by_host_and_share_net_valid',
                     mock.Mock(return_value=[fake_share_server]))
    # Driver rejects every candidate server by raising.
    self.mock_object(
        self.share_manager.driver,
        "choose_share_server_compatible_with_share",
        mock.Mock(side_effect=fake_exception)
    )
    self.assertRaises(exception.ManilaException,
                      self.share_manager._provide_share_server_for_share,
                      self.context, "fake_id", share.instance)
    driver_mock = self.share_manager.driver
    driver_method_mock = (
        driver_mock.choose_share_server_compatible_with_share
    )
    driver_method_mock.assert_called_once_with(
        self.context, [fake_share_server], share.instance, snapshot=None,
        share_group=None)
def test_provide_share_server_for_share_invalid_arguments(self):
    """Passing neither a share network nor an instance is rejected."""
    self.assertRaises(
        ValueError,
        self.share_manager._provide_share_server_for_share,
        self.context, None, None)
def test_provide_share_server_for_share_parent_ss_not_found(self):
    """A snapshot's parent share server must exist, or lookup fails."""
    fake_parent_id = "fake_server_id"
    fake_exception = exception.ShareServerNotFound("fake")
    share = db_utils.create_share()
    # Snapshot referencing a parent share server that cannot be found.
    fake_snapshot = {
        'share': {
            'instance': {
                'share_server_id': fake_parent_id
            }
        }
    }
    self.mock_object(db, 'share_server_get',
                     mock.Mock(side_effect=fake_exception))
    self.assertRaises(exception.ShareServerNotFound,
                      self.share_manager._provide_share_server_for_share,
                      self.context, "fake_id", share.instance,
                      snapshot=fake_snapshot)
    db.share_server_get.assert_called_once_with(
        self.context, fake_parent_id)
def test_provide_share_server_for_share_parent_ss_invalid(self):
    """A parent share server in a bad status must be rejected."""
    fake_parent_id = "fake_server_id"
    share = db_utils.create_share()
    fake_snapshot = {
        'share': {
            'instance': {
                'share_server_id': fake_parent_id
            }
        }
    }
    # Parent server exists but has an unusable (non-ACTIVE) status.
    fake_parent_share_server = {'status': 'fake'}
    self.mock_object(db, 'share_server_get',
                     mock.Mock(return_value=fake_parent_share_server))
    self.assertRaises(exception.InvalidShareServer,
                      self.share_manager._provide_share_server_for_share,
                      self.context, "fake_id", share.instance,
                      snapshot=fake_snapshot)
    db.share_server_get.assert_called_once_with(
        self.context, fake_parent_id)
def test_provide_share_server_for_share_group_incompatible_servers(self):
    """Driver errors choosing a server for a group must propagate."""
    fake_exception = exception.ManilaException("fake")
    fake_share_server = {'id': 'fake'}
    sg = db_utils.create_share_group()
    self.mock_object(db,
                     'share_server_get_all_by_host_and_share_net_valid',
                     mock.Mock(return_value=[fake_share_server]))
    # Driver rejects every candidate server by raising.
    self.mock_object(
        self.share_manager.driver,
        "choose_share_server_compatible_with_share_group",
        mock.Mock(side_effect=fake_exception)
    )
    self.assertRaises(
        exception.ManilaException,
        self.share_manager._provide_share_server_for_share_group,
        self.context, "fake_id", sg)
    driver_mock = self.share_manager.driver
    driver_method_mock = (
        driver_mock.choose_share_server_compatible_with_share_group)
    driver_method_mock.assert_called_once_with(
        self.context, [fake_share_server], sg, share_group_snapshot=None)
def test_provide_share_server_for_share_group_invalid_arguments(self):
    """Missing share network and share group must raise InvalidInput."""
    self.assertRaises(
        exception.InvalidInput,
        self.share_manager._provide_share_server_for_share_group,
        self.context, None, None)
def test_manage_share_driver_exception(self):
    """Any driver error during manage puts the share in MANAGE_ERROR."""
    self.mock_object(self.share_manager, 'driver')
    self.share_manager.driver.driver_handles_share_servers = False
    # Dynamically built exception type proves arbitrary driver errors
    # are re-raised unchanged.
    CustomException = type('CustomException', (Exception,), dict())
    self.mock_object(self.share_manager.driver,
                     'manage_existing',
                     mock.Mock(side_effect=CustomException))
    self.mock_object(share_types,
                     'get_share_type_extra_specs',
                     mock.Mock(return_value='False'))
    self.mock_object(self.share_manager.db, 'share_update', mock.Mock())
    share = db_utils.create_share()
    share_id = share['id']
    driver_options = {'fake': 'fake'}
    self.assertRaises(
        CustomException,
        self.share_manager.manage_share,
        self.context, share_id, driver_options)
    (self.share_manager.driver.manage_existing.
        assert_called_once_with(mock.ANY, driver_options))
    self.share_manager.db.share_update.assert_called_once_with(
        utils.IsAMatcher(context.RequestContext), share_id,
        {'status': constants.STATUS_MANAGE_ERROR, 'size': 1})
def test_manage_share_invalid_size(self):
    """Driver returning no size data makes manage fail as InvalidShare."""
    self.mock_object(self.share_manager, 'driver')
    self.share_manager.driver.driver_handles_share_servers = False
    self.mock_object(share_types,
                     'get_share_type_extra_specs',
                     mock.Mock(return_value='False'))
    # manage_existing returns None, i.e. no 'size' for the share.
    self.mock_object(self.share_manager.driver,
                     "manage_existing",
                     mock.Mock(return_value=None))
    self.mock_object(self.share_manager.db, 'share_update', mock.Mock())
    share = db_utils.create_share()
    share_id = share['id']
    driver_options = {'fake': 'fake'}
    self.assertRaises(
        exception.InvalidShare,
        self.share_manager.manage_share,
        self.context, share_id, driver_options)
    (self.share_manager.driver.manage_existing.
        assert_called_once_with(mock.ANY, driver_options))
    self.share_manager.db.share_update.assert_called_once_with(
        utils.IsAMatcher(context.RequestContext), share_id,
        {'status': constants.STATUS_MANAGE_ERROR, 'size': 1})
def test_manage_share_quota_error(self):
    """A quota reservation failure aborts manage with MANAGE_ERROR."""
    self.mock_object(self.share_manager, 'driver')
    self.share_manager.driver.driver_handles_share_servers = False
    self.mock_object(share_types,
                     'get_share_type_extra_specs',
                     mock.Mock(return_value='False'))
    self.mock_object(self.share_manager.driver,
                     "manage_existing",
                     mock.Mock(return_value={'size': 3}))
    # Reserving quota for the discovered size fails.
    self.mock_object(quota.QUOTAS, 'reserve',
                     mock.Mock(side_effect=exception.QuotaError))
    self.mock_object(self.share_manager.db, 'share_update', mock.Mock())
    share = db_utils.create_share()
    share_id = share['id']
    driver_options = {'fake': 'fake'}
    self.assertRaises(
        exception.QuotaError,
        self.share_manager.manage_share,
        self.context, share_id, driver_options)
    (self.share_manager.driver.manage_existing.
        assert_called_once_with(mock.ANY, driver_options))
    self.share_manager.db.share_update.assert_called_once_with(
        mock.ANY, share_id,
        {'status': constants.STATUS_MANAGE_ERROR, 'size': 1})
def test_manage_share_incompatible_dhss(self):
    """Manage fails when share type DHSS contradicts the driver mode."""
    self.mock_object(self.share_manager, 'driver')
    self.share_manager.driver.driver_handles_share_servers = False
    share = db_utils.create_share()
    # Share type claims DHSS=True while the driver runs with DHSS=False.
    self.mock_object(share_types, 'get_share_type_extra_specs',
                     mock.Mock(return_value="True"))
    self.assertRaises(exception.InvalidShare,
                      self.share_manager.manage_share,
                      self.context, share['id'], {})
@ddt.data({'dhss': True,
           'driver_data': {'size': 1, 'replication_type': None}},
          {'dhss': False,
           'driver_data': {'size': 2, 'name': 'fake',
                           'replication_type': 'dr'}},
          {'dhss': False,
           'driver_data': {'size': 3,
                           'export_locations': ['foo', 'bar', 'quuz'],
                           'replication_type': 'writable'}})
@ddt.unpack
def test_manage_share_valid_share(self, dhss, driver_data):
    """Successful manage path across DHSS modes and driver responses."""
    self.mock_object(self.share_manager, 'driver')
    self.share_manager.driver.driver_handles_share_servers = dhss
    # replication_type is consumed here and must not be passed through
    # to share_update verbatim; export_locations may or may not be set.
    replication_type = driver_data.pop('replication_type')
    export_locations = driver_data.get('export_locations')
    self.mock_object(self.share_manager.db, 'share_update', mock.Mock())
    self.mock_object(quota.QUOTAS, 'reserve', mock.Mock())
    # Wrap the real export-locations update so calls are observable
    # while still hitting the DB.
    self.mock_object(
        self.share_manager.db,
        'share_export_locations_update',
        mock.Mock(side_effect=(
            self.share_manager.db.share_export_locations_update)))
    self.mock_object(share_types,
                     'get_share_type_extra_specs',
                     mock.Mock(return_value=six.text_type(dhss)))
    # DHSS mode selects which driver entry point handles the manage.
    if dhss:
        mock_manage = self.mock_object(
            self.share_manager.driver,
            "manage_existing_with_server",
            mock.Mock(return_value=driver_data))
    else:
        mock_manage = self.mock_object(
            self.share_manager.driver,
            "manage_existing",
            mock.Mock(return_value=driver_data))
    share = db_utils.create_share(replication_type=replication_type)
    share_id = share['id']
    driver_options = {'fake': 'fake'}
    self.share_manager.manage_share(self.context, share_id, driver_options)
    if dhss:
        mock_manage.assert_called_once_with(mock.ANY, driver_options, None)
    else:
        mock_manage.assert_called_once_with(mock.ANY, driver_options)
    if export_locations:
        (self.share_manager.db.share_export_locations_update.
            assert_called_once_with(
                utils.IsAMatcher(context.RequestContext),
                share.instance['id'], export_locations, delete=True))
    else:
        self.assertFalse(
            self.share_manager.db.share_export_locations_update.called)
    valid_share_data = {
        'status': constants.STATUS_AVAILABLE, 'launched_at': mock.ANY}
    # Replicated shares additionally get an ACTIVE replica state.
    if replication_type:
        valid_share_data['replica_state'] = constants.REPLICA_STATE_ACTIVE
    valid_share_data.update(driver_data)
    self.share_manager.db.share_update.assert_called_once_with(
        utils.IsAMatcher(context.RequestContext),
        share_id, valid_share_data)
def test_update_quota_usages_new(self):
    """An existing quota usage row is incremented in place."""
    fake_project = 'fake_project_id'
    fake_resource = 'fake'
    self.mock_object(self.share_manager.db, 'quota_usage_get',
                     mock.Mock(return_value={'in_use': 1}))
    self.mock_object(self.share_manager.db, 'quota_usage_update')
    self.share_manager._update_quota_usages(
        self.context, fake_project, {fake_resource: 1})
    self.share_manager.db.quota_usage_get.assert_called_once_with(
        mock.ANY, fake_project, fake_resource, mock.ANY)
    # 1 already in use plus 1 newly accounted equals 2.
    self.share_manager.db.quota_usage_update.assert_called_once_with(
        mock.ANY, fake_project, mock.ANY, fake_resource, in_use=2)
def test_update_quota_usages_update(self):
    """A missing quota usage row is created from scratch."""
    fake_project = 'fake_project_id'
    fake_resource = 'fake'
    fake_usage = 1
    missing = exception.QuotaUsageNotFound(project_id=fake_project)
    self.mock_object(self.share_manager.db, 'quota_usage_get',
                     mock.Mock(side_effect=missing))
    self.mock_object(self.share_manager.db, 'quota_usage_create')
    self.share_manager._update_quota_usages(
        self.context, fake_project, {fake_resource: fake_usage})
    self.share_manager.db.quota_usage_get.assert_called_once_with(
        mock.ANY, fake_project, fake_resource, mock.ANY)
    # Not-found lookup falls back to creating the usage row.
    self.share_manager.db.quota_usage_create.assert_called_once_with(
        mock.ANY, fake_project, mock.ANY, fake_resource, fake_usage)
def _setup_unmanage_mocks(self, mock_driver=True, mock_unmanage=None,
                          dhss=False):
    """Install the common mocks used by the unmanage_share tests.

    :param mock_driver: replace the whole driver with a Mock first.
    :param mock_unmanage: optional mock installed as the driver's
        unmanage entry point.
    :param dhss: selects which driver unmanage method gets mocked.
    """
    if mock_driver:
        self.mock_object(self.share_manager, 'driver')
    if mock_unmanage:
        if dhss:
            # The manager calls 'unmanage_with_server' for DHSS drivers
            # (see the assertion in
            # test_unmanage_share_valid_share_with_share_server); the
            # previous attribute name 'unmanage_with_share_server' was a
            # dead mock that never intercepted the call.
            self.mock_object(
                self.share_manager.driver, "unmanage_with_server",
                mock_unmanage)
        else:
            self.mock_object(self.share_manager.driver, "unmanage",
                             mock_unmanage)
    self.mock_object(self.share_manager.db, 'share_update')
    self.mock_object(self.share_manager.db, 'share_instance_delete')
def test_unmanage_share_invalid_share(self):
    """Driver InvalidShare pushes the share into UNMANAGE_ERROR."""
    self.mock_object(self.share_manager, 'driver')
    self.share_manager.driver.driver_handles_share_servers = False
    failing_unmanage = mock.Mock(
        side_effect=exception.InvalidShare(reason="fake"))
    self._setup_unmanage_mocks(mock_driver=False,
                               mock_unmanage=failing_unmanage)
    share = db_utils.create_share()
    self.share_manager.unmanage_share(self.context, share['id'])
    self.share_manager.db.share_update.assert_called_once_with(
        mock.ANY, share['id'],
        {'status': constants.STATUS_UNMANAGE_ERROR})
def test_unmanage_share_valid_share(self):
    """Successful unmanage calls the driver and deletes the instance."""
    self.mock_object(self.share_manager, 'driver')
    self.share_manager.driver.driver_handles_share_servers = False
    self._setup_unmanage_mocks(mock_driver=False,
                               mock_unmanage=mock.Mock())
    share = db_utils.create_share()
    share_id = share['id']
    share_instance_id = share.instance['id']
    self.mock_object(self.share_manager.db, 'share_instance_get',
                     mock.Mock(return_value=share.instance))
    self.share_manager.unmanage_share(self.context, share_id)
    (self.share_manager.driver.unmanage.
        assert_called_once_with(share.instance))
    # The instance record is removed rather than the share being deleted
    # on the backend.
    self.share_manager.db.share_instance_delete.assert_called_once_with(
        mock.ANY, share_instance_id)
def test_unmanage_share_valid_share_with_share_server(self):
    """DHSS unmanage passes the server and disables its auto-delete."""
    self.mock_object(self.share_manager, 'driver')
    self.share_manager.driver.driver_handles_share_servers = True
    # NOTE(review): _setup_unmanage_mocks(dhss=True) installs the mock
    # under 'unmanage_with_share_server', yet the assertion below checks
    # 'unmanage_with_server'; since the driver is a plain Mock, the call
    # is still observed via the auto-created attribute — the names look
    # inconsistent and worth confirming against the manager code.
    self._setup_unmanage_mocks(mock_driver=False,
                               mock_unmanage=mock.Mock(),
                               dhss=True)
    server = db_utils.create_share_server(id='fake_server_id')
    share = db_utils.create_share(share_server_id='fake_server_id')
    self.mock_object(self.share_manager.db, 'share_server_update')
    self.mock_object(self.share_manager.db, 'share_server_get',
                     mock.Mock(return_value=server))
    self.mock_object(self.share_manager.db, 'share_instance_get',
                     mock.Mock(return_value=share.instance))
    share_id = share['id']
    share_instance_id = share.instance['id']
    self.share_manager.unmanage_share(self.context, share_id)
    (self.share_manager.driver.unmanage_with_server.
        assert_called_once_with(share.instance, server))
    self.share_manager.db.share_instance_delete.assert_called_once_with(
        mock.ANY, share_instance_id)
    # An unmanaged share leaves the server behind; it must no longer be
    # auto-deletable.
    self.share_manager.db.share_server_update.assert_called_once_with(
        mock.ANY, server['id'], {'is_auto_deletable': False})
def test_unmanage_share_valid_share_with_quota_error(self):
    """Quota release failures do not block a successful unmanage."""
    self.mock_object(self.share_manager, 'driver')
    self.share_manager.driver.driver_handles_share_servers = False
    self._setup_unmanage_mocks(mock_driver=False,
                               mock_unmanage=mock.Mock())
    # Quota reservation blows up, but unmanage must still complete.
    self.mock_object(quota.QUOTAS, 'reserve',
                     mock.Mock(side_effect=Exception()))
    share = db_utils.create_share()
    share_instance_id = share.instance['id']
    self.share_manager.unmanage_share(self.context, share['id'])
    self.share_manager.driver.unmanage.assert_called_once_with(mock.ANY)
    self.share_manager.db.share_instance_delete.assert_called_once_with(
        mock.ANY, share_instance_id)
def test_unmanage_share_remove_access_rules_error(self):
    """Access-rule cleanup failure results in UNMANAGE_ERROR status."""
    self.mock_object(self.share_manager, 'driver')
    self.share_manager.driver.driver_handles_share_servers = False
    # Opt in to rule removal so the failing helper is actually invoked.
    manager.CONF.unmanage_remove_access_rules = True
    self._setup_unmanage_mocks(mock_driver=False,
                               mock_unmanage=mock.Mock())
    self.mock_object(
        self.share_manager.access_helper,
        'update_access_rules',
        mock.Mock(side_effect=Exception())
    )
    self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(return_value=[]))
    share = db_utils.create_share()
    self.share_manager.unmanage_share(self.context, share['id'])
    self.share_manager.db.share_update.assert_called_once_with(
        mock.ANY, share['id'], {'status': constants.STATUS_UNMANAGE_ERROR})
def test_unmanage_share_valid_share_remove_access_rules(self):
    """With rule removal enabled, all access rules are deleted too."""
    self.mock_object(self.share_manager, 'driver')
    self.share_manager.driver.driver_handles_share_servers = False
    manager.CONF.unmanage_remove_access_rules = True
    self._setup_unmanage_mocks(mock_driver=False,
                               mock_unmanage=mock.Mock())
    smanager = self.share_manager
    self.mock_object(smanager.access_helper, 'update_access_rules')
    self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(return_value=[]))
    share = db_utils.create_share()
    share_id = share['id']
    share_instance_id = share.instance['id']
    smanager.unmanage_share(self.context, share_id)
    smanager.driver.unmanage.assert_called_once_with(mock.ANY)
    # delete_all_rules=True wipes every access rule during unmanage.
    smanager.access_helper.update_access_rules.assert_called_once_with(
        mock.ANY, mock.ANY, delete_all_rules=True, share_server=None
    )
    smanager.db.share_instance_delete.assert_called_once_with(
        mock.ANY, share_instance_id)
def test_delete_share_instance_share_server_not_found(self):
    """Deleting an instance bound to a missing share server fails."""
    fake_net = db_utils.create_share_network()
    # 'fake-id' does not correspond to any stored share server.
    share = db_utils.create_share(share_network_id=fake_net['id'],
                                  share_server_id='fake-id')
    self.assertRaises(exception.ShareServerNotFound,
                      self.share_manager.delete_share_instance,
                      self.context, share.instance['id'])
@ddt.data(True, False)
def test_delete_share_instance_last_on_srv_with_sec_service(
        self, with_details):
    """Last-share deletion tears down the server with its sec services."""
    share_net = db_utils.create_share_network()
    sec_service = db_utils.create_security_service(
        share_network_id=share_net['id'])
    backend_details = dict(
        security_service_ldap=jsonutils.dumps(sec_service))
    # Details attached either at creation time or afterwards via the
    # dedicated DB call — both paths must behave identically.
    if with_details:
        share_srv = db_utils.create_share_server(
            share_network_id=share_net['id'],
            host=self.share_manager.host,
            backend_details=backend_details)
    else:
        share_srv = db_utils.create_share_server(
            share_network_id=share_net['id'],
            host=self.share_manager.host)
        db.share_server_backend_details_set(
            context.get_admin_context(), share_srv['id'], backend_details)
    share = db_utils.create_share(share_network_id=share_net['id'],
                                  share_server_id=share_srv['id'])
    mock_access_helper_call = self.mock_object(
        self.share_manager.access_helper, 'update_access_rules')
    self.share_manager.driver = mock.Mock()
    # Deleting the last share on a server triggers server teardown.
    manager.CONF.delete_share_server_with_last_share = True
    self.share_manager.delete_share_instance(self.context,
                                             share.instance['id'])
    mock_access_helper_call.assert_called_once_with(
        utils.IsAMatcher(context.RequestContext), share.instance['id'],
        delete_all_rules=True, share_server=mock.ANY)
    self.share_manager.driver.teardown_server.assert_called_once_with(
        server_details=backend_details,
        security_services=[jsonutils.loads(
            backend_details['security_service_ldap'])])
@ddt.data({'force': True, 'side_effect': 'update_access'},
          {'force': True, 'side_effect': 'delete_share'},
          {'force': False, 'side_effect': None})
@ddt.unpack
def test_delete_share_instance_last_on_server(self, force, side_effect):
    """Forced deletion swallows (but logs) access/driver errors."""
    share_net = db_utils.create_share_network()
    share_srv = db_utils.create_share_server(
        share_network_id=share_net['id'],
        host=self.share_manager.host
    )
    share = db_utils.create_share(share_network_id=share_net['id'],
                                  share_server_id=share_srv['id'])
    share_srv = db.share_server_get(self.context, share_srv['id'])
    mock_access_helper_call = self.mock_object(
        self.share_manager.access_helper, 'update_access_rules')
    self.share_manager.driver = mock.Mock()
    # Inject the failure named by the ddt scenario, if any.
    if side_effect == 'update_access':
        mock_access_helper_call.side_effect = exception.ManilaException
    if side_effect == 'delete_share':
        self.mock_object(self.share_manager.driver, 'delete_share',
                         mock.Mock(side_effect=Exception('fake')))
    self.mock_object(manager.LOG, 'error')
    manager.CONF.delete_share_server_with_last_share = True
    self.share_manager.delete_share_instance(
        self.context, share.instance['id'], force=force)
    mock_access_helper_call.assert_called_once_with(
        utils.IsAMatcher(context.RequestContext), share.instance['id'],
        delete_all_rules=True, share_server=mock.ANY)
    self.share_manager.driver.teardown_server.assert_called_once_with(
        server_details=share_srv.get('backend_details'),
        security_services=[])
    # Forced runs log the swallowed errors; unforced clean runs do not.
    self.assertEqual(force, manager.LOG.error.called)
def test_delete_share_instance_last_on_server_deletion_disabled(self):
    """With auto server deletion off, the server is left untouched."""
    share_net = db_utils.create_share_network()
    share_srv = db_utils.create_share_server(
        share_network_id=share_net['id'],
        host=self.share_manager.host
    )
    share = db_utils.create_share(share_network_id=share_net['id'],
                                  share_server_id=share_srv['id'])
    share_srv = db.share_server_get(self.context, share_srv['id'])
    # Opt out of tearing down the server with its last share.
    manager.CONF.delete_share_server_with_last_share = False
    self.share_manager.driver = mock.Mock()
    mock_access_helper_call = self.mock_object(
        self.share_manager.access_helper, 'update_access_rules')
    self.mock_object(db, 'share_server_get',
                     mock.Mock(return_value=share_srv))
    self.share_manager.delete_share_instance(self.context,
                                             share.instance['id'])
    mock_access_helper_call.assert_called_once_with(
        utils.IsAMatcher(context.RequestContext), share.instance['id'],
        delete_all_rules=True, share_server=share_srv)
    self.assertFalse(self.share_manager.driver.teardown_network.called)
def test_delete_share_instance_not_last_on_server(self):
    """A server still hosting other shares must not be torn down."""
    share_net = db_utils.create_share_network()
    share_srv = db_utils.create_share_server(
        share_network_id=share_net['id'],
        host=self.share_manager.host
    )
    share = db_utils.create_share(share_network_id=share_net['id'],
                                  share_server_id=share_srv['id'])
    # Second share keeps the server busy after the first is deleted.
    db_utils.create_share(share_network_id=share_net['id'],
                          share_server_id=share_srv['id'])
    share_srv = db.share_server_get(self.context, share_srv['id'])
    manager.CONF.delete_share_server_with_last_share = True
    self.share_manager.driver = mock.Mock()
    self.mock_object(db, 'share_server_get',
                     mock.Mock(return_value=share_srv))
    mock_access_helper_call = self.mock_object(
        self.share_manager.access_helper, 'update_access_rules')
    self.share_manager.delete_share_instance(self.context,
                                             share.instance['id'])
    mock_access_helper_call.assert_called_once_with(
        utils.IsAMatcher(context.RequestContext), share.instance['id'],
        delete_all_rules=True, share_server=share_srv)
    self.assertFalse(self.share_manager.driver.teardown_network.called)
@ddt.data('update_access', 'delete_share')
def test_delete_share_instance_not_found(self, side_effect):
    """Backend-missing resources during delete are logged, not fatal."""
    share_net = db_utils.create_share_network()
    share_srv = db_utils.create_share_server(
        share_network_id=share_net['id'],
        host=self.share_manager.host)
    share = db_utils.create_share(share_network_id=share_net['id'],
                                  share_server_id=share_srv['id'])
    access = db_utils.create_access(share_id=share['id'])
    # Keep a second share on the server so no teardown is attempted.
    db_utils.create_share(share_network_id=share_net['id'],
                          share_server_id=share_srv['id'])
    share_srv = db.share_server_get(self.context, share_srv['id'])
    manager.CONF.delete_share_server_with_last_share = False
    self.mock_object(db, 'share_server_get',
                     mock.Mock(return_value=share_srv))
    self.mock_object(db, 'share_instance_get',
                     mock.Mock(return_value=share.instance))
    self.mock_object(db, 'share_access_get_all_for_instance',
                     mock.Mock(return_value=[access]))
    self.share_manager.driver = mock.Mock()
    self.share_manager.access_helper.driver = mock.Mock()
    # ShareResourceNotFound is raised either while updating access
    # rules or while deleting the share on the backend.
    if side_effect == 'update_access':
        mock_access_helper_call = self.mock_object(
            self.share_manager.access_helper, 'update_access_rules',
            mock.Mock(side_effect=exception.ShareResourceNotFound(
                share_id=share['id'])))
    if side_effect == 'delete_share':
        mock_access_helper_call = self.mock_object(
            self.share_manager.access_helper, 'update_access_rules',
            mock.Mock(return_value=None)
        )
        self.mock_object(
            self.share_manager.driver, 'delete_share',
            mock.Mock(side_effect=exception.ShareResourceNotFound(
                share_id=share['id'])))
    self.mock_object(manager.LOG, 'warning')
    self.share_manager.delete_share_instance(self.context,
                                             share.instance['id'])
    self.assertFalse(self.share_manager.driver.teardown_network.called)
    mock_access_helper_call.assert_called_once_with(
        utils.IsAMatcher(context.RequestContext), share.instance['id'],
        delete_all_rules=True, share_server=share_srv)
    # The missing backend resource only produces a warning.
    self.assertTrue(manager.LOG.warning.called)
def test_setup_server(self):
    """Full _setup_server flow: network, driver setup, details, ACTIVE."""
    # Setup required test data
    share_server = {
        'id': 'fake_id',
        'share_network_id': 'fake_sn_id',
    }
    metadata = {'fake_metadata_key': 'fake_metadata_value'}
    share_network = {'id': 'fake_sn_id'}
    network_info = {'security_services': []}
    # One security service per allowed type, all with distinct fields.
    for ss_type in constants.SECURITY_SERVICES_ALLOWED_TYPES:
        network_info['security_services'].append({
            'name': 'fake_name' + ss_type,
            'ou': 'fake_ou' + ss_type,
            'domain': 'fake_domain' + ss_type,
            'server': 'fake_server' + ss_type,
            'dns_ip': 'fake_dns_ip' + ss_type,
            'user': 'fake_user' + ss_type,
            'type': ss_type,
            'password': 'fake_password' + ss_type,
        })
    sec_services = network_info['security_services']
    server_info = {'fake_server_info_key': 'fake_server_info_value'}
    network_info['network_type'] = 'fake_network_type'
    # mock required stuff
    self.mock_object(self.share_manager.db, 'share_network_get',
                     mock.Mock(return_value=share_network))
    self.mock_object(self.share_manager.driver, 'allocate_network')
    self.mock_object(self.share_manager, '_form_server_setup_info',
                     mock.Mock(return_value=network_info))
    self.mock_object(self.share_manager, '_validate_segmentation_id')
    self.mock_object(self.share_manager.driver, 'setup_server',
                     mock.Mock(return_value=server_info))
    self.mock_object(self.share_manager.db,
                     'share_server_backend_details_set')
    self.mock_object(self.share_manager.db, 'share_server_update',
                     mock.Mock(return_value=share_server))
    # execute method _setup_server
    result = self.share_manager._setup_server(
        self.context, share_server, metadata=metadata)
    # verify results
    self.assertEqual(share_server, result)
    # The share network is looked up twice during setup.
    self.share_manager.db.share_network_get.assert_has_calls([
        mock.call(self.context, share_server['share_network_id']),
        mock.call(self.context, share_server['share_network_id']),
    ])
    self.share_manager.driver.allocate_network.assert_called_once_with(
        self.context, share_server, share_network)
    self.share_manager._form_server_setup_info.assert_called_once_with(
        self.context, share_server, share_network)
    self.share_manager._validate_segmentation_id.assert_called_once_with(
        network_info)
    self.share_manager.driver.setup_server.assert_called_once_with(
        network_info, metadata=metadata)
    # Each security service is stored individually as backend details,
    # then the driver-provided server_info is stored last.
    (self.share_manager.db.share_server_backend_details_set.
        assert_has_calls([
            mock.call(self.context, share_server['id'],
                      {'security_service_' + sec_services[0]['type']:
                          jsonutils.dumps(sec_services[0])}),
            mock.call(self.context, share_server['id'],
                      {'security_service_' + sec_services[1]['type']:
                          jsonutils.dumps(sec_services[1])}),
            mock.call(self.context, share_server['id'],
                      {'security_service_' + sec_services[2]['type']:
                          jsonutils.dumps(sec_services[2])}),
            mock.call(self.context, share_server['id'], server_info),
        ]))
    self.share_manager.db.share_server_update.assert_called_once_with(
        self.context, share_server['id'],
        {'status': constants.STATUS_ACTIVE,
         'identifier': share_server['id']})
    def test_setup_server_server_info_not_present(self):
        """Verify _setup_server succeeds when the driver returns no details.

        With an empty server_info dict from the driver, the server must
        still be marked ACTIVE and the network allocated.
        """
        # Setup required test data
        share_server = {
            'id': 'fake_id',
            'share_network_id': 'fake_sn_id',
        }
        metadata = {'fake_metadata_key': 'fake_metadata_value'}
        share_network = {'id': 'fake_sn_id'}
        network_info = {
            'fake_network_info_key': 'fake_network_info_value',
            'security_services': [],
            'network_type': 'fake_network_type',
        }
        server_info = {}
        # mock required stuff
        self.mock_object(self.share_manager.db, 'share_network_get',
                         mock.Mock(return_value=share_network))
        self.mock_object(self.share_manager, '_form_server_setup_info',
                         mock.Mock(return_value=network_info))
        self.mock_object(self.share_manager.driver, 'setup_server',
                         mock.Mock(return_value=server_info))
        self.mock_object(self.share_manager.db, 'share_server_update',
                         mock.Mock(return_value=share_server))
        self.mock_object(self.share_manager.driver, 'allocate_network')
        # execute method _setup_server
        result = self.share_manager._setup_server(
            self.context, share_server, metadata=metadata)
        # verify results
        self.assertEqual(share_server, result)
        # share_network_get is consulted twice by _setup_server
        self.share_manager.db.share_network_get.assert_has_calls([
            mock.call(self.context, share_server['share_network_id']),
            mock.call(self.context, share_server['share_network_id'])])
        self.share_manager._form_server_setup_info.assert_called_once_with(
            self.context, share_server, share_network)
        self.share_manager.driver.setup_server.assert_called_once_with(
            network_info, metadata=metadata)
        self.share_manager.db.share_server_update.assert_called_once_with(
            self.context, share_server['id'],
            {'status': constants.STATUS_ACTIVE,
             'identifier': share_server['id']})
        self.share_manager.driver.allocate_network.assert_called_once_with(
            self.context, share_server, share_network)
def setup_server_raise_exception(self, detail_data_proper):
# Setup required test data
share_server = {
'id': 'fake_id',
'share_network_id': 'fake_sn_id',
}
server_info = {'details_key': 'value'}
share_network = {'id': 'fake_sn_id'}
network_info = {
'fake_network_info_key': 'fake_network_info_value',
'security_services': [],
'network_type': 'fake_network_type',
}
if detail_data_proper:
detail_data = {'server_details': server_info}
self.mock_object(self.share_manager.db,
'share_server_backend_details_set')
else:
detail_data = 'not dictionary detail data'
# Mock required parameters
self.mock_object(self.share_manager.db, 'share_network_get',
mock.Mock(return_value=share_network))
self.mock_object(self.share_manager.db, 'share_server_update')
for m in ['deallocate_network', 'allocate_network']:
self.mock_object(self.share_manager.driver, m)
self.mock_object(self.share_manager, '_form_server_setup_info',
mock.Mock(return_value=network_info))
self.mock_object(self.share_manager.db,
'share_server_backend_details_set')
self.mock_object(self.share_manager.driver, 'setup_server',
mock.Mock(side_effect=exception.ManilaException(
detail_data=detail_data)))
# execute method _setup_server
self.assertRaises(
exception.ManilaException,
self.share_manager._setup_server,
self.context,
share_server,
)
# verify results
if detail_data_proper:
(self.share_manager.db.share_server_backend_details_set.
assert_called_once_with(
self.context, share_server['id'], server_info))
self.share_manager._form_server_setup_info.assert_called_once_with(
self.context, share_server, share_network)
self.share_manager.db.share_server_update.assert_called_once_with(
self.context, share_server['id'],
{'status': constants.STATUS_ERROR})
self.share_manager.db.share_network_get.assert_has_calls([
mock.call(self.context, share_server['share_network_id']),
mock.call(self.context, share_server['share_network_id'])])
self.share_manager.driver.allocate_network.assert_has_calls([
mock.call(self.context, share_server, share_network)])
self.share_manager.driver.deallocate_network.assert_has_calls([
mock.call(self.context, share_server['id'])])
    def test_setup_server_incorrect_detail_data(self):
        """Driver failure with malformed detail_data still errors the server."""
        self.setup_server_raise_exception(detail_data_proper=False)
    def test_setup_server_exception_in_driver(self):
        """Driver failure with proper detail_data persists backend details."""
        self.setup_server_raise_exception(detail_data_proper=True)
    @ddt.data({},
              {'detail_data': 'fake'},
              {'detail_data': {'server_details': 'fake'}},
              {'detail_data': {'server_details': {'fake': 'fake'}}},
              {'detail_data': {
                  'server_details': {'fake': 'fake', 'fake2': 'fake2'}}},)
    def test_setup_server_exception_in_cleanup_after_error(self, data):
        """Cleanup failures after a setup error are tolerated.

        share_network_get raises first; persisting each backend detail
        also raises, yet the server must still go to ERROR and its
        network be deallocated, with failures logged at debug level.
        """
        def get_server_details_from_data(data):
            # Extract data['detail_data']['server_details'] only when
            # both levels are dicts; otherwise an empty dict.
            d = data.get('detail_data')
            if not isinstance(d, dict):
                return {}
            d = d.get('server_details')
            if not isinstance(d, dict):
                return {}
            return d
        share_server = {'id': 'fake', 'share_network_id': 'fake'}
        details = get_server_details_from_data(data)
        exc_mock = mock.Mock(side_effect=exception.ManilaException(**data))
        details_mock = mock.Mock(side_effect=exception.ManilaException())
        self.mock_object(self.share_manager.db, 'share_network_get', exc_mock)
        self.mock_object(self.share_manager.db,
                         'share_server_backend_details_set', details_mock)
        self.mock_object(self.share_manager.db, 'share_server_update')
        self.mock_object(self.share_manager.driver, 'deallocate_network')
        self.mock_object(manager.LOG, 'debug')
        self.mock_object(manager.LOG, 'warning')
        self.assertRaises(
            exception.ManilaException,
            self.share_manager._setup_server,
            self.context,
            share_server,
        )
        self.assertTrue(self.share_manager.db.share_network_get.called)
        if details:
            # One backend-details save attempt per key/value pair.
            self.assertEqual(len(details), details_mock.call_count)
            expected = [mock.call(mock.ANY, share_server['id'], {k: v})
                        for k, v in details.items()]
            self.assertEqual(expected, details_mock.call_args_list)
        self.share_manager.db.share_server_update.assert_called_once_with(
            self.context,
            share_server['id'],
            {'status': constants.STATUS_ERROR})
        self.share_manager.driver.deallocate_network.assert_called_once_with(
            self.context, share_server['id']
        )
        self.assertFalse(manager.LOG.warning.called)
        if get_server_details_from_data(data):
            self.assertTrue(manager.LOG.debug.called)
def test_ensure_share_instance_has_pool_with_only_host(self):
fake_share = {
'status': constants.STATUS_AVAILABLE, 'host': 'host1', 'id': 1}
host = self.share_manager._ensure_share_instance_has_pool(
context.get_admin_context(), fake_share)
self.assertIsNone(host)
def test_ensure_share_instance_has_pool_with_full_pool_name(self):
fake_share = {'host': 'host1#pool0', 'id': 1,
'status': constants.STATUS_AVAILABLE}
fake_share_expected_value = 'pool0'
host = self.share_manager._ensure_share_instance_has_pool(
context.get_admin_context(), fake_share)
self.assertEqual(fake_share_expected_value, host)
    def test_ensure_share_instance_has_pool_unable_to_fetch_share(self):
        """Driver errors during pool lookup are logged, not propagated."""
        fake_share = {'host': 'host@backend', 'id': 1,
                      'status': constants.STATUS_AVAILABLE}
        with mock.patch.object(self.share_manager.driver, 'get_pool',
                               side_effect=Exception):
            with mock.patch.object(manager, 'LOG') as mock_LOG:
                self.share_manager._ensure_share_instance_has_pool(
                    context.get_admin_context(), fake_share)
                # The failure must be logged exactly once via
                # LOG.exception.
                self.assertEqual(1, mock_LOG.exception.call_count)
def test_ensure_share_instance_pool_notexist_and_get_from_driver(self):
fake_share_instance = {'host': 'host@backend', 'id': 1,
'status': constants.STATUS_AVAILABLE}
fake_host_expected_value = 'fake_pool'
self.mock_object(self.share_manager.db, 'share_instance_update')
self.mock_object(self.share_manager.driver, 'get_pool',
mock.Mock(return_value='fake_pool'))
host = self.share_manager._ensure_share_instance_has_pool(
context.get_admin_context(), fake_share_instance)
self.share_manager.db.share_instance_update.assert_any_call(
mock.ANY, 1, {'host': 'host@backend#fake_pool'})
self.assertEqual(fake_host_expected_value, host)
    def test__form_server_setup_info(self):
        """_form_server_setup_info merges network, allocation, server data."""
        def fake_network_allocations_get_for_share_server(*args, **kwargs):
            # Distinct allocations for the 'admin' label vs. any other.
            if kwargs.get('label') != 'admin':
                return ['foo', 'bar']
            return ['admin-foo', 'admin-bar']
        self.mock_object(
            self.share_manager.db, 'network_allocations_get_for_share_server',
            mock.Mock(
                side_effect=fake_network_allocations_get_for_share_server))
        fake_share_server = dict(
            id='fake_share_server_id', backend_details=dict(foo='bar'))
        fake_share_network = dict(
            segmentation_id='fake_segmentation_id',
            cidr='fake_cidr',
            neutron_net_id='fake_neutron_net_id',
            neutron_subnet_id='fake_neutron_subnet_id',
            security_services='fake_security_services',
            network_type='fake_network_type')
        expected = dict(
            server_id=fake_share_server['id'],
            segmentation_id=fake_share_network['segmentation_id'],
            cidr=fake_share_network['cidr'],
            neutron_net_id=fake_share_network['neutron_net_id'],
            neutron_subnet_id=fake_share_network['neutron_subnet_id'],
            security_services=fake_share_network['security_services'],
            network_allocations=(
                fake_network_allocations_get_for_share_server()),
            admin_network_allocations=(
                fake_network_allocations_get_for_share_server(label='admin')),
            backend_details=fake_share_server['backend_details'],
            network_type=fake_share_network['network_type'])
        network_info = self.share_manager._form_server_setup_info(
            self.context, fake_share_server, fake_share_network)
        self.assertEqual(expected, network_info)
        # Allocations must be fetched once per label: 'user' and 'admin'.
        (self.share_manager.db.network_allocations_get_for_share_server.
            assert_has_calls([
                mock.call(self.context, fake_share_server['id'], label='user'),
                mock.call(self.context, fake_share_server['id'], label='admin')
            ]))
    @ddt.data(
        {'network_info': {'network_type': 'vlan', 'segmentation_id': '100'}},
        {'network_info': {'network_type': 'vlan', 'segmentation_id': '1'}},
        {'network_info': {'network_type': 'vlan', 'segmentation_id': '4094'}},
        {'network_info': {'network_type': 'vxlan', 'segmentation_id': '100'}},
        {'network_info': {'network_type': 'vxlan', 'segmentation_id': '1'}},
        {'network_info': {'network_type': 'vxlan',
                          'segmentation_id': '16777215'}},
        {'network_info': {'network_type': 'gre', 'segmentation_id': '100'}},
        {'network_info': {'network_type': 'gre', 'segmentation_id': '1'}},
        {'network_info': {'network_type': 'gre',
                          'segmentation_id': '4294967295'}},
        {'network_info': {'network_type': 'flat', 'segmentation_id': None}},
        {'network_info': {'network_type': 'flat', 'segmentation_id': 0}},
        {'network_info': {'network_type': None, 'segmentation_id': None}},
        {'network_info': {'network_type': None, 'segmentation_id': 0}})
    @ddt.unpack
    def test_validate_segmentation_id_with_valid_values(self, network_info):
        """Valid network-type/segmentation-id combinations must not raise."""
        self.share_manager._validate_segmentation_id(network_info)
    @ddt.data(
        {'network_info': {'network_type': 'vlan', 'segmentation_id': None}},
        {'network_info': {'network_type': 'vlan', 'segmentation_id': -1}},
        {'network_info': {'network_type': 'vlan', 'segmentation_id': 0}},
        {'network_info': {'network_type': 'vlan', 'segmentation_id': '4095'}},
        {'network_info': {'network_type': 'vxlan', 'segmentation_id': None}},
        {'network_info': {'network_type': 'vxlan', 'segmentation_id': 0}},
        {'network_info': {'network_type': 'vxlan',
                          'segmentation_id': '16777216'}},
        {'network_info': {'network_type': 'gre', 'segmentation_id': None}},
        {'network_info': {'network_type': 'gre', 'segmentation_id': 0}},
        {'network_info': {'network_type': 'gre',
                          'segmentation_id': '4294967296'}},
        {'network_info': {'network_type': 'flat', 'segmentation_id': '1000'}},
        {'network_info': {'network_type': None, 'segmentation_id': '1000'}})
    @ddt.unpack
    def test_validate_segmentation_id_with_invalid_values(self, network_info):
        """Out-of-range or mismatched segmentation ids must raise."""
        self.assertRaises(exception.NetworkBadConfigurationException,
                          self.share_manager._validate_segmentation_id,
                          network_info)
@ddt.data(10, 36, 60)
def test_verify_server_cleanup_interval_valid_cases(self, val):
data = dict(DEFAULT=dict(unused_share_server_cleanup_interval=val))
with test_utils.create_temp_config_with_opts(data):
manager.ShareManager()
    @mock.patch.object(db, 'share_server_get_all_unused_deletable',
                       mock.Mock())
    @mock.patch.object(manager.ShareManager, 'delete_share_server',
                       mock.Mock())
    def test_delete_free_share_servers_cleanup_disabled(self):
        """No cleanup query is issued when automatic cleanup is disabled."""
        data = dict(DEFAULT=dict(automatic_share_server_cleanup=False))
        with test_utils.create_temp_config_with_opts(data):
            share_manager = manager.ShareManager()
            share_manager.driver.initialized = True
            share_manager.delete_free_share_servers(self.context)
            self.assertFalse(db.share_server_get_all_unused_deletable.called)
    @mock.patch.object(db, 'share_server_get_all_unused_deletable',
                       mock.Mock())
    @mock.patch.object(manager.ShareManager, 'delete_share_server',
                       mock.Mock())
    def test_delete_free_share_servers_driver_handles_ss_disabled(self):
        """No cleanup occurs when the driver does not handle share servers."""
        data = dict(DEFAULT=dict(driver_handles_share_servers=False))
        with test_utils.create_temp_config_with_opts(data):
            share_manager = manager.ShareManager()
            share_manager.driver.initialized = True
            share_manager.delete_free_share_servers(self.context)
            self.assertFalse(db.share_server_get_all_unused_deletable.called)
            self.assertFalse(share_manager.delete_share_server.called)
    @mock.patch.object(db, 'share_server_get_all_unused_deletable',
                       mock.Mock(return_value=['server1', ]))
    @mock.patch.object(manager.ShareManager, 'delete_share_server',
                       mock.Mock())
    @mock.patch.object(timeutils, 'utcnow', mock.Mock(
        return_value=datetime.timedelta(minutes=20)))
    def test_delete_free_share_servers(self):
        """Unused deletable servers are looked up and deleted.

        utcnow is mocked to timedelta(minutes=20) so that
        "now - default 10-minute interval" evaluates to
        timedelta(minutes=10), matching the assertion below.
        """
        self.share_manager.delete_free_share_servers(self.context)
        db.share_server_get_all_unused_deletable.assert_called_once_with(
            self.context,
            self.share_manager.host,
            datetime.timedelta(minutes=10))
        self.share_manager.delete_share_server.assert_called_once_with(
            self.context,
            'server1')
        timeutils.utcnow.assert_called_once_with()
    @mock.patch('manila.tests.fake_notifier.FakeNotifier._notify')
    def test_extend_share_invalid(self, mock_notify):
        """Driver failure during extend rolls back the quota reservation."""
        share = db_utils.create_share()
        share_id = share['id']
        reservations = {}
        # Sanity check: notifier starts with no recorded notifications.
        mock_notify.assert_not_called()
        self.mock_object(self.share_manager, 'driver')
        self.mock_object(self.share_manager.db, 'share_update')
        self.mock_object(quota.QUOTAS, 'rollback')
        self.mock_object(self.share_manager.driver, 'extend_share',
                         mock.Mock(side_effect=Exception('fake')))
        self.assertRaises(
            exception.ShareExtendingError,
            self.share_manager.extend_share, self.context, share_id, 123, {})
        quota.QUOTAS.rollback.assert_called_once_with(
            mock.ANY,
            reservations,
            project_id=six.text_type(share['project_id']),
            user_id=six.text_type(share['user_id']),
            share_type_id=None,
        )
    @mock.patch('manila.tests.fake_notifier.FakeNotifier._notify')
    def test_extend_share(self, mock_notify):
        """Successful extend commits quota, updates size, and notifies."""
        share = db_utils.create_share()
        share_id = share['id']
        new_size = 123
        shr_update = {
            'size': int(new_size),
            'status': constants.STATUS_AVAILABLE.lower()
        }
        reservations = {}
        fake_share_server = 'fake'
        # Sanity check: notifier starts with no recorded notifications.
        mock_notify.assert_not_called()
        # NOTE: local 'manager' shadows the module-level 'manager' import
        # for the rest of this method.
        manager = self.share_manager
        self.mock_object(manager, 'driver')
        self.mock_object(manager.db, 'share_get',
                         mock.Mock(return_value=share))
        self.mock_object(manager.db, 'share_update',
                         mock.Mock(return_value=share))
        self.mock_object(quota.QUOTAS, 'commit')
        self.mock_object(manager.driver, 'extend_share')
        self.mock_object(manager, '_get_share_server',
                         mock.Mock(return_value=fake_share_server))
        self.share_manager.extend_share(self.context, share_id,
                                        new_size, reservations)
        self.assertTrue(manager._get_share_server.called)
        manager.driver.extend_share.assert_called_once_with(
            utils.IsAMatcher(models.ShareInstance),
            new_size, share_server=fake_share_server
        )
        quota.QUOTAS.commit.assert_called_once_with(
            mock.ANY, reservations, project_id=share['project_id'],
            user_id=share['user_id'], share_type_id=None)
        manager.db.share_update.assert_called_once_with(
            mock.ANY, share_id, shr_update
        )
        self.assert_notify_called(mock_notify,
                                  (['INFO', 'share.extend.start'],
                                   ['INFO', 'share.extend.end']))
    def test_shrink_share_quota_error(self):
        """Quota reservation failure during shrink raises ShareShrinkingError."""
        size = 5
        new_size = 1
        share = db_utils.create_share(size=size)
        share_id = share['id']
        self.mock_object(self.share_manager.db, 'share_update')
        self.mock_object(quota.QUOTAS, 'reserve',
                         mock.Mock(side_effect=Exception('fake')))
        self.assertRaises(
            exception.ShareShrinkingError,
            self.share_manager.shrink_share, self.context, share_id, new_size)
        # gigabytes is negative: shrinking releases quota.
        quota.QUOTAS.reserve.assert_called_with(
            mock.ANY,
            project_id=six.text_type(share['project_id']),
            user_id=six.text_type(share['user_id']),
            share_type_id=None,
            gigabytes=new_size - size
        )
        self.assertTrue(self.share_manager.db.share_update.called)
    @ddt.data({'exc': exception.InvalidShare('fake'),
               'status': constants.STATUS_SHRINKING_ERROR},
              {'exc': exception.ShareShrinkingPossibleDataLoss("fake"),
               'status': constants.STATUS_SHRINKING_POSSIBLE_DATA_LOSS_ERROR})
    @ddt.unpack
    def test_shrink_share_invalid(self, exc, status):
        """Driver shrink failures map to the matching error status.

        Quota is reserved then rolled back, and the share status is set
        according to the exception type raised by the driver.
        """
        share = db_utils.create_share()
        new_size = 1
        share_id = share['id']
        size_decrease = int(share['size']) - new_size
        self.mock_object(self.share_manager, 'driver')
        self.mock_object(self.share_manager.db, 'share_update')
        self.mock_object(self.share_manager.db, 'share_get',
                         mock.Mock(return_value=share))
        self.mock_object(quota.QUOTAS, 'reserve')
        self.mock_object(quota.QUOTAS, 'rollback')
        self.mock_object(self.share_manager.driver, 'shrink_share',
                         mock.Mock(side_effect=exc))
        self.assertRaises(
            exception.ShareShrinkingError,
            self.share_manager.shrink_share, self.context, share_id, new_size)
        self.share_manager.driver.shrink_share.assert_called_once_with(
            utils.IsAMatcher(models.ShareInstance),
            new_size, share_server=None
        )
        self.share_manager.db.share_update.assert_called_once_with(
            mock.ANY, share_id, {'status': status}
        )
        quota.QUOTAS.reserve.assert_called_once_with(
            mock.ANY, gigabytes=-size_decrease, project_id=share['project_id'],
            share_type_id=None, user_id=share['user_id'],
        )
        quota.QUOTAS.rollback.assert_called_once_with(
            mock.ANY, mock.ANY, project_id=share['project_id'],
            share_type_id=None, user_id=share['user_id'],
        )
        self.assertTrue(self.share_manager.db.share_get.called)
    @mock.patch('manila.tests.fake_notifier.FakeNotifier._notify')
    def test_shrink_share(self, mock_notify):
        """Successful shrink reserves/commits quota, updates, notifies."""
        share = db_utils.create_share()
        share_id = share['id']
        new_size = 123
        shr_update = {
            'size': int(new_size),
            'status': constants.STATUS_AVAILABLE
        }
        fake_share_server = 'fake'
        size_decrease = int(share['size']) - new_size
        # Sanity check: notifier starts with no recorded notifications.
        mock_notify.assert_not_called()
        # NOTE: local 'manager' shadows the module-level 'manager' import
        # for the rest of this method.
        manager = self.share_manager
        self.mock_object(manager, 'driver')
        self.mock_object(manager.db, 'share_get',
                         mock.Mock(return_value=share))
        self.mock_object(manager.db, 'share_update',
                         mock.Mock(return_value=share))
        self.mock_object(quota.QUOTAS, 'commit')
        self.mock_object(quota.QUOTAS, 'reserve')
        self.mock_object(manager.driver, 'shrink_share')
        self.mock_object(manager, '_get_share_server',
                         mock.Mock(return_value=fake_share_server))
        self.share_manager.shrink_share(self.context, share_id, new_size)
        self.assertTrue(manager._get_share_server.called)
        manager.driver.shrink_share.assert_called_once_with(
            utils.IsAMatcher(models.ShareInstance),
            new_size, share_server=fake_share_server
        )
        quota.QUOTAS.reserve.assert_called_once_with(
            mock.ANY, gigabytes=-size_decrease, project_id=share['project_id'],
            share_type_id=None, user_id=share['user_id'],
        )
        quota.QUOTAS.commit.assert_called_once_with(
            mock.ANY, mock.ANY, project_id=share['project_id'],
            share_type_id=None, user_id=share['user_id'],
        )
        manager.db.share_update.assert_called_once_with(
            mock.ANY, share_id, shr_update
        )
        self.assert_notify_called(mock_notify,
                                  (['INFO', 'share.shrink.start'],
                                   ['INFO', 'share.shrink.end']))
    def test_report_driver_status_driver_handles_ss_false(self):
        """DHSS=False: stats are refreshed without pool mapping lookups."""
        fake_stats = {'field': 'val'}
        fake_pool = {'name': 'pool1'}
        self.share_manager.last_capabilities = {'field': 'old_val'}
        self.mock_object(self.share_manager, 'driver', mock.Mock())
        driver = self.share_manager.driver
        driver.get_share_stats = mock.Mock(return_value=fake_stats)
        self.mock_object(db, 'share_server_get_all_by_host', mock.Mock())
        driver.driver_handles_share_servers = False
        driver.get_share_server_pools = mock.Mock(return_value=fake_pool)
        self.share_manager._report_driver_status(self.context)
        driver.get_share_stats.assert_called_once_with(
            refresh=True)
        # Without DHSS, no per-server pool information is collected.
        self.assertFalse(db.share_server_get_all_by_host.called)
        self.assertFalse(driver.get_share_server_pools.called)
        self.assertEqual(fake_stats, self.share_manager.last_capabilities)
    def test_report_driver_status_driver_handles_ss(self):
        """DHSS=True: stats include a per-share-server pool mapping."""
        fake_stats = {'field': 'val'}
        fake_ss = {'id': '1234'}
        fake_pool = {'name': 'pool1'}
        self.mock_object(self.share_manager, 'driver', mock.Mock())
        driver = self.share_manager.driver
        driver.get_share_stats = mock.Mock(return_value=fake_stats)
        self.mock_object(db, 'share_server_get_all_by_host', mock.Mock(
            return_value=[fake_ss]))
        driver.driver_handles_share_servers = True
        driver.get_share_server_pools = mock.Mock(return_value=fake_pool)
        self.share_manager._report_driver_status(self.context)
        driver.get_share_stats.assert_called_once_with(refresh=True)
        db.share_server_get_all_by_host.assert_called_once_with(
            self.context,
            self.share_manager.host)
        driver.get_share_server_pools.assert_called_once_with(fake_ss)
        expected_stats = {
            'field': 'val',
            'server_pools_mapping': {
                '1234': fake_pool},
        }
        self.assertEqual(expected_stats, self.share_manager.last_capabilities)
    def test_report_driver_status_empty_share_stats(self):
        """Empty stats from the driver leave last_capabilities untouched."""
        old_capabilities = {'field': 'old_val'}
        fake_pool = {'name': 'pool1'}
        self.share_manager.last_capabilities = old_capabilities
        self.mock_object(self.share_manager, 'driver', mock.Mock())
        driver = self.share_manager.driver
        driver.get_share_stats = mock.Mock(return_value={})
        self.mock_object(db, 'share_server_get_all_by_host', mock.Mock())
        driver.driver_handles_share_servers = True
        driver.get_share_server_pools = mock.Mock(return_value=fake_pool)
        self.share_manager._report_driver_status(self.context)
        driver.get_share_stats.assert_called_once_with(refresh=True)
        self.assertFalse(db.share_server_get_all_by_host.called)
        self.assertFalse(driver.get_share_server_pools.called)
        self.assertEqual(old_capabilities,
                         self.share_manager.last_capabilities)
    def test_create_share_group(self):
        """A plain share group creation ends with status AVAILABLE."""
        fake_group = {
            'id': 'fake_id',
            'availability_zone_id': 'fake_az',
        }
        self.mock_object(self.share_manager.db, 'share_group_get',
                         mock.Mock(return_value=fake_group))
        self.mock_object(self.share_manager.db, 'share_group_update',
                         mock.Mock(return_value=fake_group))
        self.mock_object(self.share_manager.driver,
                         'create_share_group',
                         mock.Mock(return_value=None))
        self.share_manager.create_share_group(self.context, "fake_id")
        self.share_manager.db.share_group_update.assert_called_once_with(
            mock.ANY, 'fake_id', {
                'status': constants.STATUS_AVAILABLE,
                'created_at': mock.ANY,
                'consistent_snapshot_support': None,
                'availability_zone_id': fake_group['availability_zone_id'],
            }
        )
    def test_create_cg_with_share_network_driver_not_handles_servers(self):
        """Group with a share network fails when DHSS=False.

        NOTE: 'cg' in the name is legacy consistency-group wording; the
        code under test is share groups.
        """
        manager.CONF.set_default('driver_handles_share_servers', False)
        self.mock_object(
            self.share_manager.driver.configuration, 'safe_get',
            mock.Mock(return_value=False))
        cg_id = 'fake_group_id'
        share_network_id = 'fake_sn'
        fake_group = {'id': 'fake_id', 'share_network_id': share_network_id}
        self.mock_object(
            self.share_manager.db, 'share_group_get',
            mock.Mock(return_value=fake_group))
        self.mock_object(self.share_manager.db, 'share_group_update')
        self.assertRaises(
            exception.ManilaException,
            self.share_manager.create_share_group, self.context, cg_id)
        self.share_manager.db.share_group_get.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), cg_id)
        self.share_manager.db.share_group_update.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), cg_id,
            {'status': constants.STATUS_ERROR})
    def test_create_sg_with_share_network_driver_handles_servers(self):
        """Group with a share network succeeds when DHSS=True.

        A share server is provided for the group, and the group ends up
        AVAILABLE.
        """
        manager.CONF.set_default('driver_handles_share_servers', True)
        self.mock_object(
            self.share_manager.driver.configuration, 'safe_get',
            mock.Mock(return_value=True))
        share_network_id = 'fake_sn'
        fake_group = {
            'id': 'fake_id',
            'share_network_id': share_network_id,
            'host': "fake_host",
            'availability_zone_id': 'fake_az',
        }
        self.mock_object(
            self.share_manager.db, 'share_group_get',
            mock.Mock(return_value=fake_group))
        self.mock_object(
            self.share_manager.db, 'share_group_update',
            mock.Mock(return_value=fake_group))
        self.mock_object(
            self.share_manager, '_provide_share_server_for_share_group',
            mock.Mock(return_value=({}, fake_group)))
        self.mock_object(
            self.share_manager.driver, 'create_share_group',
            mock.Mock(return_value=None))
        self.share_manager.create_share_group(self.context, "fake_id")
        self.share_manager.db.share_group_update.assert_called_once_with(
            mock.ANY, 'fake_id', {
                'status': constants.STATUS_AVAILABLE,
                'created_at': mock.ANY,
                'consistent_snapshot_support': None,
                'availability_zone_id': fake_group['availability_zone_id'],
            }
        )
    def test_create_share_group_with_update(self):
        """Model updates returned by the driver are persisted to the group."""
        fake_group = {
            'id': 'fake_id',
            'availability_zone_id': 'fake_az',
        }
        self.mock_object(self.share_manager.db, 'share_group_get',
                         mock.Mock(return_value=fake_group))
        self.mock_object(self.share_manager.db, 'share_group_update',
                         mock.Mock(return_value=fake_group))
        self.mock_object(self.share_manager.driver,
                         'create_share_group',
                         mock.Mock(return_value={'foo': 'bar'}))
        self.share_manager.create_share_group(self.context, "fake_id")
        # First the driver-provided update, then the final status update.
        (self.share_manager.db.share_group_update.
            assert_any_call(mock.ANY, 'fake_id', {'foo': 'bar'}))
        self.share_manager.db.share_group_update.assert_any_call(
            mock.ANY, 'fake_id', {
                'status': constants.STATUS_AVAILABLE,
                'created_at': mock.ANY,
                'consistent_snapshot_support': None,
                'availability_zone_id': fake_group['availability_zone_id'],
            }
        )
    def test_create_share_group_with_error(self):
        """Driver failure during group creation sets status ERROR."""
        fake_group = {
            'id': 'fake_id',
            'availability_zone_id': 'fake_az',
        }
        self.mock_object(self.share_manager.db, 'share_group_get',
                         mock.Mock(return_value=fake_group))
        self.mock_object(self.share_manager.db, 'share_group_update',
                         mock.Mock(return_value=fake_group))
        self.mock_object(self.share_manager.driver,
                         'create_share_group',
                         mock.Mock(side_effect=exception.Error))
        self.assertRaises(exception.Error,
                          self.share_manager.create_share_group,
                          self.context, "fake_id")
        self.share_manager.db.share_group_update.assert_called_once_with(
            mock.ANY, 'fake_id', {
                'status': constants.STATUS_ERROR,
                'consistent_snapshot_support': None,
                'availability_zone_id': fake_group['availability_zone_id'],
            }
        )
def test_create_share_group_from_sg_snapshot(self):
fake_group = {
'id': 'fake_id',
'source_share_group_snapshot_id': 'fake_snap_id',
'shares': [],
'share_server_id': 'fake_ss_id',
'availability_zone_id': 'fake_az',
}
fake_ss = {'id': 'fake_ss_id', 'share_network_id': 'fake_sn'}
fake_snap = {'id': 'fake_snap_id', 'share_group_snapshot_members': [],
'share_group': {'share_server_id': fake_ss['id']}}
self.mock_object(self.share_manager.db, 'share_group_get',
mock.Mock(return_value=fake_group))
self.mock_object(self.share_manager.db, 'share_group_snapshot_get',
mock.Mock(return_value=fake_snap))
self.mock_object(self.share_manager.db, 'share_server_get',
mock.Mock(
return_value=fake_ss))
self.mock_object(self.share_manager.db, 'share_group_update',
mock.Mock(return_value=fake_group))
mock_create_sg_from_sg_snap = self.mock_object(
self.share_manager.driver,
'create_share_group_from_share_group_snapshot',
mock.Mock(return_value=(None, None)))
self.share_manager.create_share_group(self.context, "fake_id")
self.share_manager.db.share_group_update.assert_called_once_with(
mock.ANY, 'fake_id',
{'status': constants.STATUS_AVAILABLE,
'created_at': mock.ANY,
'availability_zone_id': fake_group['availability_zone_id'],
'consistent_snapshot_support': None})
self.share_manager.db.share_server_get(mock.ANY, 'fake_ss_id')
mock_create_sg_from_sg_snap.assert_called_once_with(
mock.ANY, fake_group, fake_snap, share_server=fake_ss)
    def test_create_sg_snapshot_share_network_driver_not_handles_servers(self):
        """Group-from-snapshot with a share network fails when DHSS=False."""
        manager.CONF.set_default('driver_handles_share_servers', False)
        self.mock_object(
            self.share_manager.driver.configuration, 'safe_get',
            mock.Mock(return_value=False))
        sg_id = 'fake_share_group_id'
        share_network_id = 'fake_sn'
        fake_group = {
            'id': 'fake_id',
            'source_share_group_snapshot_id': 'fake_snap_id',
            'shares': [],
            'share_network_id': share_network_id,
            'host': "fake_host",
        }
        self.mock_object(
            self.share_manager.db, 'share_group_get',
            mock.Mock(return_value=fake_group))
        fake_snap = {'id': 'fake_snap_id', 'share_group_snapshot_members': []}
        self.mock_object(self.share_manager.db, 'share_group_snapshot_get',
                         mock.Mock(return_value=fake_snap))
        self.mock_object(self.share_manager.db, 'share_group_update')
        self.assertRaises(exception.ManilaException,
                          self.share_manager.create_share_group,
                          self.context, sg_id)
        self.share_manager.db.share_group_get.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), sg_id)
        self.share_manager.db.share_group_update.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), sg_id,
            {'status': constants.STATUS_ERROR})
    def test_create_share_group_from_sg_snapshot_share_network_dhss(self):
        """Group-from-snapshot with a share network succeeds when DHSS=True."""
        manager.CONF.set_default('driver_handles_share_servers', True)
        self.mock_object(self.share_manager.driver.configuration, 'safe_get',
                         mock.Mock(return_value=True))
        share_network_id = 'fake_sn'
        fake_group = {
            'id': 'fake_id',
            'source_share_group_snapshot_id': 'fake_snap_id',
            'shares': [],
            'share_network_id': share_network_id,
            'availability_zone_id': 'fake_az',
        }
        fake_snap = {'id': 'fake_snap_id', 'share_group_snapshot_members': []}
        self.mock_object(self.share_manager.db, 'share_group_get',
                         mock.Mock(return_value=fake_group))
        self.mock_object(self.share_manager.db, 'share_group_snapshot_get',
                         mock.Mock(return_value=fake_snap))
        self.mock_object(self.share_manager.db, 'share_group_update',
                         mock.Mock(return_value=fake_group))
        self.mock_object(
            self.share_manager, '_provide_share_server_for_share_group',
            mock.Mock(return_value=({}, fake_group)))
        self.mock_object(
            self.share_manager.driver,
            'create_share_group_from_share_group_snapshot',
            mock.Mock(return_value=(None, None)))
        self.share_manager.create_share_group(self.context, "fake_id")
        self.share_manager.db.share_group_update.assert_called_once_with(
            mock.ANY, 'fake_id',
            {'status': constants.STATUS_AVAILABLE,
             'created_at': mock.ANY,
             'consistent_snapshot_support': None,
             'availability_zone_id': fake_group['availability_zone_id']})
    def test_create_share_group_from_share_group_snapshot_with_update(self):
        """Driver group-model updates from snapshot creation are persisted."""
        fake_group = {
            'id': 'fake_id',
            'source_share_group_snapshot_id': 'fake_snap_id',
            'shares': [],
            'availability_zone_id': 'fake_az',
        }
        fake_snap = {'id': 'fake_snap_id', 'share_group_snapshot_members': []}
        self.mock_object(self.share_manager.db, 'share_group_get',
                         mock.Mock(return_value=fake_group))
        self.mock_object(self.share_manager.db, 'share_group_snapshot_get',
                         mock.Mock(return_value=fake_snap))
        self.mock_object(self.share_manager.db, 'share_group_update',
                         mock.Mock(return_value=fake_group))
        self.mock_object(self.share_manager.driver,
                         'create_share_group_from_share_group_snapshot',
                         mock.Mock(return_value=({'foo': 'bar'}, None)))
        self.share_manager.create_share_group(self.context, "fake_id")
        # First the driver-provided update, then the final status update.
        self.share_manager.db.share_group_update.assert_any_call(
            mock.ANY, 'fake_id', {'foo': 'bar'})
        self.share_manager.db.share_group_update.assert_any_call(
            mock.ANY, 'fake_id', {
                'status': constants.STATUS_AVAILABLE,
                'created_at': mock.ANY,
                'consistent_snapshot_support': None,
                'availability_zone_id': fake_group['availability_zone_id'],
            }
        )
    def test_create_share_group_from_sg_snapshot_with_share_update(self):
        """Per-share updates from the driver are applied to instances.

        The driver returns a share-update list; each entry's fields are
        written to the share instance and its export locations saved.
        """
        fake_share = {'id': 'fake_share_id'}
        fake_export_locations = ['my_export_location']
        fake_group = {
            'id': 'fake_id',
            'source_share_group_snapshot_id': 'fake_snap_id',
            'shares': [fake_share],
            'availability_zone_id': 'fake_az',
        }
        fake_snap = {'id': 'fake_snap_id', 'share_group_snapshot_members': []}
        self.mock_object(self.share_manager.db, 'share_group_get',
                         mock.Mock(return_value=fake_group))
        self.mock_object(self.share_manager.db, 'share_group_snapshot_get',
                         mock.Mock(return_value=fake_snap))
        self.mock_object(self.share_manager.db, 'share_group_update')
        self.mock_object(self.share_manager.db, 'share_instance_update')
        self.mock_object(self.share_manager.db,
                         'share_export_locations_update')
        fake_share_update_list = [{'id': fake_share['id'],
                                   'foo': 'bar',
                                   'export_locations': fake_export_locations}]
        self.mock_object(self.share_manager.driver,
                         'create_share_group_from_share_group_snapshot',
                         mock.Mock(
                             return_value=(None, fake_share_update_list)))
        self.share_manager.create_share_group(self.context, "fake_id")
        self.share_manager.db.share_instance_update.assert_any_call(
            mock.ANY, 'fake_share_id', {'foo': 'bar'})
        self.share_manager.db.share_export_locations_update.assert_any_call(
            mock.ANY, 'fake_share_id', fake_export_locations)
        self.share_manager.db.share_group_update.assert_any_call(
            mock.ANY, 'fake_id', {
                'status': constants.STATUS_AVAILABLE,
                'created_at': mock.ANY,
                'consistent_snapshot_support': None,
                'availability_zone_id': fake_group['availability_zone_id'],
            }
        )
def test_create_share_group_from_sg_snapshot_with_error(self):
fake_group = {
'id': 'fake_id',
'source_share_group_snapshot_id': 'fake_snap_id',
'shares': [],
'availability_zone_id': 'fake_az',
}
fake_snap = {'id': 'fake_snap_id', 'share_group_snapshot_members': []}
self.mock_object(self.share_manager.db, 'share_group_get',
mock.Mock(return_value=fake_group))
self.mock_object(self.share_manager.db, 'share_group_snapshot_get',
mock.Mock(return_value=fake_snap))
self.mock_object(self.share_manager.db,
'share_instances_get_all_by_share_group_id',
mock.Mock(return_value=[]))
self.mock_object(self.share_manager.db, 'share_group_update',
mock.Mock(return_value=fake_group))
self.mock_object(self.share_manager.driver,
'create_share_group_from_share_group_snapshot',
mock.Mock(side_effect=exception.Error))
self.assertRaises(exception.Error,
self.share_manager.create_share_group,
self.context, "fake_id")
self.share_manager.db.share_group_update.assert_called_once_with(
mock.ANY, 'fake_id', {
'status': constants.STATUS_ERROR,
'consistent_snapshot_support': None,
'availability_zone_id': fake_group['availability_zone_id'],
}
)
def test_create_share_group_from_sg_snapshot_with_share_error(self):
fake_share = {'id': 'fake_share_id'}
fake_group = {
'id': 'fake_id',
'source_share_group_snapshot_id': 'fake_snap_id',
'shares': [fake_share],
'availability_zone_id': 'fake_az',
}
fake_snap = {'id': 'fake_snap_id', 'share_group_snapshot_members': []}
self.mock_object(self.share_manager.db, 'share_group_get',
mock.Mock(return_value=fake_group))
self.mock_object(self.share_manager.db, 'share_group_snapshot_get',
mock.Mock(return_value=fake_snap))
self.mock_object(self.share_manager.db,
'share_instances_get_all_by_share_group_id',
mock.Mock(return_value=[fake_share]))
self.mock_object(self.share_manager.db, 'share_group_update')
self.mock_object(self.share_manager.db, 'share_instance_update')
self.mock_object(self.share_manager.driver,
'create_share_group_from_share_group_snapshot',
mock.Mock(side_effect=exception.Error))
self.assertRaises(exception.Error,
self.share_manager.create_share_group,
self.context, "fake_id")
self.share_manager.db.share_instance_update.assert_any_call(
mock.ANY, 'fake_share_id', {'status': constants.STATUS_ERROR})
self.share_manager.db.share_group_update.assert_called_once_with(
mock.ANY, 'fake_id', {
'status': constants.STATUS_ERROR,
'consistent_snapshot_support': None,
'availability_zone_id': fake_group['availability_zone_id'],
}
)
def test_delete_share_group(self):
fake_group = {'id': 'fake_id'}
self.mock_object(self.share_manager.db, 'share_group_get',
mock.Mock(return_value=fake_group))
self.mock_object(self.share_manager.db, 'share_group_update',
mock.Mock(return_value=fake_group))
self.mock_object(self.share_manager.db, 'share_group_destroy',
mock.Mock(return_value=fake_group))
self.mock_object(self.share_manager.driver,
'delete_share_group',
mock.Mock(return_value=None))
self.share_manager.delete_share_group(self.context, "fake_id")
self.share_manager.db.share_group_destroy.assert_called_once_with(
mock.ANY, 'fake_id')
def test_delete_share_group_with_update(self):
fake_group = {'id': 'fake_id'}
self.mock_object(self.share_manager.db, 'share_group_get',
mock.Mock(return_value=fake_group))
self.mock_object(self.share_manager.db, 'share_group_update',
mock.Mock(return_value=fake_group))
self.mock_object(self.share_manager.db, 'share_group_destroy',
mock.Mock(return_value=fake_group))
self.mock_object(self.share_manager.driver,
'delete_share_group',
mock.Mock(return_value={'foo': 'bar'}))
self.share_manager.delete_share_group(self.context, "fake_id")
self.share_manager.db.share_group_update.assert_called_once_with(
mock.ANY, 'fake_id', {'foo': 'bar'})
self.share_manager.db.share_group_destroy.assert_called_once_with(
mock.ANY, 'fake_id')
def test_delete_share_group_with_error(self):
fake_group = {'id': 'fake_id'}
self.mock_object(self.share_manager.db, 'share_group_get',
mock.Mock(return_value=fake_group))
self.mock_object(self.share_manager.db, 'share_group_update',
mock.Mock(return_value=fake_group))
self.mock_object(self.share_manager.driver,
'delete_share_group',
mock.Mock(side_effect=exception.Error))
self.assertRaises(exception.Error,
self.share_manager.delete_share_group,
self.context, "fake_id")
self.share_manager.db.share_group_update.assert_called_once_with(
mock.ANY, 'fake_id', {'status': constants.STATUS_ERROR})
def test_create_share_group_snapshot(self):
fake_snap = {
'id': 'fake_snap_id',
'share_group': {},
'share_group_snapshot_members': [],
}
self.mock_object(
self.share_manager.db, 'share_group_snapshot_get',
mock.Mock(return_value=fake_snap))
mock_sg_snap_update = self.mock_object(
self.share_manager.db, 'share_group_snapshot_update',
mock.Mock(return_value=fake_snap))
self.mock_object(
self.share_manager.driver,
'create_share_group_snapshot',
mock.Mock(return_value=(None, None)))
self.share_manager.create_share_group_snapshot(
self.context, fake_snap['id'])
mock_sg_snap_update.assert_called_once_with(
mock.ANY, fake_snap['id'],
{'status': constants.STATUS_AVAILABLE, 'updated_at': mock.ANY})
def test_create_share_group_snapshot_with_update(self):
fake_snap = {'id': 'fake_snap_id', 'share_group': {},
'share_group_snapshot_members': []}
self.mock_object(self.share_manager.db, 'share_group_snapshot_get',
mock.Mock(return_value=fake_snap))
self.mock_object(self.share_manager.db, 'share_group_snapshot_update',
mock.Mock(return_value=fake_snap))
self.mock_object(self.share_manager.driver,
'create_share_group_snapshot',
mock.Mock(return_value=({'foo': 'bar'}, None)))
self.share_manager.create_share_group_snapshot(
self.context, fake_snap['id'])
self.share_manager.db.share_group_snapshot_update.assert_any_call(
mock.ANY, 'fake_snap_id', {'foo': 'bar'})
self.share_manager.db.share_group_snapshot_update.assert_any_call(
mock.ANY, fake_snap['id'],
{'status': constants.STATUS_AVAILABLE, 'updated_at': mock.ANY})
    def test_create_share_group_snapshot_with_member_update(self):
        """Only whitelisted member fields are persisted from driver updates.

        The driver returns three member updates; the test asserts that
        only 'provider_location', 'size' and (when present) 'status' are
        written for updates 1 and 2, while 'export_locations' and the
        'should_not_be_used_*' keys are dropped.  Update 3 carries no
        'id' key; presumably it cannot be matched to a member and is
        ignored — only updates 1 and 2 are asserted below.
        """
        fake_member1 = {'id': 'fake_member_id_1', 'share_instance_id': 'si_1'}
        fake_member2 = {'id': 'fake_member_id_2', 'share_instance_id': 'si_2'}
        fake_member3 = {'id': 'fake_member_id_3', 'share_instance_id': 'si_3'}
        fake_member_update1 = {
            'id': fake_member1['id'],
            'provider_location': 'fake_provider_location_1',
            'size': 13,
            'export_locations': ['fake_el_1_1', 'fake_el_1_2'],
            'should_not_be_used_k1': 'should_not_be_used_v1',
        }
        fake_member_update2 = {
            'id': fake_member2['id'],
            'provider_location': 'fake_provider_location_2',
            'size': 31,
            'export_locations': ['fake_el_2_1', 'fake_el_2_2'],
            'status': 'fake_status_for_update',
            'should_not_be_used_k2': 'should_not_be_used_k2',
        }
        # Deliberately missing the 'id' key.
        fake_member_update3 = {
            'provider_location': 'fake_provider_location_3',
            'size': 42,
            'export_locations': ['fake_el_3_1', 'fake_el_3_2'],
            'should_not_be_used_k3': 'should_not_be_used_k3',
        }
        expected_member_update1 = {
            'id': fake_member_update1['id'],
            'provider_location': fake_member_update1['provider_location'],
            'size': fake_member_update1['size'],
        }
        expected_member_update2 = {
            'id': fake_member_update2['id'],
            'provider_location': fake_member_update2['provider_location'],
            'size': fake_member_update2['size'],
            'status': fake_member_update2['status'],
        }
        fake_snap = {
            'id': 'fake_snap_id',
            'share_group': {},
            'share_group_snapshot_members': [
                fake_member1, fake_member2, fake_member3],
        }
        self.mock_object(
            self.share_manager.db, 'share_group_snapshot_get',
            mock.Mock(return_value=fake_snap))
        mock_sg_snapshot_update = self.mock_object(
            self.share_manager.db, 'share_group_snapshot_update',
            mock.Mock(return_value=fake_snap))
        mock_sg_snapshot_member_update = self.mock_object(
            self.share_manager.db, 'share_group_snapshot_member_update')
        self.mock_object(
            self.share_manager.db, 'share_instance_get',
            mock.Mock(return_value={'id': 'blah'}))
        # utcnow is mocked to return successive small integers so
        # 'updated_at' can be asserted deterministically below.
        self.mock_object(
            timeutils, 'utcnow', mock.Mock(side_effect=range(1, 10)))
        mock_driver_create_sg_snapshot = self.mock_object(
            self.share_manager.driver, 'create_share_group_snapshot',
            mock.Mock(return_value=(
                None, [fake_member_update1, fake_member_update2,
                       fake_member_update3])))
        self.share_manager.create_share_group_snapshot(
            self.context, fake_snap['id'])
        mock_driver_create_sg_snapshot.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            fake_snap, share_server=None)
        mock_sg_snapshot_update.assert_called_once_with(
            mock.ANY, fake_snap['id'],
            {'status': constants.STATUS_AVAILABLE, 'updated_at': mock.ANY})
        mock_sg_snapshot_member_update.assert_has_calls([
            mock.call(
                utils.IsAMatcher(context.RequestContext),
                expected_member_update1['id'],
                {'provider_location': expected_member_update1[
                    'provider_location'],
                 'size': expected_member_update1['size'],
                 'updated_at': 1,
                 'status': manager.constants.STATUS_AVAILABLE}),
            mock.call(
                utils.IsAMatcher(context.RequestContext),
                expected_member_update2['id'],
                {'provider_location': expected_member_update2[
                    'provider_location'],
                 'size': expected_member_update2['size'],
                 'updated_at': 1,
                 'status': expected_member_update2['status']}),
        ])
def test_create_group_snapshot_with_error(self):
fake_snap = {'id': 'fake_snap_id', 'share_group': {},
'share_group_snapshot_members': []}
self.mock_object(
self.share_manager.db, 'share_group_snapshot_get',
mock.Mock(return_value=fake_snap))
mock_sg_snap_update = self.mock_object(
self.share_manager.db, 'share_group_snapshot_update',
mock.Mock(return_value=fake_snap))
self.mock_object(
self.share_manager.driver,
'create_share_group_snapshot',
mock.Mock(side_effect=exception.Error))
self.assertRaises(
exception.Error,
self.share_manager.create_share_group_snapshot,
self.context, fake_snap['id'])
mock_sg_snap_update.assert_called_once_with(
mock.ANY, fake_snap['id'], {'status': constants.STATUS_ERROR})
def test_connection_get_info(self):
share_instance = {'share_server_id': 'fake_server_id'}
share_instance_id = 'fake_id'
share_server = 'fake_share_server'
connection_info = 'fake_info'
# mocks
self.mock_object(self.share_manager.db, 'share_instance_get',
mock.Mock(return_value=share_instance))
self.mock_object(self.share_manager.db, 'share_server_get',
mock.Mock(return_value=share_server))
self.mock_object(self.share_manager.driver, 'connection_get_info',
mock.Mock(return_value=connection_info))
# run
result = self.share_manager.connection_get_info(
self.context, share_instance_id)
# asserts
self.assertEqual(connection_info, result)
self.share_manager.db.share_instance_get.assert_called_once_with(
self.context, share_instance_id, with_share_data=True)
self.share_manager.driver.connection_get_info.assert_called_once_with(
self.context, share_instance, share_server)
    @ddt.data(True, False)
    def test_migration_start(self, success):
        """Driver migration start, with host-assisted fallback on failure.

        ``success`` is what the mocked ``_migration_start_driver``
        returns; when False, the manager is expected to fall back to
        ``_migration_start_host_assisted`` and re-set the task state.
        """
        instance = db_utils.create_share_instance(
            share_id='fake_id',
            status=constants.STATUS_AVAILABLE,
            share_server_id='fake_server_id',
            host='fake@backend#pool')
        share = db_utils.create_share(id='fake_id', instances=[instance])
        fake_service = {'availability_zone_id': 'fake_az_id'}
        host = 'fake2@backend#pool'
        # mocks
        self.mock_object(self.share_manager.db, 'share_get',
                         mock.Mock(return_value=share))
        self.mock_object(self.share_manager.db, 'share_instance_get',
                         mock.Mock(return_value=instance))
        self.mock_object(self.share_manager.db, 'share_update')
        self.mock_object(self.share_manager.db, 'share_instance_update')
        self.mock_object(self.share_manager, '_migration_start_driver',
                         mock.Mock(return_value=success))
        self.mock_object(self.share_manager.db, 'service_get_by_args',
                         mock.Mock(return_value=fake_service))
        if not success:
            # Fallback path is only exercised when the driver path fails.
            self.mock_object(
                self.share_manager, '_migration_start_host_assisted')
        # run
        self.share_manager.migration_start(
            self.context, 'fake_id', host, False, False, False, False, False,
            'fake_net_id', 'fake_type_id')
        # asserts
        self.share_manager.db.share_get.assert_called_once_with(
            self.context, share['id'])
        self.share_manager.db.share_instance_get.assert_called_once_with(
            self.context, instance['id'], with_share_data=True)
        share_update_calls = [
            mock.call(
                self.context, share['id'],
                {'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS}),
        ]
        if not success:
            # The task state is set back to IN_PROGRESS for the retry.
            share_update_calls.append(mock.call(
                self.context, share['id'],
                {'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS}))
        self.share_manager.db.share_update.assert_has_calls(share_update_calls)
        self.share_manager._migration_start_driver.assert_called_once_with(
            self.context, share, instance, host, False, False, False, False,
            'fake_net_id', 'fake_az_id', 'fake_type_id')
        if not success:
            (self.share_manager._migration_start_host_assisted.
                assert_called_once_with(
                    self.context, share, instance, host, 'fake_net_id',
                    'fake_az_id', 'fake_type_id'))
        self.share_manager.db.service_get_by_args.assert_called_once_with(
            self.context, 'fake2@backend', 'manila-share')
    @ddt.data({'writable': False, 'preserve_metadata': False,
               'nondisruptive': False, 'preserve_snapshots': True,
               'has_snapshots': False},
              {'writable': False, 'preserve_metadata': False,
               'nondisruptive': True, 'preserve_snapshots': False,
               'has_snapshots': False},
              {'writable': False, 'preserve_metadata': True,
               'nondisruptive': False, 'preserve_snapshots': False,
               'has_snapshots': False},
              {'writable': True, 'preserve_metadata': False,
               'nondisruptive': False, 'preserve_snapshots': False,
               'has_snapshots': False},
              {'writable': False, 'preserve_metadata': False,
               'nondisruptive': False, 'preserve_snapshots': False,
               'has_snapshots': True}
              )
    @ddt.unpack
    def test_migration_start_prevent_host_assisted(
            self, writable, preserve_metadata, nondisruptive,
            preserve_snapshots, has_snapshots):
        """Forced host-assisted migration fails on incompatible requests.

        migration_start is invoked with force_host_assisted=True; each
        ddt case requests one capability (writable / preserve_metadata /
        nondisruptive / preserve_snapshots) or has an existing snapshot,
        and the test asserts the migration errors out and the instance
        is rolled back to AVAILABLE.
        """
        share = db_utils.create_share()
        instance = share.instance
        host = 'fake@backend#pool'
        fake_service = {'availability_zone_id': 'fake_az_id'}
        if has_snapshots:
            snapshot = db_utils.create_snapshot(share_id=share['id'])
            self.mock_object(
                self.share_manager.db, 'share_snapshot_get_all_for_share',
                mock.Mock(return_value=[snapshot]))
        # mocks
        self.mock_object(self.share_manager, '_reset_read_only_access_rules')
        self.mock_object(self.share_manager.db, 'service_get_by_args',
                         mock.Mock(return_value=fake_service))
        self.mock_object(self.share_manager.db, 'share_update')
        self.mock_object(self.share_manager.db, 'share_instance_update')
        self.mock_object(self.share_manager.db, 'share_get',
                         mock.Mock(return_value=share))
        # run
        self.assertRaises(
            exception.ShareMigrationFailed, self.share_manager.migration_start,
            self.context, 'share_id', host, True, writable, preserve_metadata,
            nondisruptive, preserve_snapshots, 'fake_net_id')
        self.share_manager.db.share_update.assert_has_calls([
            mock.call(
                self.context, 'share_id',
                {'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS}),
            mock.call(
                self.context, 'share_id',
                {'task_state': constants.TASK_STATE_MIGRATION_ERROR}),
        ])
        self.share_manager.db.share_instance_update.assert_called_once_with(
            self.context, instance['id'],
            {'status': constants.STATUS_AVAILABLE})
        self.share_manager.db.share_get.assert_called_once_with(
            self.context, 'share_id')
        self.share_manager.db.service_get_by_args.assert_called_once_with(
            self.context, 'fake@backend', 'manila-share')
        (self.share_manager._reset_read_only_access_rules.
            assert_called_once_with(self.context, share, instance['id']))
    def test_migration_start_exception(self):
        """Both migration paths raising ends in MIGRATION_ERROR rollback.

        The driver path raises, the host-assisted fallback also raises,
        so migration_start must surface ShareMigrationFailed, reset the
        instance to AVAILABLE, restore read-only access rules and set
        the task state to MIGRATION_ERROR.
        """
        instance = db_utils.create_share_instance(
            share_id='fake_id',
            status=constants.STATUS_AVAILABLE,
            share_server_id='fake_server_id',
            host='fake@backend#pool')
        share = db_utils.create_share(id='fake_id', instances=[instance])
        host = 'fake2@backend#pool'
        fake_service = {'availability_zone_id': 'fake_az_id'}
        # mocks
        self.mock_object(self.share_manager.db, 'service_get_by_args',
                         mock.Mock(return_value=fake_service))
        self.mock_object(self.share_manager.db, 'share_get',
                         mock.Mock(return_value=share))
        self.mock_object(self.share_manager.db, 'share_instance_get',
                         mock.Mock(return_value=instance))
        self.mock_object(self.share_manager.db, 'share_update')
        self.mock_object(self.share_manager.db, 'share_instance_update')
        self.mock_object(self.share_manager, '_migration_start_driver',
                         mock.Mock(side_effect=Exception('fake_exc_1')))
        self.mock_object(self.share_manager, '_migration_start_host_assisted',
                         mock.Mock(side_effect=Exception('fake_exc_2')))
        self.mock_object(self.share_manager, '_reset_read_only_access_rules')
        # run
        self.assertRaises(
            exception.ShareMigrationFailed,
            self.share_manager.migration_start,
            self.context, 'fake_id', host, False, False, False, False, False,
            'fake_net_id', 'fake_type_id')
        # asserts
        self.share_manager.db.share_get.assert_called_once_with(
            self.context, share['id'])
        self.share_manager.db.share_instance_get.assert_called_once_with(
            self.context, instance['id'], with_share_data=True)
        share_update_calls = [
            mock.call(
                self.context, share['id'],
                {'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS}),
            mock.call(
                self.context, share['id'],
                {'task_state': constants.TASK_STATE_MIGRATION_ERROR})
        ]
        (self.share_manager._reset_read_only_access_rules.
            assert_called_once_with(self.context, share, instance['id']))
        self.share_manager.db.share_update.assert_has_calls(share_update_calls)
        self.share_manager.db.share_instance_update.assert_called_once_with(
            self.context, instance['id'],
            {'status': constants.STATUS_AVAILABLE})
        self.share_manager._migration_start_driver.assert_called_once_with(
            self.context, share, instance, host, False, False, False, False,
            'fake_net_id', 'fake_az_id', 'fake_type_id')
        self.share_manager.db.service_get_by_args.assert_called_once_with(
            self.context, 'fake2@backend', 'manila-share')
    @ddt.data(None, Exception('fake'))
    def test__migration_start_host_assisted(self, exc):
        """Host-assisted start fails on instance creation or data copy.

        When ``exc`` is set, ``create_instance_and_wait`` raises; when it
        is None, the flow proceeds until the data-copy RPC (mocked to
        raise) and must then clean up the new instance.
        """
        instance = db_utils.create_share_instance(
            share_id='fake_id',
            status=constants.STATUS_AVAILABLE,
            share_server_id='fake_server_id')
        new_instance = db_utils.create_share_instance(
            share_id='new_fake_id',
            status=constants.STATUS_AVAILABLE)
        share = db_utils.create_share(id='fake_id', instances=[instance])
        server = 'share_server'
        src_connection_info = 'src_fake_info'
        dest_connection_info = 'dest_fake_info'
        # NOTE(review): instance_updates is built up below but never
        # asserted against db.share_instance_update; an
        # ``assert_has_calls(instance_updates)`` appears to be missing —
        # verify the intended coverage before adding it.
        instance_updates = [
            mock.call(
                self.context, instance['id'],
                {'cast_rules_to_readonly': True})
        ]
        # mocks
        helper = mock.Mock()
        self.mock_object(migration_api, 'ShareMigrationHelper',
                         mock.Mock(return_value=helper))
        self.mock_object(helper, 'cleanup_new_instance')
        self.mock_object(self.share_manager.db, 'share_server_get',
                         mock.Mock(return_value=server))
        self.mock_object(self.share_manager.db, 'share_instance_update',
                         mock.Mock(return_value=server))
        self.mock_object(self.share_manager.access_helper,
                         'get_and_update_share_instance_access_rules')
        self.mock_object(self.share_manager.access_helper,
                         'update_access_rules')
        self.mock_object(utils, 'wait_for_access_update')
        if exc is None:
            self.mock_object(helper,
                             'create_instance_and_wait',
                             mock.Mock(return_value=new_instance))
            self.mock_object(self.share_manager.driver, 'connection_get_info',
                             mock.Mock(return_value=src_connection_info))
            self.mock_object(rpcapi.ShareAPI, 'connection_get_info',
                             mock.Mock(return_value=dest_connection_info))
            # The data-copy RPC is the failure point for this case.
            self.mock_object(data_rpc.DataAPI, 'migration_start',
                             mock.Mock(side_effect=Exception('fake')))
            self.mock_object(helper, 'cleanup_new_instance')
            instance_updates.append(
                mock.call(self.context, new_instance['id'],
                          {'status': constants.STATUS_MIGRATING_TO}))
        else:
            self.mock_object(helper, 'create_instance_and_wait',
                             mock.Mock(side_effect=exc))
        # run
        self.assertRaises(
            exception.ShareMigrationFailed,
            self.share_manager._migration_start_host_assisted,
            self.context, share, instance, 'fake_host', 'fake_net_id',
            'fake_az_id', 'fake_type_id')
        # asserts
        self.share_manager.db.share_server_get.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            instance['share_server_id'])
        (self.share_manager.access_helper.update_access_rules.
            assert_called_once_with(
                self.context, instance['id'], share_server=server))
        helper.create_instance_and_wait.assert_called_once_with(
            share, 'fake_host', 'fake_net_id', 'fake_az_id', 'fake_type_id')
        utils.wait_for_access_update.assert_called_once_with(
            self.context, self.share_manager.db, instance,
            self.share_manager.migration_wait_access_rules_timeout)
        if exc is None:
            (self.share_manager.driver.connection_get_info.
                assert_called_once_with(self.context, instance, server))
            rpcapi.ShareAPI.connection_get_info.assert_called_once_with(
                self.context, new_instance)
            data_rpc.DataAPI.migration_start.assert_called_once_with(
                self.context, share['id'], ['lost+found'], instance['id'],
                new_instance['id'], src_connection_info, dest_connection_info)
            helper.cleanup_new_instance.assert_called_once_with(new_instance)
    @ddt.data({'share_network_id': 'fake_net_id', 'exc': None,
               'has_snapshots': True},
              {'share_network_id': None, 'exc': Exception('fake'),
               'has_snapshots': True},
              {'share_network_id': None, 'exc': None, 'has_snapshots': False})
    @ddt.unpack
    def test__migration_start_driver(
            self, exc, share_network_id, has_snapshots):
        """Driver-assisted migration start across success/failure cases.

        Cases: success with a share network and snapshots; failure
        (``exc``) without a network; success without snapshots.  On
        failure the destination instance must be deleted and migrating
        snapshot instances restored; on success snapshot instances are
        created on the destination and set to MIGRATING.
        """
        fake_dest_host = 'fake_host'
        src_server = db_utils.create_share_server()
        if share_network_id:
            dest_server = db_utils.create_share_server()
        else:
            dest_server = None
        share = db_utils.create_share(
            id='fake_id',
            share_server_id='fake_src_server_id',
            share_network_id=share_network_id)
        migrating_instance = db_utils.create_share_instance(
            share_id='fake_id',
            share_network_id=share_network_id)
        if has_snapshots:
            snapshot = db_utils.create_snapshot(
                status=(constants.STATUS_AVAILABLE if not exc
                        else constants.STATUS_ERROR),
                share_id=share['id'])
            migrating_snap_instance = db_utils.create_snapshot(
                status=constants.STATUS_MIGRATING,
                share_id=share['id'])
            dest_snap_instance = db_utils.create_snapshot_instance(
                status=constants.STATUS_AVAILABLE,
                snapshot_id=snapshot['id'],
                share_instance_id=migrating_instance['id'])
            snapshot_mappings = {snapshot.instance['id']: dest_snap_instance}
        else:
            snapshot_mappings = {}
        src_instance = share.instance
        compatibility = {
            'compatible': True,
            'writable': False,
            'preserve_metadata': False,
            'nondisruptive': False,
            'preserve_snapshots': has_snapshots,
        }
        # mocks
        self.mock_object(self.share_manager.db, 'share_instance_get',
                         mock.Mock(return_value=migrating_instance))
        self.mock_object(self.share_manager.db, 'share_server_get',
                         mock.Mock(return_value=src_server))
        self.mock_object(self.share_manager.driver,
                         'migration_check_compatibility',
                         mock.Mock(return_value=compatibility))
        self.mock_object(
            api.API, 'create_share_instance_and_get_request_spec',
            mock.Mock(return_value=({}, migrating_instance)))
        self.mock_object(self.share_manager.db, 'share_instance_update')
        self.mock_object(self.share_manager.db, 'share_update')
        self.mock_object(rpcapi.ShareAPI, 'provide_share_server',
                         mock.Mock(return_value='fake_dest_share_server_id'))
        self.mock_object(rpcapi.ShareAPI, 'create_share_server')
        self.mock_object(
            migration_api.ShareMigrationHelper, 'wait_for_share_server',
            mock.Mock(return_value=dest_server))
        self.mock_object(
            self.share_manager.db, 'share_snapshot_get_all_for_share',
            mock.Mock(return_value=[snapshot] if has_snapshots else []))
        if has_snapshots:
            self.mock_object(
                self.share_manager.db, 'share_snapshot_instance_create',
                mock.Mock(return_value=dest_snap_instance))
            self.mock_object(
                self.share_manager.db, 'share_snapshot_instance_update')
            self.mock_object(
                self.share_manager.db,
                'share_snapshot_instance_get_all_with_filters',
                mock.Mock(return_value=[migrating_snap_instance]))
        self.mock_object(self.share_manager.driver, 'migration_start')
        self.mock_object(self.share_manager, '_migration_delete_instance')
        self.mock_object(self.share_manager.access_helper,
                         'update_access_rules')
        self.mock_object(utils, 'wait_for_access_update')
        # run
        if exc:
            self.assertRaises(
                exception.ShareMigrationFailed,
                self.share_manager._migration_start_driver,
                self.context, share, src_instance, fake_dest_host, False,
                False, False, False, share_network_id, 'fake_az_id',
                'fake_type_id')
        else:
            result = self.share_manager._migration_start_driver(
                self.context, share, src_instance, fake_dest_host, False,
                False, False, False, share_network_id, 'fake_az_id',
                'fake_type_id')
        # asserts
        if not exc:
            self.assertTrue(result)
            self.share_manager.db.share_update.assert_has_calls([
                mock.call(
                    self.context, share['id'],
                    {'task_state':
                        constants.TASK_STATE_MIGRATION_DRIVER_STARTING}),
                mock.call(
                    self.context, share['id'],
                    {'task_state':
                        constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS})
            ])
        (self.share_manager.db.share_instance_update.assert_has_calls([
            mock.call(self.context, migrating_instance['id'],
                      {'status': constants.STATUS_MIGRATING_TO}),
            mock.call(self.context, src_instance['id'],
                      {'cast_rules_to_readonly': True})]))
        (self.share_manager.access_helper.update_access_rules.
            assert_called_once_with(
                self.context, src_instance['id'], share_server=src_server))
        self.share_manager.driver.migration_start.assert_called_once_with(
            self.context, src_instance, migrating_instance,
            [snapshot.instance] if has_snapshots else [],
            snapshot_mappings, src_server, dest_server)
        self.share_manager.db.share_instance_get.assert_called_once_with(
            self.context, migrating_instance['id'], with_share_data=True)
        self.share_manager.db.share_server_get.assert_called_once_with(
            self.context, 'fake_src_server_id')
        (api.API.create_share_instance_and_get_request_spec.
            assert_called_once_with(self.context, share, 'fake_az_id', None,
                                    'fake_host', share_network_id,
                                    'fake_type_id'))
        (self.share_manager.driver.migration_check_compatibility.
            assert_called_once_with(self.context, src_instance,
                                    migrating_instance, src_server, dest_server))
        (self.share_manager.db.share_snapshot_get_all_for_share.
            assert_called_once_with(self.context, share['id']))
        if share_network_id:
            (rpcapi.ShareAPI.provide_share_server.
                assert_called_once_with(
                    self.context, migrating_instance, share_network_id))
            rpcapi.ShareAPI.create_share_server.assert_called_once_with(
                self.context, migrating_instance, 'fake_dest_share_server_id')
            (migration_api.ShareMigrationHelper.wait_for_share_server.
                assert_called_once_with('fake_dest_share_server_id'))
        if exc:
            (self.share_manager._migration_delete_instance.
                assert_called_once_with(self.context, migrating_instance['id']))
            if has_snapshots:
                (self.share_manager.db.share_snapshot_instance_update.
                    assert_called_once_with(
                        self.context, migrating_snap_instance['id'],
                        {'status': constants.STATUS_AVAILABLE}))
                # NOTE(review): the expression below *calls* the mocked
                # share_snapshot_instance_get_all_with_filters instead of
                # asserting on it, so it verifies nothing; it was likely
                # meant to be ``.assert_called_once_with(...)`` — confirm
                # against the manager code before changing.
                (self.share_manager.db.
                    share_snapshot_instance_get_all_with_filters(
                        self.context,
                        {'share_instance_ids': [src_instance['id']]}))
        else:
            if has_snapshots:
                snap_data = {
                    'status': constants.STATUS_MIGRATING_TO,
                    'progress': '0%',
                    'share_instance_id': migrating_instance['id'],
                }
                (self.share_manager.db.share_snapshot_instance_create.
                    assert_called_once_with(self.context, snapshot['id'],
                                            snap_data))
                (self.share_manager.db.share_snapshot_instance_update.
                    assert_called_once_with(
                        self.context, snapshot.instance['id'],
                        {'status': constants.STATUS_MIGRATING}))
    @ddt.data({'writable': False, 'preserve_metadata': True,
               'nondisruptive': True, 'compatible': True,
               'preserve_snapshots': True, 'has_snapshots': False},
              {'writable': True, 'preserve_metadata': False,
               'nondisruptive': True, 'compatible': True,
               'preserve_snapshots': True, 'has_snapshots': False},
              {'writable': True, 'preserve_metadata': True,
               'nondisruptive': False, 'compatible': True,
               'preserve_snapshots': True, 'has_snapshots': False},
              {'writable': True, 'preserve_metadata': True,
               'nondisruptive': True, 'compatible': False,
               'preserve_snapshots': True, 'has_snapshots': False},
              {'writable': True, 'preserve_metadata': True,
               'nondisruptive': True, 'compatible': True,
               'preserve_snapshots': False, 'has_snapshots': False},
              {'writable': True, 'preserve_metadata': True,
               'nondisruptive': True, 'compatible': True,
               'preserve_snapshots': False, 'has_snapshots': True})
    @ddt.unpack
    def test__migration_start_driver_not_compatible(
            self, compatible, writable, preserve_metadata, nondisruptive,
            preserve_snapshots, has_snapshots):
        """Any unmet driver capability fails and removes the new instance.

        Migration is requested with writable, preserve_metadata and
        nondisruptive forced on (and preserve_snapshots unless snapshots
        exist); each ddt case has the driver deny one capability, so
        ShareMigrationFailed is expected and _migration_delete_instance
        must be called for the destination instance.
        """
        share = db_utils.create_share()
        src_instance = db_utils.create_share_instance(
            share_id='fake_id',
            share_server_id='src_server_id',
            share_network_id='fake_share_network_id')
        fake_dest_host = 'fake_host'
        src_server = db_utils.create_share_server()
        dest_server = db_utils.create_share_server()
        migrating_instance = db_utils.create_share_instance(
            share_id='fake_id',
            share_network_id='fake_net_id')
        compatibility = {
            'compatible': compatible,
            'writable': writable,
            'preserve_metadata': preserve_metadata,
            'nondisruptive': nondisruptive,
            'preserve_snapshots': preserve_snapshots,
        }
        snapshot = db_utils.create_snapshot(share_id=share['id'])
        # mocks
        self.mock_object(self.share_manager.db, 'share_server_get',
                         mock.Mock(return_value=src_server))
        self.mock_object(self.share_manager.db, 'share_instance_get',
                         mock.Mock(return_value=migrating_instance))
        self.mock_object(
            api.API, 'create_share_instance_and_get_request_spec',
            mock.Mock(return_value=({}, migrating_instance)))
        self.mock_object(rpcapi.ShareAPI, 'provide_share_server',
                         mock.Mock(return_value='fake_dest_share_server_id'))
        self.mock_object(rpcapi.ShareAPI, 'create_share_server')
        self.mock_object(
            migration_api.ShareMigrationHelper, 'wait_for_share_server',
            mock.Mock(return_value=dest_server))
        self.mock_object(self.share_manager, '_migration_delete_instance')
        self.mock_object(self.share_manager.driver,
                         'migration_check_compatibility',
                         mock.Mock(return_value=compatibility))
        self.mock_object(utils, 'wait_for_access_update')
        self.mock_object(
            self.share_manager.db, 'share_snapshot_get_all_for_share',
            mock.Mock(return_value=[snapshot] if has_snapshots else []))
        # run
        self.assertRaises(
            exception.ShareMigrationFailed,
            self.share_manager._migration_start_driver,
            self.context, share, src_instance, fake_dest_host, True, True,
            True, not has_snapshots, 'fake_net_id', 'fake_az_id',
            'fake_new_type_id')
        # asserts
        self.share_manager.db.share_server_get.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), 'src_server_id')
        self.share_manager.db.share_instance_get.assert_called_once_with(
            self.context, migrating_instance['id'], with_share_data=True)
        (rpcapi.ShareAPI.provide_share_server.
            assert_called_once_with(
                self.context, migrating_instance, 'fake_net_id'))
        rpcapi.ShareAPI.create_share_server.assert_called_once_with(
            self.context, migrating_instance, 'fake_dest_share_server_id')
        (migration_api.ShareMigrationHelper.wait_for_share_server.
            assert_called_once_with('fake_dest_share_server_id'))
        (api.API.create_share_instance_and_get_request_spec.
            assert_called_once_with(self.context, share, 'fake_az_id', None,
                                    'fake_host', 'fake_net_id',
                                    'fake_new_type_id'))
        self.share_manager._migration_delete_instance.assert_called_once_with(
            self.context, migrating_instance['id'])
@ddt.data(Exception('fake'), False, True)
def test_migration_driver_continue(self, finished):
src_server = db_utils.create_share_server()
dest_server = db_utils.create_share_server()
share = db_utils.create_share(
task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS,
id='share_id',
share_server_id=src_server['id'],
status=constants.STATUS_MIGRATING)
share_cancelled = db_utils.create_share(
task_state=constants.TASK_STATE_MIGRATION_CANCELLED)
if finished:
share_cancelled = share
regular_instance = db_utils.create_share_instance(
status=constants.STATUS_AVAILABLE,
share_id='other_id')
dest_instance = db_utils.create_share_instance(
share_id='share_id',
host='fake_host',
share_server_id=dest_server['id'],
status=constants.STATUS_MIGRATING_TO)
src_instance = share.instance
snapshot = db_utils.create_snapshot(share_id=share['id'])
dest_snap_instance = db_utils.create_snapshot_instance(
snapshot_id=snapshot['id'],
share_instance_id=dest_instance['id'])
migrating_snap_instance = db_utils.create_snapshot(
status=constants.STATUS_MIGRATING,
share_id=share['id'])
snapshot_mappings = {snapshot.instance['id']: dest_snap_instance}
self.mock_object(manager.LOG, 'warning')
self.mock_object(self.share_manager.db,
'share_instances_get_all_by_host', mock.Mock(
return_value=[regular_instance, src_instance]))
self.mock_object(self.share_manager.db, 'share_get',
mock.Mock(side_effect=[share, share_cancelled]))
self.mock_object(api.API, 'get_migrating_instances',
mock.Mock(return_value=(
src_instance['id'], dest_instance['id'])))
self.mock_object(self.share_manager.db, 'share_instance_get',
mock.Mock(return_value=dest_instance))
self.mock_object(self.share_manager.db, 'share_server_get',
mock.Mock(side_effect=[src_server, dest_server]))
self.mock_object(self.share_manager.driver, 'migration_continue',
mock.Mock(side_effect=[finished]))
self.mock_object(self.share_manager.db, 'share_instance_update')
self.mock_object(self.share_manager.db, 'share_update')
self.mock_object(self.share_manager, '_migration_delete_instance')
side_effect = [[dest_snap_instance], [snapshot.instance]]
if isinstance(finished, Exception):
side_effect.append([migrating_snap_instance])
self.mock_object(
self.share_manager.db,
'share_snapshot_instance_get_all_with_filters',
mock.Mock(side_effect=side_effect))
self.mock_object(
self.share_manager.db, 'share_snapshot_instance_update')
share_get_calls = [mock.call(self.context, 'share_id')]
self.mock_object(self.share_manager, '_reset_read_only_access_rules')
self.share_manager.migration_driver_continue(self.context)
snapshot_instance_get_all_calls = [
mock.call(self.context,
{'share_instance_ids': [dest_instance['id']]}),
mock.call(self.context,
{'share_instance_ids': [src_instance['id']]})
]
if isinstance(finished, Exception):
self.share_manager.db.share_update.assert_called_once_with(
self.context, 'share_id',
{'task_state': constants.TASK_STATE_MIGRATION_ERROR})
(self.share_manager.db.share_instance_update.
assert_called_once_with(
self.context, src_instance['id'],
{'status': constants.STATUS_AVAILABLE}))
(self.share_manager._migration_delete_instance.
assert_called_once_with(self.context, dest_instance['id']))
(self.share_manager._reset_read_only_access_rules.
assert_called_once_with(self.context, share, src_instance['id']))
(self.share_manager.db.share_snapshot_instance_update.
assert_called_once_with(
self.context, migrating_snap_instance['id'],
{'status': constants.STATUS_AVAILABLE}))
snapshot_instance_get_all_calls.append(
mock.call(
self.context,
{'share_instance_ids': [src_instance['id']]}))
else:
if finished:
self.share_manager.db.share_update.assert_called_once_with(
self.context, 'share_id',
{'task_state':
constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE})
else:
share_get_calls.append(mock.call(self.context, 'share_id'))
self.assertTrue(manager.LOG.warning.called)
self.share_manager.db.share_instances_get_all_by_host(
self.context, self.share_manager.host)
self.share_manager.db.share_get.assert_has_calls(share_get_calls)
api.API.get_migrating_instances.assert_called_once_with(share)
self.share_manager.db.share_instance_get.assert_called_once_with(
self.context, dest_instance['id'], with_share_data=True)
self.share_manager.db.share_server_get.assert_has_calls([
mock.call(self.context, src_server['id']),
mock.call(self.context, dest_server['id']),
])
self.share_manager.driver.migration_continue.assert_called_once_with(
self.context, src_instance, dest_instance,
[snapshot.instance], snapshot_mappings, src_server, dest_server)
(self.share_manager.db.share_snapshot_instance_get_all_with_filters.
assert_has_calls(snapshot_instance_get_all_calls))
    @ddt.data({'task_state': constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE,
               'exc': None},
              {'task_state': constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE,
               'exc': Exception('fake')},
              {'task_state': constants.TASK_STATE_DATA_COPYING_COMPLETED,
               'exc': None},
              {'task_state': constants.TASK_STATE_DATA_COPYING_COMPLETED,
               'exc': Exception('fake')})
    @ddt.unpack
    def test_migration_complete(self, task_state, exc):
        """Test migration_complete for both completion paths.

        ``task_state`` selects the expected helper
        (_migration_complete_driver for DRIVER_PHASE1_DONE,
        _migration_complete_host_assisted for DATA_COPYING_COMPLETED);
        ``exc`` optionally makes that helper raise, in which case the
        share's task state must be set to MIGRATION_ERROR, the instances
        and migrating snapshot instances reset, and ShareMigrationFailed
        re-raised.
        """
        instance_1 = db_utils.create_share_instance(
            share_id='fake_id',
            status=constants.STATUS_MIGRATING,
            share_server_id='fake_server_id',
            share_type_id='fake_type_id')
        instance_2 = db_utils.create_share_instance(
            share_id='fake_id',
            status=constants.STATUS_MIGRATING_TO,
            share_server_id='fake_server_id',
            share_type_id='fake_type_id')
        share = db_utils.create_share(
            id='fake_id',
            instances=[instance_1, instance_2],
            task_state=task_state)
        # NOTE: share_update deliberately aliases model_type_update, so
        # the final share_update assertion expects the share-type
        # attributes plus the MIGRATION_SUCCESS task state in one dict.
        model_type_update = {'create_share_from_snapshot_support': False}
        share_update = model_type_update
        share_update['task_state'] = constants.TASK_STATE_MIGRATION_SUCCESS
        # mocks
        self.mock_object(self.share_manager.db, 'share_get',
                         mock.Mock(return_value=share))
        self.mock_object(self.share_manager.db, 'share_instance_get',
                         mock.Mock(side_effect=[instance_1, instance_2]))
        self.mock_object(api.API, 'get_share_attributes_from_share_type',
                         mock.Mock(return_value=model_type_update))
        self.mock_object(share_types, 'get_share_type',
                         mock.Mock(return_value='fake_type'))
        self.mock_object(self.share_manager.db, 'share_update')
        if task_state == constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE:
            self.mock_object(
                self.share_manager, '_migration_complete_driver',
                mock.Mock(side_effect=exc))
        else:
            self.mock_object(
                self.share_manager, '_migration_complete_host_assisted',
                mock.Mock(side_effect=exc))
        if exc:
            snapshot = db_utils.create_snapshot(share_id=share['id'])
            snapshot_ins1 = db_utils.create_snapshot_instance(
                snapshot_id=snapshot['id'],
                share_instance_id=instance_1['id'],
                status=constants.STATUS_MIGRATING,)
            snapshot_ins2 = db_utils.create_snapshot_instance(
                snapshot_id=snapshot['id'],
                share_instance_id=instance_2['id'],
                status=constants.STATUS_MIGRATING_TO)
            self.mock_object(manager.LOG, 'exception')
            self.mock_object(self.share_manager.db, 'share_update')
            self.mock_object(self.share_manager.db, 'share_instance_update')
            self.mock_object(self.share_manager.db,
                             'share_snapshot_instance_update')
            self.mock_object(self.share_manager.db,
                             'share_snapshot_instance_get_all_with_filters',
                             mock.Mock(
                                 return_value=[snapshot_ins1, snapshot_ins2]))
            self.assertRaises(
                exception.ShareMigrationFailed,
                self.share_manager.migration_complete,
                self.context, instance_1['id'], instance_2['id'])
        else:
            self.share_manager.migration_complete(
                self.context, instance_1['id'], instance_2['id'])
        # asserts
        self.share_manager.db.share_get.assert_called_once_with(
            self.context, share['id'])
        self.share_manager.db.share_instance_get.assert_has_calls([
            mock.call(self.context, instance_1['id'], with_share_data=True),
            mock.call(self.context, instance_2['id'], with_share_data=True)])
        if task_state == constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE:
            (self.share_manager._migration_complete_driver.
                assert_called_once_with(
                    self.context, share, instance_1, instance_2))
        else:
            (self.share_manager._migration_complete_host_assisted.
                assert_called_once_with(
                    self.context, share, instance_1['id'], instance_2['id']))
        if exc:
            self.assertTrue(manager.LOG.exception.called)
            self.share_manager.db.share_update.assert_called_once_with(
                self.context, share['id'],
                {'task_state': constants.TASK_STATE_MIGRATION_ERROR})
            # driver-assisted rollback errors both instances; host-assisted
            # only restores the source instance to 'available'
            if task_state == constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE:
                share_instance_update_calls = [
                    mock.call(self.context, instance_1['id'],
                              {'status': constants.STATUS_ERROR}),
                    mock.call(self.context, instance_2['id'],
                              {'status': constants.STATUS_ERROR})
                ]
            else:
                share_instance_update_calls = [
                    mock.call(self.context, instance_1['id'],
                              {'status': constants.STATUS_AVAILABLE}),
                ]
            self.share_manager.db.share_instance_update.assert_has_calls(
                share_instance_update_calls)
            if task_state == constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE:
                (self.share_manager.db.share_snapshot_instance_update.
                    assert_has_calls([
                        mock.call(self.context, snapshot_ins1['id'],
                                  {'status': constants.STATUS_ERROR}),
                        mock.call(self.context, snapshot_ins2['id'],
                                  {'status': constants.STATUS_ERROR})]))
                (self.share_manager.db.
                    share_snapshot_instance_get_all_with_filters.
                    assert_called_once_with(
                        self.context, {
                            'share_instance_ids': [instance_1['id'],
                                                   instance_2['id']]
                        }
                    ))
        else:
            (api.API.get_share_attributes_from_share_type.
                assert_called_once_with('fake_type'))
            share_types.get_share_type.assert_called_once_with(
                self.context, 'fake_type_id')
            self.share_manager.db.share_update.assert_called_once_with(
                self.context, share['id'], share_update)
@ddt.data(constants.TASK_STATE_DATA_COPYING_ERROR,
constants.TASK_STATE_DATA_COPYING_CANCELLED,
constants.TASK_STATE_DATA_COPYING_COMPLETED,
'other')
def test__migration_complete_host_assisted_status(self, status):
instance = db_utils.create_share_instance(
share_id='fake_id',
share_server_id='fake_server_id')
new_instance = db_utils.create_share_instance(share_id='fake_id')
share = db_utils.create_share(id='fake_id', task_state=status)
helper = mock.Mock()
# mocks
self.mock_object(self.share_manager.db, 'share_instance_get',
mock.Mock(side_effect=[instance, new_instance]))
self.mock_object(helper, 'cleanup_new_instance')
self.mock_object(migration_api, 'ShareMigrationHelper',
mock.Mock(return_value=helper))
self.mock_object(self.share_manager.db, 'share_instance_update')
self.mock_object(self.share_manager.db, 'share_update')
self.mock_object(self.share_manager, '_reset_read_only_access_rules')
if status == constants.TASK_STATE_DATA_COPYING_COMPLETED:
self.mock_object(helper, 'apply_new_access_rules',
mock.Mock(side_effect=Exception('fake')))
self.mock_object(manager.LOG, 'exception')
# run
if status == constants.TASK_STATE_DATA_COPYING_CANCELLED:
self.share_manager._migration_complete_host_assisted(
self.context, share, instance['id'], new_instance['id'])
else:
self.assertRaises(
exception.ShareMigrationFailed,
self.share_manager._migration_complete_host_assisted,
self.context, share, instance['id'], new_instance['id'])
# asserts
self.share_manager.db.share_instance_get.assert_has_calls([
mock.call(self.context, instance['id'], with_share_data=True),
mock.call(self.context, new_instance['id'], with_share_data=True)
])
cancelled = not(status == constants.TASK_STATE_DATA_COPYING_CANCELLED)
if status != 'other':
helper.cleanup_new_instance.assert_called_once_with(new_instance)
(self.share_manager._reset_read_only_access_rules.
assert_called_once_with(self.context, share, instance['id'],
helper=helper, supress_errors=cancelled))
if status == constants.TASK_STATE_MIGRATION_CANCELLED:
(self.share_manager.db.share_instance_update.
assert_called_once_with(
self.context, instance['id'],
{'status': constants.STATUS_AVAILABLE}))
self.share_manager.db.share_update.assert_called_once_with(
self.context, share['id'],
{'task_state': constants.TASK_STATE_MIGRATION_CANCELLED})
if status == constants.TASK_STATE_DATA_COPYING_COMPLETED:
helper.apply_new_access_rules. assert_called_once_with(
new_instance)
self.assertTrue(manager.LOG.exception.called)
    @ddt.data({'mount_snapshot_support': True, 'snapshot_els': False},
              {'mount_snapshot_support': True, 'snapshot_els': True},
              {'mount_snapshot_support': False, 'snapshot_els': False},
              {'mount_snapshot_support': False, 'snapshot_els': True},)
    @ddt.unpack
    def test__migration_complete_driver(
            self, mount_snapshot_support, snapshot_els):
        """Test the driver-assisted migration completion routine.

        Verifies export location updates, access rule application,
        source instance deletion and instance/share status transitions.
        Snapshot export locations returned by the driver must only be
        created in the DB when the share type has
        ``mount_snapshot_support`` enabled.
        """
        fake_src_host = 'src_host'
        fake_dest_host = 'dest_host'
        fake_rules = 'fake_rules'
        src_server = db_utils.create_share_server()
        dest_server = db_utils.create_share_server()
        share_type = db_utils.create_share_type(
            extra_specs={'mount_snapshot_support': mount_snapshot_support})
        share = db_utils.create_share(
            share_server_id='fake_src_server_id',
            host=fake_src_host)
        dest_instance = db_utils.create_share_instance(
            share_id=share['id'],
            share_server_id='fake_dest_server_id',
            host=fake_dest_host,
            share_type_id=share_type['id'])
        src_instance = share.instance
        snapshot = db_utils.create_snapshot(share_id=share['id'])
        dest_snap_instance = db_utils.create_snapshot_instance(
            snapshot_id=snapshot['id'],
            share_instance_id=dest_instance['id'])
        snapshot_mappings = {snapshot.instance['id']: dest_snap_instance}
        model_update = {'fake_keys': 'fake_values'}
        if snapshot_els:
            el = {'path': 'fake_path', 'is_admin_only': False}
            model_update['export_locations'] = [el]
        # what the mocked driver.migration_complete reports back
        fake_return_data = {
            'export_locations': 'fake_export_locations',
            'snapshot_updates': {dest_snap_instance['id']: model_update},
        }
        # mocks
        self.mock_object(self.share_manager.db, 'share_server_get', mock.Mock(
            side_effect=[src_server, dest_server]))
        self.mock_object(
            self.share_manager.db, 'share_access_get_all_for_instance',
            mock.Mock(return_value=fake_rules))
        self.mock_object(
            self.share_manager.db, 'share_export_locations_update')
        self.mock_object(self.share_manager.driver, 'migration_complete',
                         mock.Mock(return_value=fake_return_data))
        self.mock_object(
            self.share_manager.access_helper, '_check_needs_refresh',
            mock.Mock(return_value=True))
        self.mock_object(self.share_manager.db, 'share_instance_update')
        self.mock_object(self.share_manager.db, 'share_update')
        self.mock_object(self.share_manager, '_migration_delete_instance')
        self.mock_object(migration_api.ShareMigrationHelper,
                         'apply_new_access_rules')
        # side_effect order matches the dest/src filter calls asserted below
        self.mock_object(
            self.share_manager.db,
            'share_snapshot_instance_get_all_with_filters',
            mock.Mock(side_effect=[[dest_snap_instance],
                                   [snapshot.instance]]))
        self.mock_object(
            self.share_manager.db, 'share_snapshot_instance_update')
        el_create = self.mock_object(
            self.share_manager.db,
            'share_snapshot_instance_export_location_create')
        # run
        self.share_manager._migration_complete_driver(
            self.context, share, src_instance, dest_instance)
        # asserts
        self.share_manager.db.share_server_get.assert_has_calls([
            mock.call(self.context, 'fake_src_server_id'),
            mock.call(self.context, 'fake_dest_server_id')])
        (self.share_manager.db.share_export_locations_update.
            assert_called_once_with(self.context, dest_instance['id'],
                                    'fake_export_locations'))
        self.share_manager.driver.migration_complete.assert_called_once_with(
            self.context, src_instance, dest_instance, [snapshot.instance],
            snapshot_mappings, src_server, dest_server)
        (migration_api.ShareMigrationHelper.apply_new_access_rules.
            assert_called_once_with(dest_instance))
        self.share_manager._migration_delete_instance.assert_called_once_with(
            self.context, src_instance['id'])
        self.share_manager.db.share_instance_update.assert_has_calls([
            mock.call(self.context, dest_instance['id'],
                      {'status': constants.STATUS_AVAILABLE}),
            mock.call(self.context, src_instance['id'],
                      {'status': constants.STATUS_INACTIVE})])
        self.share_manager.db.share_update.assert_called_once_with(
            self.context, dest_instance['share_id'],
            {'task_state': constants.TASK_STATE_MIGRATION_COMPLETING})
        (self.share_manager.db.share_snapshot_instance_get_all_with_filters.
            assert_has_calls([
                mock.call(self.context,
                          {'share_instance_ids': [dest_instance['id']]}),
                mock.call(self.context,
                          {'share_instance_ids': [src_instance['id']]})]))
        # the expected snapshot update is the driver's model_update plus
        # the status/progress fields the manager is expected to add
        snap_data_update = (
            fake_return_data['snapshot_updates'][dest_snap_instance['id']])
        snap_data_update.update({
            'status': constants.STATUS_AVAILABLE,
            'progress': '100%',
        })
        (self.share_manager.db.share_snapshot_instance_update.
            assert_called_once_with(self.context, dest_snap_instance['id'],
                                    snap_data_update))
        if mount_snapshot_support and snapshot_els:
            el['share_snapshot_instance_id'] = dest_snap_instance['id']
            el_create.assert_called_once_with(self.context, el)
        else:
            el_create.assert_not_called()
def test__migration_complete_host_assisted(self):
instance = db_utils.create_share_instance(
share_id='fake_id',
share_server_id='fake_server_id')
new_instance = db_utils.create_share_instance(share_id='fake_id')
share = db_utils.create_share(
id='fake_id',
task_state=constants.TASK_STATE_DATA_COPYING_COMPLETED)
# mocks
self.mock_object(self.share_manager.db, 'share_instance_get',
mock.Mock(side_effect=[instance, new_instance]))
self.mock_object(self.share_manager.db, 'share_instance_update')
self.mock_object(self.share_manager.db, 'share_update')
delete_mock = self.mock_object(migration_api.ShareMigrationHelper,
'delete_instance_and_wait')
self.mock_object(migration_api.ShareMigrationHelper,
'apply_new_access_rules')
# run
self.share_manager._migration_complete_host_assisted(
self.context, share, instance['id'], new_instance['id'])
# asserts
self.share_manager.db.share_instance_get.assert_has_calls([
mock.call(self.context, instance['id'], with_share_data=True),
mock.call(self.context, new_instance['id'], with_share_data=True)
])
self.share_manager.db.share_instance_update.assert_has_calls([
mock.call(self.context, new_instance['id'],
{'status': constants.STATUS_AVAILABLE}),
mock.call(self.context, instance['id'],
{'status': constants.STATUS_INACTIVE})
])
self.share_manager.db.share_update.assert_called_once_with(
self.context, share['id'],
{'task_state': constants.TASK_STATE_MIGRATION_COMPLETING})
(migration_api.ShareMigrationHelper.apply_new_access_rules.
assert_called_once_with(new_instance))
delete_mock.assert_called_once_with(instance)
    @ddt.data(constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS,
              constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE,
              constants.TASK_STATE_DATA_COPYING_COMPLETED)
    def test_migration_cancel(self, task_state):
        """Test migration_cancel at different stages of a migration.

        After data copying completed, the destination instance is set
        inactive and cleaned up via the migration helper; during the
        driver-assisted phases, the driver's migration_cancel is invoked,
        the destination instance deleted and migrating snapshot statuses
        restored. In all cases the share ends up MIGRATION_CANCELLED and
        the source instance 'available'.
        """
        dest_host = 'fake_host'
        server_1 = db_utils.create_share_server()
        server_2 = db_utils.create_share_server()
        share = db_utils.create_share(task_state=task_state)
        instance_1 = db_utils.create_share_instance(
            share_id=share['id'], share_server_id=server_1['id'])
        instance_2 = db_utils.create_share_instance(
            share_id=share['id'], share_server_id=server_2['id'],
            host=dest_host)
        helper = mock.Mock()
        # mocks
        self.mock_object(migration_api, 'ShareMigrationHelper',
                         mock.Mock(return_value=helper))
        self.mock_object(db, 'share_get', mock.Mock(return_value=share))
        # side_effect order matches the src/dest lookups asserted below
        self.mock_object(db, 'share_instance_get',
                         mock.Mock(side_effect=[instance_1, instance_2]))
        self.mock_object(db, 'share_update')
        self.mock_object(db, 'share_instance_update')
        self.mock_object(self.share_manager, '_migration_delete_instance')
        self.mock_object(self.share_manager,
                         '_restore_migrating_snapshots_status')
        self.mock_object(db, 'share_server_get',
                         mock.Mock(side_effect=[server_1, server_2]))
        self.mock_object(self.share_manager.driver, 'migration_cancel')
        self.mock_object(helper, 'cleanup_new_instance')
        self.mock_object(self.share_manager, '_reset_read_only_access_rules')
        # run
        self.share_manager.migration_cancel(
            self.context, instance_1['id'], instance_2['id'])
        # asserts
        share_instance_update_calls = []
        if task_state == constants.TASK_STATE_DATA_COPYING_COMPLETED:
            share_instance_update_calls.append(mock.call(
                self.context, instance_2['id'],
                {'status': constants.STATUS_INACTIVE}))
            (helper.cleanup_new_instance.assert_called_once_with(instance_2))
            (self.share_manager._reset_read_only_access_rules.
                assert_called_once_with(self.context, share, instance_1['id'],
                                        helper=helper, supress_errors=False))
        else:
            self.share_manager.driver.migration_cancel.assert_called_once_with(
                self.context, instance_1, instance_2, [], {}, server_1,
                server_2)
            (self.share_manager._migration_delete_instance.
                assert_called_once_with(self.context, instance_2['id']))
            (self.share_manager._restore_migrating_snapshots_status.
                assert_called_once_with(self.context, instance_1['id']))
        self.share_manager.db.share_get.assert_called_once_with(
            self.context, share['id'])
        self.share_manager.db.share_server_get.assert_has_calls([
            mock.call(self.context, server_1['id']),
            mock.call(self.context, server_2['id']),
        ])
        self.share_manager.db.share_instance_get.assert_has_calls([
            mock.call(self.context, instance_1['id'], with_share_data=True),
            mock.call(self.context, instance_2['id'], with_share_data=True)
        ])
        self.share_manager.db.share_update.assert_called_once_with(
            self.context, share['id'],
            {'task_state': constants.TASK_STATE_MIGRATION_CANCELLED})
        share_instance_update_calls.append(mock.call(
            self.context, instance_1['id'],
            {'status': constants.STATUS_AVAILABLE}))
        self.share_manager.db.share_instance_update.assert_has_calls(
            share_instance_update_calls)
@ddt.data(True, False)
def test__reset_read_only_access_rules(self, supress_errors):
share = db_utils.create_share()
server = db_utils.create_share_server()
instance = db_utils.create_share_instance(
share_id=share['id'],
cast_rules_to_readonly=True,
share_server_id=server['id'])
# mocks
self.mock_object(self.share_manager.db, 'share_server_get',
mock.Mock(return_value=server))
self.mock_object(self.share_manager.db, 'share_instance_update')
self.mock_object(self.share_manager.db, 'share_instance_get',
mock.Mock(return_value=instance))
self.mock_object(migration_api.ShareMigrationHelper,
'cleanup_access_rules')
self.mock_object(migration_api.ShareMigrationHelper,
'revert_access_rules')
# run
self.share_manager._reset_read_only_access_rules(
self.context, share, instance['id'], supress_errors=supress_errors)
# asserts
self.share_manager.db.share_server_get.assert_called_once_with(
self.context, server['id'])
self.share_manager.db.share_instance_update.assert_called_once_with(
self.context, instance['id'],
{'cast_rules_to_readonly': False})
self.share_manager.db.share_instance_get.assert_called_once_with(
self.context, instance['id'], with_share_data=True)
if supress_errors:
(migration_api.ShareMigrationHelper.cleanup_access_rules.
assert_called_once_with(instance, server))
else:
(migration_api.ShareMigrationHelper.revert_access_rules.
assert_called_once_with(instance, server))
def test__migration_delete_instance(self):
share = db_utils.create_share(id='fake_id')
instance = share.instance
snapshot = db_utils.create_snapshot(share_id=share['id'])
rules = [{'id': 'rule_id_1'}, {'id': 'rule_id_2'}]
# mocks
self.mock_object(self.share_manager.db, 'share_instance_get',
mock.Mock(return_value=instance))
mock_get_access_rules_call = self.mock_object(
self.share_manager.access_helper,
'get_and_update_share_instance_access_rules',
mock.Mock(return_value=rules))
mock_delete_access_rules_call = self.mock_object(
self.share_manager.access_helper,
'delete_share_instance_access_rules')
self.mock_object(self.share_manager.db, 'share_instance_delete')
self.mock_object(self.share_manager.db, 'share_instance_access_delete')
self.mock_object(self.share_manager, '_check_delete_share_server')
self.mock_object(self.share_manager.db,
'share_snapshot_instance_delete')
self.mock_object(self.share_manager.db,
'share_snapshot_instance_get_all_with_filters',
mock.Mock(return_value=[snapshot.instance]))
# run
self.share_manager._migration_delete_instance(
self.context, instance['id'])
# asserts
self.share_manager.db.share_instance_get.assert_called_once_with(
self.context, instance['id'], with_share_data=True)
mock_get_access_rules_call.assert_called_once_with(
self.context, share_instance_id=instance['id'])
mock_delete_access_rules_call.assert_called_once_with(
self.context, rules, instance['id'])
self.share_manager.db.share_instance_delete.assert_called_once_with(
self.context, instance['id'])
self.share_manager._check_delete_share_server.assert_called_once_with(
self.context, instance)
(self.share_manager.db.share_snapshot_instance_get_all_with_filters.
assert_called_once_with(self.context,
{'share_instance_ids': [instance['id']]}))
(self.share_manager.db.share_snapshot_instance_delete.
assert_called_once_with(self.context, snapshot.instance['id']))
def test_migration_cancel_invalid(self):
share = db_utils.create_share()
self.mock_object(db, 'share_instance_get',
mock.Mock(return_value=share.instance))
self.mock_object(db, 'share_get', mock.Mock(return_value=share))
self.assertRaises(
exception.InvalidShare, self.share_manager.migration_cancel,
self.context, 'ins1_id', 'ins2_id')
    def test_migration_get_progress(self):
        """Driver-reported migration progress is returned unchanged."""
        expected = 'fake_progress'
        dest_host = 'fake_host'
        server_1 = db_utils.create_share_server()
        server_2 = db_utils.create_share_server()
        share = db_utils.create_share(
            task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS,
            share_server_id=server_1['id'])
        instance_1 = db_utils.create_share_instance(
            share_id=share['id'], share_server_id=server_1['id'])
        instance_2 = db_utils.create_share_instance(
            share_id=share['id'], share_server_id=server_2['id'],
            host=dest_host)
        # mocks; side_effect order matches the src/dest lookups below
        self.mock_object(db, 'share_get', mock.Mock(return_value=share))
        self.mock_object(db, 'share_instance_get',
                         mock.Mock(side_effect=[instance_1, instance_2]))
        self.mock_object(db, 'share_server_get',
                         mock.Mock(side_effect=[server_1, server_2]))
        self.mock_object(self.share_manager.driver, 'migration_get_progress',
                         mock.Mock(return_value=expected))
        # run
        result = self.share_manager.migration_get_progress(
            self.context, instance_1['id'], instance_2['id'])
        # asserts
        self.assertEqual(expected, result)
        (self.share_manager.driver.migration_get_progress.
            assert_called_once_with(
                self.context, instance_1, instance_2, [], {}, server_1,
                server_2))
        self.share_manager.db.share_get.assert_called_once_with(
            self.context, share['id'])
        self.share_manager.db.share_server_get.assert_has_calls([
            mock.call(self.context, server_1['id']),
            mock.call(self.context, server_2['id']),
        ])
        self.share_manager.db.share_instance_get.assert_has_calls([
            mock.call(self.context, instance_1['id'], with_share_data=True),
            mock.call(self.context, instance_2['id'], with_share_data=True)
        ])
def test_migration_get_progress_invalid(self):
share = db_utils.create_share()
self.mock_object(db, 'share_instance_get',
mock.Mock(return_value=share.instance))
self.mock_object(db, 'share_get', mock.Mock(return_value=share))
self.assertRaises(
exception.InvalidShare, self.share_manager.migration_get_progress,
self.context, 'ins1_id', 'ins2_id')
def test_provide_share_server(self):
instance = db_utils.create_share_instance(share_id='fake_id',
share_group_id='sg_id')
snapshot = db_utils.create_snapshot(with_share=True)
group = db_utils.create_share_group()
server = db_utils.create_share_server()
# mocks
self.mock_object(self.share_manager.db, 'share_instance_get',
mock.Mock(return_value=instance))
self.mock_object(self.share_manager.db, 'share_snapshot_get',
mock.Mock(return_value=snapshot))
self.mock_object(self.share_manager.db, 'share_group_get',
mock.Mock(return_value=group))
self.mock_object(self.share_manager, '_provide_share_server_for_share',
mock.Mock(return_value=(server, instance)))
# run
result = self.share_manager.provide_share_server(
self.context, 'ins_id', 'net_id', 'snap_id')
# asserts
self.assertEqual(server['id'], result)
self.share_manager.db.share_instance_get.assert_called_once_with(
self.context, 'ins_id', with_share_data=True)
self.share_manager.db.share_snapshot_get.assert_called_once_with(
self.context, 'snap_id')
self.share_manager.db.share_group_get.assert_called_once_with(
self.context, 'sg_id')
(self.share_manager._provide_share_server_for_share.
assert_called_once_with(self.context, 'net_id', instance, snapshot,
group, create_on_backend=False))
def test_create_share_server(self):
server = db_utils.create_share_server()
# mocks
self.mock_object(self.share_manager.db, 'share_server_get',
mock.Mock(return_value=server))
self.mock_object(self.share_manager, '_create_share_server_in_backend')
# run
self.share_manager.create_share_server(
self.context, 'server_id')
# asserts
self.share_manager.db.share_server_get.assert_called_once_with(
self.context, 'server_id')
(self.share_manager._create_share_server_in_backend.
assert_called_once_with(self.context, server))
    @ddt.data({'admin_network_api': mock.Mock(),
               'driver_return': ('new_identifier', {'some_id': 'some_value'})},
              {'admin_network_api': None,
               'driver_return': (None, None)})
    @ddt.unpack
    def test_manage_share_server(self, admin_network_api, driver_return):
        """Test managing an existing share server.

        Runs with and without an admin network API configured. The
        driver's manage_server may return a new identifier and extra
        backend details; these must be stored along with the serialized
        security service data, and the server must end up ACTIVE.
        """
        driver_opts = {}
        fake_share_server = fakes.fake_share_server_get()
        fake_list_network_info = [{}, {}]
        fake_list_empty_network_info = []
        identifier = 'fake_id'
        ss_data = {
            'name': 'fake_name',
            'ou': 'fake_ou',
            'domain': 'fake_domain',
            'server': 'fake_server',
            'dns_ip': 'fake_dns_ip',
            'user': 'fake_user',
            'type': 'FAKE',
            'password': 'fake_pass',
        }
        # placeholder; replaced with the real patched mock below when an
        # admin network API is configured
        mock_manage_admin_network_allocations = mock.Mock()
        share_server = db_utils.create_share_server(**fake_share_server)
        security_service = db_utils.create_security_service(**ss_data)
        share_network = db_utils.create_share_network()
        # real DB fixture: attach the security service to the network and
        # re-read it so 'security_services' is populated
        db.share_network_add_security_service(context.get_admin_context(),
                                              share_network['id'],
                                              security_service['id'])
        share_network = db.share_network_get(context.get_admin_context(),
                                             share_network['id'])
        self.share_manager.driver._admin_network_api = admin_network_api
        mock_share_server_update = self.mock_object(
            db, 'share_server_update')
        mock_share_server_get = self.mock_object(
            db, 'share_server_get', mock.Mock(return_value=share_server))
        mock_share_network_get = self.mock_object(
            db, 'share_network_get', mock.Mock(return_value=share_network))
        mock_network_allocations_get = self.mock_object(
            self.share_manager.driver, 'get_network_allocations_number',
            mock.Mock(return_value=1))
        mock_share_server_net_info = self.mock_object(
            self.share_manager.driver, 'get_share_server_network_info',
            mock.Mock(return_value=fake_list_network_info))
        mock_manage_network_allocations = self.mock_object(
            self.share_manager.driver.network_api,
            'manage_network_allocations',
            mock.Mock(return_value=fake_list_empty_network_info))
        mock_manage_server = self.mock_object(
            self.share_manager.driver, 'manage_server',
            mock.Mock(return_value=driver_return))
        mock_set_backend_details = self.mock_object(
            db, 'share_server_backend_details_set')
        ss_from_db = share_network['security_services'][0]
        ss_data_from_db = {
            'name': ss_from_db['name'],
            'ou': ss_from_db['ou'],
            'domain': ss_from_db['domain'],
            'server': ss_from_db['server'],
            'dns_ip': ss_from_db['dns_ip'],
            'user': ss_from_db['user'],
            'type': ss_from_db['type'],
            'password': ss_from_db['password'],
        }
        # the security service is stored serialized under a type-keyed key
        expected_backend_details = {
            'security_service_FAKE': jsonutils.dumps(ss_data_from_db),
        }
        if driver_return[1]:
            expected_backend_details.update(driver_return[1])
        if admin_network_api is not None:
            mock_manage_admin_network_allocations = self.mock_object(
                self.share_manager.driver.admin_network_api,
                'manage_network_allocations',
                mock.Mock(return_value=fake_list_network_info))
        # run
        self.share_manager.manage_share_server(self.context,
                                               fake_share_server['id'],
                                               identifier,
                                               driver_opts)
        # asserts
        mock_share_server_get.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), fake_share_server['id']
        )
        mock_share_network_get.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            fake_share_server['share_network_id']
        )
        mock_network_allocations_get.assert_called_once_with()
        mock_share_server_net_info.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), share_server, identifier,
            driver_opts
        )
        mock_manage_network_allocations.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            fake_list_network_info, share_server, share_network
        )
        mock_manage_server.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), share_server, identifier,
            driver_opts
        )
        # the server id is kept as identifier when the driver returns None
        mock_share_server_update.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), fake_share_server['id'],
            {'status': constants.STATUS_ACTIVE,
             'identifier': driver_return[0] or share_server['id']}
        )
        mock_set_backend_details.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), share_server['id'],
            expected_backend_details
        )
        if admin_network_api is not None:
            mock_manage_admin_network_allocations.assert_called_once_with(
                utils.IsAMatcher(context.RequestContext),
                fake_list_network_info, share_server
            )
def test_manage_share_server_dhss_false(self):
self.mock_object(self.share_manager, 'driver')
self.share_manager.driver.driver_handles_share_servers = False
self.assertRaises(
exception.ManageShareServerError,
self.share_manager.manage_share_server,
self.context, "fake_id", "foo", {})
    def test_manage_share_server_without_allocations(self):
        """Manage fails when the driver reports no existing port allocations.

        The driver needs >0 network allocations but returns an empty
        network-info list, so the manager must raise
        ManageShareServerError.
        """
        driver_opts = {}
        fake_share_server = fakes.fake_share_server_get()
        # Empty list simulates a server with no discoverable allocations.
        fake_list_empty_network_info = []
        identifier = 'fake_id'
        share_server = db_utils.create_share_server(**fake_share_server)
        share_network = db_utils.create_share_network()
        self.share_manager.driver._admin_network_api = mock.Mock()
        mock_share_server_get = self.mock_object(
            db, 'share_server_get', mock.Mock(return_value=share_server))
        mock_share_network_get = self.mock_object(
            db, 'share_network_get', mock.Mock(return_value=share_network))
        mock_network_allocations_get = self.mock_object(
            self.share_manager.driver, 'get_network_allocations_number',
            mock.Mock(return_value=1))
        mock_get_share_network_info = self.mock_object(
            self.share_manager.driver, 'get_share_server_network_info',
            mock.Mock(return_value=fake_list_empty_network_info))
        self.assertRaises(exception.ManageShareServerError,
                          self.share_manager.manage_share_server,
                          context=self.context,
                          share_server_id=fake_share_server['id'],
                          identifier=identifier,
                          driver_opts=driver_opts)
        mock_share_server_get.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), fake_share_server['id']
        )
        mock_share_network_get.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            fake_share_server['share_network_id']
        )
        mock_network_allocations_get.assert_called_once_with()
        mock_get_share_network_info.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), share_server, identifier,
            driver_opts
        )
    def test_manage_share_server_allocations_not_managed(self):
        """Manage fails when network allocations cannot be taken over.

        Both the tenant and admin network APIs are asked to manage the
        discovered allocations; the manager is expected to raise
        ManageShareServerError when that process does not succeed.
        """
        driver_opts = {}
        fake_share_server = fakes.fake_share_server_get()
        fake_list_network_info = [{}, {}]
        identifier = 'fake_id'
        share_server = db_utils.create_share_server(**fake_share_server)
        share_network = db_utils.create_share_network()
        self.share_manager.driver._admin_network_api = mock.Mock()
        mock_share_server_get = self.mock_object(
            db, 'share_server_get', mock.Mock(return_value=share_server))
        mock_share_network_get = self.mock_object(
            db, 'share_network_get', mock.Mock(return_value=share_network))
        mock_network_allocations_get = self.mock_object(
            self.share_manager.driver, 'get_network_allocations_number',
            mock.Mock(return_value=1))
        mock_get_share_network_info = self.mock_object(
            self.share_manager.driver, 'get_share_server_network_info',
            mock.Mock(return_value=fake_list_network_info))
        mock_manage_admin_network_allocations = self.mock_object(
            self.share_manager.driver.admin_network_api,
            'manage_network_allocations',
            mock.Mock(return_value=fake_list_network_info))
        mock_manage_network_allocations = self.mock_object(
            self.share_manager.driver.network_api,
            'manage_network_allocations',
            mock.Mock(return_value=fake_list_network_info))
        self.assertRaises(exception.ManageShareServerError,
                          self.share_manager.manage_share_server,
                          context=self.context,
                          share_server_id=fake_share_server['id'],
                          identifier=identifier,
                          driver_opts=driver_opts)
        mock_share_server_get.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), fake_share_server['id']
        )
        mock_share_network_get.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            fake_share_server['share_network_id']
        )
        mock_network_allocations_get.assert_called_once_with()
        mock_get_share_network_info.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), share_server, identifier,
            driver_opts
        )
        mock_manage_admin_network_allocations.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            fake_list_network_info, share_server
        )
        mock_manage_network_allocations.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            fake_list_network_info, share_server, share_network
        )
    def test_manage_snapshot_driver_exception(self):
        """A driver error during manage_existing_snapshot must propagate."""
        # Build an ad-hoc exception type so the assert cannot match a
        # pre-existing exception raised for some unrelated reason.
        CustomException = type('CustomException', (Exception,), {})
        self.mock_object(self.share_manager, 'driver')
        self.share_manager.driver.driver_handles_share_servers = False
        self.mock_object(share_types,
                         'get_share_type_extra_specs',
                         mock.Mock(return_value="False"))
        mock_manage = self.mock_object(self.share_manager.driver,
                                       'manage_existing_snapshot',
                                       mock.Mock(side_effect=CustomException))
        share = db_utils.create_share()
        snapshot = db_utils.create_snapshot(share_id=share['id'])
        driver_options = {}
        mock_get = self.mock_object(self.share_manager.db,
                                    'share_snapshot_get',
                                    mock.Mock(return_value=snapshot))
        self.assertRaises(
            CustomException,
            self.share_manager.manage_snapshot,
            self.context, snapshot['id'], driver_options)
        mock_manage.assert_called_once_with(mock.ANY, driver_options)
        mock_get.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), snapshot['id'])
    def test_unmanage_share_server_no_allocations(self):
        """Force-unmanage with zero allocations still deletes the server.

        The driver's unmanage_server raises NotImplementedError, but with
        force=True the manager is expected to proceed and delete the DB row.
        """
        fake_share_server = fakes.fake_share_server_get()
        ss_list = [
            {'name': 'fake_AD'},
            {'name': 'fake_LDAP'},
            {'name': 'fake_kerberos'}
        ]
        db_utils.create_share_server(**fake_share_server)
        self.mock_object(self.share_manager.driver, 'unmanage_server',
                         mock.Mock(side_effect=NotImplementedError()))
        self.mock_object(self.share_manager.db, 'share_server_delete')
        mock_network_allocations_number = self.mock_object(
            self.share_manager.driver, 'get_network_allocations_number',
            mock.Mock(return_value=0)
        )
        mock_admin_network_allocations_number = self.mock_object(
            self.share_manager.driver, 'get_admin_network_allocations_number',
            mock.Mock(return_value=0)
        )
        self.share_manager.unmanage_share_server(
            self.context, fake_share_server['id'], True)
        mock_network_allocations_number.assert_called_once_with()
        mock_admin_network_allocations_number.assert_called_once_with()
        self.share_manager.driver.unmanage_server.assert_called_once_with(
            fake_share_server['backend_details'], ss_list)
        self.share_manager.db.share_server_delete.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), fake_share_server['id'])
    def test_unmanage_share_server_no_allocations_driver_not_implemented(self):
        """Without force, NotImplementedError sets UNMANAGE_ERROR status."""
        fake_share_server = fakes.fake_share_server_get()
        fake_share_server['status'] = constants.STATUS_UNMANAGING
        ss_list = [
            {'name': 'fake_AD'},
            {'name': 'fake_LDAP'},
            {'name': 'fake_kerberos'}
        ]
        db_utils.create_share_server(**fake_share_server)
        self.mock_object(self.share_manager.driver, 'unmanage_server',
                         mock.Mock(side_effect=NotImplementedError()))
        self.mock_object(self.share_manager.db, 'share_server_update')
        # force=False: the driver failure must be reflected in the DB status.
        self.share_manager.unmanage_share_server(
            self.context, fake_share_server['id'], False)
        self.share_manager.driver.unmanage_server.assert_called_once_with(
            fake_share_server['backend_details'], ss_list)
        self.share_manager.db.share_server_update.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), fake_share_server['id'],
            {'status': constants.STATUS_UNMANAGE_ERROR})
    def test_unmanage_share_server_with_network_allocations(self):
        """Tenant network allocations are unmanaged when the driver has any."""
        fake_share_server = fakes.fake_share_server_get()
        db_utils.create_share_server(**fake_share_server)
        mock_unmanage_network_allocations = self.mock_object(
            self.share_manager.driver.network_api,
            'unmanage_network_allocations'
        )
        mock_network_allocations_number = self.mock_object(
            self.share_manager.driver, 'get_network_allocations_number',
            mock.Mock(return_value=1)
        )
        self.share_manager.unmanage_share_server(
            self.context, fake_share_server['id'], True)
        mock_network_allocations_number.assert_called_once_with()
        mock_unmanage_network_allocations.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), fake_share_server['id'])
    def test_unmanage_share_server_with_admin_network_allocations(self):
        """Admin-network allocation count is consulted during unmanage."""
        fake_share_server = fakes.fake_share_server_get()
        db_utils.create_share_server(**fake_share_server)
        mock_admin_network_allocations_number = self.mock_object(
            self.share_manager.driver, 'get_admin_network_allocations_number',
            mock.Mock(return_value=1)
        )
        mock_network_allocations_number = self.mock_object(
            self.share_manager.driver, 'get_network_allocations_number',
            mock.Mock(return_value=0)
        )
        self.share_manager.driver._admin_network_api = mock.Mock()
        self.share_manager.unmanage_share_server(
            self.context, fake_share_server['id'], True)
        mock_admin_network_allocations_number.assert_called_once_with()
        mock_network_allocations_number.assert_called_once_with()
    def test_unmanage_share_server_error(self):
        """DB delete failure propagates and marks the server UNMANAGE_ERROR."""
        fake_share_server = fakes.fake_share_server_get()
        db_utils.create_share_server(**fake_share_server)
        mock_network_allocations_number = self.mock_object(
            self.share_manager.driver, 'get_network_allocations_number',
            mock.Mock(return_value=1)
        )
        # share_server_delete raises; the manager must surface the error
        # after recording the failed status.
        error = mock.Mock(
            side_effect=exception.ShareServerNotFound(share_server_id="fake"))
        mock_share_server_delete = self.mock_object(
            db, 'share_server_delete', error
        )
        mock_share_server_update = self.mock_object(
            db, 'share_server_update'
        )
        self.share_manager.driver._admin_network_api = mock.Mock()
        self.assertRaises(exception.ShareServerNotFound,
                          self.share_manager.unmanage_share_server,
                          self.context,
                          fake_share_server['id'],
                          True)
        mock_network_allocations_number.assert_called_once_with()
        mock_share_server_delete.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), fake_share_server['id']
        )
        mock_share_server_update.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), fake_share_server['id'],
            {'status': constants.STATUS_UNMANAGE_ERROR}
        )
    def test_unmanage_share_server_network_allocations_error(self):
        """Tenant allocation unmanage failure propagates and flags the server."""
        fake_share_server = fakes.fake_share_server_get()
        db_utils.create_share_server(**fake_share_server)
        mock_network_allocations_number = self.mock_object(
            self.share_manager.driver, 'get_network_allocations_number',
            mock.Mock(return_value=1)
        )
        error = mock.Mock(
            side_effect=exception.ShareNetworkNotFound(share_network_id="fake")
        )
        mock_unmanage_network_allocations = self.mock_object(
            self.share_manager.driver.network_api,
            'unmanage_network_allocations', error)
        mock_share_server_update = self.mock_object(
            db, 'share_server_update'
        )
        self.share_manager.driver._admin_network_api = mock.Mock()
        self.assertRaises(exception.ShareNetworkNotFound,
                          self.share_manager.unmanage_share_server,
                          self.context,
                          fake_share_server['id'],
                          True)
        mock_network_allocations_number.assert_called_once_with()
        mock_unmanage_network_allocations.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), fake_share_server['id']
        )
        mock_share_server_update.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), fake_share_server['id'],
            {'status': constants.STATUS_UNMANAGE_ERROR}
        )
    def test_unmanage_share_server_admin_network_allocations_error(self):
        """Admin allocation unmanage failure propagates and flags the server."""
        fake_share_server = fakes.fake_share_server_get()
        db_utils.create_share_server(**fake_share_server)
        self.share_manager.driver._admin_network_api = mock.Mock()
        mock_network_allocations_number = self.mock_object(
            self.share_manager.driver, 'get_network_allocations_number',
            mock.Mock(return_value=0)
        )
        mock_admin_network_allocations_number = self.mock_object(
            self.share_manager.driver, 'get_admin_network_allocations_number',
            mock.Mock(return_value=1)
        )
        # Both network APIs raise; the admin path is the one under test here
        # (tenant allocations number is 0).
        error = mock.Mock(
            side_effect=exception.ShareNetworkNotFound(share_network_id="fake")
        )
        mock_unmanage_admin_network_allocations = self.mock_object(
            self.share_manager.driver._admin_network_api,
            'unmanage_network_allocations', error
        )
        mock_unmanage_network_allocations = self.mock_object(
            self.share_manager.driver.network_api,
            'unmanage_network_allocations', error)
        mock_share_server_update = self.mock_object(
            db, 'share_server_update'
        )
        self.assertRaises(exception.ShareNetworkNotFound,
                          self.share_manager.unmanage_share_server,
                          self.context,
                          fake_share_server['id'],
                          True)
        mock_network_allocations_number.assert_called_once_with()
        mock_admin_network_allocations_number.assert_called_once_with()
        mock_unmanage_network_allocations.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), fake_share_server['id']
        )
        mock_unmanage_admin_network_allocations.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), fake_share_server['id']
        )
        mock_share_server_update.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), fake_share_server['id'],
            {'status': constants.STATUS_UNMANAGE_ERROR}
        )
    @ddt.data({'dhss': True, 'driver_data': {'size': 1},
               'mount_snapshot_support': False},
              {'dhss': True, 'driver_data': {'size': 2, 'name': 'fake'},
               'mount_snapshot_support': False},
              {'dhss': False, 'driver_data': {'size': 3},
               'mount_snapshot_support': False},
              {'dhss': False, 'driver_data': {'size': 3, 'export_locations': [
                  {'path': '/path1', 'is_admin_only': True},
                  {'path': '/path2', 'is_admin_only': False}
              ]}, 'mount_snapshot_support': False},
              {'dhss': False, 'driver_data': {'size': 3, 'export_locations': [
                  {'path': '/path1', 'is_admin_only': True},
                  {'path': '/path2', 'is_admin_only': False}
              ]}, 'mount_snapshot_support': True})
    @ddt.unpack
    def test_manage_snapshot_valid_snapshot(
            self, driver_data, mount_snapshot_support, dhss):
        """Successful manage updates the snapshot with driver-returned data.

        Parameterized over DHSS True/False (which selects the
        *_with_server vs plain driver call) and over whether export
        locations are recorded (only with mount_snapshot_support).
        """
        mock_get_share_server = self.mock_object(self.share_manager,
                                                 '_get_share_server',
                                                 mock.Mock(return_value=None))
        self.mock_object(self.share_manager.db, 'share_snapshot_update')
        self.mock_object(self.share_manager, 'driver')
        self.mock_object(quota.QUOTAS, 'reserve', mock.Mock())
        self.share_manager.driver.driver_handles_share_servers = dhss
        if dhss:
            mock_manage = self.mock_object(
                self.share_manager.driver,
                "manage_existing_snapshot_with_server",
                mock.Mock(return_value=driver_data))
        else:
            mock_manage = self.mock_object(
                self.share_manager.driver,
                "manage_existing_snapshot",
                mock.Mock(return_value=driver_data))
        size = driver_data['size']
        export_locations = driver_data.get('export_locations')
        share = db_utils.create_share(
            size=size,
            mount_snapshot_support=mount_snapshot_support)
        snapshot = db_utils.create_snapshot(share_id=share['id'], size=size)
        snapshot_id = snapshot['id']
        driver_options = {}
        mock_get = self.mock_object(self.share_manager.db,
                                    'share_snapshot_get',
                                    mock.Mock(return_value=snapshot))
        mock_export_update = self.mock_object(
            self.share_manager.db,
            'share_snapshot_instance_export_location_create')
        self.share_manager.manage_snapshot(self.context, snapshot_id,
                                           driver_options)
        if dhss:
            mock_manage.assert_called_once_with(mock.ANY, driver_options, None)
        else:
            mock_manage.assert_called_once_with(mock.ANY, driver_options)
        # The snapshot row must get AVAILABLE status plus everything the
        # driver returned.
        valid_snapshot_data = {
            'status': constants.STATUS_AVAILABLE}
        valid_snapshot_data.update(driver_data)
        self.share_manager.db.share_snapshot_update.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            snapshot_id, valid_snapshot_data)
        if dhss:
            mock_get_share_server.assert_called_once_with(
                utils.IsAMatcher(context.RequestContext), snapshot['share'])
        mock_get.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), snapshot_id)
        if mount_snapshot_support and export_locations:
            snap_ins_id = snapshot.instance['id']
            for i in range(0, 2):
                export_locations[i]['share_snapshot_instance_id'] = snap_ins_id
            mock_export_update.assert_has_calls([
                mock.call(utils.IsAMatcher(context.RequestContext),
                          export_locations[0]),
                mock.call(utils.IsAMatcher(context.RequestContext),
                          export_locations[1]),
            ])
        else:
            mock_export_update.assert_not_called()
    def test_unmanage_snapshot_invalid_share(self):
        """UnmanageInvalidShareSnapshot leaves the snapshot in error state."""
        manager.CONF.unmanage_remove_access_rules = False
        self.mock_object(self.share_manager, 'driver')
        self.share_manager.driver.driver_handles_share_servers = False
        mock_unmanage = mock.Mock(
            side_effect=exception.UnmanageInvalidShareSnapshot(reason="fake"))
        self.mock_object(self.share_manager.driver, "unmanage_snapshot",
                         mock_unmanage)
        mock_get_share_server = self.mock_object(
            self.share_manager,
            '_get_share_server',
            mock.Mock(return_value=None))
        self.mock_object(self.share_manager.db, 'share_snapshot_update')
        share = db_utils.create_share()
        snapshot = db_utils.create_snapshot(share_id=share['id'])
        mock_get = self.mock_object(self.share_manager.db,
                                    'share_snapshot_get',
                                    mock.Mock(return_value=snapshot))
        self.share_manager.unmanage_snapshot(self.context, snapshot['id'])
        self.share_manager.db.share_snapshot_update.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), snapshot['id'],
            {'status': constants.STATUS_UNMANAGE_ERROR})
        self.share_manager.driver.unmanage_snapshot.assert_called_once_with(
            mock.ANY)
        mock_get.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), snapshot['id'])
        mock_get_share_server.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), snapshot['share'])
    @ddt.data({'dhss': False, 'quota_error': False},
              {'dhss': True, 'quota_error': False},
              {'dhss': False, 'quota_error': True},
              {'dhss': True, 'quota_error': True})
    @ddt.unpack
    def test_unmanage_snapshot_valid_snapshot(self, dhss, quota_error):
        """Successful unmanage removes access rules and the DB instance.

        Parameterized over DHSS (selects unmanage_snapshot_with_server vs
        unmanage_snapshot) and over a quota-reserve failure, which should
        only produce a warning, not abort the unmanage.
        """
        if quota_error:
            self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(
                side_effect=exception.ManilaException(message='error')))
        manager.CONF.unmanage_remove_access_rules = True
        mock_log_warning = self.mock_object(manager.LOG, 'warning')
        self.mock_object(self.share_manager, 'driver')
        self.share_manager.driver.driver_handles_share_servers = dhss
        mock_update_access = self.mock_object(
            self.share_manager.snapshot_access_helper, "update_access_rules")
        if dhss:
            mock_unmanage = self.mock_object(
                self.share_manager.driver, "unmanage_snapshot_with_server")
        else:
            mock_unmanage = self.mock_object(
                self.share_manager.driver, "unmanage_snapshot")
        mock_get_share_server = self.mock_object(
            self.share_manager,
            '_get_share_server',
            mock.Mock(return_value=None))
        mock_snapshot_instance_destroy_call = self.mock_object(
            self.share_manager.db, 'share_snapshot_instance_delete')
        share = db_utils.create_share()
        snapshot = db_utils.create_snapshot(share_id=share['id'])
        mock_get = self.mock_object(self.share_manager.db,
                                    'share_snapshot_get',
                                    mock.Mock(return_value=snapshot))
        mock_snap_ins_get = self.mock_object(
            self.share_manager.db, 'share_snapshot_instance_get',
            mock.Mock(return_value=snapshot.instance))
        self.share_manager.unmanage_snapshot(self.context, snapshot['id'])
        if dhss:
            mock_unmanage.assert_called_once_with(snapshot.instance, None)
        else:
            mock_unmanage.assert_called_once_with(snapshot.instance)
        mock_update_access.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), snapshot.instance['id'],
            delete_all_rules=True, share_server=None)
        mock_snapshot_instance_destroy_call.assert_called_once_with(
            mock.ANY, snapshot['instance']['id'])
        mock_get.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), snapshot['id'])
        mock_get_share_server.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), snapshot['share'])
        mock_snap_ins_get.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), snapshot.instance['id'],
            with_share_data=True)
        if quota_error:
            self.assertTrue(mock_log_warning.called)
    @ddt.data(True, False)
    def test_revert_to_snapshot(self, has_replicas):
        """revert_to_snapshot dispatches to the replicated or plain path.

        With replicas the manager must call _revert_to_replicated_snapshot;
        otherwise _revert_to_snapshot. Access rules for both the share
        instance and the snapshot instance are gathered first.
        """
        reservations = 'fake_reservations'
        share_id = 'fake_share_id'
        snapshot_id = 'fake_snapshot_id'
        snapshot_instance_id = 'fake_snapshot_instance_id'
        share_instance_id = 'fake_share_instance_id'
        share_instance = fakes.fake_share_instance(
            id=share_instance_id, share_id=share_id)
        share = fakes.fake_share(
            id=share_id, instance=share_instance,
            project_id='fake_project', user_id='fake_user', size=2,
            has_replicas=has_replicas)
        snapshot_instance = fakes.fake_snapshot_instance(
            id=snapshot_instance_id, share_id=share_instance_id, share=share,
            name='fake_snapshot', share_instance=share_instance,
            share_instance_id=share_instance_id)
        snapshot = fakes.fake_snapshot(
            id=snapshot_id, share_id=share_id, share=share,
            instance=snapshot_instance, project_id='fake_project',
            user_id='fake_user', size=1)
        share_access_rules = ['fake_share_access_rule']
        snapshot_access_rules = ['fake_snapshot_access_rule']
        mock_share_snapshot_get = self.mock_object(
            self.share_manager.db, 'share_snapshot_get',
            mock.Mock(return_value=snapshot))
        mock_share_access_get = self.mock_object(
            self.share_manager.access_helper,
            'get_share_instance_access_rules',
            mock.Mock(return_value=share_access_rules))
        mock_snapshot_access_get = self.mock_object(
            self.share_manager.snapshot_access_helper,
            'get_snapshot_instance_access_rules',
            mock.Mock(return_value=snapshot_access_rules))
        mock_revert_to_snapshot = self.mock_object(
            self.share_manager, '_revert_to_snapshot')
        mock_revert_to_replicated_snapshot = self.mock_object(
            self.share_manager, '_revert_to_replicated_snapshot')
        self.share_manager.revert_to_snapshot(self.context, snapshot_id,
                                              reservations)
        mock_share_snapshot_get.assert_called_once_with(mock.ANY, snapshot_id)
        mock_share_access_get.assert_called_once_with(
            mock.ANY, filters={'state': constants.STATUS_ACTIVE},
            share_instance_id=share_instance_id)
        mock_snapshot_access_get.assert_called_once_with(
            mock.ANY, snapshot_instance_id)
        if not has_replicas:
            mock_revert_to_snapshot.assert_called_once_with(
                mock.ANY, share, snapshot, reservations, share_access_rules,
                snapshot_access_rules)
            self.assertFalse(mock_revert_to_replicated_snapshot.called)
        else:
            self.assertFalse(mock_revert_to_snapshot.called)
            mock_revert_to_replicated_snapshot.assert_called_once_with(
                mock.ANY, share, snapshot, reservations, share_access_rules,
                snapshot_access_rules, share_id=share_id)
    @ddt.data(None, 'fake_reservations')
    def test__revert_to_snapshot(self, reservations):
        """Happy-path revert commits quotas (if reserved) and updates rows.

        Parameterized so that with reservations the quota commit is
        asserted and without them it must be skipped; rollback is never
        called on success.
        """
        mock_quotas_rollback = self.mock_object(quota.QUOTAS, 'rollback')
        mock_quotas_commit = self.mock_object(quota.QUOTAS, 'commit')
        self.mock_object(
            self.share_manager, '_get_share_server',
            mock.Mock(return_value=None))
        mock_driver = self.mock_object(self.share_manager, 'driver')
        share_id = 'fake_share_id'
        share = fakes.fake_share(
            id=share_id, instance={'id': 'fake_instance_id',
                                   'share_type_id': 'fake_share_type_id'},
            project_id='fake_project', user_id='fake_user', size=2)
        snapshot_instance = fakes.fake_snapshot_instance(
            share_id=share_id, share=share, name='fake_snapshot',
            share_instance=share['instance'])
        snapshot = fakes.fake_snapshot(
            id='fake_snapshot_id', share_id=share_id, share=share,
            instance=snapshot_instance, project_id='fake_project',
            user_id='fake_user', size=1)
        share_access_rules = []
        snapshot_access_rules = []
        self.mock_object(
            self.share_manager.db, 'share_snapshot_get',
            mock.Mock(return_value=snapshot))
        self.mock_object(
            self.share_manager.db, 'share_snapshot_instance_get',
            mock.Mock(return_value=snapshot_instance))
        mock_share_update = self.mock_object(
            self.share_manager.db, 'share_update')
        mock_share_snapshot_update = self.mock_object(
            self.share_manager.db, 'share_snapshot_update')
        self.share_manager._revert_to_snapshot(self.context, share, snapshot,
                                               reservations,
                                               share_access_rules,
                                               snapshot_access_rules)
        mock_driver.revert_to_snapshot.assert_called_once_with(
            mock.ANY,
            self._get_snapshot_instance_dict(
                snapshot_instance, share, snapshot=snapshot),
            share_access_rules, snapshot_access_rules,
            share_server=None)
        self.assertFalse(mock_quotas_rollback.called)
        if reservations:
            mock_quotas_commit.assert_called_once_with(
                mock.ANY, reservations, project_id='fake_project',
                user_id='fake_user',
                share_type_id=(
                    snapshot_instance['share_instance']['share_type_id']))
        else:
            self.assertFalse(mock_quotas_commit.called)
        # Share shrinks to the snapshot's size once the revert succeeds.
        mock_share_update.assert_called_once_with(
            mock.ANY, share_id,
            {'status': constants.STATUS_AVAILABLE, 'size': snapshot['size']})
        mock_share_snapshot_update.assert_called_once_with(
            mock.ANY, 'fake_snapshot_id',
            {'status': constants.STATUS_AVAILABLE})
    @ddt.data(None, 'fake_reservations')
    def test__revert_to_snapshot_driver_exception(self, reservations):
        """Driver failure rolls back quotas and sets REVERTING_ERROR.

        Mirror of the happy-path test: commit must never happen, rollback
        only when reservations exist, and the share goes to
        STATUS_REVERTING_ERROR while the snapshot returns to AVAILABLE.
        """
        mock_quotas_rollback = self.mock_object(quota.QUOTAS, 'rollback')
        mock_quotas_commit = self.mock_object(quota.QUOTAS, 'commit')
        self.mock_object(
            self.share_manager, '_get_share_server',
            mock.Mock(return_value=None))
        mock_driver = self.mock_object(self.share_manager, 'driver')
        mock_driver.revert_to_snapshot.side_effect = exception.ManilaException
        share_id = 'fake_share_id'
        share = fakes.fake_share(
            id=share_id, instance={'id': 'fake_instance_id',
                                   'share_type_id': 'fake_share_type_id'},
            project_id='fake_project', user_id='fake_user', size=2)
        snapshot_instance = fakes.fake_snapshot_instance(
            share_id=share_id, share=share, name='fake_snapshot',
            share_instance=share['instance'])
        snapshot = fakes.fake_snapshot(
            id='fake_snapshot_id', share_id=share_id, share=share,
            instance=snapshot_instance, project_id='fake_project',
            user_id='fake_user', size=1)
        share_access_rules = []
        snapshot_access_rules = []
        self.mock_object(
            self.share_manager.db, 'share_snapshot_get',
            mock.Mock(return_value=snapshot))
        self.mock_object(
            self.share_manager.db, 'share_snapshot_instance_get',
            mock.Mock(return_value=snapshot_instance))
        mock_share_update = self.mock_object(
            self.share_manager.db, 'share_update')
        mock_share_snapshot_update = self.mock_object(
            self.share_manager.db, 'share_snapshot_update')
        self.assertRaises(exception.ManilaException,
                          self.share_manager._revert_to_snapshot,
                          self.context,
                          share,
                          snapshot,
                          reservations,
                          share_access_rules,
                          snapshot_access_rules)
        mock_driver.revert_to_snapshot.assert_called_once_with(
            mock.ANY,
            self._get_snapshot_instance_dict(
                snapshot_instance, share, snapshot=snapshot),
            share_access_rules,
            snapshot_access_rules,
            share_server=None)
        self.assertFalse(mock_quotas_commit.called)
        if reservations:
            mock_quotas_rollback.assert_called_once_with(
                mock.ANY, reservations, project_id='fake_project',
                user_id='fake_user',
                share_type_id=(
                    snapshot_instance['share_instance']['share_type_id']))
        else:
            self.assertFalse(mock_quotas_rollback.called)
        mock_share_update.assert_called_once_with(
            mock.ANY, share_id,
            {'status': constants.STATUS_REVERTING_ERROR})
        mock_share_snapshot_update.assert_called_once_with(
            mock.ANY, 'fake_snapshot_id',
            {'status': constants.STATUS_AVAILABLE})
    def test_unmanage_snapshot_update_access_rule_exception(self):
        """Access-rule cleanup failure is logged and flags UNMANAGE_ERROR."""
        self.mock_object(self.share_manager, 'driver')
        self.share_manager.driver.driver_handles_share_servers = False
        share = db_utils.create_share()
        snapshot = db_utils.create_snapshot(share_id=share['id'])
        manager.CONF.unmanage_remove_access_rules = True
        mock_get = self.mock_object(
            self.share_manager.db, 'share_snapshot_get',
            mock.Mock(return_value=snapshot))
        mock_get_share_server = self.mock_object(
            self.share_manager, '_get_share_server',
            mock.Mock(return_value=None))
        self.mock_object(self.share_manager.snapshot_access_helper,
                         'update_access_rules',
                         mock.Mock(side_effect=Exception))
        mock_log_exception = self.mock_object(manager.LOG, 'exception')
        mock_update = self.mock_object(self.share_manager.db,
                                       'share_snapshot_update')
        self.share_manager.unmanage_snapshot(self.context, snapshot['id'])
        self.assertTrue(mock_log_exception.called)
        mock_get.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), snapshot['id'])
        mock_get_share_server.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), snapshot['share'])
        mock_update.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), snapshot['id'],
            {'status': constants.STATUS_UNMANAGE_ERROR})
    def test_snapshot_update_access(self):
        """snapshot_update_access delegates to the snapshot access helper."""
        snapshot = fakes.fake_snapshot(create_instance=True)
        snapshot_instance = fakes.fake_snapshot_instance(
            base_snapshot=snapshot)
        mock_instance_get = self.mock_object(
            db, 'share_snapshot_instance_get',
            mock.Mock(return_value=snapshot_instance))
        mock_get_share_server = self.mock_object(self.share_manager,
                                                 '_get_share_server',
                                                 mock.Mock(return_value=None))
        mock_update_access = self.mock_object(
            self.share_manager.snapshot_access_helper, 'update_access_rules')
        self.share_manager.snapshot_update_access(self.context,
                                                  snapshot_instance['id'])
        mock_instance_get.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            snapshot_instance['id'], with_share_data=True)
        mock_get_share_server.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            snapshot_instance['share_instance'])
        mock_update_access.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            snapshot_instance['id'], share_server=None)
def _setup_crud_replicated_snapshot_data(self):
snapshot = fakes.fake_snapshot(create_instance=True)
snapshot_instance = fakes.fake_snapshot_instance(
base_snapshot=snapshot)
snapshot_instances = [snapshot['instance'], snapshot_instance]
replicas = [fake_replica(), fake_replica()]
return snapshot, snapshot_instances, replicas
    def test_create_replicated_snapshot_driver_exception(self):
        """Driver failure marks every snapshot instance as ERROR and re-raises."""
        snapshot, snapshot_instances, replicas = (
            self._setup_crud_replicated_snapshot_data()
        )
        self.mock_object(
            db, 'share_snapshot_get', mock.Mock(return_value=snapshot))
        self.mock_object(self.share_manager, '_get_share_server')
        self.mock_object(db, 'share_snapshot_instance_get_all_with_filters',
                         mock.Mock(return_value=snapshot_instances))
        self.mock_object(db, 'share_replicas_get_all_by_share',
                         mock.Mock(return_value=replicas))
        self.mock_object(
            self.share_manager.driver, 'create_replicated_snapshot',
            mock.Mock(side_effect=exception.ManilaException))
        mock_db_update_call = self.mock_object(
            db, 'share_snapshot_instance_update')
        self.assertRaises(exception.ManilaException,
                          self.share_manager.create_replicated_snapshot,
                          self.context, snapshot['id'], share_id='fake_share')
        mock_db_update_call.assert_has_calls([
            mock.call(
                self.context, snapshot['instance']['id'],
                {'status': constants.STATUS_ERROR}),
            mock.call(
                self.context, snapshot_instances[1]['id'],
                {'status': constants.STATUS_ERROR}),
        ])
    @ddt.data(None, [])
    def test_create_replicated_snapshot_driver_updates_nothing(self, retval):
        """No DB update happens when the driver returns None or an empty list."""
        snapshot, snapshot_instances, replicas = (
            self._setup_crud_replicated_snapshot_data()
        )
        self.mock_object(
            db, 'share_snapshot_get', mock.Mock(return_value=snapshot))
        self.mock_object(self.share_manager, '_get_share_server')
        self.mock_object(db, 'share_snapshot_instance_get_all_with_filters',
                         mock.Mock(return_value=snapshot_instances))
        self.mock_object(db, 'share_replicas_get_all_by_share',
                         mock.Mock(return_value=replicas))
        self.mock_object(
            self.share_manager.driver, 'create_replicated_snapshot',
            mock.Mock(return_value=retval))
        mock_db_update_call = self.mock_object(
            db, 'share_snapshot_instance_update')
        return_value = self.share_manager.create_replicated_snapshot(
            self.context, snapshot['id'], share_id='fake_share')
        self.assertIsNone(return_value)
        self.assertFalse(mock_db_update_call.called)
    def test_create_replicated_snapshot_driver_updates_snapshot(self):
        """Driver-returned instance dicts are written back to the DB row."""
        snapshot, snapshot_instances, replicas = (
            self._setup_crud_replicated_snapshot_data()
        )
        # Dict keyed by the active instance's id, as the driver would return.
        snapshot_dict = {
            'status': constants.STATUS_AVAILABLE,
            'provider_location': 'spinners_end',
            'progress': '100%',
            'id': snapshot['instance']['id'],
        }
        self.mock_object(
            db, 'share_snapshot_get', mock.Mock(return_value=snapshot))
        self.mock_object(self.share_manager, '_get_share_server')
        self.mock_object(db, 'share_snapshot_instance_get_all_with_filters',
                         mock.Mock(return_value=snapshot_instances))
        self.mock_object(db, 'share_replicas_get_all_by_share',
                         mock.Mock(return_value=replicas))
        self.mock_object(
            self.share_manager.driver, 'create_replicated_snapshot',
            mock.Mock(return_value=[snapshot_dict]))
        mock_db_update_call = self.mock_object(
            db, 'share_snapshot_instance_update')
        return_value = self.share_manager.create_replicated_snapshot(
            self.context, snapshot['id'], share_id='fake_share')
        self.assertIsNone(return_value)
        mock_db_update_call.assert_called_once_with(
            self.context, snapshot['instance']['id'], snapshot_dict)
    @ddt.data(None, 'fake_reservations')
    def test_revert_to_replicated_snapshot(self, reservations):
        """Successful revert commits quotas (if reserved) and updates the DB.

        Parameterized on ``reservations``: with reservations the quota commit
        must happen; without, neither commit nor rollback is expected.
        """
        share_id = 'id1'
        mock_quotas_rollback = self.mock_object(quota.QUOTAS, 'rollback')
        mock_quotas_commit = self.mock_object(quota.QUOTAS, 'commit')
        share = fakes.fake_share(
            id=share_id, project_id='fake_project', user_id='fake_user')
        snapshot = fakes.fake_snapshot(
            create_instance=True, share=share, size=1)
        snapshot_instance = fakes.fake_snapshot_instance(
            base_snapshot=snapshot)
        snapshot_instances = [snapshot['instance'], snapshot_instance]
        # One active replica hosted by this manager, one secondary in-sync.
        active_replica = fake_replica(
            id='rid1', share_id=share_id, host=self.share_manager.host,
            replica_state=constants.REPLICA_STATE_ACTIVE, as_primitive=False)
        replica = fake_replica(
            id='rid2', share_id=share_id, host='secondary',
            replica_state=constants.REPLICA_STATE_IN_SYNC, as_primitive=False)
        replicas = [active_replica, replica]
        share_access_rules = []
        snapshot_access_rules = []
        self.mock_object(
            db, 'share_snapshot_get', mock.Mock(return_value=snapshot))
        self.mock_object(
            self.share_manager, '_get_share_server',
            mock.Mock(return_value=None))
        self.mock_object(
            db, 'share_replicas_get_all_by_share',
            mock.Mock(return_value=replicas))
        # First lookup returns all instances, second only the active one.
        self.mock_object(
            db, 'share_snapshot_instance_get_all_with_filters',
            mock.Mock(side_effect=[snapshot_instances,
                                   [snapshot_instances[0]]]))
        mock_driver = self.mock_object(self.share_manager, 'driver')
        mock_share_update = self.mock_object(
            self.share_manager.db, 'share_update')
        mock_share_replica_update = self.mock_object(
            self.share_manager.db, 'share_replica_update')
        mock_share_snapshot_instance_update = self.mock_object(
            self.share_manager.db, 'share_snapshot_instance_update')
        self.share_manager._revert_to_replicated_snapshot(
            self.context, share, snapshot, reservations, share_access_rules,
            snapshot_access_rules, share_id=share_id)
        self.assertTrue(mock_driver.revert_to_replicated_snapshot.called)
        self.assertFalse(mock_quotas_rollback.called)
        if reservations:
            mock_quotas_commit.assert_called_once_with(
                mock.ANY, reservations, project_id='fake_project',
                user_id='fake_user', share_type_id=None)
        else:
            self.assertFalse(mock_quotas_commit.called)
        # Share is resized back to the snapshot's size; replica and snapshot
        # instance both become 'available'.
        mock_share_update.assert_called_once_with(
            mock.ANY, share_id, {'size': snapshot['size']})
        mock_share_replica_update.assert_called_once_with(
            mock.ANY, active_replica['id'],
            {'status': constants.STATUS_AVAILABLE})
        mock_share_snapshot_instance_update.assert_called_once_with(
            mock.ANY, snapshot['instance']['id'],
            {'status': constants.STATUS_AVAILABLE})
    @ddt.data(None, 'fake_reservations')
    def test_revert_to_replicated_snapshot_driver_exception(
            self, reservations):
        """On driver failure the revert re-raises, rolls back quotas and

        marks the replica 'reverting_error' while the snapshot instance is
        restored to 'available'. Parameterized on ``reservations``.
        """
        mock_quotas_rollback = self.mock_object(quota.QUOTAS, 'rollback')
        mock_quotas_commit = self.mock_object(quota.QUOTAS, 'commit')
        share_id = 'id1'
        share = fakes.fake_share(
            id=share_id, project_id='fake_project', user_id='fake_user')
        snapshot = fakes.fake_snapshot(
            create_instance=True, share=share, size=1)
        snapshot_instance = fakes.fake_snapshot_instance(
            base_snapshot=snapshot)
        snapshot_instances = [snapshot['instance'], snapshot_instance]
        active_replica = fake_replica(
            id='rid1', share_id=share_id, host=self.share_manager.host,
            replica_state=constants.REPLICA_STATE_ACTIVE, as_primitive=False,
            share_type_id='fake_share_type_id')
        replica = fake_replica(
            id='rid2', share_id=share_id, host='secondary',
            replica_state=constants.REPLICA_STATE_IN_SYNC, as_primitive=False,
            share_type_id='fake_share_type_id')
        replicas = [active_replica, replica]
        share_access_rules = []
        snapshot_access_rules = []
        self.mock_object(
            db, 'share_snapshot_get', mock.Mock(return_value=snapshot))
        self.mock_object(
            self.share_manager, '_get_share_server',
            mock.Mock(return_value=None))
        self.mock_object(
            db, 'share_replicas_get_all_by_share',
            mock.Mock(return_value=replicas))
        self.mock_object(
            db, 'share_snapshot_instance_get_all_with_filters',
            mock.Mock(side_effect=[snapshot_instances,
                                   [snapshot_instances[0]]]))
        mock_driver = self.mock_object(self.share_manager, 'driver')
        # Driver raises during the revert.
        mock_driver.revert_to_replicated_snapshot.side_effect = (
            exception.ManilaException)
        mock_share_update = self.mock_object(
            self.share_manager.db, 'share_update')
        mock_share_replica_update = self.mock_object(
            self.share_manager.db, 'share_replica_update')
        mock_share_snapshot_instance_update = self.mock_object(
            self.share_manager.db, 'share_snapshot_instance_update')
        self.assertRaises(exception.ManilaException,
                          self.share_manager._revert_to_replicated_snapshot,
                          self.context,
                          share,
                          snapshot,
                          reservations,
                          share_access_rules,
                          snapshot_access_rules,
                          share_id=share_id)
        self.assertTrue(mock_driver.revert_to_replicated_snapshot.called)
        self.assertFalse(mock_quotas_commit.called)
        if reservations:
            # Quota reservation is rolled back on failure.
            mock_quotas_rollback.assert_called_once_with(
                mock.ANY, reservations, project_id='fake_project',
                user_id='fake_user', share_type_id=replica['share_type_id'])
        else:
            self.assertFalse(mock_quotas_rollback.called)
        self.assertFalse(mock_share_update.called)
        mock_share_replica_update.assert_called_once_with(
            mock.ANY, active_replica['id'],
            {'status': constants.STATUS_REVERTING_ERROR})
        mock_share_snapshot_instance_update.assert_called_once_with(
            mock.ANY, snapshot['instance']['id'],
            {'status': constants.STATUS_AVAILABLE})
def delete_replicated_snapshot_driver_exception(self):
snapshot, snapshot_instances, replicas = (
self._setup_crud_replicated_snapshot_data()
)
self.mock_object(
db, 'share_snapshot_get', mock.Mock(return_value=snapshot))
self.mock_object(self.share_manager, '_get_share_server')
self.mock_object(db, 'share_snapshot_instance_get_all_with_filters',
mock.Mock(return_value=snapshot_instances))
self.mock_object(db, 'share_replicas_get_all_by_share',
mock.Mock(return_value=replicas))
self.mock_object(
self.share_manager.driver, 'delete_replicated_snapshot',
mock.Mock(side_effect=exception.ManilaException))
mock_db_update_call = self.mock_object(
db, 'share_snapshot_instance_update')
mock_db_delete_call = self.mock_object(
db, 'share_snapshot_instance_delete')
self.assertRaises(exception.ManilaException,
self.share_manager.delete_replicated_snapshot,
self.context, snapshot['id'], share_id='fake_share')
mock_db_update_call.assert_has_calls([
mock.call(
self.context, snapshot['instance']['id'],
{'status': constants.STATUS_ERROR_DELETING}),
mock.call(
self.context, snapshot_instances[1]['id'],
{'status': constants.STATUS_ERROR_DELETING}),
])
self.assertFalse(mock_db_delete_call.called)
def delete_replicated_snapshot_driver_exception_ignored_with_force(self):
snapshot, snapshot_instances, replicas = (
self._setup_crud_replicated_snapshot_data()
)
self.mock_object(
db, 'share_snapshot_get', mock.Mock(return_value=snapshot))
self.mock_object(self.share_manager, '_get_share_server')
self.mock_object(db, 'share_snapshot_instance_get_all_with_filters',
mock.Mock(return_value=snapshot_instances))
self.mock_object(db, 'share_replicas_get_all_by_share',
mock.Mock(return_value=replicas))
self.mock_object(
self.share_manager.driver, 'delete_replicated_snapshot',
mock.Mock(side_effect=exception.ManilaException))
mock_db_update_call = self.mock_object(
db, 'share_snapshot_instance_update')
mock_db_delete_call = self.mock_object(
db, 'share_snapshot_instance_delete')
retval = self.share_manager.delete_replicated_snapshot(
self.context, snapshot['id'], share_id='fake_share')
self.assertIsNone(retval)
mock_db_delete_call.assert_has_calls([
mock.call(
self.context, snapshot['instance']['id']),
mock.call(
self.context, snapshot_instances[1]['id']),
])
self.assertFalse(mock_db_update_call.called)
@ddt.data(None, [])
def delete_replicated_snapshot_driver_updates_nothing(self, retval):
snapshot, snapshot_instances, replicas = (
self._setup_crud_replicated_snapshot_data()
)
self.mock_object(
db, 'share_snapshot_get', mock.Mock(return_value=snapshot))
self.mock_object(self.share_manager, '_get_share_server')
self.mock_object(db, 'share_snapshot_instance_get_all_with_filters',
mock.Mock(return_value=snapshot_instances))
self.mock_object(db, 'share_replicas_get_all_by_share',
mock.Mock(return_value=replicas))
self.mock_object(
self.share_manager.driver, 'delete_replicated_snapshot',
mock.Mock(return_value=retval))
mock_db_update_call = self.mock_object(
db, 'share_snapshot_instance_update')
mock_db_delete_call = self.mock_object(
db, 'share_snapshot_instance_delete')
return_value = self.share_manager.delete_replicated_snapshot(
self.context, snapshot['id'], share_id='fake_share')
self.assertIsNone(return_value)
self.assertFalse(mock_db_delete_call.called)
self.assertFalse(mock_db_update_call.called)
def delete_replicated_snapshot_driver_deletes_snapshots(self):
snapshot, snapshot_instances, replicas = (
self._setup_crud_replicated_snapshot_data()
)
retval = [{
'status': constants.STATUS_DELETED,
'id': snapshot['instance']['id'],
}]
self.mock_object(
db, 'share_snapshot_get', mock.Mock(return_value=snapshot))
self.mock_object(self.share_manager, '_get_share_server')
self.mock_object(db, 'share_snapshot_instance_get_all_with_filters',
mock.Mock(return_value=snapshot_instances))
self.mock_object(db, 'share_replicas_get_all_by_share',
mock.Mock(return_value=replicas))
self.mock_object(
self.share_manager.driver, 'delete_replicated_snapshot',
mock.Mock(return_value=retval))
mock_db_update_call = self.mock_object(
db, 'share_snapshot_instance_update')
mock_db_delete_call = self.mock_object(
db, 'share_snapshot_instance_delete')
return_value = self.share_manager.delete_replicated_snapshot(
self.context, snapshot['id'], share_id='fake_share')
self.assertIsNone(return_value)
mock_db_delete_call.assert_called_once_with(
self.context, snapshot['instance']['id'])
self.assertFalse(mock_db_update_call.called)
@ddt.data(True, False)
def delete_replicated_snapshot_drv_del_and_updates_snapshots(self, force):
snapshot, snapshot_instances, replicas = (
self._setup_crud_replicated_snapshot_data()
)
updated_instance_details = {
'status': constants.STATUS_ERROR,
'id': snapshot_instances[1]['id'],
'provider_location': 'azkaban',
}
retval = [
{
'status': constants.STATUS_DELETED,
'id': snapshot['instance']['id'],
},
]
retval.append(updated_instance_details)
self.mock_object(
db, 'share_snapshot_get', mock.Mock(return_value=snapshot))
self.mock_object(self.share_manager, '_get_share_server')
self.mock_object(db, 'share_snapshot_instance_get_all_with_filters',
mock.Mock(return_value=snapshot_instances))
self.mock_object(db, 'share_replicas_get_all_by_share',
mock.Mock(return_value=replicas))
self.mock_object(
self.share_manager.driver, 'delete_replicated_snapshot',
mock.Mock(return_value=retval))
mock_db_update_call = self.mock_object(
db, 'share_snapshot_instance_update')
mock_db_delete_call = self.mock_object(
db, 'share_snapshot_instance_delete')
return_value = self.share_manager.delete_replicated_snapshot(
self.context, snapshot['id'], share_id='fake_share', force=force)
self.assertIsNone(return_value)
if force:
self.assertTrue(2, mock_db_delete_call.call_count)
self.assertFalse(mock_db_update_call.called)
else:
mock_db_delete_call.assert_called_once_with(
self.context, snapshot['instance']['id'])
mock_db_update_call.assert_called_once_with(
self.context, snapshot_instances[1]['id'],
updated_instance_details)
    def test_periodic_share_replica_snapshot_update(self):
        """Periodic task runs cleanly and logs once.

        NOTE(review): as written this asserts zero calls to
        ``_update_replica_snapshot`` even though replicas and transitional
        snapshot instances are mocked in — verify against the manager's
        filtering logic whether updates were actually expected here.
        """
        mock_debug_log = self.mock_object(manager.LOG, 'debug')
        # Three in-sync replicas on another host plus one active one.
        replicas = 3 * [
            fake_replica(host='malfoy@manor#_pool0',
                         replica_state=constants.REPLICA_STATE_IN_SYNC)
        ]
        replicas.append(fake_replica(replica_state=constants.STATUS_ACTIVE))
        snapshot = fakes.fake_snapshot(create_instance=True,
                                       status=constants.STATUS_DELETING)
        snapshot_instances = 3 * [
            fakes.fake_snapshot_instance(base_snapshot=snapshot)
        ]
        self.mock_object(
            db, 'share_replicas_get_all', mock.Mock(return_value=replicas))
        self.mock_object(db, 'share_snapshot_instance_get_all_with_filters',
                         mock.Mock(return_value=snapshot_instances))
        mock_snapshot_update_call = self.mock_object(
            self.share_manager, '_update_replica_snapshot')
        retval = self.share_manager.periodic_share_replica_snapshot_update(
            self.context)
        self.assertIsNone(retval)
        self.assertEqual(1, mock_debug_log.call_count)
        self.assertEqual(0, mock_snapshot_update_call.call_count)
    @ddt.data(True, False)
    def test_periodic_share_replica_snapshot_update_nothing_to_update(
            self, has_instances):
        """With no replicas or no instances, no per-snapshot update happens.

        The side_effect pairs below cover both combinations: no replicas
        with instances, and replicas with no instances.
        """
        mock_debug_log = self.mock_object(manager.LOG, 'debug')
        replicas = 3 * [
            fake_replica(host='malfoy@manor#_pool0',
                         replica_state=constants.REPLICA_STATE_IN_SYNC)
        ]
        replicas.append(fake_replica(replica_state=constants.STATUS_ACTIVE))
        snapshot = fakes.fake_snapshot(create_instance=True,
                                       status=constants.STATUS_DELETING)
        snapshot_instances = 3 * [
            fakes.fake_snapshot_instance(base_snapshot=snapshot)
        ]
        self.mock_object(db, 'share_replicas_get_all',
                         mock.Mock(side_effect=[[], replicas]))
        self.mock_object(db, 'share_snapshot_instance_get_all_with_filters',
                         mock.Mock(side_effect=[snapshot_instances, []]))
        mock_snapshot_update_call = self.mock_object(
            self.share_manager, '_update_replica_snapshot')
        retval = self.share_manager.periodic_share_replica_snapshot_update(
            self.context)
        self.assertIsNone(retval)
        self.assertEqual(1, mock_debug_log.call_count)
        self.assertEqual(0, mock_snapshot_update_call.call_count)
def test__update_replica_snapshot_replica_deleted_from_database(self):
replica_not_found = exception.ShareReplicaNotFound(replica_id='xyzzy')
self.mock_object(db, 'share_replica_get', mock.Mock(
side_effect=replica_not_found))
mock_db_delete_call = self.mock_object(
db, 'share_snapshot_instance_delete')
mock_db_update_call = self.mock_object(
db, 'share_snapshot_instance_update')
mock_driver_update_call = self.mock_object(
self.share_manager.driver, 'update_replicated_snapshot')
snaphot_instance = fakes.fake_snapshot_instance()
retval = self.share_manager._update_replica_snapshot(
self.context, snaphot_instance)
self.assertIsNone(retval)
mock_db_delete_call.assert_called_once_with(
self.context, snaphot_instance['id'])
self.assertFalse(mock_driver_update_call.called)
self.assertFalse(mock_db_update_call.called)
    def test__update_replica_snapshot_both_deleted_from_database(self):
        """A missing replica AND missing instance row are both tolerated."""
        replica_not_found = exception.ShareReplicaNotFound(replica_id='xyzzy')
        instance_not_found = exception.ShareSnapshotInstanceNotFound(
            instance_id='spoon!')
        self.mock_object(db, 'share_replica_get', mock.Mock(
            side_effect=replica_not_found))
        # The cleanup delete itself raises not-found; must not propagate.
        mock_db_delete_call = self.mock_object(
            db, 'share_snapshot_instance_delete', mock.Mock(
                side_effect=instance_not_found))
        mock_db_update_call = self.mock_object(
            db, 'share_snapshot_instance_update')
        mock_driver_update_call = self.mock_object(
            self.share_manager.driver, 'update_replicated_snapshot')
        snapshot_instance = fakes.fake_snapshot_instance()
        retval = self.share_manager._update_replica_snapshot(
            self.context, snapshot_instance)
        self.assertIsNone(retval)
        mock_db_delete_call.assert_called_once_with(
            self.context, snapshot_instance['id'])
        self.assertFalse(mock_driver_update_call.called)
        self.assertFalse(mock_db_update_call.called)
def test__update_replica_snapshot_driver_raises_Not_Found_exception(self):
mock_debug_log = self.mock_object(manager.LOG, 'debug')
replica = fake_replica()
snapshot_instance = fakes.fake_snapshot_instance(
status=constants.STATUS_DELETING)
self.mock_object(
db, 'share_replica_get', mock.Mock(return_value=replica))
self.mock_object(db, 'share_snapshot_instance_get',
mock.Mock(return_value=snapshot_instance))
self.mock_object(db, 'share_snapshot_instance_get',
mock.Mock(return_value=snapshot_instance))
self.mock_object(db, 'share_replicas_get_all_by_share',
mock.Mock(return_value=[replica]))
self.mock_object(self.share_manager, '_get_share_server',
mock.Mock(return_value=None))
self.mock_object(
self.share_manager.driver, 'update_replicated_snapshot',
mock.Mock(
side_effect=exception.SnapshotResourceNotFound(name='abc')))
mock_db_delete_call = self.mock_object(
db, 'share_snapshot_instance_delete')
mock_db_update_call = self.mock_object(
db, 'share_snapshot_instance_update')
retval = self.share_manager._update_replica_snapshot(
self.context, snapshot_instance, replica_snapshots=None)
self.assertIsNone(retval)
self.assertEqual(1, mock_debug_log.call_count)
mock_db_delete_call.assert_called_once_with(
self.context, snapshot_instance['id'])
self.assertFalse(mock_db_update_call.called)
@ddt.data(exception.NotFound, exception.ManilaException)
def test__update_replica_snapshot_driver_raises_other_exception(self, exc):
mock_debug_log = self.mock_object(manager.LOG, 'debug')
mock_info_log = self.mock_object(manager.LOG, 'info')
mock_exception_log = self.mock_object(manager.LOG, 'exception')
replica = fake_replica()
snapshot_instance = fakes.fake_snapshot_instance(
status=constants.STATUS_CREATING)
self.mock_object(
db, 'share_replica_get', mock.Mock(return_value=replica))
self.mock_object(db, 'share_snapshot_instance_get',
mock.Mock(return_value=snapshot_instance))
self.mock_object(db, 'share_snapshot_instance_get',
mock.Mock(return_value=snapshot_instance))
self.mock_object(db, 'share_replicas_get_all_by_share',
mock.Mock(return_value=[replica]))
self.mock_object(self.share_manager, '_get_share_server',
mock.Mock(return_value=None))
self.mock_object(self.share_manager.driver,
'update_replicated_snapshot',
mock.Mock(side_effect=exc))
mock_db_delete_call = self.mock_object(
db, 'share_snapshot_instance_delete')
mock_db_update_call = self.mock_object(
db, 'share_snapshot_instance_update')
retval = self.share_manager._update_replica_snapshot(
self.context, snapshot_instance)
self.assertIsNone(retval)
self.assertEqual(1, mock_exception_log.call_count)
self.assertEqual(1, mock_debug_log.call_count)
self.assertFalse(mock_info_log.called)
mock_db_update_call.assert_called_once_with(
self.context, snapshot_instance['id'], {'status': 'error'})
self.assertFalse(mock_db_delete_call.called)
@ddt.data(True, False)
def test__update_replica_snapshot_driver_updates_replica(self, update):
replica = fake_replica()
snapshot_instance = fakes.fake_snapshot_instance()
driver_update = {}
if update:
driver_update = {
'id': snapshot_instance['id'],
'provider_location': 'knockturn_alley',
'status': constants.STATUS_AVAILABLE,
}
mock_debug_log = self.mock_object(manager.LOG, 'debug')
mock_info_log = self.mock_object(manager.LOG, 'info')
self.mock_object(
db, 'share_replica_get', mock.Mock(return_value=replica))
self.mock_object(db, 'share_snapshot_instance_get',
mock.Mock(return_value=snapshot_instance))
self.mock_object(db, 'share_snapshot_instance_get',
mock.Mock(return_value=snapshot_instance))
self.mock_object(db, 'share_replicas_get_all_by_share',
mock.Mock(return_value=[replica]))
self.mock_object(self.share_manager, '_get_share_server',
mock.Mock(return_value=None))
self.mock_object(self.share_manager.driver,
'update_replicated_snapshot',
mock.Mock(return_value=driver_update))
mock_db_delete_call = self.mock_object(
db, 'share_snapshot_instance_delete')
mock_db_update_call = self.mock_object(
db, 'share_snapshot_instance_update')
retval = self.share_manager._update_replica_snapshot(
self.context, snapshot_instance, replica_snapshots=None)
driver_update['progress'] = '100%'
self.assertIsNone(retval)
self.assertEqual(1, mock_debug_log.call_count)
self.assertFalse(mock_info_log.called)
if update:
mock_db_update_call.assert_called_once_with(
self.context, snapshot_instance['id'], driver_update)
else:
self.assertFalse(mock_db_update_call.called)
self.assertFalse(mock_db_delete_call.called)
    def test_update_access(self):
        """update_access delegates to the access helper with the server."""
        share_instance = fakes.fake_share_instance()
        self.mock_object(self.share_manager, '_get_share_server',
                         mock.Mock(return_value='fake_share_server'))
        self.mock_object(self.share_manager, '_get_share_instance',
                         mock.Mock(return_value=share_instance))
        access_rules_update_method = self.mock_object(
            self.share_manager.access_helper, 'update_access_rules')
        retval = self.share_manager.update_access(
            self.context, share_instance['id'])
        self.assertIsNone(retval)
        # The resolved share server is forwarded to the helper.
        access_rules_update_method.assert_called_once_with(
            self.context, share_instance['id'],
            share_server='fake_share_server')
    @mock.patch('manila.tests.fake_notifier.FakeNotifier._notify')
    def test_update_share_usage_size(self, mock_notify):
        """Driver usage-size updates trigger a 'share.consumed.size' notice."""
        instances = self._setup_init_mocks(setup_access_rules=False)
        update_shares = [{'id': 'fake_id', 'used_size': '3',
                          'gathered_at': 'fake'}]
        mock_notify.assert_not_called()
        # NOTE: local name shadows the imported ``manager`` module inside
        # this method; it refers to the share manager instance here.
        manager = self.share_manager
        self.mock_object(manager, 'driver')
        self.mock_object(manager.db, 'share_instances_get_all_by_host',
                         mock.Mock(return_value=instances))
        self.mock_object(manager.db, 'share_instance_get',
                         mock.Mock(side_effect=instances))
        mock_driver_call = self.mock_object(
            manager.driver, 'update_share_usage_size',
            mock.Mock(return_value=update_shares))
        self.share_manager.update_share_usage_size(self.context)
        self.assert_notify_called(mock_notify,
                                  (['INFO', 'share.consumed.size'], ))
        mock_driver_call.assert_called_once_with(
            self.context, instances)
    @mock.patch('manila.tests.fake_notifier.FakeNotifier._notify')
    def test_update_share_usage_size_fail(self, mock_notify):
        """A driver failure during usage gathering is logged, not raised."""
        instances = self._setup_init_mocks(setup_access_rules=False)
        mock_notify.assert_not_called()
        self.mock_object(self.share_manager, 'driver')
        self.mock_object(self.share_manager.db,
                         'share_instances_get_all_by_host',
                         mock.Mock(return_value=instances))
        self.mock_object(self.share_manager.db, 'share_instance_get',
                         mock.Mock(side_effect=instances))
        self.mock_object(
            self.share_manager.driver, 'update_share_usage_size',
            mock.Mock(side_effect=exception.ProcessExecutionError))
        mock_log_exception = self.mock_object(manager.LOG, 'exception')
        self.share_manager.update_share_usage_size(self.context)
        # The periodic task must survive the error and log it.
        self.assertTrue(mock_log_exception.called)
@ddt.ddt
class HookWrapperTestCase(test.TestCase):
    """Tests for the manager.add_hooks decorator."""
    def setUp(self):
        super(HookWrapperTestCase, self).setUp()
        self.configuration = mock.Mock()
        self.configuration.safe_get.return_value = True
    @manager.add_hooks
    def _fake_wrapped_method(self, some_arg, some_kwarg):
        # Stand-in for a driver action; add_hooks wraps it so that
        # self.hooks (presumably read at call time — confirm against
        # manager.add_hooks) are invoked around it.
        return "foo"
    def test_hooks_enabled(self):
        """Each registered hook gets pre and post callbacks exactly once."""
        self.hooks = [mock.Mock(return_value=i) for i in range(2)]
        result = self._fake_wrapped_method(
            "some_arg", some_kwarg="some_kwarg_value")
        self.assertEqual("foo", result)
        for i, mock_hook in enumerate(self.hooks):
            mock_hook.execute_pre_hook.assert_called_once_with(
                "some_arg",
                func_name="_fake_wrapped_method",
                some_kwarg="some_kwarg_value")
            # The post hook receives both the wrapped method's result and
            # whatever the matching pre hook returned.
            mock_hook.execute_post_hook.assert_called_once_with(
                "some_arg",
                func_name="_fake_wrapped_method",
                driver_action_results="foo",
                pre_hook_data=self.hooks[i].execute_pre_hook.return_value,
                some_kwarg="some_kwarg_value")
    def test_hooks_disabled(self):
        """With no hooks registered the wrapped method runs unchanged."""
        self.hooks = []
        result = self._fake_wrapped_method(
            "some_arg", some_kwarg="some_kwarg_value")
        self.assertEqual("foo", result)
        # Loop body is vacuous with an empty hook list; kept for symmetry.
        for mock_hook in self.hooks:
            self.assertFalse(mock_hook.execute_pre_hook.called)
            self.assertFalse(mock_hook.execute_post_hook.called)
| 47.493072 | 79 | 0.640515 |
57d3914639b7f5850aecce044bd00f2dd53a6324 | 16,162 | py | Python | examples/advanced_operations/add_dynamic_page_feed.py | infectious/google-ads-python | 599541be2ab625c7aeb84b8622a8614a6c1703d9 | [
"Apache-2.0"
] | null | null | null | examples/advanced_operations/add_dynamic_page_feed.py | infectious/google-ads-python | 599541be2ab625c7aeb84b8622a8614a6c1703d9 | [
"Apache-2.0"
] | null | null | null | examples/advanced_operations/add_dynamic_page_feed.py | infectious/google-ads-python | 599541be2ab625c7aeb84b8622a8614a6c1703d9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds a page feed with URLs for a Dynamic Search Ads Campaign.
The page feed specifies precisely which URLs to use with the campaign. To use
a Dynamic Search Ads Campaign run add_dynamic_search_ads_campaign.py. To get
campaigns run basic_operations/get_campaigns.py.
"""
import argparse
import sys
import uuid
from google.api_core import protobuf_helpers
from google.ads.google_ads.client import GoogleAdsClient
from google.ads.google_ads.errors import GoogleAdsException
# Class to keep track of page feed details.
class FeedDetails(object):
    """Value object tying a page feed's resource name to its attribute IDs.

    Attributes:
        resource_name: resource name str of the feed.
        url_attribute_id: ID of the feed's "Page URL" attribute.
        label_attribute_id: ID of the feed's "Label" attribute.
    """

    def __init__(self, resource_name, url_attribute_id, label_attribute_id):
        """Stores the given feed identifiers verbatim."""
        (self.resource_name,
         self.url_attribute_id,
         self.label_attribute_id) = (
            resource_name, url_attribute_id, label_attribute_id)
def main(client, customer_id, campaign_id, ad_group_id):
    """The main method that creates all necessary entities for the example.

    Args:
        client: an initialized GoogleAdsClient instance.
        customer_id: a client customer ID str.
        campaign_id: a campaign ID str.
        ad_group_id: an ad group ID str.
    """
    # Label used both on the feed items and for DSA webpage targeting below.
    dsa_page_url_label = "discounts"
    try:
        # Get the page feed resource name. This code example creates a new feed,
        # but you can fetch and re-use an existing feed.
        feed_resource_name = create_feed(client, customer_id)
        # We need to look up the attribute name and ID for the feed we just
        # created so that we can give them back to the API for construction of
        # feed mappings in the next function.
        feed_details = get_feed_details(client, customer_id, feed_resource_name)
        create_feed_mapping(client, customer_id, feed_details)
        create_feed_items(client, customer_id, feed_details, dsa_page_url_label)
        # Associate the page feed with the campaign.
        update_campaign_dsa_setting(
            client, customer_id, campaign_id, feed_details
        )
        ad_group_service = client.get_service("AdGroupService", version="v5")
        ad_group_resource_name = ad_group_service.ad_group_path(
            customer_id, ad_group_id
        )
        # Optional: Target web pages matching the feed's label in the ad group.
        add_dsa_targeting(
            client, customer_id, ad_group_resource_name, dsa_page_url_label
        )
    except GoogleAdsException as ex:
        # Print every partial-failure error the API returned, then exit
        # with a non-zero status so callers can detect the failure.
        print(
            'Request with ID "{}" failed with status "{}" and includes the '
            "following errors:".format(ex.request_id, ex.error.code().name)
        )
        for error in ex.failure.errors:
            print('\tError with message "{}".'.format(error.message))
            if error.location:
                for field_path_element in error.location.field_path_elements:
                    print(
                        "\t\tOn field: {}".format(field_path_element.field_name)
                    )
        sys.exit(1)
def create_feed(client, customer_id):
    """Creates a page feed with URLs.

    Args:
        client: an initialized GoogleAdsClient instance.
        customer_id: a client customer ID str.

    Returns:
        The str resource name of the newly created feed.
    """
    # Retrieve a new feed operation object.
    feed_operation = client.get_type("FeedOperation", version="v5")
    # Create a new feed with a unique name to avoid collisions on re-runs.
    feed = feed_operation.create
    feed.name.value = "DSA Feed #{}".format(uuid.uuid4())
    feed.origin = client.get_type("FeedOriginEnum", version="v5").USER
    feed_attribute_type_enum = client.get_type(
        "FeedAttributeTypeEnum", version="v5"
    )
    # Create the feed's attributes: one URL-list column and one string-list
    # label column. Their IDs are looked up later in get_feed_details().
    feed_attribute_url = client.get_type("FeedAttribute", version="v5")
    feed_attribute_url.type = feed_attribute_type_enum.URL_LIST
    feed_attribute_url.name.value = "Page URL"
    feed.attributes.append(feed_attribute_url)
    feed_attribute_label = client.get_type("FeedAttribute", version="v5")
    feed_attribute_label.type = feed_attribute_type_enum.STRING_LIST
    feed_attribute_label.name.value = "Label"
    feed.attributes.append(feed_attribute_label)
    # Retrieve the feed service.
    feed_service = client.get_service("FeedService", version="v5")
    # Send the feed operation and add the feed.
    response = feed_service.mutate_feeds(customer_id, [feed_operation])
    return response.results[0].resource_name
def get_feed_details(client, customer_id, resource_name):
    """Makes a search request to retrieve the attributes of a single feed.

    Args:
        client: an initialized GoogleAdsClient instance.
        customer_id: a client customer ID str.
        resource_name: the str resource_name of a feed.

    Returns:
        A FeedDetails instance with information about the feed that was
        retrieved in the search request.

    Raises:
        ValueError: if no feed with the given resource name is found.
    """
    query = f"""
        SELECT feed.attributes
        FROM feed
        WHERE feed.resource_name = '{resource_name}'"""

    ga_service = client.get_service("GoogleAdsService", version="v5")
    response = ga_service.search(customer_id, query=query)

    # The query filters on a unique resource name, so at most one row is
    # expected; keep the attribute map from the (only) row seen.
    attribute_lookup = None
    for row in response:
        attribute_lookup = {
            attribute.name.value: attribute.id.value
            for attribute in row.feed.attributes
        }
    if attribute_lookup is None:
        # NOTE(review): previously an empty result crashed later with a
        # NameError on attribute_lookup; fail loudly and clearly instead.
        raise ValueError(
            "No feed found with resource name '{}'.".format(resource_name)
        )

    return FeedDetails(
        resource_name, attribute_lookup["Page URL"], attribute_lookup["Label"]
    )
def create_feed_mapping(client, customer_id, feed_details):
    """Creates feed mapping using the given feed details.

    The mapping tells Google Ads that the feed's "Page URL" attribute holds
    DSA page URLs and its "Label" attribute holds DSA labels.

    Args:
        client: an initialized GoogleAdsClient instance.
        customer_id: a client customer ID str.
        feed_details: a FeedDetails instance with feed attribute information
    """
    # Retrieve a new feed mapping operation object.
    feed_mapping_operation = client.get_type(
        "FeedMappingOperation", version="v5"
    )
    # Create a new feed mapping.
    feed_mapping = feed_mapping_operation.create
    feed_mapping.criterion_type = client.get_type(
        "FeedMappingCriterionTypeEnum", version="v5"
    ).DSA_PAGE_FEED
    feed_mapping.feed.value = feed_details.resource_name
    dsa_page_feed_field_enum = client.get_type(
        "DsaPageFeedCriterionFieldEnum", version="v5"
    )
    # Map the feed's URL attribute to the DSA PAGE_URL field.
    url_field_mapping = client.get_type("AttributeFieldMapping", version="v5")
    url_field_mapping.feed_attribute_id.value = feed_details.url_attribute_id
    url_field_mapping.dsa_page_feed_field = dsa_page_feed_field_enum.PAGE_URL
    feed_mapping.attribute_field_mappings.append(url_field_mapping)
    # Map the feed's label attribute to the DSA LABEL field.
    label_field_mapping = client.get_type("AttributeFieldMapping", version="v5")
    label_field_mapping.feed_attribute_id.value = (
        feed_details.label_attribute_id
    )
    label_field_mapping.dsa_page_feed_field = dsa_page_feed_field_enum.LABEL
    feed_mapping.attribute_field_mappings.append(label_field_mapping)
    # Retrieve the feed mapping service.
    feed_mapping_service = client.get_service(
        "FeedMappingService", version="v5"
    )
    # Submit the feed mapping operation and add the feed mapping.
    response = feed_mapping_service.mutate_feed_mappings(
        customer_id, [feed_mapping_operation]
    )
    resource_name = response.results[0].resource_name
    # Display the results.
    print("Feed mapping created with resource_name: # {}".format(resource_name))
def create_feed_items(client, customer_id, feed_details, label):
    """Creates feed items with the given feed_details and label.

    Args:
        client: an initialized GoogleAdsClient instance.
        customer_id: a client customer ID str.
        feed_details: a FeedDetails instance with feed attribute information
        label: a Dynamic Search Ad URL label str.
    """
    # See https://support.google.com/adwords/answer/7166527 for page feed URL
    # recommendations and rules.
    urls = [
        "http://www.example.com/discounts/rental-cars",
        "http://www.example.com/discounts/hotel-deals",
        "http://www.example.com/discounts/flight-deals",
    ]

    def map_feed_urls(url):
        # Builds one FeedItemOperation carrying the URL and the shared label,
        # keyed by the attribute IDs captured in feed_details.
        feed_item_operation = client.get_type("FeedItemOperation", version="v5")
        feed_item = feed_item_operation.create
        feed_item.feed.value = feed_details.resource_name
        url_attribute_value = client.get_type(
            "FeedItemAttributeValue", version="v5"
        )
        url_attribute_value.feed_attribute_id.value = (
            feed_details.url_attribute_id
        )
        url_string_val = client.get_type("StringValue", version="v5")
        url_string_val.value = url
        url_attribute_value.string_values.append(url_string_val)
        feed_item.attribute_values.append(url_attribute_value)
        label_attribute_value = client.get_type(
            "FeedItemAttributeValue", version="v5"
        )
        label_attribute_value.feed_attribute_id.value = (
            feed_details.label_attribute_id
        )
        label_string_val = client.get_type("StringValue", version="v5")
        label_string_val.value = label
        label_attribute_value.string_values.append(label_string_val)
        return feed_item_operation

    # Create a new feed item operation for each of the URLs in the url list.
    feed_item_operations = list(map(map_feed_urls, urls))
    # Retrieve the feed item service.
    feed_item_service = client.get_service("FeedItemService", version="v5")
    # Submit the feed item operations and add the feed items.
    response = feed_item_service.mutate_feed_items(
        customer_id, feed_item_operations
    )
    # Display the results.
    for feed_item in response.results:
        print(
            "Created feed item with resource_name: # {}".format(
                feed_item.resource_name
            )
        )
def update_campaign_dsa_setting(client, customer_id, campaign_id, feed_details):
    """Updates the given campaign with the given feed details.

    Looks the campaign up via GAQL, verifies it is configured for Dynamic
    Search Ads, attaches the page feed to its DSA setting and submits the
    mutate request.

    Args:
        client: an initialized GoogleAdsClient instance.
        customer_id: a client customer ID str.
        campaign_id: a campaign ID str.
        feed_details: a FeedDetails instance with feed attribute information.

    Raises:
        ValueError: if no campaign with the given ID exists, or the campaign
            has no Dynamic Search Ads domain name configured.
    """
    query = f"""
        SELECT
          campaign.id,
          campaign.name,
          campaign.dynamic_search_ads_setting.domain_name,
          campaign.dynamic_search_ads_setting.language_code,
          campaign.dynamic_search_ads_setting.use_supplied_urls_only
        FROM campaign
        WHERE campaign.id = {campaign_id}
        LIMIT 1"""
    ga_service = client.get_service("GoogleAdsService", version="v5")
    results = ga_service.search(customer_id, query=query)
    campaign = None
    # The query is LIMIT 1, so this loop binds at most one row's campaign.
    for row in results:
        campaign = row.campaign
    if not campaign:
        raise ValueError("Campaign with id #{} not found".format(campaign_id))
    # A configured domain name is the marker that DSA is enabled on the campaign.
    if not campaign.dynamic_search_ads_setting.domain_name:
        raise ValueError(
            "Campaign id #{} is not set up for Dynamic Search Ads.".format(
                campaign_id
            )
        )
    # Retrieve a new campaign operation
    campaign_operation = client.get_type("CampaignOperation", version="v5")
    # Copy the retrieved campaign onto the new campaign operation.
    campaign_operation.update.CopyFrom(campaign)
    updated_campaign = campaign_operation.update
    # Use a page feed to specify precisely which URLs to use with your Dynamic
    # Search ads.
    updated_campaign.dynamic_search_ads_setting.feeds.append(
        feed_details.resource_name
    )
    # Normally we would pass in "campaign" as the first parameter to generate
    # a field mask that represents the difference between the original campaign
    # object and the updated one, which would be a list containing
    # "resource_name" and "dynamic_search_ads_setting.feeds."
    # However, for DSA campaigns, the API currently requires "domain_name" and
    # "language_code" fields to be present in the update request, regardless of
    # whether they are being updated or not. This behavior is not consistent
    # with the rest of the API. As a workaround, we pass None instead, to
    # generate a field mask that contains these two fields, even though they are
    # not being explicitly changed.
    field_mask = protobuf_helpers.field_mask(None, updated_campaign)
    campaign_operation.update_mask.CopyFrom(field_mask)
    # Retrieve the campaign service.
    campaign_service = client.get_service("CampaignService", version="v5")
    # Submit the campaign operation and update the campaign.
    response = campaign_service.mutate_campaigns(
        customer_id, [campaign_operation]
    )
    resource_name = response.results[0].resource_name
    # Display the results.
    print("Updated campaign #{}".format(resource_name))
def add_dsa_targeting(client, customer_id, ad_group_resource_name, label):
    """Attach a webpage targeting criterion for the page feed label to an ad group.

    Args:
        client: an initialized GoogleAdsClient instance.
        customer_id: a client customer ID str.
        ad_group_resource_name: a resource_name str for an Ad Group.
        label: a Dynamic Search Ad URL label str.
    """
    operation = client.get_type("AdGroupCriterionOperation", version="v5")

    criterion = operation.create
    criterion.ad_group = ad_group_resource_name
    # Custom bid applied to the pages matched by this criterion.
    criterion.cpc_bid_micros = 1500000
    criterion.webpage.criterion_name = "Test criterion"

    # Restrict targeting to pages whose feed item carries the supplied label.
    condition = criterion.webpage.conditions.add()
    condition.argument = label
    condition.operand = client.get_type(
        "WebpageConditionOperandEnum", version="v5"
    ).CUSTOM_LABEL

    service = client.get_service("AdGroupCriterionService", version="v5")
    response = service.mutate_ad_group_criteria(customer_id, [operation])

    print(
        "Created ad group criterion with resource_name: # {}".format(
            response.results[0].resource_name
        )
    )
if __name__ == "__main__":
    # GoogleAdsClient picks up the google-ads.yaml configuration file from the
    # home directory when no explicit path is supplied.
    google_ads_client = GoogleAdsClient.load_from_storage()

    arg_parser = argparse.ArgumentParser(
        description="Adds a page feed with URLs for a Dynamic Search Ads Campaign."
    )
    # All three arguments are required to run the example.
    arg_parser.add_argument("-c", "--customer_id", type=str, required=True,
                            help="The Google Ads customer ID.")
    arg_parser.add_argument("-i", "--campaign_id", type=str, required=True,
                            help="The campaign ID.")
    arg_parser.add_argument("-a", "--ad_group_id", type=str, required=True,
                            help="The ad group ID.")
    parsed_args = arg_parser.parse_args()

    main(google_ads_client, parsed_args.customer_id,
         parsed_args.campaign_id, parsed_args.ad_group_id)
| 38.028235 | 80 | 0.70375 |
5c82724eee29fcdc639ddf1ecba31ccc48527851 | 21,369 | py | Python | test/unit/data_operations/test_data_operations_implementations.py | technocreep/FEDOT | c11f19d1d231bd9c1d96d6e39d14697a028f6272 | [
"BSD-3-Clause"
] | null | null | null | test/unit/data_operations/test_data_operations_implementations.py | technocreep/FEDOT | c11f19d1d231bd9c1d96d6e39d14697a028f6272 | [
"BSD-3-Clause"
] | null | null | null | test/unit/data_operations/test_data_operations_implementations.py | technocreep/FEDOT | c11f19d1d231bd9c1d96d6e39d14697a028f6272 | [
"BSD-3-Clause"
] | null | null | null | import os
import numpy as np
from examples.simple.classification.classification_with_tuning import get_classification_dataset
from examples.simple.regression.regression_with_tuning import get_regression_dataset
from examples.simple.time_series_forecasting.gapfilling import generate_synthetic_data
from fedot.core.data.data import InputData
from fedot.core.data.data_split import train_test_data_setup
from fedot.core.data.supplementary_data import SupplementaryData
from fedot.core.operations.evaluation.operation_implementations.data_operations. \
sklearn_transformations import ImputationImplementation
from fedot.core.operations.evaluation.operation_implementations.data_operations.ts_transformations import \
CutImplementation, LaggedTransformationImplementation
from fedot.core.pipelines.node import PrimaryNode, SecondaryNode
from fedot.core.pipelines.pipeline import Pipeline
from fedot.core.repository.dataset_types import DataTypesEnum
from fedot.core.repository.operation_types_repository import OperationTypesRepository
from fedot.core.repository.tasks import Task, TaskTypesEnum, TsForecastingParams
from fedot.preprocessing.data_types import NAME_CLASS_FLOAT, NAME_CLASS_INT, \
NAME_CLASS_STR
from test.unit.preprocessing.test_preprocessing_though_api import data_with_only_categorical_features
# Fix the global NumPy RNG seed so the synthetic datasets built below are
# reproducible across test runs.
np.random.seed(2021)
def get_small_regression_dataset():
    """Build a small synthetic train/predict InputData pair for regression.

    Returns:
        tuple: (train InputData, predict InputData, test target column vector).
    """
    x_tr, y_tr, x_ts, y_ts = get_regression_dataset(
        features_options={'informative': 2, 'bias': 2.0},
        samples_amount=70,
        features_amount=4)
    # Targets are stored as column vectors, matching the table convention.
    y_tr = y_tr.reshape((-1, 1))
    y_ts = y_ts.reshape((-1, 1))

    regression_task = Task(TaskTypesEnum.regression)

    train_input = InputData(idx=np.arange(0, len(x_tr)),
                            features=x_tr,
                            target=y_tr,
                            task=regression_task,
                            data_type=DataTypesEnum.table)
    # The predict part deliberately carries no target.
    predict_input = InputData(idx=np.arange(0, len(x_ts)),
                              features=x_ts,
                              target=None,
                              task=regression_task,
                              data_type=DataTypesEnum.table)
    return train_input, predict_input, y_ts
def get_small_classification_dataset():
    """Build a small synthetic train/predict InputData pair for classification.

    Returns:
        tuple: (train InputData, predict InputData, test target column vector).
    """
    x_tr, y_tr, x_ts, y_ts = get_classification_dataset(
        features_options={'informative': 1, 'redundant': 0,
                          'repeated': 0, 'clusters_per_class': 1},
        samples_amount=70,
        features_amount=4,
        classes_amount=2)
    # Targets are stored as column vectors, matching the table convention.
    y_tr = y_tr.reshape((-1, 1))
    y_ts = y_ts.reshape((-1, 1))

    classification_task = Task(TaskTypesEnum.classification)

    train_input = InputData(idx=np.arange(0, len(x_tr)),
                            features=x_tr,
                            target=y_tr,
                            task=classification_task,
                            data_type=DataTypesEnum.table)
    # The predict part deliberately carries no target.
    predict_input = InputData(idx=np.arange(0, len(x_ts)),
                              features=x_ts,
                              target=None,
                              task=classification_task,
                              data_type=DataTypesEnum.table)
    return train_input, predict_input, y_ts
def get_time_series():
    """Produce synthetic train/predict InputData for a 5-step forecasting task.

    Returns:
        tuple: (train InputData, predict InputData, holdout values to forecast).
    """
    forecast_horizon = 5
    series = generate_synthetic_data(length=80)
    history = series[:-forecast_horizon]
    holdout = series[-forecast_horizon:]

    task = Task(TaskTypesEnum.ts_forecasting,
                TsForecastingParams(forecast_length=forecast_horizon))

    train_input = InputData(idx=np.arange(0, len(history)),
                            features=history,
                            target=history,
                            task=task,
                            data_type=DataTypesEnum.ts)
    # Prediction indices continue immediately after the training range.
    forecast_idx = np.arange(len(history), len(history) + forecast_horizon)
    predict_input = InputData(idx=forecast_idx,
                              features=history,
                              target=None,
                              task=task,
                              data_type=DataTypesEnum.ts)
    return train_input, predict_input, holdout
def get_multivariate_time_series():
    """Build one InputData block holding two stacked series; the first is the target."""
    first_series = np.arange(0, 10).reshape((-1, 1))
    second_series = np.arange(10, 20).reshape((-1, 1))
    stacked = np.hstack((first_series, second_series))

    task = Task(TaskTypesEnum.ts_forecasting,
                TsForecastingParams(forecast_length=2))

    return InputData(idx=np.arange(0, len(stacked)),
                     features=stacked,
                     target=np.ravel(first_series),
                     task=task,
                     data_type=DataTypesEnum.ts)
def get_nan_inf_data():
    """Small regression table polluted with nan and +/-inf values."""
    features = np.array([[1, 2, 3, 4],
                         [2, np.nan, 4, 5],
                         [3, 4, 5, np.inf],
                         [-np.inf, 5, 6, 7]])
    # All four feature columns are declared as floats.
    column_meta = SupplementaryData(column_types={'features': [NAME_CLASS_FLOAT] * 4})
    return InputData(idx=[0, 1, 2, 3],
                     features=features,
                     target=np.array([1, 2, 3, 4]),
                     task=Task(TaskTypesEnum.regression),
                     data_type=DataTypesEnum.table,
                     supplementary_data=column_meta)
def get_single_feature_data(task=None):
    """Build a six-row table with a single integer feature and a binary target.

    Args:
        task: optional Task instance attached to the returned InputData.
    """
    supp_data = SupplementaryData(column_types={'features': [NAME_CLASS_INT],
                                                'target': [NAME_CLASS_INT]})
    train_input = InputData(idx=[0, 1, 2, 3, 4, 5],
                            features=np.array([[1], [2], [3], [7], [8], [9]]),
                            target=np.array([[0], [0], [0], [1], [1], [1]]),
                            task=task,
                            data_type=DataTypesEnum.table,
                            supplementary_data=supp_data)
    return train_input
def get_mixed_data(task=None, extended=False):
    """Generate InputData mixing numeric and categorical (string) columns.

    The categorical features are created in such a way that any split leaves
    categories in the test part that were never seen in the train part.

    Args:
        task: optional Task attached to the returned InputData.
        extended: if True, return a wider 7-column table that also contains
            nan cells; otherwise return a compact 3-column table.
    """
    if extended:
        features = np.array([[1, '0', '1', 1, '5', 'blue', 'blue'],
                             [2, '1', '0', 0, '4', 'blue', 'da'],
                             [3, '1', '0', 1, '3', 'blue', 'ba'],
                             [np.nan, np.nan, '1', np.nan, '2', 'not blue', 'di'],
                             [8, '1', '1', 0, '1', 'not blue', 'da bu'],
                             [9, '0', '0', 0, '0', 'not blue', 'dai']], dtype=object)
        features_types = [NAME_CLASS_INT, NAME_CLASS_STR, NAME_CLASS_STR, NAME_CLASS_INT,
                          NAME_CLASS_STR, NAME_CLASS_STR, NAME_CLASS_STR]
        supp_data = SupplementaryData(column_types={'features': features_types,
                                                    'target': [NAME_CLASS_INT]})
    else:
        features = np.array([[1, '0', 1],
                             [2, '1', 0],
                             [3, '1', 0],
                             [7, '1', 1],
                             [8, '1', 1],
                             [9, '0', 0]], dtype=object)
        features_types = [NAME_CLASS_INT, NAME_CLASS_STR, NAME_CLASS_INT]
        supp_data = SupplementaryData(column_types={'features': features_types,
                                                    'target': [NAME_CLASS_INT]})
    train_input = InputData(idx=[0, 1, 2, 3, 4, 5],
                            features=features,
                            target=np.array([[0], [0], [0], [1], [1], [1]]),
                            task=task,
                            data_type=DataTypesEnum.table,
                            supplementary_data=supp_data)
    return train_input
def get_nan_binary_data(task=None):
    """Generate a table with two numerical columns and one categorical column,
    each containing a nan that must be filled in.

    The third column is a binary int column, which imputation should treat as
    "almost categorical": a nan in [1, nan, 0, 0] must be filled with the
    majority label 0, not with the mean 0.33.

    Args:
        task: optional Task attached to the returned InputData.
    """
    features_types = [NAME_CLASS_INT, NAME_CLASS_STR, NAME_CLASS_INT]
    supp_data = SupplementaryData(column_types={'features': features_types})
    features = np.array([[1, '0', 0],
                         [np.nan, np.nan, np.nan],
                         [0, '2', 1],
                         [1, '1', 1],
                         [5, '1', 1]], dtype=object)
    # NOTE(review): idx and target carry 4 entries while features has 5 rows —
    # confirm this mismatch is intentional (imputation only reads features).
    input_data = InputData(idx=[0, 1, 2, 3], features=features,
                           target=np.array([[0], [0], [1], [1]]),
                           task=task, data_type=DataTypesEnum.table,
                           supplementary_data=supp_data)
    return input_data
def data_with_binary_int_features_and_equal_categories():
    """Generate a table with binary integer features containing nans.

    Such columns should be processed as "almost categorical". Here each column
    has exactly one occurrence of each known value (e.g. [1, nan, nan, 0]), so
    no majority label exists and the imputer is expected to fall back to the
    mean value instead.
    """
    supp_data = SupplementaryData(column_types={'features': [NAME_CLASS_INT, NAME_CLASS_INT]})
    task = Task(TaskTypesEnum.classification)
    features = np.array([[1, 10],
                         [np.nan, np.nan],
                         [np.nan, np.nan],
                         [0, 0]])
    # Target marks which rows were complete vs gapped.
    target = np.array([['not-nan'], ['nan'], ['nan'], ['not-nan']])
    train_input = InputData(idx=[0, 1, 2, 3], features=features, target=target,
                            task=task, data_type=DataTypesEnum.table,
                            supplementary_data=supp_data)
    return train_input
def test_regression_data_operations():
    """Every regression-suitable data operation must fit and predict inside a pipeline."""
    train_input, predict_input, y_test = get_small_regression_dataset()

    operation_names, _ = OperationTypesRepository('data_operation').suitable_operation(
        task_type=TaskTypesEnum.regression)

    for operation_name in operation_names:
        # data operation -> linear model, trained from scratch each time
        pipeline = Pipeline(
            SecondaryNode('linear', nodes_from=[PrimaryNode(operation_name)]))
        pipeline.fit_from_scratch(train_input)

        prediction = pipeline.predict(predict_input).predict
        assert len(prediction) == len(y_test)
def test_classification_data_operations():
    """Every classification-suitable data operation must fit and predict inside a pipeline."""
    train_input, predict_input, y_test = get_small_classification_dataset()

    operation_names, _ = OperationTypesRepository('data_operation').suitable_operation(
        task_type=TaskTypesEnum.classification)

    for operation_name in operation_names:
        # data operation -> logistic regression, trained from scratch each time
        pipeline = Pipeline(
            SecondaryNode('logit', nodes_from=[PrimaryNode(operation_name)]))
        pipeline.fit_from_scratch(train_input)

        prediction = pipeline.predict(predict_input).predict
        assert len(prediction) == len(y_test)
def test_ts_forecasting_lagged_data_operation():
    """A lagged -> ridge pipeline must produce a forecast of the requested length."""
    train_input, predict_input, y_test = get_time_series()

    pipeline = Pipeline(SecondaryNode('ridge', nodes_from=[PrimaryNode('lagged')]))
    pipeline.fit_from_scratch(train_input)

    forecast = np.ravel(pipeline.predict(predict_input).predict)
    assert len(forecast) == len(np.ravel(y_test))
def test_ts_forecasting_cut_data_operation():
    """Cutting half of the series must leave (len + horizon) / 2 elements."""
    train_input, predict_input, y_test = get_time_series()
    horizon = train_input.task.task_params.forecast_length
    operation_cut = CutImplementation(cut_part=0.5)
    transformed_input = operation_cut.transform(train_input, is_fit_pipeline_stage=True)
    # Rearranged: transformed = (original + horizon) / 2 for cut_part=0.5.
    assert train_input.idx.shape[0] == 2 * transformed_input.idx.shape[0] - horizon
def test_ts_forecasting_smoothing_data_operation():
    """Every smoothing operation must work as the first node of a forecasting pipeline."""
    train_input, predict_input, y_test = get_time_series()
    model_names, _ = OperationTypesRepository().operations_with_tag(tags=['smoothing'])
    for smoothing_operation in model_names:
        # smoothing -> lagged -> ridge
        node_smoothing = PrimaryNode(smoothing_operation)
        node_lagged = SecondaryNode('lagged', nodes_from=[node_smoothing])
        node_ridge = SecondaryNode('ridge', nodes_from=[node_lagged])
        pipeline = Pipeline(node_ridge)
        pipeline.fit_from_scratch(train_input)
        predicted_output = pipeline.predict(predict_input)
        predicted = np.ravel(predicted_output.predict)
        assert len(predicted) == len(np.ravel(y_test))
def test_inf_and_nan_absence_after_imputation_implementation_fit_transform():
    """fit_transform of the imputer must remove every nan and inf value."""
    corrupted_data = get_nan_inf_data()
    imputed = ImputationImplementation().fit_transform(corrupted_data).predict
    assert np.sum(np.isinf(imputed)) == 0
    assert np.sum(np.isnan(imputed)) == 0
def test_inf_and_nan_absence_after_imputation_implementation_fit_and_transform():
    """Separate fit() then transform() calls must also remove every nan and inf."""
    input_data = get_nan_inf_data()
    imputer = ImputationImplementation()
    imputer.fit(input_data)
    output_data = imputer.transform(input_data)
    assert np.sum(np.isinf(output_data.predict)) == 0
    assert np.sum(np.isnan(output_data.predict)) == 0
def test_inf_and_nan_absence_after_pipeline_fitting_from_scratch():
    """Pipelines over corrupted data must never emit nan or inf predictions."""
    train_input = get_nan_inf_data()
    model_names, _ = OperationTypesRepository().suitable_operation(task_type=TaskTypesEnum.regression)
    for model_name in model_names:
        node_data_operation = PrimaryNode(model_name)
        node_final = SecondaryNode('linear', nodes_from=[node_data_operation])
        pipeline = Pipeline(node_final)
        # Fit and predict for pipeline
        pipeline.fit_from_scratch(train_input)
        predicted_output = pipeline.predict(train_input)
        predicted = predicted_output.predict
        assert np.sum(np.isinf(predicted)) == 0
        assert np.sum(np.isnan(predicted)) == 0
def test_feature_selection_of_single_features():
    """Feature-selection operations must keep the shape of a single-column table."""
    for task_type in [TaskTypesEnum.classification, TaskTypesEnum.regression]:
        model_names, _ = OperationTypesRepository(operation_type='data_operation') \
            .suitable_operation(tags=['feature_selection'], task_type=task_type)
        task = Task(task_type)
        for data_operation in model_names:
            node_data_operation = PrimaryNode(data_operation)
            # A fresh node must not carry a fitted operation yet.
            assert node_data_operation.fitted_operation is None
            # Fit and predict for pipeline
            train_input = get_single_feature_data(task)
            node_data_operation.fit(train_input)
            predicted_output = node_data_operation.predict(train_input)
            predicted = predicted_output.predict
            assert node_data_operation.fitted_operation is not None
            # With only one feature there is nothing to drop — shape unchanged.
            assert predicted.shape == train_input.features.shape
def test_one_hot_encoding_new_category_in_test():
    """One-hot encoding must survive categories that were unseen during fitting."""
    mixed_data = get_mixed_data(task=Task(TaskTypesEnum.classification),
                                extended=True)
    train, test = train_test_data_setup(mixed_data)

    # one_hot_encoding -> decision tree
    pipeline = Pipeline(
        SecondaryNode('dt', nodes_from=[PrimaryNode('one_hot_encoding')]))
    pipeline.fit(train)

    assert pipeline.predict(test) is not None
def test_knn_with_float_neighbors():
    """
    Check that a pipeline with k-NN fits and predicts without error when the
    n_neighbors parameter is supplied as a float value.
    """
    node_knn = PrimaryNode('knnreg')
    # Deliberately non-integer value; the operation is expected to handle it.
    node_knn.custom_params = {'n_neighbors': 2.5}
    pipeline = Pipeline(node_knn)
    input_data = get_single_feature_data(task=Task(TaskTypesEnum.regression))
    pipeline.fit(input_data)
    pipeline.predict(input_data)
def test_imputation_with_binary_correct():
    """
    Check if SimpleImputer can process mixed data with both numerical and categorical
    features correctly. Moreover, check if the imputer swaps the columns (it shouldn't)
    """
    nan_data = get_nan_binary_data(task=Task(TaskTypesEnum.classification))
    # Create node with imputation operation
    imputation_node = PrimaryNode('simple_imputation')
    imputation_node.fit(nan_data)
    predicted = imputation_node.predict(nan_data)
    # Numerical column [1, nan, 0, 1, 5]: nan filled with the mean, 7/4 = 1.75.
    assert np.isclose(predicted.predict[1, 0], 1.75)
    # Categorical column ['0', nan, '2', '1', '1']: nan filled with the mode '1'.
    assert predicted.predict[1, 1] == '1'
    # Binary column [0, nan, 1, 1, 1]: nan filled with the majority label 1.
    assert np.isclose(predicted.predict[1, 2], 1)
def test_imputation_binary_features_with_equal_categories_correct():
    """
    The correctness of the gap-filling algorithm is checked on data with binary
    features. The number of known categories in each column is equal. Consequently,
    there is no possibility to insert the majority class label into the gaps.
    Instead of that the mean value is inserted.
    """
    nan_data = data_with_binary_int_features_and_equal_categories()
    imputation_node = PrimaryNode('simple_imputation')
    imputation_node.fit(nan_data)
    predicted = imputation_node.predict(nan_data)
    # Column [1, nan, nan, 0] -> mean 0.5; column [10, nan, nan, 0] -> mean 5.0.
    assert np.isclose(predicted.predict[1, 0], 0.5)
    assert np.isclose(predicted.predict[1, 1], 5.0)
def test_label_encoding_correct():
    """
    Check if LabelEncoder can perform transformations correctly. Also the dataset
    is generated so that new categories appear in the test sample.
    """
    cat_data = data_with_only_categorical_features()
    train_data, test_data = train_test_data_setup(cat_data)
    encoding_node = PrimaryNode('label_encoding')
    encoding_node.fit(train_data)
    # Encode both the seen (train) and partially-unseen (test) samples.
    predicted_train = encoding_node.predict(train_data)
    predicted_test = encoding_node.predict(test_data)
    # Label 'a' was in the training sample - convert it into 0
    assert predicted_train.predict[0, 0] == 0
    # Label 'b' was in the training sample - convert it into 1
    assert predicted_train.predict[1, 0] == 1
    # Label 'c' was not in the training sample - convert it into 2
    assert predicted_test.predict[0, 0] == 2
def test_lagged_with_multivariate_time_series():
    """
    Checking the correct processing of multivariate time series in the lagged operation
    """
    # Each expected row holds a window of 2 values from the first series
    # followed by the matching window of 2 values from the second series.
    correct_fit_output = np.array([[0., 1., 10., 11.],
                                   [1., 2., 11., 12.],
                                   [2., 3., 12., 13.],
                                   [3., 4., 13., 14.],
                                   [4., 5., 14., 15.],
                                   [5., 6., 15., 16.],
                                   [6., 7., 16., 17.]])
    # At predict time only the last window of each series is used.
    correct_predict_output = np.array([[8, 9, 18, 19]])
    input_data = get_multivariate_time_series()
    lagged = LaggedTransformationImplementation(**{'window_size': 2})
    transformed_for_fit = lagged.transform(input_data, is_fit_pipeline_stage=True)
    transformed_for_predict = lagged.transform(input_data, is_fit_pipeline_stage=False)
    # Check correctness on fit stage
    lagged_features = transformed_for_fit.predict
    assert lagged_features.shape == correct_fit_output.shape
    assert np.all(np.isclose(lagged_features, correct_fit_output))
    # Check correctness on predict stage
    lagged_predict = transformed_for_predict.predict
    assert lagged_predict.shape == correct_predict_output.shape
    assert np.all(np.isclose(lagged_predict, correct_predict_output))
def test_poly_features_on_big_datasets():
    """Polynomial features on a wide table must not greatly inflate the column count."""
    csv_path = os.path.join(str(os.path.dirname(__file__)),
                            '../../data', 'advanced_classification.csv')
    train_input = InputData.from_csv(csv_path,
                                     task=Task(TaskTypesEnum.classification))

    # Keep only a small slice of rows to make the check fast.
    train_input.features = train_input.features[5: 20, :]
    train_input.idx = np.arange(len(train_input.features))
    train_input.target = train_input.target[5: 20].reshape((-1, 1))

    poly_pipeline = Pipeline(PrimaryNode('poly_features'))
    poly_pipeline.fit(train_input)
    transformed = poly_pipeline.predict(train_input)

    # With many input features the expansion must stop at exactly 85 columns.
    assert transformed.predict.shape[1] == 85
| 41.736328 | 107 | 0.63784 |
f32b5b6b6db60d6338c2768f55bd984bd3473500 | 206 | pyw | Python | akhelper-gui.pyw | Gliese129/ArknightsAutoHelper | 43971a63da55001ebc55a7e0de56e9364dff04bb | [
"MIT"
] | 18 | 2022-03-18T08:20:28.000Z | 2022-03-31T15:19:15.000Z | akhelper-gui.pyw | Gliese129/ArknightsAutoHelper | 43971a63da55001ebc55a7e0de56e9364dff04bb | [
"MIT"
] | 2 | 2022-03-23T12:04:28.000Z | 2022-03-29T09:30:26.000Z | akhelper-gui.pyw | Gliese129/ArknightsAutoHelper | 43971a63da55001ebc55a7e0de56e9364dff04bb | [
"MIT"
] | 1 | 2022-03-20T05:52:38.000Z | 2022-03-20T05:52:38.000Z | if __name__ == '__main__':
import multiprocessing
multiprocessing.freeze_support()
import util.early_logs
import util.unfuck_https_proxy
import webgui2.server
webgui2.server.start()
| 25.75 | 36 | 0.742718 |
ba6321cdc68274bd007af1c378ec9e9884252c04 | 3,210 | py | Python | tfdiffeq/odeint.py | morgatron/tfdiffeq | ef646f85cbd0821749a03e7ab51e03e16798fab1 | [
"MIT"
] | 214 | 2019-02-10T08:24:12.000Z | 2022-03-31T06:15:05.000Z | tfdiffeq/odeint.py | morgatron/tfdiffeq | ef646f85cbd0821749a03e7ab51e03e16798fab1 | [
"MIT"
] | 14 | 2019-03-02T14:56:29.000Z | 2021-12-28T13:06:45.000Z | tfdiffeq/odeint.py | morgatron/tfdiffeq | ef646f85cbd0821749a03e7ab51e03e16798fab1 | [
"MIT"
] | 40 | 2019-03-03T12:55:09.000Z | 2022-02-11T02:14:47.000Z | from .adams import VariableCoefficientAdamsBashforth
from .dopri5 import Dopri5Solver
from .dopri8 import Dopri8Solver
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .fixed_grid import Euler, Midpoint, RK4, Heun
from .adaptive_huen import AdaptiveHeunSolver
from .bosh3 import Bosh3Solver
from .misc import _check_inputs
from .tsit5 import Tsit5Solver
# Registry mapping user-facing `method` names (as accepted by odeint) to the
# solver classes that implement them.
SOLVERS = {
    'explicit_adams': AdamsBashforth,
    'fixed_adams': AdamsBashforthMoulton,
    'adams': VariableCoefficientAdamsBashforth,
    'tsit5': Tsit5Solver,
    'dopri5': Dopri5Solver,
    'dopri8': Dopri8Solver,
    'bosh3': Bosh3Solver,
    'euler': Euler,
    'midpoint': Midpoint,
    'rk4': RK4,
    'huen': Heun,  # NOTE(review): appears to be a misspelled alias of 'heun' kept for compatibility — confirm before removing
    'heun': Heun,
    'adaptive_heun': AdaptiveHeunSolver
}
def odeint(func, y0, t, rtol=1e-7, atol=1e-9, method=None, options=None):
    """Integrate a system of ordinary differential equations.
    Solves the initial value problem for a non-stiff system of first order ODEs:
    ```
    dy/dt = func(t, y), y(t[0]) = y0
    ```
    where y is a Tensor of any shape.
    Output dtypes and numerical precision are based on the dtypes of the inputs `y0`.
    Args:
        func: Function that maps a Tensor holding the state `y` and a scalar Tensor
            `t` into a Tensor of state derivatives with respect to time.
        y0: N-D Tensor giving starting value of `y` at time point `t[0]`. May
            have any floating point or complex dtype.
        t: 1-D Tensor holding a sequence of time points for which to solve for
            `y`. The initial time point should be the first element of this sequence,
            and each time must be larger than the previous time. May have any floating
            point dtype. Converted to a Tensor with float64 dtype.
        rtol: optional float64 Tensor specifying an upper bound on relative error,
            per element of `y`.
        atol: optional float64 Tensor specifying an upper bound on absolute error,
            per element of `y`.
        method: optional string indicating the integration method to use; one of
            the keys of `SOLVERS`. Defaults to 'dopri5'.
        options: optional dict of configuring options for the indicated integration
            method. Can only be provided if a `method` is explicitly set.
    Returns:
        y: Tensor, where the first dimension corresponds to different
            time points. Contains the solved value of y for each desired time point in
            `t`, with the initial value `y0` being the first element along the first
            dimension.
    Raises:
        ValueError: if an invalid `method` is provided.
        TypeError: if `options` is supplied without `method`, or if `t` or `y0` has
            an invalid dtype.
    """
    # _check_inputs normalizes y0 to a tuple; tensor_input records whether the
    # caller passed a single tensor so the result can be unwrapped at the end.
    tensor_input, func, y0, t = _check_inputs(func, y0, t)
    if options is None:
        options = {}
    elif method is None:
        raise ValueError('cannot supply `options` without specifying `method`')
    if method is None:
        method = 'dopri5'
    solver = SOLVERS[method](func, y0, rtol=rtol, atol=atol, **options)
    solution = solver.integrate(t)
    if tensor_input:
        solution = solution[0]
    return solution
| 39.146341 | 86 | 0.673832 |
0640ff5ad415acffbe6d99948b1f33fd9fb7ac74 | 479 | py | Python | swagger_server/__main__.py | garagonc/optimization-framework | 1ca57699d6a3f2f98dcaea96430e75c3f847b49f | [
"Apache-2.0"
] | null | null | null | swagger_server/__main__.py | garagonc/optimization-framework | 1ca57699d6a3f2f98dcaea96430e75c3f847b49f | [
"Apache-2.0"
] | null | null | null | swagger_server/__main__.py | garagonc/optimization-framework | 1ca57699d6a3f2f98dcaea96430e75c3f847b49f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import connexion
import logging
from swagger_server import encoder
def create_app():
    """Build and configure the Connexion application from the bundled Swagger spec."""
    #logging.getLogger('connexion.operation').setLevel('ERROR')
    app = connexion.App(__name__, specification_dir='./swagger/')
    # Custom JSON encoder so generated swagger models serialize correctly.
    app.app.json_encoder = encoder.JSONEncoder
    app.add_api('swagger.yaml', arguments={'title': 'Optimization framework service'})
    return app
def main():
    """Build the application and serve it on port 8080 (blocking)."""
    create_app().run(port=8080)
if __name__ == '__main__':
    # Bug fix: this guard previously called create_app() on its own, which
    # constructed the application object and immediately discarded it without
    # ever serving requests. Delegate to main(), which builds the app AND
    # runs the server on port 8080.
    main()
d03d6c741bfbc579e2f65fd2204e971c2aa362f0 | 553 | py | Python | WorkflowManager/scripts/run_wildfire.py | KTH-HPC/vestec-system | 8168b90385468ca5e1ed701b5a0090e4423186c7 | [
"BSD-3-Clause"
] | 1 | 2021-10-31T08:41:58.000Z | 2021-10-31T08:41:58.000Z | WorkflowManager/scripts/run_wildfire.py | KTH-HPC/vestec-system | 8168b90385468ca5e1ed701b5a0090e4423186c7 | [
"BSD-3-Clause"
] | null | null | null | WorkflowManager/scripts/run_wildfire.py | KTH-HPC/vestec-system | 8168b90385468ca5e1ed701b5a0090e4423186c7 | [
"BSD-3-Clause"
] | 1 | 2022-02-08T16:57:05.000Z | 2022-02-08T16:57:05.000Z | import sys
sys.path.append("../")
sys.path.append("../../")
from manager import workflow
if __name__ == "__main__":
upperLeft = "1.8347167968750002/53.38332836757156"
lowerRight = "11.744384765625/48.75618876280552"
workflow.OpenConnection()
id = workflow.CreateIncident(name="wildfire",kind="WILDFIRE",upper_left_latlong=upperLeft,lower_right_latlong=lowerRight,duration=1)
msg = {"IncidentID": id}
workflow.send(queue="wildfire_init",message=msg,src_tag="Start")
workflow.FlushMessages()
workflow.CloseConnection()
| 30.722222 | 136 | 0.734177 |
583b71b855d5c7fb8d166823623a09e360f3e2b9 | 3,948 | py | Python | test/Fortran/FORTRANSUFFIXES.py | andrewyoung1991/scons | 7517c277e23bc04e3809a9bf0793cdfe00097a58 | [
"MIT"
] | 1 | 2015-11-04T22:22:10.000Z | 2015-11-04T22:22:10.000Z | test/Fortran/FORTRANSUFFIXES.py | azverkan/scons | 704ddb9270e14c7771d0c58c04c7afa7bc009603 | [
"MIT"
] | null | null | null | test/Fortran/FORTRANSUFFIXES.py | azverkan/scons | 704ddb9270e14c7771d0c58c04c7afa7bc009603 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test the ability to scan additional filesuffixes added to $FORTRANSUFFIXES.
"""
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.write('myfc.py', r"""
import sys
def do_file(outf, inf):
for line in open(inf, 'rb').readlines():
if line[:15] == " INCLUDE '":
do_file(outf, line[15:-2])
else:
outf.write(line)
outf = open(sys.argv[1], 'wb')
for f in sys.argv[2:]:
do_file(outf, f)
sys.exit(0)
""")
test.write('SConstruct', """
env = Environment(FORTRANPATH = ['.'],
FORTRAN = r'%(_python_)s myfc.py',
FORTRANCOM = '$FORTRAN $TARGET $SOURCES',
OBJSUFFIX = '.o')
env.Append(FORTRANSUFFIXES = ['.x'])
env.Object(target = 'test1', source = 'test1.f')
env.InstallAs('test1_f', 'test1.f')
env.InstallAs('test1_h', 'test1.h')
env.InstallAs('test1_x', 'test1.x')
""" % locals())
test.write('test1.f', """\
test1.f 1
INCLUDE 'test1.h'
INCLUDE 'test1.x'
""")
test.write('test1.h', """\
test1.h 1
INCLUDE 'foo.h'
""")
test.write('test1.x', """\
test1.x 1
INCLUDE 'foo.h'
""")
test.write('foo.h', """\
foo.h 1
""")
expect = test.wrap_stdout("""\
%(_python_)s myfc.py test1.o test1.f
Install file: "test1.f" as "test1_f"
Install file: "test1.h" as "test1_h"
Install file: "test1.x" as "test1_x"
""" % locals())
test.run(arguments='.', stdout=expect)
test.must_match('test1.o', """\
test1.f 1
test1.h 1
foo.h 1
test1.x 1
foo.h 1
""")
test.up_to_date(arguments='.')
test.write('foo.h', """\
foo.h 2
""")
expect = test.wrap_stdout("""\
%(_python_)s myfc.py test1.o test1.f
""" % locals())
test.run(arguments='.', stdout=expect)
test.must_match('test1.o', """\
test1.f 1
test1.h 1
foo.h 2
test1.x 1
foo.h 2
""")
test.up_to_date(arguments='.')
test.write('test1.x', """\
test1.x 2
INCLUDE 'foo.h'
""")
expect = test.wrap_stdout("""\
%(_python_)s myfc.py test1.o test1.f
Install file: "test1.x" as "test1_x"
""" % locals())
test.run(arguments='.', stdout=expect)
test.must_match('test1.o', """\
test1.f 1
test1.h 1
foo.h 2
test1.x 2
foo.h 2
""")
test.up_to_date(arguments='.')
test.write('test1.h', """\
test1.h 2
INCLUDE 'foo.h'
""")
expect = test.wrap_stdout("""\
%(_python_)s myfc.py test1.o test1.f
Install file: "test1.h" as "test1_h"
""" % locals())
test.run(arguments='.', stdout=expect)
test.must_match('test1.o', """\
test1.f 1
test1.h 2
foo.h 2
test1.x 2
foo.h 2
""")
test.up_to_date(arguments='.')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 22.820809 | 75 | 0.635512 |
ec63f5cd80834b53a7f86ae0bab1389e7cd8829b | 8,261 | py | Python | project/q_learning_2.0.py | jooncco/coindrop-game-ai-agent | b15b34b61c518f23c2813f53bf18b866c54b4cb4 | [
"BSD-2-Clause"
] | 1 | 2019-04-29T13:55:17.000Z | 2019-04-29T13:55:17.000Z | project/q_learning_2.0.py | jooncco/coindrop-game-ai-agent | b15b34b61c518f23c2813f53bf18b866c54b4cb4 | [
"BSD-2-Clause"
] | null | null | null | project/q_learning_2.0.py | jooncco/coindrop-game-ai-agent | b15b34b61c518f23c2813f53bf18b866c54b4cb4 | [
"BSD-2-Clause"
] | null | null | null | """
The original code is from https://github.com/dennybritz/reinforcement-learning/tree/master/TD
"""
import sys
import numpy as np
import itertools
import pickle
from collections import defaultdict
from game import Game
# Discrete agent states: direction of the catchable coins relative to the basket.
STAY = 0
LEFT = 1
RIGHT = 2
# Same three directions, used while a time item is on screen (takes priority
# over coins in get_state).
TIME_STAY = 3
TIME_LEFT = 4
TIME_RIGHT = 5
# Basket coverage lookup table: the columns covered for each basket position.
# NOTE(review): position 3 jumps from [2, 3, 4] to [4, 5, 6] - confirm intentional.
table_coverage = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9], [8, 9, 10], [9, 10, 11], [10, 11, 11]]
# Time-item tracking (module-level state mutated by get_state/get_reward).
time_flag = 0
time_x = -1
time_distance = 0
# Last weighted squared distance to the catchable coins (see coin_dis).
coin_distance = 0
# happiness (unused placeholder)
happy = 0
### user designed utils
def distance(basket_x, x_pos):
    """Sum of squared distances from column *x_pos* to every column the
    basket at *basket_x* covers."""
    return sum((x_pos - col) ** 2 for col in table_coverage[basket_x])
def coin_dis(game_info):
    """Total distance-weight of all catchable coins.

    Sums item[2] * distance(basket, item column) over every item that is a
    coin (item[0] == 1) and still catchable from the basket position.
    """
    basket_x, item_info = game_info
    total = 0
    for item in item_info:
        if item[0] == 1 and eatable(basket_x, item):
            total += item[2] * distance(basket_x, item[1])
    return total
def eatable(basket_x, item):
    """Return 1 when the falling *item* can still be caught, else 0.

    Items with height item[2] <= 1 are too low to react to. Items already
    over the basket are always catchable; items outside it are catchable
    only if the horizontal gap can be closed within the remaining fall time.
    """
    if item[2] <= 1:
        return 0
    if item[1] in table_coverage[basket_x]:
        return 1
    # outside the basket: compare gap to leftmost covered column vs time left
    return 1 if abs(table_coverage[basket_x][0] - item[1]) <= 9 - item[2] else 0
def happiness(game_info):
    """Bonus of 50 while the tracked time item sits over the basket, else 0."""
    basket_x = game_info[0]
    return 50 if time_x in table_coverage[basket_x] else 0
###
## In our case, we have 3 actions (stay, go-left, go-right)
def get_action_num():
    """Number of discrete actions available: stay, go-left, go-right."""
    return 3
## this function return policy function to choose the action based on Q value.
def make_policy(Q, epsilon, nA):
    """Build an epsilon-greedy policy over the given Q table.

    Returns a callable mapping an observation to a length-nA probability
    vector: every action receives epsilon / nA, and the greedy (argmax-Q)
    action gets the remaining 1 - epsilon probability mass.
    """
    def policy_fn(observation):
        probs = np.ones(nA, dtype=float) * epsilon / nA
        greedy = np.argmax(Q[observation])
        probs[greedy] += (1.0 - epsilon)
        return probs
    return policy_fn
## this function return state from given game information.
def get_state(counter, score, game_info):
    """Map the raw game observation to one of the six discrete states.

    A time item takes priority: once seen, the state reports its direction
    relative to the basket (TIME_STAY/TIME_LEFT/TIME_RIGHT). Otherwise the
    state is the direction (STAY/LEFT/RIGHT) holding the most catchable coins.
    Side effect: records a seen time item in the time_flag/time_x globals.
    """
    coins = [0, 0, 0]
    global time_flag, time_x
    # count eatable items, splitting time items from coins
    for item in game_info[1]:
        if eatable(game_info[0], item):
            # item[0] == 2 marks a time item; remember its column
            if item[0] == 2:
                time_flag = 1
                time_x = item[1]
            # otherwise a coin: bucket it by direction relative to the basket
            else:
                if item[1] in table_coverage[game_info[0]]:
                    coins[STAY] += 1
                elif item[1] < table_coverage[game_info[0]][0]:
                    coins[LEFT] += 1
                else:
                    coins[RIGHT] += 1
    # a remembered time item dominates the state until get_reward clears it
    if time_flag:
        if time_x in table_coverage[game_info[0]]:
            return TIME_STAY
        elif time_x < table_coverage[game_info[0]][0]:
            return TIME_LEFT
        else:
            return TIME_RIGHT
    else:
        return np.argmax(coins)
## this function return reward from given previous and current score and counter.
def get_reward(prev_score, current_score, prev_counter, current_counter, game_info):
    """Shaped reward for the last transition.

    When the counter increased (presumably a time item was caught - confirm
    against Game), pays 100 per count and clears the tracked item. While a
    time item is pending, rewards the decrease in squared distance to it
    plus the happiness bonus. Otherwise rewards the score delta plus twice
    the decrease in weighted coin distance.
    Side effect: updates the time_*/coin_distance module globals.
    """
    global time_flag, time_x, time_distance, coin_distance
    if current_counter > prev_counter:
        time_flag = 0
        time_x = -1
        return 100*(current_counter - prev_counter)
    if time_flag: # a time item is still on screen
        prev_time_dis = time_distance
        time_distance = distance(game_info[0], time_x)
        return (prev_time_dis - time_distance) + happiness(game_info)
    else: # coins only
        prev_coin_dis = coin_distance
        coin_distance = coin_dis(game_info)
        return (current_score - prev_score) + 2*(prev_coin_dis-coin_distance)
def save_q(Q, num_episode, params, filename="model_q.pkl"):
    """Serialize the Q table plus training metadata to *filename* via pickle."""
    payload = {"num_episode": num_episode, "params": params, "q_table": dict(Q)}
    with open(filename, "wb") as handle:
        handle.write(pickle.dumps(payload))
def load_q(filename="model_q.pkl"):
    """Restore a pickled Q table; returns (Q, num_episode, params).

    Unknown states default to a zero vector of length 3 (one per action).
    """
    with open(filename, "rb") as handle:
        data = pickle.loads(handle.read())
    table = defaultdict(lambda: np.zeros(3), data["q_table"])
    return table, data["num_episode"], data["params"]
def q_learning(game, num_episodes, params):
    """
    Q-Learning algorithm: Off-policy TD control. Finds the optimal greedy policy
    while following an epsilon-greedy policy.

    Args:
        game: Coin drop game environment.
        num_episodes: Number of episodes to run for.
        params: (epsilon, alpha, discount_factor) - exploration chance
            (float between 0 and 1), TD learning rate, and gamma discount.

    Returns:
        Q: the optimal action-value function, a dictionary mapping state -> action values.
    """
    epsilon, alpha, discount_factor = params

    # The final action-value function.
    # A nested dictionary that maps state -> (action -> action-value).
    Q = defaultdict(lambda: np.zeros(get_action_num()))

    # The epsilon-greedy behavior policy we're following
    policy = make_policy(Q, epsilon, get_action_num())

    for i_episode in range(num_episodes):
        # Reset the environment and pick the first action
        _, counter, score, game_info = game.reset()
        state = get_state(counter, score, game_info)
        action = 0

        # One step in the environment per loop iteration
        for t in itertools.count():

            # Sample an action from the behavior policy and take a step
            action_probs = policy(get_state(counter, score, game_info))
            action = np.random.choice(np.arange(len(action_probs)), p=action_probs)
            done, next_counter, next_score, game_info = game.step(action)
            next_state = get_state(counter, score, game_info)
            reward = get_reward(score, next_score, counter, next_counter, game_info)
            counter = next_counter
            score = next_score

            """
            this code performs TD Update. (Update Q value)
            You may change this part for the given task.
            """
            # Off-policy target: bootstrap from the greedy next action
            best_next_action = np.argmax(Q[next_state])
            td_target = reward + discount_factor * Q[next_state][best_next_action]
            td_delta = td_target - Q[state][action]
            Q[state][action] += alpha * td_delta

            if done:
                break

            state = next_state

        # Print out which episode we're on, useful for debugging.
        if (i_episode + 1) % 100 == 0:
            print("Episode {}/{} (Score: {})\n".format(i_episode + 1, num_episodes, score), end="")
            sys.stdout.flush()

    return Q
def train(num_episodes, params):
    """Run q_learning on a headless Game instance and return the learned Q table."""
    environment = Game(False)
    return q_learning(environment, num_episodes, params)
## This function will be called in the game.py
def get_action(Q, counter, score, game_info, params):
    """Sample one action for the current game state from the epsilon-greedy
    policy derived from *Q*; params[0] is the exploration rate epsilon."""
    policy = make_policy(Q, params[0], get_action_num())
    probs = policy(get_state(counter, score, game_info))
    return np.random.choice(np.arange(len(probs)), p=probs)
def main():
    """CLI entry point: parse hyper-parameters, train, and persist the Q table."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--num_episode", help="# of the episode (size of training data)",
                        type=int, required=True)
    parser.add_argument("-e", "--epsilon", help="the probability of random movement, 0~1",
                        type=float, default=0.1)
    parser.add_argument("-lr", "--learning_rate", help="learning rate of training",
                        type=float, default=0.1)
    args = parser.parse_args()
    # Defensive re-check; argparse already enforces required=True above.
    if args.num_episode is None:
        parser.print_help()
        exit(1)

    # you can pass your parameter as list or dictionary.
    # fix corresponding parts if you want to change the parameters
    num_episodes = args.num_episode
    epsilon = args.epsilon
    learning_rate = args.learning_rate
    discount_factor = 0.5
    Q = train(num_episodes, [epsilon, learning_rate, discount_factor])
    save_q(Q, num_episodes, [epsilon, learning_rate, discount_factor])
    #Q, n, params = load_q()

if __name__ == "__main__":
    main()
77232f053c9aa3273f532f50e69d9e4bfe4a1adf | 2,731 | py | Python | osd_client.py | kwj1399/ryu_app | df094994e09788053b47aad1c6164033e6bd4c14 | [
"MIT"
] | null | null | null | osd_client.py | kwj1399/ryu_app | df094994e09788053b47aad1c6164033e6bd4c14 | [
"MIT"
] | null | null | null | osd_client.py | kwj1399/ryu_app | df094994e09788053b47aad1c6164033e6bd4c14 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import time
import re
import psutil
import subprocess
from socket import *
# get host network delay
def _get_delay(ip):
    """Average ICMP round-trip time to *ip* in milliseconds.

    Sends 15 single pings, parses the ``time=...`` field from each reply and
    returns the mean latency rounded to 2 decimal places. Pings that fail or
    produce no parsable time are skipped; if none succeed, returns 0.
    """
    samples = []
    for _ in range(15):
        p = subprocess.Popen(["ping -c 1 " + ip], stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             shell=True)
        out = p.stdout.read()
        out_re = re.search(u'time=\d+\.+\d*', out)
        if out_re is not None:
            # keep only digits and the decimal point: "time=0.123" -> "0.123"
            sample = ''.join(c for c in out_re.group() if c in '1234567890.')
            samples.append(sample)
    # Guard against division by zero: the original code divided inside the
    # loop and crashed whenever the first ping produced no parsable time.
    if not samples:
        return 0
    delay_time = sum(float(x) for x in samples) / len(samples)
    return round(delay_time, 2)
# get host's osd I/O load
# get host's osd I/O load
def _get_io():
    """Measure per-OSD disk throughput over a one-second window.

    Returns a dict mapping the OSD number (taken from the mountpoint under
    /var/lib/ceph/osd/) to total read+write KBytes observed in one second.
    NOTE: sleeps 1 second per discovered OSD device.
    """
    devices = {}
    osd_io = {}
    # find partitions whose mountpoint lives under the ceph OSD directory
    partitions = psutil.disk_partitions()
    pattern = re.compile(r'/var/lib/ceph/osd/')
    # map device name (without the '/dev/' prefix) to its partition index,
    # e.g. result: {'sdb1': 4, 'sdc1': 5}
    for p in partitions:
        if pattern.match(p.mountpoint):
            devices_name = p.device[5:]
            devices[devices_name] = partitions.index(p)
    for key in devices:
        # mountpoint[23:] strips '/var/lib/ceph/osd/ceph-' leaving the osd id
        osd_num = partitions[devices[key]].mountpoint[23:]
        pre_read_bytes = psutil.disk_io_counters(perdisk=True)[key].read_bytes
        pre_write_bytes = psutil.disk_io_counters(perdisk=True)[key].write_bytes
        time.sleep(1)
        after_read_bytes = psutil.disk_io_counters(perdisk=True)[key].read_bytes
        after_write_bytes = psutil.disk_io_counters(perdisk=True)[key].write_bytes
        read_bytes = after_read_bytes - pre_read_bytes
        write_bytes = after_write_bytes - pre_write_bytes
        total_kbytes = (read_bytes + write_bytes)/1024
        osd_io[osd_num] = total_kbytes
    return osd_io
# send data
# send data
def _send_date():
    """Continuously report host load to the monitor over UDP.

    Every ~10 seconds sends a stringified tuple of
    (ping delay, cpu %, memory %, per-osd io) to HOST:PORT.
    NOTE(review): 'data' is never empty so the break is unreachable and the
    loop runs forever - confirm whether that is intentional.
    """
    HOST = '172.25.1.11'
    PORT = 12345
    # BUFSIZE = 1024
    ADDR = (HOST, PORT)
    udpCliSock = socket(AF_INET, SOCK_DGRAM)
    while True:
        try:
            delay_tmp = _get_delay('172.25.1.254')
            delay = max(delay_tmp, 0)
            io = _get_io()
            cpu = max(psutil.cpu_percent(interval=1), 0.0)
            mem = max(psutil.virtual_memory().percent, 0.0)
            data = str((delay, cpu, mem, io))
            if not data:
                break
            udpCliSock.sendto(data, ADDR)
            time.sleep(10)
            # data,ADDR = udpCliSock.recvfrom(BUFSIZE) # receive data
            # if not data:
            #     break
            # print 'Server : ', data
        except Exception as e:
            print ('Error: ', e)
    udpCliSock.close()

if __name__ == '__main__':
    _send_date()
| 33.716049 | 139 | 0.602343 |
3fe846d26e8d8149bd818a59cf37f60045aed7b9 | 366 | py | Python | src/affe/io/CTE.py | eliavw/affe | 0e57d7f40cb67f9a300292e03e3f83b4b591d1e3 | [
"MIT"
] | 1 | 2020-12-02T06:16:00.000Z | 2020-12-02T06:16:00.000Z | src/affe/io/CTE.py | eliavw/affe | 0e57d7f40cb67f9a300292e03e3f83b4b591d1e3 | [
"MIT"
] | null | null | null | src/affe/io/CTE.py | eliavw/affe | 0e57d7f40cb67f9a300292e03e3f83b4b591d1e3 | [
"MIT"
] | null | null | null | # Suggested Defaults
# Token presumably used when composing names from labeled parts - confirm
# against callers elsewhere in the package.
SEPARATOR = "-"
# Separator between keys in a nested-key ("keychain") path.
KEYCHAIN_SEPARATOR = "."
# Short label codes for common entities; note 'exp' and 'experiment' share 'e'.
LABELS = dict(query="q", exp="e", fold="f", experiment="e")
# Default child-directory layout created under each kind of parent directory.
DEFAULT_CHILDREN = dict(
    root=["cli", "data", "out", "scripts"],
    out=["manual", "preprocessing", "fit", "predict"],
    model=["models", "logs", "timings"],
    flow=["config", "logs", "results", "models", "timings", "tmp", "flows"],
)
| 33.272727 | 76 | 0.592896 |
e4b8f3f79c32991351e0a9da5e2d4f22cc83a0d4 | 19,807 | py | Python | networkapi/util/__init__.py | treviza153/GloboNetworkAPI | 9ca8ba7febdef0eb716514196b1bda1afdccd851 | [
"Apache-2.0"
] | 73 | 2015-04-13T17:56:11.000Z | 2022-03-24T06:13:07.000Z | networkapi/util/__init__.py | treviza153/GloboNetworkAPI | 9ca8ba7febdef0eb716514196b1bda1afdccd851 | [
"Apache-2.0"
] | 99 | 2015-04-03T01:04:46.000Z | 2021-10-03T23:24:48.000Z | networkapi/util/__init__.py | treviza153/GloboNetworkAPI | 9ca8ba7febdef0eb716514196b1bda1afdccd851 | [
"Apache-2.0"
] | 64 | 2015-08-05T21:26:29.000Z | 2022-03-22T01:06:28.000Z | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import copy
import functools
import logging
import re
import socket
import sys
import time
import warnings
from hashlib import sha1
from django.core import validators
from django.core.cache import cache
from django.forms.models import model_to_dict
from networkapi.infrastructure.ipaddr import AddressValueError
from networkapi.infrastructure.ipaddr import IPAddress
from networkapi.plugins import exceptions as plugins_exceptions
# from .decorators import deprecated
# Sentinel stored in the cache while a cached computation is in progress;
# cache_function in this module spins until the real value replaces it.
LOCK = 'LOCK'
# Regexes whose first group captures password values in XML payloads; used by
# search_hide_password to mask credentials before logging.
PATTERN_XML_PASSWORD = [
    '<password>(.*?)</password>', '<enable_pass>(.*?)</enable_pass>', '<pass>(.*?)</pass>']
def valid_expression(operator, value1, value2):
    """Evaluate a simple comparison: 'eq' tests equality, 'ne' inequality.

    Any other operator yields False.
    """
    comparisons = {
        'eq': lambda a, b: a == b,
        'ne': lambda a, b: a != b,
    }
    compare = comparisons.get(operator)
    return compare(value1, value2) if compare else False
def search_hide_password(msg):
    """Mask any password captured by PATTERN_XML_PASSWORD with '****'."""
    for pattern in PATTERN_XML_PASSWORD:
        match = re.compile(pattern).search(msg)
        if match:
            msg = msg.replace(match.group(1), '****')
    return msg
def valid_regex(string, regex):
    """True when *regex* matches anywhere inside *string*."""
    return re.compile(regex).search(string) is not None
def is_valid_regex(string, regex):
    """Check whether *string* matches *regex* anchored at its start.

    :param string: Value to be validated.
    :return: True if it matches, or False otherwise.
    """
    return re.match(regex, string) is not None
def is_valid_ip(address):
    """Check whether *address* looks like a dotted-quad IPv4 address.

    Returns the underlying match object (truthy) on success, or None.
    """
    quad = r'\b(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b'
    return re.match(quad, address)
def to_ip(address):
    """Resolve *address* to an IPv4 address.

    :param address: Hostname or IP address.
    :return: the address unchanged when it is already a valid IP, otherwise
        the result of a DNS lookup of the hostname.
    """
    if is_valid_ip(address):
        return address
    # not a literal IP: treat it as a hostname and resolve via DNS
    return socket.gethostbyname(address)
def is_valid_int_param(param, required=True):
    """Validate that *param* can be converted to an integer.

    A None value is accepted only when *required* is False.
    """
    if param is None:
        return not required
    try:
        int(param)
        return True
    except (TypeError, ValueError):
        return False
def is_valid_int_greater_zero_param(param, required=True):
    """Validate that *param* is an integer strictly greater than zero.

    A None value is accepted only when *required* is False.
    """
    if param is None:
        return not required
    try:
        return int(param) > 0
    except (TypeError, ValueError):
        return False
def is_valid_int_greater_equal_zero_param(param):
    """Validate that *param* is an integer greater than or equal to zero."""
    if param is None:
        return False
    try:
        return int(param) >= 0
    except (TypeError, ValueError):
        return False
def is_valid_string_maxsize(param, maxsize=None, required=True):
    """Validate that *param* is a non-blank string no longer than *maxsize*.

    maxsize=None skips the length check; required=False accepts None or ''.
    """
    if required is True and param is None:
        return False
    if required is False and (param is None or param == ''):
        return True
    if '' == param.strip():
        return False
    if not isinstance(param, basestring):
        return False
    # length is measured on the stripped value, matching the blank check above
    if maxsize is not None and is_valid_int_greater_zero_param(maxsize):
        if len(param.strip()) > maxsize:
            return False
    return True
def is_valid_string_minsize(param, minsize=None, required=True):
    """Validate that *param* is a non-blank string of at least *minsize* chars.

    minsize=None skips the length check; required=False accepts None or ''.
    """
    if required is True and param is None:
        return False
    if required is False and (param is None or param == ''):
        return True
    if '' == param.strip():
        return False
    if not isinstance(param, basestring):
        return False
    # length is measured on the stripped value, matching the blank check above
    if minsize is not None and is_valid_int_greater_zero_param(minsize):
        if len(param.strip()) < minsize:
            return False
    return True
def is_valid_vlan_name(vlan_name):
    """Validate a VLAN name: non-empty, no line breaks, no special characters."""
    if vlan_name is None or vlan_name == '':
        return False
    has_breakline = re.compile('\r|\n\r|\n').search(vlan_name)
    has_special = re.compile('[@!#$%^&*()<>?/\\\|}{~:]').search(vlan_name)
    return not (has_breakline or has_special)
def is_valid_boolean_param(param, required=True):
    """Validate that *param* is a recognised boolean representation.

    Accepted: '0', '1', 'False', 'True' (strings) and the bools themselves.
    """
    if param is None:
        return not required
    return param in ['0', '1', 'False', 'True', False, True]
def is_valid_zero_one_param(param, required=True):
    """Validate that *param* is the string '0' or '1'."""
    if param is None:
        return not required
    return param in ('0', '1')
def is_valid_yes_no_choice(param):
    """Validate that *param* is the yes/no char 'S' (sim) or 'N' (nao)."""
    return param in ('S', 'N')
def is_valid_uri(param):
    """Validate that *param* contains only URI-safe characters.

    Returns the match object (truthy) on success, or None.
    """
    return re.match(r"^[a-zA-Z0-9\\-_\\\-\\.!\\~\\*'\\(\\);/\\?:\\@\\&=\\{\\}\\#\\\[\\\]\\,]*$", param)
def is_valid_text(param, required=True):
    """Validate a text field: letters, digits, hyphen, underscore and space.

    Returns a truthy match object on success; None or False otherwise.
    required=False accepts None or ''.
    """
    if required is True and param is None:
        return False
    if required is False and (param is None or param == ''):
        return True
    return re.match(r'^[a-zA-Z0-9\\-_\\\-\\ ]*$', param)
def is_valid_pool_identifier_text(param, required=True):
    """Validate a pool identifier: a leading letter, then letters, digits,
    dot, underscore or hyphen.

    Returns a truthy match object on success; None or False otherwise.
    required=False accepts None or ''.
    """
    if required is True and param is None:
        return False
    if required is False and (param is None or param == ''):
        return True
    return re.match(r'^[a-zA-Z]+[a-zA-Z0-9\._-]*$', param)
def is_valid_option(param):
    """Validate an option value: letters, digits, hyphen, underscore, dot
    and space. Returns the match object (truthy) or None."""
    return re.match(r'^[0-9a-zA-Z\\-_.\\\-\\ ]*$', param)
def is_valid_email(param):
    """Checks if the parameter is a valid e-mail address.

    @param param: Value to be validated.
    @return: a match object (truthy) if valid, or None otherwise.
    """
    # Dot-atom or quoted-string local part, then a dotted domain ending in a
    # 2-6 letter TLD; matched case-insensitively.
    pattern = re.compile(r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*"
                         r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"'
                         r')@(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?$', re.IGNORECASE)
    return re.match(pattern, param)
def is_valid_healthcheck_destination(param):
    """Validate a healthcheck destination of the form 'port:port', where
    either side may be the wildcard '*'. Returns a match object or None."""
    return re.match(r'^([0-9]+|\*):([0-9]+|\*)$', param)
def is_valid_ipv4(param):
    """Validate *param* as an IPv4 address using the ipaddr IPAddress helper."""
    try:
        IPAddress(param, 4)
    except AddressValueError:
        return False
    return True
def is_valid_ipv6(param):
    """Validate *param* as an IPv6 address using the ipaddr IPAddress helper."""
    try:
        IPAddress(param, 6)
    except AddressValueError:
        return False
    return True
def is_valid_ip_ipaddr(param):
    """Validate *param* as either an IPv4 or IPv6 address."""
    try:
        IPAddress(param)
    except ValueError:
        return False
    return True
def convert_boolean_to_int(param):
    """Map True -> 1 and False -> 0; any other value yields None."""
    if param is True:
        return 1
    if param is False:
        return 0
    return None
def convert_string_or_int_to_boolean(param, force=None):
    """Convert common string/int boolean representations to a bool.

    Unrecognised values return False when *force* is truthy, else None.
    """
    if param in ('1', 1, 'True', 'true'):
        return True
    if param in ('0', 0, 'False', 'false'):
        return False
    return False if force else None
def clone(obj):
    """Return a shallow copy of *obj* via copy.copy.

    @param obj: object to be cloned
    @return: the cloned object.
    """
    return copy.copy(obj)
def is_valid_version_ip(param, IP_VERSION):
    """Check that *param* is one of the short IP version codes ('v4'/'v6')."""
    if param is None:
        return False
    return param in (IP_VERSION.IPv4[0], IP_VERSION.IPv6[0])
def mount_ipv4_string(ip):
    """Join the four octet attributes of *ip* into dotted-quad notation."""
    return '.'.join(str(octet) for octet in (ip.oct1, ip.oct2, ip.oct3, ip.oct4))
def mount_ipv6_string(ip):
    """Join the eight block attributes of *ip* with ':' into full IPv6 form."""
    blocks = (ip.block1, ip.block2, ip.block3, ip.block4,
              ip.block5, ip.block6, ip.block7, ip.block8)
    return ':'.join(str(block) for block in blocks)
def cache_function(length, equipment=False):
    """
    Decorator that caches the decorated function's result, keyed by the sha1
    of the first positional argument's id (suffixed with 'equipment' when
    *equipment* is True).

    @param length: time in seconds to stay in cache
    """
    def _decorated(func):
        logging.debug("30")

        def _cache(*args, **kwargs):
            logging.debug("31")
            # Build the cache key from the first positional argument's id.
            if equipment is True:
                logging.debug(equipment)
                logging.debug(args)
                key = sha1(str(args[0].id) + 'equipment').hexdigest()
                print str(args[0].id) + 'equipment'
            else:
                key = sha1(str(args[0].id)).hexdigest()
                print str(args[0].id)
            logging.debug("32")
            # Search in cache if it exists
            logging.debug("cache %s" % cache)
            logging.debug("key %s" % key)
            if key in cache:
                # Get value in cache
                value = cache.get(key)
                # If was locked
                if value == LOCK:
                    # Try until unlock
                    while value == LOCK:
                        time.sleep(1)
                        value = cache.get(key)
                # Return value of cache
                return value
                logging.debug("33")
            # If not exists in cache
            else:
                # Function can be called several times before it finishes and is put into the cache,
                # then lock it to others wait it finishes.
                cache.set(key, LOCK, length)
                # Execute method
                result = func(*args, **kwargs)
                # Set in cache the result of method
                cache.set(key, result, length)
                # If not exists in cache
                # key_list = cache.get(sha1('key_networkapi_vlans').hexdigest())
                # if(key_list is None):
                #     key_list = []
                # Set in cache the keys
                # key_list.append(key)
                # cache.set(sha1('key_networkapi_vlans').hexdigest(), key_list)
                logging.debug("34")
                return result
        logging.debug("35")
        return _cache
    return _decorated
def destroy_cache_function(key_list, equipment=False):
    """Remove the cache entries for every key in *key_list*.

    Keys are hashed the same way cache_function stores them; when
    *equipment* is True the 'equipment' suffix is appended first.
    """
    for key in key_list:
        key = str(key)
        if equipment is True:
            key = str(key) + 'equipment'
        # same sha1-of-key scheme used by cache_function when storing
        if sha1(key).hexdigest() in cache:
            cache.delete(sha1(key).hexdigest())
class IP_VERSION:
    """Constant holder for the (short code, label) pairs of IP versions."""
    IPv6 = ('v6', 'IPv6')
    IPv4 = ('v4', 'IPv4')
    # all supported versions, IPv4 first
    List = (IPv4, IPv6)
def get_environment_map(environment):
    """Serialize an environment model object into a plain dict.

    Includes division / logical-environment / L3-group ids and names, the
    composed network-environment label, optional filter data, templates and
    the vlan-number ranges.
    """
    environment_map = dict()
    environment_map['id'] = environment.id
    environment_map['link'] = environment.link
    environment_map['id_divisao'] = environment.divisao_dc.id
    environment_map['nome_divisao'] = environment.divisao_dc.nome
    environment_map['id_ambiente_logico'] = environment.ambiente_logico.id
    environment_map['nome_ambiente_logico'] = environment.ambiente_logico.nome
    environment_map['id_grupo_l3'] = environment.grupo_l3.id
    environment_map['nome_grupo_l3'] = environment.grupo_l3.nome
    # composed human-readable name: "<division> - <logical env> - <l3 group>"
    environment_map['ambiente_rede'] = environment.divisao_dc.nome + ' - ' + \
        environment.ambiente_logico.nome + ' - ' + \
        environment.grupo_l3.nome
    # filter data is optional
    if environment.filter is not None:
        environment_map['id_filter'] = environment.filter.id
        environment_map['filter_name'] = environment.filter.name
    environment_map['acl_path'] = environment.acl_path
    environment_map['vrf'] = environment.vrf
    environment_map['ipv4_template'] = environment.ipv4_template
    environment_map['ipv6_template'] = environment.ipv6_template
    environment_map['min_num_vlan_1'] = environment.min_num_vlan_1
    environment_map['max_num_vlan_1'] = environment.max_num_vlan_1
    environment_map['min_num_vlan_2'] = environment.min_num_vlan_2
    environment_map['max_num_vlan_2'] = environment.max_num_vlan_2

    return environment_map
def get_vlan_map(vlan, network_ipv4, network_ipv6):
    """Serialize a vlan plus its IPv4/IPv6 networks into a plain dict.

    Each non-empty network list is converted with model_to_dict and stored
    under 'redeipv4' / 'redeipv6'; empty or missing lists map to None.
    """
    vlan_map = model_to_dict(vlan)
    if network_ipv4 is not None and len(network_ipv4) > 0:
        net_map = []
        for net in network_ipv4:
            net_dict = model_to_dict(net)
            net_map.append(net_dict)

        vlan_map['redeipv4'] = net_map
    else:
        vlan_map['redeipv4'] = None

    if network_ipv6 is not None and len(network_ipv6) > 0:
        net_map = []
        for net in network_ipv6:
            net_dict = model_to_dict(net)
            net_map.append(net_dict)

        vlan_map['redeipv6'] = net_map
    else:
        vlan_map['redeipv6'] = None

    return vlan_map
def clear_newline_chr(string):
    """Strip every LF (chr 10) and CR (chr 13) character from *string*."""
    return string.replace('\n', '').replace('\r', '')
def is_valid_list_int_greater_zero_param(list_param, required=True):
    """Checks if the parameter list contains only integers greater than zero.

    @param list_param: list of values to be validated.
    @raise ValueError: if the list or any element fails validation.
    """
    if required and list_param in validators.EMPTY_VALUES:
        raise ValueError('Field is required.')
    try:
        for param in list_param:
            if param is None and required:
                raise ValueError('Field is required.')
            try:
                param = int(param)
                if param < 1:
                    raise ValueError('Field must be an positive integer.')
            except Exception:
                raise ValueError('Field must be an integer.')
    except Exception:
        # NOTE(review): this outer handler also catches the specific
        # ValueErrors raised above and re-raises them with this generic
        # message - confirm whether that masking is intentional.
        raise ValueError('Invalid List Parameter.')
    return True
def is_healthcheck_valid(healthcheck):
    """Ensure expect/request fields are empty for non-HTTP(S) healthchecks.

    Raises plugins ValueInvalid when a non-HTTP(S) check carries a non-empty
    expect or request string; returns True otherwise.
    """
    hc_type = healthcheck['healthcheck_type']
    if hc_type not in ('HTTP', 'HTTPS'):
        if healthcheck['healthcheck_expect'] != '':
            raise plugins_exceptions.ValueInvalid(
                'healthcheck expect must be empty')
        if healthcheck['healthcheck_request'] != '':
            raise plugins_exceptions.ValueInvalid(
                'healthcheck request must be empty')
    return True
| 29.562687 | 191 | 0.634422 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.