hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f73cb02d4b475c260a2a8c60e3aa0cd43f876edb | 7,952 | py | Python | maskrcnn_benchmark/structures/segmentation_mask.py | SIAAAAAA/MMT-PSM | 0835c01c5010d3337778f452e9d96416e0f8a11a | [
"MIT"
] | 41 | 2020-07-22T03:55:08.000Z | 2022-02-27T12:04:41.000Z | maskrcnn_benchmark/structures/segmentation_mask.py | SIAAAAAA/MMT-PSM | 0835c01c5010d3337778f452e9d96416e0f8a11a | [
"MIT"
] | 5 | 2020-11-08T08:47:34.000Z | 2021-07-09T03:53:42.000Z | maskrcnn_benchmark/structures/segmentation_mask.py | SIAAAAAA/MMT-PSM | 0835c01c5010d3337778f452e9d96416e0f8a11a | [
"MIT"
] | 5 | 2020-10-13T11:09:53.000Z | 2021-07-28T12:41:53.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import pycocotools.mask as mask_utils
from pycocotools import mask as maskUtils
import numpy as np
# from maskrcnn_benchmark.utils.miscellaneous
# transpose
# Flip-direction codes accepted by the transpose() methods in this module.
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
class Mask(object):
    """
    This class is unfinished and not meant for use yet
    It is supposed to contain the mask for an object as
    a 2d tensor
    """

    def __init__(self, masks, size, mode):
        # masks: tensor laid out as (N, H, W) -- crop() slices it as
        # [:, y0:y1, x0:x1]; size: (width, height) of the image.
        self.masks = masks
        self.size = size
        self.mode = mode

    def transpose(self, method):
        """Flip all masks horizontally (FLIP_LEFT_RIGHT) or vertically
        (FLIP_TOP_BOTTOM) and return a new Mask."""
        if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
            raise NotImplementedError(
                "Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
            )

        width, height = self.size
        if method == FLIP_LEFT_RIGHT:
            dim = width
            idx = 2
        elif method == FLIP_TOP_BOTTOM:
            dim = height
            idx = 1

        # BUGFIX: index_select() takes the *axis index* as its first argument
        # and a LongTensor of element indices as its second; the previous
        # code passed the axis length as the axis and a plain Python list as
        # the indices, so this call could never succeed.
        flip_idx = torch.arange(dim - 1, -1, -1, dtype=torch.long)
        flipped_masks = self.masks.index_select(idx, flip_idx)
        return Mask(flipped_masks, self.size, self.mode)

    def crop(self, box):
        """Crop masks to box = [x0, y0, x1, y1] (pixel coordinates)."""
        w, h = box[2] - box[0], box[3] - box[1]
        cropped_masks = self.masks[:, box[1] : box[3], box[0] : box[2]]
        return Mask(cropped_masks, size=(w, h), mode=self.mode)

    def resize(self, size, *args, **kwargs):
        # intentionally unimplemented: the class is a stub (see class docstring)
        pass
class Polygons(object):
    """
    Holds the mask of one object instance as a set of polygons, each a
    flat tensor of alternating [x0, y0, x1, y1, ...] coordinates.
    """

    def __init__(self, polygons, size, mode):
        # Accept raw coordinate lists, or another Polygons (copy its data).
        if isinstance(polygons, list):
            polygons = [torch.as_tensor(poly, dtype=torch.float32)
                        for poly in polygons]
        elif isinstance(polygons, Polygons):
            polygons = polygons.polygons

        self.polygons = polygons
        self.size = size
        self.mode = mode

    def transpose(self, method):
        """Mirror every polygon left/right or top/bottom."""
        if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
            raise NotImplementedError(
                "Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
            )

        width, height = self.size
        # even offsets (0) hold x coordinates, odd offsets (1) hold y
        dim, idx = (width, 0) if method == FLIP_LEFT_RIGHT else (height, 1)

        TO_REMOVE = 1  # last valid pixel index is dim - 1
        flipped = []
        for poly in self.polygons:
            mirrored = poly.clone()
            mirrored[idx::2] = dim - poly[idx::2] - TO_REMOVE
            flipped.append(mirrored)
        return Polygons(flipped, size=self.size, mode=self.mode)

    def crop(self, box):
        """Shift polygons into the coordinate frame of box = [x0, y0, x1, y1]."""
        w, h = box[2] - box[0], box[3] - box[1]
        # guard against degenerate (zero-sized) boxes
        w, h = max(w, 1), max(h, 1)

        shifted = []
        for poly in self.polygons:
            moved = poly.clone()
            moved[0::2] -= box[0]
            moved[1::2] -= box[1]
            shifted.append(moved)
        return Polygons(shifted, size=(w, h), mode=self.mode)

    def resize(self, size, *args, **kwargs):
        """Rescale polygons to a new image size = (width, height)."""
        ratios = tuple(float(new) / float(old)
                       for new, old in zip(size, self.size))
        if ratios[0] == ratios[1]:
            # isotropic scaling: one multiply covers both axes
            return Polygons([poly * ratios[0] for poly in self.polygons],
                            size, mode=self.mode)

        ratio_w, ratio_h = ratios
        rescaled = []
        for poly in self.polygons:
            stretched = poly.clone()
            stretched[0::2] *= ratio_w
            stretched[1::2] *= ratio_h
            rescaled.append(stretched)
        return Polygons(rescaled, size=size, mode=self.mode)

    def convert(self, mode):
        """Rasterize the polygons into a binary mask tensor (mode == "mask")."""
        width, height = self.size
        if mode == "mask":
            rles = mask_utils.frPyObjects(
                [poly.numpy() for poly in self.polygons], height, width
            )
            mask = torch.from_numpy(
                mask_utils.decode(mask_utils.merge(rles)))
            return mask

    def __repr__(self):
        return (
            self.__class__.__name__ + "("
            + "num_polygons={}, ".format(len(self.polygons))
            + "image_width={}, ".format(self.size[0])
            + "image_height={}, ".format(self.size[1])
            + "mode={})".format(self.mode)
        )
class SegmentationMask(object):
    """
    This class stores the segmentations for all objects in the image.

    Instances are stored either as Polygons objects (when constructed from
    lists of polygon coordinates) or as raw binary masks (when constructed
    from a list of numpy arrays); in the latter case ``self.polygons`` is
    empty and the arrays are kept in ``self.mask``.
    """

    def __init__(self, polygons, size, mode=None):
        """
        Arguments:
            polygons: a list of list of lists of numbers. The first
                level of the list correspond to individual instances,
                the second level to all the polygons that compose the
                object, and the third level to the polygon coordinates.
                Alternatively, a list of numpy binary masks.
            size: (width, height) of the image.
            mode: propagated unchanged to derived objects.
        """
        assert isinstance(polygons, list)
        # BUGFIX: always define self.mask and never index polygons[0] on an
        # empty list (the original raised IndexError for empty input).
        self.mask = []
        if polygons and isinstance(polygons[0], np.ndarray):
            # binary-mask mode: keep the raw numpy arrays
            self.polygons = []
            self.mask = polygons
        else:
            self.polygons = [Polygons(p, size, mode) for p in polygons]
        self.size = size
        self.mode = mode

    def decode(self, h, w):
        # convert the polygon instances to one (h, w) binary mask array
        # NOTE(review): in binary-mask mode self.polygons is empty, so this
        # returns an all-zero map -- confirm callers only use polygon mode.
        binary_mask = np.zeros((h, w))
        for segm in self.polygons:
            mask = segm.convert('mask')
            binary_mask = binary_mask + mask.numpy()
        return binary_mask

    def transpose(self, method):
        if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
            raise NotImplementedError(
                "Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
            )

        flipped = []
        for polygon in self.polygons:
            flipped.append(polygon.transpose(method))
        return SegmentationMask(flipped, size=self.size, mode=self.mode)

    def crop(self, box):
        """Crop every instance to box = [x0, y0, x1, y1]."""
        # BUGFIX: the original dispatched on isinstance(self.polygons[0], ...),
        # which raises IndexError in binary-mask mode where self.polygons
        # is an empty list.
        w, h = box[2] - box[0], box[3] - box[1]
        if self.polygons:
            cropped = []
            for polygon in self.polygons:
                cropped.append(polygon.crop(box))
            return SegmentationMask(cropped, size=(w, h), mode=self.mode)
        else:
            cropped = []
            for mask in self.mask:
                mask = mask[box[1]:box[3], box[0]:box[2]]
                cropped.append(mask)
            return SegmentationMask(cropped, size=(w, h), mode=self.mode)

    def resize(self, size, *args, **kwargs):
        # NOTE(review): only the polygon representation is resized; in
        # binary-mask mode this returns an empty SegmentationMask.
        scaled = []
        for polygon in self.polygons:
            scaled.append(polygon.resize(size, *args, **kwargs))
        return SegmentationMask(scaled, size=size, mode=self.mode)

    def to(self, *args, **kwargs):
        # no-op: polygons are kept on CPU
        return self

    def __getitem__(self, item):
        if isinstance(item, (int, slice)):
            selected_polygons = [self.polygons[item]]
        else:
            # advanced indexing on a single dimension
            selected_polygons = []
            if isinstance(item, torch.Tensor) and item.dtype == torch.uint8:
                item = item.nonzero()
                item = item.squeeze(1) if item.numel() > 0 else item
                item = item.tolist()
            for i in item:
                selected_polygons.append(self.polygons[i])
        return SegmentationMask(selected_polygons, size=self.size, mode=self.mode)

    def __iter__(self):
        return iter(self.polygons)

    def __repr__(self):
        s = self.__class__.__name__ + "("
        s += "num_instances={}, ".format(len(self.polygons))
        s += "image_width={}, ".format(self.size[0])
        s += "image_height={})".format(self.size[1])
        return s
| 32.064516 | 86 | 0.564512 |
import torch
import pycocotools.mask as mask_utils
from pycocotools import mask as maskUtils
import numpy as np
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
class Mask(object):
def __init__(self, masks, size, mode):
self.masks = masks
self.size = size
self.mode = mode
def transpose(self, method):
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError(
"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
)
width, height = self.size
if method == FLIP_LEFT_RIGHT:
dim = width
idx = 2
elif method == FLIP_TOP_BOTTOM:
dim = height
idx = 1
flip_idx = list(range(dim)[::-1])
flipped_masks = self.masks.index_select(dim, flip_idx)
return Mask(flipped_masks, self.size, self.mode)
def crop(self, box):
w, h = box[2] - box[0], box[3] - box[1]
cropped_masks = self.masks[:, box[1] : box[3], box[0] : box[2]]
return Mask(cropped_masks, size=(w, h), mode=self.mode)
def resize(self, size, *args, **kwargs):
pass
class Polygons(object):
def __init__(self, polygons, size, mode):
if isinstance(polygons, list):
polygons = [torch.as_tensor(p, dtype=torch.float32) for p in polygons]
elif isinstance(polygons, Polygons):
polygons = polygons.polygons
self.polygons = polygons
self.size = size
self.mode = mode
def transpose(self, method):
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError(
"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
)
flipped_polygons = []
width, height = self.size
if method == FLIP_LEFT_RIGHT:
dim = width
idx = 0
elif method == FLIP_TOP_BOTTOM:
dim = height
idx = 1
for poly in self.polygons:
p = poly.clone()
TO_REMOVE = 1
p[idx::2] = dim - poly[idx::2] - TO_REMOVE
flipped_polygons.append(p)
return Polygons(flipped_polygons, size=self.size, mode=self.mode)
def crop(self, box):
w, h = box[2] - box[0], box[3] - box[1]
w = max(w, 1)
h = max(h, 1)
cropped_polygons = []
for poly in self.polygons:
p = poly.clone()
p[0::2] = p[0::2] - box[0]
p[1::2] = p[1::2] - box[1]
cropped_polygons.append(p)
return Polygons(cropped_polygons, size=(w, h), mode=self.mode)
def resize(self, size, *args, **kwargs):
ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size))
if ratios[0] == ratios[1]:
ratio = ratios[0]
scaled_polys = [p * ratio for p in self.polygons]
return Polygons(scaled_polys, size, mode=self.mode)
ratio_w, ratio_h = ratios
scaled_polygons = []
for poly in self.polygons:
p = poly.clone()
p[0::2] *= ratio_w
p[1::2] *= ratio_h
scaled_polygons.append(p)
return Polygons(scaled_polygons, size=size, mode=self.mode)
def convert(self, mode):
width, height = self.size
if mode == "mask":
rles = mask_utils.frPyObjects(
[p.numpy() for p in self.polygons], height, width
)
rle = mask_utils.merge(rles)
mask = mask_utils.decode(rle)
mask = torch.from_numpy(mask)
return mask
def __repr__(self):
s = self.__class__.__name__ + "("
s += "num_polygons={}, ".format(len(self.polygons))
s += "image_width={}, ".format(self.size[0])
s += "image_height={}, ".format(self.size[1])
s += "mode={})".format(self.mode)
return s
class SegmentationMask(object):
def __init__(self, polygons, size, mode=None):
assert isinstance(polygons, list)
if not isinstance(polygons[0], np.ndarray):
self.polygons = [Polygons(p, size, mode) for p in polygons]
else:
self.polygons = []
self.mask = polygons
self.size = size
self.mode = mode
def decode(self, h, w):
binary_mask = np.zeros((h,w))
for segm in self.polygons:
mask = segm.convert('mask')
binary_mask = binary_mask + mask.numpy()
return binary_mask
def transpose(self, method):
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError(
"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
)
flipped = []
for polygon in self.polygons:
flipped.append(polygon.transpose(method))
return SegmentationMask(flipped, size=self.size, mode=self.mode)
def crop(self, box):
if isinstance(self.polygons[0], Polygons):
w, h = box[2] - box[0], box[3] - box[1]
cropped = []
for polygon in self.polygons:
cropped.append(polygon.crop(box))
return SegmentationMask(cropped, size=(w, h), mode=self.mode)
else:
cropped = []
w, h = box[2] - box[0], box[3] - box[1]
for mask in self.mask:
mask = mask[box[1]:box[3], box[0]:box[2]]
cropped.append(mask)
return SegmentationMask(cropped, size = (w,h), mode =
self.mode)
def resize(self, size, *args, **kwargs):
scaled = []
for polygon in self.polygons:
scaled.append(polygon.resize(size, *args, **kwargs))
return SegmentationMask(scaled, size=size, mode=self.mode)
def to(self, *args, **kwargs):
return self
def __getitem__(self, item):
if isinstance(item, (int, slice)):
selected_polygons = [self.polygons[item]]
else:
selected_polygons = []
if isinstance(item, torch.Tensor) and item.dtype == torch.uint8:
item = item.nonzero()
item = item.squeeze(1) if item.numel() > 0 else item
item = item.tolist()
for i in item:
selected_polygons.append(self.polygons[i])
return SegmentationMask(selected_polygons, size=self.size, mode=self.mode)
def __iter__(self):
return iter(self.polygons)
def __repr__(self):
s = self.__class__.__name__ + "("
s += "num_instances={}, ".format(len(self.polygons))
s += "image_width={}, ".format(self.size[0])
s += "image_height={})".format(self.size[1])
return s
| true | true |
f73cb2e177bce9a439ac8896c2d161e139655acb | 5,860 | py | Python | apps/recipes/models.py | DKudrik/foodgram-project-react | 1a754a98add627baaadc6ca7905a7abb4f33468e | [
"MIT"
] | null | null | null | apps/recipes/models.py | DKudrik/foodgram-project-react | 1a754a98add627baaadc6ca7905a7abb4f33468e | [
"MIT"
] | null | null | null | apps/recipes/models.py | DKudrik/foodgram-project-react | 1a754a98add627baaadc6ca7905a7abb4f33468e | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from django.core.validators import MinValueValidator
from django.db import models
User = get_user_model()
class Tag(models.Model):
    """Describes a tag object."""
    name = models.CharField(max_length=255, verbose_name='Имя')
    # optional display colour stored as a free-form string (empty by default)
    color = models.CharField(max_length=100, blank=True,
                             verbose_name='Цвет', default='')

    class Meta:
        verbose_name = 'Тег'
        verbose_name_plural = 'Теги'

    def __str__(self):
        return self.name
class Ingredient(models.Model):
    """Describes an ingredient object."""
    name = models.CharField(
        max_length=256,
        verbose_name='Название',
    )
    # measurement unit for the ingredient (free-form string)
    unit = models.CharField(
        max_length=64,
        verbose_name='Ед. измерения',
    )

    class Meta:
        # an ingredient is unique per (name, unit) pair
        constraints = [
            models.UniqueConstraint(
                fields=['name', 'unit'],
                name='name_unit'
            )
        ]
        verbose_name = 'Ингредиент'
        verbose_name_plural = 'Ингредиенты'

    def __str__(self):
        return f'{self.name}, {self.unit}'
class Recipe(models.Model):
    """
    Describes a recipe object. Related to 'auth.User', 'recipe.Tag' and
    'recipe.Ingredient' through intermediate model 'IngredientRecipe'.
    """
    author = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='recipes',
        verbose_name='Автор',
    )
    title = models.CharField(
        max_length=256,
        blank=False,
        verbose_name='Название',
    )
    image = models.ImageField(
        upload_to='kartinki/',
        blank=False,
        null=True,
        verbose_name='Изображение',
    )
    description = models.TextField(
        blank=False,
        verbose_name='Описание',
    )
    # attached via the explicit IngredientRecipe intermediate model
    ingredients = models.ManyToManyField(
        Ingredient,
        through='IngredientRecipe',
        related_name='recipes',
        verbose_name='Ингредиенты',
    )
    # minutes; must be at least 1
    cooking_time = models.PositiveIntegerField(
        blank=False,
        verbose_name='Время приготовления, мин',
        validators=[MinValueValidator(1)]
    )
    pub_date = models.DateTimeField(
        auto_now_add=True,
        verbose_name='Дата публикации',
    )
    tags = models.ManyToManyField(
        Tag,
        related_name='recipes',
        verbose_name='Теги',
    )
    # optional URL slug; may be empty/null
    slug = models.SlugField(max_length=100, unique=True, blank=True, null=True)

    class Meta:
        # newest recipes first
        ordering = ('-pub_date', )
        verbose_name = 'Рецепт'
        verbose_name_plural = 'Рецепты'

    def __str__(self):
        return (f'{self.author} : {self.title}')
class IngredientRecipe(models.Model):
    """
    Serves to connect a recipe object with an ingredient object
    via Many2Many relationship. Adds an additional field 'quantity'.
    """
    ingredient = models.ForeignKey(Ingredient,
                                   on_delete=models.CASCADE,
                                   related_name='ingredientrecipe',
                                   verbose_name='Ингредиент')
    # BUGFIX: verbose_name was copy-pasted as 'Ингредиент'; this field is
    # the recipe side of the relation, so it should read 'Рецепт'.
    recipe = models.ForeignKey(Recipe,
                               on_delete=models.CASCADE,
                               related_name='ingredientrecipe',
                               verbose_name='Рецепт')
    # amount of the ingredient used in this recipe; at least 0.1
    quantity = models.DecimalField(
        max_digits=8,
        decimal_places=1,
        verbose_name='Количество',
        validators=[MinValueValidator(0.1)]
    )

    class Meta:
        verbose_name = 'Ингредиент в рецепте'
        verbose_name_plural = 'Ингредиенты в рецепте'

    def __str__(self):
        return (self.ingredient.name)
class Follow(models.Model):
    """Describes a follow object."""
    # the user who follows
    user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='follower',
        verbose_name='Подписчик',
    )
    # the author being followed
    author = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='following',
        verbose_name='Автор',
    )

    class Meta:
        # a user may follow a given author only once
        constraints = [
            models.UniqueConstraint(
                fields=['user', 'author'],
                name='user_author'
            )
        ]
        ordering = ('author', )
        verbose_name = 'Подписка'
        verbose_name_plural = 'Подписки'

    def __str__(self):
        return (f'Подписчик: {self.user}, Автор: {self.author}')
class Purchase(models.Model):
    """Describes a purchase object."""
    user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='purchases',
        verbose_name='Пользователь',
    )
    recipe = models.ForeignKey(
        Recipe,
        on_delete=models.CASCADE,
        related_name='purchases',
        verbose_name='Рецепт',
    )

    class Meta:
        verbose_name = 'Покупка'
        verbose_name_plural = 'Покупки'
        # a recipe can appear in a user's purchase list only once
        constraints = [
            models.UniqueConstraint(
                fields=['user', 'recipe'],
                name='unique_purchase'
            )
        ]

    def __str__(self):
        return (f'Пользователь: {self.user}, Рецепт: {self.recipe}')
class Favourite(models.Model):
    """Describes a user's favourite recipes."""
    user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='favourites',
        verbose_name='Пользователь',
    )
    recipe = models.ForeignKey(
        Recipe,
        on_delete=models.CASCADE,
        related_name='favourites',
        verbose_name='Рецепт',
    )

    class Meta:
        verbose_name = 'Избранное'
        verbose_name_plural = 'Избранное'
        # a recipe can be favourited by a user only once
        constraints = [
            models.UniqueConstraint(
                fields=['user', 'recipe'],
                name='unique_favourite'
            )
        ]

    def __str__(self):
        return (f'Пользователь: {self.user}, Рецепт: {self.recipe}')
| 26.880734 | 79 | 0.57901 | from django.contrib.auth import get_user_model
from django.core.validators import MinValueValidator
from django.db import models
User = get_user_model()
class Tag(models.Model):
name = models.CharField(max_length=255, verbose_name='Имя')
color = models.CharField(max_length=100, blank=True,
verbose_name='Цвет', default='')
class Meta:
verbose_name = 'Тег'
verbose_name_plural = 'Теги'
def __str__(self):
return self.name
class Ingredient(models.Model):
name = models.CharField(
max_length=256,
verbose_name='Название',
)
unit = models.CharField(
max_length=64,
verbose_name='Ед. измерения',
)
class Meta:
constraints = [
models.UniqueConstraint(
fields=['name', 'unit'],
name='name_unit'
)
]
verbose_name = 'Ингредиент'
verbose_name_plural = 'Ингредиенты'
def __str__(self):
return f'{self.name}, {self.unit}'
class Recipe(models.Model):
author = models.ForeignKey(
User,
on_delete=models.CASCADE,
related_name='recipes',
verbose_name='Автор',
)
title = models.CharField(
max_length=256,
blank=False,
verbose_name='Название',
)
image = models.ImageField(
upload_to='kartinki/',
blank=False,
null=True,
verbose_name='Изображение',
)
description = models.TextField(
blank=False,
verbose_name='Описание',
)
ingredients = models.ManyToManyField(
Ingredient,
through='IngredientRecipe',
related_name='recipes',
verbose_name='Ингредиенты',
)
cooking_time = models.PositiveIntegerField(
blank=False,
verbose_name='Время приготовления, мин',
validators=[MinValueValidator(1)]
)
pub_date = models.DateTimeField(
auto_now_add=True,
verbose_name='Дата публикации',
)
tags = models.ManyToManyField(
Tag,
related_name='recipes',
verbose_name='Теги',
)
slug = models.SlugField(max_length=100, unique=True, blank=True, null=True)
class Meta:
ordering = ('-pub_date', )
verbose_name = 'Рецепт'
verbose_name_plural = 'Рецепты'
def __str__(self):
return (f'{self.author} : {self.title}')
class IngredientRecipe(models.Model):
ingredient = models.ForeignKey(Ingredient,
on_delete=models.CASCADE,
related_name='ingredientrecipe',
verbose_name='Ингредиент')
recipe = models.ForeignKey(Recipe,
on_delete=models.CASCADE,
related_name='ingredientrecipe',
verbose_name='Ингредиент')
quantity = models.DecimalField(
max_digits=8,
decimal_places=1,
verbose_name='Количество',
validators=[MinValueValidator(0.1)]
)
class Meta:
verbose_name = 'Ингредиент в рецепте'
verbose_name_plural = 'Ингредиенты в рецепте'
def __str__(self):
return (self.ingredient.name)
class Follow(models.Model):
user = models.ForeignKey(
User,
on_delete=models.CASCADE,
related_name='follower',
verbose_name='Подписчик',
)
author = models.ForeignKey(
User,
on_delete=models.CASCADE,
related_name='following',
verbose_name='Автор',
)
class Meta:
constraints = [
models.UniqueConstraint(
fields=['user', 'author'],
name='user_author'
)
]
ordering = ('author', )
verbose_name = 'Подписка'
verbose_name_plural = 'Подписки'
def __str__(self):
return (f'Подписчик: {self.user}, Автор: {self.author}')
class Purchase(models.Model):
user = models.ForeignKey(
User,
on_delete=models.CASCADE,
related_name='purchases',
verbose_name='Пользователь',
)
recipe = models.ForeignKey(
Recipe,
on_delete=models.CASCADE,
related_name='purchases',
verbose_name='Рецепт',
)
class Meta:
verbose_name = 'Покупка'
verbose_name_plural = 'Покупки'
constraints = [
models.UniqueConstraint(
fields=['user', 'recipe'],
name='unique_purchase'
)
]
def __str__(self):
return (f'Пользователь: {self.user}, Рецепт: {self.recipe}')
class Favourite(models.Model):
user = models.ForeignKey(
User,
on_delete=models.CASCADE,
related_name='favourites',
verbose_name='Пользователь',
)
recipe = models.ForeignKey(
Recipe,
on_delete=models.CASCADE,
related_name='favourites',
verbose_name='Рецепт',
)
class Meta:
verbose_name = 'Избранное'
verbose_name_plural = 'Избранное'
constraints = [
models.UniqueConstraint(
fields=['user', 'recipe'],
name='unique_favourite'
)
]
def __str__(self):
return (f'Пользователь: {self.user}, Рецепт: {self.recipe}')
| true | true |
f73cb32287f46c3e451244ae7cf86f4c5ba02e6d | 10,339 | py | Python | Train/run_Train_SiamFC.py | GengZ/siameseFC-pytorch-vot | e825637b5ac68c38d6c62bba265b61b85c4955d9 | [
"Apache-2.0"
] | 26 | 2018-12-27T03:53:33.000Z | 2021-06-29T13:11:32.000Z | Train/run_Train_SiamFC.py | GengZ/siameseFC-pytorch-vot | e825637b5ac68c38d6c62bba265b61b85c4955d9 | [
"Apache-2.0"
] | 3 | 2019-01-14T20:02:20.000Z | 2019-07-08T01:48:19.000Z | Train/run_Train_SiamFC.py | GengZ/siameseFC-pytorch-vot | e825637b5ac68c38d6c62bba265b61b85c4955d9 | [
"Apache-2.0"
] | 6 | 2018-12-12T13:50:09.000Z | 2019-08-02T13:48:22.000Z | import __init_paths
import os
import numpy as np
from tqdm import tqdm
import torch
from torch.autograd import Variable
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from lib.VIDDataset import VIDDataset
from lib.DataAugmentation import RandomStretch, CenterCrop, RandomCrop, ToTensor
from lib.Utils import create_label
from lib.eval_utils import centerThrErr
from experiments.siamese_fc.Config import Config
from experiments.siamese_fc.network import SiamNet
# fix random seed (numpy and torch) so runs are reproducible
np.random.seed(1357)
torch.manual_seed(1234)
def train(data_dir, train_imdb, val_imdb,
          model_save_path="./model/",
          config=None):
    """
    Train SiamFC on pairs of (exemplar z, search region x) images and run
    a validation pass after every epoch, saving a checkpoint per epoch.

    Arguments:
        data_dir: root directory of the cropped image pairs.
        train_imdb: training image database description.
        val_imdb: validation image database description.
        model_save_path: directory where per-epoch model checkpoints and
            the optimizer state are written.
        config: Config object holding all hyper-parameters (required).
    """
    assert config is not None

    use_gpu = config.use_gpu

    # set gpu ID
    if use_gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(config.gpu_id)

    # do data augmentation in PyTorch;
    # you can also do complex data augmentation as in the original paper
    center_crop_size = config.instance_size - config.stride
    random_crop_size = config.instance_size - 2 * config.stride

    train_z_transforms = transforms.Compose([
        RandomStretch(),
        CenterCrop((config.examplar_size, config.examplar_size)),
        ToTensor()
    ])
    train_x_transforms = transforms.Compose([
        RandomStretch(),
        CenterCrop((center_crop_size, center_crop_size)),
        RandomCrop((random_crop_size, random_crop_size)),
        ToTensor()
    ])
    valid_z_transforms = transforms.Compose([
        CenterCrop((config.examplar_size, config.examplar_size)),
        ToTensor(),
    ])
    valid_x_transforms = transforms.Compose([
        ToTensor()
    ])

    # load data (see details in VIDDataset.py)
    train_dataset = VIDDataset(train_imdb, data_dir, config,
                               train_z_transforms, train_x_transforms)
    val_dataset = VIDDataset(val_imdb, data_dir, config, valid_z_transforms,
                             valid_x_transforms, "Validation")

    # create dataloader
    train_loader = DataLoader(train_dataset, batch_size=config.batch_size,
                              shuffle=True,
                              num_workers=config.train_num_workers,
                              drop_last=True)
    val_loader = DataLoader(val_dataset, batch_size=config.batch_size,
                            shuffle=True,
                            num_workers=config.val_num_workers,
                            drop_last=True)

    # create SiamFC network architecture (see details in SiamNet.py)
    net = SiamNet()

    # move network to GPU if using GPU
    if use_gpu:
        net.cuda()

    # define training strategy;
    # ========================================
    # customize parameters attributes
    # 1. adjust layer weight learnable
    # 2. bias in feat_extraction exempt from weight_decay
    params = []
    # feature extract
    for key, value in dict(net.feat_extraction.named_parameters()).items():
        if 'conv' in key:
            if 'bias' in key:
                params += [{'params': [value],
                            'weight_decay': 0}]
            else:  # weight
                params += [{'params': [value],
                            'weight_decay': config.weight_decay}]
        if 'bn' in key:
            params += [{'params': [value],
                        'weight_decay': 0}]
    # adjust layer
    params += [
        {'params': [net.adjust.bias]},
    ]
    if config.fix_adjust_layer:
        # freeze the adjust-layer weight by giving it a zero learning rate
        params += [
            {'params': [net.adjust.weight], 'lr': 0},
        ]
    else:
        params += [
            {'params': [net.adjust.weight]},
        ]
    # ========================================

    # BUGFIX: SGD's 4th positional argument is `dampening`, not
    # `weight_decay`; the previous positional call silently set
    # dampening=config.weight_decay and left the global weight_decay at 0
    # for the parameter groups that do not override it. Pass by keyword.
    optimizer = torch.optim.SGD(params,
                                config.lr,
                                momentum=config.momentum,
                                weight_decay=config.weight_decay)

    # adjusting learning in each epoch
    # NOTE(review): LambdaLR multiplies the base lr by the lambda's return
    # value, so train_lrs are absolute learning rates only if config.lr == 1
    # -- confirm against Config.
    if not config.resume:
        train_lrs = np.logspace(-2, -5, config.num_epoch)
        scheduler = LambdaLR(optimizer, lambda epoch: train_lrs[epoch])
    else:
        train_lrs = np.logspace(-2, -5, config.num_epoch)
        train_lrs = train_lrs[config.start_epoch:]

        net.load_state_dict(torch.load(os.path.join(model_save_path,
                                                    'SiamFC_' +
                                                    str(config.start_epoch) +
                                                    '_model.pth')))
        optimizer.load_state_dict(torch.load(os.path.join(model_save_path,
                                                          'optimizer.pth')))
        scheduler = LambdaLR(optimizer, lambda epoch: train_lrs[epoch])
        print('resume training from epoch {} checkpoint'.format(config.start_epoch))

    # used to control generating label for training;
    # once generated, they are fixed since the labels for each
    # pair of images (examplar z and search region x) are the same
    train_response_flag = False
    valid_response_flag = False

    # -------------------- training & validation process --------------------
    for i in range(config.start_epoch, config.num_epoch):

        # adjusting learning rate
        scheduler.step()

        # -------------------------- training --------------------------
        # indicating training (very important for batch normalization)
        net.train()

        # used to collect loss
        train_loss = []

        # used as eval metric
        err_disp = 0
        sample_num = 0

        for j, data in enumerate(train_loader):

            # fetch data,
            # i.e., B x C x W x H (batchsize x channel x wdith x heigh)
            exemplar_imgs, instance_imgs = data

            # forward pass
            if use_gpu:
                exemplar_imgs = exemplar_imgs.cuda()
                instance_imgs = instance_imgs.cuda()
            output = net.forward(Variable(exemplar_imgs),
                                 Variable(instance_imgs))

            # create label for training (only do it one time)
            if not train_response_flag:
                # change control flag
                train_response_flag = True
                # get shape of output (i.e., response map)
                response_size = output.shape[2:4]
                # generate label and weight
                train_eltwise_label, train_instance_weight = \
                    create_label(response_size, config, use_gpu)

            # clear the gradient
            optimizer.zero_grad()

            # loss
            if config.loss == "logistic":
                loss = net.weight_loss(output,
                                       Variable(train_eltwise_label),
                                       Variable(train_instance_weight))
            elif config.loss == 'customize':
                loss = net.customize_loss(output,
                                          Variable(train_eltwise_label),
                                          Variable(train_instance_weight))

            # backward
            loss.backward()

            # update parameter
            optimizer.step()

            # collect training loss
            train_loss.append(loss.data)

            # collect additional data for metric
            err_disp = centerThrErr(output.data.cpu().numpy(),
                                    train_eltwise_label.cpu().numpy(),
                                    err_disp, sample_num)
            sample_num += config.batch_size

            # stdout
            if (j + 1) % config.log_freq == 0:
                print ("Epoch %d, Iter %06d, loss: %f, error disp: %f"
                       % (i+1, (j+1), np.mean(train_loss), err_disp))

        # ------------------------- saving model ---------------------------
        if not os.path.exists(model_save_path):
            os.makedirs(model_save_path)
        torch.save(net.state_dict(),
                   os.path.join(model_save_path,
                                "SiamFC_" + str(i + 1) + "_model.pth"))
        torch.save(optimizer.state_dict(),
                   os.path.join(model_save_path,
                                'optimizer.pth'))

        # --------------------------- validation ---------------------------
        # indicate validation
        net.eval()

        # used to collect validation loss
        val_loss = []

        for j, data in enumerate(tqdm(val_loader)):

            exemplar_imgs, instance_imgs = data

            # forward pass
            if use_gpu:
                exemplar_imgs = exemplar_imgs.cuda()
                instance_imgs = instance_imgs.cuda()
            output = net.forward(Variable(exemplar_imgs),
                                 Variable(instance_imgs))

            # create label for validation (only do it one time)
            if not valid_response_flag:
                valid_response_flag = True
                response_size = output.shape[2:4]
                valid_eltwise_label, valid_instance_weight = \
                    create_label(response_size, config, use_gpu)

            # loss
            if config.loss == "logistic":
                loss = net.weight_loss(output,
                                       Variable(valid_eltwise_label),
                                       Variable(valid_instance_weight))
            elif config.loss == 'customize':
                loss = net.customize_loss(output,
                                          Variable(valid_eltwise_label),
                                          Variable(valid_instance_weight))

            # collect validation loss
            val_loss.append(loss.data)

        print ("Epoch %d training loss: %f, validation loss: %f"
               % (i+1, np.mean(train_loss), np.mean(val_loss)))
# script entry point: build the configuration and launch training
if __name__ == "__main__":
    # initialize training configuration
    config = Config()

    data_dir = config.data_dir
    train_imdb = config.train_imdb
    val_imdb = config.val_imdb
    # checkpoints are written under <save_base_path>/<save_sub_path>
    model_save_path = os.path.join(config.save_base_path, config.save_sub_path)

    # training SiamFC network, using GPU by default
    train(data_dir, train_imdb, val_imdb,
          model_save_path=model_save_path,
          config=config)
import os
import numpy as np
from tqdm import tqdm
import torch
from torch.autograd import Variable
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from lib.VIDDataset import VIDDataset
from lib.DataAugmentation import RandomStretch, CenterCrop, RandomCrop, ToTensor
from lib.Utils import create_label
from lib.eval_utils import centerThrErr
from experiments.siamese_fc.Config import Config
from experiments.siamese_fc.network import SiamNet
np.random.seed(1357)
torch.manual_seed(1234)
def train(data_dir, train_imdb, val_imdb,
model_save_path="./model/",
config=None):
assert config is not None
use_gpu = config.use_gpu
if use_gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = str(config.gpu_id)
center_crop_size = config.instance_size - config.stride
random_crop_size = config.instance_size - 2 * config.stride
train_z_transforms = transforms.Compose([
RandomStretch(),
CenterCrop((config.examplar_size, config.examplar_size)),
ToTensor()
])
train_x_transforms = transforms.Compose([
RandomStretch(),
CenterCrop((center_crop_size, center_crop_size)),
RandomCrop((random_crop_size, random_crop_size)),
ToTensor()
])
valid_z_transforms = transforms.Compose([
CenterCrop((config.examplar_size, config.examplar_size)),
ToTensor(),
])
valid_x_transforms = transforms.Compose([
ToTensor()
])
train_dataset = VIDDataset(train_imdb, data_dir, config,
train_z_transforms, train_x_transforms)
val_dataset = VIDDataset(val_imdb, data_dir, config, valid_z_transforms,
valid_x_transforms, "Validation")
train_loader = DataLoader(train_dataset, batch_size=config.batch_size,
shuffle=True,
num_workers=config.train_num_workers,
drop_last=True)
val_loader = DataLoader(val_dataset, batch_size=config.batch_size,
shuffle=True,
num_workers=config.val_num_workers,
drop_last=True)
net = SiamNet()
if use_gpu:
net.cuda()
params = []
for key, value in dict(net.feat_extraction.named_parameters()).items():
if 'conv' in key:
if 'bias' in key:
params += [{'params': [value],
'weight_decay': 0}]
else:
params += [{'params': [value],
'weight_decay': config.weight_decay}]
if 'bn' in key:
params += [{'params': [value],
'weight_decay': 0}]
params += [
{'params': [net.adjust.bias]},
]
if config.fix_adjust_layer:
params += [
{'params': [net.adjust.weight], 'lr': 0},
]
else:
params += [
{'params': [net.adjust.weight]},
]
optimizer = torch.optim.SGD(params,
config.lr,
config.momentum,
config.weight_decay)
if not config.resume:
train_lrs = np.logspace(-2, -5, config.num_epoch)
scheduler = LambdaLR(optimizer, lambda epoch: train_lrs[epoch])
else:
train_lrs = np.logspace(-2, -5, config.num_epoch)
train_lrs = train_lrs[config.start_epoch:]
net.load_state_dict(torch.load(os.path.join(model_save_path,
'SiamFC_' +
str(config.start_epoch) +
'_model.pth')))
optimizer.load_state_dict(torch.load(os.path.join(model_save_path,
'optimizer.pth')))
scheduler = LambdaLR(optimizer, lambda epoch: train_lrs[epoch])
print('resume training from epoch {} checkpoint'.format(config.start_epoch))
train_response_flag = False
valid_response_flag = False
for i in range(config.start_epoch, config.num_epoch):
scheduler.step()
net.train()
train_loss = []
err_disp = 0
sample_num = 0
for j, data in enumerate(train_loader):
exemplar_imgs, instance_imgs = data
if use_gpu:
exemplar_imgs = exemplar_imgs.cuda()
instance_imgs = instance_imgs.cuda()
output = net.forward(Variable(exemplar_imgs),
Variable(instance_imgs))
if not train_response_flag:
train_response_flag = True
response_size = output.shape[2:4]
train_eltwise_label, train_instance_weight = \
create_label(response_size, config, use_gpu)
optimizer.zero_grad()
if config.loss == "logistic":
loss = net.weight_loss(output,
Variable(train_eltwise_label),
Variable(train_instance_weight))
elif config.loss == 'customize':
loss = net.customize_loss(output,
Variable(train_eltwise_label),
Variable(train_instance_weight))
loss.backward()
optimizer.step()
train_loss.append(loss.data)
err_disp = centerThrErr(output.data.cpu().numpy(),
train_eltwise_label.cpu().numpy(),
err_disp, sample_num)
sample_num += config.batch_size
if (j + 1) % config.log_freq == 0:
print ("Epoch %d, Iter %06d, loss: %f, error disp: %f"
% (i+1, (j+1), np.mean(train_loss), err_disp))
if not os.path.exists(model_save_path):
os.makedirs(model_save_path)
torch.save(net.state_dict(),
os.path.join(model_save_path,
"SiamFC_" + str(i + 1) + "_model.pth"))
torch.save(optimizer.state_dict(),
os.path.join(model_save_path,
'optimizer.pth'))
net.eval()
val_loss = []
for j, data in enumerate(tqdm(val_loader)):
exemplar_imgs, instance_imgs = data
if use_gpu:
exemplar_imgs = exemplar_imgs.cuda()
instance_imgs = instance_imgs.cuda()
output = net.forward(Variable(exemplar_imgs),
Variable(instance_imgs))
if not valid_response_flag:
valid_response_flag = True
response_size = output.shape[2:4]
valid_eltwise_label, valid_instance_weight = \
create_label(response_size, config, use_gpu)
if config.loss == "logistic":
loss = net.weight_loss(output,
Variable(valid_eltwise_label),
Variable(valid_instance_weight))
elif config.loss == 'customize':
loss = net.customize_loss(output,
Variable(valid_eltwise_label),
Variable(valid_instance_weight))
val_loss.append(loss.data)
print ("Epoch %d training loss: %f, validation loss: %f"
% (i+1, np.mean(train_loss), np.mean(val_loss)))
if __name__ == "__main__":
config = Config()
data_dir = config.data_dir
train_imdb = config.train_imdb
val_imdb = config.val_imdb
model_save_path = os.path.join(config.save_base_path, config.save_sub_path)
train(data_dir, train_imdb, val_imdb,
model_save_path=model_save_path,
config=config)
| true | true |
f73cb3333a08b309d1f39edb1693b416c9b3c731 | 2,170 | py | Python | website/website/texts.py | tibet5/website | 937e1941aaadbf7cd0a404a2655858451c01dd54 | [
"MIT"
] | null | null | null | website/website/texts.py | tibet5/website | 937e1941aaadbf7cd0a404a2655858451c01dd54 | [
"MIT"
] | null | null | null | website/website/texts.py | tibet5/website | 937e1941aaadbf7cd0a404a2655858451c01dd54 | [
"MIT"
] | null | null | null | import logging
import requests
from django.conf import settings
from django.template.loader import render_to_string
logger = logging.getLogger(__name__)
class TextMessagingAPIException(Exception):
pass
class TextMessagingAPI:
API_BASE_URL = "https://application.textline.com/api"
GROUPS = {
"default": "a3019415-c4d3-4de9-8374-103e8ba690b9",
"groceries": "0d427662-aba2-4e4c-9bfb-0bb846f2353a",
}
def __init__(self, access_token=settings.TEXTLINE_ACCESS_TOKEN):
self.access_token = access_token
def send_text(self, phone_number, message, group_name):
"""Send a message to the phone number"""
if self.access_token is None:
raise TextMessagingAPIException("Textline access token is not set")
try:
group_uuid = self.GROUPS[group_name]
except KeyError:
raise TextMessagingAPIException("Invalid group_name")
try:
response = requests.post(
f"{self.API_BASE_URL}/conversations.json",
json={
"group_uuid": group_uuid,
"phone_number": phone_number,
"comment": {
"body": message,
},
},
headers={
"X-TGP-ACCESS-TOKEN": self.access_token,
},
timeout=10,
)
response.raise_for_status()
return response.json()
except Exception as e:
raise TextMessagingAPIException from e
class TextMessage:
def __init__(self, template, context={}, group_name="default", api=None):
self.template = template
self.context = context
self.group_name = group_name
self.api = api or TextMessagingAPI()
@property
def message(self):
return render_to_string(self.template, self.context)
def send(self, phone_number):
try:
self.api.send_text(phone_number, self.message, self.group_name)
except TextMessagingAPIException:
logger.exception("Failed to send text message to %s", phone_number)
| 30.138889 | 79 | 0.603687 | import logging
import requests
from django.conf import settings
from django.template.loader import render_to_string
logger = logging.getLogger(__name__)
class TextMessagingAPIException(Exception):
pass
class TextMessagingAPI:
API_BASE_URL = "https://application.textline.com/api"
GROUPS = {
"default": "a3019415-c4d3-4de9-8374-103e8ba690b9",
"groceries": "0d427662-aba2-4e4c-9bfb-0bb846f2353a",
}
def __init__(self, access_token=settings.TEXTLINE_ACCESS_TOKEN):
self.access_token = access_token
def send_text(self, phone_number, message, group_name):
if self.access_token is None:
raise TextMessagingAPIException("Textline access token is not set")
try:
group_uuid = self.GROUPS[group_name]
except KeyError:
raise TextMessagingAPIException("Invalid group_name")
try:
response = requests.post(
f"{self.API_BASE_URL}/conversations.json",
json={
"group_uuid": group_uuid,
"phone_number": phone_number,
"comment": {
"body": message,
},
},
headers={
"X-TGP-ACCESS-TOKEN": self.access_token,
},
timeout=10,
)
response.raise_for_status()
return response.json()
except Exception as e:
raise TextMessagingAPIException from e
class TextMessage:
def __init__(self, template, context={}, group_name="default", api=None):
self.template = template
self.context = context
self.group_name = group_name
self.api = api or TextMessagingAPI()
@property
def message(self):
return render_to_string(self.template, self.context)
def send(self, phone_number):
try:
self.api.send_text(phone_number, self.message, self.group_name)
except TextMessagingAPIException:
logger.exception("Failed to send text message to %s", phone_number)
| true | true |
f73cb4291efff6e508979f434ec2c7c7247f7463 | 244 | py | Python | tests/basics/string_split.py | bygreencn/micropython | 3f759b71c63f5e01df18a6e204c50f78d1b6a20b | [
"MIT"
] | 1 | 2019-05-07T15:01:19.000Z | 2019-05-07T15:01:19.000Z | tests/basics/string_split.py | bygreencn/micropython | 3f759b71c63f5e01df18a6e204c50f78d1b6a20b | [
"MIT"
] | null | null | null | tests/basics/string_split.py | bygreencn/micropython | 3f759b71c63f5e01df18a6e204c50f78d1b6a20b | [
"MIT"
] | null | null | null | print("a b".split())
print(" a b ".split(None))
print(" a b ".split(None, 1))
print(" a b ".split(None, 2))
print(" a b c ".split(None, 1))
print(" a b c ".split(None, 0))
print(" a b c ".split(None, -1))
| 30.5 | 38 | 0.47541 | print("a b".split())
print(" a b ".split(None))
print(" a b ".split(None, 1))
print(" a b ".split(None, 2))
print(" a b c ".split(None, 1))
print(" a b c ".split(None, 0))
print(" a b c ".split(None, -1))
| true | true |
f73cb539d947c85f0ce6a8a9d877fe496437ddaf | 9,047 | py | Python | pyseer/kmer_mapping/annotate_hits.py | yemilawal/pyseer | 0d2130a1a0cb5fe247f339b226ea4f69b12fc321 | [
"Apache-2.0"
] | 1 | 2021-02-01T00:42:02.000Z | 2021-02-01T00:42:02.000Z | pyseer/kmer_mapping/annotate_hits.py | johnlees/pyseer | acfdfaa088d57b6f4cba730553f6c8c6a8c9ff3b | [
"Apache-2.0"
] | null | null | null | pyseer/kmer_mapping/annotate_hits.py | johnlees/pyseer | acfdfaa088d57b6f4cba730553f6c8c6a8c9ff3b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2017 Marco Galardini and John Lees
'''Script to annotate kmer hits'''
import sys
import os
import re
import tempfile
import subprocess
import pybedtools
from .bwa import bwa_index
from .bwa import bwa_iter
def get_options():
import argparse
description = 'Iteratively annotate significant kmers from SEER'
parser = argparse.ArgumentParser(description=description, prog="annotate_hits")
parser.add_argument("kmers",
help="Kmers file, filtered output from SEER")
parser.add_argument("references",
help="File of reference annotations. "
"First column fasta sequence, second column gff annotation, "
"third column 'ref' or 'draft'")
parser.add_argument("output",
help="Output file")
parser.add_argument("--bwa",
help="Location of bwa executable "
"[default=bwa]",
default="bwa")
parser.add_argument("--tmp-prefix",
help="Directory to store temporary files "
"[default=./]",
default=os.getcwd())
return parser.parse_args()
# returns first overlapping feature with gene= annotation. Otherwise first feature ID
def extract_genes(bedtools_intervals):
annotations = {}
for match in bedtools_intervals.features():
kmer_id, hit_id = match.fields[3].split("_")
annotations[int(kmer_id)] = {}
ID = None
gene = None
for tag in match.fields[15].split(";"):
parse_tag = re.search('^(.+)=(.+)$', tag)
if parse_tag:
if parse_tag.group(1) == "gene":
gene = parse_tag.group(2)
break
elif parse_tag.group(1) == "ID" and ID is None:
ID = parse_tag.group(2)
if gene is None:
if ID is not None:
gene = ID
else:
gene = ""
annotations[int(kmer_id)][int(hit_id)] = gene
return annotations
def main():
options = get_options()
# tmp file locations
remaining_tmp = options.tmp_prefix + "/remaining_kmers.txt"
remaining_next_tmp = options.tmp_prefix + "/remaining_kmers_next.txt"
remaining_fa_tmp = options.tmp_prefix + "/remaining_kmers.fa"
remaining_fa_next_tmp = options.tmp_prefix + "/remaining_kmers_next.fa"
pybedtools.helpers.set_tempdir(options.tmp_prefix)
# read references and drafts into list
references = []
with open(options.references, 'r') as reference_files:
for reference in reference_files:
(fa, gff, ref) = reference.rstrip().split()
references.append((fa, gff, ref))
output_file = open(options.output, 'w')
# Open seer results
# seer_remaining = seer_results
seer_remaining = open(options.kmers, 'r')
header = seer_remaining.readline()
# Write out kmer fasta file, keep track of count
kmers_remaining = 0
with open(remaining_fa_tmp, 'w') as kmer_fa:
for kmer in seer_remaining:
kmers_remaining += 1
kmer_fa.write(">" + str(kmers_remaining) + "\n")
kmer_fa.write(kmer.split("\t")[0] + "\n")
seer_remaining.seek(0)
seer_remaining.readline()
# for each reference, then draft
ref_id = 0
for reference in references:
(ref_fa, ref_gff, ref_type) = reference
ref_id += 1
# print number of kmers remaining. if zero, break
if kmers_remaining == 0:
break
sys.stderr.write(str(kmers_remaining) + " kmers remain\n")
if ref_type == "ref":
sys.stderr.write("Reference " + str(ref_id) + "\n")
else:
sys.stderr.write("Draft reference " + str(ref_id) + "\n")
# index reference sequence
bwa_index(ref_fa)
if ref_type == "ref":
bwa_algorithms = ["mem", "fastmap"]
elif ref_type == "draft":
bwa_algorithms = ["fastmap"]
else:
bwa_algorithms = ["fastmap"]
sys.stderr.write("Unknown reference type " + ref_type + " for " + ref_fa + ". Assuming draft\n")
# Fix ref annotation
tmp_bed = tempfile.NamedTemporaryFile(prefix=options.tmp_prefix + "/")
try:
subprocess.run("gff2bed < " + ref_gff + " > " + tmp_bed.name, shell=True, check=True)
except AttributeError:
# python prior to 3.5
subprocess.check_call("gff2bed < " + ref_gff + " > " + tmp_bed.name, shell=True)
ref_annotation = pybedtools.BedTool(tmp_bed.name)
filtered_ref = ref_annotation.filter(lambda x: True if x[7] == "CDS" else False).saveas('tmp_bed')
ref_annotation = pybedtools.BedTool('tmp_bed')
for bwa_algorithm in bwa_algorithms:
next_seer_remaining = open(remaining_next_tmp, 'w')
next_fasta_remaining = open(remaining_fa_next_tmp, 'w')
# run bwa mem -k 8 for ref, bwa fastmap for draft of remaining.fa
new_idx = 0
kmer_lines = []
map_pos = {}
mapped_kmers = bwa_iter(ref_fa, remaining_fa_tmp, bwa_algorithm)
with tempfile.NamedTemporaryFile('w', prefix=options.tmp_prefix + "/") as query_bed:
kmer_idx = 0
for mapping, kmer_line in zip(mapped_kmers, seer_remaining):
if mapping.mapped:
kmers_remaining -= 1
kmer_lines.append(kmer_line.rstrip())
map_pos[kmer_idx] = []
for hit_idx, (contig, start, end, strand) in enumerate(mapping.positions):
map_pos[kmer_idx].append(contig + ":" + str(start) + "-" + str(end))
query_bed.write('\t'.join([contig, str(start), str(end), str(kmer_idx) + "_" + str(hit_idx), '0', strand]) + "\n")
kmer_idx += 1
else:
# if unmapped write to seer_remaining and remaining.fa
next_seer_remaining.write(kmer_line)
new_idx += 1
next_fasta_remaining.write(">" + str(new_idx) + "\n")
next_fasta_remaining.write(kmer_line.split("\t")[0] + "\n")
if kmer_idx > 0:
query_bed.flush()
query_interval = pybedtools.BedTool(query_bed.name)
sorted_query = query_interval.sort()
in_genes = extract_genes(query_interval.intersect(b=ref_annotation, s=False, stream=True, wb=True))
up_genes = extract_genes(sorted_query.closest(b=ref_annotation, s=False, D="ref", iu=True, stream=True))
down_genes = extract_genes(sorted_query.closest(b=ref_annotation, s=False, D="ref", id=True, stream=True))
for kmer_idx, kmer_line in enumerate(kmer_lines):
annotations = []
for hit_idx, hit in enumerate(map_pos[kmer_idx]):
annotation = hit + ";"
if kmer_idx in down_genes and hit_idx in down_genes[kmer_idx]:
annotation += down_genes[kmer_idx][hit_idx]
annotation += ";"
if kmer_idx in in_genes and hit_idx in in_genes[kmer_idx]:
annotation += in_genes[kmer_idx][hit_idx]
annotation += ";"
if kmer_idx in up_genes and hit_idx in up_genes[kmer_idx]:
annotation += up_genes[kmer_idx][hit_idx]
annotations.append(annotation)
output_file.write("\t".join([kmer_line, ",".join(annotations)]) + "\n")
else:
# something went wrong, write down remaining kmers
for kmer_line in seer_remaining:
# if unmapped write to seer_remaining and remaining.fa
next_seer_remaining.write(kmer_line)
new_idx += 1
next_fasta_remaining.write(">" + str(new_idx) + "\n")
next_fasta_remaining.write(kmer_line.split("\t")[0] + "\n")
pybedtools.cleanup() # delete the bed file
# Clean up
seer_remaining.close()
next_seer_remaining.close()
next_fasta_remaining.close()
os.rename(remaining_next_tmp, remaining_tmp)
os.rename(remaining_fa_next_tmp, remaining_fa_tmp)
# Open next kmer file
seer_remaining = open(remaining_tmp, 'r')
# Clean up
tmp_bed.close()
os.remove('tmp_bed')
sys.stderr.write(str(kmers_remaining) + " kmers remain unannotated\n")
if __name__ == "__main__":
main()
| 40.388393 | 142 | 0.557643 |
import sys
import os
import re
import tempfile
import subprocess
import pybedtools
from .bwa import bwa_index
from .bwa import bwa_iter
def get_options():
import argparse
description = 'Iteratively annotate significant kmers from SEER'
parser = argparse.ArgumentParser(description=description, prog="annotate_hits")
parser.add_argument("kmers",
help="Kmers file, filtered output from SEER")
parser.add_argument("references",
help="File of reference annotations. "
"First column fasta sequence, second column gff annotation, "
"third column 'ref' or 'draft'")
parser.add_argument("output",
help="Output file")
parser.add_argument("--bwa",
help="Location of bwa executable "
"[default=bwa]",
default="bwa")
parser.add_argument("--tmp-prefix",
help="Directory to store temporary files "
"[default=./]",
default=os.getcwd())
return parser.parse_args()
def extract_genes(bedtools_intervals):
annotations = {}
for match in bedtools_intervals.features():
kmer_id, hit_id = match.fields[3].split("_")
annotations[int(kmer_id)] = {}
ID = None
gene = None
for tag in match.fields[15].split(";"):
parse_tag = re.search('^(.+)=(.+)$', tag)
if parse_tag:
if parse_tag.group(1) == "gene":
gene = parse_tag.group(2)
break
elif parse_tag.group(1) == "ID" and ID is None:
ID = parse_tag.group(2)
if gene is None:
if ID is not None:
gene = ID
else:
gene = ""
annotations[int(kmer_id)][int(hit_id)] = gene
return annotations
def main():
options = get_options()
remaining_tmp = options.tmp_prefix + "/remaining_kmers.txt"
remaining_next_tmp = options.tmp_prefix + "/remaining_kmers_next.txt"
remaining_fa_tmp = options.tmp_prefix + "/remaining_kmers.fa"
remaining_fa_next_tmp = options.tmp_prefix + "/remaining_kmers_next.fa"
pybedtools.helpers.set_tempdir(options.tmp_prefix)
references = []
with open(options.references, 'r') as reference_files:
for reference in reference_files:
(fa, gff, ref) = reference.rstrip().split()
references.append((fa, gff, ref))
output_file = open(options.output, 'w')
seer_remaining = open(options.kmers, 'r')
header = seer_remaining.readline()
kmers_remaining = 0
with open(remaining_fa_tmp, 'w') as kmer_fa:
for kmer in seer_remaining:
kmers_remaining += 1
kmer_fa.write(">" + str(kmers_remaining) + "\n")
kmer_fa.write(kmer.split("\t")[0] + "\n")
seer_remaining.seek(0)
seer_remaining.readline()
ref_id = 0
for reference in references:
(ref_fa, ref_gff, ref_type) = reference
ref_id += 1
if kmers_remaining == 0:
break
sys.stderr.write(str(kmers_remaining) + " kmers remain\n")
if ref_type == "ref":
sys.stderr.write("Reference " + str(ref_id) + "\n")
else:
sys.stderr.write("Draft reference " + str(ref_id) + "\n")
bwa_index(ref_fa)
if ref_type == "ref":
bwa_algorithms = ["mem", "fastmap"]
elif ref_type == "draft":
bwa_algorithms = ["fastmap"]
else:
bwa_algorithms = ["fastmap"]
sys.stderr.write("Unknown reference type " + ref_type + " for " + ref_fa + ". Assuming draft\n")
tmp_bed = tempfile.NamedTemporaryFile(prefix=options.tmp_prefix + "/")
try:
subprocess.run("gff2bed < " + ref_gff + " > " + tmp_bed.name, shell=True, check=True)
except AttributeError:
subprocess.check_call("gff2bed < " + ref_gff + " > " + tmp_bed.name, shell=True)
ref_annotation = pybedtools.BedTool(tmp_bed.name)
filtered_ref = ref_annotation.filter(lambda x: True if x[7] == "CDS" else False).saveas('tmp_bed')
ref_annotation = pybedtools.BedTool('tmp_bed')
for bwa_algorithm in bwa_algorithms:
next_seer_remaining = open(remaining_next_tmp, 'w')
next_fasta_remaining = open(remaining_fa_next_tmp, 'w')
new_idx = 0
kmer_lines = []
map_pos = {}
mapped_kmers = bwa_iter(ref_fa, remaining_fa_tmp, bwa_algorithm)
with tempfile.NamedTemporaryFile('w', prefix=options.tmp_prefix + "/") as query_bed:
kmer_idx = 0
for mapping, kmer_line in zip(mapped_kmers, seer_remaining):
if mapping.mapped:
kmers_remaining -= 1
kmer_lines.append(kmer_line.rstrip())
map_pos[kmer_idx] = []
for hit_idx, (contig, start, end, strand) in enumerate(mapping.positions):
map_pos[kmer_idx].append(contig + ":" + str(start) + "-" + str(end))
query_bed.write('\t'.join([contig, str(start), str(end), str(kmer_idx) + "_" + str(hit_idx), '0', strand]) + "\n")
kmer_idx += 1
else:
next_seer_remaining.write(kmer_line)
new_idx += 1
next_fasta_remaining.write(">" + str(new_idx) + "\n")
next_fasta_remaining.write(kmer_line.split("\t")[0] + "\n")
if kmer_idx > 0:
query_bed.flush()
query_interval = pybedtools.BedTool(query_bed.name)
sorted_query = query_interval.sort()
in_genes = extract_genes(query_interval.intersect(b=ref_annotation, s=False, stream=True, wb=True))
up_genes = extract_genes(sorted_query.closest(b=ref_annotation, s=False, D="ref", iu=True, stream=True))
down_genes = extract_genes(sorted_query.closest(b=ref_annotation, s=False, D="ref", id=True, stream=True))
for kmer_idx, kmer_line in enumerate(kmer_lines):
annotations = []
for hit_idx, hit in enumerate(map_pos[kmer_idx]):
annotation = hit + ";"
if kmer_idx in down_genes and hit_idx in down_genes[kmer_idx]:
annotation += down_genes[kmer_idx][hit_idx]
annotation += ";"
if kmer_idx in in_genes and hit_idx in in_genes[kmer_idx]:
annotation += in_genes[kmer_idx][hit_idx]
annotation += ";"
if kmer_idx in up_genes and hit_idx in up_genes[kmer_idx]:
annotation += up_genes[kmer_idx][hit_idx]
annotations.append(annotation)
output_file.write("\t".join([kmer_line, ",".join(annotations)]) + "\n")
else:
for kmer_line in seer_remaining:
next_seer_remaining.write(kmer_line)
new_idx += 1
next_fasta_remaining.write(">" + str(new_idx) + "\n")
next_fasta_remaining.write(kmer_line.split("\t")[0] + "\n")
pybedtools.cleanup()
seer_remaining.close()
next_seer_remaining.close()
next_fasta_remaining.close()
os.rename(remaining_next_tmp, remaining_tmp)
os.rename(remaining_fa_next_tmp, remaining_fa_tmp)
seer_remaining = open(remaining_tmp, 'r')
tmp_bed.close()
os.remove('tmp_bed')
sys.stderr.write(str(kmers_remaining) + " kmers remain unannotated\n")
if __name__ == "__main__":
main()
| true | true |
f73cb6ea810c89eb0590e550a18e30bd2aa34a2f | 1,029 | py | Python | setup.py | didadadida93/tkpy | c8cc41e2115cca7d975a68e418c462a4da3232a9 | [
"MIT"
] | 1 | 2019-11-12T12:56:54.000Z | 2019-11-12T12:56:54.000Z | setup.py | didadadida93/tkpy | c8cc41e2115cca7d975a68e418c462a4da3232a9 | [
"MIT"
] | 1 | 2019-02-16T17:06:04.000Z | 2019-02-16T20:53:05.000Z | setup.py | didadadida93/tkpy | c8cc41e2115cca7d975a68e418c462a4da3232a9 | [
"MIT"
] | 5 | 2018-12-23T14:52:32.000Z | 2021-07-17T06:11:15.000Z | import setuptools
with open("README.md", "r") as f:
long_description = f.read()
about = {}
with open("tkpy/__attrs__.py") as f:
exec(f.read(), about)
setuptools.setup(
name=about["__name__"],
version=about["__version__"],
description=about["__description__"],
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/didadadida93/tkpy",
author=about["__author__"],
author_email=about["__author_email__"],
packages=["tkpy"],
include_package_data=True,
package_data={"enums": ["*.py"], "models": ["*.py", "*.sql"]},
install_requires=["requests"],
license=about["__license__"],
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Intended Audience :: Developers",
"Topic :: Utilities",
],
)
| 27.810811 | 66 | 0.624879 | import setuptools
with open("README.md", "r") as f:
long_description = f.read()
about = {}
with open("tkpy/__attrs__.py") as f:
exec(f.read(), about)
setuptools.setup(
name=about["__name__"],
version=about["__version__"],
description=about["__description__"],
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/didadadida93/tkpy",
author=about["__author__"],
author_email=about["__author_email__"],
packages=["tkpy"],
include_package_data=True,
package_data={"enums": ["*.py"], "models": ["*.py", "*.sql"]},
install_requires=["requests"],
license=about["__license__"],
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Intended Audience :: Developers",
"Topic :: Utilities",
],
)
| true | true |
f73cb71a565eaec15d39cf312dbd5018b983cef6 | 10,419 | py | Python | hanbrake_revLights/src/fdp.py | ricardo-quintela/SimRacingSetup | 3d995ca2097a951aa3b54cf627598481f9fcd54d | [
"MIT"
] | 41 | 2018-06-06T18:08:13.000Z | 2022-01-25T17:01:13.000Z | hanbrake_revLights/src/fdp.py | ricardo-quintela/SimRacingSetup | 3d995ca2097a951aa3b54cf627598481f9fcd54d | [
"MIT"
] | 9 | 2018-07-10T04:51:41.000Z | 2022-03-02T20:04:03.000Z | hanbrake_revLights/src/fdp.py | ricardo-quintela/SimRacingSetup | 3d995ca2097a951aa3b54cf627598481f9fcd54d | [
"MIT"
] | 12 | 2019-05-25T14:58:16.000Z | 2022-03-27T14:56:08.000Z | #!/usr/env/python
# -*- coding: utf-8 -*-
'''
Python class for Forza Motorsport 7's data stream format.
Copyright (c) 2018 Morten Wang
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from struct import unpack
## Documentation of the packet format is available on
## https://forums.forzamotorsport.net/turn10_postsm926839_Forza-Motorsport-7--Data-Out--feature-details.aspx#post_926839
class ForzaDataPacket:
## Class variables are the specification of the format and the names of all
## the properties found in the data packet.
## Format string that allows unpack to process the data bytestream
## for the V1 format called 'sled'
sled_format = '<iIfffffffffffffffffffffffffffffffffffffffffffffffffffiiiii'
## Format string for the V2 format called 'car dash'
dash_format = '<iIfffffffffffffffffffffffffffffffffffffffffffffffffffiiiiifffffffffffffffffHBBBBBBbbb'
## Names of the properties in the order they're featured in the packet:
sled_props = [
'is_race_on', 'timestamp_ms',
'engine_max_rpm', 'engine_idle_rpm', 'current_engine_rpm',
'acceleration_x', 'acceleration_y', 'acceleration_z',
'velocity_x', 'velocity_y', 'velocity_z',
'angular_velocity_x', 'angular_velocity_y', 'angular_velocity_z',
'yaw', 'pitch', 'roll',
'norm_suspension_travel_FL', 'norm_suspension_travel_FR',
'norm_suspension_travel_RL', 'norm_suspension_travel_RR',
'tire_slip_ratio_FL', 'tire_slip_ratio_FR',
'tire_slip_ratio_RL', 'tire_slip_ratio_RR',
'wheel_rotation_speed_FL', 'wheel_rotation_speed_FR',
'wheel_rotation_speed_RL', 'wheel_rotation_speed_RR',
'wheel_on_rumble_strip_FL', 'wheel_on_rumble_strip_FR',
'wheel_on_rumble_strip_RL', 'wheel_on_rumble_strip_RR',
'wheel_in_puddle_FL', 'wheel_in_puddle_FR',
'wheel_in_puddle_RL', 'wheel_in_puddle_RR',
'surface_rumble_FL', 'surface_rumble_FR',
'surface_rumble_RL', 'surface_rumble_RR',
'tire_slip_angle_FL', 'tire_slip_angle_FR',
'tire_slip_angle_RL', 'tire_slip_angle_RR',
'tire_combined_slip_FL', 'tire_combined_slip_FR',
'tire_combined_slip_RL', 'tire_combined_slip_RR',
'suspension_travel_meters_FL', 'suspension_travel_meters_FR',
'suspension_travel_meters_RL', 'suspension_travel_meters_RR',
'car_ordinal', 'car_class', 'car_performance_index',
'drivetrain_type', 'num_cylinders'
]
## The additional props added in the 'car dash' format
dash_props = ['position_x', 'position_y', 'position_z',
'speed', 'power', 'torque',
'tire_temp_FL', 'tire_temp_FR',
'tire_temp_RL', 'tire_temp_RR',
'boost', 'fuel', 'dist_traveled',
'best_lap_time', 'last_lap_time',
'cur_lap_time', 'cur_race_time',
'lap_no', 'race_pos',
'accel', 'brake', 'clutch', 'handbrake',
'gear', 'steer',
'norm_driving_line', 'norm_ai_brake_diff']
def __init__(self, data, packet_format='dash'):
## The format this data packet was created with:
self.packet_format = packet_format
## zip makes for convenient flexibility when mapping names to
## values in the data packet:
if packet_format == 'sled':
for prop_name, prop_value in zip(self.sled_props,
unpack(self.sled_format, data)):
setattr(self, prop_name, prop_value)
elif packet_format == 'fh4':
patched_data = data[:232] + data[244:323]
for prop_name, prop_value in zip(self.sled_props + self.dash_props,
unpack(self.dash_format,
patched_data)):
setattr(self, prop_name, prop_value)
else:
for prop_name, prop_value in zip(self.sled_props + self.dash_props,
unpack(self.dash_format, data)):
setattr(self, prop_name, prop_value)
@classmethod
def get_props(cls, packet_format = 'dash'):
'''
Return the list of properties in the data packet, in order.
:param packet_format: which packet format to get properties for,
one of either 'sled' or 'dash'
:type packet_format: str
'''
if packet_format == 'sled':
return(cls.sled_props)
return(cls.sled_props + cls.dash_props)
def to_list(self, attributes):
'''
Return the values of this data packet, in order. If a list of
attributes are provided, only return those.
:param attributes: the attributes to return
:type attributes: list
'''
if attributes:
return([getattr(self, a) for a in attributes])
if self.packet_format == 'sled':
return([getattr(self, prop_name) for prop_name in self.sled_props])
return([getattr(self, prop_name) for prop_name in \
self.sled_props + self.dash_props])
def get_format(self):
'''
Return the format this packet was sent with.
'''
return(self.packet_format)
def get_tsv_header(self):
'''
Return a tab-separated string with the names of all properties in the order defined in the data packet.
'''
if self.packet_format == 'sled':
return('\t'.join(self.sled_props))
return('\t'.join(self.sled_props + self.dash_props))
def to_tsv(self):
'''
Return a tab-separated values string with all data in the given order.
All floating point numbers are defined as such to allow for changing
the number of significant digits if desired.
'''
if self.packet_format == 'sled':
return('{0.is_race_on}\t{0.timestamp_ms}\t{0.engine_max_rpm:f}\t{0.engine_idle_rpm:f}\t{0.current_engine_rpm:f}\t{0.acceleration_x:f}\t{0.acceleration_y:f}\t{0.acceleration_z:f}\t{0.velocity_x:f}\t{0.velocity_y:f}\t{0.velocity_z:f}\t{0.angular_velocity_x:f}\t{0.angular_velocity_y:f}\t{0.angular_velocity_z:f}\t{0.yaw:f}\t{0.pitch:f}\t{0.roll:f}\t{0.norm_suspension_travel_FL:f}\t{0.norm_suspension_travel_FR:f}\t{0.norm_suspension_travel_RL:f}\t{0.norm_suspension_travel_RR:f}\t{0.tire_slip_ratio_FL:f}\t{0.tire_slip_ratio_FR:f}\t{0.tire_slip_ratio_RL:f}\t{0.tire_slip_ratio_RR:f}\t{0.wheel_rotation_speed_FL:f}\t{0.wheel_rotation_speed_FR:f}\t{0.wheel_rotation_speed_RL:f}\t{0.wheel_rotation_speed_RR:f}\t{0.wheel_on_rumble_strip_FL:f}\t{0.wheel_on_rumble_strip_FR:f}\t{0.wheel_on_rumble_strip_RL:f}\t{0.wheel_on_rumble_strip_RR:f}\t{0.wheel_in_puddle_FL:f}\t{0.wheel_in_puddle_FR:f}\t{0.wheel_in_puddle_RL:f}\t{0.wheel_in_puddle_RR:f}\t{0.surface_rumble_FL:f}\t{0.surface_rumble_FR:f}\t{0.surface_rumble_RL:f}\t{0.surface_rumble_RR:f}\t{0.tire_slip_angle_FL:f}\t{0.tire_slip_angle_FR:f}\t{0.tire_slip_angle_RL:f}\t{0.tire_slip_angle_RR:f}\t{0.tire_combined_slip_FL:f}\t{0.tire_combined_slip_FR:f}\t{0.tire_combined_slip_RL:f}\t{0.tire_combined_slip_RR:f}\t{0.suspension_travel_meters_FL:f}\t{0.suspension_travel_meters_FR:f}\t{0.suspension_travel_meters_RL:f}\t{0.suspension_travel_meters_RR:f}\t{0.car_ordinal}\t{0.car_class}\t{0.car_performance_index}\t{0.drivetrain_type}\t{0.num_cylinders}'.format(self))
return('{0.is_race_on}\t{0.timestamp_ms}\t{0.engine_max_rpm:f}\t{0.engine_idle_rpm:f}\t{0.current_engine_rpm:f}\t{0.acceleration_x:f}\t{0.acceleration_y:f}\t{0.acceleration_z:f}\t{0.velocity_x:f}\t{0.velocity_y:f}\t{0.velocity_z:f}\t{0.angular_velocity_x:f}\t{0.angular_velocity_y:f}\t{0.angular_velocity_z:f}\t{0.yaw:f}\t{0.pitch:f}\t{0.roll:f}\t{0.norm_suspension_travel_FL:f}\t{0.norm_suspension_travel_FR:f}\t{0.norm_suspension_travel_RL:f}\t{0.norm_suspension_travel_RR:f}\t{0.tire_slip_ratio_FL:f}\t{0.tire_slip_ratio_FR:f}\t{0.tire_slip_ratio_RL:f}\t{0.tire_slip_ratio_RR:f}\t{0.wheel_rotation_speed_FL:f}\t{0.wheel_rotation_speed_FR:f}\t{0.wheel_rotation_speed_RL:f}\t{0.wheel_rotation_speed_RR:f}\t{0.wheel_on_rumble_strip_FL:f}\t{0.wheel_on_rumble_strip_FR:f}\t{0.wheel_on_rumble_strip_RL:f}\t{0.wheel_on_rumble_strip_RR:f}\t{0.wheel_in_puddle_FL:f}\t{0.wheel_in_puddle_FR:f}\t{0.wheel_in_puddle_RL:f}\t{0.wheel_in_puddle_RR:f}\t{0.surface_rumble_FL:f}\t{0.surface_rumble_FR:f}\t{0.surface_rumble_RL:f}\t{0.surface_rumble_RR:f}\t{0.tire_slip_angle_FL:f}\t{0.tire_slip_angle_FR:f}\t{0.tire_slip_angle_RL:f}\t{0.tire_slip_angle_RR:f}\t{0.tire_combined_slip_FL:f}\t{0.tire_combined_slip_FR:f}\t{0.tire_combined_slip_RL:f}\t{0.tire_combined_slip_RR:f}\t{0.suspension_travel_meters_FL:f}\t{0.suspension_travel_meters_FR:f}\t{0.suspension_travel_meters_RL:f}\t{0.suspension_travel_meters_RR:f}\t{0.car_ordinal}\t{0.car_class}\t{0.car_performance_index}\t{0.drivetrain_type}\t{0.num_cylinders}\t{0.position_x}\t{0.position_y}\t{0.position_z}\t{0.speed}\t{0.power}\t{0.torque}\t{0.tire_temp_FL}\t{0.tire_temp_FR}\t{0.tire_temp_RL}\t{0.tire_temp_RR}\t{0.boost}\t{0.fuel}\t{0.dist_traveled}\t{0.best_lap}\t{0.last_lap}\t{0.cur_lap}\t{0.cur_race_time}\t{0.lap_no}\t{0.race_pos}\t{0.accel}\t{0.brake}\t{0.clutch}\t{0.handbrake}\t{0.gear}\t{0.steer}\t{0.norm_driving_line}\t{0.norm_ai_brake_diff}'.format(self))
| 63.920245 | 1,916 | 0.695844 |
from struct import unpack
ngular_velocity_y', 'angular_velocity_z',
'yaw', 'pitch', 'roll',
'norm_suspension_travel_FL', 'norm_suspension_travel_FR',
'norm_suspension_travel_RL', 'norm_suspension_travel_RR',
'tire_slip_ratio_FL', 'tire_slip_ratio_FR',
'tire_slip_ratio_RL', 'tire_slip_ratio_RR',
'wheel_rotation_speed_FL', 'wheel_rotation_speed_FR',
'wheel_rotation_speed_RL', 'wheel_rotation_speed_RR',
'wheel_on_rumble_strip_FL', 'wheel_on_rumble_strip_FR',
'wheel_on_rumble_strip_RL', 'wheel_on_rumble_strip_RR',
'wheel_in_puddle_FL', 'wheel_in_puddle_FR',
'wheel_in_puddle_RL', 'wheel_in_puddle_RR',
'surface_rumble_FL', 'surface_rumble_FR',
'surface_rumble_RL', 'surface_rumble_RR',
'tire_slip_angle_FL', 'tire_slip_angle_FR',
'tire_slip_angle_RL', 'tire_slip_angle_RR',
'tire_combined_slip_FL', 'tire_combined_slip_FR',
'tire_combined_slip_RL', 'tire_combined_slip_RR',
'suspension_travel_meters_FL', 'suspension_travel_meters_FR',
'suspension_travel_meters_RL', 'suspension_travel_meters_RR',
'car_ordinal', 'car_class', 'car_performance_index',
'drivetrain_type', 'num_cylinders'
]
## The additional props added in the 'car dash' format
dash_props = ['position_x', 'position_y', 'position_z',
'speed', 'power', 'torque',
'tire_temp_FL', 'tire_temp_FR',
'tire_temp_RL', 'tire_temp_RR',
'boost', 'fuel', 'dist_traveled',
'best_lap_time', 'last_lap_time',
'cur_lap_time', 'cur_race_time',
'lap_no', 'race_pos',
'accel', 'brake', 'clutch', 'handbrake',
'gear', 'steer',
'norm_driving_line', 'norm_ai_brake_diff']
def __init__(self, data, packet_format='dash'):
## The format this data packet was created with:
self.packet_format = packet_format
## zip makes for convenient flexibility when mapping names to
## values in the data packet:
if packet_format == 'sled':
for prop_name, prop_value in zip(self.sled_props,
unpack(self.sled_format, data)):
setattr(self, prop_name, prop_value)
elif packet_format == 'fh4':
patched_data = data[:232] + data[244:323]
for prop_name, prop_value in zip(self.sled_props + self.dash_props,
unpack(self.dash_format,
patched_data)):
setattr(self, prop_name, prop_value)
else:
for prop_name, prop_value in zip(self.sled_props + self.dash_props,
unpack(self.dash_format, data)):
setattr(self, prop_name, prop_value)
@classmethod
def get_props(cls, packet_format = 'dash'):
if packet_format == 'sled':
return(cls.sled_props)
return(cls.sled_props + cls.dash_props)
def to_list(self, attributes):
if attributes:
return([getattr(self, a) for a in attributes])
if self.packet_format == 'sled':
return([getattr(self, prop_name) for prop_name in self.sled_props])
return([getattr(self, prop_name) for prop_name in \
self.sled_props + self.dash_props])
def get_format(self):
return(self.packet_format)
def get_tsv_header(self):
if self.packet_format == 'sled':
return('\t'.join(self.sled_props))
return('\t'.join(self.sled_props + self.dash_props))
def to_tsv(self):
if self.packet_format == 'sled':
return('{0.is_race_on}\t{0.timestamp_ms}\t{0.engine_max_rpm:f}\t{0.engine_idle_rpm:f}\t{0.current_engine_rpm:f}\t{0.acceleration_x:f}\t{0.acceleration_y:f}\t{0.acceleration_z:f}\t{0.velocity_x:f}\t{0.velocity_y:f}\t{0.velocity_z:f}\t{0.angular_velocity_x:f}\t{0.angular_velocity_y:f}\t{0.angular_velocity_z:f}\t{0.yaw:f}\t{0.pitch:f}\t{0.roll:f}\t{0.norm_suspension_travel_FL:f}\t{0.norm_suspension_travel_FR:f}\t{0.norm_suspension_travel_RL:f}\t{0.norm_suspension_travel_RR:f}\t{0.tire_slip_ratio_FL:f}\t{0.tire_slip_ratio_FR:f}\t{0.tire_slip_ratio_RL:f}\t{0.tire_slip_ratio_RR:f}\t{0.wheel_rotation_speed_FL:f}\t{0.wheel_rotation_speed_FR:f}\t{0.wheel_rotation_speed_RL:f}\t{0.wheel_rotation_speed_RR:f}\t{0.wheel_on_rumble_strip_FL:f}\t{0.wheel_on_rumble_strip_FR:f}\t{0.wheel_on_rumble_strip_RL:f}\t{0.wheel_on_rumble_strip_RR:f}\t{0.wheel_in_puddle_FL:f}\t{0.wheel_in_puddle_FR:f}\t{0.wheel_in_puddle_RL:f}\t{0.wheel_in_puddle_RR:f}\t{0.surface_rumble_FL:f}\t{0.surface_rumble_FR:f}\t{0.surface_rumble_RL:f}\t{0.surface_rumble_RR:f}\t{0.tire_slip_angle_FL:f}\t{0.tire_slip_angle_FR:f}\t{0.tire_slip_angle_RL:f}\t{0.tire_slip_angle_RR:f}\t{0.tire_combined_slip_FL:f}\t{0.tire_combined_slip_FR:f}\t{0.tire_combined_slip_RL:f}\t{0.tire_combined_slip_RR:f}\t{0.suspension_travel_meters_FL:f}\t{0.suspension_travel_meters_FR:f}\t{0.suspension_travel_meters_RL:f}\t{0.suspension_travel_meters_RR:f}\t{0.car_ordinal}\t{0.car_class}\t{0.car_performance_index}\t{0.drivetrain_type}\t{0.num_cylinders}'.format(self))
return('{0.is_race_on}\t{0.timestamp_ms}\t{0.engine_max_rpm:f}\t{0.engine_idle_rpm:f}\t{0.current_engine_rpm:f}\t{0.acceleration_x:f}\t{0.acceleration_y:f}\t{0.acceleration_z:f}\t{0.velocity_x:f}\t{0.velocity_y:f}\t{0.velocity_z:f}\t{0.angular_velocity_x:f}\t{0.angular_velocity_y:f}\t{0.angular_velocity_z:f}\t{0.yaw:f}\t{0.pitch:f}\t{0.roll:f}\t{0.norm_suspension_travel_FL:f}\t{0.norm_suspension_travel_FR:f}\t{0.norm_suspension_travel_RL:f}\t{0.norm_suspension_travel_RR:f}\t{0.tire_slip_ratio_FL:f}\t{0.tire_slip_ratio_FR:f}\t{0.tire_slip_ratio_RL:f}\t{0.tire_slip_ratio_RR:f}\t{0.wheel_rotation_speed_FL:f}\t{0.wheel_rotation_speed_FR:f}\t{0.wheel_rotation_speed_RL:f}\t{0.wheel_rotation_speed_RR:f}\t{0.wheel_on_rumble_strip_FL:f}\t{0.wheel_on_rumble_strip_FR:f}\t{0.wheel_on_rumble_strip_RL:f}\t{0.wheel_on_rumble_strip_RR:f}\t{0.wheel_in_puddle_FL:f}\t{0.wheel_in_puddle_FR:f}\t{0.wheel_in_puddle_RL:f}\t{0.wheel_in_puddle_RR:f}\t{0.surface_rumble_FL:f}\t{0.surface_rumble_FR:f}\t{0.surface_rumble_RL:f}\t{0.surface_rumble_RR:f}\t{0.tire_slip_angle_FL:f}\t{0.tire_slip_angle_FR:f}\t{0.tire_slip_angle_RL:f}\t{0.tire_slip_angle_RR:f}\t{0.tire_combined_slip_FL:f}\t{0.tire_combined_slip_FR:f}\t{0.tire_combined_slip_RL:f}\t{0.tire_combined_slip_RR:f}\t{0.suspension_travel_meters_FL:f}\t{0.suspension_travel_meters_FR:f}\t{0.suspension_travel_meters_RL:f}\t{0.suspension_travel_meters_RR:f}\t{0.car_ordinal}\t{0.car_class}\t{0.car_performance_index}\t{0.drivetrain_type}\t{0.num_cylinders}\t{0.position_x}\t{0.position_y}\t{0.position_z}\t{0.speed}\t{0.power}\t{0.torque}\t{0.tire_temp_FL}\t{0.tire_temp_FR}\t{0.tire_temp_RL}\t{0.tire_temp_RR}\t{0.boost}\t{0.fuel}\t{0.dist_traveled}\t{0.best_lap}\t{0.last_lap}\t{0.cur_lap}\t{0.cur_race_time}\t{0.lap_no}\t{0.race_pos}\t{0.accel}\t{0.brake}\t{0.clutch}\t{0.handbrake}\t{0.gear}\t{0.steer}\t{0.norm_driving_line}\t{0.norm_ai_brake_diff}'.format(self))
| true | true |
f73cb8544021686bca7140f3665f3a31dac0efba | 187 | py | Python | ftlib/finetune/__init__.py | ashok-arjun/Transfer-Learning-Library | e13bb3e37bc7ca74a382cfdc85a640deaf0333b3 | [
"MIT"
] | 1 | 2021-08-04T23:31:30.000Z | 2021-08-04T23:31:30.000Z | ftlib/finetune/__init__.py | ashok-arjun/Transfer-Learning-Library | e13bb3e37bc7ca74a382cfdc85a640deaf0333b3 | [
"MIT"
] | null | null | null | ftlib/finetune/__init__.py | ashok-arjun/Transfer-Learning-Library | e13bb3e37bc7ca74a382cfdc85a640deaf0333b3 | [
"MIT"
] | null | null | null | from .stochnorm import *
from .bss import *
from .co_tuning import *
from .delta import *
from .bi_tuning import *
__all__ = ['stochnorm', 'bss', 'co_tuning', 'delta', 'bi_tuning'] | 26.714286 | 65 | 0.679144 | from .stochnorm import *
from .bss import *
from .co_tuning import *
from .delta import *
from .bi_tuning import *
__all__ = ['stochnorm', 'bss', 'co_tuning', 'delta', 'bi_tuning'] | true | true |
f73cbc9c60f9e8a9868301eef42e3e0f1610c845 | 980 | py | Python | python/microphone.py | Coolstuff14/RaspberryPI-ws2812b-audio-reactive | 561a5bee9fd688b908d69ab81ef5af3e2cabd9cb | [
"MIT"
] | 1 | 2021-01-07T02:03:16.000Z | 2021-01-07T02:03:16.000Z | python/microphone.py | Coolstuff14/RaspberryPI-ws2812b-audio-reactive | 561a5bee9fd688b908d69ab81ef5af3e2cabd9cb | [
"MIT"
] | null | null | null | python/microphone.py | Coolstuff14/RaspberryPI-ws2812b-audio-reactive | 561a5bee9fd688b908d69ab81ef5af3e2cabd9cb | [
"MIT"
] | null | null | null | import time
import numpy as np
import pyaudio
import config
import sys
def start_stream(callback):
    """Open the default microphone and stream audio chunks to *callback*.

    Audio is read from PyAudio in blocks of MIC_RATE / FPS frames; each
    block is decoded from 16-bit PCM, converted to a float32 numpy array
    and handed to ``callback``.  Runs until the process exits; after more
    than 10000 buffer overflows the process is terminated via sys.exit().

    :param callback: callable receiving one np.ndarray of float32 samples
        per audio block.
    """
    p = pyaudio.PyAudio()
    frames_per_buffer = int(config.MIC_RATE / config.FPS)
    stream = p.open(format=pyaudio.paInt16,
                    channels=1,
                    rate=config.MIC_RATE,
                    input=True,
                    frames_per_buffer=frames_per_buffer)
    overflows = 0
    prev_ovf_time = time.time()
    try:
        while True:
            try:
                # np.frombuffer replaces the deprecated np.fromstring for
                # decoding raw PCM bytes; astype() also copies out of the
                # read-only buffer view.
                y = np.frombuffer(stream.read(frames_per_buffer),
                                  dtype=np.int16)
                y = y.astype(np.float32)
                callback(y)
            except IOError:
                # Input buffer overran; count it and report at most once
                # per second.
                overflows += 1
                if time.time() > prev_ovf_time + 1:
                    prev_ovf_time = time.time()
                    print('Audio buffer has overflowed {} times'.format(overflows))
                    if overflows > 10000:
                        sys.exit()
    finally:
        # This cleanup was previously unreachable (placed after the
        # infinite loop).  try/finally runs it even when sys.exit()
        # raises SystemExit.
        stream.stop_stream()
        stream.close()
        p.terminate()
| 26.486486 | 79 | 0.55102 | import time
import numpy as np
import pyaudio
import config
import sys
def start_stream(callback):
p = pyaudio.PyAudio()
frames_per_buffer = int(config.MIC_RATE / config.FPS)
stream = p.open(format=pyaudio.paInt16,
channels=1,
rate=config.MIC_RATE,
input=True,
frames_per_buffer=frames_per_buffer)
overflows = 0
prev_ovf_time = time.time()
while True:
try:
y = np.fromstring(stream.read(frames_per_buffer), dtype=np.int16)
y = y.astype(np.float32)
callback(y)
except IOError:
overflows += 1
if time.time() > prev_ovf_time + 1:
prev_ovf_time = time.time()
print('Audio buffer has overflowed {} times'.format(overflows))
if overflows > 10000:
sys.exit()
stream.stop_stream()
stream.close()
p.terminate()
| true | true |
f73cbf2bab99a7328feb35771cde3ea1764c3913 | 60 | py | Python | DIRE/neural_model/model/encoder.py | lukedram/DIRE | f2149bad5d655938bb682fddd33e6cd652f0bf4a | [
"MIT"
] | 43 | 2019-11-20T18:19:05.000Z | 2022-03-30T11:56:33.000Z | DIRE/neural_model/model/encoder.py | lukedram/DIRE | f2149bad5d655938bb682fddd33e6cd652f0bf4a | [
"MIT"
] | 8 | 2020-05-07T01:34:02.000Z | 2021-04-15T14:06:14.000Z | DIRE/neural_model/model/encoder.py | lukedram/DIRE | f2149bad5d655938bb682fddd33e6cd652f0bf4a | [
"MIT"
] | 15 | 2019-11-19T14:15:36.000Z | 2021-06-04T17:54:54.000Z | import torch.nn as nn
class Encoder(nn.Module):
    """Placeholder encoder module.

    NOTE(review): no layers or forward() are defined here -- subclasses
    (or a later revision) are expected to supply the actual encoding
    logic; as written this is an empty nn.Module.
    """
    pass
| 8.571429 | 25 | 0.683333 | import torch.nn as nn
class Encoder(nn.Module):
pass
| true | true |
f73cbf51f9b21d8b224cf99c1bb40bb26089e615 | 610 | py | Python | d1.py | jbruns/advent-of-code-2018 | f15f469e338e7c9a7da4c3490746af91a6871637 | [
"MIT"
] | null | null | null | d1.py | jbruns/advent-of-code-2018 | f15f469e338e7c9a7da4c3490746af91a6871637 | [
"MIT"
] | null | null | null | d1.py | jbruns/advent-of-code-2018 | f15f469e338e7c9a7da4c3490746af91a6871637 | [
"MIT"
] | null | null | null | import itertools, sys, time
# Advent of Code 2018 day 1: read one frequency change per stdin line.
frequencies = [int(x) for x in sys.stdin]
# Part 1: the resulting frequency is just the sum of all changes.
print(sum(frequencies))
startTime = time.time()
# initialize the running frequency at zero and the occurrences dict,
# mapping each frequency seen so far to how many times it has occurred
# (zero itself counts as already seen once)
f = 0
o = {0: 1}
# Part 2: cycle through the change list repeatedly until a frequency repeats.
for i in itertools.cycle(frequencies):
    # maintain the running frequency as we loop
    f += i
    # record this frequency and bump its occurrence count in one statement
    result = o[f] = o.setdefault(f, 0) + 1
    # the first frequency whose count reaches two is the answer
    if result == 2:
        print(f)
        print("%s seconds" % (time.time() - startTime))
break | 32.105263 | 82 | 0.663934 | import itertools, sys, time
frequencies = [int(x) for x in sys.stdin]
print(sum(frequencies))
startTime = time.time()
f = 0
o = {0: 1}
for i in itertools.cycle(frequencies):
f += i
result = o[f] = o.setdefault(f, 0) + 1
if result == 2:
print(f)
print("%s seconds" % (time.time() - startTime))
break | true | true |
f73cbf7ba96aea8aafa0a276cba70a6c852db5b3 | 4,042 | py | Python | google/ads/google_ads/v1/proto/common/feed_common_pb2.py | jwygoda/google-ads-python | 863892b533240cb45269d9c2cceec47e2c5a8b68 | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v1/proto/common/feed_common_pb2.py | jwygoda/google-ads-python | 863892b533240cb45269d9c2cceec47e2c5a8b68 | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v1/proto/common/feed_common_pb2.py | jwygoda/google-ads-python | 863892b533240cb45269d9c2cceec47e2c5a8b68 | [
"Apache-2.0"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v1/proto/common/feed_common.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v1/proto/common/feed_common.proto',
package='google.ads.googleads.v1.common',
syntax='proto3',
serialized_options=_b('\n\"com.google.ads.googleads.v1.commonB\017FeedCommonProtoP\001ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v1/common;common\242\002\003GAA\252\002\036Google.Ads.GoogleAds.V1.Common\312\002\036Google\\Ads\\GoogleAds\\V1\\Common\352\002\"Google::Ads::GoogleAds::V1::Common'),
serialized_pb=_b('\n6google/ads/googleads_v1/proto/common/feed_common.proto\x12\x1egoogle.ads.googleads.v1.common\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1cgoogle/api/annotations.proto\"p\n\x05Money\x12\x33\n\rcurrency_code\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\ramount_micros\x18\x02 \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\xea\x01\n\"com.google.ads.googleads.v1.commonB\x0f\x46\x65\x65\x64\x43ommonProtoP\x01ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v1/common;common\xa2\x02\x03GAA\xaa\x02\x1eGoogle.Ads.GoogleAds.V1.Common\xca\x02\x1eGoogle\\Ads\\GoogleAds\\V1\\Common\xea\x02\"Google::Ads::GoogleAds::V1::Commonb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_MONEY = _descriptor.Descriptor(
name='Money',
full_name='google.ads.googleads.v1.common.Money',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='currency_code', full_name='google.ads.googleads.v1.common.Money.currency_code', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='amount_micros', full_name='google.ads.googleads.v1.common.Money.amount_micros', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=152,
serialized_end=264,
)
_MONEY.fields_by_name['currency_code'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MONEY.fields_by_name['amount_micros'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
DESCRIPTOR.message_types_by_name['Money'] = _MONEY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Money = _reflection.GeneratedProtocolMessageType('Money', (_message.Message,), dict(
DESCRIPTOR = _MONEY,
__module__ = 'google.ads.googleads_v1.proto.common.feed_common_pb2'
,
__doc__ = """Represents a price in a particular currency.
Attributes:
currency_code:
Three-character ISO 4217 currency code.
amount_micros:
Amount in micros. One million is equivalent to one unit.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v1.common.Money)
))
_sym_db.RegisterMessage(Money)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 43.462366 | 677 | 0.781049 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v1/proto/common/feed_common.proto',
package='google.ads.googleads.v1.common',
syntax='proto3',
serialized_options=_b('\n\"com.google.ads.googleads.v1.commonB\017FeedCommonProtoP\001ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v1/common;common\242\002\003GAA\252\002\036Google.Ads.GoogleAds.V1.Common\312\002\036Google\\Ads\\GoogleAds\\V1\\Common\352\002\"Google::Ads::GoogleAds::V1::Common'),
serialized_pb=_b('\n6google/ads/googleads_v1/proto/common/feed_common.proto\x12\x1egoogle.ads.googleads.v1.common\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1cgoogle/api/annotations.proto\"p\n\x05Money\x12\x33\n\rcurrency_code\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\ramount_micros\x18\x02 \x01(\x0b\x32\x1b.google.protobuf.Int64ValueB\xea\x01\n\"com.google.ads.googleads.v1.commonB\x0f\x46\x65\x65\x64\x43ommonProtoP\x01ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v1/common;common\xa2\x02\x03GAA\xaa\x02\x1eGoogle.Ads.GoogleAds.V1.Common\xca\x02\x1eGoogle\\Ads\\GoogleAds\\V1\\Common\xea\x02\"Google::Ads::GoogleAds::V1::Commonb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_MONEY = _descriptor.Descriptor(
name='Money',
full_name='google.ads.googleads.v1.common.Money',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='currency_code', full_name='google.ads.googleads.v1.common.Money.currency_code', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='amount_micros', full_name='google.ads.googleads.v1.common.Money.amount_micros', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=152,
serialized_end=264,
)
_MONEY.fields_by_name['currency_code'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MONEY.fields_by_name['amount_micros'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
DESCRIPTOR.message_types_by_name['Money'] = _MONEY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Money = _reflection.GeneratedProtocolMessageType('Money', (_message.Message,), dict(
DESCRIPTOR = _MONEY,
__module__ = 'google.ads.googleads_v1.proto.common.feed_common_pb2'
,
__doc__ = """Represents a price in a particular currency.
Attributes:
currency_code:
Three-character ISO 4217 currency code.
amount_micros:
Amount in micros. One million is equivalent to one unit.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v1.common.Money)
))
_sym_db.RegisterMessage(Money)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| true | true |
f73cbfc71b037b76a31f00e51e456e1555ab18b4 | 1,378 | py | Python | ltf/generate_samples.py | KT12/Training | ac4de382a1387ccfe51404eb3302cc518762a781 | [
"MIT"
] | 1 | 2017-08-17T04:44:53.000Z | 2017-08-17T04:44:53.000Z | ltf/generate_samples.py | KT12/training | ac4de382a1387ccfe51404eb3302cc518762a781 | [
"MIT"
] | null | null | null | ltf/generate_samples.py | KT12/training | ac4de382a1387ccfe51404eb3302cc518762a781 | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
from functions import *
# Cluster-generation / k-means demo parameters.
n_features = 2
n_clusters = 3
n_samples_per_cluster = 500
seed = 700
embiggen_factor = 70
# NOTE(review): np.random.seed() is called WITHOUT the `seed` above, so
# numpy's RNG is reseeded from the OS; `seed` is only forwarded to
# create_samples below -- confirm which behaviour is intended.
np.random.seed()
# Build the TF graph: synthetic clustered samples, a random pick of initial
# centroids, the nearest-centroid assignment, and the recomputed centroids.
data_centroids, samples = create_samples(n_clusters, n_samples_per_cluster, n_features,
                                         embiggen_factor, seed)
initial_centroids = choose_random_centroids(samples, n_clusters)
nearest_indices = assign_to_nearest(samples, initial_centroids)
updated_centroids = update_centroids(samples, nearest_indices, n_clusters)
# NOTE(review): this initializer op is built but never session.run() here.
model = tf.global_variables_initializer()
with tf.Session() as session:
    sample_values = session.run(samples)
    #plot_clusters(sample_values, initial_centroids, n_samples_per_cluster)
    for i in range(1024):
        # NOTE(review): unless update_centroids assigns into tf.Variables,
        # every run re-evaluates the graph from the same initial centroids,
        # so this loop may not actually iterate k-means -- verify against
        # the functions module.
        updated_centroid_value = session.run(updated_centroids)
        if i%128 == 0:
            # NOTE(review): this prints the Tensor object, not its values;
            # session.run(nearest_indices) is probably what was intended.
            print(nearest_indices)
            plot_to_nearest(sample_values, updated_centroid_value, nearest_indices)
# Code to run iteratively, but not display original distribution below
# Also doesn't change the color of the dots to closest centroid
# model = tf.global_variables_initializer()
# with tf.Session() as session:
# sample_values = session.run(samples)
# for i in range(1024):
# updated_centroid_value = session.run(updated_centroids)
# if i%64 == 0:
# plot_clusters(sample_values, updated_centroid_value, n_samples_per_cluster) | 36.263158 | 88 | 0.761974 | import tensorflow as tf
import numpy as np
from functions import *
n_features = 2
n_clusters = 3
n_samples_per_cluster = 500
seed = 700
embiggen_factor = 70
np.random.seed()
data_centroids, samples = create_samples(n_clusters, n_samples_per_cluster, n_features,
embiggen_factor, seed)
initial_centroids = choose_random_centroids(samples, n_clusters)
nearest_indices = assign_to_nearest(samples, initial_centroids)
updated_centroids = update_centroids(samples, nearest_indices, n_clusters)
model = tf.global_variables_initializer()
with tf.Session() as session:
sample_values = session.run(samples)
for i in range(1024):
updated_centroid_value = session.run(updated_centroids)
if i%128 == 0:
print(nearest_indices)
plot_to_nearest(sample_values, updated_centroid_value, nearest_indices)
# model = tf.global_variables_initializer()
# with tf.Session() as session:
# sample_values = session.run(samples)
# for i in range(1024):
# updated_centroid_value = session.run(updated_centroids)
# if i%64 == 0:
# plot_clusters(sample_values, updated_centroid_value, n_samples_per_cluster) | true | true |
f73cbff97795e0691af8ab3a7ccb4cfc17d7868c | 1,074 | py | Python | setup.py | keiichishima/echonetlite | b8f1cfcf57bae75b76262a7a13b9bb27f36fc1dc | [
"BSD-2-Clause"
] | 5 | 2018-01-15T08:18:12.000Z | 2021-05-27T02:53:28.000Z | setup.py | keiichishima/echonetlite | b8f1cfcf57bae75b76262a7a13b9bb27f36fc1dc | [
"BSD-2-Clause"
] | 1 | 2019-02-20T09:26:22.000Z | 2019-04-02T01:00:21.000Z | setup.py | keiichishima/echonetlite | b8f1cfcf57bae75b76262a7a13b9bb27f36fc1dc | [
"BSD-2-Clause"
] | 2 | 2017-04-05T04:04:27.000Z | 2018-01-22T14:22:22.000Z | #!/usr/bin/env python
from setuptools import setup
try:
from pypandoc import convert_file
read_me = lambda f: convert_file(f, 'rst')
except ImportError:
print('pypandoc is not installed.')
read_me = lambda f: open(f, 'r').read()
setup(name='echonetlite',
version='0.1.0',
description='Echonet Lite',
long_description=read_me('README.md'),
author='Keiichi SHIMA',
author_email='keiichi@iijlab.net',
url='https://github.com/keiichishima/echonetlite',
packages=['echonetlite'],
install_requires=['Twisted>=16.3.0'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3.5',
'Topic :: Home Automation',
'Topic :: System :: Networking',
'Topic :: Software Development :: Libraries :: Python Modules'],
license='BSD License',
)
| 32.545455 | 74 | 0.612663 |
from setuptools import setup
try:
from pypandoc import convert_file
read_me = lambda f: convert_file(f, 'rst')
except ImportError:
print('pypandoc is not installed.')
read_me = lambda f: open(f, 'r').read()
setup(name='echonetlite',
version='0.1.0',
description='Echonet Lite',
long_description=read_me('README.md'),
author='Keiichi SHIMA',
author_email='keiichi@iijlab.net',
url='https://github.com/keiichishima/echonetlite',
packages=['echonetlite'],
install_requires=['Twisted>=16.3.0'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3.5',
'Topic :: Home Automation',
'Topic :: System :: Networking',
'Topic :: Software Development :: Libraries :: Python Modules'],
license='BSD License',
)
| true | true |
f73cc125b672ed61f967b8ca253b3d35daa76596 | 91 | py | Python | ex110.py | jgabriel1607/Python | d6b75519eb8f0d4fef944e1690ba8914d81a5d16 | [
"MIT"
] | null | null | null | ex110.py | jgabriel1607/Python | d6b75519eb8f0d4fef944e1690ba8914d81a5d16 | [
"MIT"
] | null | null | null | ex110.py | jgabriel1607/Python | d6b75519eb8f0d4fef944e1690ba8914d81a5d16 | [
"MIT"
] | null | null | null | from modulos import moeda
# Read a price from the user (the prompt is user-facing Portuguese text:
# "Enter the price: R$" -- left untranslated since it is runtime output).
p = float(input('Digite o preço: R$ '))
# Print a summary for the price with arguments 80 and 35.
# NOTE(review): presumably an 80% increase and a 35% discount -- confirm
# against the project-local modulos.moeda.resumo helper.
moeda.resumo(p, 80, 35)
| 18.2 | 39 | 0.681319 | from modulos import moeda
p = float(input('Digite o preço: R$ '))
moeda.resumo(p, 80, 35)
| true | true |
f73cc1e1f027b6872ae33d68f3c4888d5df09ea0 | 484 | py | Python | height_map/timeit.py | jaluebbe/HeightMap | 1463f9ef16f4b12aec7164250c57c7b494b1d813 | [
"MIT"
] | 5 | 2019-09-01T12:09:58.000Z | 2019-11-24T02:06:29.000Z | height_map/timeit.py | jaluebbe/HeightMap | 1463f9ef16f4b12aec7164250c57c7b494b1d813 | [
"MIT"
] | 1 | 2019-09-06T17:34:53.000Z | 2019-09-06T17:34:53.000Z | height_map/timeit.py | jaluebbe/HeightMap | 1463f9ef16f4b12aec7164250c57c7b494b1d813 | [
"MIT"
] | 1 | 2019-09-01T12:10:39.000Z | 2019-09-01T12:10:39.000Z | import time
import logging
logger = logging.getLogger(__name__)


def timeit(method):
    """Decorator reporting how long each call to *method* takes.

    If the call carries a ``log_time`` dict keyword argument, the elapsed
    time in whole milliseconds is stored into that dict under ``log_name``
    (defaulting to the method name upper-cased).  Otherwise the timing is
    emitted through this module's logger at WARNING level.

    Note: ``log_time``/``log_name`` are also forwarded to *method*, so a
    wrapped function must tolerate them (e.g. accept ``**kwargs``).
    """
    from functools import wraps

    @wraps(method)  # preserve the wrapped function's name/docstring
    def timed(*args, **kw):
        # perf_counter is monotonic, so the interval is immune to
        # wall-clock adjustments (time.time() can jump backwards).
        ts = time.perf_counter()
        result = method(*args, **kw)
        te = time.perf_counter()
        elapsed_ms = (te - ts) * 1000
        if 'log_time' in kw:
            name = kw.get('log_name', method.__name__.upper())
            kw['log_time'][name] = int(elapsed_ms)
        else:
            # Lazy %-args: the message is only formatted if emitted.
            logger.warning('%r %2.2f ms', method.__name__, elapsed_ms)
        return result
    return timed
| 25.473684 | 79 | 0.543388 | import time
import logging
logger = logging.getLogger(__name__)
def timeit(method):
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
if 'log_time' in kw:
name = kw.get('log_name', method.__name__.upper())
kw['log_time'][name] = int((te - ts) * 1000)
else:
logger.warning('%r %2.2f ms' % (method.__name__, (te - ts) * 1000))
return result
return timed
| true | true |
f73cc419f9ae211dad2586ffe7ed7cd04c7a144d | 1,096 | py | Python | src/mds/core/models/queue.py | m-socha/sana.mds | 4d3b71b7ba939c91570fee4f60444cf07035bd51 | [
"BSD-3-Clause"
] | 2 | 2016-05-19T02:32:13.000Z | 2017-09-06T07:06:25.000Z | src/mds/core/models/queue.py | m-socha/sana.mds | 4d3b71b7ba939c91570fee4f60444cf07035bd51 | [
"BSD-3-Clause"
] | 6 | 2015-07-19T17:40:49.000Z | 2016-12-20T21:54:59.000Z | src/mds/core/models/queue.py | m-socha/sana.mds | 4d3b71b7ba939c91570fee4f60444cf07035bd51 | [
"BSD-3-Clause"
] | 14 | 2015-10-30T09:50:21.000Z | 2019-06-15T13:07:37.000Z | '''
Created on Aug 9, 2012
:author: Sana Development Team
:version: 2.0
'''
from django.db import models
from mds.api.utils import make_uuid
## Status codes for queue elements.  NOTE the trailing comma: Django's
## ``choices`` option requires an iterable of (value, label) pairs; the
## original ``((0,'Failed Dispatch'))`` collapsed to one plain 2-tuple,
## which is not a valid choices sequence.
QUEUE_STATUS = ((0, 'Failed Dispatch'),)

class EncounterQueueElement(models.Model):
    """ An element that is being processed
    """
    class Meta:
        app_label = "core"
    # A universally unique identifier, generated on creation.
    uuid = models.SlugField(max_length=36, unique=True, default=make_uuid, editable=False)
    # When the object was created.
    created = models.DateTimeField(auto_now_add=True)
    # Updated on modification.
    modified = models.DateTimeField(auto_now=True)
    # URL of the cached object (the original note called this its uuid).
    object_url = models.CharField(max_length=512)

    @property
    def object_uuid(self):
        # NOTE(review): placeholder -- presumably meant to derive the
        # uuid from object_url; currently always returns ''.
        return ''

    # Dump of the form data for the object.
    cache = models.TextField(blank=True)
    # Current state in the queue (see QUEUE_STATUS).
    status = models.IntegerField(choices=QUEUE_STATUS)
    # Useful messages returned from processing.
    message = models.TextField(blank=True)
""" Useful messages returned from processing """ | 26.731707 | 90 | 0.663321 | from django.db import models
from mds.api.utils import make_uuid
QUEUE_STATUS=((0,'Failed Dispatch'))
class EncounterQueueElement(models.Model):
class Meta:
app_label = "core"
uuid = models.SlugField(max_length=36, unique=True, default=make_uuid, editable=False)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
object_url = models.CharField(max_length=512)
@property
def object_uuid(self):
return ''
cache = models.TextField(blank=True)
status = models.IntegerField(choices=QUEUE_STATUS)
message = models.TextField(blank=True) | true | true |
f73cc4568792220ea1ba39341b8438e4e5008863 | 4,780 | py | Python | models/AI-Model-Zoo/VAI-1.3-Model-Zoo-Code/PyTorch/pt_pointpillars_kitti_12000_100_10.8G_1.3/code/train/second/pytorch/builder/optimizer_builder.py | guochunhe/Vitis-AI | e86b6efae11f8703ee647e4a99004dc980b84989 | [
"Apache-2.0"
] | 1 | 2020-12-18T14:49:19.000Z | 2020-12-18T14:49:19.000Z | models/AI-Model-Zoo/VAI-1.3-Model-Zoo-Code/PyTorch/pt_pointpillars_kitti_12000_100_10.8G_1.3/code/train/second/pytorch/builder/optimizer_builder.py | guochunhe/Vitis-AI | e86b6efae11f8703ee647e4a99004dc980b84989 | [
"Apache-2.0"
] | null | null | null | models/AI-Model-Zoo/VAI-1.3-Model-Zoo-Code/PyTorch/pt_pointpillars_kitti_12000_100_10.8G_1.3/code/train/second/pytorch/builder/optimizer_builder.py | guochunhe/Vitis-AI | e86b6efae11f8703ee647e4a99004dc980b84989 | [
"Apache-2.0"
] | null | null | null |
# This code is based on: https://github.com/nutonomy/second.pytorch.git
#
# MIT License
# Copyright (c) 2018
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to build DetectionModel training optimizers."""
from torchplus.train import learning_schedules
import torch
def build(optimizer_config, params, name=None):
    """Construct a torch optimizer from an Optimizer proto message.

    Args:
        optimizer_config: An Optimizer proto message whose ``optimizer``
            oneof selects the optimizer type and hyperparameters.
        params: Iterable of parameters (or parameter groups) to optimize.
        name: Optional name attached to the optimizer for the checkpoint
            system; defaults to the oneof field name.

    Returns:
        A configured ``torch.optim`` optimizer instance.

    Raises:
        ValueError: If the optimizer type is unsupported, or moving
            averages are requested (not supported with torch).
    """
    chosen = optimizer_config.WhichOneof('optimizer')
    built = None
    if chosen == 'rms_prop_optimizer':
        cfg = optimizer_config.rms_prop_optimizer
        built = torch.optim.RMSprop(
            params,
            lr=_get_base_lr_by_lr_scheduler(cfg.learning_rate),
            alpha=cfg.decay,
            momentum=cfg.momentum_optimizer_value,
            eps=cfg.epsilon,
            weight_decay=cfg.weight_decay)
    elif chosen == 'momentum_optimizer':
        cfg = optimizer_config.momentum_optimizer
        built = torch.optim.SGD(
            params,
            lr=_get_base_lr_by_lr_scheduler(cfg.learning_rate),
            momentum=cfg.momentum_optimizer_value,
            weight_decay=cfg.weight_decay)
    elif chosen == 'adam_optimizer':
        cfg = optimizer_config.adam_optimizer
        built = torch.optim.Adam(
            params,
            lr=_get_base_lr_by_lr_scheduler(cfg.learning_rate),
            weight_decay=cfg.weight_decay)
    if built is None:
        raise ValueError('Optimizer %s not supported.' % chosen)
    if optimizer_config.use_moving_average:
        raise ValueError('torch don\'t support moving average')
    # Attach a name so the checkpoint system can identify the optimizer.
    built.name = chosen if name is None else name
    return built
def _get_base_lr_by_lr_scheduler(learning_rate_config):
base_lr = None
learning_rate_type = learning_rate_config.WhichOneof('learning_rate')
if learning_rate_type == 'constant_learning_rate':
config = learning_rate_config.constant_learning_rate
base_lr = config.learning_rate
if learning_rate_type == 'exponential_decay_learning_rate':
config = learning_rate_config.exponential_decay_learning_rate
base_lr = config.initial_learning_rate
if learning_rate_type == 'manual_step_learning_rate':
config = learning_rate_config.manual_step_learning_rate
base_lr = config.initial_learning_rate
if not config.schedule:
raise ValueError('Empty learning rate schedule.')
if learning_rate_type == 'cosine_decay_learning_rate':
config = learning_rate_config.cosine_decay_learning_rate
base_lr = config.learning_rate_base
if base_lr is None:
raise ValueError(
'Learning_rate %s not supported.' % learning_rate_type)
return base_lr
| 39.833333 | 80 | 0.719247 |
from torchplus.train import learning_schedules
import torch
def build(optimizer_config, params, name=None):
optimizer_type = optimizer_config.WhichOneof('optimizer')
optimizer = None
if optimizer_type == 'rms_prop_optimizer':
config = optimizer_config.rms_prop_optimizer
optimizer = torch.optim.RMSprop(
params,
lr=_get_base_lr_by_lr_scheduler(config.learning_rate),
alpha=config.decay,
momentum=config.momentum_optimizer_value,
eps=config.epsilon,
weight_decay=config.weight_decay)
if optimizer_type == 'momentum_optimizer':
config = optimizer_config.momentum_optimizer
optimizer = torch.optim.SGD(
params,
lr=_get_base_lr_by_lr_scheduler(config.learning_rate),
momentum=config.momentum_optimizer_value,
weight_decay=config.weight_decay)
if optimizer_type == 'adam_optimizer':
config = optimizer_config.adam_optimizer
optimizer = torch.optim.Adam(
params,
lr=_get_base_lr_by_lr_scheduler(config.learning_rate),
weight_decay=config.weight_decay)
if optimizer is None:
raise ValueError('Optimizer %s not supported.' % optimizer_type)
if optimizer_config.use_moving_average:
raise ValueError('torch don\'t support moving average')
if name is None:
# assign a name to optimizer for checkpoint system
optimizer.name = optimizer_type
else:
optimizer.name = name
return optimizer
def _get_base_lr_by_lr_scheduler(learning_rate_config):
base_lr = None
learning_rate_type = learning_rate_config.WhichOneof('learning_rate')
if learning_rate_type == 'constant_learning_rate':
config = learning_rate_config.constant_learning_rate
base_lr = config.learning_rate
if learning_rate_type == 'exponential_decay_learning_rate':
config = learning_rate_config.exponential_decay_learning_rate
base_lr = config.initial_learning_rate
if learning_rate_type == 'manual_step_learning_rate':
config = learning_rate_config.manual_step_learning_rate
base_lr = config.initial_learning_rate
if not config.schedule:
raise ValueError('Empty learning rate schedule.')
if learning_rate_type == 'cosine_decay_learning_rate':
config = learning_rate_config.cosine_decay_learning_rate
base_lr = config.learning_rate_base
if base_lr is None:
raise ValueError(
'Learning_rate %s not supported.' % learning_rate_type)
return base_lr
| true | true |
f73cc48814ec43a2ac07ab4955c2b43a42d0df9b | 551 | py | Python | app/WordList_Reader/application/routes.py | RobLewisQA/BChain_Project | 7bdf682ea1e23359f2958620ad0afb417892ba1e | [
"MIT"
] | null | null | null | app/WordList_Reader/application/routes.py | RobLewisQA/BChain_Project | 7bdf682ea1e23359f2958620ad0afb417892ba1e | [
"MIT"
] | null | null | null | app/WordList_Reader/application/routes.py | RobLewisQA/BChain_Project | 7bdf682ea1e23359f2958620ad0afb417892ba1e | [
"MIT"
] | 1 | 2021-04-18T14:09:27.000Z | 2021-04-18T14:09:27.000Z | import random
import application.BChain_WordList as bwords
import pandas as pd
from flask import Flask, redirect, request, url_for,render_template, Response, jsonify
from application import app
@app.route('/mnemonic_generator', methods=['GET'])
def mnemonic_generator():
    """Return 12 words drawn at random from the word list as JSON.

    Words are drawn with replacement (duplicates are possible), matching
    the original randint-based behaviour.  The response is the JSON form
    of a pandas Series named ``sp_words`` keyed by word position (0-11).
    """
    # Seed-phrase words are security-sensitive: draw them with the
    # ``secrets`` CSPRNG instead of the predictable ``random`` PRNG.
    import secrets
    seedphrase_words = [secrets.choice(bwords.wordlist) for _ in range(12)]
    # reset_index()/drop() preserves the original output shape: positional
    # integer keys mapped to words, with no extra index column.
    series = pd.Series(seedphrase_words, name="sp_words").reset_index().drop(columns='index')
    return series.to_json()
import application.BChain_WordList as bwords
import pandas as pd
from flask import Flask, redirect, request, url_for,render_template, Response, jsonify
from application import app
@app.route('/mnemonic_generator', methods=['GET'])
def mnemonic_generator():
seedphrase_words = []
while len(seedphrase_words) < 12:
seedphrase_words.append(bwords.wordlist[random.randint(0,len(bwords.wordlist)-1)])
series = pd.Series(seedphrase_words, name="sp_words").reset_index().drop(columns='index')
return series.to_json() | true | true |
f73cc914f05895d0ee7fe04577dfa97c7053eeaa | 51 | py | Python | Topsis_Sakshi_101917011/__init__.py | sakshi0309-champ/Topsis_Sakshi_101917011 | 962273a7ea7c14e041c0e674018b7ab4045e6ddd | [
"MIT"
] | 1 | 2022-03-10T05:14:59.000Z | 2022-03-10T05:14:59.000Z | Topsis_Sakshi_101917011/__init__.py | sakshi0309-champ/Topsis_Sakshi_101917011 | 962273a7ea7c14e041c0e674018b7ab4045e6ddd | [
"MIT"
] | null | null | null | Topsis_Sakshi_101917011/__init__.py | sakshi0309-champ/Topsis_Sakshi_101917011 | 962273a7ea7c14e041c0e674018b7ab4045e6ddd | [
"MIT"
] | 1 | 2022-02-27T07:00:23.000Z | 2022-02-27T07:00:23.000Z | from Topsis_Sakshi_101917011.topsis import MyTopsis | 51 | 51 | 0.921569 | from Topsis_Sakshi_101917011.topsis import MyTopsis | true | true |
f73cc9f1cb16d8f75ed889d9bad26e5df8797488 | 1,244 | py | Python | soluciones/pizza/main.py | carlosviveros/Soluciones | 115f4fa929c7854ca497e4c994352adc64565456 | [
"MIT"
] | 1 | 2022-02-02T04:44:56.000Z | 2022-02-02T04:44:56.000Z | soluciones/pizza/main.py | leugimkm/Soluciones | d71601c8d9b5e86e926f48d9e49462af8a956b6d | [
"MIT"
] | null | null | null | soluciones/pizza/main.py | leugimkm/Soluciones | d71601c8d9b5e86e926f48d9e49462af8a956b6d | [
"MIT"
] | null | null | null | """AyudaEnPython: https://www.facebook.com/groups/ayudapython
"""
from prototools import int_input, menu_input, banner, text_align
# Precio extra por cada ingrediente adicional.
ADICIONAL = 1.5
# Tasa de IVA (12 %).
IVA = 0.12
# Precios base por tamaño de pizza.
PRECIOS = {"pequeña": 6.5, "mediana": 12.35, "familiar": 22.5}


# PEP 8 (E731): use def instead of assigning lambdas, and document them.
def iva(precio):
    """Return the IVA (tax) amount for *precio*."""
    return precio * IVA


def precio_con_iva(precio, iva):
    """Return *precio* plus the given *iva* amount."""
    return precio + iva


def precio_sin_iva(t, c, i):
    """Return the pre-tax price for *c* pizzas of size *t* with *i* extras."""
    return PRECIOS[t] * c + (ADICIONAL * i)
def datos():
    """Prompt the user (in Spanish) for the order details.

    Returns:
        tuple: (size key from PRECIOS, quantity in 1-25, extras in 0-5).
    """
    # menu_input shows the sizes as a numbered menu and returns the choice.
    t = menu_input(tuple(PRECIOS.keys()), numbers=True, lang="es")
    c = int_input("Cantidad: ", min=1, max=25)
    i = int_input("Adicionales: ", min=0, max=5)
    return t, c, i
@banner("Boleta", 48)
def mostrar(t, c, i):
    """Return the formatted receipt for *c* pizzas of size *t* with *i* extras.

    The prototools ``banner`` decorator presumably frames the output under
    a "Boleta" heading 48 characters wide -- confirm against prototools
    docs.  Tab characters align the receipt columns.
    """
    precio = precio_sin_iva(t, c, i)
    return (
        f"{c} unidades {t}\t- {PRECIOS[t]:{5}.2f} c/u\t"
        f"$ {PRECIOS[t] * c:{6}.2f}\n"
        f"{i} ingredientes\t\t- {ADICIONAL:{5}.2f} c/u\t"
        f"$ {i * ADICIONAL:{6}.2f}\n\n"
        f"\t\t\tPrecio sin IVA\t$ "
        f"{precio:{6}.2f}\n"
        f"\t\t\tIVA 12%\t\t$ "
        f"{iva(precio):{6}.2f}\n"
        f"\t\t\tPrecio Final\t$ "
        f"{precio_con_iva(precio, iva(precio)):{6}.2f}"
    )
def main():
    """Run one order: show the title, read the order, print the receipt."""
    text_align("Ingresar Pedido", 48, align="center")
    t, c, i = datos()
    print(mostrar(t, c, i))
if __name__ == "__main__":
    main()
| 27.043478 | 66 | 0.573955 | from prototools import int_input, menu_input, banner, text_align
ADICIONAL = 1.5
IVA = 0.12
PRECIOS = {"pequeña": 6.5, "mediana": 12.35, "familiar": 22.5}
iva = lambda precio: precio * IVA
precio_con_iva = lambda precio, iva: precio + iva
precio_sin_iva = lambda t, c, i: PRECIOS[t] * c + (ADICIONAL * i)
def datos():
t = menu_input(tuple(PRECIOS.keys()), numbers=True, lang="es")
c = int_input("Cantidad: ", min=1, max=25)
i = int_input("Adicionales: ", min=0, max=5)
return t, c, i
@banner("Boleta", 48)
def mostrar(t, c, i):
precio = precio_sin_iva(t, c, i)
return (
f"{c} unidades {t}\t- {PRECIOS[t]:{5}.2f} c/u\t"
f"$ {PRECIOS[t] * c:{6}.2f}\n"
f"{i} ingredientes\t\t- {ADICIONAL:{5}.2f} c/u\t"
f"$ {i * ADICIONAL:{6}.2f}\n\n"
f"\t\t\tPrecio sin IVA\t$ "
f"{precio:{6}.2f}\n"
f"\t\t\tIVA 12%\t\t$ "
f"{iva(precio):{6}.2f}\n"
f"\t\t\tPrecio Final\t$ "
f"{precio_con_iva(precio, iva(precio)):{6}.2f}"
)
def main():
text_align("Ingresar Pedido", 48, align="center")
t, c, i = datos()
print(mostrar(t, c, i))
if __name__ == "__main__":
main()
| true | true |
f73ccafc72dc2428a82fc32eec25b5c429189627 | 1,614 | py | Python | lane_finder.py | desihsu/lane-finder | f40249c52909d5e235bee40b116965cf0641f871 | [
"MIT"
] | null | null | null | lane_finder.py | desihsu/lane-finder | f40249c52909d5e235bee40b116965cf0641f871 | [
"MIT"
] | null | null | null | lane_finder.py | desihsu/lane-finder | f40249c52909d5e235bee40b116965cf0641f871 | [
"MIT"
] | null | null | null | import sys
from moviepy.editor import VideoFileClip
import camera
import image_thresholding
import line_fitting
import matplotlib.image as mpimg
class Line:
    """Book-keeping for one detected lane line across video frames.

    Attributes:
        detected: True once the line was found in the previous frame, so
            the next search can start from the prior fit.
        fit: Most recent polynomial fit (None until first fitted).
        fitx: Most recent x pixel values for the line (None until fitted).
    """

    def __init__(self):
        self.detected = False  # lane line detected in previous iteration
        self.fit = None  # most recent polynomial fit
        self.fitx = None  # most recent x pixel values for line

    def __repr__(self):
        # Debug aid; fit data is summarised by presence only.
        return (f"{type(self).__name__}(detected={self.detected}, "
                f"fit={'set' if self.fit is not None else None}, "
                f"fitx={'set' if self.fitx is not None else None})")
def process_image(img):
    """Annotate one video frame with the detected lane lines.

    Relies on module-level state created in the __main__ block: camera
    calibration results (mtx, dist) and the two Line trackers
    (left_line, right_line).
    # NOTE(review): not safe to call before that setup has run.
    """
    # Threshold on colour/gradient, then warp to a top-down view.
    color_grad_combined = image_thresholding.color_grad(img)
    warped, Minv = camera.perspective_transform(color_grad_combined, mtx, dist)
    if left_line.detected and right_line.detected:
        # Lines known from the previous frame: search near the prior fits.
        (left_line.fit, right_line.fit,
         left_line.fitx, right_line.fitx,
         ploty) = line_fitting.search_around_poly(warped, left_line.fit,
                                                  right_line.fit)
    else:
        # First frame (or track lost): fit the polynomials from scratch.
        (left_line.fit, right_line.fit,
         left_line.fitx, right_line.fitx,
         ploty) = line_fitting.fit_polynomial(warped, detected=False)
        left_line.detected = True
        right_line.detected = True
    # Draw the detected lane back onto the original (unwarped) image.
    result = line_fitting.draw_lines(img, warped, Minv, left_line.fitx,
                                     right_line.fitx, ploty)
    return result
if __name__ == "__main__":
    # One-off camera calibration plus fresh lane trackers; process_image
    # reads these as module globals.
    mtx, dist = camera.calibrate()
    left_line = Line()
    right_line = Line()
    # Usage: python lane_finder.py <input .mp4 video or image file>
    if (sys.argv[1].split(".")[-1] == "mp4"):
        clip = VideoFileClip(sys.argv[1])
        output = clip.fl_image(process_image)
        output.write_videofile("output.mp4", audio=False)
    else:
        img = mpimg.imread(sys.argv[1])
        img = process_image(img)
mpimg.imsave("output.jpg", img) | 32.938776 | 79 | 0.651797 | import sys
from moviepy.editor import VideoFileClip
import camera
import image_thresholding
import line_fitting
import matplotlib.image as mpimg
class Line():
def __init__(self):
self.detected = False
self.fit = None
self.fitx = None
def process_image(img):
color_grad_combined = image_thresholding.color_grad(img)
warped, Minv = camera.perspective_transform(color_grad_combined, mtx, dist)
if left_line.detected and right_line.detected:
(left_line.fit, right_line.fit,
left_line.fitx, right_line.fitx,
ploty) = line_fitting.search_around_poly(warped, left_line.fit,
right_line.fit)
else:
(left_line.fit, right_line.fit,
left_line.fitx, right_line.fitx,
ploty) = line_fitting.fit_polynomial(warped, detected=False)
left_line.detected = True
right_line.detected = True
result = line_fitting.draw_lines(img, warped, Minv, left_line.fitx,
right_line.fitx, ploty)
return result
if __name__ == "__main__":
mtx, dist = camera.calibrate()
left_line = Line()
right_line = Line()
if (sys.argv[1].split(".")[-1] == "mp4"):
clip = VideoFileClip(sys.argv[1])
output = clip.fl_image(process_image)
output.write_videofile("output.mp4", audio=False)
else:
img = mpimg.imread(sys.argv[1])
img = process_image(img)
mpimg.imsave("output.jpg", img) | true | true |
f73ccb497ae212f31e28d3785d2f420d1dedc5b5 | 8,058 | py | Python | 03_Scripts/plot.py | willijm92/fsri-compartments-2018 | 8fd5ddd6f535d434df69e5853aa27db77d599ef3 | [
"MIT"
] | null | null | null | 03_Scripts/plot.py | willijm92/fsri-compartments-2018 | 8fd5ddd6f535d434df69e5853aa27db77d599ef3 | [
"MIT"
] | null | null | null | 03_Scripts/plot.py | willijm92/fsri-compartments-2018 | 8fd5ddd6f535d434df69e5853aa27db77d599ef3 | [
"MIT"
] | null | null | null | # plot.py
# --------------- #
# Import Packages #
# --------------- #
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Set seaborn as default plot config
sns.set()
sns.set_style("whitegrid")
from itertools import cycle
# ---------------------------------- #
# Define Subdirectories & Info Files #
# ---------------------------------- #
data_dir = '../01_Data/'
info_dir = '../02_Info/'
plot_dir = '../04_Charts/'
# Create plot dir if necessary
if not os.path.exists(plot_dir): os.makedirs(plot_dir)
# Read in channel list & create list of sensor groups
full_channel_list = pd.read_csv(f'{info_dir}channel_list.csv', index_col='Channel_Name')
# ------------------- #
# Set Plot Parameters #
# ------------------- #
label_size = 18
tick_size = 16
line_width = 2
event_font = 12
font_rotation = 60
legend_font = 12
fig_width = 10
fig_height = 8
# ---------------------- #
# User-Defined Functions #
# ---------------------- #
def timestamp_to_seconds(timestamp):
    """Convert a timestamp string to seconds past midnight.

    The first 11 characters (date plus separator) are skipped; the
    remainder is parsed as HH:MM:SS.
    """
    hours, minutes, seconds = timestamp[11:].split(':')
    return 3600 * int(hours) + 60 * int(minutes) + int(seconds)
def convert_timestamps(timestamps, start_time):
    """Return each timestamp as seconds elapsed relative to *start_time*."""
    return [timestamp_to_seconds(stamp) - start_time for stamp in timestamps]
def create_1plot_fig():
    """Create a new single-axis figure and reset the shared plot state.

    Returns:
        Tuple of (fig, ax1, marker cycle, x_max, y_min, y_max) with the
        axis bounds all zeroed.
    """
    fig, ax1 = plt.subplots(figsize=(fig_width, fig_height))
    sns.set_palette(sns.color_palette('deep', 8))
    plot_markers = cycle(['s', 'o', '^', 'd', 'h', 'p','v', '8', 'D', '*', '<', '>', 'H'])
    return fig, ax1, plot_markers, 0, 0, 0
def format_and_save_plot(y_lims, x_lims, secondary_axis_label, file_loc):
    """Format the current figure's axes, add event labels, and save as PDF.

    Uses module-level state set by the main loop: ax1/fig (current plot),
    Events (event times and labels), and secondary_axis_scale.
    """
    # NOTE(review): secondary_axis_scale is only assigned after a Velocity
    # channel has been plotted; other scaled labels would raise NameError.
    # Set tick parameters
    ax1.tick_params(labelsize=tick_size, length=0, width=0)
    # Scale axes limits & labels
    ax1.grid(True)
    ax1.set_ylim(bottom=y_lims[0], top=y_lims[1])
    ax1.set_xlim(x_lims[0] - x_lims[1] / 500, x_lims[1])
    ax1.set_xlabel('Time (s)', fontsize=label_size)
    # Secondary y-axis parameters ('None' sentinel string means no axis)
    if secondary_axis_label != 'None':
        ax2 = ax1.twinx()
        ax2.tick_params(labelsize=tick_size, length=0, width=0)
        ax2.set_ylabel(secondary_axis_label, fontsize=label_size)
        if secondary_axis_label == 'Temperature ($^\circ$F)':
            ax2.set_ylim([y_lims[0] * 1.8 + 32., y_lims[1] * 1.8 + 32.])
        else:
            ax2.set_ylim([secondary_axis_scale * y_lims[0], secondary_axis_scale * y_lims[1]])
        ax2.yaxis.grid(visible=None)
    # Add vertical lines and labels for timing information (if available)
    ax3 = ax1.twiny()
    ax3.set_xlim(x_lims[0] - x_lims[1] / 500, x_lims[1])
    ax3.set_xticks([_x for _x in Events.index.values if _x >= x_lims[0] and _x <= x_lims[1]])
    ax3.tick_params(axis='x', width=1, labelrotation=font_rotation, labelsize=event_font)
    ax3.set_xticklabels([Events['Event'][_x] for _x in Events.index.values if _x >= x_lims[0] and _x <= x_lims[1]], fontsize=event_font, ha='left')
    ax3.xaxis.grid(visible=None)
    # Add legend, clean up whitespace padding, save chart as pdf, & close fig
    handles1, labels1 = ax1.get_legend_handles_labels()
    ax1.legend(handles1, labels1, loc='best', fontsize=legend_font, handlelength=3, frameon=True, framealpha=0.75)
    fig.tight_layout()
    plt.savefig(file_loc)
    plt.close()
# ----------------- #
# Main Body of Code #
# ----------------- #
# Loop through test data files & create plots
for f in os.listdir(data_dir):
    # Skip if f is not an exp data file
    if any([not f.endswith('.csv'), f.startswith('.'), f.endswith('_Events.csv')]):
        continue
    # Get test name from file & load data & event files for given experiment
    test_name = f[:-4]
    data_df = pd.read_csv(f'{data_dir}{f}', index_col='Time')
    # Load the events file once (the original read the same file twice).
    Events = pd.read_csv(f'{data_dir}{test_name}_Events.csv')
    print (f'--- Loaded data for {test_name} ---')
    # Create index column of time relative to ignition in events file
    Events.rename(columns={'Time':'Timestamp'}, inplace=True)
    # Reuse the helper instead of re-parsing HH:MM:SS by hand.
    start_time = timestamp_to_seconds(Events.loc[0, 'Timestamp'])
    Events['Time'] = convert_timestamps(Events['Timestamp'], start_time)
    Events = Events.set_index('Time')
    # Define channel list as full list & drop unused channels for given experiment
    channel_list = full_channel_list[[i in data_df.columns for i in full_channel_list.index]]
    # Loop through channel groups to plot data from all channels in each group
    for group in channel_list.groupby('Group').groups:
        # Create figure for plot
        print (f"  Plotting {group.replace('_',' ')}")
        fig, ax1, plot_markers, x_max, y_min, y_max = create_1plot_fig()
        # Loop through each channel in given group
        for channel in channel_list.groupby('Group').get_group(group).index.values:
            # Set secondary axis default to None, get data type from channel list
            secondary_axis_label = 'None'
            data_type = channel_list.loc[channel, 'Type']
            # Set plot parameters based on data type
            if data_type == 'Temperature':
                # Set y-axis labels & y_min
                ax1.set_ylabel('Temperature ($^\circ$C)', fontsize=label_size)
                secondary_axis_label = 'Temperature ($^\circ$F)'
                y_min = 0
            elif data_type == 'Velocity':
                # Apply moving average & set y-axis labels, secondary scale
                data_df[channel] = data_df[channel].rolling(window=10, center=True).mean()
                ax1.set_ylabel('Velocity (m/s)', fontsize=label_size)
                secondary_axis_label = 'Velocity (mph)'
                secondary_axis_scale = 2.23694
            elif data_type == 'Pressure':
                # Apply moving average & set y-axis label
                data_df[channel] = data_df[channel].rolling(window=10, center=True).mean()
                ax1.set_ylabel('Pressure (Pa)', fontsize=label_size)
            elif data_type == 'Oxygen':
                # Set y-axis label
                ax1.set_ylabel('O$_2$ Concentration (%)', fontsize=label_size)
            elif data_type.endswith('Heat Flux'):
                # Apply moving average & set y-axis label
                data_df[channel] = data_df[channel].rolling(window=10, center=True).mean()
                ax1.set_ylabel('Heat Flux (kW/m$^2$)', fontsize=label_size)
            elif data_type == 'Heat Release Rate':
                # Set y-axis label
                ax1.set_ylabel('Heat Release Rate (kW)', fontsize=label_size)
            # Determine x max bound for current data & update max of chart if necessary
            x_end = data_df[channel].index[-1]
            if x_end > x_max:
                x_max = x_end
            # Plot channel data
            ax1.plot(data_df.index, data_df[channel], lw=line_width,
                     marker=next(plot_markers), markevery=30, mew=3, mec='none', ms=7,
                     label=channel_list.loc[channel, 'Label'])
            # Check if y min/max need to be updated
            if data_df[channel].min() - abs(data_df[channel].min() * .1) < y_min:
                y_min = data_df[channel].min() - abs(data_df[channel].min() * .1)
            if data_df[channel].max() * 1.1 > y_max:
                y_max = data_df[channel].max() * 1.1
        # Add vertical lines for event labels; label to y axis
        [ax1.axvline(_x, color='0.25', lw=1.5) for _x in Events.index.values if _x >= 0 and _x <= x_max]
        # Define/create save directory, call function to format & save plot
        save_dir = f'{plot_dir}{test_name}/'
        if not os.path.exists(save_dir): os.makedirs(save_dir)
        format_and_save_plot([y_min, y_max], [0, x_max], secondary_axis_label, f'{save_dir}{group}.pdf')
    print()
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
sns.set_style("whitegrid")
from itertools import cycle
data_dir = '../01_Data/'
info_dir = '../02_Info/'
plot_dir = '../04_Charts/'
if not os.path.exists(plot_dir): os.makedirs(plot_dir)
full_channel_list = pd.read_csv(f'{info_dir}channel_list.csv', index_col='Channel_Name')
label_size = 18
tick_size = 16
line_width = 2
event_font = 12
font_rotation = 60
legend_font = 12
fig_width = 10
fig_height = 8
def timestamp_to_seconds(timestamp):
timestamp = timestamp[11:]
hh, mm, ss = timestamp.split(':')
return(3600 * int(hh) + 60 * int(mm) + int(ss))
def convert_timestamps(timestamps, start_time):
raw_seconds = map(timestamp_to_seconds, timestamps)
return([s - start_time for s in list(raw_seconds)])
def create_1plot_fig():
fig, ax1 = plt.subplots(figsize=(fig_width, fig_height))
current_palette_8 = sns.color_palette('deep', 8)
sns.set_palette(current_palette_8)
plot_markers = cycle(['s', 'o', '^', 'd', 'h', 'p','v', '8', 'D', '*', '<', '>', 'H'])
x_max, y_min, y_max = 0, 0, 0
return(fig, ax1, plot_markers, x_max, y_min, y_max)
def format_and_save_plot(y_lims, x_lims, secondary_axis_label, file_loc):
ax1.tick_params(labelsize=tick_size, length=0, width=0)
ax1.grid(True)
ax1.set_ylim(bottom=y_lims[0], top=y_lims[1])
ax1.set_xlim(x_lims[0] - x_lims[1] / 500, x_lims[1])
ax1.set_xlabel('Time (s)', fontsize=label_size)
if secondary_axis_label != 'None':
ax2 = ax1.twinx()
ax2.tick_params(labelsize=tick_size, length=0, width=0)
ax2.set_ylabel(secondary_axis_label, fontsize=label_size)
if secondary_axis_label == 'Temperature ($^\circ$F)':
ax2.set_ylim([y_lims[0] * 1.8 + 32., y_lims[1] * 1.8 + 32.])
else:
ax2.set_ylim([secondary_axis_scale * y_lims[0], secondary_axis_scale * y_lims[1]])
ax2.yaxis.grid(visible=None)
ax3 = ax1.twiny()
ax3.set_xlim(x_lims[0] - x_lims[1] / 500, x_lims[1])
ax3.set_xticks([_x for _x in Events.index.values if _x >= x_lims[0] and _x <= x_lims[1]])
ax3.tick_params(axis='x', width=1, labelrotation=font_rotation, labelsize=event_font)
ax3.set_xticklabels([Events['Event'][_x] for _x in Events.index.values if _x >= x_lims[0] and _x <= x_lims[1]], fontsize=event_font, ha='left')
ax3.xaxis.grid(visible=None)
handles1, labels1 = ax1.get_legend_handles_labels()
ax1.legend(handles1, labels1, loc='best', fontsize=legend_font, handlelength=3, frameon=True, framealpha=0.75)
fig.tight_layout()
plt.savefig(file_loc)
plt.close()
for f in os.listdir(data_dir):
if any([not f.endswith('.csv'), f.startswith('.'), f.endswith('_Events.csv')]):
continue
test_name = f[:-4]
data_df = pd.read_csv(f'{data_dir}{f}', index_col='Time')
Events = pd.read_csv(f'{data_dir}{test_name}_Events.csv')
print (f'--- Loaded data for {test_name} ---')
Events = pd.read_csv(f'{data_dir}{f[:-4]}_Events.csv')
Events.rename(columns={'Time':'Timestamp'}, inplace=True)
start_timestamp = Events.loc[0, 'Timestamp'][11:]
hh,mm,ss = start_timestamp.split(':')
start_time = 3600 * int(hh) + 60 * int(mm) + int(ss)
Events['Time'] = convert_timestamps(Events['Timestamp'], start_time)
Events = Events.set_index('Time')
channel_list = full_channel_list[[i in data_df.columns for i in full_channel_list.index]]
for group in channel_list.groupby('Group').groups:
print (f" Plotting {group.replace('_',' ')}")
fig, ax1, plot_markers, x_max, y_min, y_max = create_1plot_fig()
for channel in channel_list.groupby('Group').get_group(group).index.values:
secondary_axis_label = 'None'
data_type = channel_list.loc[channel, 'Type']
if data_type == 'Temperature':
ax1.set_ylabel('Temperature ($^\circ$C)', fontsize=label_size)
secondary_axis_label = 'Temperature ($^\circ$F)'
y_min = 0
elif data_type == 'Velocity':
data_df[channel] = data_df[channel].rolling(window=10, center=True).mean()
ax1.set_ylabel('Velocity (m/s)', fontsize=label_size)
secondary_axis_label = 'Velocity (mph)'
secondary_axis_scale = 2.23694
elif data_type == 'Pressure':
data_df[channel] = data_df[channel].rolling(window=10, center=True).mean()
ax1.set_ylabel('Pressure (Pa)', fontsize=label_size)
elif data_type == 'Oxygen':
ax1.set_ylabel('O$_2$ Concentration (%)', fontsize=label_size)
elif data_type.endswith('Heat Flux'):
data_df[channel] = data_df[channel].rolling(window=10, center=True).mean()
ax1.set_ylabel('Heat Flux (kW/m$^2$)', fontsize=label_size)
elif data_type == 'Heat Release Rate':
ax1.set_ylabel('Heat Release Rate (kW)', fontsize=label_size)
x_end = data_df[channel].index[-1]
if x_end > x_max:
x_max = x_end
ax1.plot(data_df.index, data_df[channel], lw=line_width,
marker=next(plot_markers), markevery=30, mew=3, mec='none', ms=7,
label=channel_list.loc[channel, 'Label'])
if data_df[channel].min() - abs(data_df[channel].min() * .1) < y_min:
y_min = data_df[channel].min() - abs(data_df[channel].min() * .1)
if data_df[channel].max() * 1.1 > y_max:
y_max = data_df[channel].max() * 1.1
[ax1.axvline(_x, color='0.25', lw=1.5) for _x in Events.index.values if _x >= 0 and _x <= x_max]
save_dir = f'{plot_dir}{test_name}/'
if not os.path.exists(save_dir): os.makedirs(save_dir)
format_and_save_plot([y_min, y_max], [0, x_max], secondary_axis_label, f'{save_dir}{group}.pdf')
print() | true | true |
f73ccbef33027dd535f704695801ed382dc28bc0 | 451 | py | Python | core/facade.py | tiagocordeiro/woodash | 181323a24270da50da61bb4deb6610a79c13a08e | [
"MIT"
] | null | null | null | core/facade.py | tiagocordeiro/woodash | 181323a24270da50da61bb4deb6610a79c13a08e | [
"MIT"
] | 4 | 2021-03-30T13:49:40.000Z | 2021-09-22T19:20:34.000Z | core/facade.py | tiagocordeiro/woodash | 181323a24270da50da61bb4deb6610a79c13a08e | [
"MIT"
] | null | null | null | from decouple import config
from wordpress import API
# Configurações do WooCommerce
consumer_key = config("WC_CK", False)
consumer_secret = config("WC_CS", False)
woo_commerce_url = config("WC_URL", False)
wpapi = API(
url=woo_commerce_url,
api="wp-json",
version='wc/v3',
consumer_key=consumer_key,
consumer_secret=consumer_secret,
timeout=10
)
def get_orders():
orders = wpapi.get("orders")
return orders.json()
| 20.5 | 42 | 0.716186 | from decouple import config
from wordpress import API
consumer_key = config("WC_CK", False)
consumer_secret = config("WC_CS", False)
woo_commerce_url = config("WC_URL", False)
wpapi = API(
url=woo_commerce_url,
api="wp-json",
version='wc/v3',
consumer_key=consumer_key,
consumer_secret=consumer_secret,
timeout=10
)
def get_orders():
orders = wpapi.get("orders")
return orders.json()
| true | true |
f73ccc04714308cfa1fd22cc495a754df509da4a | 1,857 | py | Python | litex_boards/platforms/de1soc.py | mhrtmnn/litex-boards | e950a4a588515c69c0eb559f432fa41d35f5eb0c | [
"BSD-2-Clause"
] | null | null | null | litex_boards/platforms/de1soc.py | mhrtmnn/litex-boards | e950a4a588515c69c0eb559f432fa41d35f5eb0c | [
"BSD-2-Clause"
] | null | null | null | litex_boards/platforms/de1soc.py | mhrtmnn/litex-boards | e950a4a588515c69c0eb559f432fa41d35f5eb0c | [
"BSD-2-Clause"
] | null | null | null | #
# This file is part of LiteX-Boards.
#
# Copyright (c) 2019 Antony Pavlov <antonynpavlov@gmail.com>
# SPDX-License-Identifier: BSD-2-Clause
from litex.build.generic_platform import *
from litex.build.altera import AlteraPlatform
from litex.build.altera.programmer import USBBlaster
# IOs ----------------------------------------------------------------------------------------------
# I/O constraints for the DE1-SoC board: 50 MHz system clock, serial port
# on the JP1 GPIO header, and the on-board SDRAM.
_io = [
    ("clk50", 0, Pins("AF14"), IOStandard("3.3-V LVTTL")),
    ("serial", 0,
        Subsignal("tx", Pins("AC18"), IOStandard("3.3-V LVTTL")), # JP1 GPIO[0]
        Subsignal("rx", Pins("Y17"), IOStandard("3.3-V LVTTL")) # JP1 GPIO[1]
    ),
    ("sdram_clock", 0, Pins("AH12"), IOStandard("3.3-V LVTTL")),
    ("sdram", 0,
        # 13-bit address, 2-bit bank select, and control strobes.
        Subsignal("a", Pins(
            "AK14 AH14 AG15 AE14 AB15 AC14 AD14 AF15",
            "AH15 AG13 AG12 AH13 AJ14")),
        Subsignal("ba", Pins("AF13 AJ12")),
        Subsignal("cs_n", Pins("AG11")),
        Subsignal("cke", Pins("AK13")),
        Subsignal("ras_n", Pins("AE13")),
        Subsignal("cas_n", Pins("AF11")),
        Subsignal("we_n", Pins("AA13")),
        # 16-bit data bus with per-byte data masks.
        Subsignal("dq", Pins(
            "AK6 AJ7 AK7 AK8 AK9 AG10 AK11 AJ11",
            "AH10 AJ10 AJ9 AH9 AH8 AH7 AJ6 AJ5")),
        Subsignal("dm", Pins("AB13 AK12")),
        IOStandard("3.3-V LVTTL")
    ),
]
# Platform -----------------------------------------------------------------------------------------
class Platform(AlteraPlatform):
    """LiteX platform definition for the DE1-SoC (device 5CSEMA5F31C6)."""
    default_clk_name = "clk50"
    # 50 MHz board clock -> period in ns (1e9 / 50e6 = 20 ns).
    default_clk_period = 1e9/50e6
    def __init__(self):
        AlteraPlatform.__init__(self, "5CSEMA5F31C6", _io)
    def create_programmer(self):
        # Boards are programmed through a USB-Blaster.
        return USBBlaster()
    def do_finalize(self, fragment):
        AlteraPlatform.do_finalize(self, fragment)
        # Constrain the main clock to its 50 MHz period for timing analysis.
        self.add_period_constraint(self.lookup_request("clk50", loose=True), 1e9/50e6)
| 33.763636 | 100 | 0.545504 |
from litex.build.generic_platform import *
from litex.build.altera import AlteraPlatform
from litex.build.altera.programmer import USBBlaster
_io = [
("clk50", 0, Pins("AF14"), IOStandard("3.3-V LVTTL")),
("serial", 0,
Subsignal("tx", Pins("AC18"), IOStandard("3.3-V LVTTL")),
Subsignal("rx", Pins("Y17"), IOStandard("3.3-V LVTTL"))
),
("sdram_clock", 0, Pins("AH12"), IOStandard("3.3-V LVTTL")),
("sdram", 0,
Subsignal("a", Pins(
"AK14 AH14 AG15 AE14 AB15 AC14 AD14 AF15",
"AH15 AG13 AG12 AH13 AJ14")),
Subsignal("ba", Pins("AF13 AJ12")),
Subsignal("cs_n", Pins("AG11")),
Subsignal("cke", Pins("AK13")),
Subsignal("ras_n", Pins("AE13")),
Subsignal("cas_n", Pins("AF11")),
Subsignal("we_n", Pins("AA13")),
Subsignal("dq", Pins(
"AK6 AJ7 AK7 AK8 AK9 AG10 AK11 AJ11",
"AH10 AJ10 AJ9 AH9 AH8 AH7 AJ6 AJ5")),
Subsignal("dm", Pins("AB13 AK12")),
IOStandard("3.3-V LVTTL")
),
]
class Platform(AlteraPlatform):
default_clk_name = "clk50"
default_clk_period = 1e9/50e6
def __init__(self):
AlteraPlatform.__init__(self, "5CSEMA5F31C6", _io)
def create_programmer(self):
return USBBlaster()
def do_finalize(self, fragment):
AlteraPlatform.do_finalize(self, fragment)
self.add_period_constraint(self.lookup_request("clk50", loose=True), 1e9/50e6)
| true | true |
f73cce2c7368adf702192de2606836bdc48971e9 | 30,035 | py | Python | test/test_grammars.py | onchiptech/pyjsgf | f7ff26323e5e602ea10e7d302610c2fcb46234d6 | [
"MIT"
] | 40 | 2018-01-24T23:01:27.000Z | 2022-01-19T03:33:37.000Z | test/test_grammars.py | onchiptech/pyjsgf | f7ff26323e5e602ea10e7d302610c2fcb46234d6 | [
"MIT"
] | 31 | 2018-03-01T07:58:27.000Z | 2022-01-13T12:07:45.000Z | test/test_grammars.py | onchiptech/pyjsgf | f7ff26323e5e602ea10e7d302610c2fcb46234d6 | [
"MIT"
] | 21 | 2017-11-14T09:11:17.000Z | 2022-02-02T15:32:57.000Z | # This Python file uses the following encoding: utf-8
# The above line is required for the MultiLingualTests class
import copy
import tempfile
import unittest
from jsgf import *
from jsgf.ext import Dictation
class BasicGrammarCase(unittest.TestCase):
    def setUp(self):
        """Build a small greeting grammar shared by all tests.

        Public rule <greet> references the two private rules
        <greetWord> and <name>.
        """
        rule2 = PrivateRule("greetWord", AlternativeSet("hello", "hi"))
        rule3 = PrivateRule("name", AlternativeSet(
            "peter", "john", "mary", "anna"))
        rule1 = PublicRule("greet", RequiredGrouping(
            RuleRef(rule2), RuleRef(rule3)))
        self.grammar = Grammar("test")
        self.grammar.add_rules(rule1, rule2, rule3)
        self.rule1 = rule1
        self.rule2 = rule2
        self.rule3 = rule3
    def test_compile(self):
        """The grammar compiles to the expected JSGF text."""
        expected = "#JSGF V1.0;\n" \
                   "grammar test;\n" \
                   "public <greet> = (<greetWord> <name>);\n" \
                   "<greetWord> = (hello|hi);\n" \
                   "<name> = (peter|john|mary|anna);\n"
        compiled = self.grammar.compile()
        self.assertEqual(expected, compiled)
def test_compile_to_file(self):
expected = "#JSGF V1.0;\n" \
"grammar test;\n" \
"public <greet> = (<greetWord> <name>);\n" \
"<greetWord> = (hello|hi);\n" \
"<name> = (peter|john|mary|anna);\n"
# Create a temporary testing file.
tf = tempfile.NamedTemporaryFile()
self.grammar.compile_to_file(tf.name)
# Check the file contents after writing to it.
with open(tf.name) as f:
content = f.read()
try:
self.assertEqual(expected, content)
finally:
# Always close and remove the temp file, even if the assertion fails.
tf.close()
def test_remove_dependent_rule(self):
self.assertRaises(GrammarError, self.grammar.remove_rule, "greetWord")
self.assertRaises(GrammarError, self.grammar.remove_rule, "name")
# Test again with the actual rule objects
self.assertRaises(GrammarError, self.grammar.remove_rule, self.rule2)
self.assertRaises(GrammarError, self.grammar.remove_rule, self.rule3)
self.grammar.remove_rule("greet")
self.assertListEqual([self.rule2, self.rule3], self.grammar.rules)
# Add it again to test removing the rule using the object
self.grammar.add_rule(self.rule1)
self.assertListEqual([self.rule2, self.rule3, self.rule1],
self.grammar.rules)
self.grammar.remove_rule(self.rule1)
self.assertListEqual([self.rule2, self.rule3], self.grammar.rules)
# Test that removing rule2 works using ignore_dependent=True
self.grammar.add_rule(self.rule1) # add rule1 again
self.assertIsNone(self.grammar.remove_rule(self.rule2,
ignore_dependent=True))
def test_add_rules_with_taken_names(self):
self.assertRaises(GrammarError, self.grammar.add_rule,
PublicRule("name", "bob"))
self.assertRaises(GrammarError, self.grammar.add_rule,
PrivateRule("name", "bob"))
rules_to_add = [PrivateRule("name", "bob"),
PublicRule("name", "bob")]
self.assertRaises(GrammarError, self.grammar.add_rules, *rules_to_add)
def test_enable_disable_rule(self):
self.grammar.disable_rule(self.rule1)
self.assertFalse(self.rule1.active)
self.grammar.enable_rule(self.rule1)
self.assertTrue(self.rule1.active)
def test_enable_disable_using_name(self):
self.grammar.disable_rule("greetWord")
self.assertFalse(self.rule2.active)
self.grammar.enable_rule("greetWord")
self.assertTrue(self.rule1.active)
def test_enable_disable_non_existent(self):
self.assertRaises(GrammarError, self.grammar.disable_rule, "hello")
self.assertRaises(GrammarError, self.grammar.enable_rule, "hello")
r = PublicRule("test", "hello")
self.assertRaises(GrammarError, self.grammar.disable_rule, r)
self.assertRaises(GrammarError, self.grammar.enable_rule, r)
def test_enable_disable_using_dup_rule(self):
"""
Test that a copy of a rule in the grammar can be used to disable or enable
the equivalent rule in the grammar as well as the rule object passed.
"""
r = PrivateRule("greetWord", AlternativeSet("hello", "hi"))
self.grammar.disable_rule(r)
self.assertFalse(r.active, "duplicate rule should be disabled")
self.assertFalse(self.rule2.active, "rule in grammar should be disabled")
# Test enabling it again
self.grammar.enable_rule(r)
self.assertTrue(r.active, "duplicate rule should be enabled again")
self.assertTrue(self.rule2.active, "rule in grammar should be enabled")
def test_enable_disable_compile_output(self):
enabled_output = "#JSGF V1.0;\n" \
"grammar test;\n" \
"public <greet> = (<greetWord> <name>);\n" \
"<greetWord> = (hello|hi);\n" \
"<name> = (peter|john|mary|anna);\n"
self.assertEqual(self.grammar.compile(), enabled_output)
self.grammar.disable_rule(self.rule1)
self.assertFalse(self.rule1.active)
self.assertEqual(
self.grammar.compile(),
"#JSGF V1.0;\n"
"grammar test;\n"
"<greetWord> = (hello|hi);\n"
"<name> = (peter|john|mary|anna);\n",
"disabled output shouldn't have the public 'greet' rule"
)
self.grammar.enable_rule(self.rule1)
self.assertTrue(self.rule1.active)
self.assertEqual(self.grammar.compile(), enabled_output)
def test_comparisons(self):
self.assertEqual(Grammar(), Grammar())
self.assertNotEqual(Grammar(name="test"), Grammar(name="test2"),
"grammars with different names should not be equal")
g1 = Grammar(name="test")
g1.add_import(Import("test2.*"))
self.assertNotEqual(g1, Grammar(name="test"),
"grammars with different imports should not be equal")
g2 = Grammar()
g2.add_rule(PublicRule("r1", "hello"))
g3 = Grammar()
self.assertNotEqual(g1, g2,
"grammars with different names, rules and imports "
"should not be equal")
self.assertEqual(g2, g2, "the same grammar should be equal with itself")
self.assertEqual(g2, copy.copy(g2),
"grammars with the same rules should be equal")
self.assertNotEqual(g2, g3, "grammars with only different rules should not "
"be equal")
# Assert that any difference in the JSGF header makes Grammar objects not
# equal
default = Grammar()
def check():
self.assertNotEqual(g3, default, "grammars with only different JSGF "
"headers should not be equal")
g3.language_name = "ru"
check()
g3.jsgf_version = "2.0"
check()
g3.charset_name = "utf-16"
check()
self.assertEqual(RootGrammar(name="test"), Grammar(name="test"),
"grammars with only different types should be equal")
# Check case-sensitive vs case-insensitive grammars.
self.assertNotEqual(
Grammar(case_sensitive=False), Grammar(case_sensitive=True),
"grammars with different case sensitivity should not be equal")
def test_jsgf_header(self):
""" JSGF header uses grammar header attributes correctly. """
grammar = Grammar()
self.assertEqual(grammar.jsgf_header, "#JSGF V1.0;\n")
grammar.charset_name = "utf-8"
self.assertEqual(grammar.jsgf_header, "#JSGF V1.0 utf-8;\n")
grammar.charset_name = ""
grammar.language_name = "en"
self.assertEqual(grammar.jsgf_header, "#JSGF V1.0 en;\n")
grammar.charset_name = "utf-8"
self.assertEqual(grammar.jsgf_header, "#JSGF V1.0 utf-8 en;\n")
def test_links(self):
"""Expansion.rule and Rule.grammar attributes work correctly."""
hello = Literal("hello")
self.assertIsNone(hello.rule, "no rule will use the expansion yet")
r = PublicRule("test", hello)
self.assertEqual(hello.rule, r, "rule 'test' should use the expansion")
r.expansion = "hi"
self.assertIsNone(hello.rule, "setting r.expansion should reset "
"hello.rule")
# Test Rule.grammar
g = Grammar(name="g")
self.assertIsNone(r.grammar, "no grammar will be using the rule yet")
g.add_rule(r)
self.assertEqual(r.grammar, g, "adding r to a grammar should set r.grammar")
g.remove_rule(r)
self.assertIsNone(r.grammar, "remove r from its grammar should reset "
"r.grammar")
def test_case_sensitivity(self):
"""JSGF Grammars support configurable case-sensitivity."""
grammar = Grammar("test")
direction = Rule("direction", False, AlternativeSet(
"Up", "Down", "Left", "Right"
))
n = Rule("n", False, AlternativeSet("One", "Two", "Three"))
cmd_rule = Rule("cmd", True, Sequence(
NamedRuleRef("direction"), NamedRuleRef("n")
))
grammar.add_rules(direction, n, cmd_rule)
expected_sensitive = "#JSGF V1.0;\n" \
"grammar test;\n" \
"<direction> = (Up|Down|Left|Right);\n" \
"<n> = (One|Two|Three);\n" \
"public <cmd> = <direction> <n>;\n"
expected_insensitive = "#JSGF V1.0;\n" \
"grammar test;\n" \
"<direction> = (up|down|left|right);\n" \
"<n> = (one|two|three);\n" \
"public <cmd> = <direction> <n>;\n"
# Test that default is case-insensitive.
self.assertFalse(grammar.case_sensitive)
self.assertEqual(grammar.compile(), expected_insensitive)
# Test that setting grammar.case_sensitive overrides the values for each
# grammar rule.
grammar.case_sensitive = True
self.assertTrue(grammar.case_sensitive)
for rule in grammar.rules:
self.assertTrue(rule.case_sensitive)
# Test case-sensitive compilation and matching.
self.assertEqual(grammar.compile(), expected_sensitive)
self.assertSequenceEqual(grammar.find_matching_rules("Up Two"), [cmd_rule])
self.assertSequenceEqual(grammar.find_matching_rules("up two"), [])
# Switch back to case-insensitive to test that the casing of rule literals is
# never lost.
grammar.case_sensitive = False
self.assertFalse(grammar.case_sensitive)
self.assertEqual(grammar.compile(), expected_insensitive)
self.assertSequenceEqual(grammar.find_matching_rules("Up Two"), [cmd_rule])
self.assertSequenceEqual(grammar.find_matching_rules("up two"), [cmd_rule])
def test_add_import(self):
""" Import objects can be added and used by grammars. """
grammar = Grammar("test")
X = "com.example.grammar.X"
Y = "com.example.grammar.Y"
Z = "com.example.grammar.Z"
grammar.add_import(Import(X))
grammar.add_imports(Import(Y), Import(Z))
self.assertEqual(grammar.compile(),
"#JSGF V1.0;\n"
"grammar test;\n"
"import <com.example.grammar.X>;\n"
"import <com.example.grammar.Y>;\n"
"import <com.example.grammar.Z>;\n")
self.assertEqual(grammar.imports, [Import(i) for i in (X, Y, Z)])
self.assertEqual(grammar.import_names, [X, Y, Z])
def test_add_import_optimal(self):
""" Import objects added to grammars multiple times are only added once. """
grammar = Grammar("test")
import_name = "com.example.grammar.X"
for i in range(2):
grammar.add_import(Import(import_name))
self.assertEqual(grammar.compile(),
"#JSGF V1.0;\n"
"grammar test;\n"
"import <com.example.grammar.X>;\n")
self.assertEqual(grammar.imports, [Import(import_name)])
self.assertEqual(grammar.import_names, [import_name])
def test_add_import_type(self):
""" Grammar.add_import only accepts Import objects. """
grammar = Grammar("test")
grammar.add_import(Import("com.example.grammar.X"))
self.assertRaises(TypeError, grammar.add_import, "com.example.grammar.Y")
self.assertRaises(TypeError, grammar.add_imports, "com.example.grammar.Y")
def test_remove_import(self):
""" Import objects can be properly removed from grammars. """
grammar = Grammar("test")
expected = "#JSGF V1.0;\ngrammar test;\n"
import_name = "com.example.grammar.X"
import_ = Import(import_name)
# Both identical and equivalent Import objects should work.
for remove_item in (import_, Import(import_name)):
grammar.add_import(import_)
grammar.remove_import(remove_item)
self.assertEqual(grammar.compile(), expected)
self.assertEqual(grammar.imports, [])
self.assertEqual(grammar.import_names, [])
def test_remove_import_type(self):
""" Grammar.remove_import only accepts Import objects. """
grammar = Grammar("test")
grammar.add_import(Import("com.example.grammar.X"))
self.assertRaises(TypeError, grammar.remove_import, "com.example.grammar.X")
self.assertRaises(TypeError, grammar.remove_imports, "com.example.grammar.X")
def test_remove_import_unknown(self):
""" Removing an Import object that isn't in a grammar raises an error. """
grammar = Grammar("test")
self.assertRaises(GrammarError, grammar.remove_import,
Import("com.example.grammar.X"))
self.assertRaises(GrammarError, grammar.remove_imports,
Import("com.example.grammar.X"),
Import("com.example.grammar.Y"))
class TagTests(unittest.TestCase):
    """
    Tests for Grammar.find_tagged_rules and retrieving rules by name.
    """
    def test_simple(self):
        """A tagged visible rule is found by its tag."""
        grammar = Grammar()
        rule = Rule("r", True, "test")
        rule.expansion.tag = "tag"
        grammar.add_rule(rule)
        self.assertListEqual(grammar.find_tagged_rules("tag"), [rule])

    def test_hidden_rule(self):
        """Hidden (private) rules are excluded unless include_hidden is set."""
        grammar = Grammar()
        rule = Rule("r", False, "test")
        rule.expansion.tag = "tag"
        grammar.add_rule(rule)
        self.assertListEqual(grammar.find_tagged_rules("tag"), [])
        self.assertListEqual(
            grammar.find_tagged_rules("tag", include_hidden=True), [rule])

    def test_no_tag(self):
        """Empty or whitespace-only tags never match any rule."""
        grammar = Grammar()
        rule = PublicRule("hello", "hello world")
        self.assertListEqual(grammar.find_tagged_rules(""), [])
        rule.expansion.tag = ""
        self.assertListEqual(grammar.find_tagged_rules(""), [])
        self.assertListEqual(grammar.find_tagged_rules(" "), [])
        rule.expansion.tag = " "
        self.assertListEqual(grammar.find_tagged_rules(" "), [])

    def test_whitespace(self):
        """Leading/trailing whitespace is ignored by find_tagged_rules."""
        grammar = Grammar()
        rule = PublicRule("r", "test")
        rule.expansion.tag = " tag "
        grammar.add_rule(rule)
        self.assertEqual(rule.expansion.tag, "tag")
        self.assertListEqual(grammar.find_tagged_rules("tag"), [rule])
        self.assertListEqual(grammar.find_tagged_rules(" tag "), [rule])

    def test_get_rules_from_names(self):
        """Rules are retrievable by name; unknown names raise GrammarError."""
        grammar = Grammar()
        rule_x = PublicRule("X", "x")
        rule_y = PrivateRule("Y", "y")
        rule_z = PublicRule("Z", "z")
        grammar.add_rules(rule_x, rule_y, rule_z)

        # Both retrieval methods should return the same rules.
        self.assertEqual(grammar.get_rules_from_names("X", "Y"),
                         [rule_x, rule_y])
        self.assertEqual(grammar.get_rules("X", "Y"), [rule_x, rule_y])

        # Any invalid name should raise a GrammarError.
        self.assertRaises(GrammarError, grammar.get_rules_from_names, "W")
        self.assertRaises(GrammarError, grammar.get_rules_from_names, "X", "W")
        self.assertRaises(GrammarError, grammar.get_rules, "W")
        self.assertRaises(GrammarError, grammar.get_rules, "X", "W")
class SpeechMatchCase(unittest.TestCase):
    """Tests for matching speech strings against grammar rules."""

    def assert_matches(self, speech, rule):
        """Assert that *rule* matches the given *speech* string."""
        matched = rule.matches(speech)
        self.assertTrue(matched)

    def assert_no_match(self, speech, rule):
        """Assert that *rule* does not match the given *speech* string."""
        matched = rule.matches(speech)
        self.assertFalse(matched)

    def test_single_rule_match(self):
        """A single sequence rule matches only complete, valid phrases."""
        test_grammar = Grammar("test")
        greet_rule = PrivateRule("greet", Sequence(
            AlternativeSet("hello", "hi"), "world"
        ))
        test_grammar.add_rules(greet_rule)

        # Matching is case-insensitive and both alternatives work.
        for speech in ("hello world", "hello world".swapcase(), "hi world"):
            self.assert_matches(speech, greet_rule)

        # Partial or invalid phrases do not match.
        for speech in ("hey world", "hello", "world", ""):
            self.assert_no_match(speech, greet_rule)

    def test_multi_rule_match(self):
        """Rules referencing other rules match only full combined phrases."""
        test_grammar = Grammar("test")
        greet_word = PrivateRule("greetWord", AlternativeSet("hello", "hi"))
        name = PrivateRule("name", AlternativeSet("peter", "john",
                                                  "mary", "anna"))
        greet = PublicRule("greet",
                           RequiredGrouping(
                               RuleRef(greet_word),
                               RuleRef(name))
                           )
        test_grammar.add_rules(greet, greet_word, name)

        # The composite rule needs both a greeting word and a name.
        self.assert_matches("hello john", greet)
        self.assert_matches("hello john".swapcase(), greet)
        for speech in ("hello", "john", ""):
            self.assert_no_match(speech, greet)

        # The greeting-word rule matches its alternatives case-insensitively.
        for speech in ("hello", "HELLO", "hi", "HI"):
            self.assert_matches(speech, greet_word)
        self.assert_no_match("", greet_word)

        # The name rule matches a single name only.
        self.assert_matches("john", name)
        self.assert_no_match("", name)
class MultiLingualTests(unittest.TestCase):
    """
    Test that Unicode characters can be used in rule, import and grammar names
    as well as in literals and that the text can be matched.

    Cyrillic characters are used to test this functionality. There are various
    Unicode character sets, each containing an enormous number of characters, so
    it is hardly feasible to test everything. Plus, this library simply uses
    Python's Unicode support.
    """
    def test_names(self):
        """Unicode strings can be used in names and literals and can be matched."""
        grammar = Grammar(name=u"грамматика")
        self.assertEqual(grammar.name, u"грамматика")
        rule = PublicRule(u"русский", AlternativeSet(
            u"привет", u"здравствуйте", u"пожалуйста"))
        import_ = Import(u"грамматика2.*")
        self.assertEqual(import_.name, u"грамматика2.*")

        # Test matching the rule
        self.assertTrue(rule.matches(u"здравствуйте"))

        # Test matching using the grammar
        grammar.add_rule(rule)
        self.assertListEqual(grammar.find_matching_rules(u"пожалуйста"), [rule])

    def test_dictation(self):
        """Dictation Expansions match Unicode strings."""
        # Bug fix: the closing parenthesis was misplaced, so the original code
        # called Dictation().matches(...) and passed the boolean result to
        # PublicRule as its expansion; assertTrue then checked the PublicRule
        # object itself, which is always truthy, making the test vacuous.
        # The intent is to match the speech against a rule with a Dictation
        # expansion.
        self.assertTrue(PublicRule(u"всё", Dictation()).matches(u"это кофе"))
class VisibleRulesCase(unittest.TestCase):
    """
    Tests for the 'visible_rules' property of the Grammar class.
    """
    def setUp(self):
        # Grammar 1: every rule is private, so none should be visible.
        self.rule1 = PrivateRule("rule1", "Hello")
        self.rule2 = PrivateRule("rule2", "Hey")
        self.rule3 = PrivateRule("rule3", "Hi")
        self.grammar1 = Grammar("test")
        self.grammar1.add_rules(self.rule1, self.rule2, self.rule3)

        # Grammar 2: a mix of public and private rules.
        self.rule4 = PublicRule("rule4", "Hello")
        self.rule5 = PublicRule("rule5", "Hey")
        self.rule6 = PrivateRule("rule6", "Hi")
        self.grammar2 = Grammar("test2")
        self.grammar2.add_rules(self.rule4, self.rule5, self.rule6)

    def test_none(self):
        """A grammar with only private rules has no visible rules."""
        self.assertListEqual(self.grammar1.visible_rules, [])

    def test_many(self):
        """Only the public rules of a grammar are visible."""
        self.assertListEqual(self.grammar2.visible_rules,
                             [self.rule4, self.rule5])
class RootGrammarCase(unittest.TestCase):
    """
    Tests for the RootGrammar class, which wraps all visible rules in a single
    generated public '<root>' rule when compiled.
    """
    def setUp(self):
        # Root grammar with a public 'greet' rule built from two private rules.
        self.grammar = RootGrammar(name="root")
        self.rule2 = PrivateRule("greetWord", AlternativeSet("hello", "hi"))
        self.rule3 = PrivateRule("name", AlternativeSet(
            "peter", "john", "mary", "anna"))
        self.rule1 = PublicRule("greet", RequiredGrouping(
            RuleRef(self.rule2), RuleRef(self.rule3)))
        self.grammar.add_rules(self.rule1, self.rule2, self.rule3)
        # Extra rules (not yet added to a grammar) used by several tests below.
        self.rule5 = PrivateRule("greetWord", AlternativeSet("hello", "hi"))
        self.rule4 = PublicRule("greet", Sequence(RuleRef(self.rule5), "there"))
        self.rule6 = PublicRule("partingPhrase", AlternativeSet(
            "goodbye", "see you"))
    def test_compile(self):
        # The generated <root> rule should reference the public 'greet' rule.
        root = self.grammar
        expected = "#JSGF V1.0;\n" \
                   "grammar root;\n" \
                   "public <root> = (<greet>);\n" \
                   "<greet> = (<greetWord> <name>);\n" \
                   "<greetWord> = (hello|hi);\n" \
                   "<name> = (peter|john|mary|anna);\n"
        self.assertEqual(root.compile(), expected)
    def test_compile_to_file(self):
        # compile_to_file should write the same text that compile() produces.
        root = self.grammar
        expected = "#JSGF V1.0;\n" \
                   "grammar root;\n" \
                   "public <root> = (<greet>);\n" \
                   "<greet> = (<greetWord> <name>);\n" \
                   "<greetWord> = (hello|hi);\n" \
                   "<name> = (peter|john|mary|anna);\n"
        # Create a temporary testing file.
        tf = tempfile.NamedTemporaryFile()
        root.compile_to_file(tf.name)
        # Check the file contents after writing to it.
        with open(tf.name) as f:
            content = f.read()
            try:
                self.assertEqual(expected, content)
            finally:
                # Always close and remove the temp file, even if the assertion fails.
                tf.close()
    def test_compile_add_remove_rule(self):
        # The <root> alternatives should track public rules as they are
        # added and removed (by name or by object).
        root = RootGrammar(rules=[self.rule5, self.rule4], name="root")
        expected_without = "#JSGF V1.0;\n" \
                           "grammar root;\n" \
                           "public <root> = (<greet>);\n" \
                           "<greetWord> = (hello|hi);\n" \
                           "<greet> = <greetWord> there;\n"
        expected_with = "#JSGF V1.0;\n" \
                        "grammar root;\n" \
                        "public <root> = (<greet>|<partingPhrase>);\n" \
                        "<greetWord> = (hello|hi);\n" \
                        "<greet> = <greetWord> there;\n" \
                        "<partingPhrase> = (goodbye|see you);\n"
        self.assertEqual(root.compile(), expected_without)
        root.add_rule(self.rule6)
        self.assertEqual(root.compile(), expected_with)
        # Test removing the partingPhrase rule using the name
        root.remove_rule("partingPhrase")
        self.assertEqual(root.compile(), expected_without)
        # Add the rule and test removing it using the rule object
        root.add_rule(self.rule6)
        self.assertEqual(root.compile(), expected_with)
        root.remove_rule(self.rule6)
        self.assertEqual(root.compile(), expected_without)
    def test_match(self):
        # Only rule1 should match
        root = self.grammar
        self.assertListEqual(root.find_matching_rules("Hello John"), [self.rule1])
        self.assertListEqual(root.find_matching_rules("HELLO mary"), [self.rule1])
        self.assertListEqual(root.find_matching_rules("hello ANNA"), [self.rule1])
    def test_match_add_remove(self):
        # Matching should track rules as they are added and removed.
        root = RootGrammar(rules=[self.rule5, self.rule4], name="root")
        self.assertListEqual(root.find_matching_rules("Hello there"), [self.rule4])
        self.assertListEqual(root.find_matching_rules("Hi there"), [self.rule4])
        # Add a rule
        root.add_rule(self.rule6)
        self.assertListEqual(root.find_matching_rules("Goodbye"), [self.rule6])
        self.assertListEqual(root.find_matching_rules("See you"), [self.rule6])
        # Remove it and test again
        root.remove_rule("partingPhrase")
        self.assertListEqual(root.find_matching_rules("Goodbye"), [])
        self.assertListEqual(root.find_matching_rules("See you"), [])
        # Test again using the remove_rule(rule object) instead
        root.add_rule(self.rule6)
        self.assertListEqual(root.find_matching_rules("Goodbye"), [self.rule6])
        self.assertListEqual(root.find_matching_rules("See you"), [self.rule6])
        root.remove_rule(self.rule6)
        self.assertListEqual(root.find_matching_rules("Goodbye"), [])
        self.assertListEqual(root.find_matching_rules("See you"), [])
    def test_add_rules_with_taken_names(self):
        # Duplicate rule names should be rejected, including the reserved
        # 'root' name used by the generated public rule.
        root = self.grammar
        self.assertRaises(GrammarError, root.add_rule,
                          PublicRule("name", "bob"))
        self.assertRaises(GrammarError, root.add_rule,
                          PrivateRule("name", "bob"))
        rules_to_add = [PrivateRule("name", "bob"),
                        PublicRule("name", "bob")]
        self.assertRaises(GrammarError, root.add_rules,
                          *rules_to_add)
        # Test if adding a rule with the name 'root' raises an error
        self.assertRaises(GrammarError, root.add_rule, PublicRule("root", "test"))
    def test_create_grammar_with_rule_name_conflicts(self):
        # Try with duplicate rules (should fail silently)
        g = RootGrammar()
        r = PublicRule("test", "test")
        g.add_rule(r)
        self.assertListEqual(g.rules, [r])
        g.add_rule(PublicRule("test", "test"))
        self.assertListEqual(g.rules, [r])
        # Try with slightly different rules
        self.assertRaises(GrammarError, RootGrammar,
                          [PublicRule("test", "testing"),
                           PublicRule("test", "test")])
        self.assertRaises(GrammarError, RootGrammar,
                          [PublicRule("test", "test"),
                           PrivateRule("test", "test")])
        self.assertRaises(GrammarError, RootGrammar,
                          [PublicRule("test", "testing"),
                           PrivateRule("test", "test")])
    def test_enable_disable_rule(self):
        # Disable/enable using the rule object.
        self.grammar.disable_rule(self.rule1)
        self.assertFalse(self.rule1.active)
        self.grammar.enable_rule(self.rule1)
        self.assertTrue(self.rule1.active)
    def test_enable_disable_using_name(self):
        # Disable/enable using the rule's string name.
        self.grammar.disable_rule("greetWord")
        self.assertFalse(self.rule2.active)
        self.grammar.enable_rule("greetWord")
        self.assertTrue(self.rule2.active)
    def test_enable_disable_non_existent(self):
        # Rules not in the grammar cannot be disabled or enabled.
        self.assertRaises(GrammarError, self.grammar.disable_rule, "hello")
        self.assertRaises(GrammarError, self.grammar.enable_rule, "hello")
        r = PublicRule("test", "hello")
        self.assertRaises(GrammarError, self.grammar.disable_rule, r)
        self.assertRaises(GrammarError, self.grammar.enable_rule, r)
    def test_enable_disable_using_dup_rule(self):
        """
        Test that a copy of a rule in the grammar can be used to disable or enable
        the equivalent rule in the grammar as well as the rule object passed.
        """
        r = PrivateRule("greetWord", AlternativeSet("hello", "hi"))
        self.assertTrue(self.rule2.active)
        self.grammar.disable_rule(r)
        self.assertFalse(r.active, "duplicate rule should be disabled")
        self.assertFalse(self.rule2.active, "original rule should be disabled")
        # Test enabling it again
        self.grammar.enable_rule(r)
        self.assertTrue(r.active, "duplicate rule should be enabled again")
        self.assertTrue(self.rule2.active, "original rule should be enabled")
    def test_enable_disable_compile_output(self):
        # Disabled public rules should be dropped from both the <root>
        # alternatives and the rule list in the compiled output.
        enabled_output = "#JSGF V1.0;\n" \
                         "grammar root;\n" \
                         "public <root> = (<greet>);\n" \
                         "<greet> = (<greetWord> <name>);\n" \
                         "<greetWord> = (hello|hi);\n" \
                         "<name> = (peter|john|mary|anna);\n"
        self.assertEqual(self.grammar.compile(), enabled_output)
        self.grammar.disable_rule(self.rule1)
        self.assertFalse(self.rule1.active)
        self.assertEqual(
            self.grammar.compile(),
            "#JSGF V1.0;\n"
            "grammar root;\n",
            "disabled output shouldn't have the originally public 'greet' rule"
        )
        self.grammar.enable_rule(self.rule1)
        self.assertTrue(self.rule1.active)
        self.assertEqual(self.grammar.compile(), enabled_output)
        # Add another public rule and test again
        self.grammar.add_rule(PublicRule("test", "testing"))
        self.grammar.disable_rule(self.rule1)
        self.assertFalse(self.rule1.active)
        self.assertEqual(
            self.grammar.compile(),
            "#JSGF V1.0;\n"
            "grammar root;\n"
            "public <root> = (<test>);\n"
            "<greetWord> = (hello|hi);\n"
            "<name> = (peter|john|mary|anna);\n"
            "<test> = testing;\n",
            "disabled output should have the originally public 'test' rule"
        )
if __name__ == '__main__':
    # Run all test cases in this module when executed as a script.
    unittest.main()
| 40.587838 | 85 | 0.599234 |
import copy
import tempfile
import unittest
from jsgf import *
from jsgf.ext import Dictation
class BasicGrammarCase(unittest.TestCase):
def setUp(self):
rule2 = PrivateRule("greetWord", AlternativeSet("hello", "hi"))
rule3 = PrivateRule("name", AlternativeSet(
"peter", "john", "mary", "anna"))
rule1 = PublicRule("greet", RequiredGrouping(
RuleRef(rule2), RuleRef(rule3)))
self.grammar = Grammar("test")
self.grammar.add_rules(rule1, rule2, rule3)
self.rule1 = rule1
self.rule2 = rule2
self.rule3 = rule3
def test_compile(self):
expected = "#JSGF V1.0;\n" \
"grammar test;\n" \
"public <greet> = (<greetWord> <name>);\n" \
"<greetWord> = (hello|hi);\n" \
"<name> = (peter|john|mary|anna);\n"
compiled = self.grammar.compile()
self.assertEqual(expected, compiled)
def test_compile_to_file(self):
expected = "#JSGF V1.0;\n" \
"grammar test;\n" \
"public <greet> = (<greetWord> <name>);\n" \
"<greetWord> = (hello|hi);\n" \
"<name> = (peter|john|mary|anna);\n"
tf = tempfile.NamedTemporaryFile()
self.grammar.compile_to_file(tf.name)
with open(tf.name) as f:
content = f.read()
try:
self.assertEqual(expected, content)
finally:
tf.close()
def test_remove_dependent_rule(self):
self.assertRaises(GrammarError, self.grammar.remove_rule, "greetWord")
self.assertRaises(GrammarError, self.grammar.remove_rule, "name")
self.assertRaises(GrammarError, self.grammar.remove_rule, self.rule2)
self.assertRaises(GrammarError, self.grammar.remove_rule, self.rule3)
self.grammar.remove_rule("greet")
self.assertListEqual([self.rule2, self.rule3], self.grammar.rules)
self.grammar.add_rule(self.rule1)
self.assertListEqual([self.rule2, self.rule3, self.rule1],
self.grammar.rules)
self.grammar.remove_rule(self.rule1)
self.assertListEqual([self.rule2, self.rule3], self.grammar.rules)
self.grammar.add_rule(self.rule1)
self.assertIsNone(self.grammar.remove_rule(self.rule2,
ignore_dependent=True))
def test_add_rules_with_taken_names(self):
self.assertRaises(GrammarError, self.grammar.add_rule,
PublicRule("name", "bob"))
self.assertRaises(GrammarError, self.grammar.add_rule,
PrivateRule("name", "bob"))
rules_to_add = [PrivateRule("name", "bob"),
PublicRule("name", "bob")]
self.assertRaises(GrammarError, self.grammar.add_rules, *rules_to_add)
def test_enable_disable_rule(self):
self.grammar.disable_rule(self.rule1)
self.assertFalse(self.rule1.active)
self.grammar.enable_rule(self.rule1)
self.assertTrue(self.rule1.active)
def test_enable_disable_using_name(self):
self.grammar.disable_rule("greetWord")
self.assertFalse(self.rule2.active)
self.grammar.enable_rule("greetWord")
self.assertTrue(self.rule1.active)
def test_enable_disable_non_existent(self):
self.assertRaises(GrammarError, self.grammar.disable_rule, "hello")
self.assertRaises(GrammarError, self.grammar.enable_rule, "hello")
r = PublicRule("test", "hello")
self.assertRaises(GrammarError, self.grammar.disable_rule, r)
self.assertRaises(GrammarError, self.grammar.enable_rule, r)
def test_enable_disable_using_dup_rule(self):
r = PrivateRule("greetWord", AlternativeSet("hello", "hi"))
self.grammar.disable_rule(r)
self.assertFalse(r.active, "duplicate rule should be disabled")
self.assertFalse(self.rule2.active, "rule in grammar should be disabled")
self.grammar.enable_rule(r)
self.assertTrue(r.active, "duplicate rule should be enabled again")
self.assertTrue(self.rule2.active, "rule in grammar should be enabled")
def test_enable_disable_compile_output(self):
enabled_output = "#JSGF V1.0;\n" \
"grammar test;\n" \
"public <greet> = (<greetWord> <name>);\n" \
"<greetWord> = (hello|hi);\n" \
"<name> = (peter|john|mary|anna);\n"
self.assertEqual(self.grammar.compile(), enabled_output)
self.grammar.disable_rule(self.rule1)
self.assertFalse(self.rule1.active)
self.assertEqual(
self.grammar.compile(),
"#JSGF V1.0;\n"
"grammar test;\n"
"<greetWord> = (hello|hi);\n"
"<name> = (peter|john|mary|anna);\n",
"disabled output shouldn't have the public 'greet' rule"
)
self.grammar.enable_rule(self.rule1)
self.assertTrue(self.rule1.active)
self.assertEqual(self.grammar.compile(), enabled_output)
def test_comparisons(self):
self.assertEqual(Grammar(), Grammar())
self.assertNotEqual(Grammar(name="test"), Grammar(name="test2"),
"grammars with different names should not be equal")
g1 = Grammar(name="test")
g1.add_import(Import("test2.*"))
self.assertNotEqual(g1, Grammar(name="test"),
"grammars with different imports should not be equal")
g2 = Grammar()
g2.add_rule(PublicRule("r1", "hello"))
g3 = Grammar()
self.assertNotEqual(g1, g2,
"grammars with different names, rules and imports "
"should not be equal")
self.assertEqual(g2, g2, "the same grammar should be equal with itself")
self.assertEqual(g2, copy.copy(g2),
"grammars with the same rules should be equal")
self.assertNotEqual(g2, g3, "grammars with only different rules should not "
"be equal")
# Assert that any difference in the JSGF header makes Grammar objects not
# equal
default = Grammar()
def check():
self.assertNotEqual(g3, default, "grammars with only different JSGF "
"headers should not be equal")
g3.language_name = "ru"
check()
g3.jsgf_version = "2.0"
check()
g3.charset_name = "utf-16"
check()
self.assertEqual(RootGrammar(name="test"), Grammar(name="test"),
"grammars with only different types should be equal")
# Check case-sensitive vs case-insensitive grammars.
self.assertNotEqual(
Grammar(case_sensitive=False), Grammar(case_sensitive=True),
"grammars with different case sensitivity should not be equal")
def test_jsgf_header(self):
grammar = Grammar()
self.assertEqual(grammar.jsgf_header, "#JSGF V1.0;\n")
grammar.charset_name = "utf-8"
self.assertEqual(grammar.jsgf_header, "#JSGF V1.0 utf-8;\n")
grammar.charset_name = ""
grammar.language_name = "en"
self.assertEqual(grammar.jsgf_header, "#JSGF V1.0 en;\n")
grammar.charset_name = "utf-8"
self.assertEqual(grammar.jsgf_header, "#JSGF V1.0 utf-8 en;\n")
def test_links(self):
hello = Literal("hello")
self.assertIsNone(hello.rule, "no rule will use the expansion yet")
r = PublicRule("test", hello)
self.assertEqual(hello.rule, r, "rule 'test' should use the expansion")
r.expansion = "hi"
self.assertIsNone(hello.rule, "setting r.expansion should reset "
"hello.rule")
# Test Rule.grammar
g = Grammar(name="g")
self.assertIsNone(r.grammar, "no grammar will be using the rule yet")
g.add_rule(r)
self.assertEqual(r.grammar, g, "adding r to a grammar should set r.grammar")
g.remove_rule(r)
self.assertIsNone(r.grammar, "remove r from its grammar should reset "
"r.grammar")
def test_case_sensitivity(self):
grammar = Grammar("test")
direction = Rule("direction", False, AlternativeSet(
"Up", "Down", "Left", "Right"
))
n = Rule("n", False, AlternativeSet("One", "Two", "Three"))
cmd_rule = Rule("cmd", True, Sequence(
NamedRuleRef("direction"), NamedRuleRef("n")
))
grammar.add_rules(direction, n, cmd_rule)
expected_sensitive = "#JSGF V1.0;\n" \
"grammar test;\n" \
"<direction> = (Up|Down|Left|Right);\n" \
"<n> = (One|Two|Three);\n" \
"public <cmd> = <direction> <n>;\n"
expected_insensitive = "#JSGF V1.0;\n" \
"grammar test;\n" \
"<direction> = (up|down|left|right);\n" \
"<n> = (one|two|three);\n" \
"public <cmd> = <direction> <n>;\n"
# Test that default is case-insensitive.
self.assertFalse(grammar.case_sensitive)
self.assertEqual(grammar.compile(), expected_insensitive)
# Test that setting grammar.case_sensitive overrides the values for each
# grammar rule.
grammar.case_sensitive = True
self.assertTrue(grammar.case_sensitive)
for rule in grammar.rules:
self.assertTrue(rule.case_sensitive)
# Test case-sensitive compilation and matching.
self.assertEqual(grammar.compile(), expected_sensitive)
self.assertSequenceEqual(grammar.find_matching_rules("Up Two"), [cmd_rule])
self.assertSequenceEqual(grammar.find_matching_rules("up two"), [])
# Switch back to case-insensitive to test that the casing of rule literals is
# never lost.
grammar.case_sensitive = False
self.assertFalse(grammar.case_sensitive)
self.assertEqual(grammar.compile(), expected_insensitive)
self.assertSequenceEqual(grammar.find_matching_rules("Up Two"), [cmd_rule])
self.assertSequenceEqual(grammar.find_matching_rules("up two"), [cmd_rule])
def test_add_import(self):
grammar = Grammar("test")
X = "com.example.grammar.X"
Y = "com.example.grammar.Y"
Z = "com.example.grammar.Z"
grammar.add_import(Import(X))
grammar.add_imports(Import(Y), Import(Z))
self.assertEqual(grammar.compile(),
"#JSGF V1.0;\n"
"grammar test;\n"
"import <com.example.grammar.X>;\n"
"import <com.example.grammar.Y>;\n"
"import <com.example.grammar.Z>;\n")
self.assertEqual(grammar.imports, [Import(i) for i in (X, Y, Z)])
self.assertEqual(grammar.import_names, [X, Y, Z])
def test_add_import_optimal(self):
grammar = Grammar("test")
import_name = "com.example.grammar.X"
for i in range(2):
grammar.add_import(Import(import_name))
self.assertEqual(grammar.compile(),
"#JSGF V1.0;\n"
"grammar test;\n"
"import <com.example.grammar.X>;\n")
self.assertEqual(grammar.imports, [Import(import_name)])
self.assertEqual(grammar.import_names, [import_name])
def test_add_import_type(self):
grammar = Grammar("test")
grammar.add_import(Import("com.example.grammar.X"))
self.assertRaises(TypeError, grammar.add_import, "com.example.grammar.Y")
self.assertRaises(TypeError, grammar.add_imports, "com.example.grammar.Y")
def test_remove_import(self):
grammar = Grammar("test")
expected = "#JSGF V1.0;\ngrammar test;\n"
import_name = "com.example.grammar.X"
import_ = Import(import_name)
# Both identical and equivalent Import objects should work.
for remove_item in (import_, Import(import_name)):
grammar.add_import(import_)
grammar.remove_import(remove_item)
self.assertEqual(grammar.compile(), expected)
self.assertEqual(grammar.imports, [])
self.assertEqual(grammar.import_names, [])
def test_remove_import_type(self):
grammar = Grammar("test")
grammar.add_import(Import("com.example.grammar.X"))
self.assertRaises(TypeError, grammar.remove_import, "com.example.grammar.X")
self.assertRaises(TypeError, grammar.remove_imports, "com.example.grammar.X")
def test_remove_import_unknown(self):
grammar = Grammar("test")
self.assertRaises(GrammarError, grammar.remove_import,
Import("com.example.grammar.X"))
self.assertRaises(GrammarError, grammar.remove_imports,
Import("com.example.grammar.X"),
Import("com.example.grammar.Y"))
class TagTests(unittest.TestCase):
    """Tests for finding rules by JSGF tag and retrieving rules by name."""

    def test_simple(self):
        """A tagged public rule is found by its tag."""
        grammar = Grammar()
        rule = Rule("r", True, "test")
        rule.expansion.tag = "tag"
        grammar.add_rule(rule)
        self.assertListEqual(grammar.find_tagged_rules("tag"), [rule])

    def test_hidden_rule(self):
        """Hidden rules are only found when include_hidden is set."""
        grammar = Grammar()
        rule = Rule("r", False, "test")
        rule.expansion.tag = "tag"
        grammar.add_rule(rule)
        self.assertListEqual(grammar.find_tagged_rules("tag"), [])
        self.assertListEqual(
            grammar.find_tagged_rules("tag", include_hidden=True), [rule])

    def test_no_tag(self):
        """Empty or whitespace-only tags never match anything."""
        grammar = Grammar()
        rule = PublicRule("hello", "hello world")
        self.assertListEqual(grammar.find_tagged_rules(""), [])
        rule.expansion.tag = ""
        self.assertListEqual(grammar.find_tagged_rules(""), [])
        self.assertListEqual(grammar.find_tagged_rules(" "), [])
        rule.expansion.tag = " "
        self.assertListEqual(grammar.find_tagged_rules(" "), [])

    def test_whitespace(self):
        """Leading/trailing whitespace is ignored in both tags and queries."""
        grammar = Grammar()
        rule = PublicRule("r", "test")
        rule.expansion.tag = " tag "
        grammar.add_rule(rule)
        # The tag setter strips surrounding whitespace.
        self.assertEqual(rule.expansion.tag, "tag")
        self.assertListEqual(grammar.find_tagged_rules("tag"), [rule])
        self.assertListEqual(grammar.find_tagged_rules(" tag "), [rule])

    def test_get_rules_from_names(self):
        """Rules are retrievable by name; unknown names raise GrammarError."""
        grammar = Grammar()
        x = PublicRule("X", "x")
        y = PrivateRule("Y", "y")
        z = PublicRule("Z", "z")
        grammar.add_rules(x, y, z)
        # Both the long and short method names behave the same.
        for getter in (grammar.get_rules_from_names, grammar.get_rules):
            self.assertEqual(getter("X", "Y"), [x, y])
        for getter in (grammar.get_rules_from_names, grammar.get_rules):
            self.assertRaises(GrammarError, getter, "W")
            self.assertRaises(GrammarError, getter, "X", "W")
class SpeechMatchCase(unittest.TestCase):
    """Tests for matching spoken strings against rules."""

    def assert_matches(self, speech, rule):
        """Assert that *rule* matches the given speech string."""
        self.assertTrue(rule.matches(speech))

    def assert_no_match(self, speech, rule):
        """Assert that *rule* does not match the given speech string."""
        self.assertFalse(rule.matches(speech))

    def test_single_rule_match(self):
        grammar = Grammar("test")
        greet = PrivateRule("greet", Sequence(
            AlternativeSet("hello", "hi"), "world"
        ))
        grammar.add_rules(greet)
        # Matching is case-insensitive.
        for speech in ("hello world", "hello world".swapcase(), "hi world"):
            self.assert_matches(speech, greet)
        for speech in ("hey world", "hello", "world", ""):
            self.assert_no_match(speech, greet)

    def test_multi_rule_match(self):
        grammar = Grammar("test")
        greet_word = PrivateRule("greetWord", AlternativeSet("hello", "hi"))
        name = PrivateRule("name", AlternativeSet("peter", "john",
                                                  "mary", "anna"))
        greet = PublicRule("greet",
                           RequiredGrouping(
                               RuleRef(greet_word),
                               RuleRef(name))
                           )
        grammar.add_rules(greet, greet_word, name)
        # Rule referencing both sub-rules
        self.assert_matches("hello john", greet)
        self.assert_matches("hello john".swapcase(), greet)
        for speech in ("hello", "john", ""):
            self.assert_no_match(speech, greet)
        # Greeting word rule
        for speech in ("hello", "HELLO", "hi", "HI"):
            self.assert_matches(speech, greet_word)
        self.assert_no_match("", greet_word)
        # Name rule
        self.assert_matches("john", name)
        self.assert_no_match("", name)
class MultiLingualTests(unittest.TestCase):
    """Tests that grammars, rules and imports work with non-ASCII text."""

    def test_names(self):
        """Cyrillic grammar, rule and import names work and match speech."""
        grammar = Grammar(name=u"грамматика")
        self.assertEqual(grammar.name, u"грамматика")
        rule = PublicRule(u"русский", AlternativeSet(
            u"привет", u"здравствуйте", u"пожалуйста"))
        import_ = Import(u"грамматика2.*")
        self.assertEqual(import_.name, u"грамматика2.*")
        # Test matching the rule
        self.assertTrue(rule.matches(u"здравствуйте"))
        # Test matching using the grammar
        grammar.add_rule(rule)
        self.assertListEqual(grammar.find_matching_rules(u"пожалуйста"), [rule])

    def test_dictation(self):
        """A Dictation expansion matches arbitrary non-ASCII speech."""
        # Fixed parenthesisation: previously ``matches`` was called on the
        # Dictation object and its boolean result was passed to PublicRule,
        # so ``assertTrue`` only checked that a rule object is truthy and
        # the test could never fail.
        self.assertTrue(PublicRule(u"всё", Dictation()).matches(u"это кофе"))
class VisibleRulesCase(unittest.TestCase):
    """Tests for the Grammar.visible_rules property."""

    def setUp(self):
        # A grammar containing only private (hidden) rules.
        self.grammar1 = Grammar("test")
        self.rule1 = PrivateRule("rule1", "Hello")
        self.rule2 = PrivateRule("rule2", "Hey")
        self.rule3 = PrivateRule("rule3", "Hi")
        self.grammar1.add_rules(self.rule1, self.rule2, self.rule3)
        # A grammar mixing public and private rules.
        self.grammar2 = Grammar("test2")
        self.rule4 = PublicRule("rule4", "Hello")
        self.rule5 = PublicRule("rule5", "Hey")
        self.rule6 = PrivateRule("rule6", "Hi")
        self.grammar2.add_rules(self.rule4, self.rule5, self.rule6)

    def test_none(self):
        """A grammar with only private rules has no visible rules."""
        self.assertListEqual(self.grammar1.visible_rules, [])

    def test_many(self):
        """Only the public rules are reported as visible."""
        self.assertListEqual(self.grammar2.visible_rules,
                             [self.rule4, self.rule5])
class RootGrammarCase(unittest.TestCase):
    """Tests for RootGrammar, which wraps all public rules in one <root> rule."""

    def setUp(self):
        # Grammar under test: one public rule referencing two private rules.
        self.grammar = RootGrammar(name="root")
        self.rule2 = PrivateRule("greetWord", AlternativeSet("hello", "hi"))
        self.rule3 = PrivateRule("name", AlternativeSet(
            "peter", "john", "mary", "anna"))
        self.rule1 = PublicRule("greet", RequiredGrouping(
            RuleRef(self.rule2), RuleRef(self.rule3)))
        self.grammar.add_rules(self.rule1, self.rule2, self.rule3)
        # Extra rules used by individual tests; not added to self.grammar here.
        self.rule5 = PrivateRule("greetWord", AlternativeSet("hello", "hi"))
        self.rule4 = PublicRule("greet", Sequence(RuleRef(self.rule5), "there"))
        self.rule6 = PublicRule("partingPhrase", AlternativeSet(
            "goodbye", "see you"))

    def test_compile(self):
        """Compiled output exposes only <root> publicly; 'greet' becomes private."""
        root = self.grammar
        expected = "#JSGF V1.0;\n" \
                   "grammar root;\n" \
                   "public <root> = (<greet>);\n" \
                   "<greet> = (<greetWord> <name>);\n" \
                   "<greetWord> = (hello|hi);\n" \
                   "<name> = (peter|john|mary|anna);\n"
        self.assertEqual(root.compile(), expected)

    def test_compile_to_file(self):
        """compile_to_file writes the compiled grammar to the given path."""
        root = self.grammar
        expected = "#JSGF V1.0;\n" \
                   "grammar root;\n" \
                   "public <root> = (<greet>);\n" \
                   "<greet> = (<greetWord> <name>);\n" \
                   "<greetWord> = (hello|hi);\n" \
                   "<name> = (peter|john|mary|anna);\n"
        # Create a temporary testing file.
        tf = tempfile.NamedTemporaryFile()
        root.compile_to_file(tf.name)
        # Check the file contents after writing to it.
        with open(tf.name) as f:
            content = f.read()
        try:
            self.assertEqual(expected, content)
        finally:
            # Always close and remove the temp file, even if the assertion fails.
            tf.close()

    def test_compile_add_remove_rule(self):
        """Adding/removing a public rule updates the <root> alternatives."""
        root = RootGrammar(rules=[self.rule5, self.rule4], name="root")
        expected_without = "#JSGF V1.0;\n" \
                           "grammar root;\n" \
                           "public <root> = (<greet>);\n" \
                           "<greetWord> = (hello|hi);\n" \
                           "<greet> = <greetWord> there;\n"
        expected_with = "#JSGF V1.0;\n" \
                        "grammar root;\n" \
                        "public <root> = (<greet>|<partingPhrase>);\n" \
                        "<greetWord> = (hello|hi);\n" \
                        "<greet> = <greetWord> there;\n" \
                        "<partingPhrase> = (goodbye|see you);\n"
        self.assertEqual(root.compile(), expected_without)
        root.add_rule(self.rule6)
        self.assertEqual(root.compile(), expected_with)
        # Test removing the partingPhrase rule using the name
        root.remove_rule("partingPhrase")
        self.assertEqual(root.compile(), expected_without)
        # Add the rule and test removing it using the rule object
        root.add_rule(self.rule6)
        self.assertEqual(root.compile(), expected_with)
        root.remove_rule(self.rule6)
        self.assertEqual(root.compile(), expected_without)

    def test_match(self):
        """Matching speech finds the originally-public rule, case-insensitively."""
        # Only rule1 should match
        root = self.grammar
        self.assertListEqual(root.find_matching_rules("Hello John"), [self.rule1])
        self.assertListEqual(root.find_matching_rules("HELLO mary"), [self.rule1])
        self.assertListEqual(root.find_matching_rules("hello ANNA"), [self.rule1])

    def test_match_add_remove(self):
        """Rules added later are matchable; removed rules stop matching."""
        root = RootGrammar(rules=[self.rule5, self.rule4], name="root")
        self.assertListEqual(root.find_matching_rules("Hello there"), [self.rule4])
        self.assertListEqual(root.find_matching_rules("Hi there"), [self.rule4])
        # Add a rule
        root.add_rule(self.rule6)
        self.assertListEqual(root.find_matching_rules("Goodbye"), [self.rule6])
        self.assertListEqual(root.find_matching_rules("See you"), [self.rule6])
        # Remove it and test again
        root.remove_rule("partingPhrase")
        self.assertListEqual(root.find_matching_rules("Goodbye"), [])
        self.assertListEqual(root.find_matching_rules("See you"), [])
        # Test again using the remove_rule(rule object) instead
        root.add_rule(self.rule6)
        self.assertListEqual(root.find_matching_rules("Goodbye"), [self.rule6])
        self.assertListEqual(root.find_matching_rules("See you"), [self.rule6])
        root.remove_rule(self.rule6)
        self.assertListEqual(root.find_matching_rules("Goodbye"), [])
        self.assertListEqual(root.find_matching_rules("See you"), [])

    def test_add_rules_with_taken_names(self):
        """Adding rules whose names are already used raises GrammarError."""
        root = self.grammar
        self.assertRaises(GrammarError, root.add_rule,
                          PublicRule("name", "bob"))
        self.assertRaises(GrammarError, root.add_rule,
                          PrivateRule("name", "bob"))
        rules_to_add = [PrivateRule("name", "bob"),
                        PublicRule("name", "bob")]
        self.assertRaises(GrammarError, root.add_rules,
                          *rules_to_add)
        # Test if adding a rule with the name 'root' raises an error
        self.assertRaises(GrammarError, root.add_rule, PublicRule("root", "test"))

    def test_create_grammar_with_rule_name_conflicts(self):
        """Exact duplicates are ignored; same-name different rules raise."""
        # Try with duplicate rules (should fail silently)
        g = RootGrammar()
        r = PublicRule("test", "test")
        g.add_rule(r)
        self.assertListEqual(g.rules, [r])
        g.add_rule(PublicRule("test", "test"))
        self.assertListEqual(g.rules, [r])
        # Try with slightly different rules
        self.assertRaises(GrammarError, RootGrammar,
                          [PublicRule("test", "testing"),
                           PublicRule("test", "test")])
        self.assertRaises(GrammarError, RootGrammar,
                          [PublicRule("test", "test"),
                           PrivateRule("test", "test")])
        self.assertRaises(GrammarError, RootGrammar,
                          [PublicRule("test", "testing"),
                           PrivateRule("test", "test")])

    def test_enable_disable_rule(self):
        """Disabling/enabling via the rule object toggles Rule.active."""
        self.grammar.disable_rule(self.rule1)
        self.assertFalse(self.rule1.active)
        self.grammar.enable_rule(self.rule1)
        self.assertTrue(self.rule1.active)

    def test_enable_disable_using_name(self):
        """Disabling/enabling via the rule name toggles Rule.active."""
        self.grammar.disable_rule("greetWord")
        self.assertFalse(self.rule2.active)
        self.grammar.enable_rule("greetWord")
        self.assertTrue(self.rule2.active)

    def test_enable_disable_non_existent(self):
        """Toggling rules not in the grammar raises GrammarError."""
        self.assertRaises(GrammarError, self.grammar.disable_rule, "hello")
        self.assertRaises(GrammarError, self.grammar.enable_rule, "hello")
        r = PublicRule("test", "hello")
        self.assertRaises(GrammarError, self.grammar.disable_rule, r)
        self.assertRaises(GrammarError, self.grammar.enable_rule, r)

    def test_enable_disable_using_dup_rule(self):
        """Toggling an equivalent (duplicate) rule also toggles the original."""
        r = PrivateRule("greetWord", AlternativeSet("hello", "hi"))
        self.assertTrue(self.rule2.active)
        self.grammar.disable_rule(r)
        self.assertFalse(r.active, "duplicate rule should be disabled")
        self.assertFalse(self.rule2.active, "original rule should be disabled")
        # Test enabling it again
        self.grammar.enable_rule(r)
        self.assertTrue(r.active, "duplicate rule should be enabled again")
        self.assertTrue(self.rule2.active, "original rule should be enabled")

    def test_enable_disable_compile_output(self):
        """Disabled rules are omitted from the compiled output."""
        enabled_output = "#JSGF V1.0;\n" \
                         "grammar root;\n" \
                         "public <root> = (<greet>);\n" \
                         "<greet> = (<greetWord> <name>);\n" \
                         "<greetWord> = (hello|hi);\n" \
                         "<name> = (peter|john|mary|anna);\n"
        self.assertEqual(self.grammar.compile(), enabled_output)
        self.grammar.disable_rule(self.rule1)
        self.assertFalse(self.rule1.active)
        self.assertEqual(
            self.grammar.compile(),
            "#JSGF V1.0;\n"
            "grammar root;\n",
            "disabled output shouldn't have the originally public 'greet' rule"
        )
        self.grammar.enable_rule(self.rule1)
        self.assertTrue(self.rule1.active)
        self.assertEqual(self.grammar.compile(), enabled_output)
        self.grammar.add_rule(PublicRule("test", "testing"))
        self.grammar.disable_rule(self.rule1)
        self.assertFalse(self.rule1.active)
        self.assertEqual(
            self.grammar.compile(),
            "#JSGF V1.0;\n"
            "grammar root;\n"
            "public <root> = (<test>);\n"
            "<greetWord> = (hello|hi);\n"
            "<name> = (peter|john|mary|anna);\n"
            "<test> = testing;\n",
            "disabled output should have the originally public 'test' rule"
        )
if __name__ == '__main__':
    # Discover and run all test cases in this module.
    unittest.main()
| true | true |
f73ccf43dc25aa7b62c8a9be0f28f353d16014f4 | 27,178 | py | Python | rasa/nlu/selectors/response_selector.py | joeriess/rasa | c1bdfd0934578f515a8bf3ab708c294b809300f8 | [
"Apache-2.0"
] | 1 | 2020-09-12T17:27:21.000Z | 2020-09-12T17:27:21.000Z | rasa/nlu/selectors/response_selector.py | joeriess/rasa | c1bdfd0934578f515a8bf3ab708c294b809300f8 | [
"Apache-2.0"
] | null | null | null | rasa/nlu/selectors/response_selector.py | joeriess/rasa | c1bdfd0934578f515a8bf3ab708c294b809300f8 | [
"Apache-2.0"
] | null | null | null | import copy
import logging
import numpy as np
import tensorflow as tf
from typing import Any, Dict, Optional, Text, Tuple, Union, List, Type
from rasa.shared.nlu.training_data import util
import rasa.shared.utils.io
from rasa.nlu.config import InvalidConfigError
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
from rasa.nlu.components import Component
from rasa.nlu.featurizers.featurizer import Featurizer
from rasa.nlu.model import Metadata
from rasa.nlu.classifiers.diet_classifier import (
DIETClassifier,
DIET,
LABEL_KEY,
LABEL_SUB_KEY,
EntityTagSpec,
SEQUENCE_LENGTH,
SENTENCE,
SEQUENCE,
)
from rasa.utils.tensorflow.constants import (
LABEL,
HIDDEN_LAYERS_SIZES,
SHARE_HIDDEN_LAYERS,
TRANSFORMER_SIZE,
NUM_TRANSFORMER_LAYERS,
NUM_HEADS,
BATCH_SIZES,
BATCH_STRATEGY,
EPOCHS,
RANDOM_SEED,
LEARNING_RATE,
RANKING_LENGTH,
LOSS_TYPE,
SIMILARITY_TYPE,
NUM_NEG,
SPARSE_INPUT_DROPOUT,
DENSE_INPUT_DROPOUT,
MASKED_LM,
ENTITY_RECOGNITION,
INTENT_CLASSIFICATION,
EVAL_NUM_EXAMPLES,
EVAL_NUM_EPOCHS,
UNIDIRECTIONAL_ENCODER,
DROP_RATE,
DROP_RATE_ATTENTION,
WEIGHT_SPARSITY,
NEGATIVE_MARGIN_SCALE,
REGULARIZATION_CONSTANT,
SCALE_LOSS,
USE_MAX_NEG_SIM,
MAX_NEG_SIM,
MAX_POS_SIM,
EMBEDDING_DIMENSION,
BILOU_FLAG,
KEY_RELATIVE_ATTENTION,
VALUE_RELATIVE_ATTENTION,
MAX_RELATIVE_POSITION,
RETRIEVAL_INTENT,
USE_TEXT_AS_LABEL,
SOFTMAX,
AUTO,
BALANCED,
TENSORBOARD_LOG_DIR,
TENSORBOARD_LOG_LEVEL,
CONCAT_DIMENSION,
FEATURIZERS,
DENSE_DIMENSION,
)
from rasa.nlu.constants import (
RESPONSE_SELECTOR_PROPERTY_NAME,
RESPONSE_SELECTOR_RETRIEVAL_INTENTS,
RESPONSE_SELECTOR_RESPONSES_KEY,
RESPONSE_SELECTOR_PREDICTION_KEY,
RESPONSE_SELECTOR_RANKING_KEY,
RESPONSE_SELECTOR_TEMPLATE_NAME_KEY,
PREDICTED_CONFIDENCE_KEY,
RESPONSE_SELECTOR_DEFAULT_INTENT,
)
from rasa.shared.nlu.constants import (
TEXT,
INTENT,
RESPONSE,
INTENT_RESPONSE_KEY,
INTENT_NAME_KEY,
)
from rasa.utils.tensorflow.model_data import RasaModelData
from rasa.utils.tensorflow.models import RasaModel
logger = logging.getLogger(__name__)
class ResponseSelector(DIETClassifier):
    """Response selector using supervised embeddings.

    The response selector embeds user inputs
    and candidate response into the same space.
    Supervised embeddings are trained by maximizing similarity between them.
    It also provides rankings of the response that did not "win".

    The supervised response selector needs to be preceded by
    a featurizer in the pipeline.
    This featurizer creates the features used for the embeddings.
    It is recommended to use ``CountVectorsFeaturizer`` that
    can be optionally preceded by ``SpacyNLP`` and ``SpacyTokenizer``.

    Based on the starspace idea from: https://arxiv.org/abs/1709.03856.
    However, in this implementation the `mu` parameter is treated differently
    and additional hidden layers are added together with dropout.
    """

    @classmethod
    def required_components(cls) -> List[Type[Component]]:
        """Components that should be included in the pipeline before this one."""
        return [Featurizer]

    defaults = {
        # ## Architecture of the used neural network
        # Hidden layer sizes for layers before the embedding layers for user message
        # and labels.
        # The number of hidden layers is equal to the length of the corresponding
        # list.
        HIDDEN_LAYERS_SIZES: {TEXT: [256, 128], LABEL: [256, 128]},
        # Whether to share the hidden layer weights between input words and responses
        SHARE_HIDDEN_LAYERS: False,
        # Number of units in transformer
        TRANSFORMER_SIZE: None,
        # Number of transformer layers
        NUM_TRANSFORMER_LAYERS: 0,
        # Number of attention heads in transformer
        NUM_HEADS: 4,
        # If 'True' use key relative embeddings in attention
        KEY_RELATIVE_ATTENTION: False,
        # If 'True' use key relative embeddings in attention
        VALUE_RELATIVE_ATTENTION: False,
        # Max position for relative embeddings
        MAX_RELATIVE_POSITION: None,
        # Use a unidirectional or bidirectional encoder.
        UNIDIRECTIONAL_ENCODER: False,
        # ## Training parameters
        # Initial and final batch sizes:
        # Batch size will be linearly increased for each epoch.
        BATCH_SIZES: [64, 256],
        # Strategy used when creating batches.
        # Can be either 'sequence' or 'balanced'.
        BATCH_STRATEGY: BALANCED,
        # Number of epochs to train
        EPOCHS: 300,
        # Set random seed to any 'int' to get reproducible results
        RANDOM_SEED: None,
        # Initial learning rate for the optimizer
        LEARNING_RATE: 0.001,
        # ## Parameters for embeddings
        # Dimension size of embedding vectors
        EMBEDDING_DIMENSION: 20,
        # Default dense dimension to use if no dense features are present.
        DENSE_DIMENSION: {TEXT: 512, LABEL: 512},
        # Default dimension to use for concatenating sequence and sentence features.
        CONCAT_DIMENSION: {TEXT: 512, LABEL: 512},
        # The number of incorrect labels. The algorithm will minimize
        # their similarity to the user input during training.
        NUM_NEG: 20,
        # Type of similarity measure to use, either 'auto' or 'cosine' or 'inner'.
        SIMILARITY_TYPE: AUTO,
        # The type of the loss function, either 'softmax' or 'margin'.
        LOSS_TYPE: SOFTMAX,
        # Number of top actions to normalize scores for loss type 'softmax'.
        # Set to 0 to turn off normalization.
        RANKING_LENGTH: 10,
        # Indicates how similar the algorithm should try to make embedding vectors
        # for correct labels.
        # Should be 0.0 < ... < 1.0 for 'cosine' similarity type.
        MAX_POS_SIM: 0.8,
        # Maximum negative similarity for incorrect labels.
        # Should be -1.0 < ... < 1.0 for 'cosine' similarity type.
        MAX_NEG_SIM: -0.4,
        # If 'True' the algorithm only minimizes maximum similarity over
        # incorrect intent labels, used only if 'loss_type' is set to 'margin'.
        USE_MAX_NEG_SIM: True,
        # Scale loss inverse proportionally to confidence of correct prediction
        SCALE_LOSS: True,
        # ## Regularization parameters
        # The scale of regularization
        REGULARIZATION_CONSTANT: 0.002,
        # Sparsity of the weights in dense layers
        WEIGHT_SPARSITY: 0.0,
        # The scale of how important is to minimize the maximum similarity
        # between embeddings of different labels.
        NEGATIVE_MARGIN_SCALE: 0.8,
        # Dropout rate for encoder
        DROP_RATE: 0.2,
        # Dropout rate for attention
        DROP_RATE_ATTENTION: 0,
        # If 'True' apply dropout to sparse input tensors
        SPARSE_INPUT_DROPOUT: False,
        # If 'True' apply dropout to dense input tensors
        DENSE_INPUT_DROPOUT: False,
        # ## Evaluation parameters
        # How often calculate validation accuracy.
        # Small values may hurt performance, e.g. model accuracy.
        EVAL_NUM_EPOCHS: 20,
        # How many examples to use for hold out validation set
        # Large values may hurt performance, e.g. model accuracy.
        EVAL_NUM_EXAMPLES: 0,
        # ## Selector config
        # If 'True' random tokens of the input message will be masked and the model
        # should predict those tokens.
        MASKED_LM: False,
        # Name of the intent for which this response selector is to be trained
        RETRIEVAL_INTENT: None,
        # Boolean flag to check if actual text of the response
        # should be used as ground truth label for training the model.
        USE_TEXT_AS_LABEL: False,
        # If you want to use tensorboard to visualize training and validation metrics,
        # set this option to a valid output directory.
        TENSORBOARD_LOG_DIR: None,
        # Define when training metrics for tensorboard should be logged.
        # Either after every epoch or for every training step.
        # Valid values: 'epoch' and 'minibatch'
        TENSORBOARD_LOG_LEVEL: "epoch",
        # Specify what features to use as sequence and sentence features
        # By default all features in the pipeline are used.
        FEATURIZERS: [],
    }

    def __init__(
        self,
        component_config: Optional[Dict[Text, Any]] = None,
        index_label_id_mapping: Optional[Dict[int, Text]] = None,
        entity_tag_specs: Optional[List[EntityTagSpec]] = None,
        model: Optional[RasaModel] = None,
        all_retrieval_intents: Optional[List[Text]] = None,
        responses: Optional[Dict[Text, List[Dict[Text, Any]]]] = None,
    ) -> None:
        """Create the selector, forcing the config flags this component requires.

        Args:
            component_config: configuration for the component.
            index_label_id_mapping: mapping from label index to label id.
            entity_tag_specs: entity tag specifications (unused by this selector).
            model: a (loaded) `RasaModel` instance.
            all_retrieval_intents: all retrieval intents found in training data.
            responses: response templates keyed by retrieval intent.
        """
        component_config = component_config or {}
        # the following properties cannot be adapted for the ResponseSelector
        component_config[INTENT_CLASSIFICATION] = True
        component_config[ENTITY_RECOGNITION] = False
        component_config[BILOU_FLAG] = None
        # Initialize defaults
        self.responses = responses or {}
        self.all_retrieval_intents = all_retrieval_intents or []
        self.retrieval_intent = None
        self.use_text_as_label = False
        super().__init__(
            component_config, index_label_id_mapping, entity_tag_specs, model
        )

    @property
    def label_key(self) -> Text:
        """Key under which label features are stored in the model data."""
        return LABEL_KEY

    @property
    def label_sub_key(self) -> Text:
        """Sub-key under which label ids are stored in the model data."""
        return LABEL_SUB_KEY

    @staticmethod
    def model_class(use_text_as_label: bool) -> Type[RasaModel]:
        """Return the model class to train, depending on the label type used."""
        if use_text_as_label:
            return DIET2DIET
        else:
            return DIET2BOW

    def _load_selector_params(self, config: Dict[Text, Any]) -> None:
        """Read the selector-specific parameters from the component config."""
        self.retrieval_intent = config[RETRIEVAL_INTENT]
        self.use_text_as_label = config[USE_TEXT_AS_LABEL]

    def _check_config_parameters(self) -> None:
        """Validate the config and load selector-specific parameters."""
        super()._check_config_parameters()
        self._load_selector_params(self.component_config)

    def _set_message_property(
        self, message: Message, prediction_dict: Dict[Text, Any], selector_key: Text
    ) -> None:
        """Attach this selector's prediction to the message under its key."""
        message_selector_properties = message.get(RESPONSE_SELECTOR_PROPERTY_NAME, {})
        message_selector_properties[
            RESPONSE_SELECTOR_RETRIEVAL_INTENTS
        ] = self.all_retrieval_intents
        message_selector_properties[selector_key] = prediction_dict
        message.set(
            RESPONSE_SELECTOR_PROPERTY_NAME,
            message_selector_properties,
            add_to_output=True,
        )

    def preprocess_train_data(self, training_data: TrainingData) -> RasaModelData:
        """Prepares data for training.

        Performs sanity checks on training data, extracts encodings for labels.
        """
        if self.retrieval_intent:
            training_data = training_data.filter_training_examples(
                lambda ex: self.retrieval_intent == ex.get(INTENT)
            )
        else:
            # retrieval intent was left to its default value
            logger.info(
                "Retrieval intent parameter was left to its default value. This "
                "response selector will be trained on training examples combining "
                "all retrieval intents."
            )
        label_attribute = RESPONSE if self.use_text_as_label else INTENT_RESPONSE_KEY
        label_id_index_mapping = self._label_id_index_mapping(
            training_data, attribute=label_attribute
        )
        self.responses = training_data.responses
        self.all_retrieval_intents = list(training_data.retrieval_intents)
        if not label_id_index_mapping:
            # no labels are present to train
            return RasaModelData()
        self.index_label_id_mapping = self._invert_mapping(label_id_index_mapping)
        self._label_data = self._create_label_data(
            training_data, label_id_index_mapping, attribute=label_attribute
        )
        model_data = self._create_model_data(
            training_data.intent_examples,
            label_id_index_mapping,
            label_attribute=label_attribute,
        )
        self._check_input_dimension_consistency(model_data)
        return model_data

    def _resolve_intent_response_key(
        self, label: Dict[Text, Optional[Text]]
    ) -> Optional[Text]:
        """Given a label, return the response key based on the label id.

        Args:
            label: predicted label by the selector

        Returns:
            The match for the label that was found in the known responses.
            It is always guaranteed to have a match, otherwise that case should have been caught
            earlier and a warning should have been raised.
        """
        for key, responses in self.responses.items():
            # First check if the predicted label was the key itself
            search_key = util.template_key_to_intent_response_key(key)
            if hash(search_key) == label.get("id"):
                return search_key
            # Otherwise loop over the responses to check if the text has a direct match
            for response in responses:
                if hash(response.get(TEXT, "")) == label.get("id"):
                    return search_key
        return None

    def process(self, message: Message, **kwargs: Any) -> None:
        """Return the most likely response, the associated intent_response_key and its similarity to the input."""
        out = self._predict(message)
        top_label, label_ranking = self._predict_label(out)
        # Get the exact intent_response_key and the associated
        # response templates for the top predicted label
        label_intent_response_key = (
            self._resolve_intent_response_key(top_label) or top_label[INTENT_NAME_KEY]
        )
        label_response_templates = self.responses.get(
            util.intent_response_key_to_template_key(label_intent_response_key)
        )
        if label_intent_response_key and not label_response_templates:
            # response templates seem to be unavailable,
            # likely an issue with the training data
            # we'll use a fallback instead
            #
            # NOTE: the message previously ran its sentences together
            # ("...for X This means ... data.Please make sure..."); the
            # separators below fix that.
            rasa.shared.utils.io.raise_warning(
                f"Unable to fetch response templates for {label_intent_response_key}. "
                f"This means that there is likely an issue with the training data. "
                f"Please make sure you have added response templates for this intent."
            )
            label_response_templates = [{TEXT: label_intent_response_key}]
        for label in label_ranking:
            label[INTENT_RESPONSE_KEY] = (
                self._resolve_intent_response_key(label) or label[INTENT_NAME_KEY]
            )
            # Remove the "name" key since it is either the same as
            # "intent_response_key" or it is the response text which
            # is not needed in the ranking.
            label.pop(INTENT_NAME_KEY)
        selector_key = (
            self.retrieval_intent
            if self.retrieval_intent
            else RESPONSE_SELECTOR_DEFAULT_INTENT
        )
        logger.debug(
            f"Adding following selector key to message property: {selector_key}"
        )
        prediction_dict = {
            RESPONSE_SELECTOR_PREDICTION_KEY: {
                "id": top_label["id"],
                RESPONSE_SELECTOR_RESPONSES_KEY: label_response_templates,
                PREDICTED_CONFIDENCE_KEY: top_label[PREDICTED_CONFIDENCE_KEY],
                INTENT_RESPONSE_KEY: label_intent_response_key,
                RESPONSE_SELECTOR_TEMPLATE_NAME_KEY: util.intent_response_key_to_template_key(
                    label_intent_response_key
                ),
            },
            RESPONSE_SELECTOR_RANKING_KEY: label_ranking,
        }
        self._set_message_property(message, prediction_dict, selector_key)

    def persist(self, file_name: Text, model_dir: Text) -> Dict[Text, Any]:
        """Persist this model into the passed directory.

        Return the metadata necessary to load the model again.
        """
        if self.model is None:
            return {"file": None}
        super().persist(file_name, model_dir)
        return {
            "file": file_name,
            "responses": self.responses,
            "all_retrieval_intents": self.all_retrieval_intents,
        }

    @classmethod
    def _load_model_class(
        cls,
        tf_model_file: Text,
        model_data_example: RasaModelData,
        label_data: RasaModelData,
        entity_tag_specs: List[EntityTagSpec],
        meta: Dict[Text, Any],
    ) -> "RasaModel":
        """Load the TF model class matching the persisted label-type setting."""
        return cls.model_class(meta[USE_TEXT_AS_LABEL]).load(
            tf_model_file,
            model_data_example,
            data_signature=model_data_example.get_signature(),
            label_data=label_data,
            entity_tag_specs=entity_tag_specs,
            config=copy.deepcopy(meta),
        )

    def _instantiate_model_class(self, model_data: RasaModelData) -> "RasaModel":
        """Instantiate a new TF model for training."""
        return self.model_class(self.use_text_as_label)(
            data_signature=model_data.get_signature(),
            label_data=self._label_data,
            entity_tag_specs=self._entity_tag_specs,
            config=self.component_config,
        )

    @classmethod
    def load(
        cls,
        meta: Dict[Text, Any],
        model_dir: Text = None,
        model_metadata: Metadata = None,
        cached_component: Optional["ResponseSelector"] = None,
        **kwargs: Any,
    ) -> "ResponseSelector":
        """Loads the trained model from the provided directory."""
        model = super().load(
            meta, model_dir, model_metadata, cached_component, **kwargs
        )
        if not meta.get("file"):
            return model  # pytype: disable=bad-return-type
        model.responses = meta.get("responses", {})
        model.all_retrieval_intents = meta.get("all_retrieval_intents", [])
        return model  # pytype: disable=bad-return-type
class DIET2BOW(DIET):
    """DIET variant that classifies responses against bag-of-words labels."""

    def _create_metrics(self) -> None:
        """Create Keras metric trackers; ``self.metrics`` preserves order."""
        # Losses are created before accuracies so losses are reported first.
        mean = tf.keras.metrics.Mean
        self.mask_loss = mean(name="m_loss")
        self.response_loss = mean(name="r_loss")
        self.mask_acc = mean(name="m_acc")
        self.response_acc = mean(name="r_acc")

    def _update_metrics_to_log(self) -> None:
        """Select which metrics get logged; losses only at DEBUG level."""
        log_losses = logging.getLogger("rasa").level == logging.DEBUG
        if self.config[MASKED_LM]:
            self.metrics_to_log.append("m_acc")
            if log_losses:
                self.metrics_to_log.append("m_loss")
        self.metrics_to_log.append("r_acc")
        if log_losses:
            self.metrics_to_log.append("r_loss")
        self._log_metric_info()

    def _log_metric_info(self) -> None:
        """Log a human-readable description of each tracked metric."""
        descriptions = {"t": "total", "m": "mask", "r": "response"}
        logger.debug("Following metrics will be logged during training: ")
        for metric in self.metrics_to_log:
            prefix, _, kind = metric.partition("_")
            logger.debug(f"  {metric} ({descriptions[prefix]} {kind})")

    def _update_label_metrics(self, loss: tf.Tensor, acc: tf.Tensor) -> None:
        """Accumulate response-classification loss and accuracy."""
        self.response_loss.update_state(loss)
        self.response_acc.update_state(acc)
class DIET2DIET(DIET):
def _check_data(self) -> None:
if TEXT not in self.data_signature:
raise InvalidConfigError(
f"No text features specified. "
f"Cannot train '{self.__class__.__name__}' model."
)
if LABEL not in self.data_signature:
raise InvalidConfigError(
f"No label features specified. "
f"Cannot train '{self.__class__.__name__}' model."
)
if (
self.config[SHARE_HIDDEN_LAYERS]
and self.data_signature[TEXT][SENTENCE]
!= self.data_signature[LABEL][SENTENCE]
):
raise ValueError(
"If hidden layer weights are shared, data signatures "
"for text_features and label_features must coincide."
)
def _create_metrics(self) -> None:
# self.metrics preserve order
# output losses first
self.mask_loss = tf.keras.metrics.Mean(name="m_loss")
self.response_loss = tf.keras.metrics.Mean(name="r_loss")
# output accuracies second
self.mask_acc = tf.keras.metrics.Mean(name="m_acc")
self.response_acc = tf.keras.metrics.Mean(name="r_acc")
def _update_metrics_to_log(self) -> None:
debug_log_level = logging.getLogger("rasa").level == logging.DEBUG
if self.config[MASKED_LM]:
self.metrics_to_log.append("m_acc")
if debug_log_level:
self.metrics_to_log.append("m_loss")
self.metrics_to_log.append("r_acc")
if debug_log_level:
self.metrics_to_log.append("r_loss")
self._log_metric_info()
def _log_metric_info(self) -> None:
metric_name = {"t": "total", "m": "mask", "r": "response"}
logger.debug("Following metrics will be logged during training: ")
for metric in self.metrics_to_log:
parts = metric.split("_")
name = f"{metric_name[parts[0]]} {parts[1]}"
logger.debug(f" {metric} ({name})")
def _prepare_layers(self) -> None:
self.text_name = TEXT
self.label_name = TEXT if self.config[SHARE_HIDDEN_LAYERS] else LABEL
self._prepare_sequence_layers(self.text_name)
self._prepare_sequence_layers(self.label_name)
if self.config[MASKED_LM]:
self._prepare_mask_lm_layers(self.text_name)
self._prepare_label_classification_layers()
def _create_all_labels(self) -> Tuple[tf.Tensor, tf.Tensor]:
all_label_ids = self.tf_label_data[LABEL_KEY][LABEL_SUB_KEY][0]
sequence_mask_label = super()._get_mask_for(
self.tf_label_data, LABEL, SEQUENCE_LENGTH
)
batch_dim = tf.shape(self.tf_label_data[LABEL_KEY][LABEL_SUB_KEY][0])[0]
sequence_lengths_label = self._get_sequence_lengths(
self.tf_label_data, LABEL, SEQUENCE_LENGTH, batch_dim
)
mask_label = self._compute_mask(sequence_lengths_label)
label_transformed, _, _, _ = self._create_sequence(
self.tf_label_data[LABEL][SEQUENCE],
self.tf_label_data[LABEL][SENTENCE],
sequence_mask_label,
mask_label,
self.label_name,
)
sentence_label = self._last_token(label_transformed, sequence_lengths_label)
all_labels_embed = self._tf_layers[f"embed.{LABEL}"](sentence_label)
return all_label_ids, all_labels_embed
    def batch_loss(
        self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]]
    ) -> tf.Tensor:
        """Compute the total training loss for one batch.

        The batch is encoded twice -- once for the user text and once for the
        label (response) -- and the resulting sentence-level vectors are
        compared by the label loss. When masked language modelling is enabled,
        its auxiliary loss is added as well.

        Args:
            batch_in: Tuple of tensors/arrays in flattened model-data format.

        Returns:
            Scalar tensor with the summed loss of all enabled tasks.
        """
        tf_batch_data = self.batch_to_model_data_format(batch_in, self.data_signature)
        batch_dim = self._get_batch_dim(tf_batch_data)
        # Encode the user text side.
        sequence_mask_text = super()._get_mask_for(tf_batch_data, TEXT, SEQUENCE_LENGTH)
        sequence_lengths_text = self._get_sequence_lengths(
            tf_batch_data, TEXT, SEQUENCE_LENGTH, batch_dim
        )
        mask_text = self._compute_mask(sequence_lengths_text)
        (
            text_transformed,
            text_in,
            text_seq_ids,
            lm_mask_bool_text,
        ) = self._create_sequence(
            tf_batch_data[TEXT][SEQUENCE],
            tf_batch_data[TEXT][SENTENCE],
            sequence_mask_text,
            mask_text,
            self.text_name,
            sparse_dropout=self.config[SPARSE_INPUT_DROPOUT],
            dense_dropout=self.config[DENSE_INPUT_DROPOUT],
            masked_lm_loss=self.config[MASKED_LM],
            sequence_ids=True,
        )
        # Encode the label / response side.
        sequence_mask_label = super()._get_mask_for(
            tf_batch_data, LABEL, SEQUENCE_LENGTH
        )
        sequence_lengths_label = self._get_sequence_lengths(
            tf_batch_data, LABEL, SEQUENCE_LENGTH, batch_dim
        )
        mask_label = self._compute_mask(sequence_lengths_label)
        label_transformed, _, _, _ = self._create_sequence(
            tf_batch_data[LABEL][SEQUENCE],
            tf_batch_data[LABEL][SENTENCE],
            sequence_mask_label,
            mask_label,
            self.label_name,
        )
        losses = []
        if self.config[MASKED_LM]:
            # Optional masked-LM auxiliary loss on the text encoder.
            loss, acc = self._mask_loss(
                text_transformed,
                text_in,
                text_seq_ids,
                lm_mask_bool_text,
                self.text_name,
            )
            self.mask_loss.update_state(loss)
            self.mask_acc.update_state(acc)
            losses.append(loss)
        # get sentence feature vector for label classification
        sentence_vector_text = self._last_token(text_transformed, sequence_lengths_text)
        sentence_vector_label = self._last_token(
            label_transformed, sequence_lengths_label
        )
        label_ids = tf_batch_data[LABEL_KEY][LABEL_SUB_KEY][0]
        loss, acc = self._calculate_label_loss(
            sentence_vector_text, sentence_vector_label, label_ids
        )
        self.response_loss.update_state(loss)
        self.response_acc.update_state(acc)
        losses.append(loss)
        return tf.math.add_n(losses)
    def batch_predict(
        self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]]
    ) -> Dict[Text, tf.Tensor]:
        """Predict label similarity scores for one batch at inference time.

        Args:
            batch_in: Tuple of tensors/arrays in flattened model-data format.

        Returns:
            Dict with key ``"i_scores"`` holding the confidence of every
            candidate label for each input example.
        """
        tf_batch_data = self.batch_to_model_data_format(
            batch_in, self.predict_data_signature
        )
        sequence_mask_text = super()._get_mask_for(tf_batch_data, TEXT, SEQUENCE_LENGTH)
        # batch_dim=1: prediction is performed for a single message at a time.
        sequence_lengths_text = self._get_sequence_lengths(
            tf_batch_data, TEXT, SEQUENCE_LENGTH, batch_dim=1
        )
        mask_text = self._compute_mask(sequence_lengths_text)
        text_transformed, _, _, _ = self._create_sequence(
            tf_batch_data[TEXT][SEQUENCE],
            tf_batch_data[TEXT][SENTENCE],
            sequence_mask_text,
            mask_text,
            self.text_name,
        )
        out = {}
        # Lazily embed all candidate labels once and cache them for later calls.
        if self.all_labels_embed is None:
            _, self.all_labels_embed = self._create_all_labels()
        # get sentence feature vector for intent classification
        sentence_vector = self._last_token(text_transformed, sequence_lengths_text)
        sentence_vector_embed = self._tf_layers[f"embed.{TEXT}"](sentence_vector)
        # Similarity of the input embedding against every cached label embedding.
        sim_all = self._tf_layers[f"loss.{LABEL}"].sim(
            sentence_vector_embed[:, tf.newaxis, :],
            self.all_labels_embed[tf.newaxis, :, :],
        )
        scores = self._tf_layers[f"loss.{LABEL}"].confidence_from_sim(
            sim_all, self.config[SIMILARITY_TYPE]
        )
        out["i_scores"] = scores
        return out
| 36.776725 | 114 | 0.650673 | import copy
import logging
import numpy as np
import tensorflow as tf
from typing import Any, Dict, Optional, Text, Tuple, Union, List, Type
from rasa.shared.nlu.training_data import util
import rasa.shared.utils.io
from rasa.nlu.config import InvalidConfigError
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
from rasa.nlu.components import Component
from rasa.nlu.featurizers.featurizer import Featurizer
from rasa.nlu.model import Metadata
from rasa.nlu.classifiers.diet_classifier import (
DIETClassifier,
DIET,
LABEL_KEY,
LABEL_SUB_KEY,
EntityTagSpec,
SEQUENCE_LENGTH,
SENTENCE,
SEQUENCE,
)
from rasa.utils.tensorflow.constants import (
LABEL,
HIDDEN_LAYERS_SIZES,
SHARE_HIDDEN_LAYERS,
TRANSFORMER_SIZE,
NUM_TRANSFORMER_LAYERS,
NUM_HEADS,
BATCH_SIZES,
BATCH_STRATEGY,
EPOCHS,
RANDOM_SEED,
LEARNING_RATE,
RANKING_LENGTH,
LOSS_TYPE,
SIMILARITY_TYPE,
NUM_NEG,
SPARSE_INPUT_DROPOUT,
DENSE_INPUT_DROPOUT,
MASKED_LM,
ENTITY_RECOGNITION,
INTENT_CLASSIFICATION,
EVAL_NUM_EXAMPLES,
EVAL_NUM_EPOCHS,
UNIDIRECTIONAL_ENCODER,
DROP_RATE,
DROP_RATE_ATTENTION,
WEIGHT_SPARSITY,
NEGATIVE_MARGIN_SCALE,
REGULARIZATION_CONSTANT,
SCALE_LOSS,
USE_MAX_NEG_SIM,
MAX_NEG_SIM,
MAX_POS_SIM,
EMBEDDING_DIMENSION,
BILOU_FLAG,
KEY_RELATIVE_ATTENTION,
VALUE_RELATIVE_ATTENTION,
MAX_RELATIVE_POSITION,
RETRIEVAL_INTENT,
USE_TEXT_AS_LABEL,
SOFTMAX,
AUTO,
BALANCED,
TENSORBOARD_LOG_DIR,
TENSORBOARD_LOG_LEVEL,
CONCAT_DIMENSION,
FEATURIZERS,
DENSE_DIMENSION,
)
from rasa.nlu.constants import (
RESPONSE_SELECTOR_PROPERTY_NAME,
RESPONSE_SELECTOR_RETRIEVAL_INTENTS,
RESPONSE_SELECTOR_RESPONSES_KEY,
RESPONSE_SELECTOR_PREDICTION_KEY,
RESPONSE_SELECTOR_RANKING_KEY,
RESPONSE_SELECTOR_TEMPLATE_NAME_KEY,
PREDICTED_CONFIDENCE_KEY,
RESPONSE_SELECTOR_DEFAULT_INTENT,
)
from rasa.shared.nlu.constants import (
TEXT,
INTENT,
RESPONSE,
INTENT_RESPONSE_KEY,
INTENT_NAME_KEY,
)
from rasa.utils.tensorflow.model_data import RasaModelData
from rasa.utils.tensorflow.models import RasaModel
logger = logging.getLogger(__name__)
class ResponseSelector(DIETClassifier):
@classmethod
def required_components(cls) -> List[Type[Component]]:
return [Featurizer]
defaults = {
, LABEL: [256, 128]},
SHARE_HIDDEN_LAYERS: False,
TRANSFORMER_SIZE: None,
NUM_TRANSFORMER_LAYERS: 0,
NUM_HEADS: 4,
KEY_RELATIVE_ATTENTION: False,
VALUE_RELATIVE_ATTENTION: False,
MAX_RELATIVE_POSITION: None,
UNIDIRECTIONAL_ENCODER: False,
, 256],
BATCH_STRATEGY: BALANCED,
EPOCHS: 300,
RANDOM_SEED: None,
LEARNING_RATE: 0.001,
DENSE_DIMENSION: {TEXT: 512, LABEL: 512},
CONCAT_DIMENSION: {TEXT: 512, LABEL: 512},
NUM_NEG: 20,
SIMILARITY_TYPE: AUTO,
LOSS_TYPE: SOFTMAX,
RANKING_LENGTH: 10,
MAX_POS_SIM: 0.8,
MAX_NEG_SIM: -0.4,
USE_MAX_NEG_SIM: True,
SCALE_LOSS: True,
WEIGHT_SPARSITY: 0.0,
NEGATIVE_MARGIN_SCALE: 0.8,
DROP_RATE: 0.2,
DROP_RATE_ATTENTION: 0,
SPARSE_INPUT_DROPOUT: False,
DENSE_INPUT_DROPOUT: False,
EVAL_NUM_EXAMPLES: 0,
M: False,
RETRIEVAL_INTENT: None,
USE_TEXT_AS_LABEL: False,
TENSORBOARD_LOG_DIR: None,
TENSORBOARD_LOG_LEVEL: "epoch",
FEATURIZERS: [],
}
def __init__(
self,
component_config: Optional[Dict[Text, Any]] = None,
index_label_id_mapping: Optional[Dict[int, Text]] = None,
entity_tag_specs: Optional[List[EntityTagSpec]] = None,
model: Optional[RasaModel] = None,
all_retrieval_intents: Optional[List[Text]] = None,
responses: Optional[Dict[Text, List[Dict[Text, Any]]]] = None,
) -> None:
component_config = component_config or {}
component_config[INTENT_CLASSIFICATION] = True
component_config[ENTITY_RECOGNITION] = False
component_config[BILOU_FLAG] = None
self.responses = responses or {}
self.all_retrieval_intents = all_retrieval_intents or []
self.retrieval_intent = None
self.use_text_as_label = False
super().__init__(
component_config, index_label_id_mapping, entity_tag_specs, model
)
@property
def label_key(self) -> Text:
return LABEL_KEY
@property
def label_sub_key(self) -> Text:
return LABEL_SUB_KEY
@staticmethod
def model_class(use_text_as_label: bool) -> Type[RasaModel]:
if use_text_as_label:
return DIET2DIET
else:
return DIET2BOW
def _load_selector_params(self, config: Dict[Text, Any]) -> None:
self.retrieval_intent = config[RETRIEVAL_INTENT]
self.use_text_as_label = config[USE_TEXT_AS_LABEL]
def _check_config_parameters(self) -> None:
super()._check_config_parameters()
self._load_selector_params(self.component_config)
def _set_message_property(
self, message: Message, prediction_dict: Dict[Text, Any], selector_key: Text
) -> None:
message_selector_properties = message.get(RESPONSE_SELECTOR_PROPERTY_NAME, {})
message_selector_properties[
RESPONSE_SELECTOR_RETRIEVAL_INTENTS
] = self.all_retrieval_intents
message_selector_properties[selector_key] = prediction_dict
message.set(
RESPONSE_SELECTOR_PROPERTY_NAME,
message_selector_properties,
add_to_output=True,
)
def preprocess_train_data(self, training_data: TrainingData) -> RasaModelData:
if self.retrieval_intent:
training_data = training_data.filter_training_examples(
lambda ex: self.retrieval_intent == ex.get(INTENT)
)
else:
logger.info(
"Retrieval intent parameter was left to its default value. This "
"response selector will be trained on training examples combining "
"all retrieval intents."
)
label_attribute = RESPONSE if self.use_text_as_label else INTENT_RESPONSE_KEY
label_id_index_mapping = self._label_id_index_mapping(
training_data, attribute=label_attribute
)
self.responses = training_data.responses
self.all_retrieval_intents = list(training_data.retrieval_intents)
if not label_id_index_mapping:
return RasaModelData()
self.index_label_id_mapping = self._invert_mapping(label_id_index_mapping)
self._label_data = self._create_label_data(
training_data, label_id_index_mapping, attribute=label_attribute
)
model_data = self._create_model_data(
training_data.intent_examples,
label_id_index_mapping,
label_attribute=label_attribute,
)
self._check_input_dimension_consistency(model_data)
return model_data
def _resolve_intent_response_key(
self, label: Dict[Text, Optional[Text]]
) -> Optional[Text]:
for key, responses in self.responses.items():
search_key = util.template_key_to_intent_response_key(key)
if hash(search_key) == label.get("id"):
return search_key
for response in responses:
if hash(response.get(TEXT, "")) == label.get("id"):
return search_key
return None
def process(self, message: Message, **kwargs: Any) -> None:
out = self._predict(message)
top_label, label_ranking = self._predict_label(out)
label_intent_response_key = (
self._resolve_intent_response_key(top_label) or top_label[INTENT_NAME_KEY]
)
label_response_templates = self.responses.get(
util.intent_response_key_to_template_key(label_intent_response_key)
)
if label_intent_response_key and not label_response_templates:
rasa.shared.utils.io.raise_warning(
f"Unable to fetch response templates for {label_intent_response_key} "
f"This means that there is likely an issue with the training data."
f"Please make sure you have added response templates for this intent."
)
label_response_templates = [{TEXT: label_intent_response_key}]
for label in label_ranking:
label[INTENT_RESPONSE_KEY] = (
self._resolve_intent_response_key(label) or label[INTENT_NAME_KEY]
)
# Remove the "name" key since it is either the same as
# "intent_response_key" or it is the response text which
# is not needed in the ranking.
label.pop(INTENT_NAME_KEY)
selector_key = (
self.retrieval_intent
if self.retrieval_intent
else RESPONSE_SELECTOR_DEFAULT_INTENT
)
logger.debug(
f"Adding following selector key to message property: {selector_key}"
)
prediction_dict = {
RESPONSE_SELECTOR_PREDICTION_KEY: {
"id": top_label["id"],
RESPONSE_SELECTOR_RESPONSES_KEY: label_response_templates,
PREDICTED_CONFIDENCE_KEY: top_label[PREDICTED_CONFIDENCE_KEY],
INTENT_RESPONSE_KEY: label_intent_response_key,
RESPONSE_SELECTOR_TEMPLATE_NAME_KEY: util.intent_response_key_to_template_key(
label_intent_response_key
),
},
RESPONSE_SELECTOR_RANKING_KEY: label_ranking,
}
self._set_message_property(message, prediction_dict, selector_key)
def persist(self, file_name: Text, model_dir: Text) -> Dict[Text, Any]:
if self.model is None:
return {"file": None}
super().persist(file_name, model_dir)
return {
"file": file_name,
"responses": self.responses,
"all_retrieval_intents": self.all_retrieval_intents,
}
@classmethod
def _load_model_class(
cls,
tf_model_file: Text,
model_data_example: RasaModelData,
label_data: RasaModelData,
entity_tag_specs: List[EntityTagSpec],
meta: Dict[Text, Any],
) -> "RasaModel":
return cls.model_class(meta[USE_TEXT_AS_LABEL]).load(
tf_model_file,
model_data_example,
data_signature=model_data_example.get_signature(),
label_data=label_data,
entity_tag_specs=entity_tag_specs,
config=copy.deepcopy(meta),
)
def _instantiate_model_class(self, model_data: RasaModelData) -> "RasaModel":
return self.model_class(self.use_text_as_label)(
data_signature=model_data.get_signature(),
label_data=self._label_data,
entity_tag_specs=self._entity_tag_specs,
config=self.component_config,
)
@classmethod
def load(
cls,
meta: Dict[Text, Any],
model_dir: Text = None,
model_metadata: Metadata = None,
cached_component: Optional["ResponseSelector"] = None,
**kwargs: Any,
) -> "ResponseSelector":
model = super().load(
meta, model_dir, model_metadata, cached_component, **kwargs
)
if not meta.get("file"):
return model # pytype: disable=bad-return-type
model.responses = meta.get("responses", {})
model.all_retrieval_intents = meta.get("all_retrieval_intents", [])
return model # pytype: disable=bad-return-type
class DIET2BOW(DIET):
def _create_metrics(self) -> None:
# self.metrics preserve order
# output losses first
self.mask_loss = tf.keras.metrics.Mean(name="m_loss")
self.response_loss = tf.keras.metrics.Mean(name="r_loss")
# output accuracies second
self.mask_acc = tf.keras.metrics.Mean(name="m_acc")
self.response_acc = tf.keras.metrics.Mean(name="r_acc")
def _update_metrics_to_log(self) -> None:
debug_log_level = logging.getLogger("rasa").level == logging.DEBUG
if self.config[MASKED_LM]:
self.metrics_to_log.append("m_acc")
if debug_log_level:
self.metrics_to_log.append("m_loss")
self.metrics_to_log.append("r_acc")
if debug_log_level:
self.metrics_to_log.append("r_loss")
self._log_metric_info()
def _log_metric_info(self) -> None:
metric_name = {"t": "total", "m": "mask", "r": "response"}
logger.debug("Following metrics will be logged during training: ")
for metric in self.metrics_to_log:
parts = metric.split("_")
name = f"{metric_name[parts[0]]} {parts[1]}"
logger.debug(f" {metric} ({name})")
def _update_label_metrics(self, loss: tf.Tensor, acc: tf.Tensor) -> None:
self.response_loss.update_state(loss)
self.response_acc.update_state(acc)
class DIET2DIET(DIET):
def _check_data(self) -> None:
if TEXT not in self.data_signature:
raise InvalidConfigError(
f"No text features specified. "
f"Cannot train '{self.__class__.__name__}' model."
)
if LABEL not in self.data_signature:
raise InvalidConfigError(
f"No label features specified. "
f"Cannot train '{self.__class__.__name__}' model."
)
if (
self.config[SHARE_HIDDEN_LAYERS]
and self.data_signature[TEXT][SENTENCE]
!= self.data_signature[LABEL][SENTENCE]
):
raise ValueError(
"If hidden layer weights are shared, data signatures "
"for text_features and label_features must coincide."
)
def _create_metrics(self) -> None:
# self.metrics preserve order
# output losses first
self.mask_loss = tf.keras.metrics.Mean(name="m_loss")
self.response_loss = tf.keras.metrics.Mean(name="r_loss")
# output accuracies second
self.mask_acc = tf.keras.metrics.Mean(name="m_acc")
self.response_acc = tf.keras.metrics.Mean(name="r_acc")
def _update_metrics_to_log(self) -> None:
debug_log_level = logging.getLogger("rasa").level == logging.DEBUG
if self.config[MASKED_LM]:
self.metrics_to_log.append("m_acc")
if debug_log_level:
self.metrics_to_log.append("m_loss")
self.metrics_to_log.append("r_acc")
if debug_log_level:
self.metrics_to_log.append("r_loss")
self._log_metric_info()
def _log_metric_info(self) -> None:
metric_name = {"t": "total", "m": "mask", "r": "response"}
logger.debug("Following metrics will be logged during training: ")
for metric in self.metrics_to_log:
parts = metric.split("_")
name = f"{metric_name[parts[0]]} {parts[1]}"
logger.debug(f" {metric} ({name})")
def _prepare_layers(self) -> None:
self.text_name = TEXT
self.label_name = TEXT if self.config[SHARE_HIDDEN_LAYERS] else LABEL
self._prepare_sequence_layers(self.text_name)
self._prepare_sequence_layers(self.label_name)
if self.config[MASKED_LM]:
self._prepare_mask_lm_layers(self.text_name)
self._prepare_label_classification_layers()
def _create_all_labels(self) -> Tuple[tf.Tensor, tf.Tensor]:
all_label_ids = self.tf_label_data[LABEL_KEY][LABEL_SUB_KEY][0]
sequence_mask_label = super()._get_mask_for(
self.tf_label_data, LABEL, SEQUENCE_LENGTH
)
batch_dim = tf.shape(self.tf_label_data[LABEL_KEY][LABEL_SUB_KEY][0])[0]
sequence_lengths_label = self._get_sequence_lengths(
self.tf_label_data, LABEL, SEQUENCE_LENGTH, batch_dim
)
mask_label = self._compute_mask(sequence_lengths_label)
label_transformed, _, _, _ = self._create_sequence(
self.tf_label_data[LABEL][SEQUENCE],
self.tf_label_data[LABEL][SENTENCE],
sequence_mask_label,
mask_label,
self.label_name,
)
sentence_label = self._last_token(label_transformed, sequence_lengths_label)
all_labels_embed = self._tf_layers[f"embed.{LABEL}"](sentence_label)
return all_label_ids, all_labels_embed
def batch_loss(
self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]]
) -> tf.Tensor:
tf_batch_data = self.batch_to_model_data_format(batch_in, self.data_signature)
batch_dim = self._get_batch_dim(tf_batch_data)
sequence_mask_text = super()._get_mask_for(tf_batch_data, TEXT, SEQUENCE_LENGTH)
sequence_lengths_text = self._get_sequence_lengths(
tf_batch_data, TEXT, SEQUENCE_LENGTH, batch_dim
)
mask_text = self._compute_mask(sequence_lengths_text)
(
text_transformed,
text_in,
text_seq_ids,
lm_mask_bool_text,
) = self._create_sequence(
tf_batch_data[TEXT][SEQUENCE],
tf_batch_data[TEXT][SENTENCE],
sequence_mask_text,
mask_text,
self.text_name,
sparse_dropout=self.config[SPARSE_INPUT_DROPOUT],
dense_dropout=self.config[DENSE_INPUT_DROPOUT],
masked_lm_loss=self.config[MASKED_LM],
sequence_ids=True,
)
sequence_mask_label = super()._get_mask_for(
tf_batch_data, LABEL, SEQUENCE_LENGTH
)
sequence_lengths_label = self._get_sequence_lengths(
tf_batch_data, LABEL, SEQUENCE_LENGTH, batch_dim
)
mask_label = self._compute_mask(sequence_lengths_label)
label_transformed, _, _, _ = self._create_sequence(
tf_batch_data[LABEL][SEQUENCE],
tf_batch_data[LABEL][SENTENCE],
sequence_mask_label,
mask_label,
self.label_name,
)
losses = []
if self.config[MASKED_LM]:
loss, acc = self._mask_loss(
text_transformed,
text_in,
text_seq_ids,
lm_mask_bool_text,
self.text_name,
)
self.mask_loss.update_state(loss)
self.mask_acc.update_state(acc)
losses.append(loss)
# get sentence feature vector for label classification
sentence_vector_text = self._last_token(text_transformed, sequence_lengths_text)
sentence_vector_label = self._last_token(
label_transformed, sequence_lengths_label
)
label_ids = tf_batch_data[LABEL_KEY][LABEL_SUB_KEY][0]
loss, acc = self._calculate_label_loss(
sentence_vector_text, sentence_vector_label, label_ids
)
self.response_loss.update_state(loss)
self.response_acc.update_state(acc)
losses.append(loss)
return tf.math.add_n(losses)
def batch_predict(
self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]]
) -> Dict[Text, tf.Tensor]:
tf_batch_data = self.batch_to_model_data_format(
batch_in, self.predict_data_signature
)
sequence_mask_text = super()._get_mask_for(tf_batch_data, TEXT, SEQUENCE_LENGTH)
sequence_lengths_text = self._get_sequence_lengths(
tf_batch_data, TEXT, SEQUENCE_LENGTH, batch_dim=1
)
mask_text = self._compute_mask(sequence_lengths_text)
text_transformed, _, _, _ = self._create_sequence(
tf_batch_data[TEXT][SEQUENCE],
tf_batch_data[TEXT][SENTENCE],
sequence_mask_text,
mask_text,
self.text_name,
)
out = {}
if self.all_labels_embed is None:
_, self.all_labels_embed = self._create_all_labels()
# get sentence feature vector for intent classification
sentence_vector = self._last_token(text_transformed, sequence_lengths_text)
sentence_vector_embed = self._tf_layers[f"embed.{TEXT}"](sentence_vector)
sim_all = self._tf_layers[f"loss.{LABEL}"].sim(
sentence_vector_embed[:, tf.newaxis, :],
self.all_labels_embed[tf.newaxis, :, :],
)
scores = self._tf_layers[f"loss.{LABEL}"].confidence_from_sim(
sim_all, self.config[SIMILARITY_TYPE]
)
out["i_scores"] = scores
return out
| true | true |
f73ccf49bd2bf5c2271598ebb49e9d7cd71ad60d | 894 | py | Python | ocdskingfisherprocess/maindatabase/migrations/versions/b66e30eb6816_note.py | matiasSanabria/kingfisher-process | 88cb768aaa562714c8bd53e05717639faf041501 | [
"BSD-3-Clause"
] | 1 | 2019-04-11T10:17:32.000Z | 2019-04-11T10:17:32.000Z | ocdskingfisherprocess/maindatabase/migrations/versions/b66e30eb6816_note.py | matiasSanabria/kingfisher-process | 88cb768aaa562714c8bd53e05717639faf041501 | [
"BSD-3-Clause"
] | 282 | 2018-12-20T16:49:22.000Z | 2022-02-01T00:48:10.000Z | ocdskingfisherprocess/maindatabase/migrations/versions/b66e30eb6816_note.py | matiasSanabria/kingfisher-process | 88cb768aaa562714c8bd53e05717639faf041501 | [
"BSD-3-Clause"
] | 7 | 2019-04-15T13:36:18.000Z | 2021-03-02T16:25:41.000Z | """note
Revision ID: b66e30eb6816
Revises: 8add39cb253d
Create Date: 2019-02-26 13:09:27.596374
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = 'b66e30eb6816'  # unique id of this migration
down_revision = '8add39cb253d'  # migration this one applies on top of
branch_labels = None  # no named branches used
depends_on = None  # no cross-branch dependencies
def upgrade():
    """Create the ``collection_note`` table for free-form notes on a collection."""
    collection_fk = sa.ForeignKey(
        "collection.id", name="fk_collection_file_collection_id"
    )
    op.create_table(
        'collection_note',
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('collection_id', sa.Integer, collection_fk, nullable=False),
        sa.Column('note', sa.Text, nullable=False),
        sa.Column('stored_at', sa.DateTime(timezone=False), nullable=False),
    )
def downgrade():
    """Drop the ``collection_note`` table created by :func:`upgrade`."""
    op.drop_table('collection_note')
| 27.9375 | 88 | 0.582774 | import sqlalchemy as sa
from alembic import op
revision = 'b66e30eb6816'
down_revision = '8add39cb253d'
branch_labels = None
depends_on = None
def upgrade():
op.create_table('collection_note',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('collection_id', sa.Integer,
sa.ForeignKey("collection.id",
name="fk_collection_file_collection_id"),
nullable=False),
sa.Column('note', sa.Text, nullable=False),
sa.Column('stored_at', sa.DateTime(timezone=False), nullable=False),
)
def downgrade():
op.drop_table('collection_note')
| true | true |
f73cd0e1fc3d35857bf507391f4ebade7b5b3bd0 | 1,356 | py | Python | util/registry/dockerver.py | anwarchk/quay | 23c5120790c619174e7d36784ca5aab7f4eece5c | [
"Apache-2.0"
] | 1 | 2021-05-30T01:54:21.000Z | 2021-05-30T01:54:21.000Z | util/registry/dockerver.py | anwarchk/quay | 23c5120790c619174e7d36784ca5aab7f4eece5c | [
"Apache-2.0"
] | 20 | 2019-12-26T17:32:34.000Z | 2022-03-21T22:18:06.000Z | util/registry/dockerver.py | anwarchk/quay | 23c5120790c619174e7d36784ca5aab7f4eece5c | [
"Apache-2.0"
] | 1 | 2020-05-31T16:28:40.000Z | 2020-05-31T16:28:40.000Z | import re
from semantic_version import Version
_USER_AGENT_SEARCH_REGEX = re.compile(r'docker\/([0-9]+(?:\.[0-9]+){1,2})')
_EXACT_1_5_USER_AGENT = re.compile(r'^Go 1\.1 package http$')
_ONE_FIVE_ZERO = '1.5.0'


def docker_version(user_agent_string):
  """ Extract the Docker version from the user agent, taking special care to
      handle the case of a 1.5 client requesting an auth token, which sends
      a broken user agent. If we can not positively identify a version, return
      None.
  """
  match = _USER_AGENT_SEARCH_REGEX.search(user_agent_string)
  if match is not None:
    # Docker switched to date-based versioning on Feb 17, 2017:
    # https://github.com/docker/docker/pull/31075
    # Those versions may carry leading zeros in the major or minor components,
    # which violates semver, so strip the zero prefixes before parsing.
    normalized = [
      piece[:-1].lstrip('0') + piece[-1] for piece in match.group(1).split('.')
    ]
    return Version('.'.join(normalized), partial=True)

  # A 1.5 Docker client hitting the auth endpoints sends exactly this broken
  # user agent instead of a proper "docker/x.y.z" string.
  if _EXACT_1_5_USER_AGENT.match(user_agent_string):
    return Version(_ONE_FIVE_ZERO)

  return None
| 38.742857 | 100 | 0.722714 | import re
from semantic_version import Version
_USER_AGENT_SEARCH_REGEX = re.compile(r'docker\/([0-9]+(?:\.[0-9]+){1,2})')
_EXACT_1_5_USER_AGENT = re.compile(r'^Go 1\.1 package http$')
_ONE_FIVE_ZERO = '1.5.0'
def docker_version(user_agent_string):
found_semver = _USER_AGENT_SEARCH_REGEX.search(user_agent_string)
if found_semver:
portions = found_semver.group(1).split('.')
updated_portions = [(p[:-1].lstrip('0') + p[-1]) for p in portions]
return Version('.'.join(updated_portions), partial=True)
elif _EXACT_1_5_USER_AGENT.match(user_agent_string):
return Version(_ONE_FIVE_ZERO)
else:
return None
| true | true |
f73cd19dd5c00d5a408287174de39fdf0436a5da | 405 | py | Python | questionarios/urls.py | bcunhasa/nutriodonto | 3ff20377bc85bc4960619f63688b7732e6eebba9 | [
"CC0-1.0"
] | null | null | null | questionarios/urls.py | bcunhasa/nutriodonto | 3ff20377bc85bc4960619f63688b7732e6eebba9 | [
"CC0-1.0"
] | null | null | null | questionarios/urls.py | bcunhasa/nutriodonto | 3ff20377bc85bc4960619f63688b7732e6eebba9 | [
"CC0-1.0"
] | null | null | null | """Define os padrões de URL para a aplicação"""
from django.conf.urls import url
from . import views
app_name="questionarios"
urlpatterns = [
# Mostra a confirmação do envio do formulário
url(r'^confirmacao/$', views.ConfirmacaoView.as_view(), name='confirmacao'),
# Questionário para o diretor de escola
url(r'^diretor/$', views.QuestionarioDiretorView.as_view(), name='diretor'),
]
| 25.3125 | 80 | 0.718519 |
from django.conf.urls import url
from . import views
app_name="questionarios"
urlpatterns = [
url(r'^confirmacao/$', views.ConfirmacaoView.as_view(), name='confirmacao'),
url(r'^diretor/$', views.QuestionarioDiretorView.as_view(), name='diretor'),
]
| true | true |
f73cd309c645f2f8e880f06497b62ff644185a5e | 12,003 | py | Python | python_modules/libraries/dagstermill/dagstermill/manager.py | bitdotioinc/dagster | 4fe395a37b206b1a48b956fa5dd72bf698104cca | [
"Apache-2.0"
] | 1 | 2021-04-27T19:49:59.000Z | 2021-04-27T19:49:59.000Z | python_modules/libraries/dagstermill/dagstermill/manager.py | bitdotioinc/dagster | 4fe395a37b206b1a48b956fa5dd72bf698104cca | [
"Apache-2.0"
] | 7 | 2022-03-16T06:55:04.000Z | 2022-03-18T07:03:25.000Z | python_modules/libraries/dagstermill/dagstermill/manager.py | bitdotioinc/dagster | 4fe395a37b206b1a48b956fa5dd72bf698104cca | [
"Apache-2.0"
] | null | null | null | import os
import pickle
import uuid
import six
from dagster import (
AssetMaterialization,
ExpectationResult,
Failure,
Materialization,
ModeDefinition,
PipelineDefinition,
SolidDefinition,
TypeCheck,
check,
seven,
)
from dagster.core.definitions.dependency import SolidHandle
from dagster.core.definitions.reconstructable import ReconstructablePipeline
from dagster.core.definitions.resource import ScopedResourcesBuilder
from dagster.core.execution.api import create_execution_plan, scoped_pipeline_context
from dagster.core.execution.resources_init import (
get_required_resource_keys_to_init,
resource_initialization_event_generator,
)
from dagster.core.instance import DagsterInstance
from dagster.core.storage.pipeline_run import PipelineRun, PipelineRunStatus
from dagster.core.utils import make_new_run_id
from dagster.loggers import colored_console_logger
from dagster.serdes import unpack_value
from dagster.utils import EventGenerationManager
from .context import DagstermillExecutionContext, DagstermillRuntimeExecutionContext
from .errors import DagstermillError
from .serialize import PICKLE_PROTOCOL, read_value, write_value
class DagstermillResourceEventGenerationManager(EventGenerationManager):
    """Event generation manager whose resource teardown is deferred.

    The default ``generate_teardown_events`` is overridden to yield nothing,
    so resources stay alive across notebook cells; the dagstermill ``Manager``
    triggers the real teardown explicitly via :meth:`teardown`.
    """

    def generate_teardown_events(self):
        # Yield no events here -- teardown only happens when `teardown` is called.
        return iter(())

    def teardown(self):
        # Drain the parent class's teardown generator and return the events.
        parent_events = super(
            DagstermillResourceEventGenerationManager, self
        ).generate_teardown_events()
        return list(parent_events)
class Manager(object):
def __init__(self):
self.pipeline = None
self.solid_def = None
self.in_pipeline = False
self.marshal_dir = None
self.context = None
self.resource_manager = None
def _setup_resources(
self, execution_plan, environment_config, pipeline_run, log_manager, resource_keys_to_init
):
"""
Drop-in replacement for
`dagster.core.execution.resources_init.resource_initialization_manager`. It uses a
`DagstermillResourceEventGenerationManager` and explicitly calls `teardown` on it
"""
generator = resource_initialization_event_generator(
execution_plan, environment_config, pipeline_run, log_manager, resource_keys_to_init
)
self.resource_manager = DagstermillResourceEventGenerationManager(
generator, ScopedResourcesBuilder
)
return self.resource_manager
    def reconstitute_pipeline_context(
        self,
        output_log_path=None,
        marshal_dir=None,
        run_config=None,
        executable_dict=None,
        pipeline_run_dict=None,
        solid_handle_kwargs=None,
        instance_ref_dict=None,
    ):
        """Reconstitutes a context for dagstermill-managed execution.
        You'll see this function called to reconstruct a pipeline context within the ``injected
        parameters`` cell of a dagstermill output notebook. Users should not call this function
        interactively except when debugging output notebooks.
        Use :func:`dagstermill.get_context` in the ``parameters`` cell of your notebook to define a
        context for interactive exploration and development. This call will be replaced by one to
        :func:`dagstermill.reconstitute_pipeline_context` when the notebook is executed by
        dagstermill.
        """
        # Validate the serialized inputs handed over by the dagstermill solid.
        check.opt_str_param(output_log_path, "output_log_path")
        check.opt_str_param(marshal_dir, "marshal_dir")
        run_config = check.opt_dict_param(run_config, "run_config", key_type=str)
        check.dict_param(pipeline_run_dict, "pipeline_run_dict")
        check.dict_param(executable_dict, "executable_dict")
        check.dict_param(solid_handle_kwargs, "solid_handle_kwargs")
        check.dict_param(instance_ref_dict, "instance_ref_dict")
        # Rebuild the pipeline definition from its serialized, reconstructable form.
        pipeline = ReconstructablePipeline.from_dict(executable_dict)
        pipeline_def = pipeline.get_definition()
        try:
            instance_ref = unpack_value(instance_ref_dict)
            instance = DagsterInstance.from_ref(instance_ref)
        except Exception as err:  # pylint: disable=broad-except
            six.raise_from(
                DagstermillError(
                    "Error when attempting to resolve DagsterInstance from serialized InstanceRef"
                ),
                err,
            )
        pipeline_run = unpack_value(pipeline_run_dict)
        # Locate the solid this notebook corresponds to within the pipeline.
        solid_handle = SolidHandle.from_dict(solid_handle_kwargs)
        solid_def = pipeline_def.get_solid(solid_handle).definition
        self.marshal_dir = marshal_dir
        self.in_pipeline = True
        self.solid_def = solid_def
        self.pipeline = pipeline
        execution_plan = create_execution_plan(
            self.pipeline,
            run_config,
            mode=pipeline_run.mode,
            step_keys_to_execute=pipeline_run.step_keys_to_execute,
        )
        with scoped_pipeline_context(
            execution_plan,
            run_config,
            pipeline_run,
            instance,
            scoped_resources_builder_cm=self._setup_resources,
            # Set this flag even though we're not in test for clearer error reporting
            raise_on_error=True,
        ) as pipeline_context:
            self.context = DagstermillRuntimeExecutionContext(
                pipeline_context=pipeline_context,
                solid_config=run_config.get("solids", {}).get(solid_def.name, {}).get("config"),
                resource_keys_to_init=get_required_resource_keys_to_init(
                    execution_plan,
                    pipeline_context.system_storage_def,
                    pipeline_context.intermediate_storage_def,
                ),
                solid_name=solid_def.name,
            )
        return self.context
def get_context(self, solid_config=None, mode_def=None, run_config=None):
    """Get a dagstermill execution context for interactive exploration and development.

    Args:
        solid_config (Optional[Any]): If specified, this value will be made available on the
            context as its ``solid_config`` property.
        mode_def (Optional[:class:`dagster.ModeDefinition`]): If specified, defines the mode to
            use to construct the context. Specify this if you would like a context constructed
            with specific ``resource_defs`` or ``logger_defs``. By default, an ephemeral mode
            with a console logger will be constructed.
        run_config(Optional[dict]): The environment config dict with which to construct
            the context.

    Returns:
        :py:class:`~dagstermill.DagstermillExecutionContext`
    """
    check.opt_inst_param(mode_def, "mode_def", ModeDefinition)
    run_config = check.opt_dict_param(run_config, "run_config", key_type=str)
    # If we are running non-interactively, and there is already a context reconstituted, return
    # that context rather than overwriting it.
    if self.context is not None and isinstance(
        self.context, DagstermillRuntimeExecutionContext
    ):
        return self.context
    if not mode_def:
        # Fall back to an ephemeral mode with a console logger.
        mode_def = ModeDefinition(logger_defs={"dagstermill": colored_console_logger})
        # NOTE(review): this writes into the caller-supplied run_config dict
        # (opt_dict_param does not appear to copy it) — presumably acceptable
        # for interactive use; confirm before reusing the dict elsewhere.
        run_config["loggers"] = {"dagstermill": {}}
    # Ephemeral no-op solid so a well-formed pipeline/context can be built
    # outside of any real pipeline run.
    solid_def = SolidDefinition(
        name="this_solid",
        input_defs=[],
        compute_fn=lambda *args, **kwargs: None,
        output_defs=[],
        description="Ephemeral solid constructed by dagstermill.get_context()",
        required_resource_keys=mode_def.resource_key_set,
    )
    pipeline_def = PipelineDefinition(
        [solid_def], mode_defs=[mode_def], name="ephemeral_dagstermill_pipeline"
    )
    run_id = make_new_run_id()
    # construct stubbed PipelineRun for notebook exploration...
    # The actual pipeline run during pipeline execution will be serialized and reconstituted
    # in the `reconstitute_pipeline_context` call
    pipeline_run = PipelineRun(
        pipeline_name=pipeline_def.name,
        run_id=run_id,
        run_config=run_config,
        mode=mode_def.name,
        step_keys_to_execute=None,
        status=PipelineRunStatus.NOT_STARTED,
        tags=None,
    )
    self.in_pipeline = False
    self.solid_def = solid_def
    self.pipeline = pipeline_def
    execution_plan = create_execution_plan(self.pipeline, run_config, mode=mode_def.name)
    with scoped_pipeline_context(
        execution_plan,
        run_config,
        pipeline_run,
        DagsterInstance.ephemeral(),
        scoped_resources_builder_cm=self._setup_resources,
    ) as pipeline_context:
        self.context = DagstermillExecutionContext(
            pipeline_context=pipeline_context,
            solid_config=solid_config,
            resource_keys_to_init=get_required_resource_keys_to_init(
                execution_plan,
                pipeline_context.system_storage_def,
                pipeline_context.intermediate_storage_def,
            ),
            solid_name=solid_def.name,
        )
    return self.context
def yield_result(self, value, output_name="result"):
    """Yield a result directly from notebook code.

    When called interactively or in development, returns its input.

    Args:
        value (Any): The value to yield.
        output_name (Optional[str]): The name of the result to yield (default: ``'result'``).
    """
    # Interactive/development mode: nothing to marshal, just echo the value.
    if not self.in_pipeline:
        return value

    import scrapbook  # deferred import for perf

    if not self.solid_def.has_output(output_name):
        raise DagstermillError(
            "Solid {solid_name} does not have output named {output_name}".format(
                solid_name=self.solid_def.name, output_name=output_name
            )
        )

    # Serialize the value into the marshal directory and record its location
    # in the notebook via scrapbook so the host process can pick it up.
    output_type = self.solid_def.output_def_named(output_name).dagster_type
    marshal_path = os.path.join(self.marshal_dir, "output-{}".format(output_name))
    scrapbook.glue(output_name, write_value(output_type, value, marshal_path))
def yield_event(self, dagster_event):
    """Yield a dagster event directly from notebook code.

    When called interactively or in development, returns its input.

    Args:
        dagster_event (Union[:class:`dagster.Materialization`, :class:`dagster.ExpectationResult`, :class:`dagster.TypeCheck`, :class:`dagster.Failure`]):
            An event to yield back to Dagster.
    """
    check.inst_param(
        dagster_event,
        "dagster_event",
        (AssetMaterialization, Materialization, ExpectationResult, TypeCheck, Failure),
    )

    # Interactive/development mode: hand the event straight back.
    if not self.in_pipeline:
        return dagster_event

    import scrapbook  # deferred import for perf

    # Pickle the event under a unique key and glue the file path into the
    # notebook so the host process can recover it after execution.
    event_key = "event-{event_uuid}".format(event_uuid=str(uuid.uuid4()))
    event_path = os.path.join(self.marshal_dir, event_key)
    with open(event_path, "wb") as handle:
        handle.write(pickle.dumps(dagster_event, PICKLE_PROTOCOL))
    scrapbook.glue(event_key, event_path)
def teardown_resources(self):
    """Tear down resources initialized via ``_setup_resources``, if any."""
    manager = self.resource_manager
    if manager is not None:
        manager.teardown()
def load_parameter(self, input_name, input_value):
    """Deserialize a marshalled input parameter for the named solid input."""
    dagster_type = self.solid_def.input_def_named(input_name).dagster_type
    return read_value(dagster_type, seven.json.loads(input_value))
# Module-level singleton used by the dagstermill notebook entry points.
MANAGER_FOR_NOTEBOOK_INSTANCE = Manager()
| 38.348243 | 158 | 0.668916 | import os
import pickle
import uuid
import six
from dagster import (
AssetMaterialization,
ExpectationResult,
Failure,
Materialization,
ModeDefinition,
PipelineDefinition,
SolidDefinition,
TypeCheck,
check,
seven,
)
from dagster.core.definitions.dependency import SolidHandle
from dagster.core.definitions.reconstructable import ReconstructablePipeline
from dagster.core.definitions.resource import ScopedResourcesBuilder
from dagster.core.execution.api import create_execution_plan, scoped_pipeline_context
from dagster.core.execution.resources_init import (
get_required_resource_keys_to_init,
resource_initialization_event_generator,
)
from dagster.core.instance import DagsterInstance
from dagster.core.storage.pipeline_run import PipelineRun, PipelineRunStatus
from dagster.core.utils import make_new_run_id
from dagster.loggers import colored_console_logger
from dagster.serdes import unpack_value
from dagster.utils import EventGenerationManager
from .context import DagstermillExecutionContext, DagstermillRuntimeExecutionContext
from .errors import DagstermillError
from .serialize import PICKLE_PROTOCOL, read_value, write_value
class DagstermillResourceEventGenerationManager(EventGenerationManager):
    """Event-generation manager whose automatic teardown is suppressed.

    Resources are torn down only when ``teardown`` is called explicitly.
    """

    def generate_teardown_events(self):
        # Yield nothing here so the scoped context manager does not tear the
        # resources down on exit; see ``teardown``.
        return iter(())

    def teardown(self):
        # Explicitly drain the parent class's teardown event generator.
        return [
            teardown_event
            for teardown_event in super(
                DagstermillResourceEventGenerationManager, self
            ).generate_teardown_events()
        ]
class Manager(object):
    """Stateful manager that wires a dagstermill notebook into dagster.

    Holds the pipeline, solid definition, marshal directory, and execution
    context for the notebook process, whether reconstituted inside a real
    pipeline run or created interactively via ``get_context``.
    """

    def __init__(self):
        # All state starts empty; it is populated either by
        # ``reconstitute_pipeline_context`` (in-pipeline) or by
        # ``get_context`` (interactive).
        self.pipeline = None
        self.solid_def = None
        self.in_pipeline = False
        self.marshal_dir = None
        self.context = None
        self.resource_manager = None

    def _setup_resources(
        self, execution_plan, environment_config, pipeline_run, log_manager, resource_keys_to_init
    ):
        """Build a resource manager whose teardown is deferred.

        Passed as ``scoped_resources_builder_cm`` so resources survive past
        the scoped context and are released via ``teardown_resources``.
        """
        generator = resource_initialization_event_generator(
            execution_plan, environment_config, pipeline_run, log_manager, resource_keys_to_init
        )
        self.resource_manager = DagstermillResourceEventGenerationManager(
            generator, ScopedResourcesBuilder
        )
        return self.resource_manager

    def reconstitute_pipeline_context(
        self,
        output_log_path=None,
        marshal_dir=None,
        run_config=None,
        executable_dict=None,
        pipeline_run_dict=None,
        solid_handle_kwargs=None,
        instance_ref_dict=None,
    ):
        """Reconstitute a runtime execution context from serialized state.

        Used when the notebook executes as a solid inside a real pipeline
        run; all arguments arrive as serialized dicts.
        """
        check.opt_str_param(output_log_path, "output_log_path")
        check.opt_str_param(marshal_dir, "marshal_dir")
        run_config = check.opt_dict_param(run_config, "run_config", key_type=str)
        check.dict_param(pipeline_run_dict, "pipeline_run_dict")
        check.dict_param(executable_dict, "executable_dict")
        check.dict_param(solid_handle_kwargs, "solid_handle_kwargs")
        check.dict_param(instance_ref_dict, "instance_ref_dict")
        pipeline = ReconstructablePipeline.from_dict(executable_dict)
        pipeline_def = pipeline.get_definition()
        try:
            instance_ref = unpack_value(instance_ref_dict)
            instance = DagsterInstance.from_ref(instance_ref)
        except Exception as err:
            six.raise_from(
                DagstermillError(
                    "Error when attempting to resolve DagsterInstance from serialized InstanceRef"
                ),
                err,
            )
        pipeline_run = unpack_value(pipeline_run_dict)
        solid_handle = SolidHandle.from_dict(solid_handle_kwargs)
        solid_def = pipeline_def.get_solid(solid_handle).definition
        self.marshal_dir = marshal_dir
        self.in_pipeline = True
        self.solid_def = solid_def
        self.pipeline = pipeline
        execution_plan = create_execution_plan(
            self.pipeline,
            run_config,
            mode=pipeline_run.mode,
            step_keys_to_execute=pipeline_run.step_keys_to_execute,
        )
        with scoped_pipeline_context(
            execution_plan,
            run_config,
            pipeline_run,
            instance,
            scoped_resources_builder_cm=self._setup_resources,
            raise_on_error=True,
        ) as pipeline_context:
            self.context = DagstermillRuntimeExecutionContext(
                pipeline_context=pipeline_context,
                solid_config=run_config.get("solids", {}).get(solid_def.name, {}).get("config"),
                resource_keys_to_init=get_required_resource_keys_to_init(
                    execution_plan,
                    pipeline_context.system_storage_def,
                    pipeline_context.intermediate_storage_def,
                ),
                solid_name=solid_def.name,
            )
        return self.context

    def get_context(self, solid_config=None, mode_def=None, run_config=None):
        """Get an execution context for interactive exploration/development."""
        check.opt_inst_param(mode_def, "mode_def", ModeDefinition)
        run_config = check.opt_dict_param(run_config, "run_config", key_type=str)
        # If we are running non-interactively, and there is already a context reconstituted, return
        # that context rather than overwriting it.
        if self.context is not None and isinstance(
            self.context, DagstermillRuntimeExecutionContext
        ):
            return self.context
        if not mode_def:
            mode_def = ModeDefinition(logger_defs={"dagstermill": colored_console_logger})
            run_config["loggers"] = {"dagstermill": {}}
        # Ephemeral no-op solid so a well-formed pipeline can be built.
        solid_def = SolidDefinition(
            name="this_solid",
            input_defs=[],
            compute_fn=lambda *args, **kwargs: None,
            output_defs=[],
            description="Ephemeral solid constructed by dagstermill.get_context()",
            required_resource_keys=mode_def.resource_key_set,
        )
        pipeline_def = PipelineDefinition(
            [solid_def], mode_defs=[mode_def], name="ephemeral_dagstermill_pipeline"
        )
        run_id = make_new_run_id()
        # construct stubbed PipelineRun for notebook exploration...
        # The actual pipeline run during pipeline execution will be serialized and reconstituted
        # in the `reconstitute_pipeline_context` call
        pipeline_run = PipelineRun(
            pipeline_name=pipeline_def.name,
            run_id=run_id,
            run_config=run_config,
            mode=mode_def.name,
            step_keys_to_execute=None,
            status=PipelineRunStatus.NOT_STARTED,
            tags=None,
        )
        self.in_pipeline = False
        self.solid_def = solid_def
        self.pipeline = pipeline_def
        execution_plan = create_execution_plan(self.pipeline, run_config, mode=mode_def.name)
        with scoped_pipeline_context(
            execution_plan,
            run_config,
            pipeline_run,
            DagsterInstance.ephemeral(),
            scoped_resources_builder_cm=self._setup_resources,
        ) as pipeline_context:
            self.context = DagstermillExecutionContext(
                pipeline_context=pipeline_context,
                solid_config=solid_config,
                resource_keys_to_init=get_required_resource_keys_to_init(
                    execution_plan,
                    pipeline_context.system_storage_def,
                    pipeline_context.intermediate_storage_def,
                ),
                solid_name=solid_def.name,
            )
        return self.context

    def yield_result(self, value, output_name="result"):
        """Yield a result from notebook code; echoes its input interactively."""
        if not self.in_pipeline:
            return value
        # deferred import for perf
        import scrapbook
        if not self.solid_def.has_output(output_name):
            raise DagstermillError(
                "Solid {solid_name} does not have output named {output_name}".format(
                    solid_name=self.solid_def.name, output_name=output_name
                )
            )
        dagster_type = self.solid_def.output_def_named(output_name).dagster_type
        out_file = os.path.join(self.marshal_dir, "output-{}".format(output_name))
        scrapbook.glue(output_name, write_value(dagster_type, value, out_file))

    def yield_event(self, dagster_event):
        """Yield a dagster event from notebook code; echoes it interactively."""
        check.inst_param(
            dagster_event,
            "dagster_event",
            (AssetMaterialization, Materialization, ExpectationResult, TypeCheck, Failure),
        )
        if not self.in_pipeline:
            return dagster_event
        # deferred import for perf
        import scrapbook
        event_id = "event-{event_uuid}".format(event_uuid=str(uuid.uuid4()))
        out_file_path = os.path.join(self.marshal_dir, event_id)
        with open(out_file_path, "wb") as fd:
            fd.write(pickle.dumps(dagster_event, PICKLE_PROTOCOL))
        scrapbook.glue(event_id, out_file_path)

    def teardown_resources(self):
        """Tear down resources initialized via ``_setup_resources``, if any."""
        if self.resource_manager is not None:
            self.resource_manager.teardown()

    def load_parameter(self, input_name, input_value):
        """Deserialize a marshalled input parameter for the named solid input."""
        input_def = self.solid_def.input_def_named(input_name)
        return read_value(input_def.dagster_type, seven.json.loads(input_value))
# Module-level singleton used by the dagstermill notebook entry points.
MANAGER_FOR_NOTEBOOK_INSTANCE = Manager()
| true | true |
f73cd6e62d1bbea7ab19563515d05f154b5d0912 | 6,004 | py | Python | arcade/importers/cos_oem/starlink_oem.py | ProjectPersephone/arcade | a692637312b25347fcf6e3ea0c3ce720b7857a2a | [
"Apache-2.0"
] | 22 | 2021-03-18T12:55:39.000Z | 2022-02-09T08:44:02.000Z | arcade/importers/cos_oem/starlink_oem.py | ProjectPersephone/arcade | a692637312b25347fcf6e3ea0c3ce720b7857a2a | [
"Apache-2.0"
] | 9 | 2021-05-13T18:57:24.000Z | 2021-10-05T08:14:38.000Z | arcade/importers/cos_oem/starlink_oem.py | ProjectPersephone/arcade | a692637312b25347fcf6e3ea0c3ce720b7857a2a | [
"Apache-2.0"
] | 3 | 2021-10-03T11:38:02.000Z | 2021-11-08T10:08:18.000Z | # Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import logging
import zipfile
from typing import List, IO, Tuple
from datetime import datetime
import arcade.models.cos as cos
import arcade.models.graph as graph
from arcade.importers.cos_oem.cos_oem import (BaseOEMCOSImporter,
OEMData, EphemerisLine)
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
logger = logging.getLogger(__name__)
class StarlinkOEMCOSImporter(BaseOEMCOSImporter):
    """A class for fetching OEM data from the Starlink constellation in cloud
    object storage and loading it into neo4j.

    :param oem_bucket: The COS bucket where the OEM files are stored
    """

    def __init__(self, oem_bucket: cos.COSBucket) -> None:
        super().__init__(oem_bucket,
                         data_source_name='Starlink - OEM',
                         oem_file_fmt='[0-9]{20}.oem',
                         data_source_public=True)

    def _convert_header_time(self, time_str: str) -> str:
        """Converts the time strings in the header of the Starlink OEM files
        into the standard format used in the graph.

        :param time_str: The time string to convert
        :return: The normalized time string
        """
        input_time_fmt = '%Y-%m-%d %H:%M:%S %Z'
        output_time_fmt = '%Y-%m-%dT%H:%M:%S'
        dt_obj = datetime.strptime(time_str.strip(), input_time_fmt)
        return dt_obj.strftime(output_time_fmt)

    def _convert_ephem_time(self, time_str: str) -> str:
        """Converts the epoch time strings in the ephemeris lines of the
        Starlink OEM files into the standard format used in the graph.

        :param time_str: The time string to convert
        :return: The normalized time string
        """
        input_time_fmt = '%Y%j%H%M%S.%f'
        output_time_fmt = '%Y-%m-%dT%H:%M:%S.%f'
        dt_obj = datetime.strptime(time_str.strip(), input_time_fmt)
        return dt_obj.strftime(output_time_fmt)

    def _parse_oem_data(self,
                        zip_file: zipfile.ZipFile,
                        oem_file_name: str) -> OEMData:
        """Parses the OEM data in text file contained in the passed zip
        archive.

        :param zip_file: The zip archive containing the OEM text files
        :param oem_file_name: The text file in the zip archive to parse
        :return: The parsed OEM data
        """
        ephemeris_lines: List[EphemerisLine] = []
        # Message data not contained in the OEM files
        oem_data: OEMData = {
            'originator': 'Starlink',
            'center_name': 'EARTH',
            'ref_frame': 'EME2000',
            'time_system': 'UTC'
        }
        with io.TextIOWrapper(zip_file.open(oem_file_name),
                              encoding='utf8') as oem_file:
            for line_no, line in enumerate(oem_file):
                if len(line.strip()) == 0:
                    break
                # Header information is on the first 2 lines of the file
                if line_no == 0:
                    # Creation timestamp; the first 8 characters are skipped
                    # (fixed-width label prefix in the Starlink files).
                    ts = line[8:]
                    oem_data['creation_date'] = self._convert_header_time(ts)
                elif line_no == 1:
                    # Start/stop timestamps at fixed column offsets.
                    start = line[16:39]
                    stop = line[55:78]
                    oem_data['start_time'] = self._convert_header_time(start)
                    oem_data['stop_time'] = self._convert_header_time(stop)
                else:
                    # The state vectors are on every 4th line
                    if not line_no % 4 == 0:
                        continue
                    ephem_data = line.split(' ')
                    epoch = self._convert_ephem_time(ephem_data[0])
                    state_vector = [float(s) for s in ephem_data[1:]]
                    ephemeris_line: EphemerisLine
                    ephemeris_line = dict(epoch=epoch,
                                          state_vector=state_vector)
                    ephemeris_lines.append(ephemeris_line)
        oem_data['ephemeris_lines'] = ephemeris_lines
        return oem_data

    def _get_aso_id_name(self, file_name: str) -> Tuple[str, str]:
        """Gets the Starlink satellite's name and NORAD ID from the text
        file name.

        :param file_name: The name of the text file containing the OEM data
        :return: The NORAD ID and name of the satellite
        """
        # Assumes names of the form "<prefix>_<norad_id>_<object_name>..."
        # — TODO confirm against the files produced upstream.
        data_parts = file_name.split('_')
        aso_id = data_parts[1]
        object_name = data_parts[2]
        return aso_id, object_name

    def _process_fileobj(self,
                         fileobj: IO[bytes],
                         object_node: graph.COSObject) -> None:
        """Extracts and parses the OEM data from the given zip archive file.

        :param fileobj: The file object of the zip archive to extract OEM
            data out of
        :param object_node: The node in the graph representing the COS object
            the OEM is stored in
        """
        with zipfile.ZipFile(fileobj) as zip_file:
            txt_file_names = [f for f in zip_file.namelist()
                              if f.endswith('.txt')]
            for txt_file_name in txt_file_names:
                oem_data = self._parse_oem_data(zip_file, txt_file_name)
                aso_id, object_name = self._get_aso_id_name(txt_file_name)
                oem_data['object_name'] = object_name
                self._save_oem(oem_data, aso_id, object_node)
| 41.986014 | 78 | 0.601765 |
import io
import os
import logging
import zipfile
from typing import List, IO, Tuple
from datetime import datetime
import arcade.models.cos as cos
import arcade.models.graph as graph
from arcade.importers.cos_oem.cos_oem import (BaseOEMCOSImporter,
OEMData, EphemerisLine)
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
logger = logging.getLogger(__name__)
class StarlinkOEMCOSImporter(BaseOEMCOSImporter):
    """Fetches Starlink OEM files from cloud object storage and parses them."""

    def __init__(self, oem_bucket: cos.COSBucket) -> None:
        super().__init__(oem_bucket,
                         data_source_name='Starlink - OEM',
                         oem_file_fmt='[0-9]{20}.oem',
                         data_source_public=True)

    def _convert_header_time(self, time_str: str) -> str:
        """Normalize a header timestamp ('%Y-%m-%d %H:%M:%S %Z') to
        '%Y-%m-%dT%H:%M:%S'."""
        input_time_fmt = '%Y-%m-%d %H:%M:%S %Z'
        output_time_fmt = '%Y-%m-%dT%H:%M:%S'
        dt_obj = datetime.strptime(time_str.strip(), input_time_fmt)
        return dt_obj.strftime(output_time_fmt)

    def _convert_ephem_time(self, time_str: str) -> str:
        """Normalize an ephemeris epoch ('%Y%j%H%M%S.%f') to
        '%Y-%m-%dT%H:%M:%S.%f'."""
        input_time_fmt = '%Y%j%H%M%S.%f'
        output_time_fmt = '%Y-%m-%dT%H:%M:%S.%f'
        dt_obj = datetime.strptime(time_str.strip(), input_time_fmt)
        return dt_obj.strftime(output_time_fmt)

    def _parse_oem_data(self,
                        zip_file: zipfile.ZipFile,
                        oem_file_name: str) -> OEMData:
        """Parse one OEM text file from the zip archive into an OEMData dict."""
        ephemeris_lines: List[EphemerisLine] = []
        # Fields not contained in the OEM files themselves.
        oem_data: OEMData = {
            'originator': 'Starlink',
            'center_name': 'EARTH',
            'ref_frame': 'EME2000',
            'time_system': 'UTC'
        }
        with io.TextIOWrapper(zip_file.open(oem_file_name),
                              encoding='utf8') as oem_file:
            for line_no, line in enumerate(oem_file):
                if len(line.strip()) == 0:
                    break
                # Lines 0 and 1 carry header timestamps at fixed offsets;
                # state vectors appear on every 4th line after that.
                if line_no == 0:
                    ts = line[8:]
                    oem_data['creation_date'] = self._convert_header_time(ts)
                elif line_no == 1:
                    start = line[16:39]
                    stop = line[55:78]
                    oem_data['start_time'] = self._convert_header_time(start)
                    oem_data['stop_time'] = self._convert_header_time(stop)
                else:
                    if not line_no % 4 == 0:
                        continue
                    ephem_data = line.split(' ')
                    epoch = self._convert_ephem_time(ephem_data[0])
                    state_vector = [float(s) for s in ephem_data[1:]]
                    ephemeris_line: EphemerisLine
                    ephemeris_line = dict(epoch=epoch,
                                          state_vector=state_vector)
                    ephemeris_lines.append(ephemeris_line)
        oem_data['ephemeris_lines'] = ephemeris_lines
        return oem_data

    def _get_aso_id_name(self, file_name: str) -> Tuple[str, str]:
        """Extract (NORAD id, object name) from an underscore-delimited file
        name — assumes '<prefix>_<id>_<name>...'; TODO confirm upstream."""
        data_parts = file_name.split('_')
        aso_id = data_parts[1]
        object_name = data_parts[2]
        return aso_id, object_name

    def _process_fileobj(self,
                         fileobj: IO[bytes],
                         object_node: graph.COSObject) -> None:
        """Parse every '.txt' member of the zip archive and save its OEM data."""
        with zipfile.ZipFile(fileobj) as zip_file:
            txt_file_names = [f for f in zip_file.namelist()
                              if f.endswith('.txt')]
            for txt_file_name in txt_file_names:
                oem_data = self._parse_oem_data(zip_file, txt_file_name)
                aso_id, object_name = self._get_aso_id_name(txt_file_name)
                oem_data['object_name'] = object_name
                self._save_oem(oem_data, aso_id, object_node)
| true | true |
f73cd7659f96363d6da4f7c2bb9631d7abb9b9f3 | 5,938 | py | Python | salt/states/cabal.py | rfairburn/salt | 7e44444e873e1cb1d2fe13e39b0edea3779a2b5e | [
"Apache-2.0"
] | 2 | 2015-08-04T21:54:38.000Z | 2019-04-25T21:47:08.000Z | salt/states/cabal.py | rfairburn/salt | 7e44444e873e1cb1d2fe13e39b0edea3779a2b5e | [
"Apache-2.0"
] | 1 | 2015-09-02T12:49:48.000Z | 2015-09-02T19:22:58.000Z | salt/states/cabal.py | rfairburn/salt | 7e44444e873e1cb1d2fe13e39b0edea3779a2b5e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Installation of Cabal Packages
==============================
.. versionadded:: 2015.8.0
These states manage the installed packages for Haskell using
cabal. Note that cabal-install must be installed for these states to
be available, so cabal states should include a requisite to a
pkg.installed state for the package which provides cabal
(``cabal-install`` in case of Debian based distributions). Example::
.. code-block:: yaml
cabal-install:
pkg.installed
ShellCheck:
cabal.installed:
- require:
- pkg: cabal-install
'''
from __future__ import absolute_import
from salt.exceptions import CommandExecutionError, CommandNotFoundError
import salt.utils
def __virtual__():
    '''
    Only work when cabal-install is installed.
    '''
    # Both the cabal CLI and ghc-pkg must be resolvable on the PATH.
    for tool in ('cabal', 'ghc-pkg'):
        if salt.utils.which(tool) is None:
            return False
    return True
def _parse_pkg_string(pkg):
'''
Parse pkg string and return a tuple of packge name, separator, and
package version.
Cabal support install package with following format:
* foo-1.0
* foo < 1.2
* foo > 1.3
For the sake of simplicity only the first form is supported,
support for other forms can be added later.
'''
pkg_name, separator, pkg_ver = pkg.partition('-')
return (pkg_name.strip(), separator, pkg_ver.strip())
def installed(name,
              pkgs=None,
              user=None,
              install_global=False,
              env=None):
    '''
    Verify that the given package is installed and is at the correct version
    (if specified).

    .. code-block:: yaml

        ShellCheck-0.3.5:
          cabal:
            - installed:

    name
        The package to install
    user
        The user to run cabal install with
    install_global
        Install package globally instead of locally
    env
        A list of environment variables to be set prior to execution. The
        format is the same as the :py:func:`cmd.run <salt.states.cmd.run>`.
        state function.
    '''
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}

    try:
        __salt__['cabal.update']()
    except (CommandNotFoundError, CommandExecutionError) as err:
        ret['result'] = False
        ret['comment'] = 'Could not run cabal update {0}'.format(err)
        # Bail out: previously this fell through and the failure result was
        # silently overwritten further down.
        return ret

    if pkgs is not None:
        pkg_list = pkgs
    else:
        pkg_list = [name]

    try:
        installed_pkgs = __salt__['cabal.list'](
            user=user, installed=True, env=env)
    except (CommandNotFoundError, CommandExecutionError) as err:
        ret['result'] = False
        ret['comment'] = 'Error looking up {0!r}: {1}'.format(name, err)
        # Bail out: ``installed_pkgs`` is unbound on this path, so continuing
        # would raise a NameError in the loop below.
        return ret

    pkgs_satisfied = []
    pkgs_to_install = []

    for pkg in pkg_list:
        pkg_name, _, pkg_ver = _parse_pkg_string(pkg)
        if pkg_name not in installed_pkgs:
            pkgs_to_install.append(pkg)
        else:
            if pkg_ver:  # version is specified
                if installed_pkgs[pkg_name] != pkg_ver:
                    pkgs_to_install.append(pkg)
                else:
                    pkgs_satisfied.append(pkg)
            else:
                pkgs_satisfied.append(pkg)

    if __opts__['test']:
        # Dry-run: report what would change without calling cabal.install.
        ret['result'] = None
        comment_msg = []
        if pkgs_to_install:
            comment_msg.append(
                'Packages(s) {0!r} are set to be installed'.format(
                    ', '.join(pkgs_to_install)))
        if pkgs_satisfied:
            comment_msg.append(
                'Packages(s) {0!r} satisfied by {1}'.format(
                    ', '.join(pkg_list), ', '.join(pkgs_satisfied)))
        ret['comment'] = '. '.join(comment_msg)
        return ret

    if not pkgs_to_install:
        ret['result'] = True
        ret['comment'] = ('Packages(s) {0!r} satisfied by {1}'.format(
            ', '.join(pkg_list), ', '.join(pkgs_satisfied)))
        return ret

    try:
        call = __salt__['cabal.install'](pkgs=pkg_list,
                                         user=user,
                                         install_global=install_global,
                                         env=env)
    except (CommandNotFoundError, CommandExecutionError) as err:
        ret['result'] = False
        ret['comment'] = 'Error installing {0!r}: {1}'.format(
            ', '.join(pkg_list), err)
        return ret

    if call and isinstance(call, dict):
        ret['result'] = True
        ret['changes'] = {'old': [], 'new': pkgs_to_install}
        ret['comment'] = 'Packages(s) {0!r} successfully installed'.format(
            ', '.join(pkgs_to_install))
    else:
        ret['result'] = False
        ret['comment'] = 'Could not install packages(s) {0!r}'.format(
            ', '.join(pkg_list))
    return ret
def removed(name,
            user=None,
            env=None):
    '''
    Verify that given package is not installed.
    '''
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}

    try:
        installed_pkgs = __salt__['cabal.list'](
            user=user, installed=True, env=env)
    except (CommandNotFoundError, CommandExecutionError) as err:
        ret['result'] = False
        ret['comment'] = 'Error looking up {0!r}: {1}'.format(name, err)
        # Bail out: ``installed_pkgs`` is unbound on this path, so the
        # membership test below would raise a NameError.
        return ret

    if name not in installed_pkgs:
        ret['result'] = True
        ret['comment'] = 'Package {0!r} is not installed'.format(name)
        return ret

    if __opts__['test']:
        # Dry-run: report the pending removal only.
        ret['result'] = None
        ret['comment'] = 'Package {0!r} is set to be removed'.format(name)
        return ret

    if __salt__['cabal.uninstall'](pkg=name, user=user, env=env):
        ret['result'] = True
        ret['changes'][name] = 'Removed'
        ret['comment'] = 'Package {0!r} was successfully removed'.format(name)
    else:
        ret['result'] = False
        ret['comment'] = 'Error removing package {0!r}'.format(name)
    return ret
| 28.965854 | 78 | 0.573594 |
from __future__ import absolute_import
from salt.exceptions import CommandExecutionError, CommandNotFoundError
import salt.utils
def __virtual__():
    # Only load this state module when both the cabal CLI and ghc-pkg are
    # available on the minion's PATH.
    return (salt.utils.which('cabal') is not None) and \
        (salt.utils.which('ghc-pkg') is not None)
def _parse_pkg_string(pkg):
pkg_name, separator, pkg_ver = pkg.partition('-')
return (pkg_name.strip(), separator, pkg_ver.strip())
def installed(name,
              pkgs=None,
              user=None,
              install_global=False,
              env=None):
    '''
    Verify that the given package is installed and, when a version is given
    in the ``name-version`` form, that it matches the installed version.
    '''
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}

    try:
        __salt__['cabal.update']()
    except (CommandNotFoundError, CommandExecutionError) as err:
        ret['result'] = False
        ret['comment'] = 'Could not run cabal update {0}'.format(err)
        # Bail out: previously this fell through and the failure result was
        # silently overwritten further down.
        return ret

    if pkgs is not None:
        pkg_list = pkgs
    else:
        pkg_list = [name]

    try:
        installed_pkgs = __salt__['cabal.list'](
            user=user, installed=True, env=env)
    except (CommandNotFoundError, CommandExecutionError) as err:
        ret['result'] = False
        ret['comment'] = 'Error looking up {0!r}: {1}'.format(name, err)
        # Bail out: ``installed_pkgs`` is unbound on this path, so continuing
        # would raise a NameError in the loop below.
        return ret

    pkgs_satisfied = []
    pkgs_to_install = []

    for pkg in pkg_list:
        pkg_name, _, pkg_ver = _parse_pkg_string(pkg)
        if pkg_name not in installed_pkgs:
            pkgs_to_install.append(pkg)
        else:
            if pkg_ver:
                if installed_pkgs[pkg_name] != pkg_ver:
                    pkgs_to_install.append(pkg)
                else:
                    pkgs_satisfied.append(pkg)
            else:
                pkgs_satisfied.append(pkg)

    if __opts__['test']:
        # Dry-run: report what would change without calling cabal.install.
        ret['result'] = None
        comment_msg = []
        if pkgs_to_install:
            comment_msg.append(
                'Packages(s) {0!r} are set to be installed'.format(
                    ', '.join(pkgs_to_install)))
        if pkgs_satisfied:
            comment_msg.append(
                'Packages(s) {0!r} satisfied by {1}'.format(
                    ', '.join(pkg_list), ', '.join(pkgs_satisfied)))
        ret['comment'] = '. '.join(comment_msg)
        return ret

    if not pkgs_to_install:
        ret['result'] = True
        ret['comment'] = ('Packages(s) {0!r} satisfied by {1}'.format(
            ', '.join(pkg_list), ', '.join(pkgs_satisfied)))
        return ret

    try:
        call = __salt__['cabal.install'](pkgs=pkg_list,
                                         user=user,
                                         install_global=install_global,
                                         env=env)
    except (CommandNotFoundError, CommandExecutionError) as err:
        ret['result'] = False
        ret['comment'] = 'Error installing {0!r}: {1}'.format(
            ', '.join(pkg_list), err)
        return ret

    if call and isinstance(call, dict):
        ret['result'] = True
        ret['changes'] = {'old': [], 'new': pkgs_to_install}
        ret['comment'] = 'Packages(s) {0!r} successfully installed'.format(
            ', '.join(pkgs_to_install))
    else:
        ret['result'] = False
        ret['comment'] = 'Could not install packages(s) {0!r}'.format(
            ', '.join(pkg_list))
    return ret
def removed(name,
            user=None,
            env=None):
    '''
    Verify that the given package is not installed.
    '''
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}

    try:
        installed_pkgs = __salt__['cabal.list'](
            user=user, installed=True, env=env)
    except (CommandNotFoundError, CommandExecutionError) as err:
        ret['result'] = False
        ret['comment'] = 'Error looking up {0!r}: {1}'.format(name, err)
        # Bail out: ``installed_pkgs`` is unbound on this path, so the
        # membership test below would raise a NameError.
        return ret

    if name not in installed_pkgs:
        ret['result'] = True
        ret['comment'] = 'Package {0!r} is not installed'.format(name)
        return ret

    if __opts__['test']:
        # Dry-run: report the pending removal only.
        ret['result'] = None
        ret['comment'] = 'Package {0!r} is set to be removed'.format(name)
        return ret

    if __salt__['cabal.uninstall'](pkg=name, user=user, env=env):
        ret['result'] = True
        ret['changes'][name] = 'Removed'
        ret['comment'] = 'Package {0!r} was successfully removed'.format(name)
    else:
        ret['result'] = False
        ret['comment'] = 'Error removing package {0!r}'.format(name)
    return ret
| true | true |
f73cd7959b4977f5f9409d5c3d63b88906bf112a | 399 | py | Python | backend/apps/family/migrations/0002_alter_family_id_grupo.py | jorgejimenez98/backend-evaluacion-desempenno | 08975303952608809375c5e2185bf20a84cc0f4e | [
"MIT"
] | null | null | null | backend/apps/family/migrations/0002_alter_family_id_grupo.py | jorgejimenez98/backend-evaluacion-desempenno | 08975303952608809375c5e2185bf20a84cc0f4e | [
"MIT"
] | null | null | null | backend/apps/family/migrations/0002_alter_family_id_grupo.py | jorgejimenez98/backend-evaluacion-desempenno | 08975303952608809375c5e2185bf20a84cc0f4e | [
"MIT"
] | null | null | null | # Generated by Django 3.2.2 on 2021-05-28 06:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make ``id_grupo`` the primary key of the
    ``family`` model (non-serialized IntegerField)."""

    dependencies = [
        ('family', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='family',
            name='id_grupo',
            field=models.IntegerField(primary_key=True, serialize=False),
        ),
    ]
| 21 | 73 | 0.60401 |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make ``id_grupo`` the primary key of the
    ``family`` model (non-serialized IntegerField)."""

    dependencies = [
        ('family', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='family',
            name='id_grupo',
            field=models.IntegerField(primary_key=True, serialize=False),
        ),
    ]
| true | true |
f73cd7d567aca912fa59ab9b173e698c0795c0e9 | 177 | py | Python | users/apps.py | intelligems/stolos | ca658aeea92b841f89992948a136214519dffcc7 | [
"MIT"
] | 5 | 2019-08-27T10:33:14.000Z | 2021-11-09T10:57:40.000Z | users/apps.py | intelligems/stolos | ca658aeea92b841f89992948a136214519dffcc7 | [
"MIT"
] | null | null | null | users/apps.py | intelligems/stolos | ca658aeea92b841f89992948a136214519dffcc7 | [
"MIT"
] | 3 | 2019-05-23T14:37:40.000Z | 2020-12-14T18:43:16.000Z | from __future__ import unicode_literals
from django.apps import AppConfig
class UsersConfig(AppConfig):
name = 'users'
def ready(self):
import users.signals
| 16.090909 | 39 | 0.728814 | from __future__ import unicode_literals
from django.apps import AppConfig
class UsersConfig(AppConfig):
    """AppConfig for the ``users`` app."""

    name = 'users'

    def ready(self):
        # Imported for side effects only — presumably registers the app's
        # signal receivers; verify against users/signals.py.
        import users.signals
| true | true |
f73cd8409539f599db3c3d3fa91b6e0842779d1c | 9,622 | py | Python | tests/test_hgvs_variantmapper_gcp.py | naomifox/hgvs | 5007142191cac8dba2272bad5b945a27c0a5cf87 | [
"Apache-2.0"
] | null | null | null | tests/test_hgvs_variantmapper_gcp.py | naomifox/hgvs | 5007142191cac8dba2272bad5b945a27c0a5cf87 | [
"Apache-2.0"
] | null | null | null | tests/test_hgvs_variantmapper_gcp.py | naomifox/hgvs | 5007142191cac8dba2272bad5b945a27c0a5cf87 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import pprint
import re
import sys
import os
import unittest
if sys.version_info < (3, ):
import unicodecsv as csv
else:
import csv
import pytest
from hgvs.exceptions import HGVSError
import hgvs.dataproviders.uta
import hgvs.parser
import hgvs.sequencevariant
import hgvs.variantmapper
from support import CACHE
def gxp_file_reader(fn):
    """Yield records from a tab-separated test-data file.

    Rows whose ``id`` column starts with ``#`` are treated as comments and
    skipped.

    Args:
        fn: path to the TSV file; the first row supplies the column names.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original opened the file and never closed it).
    with open(fn, "r") as fh:
        rdr = csv.DictReader(fh, delimiter=str("\t"))
        for rec in rdr:
            if rec["id"].startswith("#"):
                continue
            yield rec
@pytest.mark.mapping
class Test_VariantMapper(unittest.TestCase):
    """Round-trip mapping tests driven by TSV fixture files.

    Each fixture row supplies equivalent HGVSg, HGVSc (or HGVSn), and
    optionally HGVSp expressions for one variant; the tests map
    g -> c/n, c/n -> g, and c -> p and verify the results match.
    """

    def setUp(self):
        self.hdp = hgvs.dataproviders.uta.connect(mode=os.environ.get("HGVS_CACHE_MODE", "run"), cache=CACHE)
        self.hm = hgvs.variantmapper.VariantMapper(self.hdp)
        self.hp = hgvs.parser.Parser()

    # ZCCHC3 -- one exon, + strand (NM_033089.6 vs NC_000020.10, cigar 484=3I2275=)
    def test_ZCCHC3_dbSNP(self):
        for rec in gxp_file_reader("tests/data/gcp/ZCCHC3-dbSNP.tsv"):
            self._test_gxp_mapping(rec)

    # ORAI1 -- two exons, + strand (NM_032790.3 vs NC_000012.11)
    def test_ORAI1_dbSNP(self):
        for rec in gxp_file_reader("tests/data/gcp/ORAI1-dbSNP.tsv"):
            self._test_gxp_mapping(rec)

    # FOLR3 -- multiple exons, + strand (NM_000804.2 vs NC_000011.9)
    def test_FOLR3_dbSNP(self):
        # TODO: CORE-158: g-to-c mapped insertions have incorrect interval bounds
        for rec in gxp_file_reader("tests/data/gcp/FOLR3-dbSNP.tsv"):
            self._test_gxp_mapping(rec)

    # ADRA2B -- one exon, - strand (NM_000682.5 vs NC_000002.11, cigar 891=9D2375=)
    def test_ADRA2B_dbSNP(self):
        for rec in gxp_file_reader("tests/data/gcp/ADRA2B-dbSNP.tsv"):
            self._test_gxp_mapping(rec)

    # JRK -- multiple exons, - strand (NM_001077527.1 vs NC_000008.10)
    def test_JRK_dbSNP(self):
        # TODO: CORE-157: del26 on -1 strands gets reverse complemented as del62
        for rec in gxp_file_reader("tests/data/gcp/JRK-dbSNP.tsv"):
            self._test_gxp_mapping(rec)

    def test_NEFL_dbSNP(self):
        for rec in gxp_file_reader("tests/data/gcp/NEFL-dbSNP.tsv"):
            self._test_gxp_mapping(rec)

    def test_DNAH11_hgmd(self):
        for rec in gxp_file_reader("tests/data/gcp/DNAH11-HGMD.tsv"):
            self._test_gxp_mapping(rec)

    def test_DNAH11_dbSNP_NM_003777(self):
        for rec in gxp_file_reader("tests/data/gcp/DNAH11-dbSNP-NM_003777.tsv"):
            self._test_gxp_mapping(rec)

    def test_DNAH11_dbSNP_NM_001277115(self):
        for rec in gxp_file_reader("tests/data/gcp/DNAH11-dbSNP-NM_001277115.tsv"):
            self._test_gxp_mapping(rec)

    @pytest.mark.regression
    def test_regression(self):
        for rec in gxp_file_reader("tests/data/gcp/regression.tsv"):
            self._test_gxp_mapping(rec)

    @pytest.mark.extra
    def test_DNAH11_dbSNP_full(self):
        for rec in gxp_file_reader("tests/data/gcp/DNAH11-dbSNP.tsv"):
            self._test_gxp_mapping(rec)

    def test_real(self):
        for rec in gxp_file_reader("tests/data/gcp/real.tsv"):
            self._test_gxp_mapping(rec)

    def test_noncoding(self):
        for rec in gxp_file_reader("tests/data/gcp/noncoding.tsv"):
            self._test_gxp_mapping(rec)

    def _test_gxp_mapping(self, rec):
        """Given one record (row) of g, c/n, and p (optional) test variants,
        map g->c/n, c/n->g, and c->p and verify equivalence.
        """

        def _rm_del_seq(vs):
            # Normalize "del<seq>ins" to bare "delins" so variants that differ
            # only in whether the deleted sequence is spelled out still compare
            # equal.  re.sub takes (pattern, repl, string); the previous
            # version passed the arguments in the wrong order, so the
            # normalization was never applied to the variant string.
            return re.sub(r"del\w+ins", "delins", vs)

        var_g = self.hp.parse_hgvs_variant(rec["HGVSg"])
        var_x = self.hp.parse_hgvs_variant(rec["HGVSc"])
        var_p = self.hp.parse_hgvs_variant(rec["HGVSp"]) if rec["HGVSp"] is not None and rec["HGVSp"] != "" else None

        # g -> c/n
        if var_x.type == "c":
            var_x_test = self.hm.g_to_c(var_g, var_x.ac)
        elif var_x.type == "n":
            var_x_test = self.hm.g_to_n(var_g, var_x.ac)
        else:
            # Previously an unexpected type fell through to a NameError;
            # fail with a diagnostic instead.
            self.fail("unsupported variant type %r (%s)" % (var_x.type, rec["id"]))
        self.assertEqual(
            _rm_del_seq(str(var_x)),
            _rm_del_seq(str(var_x_test)),
            msg="%s != %s (%s; HGVSg=%s)" % (str(var_x_test), str(var_x), rec["id"], rec["HGVSg"]))

        # c/n -> g
        if var_x.type == "c":
            var_g_test = self.hm.c_to_g(var_x, var_g.ac)
        else:  # "n" -- any other type already failed above
            var_g_test = self.hm.n_to_g(var_x, var_g.ac)
        self.assertEqual(
            _rm_del_seq(str(var_g)),
            _rm_del_seq(str(var_g_test)),
            msg="%s != %s (%s; HGVSc=%s)" % (str(var_g_test), str(var_g), rec["id"], rec["HGVSc"]))

        if var_p is not None:
            # c -> p
            hgvs_p_exp = str(var_p)
            var_p_test = self.hm.c_to_p(var_x, var_p.ac)
            if var_p.posedit and not var_p.posedit.uncertain:
                # if expected value isn't uncertain, strip uncertainty from the test value
                var_p_test.posedit.uncertain = False
            hgvs_p_test = str(var_p_test)
            if re.search(r"Ter$", hgvs_p_exp):
                # if the expected value doesn't carry a Ter count, strip it from the test value
                hgvs_p_test = re.sub(r"Ter\d+$", "Ter", hgvs_p_test)
            self.assertEqual(hgvs_p_exp, hgvs_p_test, msg="%s != %s (%s)" % (hgvs_p_exp, hgvs_p_test, rec["id"]))
if __name__ == "__main__":
unittest.main()
# <LICENSE>
# Copyright 2013-2015 HGVS Contributors (https://github.com/biocommons/hgvs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# </LICENSE>
| 47.633663 | 165 | 0.518499 |
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import pprint
import re
import sys
import os
import unittest
if sys.version_info < (3, ):
import unicodecsv as csv
else:
import csv
import pytest
from hgvs.exceptions import HGVSError
import hgvs.dataproviders.uta
import hgvs.parser
import hgvs.sequencevariant
import hgvs.variantmapper
from support import CACHE
def gxp_file_reader(fn):
rdr = csv.DictReader(open(fn, "r"), delimiter=str("\t"))
for rec in rdr:
if rec["id"].startswith("#"):
continue
yield rec
@pytest.mark.mapping
class Test_VariantMapper(unittest.TestCase):
def setUp(self):
self.hdp = hgvs.dataproviders.uta.connect(mode=os.environ.get("HGVS_CACHE_MODE", "run"), cache=CACHE)
self.hm = hgvs.variantmapper.VariantMapper(self.hdp)
self.hp = hgvs.parser.Parser()
def test_ZCCHC3_dbSNP(self):
for rec in gxp_file_reader("tests/data/gcp/ZCCHC3-dbSNP.tsv"):
self._test_gxp_mapping(rec)
def test_ORAI1_dbSNP(self):
for rec in gxp_file_reader("tests/data/gcp/ORAI1-dbSNP.tsv"):
self._test_gxp_mapping(rec)
def test_FOLR3_dbSNP(self):
for rec in gxp_file_reader("tests/data/gcp/FOLR3-dbSNP.tsv"):
self._test_gxp_mapping(rec)
def test_ADRA2B_dbSNP(self):
for rec in gxp_file_reader("tests/data/gcp/ADRA2B-dbSNP.tsv"):
self._test_gxp_mapping(rec)
def test_JRK_dbSNP(self):
for rec in gxp_file_reader("tests/data/gcp/JRK-dbSNP.tsv"):
self._test_gxp_mapping(rec)
def test_NEFL_dbSNP(self):
for rec in gxp_file_reader("tests/data/gcp/NEFL-dbSNP.tsv"):
self._test_gxp_mapping(rec)
def test_DNAH11_hgmd(self):
for rec in gxp_file_reader("tests/data/gcp/DNAH11-HGMD.tsv"):
self._test_gxp_mapping(rec)
def test_DNAH11_dbSNP_NM_003777(self):
for rec in gxp_file_reader("tests/data/gcp/DNAH11-dbSNP-NM_003777.tsv"):
self._test_gxp_mapping(rec)
def test_DNAH11_dbSNP_NM_001277115(self):
for rec in gxp_file_reader("tests/data/gcp/DNAH11-dbSNP-NM_001277115.tsv"):
self._test_gxp_mapping(rec)
@pytest.mark.regression
def test_regression(self):
for rec in gxp_file_reader("tests/data/gcp/regression.tsv"):
self._test_gxp_mapping(rec)
@pytest.mark.extra
def test_DNAH11_dbSNP_full(self):
for rec in gxp_file_reader("tests/data/gcp/DNAH11-dbSNP.tsv"):
self._test_gxp_mapping(rec)
def test_real(self):
for rec in gxp_file_reader("tests/data/gcp/real.tsv"):
self._test_gxp_mapping(rec)
def test_noncoding(self):
for rec in gxp_file_reader("tests/data/gcp/noncoding.tsv"):
self._test_gxp_mapping(rec)
def _test_gxp_mapping(self, rec):
def _rm_del_seq(vs):
return re.sub(vs, "del\w+ins", "delins")
var_g = self.hp.parse_hgvs_variant(rec["HGVSg"])
var_x = self.hp.parse_hgvs_variant(rec["HGVSc"])
var_p = self.hp.parse_hgvs_variant(rec["HGVSp"]) if rec["HGVSp"] is not None and rec["HGVSp"] != "" else None
if var_x.type == "c":
var_x_test = self.hm.g_to_c(var_g, var_x.ac)
elif var_x.type == "n":
var_x_test = self.hm.g_to_n(var_g, var_x.ac)
self.assertEquals(
_rm_del_seq(str(var_x)),
_rm_del_seq(str(var_x_test)),
msg="%s != %s (%s; HGVSg=%s)" % (str(var_x_test), str(var_x), rec["id"], rec["HGVSg"]))
if var_x.type == "c":
var_g_test = self.hm.c_to_g(var_x, var_g.ac)
elif var_x.type == "n":
var_g_test = self.hm.n_to_g(var_x, var_g.ac)
self.assertEquals(
_rm_del_seq(str(var_g)),
_rm_del_seq(str(var_g_test)),
msg="%s != %s (%s; HGVSc=%s)" % (str(var_g_test), str(var_g), rec["id"], rec["HGVSc"]))
if var_p is not None:
hgvs_p_exp = str(var_p)
var_p_test = self.hm.c_to_p(var_x, var_p.ac)
if var_p.posedit and not var_p.posedit.uncertain:
var_p_test.posedit.uncertain = False
hgvs_p_test = str(var_p_test)
if re.search("Ter$", hgvs_p_exp):
# if expected value doesn't have a count, strip it from the test
hgvs_p_test = re.sub("Ter\d+$", "Ter", hgvs_p_test)
self.assertEquals(hgvs_p_exp, hgvs_p_test, msg="%s != %s (%s)" % (hgvs_p_exp, hgvs_p_test, rec["id"]))
if __name__ == "__main__":
unittest.main()
| true | true |
f73cd95bb6aa4920a4638441783cc502b84894db | 11,177 | py | Python | combine_sg2im_neural_motifs/model.py | LUGUANSONG/i2g2i | ec532f2e128301472478c3d8fe4c72929e2967a4 | [
"MIT"
] | null | null | null | combine_sg2im_neural_motifs/model.py | LUGUANSONG/i2g2i | ec532f2e128301472478c3d8fe4c72929e2967a4 | [
"MIT"
] | 3 | 2021-06-08T21:42:14.000Z | 2022-01-13T02:48:20.000Z | combine_sg2im_neural_motifs/model.py | LUGUANSONG/i2g2i | ec532f2e128301472478c3d8fe4c72929e2967a4 | [
"MIT"
] | null | null | null | from lib.object_detector import ObjectDetector, gather_res
import torch
import torch.nn as nn
import torch.nn.parallel
from combine_sg2im_neural_motifs.sg2im_model import Sg2ImModel
from combine_sg2im_neural_motifs.discriminators import PatchDiscriminator, AcCropDiscriminator
import os
from collections import defaultdict
from lib.pytorch_misc import optimistic_restore
import torch.nn.functional as F
from config import BOX_SCALE
from sg2im.utils import timeit
def build_model(args):
    """Construct the Sg2Im generator and return ``(model, kwargs)``.

    When ``args.checkpoint_start_from`` is set, the model architecture and
    weights are restored from that checkpoint; otherwise a fresh model is
    built from the command-line arguments.
    """
    if args.checkpoint_start_from is None:
        kwargs = {
            'image_size': args.image_size,
            'embedding_dim': args.embedding_dim,
            'gconv_dim': args.gconv_dim,
            'gconv_hidden_dim': args.gconv_hidden_dim,
            'gconv_num_layers': args.gconv_num_layers,
            'mlp_normalization': args.mlp_normalization,
            'refinement_dims': args.refinement_network_dims,
            'normalization': args.normalization,
            'activation': args.activation,
            'mask_size': args.mask_size,
            'layout_noise_dim': args.layout_noise_dim,
        }
        return Sg2ImModel(**kwargs), kwargs

    checkpoint = torch.load(args.checkpoint_start_from)
    kwargs = checkpoint['model_kwargs']
    model = Sg2ImModel(**kwargs)
    # Strip a DataParallel-style 'module.' prefix from parameter names so the
    # state dict loads into a non-wrapped model.
    state_dict = {
        (key[7:] if key.startswith('module.') else key): value
        for key, value in checkpoint['model_state'].items()
    }
    model.load_state_dict(state_dict)
    return model, kwargs
def build_obj_discriminator(args, vocab):
    """Build the AC-GAN object-crop discriminator.

    Returns ``(None, {})`` when the overall discriminator loss weight or the
    object-discriminator weight is zero, i.e. the component is disabled.
    """
    if args.discriminator_loss_weight == 0 or args.d_obj_weight == 0:
        return None, {}
    d_kwargs = {
        'vocab': vocab,
        'arch': args.d_obj_arch,
        'normalization': args.d_normalization,
        'activation': args.d_activation,
        'padding': args.d_padding,
        'object_size': args.crop_size,
    }
    return AcCropDiscriminator(**d_kwargs), d_kwargs
def build_img_discriminator(args):
    """Build the whole-image patch discriminator.

    Returns ``(None, {})`` when the overall discriminator loss weight or the
    image-discriminator weight is zero, i.e. the component is disabled.
    """
    if args.discriminator_loss_weight == 0 or args.d_img_weight == 0:
        return None, {}
    d_kwargs = {
        'arch': args.d_img_arch,
        'normalization': args.d_normalization,
        'activation': args.d_activation,
        'padding': args.d_padding,
    }
    return PatchDiscriminator(**d_kwargs), d_kwargs
class neural_motifs_sg2im_model(nn.Module):
    """End-to-end pipeline: a frozen neural-motifs object detector supplies
    detected objects/boxes/features to an sg2im-style generator, which is
    trained adversarially against optional object-crop and full-image
    discriminators.
    """

    def __init__(self, args, ind_to_classes):
        super(neural_motifs_sg2im_model, self).__init__()
        self.args = args

        # define and initial detector (kept in eval mode; its weights are
        # restored from args.ckpt and not trained here)
        self.detector = ObjectDetector(classes=ind_to_classes, num_gpus=args.num_gpus,
                                       mode='refinerels' if not args.use_proposals else 'proposals',
                                       use_resnet=args.use_resnet)
        if args.ckpt is not None:
            ckpt = torch.load(args.ckpt)
            optimistic_restore(self.detector, ckpt['state_dict'])
        self.detector.eval()

        # define and initial generator, image_discriminator, obj_discriminator,
        # and corresponding optimizers
        vocab = {
            'object_idx_to_name': ind_to_classes,
        }
        self.model, model_kwargs = build_model(args)
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=args.learning_rate)

        # Either discriminator may be None when its loss weight is zero.
        self.obj_discriminator, d_obj_kwargs = build_obj_discriminator(args, vocab)
        self.img_discriminator, d_img_kwargs = build_img_discriminator(args)

        if self.obj_discriminator is not None:
            self.obj_discriminator.train()
            self.optimizer_d_obj = torch.optim.Adam(self.obj_discriminator.parameters(), lr=args.learning_rate)
        if self.img_discriminator is not None:
            self.img_discriminator.train()
            self.optimizer_d_img = torch.optim.Adam(self.img_discriminator.parameters(), lr=args.learning_rate)

        # Optionally resume generator/discriminator/optimizer state and the
        # bookkeeping dict from a previous run.
        restore_path = None
        if args.restore_from_checkpoint:
            restore_path = '%s_with_model.pt' % args.checkpoint_name
            restore_path = os.path.join(args.output_dir, restore_path)
        if restore_path is not None and os.path.isfile(restore_path):
            print('Restoring from checkpoint:')
            print(restore_path)
            checkpoint = torch.load(restore_path)
            self.model.load_state_dict(checkpoint['model_state'])
            self.optimizer.load_state_dict(checkpoint['optim_state'])

            if self.obj_discriminator is not None:
                self.obj_discriminator.load_state_dict(checkpoint['d_obj_state'])
                self.optimizer_d_obj.load_state_dict(checkpoint['d_obj_optim_state'])

            if self.img_discriminator is not None:
                self.img_discriminator.load_state_dict(checkpoint['d_img_state'])
                self.optimizer_d_img.load_state_dict(checkpoint['d_img_optim_state'])

            t = checkpoint['counters']['t']
            # Switch the generator to eval mode once training passes the
            # configured iteration threshold.
            if 0 <= args.eval_mode_after <= t:
                self.model.eval()
            else:
                self.model.train()
            epoch = checkpoint['counters']['epoch']
        else:
            # Fresh run: initialize counters and the bookkeeping/checkpoint
            # structure that training code appends metrics and states into.
            t, epoch = 0, 0
            checkpoint = {
                'vocab': vocab,
                'model_kwargs': model_kwargs,
                'd_obj_kwargs': d_obj_kwargs,
                'd_img_kwargs': d_img_kwargs,
                'losses_ts': [],
                'losses': defaultdict(list),
                'd_losses': defaultdict(list),
                'checkpoint_ts': [],
                'train_batch_data': [],
                'train_samples': [],
                'train_iou': [],
                'val_batch_data': [],
                'val_samples': [],
                'val_losses': defaultdict(list),
                'val_iou': [],
                'norm_d': [],
                'norm_g': [],
                'counters': {
                    't': None,
                    'epoch': None,
                },
                'model_state': None, 'model_best_state': None, 'optim_state': None,
                'd_obj_state': None, 'd_obj_best_state': None, 'd_obj_optim_state': None,
                'd_img_state': None, 'd_img_best_state': None, 'd_img_optim_state': None,
                'best_t': [],
            }

        self.t, self.epoch, self.checkpoint = t, epoch, checkpoint

    def forward(self, x, im_sizes, image_offset,
                gt_boxes=None, gt_classes=None, gt_rels=None, proposals=None, train_anchor_inds=None,
                return_fmap=False):
        """Run detector, generator, and both discriminator passes for one
        batch and return a Result bundle of all scores and images.
        """
        # forward detector
        with timeit('detector forward', self.args.timing):
            result = self.detector(x, im_sizes, image_offset, gt_boxes, gt_classes, gt_rels, proposals,
                                   train_anchor_inds, return_fmap=True)
        # NOTE(review): this *returns* a ValueError instance instead of
        # raising it; callers receive an exception object as the result.
        if result.is_none():
            return ValueError("heck")

        # forward generator on detector outputs (boxes normalized to [0, 1])
        imgs = F.interpolate(x, size=self.args.image_size)
        objs = result.obj_preds
        boxes = result.rm_box_priors / BOX_SCALE
        obj_to_img = result.im_inds - image_offset
        obj_fmap = result.obj_fmap

        # check if all images have at least one detection; drop those that
        # don't and re-index obj_to_img accordingly
        cnt = torch.zeros(len(imgs)).byte()
        cnt[obj_to_img] += 1
        if (cnt > 0).sum() != len(imgs):
            print("some imgs have no detection")
            print(cnt)
            # NOTE(review): cnt is a byte tensor whose entries may exceed 1;
            # imgs[cnt] relies on nonzero-as-True byte-mask indexing, which
            # newer PyTorch deprecates in favor of bool masks — confirm.
            imgs = imgs[cnt]
            obj_to_img_new = obj_to_img.clone()
            for i in range(len(cnt)):
                if cnt[i] == 0:
                    obj_to_img_new -= (obj_to_img > i).long()
            obj_to_img = obj_to_img_new

        with timeit('generator forward', self.args.timing):
            imgs_pred = self.model(obj_to_img, boxes, obj_fmap)

        # forward discriminators on generated images to get generator losses
        if self.obj_discriminator is not None:
            with timeit('d_obj forward for g', self.args.timing):
                g_scores_fake_crop, g_obj_scores_fake_crop = self.obj_discriminator(imgs_pred, objs, boxes, obj_to_img)

        if self.img_discriminator is not None:
            with timeit('d_img forward for g', self.args.timing):
                g_scores_fake_img = self.img_discriminator(imgs_pred)

        # forward discriminators on detached fakes + reals to get
        # discriminator losses
        if self.obj_discriminator is not None:
            imgs_fake = imgs_pred.detach()
            with timeit('d_obj forward for d', self.args.timing):
                d_scores_fake_crop, d_obj_scores_fake_crop = self.obj_discriminator(imgs_fake, objs, boxes, obj_to_img)
                d_scores_real_crop, d_obj_scores_real_crop = self.obj_discriminator(imgs, objs, boxes, obj_to_img)

        if self.img_discriminator is not None:
            imgs_fake = imgs_pred.detach()
            with timeit('d_img forward for d', self.args.timing):
                d_scores_fake_img = self.img_discriminator(imgs_fake)
                d_scores_real_img = self.img_discriminator(imgs)

        # NOTE(review): if either discriminator is None, the corresponding
        # score variables above are never bound and this construction raises
        # NameError — confirm both discriminators are always enabled here.
        return Result(
            imgs=imgs,
            imgs_pred=imgs_pred,
            objs=objs,
            g_scores_fake_crop=g_scores_fake_crop,
            g_obj_scores_fake_crop=g_obj_scores_fake_crop,
            g_scores_fake_img=g_scores_fake_img,
            d_scores_fake_crop=d_scores_fake_crop,
            d_obj_scores_fake_crop=d_obj_scores_fake_crop,
            d_scores_real_crop=d_scores_real_crop,
            d_obj_scores_real_crop=d_obj_scores_real_crop,
            d_scores_fake_img=d_scores_fake_img,
            d_scores_real_img=d_scores_real_img
        )

    def __getitem__(self, batch):
        """Hack to do multi-GPU training: replicate this module and apply
        each replica to one scattered shard of the batch.
        """
        batch.scatter()
        if self.args.num_gpus == 1:
            return self(*batch[0])

        replicas = nn.parallel.replicate(self, devices=list(range(self.args.num_gpus)))
        outputs = nn.parallel.parallel_apply(replicas, [batch[i] for i in range(self.args.num_gpus)])

        if self.training:
            return gather_res(outputs, 0, dim=0)
        return outputs
class Result(object):
    """Plain attribute bag holding the generator/discriminator outputs of a
    single forward pass; every constructor argument becomes an attribute of
    the same name.
    """

    def __init__(self, imgs=None,
                 imgs_pred=None,
                 objs=None,
                 g_scores_fake_crop=None,
                 g_obj_scores_fake_crop=None,
                 g_scores_fake_img=None,
                 d_scores_fake_crop=None,
                 d_obj_scores_fake_crop=None,
                 d_scores_real_crop=None,
                 d_obj_scores_real_crop=None,
                 d_scores_fake_img=None,
                 d_scores_real_img=None):
        # Capture every argument (minus self) as an instance attribute.
        attrs = dict(locals())
        attrs.pop('self')
        self.__dict__.update(attrs)

    def is_none(self):
        """Return True when no field has been populated."""
        return not any(v is not None for k, v in self.__dict__.items() if k != 'self')
| 40.205036 | 124 | 0.621007 | from lib.object_detector import ObjectDetector, gather_res
import torch
import torch.nn as nn
import torch.nn.parallel
from combine_sg2im_neural_motifs.sg2im_model import Sg2ImModel
from combine_sg2im_neural_motifs.discriminators import PatchDiscriminator, AcCropDiscriminator
import os
from collections import defaultdict
from lib.pytorch_misc import optimistic_restore
import torch.nn.functional as F
from config import BOX_SCALE
from sg2im.utils import timeit
def build_model(args):
if args.checkpoint_start_from is not None:
checkpoint = torch.load(args.checkpoint_start_from)
kwargs = checkpoint['model_kwargs']
model = Sg2ImModel(**kwargs)
raw_state_dict = checkpoint['model_state']
state_dict = {}
for k, v in raw_state_dict.items():
if k.startswith('module.'):
k = k[7:]
state_dict[k] = v
model.load_state_dict(state_dict)
else:
kwargs = {
'image_size': args.image_size,
'embedding_dim': args.embedding_dim,
'gconv_dim': args.gconv_dim,
'gconv_hidden_dim': args.gconv_hidden_dim,
'gconv_num_layers': args.gconv_num_layers,
'mlp_normalization': args.mlp_normalization,
'refinement_dims': args.refinement_network_dims,
'normalization': args.normalization,
'activation': args.activation,
'mask_size': args.mask_size,
'layout_noise_dim': args.layout_noise_dim,
}
model = Sg2ImModel(**kwargs)
return model, kwargs
def build_obj_discriminator(args, vocab):
discriminator = None
d_kwargs = {}
d_weight = args.discriminator_loss_weight
d_obj_weight = args.d_obj_weight
if d_weight == 0 or d_obj_weight == 0:
return discriminator, d_kwargs
d_kwargs = {
'vocab': vocab,
'arch': args.d_obj_arch,
'normalization': args.d_normalization,
'activation': args.d_activation,
'padding': args.d_padding,
'object_size': args.crop_size,
}
discriminator = AcCropDiscriminator(**d_kwargs)
return discriminator, d_kwargs
def build_img_discriminator(args):
discriminator = None
d_kwargs = {}
d_weight = args.discriminator_loss_weight
d_img_weight = args.d_img_weight
if d_weight == 0 or d_img_weight == 0:
return discriminator, d_kwargs
d_kwargs = {
'arch': args.d_img_arch,
'normalization': args.d_normalization,
'activation': args.d_activation,
'padding': args.d_padding,
}
discriminator = PatchDiscriminator(**d_kwargs)
return discriminator, d_kwargs
class neural_motifs_sg2im_model(nn.Module):
def __init__(self, args, ind_to_classes):
super(neural_motifs_sg2im_model, self).__init__()
self.args = args
self.detector = ObjectDetector(classes=ind_to_classes, num_gpus=args.num_gpus,
mode='refinerels' if not args.use_proposals else 'proposals',
use_resnet=args.use_resnet)
if args.ckpt is not None:
ckpt = torch.load(args.ckpt)
optimistic_restore(self.detector, ckpt['state_dict'])
self.detector.eval()
vocab = {
'object_idx_to_name': ind_to_classes,
}
self.model, model_kwargs = build_model(args)
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=args.learning_rate)
self.obj_discriminator, d_obj_kwargs = build_obj_discriminator(args, vocab)
self.img_discriminator, d_img_kwargs = build_img_discriminator(args)
if self.obj_discriminator is not None:
self.obj_discriminator.train()
self.optimizer_d_obj = torch.optim.Adam(self.obj_discriminator.parameters(), lr=args.learning_rate)
if self.img_discriminator is not None:
self.img_discriminator.train()
self.optimizer_d_img = torch.optim.Adam(self.img_discriminator.parameters(), lr=args.learning_rate)
restore_path = None
if args.restore_from_checkpoint:
restore_path = '%s_with_model.pt' % args.checkpoint_name
restore_path = os.path.join(args.output_dir, restore_path)
if restore_path is not None and os.path.isfile(restore_path):
print('Restoring from checkpoint:')
print(restore_path)
checkpoint = torch.load(restore_path)
self.model.load_state_dict(checkpoint['model_state'])
self.optimizer.load_state_dict(checkpoint['optim_state'])
if self.obj_discriminator is not None:
self.obj_discriminator.load_state_dict(checkpoint['d_obj_state'])
self.optimizer_d_obj.load_state_dict(checkpoint['d_obj_optim_state'])
if self.img_discriminator is not None:
self.img_discriminator.load_state_dict(checkpoint['d_img_state'])
self.optimizer_d_img.load_state_dict(checkpoint['d_img_optim_state'])
t = checkpoint['counters']['t']
if 0 <= args.eval_mode_after <= t:
self.model.eval()
else:
self.model.train()
epoch = checkpoint['counters']['epoch']
else:
t, epoch = 0, 0
checkpoint = {
'vocab': vocab,
'model_kwargs': model_kwargs,
'd_obj_kwargs': d_obj_kwargs,
'd_img_kwargs': d_img_kwargs,
'losses_ts': [],
'losses': defaultdict(list),
'd_losses': defaultdict(list),
'checkpoint_ts': [],
'train_batch_data': [],
'train_samples': [],
'train_iou': [],
'val_batch_data': [],
'val_samples': [],
'val_losses': defaultdict(list),
'val_iou': [],
'norm_d': [],
'norm_g': [],
'counters': {
't': None,
'epoch': None,
},
'model_state': None, 'model_best_state': None, 'optim_state': None,
'd_obj_state': None, 'd_obj_best_state': None, 'd_obj_optim_state': None,
'd_img_state': None, 'd_img_best_state': None, 'd_img_optim_state': None,
'best_t': [],
}
self.t, self.epoch, self.checkpoint = t, epoch, checkpoint
def forward(self, x, im_sizes, image_offset,
gt_boxes=None, gt_classes=None, gt_rels=None, proposals=None, train_anchor_inds=None,
return_fmap=False):
with timeit('detector forward', self.args.timing):
result = self.detector(x, im_sizes, image_offset, gt_boxes, gt_classes, gt_rels, proposals,
train_anchor_inds, return_fmap=True)
if result.is_none():
return ValueError("heck")
imgs = F.interpolate(x, size=self.args.image_size)
objs = result.obj_preds
boxes = result.rm_box_priors / BOX_SCALE
obj_to_img = result.im_inds - image_offset
obj_fmap = result.obj_fmap
cnt = torch.zeros(len(imgs)).byte()
cnt[obj_to_img] += 1
if (cnt > 0).sum() != len(imgs):
print("some imgs have no detection")
print(cnt)
imgs = imgs[cnt]
obj_to_img_new = obj_to_img.clone()
for i in range(len(cnt)):
if cnt[i] == 0:
obj_to_img_new -= (obj_to_img > i).long()
obj_to_img = obj_to_img_new
with timeit('generator forward', self.args.timing):
imgs_pred = self.model(obj_to_img, boxes, obj_fmap)
if self.obj_discriminator is not None:
with timeit('d_obj forward for g', self.args.timing):
g_scores_fake_crop, g_obj_scores_fake_crop = self.obj_discriminator(imgs_pred, objs, boxes, obj_to_img)
if self.img_discriminator is not None:
with timeit('d_img forward for g', self.args.timing):
g_scores_fake_img = self.img_discriminator(imgs_pred)
if self.obj_discriminator is not None:
imgs_fake = imgs_pred.detach()
with timeit('d_obj forward for d', self.args.timing):
d_scores_fake_crop, d_obj_scores_fake_crop = self.obj_discriminator(imgs_fake, objs, boxes, obj_to_img)
d_scores_real_crop, d_obj_scores_real_crop = self.obj_discriminator(imgs, objs, boxes, obj_to_img)
if self.img_discriminator is not None:
imgs_fake = imgs_pred.detach()
with timeit('d_img forward for d', self.args.timing):
d_scores_fake_img = self.img_discriminator(imgs_fake)
d_scores_real_img = self.img_discriminator(imgs)
return Result(
imgs=imgs,
imgs_pred=imgs_pred,
objs=objs,
g_scores_fake_crop=g_scores_fake_crop,
g_obj_scores_fake_crop=g_obj_scores_fake_crop,
g_scores_fake_img=g_scores_fake_img,
d_scores_fake_crop=d_scores_fake_crop,
d_obj_scores_fake_crop=d_obj_scores_fake_crop,
d_scores_real_crop=d_scores_real_crop,
d_obj_scores_real_crop=d_obj_scores_real_crop,
d_scores_fake_img=d_scores_fake_img,
d_scores_real_img=d_scores_real_img
)
def __getitem__(self, batch):
batch.scatter()
if self.args.num_gpus == 1:
return self(*batch[0])
replicas = nn.parallel.replicate(self, devices=list(range(self.args.num_gpus)))
outputs = nn.parallel.parallel_apply(replicas, [batch[i] for i in range(self.args.num_gpus)])
if self.training:
return gather_res(outputs, 0, dim=0)
return outputs
class Result(object):
def __init__(self, imgs=None,
imgs_pred=None,
objs=None,
g_scores_fake_crop=None,
g_obj_scores_fake_crop=None,
g_scores_fake_img=None,
d_scores_fake_crop=None,
d_obj_scores_fake_crop=None,
d_scores_real_crop=None,
d_obj_scores_real_crop=None,
d_scores_fake_img=None,
d_scores_real_img=None):
self.__dict__.update(locals())
del self.__dict__['self']
def is_none(self):
return all([v is None for k, v in self.__dict__.items() if k != 'self'])
| true | true |
f73cd968a4fa0b9cc7113032ba81b1e6191e4673 | 994 | py | Python | toontown/shtiker/NPCFriendPage.py | journeyfan/toontown-journey | 7a4db507e5c1c38a014fc65588086d9655aaa5b4 | [
"MIT"
] | 1 | 2020-09-27T22:12:47.000Z | 2020-09-27T22:12:47.000Z | toontown/shtiker/NPCFriendPage.py | journeyfan/toontown-journey | 7a4db507e5c1c38a014fc65588086d9655aaa5b4 | [
"MIT"
] | null | null | null | toontown/shtiker/NPCFriendPage.py | journeyfan/toontown-journey | 7a4db507e5c1c38a014fc65588086d9655aaa5b4 | [
"MIT"
] | 2 | 2020-09-26T20:37:18.000Z | 2020-11-15T20:55:33.000Z | from . import ShtikerPage
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from toontown.toon import NPCFriendPanel
from toontown.toonbase import TTLocalizer
class NPCFriendPage(ShtikerPage.ShtikerPage):
    """Shticker-book page displaying the toon's NPC-friend (SOS card) collection."""

    def __init__(self):
        ShtikerPage.ShtikerPage.__init__(self)

    def load(self):
        # Page heading label.
        self.title = DirectLabel(
            parent=self, relief=None,
            text=TTLocalizer.NPCFriendPageTitle,
            text_scale=0.12, textMayChange=0,
            pos=(0, 0, 0.6))
        # Panel showing the collection of NPC friend cards.
        panel = NPCFriendPanel.NPCFriendPanel(parent=self)
        panel.setScale(0.1225)
        panel.setZ(-0.03)
        self.friendPanel = panel

    def unload(self):
        ShtikerPage.ShtikerPage.unload(self)
        del self.title
        del self.friendPanel

    def updatePage(self):
        # fCallable=0 presumably renders the cards as display-only
        # (non-clickable) — confirm against NPCFriendPanel.update.
        self.friendPanel.update(base.localAvatar.NPCFriendsDict, fCallable=0)

    def enter(self):
        # Refresh the card display before showing the page.
        self.updatePage()
        ShtikerPage.ShtikerPage.enter(self)

    def exit(self):
        ShtikerPage.ShtikerPage.exit(self)
ShtikerPage.ShtikerPage.exit(self) | 32.064516 | 146 | 0.714286 | from . import ShtikerPage
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from toontown.toon import NPCFriendPanel
from toontown.toonbase import TTLocalizer
class NPCFriendPage(ShtikerPage.ShtikerPage):
def __init__(self):
ShtikerPage.ShtikerPage.__init__(self)
def load(self):
self.title = DirectLabel(parent=self, relief=None, text=TTLocalizer.NPCFriendPageTitle, text_scale=0.12, textMayChange=0, pos=(0, 0, 0.6))
self.friendPanel = NPCFriendPanel.NPCFriendPanel(parent=self)
self.friendPanel.setScale(0.1225)
self.friendPanel.setZ(-0.03)
def unload(self):
ShtikerPage.ShtikerPage.unload(self)
del self.title
del self.friendPanel
def updatePage(self):
self.friendPanel.update(base.localAvatar.NPCFriendsDict, fCallable=0)
def enter(self):
self.updatePage()
ShtikerPage.ShtikerPage.enter(self)
def exit(self):
ShtikerPage.ShtikerPage.exit(self) | true | true |
f73cd977f85b3c9985295649dd8bbddf10edc58b | 848 | py | Python | neighbourapp/migrations/0004_newpost.py | dennis027/neighbourhood | ad0878ded40af6917dbf4a8905db4f4915627221 | [
"MIT"
] | null | null | null | neighbourapp/migrations/0004_newpost.py | dennis027/neighbourhood | ad0878ded40af6917dbf4a8905db4f4915627221 | [
"MIT"
] | null | null | null | neighbourapp/migrations/0004_newpost.py | dennis027/neighbourhood | ad0878ded40af6917dbf4a8905db4f4915627221 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.5 on 2021-07-24 21:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated (Django 3.2.5): adds the NewPost model — a user-owned
    # post with a title and subject.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('neighbourapp', '0003_business_neighbourhood_user'),
    ]

    operations = [
        migrations.CreateModel(
            name='NewPost',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('subject', models.CharField(max_length=255)),
                # CASCADE: a post is deleted together with its author.
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 32.615385 | 118 | 0.641509 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('neighbourapp', '0003_business_neighbourhood_user'),
]
operations = [
migrations.CreateModel(
name='NewPost',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('subject', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| true | true |
f73cd9905bcf7f44b5ee4ee03fe193ad304a4274 | 5,508 | py | Python | inference.py | charzy/vocalremover | d7a32c20bb0e272f7bb5a486cd2b4ced45e214e8 | [
"MIT"
] | 411 | 2019-08-15T10:56:09.000Z | 2022-03-27T18:34:11.000Z | vocal-remover-master/inference.py | fernando-figueredo/ChordsWebApp | 9bf983ab5579c36c75447c74eec0400d78ab49f9 | [
"MIT"
] | 71 | 2019-08-13T15:46:50.000Z | 2022-03-31T13:47:48.000Z | vocal-remover-master/inference.py | fernando-figueredo/ChordsWebApp | 9bf983ab5579c36c75447c74eec0400d78ab49f9 | [
"MIT"
] | 83 | 2019-10-10T08:32:31.000Z | 2022-03-15T09:16:39.000Z | import argparse
import os
import cv2
import librosa
import numpy as np
import soundfile as sf
import torch
from tqdm import tqdm
from lib import dataset
from lib import nets
from lib import spec_utils
class VocalRemover(object):
def __init__(self, model, device, window_size):
self.model = model
self.offset = model.offset
self.device = device
self.window_size = window_size
def _execute(self, X_mag_pad, roi_size, n_window):
self.model.eval()
with torch.no_grad():
preds = []
for i in tqdm(range(n_window)):
start = i * roi_size
X_mag_window = X_mag_pad[None, :, :, start:start + self.window_size]
X_mag_window = torch.from_numpy(X_mag_window).to(self.device)
pred = self.model.predict(X_mag_window)
pred = pred.detach().cpu().numpy()
preds.append(pred[0])
pred = np.concatenate(preds, axis=2)
return pred
def preprocess(self, X_spec):
X_mag = np.abs(X_spec)
X_phase = np.angle(X_spec)
return X_mag, X_phase
def inference(self, X_spec):
X_mag, X_phase = self.preprocess(X_spec)
coef = X_mag.max()
X_mag_pre = X_mag / coef
n_frame = X_mag_pre.shape[2]
pad_l, pad_r, roi_size = dataset.make_padding(n_frame, self.window_size, self.offset)
n_window = int(np.ceil(n_frame / roi_size))
X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode='constant')
pred = self._execute(X_mag_pad, roi_size, n_window)
pred = pred[:, :, :n_frame]
return pred * coef, X_mag, np.exp(1.j * X_phase)
def inference_tta(self, X_spec):
X_mag, X_phase = self.preprocess(X_spec)
coef = X_mag.max()
X_mag_pre = X_mag / coef
n_frame = X_mag_pre.shape[2]
pad_l, pad_r, roi_size = dataset.make_padding(n_frame, self.window_size, self.offset)
n_window = int(np.ceil(n_frame / roi_size))
X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode='constant')
pred = self._execute(X_mag_pad, roi_size, n_window)
pred = pred[:, :, :n_frame]
pad_l += roi_size // 2
pad_r += roi_size // 2
n_window += 1
X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode='constant')
pred_tta = self._execute(X_mag_pad, roi_size, n_window)
pred_tta = pred_tta[:, :, roi_size // 2:]
pred_tta = pred_tta[:, :, :n_frame]
return (pred + pred_tta) * 0.5 * coef, X_mag, np.exp(1.j * X_phase)
def main():
    """CLI entry point: separate a mixture audio file into instruments/vocals.

    Writes ``<basename>_Instruments.wav`` and ``<basename>_Vocals.wav`` (and
    optionally spectrogram JPEGs) into the current working directory.
    """
    p = argparse.ArgumentParser()
    p.add_argument('--gpu', '-g', type=int, default=-1)
    p.add_argument('--pretrained_model', '-P', type=str, default='models/baseline.pth')
    p.add_argument('--input', '-i', required=True)
    p.add_argument('--sr', '-r', type=int, default=44100)
    p.add_argument('--n_fft', '-f', type=int, default=2048)
    p.add_argument('--hop_length', '-l', type=int, default=1024)
    p.add_argument('--window_size', '-w', type=int, default=512)
    p.add_argument('--output_image', '-I', action='store_true')
    p.add_argument('--postprocess', '-p', action='store_true')
    p.add_argument('--tta', '-t', action='store_true')
    args = p.parse_args()
    print('loading model...', end=' ')
    # Weights are loaded on CPU first; moved to GPU below if requested/available.
    device = torch.device('cpu')
    model = nets.CascadedASPPNet(args.n_fft)
    model.load_state_dict(torch.load(args.pretrained_model, map_location=device))
    if torch.cuda.is_available() and args.gpu >= 0:
        device = torch.device('cuda:{}'.format(args.gpu))
        model.to(device)
    print('done')
    print('loading wave source...', end=' ')
    X, sr = librosa.load(
        args.input, args.sr, False, dtype=np.float32, res_type='kaiser_fast')
    basename = os.path.splitext(os.path.basename(args.input))[0]
    print('done')
    if X.ndim == 1:
        # Mono input: duplicate the channel so the model always sees stereo.
        X = np.asarray([X, X])
    print('stft of wave source...', end=' ')
    X = spec_utils.wave_to_spectrogram(X, args.hop_length, args.n_fft)
    print('done')
    vr = VocalRemover(model, device, args.window_size)
    # TTA averages a half-window-shifted second pass with the normal pass.
    if args.tta:
        pred, X_mag, X_phase = vr.inference_tta(X)
    else:
        pred, X_mag, X_phase = vr.inference(X)
    if args.postprocess:
        # Silence instrument frames wherever the vocal residual dominates.
        print('post processing...', end=' ')
        pred_inv = np.clip(X_mag - pred, 0, np.inf)
        pred = spec_utils.mask_silence(pred, pred_inv)
        print('done')
    print('inverse stft of instruments...', end=' ')
    y_spec = pred * X_phase
    wave = spec_utils.spectrogram_to_wave(y_spec, hop_length=args.hop_length)
    print('done')
    sf.write('{}_Instruments.wav'.format(basename), wave.T, sr)
    print('inverse stft of vocals...', end=' ')
    # Vocals = residual magnitude (mixture minus instruments), original phase.
    v_spec = np.clip(X_mag - pred, 0, np.inf) * X_phase
    wave = spec_utils.spectrogram_to_wave(v_spec, hop_length=args.hop_length)
    print('done')
    sf.write('{}_Vocals.wav'.format(basename), wave.T, sr)
    if args.output_image:
        with open('{}_Instruments.jpg'.format(basename), mode='wb') as f:
            image = spec_utils.spectrogram_to_image(y_spec)
            _, bin_image = cv2.imencode('.jpg', image)
            bin_image.tofile(f)
        with open('{}_Vocals.jpg'.format(basename), mode='wb') as f:
            image = spec_utils.spectrogram_to_image(v_spec)
            _, bin_image = cv2.imencode('.jpg', image)
            bin_image.tofile(f)
if __name__ == '__main__':
    main()
| 32.982036 | 93 | 0.611837 | import argparse
import os
import cv2
import librosa
import numpy as np
import soundfile as sf
import torch
from tqdm import tqdm
from lib import dataset
from lib import nets
from lib import spec_utils
class VocalRemover(object):
def __init__(self, model, device, window_size):
self.model = model
self.offset = model.offset
self.device = device
self.window_size = window_size
def _execute(self, X_mag_pad, roi_size, n_window):
self.model.eval()
with torch.no_grad():
preds = []
for i in tqdm(range(n_window)):
start = i * roi_size
X_mag_window = X_mag_pad[None, :, :, start:start + self.window_size]
X_mag_window = torch.from_numpy(X_mag_window).to(self.device)
pred = self.model.predict(X_mag_window)
pred = pred.detach().cpu().numpy()
preds.append(pred[0])
pred = np.concatenate(preds, axis=2)
return pred
def preprocess(self, X_spec):
X_mag = np.abs(X_spec)
X_phase = np.angle(X_spec)
return X_mag, X_phase
def inference(self, X_spec):
X_mag, X_phase = self.preprocess(X_spec)
coef = X_mag.max()
X_mag_pre = X_mag / coef
n_frame = X_mag_pre.shape[2]
pad_l, pad_r, roi_size = dataset.make_padding(n_frame, self.window_size, self.offset)
n_window = int(np.ceil(n_frame / roi_size))
X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode='constant')
pred = self._execute(X_mag_pad, roi_size, n_window)
pred = pred[:, :, :n_frame]
return pred * coef, X_mag, np.exp(1.j * X_phase)
def inference_tta(self, X_spec):
X_mag, X_phase = self.preprocess(X_spec)
coef = X_mag.max()
X_mag_pre = X_mag / coef
n_frame = X_mag_pre.shape[2]
pad_l, pad_r, roi_size = dataset.make_padding(n_frame, self.window_size, self.offset)
n_window = int(np.ceil(n_frame / roi_size))
X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode='constant')
pred = self._execute(X_mag_pad, roi_size, n_window)
pred = pred[:, :, :n_frame]
pad_l += roi_size // 2
pad_r += roi_size // 2
n_window += 1
X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode='constant')
pred_tta = self._execute(X_mag_pad, roi_size, n_window)
pred_tta = pred_tta[:, :, roi_size // 2:]
pred_tta = pred_tta[:, :, :n_frame]
return (pred + pred_tta) * 0.5 * coef, X_mag, np.exp(1.j * X_phase)
def main():
p = argparse.ArgumentParser()
p.add_argument('--gpu', '-g', type=int, default=-1)
p.add_argument('--pretrained_model', '-P', type=str, default='models/baseline.pth')
p.add_argument('--input', '-i', required=True)
p.add_argument('--sr', '-r', type=int, default=44100)
p.add_argument('--n_fft', '-f', type=int, default=2048)
p.add_argument('--hop_length', '-l', type=int, default=1024)
p.add_argument('--window_size', '-w', type=int, default=512)
p.add_argument('--output_image', '-I', action='store_true')
p.add_argument('--postprocess', '-p', action='store_true')
p.add_argument('--tta', '-t', action='store_true')
args = p.parse_args()
print('loading model...', end=' ')
device = torch.device('cpu')
model = nets.CascadedASPPNet(args.n_fft)
model.load_state_dict(torch.load(args.pretrained_model, map_location=device))
if torch.cuda.is_available() and args.gpu >= 0:
device = torch.device('cuda:{}'.format(args.gpu))
model.to(device)
print('done')
print('loading wave source...', end=' ')
X, sr = librosa.load(
args.input, args.sr, False, dtype=np.float32, res_type='kaiser_fast')
basename = os.path.splitext(os.path.basename(args.input))[0]
print('done')
if X.ndim == 1:
X = np.asarray([X, X])
print('stft of wave source...', end=' ')
X = spec_utils.wave_to_spectrogram(X, args.hop_length, args.n_fft)
print('done')
vr = VocalRemover(model, device, args.window_size)
if args.tta:
pred, X_mag, X_phase = vr.inference_tta(X)
else:
pred, X_mag, X_phase = vr.inference(X)
if args.postprocess:
print('post processing...', end=' ')
pred_inv = np.clip(X_mag - pred, 0, np.inf)
pred = spec_utils.mask_silence(pred, pred_inv)
print('done')
print('inverse stft of instruments...', end=' ')
y_spec = pred * X_phase
wave = spec_utils.spectrogram_to_wave(y_spec, hop_length=args.hop_length)
print('done')
sf.write('{}_Instruments.wav'.format(basename), wave.T, sr)
print('inverse stft of vocals...', end=' ')
v_spec = np.clip(X_mag - pred, 0, np.inf) * X_phase
wave = spec_utils.spectrogram_to_wave(v_spec, hop_length=args.hop_length)
print('done')
sf.write('{}_Vocals.wav'.format(basename), wave.T, sr)
if args.output_image:
with open('{}_Instruments.jpg'.format(basename), mode='wb') as f:
image = spec_utils.spectrogram_to_image(y_spec)
_, bin_image = cv2.imencode('.jpg', image)
bin_image.tofile(f)
with open('{}_Vocals.jpg'.format(basename), mode='wb') as f:
image = spec_utils.spectrogram_to_image(v_spec)
_, bin_image = cv2.imencode('.jpg', image)
bin_image.tofile(f)
if __name__ == '__main__':
main()
| true | true |
f73cdb1d8c7cc059573c2f696a96be998cad3ac5 | 535 | py | Python | tests/test_40_reformat_zarr.py | shaorenshengg/bopene | 5ad6030e5ff8b501c220ddfdb6510e45399a7ab8 | [
"Apache-2.0"
] | 130 | 2021-04-01T08:23:49.000Z | 2022-03-28T15:59:57.000Z | tests/test_40_reformat_zarr.py | shaorenshengg/bopene | 5ad6030e5ff8b501c220ddfdb6510e45399a7ab8 | [
"Apache-2.0"
] | 80 | 2021-04-11T09:57:43.000Z | 2022-03-18T11:47:04.000Z | tests/test_40_reformat_zarr.py | shaorenshengg/bopene | 5ad6030e5ff8b501c220ddfdb6510e45399a7ab8 | [
"Apache-2.0"
] | 9 | 2021-04-25T21:44:16.000Z | 2022-03-30T06:44:48.000Z | import pathlib
import typing as T
import pytest
from xarray_sentinel import reformat
pytest.importorskip("zarr")
DATA_FOLDER = pathlib.Path(__file__).parent / "data"
def test_to_group_zarr(tmpdir: T.Any) -> None:
product_path = (
DATA_FOLDER
/ "S1B_IW_SLC__1SDV_20210401T052622_20210401T052650_026269_032297_EFA4.SAFE"
)
tmp_path = str(tmpdir.join("tmp.zarr"))
groups = {"IW1/VV/gcp": "IW1/VV/gcp", "IW2/VH/attitude": "IW2/VH/attitude"}
reformat.to_group_zarr(product_path, tmp_path, groups)
| 24.318182 | 84 | 0.723364 | import pathlib
import typing as T
import pytest
from xarray_sentinel import reformat
pytest.importorskip("zarr")
DATA_FOLDER = pathlib.Path(__file__).parent / "data"
def test_to_group_zarr(tmpdir: T.Any) -> None:
product_path = (
DATA_FOLDER
/ "S1B_IW_SLC__1SDV_20210401T052622_20210401T052650_026269_032297_EFA4.SAFE"
)
tmp_path = str(tmpdir.join("tmp.zarr"))
groups = {"IW1/VV/gcp": "IW1/VV/gcp", "IW2/VH/attitude": "IW2/VH/attitude"}
reformat.to_group_zarr(product_path, tmp_path, groups)
| true | true |
f73cdbfb3cb9a4b8dc9da1d5d65c5ffd3f04f0c7 | 7,457 | py | Python | src/run.py | SaynaEbrahimi/hat | c1333c5f1639a011db336a99eecb75cac8738212 | [
"MIT"
] | 1 | 2021-03-28T09:47:24.000Z | 2021-03-28T09:47:24.000Z | src/run.py | SaynaEbrahimi/hat | c1333c5f1639a011db336a99eecb75cac8738212 | [
"MIT"
] | null | null | null | src/run.py | SaynaEbrahimi/hat | c1333c5f1639a011db336a99eecb75cac8738212 | [
"MIT"
] | null | null | null | import sys,os,argparse,time
import numpy as np
import torch
import utils
tstart=time.time()  # wall-clock start; elapsed time reported at the end

# Arguments
parser=argparse.ArgumentParser(description='xxx')
parser.add_argument('--seed',type=int,default=0,help='(default=%(default)d)')
parser.add_argument('--experiment',default='',type=str,required=True,choices=['mnist2','pmnist','cifar','mixture'],help='(default=%(default)s)')
parser.add_argument('--approach',default='',type=str,required=True,choices=['random','sgd','sgd-frozen','lwf','lfl','ewc','imm-mean','progressive','pathnet',
                                                                            'imm-mode','sgd-restart',
                                                                            'joint','alexnet-hat','resnet-hat','hat-test'],help='(default=%(default)s)')
parser.add_argument('--output',default='',type=str,required=False,help='(default=%(default)s)')
parser.add_argument('--nepochs',default=200,type=int,required=False,help='(default=%(default)d)')
parser.add_argument('--lr',default=0.05,type=float,required=False,help='(default=%(default)f)')
parser.add_argument('--parameter',type=str,default='',help='(default=%(default)s)')
args=parser.parse_args()
# Default result file name encodes experiment, approach and seed.
if args.output=='':
    args.output='../res/'+args.experiment+'_'+args.approach+'_'+str(args.seed)+'.txt'
print('='*100)
print('Arguments =')
for arg in vars(args):
    print('\t'+arg+':',getattr(args,arg))
print('='*100)
########################################################################################################################

# Seed
# Seed numpy and torch (CPU + CUDA) for reproducibility; CUDA is mandatory.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available(): torch.cuda.manual_seed(args.seed)
else: print('[CUDA unavailable]'); sys.exit()

# Args -- Experiment
# Select the dataloader module that matches the requested benchmark.
if args.experiment=='mnist2':
    from dataloaders import mnist2 as dataloader
elif args.experiment=='pmnist':
    from dataloaders import pmnist as dataloader
elif args.experiment=='cifar':
    from dataloaders import cifar as dataloader
elif args.experiment=='mixture':
    from dataloaders import mixture as dataloader

# Args -- Approach
# Select the continual-learning approach module; each exposes an Appr class.
if args.approach=='random':
    from approaches import random as approach
elif args.approach=='sgd':
    from approaches import sgd as approach
elif args.approach=='sgd-restart':
    from approaches import sgd_restart as approach
elif args.approach=='sgd-frozen':
    from approaches import sgd_frozen as approach
elif args.approach=='lwf':
    from approaches import lwf as approach
elif args.approach=='lfl':
    from approaches import lfl as approach
elif args.approach=='ewc':
    from approaches import ewc as approach
elif args.approach=='imm-mean':
    from approaches import imm_mean as approach
elif args.approach=='imm-mode':
    from approaches import imm_mode as approach
elif args.approach=='progressive':
    from approaches import progressive as approach
elif args.approach=='pathnet':
    from approaches import pathnet as approach
elif args.approach=='hat-test':
    from approaches import hat_test as approach
elif args.approach=='alexnet-hat' or args.approach=='resnet-hat':
    from approaches import hat as approach
elif args.approach=='joint':
    from approaches import joint as approach

# Args -- Network
# Select the backbone: MLPs for (P)MNIST, AlexNet/ResNet variants otherwise.
if args.experiment=='mnist2' or args.experiment=='pmnist':
    # NOTE(review): 'hat' is not among the argparse choices above, so with
    # '--approach alexnet-hat' on (P)MNIST this picks plain mlp, not mlp_hat.
    # Looks like a latent bug -- confirm intended behavior.
    if args.approach=='hat' or args.approach=='hat-test':
        from networks import mlp_hat as network
    else:
        from networks import mlp as network
else:
    if args.approach=='lfl':
        from networks import alexnet_lfl as network
    elif args.approach=='alexnet-hat':
        from networks import alexnet_hat as network
    elif args.approach=='resnet-hat':
        from networks import resnet_hat as network
    elif args.approach=='progressive':
        from networks import alexnet_progressive as network
    elif args.approach=='pathnet':
        from networks import alexnet_pathnet as network
    elif args.approach=='hat-test':
        from networks import alexnet_hat_test as network
    else:
        from networks import alexnet as network
########################################################################################################################
# Load
print('Load data...')
data,taskcla,inputsize=dataloader.get(seed=args.seed)
print('Input size =',inputsize,'\nTask info =',taskcla)

# Inits
print('Inits...')
net=network.Net(inputsize,taskcla).cuda()
utils.print_model_report(net)
appr=approach.Appr(net,nepochs=args.nepochs,lr=args.lr,args=args)
print(appr.criterion)
utils.print_optimizer_config(appr.optimizer)
print('-'*100)

# Loop tasks
# acc[t,u] / lss[t,u]: test accuracy / loss on task u after training task t.
# Only the lower triangle gets filled; row t summarizes forgetting so far.
acc=np.zeros((len(taskcla),len(taskcla)),dtype=np.float32)
lss=np.zeros((len(taskcla),len(taskcla)),dtype=np.float32)
for t,ncla in taskcla:
    print('*'*100)
    print('Task {:2d} ({:s})'.format(t,data[t]['name']))
    print('*'*100)
    if args.approach == 'joint':
        # Get data. We do not put it to GPU
        # Joint baseline trains on the union of all tasks seen so far,
        # so the tensors are accumulated (kept on CPU to fit in memory).
        if t==0:
            xtrain=data[t]['train']['x']
            ytrain=data[t]['train']['y']
            xvalid=data[t]['valid']['x']
            yvalid=data[t]['valid']['y']
            task_t=t*torch.ones(xtrain.size(0)).int()
            task_v=t*torch.ones(xvalid.size(0)).int()
            task=[task_t,task_v]
        else:
            xtrain=torch.cat((xtrain,data[t]['train']['x']))
            ytrain=torch.cat((ytrain,data[t]['train']['y']))
            xvalid=torch.cat((xvalid,data[t]['valid']['x']))
            yvalid=torch.cat((yvalid,data[t]['valid']['y']))
            task_t=torch.cat((task_t,t*torch.ones(data[t]['train']['y'].size(0)).int()))
            task_v=torch.cat((task_v,t*torch.ones(data[t]['valid']['y'].size(0)).int()))
            task=[task_t,task_v]
    else:
        # Get data
        xtrain=data[t]['train']['x'].cuda()
        ytrain=data[t]['train']['y'].cuda()
        xvalid=data[t]['valid']['x'].cuda()
        yvalid=data[t]['valid']['y'].cuda()
        task=t

    # Train
    appr.train(task,xtrain,ytrain,xvalid,yvalid)
    print('-'*100)

    # Test
    # Evaluate on every task seen so far to measure forgetting.
    for u in range(t+1):
        xtest=data[u]['test']['x'].cuda()
        ytest=data[u]['test']['y'].cuda()
        test_loss,test_acc=appr.eval(u,xtest,ytest)
        print('>>> Test on task {:2d} - {:15s}: loss={:.3f}, acc={:5.1f}% <<<'.format(u,data[u]['name'],test_loss,100*test_acc))
        acc[t,u]=test_acc
        lss[t,u]=test_loss

    # Save
    # Rewritten after every task so interrupted runs keep partial results.
    print('Save at '+args.output)
    np.savetxt(args.output,acc,'%.4f')

# Done
print('*'*100)
print('Accuracies =')
for i in range(acc.shape[0]):
    print('\t',end='')
    for j in range(acc.shape[1]):
        print('{:5.1f}% '.format(100*acc[i,j]),end='')
    print()
print('*'*100)
print('Done!')

print('[Elapsed time = {:.1f} h]'.format((time.time()-tstart)/(60*60)))

# Some approaches keep a logs dict; if so, attach per-task names/metrics
# and pickle everything to the approach's logpath.
if hasattr(appr, 'logs'):
    if appr.logs is not None:
        #save task names
        from copy import deepcopy
        appr.logs['task_name'] = {}
        appr.logs['test_acc'] = {}
        appr.logs['test_loss'] = {}
        for t,ncla in taskcla:
            appr.logs['task_name'][t] = deepcopy(data[t]['name'])
            appr.logs['test_acc'][t] = deepcopy(acc[t,:])
            appr.logs['test_loss'][t] = deepcopy(lss[t,:])
        #pickle
        import gzip
        import pickle
        with gzip.open(os.path.join(appr.logpath), 'wb') as output:
            pickle.dump(appr.logs, output, pickle.HIGHEST_PROTOCOL)

########################################################################################################################
| 37.661616 | 157 | 0.605874 | import sys,os,argparse,time
import numpy as np
import torch
import utils
tstart=time.time()
parser=argparse.ArgumentParser(description='xxx')
parser.add_argument('--seed',type=int,default=0,help='(default=%(default)d)')
parser.add_argument('--experiment',default='',type=str,required=True,choices=['mnist2','pmnist','cifar','mixture'],help='(default=%(default)s)')
parser.add_argument('--approach',default='',type=str,required=True,choices=['random','sgd','sgd-frozen','lwf','lfl','ewc','imm-mean','progressive','pathnet',
'imm-mode','sgd-restart',
'joint','alexnet-hat','resnet-hat','hat-test'],help='(default=%(default)s)')
parser.add_argument('--output',default='',type=str,required=False,help='(default=%(default)s)')
parser.add_argument('--nepochs',default=200,type=int,required=False,help='(default=%(default)d)')
parser.add_argument('--lr',default=0.05,type=float,required=False,help='(default=%(default)f)')
parser.add_argument('--parameter',type=str,default='',help='(default=%(default)s)')
args=parser.parse_args()
if args.output=='':
args.output='../res/'+args.experiment+'_'+args.approach+'_'+str(args.seed)+'.txt'
print('='*100)
print('Arguments =')
for arg in vars(args):
print('\t'+arg+':',getattr(args,arg))
print('='*100)
| true | true |
f73cdc102133baca532eb69a9883a9bee8c80b62 | 13,395 | py | Python | official/nlp/tasks/question_answering.py | ishani-chakraborty/models | a811a3b7e640722318ad868c99feddf3f3063e36 | [
"Apache-2.0"
] | 1 | 2020-08-05T14:38:17.000Z | 2020-08-05T14:38:17.000Z | official/nlp/tasks/question_answering.py | GulnihalKelkit/models | 367486482c5fe6fc896868edf9bbde7519deb52d | [
"Apache-2.0"
] | null | null | null | official/nlp/tasks/question_answering.py | GulnihalKelkit/models | 367486482c5fe6fc896868edf9bbde7519deb52d | [
"Apache-2.0"
] | 1 | 2022-02-27T23:00:11.000Z | 2022-02-27T23:00:11.000Z | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Question answering task."""
import collections
import json
import os
from absl import logging
import dataclasses
import orbit
import tensorflow as tf
import tensorflow_hub as hub
from official.core import base_task
from official.core import task_factory
from official.modeling.hyperparams import base_config
from official.modeling.hyperparams import config_definitions as cfg
from official.nlp.bert import squad_evaluate_v1_1
from official.nlp.bert import squad_evaluate_v2_0
from official.nlp.bert import tokenization
from official.nlp.configs import encoders
from official.nlp.data import data_loader_factory
from official.nlp.data import squad_lib as squad_lib_wp
from official.nlp.data import squad_lib_sp
from official.nlp.modeling import models
from official.nlp.tasks import utils
@dataclasses.dataclass
class ModelConfig(base_config.Config):
  """A base span labeler configuration."""
  # Hyperparameters of the transformer encoder backbone.
  encoder: encoders.TransformerEncoderConfig = (
      encoders.TransformerEncoderConfig())
@dataclasses.dataclass
class QuestionAnsweringConfig(cfg.TaskConfig):
  """The model config."""
  # At most one of `init_checkpoint` and `hub_module_url` can be specified.
  init_checkpoint: str = ''
  hub_module_url: str = ''
  # SQuAD-style post-processing knobs: number of candidate spans kept, the
  # longest answer emitted, and the no-answer score threshold (v2.0 only).
  n_best_size: int = 20
  max_answer_length: int = 30
  null_score_diff_threshold: float = 0.0
  model: ModelConfig = ModelConfig()
  train_data: cfg.DataConfig = cfg.DataConfig()
  validation_data: cfg.DataConfig = cfg.DataConfig()
@task_factory.register_task_cls(QuestionAnsweringConfig)
class QuestionAnsweringTask(base_task.Task):
  """Task object for question answering."""
  def __init__(self, params=cfg.TaskConfig, logging_dir=None):
    super(QuestionAnsweringTask, self).__init__(params, logging_dir)
    if params.hub_module_url and params.init_checkpoint:
      raise ValueError('At most one of `hub_module_url` and '
                       '`init_checkpoint` can be specified.')
    if params.hub_module_url:
      self._hub_module = hub.load(params.hub_module_url)
    else:
      self._hub_module = None
    # Pick the squad_lib flavor matching the tokenizer used to build the data.
    if params.validation_data.tokenization == 'WordPiece':
      self.squad_lib = squad_lib_wp
    elif params.validation_data.tokenization == 'SentencePiece':
      self.squad_lib = squad_lib_sp
    else:
      raise ValueError('Unsupported tokenization method: {}'.format(
          params.validation_data.tokenization))
    # Eagerly convert the raw eval JSON into TFRecords + feature lists.
    if params.validation_data.input_path:
      self._tf_record_input_path, self._eval_examples, self._eval_features = (
          self._preprocess_eval_data(params.validation_data))
  def set_preprocessed_eval_input_path(self, eval_input_path):
    """Sets the path to the preprocessed eval data."""
    self._tf_record_input_path = eval_input_path
  def build_model(self):
    """Builds the span-labeling model from a hub module or encoder config."""
    if self._hub_module:
      encoder_network = utils.get_encoder_from_hub(self._hub_module)
    else:
      encoder_network = encoders.instantiate_encoder_from_cfg(
          self.task_config.model.encoder)
    # Currently, we only supports bert-style question answering finetuning.
    return models.BertSpanLabeler(
        network=encoder_network,
        initializer=tf.keras.initializers.TruncatedNormal(
            stddev=self.task_config.model.encoder.initializer_range))
  def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
    """Mean of sparse cross-entropy over answer start and end positions."""
    start_positions = labels['start_positions']
    end_positions = labels['end_positions']
    start_logits, end_logits = model_outputs
    start_loss = tf.keras.losses.sparse_categorical_crossentropy(
        start_positions,
        tf.cast(start_logits, dtype=tf.float32),
        from_logits=True)
    end_loss = tf.keras.losses.sparse_categorical_crossentropy(
        end_positions,
        tf.cast(end_logits, dtype=tf.float32),
        from_logits=True)
    loss = (tf.reduce_mean(start_loss) + tf.reduce_mean(end_loss)) / 2
    return loss
  def _preprocess_eval_data(self, params):
    """Converts the raw SQuAD eval JSON into a TFRecord file plus the
    example/feature lists needed later for post-processing.

    Returns:
      Tuple of (tfrecord filename, eval examples, eval features).
    """
    eval_examples = self.squad_lib.read_squad_examples(
        input_file=params.input_path,
        is_training=False,
        version_2_with_negative=params.version_2_with_negative)
    temp_file_path = params.input_preprocessed_data_path or self.logging_dir
    if not temp_file_path:
      raise ValueError('You must specify a temporary directory, either in '
                       'params.input_preprocessed_data_path or logging_dir to '
                       'store intermediate evaluation TFRecord data.')
    eval_writer = self.squad_lib.FeatureWriter(
        filename=os.path.join(temp_file_path, 'eval.tf_record'),
        is_training=False)
    eval_features = []
    def _append_feature(feature, is_padding):
      # Padding features are written (so batches are full) but excluded
      # from the feature list used for post-processing.
      if not is_padding:
        eval_features.append(feature)
      eval_writer.process_feature(feature)
    kwargs = dict(
        examples=eval_examples,
        tokenizer=tokenization.FullTokenizer(
            vocab_file=params.vocab_file,
            do_lower_case=params.do_lower_case),
        max_seq_length=params.seq_length,
        doc_stride=params.doc_stride,
        max_query_length=params.query_length,
        is_training=False,
        output_fn=_append_feature,
        batch_size=params.global_batch_size)
    if params.tokenization == 'SentencePiece':
      # squad_lib_sp requires one more argument 'do_lower_case'.
      kwargs['do_lower_case'] = params.do_lower_case
    eval_dataset_size = self.squad_lib.convert_examples_to_features(**kwargs)
    eval_writer.close()
    logging.info('***** Evaluation input stats *****')
    logging.info(' Num orig examples = %d', len(eval_examples))
    logging.info(' Num split examples = %d', len(eval_features))
    logging.info(' Batch size = %d', params.global_batch_size)
    logging.info(' Dataset size = %d', eval_dataset_size)
    return eval_writer.filename, eval_examples, eval_features
  def build_inputs(self, params, input_context=None):
    """Returns tf.data.Dataset for sentence_prediction task."""
    if params.input_path == 'dummy':
      # Dummy training data for unit test.
      def dummy_data(_):
        dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)
        x = dict(
            input_word_ids=dummy_ids,
            input_mask=dummy_ids,
            input_type_ids=dummy_ids)
        y = dict(
            start_positions=tf.constant(0, dtype=tf.int32),
            end_positions=tf.constant(1, dtype=tf.int32))
        return (x, y)
      dataset = tf.data.Dataset.range(1)
      dataset = dataset.repeat()
      dataset = dataset.map(
          dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
      return dataset
    if params.is_training:
      dataloader_params = params
    else:
      # Eval reads from the TFRecords produced by _preprocess_eval_data.
      input_path = self._tf_record_input_path
      dataloader_params = params.replace(input_path=input_path)
    return data_loader_factory.get_data_loader(
        dataloader_params).load(input_context)
  def build_metrics(self, training=None):
    """Position accuracies; same metrics for training and evaluation."""
    del training
    # TODO(lehou): a list of metrics doesn't work the same as in compile/fit.
    metrics = [
        tf.keras.metrics.SparseCategoricalAccuracy(
            name='start_position_accuracy'),
        tf.keras.metrics.SparseCategoricalAccuracy(
            name='end_position_accuracy'),
    ]
    return metrics
  def process_metrics(self, metrics, labels, model_outputs):
    """Updates the start/end accuracy metrics from one batch of outputs."""
    metrics = dict([(metric.name, metric) for metric in metrics])
    start_logits, end_logits = model_outputs
    metrics['start_position_accuracy'].update_state(
        labels['start_positions'], start_logits)
    metrics['end_position_accuracy'].update_state(
        labels['end_positions'], end_logits)
  def process_compiled_metrics(self, compiled_metrics, labels, model_outputs):
    """Same as process_metrics, but for Keras compiled-metrics objects."""
    start_logits, end_logits = model_outputs
    compiled_metrics.update_state(
        y_true=labels, # labels has keys 'start_positions' and 'end_positions'.
        y_pred={'start_positions': start_logits, 'end_positions': end_logits})
  def validation_step(self, inputs, model: tf.keras.Model, metrics=None):
    """Runs inference on one eval batch and returns raw logits for
    aggregation; no loss/metrics are computed here."""
    features, _ = inputs
    unique_ids = features.pop('unique_ids')
    model_outputs = self.inference_step(features, model)
    start_logits, end_logits = model_outputs
    logs = {
        self.loss: 0.0, # TODO(lehou): compute the real validation loss.
        'unique_ids': unique_ids,
        'start_logits': start_logits,
        'end_logits': end_logits,
    }
    return logs
  # Per-feature raw model output collected across all validation steps.
  raw_aggregated_result = collections.namedtuple(
      'RawResult', ['unique_id', 'start_logits', 'end_logits'])
  def aggregate_logs(self, state=None, step_outputs=None):
    """Flattens per-step (possibly per-replica) logits into a list of
    RawResult tuples, appending to `state` across steps."""
    assert step_outputs is not None, 'Got no logs from self.validation_step.'
    if state is None:
      state = []
    for unique_ids, start_logits, end_logits in zip(
        step_outputs['unique_ids'],
        step_outputs['start_logits'],
        step_outputs['end_logits']):
      u_ids, s_logits, e_logits = (
          unique_ids.numpy(), start_logits.numpy(), end_logits.numpy())
      for values in zip(u_ids, s_logits, e_logits):
        state.append(self.raw_aggregated_result(
            unique_id=values[0],
            start_logits=values[1].tolist(),
            end_logits=values[2].tolist()))
    return state
  def reduce_aggregated_logs(self, aggregated_logs):
    """Decodes answer spans from the aggregated logits and scores them
    against the ground-truth SQuAD JSON (v1.1 or v2.0)."""
    all_predictions, _, scores_diff = (
        self.squad_lib.postprocess_output(
            self._eval_examples,
            self._eval_features,
            aggregated_logs,
            self.task_config.n_best_size,
            self.task_config.max_answer_length,
            self.task_config.validation_data.do_lower_case,
            version_2_with_negative=(
                self.task_config.validation_data.version_2_with_negative),
            null_score_diff_threshold=(
                self.task_config.null_score_diff_threshold),
            verbose=False))
    with tf.io.gfile.GFile(
        self.task_config.validation_data.input_path, 'r') as reader:
      dataset_json = json.load(reader)
      pred_dataset = dataset_json['data']
    if self.task_config.validation_data.version_2_with_negative:
      eval_metrics = squad_evaluate_v2_0.evaluate(
          pred_dataset, all_predictions, scores_diff)
      # Filter out useless metrics, such as start_position_accuracy that
      # we did not actually compute.
      eval_metrics = {
          'exact_match': eval_metrics['final_exact'],
          'exact_match_threshold': eval_metrics['final_exact_thresh'],
          'final_f1': eval_metrics['final_f1'] / 100.0, # scale back to [0, 1].
          'f1_threshold': eval_metrics['final_f1_thresh'],
          'has_answer_exact_match': eval_metrics['HasAns_exact'],
          'has_answer_f1': eval_metrics['HasAns_f1']}
    else:
      eval_metrics = squad_evaluate_v1_1.evaluate(pred_dataset, all_predictions)
      # Filter out useless metrics, such as start_position_accuracy that
      # we did not actually compute.
      eval_metrics = {'exact_match': eval_metrics['exact_match'],
                      'final_f1': eval_metrics['final_f1']}
    return eval_metrics
def predict(task: QuestionAnsweringTask, params: cfg.DataConfig,
            model: tf.keras.Model):
  """Predicts on the input data.
  Args:
    task: A `QuestionAnsweringTask` object.
    params: A `cfg.DataConfig` object.
    model: A keras.Model.
  Returns:
    A tuple of `all_predictions`, `all_nbest` and `scores_diff`, which
    are dict and can be written to json files including prediction json file,
    nbest json file and null_odds json file.
  """
  tf_record_input_path, eval_examples, eval_features = (
      task._preprocess_eval_data(params)) # pylint: disable=protected-access
  # `tf_record_input_path` will overwrite `params.input_path`,
  # when `task.build_inputs()` is called.
  task.set_preprocessed_eval_input_path(tf_record_input_path)
  def predict_step(inputs):
    """Replicated prediction calculation."""
    return task.validation_step(inputs, model)
  dataset = orbit.utils.make_distributed_dataset(tf.distribute.get_strategy(),
                                                 task.build_inputs, params)
  # Collect raw logits over the whole dataset, then decode answer spans.
  aggregated_outputs = utils.predict(predict_step, task.aggregate_logs, dataset)
  all_predictions, all_nbest, scores_diff = (
      task.squad_lib.postprocess_output(
          eval_examples,
          eval_features,
          aggregated_outputs,
          task.task_config.n_best_size,
          task.task_config.max_answer_length,
          task.task_config.validation_data.do_lower_case,
          version_2_with_negative=(params.version_2_with_negative),
          null_score_diff_threshold=task.task_config.null_score_diff_threshold,
          verbose=False))
  return all_predictions, all_nbest, scores_diff
| 39.513274 | 80 | 0.707055 |
import collections
import json
import os
from absl import logging
import dataclasses
import orbit
import tensorflow as tf
import tensorflow_hub as hub
from official.core import base_task
from official.core import task_factory
from official.modeling.hyperparams import base_config
from official.modeling.hyperparams import config_definitions as cfg
from official.nlp.bert import squad_evaluate_v1_1
from official.nlp.bert import squad_evaluate_v2_0
from official.nlp.bert import tokenization
from official.nlp.configs import encoders
from official.nlp.data import data_loader_factory
from official.nlp.data import squad_lib as squad_lib_wp
from official.nlp.data import squad_lib_sp
from official.nlp.modeling import models
from official.nlp.tasks import utils
@dataclasses.dataclass
class ModelConfig(base_config.Config):
  """A base span-labeler (question answering) model configuration."""
  # Transformer encoder backbone configuration.
  encoder: encoders.TransformerEncoderConfig = (
      encoders.TransformerEncoderConfig())
@dataclasses.dataclass
class QuestionAnsweringConfig(cfg.TaskConfig):
  """The SQuAD-style question answering task config."""
  # At most one of `init_checkpoint` and `hub_module_url` may be set
  # (enforced in QuestionAnsweringTask.__init__).
  init_checkpoint: str = ''
  hub_module_url: str = ''
  # Answer-span post-processing knobs, forwarded to
  # squad_lib.postprocess_output.
  n_best_size: int = 20
  max_answer_length: int = 30
  # Only meaningful when validation data is SQuAD v2.0 style
  # (version_2_with_negative).
  null_score_diff_threshold: float = 0.0
  model: ModelConfig = ModelConfig()
  train_data: cfg.DataConfig = cfg.DataConfig()
  validation_data: cfg.DataConfig = cfg.DataConfig()
@task_factory.register_task_cls(QuestionAnsweringConfig)
class QuestionAnsweringTask(base_task.Task):
  """Task object for SQuAD-style extractive question answering."""

  def __init__(self, params=cfg.TaskConfig, logging_dir=None):
    """Initializes the task.

    Args:
      params: A `QuestionAnsweringConfig` instance.
      logging_dir: Directory used for intermediate evaluation artifacts when
        `params.validation_data.input_preprocessed_data_path` is unset.

    Raises:
      ValueError: If both `hub_module_url` and `init_checkpoint` are set,
        or the configured tokenization method is unsupported.
    """
    super(QuestionAnsweringTask, self).__init__(params, logging_dir)
    if params.hub_module_url and params.init_checkpoint:
      raise ValueError('At most one of `hub_module_url` and '
                       '`init_checkpoint` can be specified.')
    if params.hub_module_url:
      self._hub_module = hub.load(params.hub_module_url)
    else:
      self._hub_module = None

    # Select the squad_lib implementation that matches the tokenizer.
    if params.validation_data.tokenization == 'WordPiece':
      self.squad_lib = squad_lib_wp
    elif params.validation_data.tokenization == 'SentencePiece':
      self.squad_lib = squad_lib_sp
    else:
      raise ValueError('Unsupported tokenization method: {}'.format(
          params.validation_data.tokenization))

    # Eagerly convert the raw eval json into a TFRecord file; the examples
    # and features are kept for answer-span post-processing later.
    if params.validation_data.input_path:
      self._tf_record_input_path, self._eval_examples, self._eval_features = (
          self._preprocess_eval_data(params.validation_data))

  def set_preprocessed_eval_input_path(self, eval_input_path):
    """Sets the path to an already-preprocessed eval TFRecord file."""
    self._tf_record_input_path = eval_input_path

  def build_model(self):
    """Builds a `BertSpanLabeler` on top of the configured (or hub) encoder."""
    if self._hub_module:
      encoder_network = utils.get_encoder_from_hub(self._hub_module)
    else:
      encoder_network = encoders.instantiate_encoder_from_cfg(
          self.task_config.model.encoder)
    return models.BertSpanLabeler(
        network=encoder_network,
        initializer=tf.keras.initializers.TruncatedNormal(
            stddev=self.task_config.model.encoder.initializer_range))

  def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
    """Averages the start- and end-position sparse cross-entropy losses."""
    start_positions = labels['start_positions']
    end_positions = labels['end_positions']
    start_logits, end_logits = model_outputs

    start_loss = tf.keras.losses.sparse_categorical_crossentropy(
        start_positions,
        tf.cast(start_logits, dtype=tf.float32),
        from_logits=True)
    end_loss = tf.keras.losses.sparse_categorical_crossentropy(
        end_positions,
        tf.cast(end_logits, dtype=tf.float32),
        from_logits=True)

    loss = (tf.reduce_mean(start_loss) + tf.reduce_mean(end_loss)) / 2
    return loss

  def _preprocess_eval_data(self, params):
    """Converts raw SQuAD eval json into a TFRecord file of features.

    Args:
      params: The validation `cfg.DataConfig`.

    Returns:
      A `(tf_record_path, eval_examples, eval_features)` tuple; examples and
      features are needed later to map logits back to answer spans.

    Raises:
      ValueError: If no directory is available for the intermediate file.
    """
    eval_examples = self.squad_lib.read_squad_examples(
        input_file=params.input_path,
        is_training=False,
        version_2_with_negative=params.version_2_with_negative)

    temp_file_path = params.input_preprocessed_data_path or self.logging_dir
    if not temp_file_path:
      raise ValueError('You must specify a temporary directory, either in '
                       'params.input_preprocessed_data_path or logging_dir to '
                       'store intermediate evaluation TFRecord data.')
    eval_writer = self.squad_lib.FeatureWriter(
        filename=os.path.join(temp_file_path, 'eval.tf_record'),
        is_training=False)
    eval_features = []

    def _append_feature(feature, is_padding):
      # Padding features are written (to keep batch sizes fixed) but not
      # kept for post-processing.
      if not is_padding:
        eval_features.append(feature)
      eval_writer.process_feature(feature)

    kwargs = dict(
        examples=eval_examples,
        tokenizer=tokenization.FullTokenizer(
            vocab_file=params.vocab_file,
            do_lower_case=params.do_lower_case),
        max_seq_length=params.seq_length,
        doc_stride=params.doc_stride,
        max_query_length=params.query_length,
        is_training=False,
        output_fn=_append_feature,
        batch_size=params.global_batch_size)
    # The SentencePiece converter takes an extra `do_lower_case` argument.
    if params.tokenization == 'SentencePiece':
      kwargs['do_lower_case'] = params.do_lower_case

    eval_dataset_size = self.squad_lib.convert_examples_to_features(**kwargs)
    eval_writer.close()

    logging.info('***** Evaluation input stats *****')
    logging.info('  Num orig examples = %d', len(eval_examples))
    logging.info('  Num split examples = %d', len(eval_features))
    logging.info('  Batch size = %d', params.global_batch_size)
    logging.info('  Dataset size = %d', eval_dataset_size)

    return eval_writer.filename, eval_examples, eval_features

  def build_inputs(self, params, input_context=None):
    """Returns a tf.data.Dataset for the task (or dummy data for testing)."""
    if params.input_path == 'dummy':

      def dummy_data(_):
        dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)
        x = dict(
            input_word_ids=dummy_ids,
            input_mask=dummy_ids,
            input_type_ids=dummy_ids)
        y = dict(
            start_positions=tf.constant(0, dtype=tf.int32),
            end_positions=tf.constant(1, dtype=tf.int32))
        return (x, y)

      dataset = tf.data.Dataset.range(1)
      dataset = dataset.repeat()
      dataset = dataset.map(
          dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
      return dataset

    if params.is_training:
      dataloader_params = params
    else:
      # Evaluation reads the preprocessed TFRecord file instead of the raw
      # json configured in `params.input_path`.
      input_path = self._tf_record_input_path
      dataloader_params = params.replace(input_path=input_path)

    return data_loader_factory.get_data_loader(
        dataloader_params).load(input_context)

  def build_metrics(self, training=None):
    """Returns start/end position accuracy metrics (same for train/eval)."""
    del training
    metrics = [
        tf.keras.metrics.SparseCategoricalAccuracy(
            name='start_position_accuracy'),
        tf.keras.metrics.SparseCategoricalAccuracy(
            name='end_position_accuracy'),
    ]
    return metrics

  def process_metrics(self, metrics, labels, model_outputs):
    """Updates the position-accuracy metrics from one batch of outputs."""
    metrics = dict([(metric.name, metric) for metric in metrics])
    start_logits, end_logits = model_outputs
    metrics['start_position_accuracy'].update_state(
        labels['start_positions'], start_logits)
    metrics['end_position_accuracy'].update_state(
        labels['end_positions'], end_logits)

  def process_compiled_metrics(self, compiled_metrics, labels, model_outputs):
    """Updates Keras compiled metrics from one batch of outputs."""
    start_logits, end_logits = model_outputs
    compiled_metrics.update_state(
        y_true=labels,  # labels has keys 'start_positions' and 'end_positions'.
        y_pred={'start_positions': start_logits, 'end_positions': end_logits})

  def validation_step(self, inputs, model: tf.keras.Model, metrics=None):
    """Runs a forward pass and returns per-example logits for aggregation."""
    features, _ = inputs
    unique_ids = features.pop('unique_ids')
    model_outputs = self.inference_step(features, model)
    start_logits, end_logits = model_outputs
    logs = {
        self.loss: 0.0,  # TODO(lehou): compute the real validation loss.
        'unique_ids': unique_ids,
        'start_logits': start_logits,
        'end_logits': end_logits,
    }
    return logs

  # Container for one example's raw logits, accumulated across eval steps.
  raw_aggregated_result = collections.namedtuple(
      'RawResult', ['unique_id', 'start_logits', 'end_logits'])

  def aggregate_logs(self, state=None, step_outputs=None):
    """Appends each example's logits from one step's outputs to `state`."""
    assert step_outputs is not None, 'Got no logs from self.validation_step.'
    if state is None:
      state = []

    # Outer loop: per-replica tensors; inner loop: per-example rows.
    for unique_ids, start_logits, end_logits in zip(
        step_outputs['unique_ids'],
        step_outputs['start_logits'],
        step_outputs['end_logits']):
      u_ids, s_logits, e_logits = (
          unique_ids.numpy(), start_logits.numpy(), end_logits.numpy())
      for values in zip(u_ids, s_logits, e_logits):
        state.append(self.raw_aggregated_result(
            unique_id=values[0],
            start_logits=values[1].tolist(),
            end_logits=values[2].tolist()))
    return state

  def reduce_aggregated_logs(self, aggregated_logs):
    """Post-processes aggregated logits and computes SQuAD eval metrics."""
    all_predictions, _, scores_diff = (
        self.squad_lib.postprocess_output(
            self._eval_examples,
            self._eval_features,
            aggregated_logs,
            self.task_config.n_best_size,
            self.task_config.max_answer_length,
            self.task_config.validation_data.do_lower_case,
            version_2_with_negative=(
                self.task_config.validation_data.version_2_with_negative),
            null_score_diff_threshold=(
                self.task_config.null_score_diff_threshold),
            verbose=False))

    with tf.io.gfile.GFile(
        self.task_config.validation_data.input_path, 'r') as reader:
      dataset_json = json.load(reader)
      pred_dataset = dataset_json['data']
    if self.task_config.validation_data.version_2_with_negative:
      eval_metrics = squad_evaluate_v2_0.evaluate(
          pred_dataset, all_predictions, scores_diff)
      # Filter out useless metrics, such as start_position_accuracy that
      # we did not actually compute.
      eval_metrics = {
          'exact_match': eval_metrics['final_exact'],
          'exact_match_threshold': eval_metrics['final_exact_thresh'],
          'final_f1': eval_metrics['final_f1'] / 100.0,  # scale back to [0, 1].
          'f1_threshold': eval_metrics['final_f1_thresh'],
          'has_answer_exact_match': eval_metrics['HasAns_exact'],
          'has_answer_f1': eval_metrics['HasAns_f1']}
    else:
      eval_metrics = squad_evaluate_v1_1.evaluate(pred_dataset, all_predictions)
      # Filter out useless metrics, such as start_position_accuracy that
      # we did not actually compute.
      eval_metrics = {'exact_match': eval_metrics['exact_match'],
                      'final_f1': eval_metrics['final_f1']}
    return eval_metrics
def predict(task: QuestionAnsweringTask, params: cfg.DataConfig,
            model: tf.keras.Model):
  """Runs question-answering inference and post-processes answer spans.

  Args:
    task: A `QuestionAnsweringTask` object.
    params: A `cfg.DataConfig` object describing the input data.
    model: The keras.Model to run inference with.

  Returns:
    A `(all_predictions, all_nbest, scores_diff)` tuple of dicts, suitable
    for writing the prediction, nbest and null-odds json files.
  """
  record_path, examples, features = (
      task._preprocess_eval_data(params))  # pylint: disable=protected-access
  # Make `task.build_inputs()` read the freshly written TFRecord file rather
  # than `params.input_path`.
  task.set_preprocessed_eval_input_path(record_path)

  strategy = tf.distribute.get_strategy()
  dataset = orbit.utils.make_distributed_dataset(strategy, task.build_inputs,
                                                 params)

  def step_fn(inputs):
    """Per-replica forward pass."""
    return task.validation_step(inputs, model)

  outputs = utils.predict(step_fn, task.aggregate_logs, dataset)

  task_config = task.task_config
  return task.squad_lib.postprocess_output(
      examples,
      features,
      outputs,
      task_config.n_best_size,
      task_config.max_answer_length,
      task_config.validation_data.do_lower_case,
      version_2_with_negative=params.version_2_with_negative,
      null_score_diff_threshold=task_config.null_score_diff_threshold,
      verbose=False)
| true | true |
f73cdca8b08d8e13c6d501dda8cb056a03f59629 | 11,532 | py | Python | tnmlearn/other/convenience.py | t2wain/machine-learning | 4b5e1a24fab7c4ab42f646f7785191ff3d3283ba | [
"MIT"
] | null | null | null | tnmlearn/other/convenience.py | t2wain/machine-learning | 4b5e1a24fab7c4ab42f646f7785191ff3d3283ba | [
"MIT"
] | null | null | null | tnmlearn/other/convenience.py | t2wain/machine-learning | 4b5e1a24fab7c4ab42f646f7785191ff3d3283ba | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# author: Adrian Rosebrock
# website: http://www.pyimagesearch.com
# import the necessary packages
import numpy as np
import cv2
import sys
# import any special Python 2.7 packages
if sys.version_info.major == 2:
from urllib import urlopen
# import any special Python 3 packages
elif sys.version_info.major == 3:
from urllib.request import urlopen
def translate(image, x, y):
    """Shift `image` by (x, y) pixels and return the shifted copy."""
    (rows, cols) = image.shape[:2]
    # Affine matrix for a pure translation.
    matrix = np.float32([[1, 0, x], [0, 1, y]])
    return cv2.warpAffine(image, matrix, (cols, rows))
def rotate(image, angle, center=None, scale=1.0):
    """Rotate `image` by `angle` degrees about `center` (default: center)."""
    (h, w) = image.shape[:2]
    pivot = (w // 2, h // 2) if center is None else center
    matrix = cv2.getRotationMatrix2D(pivot, angle, scale)
    return cv2.warpAffine(image, matrix, (w, h))
def rotate_bound(image, angle):
    """Rotate `image` by `angle` degrees clockwise, enlarging the canvas so
    no corners are clipped."""
    (h, w) = image.shape[:2]
    center_x, center_y = w / 2, h / 2

    # Negative angle -> clockwise rotation in OpenCV's convention; pull the
    # cosine/sine terms back out of the rotation matrix.
    matrix = cv2.getRotationMatrix2D((center_x, center_y), -angle, 1.0)
    cos_a = np.abs(matrix[0, 0])
    sin_a = np.abs(matrix[0, 1])

    # Bounding box of the rotated image.
    new_w = int((h * sin_a) + (w * cos_a))
    new_h = int((h * cos_a) + (w * sin_a))

    # Shift the transform so the rotation stays centered in the new canvas.
    matrix[0, 2] += (new_w / 2) - center_x
    matrix[1, 2] += (new_h / 2) - center_y
    return cv2.warpAffine(image, matrix, (new_w, new_h))
def resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    """Resize `image` to the given width or height, keeping aspect ratio.

    If both `width` and `height` are None the image is returned unchanged;
    if both are given, `width` takes precedence.
    """
    (h, w) = image.shape[:2]

    if width is None and height is None:
        return image

    if width is not None:
        scale = width / float(w)
        dim = (width, int(h * scale))
    else:
        scale = height / float(h)
        dim = (int(w * scale), height)

    return cv2.resize(image, dim, interpolation=inter)
def skeletonize(image, size, structuring=cv2.MORPH_RECT):
    """Compute the morphological skeleton of a binary image.

    Args:
        image: single-channel binary image (non-zero pixels are foreground).
        size: structuring element size, e.g. (3, 3).
        structuring: structuring element shape (default: rectangle).

    Returns:
        A uint8 image of the same shape containing the skeleton.
    """
    skeleton = np.zeros(image.shape, dtype="uint8")
    elem = cv2.getStructuringElement(structuring, size)

    # Iteratively erode the image, OR-ing the residue removed at each step
    # (original minus its opening) into the skeleton, until nothing is left.
    while True:
        eroded = cv2.erode(image, elem)
        temp = cv2.dilate(eroded, elem)
        temp = cv2.subtract(image, temp)
        skeleton = cv2.bitwise_or(skeleton, temp)
        image = eroded.copy()

        # Stop once every foreground pixel has been eroded away.  (The
        # original `area == area - cv2.countNonZero(image)` test was this
        # same condition written in a roundabout way; `area` was otherwise
        # unused.)
        if cv2.countNonZero(image) == 0:
            break

    return skeleton
def opencv2matplotlib(image):
    """Convert a BGR (OpenCV) image to the RGB order Matplotlib expects."""
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    return rgb
def url_to_image(url, readFlag=cv2.IMREAD_COLOR):
    """Download the image at `url` and decode it into OpenCV format."""
    response = urlopen(url)
    # Bytes -> numpy buffer -> decoded image.
    buf = np.asarray(bytearray(response.read()), dtype="uint8")
    return cv2.imdecode(buf, readFlag)
def auto_canny(image, sigma=0.33):
    """Apply Canny edge detection with thresholds derived from the median."""
    median = np.median(image)
    # Thresholds at +/- sigma around the median, clamped to [0, 255].
    lower = int(max(0, (1.0 - sigma) * median))
    upper = int(min(255, (1.0 + sigma) * median))
    return cv2.Canny(image, lower, upper)
def grab_contours(cnts):
    """Return the contour list from a cv2.findContours result tuple.

    OpenCV 2.4, v4-beta and v4-official return a 2-tuple with the contours
    first; OpenCV 3.x / v4-pre / v4-alpha return a 3-tuple with the
    contours second.
    """
    if len(cnts) == 2:
        return cnts[0]
    if len(cnts) == 3:
        return cnts[1]
    raise Exception(("Contours tuple must have length 2 or 3, "
        "otherwise OpenCV changed their cv2.findContours return "
        "signature yet again. Refer to OpenCV's documentation "
        "in that case"))
def is_cv2(or_better=False):
    """Return True for OpenCV 2 (or any version >= 2 when `or_better`)."""
    major = get_opencv_major_version()
    return major >= 2 if or_better else major == 2
def is_cv3(or_better=False):
    """Return True for OpenCV 3 (or any version >= 3 when `or_better`)."""
    major = get_opencv_major_version()
    return major >= 3 if or_better else major == 3
def is_cv4(or_better=False):
    """Return True for OpenCV 4 (or any version >= 4 when `or_better`)."""
    major = get_opencv_major_version()
    return major >= 4 if or_better else major == 4
def get_opencv_major_version(lib=None):
    """Return the major version number of `lib` (lazily imports cv2)."""
    if lib is None:
        import cv2 as lib
    # "4.5.3" -> "4"; a bare "4" also works.
    major, _, _ = lib.__version__.partition(".")
    return int(major)
def check_opencv_version(major, lib=None):
    """Deprecated: return True if `lib.__version__` starts with `major`."""
    # this function may be removed in a future release as we now
    # use the get_opencv_major_function to obtain the current OpenCV
    # version and then perform the actual version check *within* the
    # respective function
    import warnings
    message = """
    The check_opencv_version function is deprecated and may be
    removed in a future release. Use at your own risk.
    """
    warnings.warn(message, DeprecationWarning, stacklevel=2)
    # if the supplied library is None, import OpenCV
    if lib is None:
        import cv2 as lib
    # return whether or not the current OpenCV version matches the
    # major version number
    return lib.__version__.startswith(major)
def build_montages(image_list, image_shape, montage_shape):
    """Tile `image_list` into one or more montage images.

    Each input image is resized to `image_shape` (width, height) and drawn
    onto a black canvas of `montage_shape` (columns, rows) tiles.  A new
    montage is started whenever the current one fills up; unused tiles of
    the last montage remain black.

    Args:
        image_list: list of input images (numpy arrays).
        image_shape: tuple, size each image will be resized to (width, height).
        montage_shape: tuple, shape of image montage (width, height).

    Returns:
        List of montage images in numpy array format.

    Original author: Kyle Hounslow.
    """
    if len(image_shape) != 2:
        raise Exception('image shape must be list or tuple of length 2 (rows, cols)')
    if len(montage_shape) != 2:
        raise Exception('montage shape must be list or tuple of length 2 (rows, cols)')
    image_montages = []
    # start with black canvas to draw images onto
    montage_image = np.zeros(shape=(image_shape[1] * (montage_shape[1]), image_shape[0] * montage_shape[0], 3),
                             dtype=np.uint8)
    # (x, y) pixel offset of the next tile to fill.
    cursor_pos = [0, 0]
    start_new_img = False
    for img in image_list:
        if type(img).__module__ != np.__name__:
            raise Exception('input of type {} is not a valid numpy array'.format(type(img)))
        start_new_img = False
        img = cv2.resize(img, image_shape)
        # draw image to black canvas
        montage_image[cursor_pos[1]:cursor_pos[1] + image_shape[1], cursor_pos[0]:cursor_pos[0] + image_shape[0]] = img
        cursor_pos[0] += image_shape[0]  # increment cursor x position
        if cursor_pos[0] >= montage_shape[0] * image_shape[0]:
            cursor_pos[1] += image_shape[1]  # increment cursor y position
            cursor_pos[0] = 0
            if cursor_pos[1] >= montage_shape[1] * image_shape[1]:
                cursor_pos = [0, 0]
                image_montages.append(montage_image)
                # reset black canvas
                montage_image = np.zeros(shape=(image_shape[1] * (montage_shape[1]), image_shape[0] * montage_shape[0], 3),
                                         dtype=np.uint8)
                start_new_img = True
    if start_new_img is False:
        image_montages.append(montage_image)  # add unfinished montage
    return image_montages
def adjust_brightness_contrast(image, brightness=0., contrast=0.):
    """Adjust the brightness and/or contrast of an image.

    Args:
        image: OpenCV BGR image.
        brightness: Float, brightness adjustment with 0 meaning no change.
        contrast: Float, percentage contrast adjustment with 0 meaning no
            change.

    Returns:
        The adjusted image.
    """
    # Fix: the original final line had dataset-metadata residue
    # ("| 35.925234 | 123 | ...") fused after the closing parenthesis,
    # which made the function a syntax error.
    beta = 0
    # See the OpenCV docs for more info on the `beta` parameter to addWeighted
    # https://docs.opencv.org/3.4.2/d2/de8/group__core__array.html#gafafb2513349db3bcff51f54ee5592a19
    return cv2.addWeighted(image,
                           1 + float(contrast) / 100.,
                           image,
                           beta,
                           float(brightness))
import numpy as np
import cv2
import sys
if sys.version_info.major == 2:
from urllib import urlopen
elif sys.version_info.major == 3:
from urllib.request import urlopen
def translate(image, x, y):
    """Shift `image` by (x, y) pixels and return the shifted copy."""
    # Affine matrix for a pure translation.
    M = np.float32([[1, 0, x], [0, 1, y]])
    shifted = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
    return shifted
def rotate(image, angle, center=None, scale=1.0):
    """Rotate `image` by `angle` degrees about `center` (default: center)."""
    (h, w) = image.shape[:2]
    if center is None:
        center = (w // 2, h // 2)
    M = cv2.getRotationMatrix2D(center, angle, scale)
    rotated = cv2.warpAffine(image, M, (w, h))
    return rotated
def rotate_bound(image, angle):
    """Rotate `image` by `angle` degrees clockwise without clipping corners."""
    (h, w) = image.shape[:2]
    (cX, cY) = (w / 2, h / 2)
    # Negative angle -> clockwise rotation; pull cos/sin back out of the
    # rotation matrix.
    M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
    cos = np.abs(M[0, 0])
    sin = np.abs(M[0, 1])
    # New canvas size that fully contains the rotated image.
    nW = int((h * sin) + (w * cos))
    nH = int((h * cos) + (w * sin))
    # Shift the transform so the rotation stays centered in the new canvas.
    M[0, 2] += (nW / 2) - cX
    M[1, 2] += (nH / 2) - cY
    return cv2.warpAffine(image, M, (nW, nH))
def resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    """Resize `image` to `width` or `height`, keeping the aspect ratio.

    If both are None the original image is returned; if both are given,
    `width` takes precedence.
    """
    dim = None
    (h, w) = image.shape[:2]
    if width is None and height is None:
        return image
    if width is None:
        # Scale by the requested height.
        r = height / float(h)
        dim = (int(w * r), height)
    else:
        # Scale by the requested width.
        r = width / float(w)
        dim = (width, int(h * r))
    resized = cv2.resize(image, dim, interpolation=inter)
    return resized
def skeletonize(image, size, structuring=cv2.MORPH_RECT):
    """Compute the morphological skeleton of a binary image.

    Args:
        image: single-channel binary image (non-zero pixels are foreground).
        size: structuring element size, e.g. (3, 3).
        structuring: structuring element shape (default: rectangle).

    Returns:
        A uint8 image of the same shape containing the skeleton.
    """
    skeleton = np.zeros(image.shape, dtype="uint8")
    elem = cv2.getStructuringElement(structuring, size)

    # Iteratively erode the image, OR-ing the residue removed at each step
    # (original minus its opening) into the skeleton, until nothing is left.
    while True:
        eroded = cv2.erode(image, elem)
        temp = cv2.dilate(eroded, elem)
        temp = cv2.subtract(image, temp)
        skeleton = cv2.bitwise_or(skeleton, temp)
        image = eroded.copy()

        # Stop once every foreground pixel has been eroded away.  (The
        # original `area == area - cv2.countNonZero(image)` test was this
        # same condition written in a roundabout way; `area` was otherwise
        # unused.)
        if cv2.countNonZero(image) == 0:
            break

    return skeleton
def opencv2matplotlib(image):
    """Convert an OpenCV BGR image to the RGB order Matplotlib expects."""
    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
def url_to_image(url, readFlag=cv2.IMREAD_COLOR):
    """Download the image at `url` and decode it into OpenCV format."""
    resp = urlopen(url)
    # Bytes -> numpy buffer -> decoded image.
    image = np.asarray(bytearray(resp.read()), dtype="uint8")
    image = cv2.imdecode(image, readFlag)
    return image
def auto_canny(image, sigma=0.33):
    """Canny edge detection with thresholds derived from the pixel median."""
    v = np.median(image)
    # Thresholds at +/- sigma around the median, clamped to [0, 255].
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    edged = cv2.Canny(image, lower, upper)
    return edged
def grab_contours(cnts):
    """Return the contours list from a cv2.findContours() result tuple."""
    # OpenCV 2.4 / v4 return (contours, hierarchy).
    if len(cnts) == 2:
        cnts = cnts[0]
    # OpenCV 3.x returns (image, contours, hierarchy).
    elif len(cnts) == 3:
        cnts = cnts[1]
    else:
        raise Exception(("Contours tuple must have length 2 or 3, "
            "otherwise OpenCV changed their cv2.findContours return "
            "signature yet again. Refer to OpenCV's documentation "
            "in that case"))
    # return the actual contours array
    return cnts
def is_cv2(or_better=False):
    """Return True for OpenCV 2 (or any version >= 2 when `or_better`)."""
    # grab the OpenCV major version number
    major = get_opencv_major_version()
    # check to see if we are using *at least* OpenCV 2
    if or_better:
        return major >= 2
    # otherwise we want to check for *strictly* OpenCV 2
    return major == 2
def is_cv3(or_better=False):
    """Return True for OpenCV 3 (or any version >= 3 when `or_better`)."""
    # grab the OpenCV major version number
    major = get_opencv_major_version()
    # check to see if we are using *at least* OpenCV 3
    if or_better:
        return major >= 3
    # otherwise we want to check for *strictly* OpenCV 3
    return major == 3
def is_cv4(or_better=False):
    """Return True for OpenCV 4 (or any version >= 4 when `or_better`)."""
    # grab the OpenCV major version number
    major = get_opencv_major_version()
    # check to see if we are using *at least* OpenCV 4
    if or_better:
        return major >= 4
    # otherwise we want to check for *strictly* OpenCV 4
    return major == 4
def get_opencv_major_version(lib=None):
    """Return the major version of `lib` (lazily imports cv2 by default)."""
    # if the supplied library is None, import OpenCV
    if lib is None:
        import cv2 as lib
    # return the major version number
    return int(lib.__version__.split(".")[0])
def check_opencv_version(major, lib=None):
    """Deprecated: return True if `lib.__version__` starts with `major`."""
    # this function may be removed in a future release as we now
    # use the get_opencv_major_function to obtain the current OpenCV
    # version and then perform the actual version check *within* the
    # respective function
    import warnings
    message = """
    The check_opencv_version function is deprecated and may be
    removed in a future release. Use at your own risk.
    """
    warnings.warn(message, DeprecationWarning, stacklevel=2)
    # if the supplied library is None, import OpenCV
    if lib is None:
        import cv2 as lib
    # return whether or not the current OpenCV version matches the
    # major version number
    return lib.__version__.startswith(major)
def build_montages(image_list, image_shape, montage_shape):
    """Tile `image_list` into one or more montage images.

    Each input image is resized to `image_shape` (width, height) and drawn
    onto a black canvas of `montage_shape` (columns, rows) tiles.  A new
    montage is started whenever the current one fills up; unused tiles of
    the last montage remain black.

    Args:
        image_list: list of input images (numpy arrays).
        image_shape: tuple, size each image will be resized to (width, height).
        montage_shape: tuple, shape of image montage (width, height).

    Returns:
        List of montage images in numpy array format.
    """
    if len(image_shape) != 2:
        raise Exception('image shape must be list or tuple of length 2 (rows, cols)')
    if len(montage_shape) != 2:
        raise Exception('montage shape must be list or tuple of length 2 (rows, cols)')
    image_montages = []
    # start with black canvas to draw images onto
    montage_image = np.zeros(shape=(image_shape[1] * (montage_shape[1]), image_shape[0] * montage_shape[0], 3),
                             dtype=np.uint8)
    # (x, y) pixel offset of the next tile to fill.
    cursor_pos = [0, 0]
    start_new_img = False
    for img in image_list:
        if type(img).__module__ != np.__name__:
            raise Exception('input of type {} is not a valid numpy array'.format(type(img)))
        start_new_img = False
        img = cv2.resize(img, image_shape)
        # draw image to black canvas
        montage_image[cursor_pos[1]:cursor_pos[1] + image_shape[1], cursor_pos[0]:cursor_pos[0] + image_shape[0]] = img
        cursor_pos[0] += image_shape[0]  # increment cursor x position
        if cursor_pos[0] >= montage_shape[0] * image_shape[0]:
            cursor_pos[1] += image_shape[1]  # increment cursor y position
            cursor_pos[0] = 0
            if cursor_pos[1] >= montage_shape[1] * image_shape[1]:
                cursor_pos = [0, 0]
                image_montages.append(montage_image)
                # reset black canvas
                montage_image = np.zeros(shape=(image_shape[1] * (montage_shape[1]), image_shape[0] * montage_shape[0], 3),
                                         dtype=np.uint8)
                start_new_img = True
    if start_new_img is False:
        image_montages.append(montage_image)  # add unfinished montage
    return image_montages
def adjust_brightness_contrast(image, brightness=0., contrast=0.):
    """Adjust the brightness and/or contrast of an image.

    Args:
        image: OpenCV BGR image.
        brightness: Float, brightness adjustment with 0 meaning no change.
        contrast: Float, percentage contrast adjustment with 0 meaning no
            change.

    Returns:
        The adjusted image.
    """
    # Fix: the original final line had dataset-metadata residue
    # ("| true | true |") fused after the closing parenthesis, which made
    # the function a syntax error.
    beta = 0
    # See the OpenCV docs for more info on the `beta` parameter to addWeighted
    # https://docs.opencv.org/3.4.2/d2/de8/group__core__array.html#gafafb2513349db3bcff51f54ee5592a19
    return cv2.addWeighted(image,
                           1 + float(contrast) / 100.,
                           image,
                           beta,
                           float(brightness))
f73cdcbcda3e78d69025dbba4af7ab55bc59e09f | 1,268 | py | Python | lang/py/pylib/code/unittest/unittest_simple.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | [
"MIT"
] | 13 | 2020-01-04T07:37:38.000Z | 2021-08-31T05:19:58.000Z | lang/py/pylib/code/unittest/unittest_simple.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | [
"MIT"
] | 3 | 2020-06-05T22:42:53.000Z | 2020-08-24T07:18:54.000Z | lang/py/pylib/code/unittest/unittest_simple.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | [
"MIT"
] | 9 | 2020-10-19T04:53:06.000Z | 2021-08-31T05:20:01.000Z | #!/usr/bin/env python
#
# Copyright 2007 Doug Hellmann.
#
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Doug
# Hellmann not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# DOUG HELLMANN DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL DOUG HELLMANN BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
"""Simplistic examples of unit tests.
"""
#end_pymotw_header
import unittest


class SimplisticTest(unittest.TestCase):
    """Smallest possible unittest example: one always-passing test."""

    def test(self):
        # `failUnless` is a long-deprecated alias that was removed in
        # Python 3.12; `assertTrue` is the supported spelling.
        self.assertTrue(True)


if __name__ == '__main__':
    unittest.main()
| 31.7 | 70 | 0.757886 |
import unittest


class SimplisticTest(unittest.TestCase):
    """Minimal unittest example with a single trivially-true test."""

    def test(self):
        # Replaces `failUnless`, a deprecated alias removed in Python 3.12.
        self.assertTrue(True)


if __name__ == '__main__':
    unittest.main()
| true | true |
f73cdde7b3db41ee93d135a9246e19ad92cb3817 | 247 | py | Python | src/website/apps/taxonomy/lookups.py | rapidsms/rapidsms.org | db871949797150e654a42afe00fc9e8f9fc1559e | [
"Apache-2.0"
] | 2 | 2015-10-09T04:48:55.000Z | 2020-05-29T18:33:39.000Z | src/website/apps/taxonomy/lookups.py | rapidsms/rapidsms.org | db871949797150e654a42afe00fc9e8f9fc1559e | [
"Apache-2.0"
] | 22 | 2015-09-20T14:00:16.000Z | 2021-06-10T20:08:25.000Z | src/website/apps/taxonomy/lookups.py | rapidsms/rapidsms.org | db871949797150e654a42afe00fc9e8f9fc1559e | [
"Apache-2.0"
from selectable.base import ModelLookup
from selectable.registry import registry

from .models import Taxonomy


class TaxonomyLookup(ModelLookup):
    """django-selectable lookup: autocomplete Taxonomy entries by name."""
    model = Taxonomy
    # Case-insensitive substring match on the taxonomy name.
    search_fields = ('name__icontains', )


# Fix: dataset-metadata residue ("] | 6 | ... |") was fused in front of the
# first import line, making the module a syntax error.
registry.register(TaxonomyLookup)
from selectable.base import ModelLookup
from selectable.registry import registry

from .models import Taxonomy


class TaxonomyLookup(ModelLookup):
    """django-selectable lookup that matches Taxonomy rows by name."""
    model = Taxonomy
    # Case-insensitive substring search on the `name` field.
    search_fields = ('name__icontains', )


# Fix: dataset-metadata residue ("| 19 | 41 | 0.793522 |") was fused in
# front of the first import line, making the module a syntax error.
registry.register(TaxonomyLookup)
| true | true |
f73cde27fae7b08b820acf1682834ab2d9b1db14 | 10,416 | py | Python | test/doc/conf.py | yasics/vpp | a4d0956082f12ac8269fd415134af7f605c1f3c9 | [
"Apache-2.0"
] | 3 | 2021-03-17T12:40:27.000Z | 2021-05-25T10:31:21.000Z | test/doc/conf.py | yasics/vpp | a4d0956082f12ac8269fd415134af7f605c1f3c9 | [
"Apache-2.0"
] | 32 | 2021-03-24T06:04:08.000Z | 2021-09-14T02:02:22.000Z | test/doc/conf.py | yasics/vpp | a4d0956082f12ac8269fd415134af7f605c1f3c9 | [
"Apache-2.0"
] | 2 | 2021-03-23T19:50:15.000Z | 2022-02-21T10:32:18.000Z | # -*- coding: utf-8 -*-
#
# VPP test framework documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 13 08:45:03 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import subprocess
from datetime import date
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
]
# Third-party packages imported by the documented test-framework modules.
# autodoc replaces them with mock objects so the docs build does not require
# these packages (or VPP itself) to be installed.
autodoc_mock_imports = ['objgraph',
                        'parameterized',
                        'pexpect',
                        'psutil',
                        'pympler',
                        'scapy',
                        'syslog_rfc5424_parser',
                        'vpp_papi']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'VPP test framework'
copyright = f'{date.today().year}, FD.io VPP team'
author = u'FD.io VPP team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Ask the VPP source tree's helper script for the current version string.
# check=True makes a failing script abort the docs build instead of silently
# producing empty version numbers; the path is relative, so the build must be
# started from this directory (as the test docs makefile does).
output = subprocess.run(['../../src/scripts/version'], stdout=subprocess.PIPE,
                        check=True)
# Decode once and strip the trailing newline the script emits so it does not
# leak into rendered titles.
version = output.stdout.decode("utf-8").strip()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'VPP test framework v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or
# 32x32 pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'VPPtestframeworkdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'VPPtestframework.tex', u'VPP test framework Documentation',
u'VPP team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'vpptestframework', u'VPP test framework Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'VPPtestframework', u'VPP test framework Documentation',
author, 'VPPtestframework', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| 29.507082 | 79 | 0.693836 |
import os
import sys
import subprocess
from datetime import date
sys.path.insert(0, os.path.abspath('..'))
extensions = [
'sphinx.ext.autodoc',
]
autodoc_mock_imports = ['objgraph',
'parameterized',
'pexpect',
'psutil',
'pympler',
'scapy',
'syslog_rfc5424_parser',
'vpp_papi']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'VPP test framework'
copyright = f'{date.today().year}, FD.io VPP team'
author = u'FD.io VPP team'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Obtain the VPP version from the tree's helper script.  check=True surfaces
# script failures instead of leaving version/release empty.
output = subprocess.run(['../../src/scripts/version'], stdout=subprocess.PIPE,
                        check=True)
# Decode a single time and drop the trailing newline so the version string is
# clean when embedded in page titles.
version = output.stdout.decode("utf-8").strip()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'VPP test framework v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or
# 32x32 pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'VPPtestframeworkdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'VPPtestframework.tex', u'VPP test framework Documentation',
u'VPP team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'vpptestframework', u'VPP test framework Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'VPPtestframework', u'VPP test framework Documentation',
author, 'VPPtestframework', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
| true | true |
f73cdfb1e41c650c3e59db1891459d8e5bf73921 | 35,030 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_11_01/operations/_packet_captures_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | [
"MIT"
] | 2 | 2021-03-24T06:26:11.000Z | 2021-04-18T15:55:59.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_11_01/operations/_packet_captures_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | [
"MIT"
] | 4 | 2019-04-17T17:57:49.000Z | 2020-04-24T21:11:22.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_11_01/operations/_packet_captures_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | [
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PacketCapturesOperations(object):
"""PacketCapturesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to build and send HTTP requests.
        self._client = client
        # Serializer/deserializer pair for request and response model bodies.
        self._serialize = serializer
        self._deserialize = deserializer
        # Client configuration (subscription id, polling interval, ...).
        self._config = config
    def _create_initial(
        self,
        resource_group_name,  # type: str
        network_watcher_name,  # type: str
        packet_capture_name,  # type: str
        parameters,  # type: "_models.PacketCapture"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.PacketCaptureResult"
        """Send the initial PUT that starts the create-packet-capture LRO.

        Returns the deserialized ``PacketCaptureResult`` from the 201 response;
        polling the operation to completion is done by :meth:`begin_create`.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PacketCaptureResult"]
        # Map auth/not-found/conflict status codes to specific exception types.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL from the URL template stored on this method's metadata.
        url = self._create_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the PacketCapture model into the request body.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'PacketCapture')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # The initial create is only considered accepted on 201 Created.
        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PacketCaptureResult', pipeline_response)

        # 'cls' lets callers post-process the raw pipeline response.
        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'}  # type: ignore
    def begin_create(
        self,
        resource_group_name,  # type: str
        network_watcher_name,  # type: str
        packet_capture_name,  # type: str
        parameters,  # type: "_models.PacketCapture"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.PacketCaptureResult"]
        """Create and start a packet capture on the specified VM.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param packet_capture_name: The name of the packet capture session.
        :type packet_capture_name: str
        :param parameters: Parameters that define the create packet capture operation.
        :type parameters: ~azure.mgmt.network.v2020_11_01.models.PacketCapture
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either PacketCaptureResult or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_11_01.models.PacketCaptureResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PacketCaptureResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Fresh operation (no saved state): send the initial PUT now.  The
        # lambda 'cls' keeps the raw pipeline response for the poller.
        if cont_token is None:
            raw_result = self._create_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                packet_capture_name=packet_capture_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These kwargs were consumed by the initial call; do not forward them
        # to the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final response once polling completes.
            deserialized = self._deserialize('PacketCaptureResult', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        # Resume a previously started operation from its saved state.
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'}  # type: ignore
    def get(
        self,
        resource_group_name,  # type: str
        network_watcher_name,  # type: str
        packet_capture_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.PacketCaptureResult"
        """Gets a packet capture session by name.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param packet_capture_name: The name of the packet capture session.
        :type packet_capture_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PacketCaptureResult, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_11_01.models.PacketCaptureResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PacketCaptureResult"]
        # Map auth/not-found/conflict status codes to specific exception types.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        accept = "application/json"

        # Construct URL from the template stored on this method's metadata.
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only 200 OK carries the packet capture resource.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PacketCaptureResult', pipeline_response)

        # 'cls' lets callers post-process the raw pipeline response.
        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'}  # type: ignore
    def _delete_initial(
        self,
        resource_group_name,  # type: str
        network_watcher_name,  # type: str
        packet_capture_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Send the DELETE request that starts the delete-packet-capture LRO.

        Accepts 202 (deletion in progress) or 204 (already gone); polling to
        completion is handled by :meth:`begin_delete`.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Map auth/not-found/conflict status codes to specific exception types.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        accept = "application/json"

        # Construct URL from the template stored on this method's metadata.
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 202 Accepted (async delete started) and 204 No Content are success.
        if response.status_code not in [202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'}  # type: ignore
    def begin_delete(
        self,
        resource_group_name, # type: str
        network_watcher_name, # type: str
        packet_capture_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified packet capture session.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param packet_capture_name: The name of the packet capture session.
        :type packet_capture_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
        polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # No continuation token: issue the initial DELETE now; otherwise we
        # resume an in-flight LRO from the saved token below.
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                packet_capture_name=packet_capture_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Drop request-scoped kwargs so they do not leak into the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # DELETE produces no body; only the optional `cls` hook sees the response.
            if cls:
                return cls(pipeline_response, None, {})
        # Used by ARMPolling to resolve the 'location' final-state URL.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
    def _stop_initial(
        self,
        resource_group_name, # type: str
        network_watcher_name, # type: str
        packet_capture_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> None
        """Send the raw POST that stops a packet capture session.

        Success is 200 or 202; long-running polling is handled by ``begin_stop``.
        """
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        accept = "application/json"
        # Construct URL
        url = self._stop_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # Stop returns no body; hand the raw response to `cls` if given.
        if cls:
            return cls(pipeline_response, None, {})
    _stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/stop'} # type: ignore
    def begin_stop(
        self,
        resource_group_name, # type: str
        network_watcher_name, # type: str
        packet_capture_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Stops a specified packet capture session.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param packet_capture_name: The name of the packet capture session.
        :type packet_capture_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
        polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # No continuation token: issue the initial stop POST now; otherwise
        # resume the in-flight LRO from the saved token below.
        if cont_token is None:
            raw_result = self._stop_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                packet_capture_name=packet_capture_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Drop request-scoped kwargs so they do not leak into the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Stop produces no body; only the optional `cls` hook sees the response.
            if cls:
                return cls(pipeline_response, None, {})
        # Used by ARMPolling to resolve the 'location' final-state URL.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/stop'} # type: ignore
    def _get_status_initial(
        self,
        resource_group_name, # type: str
        network_watcher_name, # type: str
        packet_capture_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.PacketCaptureQueryStatusResult"
        """Send the raw queryStatus POST for a packet capture session.

        Returns the deserialized ``PacketCaptureQueryStatusResult`` on 200/202;
        long-running polling is handled by ``begin_get_status``.
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureQueryStatusResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        accept = "application/json"
        # Construct URL
        url = self._get_status_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # Generated code keeps one branch per status code even though both
        # deserialize the same model.
        if response.status_code == 200:
            deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _get_status_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/queryStatus'} # type: ignore
    def begin_get_status(
        self,
        resource_group_name, # type: str
        network_watcher_name, # type: str
        packet_capture_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["_models.PacketCaptureQueryStatusResult"]
        """Query the status of a running packet capture session.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the Network Watcher resource.
        :type network_watcher_name: str
        :param packet_capture_name: The name given to the packet capture session.
        :type packet_capture_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
        polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either PacketCaptureQueryStatusResult or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_11_01.models.PacketCaptureQueryStatusResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureQueryStatusResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # No continuation token: issue the initial queryStatus POST now;
        # otherwise resume the in-flight LRO from the saved token below.
        if cont_token is None:
            raw_result = self._get_status_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                packet_capture_name=packet_capture_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Drop request-scoped kwargs so they do not leak into the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Final LRO response carries the status result body.
            deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # Used by ARMPolling to resolve the 'location' final-state URL.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_get_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/queryStatus'} # type: ignore
    def list(
        self,
        resource_group_name, # type: str
        network_watcher_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.PacketCaptureListResult"]
        """Lists all packet capture sessions within the specified resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the Network Watcher resource.
        :type network_watcher_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PacketCaptureListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_11_01.models.PacketCaptureListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request for either the first page or a next_link URL.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link is a fully-formed URL; no extra query parameters.
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            deserialized = self._deserialize('PacketCaptureListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            # First tuple element is the next link; always None here, i.e.
            # this operation yields a single page.
            return None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures'} # type: ignore
| 50.989811 | 240 | 0.670482 |
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PacketCapturesOperations(object):
models = _models
    def __init__(self, client, config, serializer, deserializer):
        """Store the injected pipeline client, serializers and configuration.

        :param client: Pipeline client used to build and send requests.
        :param config: Client configuration (supplies subscription_id, polling_interval).
        :param serializer: Request body/URL/query/header serializer.
        :param deserializer: Response body deserializer.
        """
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def _create_initial(
        self,
        resource_group_name,
        network_watcher_name,
        packet_capture_name,
        parameters,
        **kwargs
    ):
        """Send the raw PUT that creates a packet capture session.

        Only 201 counts as success; the deserialized ``PacketCaptureResult``
        is returned. Long-running polling is handled by ``begin_create``.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Allow callers to extend/override the default status-code mapping.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_initial.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the request body as a PacketCapture model.
        body_content_kwargs = {}
        body_content = self._serialize.body(parameters, 'PacketCapture')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('PacketCaptureResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'}
    def begin_create(
        self,
        resource_group_name,
        network_watcher_name,
        packet_capture_name,
        parameters,
        **kwargs
    ):
        """Create and start a packet capture session (long-running operation).

        :param resource_group_name: The name of the resource group.
        :param network_watcher_name: The name of the network watcher.
        :param packet_capture_name: The name of the packet capture session.
        :param parameters: PacketCapture model defining the session to create.
        :keyword callable cls: A custom type or function passed the direct response.
        :keyword str continuation_token: Token to resume a saved poller.
        :keyword polling: True for ARMPolling, False for no polling, or a custom
         PollingMethod.
        :keyword int polling_interval: Wait between polls when no Retry-After header.
        :return: LROPoller that resolves to PacketCaptureResult (or cls(response)).
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        # No continuation token: issue the initial PUT now; otherwise resume
        # the in-flight LRO from the saved token below.
        if cont_token is None:
            raw_result = self._create_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                packet_capture_name=packet_capture_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Drop request-scoped kwargs so they do not leak into the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('PacketCaptureResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # Used by ARMPolling to resolve the final-state URL.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # Create resolves its final state via the Azure-AsyncOperation header.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'}
def get(
self,
resource_group_name,
network_watcher_name,
packet_capture_name,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PacketCaptureResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'}
    def _delete_initial(
        self,
        resource_group_name,
        network_watcher_name,
        packet_capture_name,
        **kwargs
    ):
        """Send the raw DELETE for a packet capture session (202/204 = success).

        Long-running polling is handled by ``begin_delete``.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # DELETE has no body; hand the raw response to `cls` if given.
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'}
    def begin_delete(
        self,
        resource_group_name,
        network_watcher_name,
        packet_capture_name,
        **kwargs
    ):
        """Deletes the specified packet capture session (long-running operation).

        :param resource_group_name: The name of the resource group.
        :param network_watcher_name: The name of the network watcher.
        :param packet_capture_name: The name of the packet capture session.
        :keyword str continuation_token: Token to resume a saved poller.
        :keyword polling: True for ARMPolling, False for no polling, or a custom
         PollingMethod.
        :return: LROPoller that resolves to None (or cls(response)).
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        # No continuation token: issue the initial DELETE now; otherwise
        # resume the in-flight LRO from the saved token below.
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                packet_capture_name=packet_capture_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Drop request-scoped kwargs so they do not leak into the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # DELETE produces no body; only the optional `cls` hook sees the response.
            if cls:
                return cls(pipeline_response, None, {})
        # Used by ARMPolling to resolve the 'location' final-state URL.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'}
    def _stop_initial(
        self,
        resource_group_name,
        network_watcher_name,
        packet_capture_name,
        **kwargs
    ):
        """Send the raw POST that stops a packet capture session (200/202 = success).

        Long-running polling is handled by ``begin_stop``.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        accept = "application/json"
        # Construct URL
        url = self._stop_initial.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # Stop returns no body; hand the raw response to `cls` if given.
        if cls:
            return cls(pipeline_response, None, {})
    _stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/stop'}
    def begin_stop(
        self,
        resource_group_name,
        network_watcher_name,
        packet_capture_name,
        **kwargs
    ):
        """Stops a specified packet capture session (long-running operation).

        :param resource_group_name: The name of the resource group.
        :param network_watcher_name: The name of the network watcher.
        :param packet_capture_name: The name of the packet capture session.
        :keyword str continuation_token: Token to resume a saved poller.
        :keyword polling: True for ARMPolling, False for no polling, or a custom
         PollingMethod.
        :return: LROPoller that resolves to None (or cls(response)).
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        # No continuation token: issue the initial stop POST now; otherwise
        # resume the in-flight LRO from the saved token below.
        if cont_token is None:
            raw_result = self._stop_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                packet_capture_name=packet_capture_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Drop request-scoped kwargs so they do not leak into the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Stop produces no body; only the optional `cls` hook sees the response.
            if cls:
                return cls(pipeline_response, None, {})
        # Used by ARMPolling to resolve the 'location' final-state URL.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/stop'}
    def _get_status_initial(
        self,
        resource_group_name,
        network_watcher_name,
        packet_capture_name,
        **kwargs
    ):
        """Send the raw queryStatus POST for a packet capture session.

        Returns the deserialized ``PacketCaptureQueryStatusResult`` on 200/202;
        long-running polling is handled by ``begin_get_status``.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        accept = "application/json"
        # Construct URL
        url = self._get_status_initial.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # Generated code keeps one branch per status code even though both
        # deserialize the same model.
        if response.status_code == 200:
            deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _get_status_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/queryStatus'}
    def begin_get_status(
        self,
        resource_group_name,
        network_watcher_name,
        packet_capture_name,
        **kwargs
    ):
        """Begin a long-running operation that queries the status of a packet capture.

        Returns an ``LROPoller`` whose ``result()`` yields the deserialized
        ``PacketCaptureQueryStatusResult`` (or the output of a custom ``cls``
        callback supplied via kwargs).
        """
        # polling: True -> ARM polling, False -> no polling,
        # anything else is assumed to be a caller-supplied polling method.
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        # A continuation token resumes a previously-started operation.
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # Start the operation. The identity `cls` preserves the raw
            # pipeline response so the poller can inspect status/headers.
            raw_result = self._get_status_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                packet_capture_name=packet_capture_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs only apply to the initial request, not to polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final polling response into the result model.
            deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume polling from the supplied continuation token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_get_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/queryStatus'}
    def list(
        self,
        resource_group_name,
        network_watcher_name,
        **kwargs
    ):
        """Return an ``ItemPaged`` iterator over the packet captures of a network watcher.

        Pages are fetched lazily; each page is deserialized as a
        ``PacketCaptureListResult`` and its ``value`` items are yielded.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request: the first page uses the templated URL,
            # subsequent pages use the service-provided next_link verbatim.
            header_parameters = {}
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                url = self.list.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Returns (continuation_token, iterator_of_items); this API has
            # no next link in the model, hence the None token.
            deserialized = self._deserialize('PacketCaptureListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return None, iter(list_of_elem)
        def get_next(next_link=None):
            # Execute one page request and raise on any non-200 status.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures'}
| true | true |
f73ce09aafb5da7bb9707cfcc441437a734781e4 | 1,144 | py | Python | python_practice/data_structure/queue/queue_link_list.py | jeremykid/FunAlgorithm | 0ee34f130574d2a42ce995a1a44545a7368f9add | [
"MIT"
] | null | null | null | python_practice/data_structure/queue/queue_link_list.py | jeremykid/FunAlgorithm | 0ee34f130574d2a42ce995a1a44545a7368f9add | [
"MIT"
] | null | null | null | python_practice/data_structure/queue/queue_link_list.py | jeremykid/FunAlgorithm | 0ee34f130574d2a42ce995a1a44545a7368f9add | [
"MIT"
] | null | null | null | import sys, os
sys.path.append(os.path.abspath(os.path.join('..', 'linked_list')))
from LinkedList import linked_list
from node import Node
class queue_linked_list():
    """FIFO queue backed by a singly linked list of ``Node`` objects.

    ``head`` is the front (dequeue side); ``tail`` is the back (enqueue
    side).  Both are ``None`` when the queue is empty.
    """

    def __init__(self):
        self.head = None
        self.tail = None

    def enqueue(self, value):
        """Append ``value`` at the back of the queue."""
        tempNode = Node(value)
        if self.tail:
            self.tail.set_next(tempNode)
            self.tail = tempNode
        else:
            # Empty queue: the new node is both front and back.
            self.head = tempNode
            self.tail = tempNode

    def dequeue(self):
        """Remove and return the front node, or a message if the queue is empty.

        Fixes relative to the original implementation:
        * the removed node is now returned (previously nothing was),
        * ``head`` is cleared when the last element is removed (previously
          only ``tail`` was reset, leaving a stale ``head`` so the queue
          never reported empty again).
        """
        if self.empty():
            return "The queue is empty"
        result = self.head
        tempHeadNext = self.head.get_next()
        if tempHeadNext is None:
            # Last element removed -> queue is now empty.
            self.head = None
            self.tail = None
        else:
            self.head = tempHeadNext
        return result

    def empty(self):
        """Return True when the queue holds no elements."""
        return self.head is None

    def __str__(self):
        """Render the queue front-to-back, e.g. ``a -> b ->  End``."""
        current = self.head
        output = ""
        while current:
            output += str(current) + " -> "
            current = current.get_next()
        output += " End "
        return output
sys.path.append(os.path.abspath(os.path.join('..', 'linked_list')))
from LinkedList import linked_list
from node import Node
class queue_linked_list():
    """FIFO queue backed by a singly linked list of ``Node`` objects.

    ``head`` is the front (dequeue side); ``tail`` is the back (enqueue
    side).  Both are ``None`` when the queue is empty.
    """

    def __init__(self):
        self.head = None
        self.tail = None

    def enqueue(self, value):
        """Append ``value`` at the back of the queue."""
        tempNode = Node(value)
        if self.tail:
            self.tail.set_next(tempNode)
            self.tail = tempNode
        else:
            # Empty queue: the new node is both front and back.
            self.head = tempNode
            self.tail = tempNode

    def dequeue(self):
        """Remove and return the front node, or a message if the queue is empty.

        Fixes relative to the original implementation:
        * the removed node is now returned (previously nothing was),
        * ``head`` is cleared when the last element is removed (previously
          only ``tail`` was reset, leaving a stale ``head`` so the queue
          never reported empty again).
        """
        if self.empty():
            return "The queue is empty"
        result = self.head
        tempHeadNext = self.head.get_next()
        if tempHeadNext is None:
            # Last element removed -> queue is now empty.
            self.head = None
            self.tail = None
        else:
            self.head = tempHeadNext
        return result

    def empty(self):
        """Return True when the queue holds no elements."""
        return self.head is None

    def __str__(self):
        """Render the queue front-to-back, e.g. ``a -> b ->  End``."""
        current = self.head
        output = ""
        while current:
            output += str(current) + " -> "
            current = current.get_next()
        output += " End "
        return output
f73ce0c5a62e6dd1c6e2c1d7bc9490117950691c | 2,467 | py | Python | research/nlp/dam/src/metric.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 77 | 2021-10-15T08:32:37.000Z | 2022-03-30T13:09:11.000Z | research/nlp/dam/src/metric.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 3 | 2021-10-30T14:44:57.000Z | 2022-02-14T06:57:57.000Z | research/nlp/dam/src/metric.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 24 | 2021-10-15T08:32:45.000Z | 2022-03-24T18:45:20.000Z | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================
"""DAM EvalMetric"""
from mindspore.nn.metrics import Metric
from src import ubuntu_evaluation as ub_eval
from src import douban_evaluation as db_eval
class EvalMetric(Metric):
    """DAM EvalMetric"""
    def __init__(self, model_name="DAM_ubuntu", score_file=None):
        # model_name selects the evaluation routine ("DAM_ubuntu" or
        # "DAM_douban"); score_file, if given, receives one
        # "prediction<TAB>label" line per sample during eval().
        super(EvalMetric, self).__init__()
        self.model_name = model_name
        self.score_file = score_file
        self.pred_probs = []    # flattened predictions across all batches
        self.true_labels = []   # flattened labels across all batches
        self.global_step = 0    # number of update() calls seen
    def clear(self):
        """Clear the internal evaluation result."""
        self.pred_probs = []
        self.true_labels = []
        self.global_step = 0
    def update(self, *inputs):
        """Update list of predicts and labels."""
        self.global_step += 1
        print('eval {} its'.format(self.global_step))
        # inputs[0]: batch predictions, inputs[1]: batch labels; both are
        # flattened and accumulated so eval() sees one long sequence.
        batch_predict = inputs[0].asnumpy()
        batch_label = inputs[1].asnumpy()
        self.pred_probs.extend(batch_predict.flatten().tolist())
        self.true_labels.extend(batch_label.flatten().tolist())
    def eval(self):
        """Evaluating"""
        if len(self.true_labels) != len(self.pred_probs):
            raise RuntimeError('true_labels.size() is not equal to pred_probs.size()')
        # Dispatch to the dataset-specific evaluation implementation.
        if self.model_name == "DAM_ubuntu":
            auc = ub_eval.evaluate_m(self.pred_probs, self.true_labels)
        elif self.model_name == "DAM_douban":
            auc = db_eval.evaluate_m(self.pred_probs, self.true_labels)
        else:
            raise RuntimeError('Evaluation function is not defined')
        # Optionally dump per-sample scores for offline inspection.
        if self.score_file is not None:
            with open(self.score_file, 'w') as file_out:
                for i in range(len(self.true_labels)):
                    file_out.write(str(self.pred_probs[i]) + '\t' + str(self.true_labels[i]) + '\n')
        return auc
| 38.546875 | 100 | 0.638833 |
from mindspore.nn.metrics import Metric
from src import ubuntu_evaluation as ub_eval
from src import douban_evaluation as db_eval
class EvalMetric(Metric):
    """Accumulates batch predictions/labels and computes the DAM evaluation metric."""
    def __init__(self, model_name="DAM_ubuntu", score_file=None):
        # model_name selects the evaluation routine; score_file optionally
        # receives "prediction<TAB>label" lines during eval().
        super(EvalMetric, self).__init__()
        self.model_name = model_name
        self.score_file = score_file
        self.pred_probs = []
        self.true_labels = []
        self.global_step = 0
    def clear(self):
        """Reset accumulated predictions, labels, and the step counter."""
        self.pred_probs = []
        self.true_labels = []
        self.global_step = 0
    def update(self, *inputs):
        """Append one batch of flattened predictions (inputs[0]) and labels (inputs[1])."""
        self.global_step += 1
        print('eval {} its'.format(self.global_step))
        batch_predict = inputs[0].asnumpy()
        batch_label = inputs[1].asnumpy()
        self.pred_probs.extend(batch_predict.flatten().tolist())
        self.true_labels.extend(batch_label.flatten().tolist())
    def eval(self):
        """Run the dataset-specific evaluation and return its result."""
        if len(self.true_labels) != len(self.pred_probs):
            raise RuntimeError('true_labels.size() is not equal to pred_probs.size()')
        if self.model_name == "DAM_ubuntu":
            auc = ub_eval.evaluate_m(self.pred_probs, self.true_labels)
        elif self.model_name == "DAM_douban":
            auc = db_eval.evaluate_m(self.pred_probs, self.true_labels)
        else:
            raise RuntimeError('Evaluation function is not defined')
        # Optionally dump per-sample scores to score_file.
        if self.score_file is not None:
            with open(self.score_file, 'w') as file_out:
                for i in range(len(self.true_labels)):
                    file_out.write(str(self.pred_probs[i]) + '\t' + str(self.true_labels[i]) + '\n')
        return auc
| true | true |
f73ce0dc7c7ae9358a7a96d27890ae5767e7afb1 | 523 | py | Python | M68kConstants.py | lallouslab/pwndra | bced561e5f9c4c033359caa25083bdf2289355f5 | [
"Apache-2.0"
] | null | null | null | M68kConstants.py | lallouslab/pwndra | bced561e5f9c4c033359caa25083bdf2289355f5 | [
"Apache-2.0"
] | null | null | null | M68kConstants.py | lallouslab/pwndra | bced561e5f9c4c033359caa25083bdf2289355f5 | [
"Apache-2.0"
] | null | null | null | # Replace Linux/m68k numeric constants with human readable names.
#@author b0bb
#@category Pwn
#@keybinding
#@menupath Analysis.Pwn.Constants.m68k
#@toolbar
from constants.Constants import Constants
import ghidra.app.util.opinion.ElfLoader as ElfLoader
def run():
    """Replace Linux/m68k numeric constants in the current Ghidra program.

    Relies on Ghidra script globals injected by the script runtime:
    ``currentProgram``, ``currentSelection``, ``monitor``, ``state``,
    and ``popup``.
    """
    # The constant substitution only applies to ELF binaries.
    if currentProgram.getExecutableFormat() != ElfLoader.ELF_NAME:
        popup('Not an ELF file, cannot continue')
        return
    arch = 'm68k'
    abi = 'default'
    # Constants(...) applies the replacement (see module header comment).
    Constants(currentProgram, currentSelection, monitor, state, arch, abi)
run() | 22.73913 | 74 | 0.728489 |
from constants.Constants import Constants
import ghidra.app.util.opinion.ElfLoader as ElfLoader
def run():
    """Replace Linux/m68k numeric constants in the current Ghidra program.

    Relies on Ghidra script globals injected by the script runtime:
    ``currentProgram``, ``currentSelection``, ``monitor``, ``state``,
    and ``popup``.
    """
    # The constant substitution only applies to ELF binaries.
    if currentProgram.getExecutableFormat() != ElfLoader.ELF_NAME:
        popup('Not an ELF file, cannot continue')
        return
    arch = 'm68k'
    abi = 'default'
    Constants(currentProgram, currentSelection, monitor, state, arch, abi)
run() | true | true |
f73ce0f806cf72d4f3deeaf69d8140f99802c59a | 3,087 | py | Python | asn/files.py | Ayanami-y/GreaterWMS | 9737f0db1d76b829ace921250efa9336635df637 | [
"Apache-2.0"
] | 1 | 2021-06-04T02:06:34.000Z | 2021-06-04T02:06:34.000Z | asn/files.py | Ayanami-y/GreaterWMS | 9737f0db1d76b829ace921250efa9336635df637 | [
"Apache-2.0"
] | null | null | null | asn/files.py | Ayanami-y/GreaterWMS | 9737f0db1d76b829ace921250efa9336635df637 | [
"Apache-2.0"
] | null | null | null | from rest_framework_csv.renderers import CSVStreamingRenderer
def list_file_headers():
    """Column order used by the ASN list CSV export."""
    columns = (
        'asn_code', 'asn_status', 'total_weight', 'total_volume',
        'supplier', 'creater', 'create_time', 'update_time',
    )
    return list(columns)
def list_cn_data_header():
    """Chinese column labels for the ASN list CSV export, keyed by field name."""
    return {
        'asn_code': u'ASN单号',
        'asn_status': u'ASN状态',
        'total_weight': u'总重量',
        'total_volume': u'总体积',
        'supplier': u'供应商',
        'creater': u'创建人',
        'create_time': u'创建时间',
        'update_time': u'更新时间',
    }
def list_en_data_header():
    """English column labels for the ASN list CSV export, keyed by field name."""
    return {
        'asn_code': u'ASN Code',
        'asn_status': u'ASN Status',
        'total_weight': u'Total Weight',
        'total_volume': u'Total Volume',
        'supplier': u'Supplier',
        'creater': u'Creater',
        'create_time': u'Create Time',
        'update_time': u'Update Time',
    }
def detail_file_headers():
    """Column order used by the ASN detail CSV export."""
    columns = (
        'asn_code', 'asn_status', 'supplier', 'goods_code',
        'goods_qty', 'goods_actual_qty', 'sorted_qty',
        'goods_shortage_qty', 'goods_more_qty', 'goods_damage_qty',
        'goods_weight', 'goods_volume',
        'creater', 'create_time', 'update_time',
    )
    return list(columns)
def detail_cn_data_header():
    """Chinese column labels for the ASN detail CSV export, keyed by field name."""
    return {
        'asn_code': u'ASN单号',
        'asn_status': u'ASN状态',
        'supplier': u'供应商',
        'goods_code': u'商品编码',
        'goods_qty': u'订单数量',
        'goods_actual_qty': u'实际到货数量',
        'sorted_qty': u'已分拣数量',
        'goods_shortage_qty': u'少到货数量',
        'goods_more_qty': u'多到货数量',
        'goods_damage_qty': u'破损数量',
        'goods_weight': u'商品重量',
        'goods_volume': u'商品体积',
        'creater': u'创建人',
        'create_time': u'创建时间',
        'update_time': u'更新时间',
    }
def detail_en_data_header():
    """English column labels for the ASN detail CSV export, keyed by field name."""
    return {
        'asn_code': u'ASN Code',
        'asn_status': u'ASN Status',
        'supplier': u'Supplier',
        'goods_code': u'Goods Code',
        'goods_qty': u'Goods Qty',
        'goods_actual_qty': u'Goods Actual Qty',
        'sorted_qty': u'Sorted Qty',
        'goods_shortage_qty': u'Goods Shortage Qty',
        'goods_more_qty': u'Goods More Qty',
        'goods_damage_qty': u'Goods Damage Qty',
        'goods_weight': u'Goods Weight',
        'goods_volume': u'Goods Volume',
        'creater': u'Creater',
        'create_time': u'Create Time',
        'update_time': u'Update Time',
    }
# CSV renderers pairing a fixed column order (``header``) with localized
# column labels (``labels``); rest_framework_csv streams rows using these.
class FileListRenderCN(CSVStreamingRenderer):
    # ASN list export with Chinese column labels.
    header = list_file_headers()
    labels = list_cn_data_header()
class FileListRenderEN(CSVStreamingRenderer):
    # ASN list export with English column labels.
    header = list_file_headers()
    labels = list_en_data_header()
class FileDetailRenderCN(CSVStreamingRenderer):
    # ASN detail export with Chinese column labels.
    header = detail_file_headers()
    labels = detail_cn_data_header()
class FileDetailRenderEN(CSVStreamingRenderer):
    # ASN detail export with English column labels.
    header = detail_file_headers()
    labels = detail_en_data_header()
| 27.810811 | 61 | 0.561386 | from rest_framework_csv.renderers import CSVStreamingRenderer
def list_file_headers():
    """Column order used by the ASN list CSV export."""
    return [
        'asn_code',
        'asn_status',
        'total_weight',
        'total_volume',
        'supplier',
        'creater',
        'create_time',
        'update_time'
    ]
def list_cn_data_header():
    """Chinese column labels for the ASN list CSV export, keyed by field name."""
    return dict([
        ('asn_code', u'ASN单号'),
        ('asn_status', u'ASN状态'),
        ('total_weight', u'总重量'),
        ('total_volume', u'总体积'),
        ('supplier', u'供应商'),
        ('creater', u'创建人'),
        ('create_time', u'创建时间'),
        ('update_time', u'更新时间'),
    ])
def list_en_data_header():
    """English column labels for the ASN list CSV export, keyed by field name."""
    return dict([
        ('asn_code', u'ASN Code'),
        ('asn_status', u'ASN Status'),
        ('total_weight', u'Total Weight'),
        ('total_volume', u'Total Volume'),
        ('supplier', u'Supplier'),
        ('creater', u'Creater'),
        ('create_time', u'Create Time'),
        ('update_time', u'Update Time'),
    ])
def detail_file_headers():
    """Column order used by the ASN detail CSV export."""
    return [
        'asn_code',
        'asn_status',
        'supplier',
        'goods_code',
        'goods_qty',
        'goods_actual_qty',
        'sorted_qty',
        'goods_shortage_qty',
        'goods_more_qty',
        'goods_damage_qty',
        'goods_weight',
        'goods_volume',
        'creater',
        'create_time',
        'update_time'
    ]
def detail_cn_data_header():
    """Chinese column labels for the ASN detail CSV export, keyed by field name."""
    return dict([
        ('asn_code', u'ASN单号'),
        ('asn_status', u'ASN状态'),
        ('supplier', u'供应商'),
        ('goods_code', u'商品编码'),
        ('goods_qty', u'订单数量'),
        ('goods_actual_qty', u'实际到货数量'),
        ('sorted_qty', u'已分拣数量'),
        ('goods_shortage_qty', u'少到货数量'),
        ('goods_more_qty', u'多到货数量'),
        ('goods_damage_qty', u'破损数量'),
        ('goods_weight', u'商品重量'),
        ('goods_volume', u'商品体积'),
        ('creater', u'创建人'),
        ('create_time', u'创建时间'),
        ('update_time', u'更新时间')
    ])
def detail_en_data_header():
    """English column labels for the ASN detail CSV export, keyed by field name."""
    return dict([
        ('asn_code', u'ASN Code'),
        ('asn_status', u'ASN Status'),
        ('supplier', u'Supplier'),
        ('goods_code', u'Goods Code'),
        ('goods_qty', u'Goods Qty'),
        ('goods_actual_qty', u'Goods Actual Qty'),
        ('sorted_qty', u'Sorted Qty'),
        ('goods_shortage_qty', u'Goods Shortage Qty'),
        ('goods_more_qty', u'Goods More Qty'),
        ('goods_damage_qty', u'Goods Damage Qty'),
        ('goods_weight', u'Goods Weight'),
        ('goods_volume', u'Goods Volume'),
        ('creater', u'Creater'),
        ('create_time', u'Create Time'),
        ('update_time', u'Update Time')
    ])
# CSV renderers pairing a fixed column order (``header``) with localized
# column labels (``labels``); rest_framework_csv streams rows using these.
class FileListRenderCN(CSVStreamingRenderer):
    # ASN list export with Chinese column labels.
    header = list_file_headers()
    labels = list_cn_data_header()
class FileListRenderEN(CSVStreamingRenderer):
    # ASN list export with English column labels.
    header = list_file_headers()
    labels = list_en_data_header()
class FileDetailRenderCN(CSVStreamingRenderer):
    # ASN detail export with Chinese column labels.
    header = detail_file_headers()
    labels = detail_cn_data_header()
class FileDetailRenderEN(CSVStreamingRenderer):
    # ASN detail export with English column labels.
    header = detail_file_headers()
    labels = detail_en_data_header()
| true | true |
f73ce1204a8902428142c12bd56a3c7b8848dd70 | 773 | py | Python | pennylane/beta/vqe/__init__.py | InduManimaran/pennylane | 375d25acc7bd2e6d5243b5273958b26513c33189 | [
"Apache-2.0"
] | 2 | 2021-06-29T01:30:08.000Z | 2021-08-23T10:38:52.000Z | pennylane/beta/vqe/__init__.py | InduManimaran/pennylane | 375d25acc7bd2e6d5243b5273958b26513c33189 | [
"Apache-2.0"
] | 4 | 2020-09-25T21:04:22.000Z | 2022-02-10T00:39:53.000Z | pennylane/beta/vqe/__init__.py | InduManimaran/pennylane | 375d25acc7bd2e6d5243b5273958b26513c33189 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This package contains functionality for running Variational Quantum Eigensolver (VQE)
computations using PennyLane.
"""
from .vqe import Hamiltonian, circuits, aggregate, cost
| 40.684211 | 85 | 0.778784 |
from .vqe import Hamiltonian, circuits, aggregate, cost
| true | true |
f73ce1b2093e3b1a5ca3e9e8812834bc3f0d3432 | 1,783 | py | Python | medical_prescription/recommendation/test/test_view_recommendation_custom.py | ristovao/2017.2-Receituario-Medico | 5387eb80dfb354e948abe64f7d8bbe087fc4f136 | [
"MIT"
] | 11 | 2017-09-19T00:29:40.000Z | 2018-04-05T23:52:39.000Z | medical_prescription/recommendation/test/test_view_recommendation_custom.py | ristovao/2017.2-Receituario-Medico | 5387eb80dfb354e948abe64f7d8bbe087fc4f136 | [
"MIT"
] | 271 | 2017-09-09T00:07:28.000Z | 2017-12-07T05:00:45.000Z | medical_prescription/recommendation/test/test_view_recommendation_custom.py | ristovao/2017.2-Receituario-Medico | 5387eb80dfb354e948abe64f7d8bbe087fc4f136 | [
"MIT"
] | 26 | 2017-08-31T20:48:49.000Z | 2018-03-21T15:11:27.000Z | # Django
from django.test import TestCase
from django.test.client import RequestFactory
# Django Local
from recommendation.views import CustomRecommendationCreateView
from user.models import HealthProfessional
class CreateRecomendationCustomViewTeste(TestCase):
    """Tests for CustomRecommendationCreateView using a HealthProfessional user."""

    def setUp(self):
        """Create and persist the HealthProfessional attached to each request."""
        self.factory = RequestFactory()
        self.health_professional = HealthProfessional()
        self.health_professional.pk = 1
        self.health_professional.crm = '12345'
        self.health_professional.crm_state = 'US'
        self.health_professional.save()

    def test_recommendation_get_with_health_professional(self):
        """GET by a health professional returns HTTP 200."""
        request = self.factory.get('/recommendation/create_custom')
        request.user = self.health_professional
        # Invoke the class-based view directly with the crafted request.
        response = CustomRecommendationCreateView.as_view()(request)
        self.assertEqual(response.status_code, 200)

    def test_recommendation_post_with_health_professional_valid(self):
        """POST with valid form data returns HTTP 302 (redirect on success)."""
        context = {'name': "Diabetes",
                   'description': "Alguma descrição aceitavel"}
        request = self.factory.post('/prescription/create_modal/', context)
        request.user = self.health_professional
        # Invoke the class-based view directly with the crafted request.
        response = CustomRecommendationCreateView.as_view()(request)
        self.assertEqual(response.status_code, 302)

    def test_recommendation_post_with_health_professional_invalid(self):
        """POST with a one-character name returns HTTP 200 (presumably the form
        is redisplayed after failed validation — confirm against the form rules)."""
        context = {'name': "A",
                   'description': "Alguma descrição aceitavel"}
        request = self.factory.post('/prescription/create_modal/', context)
        request.user = self.health_professional
        # Invoke the class-based view directly with the crafted request.
        response = CustomRecommendationCreateView.as_view()(request)
        self.assertEqual(response.status_code, 200)
| 36.387755 | 75 | 0.713404 |
from django.test import TestCase
from django.test.client import RequestFactory
from recommendation.views import CustomRecommendationCreateView
from user.models import HealthProfessional
class CreateRecomendationCustomViewTeste(TestCase):
    """Tests for CustomRecommendationCreateView using a HealthProfessional user."""

    def setUp(self):
        """Create and persist the HealthProfessional attached to each request."""
        self.factory = RequestFactory()
        self.health_professional = HealthProfessional()
        self.health_professional.pk = 1
        self.health_professional.crm = '12345'
        self.health_professional.crm_state = 'US'
        self.health_professional.save()

    def test_recommendation_get_with_health_professional(self):
        """GET by a health professional returns HTTP 200."""
        request = self.factory.get('/recommendation/create_custom')
        request.user = self.health_professional
        response = CustomRecommendationCreateView.as_view()(request)
        self.assertEqual(response.status_code, 200)

    def test_recommendation_post_with_health_professional_valid(self):
        """POST with valid form data returns HTTP 302 (redirect on success)."""
        context = {'name': "Diabetes",
                   'description': "Alguma descrição aceitavel"}
        request = self.factory.post('/prescription/create_modal/', context)
        request.user = self.health_professional
        response = CustomRecommendationCreateView.as_view()(request)
        self.assertEqual(response.status_code, 302)

    def test_recommendation_post_with_health_professional_invalid(self):
        """POST with a one-character name returns HTTP 200 (presumably the form
        is redisplayed after failed validation — confirm against the form rules)."""
        context = {'name': "A",
                   'description': "Alguma descrição aceitavel"}
        request = self.factory.post('/prescription/create_modal/', context)
        request.user = self.health_professional
        response = CustomRecommendationCreateView.as_view()(request)
        self.assertEqual(response.status_code, 200)
| true | true |
f73ce20fd80292af353bb5c73e149c26ec3a571d | 118 | py | Python | datasets/__init__.py | dynasty0/tf_hrnet_segmentation | 9e54bb14b518cdf285e5871c051b9cdd851f08d7 | [
"BSD-3-Clause"
] | 1 | 2021-08-31T02:46:12.000Z | 2021-08-31T02:46:12.000Z | datasets/__init__.py | dynasty0/tf_hrnet_segmentation | 9e54bb14b518cdf285e5871c051b9cdd851f08d7 | [
"BSD-3-Clause"
] | 1 | 2021-04-23T04:57:08.000Z | 2021-04-24T09:56:13.000Z | datasets/__init__.py | dynasty0/tf_hrnet_segmentation | 9e54bb14b518cdf285e5871c051b9cdd851f08d7 | [
"BSD-3-Clause"
] | null | null | null | from .celeba_dataset import *
from .ilsvrc12_dataset import *
__all__ =[
"CelebaDataset",
"Ilsvrc12Dataset"
] | 16.857143 | 31 | 0.720339 | from .celeba_dataset import *
from .ilsvrc12_dataset import *
__all__ =[
"CelebaDataset",
"Ilsvrc12Dataset"
] | true | true |
f73ce291d05952665fa74cf72c4dd7096062f5ef | 1,901 | py | Python | install/install.py | PABlond/Portfolio-CMS-for-freelancers | ace9e8ed526b1605ea4a8fbbfcee9461c1e6fa4d | [
"MIT"
] | 1 | 2019-11-04T07:04:35.000Z | 2019-11-04T07:04:35.000Z | install/install.py | PABlond/Portfolio-CMS-for-freelancers | ace9e8ed526b1605ea4a8fbbfcee9461c1e6fa4d | [
"MIT"
] | 4 | 2021-03-19T11:38:03.000Z | 2022-01-13T02:23:51.000Z | install/install.py | PABlond/Portfolio-CMS-for-freelancers | ace9e8ed526b1605ea4a8fbbfcee9461c1e6fa4d | [
"MIT"
] | null | null | null | import pandas
import utils
import config
var = config.Variable()
df = pandas.read_excel(var.config)
pwd = var.pwd
def run():
    """Render site templates from the config spreadsheet and export sheet data.

    Replaces the ``<KEY>`` placeholders of the gatsby-config and content
    templates with values from the module-level ``df`` spreadsheet, writes the
    rendered files, then exports the open-source / experiences / certifications
    sheets to the JSON files consumed by the front end.
    """
    # Read both templates up front.
    with open(var.gatsby_config_template, "r") as f:
        gatsby_config = f.read()
    with open(var.content_template, "r") as f:
        content = f.read()
    # Substitute every configured placeholder in both templates.
    # (Row index is unused, hence the underscore.)
    for _, row in df.iterrows():
        value = row['Value']
        gatsby_config = gatsby_config.replace(row['Key'], value)
        if row['Key'] == '<BIOGRAPHY>':
            # The biography is injected as a list of non-empty paragraphs.
            value = list(filter(None, row['Value'].split('\n')))
        content = content.replace(row['Key'], str(value))
    # Write gatsby-config.js
    with open(var.gatsby_config, "w+") as f:
        f.write(gatsby_config)
    # Write content.ts
    with open(var.content, "w+") as f:
        f.write(content)
    # Generate Open Source data
    utils.export_xls_as_list(img_folder="{}/images/open_source/".format(pwd),
                             filename=var.open_source_xls,
                             filename_output="{}/src/data/{}.json".format(
                                 pwd, var.open_source_xls.split('.xls')[0].split('/')[-1]))
    # Generate Experience data
    utils.export_xls_as_list(img_folder="{}/images/experiences/".format(pwd),
                             filename=var.experiences_xls,
                             filename_output="{}/src/data/{}.json".format(
                                 pwd, var.experiences_xls.split('.xls')[0].split('/')[-1]))
    # Generate Certifications data
    utils.export_xls_as_list(img_folder="{}/images/certifications/".format(pwd),
                             filename=var.certifications_xls,
                             filename_output="{}/src/data/{}.json".format(
                                 pwd, var.certifications_xls.split('.xls')[0].split('/')[-1]))
import utils
import config
var = config.Variable()
df = pandas.read_excel(var.config)
pwd = var.pwd
def run():
    """Render site templates from the config spreadsheet and export sheet data.

    Replaces the ``<KEY>`` placeholders of the gatsby-config and content
    templates with values from the module-level ``df`` spreadsheet, writes the
    rendered files, then exports the open-source / experiences / certifications
    sheets to the JSON files consumed by the front end.
    """
    with open(var.gatsby_config_template, "r") as f:
        gatsby_config = f.read()
    with open(var.content_template, "r") as f:
        content = f.read()
    # Substitute every configured placeholder in both templates.
    for _, row in df.iterrows():
        value = row['Value']
        gatsby_config = gatsby_config.replace(row['Key'], value)
        if row['Key'] == '<BIOGRAPHY>':
            # The biography is injected as a list of non-empty paragraphs.
            value = list(filter(None, row['Value'].split('\n')))
        content = content.replace(row['Key'], str(value))
    with open(var.gatsby_config, "w+") as f:
        f.write(gatsby_config)
    with open(var.content, "w+") as f:
        f.write(content)
    utils.export_xls_as_list(img_folder="{}/images/open_source/".format(pwd),
                             filename=var.open_source_xls,
                             filename_output="{}/src/data/{}.json".format(
                                 pwd, var.open_source_xls.split('.xls')[0].split('/')[-1]))
    utils.export_xls_as_list(img_folder="{}/images/experiences/".format(pwd),
                             filename=var.experiences_xls,
                             filename_output="{}/src/data/{}.json".format(
                                 pwd, var.experiences_xls.split('.xls')[0].split('/')[-1]))
    utils.export_xls_as_list(img_folder="{}/images/certifications/".format(pwd),
                             filename=var.certifications_xls,
                             filename_output="{}/src/data/{}.json".format(
                                 pwd, var.certifications_xls.split('.xls')[0].split('/')[-1]))
f73ce3195424216f1e501720eee6b1f118b8a67f | 36,264 | py | Python | dymos/transcriptions/transcription_base.py | yonghoonlee/dymos | 602109eee4a1b061444dd2b45c7b1ed0ac1aa0f4 | [
"Apache-2.0"
] | null | null | null | dymos/transcriptions/transcription_base.py | yonghoonlee/dymos | 602109eee4a1b061444dd2b45c7b1ed0ac1aa0f4 | [
"Apache-2.0"
] | 9 | 2021-05-24T15:14:37.000Z | 2021-06-28T21:12:55.000Z | dymos/transcriptions/transcription_base.py | yonghoonlee/dymos | 602109eee4a1b061444dd2b45c7b1ed0ac1aa0f4 | [
"Apache-2.0"
] | null | null | null | from collections.abc import Sequence
import numpy as np
import openmdao.api as om
from .common import BoundaryConstraintComp, ControlGroup, PolynomialControlGroup, PathConstraintComp
from ..utils.constants import INF_BOUND
from ..utils.misc import get_rate_units, _unspecified
from ..utils.introspection import get_target_metadata, get_source_metadata
class TranscriptionBase(object):
"""
Base class for all dymos transcriptions.
Parameters
----------
**kwargs : dict
Dictionary of optional arguments.
"""
    def __init__(self, **kwargs):
        """
        Initialize the transcription: declare the options common to all
        transcriptions, run the subclass declaration hooks, apply the
        user-supplied option values, and build the grid.
        """
        self.grid_data = None
        self.options = om.OptionsDictionary()
        self.options.declare('num_segments', types=int, desc='Number of segments')
        self.options.declare('segment_ends', default=None, types=(Sequence, np.ndarray),
                             allow_none=True, desc='Locations of segment ends or None for equally '
                                                   'spaced segments')
        self.options.declare('order', default=3, types=(int, Sequence, np.ndarray),
                             desc='Order of the state transcription. The order of the control '
                                  'transcription is `order - 1`.')
        self.options.declare('compressed', default=True, types=bool,
                             desc='Use compressed transcription, meaning state and control values'
                                  'at segment boundaries are not duplicated on input. This '
                                  'implicitly enforces value continuity between segments but in '
                                  'some cases may make the problem more difficult to solve.')
        # Subclass hooks for declaring additional options before the
        # user-supplied kwargs are applied.
        self._declare_options()
        self.initialize()
        self.options.update(kwargs)
        self.init_grid()
        # Where to query var info (subclasses set this to the path of the
        # ODE system within the phase; see its use in configure_time).
        self._rhs_source = None
    def _declare_options(self):
        # Hook for subclasses to declare additional transcription options;
        # the base implementation declares nothing.
        pass
    def initialize(self):
        """
        Declare transcription options.
        """
        # No-op in the base class; concrete transcriptions override this.
        pass
def init_grid(self):
"""
Setup the GridData object for the Transcription.
"""
raise NotImplementedError('Transcription {0} does not implement method'
'init_grid.'.format(self.__class__.__name__))
    def setup_time(self, phase):
        """
        Setup up the time component and time extents for the phase.

        Parameters
        ----------
        phase : dymos.Phase
            The phase object to which this transcription instance applies.
        """
        time_options = phase.time_options
        # Warn about invalid options
        phase.check_time_options()
        # An IndepVarComp ('time_extents') supplies any time extent that is
        # not connected as an external input; configure_time adds the
        # actual t_initial / t_duration outputs to it.
        if not time_options['input_initial'] or not time_options['input_duration']:
            phase.add_subsystem('time_extents', om.IndepVarComp(),
                                promotes_outputs=['*'])
    def configure_time(self, phase):
        """
        Configure the inputs/outputs on the time component.

        Parameters
        ----------
        phase : dymos.Phase
            The phase object to which this transcription instance applies.
        """
        time_options = phase.time_options
        # Determine the time unit.  If unspecified, introspect it from the
        # time targets within the ODE system.
        if time_options['units'] in (None, _unspecified):
            if time_options['targets']:
                ode = phase._get_subsystem(self._rhs_source)
                _, time_options['units'] = get_target_metadata(ode, name='time',
                                                               user_targets=time_options['targets'],
                                                               user_units=time_options['units'],
                                                               user_shape='')
        time_units = time_options['units']
        indeps = []
        default_vals = {'t_initial': phase.time_options['initial_val'],
                        't_duration': phase.time_options['duration_val']}
        # Only provide IVC outputs for extents that are not external inputs.
        if not time_options['input_initial']:
            indeps.append('t_initial')
        if not time_options['input_duration']:
            indeps.append('t_duration')
        for var in indeps:
            phase.time_extents.add_output(var, val=default_vals[var], units=time_units)
        # t_initial is a design variable unless fixed or externally connected.
        # Unspecified bound sides become the +/-INF_BOUND sentinels.
        if not (time_options['input_initial'] or time_options['fix_initial']):
            lb, ub = time_options['initial_bounds']
            lb = -INF_BOUND if lb is None else lb
            ub = INF_BOUND if ub is None else ub
            phase.add_design_var('t_initial',
                                 lower=lb,
                                 upper=ub,
                                 scaler=time_options['initial_scaler'],
                                 adder=time_options['initial_adder'],
                                 ref0=time_options['initial_ref0'],
                                 ref=time_options['initial_ref'])
        # Likewise for t_duration.
        if not (time_options['input_duration'] or time_options['fix_duration']):
            lb, ub = time_options['duration_bounds']
            lb = -INF_BOUND if lb is None else lb
            ub = INF_BOUND if ub is None else ub
            phase.add_design_var('t_duration',
                                 lower=lb,
                                 upper=ub,
                                 scaler=time_options['duration_scaler'],
                                 adder=time_options['duration_adder'],
                                 ref0=time_options['duration_ref0'],
                                 ref=time_options['duration_ref'])
    def setup_controls(self, phase):
        """
        Setup the control group.

        Parameters
        ----------
        phase : dymos.Phase
            The phase object to which this transcription instance applies.
        """
        phase._check_control_options()
        # Only add the control group when the phase declares controls.
        if phase.control_options:
            control_group = ControlGroup(control_options=phase.control_options,
                                         time_units=phase.time_options['units'],
                                         grid_data=self.grid_data)
            phase.add_subsystem('control_group',
                                subsys=control_group)
    def _configure_state_introspection(self, state_name, options, phase):
        """
        Modifies state options in-place, automatically determining 'targets', 'units', and 'shape'
        if necessary.
        The precedence rules for the state shape and units are as follows:
        1. If the user has specified units and shape in the state options, use those.
        2a. If the user has not specified shape, and targets exist, then pull the shape from the targets.
        2b. If the user has not specified shape and no targets exist, then pull the shape from the rate source.
        2c. If shape cannot be inferred, assume (1,)
        3a. If the user has not specified units, first try to pull units from a target
        3b. If there are no targets, pull units from the rate source and multiply by time units.
        Parameters
        ----------
        state_name : str
            The name of the state variable of interest.
        options : OptionsDictionary
            The options dictionary for the state variable of interest.
        phase : dymos.Phase
            The phase associated with the transcription.
        """
        time_units = phase.time_options['units']
        user_targets = options['targets']
        user_units = options['units']
        user_shape = options['shape']
        need_units = user_units is _unspecified
        need_shape = user_shape in {None, _unspecified}
        ode = phase._get_subsystem(self._rhs_source)
        # Automatically determine targets of state if left _unspecified
        if user_targets is _unspecified:
            from dymos.utils.introspection import get_targets
            options['targets'] = get_targets(ode, state_name, user_targets)
        # 1. No introspection necessary
        if not(need_shape or need_units):
            return
        # 2. Attempt target introspection
        if options['targets']:
            try:
                from dymos.utils.introspection import get_state_target_metadata
                tgt_shape, tgt_units = get_state_target_metadata(ode, state_name, options['targets'],
                                                                 options['units'], options['shape'])
                options['shape'] = tgt_shape
                options['units'] = tgt_units
                return
            except ValueError:
                # Targets disagreed or provided no usable metadata; fall through to rate source.
                pass
        # 3. Attempt rate-source introspection
        rate_src = options['rate_source']
        rate_src_type = phase.classify_var(rate_src)
        if rate_src_type in ['time', 'time_phase']:
            rate_src_units = phase.time_options['units']
            rate_src_shape = (1,)
        elif rate_src_type == 'state':
            rate_src_units = phase.state_options[rate_src]['units']
            rate_src_shape = phase.state_options[rate_src]['shape']
        elif rate_src_type in ['input_control', 'indep_control']:
            rate_src_units = phase.control_options[rate_src]['units']
            rate_src_shape = phase.control_options[rate_src]['shape']
        elif rate_src_type in ['input_polynomial_control', 'indep_polynomial_control']:
            rate_src_units = phase.polynomial_control_options[rate_src]['units']
            rate_src_shape = phase.polynomial_control_options[rate_src]['shape']
        elif rate_src_type == 'parameter':
            rate_src_units = phase.parameter_options[rate_src]['units']
            rate_src_shape = phase.parameter_options[rate_src]['shape']
        elif rate_src_type == 'control_rate':
            # Strip the '_rate' suffix (5 chars) to recover the control name.
            control_name = rate_src[:-5]
            control = phase.control_options[control_name]
            rate_src_units = get_rate_units(control['units'], time_units, deriv=1)
            rate_src_shape = control['shape']
        elif rate_src_type == 'control_rate2':
            # Strip the '_rate2' suffix (6 chars) to recover the control name.
            control_name = rate_src[:-6]
            control = phase.control_options[control_name]
            rate_src_units = get_rate_units(control['units'], time_units, deriv=2)
            rate_src_shape = control['shape']
        elif rate_src_type == 'polynomial_control_rate':
            control_name = rate_src[:-5]
            control = phase.polynomial_control_options[control_name]
            rate_src_units = get_rate_units(control['units'], time_units, deriv=1)
            rate_src_shape = control['shape']
        elif rate_src_type == 'polynomial_control_rate2':
            control_name = rate_src[:-6]
            control = phase.polynomial_control_options[control_name]
            rate_src_units = get_rate_units(control['units'], time_units, deriv=2)
            rate_src_shape = control['shape']
        elif rate_src_type == 'ode':
            rate_src_shape, rate_src_units = get_source_metadata(ode,
                                                                 src=rate_src,
                                                                 user_units=options['units'],
                                                                 user_shape=options['shape'])
        else:
            # Unrecognized rate source: fall back to a scalar with no units.
            rate_src_shape = (1,)
            rate_src_units = None
        if need_shape:
            options['shape'] = rate_src_shape
        if need_units:
            # Integrating the rate source w.r.t. time multiplies its units by the time units.
            options['units'] = time_units if rate_src_units is None else f'{rate_src_units}*{time_units}'
        return
    def configure_controls(self, phase):
        """
        Configure the inputs/outputs for the controls.

        This base-class implementation is a no-op hook; transcriptions that support
        dynamic controls override it.

        Parameters
        ----------
        phase : dymos.Phase
            The phase object to which this transcription instance applies.
        """
        pass
def setup_polynomial_controls(self, phase):
"""
Adds the polynomial control group to the model if any polynomial controls are present.
Parameters
----------
phase : dymos.Phase
The phase object to which this transcription instance applies.
"""
if phase.polynomial_control_options:
sys = PolynomialControlGroup(grid_data=self.grid_data,
polynomial_control_options=phase.polynomial_control_options,
time_units=phase.time_options['units'])
phase.add_subsystem('polynomial_control_group', subsys=sys,
promotes_inputs=['*'], promotes_outputs=['*'])
def configure_polynomial_controls(self, phase):
"""
Configure the inputs/outputs for the polynomial controls.
Parameters
----------
phase : dymos.Phase
The phase object to which this transcription instance applies.
"""
if phase.polynomial_control_options:
phase.polynomial_control_group.configure_io()
def setup_parameters(self, phase):
"""
Sets input defaults for parameters and optionally adds design variables.
Parameters
----------
phase : dymos.Phase
The phase object to which this transcription instance applies.
"""
phase._check_parameter_options()
if phase.parameter_options:
for name, options in phase.parameter_options.items():
src_name = 'parameters:{0}'.format(name)
if options['opt']:
lb = -INF_BOUND if options['lower'] is None else options['lower']
ub = INF_BOUND if options['upper'] is None else options['upper']
phase.add_design_var(name=src_name,
lower=lb,
upper=ub,
scaler=options['scaler'],
adder=options['adder'],
ref0=options['ref0'],
ref=options['ref'])
    def configure_parameters(self, phase):
        """
        Configure parameter promotion.
        Parameters
        ----------
        phase : dymos.Phase
            The phase object to which this transcription instance applies.
        """
        if phase.parameter_options:
            ode = self._get_ode(phase)
            for name, options in phase.parameter_options.items():
                prom_name = f'parameters:{name}'
                # Get units and shape from targets when needed.
                shape, units, static = get_target_metadata(ode, name=name,
                                                           user_targets=options['targets'],
                                                           user_shape=options['shape'],
                                                           user_units=options['units'],
                                                           user_static_target=options['static_target'])
                # Write the introspected metadata back into the options so later phases see it.
                options['units'] = units
                options['shape'] = shape
                options['static_target'] = static
                for tgts, src_idxs in self.get_parameter_connections(name, phase):
                    for pathname in tgts:
                        # Split 'subsystem.rest.of.path' into the immediate child and its interior path.
                        parts = pathname.split('.')
                        sub_sys = parts[0]
                        tgt_var = '.'.join(parts[1:])
                        if not options['static_target']:
                            # Dynamic targets are fanned out to the nodes via src_indices.
                            phase.promotes(sub_sys, inputs=[(tgt_var, prom_name)],
                                           src_indices=src_idxs, flat_src_indices=True)
                        else:
                            # Static targets take the parameter value directly, no src_indices.
                            phase.promotes(sub_sys, inputs=[(tgt_var, prom_name)])
                val = options['val']
                _shape = options['shape']
                # Broadcast the user-provided value up to the introspected shape.
                shaped_val = np.broadcast_to(val, _shape)
                phase.set_input_defaults(name=prom_name,
                                         val=shaped_val,
                                         units=options['units'])
def setup_states(self, phase):
"""
Setup the states for this transcription.
Parameters
----------
phase : dymos.Phase
The phase object to which this transcription instance applies.
"""
raise NotImplementedError('Transcription {0} does not implement method '
'setup_states.'.format(self.__class__.__name__))
def setup_ode(self, phase):
"""
Setup the ode for this transcription.
Parameters
----------
phase : dymos.Phase
The phase object to which this transcription instance applies.
"""
raise NotImplementedError('Transcription {0} does not implement method '
'setup_ode.'.format(self.__class__.__name__))
def setup_timeseries_outputs(self, phase):
"""
Setup the timeseries for this transcription.
Parameters
----------
phase : dymos.Phase
The phase object to which this transcription instance applies.
"""
raise NotImplementedError('Transcription {0} does not implement method '
'setup_timeseries_outputs.'.format(self.__class__.__name__))
def setup_boundary_constraints(self, loc, phase):
"""
Setup the boundary constraints.
Adds BoundaryConstraintComp for initial and/or final boundary constraints if necessary
and issues appropriate connections.
Parameters
----------
loc : str
The kind of boundary constraints being setup. Must be one of 'initial' or 'final'.
phase : dymos.Phase
The phase object to which this transcription instance applies.
"""
if loc not in ('initial', 'final'):
raise ValueError('loc must be one of \'initial\' or \'final\'.')
bc_dict = phase._initial_boundary_constraints \
if loc == 'initial' else phase._final_boundary_constraints
if bc_dict:
phase.add_subsystem(f'{loc}_boundary_constraints',
subsys=BoundaryConstraintComp(loc=loc))
def configure_boundary_constraints(self, loc, phase):
"""
Configures the boundary constraints.
Adds BoundaryConstraintComp for initial and/or final boundary constraints if necessary
and issues appropriate connections.
Parameters
----------
loc : str
The kind of boundary constraints being setup. Must be one of 'initial' or 'final'.
phase : dymos.Phase
The phase object to which this transcription instance applies.
"""
bc_dict = phase._initial_boundary_constraints \
if loc == 'initial' else phase._final_boundary_constraints
sys_name = f'{loc}_boundary_constraints'
bc_comp = phase._get_subsystem(sys_name)
for var, options in bc_dict.items():
con_name = options['constraint_name']
_, shape, units, linear = self._get_boundary_constraint_src(var, loc, phase)
if options['indices'] is not None:
# Sliced shape.
con_shape = (len(options['indices']), )
# Indices provided, make sure lower/upper/equals have shape of the indices.
if options['lower'] and not np.isscalar(options['lower']) and \
np.asarray(options['lower']).shape != con_shape:
raise ValueError('The lower bounds of boundary constraint on {0} are not '
'compatible with its shape, and no indices were '
'provided.'.format(var))
if options['upper'] and not np.isscalar(options['upper']) and \
np.asarray(options['upper']).shape != con_shape:
raise ValueError('The upper bounds of boundary constraint on {0} are not '
'compatible with its shape, and no indices were '
'provided.'.format(var))
if options['equals'] and not np.isscalar(options['equals']) and \
np.asarray(options['equals']).shape != con_shape:
raise ValueError('The equality boundary constraint value on {0} is not '
'compatible the provided indices. Provide them as a '
'flat array with the same size as indices.'.format(var))
else:
# Indices not provided, make sure lower/upper/equals have shape of source.
if 'lower' in options and options['lower'] is not None and \
not np.isscalar(options['lower']) and np.asarray(options['lower']).shape != shape:
raise ValueError('The lower bounds of boundary constraint on {0} are not '
'compatible with its shape, and no indices were '
'provided. Expected a shape of {1} but given shape '
'is {2}'.format(var, shape, np.asarray(options['lower']).shape))
if 'upper' in options and options['upper'] is not None and \
not np.isscalar(options['upper']) and np.asarray(options['upper']).shape != shape:
raise ValueError('The upper bounds of boundary constraint on {0} are not '
'compatible with its shape, and no indices were '
'provided. Expected a shape of {1} but given shape '
'is {2}'.format(var, shape, np.asarray(options['upper']).shape))
if 'equals' in options and options['equals'] is not None and \
not np.isscalar(options['equals']) and np.asarray(options['equals']).shape != shape:
raise ValueError('The equality boundary constraint value on {0} is not '
'compatible with its shape, and no indices were '
'provided. Expected a shape of {1} but given shape '
'is {2}'.format(var, shape, np.asarray(options['equals']).shape))
# Constraint options are a copy of options with constraint_name key removed.
con_options = options.copy()
con_options.pop('constraint_name')
# By now, all possible constraint target shapes should have been introspected.
con_options['shape'] = options['shape'] = shape
# If user overrides the introspected unit, then change the unit on the add_constraint call.
con_units = options['units']
con_options['units'] = units if con_units is None else con_units
con_options['linear'] = linear
bc_comp._add_constraint(con_name, **con_options)
if bc_comp:
bc_comp.configure_io()
for var, options in bc_dict.items():
con_name = options['constraint_name']
src, shape, units, linear = self._get_boundary_constraint_src(var, loc, phase)
size = np.prod(shape)
# Build the correct src_indices regardless of shape
if loc == 'initial':
src_idxs = np.arange(size, dtype=int).reshape(shape)
else:
src_idxs = np.arange(-size, 0, dtype=int).reshape(shape)
src_idxs = (src_idxs,)
if 'parameters:' in src:
sys_name = '{0}_boundary_constraints'.format(loc)
tgt_name = '{0}_value_in:{1}'.format(loc, con_name)
phase.promotes(sys_name, inputs=[(tgt_name, src)],
src_indices=src_idxs, flat_src_indices=True)
else:
phase.connect(src, f'{loc}_boundary_constraints.{loc}_value_in:{con_name}',
src_indices=src_idxs, flat_src_indices=True)
def setup_path_constraints(self, phase):
"""
Add a path constraint component if necessary.
Parameters
----------
phase : dymos.Phase
The phase object to which this transcription instance applies.
"""
gd = self.grid_data
if phase._path_constraints:
path_comp = PathConstraintComp(num_nodes=gd.num_nodes)
phase.add_subsystem('path_constraints', subsys=path_comp)
def configure_path_constraints(self, phase):
"""
Handle the common operations for configuration of the path constraints.
Parameters
----------
phase : dymos.Phase
The phase object to which this transcription instance applies.
"""
time_units = phase.time_options['units']
for var, options in phase._path_constraints.items():
constraint_kwargs = options.copy()
con_units = constraint_kwargs['units'] = options.get('units', None)
con_name = constraint_kwargs.pop('constraint_name')
# Determine the path to the variable which we will be constraining
# This is more complicated for path constraints since, for instance,
# a single state variable has two sources which must be connected to
# the path component.
var_type = phase.classify_var(var)
if var_type == 'time':
constraint_kwargs['shape'] = (1,)
constraint_kwargs['units'] = time_units if con_units is None else con_units
constraint_kwargs['linear'] = True
elif var_type == 'time_phase':
constraint_kwargs['shape'] = (1,)
constraint_kwargs['units'] = time_units if con_units is None else con_units
constraint_kwargs['linear'] = True
elif var_type == 'state':
state_shape = phase.state_options[var]['shape']
state_units = phase.state_options[var]['units']
constraint_kwargs['shape'] = state_shape
constraint_kwargs['units'] = state_units if con_units is None else con_units
constraint_kwargs['linear'] = False
elif var_type == 'indep_control':
control_shape = phase.control_options[var]['shape']
control_units = phase.control_options[var]['units']
constraint_kwargs['shape'] = control_shape
constraint_kwargs['units'] = control_units if con_units is None else con_units
constraint_kwargs['linear'] = True
elif var_type == 'input_control':
control_shape = phase.control_options[var]['shape']
control_units = phase.control_options[var]['units']
constraint_kwargs['shape'] = control_shape
constraint_kwargs['units'] = control_units if con_units is None else con_units
constraint_kwargs['linear'] = True
elif var_type == 'indep_polynomial_control':
control_shape = phase.polynomial_control_options[var]['shape']
control_units = phase.polynomial_control_options[var]['units']
constraint_kwargs['shape'] = control_shape
constraint_kwargs['units'] = control_units if con_units is None else con_units
constraint_kwargs['linear'] = False
elif var_type == 'input_polynomial_control':
control_shape = phase.polynomial_control_options[var]['shape']
control_units = phase.polynomial_control_options[var]['units']
constraint_kwargs['shape'] = control_shape
constraint_kwargs['units'] = control_units if con_units is None else con_units
constraint_kwargs['linear'] = False
elif var_type == 'control_rate':
control_name = var[:-5]
control_shape = phase.control_options[control_name]['shape']
control_units = phase.control_options[control_name]['units']
constraint_kwargs['shape'] = control_shape
constraint_kwargs['units'] = get_rate_units(control_units, time_units, deriv=1) \
if con_units is None else con_units
elif var_type == 'control_rate2':
control_name = var[:-6]
control_shape = phase.control_options[control_name]['shape']
control_units = phase.control_options[control_name]['units']
constraint_kwargs['shape'] = control_shape
constraint_kwargs['units'] = get_rate_units(control_units, time_units, deriv=2) \
if con_units is None else con_units
elif var_type == 'polynomial_control_rate':
control_name = var[:-5]
control_shape = phase.polynomial_control_options[control_name]['shape']
control_units = phase.polynomial_control_options[control_name]['units']
constraint_kwargs['shape'] = control_shape
constraint_kwargs['units'] = get_rate_units(control_units, time_units, deriv=1) \
if con_units is None else con_units
elif var_type == 'polynomial_control_rate2':
control_name = var[:-6]
control_shape = phase.polynomial_control_options[control_name]['shape']
control_units = phase.polynomial_control_options[control_name]['units']
constraint_kwargs['shape'] = control_shape
constraint_kwargs['units'] = get_rate_units(control_units, time_units, deriv=2) \
if con_units is None else con_units
else:
# Failed to find variable, assume it is in the ODE. This requires introspection.
ode = phase._get_subsystem(self._rhs_source)
shape, units = get_source_metadata(ode, src=var,
user_units=options['units'],
user_shape=options['shape'])
constraint_kwargs['linear'] = False
constraint_kwargs['shape'] = shape
constraint_kwargs['units'] = units
# Propagate the introspected shape back into the options dict.
# Some transcriptions use this later.
options['shape'] = constraint_kwargs['shape']
constraint_kwargs.pop('constraint_name', None)
phase._get_subsystem('path_constraints')._add_path_constraint_configure(con_name, **constraint_kwargs)
def configure_objective(self, phase):
"""
Find the path of the objective(s) and add them.
Parameters
----------
phase : dymos.Phase
The phase object to which this transcription instance applies.
"""
for name, options in phase._objectives.items():
index = options['index']
loc = options['loc']
obj_path, shape, units, _ = self._get_boundary_constraint_src(name, loc, phase)
shape = options['shape'] if shape is None else shape
size = int(np.prod(shape))
if size > 1 and index is None:
raise ValueError('Objective variable is non-scaler {0} but no index specified '
'for objective'.format(shape))
idx = 0 if index is None else index
if idx < 0:
idx = size + idx
if idx >= size or idx < -size:
raise ValueError('Objective index={0}, but the shape of the objective '
'variable is {1}'.format(index, shape))
if loc == 'final':
obj_index = -size + idx
elif loc == 'initial':
obj_index = idx
else:
raise ValueError('Invalid value for objective loc: {0}. Must be '
'one of \'initial\' or \'final\'.'.format(loc))
from ..phase import Phase
super(Phase, phase).add_objective(obj_path, ref=options['ref'], ref0=options['ref0'],
index=obj_index, flat_indices=True, adder=options['adder'],
scaler=options['scaler'],
parallel_deriv_color=options['parallel_deriv_color'])
def _get_boundary_constraint_src(self, name, loc, phase):
raise NotImplementedError('Transcription {0} does not implement method'
'_get_boundary_constraint_source.'.format(self.__class__.__name__))
def _get_rate_source_path(self, name, loc, phase):
raise NotImplementedError('Transcription {0} does not implement method'
'_get_rate_source_path.'.format(self.__class__.__name__))
def _get_ode(self, phase):
"""
Returns an instance of the ODE used in the phase that can be interrogated for IO metadata.
Parameters
----------
phase : dm.Phase
The Phase instance to which this transcription applies
Returns
-------
ode : om.System
The OpenMDAO system which serves as the ODE for the given Phase.
"""
return phase._get_subsystem(self._rhs_source)
def get_parameter_connections(self, name, phase):
"""
Returns info about a parameter's target connections in the phase.
Parameters
----------
name : str
The name of the parameter for which connection information is desired.
phase : dymos.Phase
The phase object to which this transcription applies.
Returns
-------
list of (paths, indices)
A list containing a tuple of target paths and corresponding src_indices to which the
given design variable is to be connected.
"""
raise NotImplementedError('Transcription {0} does not implement method '
'get_parameter_connections.'.format(self.__class__.__name__))
def is_static_ode_output(self, var, phase, num_nodes):
"""
Test whether the given output is a static output of the ODE.
A variable is considered static if it's first dimension is different than the
number of nodes in the ODE.
Parameters
----------
var : str
The ode-relative path of the variable of interest.
phase : dymos.Phase
The phase to which this transcription applies.
num_nodes : int
The number of nodes in the ODE.
Returns
-------
bool
True if the given variable is a static output, otherwise False if it is dynamic.
Raises
------
KeyError
KeyError is raised if the given variable isn't present in the ode outputs.
"""
ode = phase._get_subsystem(self._rhs_source)
ode_outputs = {opts['prom_name']: opts for (k, opts) in
ode.get_io_metadata(iotypes=('output',), get_remote=True).items()}
ode_shape = ode_outputs[var]['shape']
return ode_shape[0] != num_nodes
def _requires_continuity_constraints(self, phase):
"""
Tests whether state and/or control and/or control rate continuity are required.
Parameters
----------
phase : dymos.Phase
The phase to which this transcription applies.
Returns
-------
state_continuity : bool
True if any state continuity is required to be enforced.
control_continuity : bool
True if any control value continuity is required to be enforced.
control_rate_continuity : bool
True if any control rate continuity is required to be enforced.
"""
raise NotImplementedError(f'The transcription {self.__class__} does not provide an '
f'implementation of _requires_continuity_constraints')
| 43.691566 | 114 | 0.566016 | from collections.abc import Sequence
import numpy as np
import openmdao.api as om
from .common import BoundaryConstraintComp, ControlGroup, PolynomialControlGroup, PathConstraintComp
from ..utils.constants import INF_BOUND
from ..utils.misc import get_rate_units, _unspecified
from ..utils.introspection import get_target_metadata, get_source_metadata
class TranscriptionBase(object):
    def __init__(self, **kwargs):
        """
        Initialize the transcription instance and declare its common options.

        Parameters
        ----------
        **kwargs : dict
            Keyword arguments used to override the declared option defaults.
        """
        self.grid_data = None
        self.options = om.OptionsDictionary()
        self.options.declare('num_segments', types=int, desc='Number of segments')
        self.options.declare('segment_ends', default=None, types=(Sequence, np.ndarray),
                             allow_none=True, desc='Locations of segment ends or None for equally '
                                                   'spaced segments')
        self.options.declare('order', default=3, types=(int, Sequence, np.ndarray),
                             desc='Order of the state transcription. The order of the control '
                                  'transcription is `order - 1`.')
        self.options.declare('compressed', default=True, types=bool,
                             desc='Use compressed transcription, meaning state and control values'
                                  'at segment boundaries are not duplicated on input. This '
                                  'implicitly enforces value continuity between segments but in '
                                  'some cases may make the problem more difficult to solve.')
        # Subclass hooks run before user-supplied option values are applied.
        self._declare_options()
        self.initialize()
        self.options.update(kwargs)
        self.init_grid()
        # Subclasses set this to the pathname of the system serving as the ODE.
        self._rhs_source = None
    def _declare_options(self):
        """Hook for subclasses to declare transcription-specific options; base is a no-op."""
        pass
    def initialize(self):
        """Hook for subclass initialization prior to option updates; base is a no-op."""
        pass
def init_grid(self):
raise NotImplementedError('Transcription {0} does not implement method'
'init_grid.'.format(self.__class__.__name__))
def setup_time(self, phase):
time_options = phase.time_options
phase.check_time_options()
if not time_options['input_initial'] or not time_options['input_duration']:
phase.add_subsystem('time_extents', om.IndepVarComp(),
promotes_outputs=['*'])
    def configure_time(self, phase):
        """
        Configures the time component of the phase: units, default values, and design variables.

        Parameters
        ----------
        phase : dymos.Phase
            The phase object to which this transcription instance applies.
        """
        time_options = phase.time_options
        # If the user did not specify time units, try to pull them from the ODE time targets.
        if time_options['units'] in (None, _unspecified):
            if time_options['targets']:
                ode = phase._get_subsystem(self._rhs_source)
                _, time_options['units'] = get_target_metadata(ode, name='time',
                                                               user_targets=time_options['targets'],
                                                               user_units=time_options['units'],
                                                               user_shape='')
        time_units = time_options['units']
        indeps = []
        default_vals = {'t_initial': phase.time_options['initial_val'],
                        't_duration': phase.time_options['duration_val']}
        # Only create independent outputs for time quantities not connected from outside.
        if not time_options['input_initial']:
            indeps.append('t_initial')
        if not time_options['input_duration']:
            indeps.append('t_duration')
        for var in indeps:
            phase.time_extents.add_output(var, val=default_vals[var], units=time_units)
        # t_initial / t_duration become design variables unless they are fixed or inputs.
        if not (time_options['input_initial'] or time_options['fix_initial']):
            lb, ub = time_options['initial_bounds']
            # Unbounded sides fall back to the sentinel infinite bound.
            lb = -INF_BOUND if lb is None else lb
            ub = INF_BOUND if ub is None else ub
            phase.add_design_var('t_initial',
                                 lower=lb,
                                 upper=ub,
                                 scaler=time_options['initial_scaler'],
                                 adder=time_options['initial_adder'],
                                 ref0=time_options['initial_ref0'],
                                 ref=time_options['initial_ref'])
        if not (time_options['input_duration'] or time_options['fix_duration']):
            lb, ub = time_options['duration_bounds']
            lb = -INF_BOUND if lb is None else lb
            ub = INF_BOUND if ub is None else ub
            phase.add_design_var('t_duration',
                                 lower=lb,
                                 upper=ub,
                                 scaler=time_options['duration_scaler'],
                                 adder=time_options['duration_adder'],
                                 ref0=time_options['duration_ref0'],
                                 ref=time_options['duration_ref'])
    def setup_controls(self, phase):
        """
        Setup the control group.

        Parameters
        ----------
        phase : dymos.Phase
            The phase object to which this transcription instance applies.
        """
        phase._check_control_options()
        if phase.control_options:
            control_group = ControlGroup(control_options=phase.control_options,
                                         time_units=phase.time_options['units'],
                                         grid_data=self.grid_data)
            phase.add_subsystem('control_group',
                                subsys=control_group)
    def _configure_state_introspection(self, state_name, options, phase):
        """
        Modify state options in-place, determining 'targets', 'units', and 'shape' if necessary.

        Precedence: user-specified values first, then metadata from the state's targets,
        then metadata from the state's rate source, finally a (1,) / unitless fallback.

        Parameters
        ----------
        state_name : str
            The name of the state variable of interest.
        options : OptionsDictionary
            The options dictionary for the state variable of interest.
        phase : dymos.Phase
            The phase associated with the transcription.
        """
        time_units = phase.time_options['units']
        user_targets = options['targets']
        user_units = options['units']
        user_shape = options['shape']
        need_units = user_units is _unspecified
        need_shape = user_shape in {None, _unspecified}
        ode = phase._get_subsystem(self._rhs_source)
        # Automatically determine targets of the state if left _unspecified.
        if user_targets is _unspecified:
            from dymos.utils.introspection import get_targets
            options['targets'] = get_targets(ode, state_name, user_targets)
        # 1. No introspection necessary.
        if not(need_shape or need_units):
            return
        # 2. Attempt to pull shape/units from the state's targets.
        if options['targets']:
            try:
                from dymos.utils.introspection import get_state_target_metadata
                tgt_shape, tgt_units = get_state_target_metadata(ode, state_name, options['targets'],
                                                                 options['units'], options['shape'])
                options['shape'] = tgt_shape
                options['units'] = tgt_units
                return
            except ValueError:
                # Targets provided no usable metadata; fall back to the rate source.
                pass
        # 3. Attempt rate-source introspection.
        rate_src = options['rate_source']
        rate_src_type = phase.classify_var(rate_src)
        if rate_src_type in ['time', 'time_phase']:
            rate_src_units = phase.time_options['units']
            rate_src_shape = (1,)
        elif rate_src_type == 'state':
            rate_src_units = phase.state_options[rate_src]['units']
            rate_src_shape = phase.state_options[rate_src]['shape']
        elif rate_src_type in ['input_control', 'indep_control']:
            rate_src_units = phase.control_options[rate_src]['units']
            rate_src_shape = phase.control_options[rate_src]['shape']
        elif rate_src_type in ['input_polynomial_control', 'indep_polynomial_control']:
            rate_src_units = phase.polynomial_control_options[rate_src]['units']
            rate_src_shape = phase.polynomial_control_options[rate_src]['shape']
        elif rate_src_type == 'parameter':
            rate_src_units = phase.parameter_options[rate_src]['units']
            rate_src_shape = phase.parameter_options[rate_src]['shape']
        elif rate_src_type == 'control_rate':
            # Strip the '_rate' suffix (5 chars) to recover the control name.
            control_name = rate_src[:-5]
            control = phase.control_options[control_name]
            rate_src_units = get_rate_units(control['units'], time_units, deriv=1)
            rate_src_shape = control['shape']
        elif rate_src_type == 'control_rate2':
            # Strip the '_rate2' suffix (6 chars) to recover the control name.
            control_name = rate_src[:-6]
            control = phase.control_options[control_name]
            rate_src_units = get_rate_units(control['units'], time_units, deriv=2)
            rate_src_shape = control['shape']
        elif rate_src_type == 'polynomial_control_rate':
            control_name = rate_src[:-5]
            control = phase.polynomial_control_options[control_name]
            rate_src_units = get_rate_units(control['units'], time_units, deriv=1)
            rate_src_shape = control['shape']
        elif rate_src_type == 'polynomial_control_rate2':
            control_name = rate_src[:-6]
            control = phase.polynomial_control_options[control_name]
            rate_src_units = get_rate_units(control['units'], time_units, deriv=2)
            rate_src_shape = control['shape']
        elif rate_src_type == 'ode':
            rate_src_shape, rate_src_units = get_source_metadata(ode,
                                                                 src=rate_src,
                                                                 user_units=options['units'],
                                                                 user_shape=options['shape'])
        else:
            # Unrecognized rate source: fall back to a scalar with no units.
            rate_src_shape = (1,)
            rate_src_units = None
        if need_shape:
            options['shape'] = rate_src_shape
        if need_units:
            # Integrating the rate source w.r.t. time multiplies its units by the time units.
            options['units'] = time_units if rate_src_units is None else f'{rate_src_units}*{time_units}'
        return
    def configure_controls(self, phase):
        """Configure the inputs/outputs for the controls; no-op hook in the base class."""
        pass
    def setup_polynomial_controls(self, phase):
        """
        Adds the polynomial control group to the model if any polynomial controls are present.

        Parameters
        ----------
        phase : dymos.Phase
            The phase object to which this transcription instance applies.
        """
        if phase.polynomial_control_options:
            sys = PolynomialControlGroup(grid_data=self.grid_data,
                                         polynomial_control_options=phase.polynomial_control_options,
                                         time_units=phase.time_options['units'])
            phase.add_subsystem('polynomial_control_group', subsys=sys,
                                promotes_inputs=['*'], promotes_outputs=['*'])
    def configure_polynomial_controls(self, phase):
        """
        Configure the inputs/outputs for the polynomial controls.

        Parameters
        ----------
        phase : dymos.Phase
            The phase object to which this transcription instance applies.
        """
        if phase.polynomial_control_options:
            phase.polynomial_control_group.configure_io()
    def setup_parameters(self, phase):
        """
        Sets input defaults for parameters and optionally adds design variables.

        Parameters
        ----------
        phase : dymos.Phase
            The phase object to which this transcription instance applies.
        """
        phase._check_parameter_options()
        if phase.parameter_options:
            for name, options in phase.parameter_options.items():
                src_name = 'parameters:{0}'.format(name)
                if options['opt']:
                    # Unbounded sides fall back to the sentinel infinite bound.
                    lb = -INF_BOUND if options['lower'] is None else options['lower']
                    ub = INF_BOUND if options['upper'] is None else options['upper']
                    phase.add_design_var(name=src_name,
                                         lower=lb,
                                         upper=ub,
                                         scaler=options['scaler'],
                                         adder=options['adder'],
                                         ref0=options['ref0'],
                                         ref=options['ref'])
    def configure_parameters(self, phase):
        """
        Configure parameter promotion.

        Parameters
        ----------
        phase : dymos.Phase
            The phase object to which this transcription instance applies.
        """
        if phase.parameter_options:
            ode = self._get_ode(phase)
            for name, options in phase.parameter_options.items():
                prom_name = f'parameters:{name}'
                # Introspect units/shape/staticness from the parameter's targets when needed.
                shape, units, static = get_target_metadata(ode, name=name,
                                                           user_targets=options['targets'],
                                                           user_shape=options['shape'],
                                                           user_units=options['units'],
                                                           user_static_target=options['static_target'])
                options['units'] = units
                options['shape'] = shape
                options['static_target'] = static
                for tgts, src_idxs in self.get_parameter_connections(name, phase):
                    for pathname in tgts:
                        # Split 'subsystem.rest.of.path' into the immediate child and interior path.
                        parts = pathname.split('.')
                        sub_sys = parts[0]
                        tgt_var = '.'.join(parts[1:])
                        if not options['static_target']:
                            # Dynamic targets are fanned out to the nodes via src_indices.
                            phase.promotes(sub_sys, inputs=[(tgt_var, prom_name)],
                                           src_indices=src_idxs, flat_src_indices=True)
                        else:
                            phase.promotes(sub_sys, inputs=[(tgt_var, prom_name)])
                val = options['val']
                _shape = options['shape']
                # Broadcast the user-provided value up to the introspected shape.
                shaped_val = np.broadcast_to(val, _shape)
                phase.set_input_defaults(name=prom_name,
                                         val=shaped_val,
                                         units=options['units'])
    def setup_states(self, phase):
        """
        Setup the states for this transcription.

        Raises
        ------
        NotImplementedError
            Subclasses must implement this method.
        """
        raise NotImplementedError('Transcription {0} does not implement method '
                                  'setup_states.'.format(self.__class__.__name__))
    def setup_ode(self, phase):
        """
        Setup the ode for this transcription.

        Raises
        ------
        NotImplementedError
            Subclasses must implement this method.
        """
        raise NotImplementedError('Transcription {0} does not implement method '
                                  'setup_ode.'.format(self.__class__.__name__))
    def setup_timeseries_outputs(self, phase):
        """
        Setup the timeseries for this transcription.

        Raises
        ------
        NotImplementedError
            Subclasses must implement this method.
        """
        raise NotImplementedError('Transcription {0} does not implement method '
                                  'setup_timeseries_outputs.'.format(self.__class__.__name__))
    def setup_boundary_constraints(self, loc, phase):
        """
        Setup the boundary constraints.

        Adds a BoundaryConstraintComp if any constraints exist at the given boundary.

        Parameters
        ----------
        loc : str
            The kind of boundary constraints being setup. Must be one of 'initial' or 'final'.
        phase : dymos.Phase
            The phase object to which this transcription instance applies.
        """
        if loc not in ('initial', 'final'):
            raise ValueError('loc must be one of \'initial\' or \'final\'.')
        bc_dict = phase._initial_boundary_constraints \
            if loc == 'initial' else phase._final_boundary_constraints
        if bc_dict:
            phase.add_subsystem(f'{loc}_boundary_constraints',
                                subsys=BoundaryConstraintComp(loc=loc))
def configure_boundary_constraints(self, loc, phase):
    """
    Configure the boundary constraints at the given location of the phase.

    Validates user-provided lower/upper/equals bounds against the constrained
    variable's shape, registers each constraint on the BoundaryConstraintComp,
    and connects (or promotes) the constraint source into the component.

    Parameters
    ----------
    loc : str
        Either 'initial' or 'final'.
    phase : dymos.Phase
        The phase to which this transcription applies.
    """
    bc_dict = phase._initial_boundary_constraints \
        if loc == 'initial' else phase._final_boundary_constraints

    sys_name = f'{loc}_boundary_constraints'
    bc_comp = phase._get_subsystem(sys_name)

    for var, options in bc_dict.items():
        con_name = options['constraint_name']
        _, shape, units, linear = self._get_boundary_constraint_src(var, loc, phase)

        if options['indices'] is not None:
            # Constraint applies to a subset of elements: bounds must be flat
            # arrays sized like the index list.
            con_shape = (len(options['indices']), )
            # NOTE(review): these three checks test truthiness
            # ("options['lower'] and ..."), so a bound of exactly 0 skips
            # validation, unlike the explicit "is not None" tests in the
            # else-branch below -- confirm this asymmetry is intended.
            if options['lower'] and not np.isscalar(options['lower']) and \
                    np.asarray(options['lower']).shape != con_shape:
                raise ValueError('The lower bounds of boundary constraint on {0} are not '
                                 'compatible with its shape, and no indices were '
                                 'provided.'.format(var))

            if options['upper'] and not np.isscalar(options['upper']) and \
                    np.asarray(options['upper']).shape != con_shape:
                raise ValueError('The upper bounds of boundary constraint on {0} are not '
                                 'compatible with its shape, and no indices were '
                                 'provided.'.format(var))

            if options['equals'] and not np.isscalar(options['equals']) and \
                    np.asarray(options['equals']).shape != con_shape:
                raise ValueError('The equality boundary constraint value on {0} is not '
                                 'compatible the provided indices. Provide them as a '
                                 'flat array with the same size as indices.'.format(var))
        else:
            # No indices: bounds must be scalar or match the variable shape.
            if 'lower' in options and options['lower'] is not None and \
                    not np.isscalar(options['lower']) and np.asarray(options['lower']).shape != shape:
                raise ValueError('The lower bounds of boundary constraint on {0} are not '
                                 'compatible with its shape, and no indices were '
                                 'provided. Expected a shape of {1} but given shape '
                                 'is {2}'.format(var, shape, np.asarray(options['lower']).shape))

            if 'upper' in options and options['upper'] is not None and \
                    not np.isscalar(options['upper']) and np.asarray(options['upper']).shape != shape:
                raise ValueError('The upper bounds of boundary constraint on {0} are not '
                                 'compatible with its shape, and no indices were '
                                 'provided. Expected a shape of {1} but given shape '
                                 'is {2}'.format(var, shape, np.asarray(options['upper']).shape))

            if 'equals' in options and options['equals'] is not None and \
                    not np.isscalar(options['equals']) and np.asarray(options['equals']).shape != shape:
                raise ValueError('The equality boundary constraint value on {0} is not '
                                 'compatible with its shape, and no indices were '
                                 'provided. Expected a shape of {1} but given shape '
                                 'is {2}'.format(var, shape, np.asarray(options['equals']).shape))

        # Register the constraint on the component; units fall back to the
        # source units unless the user supplied their own.
        con_options = options.copy()
        con_options.pop('constraint_name')
        con_options['shape'] = options['shape'] = shape
        con_units = options['units']
        con_options['units'] = units if con_units is None else con_units
        con_options['linear'] = linear

        bc_comp._add_constraint(con_name, **con_options)

    if bc_comp:
        bc_comp.configure_io()

    # Second pass: wire each constraint source into the component, picking the
    # first `size` elements (initial) or last `size` elements (final).
    for var, options in bc_dict.items():
        con_name = options['constraint_name']

        src, shape, units, linear = self._get_boundary_constraint_src(var, loc, phase)

        size = np.prod(shape)

        if loc == 'initial':
            src_idxs = np.arange(size, dtype=int).reshape(shape)
        else:
            src_idxs = np.arange(-size, 0, dtype=int).reshape(shape)
        src_idxs = (src_idxs,)

        if 'parameters:' in src:
            # Parameters are promoted inputs, so the constraint input is
            # promoted onto the same name instead of connected.
            sys_name = '{0}_boundary_constraints'.format(loc)
            tgt_name = '{0}_value_in:{1}'.format(loc, con_name)
            phase.promotes(sys_name, inputs=[(tgt_name, src)],
                           src_indices=src_idxs, flat_src_indices=True)
        else:
            phase.connect(src, f'{loc}_boundary_constraints.{loc}_value_in:{con_name}',
                          src_indices=src_idxs, flat_src_indices=True)
def setup_path_constraints(self, phase):
    """
    Add a PathConstraintComp to the phase if any path constraints are declared.

    Parameters
    ----------
    phase : dymos.Phase
        The phase to which this transcription applies.
    """
    grid = self.grid_data
    if phase._path_constraints:
        phase.add_subsystem('path_constraints',
                            subsys=PathConstraintComp(num_nodes=grid.num_nodes))
def configure_path_constraints(self, phase):
    """
    Register each declared path constraint with the phase's PathConstraintComp.

    For each constrained variable, the shape/units/linearity are resolved from
    the phase's variable metadata (time, states, controls, rates) or, for
    arbitrary ODE outputs, from the ODE system itself.

    Parameters
    ----------
    phase : dymos.Phase
        The phase to which this transcription applies.
    """
    time_units = phase.time_options['units']

    for var, options in phase._path_constraints.items():
        constraint_kwargs = options.copy()
        con_units = constraint_kwargs['units'] = options.get('units', None)
        con_name = constraint_kwargs.pop('constraint_name')

        # Classify the variable to know where its metadata lives.
        var_type = phase.classify_var(var)

        if var_type == 'time':
            constraint_kwargs['shape'] = (1,)
            constraint_kwargs['units'] = time_units if con_units is None else con_units
            constraint_kwargs['linear'] = True
        elif var_type == 'time_phase':
            constraint_kwargs['shape'] = (1,)
            constraint_kwargs['units'] = time_units if con_units is None else con_units
            constraint_kwargs['linear'] = True
        elif var_type == 'state':
            state_shape = phase.state_options[var]['shape']
            state_units = phase.state_options[var]['units']
            constraint_kwargs['shape'] = state_shape
            constraint_kwargs['units'] = state_units if con_units is None else con_units
            constraint_kwargs['linear'] = False
        elif var_type == 'indep_control':
            # Controls are design variables, so constraints on them are linear.
            control_shape = phase.control_options[var]['shape']
            control_units = phase.control_options[var]['units']

            constraint_kwargs['shape'] = control_shape
            constraint_kwargs['units'] = control_units if con_units is None else con_units
            constraint_kwargs['linear'] = True
        elif var_type == 'input_control':
            control_shape = phase.control_options[var]['shape']
            control_units = phase.control_options[var]['units']

            constraint_kwargs['shape'] = control_shape
            constraint_kwargs['units'] = control_units if con_units is None else con_units
            constraint_kwargs['linear'] = True
        elif var_type == 'indep_polynomial_control':
            control_shape = phase.polynomial_control_options[var]['shape']
            control_units = phase.polynomial_control_options[var]['units']
            constraint_kwargs['shape'] = control_shape
            constraint_kwargs['units'] = control_units if con_units is None else con_units
            constraint_kwargs['linear'] = False
        elif var_type == 'input_polynomial_control':
            control_shape = phase.polynomial_control_options[var]['shape']
            control_units = phase.polynomial_control_options[var]['units']
            constraint_kwargs['shape'] = control_shape
            constraint_kwargs['units'] = control_units if con_units is None else con_units
            constraint_kwargs['linear'] = False
        elif var_type == 'control_rate':
            # Rate branches derive units from the control units and time units.
            # Note they do not set 'linear'; whatever was in the options copy is
            # used as-is.
            control_name = var[:-5]
            control_shape = phase.control_options[control_name]['shape']
            control_units = phase.control_options[control_name]['units']
            constraint_kwargs['shape'] = control_shape
            constraint_kwargs['units'] = get_rate_units(control_units, time_units, deriv=1) \
                if con_units is None else con_units
        elif var_type == 'control_rate2':
            control_name = var[:-6]
            control_shape = phase.control_options[control_name]['shape']
            control_units = phase.control_options[control_name]['units']
            constraint_kwargs['shape'] = control_shape
            constraint_kwargs['units'] = get_rate_units(control_units, time_units, deriv=2) \
                if con_units is None else con_units
        elif var_type == 'polynomial_control_rate':
            control_name = var[:-5]
            control_shape = phase.polynomial_control_options[control_name]['shape']
            control_units = phase.polynomial_control_options[control_name]['units']
            constraint_kwargs['shape'] = control_shape
            constraint_kwargs['units'] = get_rate_units(control_units, time_units, deriv=1) \
                if con_units is None else con_units
        elif var_type == 'polynomial_control_rate2':
            control_name = var[:-6]
            control_shape = phase.polynomial_control_options[control_name]['shape']
            control_units = phase.polynomial_control_options[control_name]['units']
            constraint_kwargs['shape'] = control_shape
            constraint_kwargs['units'] = get_rate_units(control_units, time_units, deriv=2) \
                if con_units is None else con_units
        else:
            # Fallback: treat the variable as an output of the ODE.
            ode = phase._get_subsystem(self._rhs_source)

            shape, units = get_source_metadata(ode, src=var,
                                               user_units=options['units'],
                                               user_shape=options['shape'])
            constraint_kwargs['linear'] = False
            constraint_kwargs['shape'] = shape
            constraint_kwargs['units'] = units

        # Propagate the resolved shape back to the phase's option dictionary.
        options['shape'] = constraint_kwargs['shape']

        constraint_kwargs.pop('constraint_name', None)
        phase._get_subsystem('path_constraints')._add_path_constraint_configure(con_name, **constraint_kwargs)
def configure_objective(self, phase):
    """
    Add each declared phase objective to the underlying OpenMDAO problem.

    Each objective is resolved to its boundary-constraint source path and added
    via ``add_objective`` with a flat index selecting the requested element at
    the initial or final node of the phase.

    Parameters
    ----------
    phase : dymos.Phase
        The phase to which this transcription applies.

    Raises
    ------
    ValueError
        If a non-scalar objective has no index, if the index is out of range for
        the objective's shape, or if ``loc`` is not 'initial' or 'final'.
    """
    for name, options in phase._objectives.items():
        index = options['index']
        loc = options['loc']

        obj_path, shape, units, _ = self._get_boundary_constraint_src(name, loc, phase)

        shape = options['shape'] if shape is None else shape

        size = int(np.prod(shape))

        if size > 1 and index is None:
            # Fixed typo: previously read 'non-scaler'.
            raise ValueError('Objective variable is non-scalar {0} but no index specified '
                             'for objective'.format(shape))

        idx = 0 if index is None else index
        if idx < 0:
            idx = size + idx

        # After normalization a valid idx lies in [0, size).  The previous check
        # (idx < -size post-normalization) let indices in (-2*size, -size) pass
        # validation silently.
        if idx >= size or idx < 0:
            raise ValueError('Objective index={0}, but the shape of the objective '
                             'variable is {1}'.format(index, shape))

        if loc == 'final':
            obj_index = -size + idx
        elif loc == 'initial':
            obj_index = idx
        else:
            raise ValueError('Invalid value for objective loc: {0}. Must be '
                             'one of \'initial\' or \'final\'.'.format(loc))

        from ..phase import Phase
        super(Phase, phase).add_objective(obj_path, ref=options['ref'], ref0=options['ref0'],
                                          index=obj_index, flat_indices=True, adder=options['adder'],
                                          scaler=options['scaler'],
                                          parallel_deriv_color=options['parallel_deriv_color'])
def _get_boundary_constraint_src(self, name, loc, phase):
raise NotImplementedError('Transcription {0} does not implement method'
'_get_boundary_constraint_source.'.format(self.__class__.__name__))
def _get_rate_source_path(self, name, loc, phase):
raise NotImplementedError('Transcription {0} does not implement method'
'_get_rate_source_path.'.format(self.__class__.__name__))
def _get_ode(self, phase):
return phase._get_subsystem(self._rhs_source)
def get_parameter_connections(self, name, phase):
    """
    Return the target paths and source indices for connections of the named
    parameter.  Derived transcription classes must override this.

    Raises
    ------
    NotImplementedError
        Always; this base implementation is abstract.
    """
    msg = 'Transcription {0} does not implement method get_parameter_connections.'
    raise NotImplementedError(msg.format(self.__class__.__name__))
def is_static_ode_output(self, var, phase, num_nodes):
    """
    Return True if the named ODE output is static, i.e. not sized by the
    transcription's node count.

    An output is considered static when the first dimension of its shape does
    not equal ``num_nodes``.

    Parameters
    ----------
    var : str
        Promoted name of the ODE output.
    phase : dymos.Phase
        The phase whose ODE is inspected.
    num_nodes : int
        Number of nodes in the transcription.

    Returns
    -------
    bool
        True if the output's leading dimension differs from num_nodes.
    """
    ode = phase._get_subsystem(self._rhs_source)
    meta_by_prom = {}
    for _, meta in ode.get_io_metadata(iotypes=('output',), get_remote=True).items():
        meta_by_prom[meta['prom_name']] = meta
    out_shape = meta_by_prom[var]['shape']
    return out_shape[0] != num_nodes
def _requires_continuity_constraints(self, phase):
raise NotImplementedError(f'The transcription {self.__class__} does not provide an '
f'implementation of _requires_continuity_constraints')
| true | true |
f73ce35c3ae0fa1b7eadc5a75b888c8a8208722c | 5,403 | py | Python | test/integration/sagemaker/test_tfs.py | flacout/sagemaker-tensorflow-serving-container | b28564b24049451359c74a4dfa33e5b7ad817248 | [
"Apache-2.0"
] | 143 | 2019-01-21T13:36:58.000Z | 2022-03-17T01:37:12.000Z | test/integration/sagemaker/test_tfs.py | flacout/sagemaker-tensorflow-serving-container | b28564b24049451359c74a4dfa33e5b7ad817248 | [
"Apache-2.0"
] | 207 | 2019-02-19T07:06:28.000Z | 2022-03-11T18:25:02.000Z | test/integration/sagemaker/test_tfs.py | flacout/sagemaker-tensorflow-serving-container | b28564b24049451359c74a4dfa33e5b7ad817248 | [
"Apache-2.0"
] | 118 | 2018-12-07T01:37:30.000Z | 2022-03-03T20:17:12.000Z | # Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import pytest
import util
# Regions assumed not to offer p3 (GPU) instance types, per the constant's name
# -- confirm against current AWS regional instance availability.
NON_P3_REGIONS = ["ap-southeast-1", "ap-southeast-2", "ap-south-1",
                  "ca-central-1", "eu-central-1", "eu-west-2", "us-west-1"]
# Parametrized over the comma-separated version list in the TEST_VERSIONS
# environment variable, so every test runs once per version.
@pytest.fixture(params=os.environ["TEST_VERSIONS"].split(","))
def version(request):
    """Return one container version string from TEST_VERSIONS."""
    return request.param
@pytest.fixture(scope="session")
def repo(request):
    """Repository name: the --repo command-line option, or the default name."""
    return request.config.getoption("--repo") or "sagemaker-tensorflow-serving"
@pytest.fixture
def tag(request, version, instance_type):
    """Image tag: the explicit --tag option if given, else '<version>-<cpu|gpu>'."""
    explicit_tag = request.config.getoption("--tag")
    if explicit_tag:
        return explicit_tag

    # ml.p* instance families are GPU-backed; everything else runs the CPU image.
    if instance_type.startswith("ml.p"):
        arch = "gpu"
    else:
        arch = "cpu"
    return f"{version}-{arch}"
@pytest.fixture
def image_uri(registry, region, repo, tag):
    """Fully-qualified image URI assembled from registry, region, repo and tag."""
    return util.image_uri(registry, region, repo, tag)
# Parametrized over the comma-separated instance types in the
# TEST_INSTANCE_TYPES environment variable.
@pytest.fixture(params=os.environ["TEST_INSTANCE_TYPES"].split(","))
def instance_type(request, region):
    """Return one SageMaker instance type from TEST_INSTANCE_TYPES."""
    return request.param
@pytest.fixture(scope="module")
def accelerator_type():
    """No Elastic Inference accelerator is attached in these tests."""
    return None
@pytest.fixture(scope="session")
def tfs_model(region, boto_session):
    """Upload (or reuse) the plain TFS model archive; presumably returns its
    S3 location -- see util.find_or_put_model_data."""
    return util.find_or_put_model_data(region,
                                       boto_session,
                                       "test/data/tfs-model.tar.gz")
@pytest.fixture(scope='session')
def python_model_with_requirements(region, boto_session):
    """Upload (or reuse) the model archive that bundles a requirements.txt;
    presumably returns its S3 location -- see util.find_or_put_model_data."""
    return util.find_or_put_model_data(region,
                                       boto_session,
                                       "test/data/python-with-requirements.tar.gz")
@pytest.fixture(scope='session')
def python_model_with_lib(region, boto_session):
    """Upload (or reuse) the model archive that bundles a custom lib module;
    presumably returns its S3 location -- see util.find_or_put_model_data."""
    return util.find_or_put_model_data(region,
                                       boto_session,
                                       "test/data/python-with-lib.tar.gz")
def test_tfs_model(boto_session, sagemaker_client,
                   sagemaker_runtime_client, model_name, tfs_model,
                   image_uri, instance_type, accelerator_type):
    """Deploy the plain TFS model to an endpoint and invoke it once."""
    request_body = {"instances": [1.0, 2.0, 5.0]}
    util.create_and_invoke_endpoint(boto_session, sagemaker_client,
                                    sagemaker_runtime_client, model_name, tfs_model,
                                    image_uri, instance_type, accelerator_type,
                                    request_body)
def test_batch_transform(region, boto_session, sagemaker_client,
                         model_name, tfs_model, image_uri,
                         instance_type):
    """Run a batch transform job and verify every record's prediction."""
    results = util.run_batch_transform_job(region=region,
                                           boto_session=boto_session,
                                           model_data=tfs_model,
                                           image_uri=image_uri,
                                           model_name=model_name,
                                           sagemaker_client=sagemaker_client,
                                           instance_type=instance_type)
    expected_prediction = [3.5, 4.0, 5.5]
    assert len(results) == 10
    assert all(record == expected_prediction for record in results)
def test_python_model_with_requirements(boto_session, sagemaker_client,
                                        sagemaker_runtime_client, model_name,
                                        python_model_with_requirements, image_uri, instance_type,
                                        accelerator_type):
    """Deploy the model with a requirements.txt and check the python-service markers."""
    if "p3" in instance_type:
        pytest.skip("skip for p3 instance")

    # The python pre/post-processing service transforms this into a valid
    # prediction request before it reaches TFS.
    request_body = {"x": [1.0, 2.0, 5.0]}
    response = util.create_and_invoke_endpoint(boto_session, sagemaker_client,
                                               sagemaker_runtime_client, model_name,
                                               python_model_with_requirements, image_uri,
                                               instance_type, accelerator_type, request_body)

    # Fields injected into the TFS response by the python service.
    assert response["python"] is True
    assert response["pillow"] == "6.0.0"
def test_python_model_with_lib(boto_session, sagemaker_client,
                               sagemaker_runtime_client, model_name, python_model_with_lib,
                               image_uri, instance_type, accelerator_type):
    """Deploy the model bundling a custom lib and check the python-service markers."""
    if "p3" in instance_type:
        pytest.skip("skip for p3 instance")

    # The python pre/post-processing service transforms this into a valid
    # prediction request before it reaches TFS.
    request_body = {"x": [1.0, 2.0, 5.0]}
    response = util.create_and_invoke_endpoint(boto_session, sagemaker_client,
                                               sagemaker_runtime_client, model_name,
                                               python_model_with_lib,
                                               image_uri, instance_type, accelerator_type,
                                               request_body)

    # Fields injected into the TFS response by the python service.
    assert response["python"] is True
    assert response["dummy_module"] == "0.1"
| 39.152174 | 110 | 0.605589 |
import os
import pytest
import util
NON_P3_REGIONS = ["ap-southeast-1", "ap-southeast-2", "ap-south-1",
"ca-central-1", "eu-central-1", "eu-west-2", "us-west-1"]
@pytest.fixture(params=os.environ["TEST_VERSIONS"].split(","))
def version(request):
return request.param
@pytest.fixture(scope="session")
def repo(request):
return request.config.getoption("--repo") or "sagemaker-tensorflow-serving"
@pytest.fixture
def tag(request, version, instance_type):
if request.config.getoption("--tag"):
return request.config.getoption("--tag")
arch = "gpu" if instance_type.startswith("ml.p") else "cpu"
return f"{version}-{arch}"
@pytest.fixture
def image_uri(registry, region, repo, tag):
return util.image_uri(registry, region, repo, tag)
@pytest.fixture(params=os.environ["TEST_INSTANCE_TYPES"].split(","))
def instance_type(request, region):
return request.param
@pytest.fixture(scope="module")
def accelerator_type():
return None
@pytest.fixture(scope="session")
def tfs_model(region, boto_session):
return util.find_or_put_model_data(region,
boto_session,
"test/data/tfs-model.tar.gz")
@pytest.fixture(scope='session')
def python_model_with_requirements(region, boto_session):
return util.find_or_put_model_data(region,
boto_session,
"test/data/python-with-requirements.tar.gz")
@pytest.fixture(scope='session')
def python_model_with_lib(region, boto_session):
return util.find_or_put_model_data(region,
boto_session,
"test/data/python-with-lib.tar.gz")
def test_tfs_model(boto_session, sagemaker_client,
sagemaker_runtime_client, model_name, tfs_model,
image_uri, instance_type, accelerator_type):
input_data = {"instances": [1.0, 2.0, 5.0]}
util.create_and_invoke_endpoint(boto_session, sagemaker_client,
sagemaker_runtime_client, model_name, tfs_model,
image_uri, instance_type, accelerator_type, input_data)
def test_batch_transform(region, boto_session, sagemaker_client,
model_name, tfs_model, image_uri,
instance_type):
results = util.run_batch_transform_job(region=region,
boto_session=boto_session,
model_data=tfs_model,
image_uri=image_uri,
model_name=model_name,
sagemaker_client=sagemaker_client,
instance_type=instance_type)
assert len(results) == 10
for r in results:
assert r == [3.5, 4.0, 5.5]
def test_python_model_with_requirements(boto_session, sagemaker_client,
sagemaker_runtime_client, model_name,
python_model_with_requirements, image_uri, instance_type,
accelerator_type):
if "p3" in instance_type:
pytest.skip("skip for p3 instance")
input_data = {"x": [1.0, 2.0, 5.0]}
output_data = util.create_and_invoke_endpoint(boto_session, sagemaker_client,
sagemaker_runtime_client, model_name,
python_model_with_requirements, image_uri,
instance_type, accelerator_type, input_data)
assert output_data["python"] is True
assert output_data["pillow"] == "6.0.0"
def test_python_model_with_lib(boto_session, sagemaker_client,
sagemaker_runtime_client, model_name, python_model_with_lib,
image_uri, instance_type, accelerator_type):
if "p3" in instance_type:
pytest.skip("skip for p3 instance")
input_data = {"x": [1.0, 2.0, 5.0]}
output_data = util.create_and_invoke_endpoint(boto_session, sagemaker_client,
sagemaker_runtime_client, model_name, python_model_with_lib,
image_uri, instance_type, accelerator_type, input_data)
assert output_data["python"] is True
assert output_data["dummy_module"] == "0.1"
| true | true |
f73ce4750e7df131956b6123354ec90427227883 | 10,897 | py | Python | PythonAPI/carla/agents/navigation/local_planner.py | tom-doerr/carla | 2f5409d4b63c8fa24320711e6a7a24e79bf1c833 | [
"MIT"
] | 103 | 2020-03-10T04:21:50.000Z | 2022-03-29T13:26:57.000Z | PythonAPI/carla/agents/navigation/local_planner.py | tom-doerr/carla | 2f5409d4b63c8fa24320711e6a7a24e79bf1c833 | [
"MIT"
] | 12 | 2020-04-11T11:36:01.000Z | 2021-12-09T11:35:56.000Z | PythonAPI/carla/agents/navigation/local_planner.py | jhidalgocarrio/carla | 5404807db43428f2c82451ffacef9f83fab782e1 | [
"MIT"
] | 8 | 2020-11-21T07:47:12.000Z | 2022-03-25T13:41:05.000Z | #!/usr/bin/env python
# Copyright (c) 2018 Intel Labs.
# authors: German Ros (german.ros@intel.com)
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
""" This module contains a local planner to perform low-level waypoint following based on PID controllers. """
from enum import Enum
from collections import deque
import random
import carla
from agents.navigation.controller import VehiclePIDController
from agents.tools.misc import draw_waypoints
class RoadOption(Enum):
    """
    RoadOption represents the possible topological configurations when moving from a segment of lane to other.
    """
    VOID = -1
    # Turn left at the upcoming junction (see _compute_connection).
    LEFT = 1
    # Turn right at the upcoming junction (see _compute_connection).
    RIGHT = 2
    # Continue straight through the upcoming junction (see _compute_connection).
    STRAIGHT = 3
    # Keep following the current lane.
    LANEFOLLOW = 4
    # Lane change to the left (as named; not produced in this module).
    CHANGELANELEFT = 5
    # Lane change to the right (as named; not produced in this module).
    CHANGELANERIGHT = 6
class LocalPlanner(object):
    """
    LocalPlanner implements the basic behavior of following a trajectory of waypoints that is generated on-the-fly.
    The low-level motion of the vehicle is computed by using two PID controllers, one is used for the lateral control
    and the other for the longitudinal control (cruise speed).

    When multiple paths are available (intersections) this local planner makes a random choice.
    """

    # minimum distance to target waypoint as a percentage (e.g. within 90% of
    # total distance)
    MIN_DISTANCE_PERCENTAGE = 0.9

    def __init__(self, vehicle, opt_dict=None):
        """
        :param vehicle: actor to apply to local planner logic onto
        :param opt_dict: dictionary of arguments with the following semantics:
            dt -- time difference between physics control in seconds. This is typically fixed from server side
                  using the arguments -benchmark -fps=F . In this case dt = 1/F

            target_speed -- desired cruise speed in Km/h

            sampling_radius -- search radius for next waypoints in seconds: e.g. 0.5 seconds ahead

            lateral_control_dict -- dictionary of arguments to setup the lateral PID controller
                                    {'K_P':, 'K_D':, 'K_I':, 'dt'}

            longitudinal_control_dict -- dictionary of arguments to setup the longitudinal PID controller
                                         {'K_P':, 'K_D':, 'K_I':, 'dt'}
        """
        self._vehicle = vehicle
        self._map = self._vehicle.get_world().get_map()

        self._dt = None
        self._target_speed = None
        self._sampling_radius = None
        self._min_distance = None
        self._current_waypoint = None
        self._target_road_option = None
        self._next_waypoints = None
        self.target_waypoint = None
        self._vehicle_controller = None
        self._global_plan = None
        # queue with tuples of (waypoint, RoadOption)
        self._waypoints_queue = deque(maxlen=20000)
        self._buffer_size = 5
        self._waypoint_buffer = deque(maxlen=self._buffer_size)

        # initializing controller
        self._init_controller(opt_dict)

    def __del__(self):
        # NOTE(review): destroying the simulator actor from __del__ ties its
        # lifetime to Python garbage collection -- confirm this is intended.
        if self._vehicle:
            self._vehicle.destroy()
        print("Destroying ego-vehicle!")

    def reset_vehicle(self):
        """Drop the reference to the ego-vehicle without destroying the actor."""
        self._vehicle = None
        print("Resetting ego-vehicle!")

    def _init_controller(self, opt_dict):
        """
        Controller initialization.

        :param opt_dict: dictionary of arguments.
        :return:
        """
        # default params
        self._dt = 1.0 / 20.0
        self._target_speed = 20.0  # Km/h
        self._sampling_radius = self._target_speed * 1 / 3.6  # 1 seconds horizon
        self._min_distance = self._sampling_radius * self.MIN_DISTANCE_PERCENTAGE
        args_lateral_dict = {
            'K_P': 1.95,
            'K_D': 0.01,
            'K_I': 1.4,
            'dt': self._dt}
        args_longitudinal_dict = {
            'K_P': 1.0,
            'K_D': 0,
            'K_I': 1,
            'dt': self._dt}

        # parameters overload
        if opt_dict:
            if 'dt' in opt_dict:
                self._dt = opt_dict['dt']
            if 'target_speed' in opt_dict:
                self._target_speed = opt_dict['target_speed']
            if 'sampling_radius' in opt_dict:
                self._sampling_radius = self._target_speed * \
                    opt_dict['sampling_radius'] / 3.6
            if 'lateral_control_dict' in opt_dict:
                args_lateral_dict = opt_dict['lateral_control_dict']
            if 'longitudinal_control_dict' in opt_dict:
                args_longitudinal_dict = opt_dict['longitudinal_control_dict']

        self._current_waypoint = self._map.get_waypoint(self._vehicle.get_location())
        self._vehicle_controller = VehiclePIDController(self._vehicle,
                                                        args_lateral=args_lateral_dict,
                                                        args_longitudinal=args_longitudinal_dict)

        self._global_plan = False

        # compute initial waypoints
        self._waypoints_queue.append((self._current_waypoint.next(self._sampling_radius)[0], RoadOption.LANEFOLLOW))

        self._target_road_option = RoadOption.LANEFOLLOW
        # fill waypoint trajectory queue
        self._compute_next_waypoints(k=200)

    def set_speed(self, speed):
        """
        Request new target speed.

        :param speed: new target speed in Km/h
        :return:
        """
        self._target_speed = speed

    def _compute_next_waypoints(self, k=1):
        """
        Add new waypoints to the trajectory queue.

        :param k: how many waypoints to compute
        :return:
        """
        # check we do not overflow the queue
        available_entries = self._waypoints_queue.maxlen - len(self._waypoints_queue)
        k = min(available_entries, k)

        for _ in range(k):
            last_waypoint = self._waypoints_queue[-1][0]
            next_waypoints = list(last_waypoint.next(self._sampling_radius))

            if len(next_waypoints) == 1:
                # only one option available ==> lanefollowing
                next_waypoint = next_waypoints[0]
                road_option = RoadOption.LANEFOLLOW
            else:
                # random choice between the possible options
                road_options_list = _retrieve_options(
                    next_waypoints, last_waypoint)
                road_option = random.choice(road_options_list)
                next_waypoint = next_waypoints[road_options_list.index(
                    road_option)]

            self._waypoints_queue.append((next_waypoint, road_option))

    def set_global_plan(self, current_plan):
        """Replace the waypoint queue with an externally computed (waypoint, RoadOption) plan."""
        self._waypoints_queue.clear()
        for elem in current_plan:
            self._waypoints_queue.append(elem)
        self._target_road_option = RoadOption.LANEFOLLOW
        self._global_plan = True

    def run_step(self, debug=True):
        """
        Execute one step of local planning which involves running the longitudinal and lateral PID controllers to
        follow the waypoints trajectory.

        :param debug: boolean flag to activate waypoints debugging
        :return: carla.VehicleControl to apply to the ego-vehicle
        """

        # not enough waypoints in the horizon? => add more!
        if not self._global_plan and len(self._waypoints_queue) < int(self._waypoints_queue.maxlen * 0.5):
            self._compute_next_waypoints(k=100)

        if len(self._waypoints_queue) == 0 and len(self._waypoint_buffer) == 0:
            # nothing left to follow: command a full stop
            control = carla.VehicleControl()
            control.steer = 0.0
            control.throttle = 0.0
            control.brake = 1.0
            control.hand_brake = False
            control.manual_gear_shift = False

            return control

        #   Buffering the waypoints
        if not self._waypoint_buffer:
            for i in range(self._buffer_size):
                if self._waypoints_queue:
                    self._waypoint_buffer.append(
                        self._waypoints_queue.popleft())
                else:
                    break

        # current vehicle waypoint
        vehicle_transform = self._vehicle.get_transform()
        self._current_waypoint = self._map.get_waypoint(vehicle_transform.location)
        # target waypoint
        self.target_waypoint, self._target_road_option = self._waypoint_buffer[0]
        # move using PID controllers
        control = self._vehicle_controller.run_step(self._target_speed, self.target_waypoint)

        # purge the queue of obsolete waypoints (anything closer than the
        # minimum distance has effectively been reached)
        max_index = -1

        for i, (waypoint, _) in enumerate(self._waypoint_buffer):
            if waypoint.transform.location.distance(vehicle_transform.location) < self._min_distance:
                max_index = i
        if max_index >= 0:
            for i in range(max_index + 1):
                self._waypoint_buffer.popleft()

        if debug:
            draw_waypoints(self._vehicle.get_world(), [self.target_waypoint], self._vehicle.get_location().z + 1.0)

        return control

    def done(self):
        """Return True when both the waypoint queue and buffer are exhausted."""
        return len(self._waypoints_queue) == 0 and len(self._waypoint_buffer) == 0
def _retrieve_options(list_waypoints, current_waypoint):
    """
    Compute the type of connection between the current active waypoint and each of the
    candidate waypoints in list_waypoints.

    :param list_waypoints: list with the possible target waypoints in case of multiple options
    :param current_waypoint: current active waypoint
    :return: list of RoadOption enums, one per candidate, describing the connection
             from the active waypoint to that candidate
    """
    # Classify against the waypoint 3 m beyond each candidate: right at an
    # intersection entrance the yaw difference is still too small to be useful.
    return [_compute_connection(current_waypoint, candidate.next(3.0)[0])
            for candidate in list_waypoints]
def _compute_connection(current_waypoint, next_waypoint, threshold=35):
    """
    Compute the topological connection between an active waypoint (current_waypoint)
    and a target waypoint (next_waypoint), based on their yaw difference.

    :param current_waypoint: active waypoint
    :param next_waypoint: target waypoint
    :param threshold: yaw difference in degrees below which the connection
                      counts as STRAIGHT
    :return: RoadOption.STRAIGHT, RoadOption.LEFT or RoadOption.RIGHT
    """
    next_yaw = next_waypoint.transform.rotation.yaw % 360.0
    current_yaw = current_waypoint.transform.rotation.yaw % 360.0
    diff_angle = (next_yaw - current_yaw) % 180.0

    # Near 0 or near 180 degrees of wrapped difference means no real turn.
    if diff_angle < threshold or diff_angle > (180 - threshold):
        return RoadOption.STRAIGHT
    if diff_angle > 90.0:
        return RoadOption.LEFT
    return RoadOption.RIGHT
| 37.064626 | 117 | 0.634487 |
from enum import Enum
from collections import deque
import random
import carla
from agents.navigation.controller import VehiclePIDController
from agents.tools.misc import draw_waypoints
class RoadOption(Enum):
VOID = -1
LEFT = 1
RIGHT = 2
STRAIGHT = 3
LANEFOLLOW = 4
CHANGELANELEFT = 5
CHANGELANERIGHT = 6
class LocalPlanner(object):
MIN_DISTANCE_PERCENTAGE = 0.9
def __init__(self, vehicle, opt_dict=None):
self._vehicle = vehicle
self._map = self._vehicle.get_world().get_map()
self._dt = None
self._target_speed = None
self._sampling_radius = None
self._min_distance = None
self._current_waypoint = None
self._target_road_option = None
self._next_waypoints = None
self.target_waypoint = None
self._vehicle_controller = None
self._global_plan = None
self._waypoints_queue = deque(maxlen=20000)
self._buffer_size = 5
self._waypoint_buffer = deque(maxlen=self._buffer_size)
self._init_controller(opt_dict)
def __del__(self):
if self._vehicle:
self._vehicle.destroy()
print("Destroying ego-vehicle!")
def reset_vehicle(self):
self._vehicle = None
print("Resetting ego-vehicle!")
def _init_controller(self, opt_dict):
self._dt = 1.0 / 20.0
self._target_speed = 20.0
self._sampling_radius = self._target_speed * 1 / 3.6
self._min_distance = self._sampling_radius * self.MIN_DISTANCE_PERCENTAGE
args_lateral_dict = {
'K_P': 1.95,
'K_D': 0.01,
'K_I': 1.4,
'dt': self._dt}
args_longitudinal_dict = {
'K_P': 1.0,
'K_D': 0,
'K_I': 1,
'dt': self._dt}
if opt_dict:
if 'dt' in opt_dict:
self._dt = opt_dict['dt']
if 'target_speed' in opt_dict:
self._target_speed = opt_dict['target_speed']
if 'sampling_radius' in opt_dict:
self._sampling_radius = self._target_speed * \
opt_dict['sampling_radius'] / 3.6
if 'lateral_control_dict' in opt_dict:
args_lateral_dict = opt_dict['lateral_control_dict']
if 'longitudinal_control_dict' in opt_dict:
args_longitudinal_dict = opt_dict['longitudinal_control_dict']
self._current_waypoint = self._map.get_waypoint(self._vehicle.get_location())
self._vehicle_controller = VehiclePIDController(self._vehicle,
args_lateral=args_lateral_dict,
args_longitudinal=args_longitudinal_dict)
self._global_plan = False
self._waypoints_queue.append((self._current_waypoint.next(self._sampling_radius)[0], RoadOption.LANEFOLLOW))
self._target_road_option = RoadOption.LANEFOLLOW
self._compute_next_waypoints(k=200)
def set_speed(self, speed):
self._target_speed = speed
def _compute_next_waypoints(self, k=1):
available_entries = self._waypoints_queue.maxlen - len(self._waypoints_queue)
k = min(available_entries, k)
for _ in range(k):
last_waypoint = self._waypoints_queue[-1][0]
next_waypoints = list(last_waypoint.next(self._sampling_radius))
if len(next_waypoints) == 1:
next_waypoint = next_waypoints[0]
road_option = RoadOption.LANEFOLLOW
else:
road_options_list = _retrieve_options(
next_waypoints, last_waypoint)
road_option = random.choice(road_options_list)
next_waypoint = next_waypoints[road_options_list.index(
road_option)]
self._waypoints_queue.append((next_waypoint, road_option))
def set_global_plan(self, current_plan):
self._waypoints_queue.clear()
for elem in current_plan:
self._waypoints_queue.append(elem)
self._target_road_option = RoadOption.LANEFOLLOW
self._global_plan = True
def run_step(self, debug=True):
if not self._global_plan and len(self._waypoints_queue) < int(self._waypoints_queue.maxlen * 0.5):
self._compute_next_waypoints(k=100)
if len(self._waypoints_queue) == 0 and len(self._waypoint_buffer) == 0:
control = carla.VehicleControl()
control.steer = 0.0
control.throttle = 0.0
control.brake = 1.0
control.hand_brake = False
control.manual_gear_shift = False
return control
if not self._waypoint_buffer:
for i in range(self._buffer_size):
if self._waypoints_queue:
self._waypoint_buffer.append(
self._waypoints_queue.popleft())
else:
break
vehicle_transform = self._vehicle.get_transform()
self._current_waypoint = self._map.get_waypoint(vehicle_transform.location)
self.target_waypoint, self._target_road_option = self._waypoint_buffer[0]
control = self._vehicle_controller.run_step(self._target_speed, self.target_waypoint)
max_index = -1
for i, (waypoint, _) in enumerate(self._waypoint_buffer):
if waypoint.transform.location.distance(vehicle_transform.location) < self._min_distance:
max_index = i
if max_index >= 0:
for i in range(max_index + 1):
self._waypoint_buffer.popleft()
if debug:
draw_waypoints(self._vehicle.get_world(), [self.target_waypoint], self._vehicle.get_location().z + 1.0)
return control
def done(self):
return len(self._waypoints_queue) == 0 and len(self._waypoint_buffer) == 0
def _retrieve_options(list_waypoints, current_waypoint):
options = []
for next_waypoint in list_waypoints:
next_next_waypoint = next_waypoint.next(3.0)[0]
link = _compute_connection(current_waypoint, next_next_waypoint)
options.append(link)
return options
def _compute_connection(current_waypoint, next_waypoint, threshold=35):
n = next_waypoint.transform.rotation.yaw
n = n % 360.0
c = current_waypoint.transform.rotation.yaw
c = c % 360.0
diff_angle = (n - c) % 180.0
if diff_angle < threshold or diff_angle > (180 - threshold):
return RoadOption.STRAIGHT
elif diff_angle > 90.0:
return RoadOption.LEFT
else:
return RoadOption.RIGHT
| true | true |
f73ce4d84d4dcc3f9b02b6a829c7f3f8ca9efc77 | 788 | py | Python | profiles/signals.py | nivbend/memoir | c7239ae9db852f8e14ed53a2ba96d4c72eb47f30 | [
"MIT"
] | null | null | null | profiles/signals.py | nivbend/memoir | c7239ae9db852f8e14ed53a2ba96d4c72eb47f30 | [
"MIT"
] | 18 | 2016-08-26T21:26:58.000Z | 2017-03-21T20:14:35.000Z | profiles/signals.py | nivbend/memoir | c7239ae9db852f8e14ed53a2ba96d4c72eb47f30 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver
from django_auth_ldap.backend import populate_user
from .models import UserProfile
try:
USER_ATTR_IMAGE_URL = settings.AUTH_LDAP_USER_ATTR_IMAGE_URL
except AttributeError:
USER_ATTR_IMAGE_URL = None
@receiver(post_save, sender = settings.AUTH_USER_MODEL)
def create_profile(sender, instance, created, **kwargs):
if created:
UserProfile.objects.create(user = instance)
@receiver(populate_user)
def populate_profile(sender, user, ldap_user, **kwargs):
if USER_ATTR_IMAGE_URL is None:
return
(user.profile.image_url, ) = ldap_user.attrs.get(USER_ATTR_IMAGE_URL, ('', ))
user.profile.save()
| 31.52 | 81 | 0.780457 | from __future__ import unicode_literals
from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver
from django_auth_ldap.backend import populate_user
from .models import UserProfile
try:
USER_ATTR_IMAGE_URL = settings.AUTH_LDAP_USER_ATTR_IMAGE_URL
except AttributeError:
USER_ATTR_IMAGE_URL = None
@receiver(post_save, sender = settings.AUTH_USER_MODEL)
def create_profile(sender, instance, created, **kwargs):
if created:
UserProfile.objects.create(user = instance)
@receiver(populate_user)
def populate_profile(sender, user, ldap_user, **kwargs):
if USER_ATTR_IMAGE_URL is None:
return
(user.profile.image_url, ) = ldap_user.attrs.get(USER_ATTR_IMAGE_URL, ('', ))
user.profile.save()
| true | true |
f73ce5a9d21c84995ddf67b52dd7d49a24ecd107 | 7,633 | py | Python | homeassistant/components/homematicip_cloud/__init__.py | mgosk/home-assistant | 3bf27b9afc50044ad0a244702e8a628247eeb3e0 | [
"Apache-2.0"
] | 2 | 2019-07-28T18:56:14.000Z | 2019-07-28T18:56:17.000Z | homeassistant/components/homematicip_cloud/__init__.py | richh1/home-assistant | a14c299a78259386bbcf7787689e3e7dfa5b1dfd | [
"Apache-2.0"
] | null | null | null | homeassistant/components/homematicip_cloud/__init__.py | richh1/home-assistant | a14c299a78259386bbcf7787689e3e7dfa5b1dfd | [
"Apache-2.0"
] | 1 | 2018-04-29T02:14:32.000Z | 2018-04-29T02:14:32.000Z | """Support for HomematicIP Cloud devices."""
import logging
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_NAME
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry as dr
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType
from .config_flow import configured_haps
from .const import (
CONF_ACCESSPOINT, CONF_AUTHTOKEN, DOMAIN, HMIPC_AUTHTOKEN, HMIPC_HAPID,
HMIPC_NAME)
from .device import HomematicipGenericDevice # noqa: F401
from .hap import HomematicipAuth, HomematicipHAP # noqa: F401
_LOGGER = logging.getLogger(__name__)
ATTR_DURATION = 'duration'
ATTR_ENDTIME = 'endtime'
ATTR_TEMPERATURE = 'temperature'
ATTR_ACCESSPOINT_ID = 'accesspoint_id'
SERVICE_ACTIVATE_ECO_MODE_WITH_DURATION = 'activate_eco_mode_with_duration'
SERVICE_ACTIVATE_ECO_MODE_WITH_PERIOD = 'activate_eco_mode_with_period'
SERVICE_ACTIVATE_VACATION = 'activate_vacation'
SERVICE_DEACTIVATE_ECO_MODE = 'deactivate_eco_mode'
SERVICE_DEACTIVATE_VACATION = 'deactivate_vacation'
CONFIG_SCHEMA = vol.Schema({
vol.Optional(DOMAIN, default=[]): vol.All(cv.ensure_list, [vol.Schema({
vol.Optional(CONF_NAME, default=''): vol.Any(cv.string),
vol.Required(CONF_ACCESSPOINT): cv.string,
vol.Required(CONF_AUTHTOKEN): cv.string,
})]),
}, extra=vol.ALLOW_EXTRA)
SCHEMA_ACTIVATE_ECO_MODE_WITH_DURATION = vol.Schema({
vol.Required(ATTR_DURATION): cv.positive_int,
vol.Optional(ATTR_ACCESSPOINT_ID):
vol.All(str, vol.Length(min=24, max=24)),
})
SCHEMA_ACTIVATE_ECO_MODE_WITH_PERIOD = vol.Schema({
vol.Required(ATTR_ENDTIME): cv.datetime,
vol.Optional(ATTR_ACCESSPOINT_ID):
vol.All(str, vol.Length(min=24, max=24)),
})
SCHEMA_ACTIVATE_VACATION = vol.Schema({
vol.Required(ATTR_ENDTIME): cv.datetime,
vol.Required(ATTR_TEMPERATURE, default=18.0):
vol.All(vol.Coerce(float), vol.Range(min=0, max=55)),
vol.Optional(ATTR_ACCESSPOINT_ID):
vol.All(str, vol.Length(min=24, max=24)),
})
SCHEMA_DEACTIVATE_ECO_MODE = vol.Schema({
vol.Optional(ATTR_ACCESSPOINT_ID):
vol.All(str, vol.Length(min=24, max=24)),
})
SCHEMA_DEACTIVATE_VACATION = vol.Schema({
vol.Optional(ATTR_ACCESSPOINT_ID):
vol.All(str, vol.Length(min=24, max=24)),
})
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the HomematicIP Cloud component."""
hass.data[DOMAIN] = {}
accesspoints = config.get(DOMAIN, [])
for conf in accesspoints:
if conf[CONF_ACCESSPOINT] not in configured_haps(hass):
hass.async_add_job(hass.config_entries.flow.async_init(
DOMAIN, context={'source': config_entries.SOURCE_IMPORT},
data={
HMIPC_HAPID: conf[CONF_ACCESSPOINT],
HMIPC_AUTHTOKEN: conf[CONF_AUTHTOKEN],
HMIPC_NAME: conf[CONF_NAME],
}
))
async def _async_activate_eco_mode_with_duration(service):
"""Service to activate eco mode with duration."""
duration = service.data[ATTR_DURATION]
hapid = service.data.get(ATTR_ACCESSPOINT_ID)
if hapid:
home = _get_home(hapid)
if home:
await home.activate_absence_with_duration(duration)
else:
for hapid in hass.data[DOMAIN]:
home = hass.data[DOMAIN][hapid].home
await home.activate_absence_with_duration(duration)
hass.services.async_register(
DOMAIN, SERVICE_ACTIVATE_ECO_MODE_WITH_DURATION,
_async_activate_eco_mode_with_duration,
schema=SCHEMA_ACTIVATE_ECO_MODE_WITH_DURATION)
async def _async_activate_eco_mode_with_period(service):
"""Service to activate eco mode with period."""
endtime = service.data[ATTR_ENDTIME]
hapid = service.data.get(ATTR_ACCESSPOINT_ID)
if hapid:
home = _get_home(hapid)
if home:
await home.activate_absence_with_period(endtime)
else:
for hapid in hass.data[DOMAIN]:
home = hass.data[DOMAIN][hapid].home
await home.activate_absence_with_period(endtime)
hass.services.async_register(
DOMAIN, SERVICE_ACTIVATE_ECO_MODE_WITH_PERIOD,
_async_activate_eco_mode_with_period,
schema=SCHEMA_ACTIVATE_ECO_MODE_WITH_PERIOD)
async def _async_activate_vacation(service):
"""Service to activate vacation."""
endtime = service.data[ATTR_ENDTIME]
temperature = service.data[ATTR_TEMPERATURE]
hapid = service.data.get(ATTR_ACCESSPOINT_ID)
if hapid:
home = _get_home(hapid)
if home:
await home.activate_vacation(endtime, temperature)
else:
for hapid in hass.data[DOMAIN]:
home = hass.data[DOMAIN][hapid].home
await home.activate_vacation(endtime, temperature)
hass.services.async_register(
DOMAIN, SERVICE_ACTIVATE_VACATION, _async_activate_vacation,
schema=SCHEMA_ACTIVATE_VACATION)
async def _async_deactivate_eco_mode(service):
"""Service to deactivate eco mode."""
hapid = service.data.get(ATTR_ACCESSPOINT_ID)
if hapid:
home = _get_home(hapid)
if home:
await home.deactivate_absence()
else:
for hapid in hass.data[DOMAIN]:
home = hass.data[DOMAIN][hapid].home
await home.deactivate_absence()
hass.services.async_register(
DOMAIN, SERVICE_DEACTIVATE_ECO_MODE, _async_deactivate_eco_mode,
schema=SCHEMA_DEACTIVATE_ECO_MODE)
async def _async_deactivate_vacation(service):
"""Service to deactivate vacation."""
hapid = service.data.get(ATTR_ACCESSPOINT_ID)
if hapid:
home = _get_home(hapid)
if home:
await home.deactivate_vacation()
else:
for hapid in hass.data[DOMAIN]:
home = hass.data[DOMAIN][hapid].home
await home.deactivate_vacation()
hass.services.async_register(
DOMAIN, SERVICE_DEACTIVATE_VACATION, _async_deactivate_vacation,
schema=SCHEMA_DEACTIVATE_VACATION)
def _get_home(hapid: str):
"""Return a HmIP home."""
hap = hass.data[DOMAIN][hapid]
if hap:
return hap.home
return None
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up an access point from a config entry."""
hap = HomematicipHAP(hass, entry)
hapid = entry.data[HMIPC_HAPID].replace('-', '').upper()
hass.data[DOMAIN][hapid] = hap
if not await hap.async_setup():
return False
# Register hap as device in registry.
device_registry = await dr.async_get_registry(hass)
home = hap.home
# Add the HAP name from configuration if set.
hapname = home.label \
if not home.name else "{} {}".format(home.label, home.name)
device_registry.async_get_or_create(
config_entry_id=home.id,
identifiers={(DOMAIN, home.id)},
manufacturer='eQ-3',
name=hapname,
model=home.modelType,
sw_version=home.currentAPVersion,
)
return True
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
hap = hass.data[DOMAIN].pop(entry.data[HMIPC_HAPID])
return await hap.async_reset()
| 34.538462 | 77 | 0.675357 | import logging
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_NAME
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry as dr
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType
from .config_flow import configured_haps
from .const import (
CONF_ACCESSPOINT, CONF_AUTHTOKEN, DOMAIN, HMIPC_AUTHTOKEN, HMIPC_HAPID,
HMIPC_NAME)
from .device import HomematicipGenericDevice
from .hap import HomematicipAuth, HomematicipHAP
_LOGGER = logging.getLogger(__name__)
ATTR_DURATION = 'duration'
ATTR_ENDTIME = 'endtime'
ATTR_TEMPERATURE = 'temperature'
ATTR_ACCESSPOINT_ID = 'accesspoint_id'
SERVICE_ACTIVATE_ECO_MODE_WITH_DURATION = 'activate_eco_mode_with_duration'
SERVICE_ACTIVATE_ECO_MODE_WITH_PERIOD = 'activate_eco_mode_with_period'
SERVICE_ACTIVATE_VACATION = 'activate_vacation'
SERVICE_DEACTIVATE_ECO_MODE = 'deactivate_eco_mode'
SERVICE_DEACTIVATE_VACATION = 'deactivate_vacation'
CONFIG_SCHEMA = vol.Schema({
vol.Optional(DOMAIN, default=[]): vol.All(cv.ensure_list, [vol.Schema({
vol.Optional(CONF_NAME, default=''): vol.Any(cv.string),
vol.Required(CONF_ACCESSPOINT): cv.string,
vol.Required(CONF_AUTHTOKEN): cv.string,
})]),
}, extra=vol.ALLOW_EXTRA)
SCHEMA_ACTIVATE_ECO_MODE_WITH_DURATION = vol.Schema({
vol.Required(ATTR_DURATION): cv.positive_int,
vol.Optional(ATTR_ACCESSPOINT_ID):
vol.All(str, vol.Length(min=24, max=24)),
})
SCHEMA_ACTIVATE_ECO_MODE_WITH_PERIOD = vol.Schema({
vol.Required(ATTR_ENDTIME): cv.datetime,
vol.Optional(ATTR_ACCESSPOINT_ID):
vol.All(str, vol.Length(min=24, max=24)),
})
SCHEMA_ACTIVATE_VACATION = vol.Schema({
vol.Required(ATTR_ENDTIME): cv.datetime,
vol.Required(ATTR_TEMPERATURE, default=18.0):
vol.All(vol.Coerce(float), vol.Range(min=0, max=55)),
vol.Optional(ATTR_ACCESSPOINT_ID):
vol.All(str, vol.Length(min=24, max=24)),
})
SCHEMA_DEACTIVATE_ECO_MODE = vol.Schema({
vol.Optional(ATTR_ACCESSPOINT_ID):
vol.All(str, vol.Length(min=24, max=24)),
})
SCHEMA_DEACTIVATE_VACATION = vol.Schema({
vol.Optional(ATTR_ACCESSPOINT_ID):
vol.All(str, vol.Length(min=24, max=24)),
})
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
hass.data[DOMAIN] = {}
accesspoints = config.get(DOMAIN, [])
for conf in accesspoints:
if conf[CONF_ACCESSPOINT] not in configured_haps(hass):
hass.async_add_job(hass.config_entries.flow.async_init(
DOMAIN, context={'source': config_entries.SOURCE_IMPORT},
data={
HMIPC_HAPID: conf[CONF_ACCESSPOINT],
HMIPC_AUTHTOKEN: conf[CONF_AUTHTOKEN],
HMIPC_NAME: conf[CONF_NAME],
}
))
async def _async_activate_eco_mode_with_duration(service):
duration = service.data[ATTR_DURATION]
hapid = service.data.get(ATTR_ACCESSPOINT_ID)
if hapid:
home = _get_home(hapid)
if home:
await home.activate_absence_with_duration(duration)
else:
for hapid in hass.data[DOMAIN]:
home = hass.data[DOMAIN][hapid].home
await home.activate_absence_with_duration(duration)
hass.services.async_register(
DOMAIN, SERVICE_ACTIVATE_ECO_MODE_WITH_DURATION,
_async_activate_eco_mode_with_duration,
schema=SCHEMA_ACTIVATE_ECO_MODE_WITH_DURATION)
async def _async_activate_eco_mode_with_period(service):
endtime = service.data[ATTR_ENDTIME]
hapid = service.data.get(ATTR_ACCESSPOINT_ID)
if hapid:
home = _get_home(hapid)
if home:
await home.activate_absence_with_period(endtime)
else:
for hapid in hass.data[DOMAIN]:
home = hass.data[DOMAIN][hapid].home
await home.activate_absence_with_period(endtime)
hass.services.async_register(
DOMAIN, SERVICE_ACTIVATE_ECO_MODE_WITH_PERIOD,
_async_activate_eco_mode_with_period,
schema=SCHEMA_ACTIVATE_ECO_MODE_WITH_PERIOD)
async def _async_activate_vacation(service):
endtime = service.data[ATTR_ENDTIME]
temperature = service.data[ATTR_TEMPERATURE]
hapid = service.data.get(ATTR_ACCESSPOINT_ID)
if hapid:
home = _get_home(hapid)
if home:
await home.activate_vacation(endtime, temperature)
else:
for hapid in hass.data[DOMAIN]:
home = hass.data[DOMAIN][hapid].home
await home.activate_vacation(endtime, temperature)
hass.services.async_register(
DOMAIN, SERVICE_ACTIVATE_VACATION, _async_activate_vacation,
schema=SCHEMA_ACTIVATE_VACATION)
async def _async_deactivate_eco_mode(service):
hapid = service.data.get(ATTR_ACCESSPOINT_ID)
if hapid:
home = _get_home(hapid)
if home:
await home.deactivate_absence()
else:
for hapid in hass.data[DOMAIN]:
home = hass.data[DOMAIN][hapid].home
await home.deactivate_absence()
hass.services.async_register(
DOMAIN, SERVICE_DEACTIVATE_ECO_MODE, _async_deactivate_eco_mode,
schema=SCHEMA_DEACTIVATE_ECO_MODE)
async def _async_deactivate_vacation(service):
hapid = service.data.get(ATTR_ACCESSPOINT_ID)
if hapid:
home = _get_home(hapid)
if home:
await home.deactivate_vacation()
else:
for hapid in hass.data[DOMAIN]:
home = hass.data[DOMAIN][hapid].home
await home.deactivate_vacation()
hass.services.async_register(
DOMAIN, SERVICE_DEACTIVATE_VACATION, _async_deactivate_vacation,
schema=SCHEMA_DEACTIVATE_VACATION)
def _get_home(hapid: str):
hap = hass.data[DOMAIN][hapid]
if hap:
return hap.home
return None
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
hap = HomematicipHAP(hass, entry)
hapid = entry.data[HMIPC_HAPID].replace('-', '').upper()
hass.data[DOMAIN][hapid] = hap
if not await hap.async_setup():
return False
device_registry = await dr.async_get_registry(hass)
home = hap.home
hapname = home.label \
if not home.name else "{} {}".format(home.label, home.name)
device_registry.async_get_or_create(
config_entry_id=home.id,
identifiers={(DOMAIN, home.id)},
manufacturer='eQ-3',
name=hapname,
model=home.modelType,
sw_version=home.currentAPVersion,
)
return True
async def async_unload_entry(hass, entry):
hap = hass.data[DOMAIN].pop(entry.data[HMIPC_HAPID])
return await hap.async_reset()
| true | true |
f73ce7007dcf428381faaa5cba3ed159e4dd0c92 | 1,833 | py | Python | hug/input_format.py | alisaifee/hug | bfd9b56fb5ce2a8c994219fa5941c28bc7f37bab | [
"MIT"
] | null | null | null | hug/input_format.py | alisaifee/hug | bfd9b56fb5ce2a8c994219fa5941c28bc7f37bab | [
"MIT"
] | null | null | null | hug/input_format.py | alisaifee/hug | bfd9b56fb5ce2a8c994219fa5941c28bc7f37bab | [
"MIT"
] | null | null | null | """hug/input_formats.py
Defines the built-in Hug input_formatting handlers
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import json as json_converter
import re
from hug.format import content_type
UNDERSCORE = (re.compile('(.)([A-Z][a-z]+)'), re.compile('([a-z0-9])([A-Z])'))
@content_type('application/json')
def json(body):
return json_converter.loads(body)
def _under_score_dict(dictionary):
new_dictionary = {}
for key, value in dictionary.items():
if isinstance(value, dict):
value = _under_score_dict(value)
if isinstance(key, str):
key = UNDERSCORE[1].sub(r'\1_\2', UNDERSCORE[0].sub(r'\1_\2', key)).lower()
new_dictionary[key] = value
return new_dictionary
def json_underscore(body):
return _under_score_dict(json(body))
| 38.1875 | 112 | 0.7485 | import json as json_converter
import re
from hug.format import content_type
UNDERSCORE = (re.compile('(.)([A-Z][a-z]+)'), re.compile('([a-z0-9])([A-Z])'))
@content_type('application/json')
def json(body):
return json_converter.loads(body)
def _under_score_dict(dictionary):
new_dictionary = {}
for key, value in dictionary.items():
if isinstance(value, dict):
value = _under_score_dict(value)
if isinstance(key, str):
key = UNDERSCORE[1].sub(r'\1_\2', UNDERSCORE[0].sub(r'\1_\2', key)).lower()
new_dictionary[key] = value
return new_dictionary
def json_underscore(body):
return _under_score_dict(json(body))
| true | true |
f73ce815f021946c17d274a89a2a9a2f8ef1867c | 1,388 | py | Python | setup.py | shx2/apegears | 3fa0408a15df3817fd206d1086d7e49e1b60594c | [
"MIT"
] | 5 | 2020-08-31T20:04:04.000Z | 2022-01-15T17:09:42.000Z | setup.py | shx2/apegears | 3fa0408a15df3817fd206d1086d7e49e1b60594c | [
"MIT"
] | 5 | 2020-10-23T02:41:45.000Z | 2020-11-03T02:27:57.000Z | setup.py | shx2/apegears | 3fa0408a15df3817fd206d1086d7e49e1b60594c | [
"MIT"
] | 1 | 2020-10-23T02:48:08.000Z | 2020-10-23T02:48:08.000Z | #!/usr/bin/env python3
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
# Read version info from apegears/version.py
version_vars = {}
with open(path.join("apegears", "version.py")) as fp:
exec(fp.read(), version_vars)
version_string = version_vars['__version_string__']
setup(
name='apegears',
version=version_string,
description='An improved ArgumentParser, fully compatible with argparse.',
long_description=long_description,
url='https://github.com/shx2/apegears',
author='shx2',
author_email='shx222@gmail.com',
license='MIT',
packages=find_packages(exclude=['tests*']),
platforms = ["POSIX", "Windows"],
install_requires=[],
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Programming Language :: Python :: 3',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development',
'License :: OSI Approved :: MIT License',
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
],
keywords='CLI argparse ArgumentParser optparse func_argparse',
)
| 30.173913 | 78 | 0.682277 |
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
version_vars = {}
with open(path.join("apegears", "version.py")) as fp:
exec(fp.read(), version_vars)
version_string = version_vars['__version_string__']
setup(
name='apegears',
version=version_string,
description='An improved ArgumentParser, fully compatible with argparse.',
long_description=long_description,
url='https://github.com/shx2/apegears',
author='shx2',
author_email='shx222@gmail.com',
license='MIT',
packages=find_packages(exclude=['tests*']),
platforms = ["POSIX", "Windows"],
install_requires=[],
classifiers=[
'Programming Language :: Python :: 3',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development',
'License :: OSI Approved :: MIT License',
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
],
keywords='CLI argparse ArgumentParser optparse func_argparse',
)
| true | true |
f73ce82d3f49ea2cb2c39a15d79a96db93baddc9 | 416 | py | Python | Day01-15/04_loopTest_1.py | MaoningGuan/-Python-100-Days | d36e49d67a134278455438348efc41ffb28b778a | [
"MIT"
] | null | null | null | Day01-15/04_loopTest_1.py | MaoningGuan/-Python-100-Days | d36e49d67a134278455438348efc41ffb28b778a | [
"MIT"
] | null | null | null | Day01-15/04_loopTest_1.py | MaoningGuan/-Python-100-Days | d36e49d67a134278455438348efc41ffb28b778a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
猜数字游戏
"""
import random
answer = random.randint(1, 100)
counter = 0
while True:
counter += 1
number = int(input('请输入你猜测的数字:'))
if number > answer:
print('大一点')
elif number < answer:
print('小一点')
else:
print('猜测的数字正确,正确数字为:%d' % answer)
break
print('你总共猜了%d次。' % counter)
if counter > 7:
print('你的智商和运气都不太行哦。') | 18.909091 | 42 | 0.572115 |
import random
answer = random.randint(1, 100)
counter = 0
while True:
counter += 1
number = int(input('请输入你猜测的数字:'))
if number > answer:
print('大一点')
elif number < answer:
print('小一点')
else:
print('猜测的数字正确,正确数字为:%d' % answer)
break
print('你总共猜了%d次。' % counter)
if counter > 7:
print('你的智商和运气都不太行哦。') | true | true |
f73ce9977a469a3c04e0293258199b7b5438dcdf | 2,943 | py | Python | oss_scripts/pip_package/setup.py | vbod/text | 07c044b8b851ace1e9a033c9597cdb1bee2d69e0 | [
"Apache-2.0"
] | null | null | null | oss_scripts/pip_package/setup.py | vbod/text | 07c044b8b851ace1e9a033c9597cdb1bee2d69e0 | [
"Apache-2.0"
] | null | null | null | oss_scripts/pip_package/setup.py | vbod/text | 07c044b8b851ace1e9a033c9597cdb1bee2d69e0 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2020 TF.Text Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF.Text is a TensorFlow library of text related ops, modules, and subgraphs.
TF.Text is a TensorFlow library of text related ops, modules, and subgraphs. The
library can perform the preprocessing regularly required by text-based models,
and includes other features useful for sequence modeling not provided by core
TensorFlow.
See the README on GitHub for further documentation.
http://github.com/tensorflow/text
"""
import os
from setuptools import find_packages
from setuptools import setup
from setuptools.command.install import install
from setuptools.dist import Distribution
project_name = 'tensorflow-text'
project_version = '2.3.0-rc0'
class BinaryDistribution(Distribution):
"""This class is needed in order to create OS specific wheels."""
def is_pure(self):
return False
def has_ext_modules(self):
return True
class InstallPlatlib(install):
"""This is needed to set the library to platlib compliant."""
def finalize_options(self):
"""For more info; see http://github.com/google/or-tools/issues/616 ."""
install.finalize_options(self)
self.install_lib = self.install_platlib
self.install_libbase = self.install_lib
self.install_lib = os.path.join(self.install_lib, self.extra_dirs)
DOCLINES = __doc__.split('\n')
setup(
name=project_name,
version=project_version.replace('-', ''),
description=DOCLINES[0],
long_description='\n'.join(DOCLINES[2:]),
author='Google Inc.',
author_email='packages@tensorflow.org',
url='http://github.com/tensorflow/text',
license='Apache 2.0',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
cmdclass={'install': InstallPlatlib},
distclass=BinaryDistribution,
install_requires=[
'tensorflow>=2.3.0rc0, <2.4',
'tensorflow_hub>=0.8.0',
],
extras_require={
'tensorflow_gpu': ['tensorflow-gpu>=2.1.0, <2.2',],
'tests': [
'absl-py',
'pytest',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
keywords='tensorflow text machine learning',
)
| 30.978947 | 80 | 0.706762 |
import os
from setuptools import find_packages
from setuptools import setup
from setuptools.command.install import install
from setuptools.dist import Distribution
project_name = 'tensorflow-text'
project_version = '2.3.0-rc0'
class BinaryDistribution(Distribution):
def is_pure(self):
return False
def has_ext_modules(self):
return True
class InstallPlatlib(install):
def finalize_options(self):
install.finalize_options(self)
self.install_lib = self.install_platlib
self.install_libbase = self.install_lib
self.install_lib = os.path.join(self.install_lib, self.extra_dirs)
DOCLINES = __doc__.split('\n')
setup(
name=project_name,
version=project_version.replace('-', ''),
description=DOCLINES[0],
long_description='\n'.join(DOCLINES[2:]),
author='Google Inc.',
author_email='packages@tensorflow.org',
url='http://github.com/tensorflow/text',
license='Apache 2.0',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
cmdclass={'install': InstallPlatlib},
distclass=BinaryDistribution,
install_requires=[
'tensorflow>=2.3.0rc0, <2.4',
'tensorflow_hub>=0.8.0',
],
extras_require={
'tensorflow_gpu': ['tensorflow-gpu>=2.1.0, <2.2',],
'tests': [
'absl-py',
'pytest',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
keywords='tensorflow text machine learning',
)
| true | true |
f73ce9f26fea6e4a4b1d6562f5a98d048bd85bc5 | 455 | py | Python | newsapp/migrations/0005_business_image.py | Esther-Anyona/four-one-one | 6a5e019b35710941a669c1b49e993b683c99d615 | [
"MIT"
] | null | null | null | newsapp/migrations/0005_business_image.py | Esther-Anyona/four-one-one | 6a5e019b35710941a669c1b49e993b683c99d615 | [
"MIT"
] | null | null | null | newsapp/migrations/0005_business_image.py | Esther-Anyona/four-one-one | 6a5e019b35710941a669c1b49e993b683c99d615 | [
"MIT"
] | null | null | null | # Generated by Django 4.0.3 on 2022-03-24 08:24
import cloudinary.models
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('newsapp', '0004_profile_bio'),
]
operations = [
migrations.AddField(
model_name='business',
name='image',
field=cloudinary.models.CloudinaryField(max_length=255, null=True, verbose_name='business_image'),
),
]
| 22.75 | 110 | 0.637363 |
import cloudinary.models
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('newsapp', '0004_profile_bio'),
]
operations = [
migrations.AddField(
model_name='business',
name='image',
field=cloudinary.models.CloudinaryField(max_length=255, null=True, verbose_name='business_image'),
),
]
| true | true |
f73cea51eeb82a826d94a824065fe954fcfdc93e | 52,944 | py | Python | quarkchain/cluster/master.py | tim-yoshi/pyquarkchain | 1847542c166a180b5ffc3c6e917751be85fa15a6 | [
"MIT"
] | 3 | 2019-03-14T17:08:07.000Z | 2019-10-02T11:13:53.000Z | quarkchain/cluster/master.py | tim-yoshi/pyquarkchain | 1847542c166a180b5ffc3c6e917751be85fa15a6 | [
"MIT"
] | null | null | null | quarkchain/cluster/master.py | tim-yoshi/pyquarkchain | 1847542c166a180b5ffc3c6e917751be85fa15a6 | [
"MIT"
] | 1 | 2019-05-04T22:57:29.000Z | 2019-05-04T22:57:29.000Z | import argparse
import asyncio
import os
import psutil
import random
import time
from collections import deque
from typing import Optional, List, Union, Dict, Tuple
from quarkchain.cluster.guardian import Guardian
from quarkchain.cluster.miner import Miner, MiningWork, validate_seal
from quarkchain.cluster.p2p_commands import (
CommandOp,
Direction,
GetRootBlockHeaderListRequest,
GetRootBlockListRequest,
)
from quarkchain.cluster.protocol import (
ClusterMetadata,
ClusterConnection,
P2PConnection,
ROOT_BRANCH,
NULL_CONNECTION,
)
from quarkchain.cluster.root_state import RootState
from quarkchain.cluster.rpc import (
AddMinorBlockHeaderResponse,
GetEcoInfoListRequest,
GetNextBlockToMineRequest,
GetUnconfirmedHeadersRequest,
GetAccountDataRequest,
AddTransactionRequest,
AddRootBlockRequest,
AddMinorBlockRequest,
CreateClusterPeerConnectionRequest,
DestroyClusterPeerConnectionCommand,
SyncMinorBlockListRequest,
GetMinorBlockRequest,
GetTransactionRequest,
ArtificialTxConfig,
MineRequest,
GenTxRequest,
GetLogResponse,
GetLogRequest,
ShardStats,
EstimateGasRequest,
GetStorageRequest,
GetCodeRequest,
GasPriceRequest,
GetWorkRequest,
GetWorkResponse,
SubmitWorkRequest,
SubmitWorkResponse,
)
from quarkchain.cluster.rpc import (
ConnectToSlavesRequest,
ClusterOp,
CLUSTER_OP_SERIALIZER_MAP,
ExecuteTransactionRequest,
Ping,
GetTransactionReceiptRequest,
GetTransactionListByAddressRequest,
)
from quarkchain.cluster.simple_network import SimpleNetwork
from quarkchain.config import RootConfig
from quarkchain.env import DEFAULT_ENV
from quarkchain.core import (
Branch,
ChainMask,
Log,
Address,
RootBlock,
TransactionReceipt,
TypedTransaction,
MinorBlock,
)
from quarkchain.db import PersistentDb
from quarkchain.p2p.p2p_manager import P2PManager
from quarkchain.utils import Logger, check, time_ms
from quarkchain.cluster.cluster_config import ClusterConfig
TIMEOUT = 10
class SyncTask:
    """ Given a header and a peer, the task will synchronize the local state
    including root chain and shards with the peer up to the height of the header.
    """

    def __init__(self, header, peer):
        self.header = header
        self.peer = peer
        self.master_server = peer.master_server
        self.root_state = peer.root_state
        # Maximum height gap below our tip beyond which we refuse to re-sync a fork.
        self.max_staleness = (
            self.root_state.env.quark_chain_config.ROOT.MAX_STALE_ROOT_BLOCK_HEIGHT_DIFF
        )

    async def sync(self):
        """Run the synchronization; on any failure close the peer connection."""
        try:
            await self.__run_sync()
        except Exception as e:
            Logger.log_exception()
            self.peer.close_with_error(str(e))

    async def __run_sync(self):
        """raise on any error so that sync() will close peer connection"""
        if self.__has_block_hash(self.header.get_hash()):
            return

        # descending height
        block_header_chain = [self.header]

        # Walk backwards from the announced header until we hit a block we know.
        while not self.__has_block_hash(block_header_chain[-1].hash_prev_block):
            block_hash = block_header_chain[-1].hash_prev_block
            height = block_header_chain[-1].height - 1

            # abort if we have to download super old blocks
            if self.root_state.tip.height - height > self.max_staleness:
                Logger.warning(
                    "[R] abort syncing due to forking at super old block {} << {}".format(
                        height, self.root_state.tip.height
                    )
                )
                return

            Logger.info(
                "[R] downloading block header list from {} {}".format(
                    height, block_hash.hex()
                )
            )
            block_header_list = await asyncio.wait_for(
                self.__download_block_headers(block_hash), TIMEOUT
            )
            self.__validate_block_headers(block_header_list)
            for header in block_header_list:
                if self.__has_block_hash(header.get_hash()):
                    break
                block_header_chain.append(header)

        # Switch to ascending height for block download and addition.
        block_header_chain.reverse()

        Logger.info(
            "[R] going to download {} blocks ({} - {})".format(
                len(block_header_chain),
                block_header_chain[0].height,
                block_header_chain[-1].height,
            )
        )

        while len(block_header_chain) > 0:
            # Download and apply in batches of 100 headers.
            block_chain = await asyncio.wait_for(
                self.__download_blocks(block_header_chain[:100]), TIMEOUT
            )
            Logger.info(
                "[R] downloaded {} blocks ({} - {}) from peer".format(
                    len(block_chain),
                    block_chain[0].header.height,
                    block_chain[-1].header.height,
                )
            )
            if len(block_chain) != len(block_header_chain[:100]):
                # TODO: tag bad peer
                raise RuntimeError("Bad peer missing blocks for headers they have")

            for block in block_chain:
                await self.__add_block(block)
                block_header_chain.pop(0)

    def __has_block_hash(self, block_hash):
        return self.root_state.contain_root_block_by_hash(block_hash)

    def __validate_block_headers(self, block_header_list):
        """Raise on validation failure"""
        # TODO: tag bad peer
        consensus_type = self.root_state.env.quark_chain_config.ROOT.CONSENSUS_TYPE
        for i in range(len(block_header_list) - 1):
            header, prev = block_header_list[i : i + 2]
            if header.height != prev.height + 1:
                raise RuntimeError(
                    "Bad peer sending root block headers with discontinuous height"
                )
            if header.hash_prev_block != prev.get_hash():
                raise RuntimeError(
                    "Bad peer sending root block headers with discontinuous hash_prev_block"
                )
            # check difficulty, potentially adjusted by guardian mechanism
            adjusted_diff = None  # type: Optional[int]
            if not self.root_state.env.quark_chain_config.SKIP_ROOT_DIFFICULTY_CHECK:
                # lower the difficulty for root block signed by guardian
                if header.verify_signature(
                    self.root_state.env.quark_chain_config.guardian_public_key
                ):
                    adjusted_diff = Guardian.adjust_difficulty(
                        header.difficulty, header.height
                    )
            # check PoW if applicable
            validate_seal(header, consensus_type, adjusted_diff=adjusted_diff)

    async def __download_block_headers(self, block_hash):
        request = GetRootBlockHeaderListRequest(
            block_hash=block_hash, limit=500, direction=Direction.GENESIS
        )
        op, resp, rpc_id = await self.peer.write_rpc_request(
            CommandOp.GET_ROOT_BLOCK_HEADER_LIST_REQUEST, request
        )
        return resp.block_header_list

    async def __download_blocks(self, block_header_list):
        block_hash_list = [b.get_hash() for b in block_header_list]
        op, resp, rpc_id = await self.peer.write_rpc_request(
            CommandOp.GET_ROOT_BLOCK_LIST_REQUEST,
            GetRootBlockListRequest(block_hash_list),
        )
        return resp.root_block_list

    async def __add_block(self, root_block):
        Logger.info(
            "[R] syncing root block {} {}".format(
                root_block.header.height, root_block.header.get_hash().hex()
            )
        )
        start = time.time()
        await self.__sync_minor_blocks(root_block.minor_block_header_list)
        await self.master_server.add_root_block(root_block)
        elapse = time.time() - start
        Logger.info(
            "[R] syncing root block {} {} took {:.2f} seconds".format(
                root_block.header.height, root_block.header.get_hash().hex(), elapse
            )
        )

    async def __sync_minor_blocks(self, minor_block_header_list):
        """Make sure every minor block referenced by a root block is validated locally."""
        minor_block_download_map = dict()
        for m_block_header in minor_block_header_list:
            m_block_hash = m_block_header.get_hash()
            if not self.root_state.is_minor_block_validated(m_block_hash):
                minor_block_download_map.setdefault(m_block_header.branch, []).append(
                    m_block_hash
                )

        future_list = []
        for branch, m_block_hash_list in minor_block_download_map.items():
            slave_conn = self.master_server.get_slave_connection(branch=branch)
            future = slave_conn.write_rpc_request(
                op=ClusterOp.SYNC_MINOR_BLOCK_LIST_REQUEST,
                cmd=SyncMinorBlockListRequest(
                    m_block_hash_list, branch, self.peer.get_cluster_peer_id()
                ),
            )
            future_list.append(future)

        # FIX: collect exceptions from gather() so a failed branch download is
        # reported via the intended RuntimeError. The previous code compared
        # `result is Exception` (identity with the class), which could never be
        # true, and gather() without return_exceptions never returns exception
        # objects anyway.
        result_list = await asyncio.gather(*future_list, return_exceptions=True)
        for result in result_list:
            if isinstance(result, Exception):
                raise RuntimeError(
                    "Unable to download minor blocks from root block with exception {}".format(
                        result
                    )
                )
            _, result, _ = result
            if result.error_code != 0:
                raise RuntimeError("Unable to download minor blocks from root block")
            if result.shard_stats:
                self.master_server.update_shard_stats(result.shard_stats)
            for k, v in result.block_coinbase_map.items():
                self.root_state.add_validated_minor_block_hash(k, v.balance_map)

        for m_header in minor_block_header_list:
            if not self.root_state.is_minor_block_validated(m_header.get_hash()):
                raise RuntimeError(
                    "minor block is still unavailable in master after root block sync"
                )
class Synchronizer:
    """ Buffer the headers received from peer and sync one by one """

    def __init__(self):
        # peer -> most recent root block header announced by that peer
        self.tasks = dict()
        # True while the __run() coroutine is draining the task queue
        self.running = False
        # (header, peer) currently being synced, or None when idle
        self.running_task = None

    def add_task(self, header, peer):
        """Record (or overwrite) the sync target for a peer and start the runner if idle."""
        self.tasks[peer] = header
        Logger.info(
            "[R] added {} {} to sync queue (running={})".format(
                header.height, header.get_hash().hex(), self.running
            )
        )
        if not self.running:
            # set the flag before scheduling so a re-entrant add_task cannot
            # start a second runner
            self.running = True
            asyncio.ensure_future(self.__run())

    def get_stats(self):
        """Return a JSON-friendly snapshot of the running and queued sync tasks."""
        def _task_to_dict(peer, header):
            return {
                "peerId": peer.id.hex(),
                "peerIp": str(peer.ip),
                "peerPort": peer.port,
                "rootHeight": header.height,
                "rootHash": header.get_hash().hex(),
            }

        return {
            "runningTask": _task_to_dict(self.running_task[1], self.running_task[0])
            if self.running_task
            else None,
            "queuedTasks": [
                _task_to_dict(peer, header) for peer, header in self.tasks.items()
            ],
        }

    def _pop_best_task(self):
        """ pop and return the task with the highest root """
        check(len(self.tasks) > 0)
        peer, header = max(self.tasks.items(), key=lambda pair: pair[1].height)
        del self.tasks[peer]
        return header, peer

    async def __run(self):
        """Drain the queue, always syncing toward the highest known root first."""
        Logger.info("[R] synchronizer started!")
        while len(self.tasks) > 0:
            self.running_task = self._pop_best_task()
            header, peer = self.running_task
            task = SyncTask(header, peer)
            Logger.info(
                "[R] start sync task {} {}".format(
                    header.height, header.get_hash().hex()
                )
            )
            await task.sync()
            Logger.info(
                "[R] done sync task {} {}".format(
                    header.height, header.get_hash().hex()
                )
            )
        self.running = False
        self.running_task = None
        Logger.info("[R] synchronizer finished!")
class SlaveConnection(ClusterConnection):
    """Master-side connection to one slave process; wraps the cluster RPCs."""

    # No non-RPC commands are handled on this connection.
    OP_NONRPC_MAP = {}

    def __init__(
        self, env, reader, writer, master_server, slave_id, chain_mask_list, name=None
    ):
        # OP_RPC_MAP is the module-level dict defined after this class body.
        super().__init__(
            env,
            reader,
            writer,
            CLUSTER_OP_SERIALIZER_MAP,
            self.OP_NONRPC_MAP,
            OP_RPC_MAP,
            name=name,
        )
        self.master_server = master_server
        self.id = slave_id
        self.chain_mask_list = chain_mask_list
        check(len(chain_mask_list) > 0)

        asyncio.ensure_future(self.active_and_loop_forever())

    def get_connection_to_forward(self, metadata):
        """ Override ProxyConnection.get_connection_to_forward()
        Forward traffic from slave to peer
        """
        if metadata.cluster_peer_id == 0:
            return None

        peer = self.master_server.get_peer(metadata.cluster_peer_id)
        if peer is None:
            return NULL_CONNECTION

        return peer

    def validate_connection(self, connection):
        """Only the null sink or a real P2P peer connection may be forwarded to."""
        return connection == NULL_CONNECTION or isinstance(connection, P2PConnection)

    def has_shard(self, full_shard_id: int):
        """Return True if this slave serves the given full shard id."""
        for chain_mask in self.chain_mask_list:
            if chain_mask.contain_full_shard_id(full_shard_id):
                return True
        return False

    def has_overlap(self, chain_mask: ChainMask):
        """Return True if this slave's chain masks overlap the given mask."""
        for local_chain_mask in self.chain_mask_list:
            if local_chain_mask.has_overlap(chain_mask):
                return True
        return False

    async def send_ping(self, initialize_shard_state=False):
        """Ping the slave; optionally ship the root tip so it initializes shard state."""
        root_block = (
            self.master_server.root_state.get_tip_block()
            if initialize_shard_state
            else None
        )
        req = Ping("", [], root_block)
        op, resp, rpc_id = await self.write_rpc_request(
            op=ClusterOp.PING,
            cmd=req,
            metadata=ClusterMetadata(branch=ROOT_BRANCH, cluster_peer_id=0),
        )
        return resp.id, resp.chain_mask_list

    async def send_connect_to_slaves(self, slave_info_list):
        """ Make slave connect to other slaves.
        Returns True on success
        """
        req = ConnectToSlavesRequest(slave_info_list)
        op, resp, rpc_id = await self.write_rpc_request(
            ClusterOp.CONNECT_TO_SLAVES_REQUEST, req
        )
        check(len(resp.result_list) == len(slave_info_list))
        for i, result in enumerate(resp.result_list):
            if len(result) > 0:
                Logger.info(
                    "Slave {} failed to connect to {} with error {}".format(
                        self.id, slave_info_list[i].id, result
                    )
                )
                return False
        Logger.info("Slave {} connected to other slaves successfully".format(self.id))
        return True

    def close(self):
        # Losing any slave makes the cluster incomplete, so take the master down.
        Logger.info(
            "Lost connection with slave {}. Shutting down master ...".format(self.id)
        )
        super().close()
        self.master_server.shutdown()

    def close_with_error(self, error):
        Logger.info("Closing connection with slave {}".format(self.id))
        return super().close_with_error(error)

    async def add_transaction(self, tx):
        """Submit a transaction to this slave; True iff accepted."""
        request = AddTransactionRequest(tx)
        _, resp, _ = await self.write_rpc_request(
            ClusterOp.ADD_TRANSACTION_REQUEST, request
        )
        return resp.error_code == 0

    async def execute_transaction(
        self, tx: TypedTransaction, from_address, block_height: Optional[int]
    ):
        """Execute a transaction without committing; returns result bytes or None."""
        request = ExecuteTransactionRequest(tx, from_address, block_height)
        _, resp, _ = await self.write_rpc_request(
            ClusterOp.EXECUTE_TRANSACTION_REQUEST, request
        )
        return resp.result if resp.error_code == 0 else None

    async def get_minor_block_by_hash(self, block_hash, branch):
        """Fetch a minor block by hash; None if not found or on error."""
        request = GetMinorBlockRequest(branch, minor_block_hash=block_hash)
        _, resp, _ = await self.write_rpc_request(
            ClusterOp.GET_MINOR_BLOCK_REQUEST, request
        )
        if resp.error_code != 0:
            return None
        return resp.minor_block

    async def get_minor_block_by_height(self, height, branch):
        """Fetch a minor block by height; None if not found or on error."""
        request = GetMinorBlockRequest(branch, height=height)
        _, resp, _ = await self.write_rpc_request(
            ClusterOp.GET_MINOR_BLOCK_REQUEST, request
        )
        if resp.error_code != 0:
            return None
        return resp.minor_block

    async def get_transaction_by_hash(self, tx_hash, branch):
        """Return (minor_block, index) for the tx, or (None, None) on error."""
        request = GetTransactionRequest(tx_hash, branch)
        _, resp, _ = await self.write_rpc_request(
            ClusterOp.GET_TRANSACTION_REQUEST, request
        )
        if resp.error_code != 0:
            return None, None
        return resp.minor_block, resp.index

    async def get_transaction_receipt(self, tx_hash, branch):
        """Return (minor_block, index, receipt) for the tx, or None on error."""
        request = GetTransactionReceiptRequest(tx_hash, branch)
        _, resp, _ = await self.write_rpc_request(
            ClusterOp.GET_TRANSACTION_RECEIPT_REQUEST, request
        )
        if resp.error_code != 0:
            return None
        return resp.minor_block, resp.index, resp.receipt

    async def get_transactions_by_address(self, address, start, limit):
        """Return (tx_list, next_cursor) for the address, or None on error."""
        request = GetTransactionListByAddressRequest(address, start, limit)
        _, resp, _ = await self.write_rpc_request(
            ClusterOp.GET_TRANSACTION_LIST_BY_ADDRESS_REQUEST, request
        )
        if resp.error_code != 0:
            return None
        return resp.tx_list, resp.next

    async def get_logs(
        self,
        branch: Branch,
        addresses: List[Address],
        topics: List[List[bytes]],
        start_block: int,
        end_block: int,
    ) -> Optional[List[Log]]:
        """Query event logs in [start_block, end_block] for the branch."""
        request = GetLogRequest(branch, addresses, topics, start_block, end_block)
        _, resp, _ = await self.write_rpc_request(
            ClusterOp.GET_LOG_REQUEST, request
        )  # type: GetLogResponse
        return resp.logs if resp.error_code == 0 else None

    async def estimate_gas(
        self, tx: TypedTransaction, from_address: Address
    ) -> Optional[int]:
        """Estimate gas for the transaction; None on error."""
        request = EstimateGasRequest(tx, from_address)
        _, resp, _ = await self.write_rpc_request(
            ClusterOp.ESTIMATE_GAS_REQUEST, request
        )
        return resp.result if resp.error_code == 0 else None

    async def get_storage_at(
        self, address: Address, key: int, block_height: Optional[int]
    ) -> Optional[bytes]:
        """Read a contract storage slot; None on error."""
        request = GetStorageRequest(address, key, block_height)
        _, resp, _ = await self.write_rpc_request(
            ClusterOp.GET_STORAGE_REQUEST, request
        )
        return resp.result if resp.error_code == 0 else None

    async def get_code(
        self, address: Address, block_height: Optional[int]
    ) -> Optional[bytes]:
        """Fetch contract code at the address; None on error."""
        request = GetCodeRequest(address, block_height)
        _, resp, _ = await self.write_rpc_request(ClusterOp.GET_CODE_REQUEST, request)
        return resp.result if resp.error_code == 0 else None

    async def gas_price(self, branch: Branch) -> Optional[int]:
        """Query the branch's suggested gas price; None on error."""
        request = GasPriceRequest(branch)
        _, resp, _ = await self.write_rpc_request(ClusterOp.GAS_PRICE_REQUEST, request)
        return resp.result if resp.error_code == 0 else None

    async def get_work(self, branch: Branch) -> Optional[MiningWork]:
        """Fetch remote-mining work for the branch; None on error."""
        request = GetWorkRequest(branch)
        _, resp, _ = await self.write_rpc_request(ClusterOp.GET_WORK_REQUEST, request)
        get_work_resp = resp  # type: GetWorkResponse
        if get_work_resp.error_code != 0:
            return None
        return MiningWork(
            get_work_resp.header_hash, get_work_resp.height, get_work_resp.difficulty
        )

    async def submit_work(
        self, branch: Branch, header_hash: bytes, nonce: int, mixhash: bytes
    ) -> bool:
        """Submit a remote-mining solution; True iff the slave accepted it."""
        request = SubmitWorkRequest(branch, header_hash, nonce, mixhash)
        _, resp, _ = await self.write_rpc_request(
            ClusterOp.SUBMIT_WORK_REQUEST, request
        )
        submit_work_resp = resp  # type: SubmitWorkResponse
        return submit_work_resp.error_code == 0 and submit_work_resp.success

    # RPC handlers

    async def handle_add_minor_block_header_request(self, req):
        """Record a slave-mined minor block header and refresh master-side stats."""
        self.master_server.root_state.add_validated_minor_block_hash(
            req.minor_block_header.get_hash(), req.coinbase_amount_map.balance_map
        )
        self.master_server.update_shard_stats(req.shard_stats)
        self.master_server.update_tx_count_history(
            req.tx_count, req.x_shard_tx_count, req.minor_block_header.create_time
        )
        return AddMinorBlockHeaderResponse(
            error_code=0,
            artificial_tx_config=self.master_server.get_artificial_tx_config(),
        )
# Defined after the class body so the unbound handler can be referenced;
# maps request op -> (response op, handler) for ClusterConnection dispatch.
OP_RPC_MAP = {
    ClusterOp.ADD_MINOR_BLOCK_HEADER_REQUEST: (
        ClusterOp.ADD_MINOR_BLOCK_HEADER_RESPONSE,
        SlaveConnection.handle_add_minor_block_header_request,
    )
}
class MasterServer:
    """ Master node in a cluster
    It does two things to initialize the cluster:
    1. Setup connection with all the slaves in ClusterConfig
    2. Make slaves connect to each other
    """

    def __init__(self, env, root_state, name="master"):
        self.loop = asyncio.get_event_loop()
        self.env = env
        self.root_state = root_state
        self.network = None  # will be set by network constructor
        self.cluster_config = env.cluster_config

        # branch value -> a list of slave running the shard
        self.branch_to_slaves = dict()  # type: Dict[int, List[SlaveConnection]]
        # all connected SlaveConnection objects
        self.slave_pool = set()

        # resolved once every shard is served and slaves are interconnected
        self.cluster_active_future = self.loop.create_future()
        # resolved by shutdown(); do_loop() blocks on it
        self.shutdown_future = self.loop.create_future()
        self.name = name

        # initial target block times come from the consensus config
        self.artificial_tx_config = ArtificialTxConfig(
            target_root_block_time=self.env.quark_chain_config.ROOT.CONSENSUS_CONFIG.TARGET_BLOCK_TIME,
            target_minor_block_time=next(
                iter(self.env.quark_chain_config.shards.values())
            ).CONSENSUS_CONFIG.TARGET_BLOCK_TIME,
        )

        self.synchronizer = Synchronizer()

        self.branch_to_shard_stats = dict()  # type: Dict[int, ShardStats]
        # (epoch in minute, tx_count in the minute)
        self.tx_count_history = deque()

        self.__init_root_miner()
    def __init_root_miner(self):
        """Create the root-chain Miner wired to this master's block producer callbacks."""
        miner_address = Address.create_from(
            self.env.quark_chain_config.ROOT.COINBASE_ADDRESS
        )

        async def __create_block(retry=True):
            # Poll until a root block candidate is available; slaves may report
            # errors transiently.  Returns None only when retry is False.
            while True:
                block = await self.__create_root_block_to_mine(address=miner_address)
                if block:
                    return block
                if not retry:
                    break
                await asyncio.sleep(1)

        def __get_mining_params():
            return {
                "target_block_time": self.get_artificial_tx_config().target_root_block_time
            }

        root_config = self.env.quark_chain_config.ROOT  # type: RootConfig
        self.root_miner = Miner(
            root_config.CONSENSUS_TYPE,
            __create_block,
            self.add_root_block,
            __get_mining_params,
            remote=root_config.CONSENSUS_CONFIG.REMOTE_MINE,
            guardian_private_key=self.env.quark_chain_config.guardian_private_key,
        )
    def get_artificial_tx_config(self):
        """Return the current target block time configuration."""
        return self.artificial_tx_config
def __has_all_shards(self):
""" Returns True if all the shards have been run by at least one node """
return len(self.branch_to_slaves) == len(
self.env.quark_chain_config.get_full_shard_ids()
) and all([len(slaves) > 0 for _, slaves in self.branch_to_slaves.items()])
async def __connect(self, host, port):
""" Retries until success """
Logger.info("Trying to connect {}:{}".format(host, port))
while True:
try:
reader, writer = await asyncio.open_connection(
host, port, loop=self.loop
)
break
except Exception as e:
Logger.info("Failed to connect {} {}: {}".format(host, port, e))
await asyncio.sleep(
self.env.cluster_config.MASTER.MASTER_TO_SLAVE_CONNECT_RETRY_DELAY
)
Logger.info("Connected to {}:{}".format(host, port))
return reader, writer
    async def __connect_to_slaves(self):
        """ Master connects to all the slaves """
        futures = []
        slaves = []
        for slave_info in self.cluster_config.get_slave_info_list():
            host = slave_info.host.decode("ascii")
            reader, writer = await self.__connect(host, slave_info.port)

            slave = SlaveConnection(
                self.env,
                reader,
                writer,
                self,
                slave_info.id,
                slave_info.chain_mask_list,
                name="{}_slave_{}".format(self.name, slave_info.id),
            )
            await slave.wait_until_active()
            # ping all slaves concurrently once each connection is active
            futures.append(slave.send_ping())
            slaves.append(slave)

        results = await asyncio.gather(*futures)

        full_shard_ids = self.env.quark_chain_config.get_full_shard_ids()
        for slave, result in zip(slaves, results):
            # Verify the slave does have the same id and shard mask list as the config file
            id, chain_mask_list = result
            if id != slave.id:
                Logger.error(
                    "Slave id does not match. expect {} got {}".format(slave.id, id)
                )
                self.shutdown()
            if chain_mask_list != slave.chain_mask_list:
                Logger.error(
                    "Slave {} shard mask list does not match. expect {} got {}".format(
                        slave.id, slave.chain_mask_list, chain_mask_list
                    )
                )
                self.shutdown()

            self.slave_pool.add(slave)
            # index the slave under every full shard id it serves
            for full_shard_id in full_shard_ids:
                if slave.has_shard(full_shard_id):
                    self.branch_to_slaves.setdefault(full_shard_id, []).append(slave)
async def __setup_slave_to_slave_connections(self):
""" Make slaves connect to other slaves.
Retries until success.
"""
for slave in self.slave_pool:
await slave.wait_until_active()
success = await slave.send_connect_to_slaves(
self.cluster_config.get_slave_info_list()
)
if not success:
self.shutdown()
async def __init_shards(self):
futures = []
for slave in self.slave_pool:
futures.append(slave.send_ping(initialize_shard_state=True))
await asyncio.gather(*futures)
async def __send_mining_config_to_slaves(self, mining):
futures = []
for slave in self.slave_pool:
request = MineRequest(self.get_artificial_tx_config(), mining)
futures.append(slave.write_rpc_request(ClusterOp.MINE_REQUEST, request))
responses = await asyncio.gather(*futures)
check(all([resp.error_code == 0 for _, resp, _ in responses]))
async def start_mining(self):
await self.__send_mining_config_to_slaves(True)
self.root_miner.start()
Logger.warning(
"Mining started with root block time {} s, minor block time {} s".format(
self.get_artificial_tx_config().target_root_block_time,
self.get_artificial_tx_config().target_minor_block_time,
)
)
    async def stop_mining(self):
        """Disable mining on all slaves, then stop the root-chain miner."""
        await self.__send_mining_config_to_slaves(False)
        self.root_miner.disable()
        Logger.warning("Mining stopped")
def get_slave_connection(self, branch):
# TODO: Support forwarding to multiple connections (for replication)
check(len(self.branch_to_slaves[branch.value]) > 0)
return self.branch_to_slaves[branch.value][0]
def __log_summary(self):
for branch_value, slaves in self.branch_to_slaves.items():
Logger.info(
"[{}] is run by slave {}".format(
Branch(branch_value).to_str(), [s.id for s in slaves]
)
)
    async def __init_cluster(self):
        """Bring the cluster up: connect slaves, wire them together, init shards."""
        await self.__connect_to_slaves()
        self.__log_summary()
        if not self.__has_all_shards():
            # leave cluster_active_future unresolved; callers will stay blocked
            Logger.error("Missing some shards. Check cluster config file!")
            return
        await self.__setup_slave_to_slave_connections()
        await self.__init_shards()

        # unblocks wait_until_cluster_active()
        self.cluster_active_future.set_result(None)
    def start(self):
        """Schedule cluster initialization on the event loop (non-blocking)."""
        self.loop.create_task(self.__init_cluster())
    def do_loop(self):
        """Run the event loop until shutdown is requested; swallow Ctrl-C."""
        try:
            self.loop.run_until_complete(self.shutdown_future)
        except KeyboardInterrupt:
            pass
    def wait_until_cluster_active(self):
        # Wait until cluster is ready (raises if shutdown() ran before startup finished)
        self.loop.run_until_complete(self.cluster_active_future)
def shutdown(self):
# TODO: May set exception and disconnect all slaves
if not self.shutdown_future.done():
self.shutdown_future.set_result(None)
if not self.cluster_active_future.done():
self.cluster_active_future.set_exception(
RuntimeError("failed to start the cluster")
)
    def get_shutdown_future(self):
        """Return the future resolved when shutdown() is called."""
        return self.shutdown_future
    async def __create_root_block_to_mine(self, address) -> Optional[RootBlock]:
        """Assemble a root block candidate from slaves' unconfirmed minor headers.

        Returns None if any slave reports an error.
        """
        futures = []
        for slave in self.slave_pool:
            request = GetUnconfirmedHeadersRequest()
            futures.append(
                slave.write_rpc_request(
                    ClusterOp.GET_UNCONFIRMED_HEADERS_REQUEST, request
                )
            )
        responses = await asyncio.gather(*futures)

        # Slaves may run multiple copies of the same branch
        # branch_value -> HeaderList
        full_shard_id_to_header_list = dict()
        for response in responses:
            _, response, _ = response
            if response.error_code != 0:
                return None
            for headers_info in response.headers_info_list:
                height = 0
                for header in headers_info.header_list:
                    # check headers are ordered by height
                    check(height == 0 or height + 1 == header.height)
                    height = header.height

                    # Filter out the ones unknown to the master
                    if not self.root_state.is_minor_block_validated(header.get_hash()):
                        break
                    full_shard_id_to_header_list.setdefault(
                        headers_info.branch.get_full_shard_id(), []
                    ).append(header)

        # only confirm headers of shards already initialized at the next root height
        header_list = []
        full_shard_ids_to_check = self.env.quark_chain_config.get_initialized_full_shard_ids_before_root_height(
            self.root_state.tip.height + 1
        )
        for full_shard_id in full_shard_ids_to_check:
            headers = full_shard_id_to_header_list.get(full_shard_id, [])
            header_list.extend(headers)

        return self.root_state.create_block_to_mine(header_list, address)
async def __get_minor_block_to_mine(self, branch, address):
request = GetNextBlockToMineRequest(
branch=branch,
address=address.address_in_branch(branch),
artificial_tx_config=self.get_artificial_tx_config(),
)
slave = self.get_slave_connection(branch)
_, response, _ = await slave.write_rpc_request(
ClusterOp.GET_NEXT_BLOCK_TO_MINE_REQUEST, request
)
return response.block if response.error_code == 0 else None
    async def get_next_block_to_mine(
        self, address, branch_value: Optional[int]
    ) -> Optional[Union[RootBlock, MinorBlock]]:
        """Return a root block candidate if branch_value is None, otherwise a
        minor block candidate for that branch; None while syncing or on failure."""
        # Mining old blocks is useless
        if self.synchronizer.running:
            return None

        if branch_value is None:
            root = await self.__create_root_block_to_mine(address)
            return root or None

        block = await self.__get_minor_block_to_mine(Branch(branch_value), address)
        return block or None
    async def get_account_data(self, address: Address):
        """ Returns a dict where key is Branch and value is AccountBranchData """
        futures = []
        for slave in self.slave_pool:
            request = GetAccountDataRequest(address)
            futures.append(
                slave.write_rpc_request(ClusterOp.GET_ACCOUNT_DATA_REQUEST, request)
            )
        responses = await asyncio.gather(*futures)

        # Slaves may run multiple copies of the same branch
        # We only need one AccountBranchData per branch
        branch_to_account_branch_data = dict()
        for response in responses:
            _, response, _ = response
            check(response.error_code == 0)
            for account_branch_data in response.account_branch_data_list:
                branch_to_account_branch_data[
                    account_branch_data.branch
                ] = account_branch_data

        # every configured shard must have reported data
        check(
            len(branch_to_account_branch_data)
            == len(self.env.quark_chain_config.get_full_shard_ids())
        )
        return branch_to_account_branch_data
async def get_primary_account_data(
self, address: Address, block_height: Optional[int] = None
):
# TODO: Only query the shard who has the address
full_shard_id = self.env.quark_chain_config.get_full_shard_id_by_full_shard_key(
address.full_shard_key
)
slaves = self.branch_to_slaves.get(full_shard_id, None)
if not slaves:
return None
slave = slaves[0]
request = GetAccountDataRequest(address, block_height)
_, resp, _ = await slave.write_rpc_request(
ClusterOp.GET_ACCOUNT_DATA_REQUEST, request
)
for account_branch_data in resp.account_branch_data_list:
if account_branch_data.branch.value == full_shard_id:
return account_branch_data
return None
    async def add_transaction(self, tx: TypedTransaction, from_peer=None):
        """ Add transaction to the cluster and broadcast to peers """
        evm_tx = tx.tx.to_evm_tx()
        evm_tx.set_quark_chain_config(self.env.quark_chain_config)
        branch = Branch(evm_tx.from_full_shard_id)
        if branch.value not in self.branch_to_slaves:
            return False

        futures = []
        for slave in self.branch_to_slaves[branch.value]:
            futures.append(slave.add_transaction(tx))

        # all replicas serving the branch must accept the tx
        success = all(await asyncio.gather(*futures))
        if not success:
            return False

        # best-effort broadcast: a failing peer must not fail the local add,
        # and the originating peer is skipped to avoid echoing back
        if self.network is not None:
            for peer in self.network.iterate_peers():
                if peer == from_peer:
                    continue
                try:
                    peer.send_transaction(tx)
                except Exception:
                    Logger.log_exception()
        return True
    async def execute_transaction(
        self, tx: TypedTransaction, from_address, block_height: Optional[int]
    ) -> Optional[bytes]:
        """ Execute transaction without persistence """
        evm_tx = tx.tx.to_evm_tx()
        evm_tx.set_quark_chain_config(self.env.quark_chain_config)
        branch = Branch(evm_tx.from_full_shard_id)
        if branch.value not in self.branch_to_slaves:
            return None

        futures = []
        for slave in self.branch_to_slaves[branch.value]:
            futures.append(slave.execute_transaction(tx, from_address, block_height))
        responses = await asyncio.gather(*futures)
        # failed response will return as None
        # require all replicas to succeed AND agree on the result
        success = all(r is not None for r in responses) and len(set(responses)) == 1
        if not success:
            return None

        check(len(responses) >= 1)
        return responses[0]
    def handle_new_root_block_header(self, header, peer):
        """Queue a peer-announced root header for synchronization."""
        self.synchronizer.add_task(header, peer)
    async def add_root_block(self, r_block):
        """ Add root block locally and broadcast root block to all shards and .
        All update root block should be done in serial to avoid inconsistent global root block state.
        """
        self.root_state.validate_block(r_block)  # throw exception if failed
        update_tip = False
        try:
            update_tip = self.root_state.add_block(r_block)
            success = True
        except ValueError:
            # block rejected by the root state; log but do not propagate
            Logger.log_exception()
            success = False

        # best-effort tip announcement; peer failures must not fail the add
        try:
            if update_tip and self.network is not None:
                for peer in self.network.iterate_peers():
                    peer.send_updated_tip()
        except Exception:
            pass

        if success:
            # fan the accepted root block out to every slave
            future_list = self.broadcast_rpc(
                op=ClusterOp.ADD_ROOT_BLOCK_REQUEST,
                req=AddRootBlockRequest(r_block, False),
            )
            result_list = await asyncio.gather(*future_list)
            check(all([resp.error_code == 0 for _, resp, _ in result_list]))
async def add_raw_minor_block(self, branch, block_data):
if branch.value not in self.branch_to_slaves:
return False
request = AddMinorBlockRequest(block_data)
# TODO: support multiple slaves running the same shard
_, resp, _ = await self.get_slave_connection(branch).write_rpc_request(
ClusterOp.ADD_MINOR_BLOCK_REQUEST, request
)
return resp.error_code == 0
async def add_root_block_from_miner(self, block):
""" Should only be called by miner """
# TODO: push candidate block to miner
if block.header.hash_prev_block != self.root_state.tip.get_hash():
Logger.info(
"[R] dropped stale root block {} mined locally".format(
block.header.height
)
)
return False
await self.add_root_block(block)
def broadcast_command(self, op, cmd):
""" Broadcast command to all slaves.
"""
for slave_conn in self.slave_pool:
slave_conn.write_command(
op=op, cmd=cmd, metadata=ClusterMetadata(ROOT_BRANCH, 0)
)
def broadcast_rpc(self, op, req):
""" Broadcast RPC request to all slaves.
"""
future_list = []
for slave_conn in self.slave_pool:
future_list.append(
slave_conn.write_rpc_request(
op=op, cmd=req, metadata=ClusterMetadata(ROOT_BRANCH, 0)
)
)
return future_list
# ------------------------------ Cluster Peer Connection Management --------------
def get_peer(self, cluster_peer_id):
if self.network is None:
return None
return self.network.get_peer_by_cluster_peer_id(cluster_peer_id)
async def create_peer_cluster_connections(self, cluster_peer_id):
future_list = self.broadcast_rpc(
op=ClusterOp.CREATE_CLUSTER_PEER_CONNECTION_REQUEST,
req=CreateClusterPeerConnectionRequest(cluster_peer_id),
)
result_list = await asyncio.gather(*future_list)
# TODO: Check result_list
return
    def destroy_peer_cluster_connections(self, cluster_peer_id):
        # Broadcast connection lost to all slaves
        self.broadcast_command(
            op=ClusterOp.DESTROY_CLUSTER_PEER_CONNECTION_COMMAND,
            cmd=DestroyClusterPeerConnectionCommand(cluster_peer_id),
        )
async def set_target_block_time(self, root_block_time, minor_block_time):
root_block_time = (
root_block_time
if root_block_time
else self.artificial_tx_config.target_root_block_time
)
minor_block_time = (
minor_block_time
if minor_block_time
else self.artificial_tx_config.target_minor_block_time
)
self.artificial_tx_config = ArtificialTxConfig(
target_root_block_time=root_block_time,
target_minor_block_time=minor_block_time,
)
await self.start_mining()
async def set_mining(self, mining):
if mining:
await self.start_mining()
else:
await self.stop_mining()
async def create_transactions(
self, num_tx_per_shard, xshard_percent, tx: TypedTransaction
):
"""Create transactions and add to the network for load testing"""
futures = []
for slave in self.slave_pool:
request = GenTxRequest(num_tx_per_shard, xshard_percent, tx)
futures.append(slave.write_rpc_request(ClusterOp.GEN_TX_REQUEST, request))
responses = await asyncio.gather(*futures)
check(all([resp.error_code == 0 for _, resp, _ in responses]))
    def update_shard_stats(self, shard_state):
        """Record the latest ShardStats reported for a branch."""
        self.branch_to_shard_stats[shard_state.branch.value] = shard_state
def update_tx_count_history(self, tx_count, xshard_tx_count, timestamp):
""" maintain a list of tuples of (epoch minute, tx count, xshard tx count) of 12 hours window
Note that this is also counting transactions on forks and thus larger than if only couting the best chains. """
minute = int(timestamp / 60) * 60
if len(self.tx_count_history) == 0 or self.tx_count_history[-1][0] < minute:
self.tx_count_history.append((minute, tx_count, xshard_tx_count))
else:
old = self.tx_count_history.pop()
self.tx_count_history.append(
(old[0], old[1] + tx_count, old[2] + xshard_tx_count)
)
while (
len(self.tx_count_history) > 0
and self.tx_count_history[0][0] < time.time() - 3600 * 12
):
self.tx_count_history.popleft()
def get_block_count(self):
header = self.root_state.tip
shard_r_c = self.root_state.db.get_block_count(header.height)
return {"rootHeight": header.height, "shardRC": shard_r_c}
    async def get_stats(self):
        """Assemble a JSON-friendly snapshot of cluster-wide stats for monitoring."""
        shards = []
        for shard_stats in self.branch_to_shard_stats.values():
            shard = dict()
            shard["fullShardId"] = shard_stats.branch.get_full_shard_id()
            shard["chainId"] = shard_stats.branch.get_chain_id()
            shard["shardId"] = shard_stats.branch.get_shard_id()
            shard["height"] = shard_stats.height
            shard["difficulty"] = shard_stats.difficulty
            shard["coinbaseAddress"] = "0x" + shard_stats.coinbase_address.to_hex()
            shard["timestamp"] = shard_stats.timestamp
            shard["txCount60s"] = shard_stats.tx_count60s
            shard["pendingTxCount"] = shard_stats.pending_tx_count
            shard["totalTxCount"] = shard_stats.total_tx_count
            shard["blockCount60s"] = shard_stats.block_count60s
            shard["staleBlockCount60s"] = shard_stats.stale_block_count60s
            shard["lastBlockTime"] = shard_stats.last_block_time
            shards.append(shard)
        shards.sort(key=lambda x: x["fullShardId"])

        # cluster-wide aggregates across all shards
        tx_count60s = sum(
            [
                shard_stats.tx_count60s
                for shard_stats in self.branch_to_shard_stats.values()
            ]
        )
        block_count60s = sum(
            [
                shard_stats.block_count60s
                for shard_stats in self.branch_to_shard_stats.values()
            ]
        )
        pending_tx_count = sum(
            [
                shard_stats.pending_tx_count
                for shard_stats in self.branch_to_shard_stats.values()
            ]
        )
        stale_block_count60s = sum(
            [
                shard_stats.stale_block_count60s
                for shard_stats in self.branch_to_shard_stats.values()
            ]
        )
        total_tx_count = sum(
            [
                shard_stats.total_tx_count
                for shard_stats in self.branch_to_shard_stats.values()
            ]
        )

        # time between the two most recent root blocks (0 near genesis)
        root_last_block_time = 0
        if self.root_state.tip.height >= 3:
            prev = self.root_state.db.get_root_block_by_hash(
                self.root_state.tip.hash_prev_block
            )
            root_last_block_time = (
                self.root_state.tip.create_time - prev.header.create_time
            )

        tx_count_history = []
        for item in self.tx_count_history:
            tx_count_history.append(
                {"timestamp": item[0], "txCount": item[1], "xShardTxCount": item[2]}
            )

        return {
            "networkId": self.env.quark_chain_config.NETWORK_ID,
            "chainSize": self.env.quark_chain_config.CHAIN_SIZE,
            "shardServerCount": len(self.slave_pool),
            "rootHeight": self.root_state.tip.height,
            "rootDifficulty": self.root_state.tip.difficulty,
            "rootCoinbaseAddress": "0x" + self.root_state.tip.coinbase_address.to_hex(),
            "rootTimestamp": self.root_state.tip.create_time,
            "rootLastBlockTime": root_last_block_time,
            "txCount60s": tx_count60s,
            "blockCount60s": block_count60s,
            "staleBlockCount60s": stale_block_count60s,
            "pendingTxCount": pending_tx_count,
            "totalTxCount": total_tx_count,
            "syncing": self.synchronizer.running,
            "mining": self.root_miner.is_enabled(),
            "shards": shards,
            "peers": [
                "{}:{}".format(peer.ip, peer.port)
                for _, peer in self.network.active_peer_pool.items()
            ],
            "minor_block_interval": self.get_artificial_tx_config().target_minor_block_time,
            "root_block_interval": self.get_artificial_tx_config().target_root_block_time,
            "cpus": psutil.cpu_percent(percpu=True),
            "txCountHistory": tx_count_history,
        }
def is_syncing(self):
return self.synchronizer.running
def is_mining(self):
return self.root_miner.is_enabled()
async def get_minor_block_by_hash(self, block_hash, branch):
if branch.value not in self.branch_to_slaves:
return None
slave = self.branch_to_slaves[branch.value][0]
return await slave.get_minor_block_by_hash(block_hash, branch)
async def get_minor_block_by_height(self, height: Optional[int], branch):
if branch.value not in self.branch_to_slaves:
return None
slave = self.branch_to_slaves[branch.value][0]
# use latest height if not specified
height = (
height
if height is not None
else self.branch_to_shard_stats[branch.value].height
)
return await slave.get_minor_block_by_height(height, branch)
async def get_transaction_by_hash(self, tx_hash, branch):
""" Returns (MinorBlock, i) where i is the index of the tx in the block tx_list """
if branch.value not in self.branch_to_slaves:
return None
slave = self.branch_to_slaves[branch.value][0]
return await slave.get_transaction_by_hash(tx_hash, branch)
async def get_transaction_receipt(
self, tx_hash, branch
) -> Optional[Tuple[MinorBlock, int, TransactionReceipt]]:
if branch.value not in self.branch_to_slaves:
return None
slave = self.branch_to_slaves[branch.value][0]
return await slave.get_transaction_receipt(tx_hash, branch)
async def get_transactions_by_address(self, address, start, limit):
full_shard_id = self.env.quark_chain_config.get_full_shard_id_by_full_shard_key(
address.full_shard_key
)
slave = self.branch_to_slaves[full_shard_id][0]
return await slave.get_transactions_by_address(address, start, limit)
async def get_logs(
self,
addresses: List[Address],
topics: List[List[bytes]],
start_block: Union[int, str],
end_block: Union[int, str],
branch: Branch,
) -> Optional[List[Log]]:
if branch.value not in self.branch_to_slaves:
return None
if start_block == "latest":
start_block = self.branch_to_shard_stats[branch.value].height
if end_block == "latest":
end_block = self.branch_to_shard_stats[branch.value].height
slave = self.branch_to_slaves[branch.value][0]
return await slave.get_logs(branch, addresses, topics, start_block, end_block)
async def estimate_gas(
self, tx: TypedTransaction, from_address: Address
) -> Optional[int]:
evm_tx = tx.tx.to_evm_tx()
evm_tx.set_quark_chain_config(self.env.quark_chain_config)
branch = Branch(evm_tx.from_full_shard_id)
if branch.value not in self.branch_to_slaves:
return None
slave = self.branch_to_slaves[branch.value][0]
return await slave.estimate_gas(tx, from_address)
async def get_storage_at(
self, address: Address, key: int, block_height: Optional[int]
) -> Optional[bytes]:
full_shard_id = self.env.quark_chain_config.get_full_shard_id_by_full_shard_key(
address.full_shard_key
)
if full_shard_id not in self.branch_to_slaves:
return None
slave = self.branch_to_slaves[full_shard_id][0]
return await slave.get_storage_at(address, key, block_height)
async def get_code(
self, address: Address, block_height: Optional[int]
) -> Optional[bytes]:
full_shard_id = self.env.quark_chain_config.get_full_shard_id_by_full_shard_key(
address.full_shard_key
)
if full_shard_id not in self.branch_to_slaves:
return None
slave = self.branch_to_slaves[full_shard_id][0]
return await slave.get_code(address, block_height)
async def gas_price(self, branch: Branch) -> Optional[int]:
if branch.value not in self.branch_to_slaves:
return None
slave = self.branch_to_slaves[branch.value][0]
return await slave.gas_price(branch)
async def get_work(self, branch: Optional[Branch]) -> Optional[MiningWork]:
if not branch: # get root chain work
work, _ = await self.root_miner.get_work()
return work
if branch.value not in self.branch_to_slaves:
return None
slave = self.branch_to_slaves[branch.value][0]
return await slave.get_work(branch)
async def submit_work(
self, branch: Optional[Branch], header_hash: bytes, nonce: int, mixhash: bytes
) -> bool:
if not branch: # submit root chain work
return await self.root_miner.submit_work(header_hash, nonce, mixhash)
if branch.value not in self.branch_to_slaves:
return False
slave = self.branch_to_slaves[branch.value][0]
return await slave.submit_work(branch, header_hash, nonce, mixhash)
def parse_args():
    """Parse CLI arguments and build the master's environment.

    Attaches the cluster-config arguments, copies the default environment,
    and opens the persistent master database unless an in-memory DB is
    requested by the config.
    """
    parser = argparse.ArgumentParser()
    ClusterConfig.attach_arguments(parser)
    parsed = parser.parse_args()

    env = DEFAULT_ENV.copy()
    env.cluster_config = ClusterConfig.create_from_args(parsed)

    # Persistent DB unless the config explicitly asks for an in-memory one.
    if not env.cluster_config.use_mem_db():
        db_path = "{path}/master.db".format(path=env.cluster_config.DB_PATH_ROOT)
        env.db = PersistentDb(db_path, clean=env.cluster_config.CLEAN)

    return env
def main():
    """Start the master server: parse args, boot the cluster, serve JSON-RPC.

    Blocks inside ``master.do_loop()`` until shutdown is triggered, then
    tears down both JSON-RPC servers.
    """
    # Imported here rather than at module top level (kept as-is).
    from quarkchain.cluster.jsonrpc import JSONRPCServer

    # NOTE(review): presumably so relative paths resolve against the script
    # directory — confirm before relying on it.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    env = parse_args()
    root_state = RootState(env)
    master = MasterServer(env, root_state)
    master.start()
    # Blocks until all slaves are connected and shards are initialized.
    master.wait_until_cluster_active()
    # kick off simulated mining if enabled
    if env.cluster_config.START_SIMULATED_MINING:
        asyncio.ensure_future(master.start_mining())
    loop = asyncio.get_event_loop()
    # Choose full p2p networking or the simple (test) network per config.
    if env.cluster_config.use_p2p():
        network = P2PManager(env, master, loop)
    else:
        network = SimpleNetwork(env, master, loop)
    network.start()
    public_json_rpc_server = JSONRPCServer.start_public_server(env, master)
    private_json_rpc_server = JSONRPCServer.start_private_server(env, master)
    # Runs the event loop until the master's shutdown future resolves.
    master.do_loop()
    public_json_rpc_server.shutdown()
    private_json_rpc_server.shutdown()
    Logger.info("Master server is shutdown")
# Script entry point: run the master server until shutdown.
if __name__ == "__main__":
    main()
| 37.310782 | 119 | 0.624207 | import argparse
import asyncio
import os
import psutil
import random
import time
from collections import deque
from typing import Optional, List, Union, Dict, Tuple
from quarkchain.cluster.guardian import Guardian
from quarkchain.cluster.miner import Miner, MiningWork, validate_seal
from quarkchain.cluster.p2p_commands import (
CommandOp,
Direction,
GetRootBlockHeaderListRequest,
GetRootBlockListRequest,
)
from quarkchain.cluster.protocol import (
ClusterMetadata,
ClusterConnection,
P2PConnection,
ROOT_BRANCH,
NULL_CONNECTION,
)
from quarkchain.cluster.root_state import RootState
from quarkchain.cluster.rpc import (
AddMinorBlockHeaderResponse,
GetEcoInfoListRequest,
GetNextBlockToMineRequest,
GetUnconfirmedHeadersRequest,
GetAccountDataRequest,
AddTransactionRequest,
AddRootBlockRequest,
AddMinorBlockRequest,
CreateClusterPeerConnectionRequest,
DestroyClusterPeerConnectionCommand,
SyncMinorBlockListRequest,
GetMinorBlockRequest,
GetTransactionRequest,
ArtificialTxConfig,
MineRequest,
GenTxRequest,
GetLogResponse,
GetLogRequest,
ShardStats,
EstimateGasRequest,
GetStorageRequest,
GetCodeRequest,
GasPriceRequest,
GetWorkRequest,
GetWorkResponse,
SubmitWorkRequest,
SubmitWorkResponse,
)
from quarkchain.cluster.rpc import (
ConnectToSlavesRequest,
ClusterOp,
CLUSTER_OP_SERIALIZER_MAP,
ExecuteTransactionRequest,
Ping,
GetTransactionReceiptRequest,
GetTransactionListByAddressRequest,
)
from quarkchain.cluster.simple_network import SimpleNetwork
from quarkchain.config import RootConfig
from quarkchain.env import DEFAULT_ENV
from quarkchain.core import (
Branch,
ChainMask,
Log,
Address,
RootBlock,
TransactionReceipt,
TypedTransaction,
MinorBlock,
)
from quarkchain.db import PersistentDb
from quarkchain.p2p.p2p_manager import P2PManager
from quarkchain.utils import Logger, check, time_ms
from quarkchain.cluster.cluster_config import ClusterConfig
TIMEOUT = 10
class SyncTask:
    """Synchronize the local root chain up to a header announced by a peer.

    Walks the peer's chain backwards from the announced header until a
    locally-known root block is reached, then downloads and applies the
    missing root blocks (and their minor blocks) oldest-first.
    """

    def __init__(self, header, peer):
        self.header = header  # target root block header announced by the peer
        self.peer = peer
        self.master_server = peer.master_server
        self.root_state = peer.root_state
        # Forks rooted deeper than this below our tip are abandoned.
        self.max_staleness = (
            self.root_state.env.quark_chain_config.ROOT.MAX_STALE_ROOT_BLOCK_HEIGHT_DIFF
        )

    async def sync(self):
        """Run the sync; on any failure, log it and drop the peer connection."""
        try:
            await self.__run_sync()
        except Exception as e:
            Logger.log_exception()
            self.peer.close_with_error(str(e))

    async def __run_sync(self):
        """Collect missing headers, then download and apply the blocks."""
        if self.__has_block_hash(self.header.get_hash()):
            return

        # Headers from the target down to (but excluding) a locally-known
        # block, collected newest-first.
        block_header_chain = [self.header]

        while not self.__has_block_hash(block_header_chain[-1].hash_prev_block):
            block_hash = block_header_chain[-1].hash_prev_block
            height = block_header_chain[-1].height - 1

            # Give up on forks that diverge too far below our current tip.
            if self.root_state.tip.height - height > self.max_staleness:
                Logger.warning(
                    "[R] abort syncing due to forking at super old block {} << {}".format(
                        height, self.root_state.tip.height
                    )
                )
                return
            Logger.info(
                "[R] downloading block header list from {} {}".format(
                    height, block_hash.hex()
                )
            )
            block_header_list = await asyncio.wait_for(
                self.__download_block_headers(block_hash), TIMEOUT
            )
            self.__validate_block_headers(block_header_list)
            for header in block_header_list:
                if self.__has_block_hash(header.get_hash()):
                    break
                block_header_chain.append(header)

        block_header_chain.reverse()  # apply oldest-first
        Logger.info(
            "[R] going to download {} blocks ({} - {})".format(
                len(block_header_chain),
                block_header_chain[0].height,
                block_header_chain[-1].height,
            )
        )

        while len(block_header_chain) > 0:
            # Fetch and apply in batches of at most 100 blocks.
            block_chain = await asyncio.wait_for(
                self.__download_blocks(block_header_chain[:100]), TIMEOUT
            )
            Logger.info(
                "[R] downloaded {} blocks ({} - {}) from peer".format(
                    len(block_chain),
                    block_chain[0].header.height,
                    block_chain[-1].header.height,
                )
            )
            if len(block_chain) != len(block_header_chain[:100]):
                raise RuntimeError("Bad peer missing blocks for headers they have")

            for block in block_chain:
                await self.__add_block(block)
                block_header_chain.pop(0)

    def __has_block_hash(self, block_hash):
        """Whether the root block is already part of the local root state."""
        return self.root_state.contain_root_block_by_hash(block_hash)

    def __validate_block_headers(self, block_header_list):
        """Sanity-check a descending header list: heights, links, PoW seals."""
        consensus_type = self.root_state.env.quark_chain_config.ROOT.CONSENSUS_TYPE
        for i in range(len(block_header_list) - 1):
            header, prev = block_header_list[i : i + 2]
            if header.height != prev.height + 1:
                raise RuntimeError(
                    "Bad peer sending root block headers with discontinuous height"
                )
            if header.hash_prev_block != prev.get_hash():
                raise RuntimeError(
                    "Bad peer sending root block headers with discontinuous hash_prev_block"
                )
            adjusted_diff = None
            if not self.root_state.env.quark_chain_config.SKIP_ROOT_DIFFICULTY_CHECK:
                # Guardian-signed headers are sealed at an adjusted difficulty.
                if header.verify_signature(
                    self.root_state.env.quark_chain_config.guardian_public_key
                ):
                    adjusted_diff = Guardian.adjust_difficulty(
                        header.difficulty, header.height
                    )
            validate_seal(header, consensus_type, adjusted_diff=adjusted_diff)

    async def __download_block_headers(self, block_hash):
        """Request up to 500 headers ending at block_hash, walking toward genesis."""
        request = GetRootBlockHeaderListRequest(
            block_hash=block_hash, limit=500, direction=Direction.GENESIS
        )
        op, resp, rpc_id = await self.peer.write_rpc_request(
            CommandOp.GET_ROOT_BLOCK_HEADER_LIST_REQUEST, request
        )
        return resp.block_header_list

    async def __download_blocks(self, block_header_list):
        """Fetch the full root blocks for the given headers from the peer."""
        block_hash_list = [b.get_hash() for b in block_header_list]
        op, resp, rpc_id = await self.peer.write_rpc_request(
            CommandOp.GET_ROOT_BLOCK_LIST_REQUEST,
            GetRootBlockListRequest(block_hash_list),
        )
        return resp.root_block_list

    async def __add_block(self, root_block):
        """Sync the block's minor blocks, then add the root block itself."""
        Logger.info(
            "[R] syncing root block {} {}".format(
                root_block.header.height, root_block.header.get_hash().hex()
            )
        )
        start = time.time()
        await self.__sync_minor_blocks(root_block.minor_block_header_list)
        await self.master_server.add_root_block(root_block)
        elapse = time.time() - start
        Logger.info(
            "[R] syncing root block {} {} took {:.2f} seconds".format(
                root_block.header.height, root_block.header.get_hash().hex(), elapse
            )
        )

    async def __sync_minor_blocks(self, minor_block_header_list):
        """Ask the owning slaves to download any not-yet-validated minor blocks."""
        minor_block_download_map = dict()
        for m_block_header in minor_block_header_list:
            m_block_hash = m_block_header.get_hash()
            if not self.root_state.is_minor_block_validated(m_block_hash):
                minor_block_download_map.setdefault(m_block_header.branch, []).append(
                    m_block_hash
                )

        future_list = []
        for branch, m_block_hash_list in minor_block_download_map.items():
            slave_conn = self.master_server.get_slave_connection(branch=branch)
            future = slave_conn.write_rpc_request(
                op=ClusterOp.SYNC_MINOR_BLOCK_LIST_REQUEST,
                cmd=SyncMinorBlockListRequest(
                    m_block_hash_list, branch, self.peer.get_cluster_peer_id()
                ),
            )
            future_list.append(future)

        # BUG FIX: the old code awaited gather() without return_exceptions and
        # then tested `result is Exception` — an identity comparison against
        # the class object that can never be true — so RPC failures never went
        # through the intended error path.  Collect exceptions explicitly and
        # surface them.
        result_list = await asyncio.gather(*future_list, return_exceptions=True)
        for result in result_list:
            if isinstance(result, Exception):
                raise RuntimeError(
                    "Unable to download minor blocks from root block with exception {}".format(
                        result
                    )
                )
            _, result, _ = result
            if result.error_code != 0:
                raise RuntimeError("Unable to download minor blocks from root block")
            if result.shard_stats:
                self.master_server.update_shard_stats(result.shard_stats)
            # Record coinbase info of each newly validated minor block.
            for k, v in result.block_coinbase_map.items():
                self.root_state.add_validated_minor_block_hash(k, v.balance_map)

        for m_header in minor_block_header_list:
            if not self.root_state.is_minor_block_validated(m_header.get_hash()):
                raise RuntimeError(
                    "minor block is still unavailable in master after root block sync"
                )
class Synchronizer:
    """Buffers root-header sync requests and runs them one at a time.

    Each peer maps to at most one pending header (a newer announcement from
    the same peer overwrites the older one); queued tasks are executed
    highest-height first by a single driver coroutine.
    """

    def __init__(self):
        # peer -> latest announced root header awaiting sync
        self.tasks = dict()
        # True while the __run() driver coroutine is active
        self.running = False
        # (header, peer) currently being synced, or None
        self.running_task = None

    def add_task(self, header, peer):
        """Queue (or overwrite) the sync target for a peer; start the driver if idle."""
        self.tasks[peer] = header
        Logger.info(
            "[R] added {} {} to sync queue (running={})".format(
                header.height, header.get_hash().hex(), self.running
            )
        )
        if not self.running:
            self.running = True
            asyncio.ensure_future(self.__run())

    def get_stats(self):
        """Snapshot of the running task and queued tasks for status reporting."""

        def _task_to_dict(peer, header):
            # Flatten one (peer, header) pair into a JSON-friendly dict.
            return {
                "peerId": peer.id.hex(),
                "peerIp": str(peer.ip),
                "peerPort": peer.port,
                "rootHeight": header.height,
                "rootHash": header.get_hash().hex(),
            }

        return {
            "runningTask": _task_to_dict(self.running_task[1], self.running_task[0])
            if self.running_task
            else None,
            "queuedTasks": [
                _task_to_dict(peer, header) for peer, header in self.tasks.items()
            ],
        }

    def _pop_best_task(self):
        """Remove and return the (header, peer) with the highest root height."""
        check(len(self.tasks) > 0)
        peer, header = max(self.tasks.items(), key=lambda pair: pair[1].height)
        del self.tasks[peer]
        return header, peer

    async def __run(self):
        """Driver loop: drain the queue, syncing the best task each round."""
        Logger.info("[R] synchronizer started!")
        while len(self.tasks) > 0:
            self.running_task = self._pop_best_task()
            header, peer = self.running_task
            task = SyncTask(header, peer)
            Logger.info(
                "[R] start sync task {} {}".format(
                    header.height, header.get_hash().hex()
                )
            )
            await task.sync()
            Logger.info(
                "[R] done sync task {} {}".format(
                    header.height, header.get_hash().hex()
                )
            )
        self.running = False
        self.running_task = None
        Logger.info("[R] synchronizer finished!")
class SlaveConnection(ClusterConnection):
OP_NONRPC_MAP = {}
def __init__(
self, env, reader, writer, master_server, slave_id, chain_mask_list, name=None
):
super().__init__(
env,
reader,
writer,
CLUSTER_OP_SERIALIZER_MAP,
self.OP_NONRPC_MAP,
OP_RPC_MAP,
name=name,
)
self.master_server = master_server
self.id = slave_id
self.chain_mask_list = chain_mask_list
check(len(chain_mask_list) > 0)
asyncio.ensure_future(self.active_and_loop_forever())
def get_connection_to_forward(self, metadata):
if metadata.cluster_peer_id == 0:
return None
peer = self.master_server.get_peer(metadata.cluster_peer_id)
if peer is None:
return NULL_CONNECTION
return peer
def validate_connection(self, connection):
return connection == NULL_CONNECTION or isinstance(connection, P2PConnection)
def has_shard(self, full_shard_id: int):
for chain_mask in self.chain_mask_list:
if chain_mask.contain_full_shard_id(full_shard_id):
return True
return False
def has_overlap(self, chain_mask: ChainMask):
for local_chain_mask in self.chain_mask_list:
if local_chain_mask.has_overlap(chain_mask):
return True
return False
async def send_ping(self, initialize_shard_state=False):
root_block = (
self.master_server.root_state.get_tip_block()
if initialize_shard_state
else None
)
req = Ping("", [], root_block)
op, resp, rpc_id = await self.write_rpc_request(
op=ClusterOp.PING,
cmd=req,
metadata=ClusterMetadata(branch=ROOT_BRANCH, cluster_peer_id=0),
)
return resp.id, resp.chain_mask_list
async def send_connect_to_slaves(self, slave_info_list):
req = ConnectToSlavesRequest(slave_info_list)
op, resp, rpc_id = await self.write_rpc_request(
ClusterOp.CONNECT_TO_SLAVES_REQUEST, req
)
check(len(resp.result_list) == len(slave_info_list))
for i, result in enumerate(resp.result_list):
if len(result) > 0:
Logger.info(
"Slave {} failed to connect to {} with error {}".format(
self.id, slave_info_list[i].id, result
)
)
return False
Logger.info("Slave {} connected to other slaves successfully".format(self.id))
return True
def close(self):
Logger.info(
"Lost connection with slave {}. Shutting down master ...".format(self.id)
)
super().close()
self.master_server.shutdown()
def close_with_error(self, error):
Logger.info("Closing connection with slave {}".format(self.id))
return super().close_with_error(error)
async def add_transaction(self, tx):
request = AddTransactionRequest(tx)
_, resp, _ = await self.write_rpc_request(
ClusterOp.ADD_TRANSACTION_REQUEST, request
)
return resp.error_code == 0
async def execute_transaction(
self, tx: TypedTransaction, from_address, block_height: Optional[int]
):
request = ExecuteTransactionRequest(tx, from_address, block_height)
_, resp, _ = await self.write_rpc_request(
ClusterOp.EXECUTE_TRANSACTION_REQUEST, request
)
return resp.result if resp.error_code == 0 else None
async def get_minor_block_by_hash(self, block_hash, branch):
request = GetMinorBlockRequest(branch, minor_block_hash=block_hash)
_, resp, _ = await self.write_rpc_request(
ClusterOp.GET_MINOR_BLOCK_REQUEST, request
)
if resp.error_code != 0:
return None
return resp.minor_block
async def get_minor_block_by_height(self, height, branch):
request = GetMinorBlockRequest(branch, height=height)
_, resp, _ = await self.write_rpc_request(
ClusterOp.GET_MINOR_BLOCK_REQUEST, request
)
if resp.error_code != 0:
return None
return resp.minor_block
async def get_transaction_by_hash(self, tx_hash, branch):
request = GetTransactionRequest(tx_hash, branch)
_, resp, _ = await self.write_rpc_request(
ClusterOp.GET_TRANSACTION_REQUEST, request
)
if resp.error_code != 0:
return None, None
return resp.minor_block, resp.index
async def get_transaction_receipt(self, tx_hash, branch):
request = GetTransactionReceiptRequest(tx_hash, branch)
_, resp, _ = await self.write_rpc_request(
ClusterOp.GET_TRANSACTION_RECEIPT_REQUEST, request
)
if resp.error_code != 0:
return None
return resp.minor_block, resp.index, resp.receipt
async def get_transactions_by_address(self, address, start, limit):
request = GetTransactionListByAddressRequest(address, start, limit)
_, resp, _ = await self.write_rpc_request(
ClusterOp.GET_TRANSACTION_LIST_BY_ADDRESS_REQUEST, request
)
if resp.error_code != 0:
return None
return resp.tx_list, resp.next
async def get_logs(
self,
branch: Branch,
addresses: List[Address],
topics: List[List[bytes]],
start_block: int,
end_block: int,
) -> Optional[List[Log]]:
request = GetLogRequest(branch, addresses, topics, start_block, end_block)
_, resp, _ = await self.write_rpc_request(
ClusterOp.GET_LOG_REQUEST, request
)
return resp.logs if resp.error_code == 0 else None
async def estimate_gas(
self, tx: TypedTransaction, from_address: Address
) -> Optional[int]:
request = EstimateGasRequest(tx, from_address)
_, resp, _ = await self.write_rpc_request(
ClusterOp.ESTIMATE_GAS_REQUEST, request
)
return resp.result if resp.error_code == 0 else None
async def get_storage_at(
self, address: Address, key: int, block_height: Optional[int]
) -> Optional[bytes]:
request = GetStorageRequest(address, key, block_height)
_, resp, _ = await self.write_rpc_request(
ClusterOp.GET_STORAGE_REQUEST, request
)
return resp.result if resp.error_code == 0 else None
async def get_code(
self, address: Address, block_height: Optional[int]
) -> Optional[bytes]:
request = GetCodeRequest(address, block_height)
_, resp, _ = await self.write_rpc_request(ClusterOp.GET_CODE_REQUEST, request)
return resp.result if resp.error_code == 0 else None
async def gas_price(self, branch: Branch) -> Optional[int]:
request = GasPriceRequest(branch)
_, resp, _ = await self.write_rpc_request(ClusterOp.GAS_PRICE_REQUEST, request)
return resp.result if resp.error_code == 0 else None
async def get_work(self, branch: Branch) -> Optional[MiningWork]:
request = GetWorkRequest(branch)
_, resp, _ = await self.write_rpc_request(ClusterOp.GET_WORK_REQUEST, request)
get_work_resp = resp
if get_work_resp.error_code != 0:
return None
return MiningWork(
get_work_resp.header_hash, get_work_resp.height, get_work_resp.difficulty
)
async def submit_work(
self, branch: Branch, header_hash: bytes, nonce: int, mixhash: bytes
) -> bool:
request = SubmitWorkRequest(branch, header_hash, nonce, mixhash)
_, resp, _ = await self.write_rpc_request(
ClusterOp.SUBMIT_WORK_REQUEST, request
)
submit_work_resp = resp
return submit_work_resp.error_code == 0 and submit_work_resp.success
async def handle_add_minor_block_header_request(self, req):
self.master_server.root_state.add_validated_minor_block_hash(
req.minor_block_header.get_hash(), req.coinbase_amount_map.balance_map
)
self.master_server.update_shard_stats(req.shard_stats)
self.master_server.update_tx_count_history(
req.tx_count, req.x_shard_tx_count, req.minor_block_header.create_time
)
return AddMinorBlockHeaderResponse(
error_code=0,
artificial_tx_config=self.master_server.get_artificial_tx_config(),
)
OP_RPC_MAP = {
ClusterOp.ADD_MINOR_BLOCK_HEADER_REQUEST: (
ClusterOp.ADD_MINOR_BLOCK_HEADER_RESPONSE,
SlaveConnection.handle_add_minor_block_header_request,
)
}
class MasterServer:
def __init__(self, env, root_state, name="master"):
self.loop = asyncio.get_event_loop()
self.env = env
self.root_state = root_state
self.network = None
self.cluster_config = env.cluster_config
self.branch_to_slaves = dict()
self.slave_pool = set()
self.cluster_active_future = self.loop.create_future()
self.shutdown_future = self.loop.create_future()
self.name = name
self.artificial_tx_config = ArtificialTxConfig(
target_root_block_time=self.env.quark_chain_config.ROOT.CONSENSUS_CONFIG.TARGET_BLOCK_TIME,
target_minor_block_time=next(
iter(self.env.quark_chain_config.shards.values())
).CONSENSUS_CONFIG.TARGET_BLOCK_TIME,
)
self.synchronizer = Synchronizer()
self.branch_to_shard_stats = dict()
self.tx_count_history = deque()
self.__init_root_miner()
def __init_root_miner(self):
miner_address = Address.create_from(
self.env.quark_chain_config.ROOT.COINBASE_ADDRESS
)
async def __create_block(retry=True):
while True:
block = await self.__create_root_block_to_mine(address=miner_address)
if block:
return block
if not retry:
break
await asyncio.sleep(1)
def __get_mining_params():
return {
"target_block_time": self.get_artificial_tx_config().target_root_block_time
}
root_config = self.env.quark_chain_config.ROOT
self.root_miner = Miner(
root_config.CONSENSUS_TYPE,
__create_block,
self.add_root_block,
__get_mining_params,
remote=root_config.CONSENSUS_CONFIG.REMOTE_MINE,
guardian_private_key=self.env.quark_chain_config.guardian_private_key,
)
def get_artificial_tx_config(self):
return self.artificial_tx_config
def __has_all_shards(self):
return len(self.branch_to_slaves) == len(
self.env.quark_chain_config.get_full_shard_ids()
) and all([len(slaves) > 0 for _, slaves in self.branch_to_slaves.items()])
async def __connect(self, host, port):
Logger.info("Trying to connect {}:{}".format(host, port))
while True:
try:
reader, writer = await asyncio.open_connection(
host, port, loop=self.loop
)
break
except Exception as e:
Logger.info("Failed to connect {} {}: {}".format(host, port, e))
await asyncio.sleep(
self.env.cluster_config.MASTER.MASTER_TO_SLAVE_CONNECT_RETRY_DELAY
)
Logger.info("Connected to {}:{}".format(host, port))
return reader, writer
async def __connect_to_slaves(self):
futures = []
slaves = []
for slave_info in self.cluster_config.get_slave_info_list():
host = slave_info.host.decode("ascii")
reader, writer = await self.__connect(host, slave_info.port)
slave = SlaveConnection(
self.env,
reader,
writer,
self,
slave_info.id,
slave_info.chain_mask_list,
name="{}_slave_{}".format(self.name, slave_info.id),
)
await slave.wait_until_active()
futures.append(slave.send_ping())
slaves.append(slave)
results = await asyncio.gather(*futures)
full_shard_ids = self.env.quark_chain_config.get_full_shard_ids()
for slave, result in zip(slaves, results):
id, chain_mask_list = result
if id != slave.id:
Logger.error(
"Slave id does not match. expect {} got {}".format(slave.id, id)
)
self.shutdown()
if chain_mask_list != slave.chain_mask_list:
Logger.error(
"Slave {} shard mask list does not match. expect {} got {}".format(
slave.id, slave.chain_mask_list, chain_mask_list
)
)
self.shutdown()
self.slave_pool.add(slave)
for full_shard_id in full_shard_ids:
if slave.has_shard(full_shard_id):
self.branch_to_slaves.setdefault(full_shard_id, []).append(slave)
async def __setup_slave_to_slave_connections(self):
for slave in self.slave_pool:
await slave.wait_until_active()
success = await slave.send_connect_to_slaves(
self.cluster_config.get_slave_info_list()
)
if not success:
self.shutdown()
async def __init_shards(self):
futures = []
for slave in self.slave_pool:
futures.append(slave.send_ping(initialize_shard_state=True))
await asyncio.gather(*futures)
async def __send_mining_config_to_slaves(self, mining):
futures = []
for slave in self.slave_pool:
request = MineRequest(self.get_artificial_tx_config(), mining)
futures.append(slave.write_rpc_request(ClusterOp.MINE_REQUEST, request))
responses = await asyncio.gather(*futures)
check(all([resp.error_code == 0 for _, resp, _ in responses]))
async def start_mining(self):
await self.__send_mining_config_to_slaves(True)
self.root_miner.start()
Logger.warning(
"Mining started with root block time {} s, minor block time {} s".format(
self.get_artificial_tx_config().target_root_block_time,
self.get_artificial_tx_config().target_minor_block_time,
)
)
async def stop_mining(self):
await self.__send_mining_config_to_slaves(False)
self.root_miner.disable()
Logger.warning("Mining stopped")
def get_slave_connection(self, branch):
check(len(self.branch_to_slaves[branch.value]) > 0)
return self.branch_to_slaves[branch.value][0]
def __log_summary(self):
for branch_value, slaves in self.branch_to_slaves.items():
Logger.info(
"[{}] is run by slave {}".format(
Branch(branch_value).to_str(), [s.id for s in slaves]
)
)
async def __init_cluster(self):
await self.__connect_to_slaves()
self.__log_summary()
if not self.__has_all_shards():
Logger.error("Missing some shards. Check cluster config file!")
return
await self.__setup_slave_to_slave_connections()
await self.__init_shards()
self.cluster_active_future.set_result(None)
def start(self):
self.loop.create_task(self.__init_cluster())
def do_loop(self):
try:
self.loop.run_until_complete(self.shutdown_future)
except KeyboardInterrupt:
pass
def wait_until_cluster_active(self):
self.loop.run_until_complete(self.cluster_active_future)
def shutdown(self):
if not self.shutdown_future.done():
self.shutdown_future.set_result(None)
if not self.cluster_active_future.done():
self.cluster_active_future.set_exception(
RuntimeError("failed to start the cluster")
)
def get_shutdown_future(self):
return self.shutdown_future
async def __create_root_block_to_mine(self, address) -> Optional[RootBlock]:
futures = []
for slave in self.slave_pool:
request = GetUnconfirmedHeadersRequest()
futures.append(
slave.write_rpc_request(
ClusterOp.GET_UNCONFIRMED_HEADERS_REQUEST, request
)
)
responses = await asyncio.gather(*futures)
full_shard_id_to_header_list = dict()
for response in responses:
_, response, _ = response
if response.error_code != 0:
return None
for headers_info in response.headers_info_list:
height = 0
for header in headers_info.header_list:
check(height == 0 or height + 1 == header.height)
height = header.height
if not self.root_state.is_minor_block_validated(header.get_hash()):
break
full_shard_id_to_header_list.setdefault(
headers_info.branch.get_full_shard_id(), []
).append(header)
header_list = []
full_shard_ids_to_check = self.env.quark_chain_config.get_initialized_full_shard_ids_before_root_height(
self.root_state.tip.height + 1
)
for full_shard_id in full_shard_ids_to_check:
headers = full_shard_id_to_header_list.get(full_shard_id, [])
header_list.extend(headers)
return self.root_state.create_block_to_mine(header_list, address)
async def __get_minor_block_to_mine(self, branch, address):
request = GetNextBlockToMineRequest(
branch=branch,
address=address.address_in_branch(branch),
artificial_tx_config=self.get_artificial_tx_config(),
)
slave = self.get_slave_connection(branch)
_, response, _ = await slave.write_rpc_request(
ClusterOp.GET_NEXT_BLOCK_TO_MINE_REQUEST, request
)
return response.block if response.error_code == 0 else None
async def get_next_block_to_mine(
self, address, branch_value: Optional[int]
) -> Optional[Union[RootBlock, MinorBlock]]:
if self.synchronizer.running:
return None
if branch_value is None:
root = await self.__create_root_block_to_mine(address)
return root or None
block = await self.__get_minor_block_to_mine(Branch(branch_value), address)
return block or None
async def get_account_data(self, address: Address):
futures = []
for slave in self.slave_pool:
request = GetAccountDataRequest(address)
futures.append(
slave.write_rpc_request(ClusterOp.GET_ACCOUNT_DATA_REQUEST, request)
)
responses = await asyncio.gather(*futures)
branch_to_account_branch_data = dict()
for response in responses:
_, response, _ = response
check(response.error_code == 0)
for account_branch_data in response.account_branch_data_list:
branch_to_account_branch_data[
account_branch_data.branch
] = account_branch_data
check(
len(branch_to_account_branch_data)
== len(self.env.quark_chain_config.get_full_shard_ids())
)
return branch_to_account_branch_data
async def get_primary_account_data(
self, address: Address, block_height: Optional[int] = None
):
full_shard_id = self.env.quark_chain_config.get_full_shard_id_by_full_shard_key(
address.full_shard_key
)
slaves = self.branch_to_slaves.get(full_shard_id, None)
if not slaves:
return None
slave = slaves[0]
request = GetAccountDataRequest(address, block_height)
_, resp, _ = await slave.write_rpc_request(
ClusterOp.GET_ACCOUNT_DATA_REQUEST, request
)
for account_branch_data in resp.account_branch_data_list:
if account_branch_data.branch.value == full_shard_id:
return account_branch_data
return None
async def add_transaction(self, tx: TypedTransaction, from_peer=None):
evm_tx = tx.tx.to_evm_tx()
evm_tx.set_quark_chain_config(self.env.quark_chain_config)
branch = Branch(evm_tx.from_full_shard_id)
if branch.value not in self.branch_to_slaves:
return False
futures = []
for slave in self.branch_to_slaves[branch.value]:
futures.append(slave.add_transaction(tx))
success = all(await asyncio.gather(*futures))
if not success:
return False
if self.network is not None:
for peer in self.network.iterate_peers():
if peer == from_peer:
continue
try:
peer.send_transaction(tx)
except Exception:
Logger.log_exception()
return True
async def execute_transaction(
self, tx: TypedTransaction, from_address, block_height: Optional[int]
) -> Optional[bytes]:
evm_tx = tx.tx.to_evm_tx()
evm_tx.set_quark_chain_config(self.env.quark_chain_config)
branch = Branch(evm_tx.from_full_shard_id)
if branch.value not in self.branch_to_slaves:
return None
futures = []
for slave in self.branch_to_slaves[branch.value]:
futures.append(slave.execute_transaction(tx, from_address, block_height))
responses = await asyncio.gather(*futures)
success = all(r is not None for r in responses) and len(set(responses)) == 1
if not success:
return None
check(len(responses) >= 1)
return responses[0]
def handle_new_root_block_header(self, header, peer):
self.synchronizer.add_task(header, peer)
async def add_root_block(self, r_block):
self.root_state.validate_block(r_block)
update_tip = False
try:
update_tip = self.root_state.add_block(r_block)
success = True
except ValueError:
Logger.log_exception()
success = False
try:
if update_tip and self.network is not None:
for peer in self.network.iterate_peers():
peer.send_updated_tip()
except Exception:
pass
if success:
future_list = self.broadcast_rpc(
op=ClusterOp.ADD_ROOT_BLOCK_REQUEST,
req=AddRootBlockRequest(r_block, False),
)
result_list = await asyncio.gather(*future_list)
check(all([resp.error_code == 0 for _, resp, _ in result_list]))
async def add_raw_minor_block(self, branch, block_data):
    """Forward serialized minor-block bytes to one slave of ``branch``.

    Returns True iff the slave reported error_code == 0; False when no
    slave serves the branch.
    """
    if branch.value not in self.branch_to_slaves:
        return False
    request = AddMinorBlockRequest(block_data)
    _, resp, _ = await self.get_slave_connection(branch).write_rpc_request(
        ClusterOp.ADD_MINOR_BLOCK_REQUEST, request
    )
    return resp.error_code == 0
async def add_root_block_from_miner(self, block):
    """Accept a locally mined root block unless it has gone stale.

    NOTE(review): returns False on a stale block but implicitly None on
    success — callers must not rely on truthiness of the result; confirm
    before changing.
    """
    if block.header.hash_prev_block != self.root_state.tip.get_hash():
        # The tip moved while we were mining; drop the stale block.
        Logger.info(
            "[R] dropped stale root block {} mined locally".format(
                block.header.height
            )
        )
        return False
    await self.add_root_block(block)
def broadcast_command(self, op, cmd):
    """Fire-and-forget the same command to every slave connection."""
    metadata = ClusterMetadata(ROOT_BRANCH, 0)
    for conn in self.slave_pool:
        conn.write_command(op=op, cmd=cmd, metadata=metadata)
def broadcast_rpc(self, op, req):
    """Send the same RPC to every slave; return the list of response futures."""
    return [
        conn.write_rpc_request(
            op=op, cmd=req, metadata=ClusterMetadata(ROOT_BRANCH, 0)
        )
        for conn in self.slave_pool
    ]
def get_peer(self, cluster_peer_id):
    """Look up a connected peer by cluster peer id; None when offline."""
    network = self.network
    if network is None:
        return None
    return network.get_peer_by_cluster_peer_id(cluster_peer_id)
async def create_peer_cluster_connections(self, cluster_peer_id):
    """Ask every slave to set up a virtual connection for the given peer.

    The responses are awaited so the connections exist before the caller
    proceeds, but their payloads are not inspected.
    """
    future_list = self.broadcast_rpc(
        op=ClusterOp.CREATE_CLUSTER_PEER_CONNECTION_REQUEST,
        req=CreateClusterPeerConnectionRequest(cluster_peer_id),
    )
    # Await directly; the results were previously bound to an unused
    # local followed by a redundant bare `return`.
    await asyncio.gather(*future_list)
def destroy_peer_cluster_connections(self, cluster_peer_id):
    # Tear down the per-peer virtual connections on every slave.
    # Uses a one-way command (no response expected), unlike the
    # RPC used when creating the connections.
    self.broadcast_command(
        op=ClusterOp.DESTROY_CLUSTER_PEER_CONNECTION_COMMAND,
        cmd=DestroyClusterPeerConnectionCommand(cluster_peer_id),
    )
async def set_target_block_time(self, root_block_time, minor_block_time):
    """Update the artificial block-time targets and restart mining.

    A falsy argument keeps the corresponding current target unchanged.
    """
    current = self.artificial_tx_config
    self.artificial_tx_config = ArtificialTxConfig(
        target_root_block_time=root_block_time or current.target_root_block_time,
        target_minor_block_time=minor_block_time or current.target_minor_block_time,
    )
    await self.start_mining()
async def set_mining(self, mining):
    """Turn cluster-wide mining on or off."""
    await (self.start_mining() if mining else self.stop_mining())
async def create_transactions(
    self, num_tx_per_shard, xshard_percent, tx: TypedTransaction
):
    """Ask every slave to generate load-test transactions; assert success."""
    responses = await asyncio.gather(
        *[
            slave.write_rpc_request(
                ClusterOp.GEN_TX_REQUEST,
                GenTxRequest(num_tx_per_shard, xshard_percent, tx),
            )
            for slave in self.slave_pool
        ]
    )
    check(all(resp.error_code == 0 for _, resp, _ in responses))
def update_shard_stats(self, shard_state):
    # Cache the latest per-shard stats report, keyed by branch value.
    self.branch_to_shard_stats[shard_state.branch.value] = shard_state
def update_tx_count_history(self, tx_count, xshard_tx_count, timestamp):
    """Accumulate tx counts into per-minute buckets, keeping ~12 hours.

    Entries are (minute_epoch, tx_count, xshard_tx_count) tuples; counts
    for the same minute are merged into the newest entry.
    """
    # Floor the timestamp to the start of its minute.
    minute = int(timestamp / 60) * 60
    if len(self.tx_count_history) == 0 or self.tx_count_history[-1][0] < minute:
        self.tx_count_history.append((minute, tx_count, xshard_tx_count))
    else:
        # Same minute as the latest bucket: merge instead of appending.
        old = self.tx_count_history.pop()
        self.tx_count_history.append(
            (old[0], old[1] + tx_count, old[2] + xshard_tx_count)
        )
    # Drop buckets older than 12 hours (popleft: history is a deque).
    while (
        len(self.tx_count_history) > 0
        and self.tx_count_history[0][0] < time.time() - 3600 * 12
    ):
        self.tx_count_history.popleft()
def get_block_count(self):
    # Summarize block counts at the current root tip height; the per-shard
    # breakdown comes straight from the root-state DB.
    header = self.root_state.tip
    shard_r_c = self.root_state.db.get_block_count(header.height)
    return {"rootHeight": header.height, "shardRC": shard_r_c}
async def get_stats(self):
    """Assemble the cluster-wide stats dict served to monitoring clients.

    Aggregates the cached per-shard stats reports plus root-chain info,
    peers, CPU usage and the rolling tx-count history.
    """
    # Per-shard section, sorted by full shard id for stable output.
    shards = []
    for shard_stats in self.branch_to_shard_stats.values():
        shard = dict()
        shard["fullShardId"] = shard_stats.branch.get_full_shard_id()
        shard["chainId"] = shard_stats.branch.get_chain_id()
        shard["shardId"] = shard_stats.branch.get_shard_id()
        shard["height"] = shard_stats.height
        shard["difficulty"] = shard_stats.difficulty
        shard["coinbaseAddress"] = "0x" + shard_stats.coinbase_address.to_hex()
        shard["timestamp"] = shard_stats.timestamp
        shard["txCount60s"] = shard_stats.tx_count60s
        shard["pendingTxCount"] = shard_stats.pending_tx_count
        shard["totalTxCount"] = shard_stats.total_tx_count
        shard["blockCount60s"] = shard_stats.block_count60s
        shard["staleBlockCount60s"] = shard_stats.stale_block_count60s
        shard["lastBlockTime"] = shard_stats.last_block_time
        shards.append(shard)
    shards.sort(key=lambda x: x["fullShardId"])
    # Cluster-wide totals across all shards.
    tx_count60s = sum(
        [
            shard_stats.tx_count60s
            for shard_stats in self.branch_to_shard_stats.values()
        ]
    )
    block_count60s = sum(
        [
            shard_stats.block_count60s
            for shard_stats in self.branch_to_shard_stats.values()
        ]
    )
    pending_tx_count = sum(
        [
            shard_stats.pending_tx_count
            for shard_stats in self.branch_to_shard_stats.values()
        ]
    )
    stale_block_count60s = sum(
        [
            shard_stats.stale_block_count60s
            for shard_stats in self.branch_to_shard_stats.values()
        ]
    )
    total_tx_count = sum(
        [
            shard_stats.total_tx_count
            for shard_stats in self.branch_to_shard_stats.values()
        ]
    )
    # Interval between the two most recent root blocks (0 until height 3).
    root_last_block_time = 0
    if self.root_state.tip.height >= 3:
        prev = self.root_state.db.get_root_block_by_hash(
            self.root_state.tip.hash_prev_block
        )
        root_last_block_time = (
            self.root_state.tip.create_time - prev.header.create_time
        )
    tx_count_history = []
    for item in self.tx_count_history:
        tx_count_history.append(
            {"timestamp": item[0], "txCount": item[1], "xShardTxCount": item[2]}
        )
    return {
        "networkId": self.env.quark_chain_config.NETWORK_ID,
        "chainSize": self.env.quark_chain_config.CHAIN_SIZE,
        "shardServerCount": len(self.slave_pool),
        "rootHeight": self.root_state.tip.height,
        "rootDifficulty": self.root_state.tip.difficulty,
        "rootCoinbaseAddress": "0x" + self.root_state.tip.coinbase_address.to_hex(),
        "rootTimestamp": self.root_state.tip.create_time,
        "rootLastBlockTime": root_last_block_time,
        "txCount60s": tx_count60s,
        "blockCount60s": block_count60s,
        "staleBlockCount60s": stale_block_count60s,
        "pendingTxCount": pending_tx_count,
        "totalTxCount": total_tx_count,
        "syncing": self.synchronizer.running,
        "mining": self.root_miner.is_enabled(),
        "shards": shards,
        "peers": [
            "{}:{}".format(peer.ip, peer.port)
            for _, peer in self.network.active_peer_pool.items()
        ],
        "minor_block_interval": self.get_artificial_tx_config().target_minor_block_time,
        "root_block_interval": self.get_artificial_tx_config().target_root_block_time,
        "cpus": psutil.cpu_percent(percpu=True),
        "txCountHistory": tx_count_history,
    }
def is_syncing(self):
    # True while the synchronizer is downloading blocks from a peer.
    return self.synchronizer.running
def is_mining(self):
    # True when root-chain mining is currently enabled.
    return self.root_miner.is_enabled()
async def get_minor_block_by_hash(self, block_hash, branch):
    """Fetch a minor block by hash from the branch's first slave (None if unserved)."""
    slaves = self.branch_to_slaves.get(branch.value)
    if slaves is None:
        return None
    return await slaves[0].get_minor_block_by_hash(block_hash, branch)
async def get_minor_block_by_height(self, height: Optional[int], branch):
    """Fetch a minor block by height; a None height means the current shard tip."""
    slaves = self.branch_to_slaves.get(branch.value)
    if slaves is None:
        return None
    if height is None:
        height = self.branch_to_shard_stats[branch.value].height
    return await slaves[0].get_minor_block_by_height(height, branch)
async def get_transaction_by_hash(self, tx_hash, branch):
    """Look up a transaction by hash on the branch's first slave (None if unserved)."""
    slaves = self.branch_to_slaves.get(branch.value)
    if slaves is None:
        return None
    return await slaves[0].get_transaction_by_hash(tx_hash, branch)
async def get_transaction_receipt(
    self, tx_hash, branch
) -> Optional[Tuple[MinorBlock, int, TransactionReceipt]]:
    """Return (block, tx_index, receipt) for a tx, or None if unavailable."""
    if branch.value not in self.branch_to_slaves:
        return None
    slave = self.branch_to_slaves[branch.value][0]
    return await slave.get_transaction_receipt(tx_hash, branch)
async def get_transactions_by_address(self, address, start, limit):
    """List transactions involving ``address``, paged via ``start``/``limit``.

    Returns None when no slave serves the address's shard — consistent
    with the sibling query methods; previously this path raised KeyError.
    """
    full_shard_id = self.env.quark_chain_config.get_full_shard_id_by_full_shard_key(
        address.full_shard_key
    )
    if full_shard_id not in self.branch_to_slaves:
        return None
    slave = self.branch_to_slaves[full_shard_id][0]
    return await slave.get_transactions_by_address(address, start, limit)
async def get_logs(
    self,
    addresses: List[Address],
    topics: List[List[bytes]],
    start_block: Union[int, str],
    end_block: Union[int, str],
    branch: Branch,
) -> Optional[List[Log]]:
    """Query event logs on one branch; "latest" resolves to the cached tip height."""
    if branch.value not in self.branch_to_slaves:
        return None
    # Resolve symbolic heights against the latest shard stats report.
    if start_block == "latest":
        start_block = self.branch_to_shard_stats[branch.value].height
    if end_block == "latest":
        end_block = self.branch_to_shard_stats[branch.value].height
    slave = self.branch_to_slaves[branch.value][0]
    return await slave.get_logs(branch, addresses, topics, start_block, end_block)
async def estimate_gas(
    self, tx: TypedTransaction, from_address: Address
) -> Optional[int]:
    """Estimate gas for a transaction on the shard derived from its sender."""
    evm_tx = tx.tx.to_evm_tx()
    evm_tx.set_quark_chain_config(self.env.quark_chain_config)
    branch = Branch(evm_tx.from_full_shard_id)
    if branch.value not in self.branch_to_slaves:
        return None
    slave = self.branch_to_slaves[branch.value][0]
    return await slave.estimate_gas(tx, from_address)
async def get_storage_at(
    self, address: Address, key: int, block_height: Optional[int]
) -> Optional[bytes]:
    """Read one storage slot of ``address``; None when the shard is not served."""
    shard_id = self.env.quark_chain_config.get_full_shard_id_by_full_shard_key(
        address.full_shard_key
    )
    slaves = self.branch_to_slaves.get(shard_id)
    if slaves is None:
        return None
    return await slaves[0].get_storage_at(address, key, block_height)
async def get_code(
    self, address: Address, block_height: Optional[int]
) -> Optional[bytes]:
    """Fetch contract code at ``address``; None when the shard is not served."""
    shard_id = self.env.quark_chain_config.get_full_shard_id_by_full_shard_key(
        address.full_shard_key
    )
    slaves = self.branch_to_slaves.get(shard_id)
    if slaves is None:
        return None
    return await slaves[0].get_code(address, block_height)
async def gas_price(self, branch: Branch) -> Optional[int]:
    """Ask a slave of ``branch`` for its suggested gas price (None if unserved)."""
    slaves = self.branch_to_slaves.get(branch.value)
    if slaves is None:
        return None
    return await slaves[0].gas_price(branch)
async def get_work(self, branch: Optional[Branch]) -> Optional[MiningWork]:
    """Return mining work: root-chain work when ``branch`` is None, else shard work."""
    if not branch:
        work, _ = await self.root_miner.get_work()
        return work
    if branch.value not in self.branch_to_slaves:
        return None
    slave = self.branch_to_slaves[branch.value][0]
    return await slave.get_work(branch)
async def submit_work(
    self, branch: Optional[Branch], header_hash: bytes, nonce: int, mixhash: bytes
) -> bool:
    """Submit a PoW solution: to the root miner when ``branch`` is None, else a slave."""
    if not branch:
        return await self.root_miner.submit_work(header_hash, nonce, mixhash)
    if branch.value not in self.branch_to_slaves:
        return False
    slave = self.branch_to_slaves[branch.value][0]
    return await slave.submit_work(branch, header_hash, nonce, mixhash)
def parse_args():
    """Parse command-line arguments and build the master's environment.

    Attaches the cluster config's own arguments, then configures a
    persistent master DB unless an in-memory DB was requested.
    """
    parser = argparse.ArgumentParser()
    ClusterConfig.attach_arguments(parser)
    args = parser.parse_args()
    env = DEFAULT_ENV.copy()
    env.cluster_config = ClusterConfig.create_from_args(args)
    if not env.cluster_config.use_mem_db():
        env.db = PersistentDb(
            "{path}/master.db".format(path=env.cluster_config.DB_PATH_ROOT),
            clean=env.cluster_config.CLEAN,
        )
    return env
def main():
    """Boot the master server: network, JSON-RPC servers, and main loop."""
    # Local import avoids a circular dependency at module import time —
    # TODO confirm against quarkchain.cluster.jsonrpc.
    from quarkchain.cluster.jsonrpc import JSONRPCServer

    # Run relative to this file so configured relative paths resolve.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    env = parse_args()
    root_state = RootState(env)
    master = MasterServer(env, root_state)
    master.start()
    # Block until every slave has connected and reported its shards.
    master.wait_until_cluster_active()
    if env.cluster_config.START_SIMULATED_MINING:
        asyncio.ensure_future(master.start_mining())
    loop = asyncio.get_event_loop()
    if env.cluster_config.use_p2p():
        network = P2PManager(env, master, loop)
    else:
        network = SimpleNetwork(env, master, loop)
    network.start()
    public_json_rpc_server = JSONRPCServer.start_public_server(env, master)
    private_json_rpc_server = JSONRPCServer.start_private_server(env, master)
    # Runs the event loop until shutdown is requested.
    master.do_loop()
    public_json_rpc_server.shutdown()
    private_json_rpc_server.shutdown()
    Logger.info("Master server is shutdown")
# Script entry point.
if __name__ == "__main__":
    main()
| true | true |
f73cecf3e4d7f6300e2bd14b05ac5ead060e242c | 20,112 | py | Python | tests/utils/test_search_utils.py | jene4ekjene4ek/my_mlflow | 1ac1b5d6657789168253101ae3a8477cff54dd9e | [
"Apache-2.0"
] | 1 | 2021-01-10T14:00:57.000Z | 2021-01-10T14:00:57.000Z | tests/utils/test_search_utils.py | jene4ekjene4ek/my_mlflow | 1ac1b5d6657789168253101ae3a8477cff54dd9e | [
"Apache-2.0"
] | null | null | null | tests/utils/test_search_utils.py | jene4ekjene4ek/my_mlflow | 1ac1b5d6657789168253101ae3a8477cff54dd9e | [
"Apache-2.0"
] | 1 | 2020-12-29T18:01:42.000Z | 2020-12-29T18:01:42.000Z | import base64
import json
import pytest
from mlflow.entities import RunInfo, RunData, Run, LifecycleStage, RunStatus, Metric, Param, RunTag
from mlflow.exceptions import MlflowException
from mlflow.utils.search_utils import SearchUtils
@pytest.mark.parametrize("filter_string, parsed_filter", [
    ("metric.acc >= 0.94", [{'comparator': '>=', 'key': 'acc', 'type': 'metric', 'value': '0.94'}]),
    ("metric.acc>=100", [{'comparator': '>=', 'key': 'acc', 'type': 'metric', 'value': '100'}]),
    ("params.m!='tf'", [{'comparator': '!=', 'key': 'm', 'type': 'parameter', 'value': 'tf'}]),
    ('params."m"!="tf"', [{'comparator': '!=', 'key': 'm', 'type': 'parameter', 'value': 'tf'}]),
    ('metric."legit name" >= 0.243', [{'comparator': '>=',
                                       'key': 'legit name',
                                       'type': 'metric',
                                       'value': '0.243'}]),
    ("metrics.XYZ = 3", [{'comparator': '=', 'key': 'XYZ', 'type': 'metric', 'value': '3'}]),
    ('params."cat dog" = "pets"', [{'comparator': '=',
                                    'key': 'cat dog',
                                    'type': 'parameter',
                                    'value': 'pets'}]),
    ('metrics."X-Y-Z" = 3', [{'comparator': '=', 'key': 'X-Y-Z', 'type': 'metric', 'value': '3'}]),
    ('metrics."X//Y#$$@&Z" = 3', [{'comparator': '=',
                                   'key': 'X//Y#$$@&Z',
                                   'type': 'metric',
                                   'value': '3'}]),
    ("params.model = 'LinearRegression'", [{'comparator': '=',
                                            'key': 'model',
                                            'type': 'parameter',
                                            'value': "LinearRegression"}]),
    ("metrics.rmse < 1 and params.model_class = 'LR'", [
        {'comparator': '<', 'key': 'rmse', 'type': 'metric', 'value': '1'},
        {'comparator': '=', 'key': 'model_class', 'type': 'parameter', 'value': "LR"}
    ]),
    ('', []),
    ("`metric`.a >= 0.1", [{'comparator': '>=', 'key': 'a', 'type': 'metric', 'value': '0.1'}]),
    ("`params`.model >= 'LR'", [{'comparator': '>=',
                                 'key': 'model',
                                 'type': 'parameter',
                                 'value': "LR"}]),
    ("tags.version = 'commit-hash'", [{'comparator': '=',
                                       'key': 'version',
                                       'type': 'tag',
                                       'value': "commit-hash"}]),
    ("`tags`.source_name = 'a notebook'", [{'comparator': '=',
                                            'key': 'source_name',
                                            'type': 'tag',
                                            'value': "a notebook"}]),
    ('metrics."accuracy.2.0" > 5', [{'comparator': '>',
                                     'key': 'accuracy.2.0',
                                     'type': 'metric',
                                     'value': '5'}]),
    ('metrics.`spacey name` > 5', [{'comparator': '>',
                                    'key': 'spacey name',
                                    'type': 'metric',
                                    'value': '5'}]),
    ('params."p.a.r.a.m" != "a"', [{'comparator': '!=',
                                    'key': 'p.a.r.a.m',
                                    'type': 'parameter',
                                    'value': 'a'}]),
    ('tags."t.a.g" = "a"', [{'comparator': '=',
                             'key': 't.a.g',
                             'type': 'tag',
                             'value': 'a'}]),
    ("attribute.artifact_uri = '1/23/4'", [{'type': 'attribute',
                                            'comparator': '=',
                                            'key': 'artifact_uri',
                                            'value': '1/23/4'}]),
    ("run.status = 'RUNNING'", [{'type': 'attribute',
                                 'comparator': '=',
                                 'key': 'status',
                                 'value': 'RUNNING'}]),
])
def test_filter(filter_string, parsed_filter):
    """Each filter string parses into the expected list of typed comparison dicts."""
    assert SearchUtils.parse_search_filter(filter_string) == parsed_filter
@pytest.mark.parametrize("filter_string, parsed_filter", [
    ("params.m = 'LR'", [{'type': 'parameter', 'comparator': '=', 'key': 'm', 'value': 'LR'}]),
    ("params.m = \"LR\"", [{'type': 'parameter', 'comparator': '=', 'key': 'm', 'value': 'LR'}]),
    ('params.m = "LR"', [{'type': 'parameter', 'comparator': '=', 'key': 'm', 'value': 'LR'}]),
    ('params.m = "L\'Hosp"', [{'type': 'parameter', 'comparator': '=',
                               'key': 'm', 'value': "L'Hosp"}]),
])
def test_correct_quote_trimming(filter_string, parsed_filter):
    """Single and double quotes around values are stripped; embedded quotes survive."""
    assert SearchUtils.parse_search_filter(filter_string) == parsed_filter
@pytest.mark.parametrize("filter_string, error_message", [
    ("metric.acc >= 0.94; metrics.rmse < 1", "Search filter contained multiple expression"),
    ("m.acc >= 0.94", "Invalid entity type"),
    ("acc >= 0.94", "Invalid identifier"),
    ("p.model >= 'LR'", "Invalid entity type"),
    ("attri.x != 1", "Invalid entity type"),
    ("a.x != 1", "Invalid entity type"),
    ("model >= 'LR'", "Invalid identifier"),
    ("metrics.A > 0.1 OR params.B = 'LR'", "Invalid clause(s) in filter string"),
    ("metrics.A > 0.1 NAND params.B = 'LR'", "Invalid clause(s) in filter string"),
    ("metrics.A > 0.1 AND (params.B = 'LR')", "Invalid clause(s) in filter string"),
    ("`metrics.A > 0.1", "Invalid clause(s) in filter string"),
    ("param`.A > 0.1", "Invalid clause(s) in filter string"),
    ("`dummy.A > 0.1", "Invalid clause(s) in filter string"),
    ("dummy`.A > 0.1", "Invalid clause(s) in filter string"),
    ("attribute.start != 1", "Invalid attribute key"),
    ("attribute.start_time != 1", "Invalid attribute key"),
    ("attribute.end_time != 1", "Invalid attribute key"),
    ("attribute.run_id != 1", "Invalid attribute key"),
    ("attribute.run_uuid != 1", "Invalid attribute key"),
    ("attribute.experiment_id != 1", "Invalid attribute key"),
    ("attribute.lifecycle_stage = 'ACTIVE'", "Invalid attribute key"),
    ("attribute.name != 1", "Invalid attribute key"),
    ("attribute.time != 1", "Invalid attribute key"),
    ("attribute._status != 'RUNNING'", "Invalid attribute key"),
    ("attribute.status = true", "Invalid clause(s) in filter string"),
])
def test_error_filter(filter_string, error_message):
    """Malformed filters raise MlflowException with the expected message fragment."""
    with pytest.raises(MlflowException) as e:
        SearchUtils.parse_search_filter(filter_string)
    assert error_message in e.value.message
@pytest.mark.parametrize("filter_string, error_message", [
    ("metric.model = 'LR'", "Expected numeric value type for metric"),
    ("metric.model = '5'", "Expected numeric value type for metric"),
    ("params.acc = 5", "Expected a quoted string value for param"),
    ("tags.acc = 5", "Expected a quoted string value for tag"),
    ("metrics.acc != metrics.acc", "Expected numeric value type for metric"),
    ("1.0 > metrics.acc", "Expected 'Identifier' found"),
    ("attribute.status = 1", "Expected a quoted string value for attributes"),
])
def test_error_comparison_clauses(filter_string, error_message):
    """Value/entity type mismatches are rejected with a descriptive error."""
    with pytest.raises(MlflowException) as e:
        SearchUtils.parse_search_filter(filter_string)
    assert error_message in e.value.message
@pytest.mark.parametrize("filter_string, error_message", [
    ("params.acc = LR", "value is either not quoted or unidentified quote types"),
    ("tags.acc = LR", "value is either not quoted or unidentified quote types"),
    ("params.acc = `LR`", "value is either not quoted or unidentified quote types"),
    ("params.'acc = LR", "Invalid clause(s) in filter string"),
    ("params.acc = 'LR", "Invalid clause(s) in filter string"),
    ("params.acc = LR'", "Invalid clause(s) in filter string"),
    ("params.acc = \"LR'", "Invalid clause(s) in filter string"),
    ("tags.acc = \"LR'", "Invalid clause(s) in filter string"),
    ("tags.acc = = 'LR'", "Invalid clause(s) in filter string"),
    ("attribute.status IS 'RUNNING'", "Invalid clause(s) in filter string"),
])
def test_bad_quotes(filter_string, error_message):
    """Unquoted, mismatched, or backtick-quoted values are rejected."""
    with pytest.raises(MlflowException) as e:
        SearchUtils.parse_search_filter(filter_string)
    assert error_message in e.value.message
@pytest.mark.parametrize("filter_string, error_message", [
    ("params.acc LR !=", "Invalid clause(s) in filter string"),
    ("params.acc LR", "Invalid clause(s) in filter string"),
    ("metric.acc !=", "Invalid clause(s) in filter string"),
    ("acc != 1.0", "Invalid identifier"),
    ("foo is null", "Invalid clause(s) in filter string"),
    ("1=1", "Expected 'Identifier' found"),
    ("1==2", "Expected 'Identifier' found"),
])
def test_invalid_clauses(filter_string, error_message):
    """Structurally broken clauses fail to parse with the expected error."""
    with pytest.raises(MlflowException) as e:
        SearchUtils.parse_search_filter(filter_string)
    assert error_message in e.value.message
@pytest.mark.parametrize("entity_type, bad_comparators, key, entity_value", [
    ("metrics", ["~", "~="], "abc", 1.0),
    ("params", [">", "<", ">=", "<=", "~"], "abc", "'my-param-value'"),
    ("tags", [">", "<", ">=", "<=", "~"], "abc", "'my-tag-value'"),
    ("attributes", [">", "<", ">=", "<=", "~"], "status", "'my-tag-value'"),
])
def test_bad_comparators(entity_type, bad_comparators, key, entity_value):
    """Comparators unsupported for an entity type raise at filter time."""
    # A minimal run: the error should surface before any matching happens.
    run = Run(run_info=RunInfo(
        run_uuid="hi", run_id="hi", experiment_id=0,
        user_id="user-id", status=RunStatus.to_string(RunStatus.FAILED),
        start_time=0, end_time=1, lifecycle_stage=LifecycleStage.ACTIVE),
        run_data=RunData(metrics=[], params=[], tags=[])
    )
    for bad_comparator in bad_comparators:
        bad_filter = "{entity_type}.{key} {comparator} {value}".format(
            entity_type=entity_type, key=key, comparator=bad_comparator, value=entity_value)
        with pytest.raises(MlflowException) as e:
            SearchUtils.filter([run], bad_filter)
        assert "Invalid comparator" in str(e.value.message)
@pytest.mark.parametrize("filter_string, matching_runs", [
    (None, [0, 1, 2]),
    ("", [0, 1, 2]),
    ("attributes.status = 'FAILED'", [0, 2]),
    ("metrics.key1 = 123", [1]),
    ("metrics.key1 != 123", [0, 2]),
    ("metrics.key1 >= 123", [1, 2]),
    ("params.my_param = 'A'", [0, 1]),
    ("tags.tag1 = 'D'", [2]),
    ("tags.tag1 != 'D'", [1]),
    ("params.my_param = 'A' AND attributes.status = 'FAILED'", [0]),
])
def test_correct_filtering(filter_string, matching_runs):
    """SearchUtils.filter returns exactly the runs (by fixture index) each filter selects.

    Note: run 0 has no tag1, so both ``tags.tag1 = 'D'`` and
    ``tags.tag1 != 'D'`` exclude it — missing keys never match.
    """
    runs = [
        Run(run_info=RunInfo(
            run_uuid="hi", run_id="hi", experiment_id=0,
            user_id="user-id", status=RunStatus.to_string(RunStatus.FAILED),
            start_time=0, end_time=1, lifecycle_stage=LifecycleStage.ACTIVE),
            run_data=RunData(
                metrics=[Metric("key1", 121, 1, 0)],
                params=[Param("my_param", "A")],
                tags=[])),
        Run(run_info=RunInfo(
            run_uuid="hi2", run_id="hi2", experiment_id=0,
            user_id="user-id", status=RunStatus.to_string(RunStatus.FINISHED),
            start_time=0, end_time=1, lifecycle_stage=LifecycleStage.ACTIVE),
            run_data=RunData(
                metrics=[Metric("key1", 123, 1, 0)],
                params=[Param("my_param", "A")],
                tags=[RunTag("tag1", "C")])),
        Run(run_info=RunInfo(
            run_uuid="hi3", run_id="hi3", experiment_id=1,
            user_id="user-id", status=RunStatus.to_string(RunStatus.FAILED),
            start_time=0, end_time=1, lifecycle_stage=LifecycleStage.ACTIVE),
            run_data=RunData(
                metrics=[Metric("key1", 125, 1, 0)],
                params=[Param("my_param", "B")],
                tags=[RunTag("tag1", "D")])),
    ]
    filtered_runs = SearchUtils.filter(runs, filter_string)
    assert set(filtered_runs) == set([runs[i] for i in matching_runs])
@pytest.mark.parametrize("order_bys, matching_runs", [
    (None, [2, 1, 0]),
    ([], [2, 1, 0]),
    (["tags.noSuchTag"], [2, 1, 0]),
    (["attributes.status"], [2, 0, 1]),
    (["attributes.start_time"], [0, 2, 1]),
    (["metrics.key1 asc"], [0, 1, 2]),
    (["metrics.\"key1\" desc"], [2, 1, 0]),
    (["params.my_param"], [1, 0, 2]),
    (["params.my_param aSc", "attributes.status ASC"], [0, 1, 2]),
    (["params.my_param", "attributes.status DESC"], [1, 0, 2]),
    (["params.my_param DESC", "attributes.status DESC"], [2, 1, 0]),
    (["params.`my_param` DESC", "attributes.status DESC"], [2, 1, 0]),
    (["tags.tag1"], [1, 2, 0]),
    (["tags.tag1 DESC"], [2, 1, 0]),
])
def test_correct_sorting(order_bys, matching_runs):
    """SearchUtils.sort orders the fixture runs into the expected index order.

    With no usable key the default ordering applies (start_time desc, then
    run_id desc — matching [2, 1, 0] here).
    """
    runs = [
        Run(run_info=RunInfo(
            run_uuid="9", run_id="9", experiment_id=0,
            user_id="user-id", status=RunStatus.to_string(RunStatus.FAILED),
            start_time=0, end_time=1, lifecycle_stage=LifecycleStage.ACTIVE),
            run_data=RunData(
                metrics=[Metric("key1", 121, 1, 0)],
                params=[Param("my_param", "A")],
                tags=[])),
        Run(run_info=RunInfo(
            run_uuid="8", run_id="8", experiment_id=0,
            user_id="user-id", status=RunStatus.to_string(RunStatus.FINISHED),
            start_time=1, end_time=1, lifecycle_stage=LifecycleStage.ACTIVE),
            run_data=RunData(
                metrics=[Metric("key1", 123, 1, 0)],
                params=[Param("my_param", "A")],
                tags=[RunTag("tag1", "C")])),
        Run(run_info=RunInfo(
            run_uuid="7", run_id="7", experiment_id=1,
            user_id="user-id", status=RunStatus.to_string(RunStatus.FAILED),
            start_time=1, end_time=1, lifecycle_stage=LifecycleStage.ACTIVE),
            run_data=RunData(
                metrics=[Metric("key1", 125, 1, 0)],
                params=[Param("my_param", "B")],
                tags=[RunTag("tag1", "D")])),
    ]
    sorted_runs = SearchUtils.sort(runs, order_bys)
    # Map each sorted run back to its index in the fixture list.
    sorted_run_indices = []
    for run in sorted_runs:
        for i, r in enumerate(runs):
            if r == run:
                sorted_run_indices.append(i)
                break
    assert sorted_run_indices == matching_runs
def test_order_by_metric_with_nans_and_infs():
    """NaN metric values sort last in both directions; infinities sort normally."""
    metric_vals_str = ["nan", "inf", "-inf", "-1000", "0", "1000"]
    runs = [
        Run(run_info=RunInfo(run_id=x, run_uuid=x, experiment_id=0, user_id="user",
                             status=RunStatus.to_string(RunStatus.FINISHED),
                             start_time=0, end_time=1, lifecycle_stage=LifecycleStage.ACTIVE),
            run_data=RunData(
                metrics=[Metric("x", float(x), 1, 0)])
            ) for x in metric_vals_str
    ]
    sorted_runs_asc = [
        x.info.run_id for x in SearchUtils.sort(runs, ["metrics.x asc"])
    ]
    sorted_runs_desc = [
        x.info.run_id for x in SearchUtils.sort(runs, ["metrics.x desc"])
    ]
    # Ascending: numeric order with NaN pushed to the end.
    assert ["-inf", "-1000", "0", "1000", "inf", "nan"] == sorted_runs_asc
    # Descending: reversed numeric order, NaN still last.
    assert ["inf", "1000", "0", "-1000", "-inf", "nan"] == sorted_runs_desc
@pytest.mark.parametrize("order_by, error_message", [
    ("m.acc", "Invalid entity type"),
    ("acc", "Invalid identifier"),
    ("attri.x", "Invalid entity type"),
    ("`metrics.A", "Invalid order_by clause"),
    ("`metrics.A`", "Invalid entity type"),
    ("attribute.start", "Invalid attribute key"),
    ("attribute.run_id", "Invalid attribute key"),
    ("attribute.experiment_id", "Invalid attribute key"),
    ("metrics.A != 1", "Invalid order_by clause"),
    ("params.my_param ", "Invalid order_by clause"),
    ("attribute.run_id ACS", "Invalid ordering key"),
    ("attribute.run_id decs", "Invalid ordering key"),
])
def test_invalid_order_by_search_runs(order_by, error_message):
    """Bad order_by clauses for run search raise the expected parse error."""
    with pytest.raises(MlflowException) as e:
        SearchUtils.parse_order_by_for_search_runs(order_by)
    assert error_message in e.value.message
@pytest.mark.parametrize("order_by, ascending_expected", [
    ("metrics.`Mean Square Error`", True),        # default direction is ASC
    ("metrics.`Mean Square Error` ASC", True),
    ("metrics.`Mean Square Error` DESC", False),
])
def test_space_order_by_search_runs(order_by, ascending_expected):
    """Backtick-quoted keys containing spaces parse with the right direction."""
    identifier_type, identifier_name, ascending = SearchUtils.parse_order_by_for_search_runs(
        order_by)
    assert identifier_type == "metric"
    assert identifier_name == "Mean Square Error"
    assert ascending == ascending_expected
@pytest.mark.parametrize("order_by, error_message", [
    ("creation_timestamp DESC", "Invalid order by key"),
    ('last_updated_timestamp DESC blah', "Invalid order_by clause"),
    ('', "Invalid order_by clause"),
    ('timestamp somerandomstuff ASC', "Invalid order_by clause"),
    ('timestamp somerandomstuff', "Invalid order_by clause"),
    ('timestamp decs', "Invalid order_by clause"),
    ('timestamp ACS', "Invalid order_by clause"),
    ('name aCs', "Invalid ordering key")
])
def test_invalid_order_by_search_registered_models(order_by, error_message):
    """Bad order_by clauses for registered-model search raise the expected error."""
    with pytest.raises(MlflowException) as e:
        SearchUtils.parse_order_by_for_search_registered_models(order_by)
    assert error_message in e.value.message
@pytest.mark.parametrize("page_token, max_results, matching_runs, expected_next_page_token", [
    (None, 1, [0], {"offset": 1}),
    (None, 2, [0, 1], {"offset": 2}),
    (None, 3, [0, 1, 2], None),
    (None, 5, [0, 1, 2], None),
    ({"offset": 1}, 1, [1], {"offset": 2}),
    ({"offset": 1}, 2, [1, 2], None),
    ({"offset": 1}, 3, [1, 2], None),
    ({"offset": 2}, 1, [2], None),
    ({"offset": 2}, 2, [2], None),
    ({"offset": 2}, 0, [], {"offset": 2}),
    ({"offset": 3}, 1, [], None),
])
def test_pagination(page_token, max_results, matching_runs, expected_next_page_token):
    """Paginate slices runs per the base64/JSON offset token and emits the next token.

    The next token is None exactly when the returned page reaches the end
    of the run list.
    """
    runs = [
        Run(run_info=RunInfo(
            run_uuid="0", run_id="0", experiment_id=0,
            user_id="user-id", status=RunStatus.to_string(RunStatus.FAILED),
            start_time=0, end_time=1, lifecycle_stage=LifecycleStage.ACTIVE),
            run_data=RunData([], [], [])),
        Run(run_info=RunInfo(
            run_uuid="1", run_id="1", experiment_id=0,
            user_id="user-id", status=RunStatus.to_string(RunStatus.FAILED),
            start_time=0, end_time=1, lifecycle_stage=LifecycleStage.ACTIVE),
            run_data=RunData([], [], [])),
        Run(run_info=RunInfo(
            run_uuid="2", run_id="2", experiment_id=0,
            user_id="user-id", status=RunStatus.to_string(RunStatus.FAILED),
            start_time=0, end_time=1, lifecycle_stage=LifecycleStage.ACTIVE),
            run_data=RunData([], [], []))
    ]
    # Tokens travel as base64-encoded JSON, mirroring the public API.
    encoded_page_token = None
    if page_token:
        encoded_page_token = base64.b64encode(json.dumps(page_token).encode("utf-8"))
    paginated_runs, next_page_token = SearchUtils.paginate(runs, encoded_page_token, max_results)
    # Map the returned runs back to their fixture indices.
    paginated_run_indices = []
    for run in paginated_runs:
        for i, r in enumerate(runs):
            if r == run:
                paginated_run_indices.append(i)
                break
    assert paginated_run_indices == matching_runs
    decoded_next_page_token = None
    if next_page_token:
        decoded_next_page_token = json.loads(base64.b64decode(next_page_token))
    assert decoded_next_page_token == expected_next_page_token
@pytest.mark.parametrize("page_token, error_message", [
    (base64.b64encode(json.dumps({}).encode("utf-8")), "Invalid page token"),
    (base64.b64encode(json.dumps({"offset": "a"}).encode("utf-8")), "Invalid page token"),
    (base64.b64encode(json.dumps({"offsoot": 7}).encode("utf-8")), "Invalid page token"),
    (base64.b64encode("not json".encode("utf-8")), "Invalid page token"),
    ("not base64", "Invalid page token"),
])
def test_invalid_page_tokens(page_token, error_message):
    """Missing/mistyped offsets, bad JSON, and bad base64 all raise cleanly."""
    with pytest.raises(MlflowException) as e:
        SearchUtils.paginate([], page_token, 1)
    assert error_message in e.value.message
| 47.771971 | 100 | 0.559716 | import base64
import json
import pytest
from mlflow.entities import RunInfo, RunData, Run, LifecycleStage, RunStatus, Metric, Param, RunTag
from mlflow.exceptions import MlflowException
from mlflow.utils.search_utils import SearchUtils
@pytest.mark.parametrize("filter_string, parsed_filter", [
("metric.acc >= 0.94", [{'comparator': '>=', 'key': 'acc', 'type': 'metric', 'value': '0.94'}]),
("metric.acc>=100", [{'comparator': '>=', 'key': 'acc', 'type': 'metric', 'value': '100'}]),
("params.m!='tf'", [{'comparator': '!=', 'key': 'm', 'type': 'parameter', 'value': 'tf'}]),
('params."m"!="tf"', [{'comparator': '!=', 'key': 'm', 'type': 'parameter', 'value': 'tf'}]),
('metric."legit name" >= 0.243', [{'comparator': '>=',
'key': 'legit name',
'type': 'metric',
'value': '0.243'}]),
("metrics.XYZ = 3", [{'comparator': '=', 'key': 'XYZ', 'type': 'metric', 'value': '3'}]),
('params."cat dog" = "pets"', [{'comparator': '=',
'key': 'cat dog',
'type': 'parameter',
'value': 'pets'}]),
('metrics."X-Y-Z" = 3', [{'comparator': '=', 'key': 'X-Y-Z', 'type': 'metric', 'value': '3'}]),
('metrics."X//Y#$$@&Z" = 3', [{'comparator': '=',
'key': 'X//Y#$$@&Z',
'type': 'metric',
'value': '3'}]),
("params.model = 'LinearRegression'", [{'comparator': '=',
'key': 'model',
'type': 'parameter',
'value': "LinearRegression"}]),
("metrics.rmse < 1 and params.model_class = 'LR'", [
{'comparator': '<', 'key': 'rmse', 'type': 'metric', 'value': '1'},
{'comparator': '=', 'key': 'model_class', 'type': 'parameter', 'value': "LR"}
]),
('', []),
("`metric`.a >= 0.1", [{'comparator': '>=', 'key': 'a', 'type': 'metric', 'value': '0.1'}]),
("`params`.model >= 'LR'", [{'comparator': '>=',
'key': 'model',
'type': 'parameter',
'value': "LR"}]),
("tags.version = 'commit-hash'", [{'comparator': '=',
'key': 'version',
'type': 'tag',
'value': "commit-hash"}]),
("`tags`.source_name = 'a notebook'", [{'comparator': '=',
'key': 'source_name',
'type': 'tag',
'value': "a notebook"}]),
('metrics."accuracy.2.0" > 5', [{'comparator': '>',
'key': 'accuracy.2.0',
'type': 'metric',
'value': '5'}]),
('metrics.`spacey name` > 5', [{'comparator': '>',
'key': 'spacey name',
'type': 'metric',
'value': '5'}]),
('params."p.a.r.a.m" != "a"', [{'comparator': '!=',
'key': 'p.a.r.a.m',
'type': 'parameter',
'value': 'a'}]),
('tags."t.a.g" = "a"', [{'comparator': '=',
'key': 't.a.g',
'type': 'tag',
'value': 'a'}]),
("attribute.artifact_uri = '1/23/4'", [{'type': 'attribute',
'comparator': '=',
'key': 'artifact_uri',
'value': '1/23/4'}]),
("run.status = 'RUNNING'", [{'type': 'attribute',
'comparator': '=',
'key': 'status',
'value': 'RUNNING'}]),
])
def test_filter(filter_string, parsed_filter):
assert SearchUtils.parse_search_filter(filter_string) == parsed_filter
@pytest.mark.parametrize("filter_string, parsed_filter", [
("params.m = 'LR'", [{'type': 'parameter', 'comparator': '=', 'key': 'm', 'value': 'LR'}]),
("params.m = \"LR\"", [{'type': 'parameter', 'comparator': '=', 'key': 'm', 'value': 'LR'}]),
('params.m = "LR"', [{'type': 'parameter', 'comparator': '=', 'key': 'm', 'value': 'LR'}]),
('params.m = "L\'Hosp"', [{'type': 'parameter', 'comparator': '=',
'key': 'm', 'value': "L'Hosp"}]),
])
def test_correct_quote_trimming(filter_string, parsed_filter):
assert SearchUtils.parse_search_filter(filter_string) == parsed_filter
@pytest.mark.parametrize("filter_string, error_message", [
    ("metric.acc >= 0.94; metrics.rmse < 1", "Search filter contained multiple expression"),
    ("m.acc >= 0.94", "Invalid entity type"),
    ("acc >= 0.94", "Invalid identifier"),
    ("p.model >= 'LR'", "Invalid entity type"),
    ("attri.x != 1", "Invalid entity type"),
    ("a.x != 1", "Invalid entity type"),
    ("model >= 'LR'", "Invalid identifier"),
    ("metrics.A > 0.1 OR params.B = 'LR'", "Invalid clause(s) in filter string"),
    ("metrics.A > 0.1 NAND params.B = 'LR'", "Invalid clause(s) in filter string"),
    ("metrics.A > 0.1 AND (params.B = 'LR')", "Invalid clause(s) in filter string"),
    ("`metrics.A > 0.1", "Invalid clause(s) in filter string"),
    ("param`.A > 0.1", "Invalid clause(s) in filter string"),
    ("`dummy.A > 0.1", "Invalid clause(s) in filter string"),
    ("dummy`.A > 0.1", "Invalid clause(s) in filter string"),
    ("attribute.start != 1", "Invalid attribute key"),
    ("attribute.start_time != 1", "Invalid attribute key"),
    ("attribute.end_time != 1", "Invalid attribute key"),
    ("attribute.run_id != 1", "Invalid attribute key"),
    ("attribute.run_uuid != 1", "Invalid attribute key"),
    ("attribute.experiment_id != 1", "Invalid attribute key"),
    ("attribute.lifecycle_stage = 'ACTIVE'", "Invalid attribute key"),
    ("attribute.name != 1", "Invalid attribute key"),
    ("attribute.time != 1", "Invalid attribute key"),
    ("attribute._status != 'RUNNING'", "Invalid attribute key"),
    ("attribute.status = true", "Invalid clause(s) in filter string"),
])
def test_error_filter(filter_string, error_message):
    """Malformed filter strings raise MlflowException with a targeted message."""
    with pytest.raises(MlflowException) as exc_info:
        SearchUtils.parse_search_filter(filter_string)
    assert error_message in exc_info.value.message
@pytest.mark.parametrize("filter_string, error_message", [
    ("metric.model = 'LR'", "Expected numeric value type for metric"),
    ("metric.model = '5'", "Expected numeric value type for metric"),
    ("params.acc = 5", "Expected a quoted string value for param"),
    ("tags.acc = 5", "Expected a quoted string value for tag"),
    ("metrics.acc != metrics.acc", "Expected numeric value type for metric"),
    ("1.0 > metrics.acc", "Expected 'Identifier' found"),
    ("attribute.status = 1", "Expected a quoted string value for attributes"),
])
def test_error_comparison_clauses(filter_string, error_message):
    """Comparisons whose value type does not match the entity (numeric for metrics,
    quoted string for params/tags/attributes) are rejected with a type error."""
    with pytest.raises(MlflowException) as e:
        SearchUtils.parse_search_filter(filter_string)
    assert error_message in e.value.message
@pytest.mark.parametrize("filter_string, error_message", [
    ("params.acc = LR", "value is either not quoted or unidentified quote types"),
    ("tags.acc = LR", "value is either not quoted or unidentified quote types"),
    ("params.acc = `LR`", "value is either not quoted or unidentified quote types"),
    ("params.'acc = LR", "Invalid clause(s) in filter string"),
    ("params.acc = 'LR", "Invalid clause(s) in filter string"),
    ("params.acc = LR'", "Invalid clause(s) in filter string"),
    ("params.acc = \"LR'", "Invalid clause(s) in filter string"),
    ("tags.acc = \"LR'", "Invalid clause(s) in filter string"),
    ("tags.acc = = 'LR'", "Invalid clause(s) in filter string"),
    ("attribute.status IS 'RUNNING'", "Invalid clause(s) in filter string"),
])
def test_bad_quotes(filter_string, error_message):
    """Unquoted, backtick-quoted, unbalanced, or mismatched quotes around values
    all fail to parse, each with the appropriate error message."""
    with pytest.raises(MlflowException) as e:
        SearchUtils.parse_search_filter(filter_string)
    assert error_message in e.value.message
@pytest.mark.parametrize("filter_string, error_message", [
    ("params.acc LR !=", "Invalid clause(s) in filter string"),
    ("params.acc LR", "Invalid clause(s) in filter string"),
    ("metric.acc !=", "Invalid clause(s) in filter string"),
    ("acc != 1.0", "Invalid identifier"),
    ("foo is null", "Invalid clause(s) in filter string"),
    ("1=1", "Expected 'Identifier' found"),
    ("1==2", "Expected 'Identifier' found"),
])
def test_invalid_clauses(filter_string, error_message):
    """Structurally broken clauses (missing operand/comparator, bare identifiers,
    literal-on-the-left comparisons) raise MlflowException."""
    with pytest.raises(MlflowException) as e:
        SearchUtils.parse_search_filter(filter_string)
    assert error_message in e.value.message
@pytest.mark.parametrize("entity_type, bad_comparators, key, entity_value", [
    ("metrics", ["~", "~="], "abc", 1.0),
    ("params", [">", "<", ">=", "<=", "~"], "abc", "'my-param-value'"),
    ("tags", [">", "<", ">=", "<=", "~"], "abc", "'my-tag-value'"),
    ("attributes", [">", "<", ">=", "<=", "~"], "status", "'my-tag-value'"),
])
def test_bad_comparators(entity_type, bad_comparators, key, entity_value):
    """Comparators that are not legal for a given entity type raise when filtering."""
    run = Run(
        run_info=RunInfo(
            run_uuid="hi", run_id="hi", experiment_id=0,
            user_id="user-id", status=RunStatus.to_string(RunStatus.FAILED),
            start_time=0, end_time=1, lifecycle_stage=LifecycleStage.ACTIVE),
        run_data=RunData(metrics=[], params=[], tags=[]))
    for bad_comparator in bad_comparators:
        bad_filter = "{}.{} {} {}".format(entity_type, key, bad_comparator, entity_value)
        with pytest.raises(MlflowException) as exc_info:
            SearchUtils.filter([run], bad_filter)
        assert "Invalid comparator" in str(exc_info.value.message)
@pytest.mark.parametrize("filter_string, matching_runs", [
    (None, [0, 1, 2]),
    ("", [0, 1, 2]),
    ("attributes.status = 'FAILED'", [0, 2]),
    ("metrics.key1 = 123", [1]),
    ("metrics.key1 != 123", [0, 2]),
    ("metrics.key1 >= 123", [1, 2]),
    ("params.my_param = 'A'", [0, 1]),
    ("tags.tag1 = 'D'", [2]),
    ("tags.tag1 != 'D'", [1]),
    ("params.my_param = 'A' AND attributes.status = 'FAILED'", [0]),
])
def test_correct_filtering(filter_string, matching_runs):
    """SearchUtils.filter keeps exactly the runs matching the filter string.

    The fixture holds three runs that differ in status, metric value, param
    value, and tags; ``matching_runs`` lists the indices expected to survive.
    """
    runs = [
        # Run 0: FAILED, key1=121, my_param=A, no tags.
        Run(run_info=RunInfo(
            run_uuid="hi", run_id="hi", experiment_id=0,
            user_id="user-id", status=RunStatus.to_string(RunStatus.FAILED),
            start_time=0, end_time=1, lifecycle_stage=LifecycleStage.ACTIVE),
            run_data=RunData(
                metrics=[Metric("key1", 121, 1, 0)],
                params=[Param("my_param", "A")],
                tags=[])),
        # Run 1: FINISHED, key1=123, my_param=A, tag1=C.
        Run(run_info=RunInfo(
            run_uuid="hi2", run_id="hi2", experiment_id=0,
            user_id="user-id", status=RunStatus.to_string(RunStatus.FINISHED),
            start_time=0, end_time=1, lifecycle_stage=LifecycleStage.ACTIVE),
            run_data=RunData(
                metrics=[Metric("key1", 123, 1, 0)],
                params=[Param("my_param", "A")],
                tags=[RunTag("tag1", "C")])),
        # Run 2: FAILED, key1=125, my_param=B, tag1=D.
        Run(run_info=RunInfo(
            run_uuid="hi3", run_id="hi3", experiment_id=1,
            user_id="user-id", status=RunStatus.to_string(RunStatus.FAILED),
            start_time=0, end_time=1, lifecycle_stage=LifecycleStage.ACTIVE),
            run_data=RunData(
                metrics=[Metric("key1", 125, 1, 0)],
                params=[Param("my_param", "B")],
                tags=[RunTag("tag1", "D")])),
    ]
    filtered_runs = SearchUtils.filter(runs, filter_string)
    assert set(filtered_runs) == set([runs[i] for i in matching_runs])
@pytest.mark.parametrize("order_bys, matching_runs", [
    (None, [2, 1, 0]),
    ([], [2, 1, 0]),
    (["tags.noSuchTag"], [2, 1, 0]),
    (["attributes.status"], [2, 0, 1]),
    (["attributes.start_time"], [0, 2, 1]),
    (["metrics.key1 asc"], [0, 1, 2]),
    (["metrics.\"key1\" desc"], [2, 1, 0]),
    (["params.my_param"], [1, 0, 2]),
    (["params.my_param aSc", "attributes.status ASC"], [0, 1, 2]),
    (["params.my_param", "attributes.status DESC"], [1, 0, 2]),
    (["params.my_param DESC", "attributes.status DESC"], [2, 1, 0]),
    (["params.`my_param` DESC", "attributes.status DESC"], [2, 1, 0]),
    (["tags.tag1"], [1, 2, 0]),
    (["tags.tag1 DESC"], [2, 1, 0]),
])
def test_correct_sorting(order_bys, matching_runs):
    """SearchUtils.sort orders the runs according to the order_by clauses;
    ties (and the no-clause case) fall back to run id descending."""
    def make_run(uid, experiment_id, status, start_time, metric, param, tags):
        # Helper keeps the three-run fixture readable; only the fields the
        # sort keys touch vary between runs.
        return Run(
            run_info=RunInfo(
                run_uuid=uid, run_id=uid, experiment_id=experiment_id,
                user_id="user-id", status=RunStatus.to_string(status),
                start_time=start_time, end_time=1,
                lifecycle_stage=LifecycleStage.ACTIVE),
            run_data=RunData(metrics=[metric], params=[param], tags=tags))
    runs = [
        make_run("9", 0, RunStatus.FAILED, 0,
                 Metric("key1", 121, 1, 0), Param("my_param", "A"), []),
        make_run("8", 0, RunStatus.FINISHED, 1,
                 Metric("key1", 123, 1, 0), Param("my_param", "A"), [RunTag("tag1", "C")]),
        make_run("7", 1, RunStatus.FAILED, 1,
                 Metric("key1", 125, 1, 0), Param("my_param", "B"), [RunTag("tag1", "D")]),
    ]
    sorted_runs = SearchUtils.sort(runs, order_bys)
    assert [runs.index(run) for run in sorted_runs] == matching_runs
def test_order_by_metric_with_nans_and_infs():
    """NaN metric values sort last in both directions; +/-inf sort with the
    finite values."""
    metric_vals_str = ["nan", "inf", "-inf", "-1000", "0", "1000"]
    runs = []
    for val in metric_vals_str:
        info = RunInfo(run_id=val, run_uuid=val, experiment_id=0, user_id="user",
                       status=RunStatus.to_string(RunStatus.FINISHED),
                       start_time=0, end_time=1, lifecycle_stage=LifecycleStage.ACTIVE)
        data = RunData(metrics=[Metric("x", float(val), 1, 0)])
        runs.append(Run(run_info=info, run_data=data))
    def sorted_ids(order_by):
        return [run.info.run_id for run in SearchUtils.sort(runs, [order_by])]
    # Ascending: -inf first, nan always last.
    assert sorted_ids("metrics.x asc") == ["-inf", "-1000", "0", "1000", "inf", "nan"]
    # Descending: inf first, nan still last.
    assert sorted_ids("metrics.x desc") == ["inf", "1000", "0", "-1000", "-inf", "nan"]
@pytest.mark.parametrize("order_by, error_message", [
    ("m.acc", "Invalid entity type"),
    ("acc", "Invalid identifier"),
    ("attri.x", "Invalid entity type"),
    ("`metrics.A", "Invalid order_by clause"),
    ("`metrics.A`", "Invalid entity type"),
    ("attribute.start", "Invalid attribute key"),
    ("attribute.run_id", "Invalid attribute key"),
    ("attribute.experiment_id", "Invalid attribute key"),
    ("metrics.A != 1", "Invalid order_by clause"),
    ("params.my_param ", "Invalid order_by clause"),
    ("attribute.run_id ACS", "Invalid ordering key"),
    ("attribute.run_id decs", "Invalid ordering key"),
])
def test_invalid_order_by_search_runs(order_by, error_message):
    """Malformed order_by clauses for run search (bad entity, bad key, typo'd
    ASC/DESC) raise MlflowException with a specific message."""
    with pytest.raises(MlflowException) as e:
        SearchUtils.parse_order_by_for_search_runs(order_by)
    assert error_message in e.value.message
@pytest.mark.parametrize("order_by, ascending_expected", [
    ("metrics.`Mean Square Error`", True),
    ("metrics.`Mean Square Error` ASC", True),
    ("metrics.`Mean Square Error` DESC", False),
])
def test_space_order_by_search_runs(order_by, ascending_expected):
    """Backtick-quoted metric names containing spaces parse correctly, with
    ascending order as the default when no direction is given."""
    identifier_type, identifier_name, ascending = SearchUtils.parse_order_by_for_search_runs(
        order_by)
    assert identifier_type == "metric"
    assert identifier_name == "Mean Square Error"
    assert ascending == ascending_expected
@pytest.mark.parametrize("order_by, error_message", [
    ("creation_timestamp DESC", "Invalid order by key"),
    ('last_updated_timestamp DESC blah', "Invalid order_by clause"),
    ('', "Invalid order_by clause"),
    ('timestamp somerandomstuff ASC', "Invalid order_by clause"),
    ('timestamp somerandomstuff', "Invalid order_by clause"),
    ('timestamp decs', "Invalid order_by clause"),
    ('timestamp ACS', "Invalid order_by clause"),
    ('name aCs', "Invalid ordering key")
])
def test_invalid_order_by_search_registered_models(order_by, error_message):
    """Registered-model search rejects unknown order-by keys, empty clauses,
    extra tokens, and misspelled ASC/DESC keywords."""
    with pytest.raises(MlflowException) as e:
        SearchUtils.parse_order_by_for_search_registered_models(order_by)
    assert error_message in e.value.message
@pytest.mark.parametrize("page_token, max_results, matching_runs, expected_next_page_token", [
    (None, 1, [0], {"offset": 1}),
    (None, 2, [0, 1], {"offset": 2}),
    (None, 3, [0, 1, 2], None),
    (None, 5, [0, 1, 2], None),
    ({"offset": 1}, 1, [1], {"offset": 2}),
    ({"offset": 1}, 2, [1, 2], None),
    ({"offset": 1}, 3, [1, 2], None),
    ({"offset": 2}, 1, [2], None),
    ({"offset": 2}, 2, [2], None),
    ({"offset": 2}, 0, [], {"offset": 2}),
    ({"offset": 3}, 1, [], None),
])
def test_pagination(page_token, max_results, matching_runs, expected_next_page_token):
    """SearchUtils.paginate honors the base64/JSON offset token and only emits a
    next-page token when more runs remain."""
    def make_run(uid):
        # All fields except the id are irrelevant to pagination.
        return Run(
            run_info=RunInfo(
                run_uuid=uid, run_id=uid, experiment_id=0,
                user_id="user-id", status=RunStatus.to_string(RunStatus.FAILED),
                start_time=0, end_time=1,
                lifecycle_stage=LifecycleStage.ACTIVE),
            run_data=RunData([], [], []))
    runs = [make_run(str(n)) for n in range(3)]
    encoded_page_token = None
    if page_token:
        encoded_page_token = base64.b64encode(json.dumps(page_token).encode("utf-8"))
    paginated_runs, next_page_token = SearchUtils.paginate(runs, encoded_page_token, max_results)
    assert [runs.index(run) for run in paginated_runs] == matching_runs
    decoded_next_page_token = None
    if next_page_token:
        decoded_next_page_token = json.loads(base64.b64decode(next_page_token))
    assert decoded_next_page_token == expected_next_page_token
@pytest.mark.parametrize("page_token, error_message", [
    (base64.b64encode(json.dumps({}).encode("utf-8")), "Invalid page token"),
    (base64.b64encode(json.dumps({"offset": "a"}).encode("utf-8")), "Invalid page token"),
    (base64.b64encode(json.dumps({"offsoot": 7}).encode("utf-8")), "Invalid page token"),
    (base64.b64encode("not json".encode("utf-8")), "Invalid page token"),
    ("not base64", "Invalid page token"),
])
def test_invalid_page_tokens(page_token, error_message):
    """Tokens with a missing/mistyped/misnamed offset, non-JSON payloads, and
    non-base64 strings are all rejected by paginate."""
    with pytest.raises(MlflowException) as e:
        SearchUtils.paginate([], page_token, 1)
    assert error_message in e.value.message
| true | true |
f73ced32569e4fc87bf5de6a4e502ea584df54d9 | 304 | py | Python | Online-Judges/CodingBat/Python/String-02/String_2-03-cat_dog.py | shihab4t/Competitive-Programming | e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be | [
"Unlicense"
] | 3 | 2021-06-15T01:19:23.000Z | 2022-03-16T18:23:53.000Z | Online-Judges/CodingBat/Python/String-02/String_2-03-cat_dog.py | shihab4t/Competitive-Programming | e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be | [
"Unlicense"
] | null | null | null | Online-Judges/CodingBat/Python/String-02/String_2-03-cat_dog.py | shihab4t/Competitive-Programming | e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be | [
"Unlicense"
] | null | null | null | def cat_dog(str):
count_cat = 0
count_dog = 0
for i in range(len(str)-2):
if str[i:i+3] == "cat":
count_cat += 1
for i in range(len(str)-2):
if str[i:i+3] == "dog":
count_dog += 1
if count_cat == count_dog:
return True
return False
| 23.384615 | 31 | 0.5 | def cat_dog(str):
count_cat = 0
count_dog = 0
for i in range(len(str)-2):
if str[i:i+3] == "cat":
count_cat += 1
for i in range(len(str)-2):
if str[i:i+3] == "dog":
count_dog += 1
if count_cat == count_dog:
return True
return False
| true | true |
f73ceee577a1826ee0684236241bd2a85a707770 | 4,685 | py | Python | pandas/tests/plotting/test_groupby.py | CJL89/pandas | 6210077d32a9e9675526ea896e6d1f9189629d4a | [
"BSD-3-Clause"
] | 603 | 2020-12-23T13:49:32.000Z | 2022-03-31T23:38:03.000Z | pandas/tests/plotting/test_groupby.py | CJL89/pandas | 6210077d32a9e9675526ea896e6d1f9189629d4a | [
"BSD-3-Clause"
] | 387 | 2020-12-15T14:54:04.000Z | 2022-03-31T07:00:21.000Z | pandas/tests/plotting/test_groupby.py | CJL89/pandas | 6210077d32a9e9675526ea896e6d1f9189629d4a | [
"BSD-3-Clause"
] | 35 | 2021-03-26T03:12:04.000Z | 2022-03-23T10:15:10.000Z | """ Test cases for GroupBy.plot """
import numpy as np
import pytest
from pandas.compat import is_platform_windows
import pandas.util._test_decorators as td
from pandas import DataFrame, Index, Series
import pandas._testing as tm
from pandas.tests.plotting.common import TestPlotBase
pytestmark = pytest.mark.slow
@td.skip_if_no_mpl
class TestDataFrameGroupByPlots(TestPlotBase):
    """Smoke and regression tests for plotting through groupby objects."""
    @pytest.mark.xfail(
        is_platform_windows(),
        reason="Looks like LinePlot._is_ts_plot is wrong",
        strict=False,
    )
    def test_series_groupby_plotting_nominally_works(self):
        """SeriesGroupBy.plot/hist run without raising for a simple grouping."""
        n = 10
        weight = Series(np.random.normal(166, 20, size=n))
        height = Series(np.random.normal(60, 10, size=n))
        with tm.RNGContext(42):
            gender = np.random.choice(["male", "female"], size=n)
        weight.groupby(gender).plot()
        tm.close()
        height.groupby(gender).hist()
        tm.close()
        # Regression test for GH8733
        height.groupby(gender).plot(alpha=0.5)
        tm.close()
    def test_plotting_with_float_index_works(self):
        """Grouped plotting works when the index holds floats (GH 7025)."""
        # GH 7025
        df = DataFrame(
            {"def": [1, 1, 1, 2, 2, 2, 3, 3, 3], "val": np.random.randn(9)},
            index=[1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0],
        )
        df.groupby("def")["val"].plot()
        tm.close()
        df.groupby("def")["val"].apply(lambda x: x.plot())
        tm.close()
    def test_hist_single_row(self):
        """hist(by=...) does not fail on one- or two-row frames (GH 10214)."""
        # GH10214
        bins = np.arange(80, 100 + 2, 1)
        df = DataFrame({"Name": ["AAA", "BBB"], "ByCol": [1, 2], "Mark": [85, 89]})
        df["Mark"].hist(by=df["ByCol"], bins=bins)
        df = DataFrame({"Name": ["AAA"], "ByCol": [1], "Mark": [85]})
        df["Mark"].hist(by=df["ByCol"], bins=bins)
    def test_plot_submethod_works(self):
        """The plot accessor sub-methods (plot.scatter, plot.line) work on groupby."""
        df = DataFrame({"x": [1, 2, 3, 4, 5], "y": [1, 2, 3, 2, 1], "z": list("ababa")})
        df.groupby("z").plot.scatter("x", "y")
        tm.close()
        df.groupby("z")["x"].plot.line()
        tm.close()
    def test_plot_kwargs(self):
        """kind=/accessor forms both produce a real scatter (GH 11805)."""
        df = DataFrame({"x": [1, 2, 3, 4, 5], "y": [1, 2, 3, 2, 1], "z": list("ababa")})
        res = df.groupby("z").plot(kind="scatter", x="x", y="y")
        # check that a scatter plot is effectively plotted: the axes should
        # contain a PathCollection from the scatter plot (GH11805)
        assert len(res["a"].collections) == 1
        res = df.groupby("z").plot.scatter(x="x", y="y")
        assert len(res["a"].collections) == 1
    @pytest.mark.parametrize("column, expected_axes_num", [(None, 2), ("b", 1)])
    def test_groupby_hist_frame_with_legend(self, column, expected_axes_num):
        """DataFrameGroupBy.hist(legend=True) labels each subplot (GH 6279)."""
        # GH 6279 - DataFrameGroupBy histogram can have a legend
        expected_layout = (1, expected_axes_num)
        expected_labels = column or [["a"], ["b"]]
        index = Index(15 * ["1"] + 15 * ["2"], name="c")
        df = DataFrame(np.random.randn(30, 2), index=index, columns=["a", "b"])
        g = df.groupby("c")
        for axes in g.hist(legend=True, column=column):
            self._check_axes_shape(
                axes, axes_num=expected_axes_num, layout=expected_layout
            )
            for ax, expected_label in zip(axes[0], expected_labels):
                self._check_legend_labels(ax, expected_label)
    @pytest.mark.parametrize("column", [None, "b"])
    def test_groupby_hist_frame_with_legend_raises(self, column):
        """Passing both legend=True and label= raises ValueError (GH 6279)."""
        # GH 6279 - DataFrameGroupBy histogram with legend and label raises
        index = Index(15 * ["1"] + 15 * ["2"], name="c")
        df = DataFrame(np.random.randn(30, 2), index=index, columns=["a", "b"])
        g = df.groupby("c")
        with pytest.raises(ValueError, match="Cannot use both legend and label"):
            g.hist(legend=True, column=column, label="d")
    def test_groupby_hist_series_with_legend(self):
        """SeriesGroupBy.hist(legend=True) labels groups by their keys (GH 6279)."""
        # GH 6279 - SeriesGroupBy histogram can have a legend
        index = Index(15 * ["1"] + 15 * ["2"], name="c")
        df = DataFrame(np.random.randn(30, 2), index=index, columns=["a", "b"])
        g = df.groupby("c")
        for ax in g["a"].hist(legend=True):
            self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
            self._check_legend_labels(ax, ["1", "2"])
    def test_groupby_hist_series_with_legend_raises(self):
        """legend=True combined with label= also raises for Series (GH 6279)."""
        # GH 6279 - SeriesGroupBy histogram with legend and label raises
        index = Index(15 * ["1"] + 15 * ["2"], name="c")
        df = DataFrame(np.random.randn(30, 2), index=index, columns=["a", "b"])
        g = df.groupby("c")
        with pytest.raises(ValueError, match="Cannot use both legend and label"):
            g.hist(legend=True, label="d")
| 38.089431 | 88 | 0.58698 |
import numpy as np
import pytest
from pandas.compat import is_platform_windows
import pandas.util._test_decorators as td
from pandas import DataFrame, Index, Series
import pandas._testing as tm
from pandas.tests.plotting.common import TestPlotBase
pytestmark = pytest.mark.slow
@td.skip_if_no_mpl
class TestDataFrameGroupByPlots(TestPlotBase):
@pytest.mark.xfail(
is_platform_windows(),
reason="Looks like LinePlot._is_ts_plot is wrong",
strict=False,
)
def test_series_groupby_plotting_nominally_works(self):
n = 10
weight = Series(np.random.normal(166, 20, size=n))
height = Series(np.random.normal(60, 10, size=n))
with tm.RNGContext(42):
gender = np.random.choice(["male", "female"], size=n)
weight.groupby(gender).plot()
tm.close()
height.groupby(gender).hist()
tm.close()
height.groupby(gender).plot(alpha=0.5)
tm.close()
def test_plotting_with_float_index_works(self):
df = DataFrame(
{"def": [1, 1, 1, 2, 2, 2, 3, 3, 3], "val": np.random.randn(9)},
index=[1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0],
)
df.groupby("def")["val"].plot()
tm.close()
df.groupby("def")["val"].apply(lambda x: x.plot())
tm.close()
def test_hist_single_row(self):
bins = np.arange(80, 100 + 2, 1)
df = DataFrame({"Name": ["AAA", "BBB"], "ByCol": [1, 2], "Mark": [85, 89]})
df["Mark"].hist(by=df["ByCol"], bins=bins)
df = DataFrame({"Name": ["AAA"], "ByCol": [1], "Mark": [85]})
df["Mark"].hist(by=df["ByCol"], bins=bins)
def test_plot_submethod_works(self):
df = DataFrame({"x": [1, 2, 3, 4, 5], "y": [1, 2, 3, 2, 1], "z": list("ababa")})
df.groupby("z").plot.scatter("x", "y")
tm.close()
df.groupby("z")["x"].plot.line()
tm.close()
def test_plot_kwargs(self):
df = DataFrame({"x": [1, 2, 3, 4, 5], "y": [1, 2, 3, 2, 1], "z": list("ababa")})
res = df.groupby("z").plot(kind="scatter", x="x", y="y")
assert len(res["a"].collections) == 1
res = df.groupby("z").plot.scatter(x="x", y="y")
assert len(res["a"].collections) == 1
@pytest.mark.parametrize("column, expected_axes_num", [(None, 2), ("b", 1)])
def test_groupby_hist_frame_with_legend(self, column, expected_axes_num):
expected_layout = (1, expected_axes_num)
expected_labels = column or [["a"], ["b"]]
index = Index(15 * ["1"] + 15 * ["2"], name="c")
df = DataFrame(np.random.randn(30, 2), index=index, columns=["a", "b"])
g = df.groupby("c")
for axes in g.hist(legend=True, column=column):
self._check_axes_shape(
axes, axes_num=expected_axes_num, layout=expected_layout
)
for ax, expected_label in zip(axes[0], expected_labels):
self._check_legend_labels(ax, expected_label)
@pytest.mark.parametrize("column", [None, "b"])
def test_groupby_hist_frame_with_legend_raises(self, column):
index = Index(15 * ["1"] + 15 * ["2"], name="c")
df = DataFrame(np.random.randn(30, 2), index=index, columns=["a", "b"])
g = df.groupby("c")
with pytest.raises(ValueError, match="Cannot use both legend and label"):
g.hist(legend=True, column=column, label="d")
def test_groupby_hist_series_with_legend(self):
index = Index(15 * ["1"] + 15 * ["2"], name="c")
df = DataFrame(np.random.randn(30, 2), index=index, columns=["a", "b"])
g = df.groupby("c")
for ax in g["a"].hist(legend=True):
self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
self._check_legend_labels(ax, ["1", "2"])
def test_groupby_hist_series_with_legend_raises(self):
index = Index(15 * ["1"] + 15 * ["2"], name="c")
df = DataFrame(np.random.randn(30, 2), index=index, columns=["a", "b"])
g = df.groupby("c")
with pytest.raises(ValueError, match="Cannot use both legend and label"):
g.hist(legend=True, label="d")
| true | true |
f73cefc3726c4e243f639210df3e2095e6ff4c8d | 758 | py | Python | main_app/migrations/0001_initial.py | vrathnayake/dJango | c0adb3f37c0e10ba22ee0c67503a2b07844aa260 | [
"MIT"
] | null | null | null | main_app/migrations/0001_initial.py | vrathnayake/dJango | c0adb3f37c0e10ba22ee0c67503a2b07844aa260 | [
"MIT"
] | null | null | null | main_app/migrations/0001_initial.py | vrathnayake/dJango | c0adb3f37c0e10ba22ee0c67503a2b07844aa260 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.5 on 2019-01-24 22:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ``Treasure`` table."""
    # First migration of the app, so there is nothing to depend on.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Treasure',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('value', models.DecimalField(decimal_places=2, max_digits=10)),
                ('material', models.CharField(max_length=100)),
                ('location', models.CharField(max_length=100)),
                # NOTE(review): `image` is a plain CharField, presumably a URL or
                # path string — confirm against the model definition.
                ('image', models.CharField(max_length=100)),
            ],
        ),
    ]
| 29.153846 | 114 | 0.575198 |
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Treasure',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('value', models.DecimalField(decimal_places=2, max_digits=10)),
('material', models.CharField(max_length=100)),
('location', models.CharField(max_length=100)),
('image', models.CharField(max_length=100)),
],
),
]
| true | true |
f73cf04dc926a67440f658e48b29967329e3fdd9 | 2,217 | py | Python | newsdatascraper/models.py | erikqu/NewsDataScraper | 686102b53d11ba14773ac9d468a2f4b1b4c45a42 | [
"MIT"
] | 5 | 2019-08-13T00:12:02.000Z | 2021-12-14T02:38:41.000Z | newsdatascraper/models.py | erikqu/NewsDataScraper | 686102b53d11ba14773ac9d468a2f4b1b4c45a42 | [
"MIT"
] | null | null | null | newsdatascraper/models.py | erikqu/NewsDataScraper | 686102b53d11ba14773ac9d468a2f4b1b4c45a42 | [
"MIT"
] | null | null | null | import json
import csv
import pickle
class ArticleFromJson: # pragma: no cover
    """Data model for a single news article parsed from a JSON payload."""
    def __init__(self, publisher: str, title: str, description: str,
                 url: str, date_published, content: str,
                 author: str = 'Not Found'):
        # Store every field verbatim; `author` defaults to a sentinel string
        # because many feeds omit it.
        self.publisher = publisher
        self.title = title
        self.description = description
        self.url = url
        self.date_published = date_published
        self.content = content
        self.author = author
    def __eq__(self, other):
        """Two articles are equal when every stored field matches."""
        if isinstance(other, type(self)):
            return vars(self) == vars(other)
        return NotImplemented
class Articles: # pragma: no cover
    """Model to contain a list of article data.
    Also has functions to serialize that data"""
    def __init__(self, articles: list):
        self.articles = articles
    def __add__(self, other: 'Articles'):
        """Concatenate two collections into a new ``Articles``."""
        articles = self.articles + other.articles
        return Articles(articles)
    def __radd__(self, other: 'Articles'):
        # NOTE(review): keeps self's articles first, mirroring __add__; for two
        # Articles objects __add__ always wins, so this only fires for other
        # left-hand types (which would lack `.articles` anyway).
        articles = self.articles + other.articles
        return Articles(articles)
    def to_csv(self, csv_name: str):
        """Create a .csv file from the articles data to better visualize

        :param str csv_name: Path of the CSV file to write.
        """
        # newline="" is required by the csv module to avoid blank rows on
        # Windows; the redundant f.close() inside `with` was removed.
        with open(csv_name, "w", newline="") as f:
            writer = csv.DictWriter(f, vars(self.articles[0]).keys())
            writer.writeheader()
            for article in self.articles:
                writer.writerow(vars(article))
    def to_json(self):
        """Serializes the article objects to json

        :return: A JSON string of the form {"articles": [...]}.
        """
        article_list = [vars(article) for article in self.articles]
        return json.dumps({"articles": article_list})
    def to_pickle(self, pickle_name: str):
        """Serialization of article objects to byte stream

        :param str pickle_name: Path of the pickle file to write.
        """
        with open(pickle_name, "wb") as f:
            pickle.dump(self.articles, f)
    def __eq__(self, other):
        if not isinstance(other, type(self)):
            return NotImplemented
        # The length check fixes two bugs in the previous implementation: a
        # longer `other` whose prefix matched compared equal, and a shorter
        # `other` raised IndexError.
        if len(self.articles) != len(other.articles):
            return False
        return all(a == b for a, b in zip(self.articles, other.articles))
| 29.56 | 75 | 0.601714 | import json
import csv
import pickle
class ArticleFromJson:
def __init__(
self,
publisher: str,
title: str,
description: str,
url: str,
date_published,
content: str,
author: str = 'Not Found',
):
self.publisher = publisher
self.title = title
self.description = description
self.url = url
self.date_published = date_published
self.content = content
self.author = author
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self.__dict__ == other.__dict__
class Articles:
def __init__(self, articles: list):
self.articles = articles
def __add__(self, other: 'Articles'):
articles = self.articles + other.articles
return Articles(articles)
def __radd__(self, other: 'Articles'):
articles = self.articles + other.articles
return Articles(articles)
def to_csv(self, csv_name: str):
with open(csv_name, "w") as f:
writer = csv.DictWriter(f, vars(self.articles[0]).keys())
writer.writeheader()
for article in self.articles:
writer.writerow(vars(article))
f.close()
def to_json(self):
article_list = [vars(article) for article in self.articles]
return json.dumps({"articles": article_list})
def to_pickle(self, pickle_name: str):
with open(pickle_name, "wb") as f:
pickle.dump(self.articles, f)
f.close()
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
for index, article in enumerate(self.articles):
if article != other.articles[index]:
return False
return True
| true | true |
f73cf0e3aa3957672cd28f652a1f0461874ddcc9 | 14,332 | py | Python | policy_gradient.py | rey-allan/rl | 6124bcfe5de8a9a316c41fb75b50a3e9babfc970 | [
"MIT"
] | null | null | null | policy_gradient.py | rey-allan/rl | 6124bcfe5de8a9a316c41fb75b50a3e9babfc970 | [
"MIT"
] | null | null | null | policy_gradient.py | rey-allan/rl | 6124bcfe5de8a9a316c41fb75b50a3e9babfc970 | [
"MIT"
] | null | null | null | """Implementation of different policy gradient methods"""
import argparse
import numpy as np
import plot as plt
import random
from collections import namedtuple
from env import Action, Easy21, State
from tqdm import tqdm
from typing import Callable, List
# For reproducibility
random.seed(0)
np.random.seed(0)
Trajectory = namedtuple("Trajectory", ["state", "action", "reward"])
def encode(s: State, a: Action) -> np.ndarray:
    """
    Encodes the given state-action pair using coarse coding as specified in the Easy21 assignment:
    a binary feature vector rho(s, a) with 3 * 6 * 2 = 36 features, where each feature is 1 iff
    (s, a) falls inside the corresponding overlapping cuboid of state-action space:
    - dealer(s) = {[1, 4], [4, 7], [7, 10]}
    - player(s) = {[1, 6], [4, 9], [7, 12], [10, 15], [13, 18], [16, 21]}
    - a = {hit, stick}

    :param State s: The state to encode
    :param Action a: The action to encode
    :return: A binary feature vector representing the encoded state-action pair
    :rtype: np.ndarray
    """
    # `range` is end-exclusive, hence the +1 on every interval's upper bound
    dealer_cuboids = [range(1, 5), range(4, 8), range(7, 11)]
    player_cuboids = [range(1, 7), range(4, 10), range(7, 13), range(10, 16), range(13, 19), range(16, 22)]
    features = np.zeros((3, 6, 2))
    for i, d_interval in enumerate(dealer_cuboids):
        # Skip whole cuboid rows/columns that cannot match to avoid the inner loops
        if s.dealer_first_card not in d_interval:
            continue
        for j, p_interval in enumerate(player_cuboids):
            if s.player_sum not in p_interval:
                continue
            for k, action in enumerate([Action.hit, Action.stick]):
                if a == action:
                    features[i, j, k] = 1
    return features.flatten()
def softmax(x: np.ndarray) -> np.ndarray:
    """
    Computes the softmax of the given array

    Shifts the input by its maximum before exponentiating so `np.exp` cannot
    overflow for large preferences; the result is mathematically identical.

    :param np.ndarray x: The input array
    :return: The softmax of each element of the input array
    :rtype: np.ndarray
    """
    z = np.exp(x - np.max(x))
    return z / np.sum(z)
class REINFORCEWithBaseline:
"""
REINFORCE algorithm with baseline
Uses softmax on linear action preferences for the policy, and
linear approximation for the value function. Feature vectors
are computed using coarse coding as described in the Easy21
assignment.
"""
def __init__(self):
self._env = Easy21(seed=24)
def learn(self, epochs=200, alpha_policy=0.01, alpha_value=0.01, gamma=0.9, verbose=False, **kwargs) -> np.ndarray:
"""
Learns the optimal value function.
:param int epochs: The number of epochs to take to learn the value function
:param float alpha_policy: The learning rate for the policy approximation
:param float alpha_value: The learning rate for the value approximation
:param float gamma: The discount factor
:param bool verbose: Whether to use verbose mode or not
:return: The optimal value function
:rtype: np.ndarray
"""
# Value function
w = np.random.rand(36)
value_approximator = lambda s: [np.dot(w, encode(s, a)) for a in [Action.hit, Action.stick]]
# Policy function
theta = np.random.rand(36)
pi = lambda s, theta: softmax(np.array([np.dot(theta, encode(s, a)) for a in [Action.hit, Action.stick]]))
for _ in tqdm(range(epochs), disable=not verbose):
trajectories = self._sample_episode(pi, theta)
# Reverse the list so we start backpropagating the return from the last episode
trajectories.reverse()
# Learn from the episode
g = 0
for i, t in enumerate(trajectories):
g = t.reward + gamma * g
x = encode(t.state, t.action)
# Baseline
v = np.dot(w, x)
delta = g - v
# SGD update of the value function
w += alpha_value * delta * x
# SGD update of the policy function
probs = pi(t.state, theta)
eligibility_vector = x - np.sum([p * encode(t.state, a) for a, p in enumerate(probs)])
theta += alpha_policy * gamma ** i * delta * eligibility_vector
# Compute the optimal value function which is simply the value of the best action in each state
values = np.zeros(self._env.state_space)
for d in range(self._env.state_space[0]):
for p in range(self._env.state_space[1]):
values[d, p] = np.max(value_approximator(State(d, p)))
return values
    def _sample_episode(self, pi: Callable[[State, np.ndarray], np.ndarray], theta: np.ndarray) -> List[Trajectory]:
        """Sample one full episode from the environment while following `pi`.

        :param pi: maps (state, theta) to the action probabilities (hit, stick)
        :param theta: the current policy parameters
        :return: the (state, action, reward) trajectories in visit order
        """
        # Samples trajectories following policy `pi` with an optional starting state-action pair
        trajectories = []
        s = self._env.reset()
        # The policy selects the action with some constant exploration as in the Easy21 assignment
        policy = (
            lambda s: random.choice([Action.hit, Action.stick]) if random.random() < 0.05 else np.argmax(pi(s, theta))
        )
        a = policy(s)
        while True:
            s_prime, r, done = self._env.step(a)
            trajectories.append(Trajectory(s, a, r))
            if done:
                break
            s = s_prime
            a = policy(s)
        return trajectories
class OneStepActorCritic:
    """
    One-step Actor-Critic

    Uses softmax on linear action preferences for the policy, and
    linear approximation for the value function. Feature vectors
    are computed using coarse coding as described in the Easy21
    assignment.
    """

    def __init__(self):
        # Fixed seed keeps training runs reproducible across executions
        self._env = Easy21(seed=24)

    def learn(self, epochs=200, alpha_policy=0.01, alpha_value=0.01, gamma=0.9, verbose=False, **kwargs) -> np.ndarray:
        """
        Learns the optimal value function.

        :param int epochs: The number of epochs to take to learn the value function
        :param float alpha_policy: The learning rate for the policy approximation
        :param float alpha_value: The learning rate for the value approximation
        :param float gamma: The discount factor
        :param bool verbose: Whether to use verbose mode or not
        :return: The optimal value function
        :rtype: np.ndarray
        """
        # Linear value function v(s, a) = w . x(s, a)
        w = np.random.rand(36)
        value_approximator = lambda s: [np.dot(w, encode(s, a)) for a in [Action.hit, Action.stick]]
        # Softmax policy over linear action preferences theta . x(s, a)
        theta = np.random.rand(36)
        pi = lambda s, theta: softmax(np.array([np.dot(theta, encode(s, a)) for a in [Action.hit, Action.stick]]))
        # The policy selects the action with some constant exploration as in the Easy21 assignment
        policy = (
            lambda s: random.choice([Action.hit, Action.stick]) if random.random() < 0.05 else np.argmax(pi(s, theta))
        )
        for _ in tqdm(range(epochs), disable=not verbose):
            I = 1
            s = self._env.reset()
            done = False
            while not done:
                a = policy(s)
                s_prime, r, done = self._env.step(a)
                # Hoisted: the feature vector is used by the TD error and both updates
                x = encode(s, a)
                # Compute the TD error; terminal states bootstrap with value 0.
                # NOTE(review): bootstrapping with max over actions makes the critic
                # Q-learning-like rather than on-policy — TODO confirm intended.
                if done:
                    delta = r - np.dot(w, x)
                else:
                    delta = r + gamma * np.max(value_approximator(s_prime)) - np.dot(w, x)
                # SGD update of the value function
                w += alpha_value * delta * x
                # SGD update of the policy function
                probs = pi(s, theta)
                # Fix: sum the probability-weighted feature vectors element-wise
                # (axis=0); without the axis np.sum collapses the list of vectors
                # to a single scalar, corrupting x - E_pi[x].
                eligibility_vector = x - np.sum([p * encode(s, act) for act, p in enumerate(probs)], axis=0)
                theta += alpha_policy * I * delta * eligibility_vector
                I *= gamma
                s = s_prime
        # Compute the optimal value function which is simply the value of the best action in each state
        values = np.zeros(self._env.state_space)
        for d in range(self._env.state_space[0]):
            for p in range(self._env.state_space[1]):
                values[d, p] = np.max(value_approximator(State(d, p)))
        return values
class ActorCriticWithEligibilityTraces:
    """
    Actor-Critic with eligibility traces

    Uses softmax on linear action preferences for the policy, and
    linear approximation for the value function. Feature vectors
    are computed using coarse coding as described in the Easy21
    assignment.
    """

    def __init__(self):
        # Fixed seed keeps training runs reproducible across executions
        self._env = Easy21(seed=24)

    def learn(
        self,
        epochs=200,
        alpha_policy=0.01,
        alpha_value=0.01,
        gamma=0.9,
        lambda_value=1.0,
        lambda_policy=1.0,
        verbose=False,
        **kwargs
    ) -> np.ndarray:
        """
        Learns the optimal value function.

        :param int epochs: The number of epochs to take to learn the value function
        :param float alpha_policy: The learning rate for the policy approximation
        :param float alpha_value: The learning rate for the value approximation
        :param float gamma: The discount factor
        :param float lambda_value: The trace decay rate for the value approximation
        :param float lambda_policy: The trace decay rate for the policy approximation
        :param bool verbose: Whether to use verbose mode or not
        :return: The optimal value function
        :rtype: np.ndarray
        """
        # Linear value function v(s, a) = w . x(s, a)
        w = np.random.rand(36)
        value_approximator = lambda s: [np.dot(w, encode(s, a)) for a in [Action.hit, Action.stick]]
        # Softmax policy over linear action preferences theta . x(s, a)
        theta = np.random.rand(36)
        pi = lambda s, theta: softmax(np.array([np.dot(theta, encode(s, a)) for a in [Action.hit, Action.stick]]))
        # The policy selects the action with some constant exploration as in the Easy21 assignment
        policy = (
            lambda s: random.choice([Action.hit, Action.stick]) if random.random() < 0.05 else np.argmax(pi(s, theta))
        )
        for _ in tqdm(range(epochs), disable=not verbose):
            I = 1
            s = self._env.reset()
            done = False
            # Eligibility traces, reset at the start of every episode
            z_w = np.zeros_like(w)
            z_theta = np.zeros_like(theta)
            while not done:
                a = policy(s)
                s_prime, r, done = self._env.step(a)
                # Hoisted: the feature vector is used by the TD error and both updates
                x = encode(s, a)
                # Compute the TD error; terminal states bootstrap with value 0.
                # NOTE(review): bootstrapping with max over actions makes the critic
                # Q-learning-like rather than on-policy — TODO confirm intended.
                if done:
                    delta = r - np.dot(w, x)
                else:
                    delta = r + gamma * np.max(value_approximator(s_prime)) - np.dot(w, x)
                # Accumulating trace + SGD update of the value function
                z_w = gamma * lambda_value * z_w + x
                w += alpha_value * delta * z_w
                # SGD update of the policy function
                probs = pi(s, theta)
                # Fix: sum the probability-weighted feature vectors element-wise
                # (axis=0); without the axis np.sum collapses the list of vectors
                # to a single scalar, corrupting x - E_pi[x].
                eligibility_vector = x - np.sum([p * encode(s, act) for act, p in enumerate(probs)], axis=0)
                z_theta = gamma * lambda_policy * z_theta + I * eligibility_vector
                theta += alpha_policy * delta * z_theta
                I *= gamma
                s = s_prime
        # Compute the optimal value function which is simply the value of the best action in each state
        values = np.zeros(self._env.state_space)
        for d in range(self._env.state_space[0]):
            for p in range(self._env.state_space[1]):
                values[d, p] = np.max(value_approximator(State(d, p)))
        return values
if __name__ == "__main__":
    # CLI entry point: pick one algorithm flag; flags are checked in the order
    # below and the first one that is set wins.
    parser = argparse.ArgumentParser(description="Run policy gradient methods")
    parser.add_argument("--reinforce-with-baseline", action="store_true", help="Execute REINFORCE with Baseline")
    parser.add_argument("--one-step-ac", action="store_true", help="Execute One-step Actor-Critic")
    parser.add_argument(
        "--ac-eligibility-traces", action="store_true", help="Execute Actor-Critic with eligibility traces"
    )
    parser.add_argument("--epochs", type=int, default=200, help="Epochs to train")
    parser.add_argument(
        "--alpha-value", type=float, default=0.01, help="Learning rate to use for the value function approximation"
    )
    parser.add_argument(
        "--alpha-policy", type=float, default=0.01, help="Learning rate to use for the policy function approximation"
    )
    parser.add_argument(
        "--lambda-value", type=float, default=1.0, help="Trace decay rate to use for the value function approximation"
    )
    parser.add_argument(
        "--lambda-policy", type=float, default=1.0, help="Trace decay rate to use for the policy function approximation"
    )
    parser.add_argument("--gamma", type=float, default=0.9, help="Discount factor")
    parser.add_argument("--verbose", action="store_true", help="Run in verbose mode")
    args = parser.parse_args()
    # The optimal value function obtained
    V = None
    # The algorithm to run
    policy_grad = None
    # The title of the plot
    title = None
    if args.reinforce_with_baseline:
        print("Running REINFORCE with Baseline")
        policy_grad = REINFORCEWithBaseline()
        title = "reinforce_with_baseline"
    elif args.one_step_ac:
        print("Running One-step Actor-Critic")
        policy_grad = OneStepActorCritic()
        title = "one_step_actor_critic"
    elif args.ac_eligibility_traces:
        print("Running Actor-Critic with eligibility traces")
        policy_grad = ActorCriticWithEligibilityTraces()
        title = "actor_critic_eligibility_traces"
    if policy_grad is not None:
        # All learn() methods accept **kwargs, so the lambda_* arguments are
        # harmlessly ignored by the algorithms without eligibility traces.
        V = policy_grad.learn(
            epochs=args.epochs,
            alpha_value=args.alpha_value,
            alpha_policy=args.alpha_policy,
            lambda_value=args.lambda_value,
            lambda_policy=args.lambda_policy,
            gamma=args.gamma,
            verbose=args.verbose,
        )
    if V is not None:
        # Plot the value function as a surface
        # Remove the state where the dealer's first card is 0 and the player's sum is 0 because these are not possible
        # They were kept in the value function to avoid having to deal with 0-index vs 1-index
        plt.plot_value_function(range(1, Easy21.state_space[0]), range(1, Easy21.state_space[1]), V[1:, 1:], title)
| 38.840108 | 120 | 0.612824 | import argparse
import numpy as np
import plot as plt
import random
from collections import namedtuple
from env import Action, Easy21, State
from tqdm import tqdm
from typing import Callable, List
random.seed(0)
np.random.seed(0)
Trajectory = namedtuple("Trajectory", ["state", "action", "reward"])
def encode(s: State, a: Action) -> np.ndarray:
    """Coarse-code a state-action pair into a flat 3x6x2 binary feature vector.

    The overlapping dealer/player intervals define the cuboids; every cuboid
    containing (dealer first card, player sum, action) gets its entry set to 1.
    """
    dealer_cuboids = [range(1, 5), range(4, 8), range(7, 11)]
    player_cuboids = [range(1, 7), range(4, 10), range(7, 13), range(10, 16), range(13, 19), range(16, 22)]
    actions = [Action.hit, Action.stick]
    features = np.zeros((3, 6, 2))
    for i, d_range in enumerate(dealer_cuboids):
        if s.dealer_first_card not in d_range:
            continue
        for j, p_range in enumerate(player_cuboids):
            if s.player_sum not in p_range:
                continue
            for k, action in enumerate(actions):
                if a == action:
                    features[i, j, k] = 1
    return features.flatten()
def softmax(x: np.ndarray) -> np.ndarray:
    """Return the softmax of `x`.

    Shifts by max(x) before exponentiating: softmax is shift-invariant, and
    the shift prevents np.exp from overflowing for large preference values.
    """
    z = np.exp(x - np.max(x))
    return z / np.sum(z)
class REINFORCEWithBaseline:
def __init__(self):
self._env = Easy21(seed=24)
def learn(self, epochs=200, alpha_policy=0.01, alpha_value=0.01, gamma=0.9, verbose=False, **kwargs) -> np.ndarray:
w = np.random.rand(36)
value_approximator = lambda s: [np.dot(w, encode(s, a)) for a in [Action.hit, Action.stick]]
theta = np.random.rand(36)
pi = lambda s, theta: softmax(np.array([np.dot(theta, encode(s, a)) for a in [Action.hit, Action.stick]]))
for _ in tqdm(range(epochs), disable=not verbose):
trajectories = self._sample_episode(pi, theta)
trajectories.reverse()
g = 0
for i, t in enumerate(trajectories):
g = t.reward + gamma * g
x = encode(t.state, t.action)
v = np.dot(w, x)
delta = g - v
w += alpha_value * delta * x
probs = pi(t.state, theta)
eligibility_vector = x - np.sum([p * encode(t.state, a) for a, p in enumerate(probs)])
theta += alpha_policy * gamma ** i * delta * eligibility_vector
values = np.zeros(self._env.state_space)
for d in range(self._env.state_space[0]):
for p in range(self._env.state_space[1]):
values[d, p] = np.max(value_approximator(State(d, p)))
return values
def _sample_episode(self, pi: Callable[[State, Action, np.ndarray], float], theta: np.ndarray) -> List[Trajectory]:
trajectories = []
s = self._env.reset()
policy = (
lambda s: random.choice([Action.hit, Action.stick]) if random.random() < 0.05 else np.argmax(pi(s, theta))
)
a = policy(s)
while True:
s_prime, r, done = self._env.step(a)
trajectories.append(Trajectory(s, a, r))
if done:
break
s = s_prime
a = policy(s)
return trajectories
class OneStepActorCritic:
def __init__(self):
self._env = Easy21(seed=24)
def learn(self, epochs=200, alpha_policy=0.01, alpha_value=0.01, gamma=0.9, verbose=False, **kwargs) -> np.ndarray:
w = np.random.rand(36)
value_approximator = lambda s: [np.dot(w, encode(s, a)) for a in [Action.hit, Action.stick]]
theta = np.random.rand(36)
pi = lambda s, theta: softmax(np.array([np.dot(theta, encode(s, a)) for a in [Action.hit, Action.stick]]))
policy = (
lambda s: random.choice([Action.hit, Action.stick]) if random.random() < 0.05 else np.argmax(pi(s, theta))
)
for _ in tqdm(range(epochs), disable=not verbose):
I = 1
s = self._env.reset()
done = False
while not done:
a = policy(s)
s_prime, r, done = self._env.step(a)
if done:
delta = r - np.dot(w, encode(s, a))
else:
delta = r + gamma * np.max(value_approximator(s_prime)) - np.dot(w, encode(s, a))
x = encode(s, a)
w += alpha_value * delta * x
probs = pi(s, theta)
eligibility_vector = x - np.sum([p * encode(s, a) for a, p in enumerate(probs)])
theta += alpha_policy * I * delta * eligibility_vector
I *= gamma
s = s_prime
values = np.zeros(self._env.state_space)
for d in range(self._env.state_space[0]):
for p in range(self._env.state_space[1]):
values[d, p] = np.max(value_approximator(State(d, p)))
return values
class ActorCriticWithEligibilityTraces:
def __init__(self):
self._env = Easy21(seed=24)
def learn(
self,
epochs=200,
alpha_policy=0.01,
alpha_value=0.01,
gamma=0.9,
lambda_value=1.0,
lambda_policy=1.0,
verbose=False,
**kwargs
) -> np.ndarray:
w = np.random.rand(36)
value_approximator = lambda s: [np.dot(w, encode(s, a)) for a in [Action.hit, Action.stick]]
theta = np.random.rand(36)
pi = lambda s, theta: softmax(np.array([np.dot(theta, encode(s, a)) for a in [Action.hit, Action.stick]]))
policy = (
lambda s: random.choice([Action.hit, Action.stick]) if random.random() < 0.05 else np.argmax(pi(s, theta))
)
for _ in tqdm(range(epochs), disable=not verbose):
I = 1
s = self._env.reset()
done = False
z_w = np.zeros_like(w)
z_theta = np.zeros_like(theta)
while not done:
a = policy(s)
s_prime, r, done = self._env.step(a)
if done:
delta = r - np.dot(w, encode(s, a))
else:
delta = r + gamma * np.max(value_approximator(s_prime)) - np.dot(w, encode(s, a))
x = encode(s, a)
z_w = gamma * lambda_value * z_w + x
w += alpha_value * delta * z_w
probs = pi(s, theta)
eligibility_vector = x - np.sum([p * encode(s, a) for a, p in enumerate(probs)])
z_theta = gamma * lambda_policy * z_theta + I * eligibility_vector
theta += alpha_policy * delta * z_theta
I *= gamma
s = s_prime
values = np.zeros(self._env.state_space)
for d in range(self._env.state_space[0]):
for p in range(self._env.state_space[1]):
values[d, p] = np.max(value_approximator(State(d, p)))
return values
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run policy gradient methods")
parser.add_argument("--reinforce-with-baseline", action="store_true", help="Execute REINFORCE with Baseline")
parser.add_argument("--one-step-ac", action="store_true", help="Execute One-step Actor-Critic")
parser.add_argument(
"--ac-eligibility-traces", action="store_true", help="Execute Actor-Critic with eligibility traces"
)
parser.add_argument("--epochs", type=int, default=200, help="Epochs to train")
parser.add_argument(
"--alpha-value", type=float, default=0.01, help="Learning rate to use for the value function approximation"
)
parser.add_argument(
"--alpha-policy", type=float, default=0.01, help="Learning rate to use for the policy function approximation"
)
parser.add_argument(
"--lambda-value", type=float, default=1.0, help="Trace decay rate to use for the value function approximation"
)
parser.add_argument(
"--lambda-policy", type=float, default=1.0, help="Trace decay rate to use for the policy function approximation"
)
parser.add_argument("--gamma", type=float, default=0.9, help="Discount factor")
parser.add_argument("--verbose", action="store_true", help="Run in verbose mode")
args = parser.parse_args()
V = None
policy_grad = None
title = None
if args.reinforce_with_baseline:
print("Running REINFORCE with Baseline")
policy_grad = REINFORCEWithBaseline()
title = "reinforce_with_baseline"
elif args.one_step_ac:
print("Running One-step Actor-Critic")
policy_grad = OneStepActorCritic()
title = "one_step_actor_critic"
elif args.ac_eligibility_traces:
print("Running Actor-Critic with eligibility traces")
policy_grad = ActorCriticWithEligibilityTraces()
title = "actor_critic_eligibility_traces"
if policy_grad is not None:
V = policy_grad.learn(
epochs=args.epochs,
alpha_value=args.alpha_value,
alpha_policy=args.alpha_policy,
lambda_value=args.lambda_value,
lambda_policy=args.lambda_policy,
gamma=args.gamma,
verbose=args.verbose,
)
if V is not None:
plt.plot_value_function(range(1, Easy21.state_space[0]), range(1, Easy21.state_space[1]), V[1:, 1:], title)
| true | true |
f73cf1cd9472f494b49bd2ce550dbfc38b02b7bd | 92 | py | Python | exercises/for_loops.py | moheeeldin19-meet/y2s18-python_review | d47a343de76adbd8445d1d22227fdb09932edf58 | [
"MIT"
] | null | null | null | exercises/for_loops.py | moheeeldin19-meet/y2s18-python_review | d47a343de76adbd8445d1d22227fdb09932edf58 | [
"MIT"
] | null | null | null | exercises/for_loops.py | moheeeldin19-meet/y2s18-python_review | d47a343de76adbd8445d1d22227fdb09932edf58 | [
"MIT"
] | null | null | null | # Write your solution for 1.1 here!
# Sum the integers 0 through 100 inclusive, then print the total.
x = sum(range(101))
print(x)
| 13.142857 | 35 | 0.543478 |
x=0
for i in range(101):
x+=i
print(x)
| true | true |
f73cf32a7962a7e162bdf513bca407d581fec4f4 | 3,971 | py | Python | engines/ep/management/cli_auth_utils.py | scwright027/kv_engine | 6fc9dc957844f077d44dc6992794ffe35e91e1f7 | [
"BSD-3-Clause"
] | 1 | 2019-06-13T07:33:09.000Z | 2019-06-13T07:33:09.000Z | engines/ep/management/cli_auth_utils.py | paolococchi/kv_engine | 40256dca6bf77fb4bcc18e8ef7d9b8f991bf4e45 | [
"BSD-3-Clause"
] | null | null | null | engines/ep/management/cli_auth_utils.py | paolococchi/kv_engine | 40256dca6bf77fb4bcc18e8ef7d9b8f991bf4e45 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import getpass
import inspect
import os
import sys
import clitool
import mc_bin_client
import memcacheConstants
from functools import wraps
def cmd_decorator(f):
    """Decorate a function with code to authenticate based on
    the following additional arguments passed by keyword:

    bucketName
    username
    password
    passwordFromStdin - if true, user will be prompted for password on stdin
    """

    @wraps(f)
    def g(*args, **kwargs):
        # check arguments are suitable for wrapped func
        mc = args[0]
        # Fix: inspect.getargspec() was deprecated since Python 3.0 and removed
        # in 3.11; getfullargspec() exposes the same args/defaults/varargs
        # fields and is the drop-in replacement.
        spec = inspect.getfullargspec(f)
        max_args = len(spec.args)
        defaults = len(spec.defaults) if spec.defaults else 0
        min_args = max_args - defaults
        if len(args) < min_args:
            print(("Error: too few arguments - command "
                   "expected a minimum of %s but was passed "
                   "%s: %s"
                   % (min_args - 1, len(args) - 1, list(args[1:]))), file=sys.stderr)
            sys.exit(2)
        if spec.varargs is None:
            if len(args) > max_args:
                print(("Error: too many arguments - command "
                       "expected a maximum of %s but was passed "
                       "%s: %s"
                       % (max_args - 1, len(args) - 1, list(args[1:]))), file=sys.stderr)
                sys.exit(2)
        # extract auth and bucket parameters (not passed to wrapped function)
        bucket = kwargs.pop('bucketName', None)
        username = kwargs.pop('username', None) or bucket
        cli_password = kwargs.pop('password', None)
        stdin_password = (getpass.getpass()
                          if kwargs.pop('passwordFromStdin', False)
                          else None)
        env_password = os.getenv("CB_PASSWORD", None)
        # Precedence: explicit CLI flag, then stdin prompt, then environment
        password = cli_password or stdin_password or env_password
        # try to auth
        if username is not None or password is not None:
            bucket = bucket or 'default'
            username = username or bucket
            password = password or ''
            try:
                mc.sasl_auth_plain(username, password)
            except mc_bin_client.MemcachedError:
                print("Authentication error for user:{0} bucket:{1}"
                      .format(username, bucket))
                sys.exit(1)
        # HELO
        mc.enable_xerror()
        mc.enable_collections()
        mc.hello("{0} {1}".format(os.path.split(sys.argv[0])[1],
                                  os.getenv("EP_ENGINE_VERSION",
                                            "unknown version")))
        # call function for one or all buckets
        try:
            if kwargs.pop('allBuckets', None):
                buckets = mc.list_buckets()
                for bucket in buckets:
                    print('*' * 78)
                    print(bucket)
                    print()
                    mc.bucket_select(bucket)
                    f(*args, **kwargs)
            elif bucket is not None:
                mc.bucket_select(bucket)
                f(*args, **kwargs)
            else:
                f(*args, **kwargs)
        except mc_bin_client.ErrorEaccess:
            print("No access to bucket:{0} - permission denied "
                  "or bucket does not exist.".format(bucket))
            sys.exit(1)

    return g
def get_authed_clitool(extraUsage="", allBuckets=True):
    """Build a CliTool preconfigured with the standard auth/bucket options.

    :param extraUsage: extra usage text appended to the tool's help output
    :param allBuckets: if true, also expose the -a "iterate over all buckets" flag
    :return: the configured clitool.CliTool instance
    """
    c = clitool.CliTool(extraUsage)
    if allBuckets:
        c.addFlag('-a', 'allBuckets', 'iterate over all buckets')
    c.addOption('-b', 'bucketName', 'the bucket to get stats from (Default: default)')
    c.addOption('-u', 'username', 'the user as which to authenticate (Default: bucketName)')
    c.addOption('-p', 'password', 'the password for the bucket if one exists')
    c.addFlag('-S', 'passwordFromStdin', 'read password from stdin')
    return c
| 34.833333 | 104 | 0.5379 |
import getpass
import inspect
import os
import sys
import clitool
import mc_bin_client
import memcacheConstants
from functools import wraps
def cmd_decorator(f):
@wraps(f)
def g(*args, **kwargs):
mc = args[0]
spec = inspect.getargspec(f)
max_args = len(spec.args)
defaults = len(spec.defaults) if spec.defaults else 0
min_args = max_args - defaults
if len(args) < min_args:
print(("Error: too few arguments - command "
"expected a minimum of %s but was passed "
"%s: %s"
% (min_args - 1, len(args) - 1, list(args[1:]))), file=sys.stderr)
sys.exit(2)
if spec.varargs is None:
if len(args) > max_args:
print(("Error: too many arguments - command "
"expected a maximum of %s but was passed "
"%s: %s"
% (max_args - 1, len(args) - 1, list(args[1:]))), file=sys.stderr)
sys.exit(2)
bucket = kwargs.pop('bucketName', None)
username = kwargs.pop('username', None) or bucket
cli_password = kwargs.pop('password', None)
stdin_password = (getpass.getpass()
if kwargs.pop('passwordFromStdin', False)
else None)
env_password = os.getenv("CB_PASSWORD", None)
password = cli_password or stdin_password or env_password
if username is not None or password is not None:
bucket = bucket or 'default'
username = username or bucket
password = password or ''
try:
mc.sasl_auth_plain(username, password)
except mc_bin_client.MemcachedError:
print("Authentication error for user:{0} bucket:{1}"
.format(username, bucket))
sys.exit(1)
mc.enable_xerror()
mc.enable_collections()
mc.hello("{0} {1}".format(os.path.split(sys.argv[0])[1],
os.getenv("EP_ENGINE_VERSION",
"unknown version")))
try:
if kwargs.pop('allBuckets', None):
buckets = mc.list_buckets()
for bucket in buckets:
print('*' * 78)
print(bucket)
print()
mc.bucket_select(bucket)
f(*args, **kwargs)
elif bucket is not None:
mc.bucket_select(bucket)
f(*args, **kwargs)
else:
f(*args, **kwargs)
except mc_bin_client.ErrorEaccess:
print("No access to bucket:{0} - permission denied "
"or bucket does not exist.".format(bucket))
sys.exit(1)
return g
def get_authed_clitool(extraUsage="", allBuckets=True):
c = clitool.CliTool(extraUsage)
if allBuckets:
c.addFlag('-a', 'allBuckets', 'iterate over all buckets')
c.addOption('-b', 'bucketName', 'the bucket to get stats from (Default: default)')
c.addOption('-u', 'username', 'the user as which to authenticate (Default: bucketName)')
c.addOption('-p', 'password', 'the password for the bucket if one exists')
c.addFlag('-S', 'passwordFromStdin', 'read password from stdin')
return c
| true | true |
f73cf4b7b989bdf9a60326d5a7f1b1b125497465 | 11,108 | py | Python | Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/stringprep.py | bidhata/EquationGroupLeaks | 1ff4bc115cb2bd5bf2ed6bf769af44392926830c | [
"Unlicense"
] | 9 | 2019-11-22T04:58:40.000Z | 2022-02-26T16:47:28.000Z | Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/stringprep.py | bidhata/EquationGroupLeaks | 1ff4bc115cb2bd5bf2ed6bf769af44392926830c | [
"Unlicense"
] | null | null | null | Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/stringprep.py | bidhata/EquationGroupLeaks | 1ff4bc115cb2bd5bf2ed6bf769af44392926830c | [
"Unlicense"
] | 8 | 2017-09-27T10:31:18.000Z | 2022-01-08T10:30:46.000Z | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: stringprep.py
"""Library that exposes various tables found in the StringPrep RFC 3454.
There are two kinds of tables: sets, for which a member test is provided,
and mappings, for which a mapping function is provided.
"""
from unicodedata import ucd_3_2_0 as unicodedata
def in_table_a1(code):
    """RFC 3454 table A.1: unassigned code points, excluding noncharacters."""
    cp = ord(code)
    is_unassigned = unicodedata.category(code) == 'Cn'
    in_fdd0_block = 64976 <= cp < 65008
    is_plane_noncharacter = cp & 65535 in (65534, 65535)
    return is_unassigned and not in_fdd0_block and not is_plane_noncharacter
# RFC 3454 table B.1: characters commonly mapped to nothing.
# Fix: built with an explicit set union so the expression is valid on both
# Python 2 (where range() returns a list) and Python 3 (where a list cannot
# be concatenated with a range object). The contents are unchanged.
b1_set = set([173, 847, 6150, 6155, 6156, 6157, 8203, 8204, 8205, 8288, 65279]) | set(range(65024, 65040))


def in_table_b1(code):
    """Return True if `code` is mapped to nothing (RFC 3454 table B.1)."""
    return ord(code) in b1_set
b3_exceptions = {181: 'μ',
223: 'ss',304: 'i̇',329: 'ʼn',383: 's',
496: 'ǰ',837: 'ι',890: ' ι',912: 'ΐ',
944: 'ΰ',962: 'σ',976: 'β',977: 'θ',
978: 'υ',979: 'ύ',980: 'ϋ',981: 'φ',
982: 'π',1008: 'κ',1009: 'ρ',1010: 'σ',
1013: 'ε',1415: 'եւ',7830: 'ẖ',7831: 'ẗ',
7832: 'ẘ',7833: 'ẙ',7834: 'aʾ',7835: 'ṡ',
8016: 'ὐ',8018: 'ὒ',8020: 'ὔ',8022: 'ὖ',
8064: 'ἀι',8065: 'ἁι',8066: 'ἂι',8067: 'ἃι',
8068: 'ἄι',8069: 'ἅι',8070: 'ἆι',8071: 'ἇι',
8072: 'ἀι',8073: 'ἁι',8074: 'ἂι',8075: 'ἃι',
8076: 'ἄι',8077: 'ἅι',8078: 'ἆι',8079: 'ἇι',
8080: 'ἠι',8081: 'ἡι',8082: 'ἢι',8083: 'ἣι',
8084: 'ἤι',8085: 'ἥι',8086: 'ἦι',8087: 'ἧι',
8088: 'ἠι',8089: 'ἡι',8090: 'ἢι',8091: 'ἣι',
8092: 'ἤι',8093: 'ἥι',8094: 'ἦι',8095: 'ἧι',
8096: 'ὠι',8097: 'ὡι',8098: 'ὢι',8099: 'ὣι',
8100: 'ὤι',8101: 'ὥι',8102: 'ὦι',8103: 'ὧι',
8104: 'ὠι',8105: 'ὡι',8106: 'ὢι',8107: 'ὣι',
8108: 'ὤι',8109: 'ὥι',8110: 'ὦι',8111: 'ὧι',
8114: 'ὰι',8115: 'αι',8116: 'άι',8118: 'ᾶ',
8119: 'ᾶι',8124: 'αι',8126: 'ι',8130: 'ὴι',
8131: 'ηι',8132: 'ήι',8134: 'ῆ',8135: 'ῆι',
8140: 'ηι',8146: 'ῒ',8147: 'ΐ',8150: 'ῖ',
8151: 'ῗ',8162: 'ῢ',8163: 'ΰ',8164: 'ῤ',
8166: 'ῦ',8167: 'ῧ',8178: 'ὼι',8179: 'ωι',
8180: 'ώι',8182: 'ῶ',8183: 'ῶι',8188: 'ωι',
8360: 'rs',8450: 'c',8451: '°c',8455: 'ɛ',
8457: '°f',8459: 'h',8460: 'h',8461: 'h',
8464: 'i',8465: 'i',8466: 'l',8469: 'n',
8470: 'no',8473: 'p',8474: 'q',8475: 'r',
8476: 'r',8477: 'r',8480: 'sm',8481: 'tel',
8482: 'tm',8484: 'z',8488: 'z',8492: 'b',
8493: 'c',8496: 'e',8497: 'f',8499: 'm',
8510: 'γ',8511: 'π',8517: 'd',13169: 'hpa',
13171: 'au',13173: 'ov',13184: 'pa',13185: 'na',
13186: 'μa',13187: 'ma',13188: 'ka',13189: 'kb',
13190: 'mb',13191: 'gb',13194: 'pf',13195: 'nf',
13196: 'μf',13200: 'hz',13201: 'khz',13202: 'mhz',
13203: 'ghz',13204: 'thz',13225: 'pa',13226: 'kpa',
13227: 'mpa',13228: 'gpa',13236: 'pv',13237: 'nv',
13238: 'μv',13239: 'mv',13240: 'kv',13241: 'mv',
13242: 'pw',13243: 'nw',13244: 'μw',13245: 'mw',
13246: 'kw',13247: 'mw',13248: 'kω',13249: 'mω',
13251: 'bq',13254: 'c∕kg',13255: 'co.',13256: 'db',
13257: 'gy',13259: 'hp',13261: 'kk',13262: 'km',
13271: 'ph',13273: 'ppm',13274: 'pr',13276: 'sv',
13277: 'wb',64256: 'ff',64257: 'fi',64258: 'fl',
64259: 'ffi',64260: 'ffl',64261: 'st',64262: 'st',
64275: 'մն',64276: 'մե',64277: 'մի',64278: 'վն',
64279: 'մխ',119808: 'a',119809: 'b',119810: 'c',
119811: 'd',119812: 'e',119813: 'f',119814: 'g',
119815: 'h',119816: 'i',119817: 'j',119818: 'k',
119819: 'l',119820: 'm',119821: 'n',119822: 'o',
119823: 'p',119824: 'q',119825: 'r',119826: 's',
119827: 't',119828: 'u',119829: 'v',119830: 'w',
119831: 'x',119832: 'y',119833: 'z',119860: 'a',
119861: 'b',119862: 'c',119863: 'd',119864: 'e',
119865: 'f',119866: 'g',119867: 'h',119868: 'i',
119869: 'j',119870: 'k',119871: 'l',119872: 'm',
119873: 'n',119874: 'o',119875: 'p',119876: 'q',
119877: 'r',119878: 's',119879: 't',119880: 'u',
119881: 'v',119882: 'w',119883: 'x',119884: 'y',
119885: 'z',119912: 'a',119913: 'b',119914: 'c',
119915: 'd',119916: 'e',119917: 'f',119918: 'g',
119919: 'h',119920: 'i',119921: 'j',119922: 'k',
119923: 'l',119924: 'm',119925: 'n',119926: 'o',
119927: 'p',119928: 'q',119929: 'r',119930: 's',
119931: 't',119932: 'u',119933: 'v',119934: 'w',
119935: 'x',119936: 'y',119937: 'z',119964: 'a',
119966: 'c',119967: 'd',119970: 'g',119973: 'j',
119974: 'k',119977: 'n',119978: 'o',119979: 'p',
119980: 'q',119982: 's',119983: 't',119984: 'u',
119985: 'v',119986: 'w',119987: 'x',119988: 'y',
119989: 'z',120016: 'a',120017: 'b',120018: 'c',
120019: 'd',120020: 'e',120021: 'f',120022: 'g',
120023: 'h',120024: 'i',120025: 'j',120026: 'k',
120027: 'l',120028: 'm',120029: 'n',120030: 'o',
120031: 'p',120032: 'q',120033: 'r',120034: 's',
120035: 't',120036: 'u',120037: 'v',120038: 'w',
120039: 'x',120040: 'y',120041: 'z',120068: 'a',
120069: 'b',120071: 'd',120072: 'e',120073: 'f',
120074: 'g',120077: 'j',120078: 'k',120079: 'l',
120080: 'm',120081: 'n',120082: 'o',120083: 'p',
120084: 'q',120086: 's',120087: 't',120088: 'u',
120089: 'v',120090: 'w',120091: 'x',120092: 'y',
120120: 'a',120121: 'b',120123: 'd',120124: 'e',
120125: 'f',120126: 'g',120128: 'i',120129: 'j',
120130: 'k',120131: 'l',120132: 'm',120134: 'o',
120138: 's',120139: 't',120140: 'u',120141: 'v',
120142: 'w',120143: 'x',120144: 'y',120172: 'a',
120173: 'b',120174: 'c',120175: 'd',120176: 'e',
120177: 'f',120178: 'g',120179: 'h',120180: 'i',
120181: 'j',120182: 'k',120183: 'l',120184: 'm',
120185: 'n',120186: 'o',120187: 'p',120188: 'q',
120189: 'r',120190: 's',120191: 't',120192: 'u',
120193: 'v',120194: 'w',120195: 'x',120196: 'y',
120197: 'z',120224: 'a',120225: 'b',120226: 'c',
120227: 'd',120228: 'e',120229: 'f',120230: 'g',
120231: 'h',120232: 'i',120233: 'j',120234: 'k',
120235: 'l',120236: 'm',120237: 'n',120238: 'o',
120239: 'p',120240: 'q',120241: 'r',120242: 's',
120243: 't',120244: 'u',120245: 'v',120246: 'w',
120247: 'x',120248: 'y',120249: 'z',120276: 'a',
120277: 'b',120278: 'c',120279: 'd',120280: 'e',
120281: 'f',120282: 'g',120283: 'h',120284: 'i',
120285: 'j',120286: 'k',120287: 'l',120288: 'm',
120289: 'n',120290: 'o',120291: 'p',120292: 'q',
120293: 'r',120294: 's',120295: 't',120296: 'u',
120297: 'v',120298: 'w',120299: 'x',120300: 'y',
120301: 'z',120328: 'a',120329: 'b',120330: 'c',
120331: 'd',120332: 'e',120333: 'f',120334: 'g',
120335: 'h',120336: 'i',120337: 'j',120338: 'k',
120339: 'l',120340: 'm',120341: 'n',120342: 'o',
120343: 'p',120344: 'q',120345: 'r',120346: 's',
120347: 't',120348: 'u',120349: 'v',120350: 'w',
120351: 'x',120352: 'y',120353: 'z',120380: 'a',
120381: 'b',120382: 'c',120383: 'd',120384: 'e',
120385: 'f',120386: 'g',120387: 'h',120388: 'i',
120389: 'j',120390: 'k',120391: 'l',120392: 'm',
120393: 'n',120394: 'o',120395: 'p',120396: 'q',
120397: 'r',120398: 's',120399: 't',120400: 'u',
120401: 'v',120402: 'w',120403: 'x',120404: 'y',
120405: 'z',120432: 'a',120433: 'b',120434: 'c',
120435: 'd',120436: 'e',120437: 'f',120438: 'g',
120439: 'h',120440: 'i',120441: 'j',120442: 'k',
120443: 'l',120444: 'm',120445: 'n',120446: 'o',
120447: 'p',120448: 'q',120449: 'r',120450: 's',
120451: 't',120452: 'u',120453: 'v',120454: 'w',
120455: 'x',120456: 'y',120457: 'z',120488: 'α',
120489: 'β',120490: 'γ',120491: 'δ',120492: 'ε',
120493: 'ζ',120494: 'η',120495: 'θ',120496: 'ι',
120497: 'κ',120498: 'λ',120499: 'μ',120500: 'ν',
120501: 'ξ',120502: 'ο',120503: 'π',120504: 'ρ',
120505: 'θ',120506: 'σ',120507: 'τ',120508: 'υ',
120509: 'φ',120510: 'χ',120511: 'ψ',120512: 'ω',
120531: 'σ',120546: 'α',120547: 'β',120548: 'γ',
120549: 'δ',120550: 'ε',120551: 'ζ',120552: 'η',
120553: 'θ',120554: 'ι',120555: 'κ',120556: 'λ',
120557: 'μ',120558: 'ν',120559: 'ξ',120560: 'ο',
120561: 'π',120562: 'ρ',120563: 'θ',120564: 'σ',
120565: 'τ',120566: 'υ',120567: 'φ',120568: 'χ',
120569: 'ψ',120570: 'ω',120589: 'σ',120604: 'α',
120605: 'β',120606: 'γ',120607: 'δ',120608: 'ε',
120609: 'ζ',120610: 'η',120611: 'θ',120612: 'ι',
120613: 'κ',120614: 'λ',120615: 'μ',120616: 'ν',
120617: 'ξ',120618: 'ο',120619: 'π',120620: 'ρ',
120621: 'θ',120622: 'σ',120623: 'τ',120624: 'υ',
120625: 'φ',120626: 'χ',120627: 'ψ',120628: 'ω',
120647: 'σ',120662: 'α',120663: 'β',120664: 'γ',
120665: 'δ',120666: 'ε',120667: 'ζ',120668: 'η',
120669: 'θ',120670: 'ι',120671: 'κ',120672: 'λ',
120673: 'μ',120674: 'ν',120675: 'ξ',120676: 'ο',
120677: 'π',120678: 'ρ',120679: 'θ',120680: 'σ',
120681: 'τ',120682: 'υ',120683: 'φ',120684: 'χ',
120685: 'ψ',120686: 'ω',120705: 'σ',120720: 'α',
120721: 'β',120722: 'γ',120723: 'δ',120724: 'ε',
120725: 'ζ',120726: 'η',120727: 'θ',120728: 'ι',
120729: 'κ',120730: 'λ',120731: 'μ',120732: 'ν',
120733: 'ξ',120734: 'ο',120735: 'π',120736: 'ρ',
120737: 'θ',120738: 'σ',120739: 'τ',120740: 'υ',
120741: 'φ',120742: 'χ',120743: 'ψ',120744: 'ω',
120763: 'σ'}
def map_table_b3(code):
    """Case-fold one character per RFC 3454 table B.3 (exceptions first, then lower())."""
    special = b3_exceptions.get(ord(code))
    return special if special is not None else code.lower()
def map_table_b2(a):
    """RFC 3454 table B.2 mapping: fold, NFKC-normalize, fold again, normalize again.

    Returns the second normalization when the first pass was not yet stable,
    otherwise the simple case-folded character.
    """
    folded = map_table_b3(a)
    normalized = unicodedata.normalize('NFKC', folded)
    refolded = ''.join(map_table_b3(ch) for ch in normalized)
    renormalized = unicodedata.normalize('NFKC', refolded)
    return renormalized if normalized != renormalized else folded
def in_table_c11(code):
    """Table C.1.1: the ASCII space character."""
    return code == ' '


def in_table_c12(code):
    """Table C.1.2: non-ASCII space characters (category Zs other than U+0020)."""
    return code != ' ' and unicodedata.category(code) == 'Zs'


def in_table_c11_c12(code):
    """Union of tables C.1.1 and C.1.2: every Unicode space separator."""
    return unicodedata.category(code) == 'Zs'


def in_table_c21(code):
    """Table C.2.1: ASCII control characters."""
    return unicodedata.category(code) == 'Cc' and ord(code) < 128
# RFC 3454 table C.2.2 specials (formatting/control code points that are not
# category Cc). Fix: built via explicit set unions so the expression works on
# both Python 2 (range() -> list) and Python 3 (list + range is a TypeError).
# The contents are unchanged.
c22_specials = (
    set([1757, 1807, 6158, 8204, 8205, 8232, 8233, 65279])
    | set(range(8288, 8292))
    | set(range(8298, 8304))
    | set(range(65529, 65533))
    | set(range(119155, 119163))
)


def in_table_c22(code):
    """Table C.2.2: non-ASCII control characters."""
    c = ord(code)
    if c < 128:
        return False
    if unicodedata.category(code) == 'Cc':
        return True
    return c in c22_specials


def in_table_c21_c22(code):
    """Union of tables C.2.1 and C.2.2: all control characters."""
    return unicodedata.category(code) == 'Cc' or ord(code) in c22_specials
def in_table_c3(code):
    """Table C.3: private use characters (category Co)."""
    return unicodedata.category(code) == 'Co'
def in_table_c4(code):
c = ord(code)
if c < 64976:
return False
if c < 65008:
return True
return ord(code) & 65535 in (65534, 65535)
def in_table_c5(code):
return unicodedata.category(code) == 'Cs'
c6_set = set(range(65529, 65534))
def in_table_c6(code):
return ord(code) in c6_set
c7_set = set(range(12272, 12284))
def in_table_c7(code):
return ord(code) in c7_set
c8_set = set([832, 833, 8206, 8207] + range(8234, 8239) + range(8298, 8304))
def in_table_c8(code):
return ord(code) in c8_set
c9_set = set([917505] + range(917536, 917632))
def in_table_c9(code):
return ord(code) in c9_set
def in_table_d1(code):
return unicodedata.bidirectional(code) in ('R', 'AL')
def in_table_d2(code):
return unicodedata.bidirectional(code) == 'L' | 38.435986 | 155 | 0.557796 |
from unicodedata import ucd_3_2_0 as unicodedata
def in_table_a1(code):
if unicodedata.category(code) != 'Cn':
return False
c = ord(code)
if 64976 <= c < 65008:
return False
return c & 65535 not in (65534, 65535)
b1_set = set([173, 847, 6150, 6155, 6156, 6157, 8203, 8204, 8205, 8288, 65279] + range(65024, 65040))
def in_table_b1(code):
return ord(code) in b1_set
b3_exceptions = {181: 'μ',
223: 'ss',304: 'i̇',329: 'ʼn',383: 's',
496: 'ǰ',837: 'ι',890: ' ι',912: 'ΐ',
944: 'ΰ',962: 'σ',976: 'β',977: 'θ',
978: 'υ',979: 'ύ',980: 'ϋ',981: 'φ',
982: 'π',1008: 'κ',1009: 'ρ',1010: 'σ',
1013: 'ε',1415: 'եւ',7830: 'ẖ',7831: 'ẗ',
7832: 'ẘ',7833: 'ẙ',7834: 'aʾ',7835: 'ṡ',
8016: 'ὐ',8018: 'ὒ',8020: 'ὔ',8022: 'ὖ',
8064: 'ἀι',8065: 'ἁι',8066: 'ἂι',8067: 'ἃι',
8068: 'ἄι',8069: 'ἅι',8070: 'ἆι',8071: 'ἇι',
8072: 'ἀι',8073: 'ἁι',8074: 'ἂι',8075: 'ἃι',
8076: 'ἄι',8077: 'ἅι',8078: 'ἆι',8079: 'ἇι',
8080: 'ἠι',8081: 'ἡι',8082: 'ἢι',8083: 'ἣι',
8084: 'ἤι',8085: 'ἥι',8086: 'ἦι',8087: 'ἧι',
8088: 'ἠι',8089: 'ἡι',8090: 'ἢι',8091: 'ἣι',
8092: 'ἤι',8093: 'ἥι',8094: 'ἦι',8095: 'ἧι',
8096: 'ὠι',8097: 'ὡι',8098: 'ὢι',8099: 'ὣι',
8100: 'ὤι',8101: 'ὥι',8102: 'ὦι',8103: 'ὧι',
8104: 'ὠι',8105: 'ὡι',8106: 'ὢι',8107: 'ὣι',
8108: 'ὤι',8109: 'ὥι',8110: 'ὦι',8111: 'ὧι',
8114: 'ὰι',8115: 'αι',8116: 'άι',8118: 'ᾶ',
8119: 'ᾶι',8124: 'αι',8126: 'ι',8130: 'ὴι',
8131: 'ηι',8132: 'ήι',8134: 'ῆ',8135: 'ῆι',
8140: 'ηι',8146: 'ῒ',8147: 'ΐ',8150: 'ῖ',
8151: 'ῗ',8162: 'ῢ',8163: 'ΰ',8164: 'ῤ',
8166: 'ῦ',8167: 'ῧ',8178: 'ὼι',8179: 'ωι',
8180: 'ώι',8182: 'ῶ',8183: 'ῶι',8188: 'ωι',
8360: 'rs',8450: 'c',8451: '°c',8455: 'ɛ',
8457: '°f',8459: 'h',8460: 'h',8461: 'h',
8464: 'i',8465: 'i',8466: 'l',8469: 'n',
8470: 'no',8473: 'p',8474: 'q',8475: 'r',
8476: 'r',8477: 'r',8480: 'sm',8481: 'tel',
8482: 'tm',8484: 'z',8488: 'z',8492: 'b',
8493: 'c',8496: 'e',8497: 'f',8499: 'm',
8510: 'γ',8511: 'π',8517: 'd',13169: 'hpa',
13171: 'au',13173: 'ov',13184: 'pa',13185: 'na',
13186: 'μa',13187: 'ma',13188: 'ka',13189: 'kb',
13190: 'mb',13191: 'gb',13194: 'pf',13195: 'nf',
13196: 'μf',13200: 'hz',13201: 'khz',13202: 'mhz',
13203: 'ghz',13204: 'thz',13225: 'pa',13226: 'kpa',
13227: 'mpa',13228: 'gpa',13236: 'pv',13237: 'nv',
13238: 'μv',13239: 'mv',13240: 'kv',13241: 'mv',
13242: 'pw',13243: 'nw',13244: 'μw',13245: 'mw',
13246: 'kw',13247: 'mw',13248: 'kω',13249: 'mω',
13251: 'bq',13254: 'c∕kg',13255: 'co.',13256: 'db',
13257: 'gy',13259: 'hp',13261: 'kk',13262: 'km',
13271: 'ph',13273: 'ppm',13274: 'pr',13276: 'sv',
13277: 'wb',64256: 'ff',64257: 'fi',64258: 'fl',
64259: 'ffi',64260: 'ffl',64261: 'st',64262: 'st',
64275: 'մն',64276: 'մե',64277: 'մի',64278: 'վն',
64279: 'մխ',119808: 'a',119809: 'b',119810: 'c',
119811: 'd',119812: 'e',119813: 'f',119814: 'g',
119815: 'h',119816: 'i',119817: 'j',119818: 'k',
119819: 'l',119820: 'm',119821: 'n',119822: 'o',
119823: 'p',119824: 'q',119825: 'r',119826: 's',
119827: 't',119828: 'u',119829: 'v',119830: 'w',
119831: 'x',119832: 'y',119833: 'z',119860: 'a',
119861: 'b',119862: 'c',119863: 'd',119864: 'e',
119865: 'f',119866: 'g',119867: 'h',119868: 'i',
119869: 'j',119870: 'k',119871: 'l',119872: 'm',
119873: 'n',119874: 'o',119875: 'p',119876: 'q',
119877: 'r',119878: 's',119879: 't',119880: 'u',
119881: 'v',119882: 'w',119883: 'x',119884: 'y',
119885: 'z',119912: 'a',119913: 'b',119914: 'c',
119915: 'd',119916: 'e',119917: 'f',119918: 'g',
119919: 'h',119920: 'i',119921: 'j',119922: 'k',
119923: 'l',119924: 'm',119925: 'n',119926: 'o',
119927: 'p',119928: 'q',119929: 'r',119930: 's',
119931: 't',119932: 'u',119933: 'v',119934: 'w',
119935: 'x',119936: 'y',119937: 'z',119964: 'a',
119966: 'c',119967: 'd',119970: 'g',119973: 'j',
119974: 'k',119977: 'n',119978: 'o',119979: 'p',
119980: 'q',119982: 's',119983: 't',119984: 'u',
119985: 'v',119986: 'w',119987: 'x',119988: 'y',
119989: 'z',120016: 'a',120017: 'b',120018: 'c',
120019: 'd',120020: 'e',120021: 'f',120022: 'g',
120023: 'h',120024: 'i',120025: 'j',120026: 'k',
120027: 'l',120028: 'm',120029: 'n',120030: 'o',
120031: 'p',120032: 'q',120033: 'r',120034: 's',
120035: 't',120036: 'u',120037: 'v',120038: 'w',
120039: 'x',120040: 'y',120041: 'z',120068: 'a',
120069: 'b',120071: 'd',120072: 'e',120073: 'f',
120074: 'g',120077: 'j',120078: 'k',120079: 'l',
120080: 'm',120081: 'n',120082: 'o',120083: 'p',
120084: 'q',120086: 's',120087: 't',120088: 'u',
120089: 'v',120090: 'w',120091: 'x',120092: 'y',
120120: 'a',120121: 'b',120123: 'd',120124: 'e',
120125: 'f',120126: 'g',120128: 'i',120129: 'j',
120130: 'k',120131: 'l',120132: 'm',120134: 'o',
120138: 's',120139: 't',120140: 'u',120141: 'v',
120142: 'w',120143: 'x',120144: 'y',120172: 'a',
120173: 'b',120174: 'c',120175: 'd',120176: 'e',
120177: 'f',120178: 'g',120179: 'h',120180: 'i',
120181: 'j',120182: 'k',120183: 'l',120184: 'm',
120185: 'n',120186: 'o',120187: 'p',120188: 'q',
120189: 'r',120190: 's',120191: 't',120192: 'u',
120193: 'v',120194: 'w',120195: 'x',120196: 'y',
120197: 'z',120224: 'a',120225: 'b',120226: 'c',
120227: 'd',120228: 'e',120229: 'f',120230: 'g',
120231: 'h',120232: 'i',120233: 'j',120234: 'k',
120235: 'l',120236: 'm',120237: 'n',120238: 'o',
120239: 'p',120240: 'q',120241: 'r',120242: 's',
120243: 't',120244: 'u',120245: 'v',120246: 'w',
120247: 'x',120248: 'y',120249: 'z',120276: 'a',
120277: 'b',120278: 'c',120279: 'd',120280: 'e',
120281: 'f',120282: 'g',120283: 'h',120284: 'i',
120285: 'j',120286: 'k',120287: 'l',120288: 'm',
120289: 'n',120290: 'o',120291: 'p',120292: 'q',
120293: 'r',120294: 's',120295: 't',120296: 'u',
120297: 'v',120298: 'w',120299: 'x',120300: 'y',
120301: 'z',120328: 'a',120329: 'b',120330: 'c',
120331: 'd',120332: 'e',120333: 'f',120334: 'g',
120335: 'h',120336: 'i',120337: 'j',120338: 'k',
120339: 'l',120340: 'm',120341: 'n',120342: 'o',
120343: 'p',120344: 'q',120345: 'r',120346: 's',
120347: 't',120348: 'u',120349: 'v',120350: 'w',
120351: 'x',120352: 'y',120353: 'z',120380: 'a',
120381: 'b',120382: 'c',120383: 'd',120384: 'e',
120385: 'f',120386: 'g',120387: 'h',120388: 'i',
120389: 'j',120390: 'k',120391: 'l',120392: 'm',
120393: 'n',120394: 'o',120395: 'p',120396: 'q',
120397: 'r',120398: 's',120399: 't',120400: 'u',
120401: 'v',120402: 'w',120403: 'x',120404: 'y',
120405: 'z',120432: 'a',120433: 'b',120434: 'c',
120435: 'd',120436: 'e',120437: 'f',120438: 'g',
120439: 'h',120440: 'i',120441: 'j',120442: 'k',
120443: 'l',120444: 'm',120445: 'n',120446: 'o',
120447: 'p',120448: 'q',120449: 'r',120450: 's',
120451: 't',120452: 'u',120453: 'v',120454: 'w',
120455: 'x',120456: 'y',120457: 'z',120488: 'α',
120489: 'β',120490: 'γ',120491: 'δ',120492: 'ε',
120493: 'ζ',120494: 'η',120495: 'θ',120496: 'ι',
120497: 'κ',120498: 'λ',120499: 'μ',120500: 'ν',
120501: 'ξ',120502: 'ο',120503: 'π',120504: 'ρ',
120505: 'θ',120506: 'σ',120507: 'τ',120508: 'υ',
120509: 'φ',120510: 'χ',120511: 'ψ',120512: 'ω',
120531: 'σ',120546: 'α',120547: 'β',120548: 'γ',
120549: 'δ',120550: 'ε',120551: 'ζ',120552: 'η',
120553: 'θ',120554: 'ι',120555: 'κ',120556: 'λ',
120557: 'μ',120558: 'ν',120559: 'ξ',120560: 'ο',
120561: 'π',120562: 'ρ',120563: 'θ',120564: 'σ',
120565: 'τ',120566: 'υ',120567: 'φ',120568: 'χ',
120569: 'ψ',120570: 'ω',120589: 'σ',120604: 'α',
120605: 'β',120606: 'γ',120607: 'δ',120608: 'ε',
120609: 'ζ',120610: 'η',120611: 'θ',120612: 'ι',
120613: 'κ',120614: 'λ',120615: 'μ',120616: 'ν',
120617: 'ξ',120618: 'ο',120619: 'π',120620: 'ρ',
120621: 'θ',120622: 'σ',120623: 'τ',120624: 'υ',
120625: 'φ',120626: 'χ',120627: 'ψ',120628: 'ω',
120647: 'σ',120662: 'α',120663: 'β',120664: 'γ',
120665: 'δ',120666: 'ε',120667: 'ζ',120668: 'η',
120669: 'θ',120670: 'ι',120671: 'κ',120672: 'λ',
120673: 'μ',120674: 'ν',120675: 'ξ',120676: 'ο',
120677: 'π',120678: 'ρ',120679: 'θ',120680: 'σ',
120681: 'τ',120682: 'υ',120683: 'φ',120684: 'χ',
120685: 'ψ',120686: 'ω',120705: 'σ',120720: 'α',
120721: 'β',120722: 'γ',120723: 'δ',120724: 'ε',
120725: 'ζ',120726: 'η',120727: 'θ',120728: 'ι',
120729: 'κ',120730: 'λ',120731: 'μ',120732: 'ν',
120733: 'ξ',120734: 'ο',120735: 'π',120736: 'ρ',
120737: 'θ',120738: 'σ',120739: 'τ',120740: 'υ',
120741: 'φ',120742: 'χ',120743: 'ψ',120744: 'ω',
120763: 'σ'}
def map_table_b3(code):
r = b3_exceptions.get(ord(code))
if r is not None:
return r
else:
return code.lower()
def map_table_b2(a):
al = map_table_b3(a)
b = unicodedata.normalize('NFKC', al)
bl = ''.join([ map_table_b3(ch) for ch in b ])
c = unicodedata.normalize('NFKC', bl)
if b != c:
return c
else:
return al
def in_table_c11(code):
return code == ' '
def in_table_c12(code):
return unicodedata.category(code) == 'Zs' and code != ' '
def in_table_c11_c12(code):
return unicodedata.category(code) == 'Zs'
def in_table_c21(code):
return ord(code) < 128 and unicodedata.category(code) == 'Cc'
c22_specials = set([1757, 1807, 6158, 8204, 8205, 8232, 8233, 65279] + range(8288, 8292) + range(8298, 8304) + range(65529, 65533) + range(119155, 119163))
def in_table_c22(code):
c = ord(code)
if c < 128:
return False
if unicodedata.category(code) == 'Cc':
return True
return c in c22_specials
def in_table_c21_c22(code):
return unicodedata.category(code) == 'Cc' or ord(code) in c22_specials
def in_table_c3(code):
return unicodedata.category(code) == 'Co'
def in_table_c4(code):
c = ord(code)
if c < 64976:
return False
if c < 65008:
return True
return ord(code) & 65535 in (65534, 65535)
def in_table_c5(code):
return unicodedata.category(code) == 'Cs'
c6_set = set(range(65529, 65534))
def in_table_c6(code):
return ord(code) in c6_set
c7_set = set(range(12272, 12284))
def in_table_c7(code):
return ord(code) in c7_set
c8_set = set([832, 833, 8206, 8207] + range(8234, 8239) + range(8298, 8304))
def in_table_c8(code):
return ord(code) in c8_set
c9_set = set([917505] + range(917536, 917632))
def in_table_c9(code):
return ord(code) in c9_set
def in_table_d1(code):
return unicodedata.bidirectional(code) in ('R', 'AL')
def in_table_d2(code):
return unicodedata.bidirectional(code) == 'L' | true | true |
f73cf664aa456d02f7e264675360420a7f633c5a | 1,316 | py | Python | setup.py | marcohong/xform | 653ef732aca1cf05f8e973b51a5bc2abbdb46589 | [
"MIT"
] | 2 | 2021-06-04T02:29:23.000Z | 2021-11-24T07:43:07.000Z | setup.py | marcohong/xform | 653ef732aca1cf05f8e973b51a5bc2abbdb46589 | [
"MIT"
] | null | null | null | setup.py | marcohong/xform | 653ef732aca1cf05f8e973b51a5bc2abbdb46589 | [
"MIT"
] | null | null | null | from setuptools import setup
with open('README.md', 'r') as f:
long_description = f.read()
setup(
name='xargs',
version='0.4.3',
author='Maco',
description='Binding form data validation framework.',
long_description=long_description,
long_description_content_type='text/markdown',
author_email='macohong@hotmail.com',
zip_safe=False,
license='MIT License',
url='https://github.com/marcohong/xform',
keywords=['Form validation', 'Data Binding',
'Tornado web', 'aiohttp web', 'Sanic web'],
packages=['xform', 'xform/adapters'],
python_requires='>=3.6',
install_requires=['multidict', 'attrs'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
"Programming Language :: Python :: Implementation :: CPython",
'Operating System :: OS Independent',
'Framework :: AsyncIO',
]
)
| 34.631579 | 70 | 0.616261 | from setuptools import setup
with open('README.md', 'r') as f:
long_description = f.read()
setup(
name='xargs',
version='0.4.3',
author='Maco',
description='Binding form data validation framework.',
long_description=long_description,
long_description_content_type='text/markdown',
author_email='macohong@hotmail.com',
zip_safe=False,
license='MIT License',
url='https://github.com/marcohong/xform',
keywords=['Form validation', 'Data Binding',
'Tornado web', 'aiohttp web', 'Sanic web'],
packages=['xform', 'xform/adapters'],
python_requires='>=3.6',
install_requires=['multidict', 'attrs'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
"Programming Language :: Python :: Implementation :: CPython",
'Operating System :: OS Independent',
'Framework :: AsyncIO',
]
)
| true | true |
f73cf66c1b4a50d5ca0177942c270fee63ecab95 | 12,968 | py | Python | playbooks/roles/repo_server/files/openstack-wheel-builder.py | digideskio/digideskOPEN | 0ec13cbb35ad2edead9e8381472483b0bbabf9e9 | [
"Apache-2.0"
] | null | null | null | playbooks/roles/repo_server/files/openstack-wheel-builder.py | digideskio/digideskOPEN | 0ec13cbb35ad2edead9e8381472483b0bbabf9e9 | [
"Apache-2.0"
] | null | null | null | playbooks/roles/repo_server/files/openstack-wheel-builder.py | digideskio/digideskOPEN | 0ec13cbb35ad2edead9e8381472483b0bbabf9e9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# (c) 2015, Kevin Carter <kevin.carter@rackspace.com>
import os
import traceback
import yaml
from cloudlib import arguments
from cloudlib import shell
REQUIREMENTS_FILE_TYPES = [
'requirements.txt',
'global-requirements.txt',
'test-requirements.txt',
'dev-requirements.txt'
]
# List of variable names that could be used within the yaml files that
# represent lists of python packages.
BUILT_IN_PIP_PACKAGE_VARS = [
'service_pip_dependencies',
'pip_common_packages',
'pip_container_packages',
'pip_packages'
]
class DependencyFileProcessor(object):
def __init__(self, local_path):
"""Find and process dependent files from a local_path.
:type local_path: ``str``
:return:
"""
self.pip = dict()
self.pip['git_package'] = list()
self.pip['py_package'] = list()
self.git_pip_install = 'git+%s@%s'
self.file_names = self._get_files(path=local_path)
# Process everything simply by calling the method
self._process_files(ext=('yaml', 'yml'))
def _filter_files(self, file_names, ext):
"""Filter the files and return a sorted list.
:type file_names:
:type ext: ``str`` or ``tuple``
:returns: ``list``
"""
_file_names = list()
for file_name in file_names:
if file_name.endswith(ext):
if '/defaults/' in file_name or '/vars/' in file_name:
_file_names.append(file_name)
else:
continue
elif os.path.basename(file_name) in REQUIREMENTS_FILE_TYPES:
with open(file_name, 'rb') as f:
packages = [
i.split()[0] for i in f.read().splitlines()
if i
if not i.startswith('#')
]
self.pip['py_package'].extend(packages)
else:
return sorted(_file_names, reverse=True)
@staticmethod
def _get_files(path):
"""Return a list of all files in the defaults/repo_packages directory.
:type path: ``str``
:returns: ``list``
"""
paths = os.walk(os.path.abspath(path))
files = list()
for fpath, _, afiles in paths:
for afile in afiles:
files.append(os.path.join(fpath, afile))
else:
return files
def _check_plugins(self, git_repo_plugins, git_data):
"""Check if the git url is a plugin type.
:type git_repo_plugins: ``dict``
:type git_data: ``dict``
"""
for repo_plugin in git_repo_plugins:
plugin = '%s/%s' % (
repo_plugin['path'].strip('/'),
repo_plugin['package'].lstrip('/')
)
package = self.git_pip_install % (
git_data['repo'],
'%s#egg=%s&subdirectory=%s' % (
git_data['branch'],
repo_plugin['package'].strip('/'),
plugin
)
)
self.pip['git_package'].append(package)
def _process_git(self, loaded_yaml, git_item):
"""Process git repos.
:type loaded_yaml: ``dict``
:type git_item: ``str``
"""
git_data = dict()
if git_item.split('_')[0] == 'git':
var_name = 'git'
else:
var_name = git_item.split('_')[0]
git_data['repo'] = loaded_yaml.get(git_item)
git_data['branch'] = loaded_yaml.get(
'%s_git_install_branch' % var_name.replace('.', '_')
)
if not git_data['branch']:
git_data['branch'] = loaded_yaml.get(
'git_install_branch',
'master'
)
package = self.git_pip_install % (
git_data['repo'], git_data['branch']
)
self.pip['git_package'].append(package)
git_repo_plugins = loaded_yaml.get('%s_repo_plugins' % var_name)
if git_repo_plugins:
self._check_plugins(
git_repo_plugins=git_repo_plugins,
git_data=git_data
)
def _process_files(self, ext):
"""Process files.
:type ext: ``tuple``
"""
file_names = self._filter_files(
file_names=self.file_names,
ext=ext
)
for file_name in file_names:
with open(file_name, 'rb') as f:
loaded_config = yaml.safe_load(f.read())
for key, values in loaded_config.items():
if key.endswith('git_repo'):
self._process_git(
loaded_yaml=loaded_config,
git_item=key
)
if [i for i in BUILT_IN_PIP_PACKAGE_VARS if i in key]:
self.pip['py_package'].extend(values)
def _arguments():
"""Return CLI arguments."""
arguments_dict = {
'optional_args': {
'local_path': {
'commands': [
'--local-path'
],
'help': 'Local path to cloned code.',
'metavar': '[PATH]',
'required': True
},
'report_file': {
'commands': [
'--report-file'
],
'help': 'Full path to write the package report to',
'metavar': '[FILE_PATH]',
'required': True
},
'storage_pool': {
'commands': [
'--storage-pool'
],
'help': 'Full path to the directory where you want to store'
' built wheels.',
'metavar': '[PATH]',
'required': True
},
'release_directory': {
'commands': [
'--release-directory'
],
'help': 'Full path to the directory where the releaesed links'
' will be stored.',
'metavar': '[PATH]',
'required': True
},
'add_on_repos': {
'commands': [
'--add-on-repos'
],
'help': 'Full repo path to require as an additional add on'
' repo. Example:'
' "git+https://github.com/rcbops/other-repo@master"',
'metavar': '[REPO_NAME]',
'nargs': '+'
},
'link_pool': {
'commands': [
'--link-pool'
],
'help': 'Full path to the directory links are stored.',
'metavar': '[PATH]',
'required': True
}
}
}
return arguments.ArgumentParserator(
arguments_dict=arguments_dict,
epilog='Licensed Apache2',
title='Discover all of the requirements within the'
' openstack-ansible project.',
detail='Requirement lookup',
description='Discover all of the requirements within the'
' openstack-ansible project.',
env_name='OS_ANSIBLE'
).arg_parser()
def _abs_path(path):
return os.path.abspath(
os.path.expanduser(
path
)
)
def _run_command(command):
print('Running "%s"' % command[2])
run_command = shell.ShellCommands(debug=True)
info, success = run_command.run_command(' '.join(command))
if not success:
raise SystemExit(info)
else:
print(info)
def main():
"""Run the main application."""
user_vars = _arguments()
return_list = list()
try:
dfp = DependencyFileProcessor(
local_path=_abs_path(user_vars['local_path'])
)
return_list.extend(dfp.pip['py_package'])
return_list.extend(dfp.pip['git_package'])
except Exception as exp:
raise SystemExit(
'Execution failure. Path: "%s", Error: "%s", Trace:\n%s' % (
user_vars['local_path'],
str(exp),
traceback.format_exc()
)
)
else:
return_data = {
'packages': list(),
'remote_packages': list()
}
for file_name in sorted(set(return_list)):
is_url = file_name.startswith(('http:', 'https:', 'git+'))
if is_url:
if '@' not in file_name:
return_data['packages'].append(file_name)
else:
return_data['remote_packages'].append(file_name)
else:
return_data['packages'].append(file_name)
else:
return_data['packages'] = ' '.join(
['"%s"' % i for i in set(return_data['packages'])]
)
if user_vars['add_on_repos']:
return_data['remote_packages'].extend(
[i.strip() for i in user_vars['add_on_repos']]
)
return_data['remote_packages'] = ' '.join(
['"%s"' % i for i in set(return_data['remote_packages'])]
)
# Build report
report_command = [
'yaprt',
'--debug',
'create-report',
'--report-file',
_abs_path(user_vars['report_file']),
'--git-install-repos',
return_data['remote_packages'],
'--packages',
return_data['packages']
]
_run_command(report_command)
# Build requirements wheels
requirements_command = [
'yaprt',
'--debug',
'build-wheels',
'--report-file',
_abs_path(user_vars['report_file']),
'--storage-pool',
_abs_path(user_vars['storage_pool']),
'--link-dir',
_abs_path(user_vars['release_directory']),
'--pip-extra-link-dirs',
_abs_path(user_vars['link_pool']),
'--pip-index',
'https://rpc-repo.rackspace.com/pools',
'--pip-extra-index',
'https://pypi.python.org/simple',
'--pip-bulk-operation',
'--build-output',
'/tmp/openstack-wheel-output',
'--build-dir',
'/tmp/openstack-builder',
'--build-requirements',
'--force-clean'
]
_run_command(requirements_command)
# Build wheels from git-repos
requirements_command = [
'yaprt',
'--debug',
'build-wheels',
'--report-file',
_abs_path(user_vars['report_file']),
'--storage-pool',
_abs_path(user_vars['storage_pool']),
'--link-dir',
_abs_path(user_vars['release_directory']),
'--pip-extra-link-dirs',
_abs_path(user_vars['link_pool']),
'--pip-no-deps',
'--pip-no-index',
'--build-output',
'/tmp/openstack-wheel-output',
'--build-dir',
'/tmp/openstack-builder',
'--build-branches',
'--build-releases',
'--force-clean'
]
_run_command(requirements_command)
# Create HTML index for all files in the release directory
index_command = [
'yaprt',
'--debug',
'create-html-indexes',
'--repo-dir',
_abs_path(user_vars['release_directory'])
]
_run_command(index_command)
# Store the git repositories
index_command = [
'yaprt',
'store-repos',
'--report-file',
_abs_path(user_vars['report_file']),
'--git-repo-path',
'/var/www/repo/openstackgit'
]
_run_command(index_command)
if __name__ == '__main__':
main()
| 31.706601 | 78 | 0.490824 |
import os
import traceback
import yaml
from cloudlib import arguments
from cloudlib import shell
REQUIREMENTS_FILE_TYPES = [
'requirements.txt',
'global-requirements.txt',
'test-requirements.txt',
'dev-requirements.txt'
]
BUILT_IN_PIP_PACKAGE_VARS = [
'service_pip_dependencies',
'pip_common_packages',
'pip_container_packages',
'pip_packages'
]
class DependencyFileProcessor(object):
def __init__(self, local_path):
self.pip = dict()
self.pip['git_package'] = list()
self.pip['py_package'] = list()
self.git_pip_install = 'git+%s@%s'
self.file_names = self._get_files(path=local_path)
self._process_files(ext=('yaml', 'yml'))
def _filter_files(self, file_names, ext):
_file_names = list()
for file_name in file_names:
if file_name.endswith(ext):
if '/defaults/' in file_name or '/vars/' in file_name:
_file_names.append(file_name)
else:
continue
elif os.path.basename(file_name) in REQUIREMENTS_FILE_TYPES:
with open(file_name, 'rb') as f:
packages = [
i.split()[0] for i in f.read().splitlines()
if i
if not i.startswith('#')
]
self.pip['py_package'].extend(packages)
else:
return sorted(_file_names, reverse=True)
@staticmethod
def _get_files(path):
paths = os.walk(os.path.abspath(path))
files = list()
for fpath, _, afiles in paths:
for afile in afiles:
files.append(os.path.join(fpath, afile))
else:
return files
def _check_plugins(self, git_repo_plugins, git_data):
for repo_plugin in git_repo_plugins:
plugin = '%s/%s' % (
repo_plugin['path'].strip('/'),
repo_plugin['package'].lstrip('/')
)
package = self.git_pip_install % (
git_data['repo'],
'%s#egg=%s&subdirectory=%s' % (
git_data['branch'],
repo_plugin['package'].strip('/'),
plugin
)
)
self.pip['git_package'].append(package)
def _process_git(self, loaded_yaml, git_item):
git_data = dict()
if git_item.split('_')[0] == 'git':
var_name = 'git'
else:
var_name = git_item.split('_')[0]
git_data['repo'] = loaded_yaml.get(git_item)
git_data['branch'] = loaded_yaml.get(
'%s_git_install_branch' % var_name.replace('.', '_')
)
if not git_data['branch']:
git_data['branch'] = loaded_yaml.get(
'git_install_branch',
'master'
)
package = self.git_pip_install % (
git_data['repo'], git_data['branch']
)
self.pip['git_package'].append(package)
git_repo_plugins = loaded_yaml.get('%s_repo_plugins' % var_name)
if git_repo_plugins:
self._check_plugins(
git_repo_plugins=git_repo_plugins,
git_data=git_data
)
def _process_files(self, ext):
file_names = self._filter_files(
file_names=self.file_names,
ext=ext
)
for file_name in file_names:
with open(file_name, 'rb') as f:
loaded_config = yaml.safe_load(f.read())
for key, values in loaded_config.items():
if key.endswith('git_repo'):
self._process_git(
loaded_yaml=loaded_config,
git_item=key
)
if [i for i in BUILT_IN_PIP_PACKAGE_VARS if i in key]:
self.pip['py_package'].extend(values)
def _arguments():
arguments_dict = {
'optional_args': {
'local_path': {
'commands': [
'--local-path'
],
'help': 'Local path to cloned code.',
'metavar': '[PATH]',
'required': True
},
'report_file': {
'commands': [
'--report-file'
],
'help': 'Full path to write the package report to',
'metavar': '[FILE_PATH]',
'required': True
},
'storage_pool': {
'commands': [
'--storage-pool'
],
'help': 'Full path to the directory where you want to store'
' built wheels.',
'metavar': '[PATH]',
'required': True
},
'release_directory': {
'commands': [
'--release-directory'
],
'help': 'Full path to the directory where the releaesed links'
' will be stored.',
'metavar': '[PATH]',
'required': True
},
'add_on_repos': {
'commands': [
'--add-on-repos'
],
'help': 'Full repo path to require as an additional add on'
' repo. Example:'
' "git+https://github.com/rcbops/other-repo@master"',
'metavar': '[REPO_NAME]',
'nargs': '+'
},
'link_pool': {
'commands': [
'--link-pool'
],
'help': 'Full path to the directory links are stored.',
'metavar': '[PATH]',
'required': True
}
}
}
return arguments.ArgumentParserator(
arguments_dict=arguments_dict,
epilog='Licensed Apache2',
title='Discover all of the requirements within the'
' openstack-ansible project.',
detail='Requirement lookup',
description='Discover all of the requirements within the'
' openstack-ansible project.',
env_name='OS_ANSIBLE'
).arg_parser()
def _abs_path(path):
return os.path.abspath(
os.path.expanduser(
path
)
)
def _run_command(command):
print('Running "%s"' % command[2])
run_command = shell.ShellCommands(debug=True)
info, success = run_command.run_command(' '.join(command))
if not success:
raise SystemExit(info)
else:
print(info)
def main():
user_vars = _arguments()
return_list = list()
try:
dfp = DependencyFileProcessor(
local_path=_abs_path(user_vars['local_path'])
)
return_list.extend(dfp.pip['py_package'])
return_list.extend(dfp.pip['git_package'])
except Exception as exp:
raise SystemExit(
'Execution failure. Path: "%s", Error: "%s", Trace:\n%s' % (
user_vars['local_path'],
str(exp),
traceback.format_exc()
)
)
else:
return_data = {
'packages': list(),
'remote_packages': list()
}
for file_name in sorted(set(return_list)):
is_url = file_name.startswith(('http:', 'https:', 'git+'))
if is_url:
if '@' not in file_name:
return_data['packages'].append(file_name)
else:
return_data['remote_packages'].append(file_name)
else:
return_data['packages'].append(file_name)
else:
return_data['packages'] = ' '.join(
['"%s"' % i for i in set(return_data['packages'])]
)
if user_vars['add_on_repos']:
return_data['remote_packages'].extend(
[i.strip() for i in user_vars['add_on_repos']]
)
return_data['remote_packages'] = ' '.join(
['"%s"' % i for i in set(return_data['remote_packages'])]
)
report_command = [
'yaprt',
'--debug',
'create-report',
'--report-file',
_abs_path(user_vars['report_file']),
'--git-install-repos',
return_data['remote_packages'],
'--packages',
return_data['packages']
]
_run_command(report_command)
requirements_command = [
'yaprt',
'--debug',
'build-wheels',
'--report-file',
_abs_path(user_vars['report_file']),
'--storage-pool',
_abs_path(user_vars['storage_pool']),
'--link-dir',
_abs_path(user_vars['release_directory']),
'--pip-extra-link-dirs',
_abs_path(user_vars['link_pool']),
'--pip-index',
'https://rpc-repo.rackspace.com/pools',
'--pip-extra-index',
'https://pypi.python.org/simple',
'--pip-bulk-operation',
'--build-output',
'/tmp/openstack-wheel-output',
'--build-dir',
'/tmp/openstack-builder',
'--build-requirements',
'--force-clean'
]
_run_command(requirements_command)
requirements_command = [
'yaprt',
'--debug',
'build-wheels',
'--report-file',
_abs_path(user_vars['report_file']),
'--storage-pool',
_abs_path(user_vars['storage_pool']),
'--link-dir',
_abs_path(user_vars['release_directory']),
'--pip-extra-link-dirs',
_abs_path(user_vars['link_pool']),
'--pip-no-deps',
'--pip-no-index',
'--build-output',
'/tmp/openstack-wheel-output',
'--build-dir',
'/tmp/openstack-builder',
'--build-branches',
'--build-releases',
'--force-clean'
]
_run_command(requirements_command)
index_command = [
'yaprt',
'--debug',
'create-html-indexes',
'--repo-dir',
_abs_path(user_vars['release_directory'])
]
_run_command(index_command)
index_command = [
'yaprt',
'store-repos',
'--report-file',
_abs_path(user_vars['report_file']),
'--git-repo-path',
'/var/www/repo/openstackgit'
]
_run_command(index_command)
if __name__ == '__main__':
main()
| true | true |
f73cf6b2817245d0a3cbe170c9e4dc347bdcf0ad | 610 | py | Python | db_tools/errors.py | ScottSnapperLab/db_tools | f11ce6baf0cb690e7d9eec43e785910d1f892ca8 | [
"MIT"
] | 1 | 2018-05-25T17:00:59.000Z | 2018-05-25T17:00:59.000Z | db_tools/errors.py | ScottSnapperLab/db_tools | f11ce6baf0cb690e7d9eec43e785910d1f892ca8 | [
"MIT"
] | 1 | 2018-03-09T15:59:15.000Z | 2018-03-09T15:59:15.000Z | db_tools/errors.py | ScottSnapperLab/db_tools | f11ce6baf0cb690e7d9eec43e785910d1f892ca8 | [
"MIT"
] | 3 | 2018-01-09T18:07:25.000Z | 2018-02-20T16:44:12.000Z | #!/usr/bin/env python
"""Provide error classes."""
# Imports
from db_tools import __author__, __email__
class DBToolsError(Exception):
"""Base error class."""
class NotImplementedYet(NotImplementedError, DBToolsError):
"""Raise when a section of code that has been left for another time is asked to execute."""
def __init__(self, msg=None):
"""Set up the Exception."""
if msg is None:
msg = f"That bonehead {__author__} should really hear your rage about this disgraceful oversight! Feel free to tell them at {__email__}"
self.args = (msg, *self.args)
| 30.5 | 148 | 0.681967 |
from db_tools import __author__, __email__
class DBToolsError(Exception):
class NotImplementedYet(NotImplementedError, DBToolsError):
def __init__(self, msg=None):
if msg is None:
msg = f"That bonehead {__author__} should really hear your rage about this disgraceful oversight! Feel free to tell them at {__email__}"
self.args = (msg, *self.args)
| true | true |
f73cf92b8d16003b4dbe116864ff5454dea6a4f6 | 6,383 | py | Python | src/emotions_tpu.py | Novixous/Emotion-Trainer | a71d7c6ac3a0686e28ad7ee0b3a5489289ee233d | [
"MIT"
] | null | null | null | src/emotions_tpu.py | Novixous/Emotion-Trainer | a71d7c6ac3a0686e28ad7ee0b3a5489289ee233d | [
"MIT"
] | null | null | null | src/emotions_tpu.py | Novixous/Emotion-Trainer | a71d7c6ac3a0686e28ad7ee0b3a5489289ee233d | [
"MIT"
] | null | null | null | import numpy as np
import argparse
import matplotlib.pyplot as plt
import cv2
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
# Silence TensorFlow INFO/WARNING log spam (level 2 = errors only).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# Command-line interface: --mode selects the train/display/evaluate branch below.
ap = argparse.ArgumentParser()
ap.add_argument("--mode",help="train/display")
mode = ap.parse_args().mode
def combine_gen(*gens):
    """Yield items from *gens* in round-robin order, indefinitely.

    Each pass consumes exactly one item from every generator in turn; the
    loop never terminates on its own, so a finite generator running dry
    surfaces through ``next`` in the usual way.
    """
    while True:
        yield from (next(source) for source in gens)
# Plot accuracy and loss curves for a finished training run.
def plot_model_history(model_history):
    """Render side-by-side accuracy and loss curves from a Keras History.

    Saves the figure to ``plot.png`` and then shows it on screen.
    """
    fig, axs = plt.subplots(1, 2, figsize=(15, 5))
    panels = (
        ("accuracy", "Accuracy", "Model Accuracy"),
        ("loss", "Loss", "Model Loss"),
    )
    for ax, (key, ylabel, title) in zip(axs, panels):
        train_curve = model_history.history[key]
        val_curve = model_history.history["val_" + key]
        ax.plot(range(1, len(train_curve) + 1), train_curve)
        ax.plot(range(1, len(val_curve) + 1), val_curve)
        ax.set_title(title)
        ax.set_ylabel(ylabel)
        ax.set_xlabel("Epoch")
        # NOTE(review): set_xticks' second positional argument is the tick-label
        # list; passing len(...)/10 (a float) mirrors the original code but looks
        # wrong - confirm against the matplotlib Axes API.
        ax.set_xticks(np.arange(1, len(train_curve) + 1), len(train_curve) / 10)
        ax.legend(['train', 'val'], loc='best')
    fig.savefig('plot.png')
    plt.show()
# Define data generators
train_dir = 'data/train'
val_dir = 'data/test'
eval_dir = 'data/evaluate'
# Number of augmented clones of the training generator created below; num_train
# is scaled by the same factor so steps_per_epoch walks through every clone.
clone_time = 30
# 28709 is presumably the number of images under data/train -- TODO confirm.
num_train = 28709 * clone_time
num_val = 7178
batch_size = 64
num_epoch = 10

# Training images get brightness jitter and horizontal flips; validation and
# evaluation data are only rescaled to [0, 1].
train_datagen = ImageDataGenerator(
    rescale=1./255,
    brightness_range=[0.2,1.5],
    horizontal_flip=True)
val_datagen = ImageDataGenerator(rescale=1./255)
eval_datagen = ImageDataGenerator(rescale=1./255)

# One independent flow per clone; combine_gen() later interleaves them so each
# epoch sees clone_time differently-augmented passes over the dataset.
train_generators = []
for x in range(clone_time):
    train_generators.append(train_datagen.flow_from_directory(
        train_dir,
        target_size=(48,48),
        batch_size=batch_size,
        color_mode="grayscale",
        class_mode='categorical'))

validation_generator = val_datagen.flow_from_directory(
    val_dir,
    target_size=(48,48),
    batch_size=batch_size,
    color_mode="grayscale",
    class_mode='categorical')

evaluation_generator = eval_datagen.flow_from_directory(
    eval_dir,
    target_size=(48,48),
    batch_size=batch_size,
    color_mode="grayscale",
    class_mode='categorical')
# Create the model: a small CNN over 48x48 grayscale crops, ending in a
# 7-way softmax (one output per emotion class in emotion_dict below).
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(48,48,1)))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Classifier head: flatten feature maps, one hidden layer, softmax output.
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(7, activation='softmax'))
model.summary()
# If you want to train the same model or try other models, go for this
if mode == "train":
    model.compile(loss='categorical_crossentropy',optimizer=Adam(lr=0.0001, decay=1e-6),metrics=['accuracy'])
    # Train on the interleaved augmented clones; steps_per_epoch is scaled by
    # clone_time via num_train so every clone is consumed once per epoch.
    model_info = model.fit(
        combine_gen(*train_generators),
        steps_per_epoch=num_train // batch_size,
        epochs=num_epoch,
        validation_data=validation_generator,
        validation_steps=num_val // batch_size)
    # plot_model_history(model_info)
    model.save_weights('model-epoch-augmentated{}.h5'.format(num_epoch))

# emotions will be displayed on your face from the webcam feed
elif mode == "display":
    model.load_weights('model-epoch-augmentated{}.h5'.format(num_epoch))

    # prevents openCL usage and unnecessary logging messages
    cv2.ocl.setUseOpenCL(False)

    # dictionary which assigns each label an emotion (alphabetical order)
    emotion_dict = {0: "Angry", 1: "Disgusted", 2: "Fearful", 3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}

    # start the webcam feed
    cap = cv2.VideoCapture(0)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    while True:
        # Find haar cascade to draw bounding box around face
        ret, frame = cap.read()
        if not ret:
            break
        frame = cv2.flip(frame, 1)
        facecasc = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = facecasc.detectMultiScale(gray,scaleFactor=1.3, minNeighbors=5)

        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y-50), (x+w, y+h+10), (255, 0, 0), 2)
            # Crop the detected face, resize to the network's 48x48 grayscale
            # input and add batch/channel axes before predicting.
            roi_gray = gray[y:y + h, x:x + w]
            cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
            prediction = model.predict(cropped_img)
            np.set_printoptions(formatter={'float': '{: 0.3f}'.format})
            print(prediction)
            # finalArr.append(prediction)
            maxindex = int(np.argmax(prediction))
            cv2.putText(frame, emotion_dict[maxindex], (x+20, y-60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)

        cv2.imshow('Video', cv2.resize(frame,(1600,960),interpolation = cv2.INTER_CUBIC))
        # Quit on 'q'.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    # np.set_printoptions(formatter={'float': '{: 0.3f}'.format})
    # for x in finalArr:
    #     print(x)
    cv2.destroyAllWindows()

elif mode == "evaluate":
    model.compile(loss='categorical_crossentropy',optimizer=Adam(lr=0.0001, decay=1e-6),metrics=['accuracy'])
    model.load_weights('model-epoch-augmentated{}.h5'.format(num_epoch))
    result = model.evaluate(evaluation_generator)
| 37.547059 | 130 | 0.681184 | import numpy as np
import argparse
import matplotlib.pyplot as plt
import cv2
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
ap = argparse.ArgumentParser()
ap.add_argument("--mode",help="train/display")
mode = ap.parse_args().mode
def combine_gen(*gens):
while True:
for g in gens:
yield next(g)
def plot_model_history(model_history):
fig, axs = plt.subplots(1,2,figsize=(15,5))
axs[0].plot(range(1,len(model_history.history['accuracy'])+1),model_history.history['accuracy'])
axs[0].plot(range(1,len(model_history.history['val_accuracy'])+1),model_history.history['val_accuracy'])
axs[0].set_title('Model Accuracy')
axs[0].set_ylabel('Accuracy')
axs[0].set_xlabel('Epoch')
axs[0].set_xticks(np.arange(1,len(model_history.history['accuracy'])+1),len(model_history.history['accuracy'])/10)
axs[0].legend(['train', 'val'], loc='best')
axs[1].plot(range(1,len(model_history.history['loss'])+1),model_history.history['loss'])
axs[1].plot(range(1,len(model_history.history['val_loss'])+1),model_history.history['val_loss'])
axs[1].set_title('Model Loss')
axs[1].set_ylabel('Loss')
axs[1].set_xlabel('Epoch')
axs[1].set_xticks(np.arange(1,len(model_history.history['loss'])+1),len(model_history.history['loss'])/10)
axs[1].legend(['train', 'val'], loc='best')
fig.savefig('plot.png')
plt.show()
train_dir = 'data/train'
val_dir = 'data/test'
eval_dir = 'data/evaluate'
clone_time = 30
num_train = 28709 * clone_time
num_val = 7178
batch_size = 64
num_epoch = 10
train_datagen = ImageDataGenerator(
rescale=1./255,
brightness_range=[0.2,1.5],
horizontal_flip=True)
val_datagen = ImageDataGenerator(rescale=1./255)
eval_datagen = ImageDataGenerator(rescale=1./255)
train_generators = []
for x in range(clone_time):
train_generators.append(train_datagen.flow_from_directory(
train_dir,
target_size=(48,48),
batch_size=batch_size,
color_mode="grayscale",
class_mode='categorical'))
validation_generator = val_datagen.flow_from_directory(
val_dir,
target_size=(48,48),
batch_size=batch_size,
color_mode="grayscale",
class_mode='categorical')
evaluation_generator = eval_datagen.flow_from_directory(
eval_dir,
target_size=(48,48),
batch_size=batch_size,
color_mode="grayscale",
class_mode='categorical')
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(48,48,1)))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(7, activation='softmax'))
model.summary()
if mode == "train":
model.compile(loss='categorical_crossentropy',optimizer=Adam(lr=0.0001, decay=1e-6),metrics=['accuracy'])
model_info = model.fit(
combine_gen(*train_generators),
steps_per_epoch=num_train // batch_size,
epochs=num_epoch,
validation_data=validation_generator,
validation_steps=num_val // batch_size)
model.save_weights('model-epoch-augmentated{}.h5'.format(num_epoch))
elif mode == "display":
model.load_weights('model-epoch-augmentated{}.h5'.format(num_epoch))
cv2.ocl.setUseOpenCL(False)
emotion_dict = {0: "Angry", 1: "Disgusted", 2: "Fearful", 3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}
cap = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
while True:
ret, frame = cap.read()
if not ret:
break
frame = cv2.flip(frame, 1)
facecasc = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = facecasc.detectMultiScale(gray,scaleFactor=1.3, minNeighbors=5)
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y-50), (x+w, y+h+10), (255, 0, 0), 2)
roi_gray = gray[y:y + h, x:x + w]
cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
prediction = model.predict(cropped_img)
np.set_printoptions(formatter={'float': '{: 0.3f}'.format})
print(prediction)
maxindex = int(np.argmax(prediction))
cv2.putText(frame, emotion_dict[maxindex], (x+20, y-60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
cv2.imshow('Video', cv2.resize(frame,(1600,960),interpolation = cv2.INTER_CUBIC))
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
elif mode == "evaluate":
model.compile(loss='categorical_crossentropy',optimizer=Adam(lr=0.0001, decay=1e-6),metrics=['accuracy'])
model.load_weights('model-epoch-augmentated{}.h5'.format(num_epoch))
result = model.evaluate(evaluation_generator)
| true | true |
f73cf9a27021c93616be5b0375218d312ee68a00 | 3,330 | py | Python | resources/lib/kodi/context_menu_utils.py | Sopor/plugin.video.netflix-1 | ecefb537cdffa368e104864b313fbcc010b44b68 | [
"MIT"
] | null | null | null | resources/lib/kodi/context_menu_utils.py | Sopor/plugin.video.netflix-1 | ecefb537cdffa368e104864b313fbcc010b44b68 | [
"MIT"
] | null | null | null | resources/lib/kodi/context_menu_utils.py | Sopor/plugin.video.netflix-1 | ecefb537cdffa368e104864b313fbcc010b44b68 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2020 Stefano Gottardo (original implementation module)
Miscellaneous utility functions for generating context menu items
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
from __future__ import absolute_import, division, unicode_literals
import resources.lib.common as common
from resources.lib.globals import G
# Normally it wouldn't be necessary to split a module so small into two files,
# unfortunately use 'get_local_string' on a variable in the module header, makes that method (get_local_string)
# run immediately upon loading of the add-on modules, making it impossible to load the service instance.
# Separating the process of the loading of local strings would cause a huge slowdown in the processing of video lists.
def ctx_item_url(paths, mode=G.MODE_ACTION):
    """Create a URL factory bound to a fixed plugin path.

    The returned callable accepts ``(videoid, params)`` and delegates to
    ``common.build_url`` with *paths* and *mode* closed over.
    """
    def _build(videoid, params):
        """Assemble the context-menu item URL for *videoid*."""
        return common.build_url(paths, videoid, params, mode=mode)

    return _build
# Registry of context-menu entries: each value pairs a localized label with a
# URL factory built by ctx_item_url(); keys are referenced by the menu builder.
CONTEXT_MENU_ACTIONS = {
    # Kodi-library management (G.MODE_LIBRARY routes)
    'export': {
        'label': common.get_local_string(30018),
        'url': ctx_item_url(['export'], G.MODE_LIBRARY)},
    'remove': {
        'label': common.get_local_string(30030),
        'url': ctx_item_url(['remove'], G.MODE_LIBRARY)},
    'update': {
        'label': common.get_local_string(30061),
        'url': ctx_item_url(['update'], G.MODE_LIBRARY)},
    'export_new_episodes': {
        'label': common.get_local_string(30195),
        'url': ctx_item_url(['export_new_episodes'], G.MODE_LIBRARY)},
    'exclude_from_auto_update': {
        'label': common.get_local_string(30196),
        'url': ctx_item_url(['exclude_from_auto_update'], G.MODE_LIBRARY)},
    'include_in_auto_update': {
        'label': common.get_local_string(30197),
        'url': ctx_item_url(['include_in_auto_update'], G.MODE_LIBRARY)},
    # Rating and my-list actions (default G.MODE_ACTION routes)
    'rate': {
        'label': common.get_local_string(30019),
        'url': ctx_item_url(['rate'])},
    'rate_thumb': {
        'label': common.get_local_string(30019),
        'url': ctx_item_url(['rate_thumb'])},
    'add_to_list': {
        'label': common.get_local_string(30021),
        'url': ctx_item_url(['my_list', 'add'])},
    'remove_from_list': {
        'label': common.get_local_string(30020),
        'url': ctx_item_url(['my_list', 'remove'])},
    'trailer': {
        'label': common.get_local_string(30179),
        'url': ctx_item_url(['trailer'])},
    'force_update_list': {
        'label': common.get_local_string(30214),
        'url': ctx_item_url(['force_update_list'])},
    'change_watched_status': {
        'label': common.get_local_string(30236),
        'url': ctx_item_url(['change_watched_status'])},
    # Search-folder maintenance (G.MODE_DIRECTORY routes)
    'search_remove': {
        'label': common.get_local_string(15015),
        'url': ctx_item_url(['search', 'search', 'remove'], G.MODE_DIRECTORY)},
    'search_edit': {
        'label': common.get_local_string(21450),
        'url': ctx_item_url(['search', 'search', 'edit'], G.MODE_DIRECTORY)},
    'remove_watched_status': {
        'label': common.get_local_string(15015),
        'url': ctx_item_url(['remove_watched_status'])},
}
| 41.625 | 118 | 0.666366 |
from __future__ import absolute_import, division, unicode_literals
import resources.lib.common as common
from resources.lib.globals import G
# unfortunately use 'get_local_string' on a variable in the module header, makes that method (get_local_string)
# run immediately upon loading of the add-on modules, making it impossible to load the service instance.
# Separating the process of the loading of local strings would cause a huge slowdown in the processing of video lists.
def ctx_item_url(paths, mode=G.MODE_ACTION):
def ctx_url_builder(videoid, params):
return common.build_url(paths, videoid, params, mode=mode)
return ctx_url_builder
CONTEXT_MENU_ACTIONS = {
'export': {
'label': common.get_local_string(30018),
'url': ctx_item_url(['export'], G.MODE_LIBRARY)},
'remove': {
'label': common.get_local_string(30030),
'url': ctx_item_url(['remove'], G.MODE_LIBRARY)},
'update': {
'label': common.get_local_string(30061),
'url': ctx_item_url(['update'], G.MODE_LIBRARY)},
'export_new_episodes': {
'label': common.get_local_string(30195),
'url': ctx_item_url(['export_new_episodes'], G.MODE_LIBRARY)},
'exclude_from_auto_update': {
'label': common.get_local_string(30196),
'url': ctx_item_url(['exclude_from_auto_update'], G.MODE_LIBRARY)},
'include_in_auto_update': {
'label': common.get_local_string(30197),
'url': ctx_item_url(['include_in_auto_update'], G.MODE_LIBRARY)},
'rate': {
'label': common.get_local_string(30019),
'url': ctx_item_url(['rate'])},
'rate_thumb': {
'label': common.get_local_string(30019),
'url': ctx_item_url(['rate_thumb'])},
'add_to_list': {
'label': common.get_local_string(30021),
'url': ctx_item_url(['my_list', 'add'])},
'remove_from_list': {
'label': common.get_local_string(30020),
'url': ctx_item_url(['my_list', 'remove'])},
'trailer': {
'label': common.get_local_string(30179),
'url': ctx_item_url(['trailer'])},
'force_update_list': {
'label': common.get_local_string(30214),
'url': ctx_item_url(['force_update_list'])},
'change_watched_status': {
'label': common.get_local_string(30236),
'url': ctx_item_url(['change_watched_status'])},
'search_remove': {
'label': common.get_local_string(15015),
'url': ctx_item_url(['search', 'search', 'remove'], G.MODE_DIRECTORY)},
'search_edit': {
'label': common.get_local_string(21450),
'url': ctx_item_url(['search', 'search', 'edit'], G.MODE_DIRECTORY)},
'remove_watched_status': {
'label': common.get_local_string(15015),
'url': ctx_item_url(['remove_watched_status'])},
}
| true | true |
f73cf9e98acc79937549705b6f20437879812e33 | 937 | py | Python | model-optimizer/mo/front/mxnet/extractors/l2_normalization.py | apexxs/dldt | 17e66dc5a6631d630da454506902bd7c25d4170b | [
"Apache-2.0"
] | 2 | 2021-04-19T06:08:35.000Z | 2021-08-25T02:43:43.000Z | model-optimizer/mo/front/mxnet/extractors/l2_normalization.py | apexxs/dldt | 17e66dc5a6631d630da454506902bd7c25d4170b | [
"Apache-2.0"
] | 6 | 2022-01-11T18:56:22.000Z | 2022-02-21T13:20:20.000Z | model-optimizer/mo/front/mxnet/extractors/l2_normalization.py | apexxs/dldt | 17e66dc5a6631d630da454506902bd7c25d4170b | [
"Apache-2.0"
] | 3 | 2021-02-05T17:11:17.000Z | 2021-04-19T08:33:31.000Z | """
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.front.common.partial_infer.elemental import copy_shape_infer
def l2_normalization_ext(attrs):
    """Map MXNet L2Normalization attributes onto a Normalize layer spec.

    Only ``eps`` is read from *attrs* (default 1e-10); every other field of
    the returned node-attribute dict is fixed for this layer type.
    """
    return {
        'op': 'Normalize',
        'type': 'Normalize',
        'eps': attrs.float('eps', 1e-10),
        'across_spatial': 0,
        'channel_shared': 0,
        'infer': copy_shape_infer,
    }
| 30.225806 | 73 | 0.70651 | from mo.front.common.partial_infer.elemental import copy_shape_infer
def l2_normalization_ext(attrs):
eps = attrs.float('eps', 1e-10)
node_attrs = {
'op': 'Normalize',
'type': 'Normalize',
'eps': eps,
'across_spatial': 0,
'channel_shared': 0,
'infer': copy_shape_infer
}
return node_attrs
| true | true |
f73cf9ed066b594d8af8b16eb6a2ffaab0220af6 | 2,648 | py | Python | xknx/dpt/dpt_color.py | iligiddi/xknx | c450d5934c8ddc608741229a7d14168013c3684c | [
"MIT"
] | null | null | null | xknx/dpt/dpt_color.py | iligiddi/xknx | c450d5934c8ddc608741229a7d14168013c3684c | [
"MIT"
] | null | null | null | xknx/dpt/dpt_color.py | iligiddi/xknx | c450d5934c8ddc608741229a7d14168013c3684c | [
"MIT"
] | null | null | null | """Implementation of the KNX date data point."""
from __future__ import annotations
from typing import NamedTuple
from xknx.exceptions import ConversionError
from .dpt import DPTBase
class XYYColor(NamedTuple):
    """
    Representation of XY color with brightness.

    `color`: tuple(x-axis, y-axis) each 0..1; None if invalid.
    `brightness`: int 0..255; None if invalid.

    tuple(tuple(float, float) | None, int | None)
    """

    # CIE xy chromaticity pair; None when the "colour valid" bit of the
    # KNX payload is unset (see DPTColorXYY.from_knx).
    color: tuple[float, float] | None = None
    # Absolute brightness 0..255; None when the "brightness valid" bit is unset.
    brightness: int | None = None
class DPTColorXYY(DPTBase):
    """Abstraction for KNX 6 octet color xyY (DPT 242.600)."""

    payload_length = 6

    @classmethod
    def from_knx(cls, raw: tuple[int, ...]) -> XYYColor:
        """Parse/deserialize from KNX/IP raw data.

        Octets 0-3 hold the x/y chromaticity as 16-bit big-endian fractions of
        0xFFFF, octet 4 the brightness, octet 5 the two validity flag bits.
        """
        cls.test_bytesarray(raw)
        has_color = raw[5] >> 1 & 0b1
        has_brightness = raw[5] & 0b1
        color = None
        if has_color:
            x_raw = raw[0] << 8 | raw[1]
            y_raw = raw[2] << 8 | raw[3]
            # Round to 5 digits for readability while keeping 16-bit precision.
            color = (round(x_raw / 0xFFFF, 5), round(y_raw / 0xFFFF, 5))
        return XYYColor(
            color=color,
            brightness=raw[4] if has_brightness else None,
        )

    @classmethod
    def to_knx(
        cls, value: XYYColor | tuple[tuple[float, float] | None, int | None]
    ) -> tuple[int, int, int, int, int, int]:
        """Serialize to KNX/IP raw data.

        Raises ConversionError when *value* is malformed or out of range.
        """
        try:
            if not isinstance(value, XYYColor):
                value = XYYColor(*value)
            x_raw = y_raw = 0
            color_valid = False
            if value.color is not None:
                if any(not 0 <= axis <= 1 for axis in value.color):
                    raise ValueError
                x_raw, y_raw = (round(axis * 0xFFFF) for axis in value.color)
                color_valid = True
            level = 0
            brightness_valid = False
            if value.brightness is not None:
                if not 0 <= value.brightness <= 255:
                    raise ValueError
                level = int(value.brightness)
                brightness_valid = True
            return (
                x_raw >> 8,
                x_raw & 0xFF,
                y_raw >> 8,
                y_raw & 0xFF,
                level,
                color_valid << 1 | brightness_valid,
            )
        except (ValueError, TypeError):
            raise ConversionError("Could not serialize %s" % cls.__name__, value=value)
| 30.790698 | 89 | 0.549849 | from __future__ import annotations
from typing import NamedTuple
from xknx.exceptions import ConversionError
from .dpt import DPTBase
class XYYColor(NamedTuple):
color: tuple[float, float] | None = None
brightness: int | None = None
class DPTColorXYY(DPTBase):
payload_length = 6
@classmethod
def from_knx(cls, raw: tuple[int, ...]) -> XYYColor:
cls.test_bytesarray(raw)
x_axis_int = raw[0] << 8 | raw[1]
y_axis_int = raw[2] << 8 | raw[3]
brightness = raw[4]
color_valid = raw[5] >> 1 & 0b1
brightness_valid = raw[5] & 0b1
return XYYColor(
color=(
round(x_axis_int / 0xFFFF, 5),
round(y_axis_int / 0xFFFF, 5),
)
if color_valid
else None,
brightness=brightness if brightness_valid else None,
)
@classmethod
def to_knx(
cls, value: XYYColor | tuple[tuple[float, float] | None, int | None]
) -> tuple[int, int, int, int, int, int]:
try:
if not isinstance(value, XYYColor):
value = XYYColor(*value)
color_valid = False
brightness_valid = False
x_axis, y_axis, brightness = 0, 0, 0
if value.color is not None:
for _ in (axis for axis in value.color if not 0 <= axis <= 1):
raise ValueError
color_valid = True
x_axis, y_axis = (round(axis * 0xFFFF) for axis in value.color)
if value.brightness is not None:
if not 0 <= value.brightness <= 255:
raise ValueError
brightness_valid = True
brightness = int(value.brightness)
return (
x_axis >> 8,
x_axis & 0xFF,
y_axis >> 8,
y_axis & 0xFF,
brightness,
color_valid << 1 | brightness_valid,
)
except (ValueError, TypeError):
raise ConversionError("Could not serialize %s" % cls.__name__, value=value)
| true | true |
f73cfabfc4605a3b2f73acdd941bd367b03624d5 | 28,713 | py | Python | deepxde/model.py | mitchelldaneker/deepxde | 62e09b62ceaab6bda2ebbd02dc30ad99c2990302 | [
"Apache-2.0"
] | 955 | 2019-06-21T21:56:02.000Z | 2022-03-31T03:44:45.000Z | deepxde/model.py | mitchelldaneker/deepxde | 62e09b62ceaab6bda2ebbd02dc30ad99c2990302 | [
"Apache-2.0"
] | 517 | 2019-07-25T16:47:44.000Z | 2022-03-31T17:37:58.000Z | deepxde/model.py | mitchelldaneker/deepxde | 62e09b62ceaab6bda2ebbd02dc30ad99c2990302 | [
"Apache-2.0"
] | 374 | 2019-06-24T00:44:16.000Z | 2022-03-30T08:17:36.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__all__ = ["Model", "TrainState", "LossHistory"]
import pickle
from collections import OrderedDict
import numpy as np
from . import config
from . import display
from . import gradients as grad
from . import losses as losses_module
from . import metrics as metrics_module
from . import optimizers
from . import utils
from .backend import backend_name, tf, torch
from .callbacks import CallbackList
class Model(object):
"""A ``Model`` trains a ``NN`` on a ``Data``.
Args:
data: ``deepxde.data.Data`` instance.
net: ``deepxde.nn.NN`` instance.
"""
    def __init__(self, data, net):
        """Bind *data* and *net* and reset all training bookkeeping."""
        self.data = data
        self.net = net

        # Populated later by compile()/train().
        self.opt_name = None
        self.batch_size = None
        self.callbacks = None
        self.metrics = None
        self.external_trainable_variables = []
        self.train_state = TrainState()
        self.losshistory = LossHistory()
        self.stop_training = False

        # Backend-dependent attributes
        self.opt = None
        # Each of the following is a Tensor (tensorflow.compat.v1) or a
        # callable (tensorflow/pytorch), assigned by the _compile_* helpers.
        self.outputs = None
        self.outputs_losses = None
        self.train_step = None
        if backend_name == "tensorflow.compat.v1":
            self.sess = None
            self.saver = None
    @utils.timing
    def compile(
        self,
        optimizer,
        lr=None,
        loss="MSE",
        metrics=None,
        decay=None,
        loss_weights=None,
        external_trainable_variables=None,
    ):
        """Configures the model for training.

        Dispatches to the backend-specific ``_compile_*`` helper selected by
        ``backend_name``.

        Args:
            optimizer: String. Name of optimizer.
            lr: A Tensor or a floating point value. The learning rate. For L-BFGS, use
                `dde.optimizers.set_LBFGS_options` to set the hyperparameters.
            loss: If the same loss is used for all errors, then `loss` is a String (name
                of objective function) or objective function. If different errors use
                different losses, then `loss` is a list whose size is equal to the
                number of errors.
            metrics: List of metrics to be evaluated by the model during training.
            decay: Tuple. Name and parameters of decay to the initial learning rate. One
                of the following options:

                - `inverse time decay <https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/schedules/InverseTimeDecay>`_: ("inverse time", decay_steps, decay_rate)
                - `cosine decay <https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/schedules/CosineDecay>`_: ("cosine", decay_steps, alpha)

            loss_weights: A list specifying scalar coefficients (Python floats) to
                weight the loss contributions. The loss value that will be minimized by
                the model will then be the weighted sum of all individual losses,
                weighted by the loss_weights coefficients.
            external_trainable_variables: A trainable ``tf.Variable`` object or a list
                of trainable ``tf.Variable`` objects. The unknown parameters in the
                physics systems that need to be recovered. If the backend is
                tensorflow.compat.v1, `external_trainable_variables` is ignored, and all
                trainable ``tf.Variable`` objects are automatically collected.
        """
        print("Compiling model...")

        self.opt_name = optimizer
        loss_fn = losses_module.get(loss)
        # Normalize external_trainable_variables to a list; the v1 backend
        # collects all trainable tf.Variable objects itself, hence the warning.
        if external_trainable_variables is None:
            self.external_trainable_variables = []
        else:
            if backend_name == "tensorflow.compat.v1":
                print(
                    "Warning: For the backend tensorflow.compat.v1, "
                    "`external_trainable_variables` is ignored, and all trainable "
                    "``tf.Variable`` objects are automatically collected."
                )
            if not isinstance(external_trainable_variables, list):
                external_trainable_variables = [external_trainable_variables]
            self.external_trainable_variables = external_trainable_variables

        if backend_name == "tensorflow.compat.v1":
            self._compile_tensorflow_compat_v1(lr, loss_fn, decay, loss_weights)
        elif backend_name == "tensorflow":
            self._compile_tensorflow(lr, loss_fn, decay, loss_weights)
        elif backend_name == "pytorch":
            self._compile_pytorch(lr, loss_fn, decay, loss_weights)
        # metrics may use model variables such as self.net, and thus are instantiated
        # after backend compile.
        metrics = metrics or []
        self.metrics = [metrics_module.get(m) for m in metrics]
    def _compile_tensorflow_compat_v1(self, lr, loss_fn, decay, loss_weights):
        """tensorflow.compat.v1 backend: build the static graph, session and
        train op; self.outputs/self.outputs_losses become graph tensors."""
        if not self.net.built:
            self.net.build()
        if self.sess is None:
            self.sess = tf.Session()
            self.saver = tf.train.Saver(max_to_keep=None)

        # Data losses
        losses = self.data.losses(self.net.targets, self.net.outputs, loss_fn, self)
        if not isinstance(losses, list):
            losses = [losses]
        # Regularization loss
        if self.net.regularizer is not None:
            losses.append(tf.losses.get_regularization_loss())
        losses = tf.convert_to_tensor(losses)
        # Weighted losses
        if loss_weights is not None:
            losses *= loss_weights
            self.losshistory.set_loss_weights(loss_weights)
        total_loss = tf.math.reduce_sum(losses)

        # Tensors (evaluated later via self.sess.run in _outputs/_outputs_losses)
        self.outputs = self.net.outputs
        self.outputs_losses = [self.net.outputs, losses]
        self.train_step = optimizers.get(
            total_loss, self.opt_name, learning_rate=lr, decay=decay
        )
    def _compile_tensorflow(self, lr, loss_fn, decay, loss_weights):
        """tensorflow backend (eager + tf.function): define the closures that
        compute outputs, losses and a single optimizer step."""

        # TODO: Avoid creating multiple graphs by using tf.TensorSpec.
        @tf.function
        def outputs(training, inputs):
            return self.net(inputs, training=training)

        # TODO: Avoid creating multiple graphs by using tf.TensorSpec.
        @tf.function
        def outputs_losses(training, inputs, targets, auxiliary_vars):
            self.net.training = training
            self.net.inputs = inputs
            self.net.auxiliary_vars = auxiliary_vars
            # Don't call outputs() decorated by @tf.function above, otherwise the
            # gradient of outputs wrt inputs will be lost here.
            outputs_ = self.net(inputs, training=training)
            # Data losses
            losses = self.data.losses(targets, outputs_, loss_fn, self)
            if not isinstance(losses, list):
                losses = [losses]
            # Regularization loss
            if self.net.regularizer is not None:
                losses += [tf.math.reduce_sum(self.net.losses)]
            losses = tf.convert_to_tensor(losses)
            # Weighted losses
            if loss_weights is not None:
                losses *= loss_weights
                self.losshistory.set_loss_weights(loss_weights)
            return outputs_, losses

        opt = optimizers.get(self.opt_name, learning_rate=lr, decay=decay)

        @tf.function
        def train_step(inputs, targets, auxiliary_vars):
            # inputs and targets are np.ndarray and automatically converted to Tensor.
            with tf.GradientTape() as tape:
                losses = outputs_losses(True, inputs, targets, auxiliary_vars)[1]
                total_loss = tf.math.reduce_sum(losses)
            trainable_variables = (
                self.net.trainable_variables + self.external_trainable_variables
            )
            grads = tape.gradient(total_loss, trainable_variables)
            opt.apply_gradients(zip(grads, trainable_variables))

        # Variant used for external (TFP/scipy-style) optimizers such as L-BFGS,
        # which drive the loss closure themselves.
        def train_step_tfp(
            inputs, targets, auxiliary_vars, previous_optimizer_results=None
        ):
            def build_loss():
                losses = outputs_losses(True, inputs, targets, auxiliary_vars)[1]
                return tf.math.reduce_sum(losses)

            trainable_variables = (
                self.net.trainable_variables + self.external_trainable_variables
            )
            return opt(trainable_variables, build_loss, previous_optimizer_results)

        # Callables
        self.outputs = outputs
        self.outputs_losses = outputs_losses
        self.train_step = (
            train_step
            if not optimizers.is_external_optimizer(self.opt_name)
            else train_step_tfp
        )
    def _compile_pytorch(self, lr, loss_fn, decay, loss_weights):
        """pytorch backend: define the closures that compute outputs, losses
        and a single optimizer step (closure-based for L-BFGS support)."""

        def outputs(training, inputs):
            self.net.train(mode=training)
            with torch.no_grad():
                return self.net(torch.as_tensor(inputs))

        def outputs_losses(training, inputs, targets):
            self.net.train(mode=training)
            # Inputs need gradients so PDE residuals can differentiate wrt them.
            self.net.inputs = torch.as_tensor(inputs)
            self.net.inputs.requires_grad_()
            outputs_ = self.net(self.net.inputs)
            # Data losses
            if targets is not None:
                targets = torch.as_tensor(targets)
            losses = self.data.losses(targets, outputs_, loss_fn, self)
            if not isinstance(losses, list):
                losses = [losses]
            # TODO: regularization
            losses = torch.stack(losses)
            # Weighted losses
            if loss_weights is not None:
                losses *= torch.as_tensor(loss_weights)
                self.losshistory.set_loss_weights(loss_weights)
            # Clear cached Jacobians and Hessians.
            grad.clear()
            return outputs_, losses

        # Another way is using per-parameter options
        # https://pytorch.org/docs/stable/optim.html#per-parameter-options,
        # but not all optimizers (such as L-BFGS) support this.
        trainable_variables = (
            list(self.net.parameters()) + self.external_trainable_variables
        )
        self.opt = optimizers.get(
            trainable_variables, self.opt_name, learning_rate=lr, decay=decay
        )

        def train_step(inputs, targets):
            # The closure re-evaluates the loss so optimizers like L-BFGS can
            # call it multiple times per step.
            def closure():
                losses = outputs_losses(True, inputs, targets)[1]
                total_loss = torch.sum(losses)
                self.opt.zero_grad()
                total_loss.backward()
                return total_loss

            self.opt.step(closure)

        # Callables
        self.outputs = outputs
        self.outputs_losses = outputs_losses
        self.train_step = train_step
def _outputs(self, training, inputs):
if backend_name == "tensorflow.compat.v1":
feed_dict = self.net.feed_dict(training, inputs)
return self.sess.run(self.outputs, feed_dict=feed_dict)
# tensorflow and pytorch
outs = self.outputs(training, inputs)
return utils.to_numpy(outs)
    def _outputs_losses(self, training, inputs, targets, auxiliary_vars):
        """Run a forward pass and compute losses; returns (outputs, losses) as NumPy."""
        if backend_name == "tensorflow.compat.v1":
            # Graph mode: self.outputs_losses is a list of tensors to evaluate.
            feed_dict = self.net.feed_dict(training, inputs, targets, auxiliary_vars)
            return self.sess.run(self.outputs_losses, feed_dict=feed_dict)
        if backend_name == "tensorflow":
            outs = self.outputs_losses(training, inputs, targets, auxiliary_vars)
        elif backend_name == "pytorch":
            # TODO: auxiliary_vars
            # Disable parameter gradients during evaluation; the input tensor
            # still gets requires_grad inside outputs_losses so PDE residuals
            # can be differentiated. The flag is restored right after.
            self.net.requires_grad_(requires_grad=False)
            outs = self.outputs_losses(training, inputs, targets)
            self.net.requires_grad_()
        return utils.to_numpy(outs)
def _train_step(self, inputs, targets, auxiliary_vars):
if backend_name == "tensorflow.compat.v1":
feed_dict = self.net.feed_dict(True, inputs, targets, auxiliary_vars)
self.sess.run(self.train_step, feed_dict=feed_dict)
elif backend_name == "tensorflow":
self.train_step(inputs, targets, auxiliary_vars)
elif backend_name == "pytorch":
# TODO: auxiliary_vars
self.train_step(inputs, targets)
    @utils.timing
    def train(
        self,
        epochs=None,
        batch_size=None,
        display_every=1000,
        disregard_previous_best=False,
        callbacks=None,
        model_restore_path=None,
        model_save_path=None,
    ):
        """Trains the model for a fixed number of epochs (iterations on a dataset).

        Args:
            epochs: Integer. Number of iterations to train the model. Note: It is the
                number of iterations, not the number of epochs. Ignored by external
                optimizers (scipy/tfp L-BFGS), which run their own loop.
            batch_size: Integer or ``None``. If you solve PDEs via ``dde.data.PDE`` or
                ``dde.data.TimePDE``, do not use `batch_size`, and instead use
                `dde.callbacks.PDEResidualResampler
                <https://deepxde.readthedocs.io/en/latest/modules/deepxde.html#deepxde.callbacks.PDEResidualResampler>`_,
                see an `example <https://github.com/lululxvi/deepxde/blob/master/examples/diffusion_1d_resample.py>`_.
            display_every: Integer. Print the loss and metrics every this steps.
            disregard_previous_best: If ``True``, disregard the previous saved best
                model.
            callbacks: List of ``dde.callbacks.Callback`` instances. List of callbacks
                to apply during training.
            model_restore_path: String. Path where parameters were previously saved.
                See ``save_path`` in `tf.train.Saver.restore <https://www.tensorflow.org/api_docs/python/tf/compat/v1/train/Saver#restore>`_.
            model_save_path: String. Prefix of filenames created for the checkpoint.
                See ``save_path`` in `tf.train.Saver.save <https://www.tensorflow.org/api_docs/python/tf/compat/v1/train/Saver#save>`_.

        Returns:
            Tuple of ``(losshistory, train_state)``.
        """
        self.batch_size = batch_size
        self.callbacks = CallbackList(callbacks=callbacks)
        self.callbacks.set_model(self)
        if disregard_previous_best:
            self.train_state.disregard_best()
        if backend_name == "tensorflow.compat.v1":
            # Graph mode: initialize all variables on the first run; on later
            # runs only initialize variables created since then.
            if self.train_state.step == 0:
                print("Initializing variables...")
                self.sess.run(tf.global_variables_initializer())
            else:
                utils.guarantee_initialized_variables(self.sess)
        if model_restore_path is not None:
            self.restore(model_restore_path, verbose=1)
        print("Training model...\n")
        self.stop_training = False
        self.train_state.set_data_train(*self.data.train_next_batch(self.batch_size))
        self.train_state.set_data_test(*self.data.test())
        # Evaluate once before training so the initial state enters the history.
        self._test()
        self.callbacks.on_train_begin()
        if optimizers.is_external_optimizer(self.opt_name):
            # External optimizers drive their own iteration loop; `epochs` is
            # not used on this path.
            if backend_name == "tensorflow.compat.v1":
                self._train_tensorflow_compat_v1_scipy(display_every)
            elif backend_name == "tensorflow":
                self._train_tensorflow_tfp()
            elif backend_name == "pytorch":
                self._train_pytorch_lbfgs()
        else:
            if epochs is None:
                raise ValueError("No epochs for {}.".format(self.opt_name))
            self._train_sgd(epochs, display_every)
        self.callbacks.on_train_end()
        print("")
        display.training_display.summary(self.train_state)
        if model_save_path is not None:
            self.save(model_save_path, verbose=1)
        return self.losshistory, self.train_state
def _train_sgd(self, epochs, display_every):
for i in range(epochs):
self.callbacks.on_epoch_begin()
self.callbacks.on_batch_begin()
self.train_state.set_data_train(
*self.data.train_next_batch(self.batch_size)
)
self._train_step(
self.train_state.X_train,
self.train_state.y_train,
self.train_state.train_aux_vars,
)
self.train_state.epoch += 1
self.train_state.step += 1
if self.train_state.step % display_every == 0 or i + 1 == epochs:
self._test()
self.callbacks.on_batch_end()
self.callbacks.on_epoch_end()
if self.stop_training:
break
    def _train_tensorflow_compat_v1_scipy(self, display_every):
        """Train with a scipy-based external optimizer under the TF1 graph backend.

        The optimizer interface runs the whole optimization inside one
        ``minimize`` call, so progress is tracked from ``loss_callback``,
        which is invoked once per iteration with the fetched train loss.
        """

        def loss_callback(loss_train):
            self.train_state.epoch += 1
            self.train_state.step += 1
            if self.train_state.step % display_every == 0:
                self.train_state.loss_train = loss_train
                # Test loss/metrics are not evaluated during the scipy loop;
                # passing None makes LossHistory repeat the previous entries.
                self.train_state.loss_test = None
                self.train_state.metrics_test = None
                self.losshistory.append(
                    self.train_state.step, self.train_state.loss_train, None, None
                )
                display.training_display(self.train_state)

        self.train_state.set_data_train(*self.data.train_next_batch(self.batch_size))
        feed_dict = self.net.feed_dict(
            True,
            self.train_state.X_train,
            self.train_state.y_train,
            self.train_state.train_aux_vars,
        )
        self.train_step.minimize(
            self.sess,
            feed_dict=feed_dict,
            fetches=[self.outputs_losses[1]],
            loss_callback=loss_callback,
        )
        # One full evaluation after the optimizer returns.
        self._test()
def _train_tensorflow_tfp(self):
# There is only one optimization step. If using multiple steps with/without
# previous_optimizer_results, L-BFGS failed to reach a small error. The reason
# could be that tfp.optimizer.lbfgs_minimize will start from scratch for each
# call.
n_iter = 0
while n_iter < optimizers.LBFGS_options["maxiter"]:
self.train_state.set_data_train(
*self.data.train_next_batch(self.batch_size)
)
results = self.train_step(
self.train_state.X_train,
self.train_state.y_train,
self.train_state.train_aux_vars,
)
n_iter += results.num_iterations.numpy()
self.train_state.epoch += results.num_iterations.numpy()
self.train_state.step += results.num_iterations.numpy()
self._test()
if results.converged or results.failed:
break
    def _train_pytorch_lbfgs(self):
        """Train with torch L-BFGS until maxiter is reached or it converges.

        Each ``_train_step`` call may perform many internal L-BFGS iterations;
        the cumulative count is read back from the optimizer's state dict.
        """
        prev_n_iter = 0
        while prev_n_iter < optimizers.LBFGS_options["maxiter"]:
            self.callbacks.on_epoch_begin()
            self.callbacks.on_batch_begin()
            self.train_state.set_data_train(
                *self.data.train_next_batch(self.batch_size)
            )
            self._train_step(
                self.train_state.X_train,
                self.train_state.y_train,
                self.train_state.train_aux_vars,
            )
            # Cumulative iteration count maintained internally by the optimizer.
            n_iter = self.opt.state_dict()["state"][0]["n_iter"]
            if prev_n_iter == n_iter:
                # Converged: the optimizer performed no further iterations.
                break
            self.train_state.epoch += n_iter - prev_n_iter
            self.train_state.step += n_iter - prev_n_iter
            prev_n_iter = n_iter
            self._test()
            self.callbacks.on_batch_end()
            self.callbacks.on_epoch_end()
            if self.stop_training:
                break
    def _test(self):
        """Evaluate on the train and test sets and update history/best snapshot.

        Fills losses, predictions and metrics on ``self.train_state``, refreshes
        the best-model bookkeeping, and logs one row to ``self.losshistory``.
        """
        (
            self.train_state.y_pred_train,
            self.train_state.loss_train,
        ) = self._outputs_losses(
            True,
            self.train_state.X_train,
            self.train_state.y_train,
            self.train_state.train_aux_vars,
        )
        self.train_state.y_pred_test, self.train_state.loss_test = self._outputs_losses(
            False,
            self.train_state.X_test,
            self.train_state.y_test,
            self.train_state.test_aux_vars,
        )
        if isinstance(self.train_state.y_test, (list, tuple)):
            # Multi-output case: apply every metric to every output component.
            self.train_state.metrics_test = [
                m(self.train_state.y_test[i], self.train_state.y_pred_test[i])
                for m in self.metrics
                for i in range(len(self.train_state.y_test))
            ]
        else:
            self.train_state.metrics_test = [
                m(self.train_state.y_test, self.train_state.y_pred_test)
                for m in self.metrics
            ]
        self.train_state.update_best()
        self.losshistory.append(
            self.train_state.step,
            self.train_state.loss_train,
            self.train_state.loss_test,
            self.train_state.metrics_test,
        )
        display.training_display(self.train_state)
    def predict(self, x, operator=None, callbacks=None):
        """Generates output predictions for the input samples.

        Args:
            x: Input points; an array, or a tuple of arrays for multi-input
                networks. Cast to the configured real dtype.
            operator: Optional callable applied to the network graph. With two
                arguments it receives ``(inputs, outputs)``; with three it also
                receives the raw ``x``. If ``None``, the plain network outputs
                are returned.
            callbacks: List of ``dde.callbacks.Callback`` instances applied
                during prediction.

        Returns:
            NumPy array(s) of the predictions (or the operator values).
        """
        if isinstance(x, tuple):
            x = tuple(np.array(xi, dtype=config.real(np)) for xi in x)
        else:
            x = np.array(x, dtype=config.real(np))
        self.callbacks = CallbackList(callbacks=callbacks)
        self.callbacks.set_model(self)
        self.callbacks.on_predict_begin()
        if operator is None:
            y = self._outputs(False, x)
        else:
            # TODO: predict operator with auxiliary_vars
            if backend_name == "tensorflow.compat.v1":
                # Build the operator once on the graph tensors, then evaluate.
                if utils.get_num_args(operator) == 2:
                    op = operator(self.net.inputs, self.net.outputs)
                elif utils.get_num_args(operator) == 3:
                    op = operator(self.net.inputs, self.net.outputs, x)
                y = self.sess.run(op, feed_dict=self.net.feed_dict(False, x))
            elif backend_name == "tensorflow":
                if utils.get_num_args(operator) == 2:

                    @tf.function
                    def op(inputs):
                        y = self.net(inputs)
                        return operator(inputs, y)

                elif utils.get_num_args(operator) == 3:

                    @tf.function
                    def op(inputs):
                        y = self.net(inputs)
                        return operator(inputs, y, x)

                y = op(x)
                y = utils.to_numpy(y)
            elif backend_name == "pytorch":
                # Inputs need gradients so the operator can take derivatives.
                inputs = torch.as_tensor(x)
                inputs.requires_grad_()
                outputs = self.net(inputs)
                if utils.get_num_args(operator) == 2:
                    y = operator(inputs, outputs)
                elif utils.get_num_args(operator) == 3:
                    y = operator(inputs, outputs, x)
                y = utils.to_numpy(y)
        self.callbacks.on_predict_end()
        return y
# def evaluate(self, x, y, callbacks=None):
# """Returns the loss values & metrics values for the model in test mode."""
# raise NotImplementedError(
# "Model.evaluate to be implemented. Alternatively, use Model.predict."
# )
def state_dict(self):
"""Returns a dictionary containing all variables."""
# TODO: backend tensorflow, pytorch
if backend_name != "tensorflow.compat.v1":
raise NotImplementedError(
"state_dict hasn't been implemented for this backend."
)
destination = OrderedDict()
variables_names = [v.name for v in tf.global_variables()]
values = self.sess.run(variables_names)
for k, v in zip(variables_names, values):
destination[k] = v
return destination
def save(self, save_path, protocol="tf.train.Saver", verbose=0):
"""Saves all variables to a disk file.
Args:
protocol (string): If `protocol` is "tf.train.Saver", save using
`tf.train.Save <https://www.tensorflow.org/api_docs/python/tf/compat/v1/train/Saver#attributes>`_.
If `protocol` is "pickle", save using the Python pickle module. Only
"tf.train.Saver" protocol supports ``restore()``.
"""
# TODO: backend tensorflow, pytorch
if backend_name != "tensorflow.compat.v1":
raise NotImplementedError(
"state_dict hasn't been implemented for this backend."
)
if verbose > 0:
print(
"Epoch {}: saving model to {}-{} ...\n".format(
self.train_state.epoch, save_path, self.train_state.epoch
)
)
if protocol == "tf.train.Saver":
self.saver.save(self.sess, save_path, global_step=self.train_state.epoch)
elif protocol == "pickle":
with open("{}-{}.pkl".format(save_path, self.train_state.epoch), "wb") as f:
pickle.dump(self.state_dict(), f)
def restore(self, save_path, verbose=0):
"""Restore all variables from a disk file."""
# TODO: backend tensorflow, pytorch
if backend_name != "tensorflow.compat.v1":
raise NotImplementedError(
"state_dict hasn't been implemented for this backend."
)
if verbose > 0:
print("Restoring model from {} ...\n".format(save_path))
self.saver.restore(self.sess, save_path)
def print_model(self):
"""Prints all trainable variables."""
# TODO: backend tensorflow, pytorch
if backend_name != "tensorflow.compat.v1":
raise NotImplementedError(
"state_dict hasn't been implemented for this backend."
)
variables_names = [v.name for v in tf.trainable_variables()]
values = self.sess.run(variables_names)
for k, v in zip(variables_names, values):
print("Variable: {}, Shape: {}".format(k, v.shape))
print(v)
class TrainState(object):
    """Bookkeeping for the current batch, latest results, and best model so far."""

    def __init__(self):
        self.epoch = 0
        self.step = 0
        # Current data
        self.X_train = None
        self.y_train = None
        self.train_aux_vars = None
        self.X_test = None
        self.y_test = None
        self.test_aux_vars = None
        # Results of current step: train side
        self.loss_train = None
        self.y_pred_train = None
        # Results of current step: test side
        self.loss_test = None
        self.y_pred_test = None
        self.y_std_test = None
        self.metrics_test = None
        # Best results so far, keyed on the minimum total train loss
        self.best_step = 0
        self.best_loss_train = np.inf
        self.best_loss_test = np.inf
        self.best_y = None
        self.best_ystd = None
        self.best_metrics = None

    def set_data_train(self, X_train, y_train, train_aux_vars=None):
        """Record the current training batch."""
        self.X_train = X_train
        self.y_train = y_train
        self.train_aux_vars = train_aux_vars

    def set_data_test(self, X_test, y_test, test_aux_vars=None):
        """Record the current test set."""
        self.X_test = X_test
        self.y_test = y_test
        self.test_aux_vars = test_aux_vars

    def update_best(self):
        """Snapshot the current results if the total train loss is a new minimum."""
        current_loss = np.sum(self.loss_train)
        if current_loss < self.best_loss_train:
            self.best_step = self.step
            self.best_loss_train = current_loss
            self.best_loss_test = np.sum(self.loss_test)
            self.best_y = self.y_pred_test
            self.best_ystd = self.y_std_test
            self.best_metrics = self.metrics_test

    def disregard_best(self):
        """Forget the best loss so the next evaluation always becomes the best."""
        self.best_loss_train = np.inf

    def packed_data(self):
        """Return train/test data and best predictions; lists/tuples are hstacked."""

        def merge_values(values):
            if values is None:
                return None
            if isinstance(values, (list, tuple)):
                return np.hstack(values)
            return values

        return (
            merge_values(self.X_train),
            merge_values(self.y_train),
            merge_values(self.X_test),
            merge_values(self.y_test),
            merge_values(self.best_y),
            merge_values(self.best_ystd),
        )
class LossHistory(object):
    """Accumulates per-step train/test losses and test metrics."""

    def __init__(self):
        self.steps = []
        self.loss_train = []
        self.loss_test = []
        self.metrics_test = []
        # Scalar 1 means "unweighted" until set_loss_weights() is called.
        self.loss_weights = 1

    def set_loss_weights(self, loss_weights):
        """Store the loss weights in effect for this run."""
        self.loss_weights = loss_weights

    def append(self, step, loss_train, loss_test, metrics_test):
        """Record one evaluation; None test values repeat the previous entry."""
        self.steps.append(step)
        self.loss_train.append(loss_train)
        self.loss_test.append(
            self.loss_test[-1] if loss_test is None else loss_test
        )
        self.metrics_test.append(
            self.metrics_test[-1] if metrics_test is None else metrics_test
        )
| 39.549587 | 174 | 0.601574 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__all__ = ["Model", "TrainState", "LossHistory"]
import pickle
from collections import OrderedDict
import numpy as np
from . import config
from . import display
from . import gradients as grad
from . import losses as losses_module
from . import metrics as metrics_module
from . import optimizers
from . import utils
from .backend import backend_name, tf, torch
from .callbacks import CallbackList
class Model(object):
def __init__(self, data, net):
self.data = data
self.net = net
self.opt_name = None
self.batch_size = None
self.callbacks = None
self.metrics = None
self.external_trainable_variables = []
self.train_state = TrainState()
self.losshistory = LossHistory()
self.stop_training = False
self.opt = None
self.outputs = None
self.outputs_losses = None
self.train_step = None
if backend_name == "tensorflow.compat.v1":
self.sess = None
self.saver = None
@utils.timing
def compile(
self,
optimizer,
lr=None,
loss="MSE",
metrics=None,
decay=None,
loss_weights=None,
external_trainable_variables=None,
):
print("Compiling model...")
self.opt_name = optimizer
loss_fn = losses_module.get(loss)
if external_trainable_variables is None:
self.external_trainable_variables = []
else:
if backend_name == "tensorflow.compat.v1":
print(
"Warning: For the backend tensorflow.compat.v1, "
"`external_trainable_variables` is ignored, and all trainable "
"``tf.Variable`` objects are automatically collected."
)
if not isinstance(external_trainable_variables, list):
external_trainable_variables = [external_trainable_variables]
self.external_trainable_variables = external_trainable_variables
if backend_name == "tensorflow.compat.v1":
self._compile_tensorflow_compat_v1(lr, loss_fn, decay, loss_weights)
elif backend_name == "tensorflow":
self._compile_tensorflow(lr, loss_fn, decay, loss_weights)
elif backend_name == "pytorch":
self._compile_pytorch(lr, loss_fn, decay, loss_weights)
metrics = metrics or []
self.metrics = [metrics_module.get(m) for m in metrics]
def _compile_tensorflow_compat_v1(self, lr, loss_fn, decay, loss_weights):
if not self.net.built:
self.net.build()
if self.sess is None:
self.sess = tf.Session()
self.saver = tf.train.Saver(max_to_keep=None)
losses = self.data.losses(self.net.targets, self.net.outputs, loss_fn, self)
if not isinstance(losses, list):
losses = [losses]
if self.net.regularizer is not None:
losses.append(tf.losses.get_regularization_loss())
losses = tf.convert_to_tensor(losses)
if loss_weights is not None:
losses *= loss_weights
self.losshistory.set_loss_weights(loss_weights)
total_loss = tf.math.reduce_sum(losses)
self.outputs = self.net.outputs
self.outputs_losses = [self.net.outputs, losses]
self.train_step = optimizers.get(
total_loss, self.opt_name, learning_rate=lr, decay=decay
)
def _compile_tensorflow(self, lr, loss_fn, decay, loss_weights):
@tf.function
def outputs(training, inputs):
return self.net(inputs, training=training)
@tf.function
def outputs_losses(training, inputs, targets, auxiliary_vars):
self.net.training = training
self.net.inputs = inputs
self.net.auxiliary_vars = auxiliary_vars
# gradient of outputs wrt inputs will be lost here.
outputs_ = self.net(inputs, training=training)
# Data losses
losses = self.data.losses(targets, outputs_, loss_fn, self)
if not isinstance(losses, list):
losses = [losses]
# Regularization loss
if self.net.regularizer is not None:
losses += [tf.math.reduce_sum(self.net.losses)]
losses = tf.convert_to_tensor(losses)
# Weighted losses
if loss_weights is not None:
losses *= loss_weights
self.losshistory.set_loss_weights(loss_weights)
return outputs_, losses
opt = optimizers.get(self.opt_name, learning_rate=lr, decay=decay)
@tf.function
def train_step(inputs, targets, auxiliary_vars):
# inputs and targets are np.ndarray and automatically converted to Tensor.
with tf.GradientTape() as tape:
losses = outputs_losses(True, inputs, targets, auxiliary_vars)[1]
total_loss = tf.math.reduce_sum(losses)
trainable_variables = (
self.net.trainable_variables + self.external_trainable_variables
)
grads = tape.gradient(total_loss, trainable_variables)
opt.apply_gradients(zip(grads, trainable_variables))
def train_step_tfp(
inputs, targets, auxiliary_vars, previous_optimizer_results=None
):
def build_loss():
losses = outputs_losses(True, inputs, targets, auxiliary_vars)[1]
return tf.math.reduce_sum(losses)
trainable_variables = (
self.net.trainable_variables + self.external_trainable_variables
)
return opt(trainable_variables, build_loss, previous_optimizer_results)
# Callables
self.outputs = outputs
self.outputs_losses = outputs_losses
self.train_step = (
train_step
if not optimizers.is_external_optimizer(self.opt_name)
else train_step_tfp
)
def _compile_pytorch(self, lr, loss_fn, decay, loss_weights):
def outputs(training, inputs):
self.net.train(mode=training)
with torch.no_grad():
return self.net(torch.as_tensor(inputs))
def outputs_losses(training, inputs, targets):
self.net.train(mode=training)
self.net.inputs = torch.as_tensor(inputs)
self.net.inputs.requires_grad_()
outputs_ = self.net(self.net.inputs)
# Data losses
if targets is not None:
targets = torch.as_tensor(targets)
losses = self.data.losses(targets, outputs_, loss_fn, self)
if not isinstance(losses, list):
losses = [losses]
# TODO: regularization
losses = torch.stack(losses)
# Weighted losses
if loss_weights is not None:
losses *= torch.as_tensor(loss_weights)
self.losshistory.set_loss_weights(loss_weights)
# Clear cached Jacobians and Hessians.
grad.clear()
return outputs_, losses
# Another way is using per-parameter options
# https://pytorch.org/docs/stable/optim.html#per-parameter-options,
# but not all optimizers (such as L-BFGS) support this.
trainable_variables = (
list(self.net.parameters()) + self.external_trainable_variables
)
self.opt = optimizers.get(
trainable_variables, self.opt_name, learning_rate=lr, decay=decay
)
def train_step(inputs, targets):
def closure():
losses = outputs_losses(True, inputs, targets)[1]
total_loss = torch.sum(losses)
self.opt.zero_grad()
total_loss.backward()
return total_loss
self.opt.step(closure)
# Callables
self.outputs = outputs
self.outputs_losses = outputs_losses
self.train_step = train_step
def _outputs(self, training, inputs):
if backend_name == "tensorflow.compat.v1":
feed_dict = self.net.feed_dict(training, inputs)
return self.sess.run(self.outputs, feed_dict=feed_dict)
# tensorflow and pytorch
outs = self.outputs(training, inputs)
return utils.to_numpy(outs)
def _outputs_losses(self, training, inputs, targets, auxiliary_vars):
if backend_name == "tensorflow.compat.v1":
feed_dict = self.net.feed_dict(training, inputs, targets, auxiliary_vars)
return self.sess.run(self.outputs_losses, feed_dict=feed_dict)
if backend_name == "tensorflow":
outs = self.outputs_losses(training, inputs, targets, auxiliary_vars)
elif backend_name == "pytorch":
# TODO: auxiliary_vars
self.net.requires_grad_(requires_grad=False)
outs = self.outputs_losses(training, inputs, targets)
self.net.requires_grad_()
return utils.to_numpy(outs)
def _train_step(self, inputs, targets, auxiliary_vars):
if backend_name == "tensorflow.compat.v1":
feed_dict = self.net.feed_dict(True, inputs, targets, auxiliary_vars)
self.sess.run(self.train_step, feed_dict=feed_dict)
elif backend_name == "tensorflow":
self.train_step(inputs, targets, auxiliary_vars)
elif backend_name == "pytorch":
# TODO: auxiliary_vars
self.train_step(inputs, targets)
@utils.timing
def train(
self,
epochs=None,
batch_size=None,
display_every=1000,
disregard_previous_best=False,
callbacks=None,
model_restore_path=None,
model_save_path=None,
):
self.batch_size = batch_size
self.callbacks = CallbackList(callbacks=callbacks)
self.callbacks.set_model(self)
if disregard_previous_best:
self.train_state.disregard_best()
if backend_name == "tensorflow.compat.v1":
if self.train_state.step == 0:
print("Initializing variables...")
self.sess.run(tf.global_variables_initializer())
else:
utils.guarantee_initialized_variables(self.sess)
if model_restore_path is not None:
self.restore(model_restore_path, verbose=1)
print("Training model...\n")
self.stop_training = False
self.train_state.set_data_train(*self.data.train_next_batch(self.batch_size))
self.train_state.set_data_test(*self.data.test())
self._test()
self.callbacks.on_train_begin()
if optimizers.is_external_optimizer(self.opt_name):
if backend_name == "tensorflow.compat.v1":
self._train_tensorflow_compat_v1_scipy(display_every)
elif backend_name == "tensorflow":
self._train_tensorflow_tfp()
elif backend_name == "pytorch":
self._train_pytorch_lbfgs()
else:
if epochs is None:
raise ValueError("No epochs for {}.".format(self.opt_name))
self._train_sgd(epochs, display_every)
self.callbacks.on_train_end()
print("")
display.training_display.summary(self.train_state)
if model_save_path is not None:
self.save(model_save_path, verbose=1)
return self.losshistory, self.train_state
def _train_sgd(self, epochs, display_every):
for i in range(epochs):
self.callbacks.on_epoch_begin()
self.callbacks.on_batch_begin()
self.train_state.set_data_train(
*self.data.train_next_batch(self.batch_size)
)
self._train_step(
self.train_state.X_train,
self.train_state.y_train,
self.train_state.train_aux_vars,
)
self.train_state.epoch += 1
self.train_state.step += 1
if self.train_state.step % display_every == 0 or i + 1 == epochs:
self._test()
self.callbacks.on_batch_end()
self.callbacks.on_epoch_end()
if self.stop_training:
break
def _train_tensorflow_compat_v1_scipy(self, display_every):
def loss_callback(loss_train):
self.train_state.epoch += 1
self.train_state.step += 1
if self.train_state.step % display_every == 0:
self.train_state.loss_train = loss_train
self.train_state.loss_test = None
self.train_state.metrics_test = None
self.losshistory.append(
self.train_state.step, self.train_state.loss_train, None, None
)
display.training_display(self.train_state)
self.train_state.set_data_train(*self.data.train_next_batch(self.batch_size))
feed_dict = self.net.feed_dict(
True,
self.train_state.X_train,
self.train_state.y_train,
self.train_state.train_aux_vars,
)
self.train_step.minimize(
self.sess,
feed_dict=feed_dict,
fetches=[self.outputs_losses[1]],
loss_callback=loss_callback,
)
self._test()
def _train_tensorflow_tfp(self):
# There is only one optimization step. If using multiple steps with/without
# previous_optimizer_results, L-BFGS failed to reach a small error. The reason
# could be that tfp.optimizer.lbfgs_minimize will start from scratch for each
# call.
n_iter = 0
while n_iter < optimizers.LBFGS_options["maxiter"]:
self.train_state.set_data_train(
*self.data.train_next_batch(self.batch_size)
)
results = self.train_step(
self.train_state.X_train,
self.train_state.y_train,
self.train_state.train_aux_vars,
)
n_iter += results.num_iterations.numpy()
self.train_state.epoch += results.num_iterations.numpy()
self.train_state.step += results.num_iterations.numpy()
self._test()
if results.converged or results.failed:
break
def _train_pytorch_lbfgs(self):
prev_n_iter = 0
while prev_n_iter < optimizers.LBFGS_options["maxiter"]:
self.callbacks.on_epoch_begin()
self.callbacks.on_batch_begin()
self.train_state.set_data_train(
*self.data.train_next_batch(self.batch_size)
)
self._train_step(
self.train_state.X_train,
self.train_state.y_train,
self.train_state.train_aux_vars,
)
n_iter = self.opt.state_dict()["state"][0]["n_iter"]
if prev_n_iter == n_iter:
# Converged
break
self.train_state.epoch += n_iter - prev_n_iter
self.train_state.step += n_iter - prev_n_iter
prev_n_iter = n_iter
self._test()
self.callbacks.on_batch_end()
self.callbacks.on_epoch_end()
if self.stop_training:
break
def _test(self):
(
self.train_state.y_pred_train,
self.train_state.loss_train,
) = self._outputs_losses(
True,
self.train_state.X_train,
self.train_state.y_train,
self.train_state.train_aux_vars,
)
self.train_state.y_pred_test, self.train_state.loss_test = self._outputs_losses(
False,
self.train_state.X_test,
self.train_state.y_test,
self.train_state.test_aux_vars,
)
if isinstance(self.train_state.y_test, (list, tuple)):
self.train_state.metrics_test = [
m(self.train_state.y_test[i], self.train_state.y_pred_test[i])
for m in self.metrics
for i in range(len(self.train_state.y_test))
]
else:
self.train_state.metrics_test = [
m(self.train_state.y_test, self.train_state.y_pred_test)
for m in self.metrics
]
self.train_state.update_best()
self.losshistory.append(
self.train_state.step,
self.train_state.loss_train,
self.train_state.loss_test,
self.train_state.metrics_test,
)
display.training_display(self.train_state)
def predict(self, x, operator=None, callbacks=None):
if isinstance(x, tuple):
x = tuple(np.array(xi, dtype=config.real(np)) for xi in x)
else:
x = np.array(x, dtype=config.real(np))
self.callbacks = CallbackList(callbacks=callbacks)
self.callbacks.set_model(self)
self.callbacks.on_predict_begin()
if operator is None:
y = self._outputs(False, x)
else:
# TODO: predict operator with auxiliary_vars
if backend_name == "tensorflow.compat.v1":
if utils.get_num_args(operator) == 2:
op = operator(self.net.inputs, self.net.outputs)
elif utils.get_num_args(operator) == 3:
op = operator(self.net.inputs, self.net.outputs, x)
y = self.sess.run(op, feed_dict=self.net.feed_dict(False, x))
elif backend_name == "tensorflow":
if utils.get_num_args(operator) == 2:
@tf.function
def op(inputs):
y = self.net(inputs)
return operator(inputs, y)
elif utils.get_num_args(operator) == 3:
@tf.function
def op(inputs):
y = self.net(inputs)
return operator(inputs, y, x)
y = op(x)
y = utils.to_numpy(y)
elif backend_name == "pytorch":
inputs = torch.as_tensor(x)
inputs.requires_grad_()
outputs = self.net(inputs)
if utils.get_num_args(operator) == 2:
y = operator(inputs, outputs)
elif utils.get_num_args(operator) == 3:
y = operator(inputs, outputs, x)
y = utils.to_numpy(y)
self.callbacks.on_predict_end()
return y
# def evaluate(self, x, y, callbacks=None):
# """Returns the loss values & metrics values for the model in test mode."""
# raise NotImplementedError(
# "Model.evaluate to be implemented. Alternatively, use Model.predict."
# )
def state_dict(self):
# TODO: backend tensorflow, pytorch
if backend_name != "tensorflow.compat.v1":
raise NotImplementedError(
"state_dict hasn't been implemented for this backend."
)
destination = OrderedDict()
variables_names = [v.name for v in tf.global_variables()]
values = self.sess.run(variables_names)
for k, v in zip(variables_names, values):
destination[k] = v
return destination
def save(self, save_path, protocol="tf.train.Saver", verbose=0):
if backend_name != "tensorflow.compat.v1":
raise NotImplementedError(
"state_dict hasn't been implemented for this backend."
)
if verbose > 0:
print(
"Epoch {}: saving model to {}-{} ...\n".format(
self.train_state.epoch, save_path, self.train_state.epoch
)
)
if protocol == "tf.train.Saver":
self.saver.save(self.sess, save_path, global_step=self.train_state.epoch)
elif protocol == "pickle":
with open("{}-{}.pkl".format(save_path, self.train_state.epoch), "wb") as f:
pickle.dump(self.state_dict(), f)
def restore(self, save_path, verbose=0):
# TODO: backend tensorflow, pytorch
if backend_name != "tensorflow.compat.v1":
raise NotImplementedError(
"state_dict hasn't been implemented for this backend."
)
if verbose > 0:
print("Restoring model from {} ...\n".format(save_path))
self.saver.restore(self.sess, save_path)
def print_model(self):
if backend_name != "tensorflow.compat.v1":
raise NotImplementedError(
"state_dict hasn't been implemented for this backend."
)
variables_names = [v.name for v in tf.trainable_variables()]
values = self.sess.run(variables_names)
for k, v in zip(variables_names, values):
print("Variable: {}, Shape: {}".format(k, v.shape))
print(v)
class TrainState(object):
    """Bookkeeping for the current batch, latest results, and best model so far."""

    def __init__(self):
        self.epoch = 0
        self.step = 0
        # Current data
        self.X_train = None
        self.y_train = None
        self.train_aux_vars = None
        self.X_test = None
        self.y_test = None
        self.test_aux_vars = None
        # Results of current step: train side
        self.loss_train = None
        self.y_pred_train = None
        # Results of current step: test side
        self.loss_test = None
        self.y_pred_test = None
        self.y_std_test = None
        self.metrics_test = None
        # Best results so far, keyed on the minimum total train loss
        self.best_step = 0
        self.best_loss_train = np.inf
        self.best_loss_test = np.inf
        self.best_y = None
        self.best_ystd = None
        self.best_metrics = None

    def set_data_train(self, X_train, y_train, train_aux_vars=None):
        """Record the current training batch."""
        self.X_train = X_train
        self.y_train = y_train
        self.train_aux_vars = train_aux_vars

    def set_data_test(self, X_test, y_test, test_aux_vars=None):
        """Record the current test set."""
        self.X_test = X_test
        self.y_test = y_test
        self.test_aux_vars = test_aux_vars

    def update_best(self):
        """Snapshot the current results if the total train loss is a new minimum."""
        current_loss = np.sum(self.loss_train)
        if current_loss < self.best_loss_train:
            self.best_step = self.step
            self.best_loss_train = current_loss
            self.best_loss_test = np.sum(self.loss_test)
            self.best_y = self.y_pred_test
            self.best_ystd = self.y_std_test
            self.best_metrics = self.metrics_test

    def disregard_best(self):
        """Forget the best loss so the next evaluation always becomes the best."""
        self.best_loss_train = np.inf

    def packed_data(self):
        """Return train/test data and best predictions; lists/tuples are hstacked."""

        def merge_values(values):
            if values is None:
                return None
            if isinstance(values, (list, tuple)):
                return np.hstack(values)
            return values

        return (
            merge_values(self.X_train),
            merge_values(self.y_train),
            merge_values(self.X_test),
            merge_values(self.y_test),
            merge_values(self.best_y),
            merge_values(self.best_ystd),
        )
class LossHistory(object):
    """Accumulates per-step train/test losses and test metrics."""

    def __init__(self):
        self.steps = []
        self.loss_train = []
        self.loss_test = []
        self.metrics_test = []
        # Scalar 1 means "unweighted" until set_loss_weights() is called.
        self.loss_weights = 1

    def set_loss_weights(self, loss_weights):
        """Store the loss weights in effect for this run."""
        self.loss_weights = loss_weights

    def append(self, step, loss_train, loss_test, metrics_test):
        """Record one evaluation; None test values repeat the previous entry."""
        self.steps.append(step)
        self.loss_train.append(loss_train)
        self.loss_test.append(
            self.loss_test[-1] if loss_test is None else loss_test
        )
        self.metrics_test.append(
            self.metrics_test[-1] if metrics_test is None else metrics_test
        )
| true | true |
f73cfaefb7a4527a990f4d7515e5b63e30ce4a25 | 1,694 | py | Python | utils/util.py | lahu2046/textAI | a000a871ef13fa9b65183f9f3605dd4ef58b49f3 | [
"Apache-2.0"
] | null | null | null | utils/util.py | lahu2046/textAI | a000a871ef13fa9b65183f9f3605dd4ef58b49f3 | [
"Apache-2.0"
] | 10 | 2020-01-28T22:20:46.000Z | 2022-02-10T00:29:15.000Z | utils/util.py | lahu2046/textAI | a000a871ef13fa9b65183f9f3605dd4ef58b49f3 | [
"Apache-2.0"
] | null | null | null | import requests
import platform
import os
# Host OS name ("Windows", "Linux", ...) used to pick shell syntax below.
# NOTE(review): this module-level name shadows the stdlib `sys` module; it is
# harmless only because `sys` is never imported in this file — rename if it
# ever is.
sys = platform.system()
# Shell command templates for dataset preparation, training, and generation.
dataset_cmd = "python dataset/prepare_data.py -fold=1 -num_folds=1024 -base_fn=dataset/tf/data_1024 -input_fn=dataset/data -max_seq_length=1024 > tf.log"
train_cmd = "python train/train1.py --config_file=configs/mega.json --input_file=dataset/tf/data_1024train_wiki19_0001.tfrecord --output_dir=models/mega/ --max_seq_length=1024 --train_batch_size=512 --learning_rate=1e-4 --num_train_steps=30000 --num_warmup_steps=10000 --save_checkpoints_steps=1000 --iterations_per_loop=1000 --use_tpu=False --tpu_name=None --num_tpu_cores=256 --init_checkpoint=models/mega/model.ckpt-100000 > train.log"
generate_cmd = "python scripts/interactive_conditional_samples.py -model_config_fn configs/mega.json -model_ckpt models/mega/model.ckpt-100000 -eos_token 511 -min_len {} -samples {} -inp_text {} -id {} -type {}"
# Platform-specific command for clearing the local dataset directory.
if sys == "Windows":
    rm_file_cmd = r"del /q dataset\data\*"
else:
    rm_file_cmd = "rm -f dataset/data/*"
def bytes2str(data):
    """Decode bytes to str (utf-8 first, gbk fallback) and strip whitespace.

    Non-bytes input is returned unchanged (including ``None``).
    """
    if isinstance(data, bytes):
        try:
            data = data.decode('utf-8')
        except UnicodeDecodeError:
            # Narrowed from a bare `except:`; files from Chinese Windows
            # hosts are commonly gbk-encoded.
            data = data.decode('gbk')
        data = data.strip()
    return data
def get_cmd_popen(cmd):
    """Run a shell command and block until it finishes."""
    # NOTE(review): despite the name, this uses os.system, not subprocess.Popen,
    # so the return code and output are discarded.
    os.system(cmd)
def post_requests(json_data):
    """POST a result payload to the AI result endpoint; return the response."""
    return requests.post("http://10.huangtongwei.cn:8099/v1/ai/result", json=json_data)
def get_requests(id_num, files):
    """Download each comma-separated file of a task into dataset/data/."""
    for name in files.split(","):
        print("file name is ", name)
        url = "http://10.huangtongwei.cn:8099/v1/ai/task/file/{}/{}".format(id_num, name)
        response = requests.get(url)
        with open("dataset/data/{}".format(name), 'w', encoding="utf-8") as fh:
            fh.write(response.text)
| 41.317073 | 438 | 0.70425 | import requests
import platform
import os
sys = platform.system()
dataset_cmd = "python dataset/prepare_data.py -fold=1 -num_folds=1024 -base_fn=dataset/tf/data_1024 -input_fn=dataset/data -max_seq_length=1024 > tf.log"
train_cmd = "python train/train1.py --config_file=configs/mega.json --input_file=dataset/tf/data_1024train_wiki19_0001.tfrecord --output_dir=models/mega/ --max_seq_length=1024 --train_batch_size=512 --learning_rate=1e-4 --num_train_steps=30000 --num_warmup_steps=10000 --save_checkpoints_steps=1000 --iterations_per_loop=1000 --use_tpu=False --tpu_name=None --num_tpu_cores=256 --init_checkpoint=models/mega/model.ckpt-100000 > train.log"
generate_cmd = "python scripts/interactive_conditional_samples.py -model_config_fn configs/mega.json -model_ckpt models/mega/model.ckpt-100000 -eos_token 511 -min_len {} -samples {} -inp_text {} -id {} -type {}"
if sys == "Windows":
rm_file_cmd = r"del /q dataset\data\*"
else:
rm_file_cmd = "rm -f dataset/data/*"
def bytes2str(data):
if isinstance(data, bytes):
try:
data = data.decode('utf-8')
except:
data = data.decode('gbk')
data = data.strip()
return data
def get_cmd_popen(cmd):
os.system(cmd)
def post_requests(json_data):
return requests.post("http://10.huangtongwei.cn:8099/v1/ai/result", json=json_data)
def get_requests(id_num, files):
file_list = files.split(",")
for file in file_list:
print("file name is ", file)
rs = requests.get("http://10.huangtongwei.cn:8099/v1/ai/task/file/{}/{}".format(id_num, file))
with open("dataset/data/{}".format(file), 'w', encoding="utf-8") as f:
f.write(rs.text)
| true | true |
f73cfafeeb37a3a672b842662fff025a873092cf | 10,680 | py | Python | Logchart.py | kinghows/Logchart | fd26433a2b29883a32c4ff25dfce83a15799caec | [
"MIT"
] | 1 | 2022-01-24T03:58:16.000Z | 2022-01-24T03:58:16.000Z | Logchart.py | kinghows/Logchart | fd26433a2b29883a32c4ff25dfce83a15799caec | [
"MIT"
] | null | null | null | Logchart.py | kinghows/Logchart | fd26433a2b29883a32c4ff25dfce83a15799caec | [
"MIT"
] | null | null | null | #!/usr/local/bin/python
# coding: utf-8
# Logchart V1.0.0 for python3
# Log Chart
# Copyright (C) 2017-2017 Kinghow - Kinghow@hotmail.com
# Git repository available at https://github.com/kinghows/Logchart
import getopt
import sys
import configparser
import os
from pyecharts import options as opts
from pyecharts.globals import ThemeType
from pyecharts.charts import Page
from pyecharts.charts import Tab
from pyecharts.charts import Line
def chart(chart_type,title,xlist,ylist,datas,style,themetype):
    """Build a pyecharts chart for one metric series.

    chart_type: only 'line' is handled; any other value returns None.
    title:      name of the series to plot (expected to be one of `ylist`).
    xlist:      x-axis labels (timestamps).
    ylist:      all metric names present in `datas`.
    datas:      rows of [x, metric_name, value].
    style:      presentation options; missing keys receive defaults via
                setdefault (so the dict is mutated in place).
    themetype:  pyecharts InitOpts carrying the theme and chart size.
    """
    # Group values per metric name (as strings), preserving row order.
    zdict={}
    for i in range(len(ylist)):
        zdict[ylist[i]]=[]
    for row in datas:
        zdict[row[1]].append(str(row[2]))
    if chart_type == 'line': # line chart
        if style.setdefault('toolbox_opts_is_show',False):
            toolbox_opts=opts.ToolboxOpts()
        else:
            toolbox_opts=None
        # Zoom widget: horizontal/vertical slider, mouse-wheel ("inside"), or none.
        if style.setdefault('datazoom_opts',None)=='horizontal':
            datazoom_opts=opts.DataZoomOpts()
        elif style.setdefault('datazoom_opts',None)=='vertical':
            datazoom_opts=opts.DataZoomOpts(orient="vertical")
        elif style.setdefault('datazoom_opts',None)=='inside':
            datazoom_opts=opts.DataZoomOpts(type_="inside")
        else:
            datazoom_opts=None
        c = Line(themetype)
        c.set_global_opts(title_opts=opts.TitleOpts(title=title,pos_top=style.setdefault('title_pos_top',None),
                                                    pos_right=style.setdefault('title_pos_right',None)),
                          legend_opts=opts.LegendOpts(pos_top=style.setdefault('legend_pos_top',None),
                                                      pos_left=style.setdefault('legend_pos_left',None),
                                                      pos_right=style.setdefault('legend_pos_right',None)),
                          toolbox_opts=toolbox_opts,
                          datazoom_opts=datazoom_opts,
                          xaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(rotate=style.setdefault('yaxis_opts_rotate',0),
                                                                                 formatter=style.setdefault('xaxis_opts_formatter',"{value}")),
                                                   axistick_opts=opts.AxisTickOpts(is_align_with_label=True),
                                                   is_scale=False,
                                                   boundary_gap=False,),
                          yaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(formatter=style.setdefault('yaxis_opts_formatter',"{value}"))),
                          )
        c.add_xaxis(xlist)
        # Only the series whose name matches `title` is added to the chart.
        for i in range(len(ylist)):
            name = ylist[i]
            if title == name :
                c.add_yaxis(name, zdict[name],
                            markpoint_opts=opts.MarkPointOpts(data=[opts.MarkPointItem(type_=style.setdefault('type_',"max"))]),
                            is_smooth=style.setdefault('is_smooth',True),
                            label_opts=opts.LabelOpts(is_show=style.setdefault('is_show',False)),
                            areastyle_opts=opts.AreaStyleOpts(opacity=style.setdefault('opacity',0))
                            )
        return c
if __name__=="__main__":
    # Defaults; overridden by command-line options and/or Logchart.ini.
    config_file = "Logchart.ini"
    logfile_directory = ""
    monitor_index = ["cn_flush_bio", "total write bio", "total read bio"]
    mutli_chart_type = "tab"
    style = {'themetype': 'LIGHT', 'is_smooth': True, 'is_show': False,
             'opacity': 0, 'datazoom_opts': 'inside', 'toolbox_opts_is_show': True}
    # -d <log directory>   -m <comma-separated metric names>
    opt, args = getopt.getopt(sys.argv[1:], "d:m:")
    for o, v in opt:
        if o == "-d":
            logfile_directory = v
        elif o == "-m":
            monitor_index = v.split(",")
    # Fall back to the INI file when no directory was given on the command line.
    if len(logfile_directory) == 0 and os.path.exists(config_file):
        config = configparser.ConfigParser()
        config.read(config_file)
        logfile_directory = config.get("set", "logfile_directory")
        monitor_index = config.get("set", "monitor_index").split(",")
        mutli_chart_type = config.get("set", "mutli_chart_type")
        try:
            v = config.get("set", "chartstyle")
        except configparser.Error:
            # chartstyle is optional; keep the built-in defaults.
            pass
        else:
            if v != '':
                # NOTE: eval of configuration text -- only use trusted INI files.
                style = eval(v)
    # Resolve the configured theme name to a pyecharts ThemeType attribute.
    # This replaces a 15-branch if/elif chain; unknown names now fall back to
    # WHITE instead of leaving `themetype` unset (which raised NameError).
    style_themetype = style.setdefault('themetype', 'WHITE')
    themetype = opts.InitOpts(
        theme=getattr(ThemeType, style_themetype, ThemeType.WHITE),
        width=style.setdefault('Initopts_width', "1000px"),
        height=style.setdefault('Initopts_height', "600px"),
    )
    if os.path.exists(logfile_directory):
        for logfilename in os.listdir(logfile_directory):
            # Only raw debug logs; skip rendered .html output and .gz archives.
            if ("omv-debugonoff.log" not in logfilename
                    or ".html" in logfilename or ".gz" in logfilename):
                continue
            logfile = os.path.join(logfile_directory, logfilename)
            htmlfile = logfile + '.html'
            if os.path.exists(htmlfile):
                continue  # already rendered
            page = Page() if mutli_chart_type == 'page' else Tab()
            xlist = []
            datalist = []
            cn_flush_bio_p = 0  # previous cumulative counter, for per-interval deltas
            with open(logfile, 'r+') as srcFile:
                for line in srcFile:
                    x = line[11:19]  # HH:MM:SS timestamp column
                    xlist.append(x)
                    for index in monitor_index:
                        if index not in line:
                            continue
                        if index == "cn_flush_bio":
                            # Cumulative counter: chart the delta versus the
                            # previous sample (the first sample plots as 0).
                            tempv = line[line.index(index) + 14:]
                            if ":" in tempv:
                                cn_flush_bio_c = int(tempv[:tempv.index(":")])
                                if cn_flush_bio_p != 0:
                                    keyv = cn_flush_bio_c - cn_flush_bio_p
                                else:
                                    keyv = 0
                            else:
                                cn_flush_bio_c = 0
                                keyv = 0
                            cn_flush_bio_p = cn_flush_bio_c
                        else:
                            # Plain gauge: value sits between "MB)" and "mpage/perquery".
                            tempv = line[line.index(index):]
                            keyv = int(tempv[tempv.index("MB)") + 4:tempv.index("mpage/perquery") - 1])
                        datalist.append([x, index, keyv])
            # One chart per monitored metric, collected into a Page or Tab.
            for index in monitor_index:
                c = chart('line', index, xlist, monitor_index, datalist, style, themetype)
                if mutli_chart_type == 'page':
                    page.add(c)
                else:
                    page.add(c, index)
            page.render(path=htmlfile)
    else:
        print('Please check '+logfile_directory+' exists!')
| 55.625 | 176 | 0.56985 |
import getopt
import sys
import configparser
import os
from pyecharts import options as opts
from pyecharts.globals import ThemeType
from pyecharts.charts import Page
from pyecharts.charts import Tab
from pyecharts.charts import Line
def chart(chart_type,title,xlist,ylist,datas,style,themetype):
zdict={}
for i in range(len(ylist)):
zdict[ylist[i]]=[]
for row in datas:
zdict[row[1]].append(str(row[2]))
if chart_type == 'line':
if style.setdefault('toolbox_opts_is_show',False):
toolbox_opts=opts.ToolboxOpts()
else:
toolbox_opts=None
if style.setdefault('datazoom_opts',None)=='horizontal':
datazoom_opts=opts.DataZoomOpts()
elif style.setdefault('datazoom_opts',None)=='vertical':
datazoom_opts=opts.DataZoomOpts(orient="vertical")
elif style.setdefault('datazoom_opts',None)=='inside':
datazoom_opts=opts.DataZoomOpts(type_="inside")
else:
datazoom_opts=None
c = Line(themetype)
c.set_global_opts(title_opts=opts.TitleOpts(title=title,pos_top=style.setdefault('title_pos_top',None),
pos_right=style.setdefault('title_pos_right',None)),
legend_opts=opts.LegendOpts(pos_top=style.setdefault('legend_pos_top',None),
pos_left=style.setdefault('legend_pos_left',None),
pos_right=style.setdefault('legend_pos_right',None)),
toolbox_opts=toolbox_opts,
datazoom_opts=datazoom_opts,
xaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(rotate=style.setdefault('yaxis_opts_rotate',0),
formatter=style.setdefault('xaxis_opts_formatter',"{value}")),
axistick_opts=opts.AxisTickOpts(is_align_with_label=True),
is_scale=False,
boundary_gap=False,),
yaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(formatter=style.setdefault('yaxis_opts_formatter',"{value}"))),
)
c.add_xaxis(xlist)
for i in range(len(ylist)):
name = ylist[i]
if title == name :
c.add_yaxis(name, zdict[name],
markpoint_opts=opts.MarkPointOpts(data=[opts.MarkPointItem(type_=style.setdefault('type_',"max"))]),
is_smooth=style.setdefault('is_smooth',True),
label_opts=opts.LabelOpts(is_show=style.setdefault('is_show',False)),
areastyle_opts=opts.AreaStyleOpts(opacity=style.setdefault('opacity',0))
)
return c
if __name__=="__main__":
config_file="Logchart.ini"
logfile_directory = ""
monitor_index =["cn_flush_bio","total write bio","total read bio"]
mutli_chart_type ="tab"
style = {'themetype':'LIGHT','is_smooth':True,'is_show':False,'opacity':0,'datazoom_opts':'inside','toolbox_opts_is_show':True}
opt, args = getopt.getopt(sys.argv[1:], "d:m:")
for o,v in opt:
if o == "-d":
logfile_directory = v
elif o == "-m":
monitor_index = v.split(",")
if len(logfile_directory)==0 and os.path.exists(config_file):
v =''
config = configparser.ConfigParser()
config.read(config_file)
logfile_directory = config.get("set","logfile_directory")
monitor_index = config.get("set","monitor_index").split(",")
mutli_chart_type = config.get("set", "mutli_chart_type")
try:
v=config.get("set","chartstyle")
except:
pass
else:
if v != '':
style = eval(v)
style_themetype=style.setdefault('themetype','WHITE')
if style_themetype=='WHITE':
themetype=init_opts=opts.InitOpts(theme=ThemeType.WHITE,width=style.setdefault('Initopts_width',"1000px"), height=style.setdefault('Initopts_height',"600px"))
elif style_themetype=='LIGHT':
themetype=init_opts=opts.InitOpts(theme=ThemeType.LIGHT,width=style.setdefault('Initopts_width',"1000px"), height=style.setdefault('Initopts_height',"600px"))
elif style_themetype=='DARK':
themetype=init_opts=opts.InitOpts(theme=ThemeType.DARK,width=style.setdefault('Initopts_width',"1000px"), height=style.setdefault('Initopts_height',"600px"))
elif style_themetype=='CHALK':
themetype=init_opts=opts.InitOpts(theme=ThemeType.CHALK,width=style.setdefault('Initopts_width',"1000px"), height=style.setdefault('Initopts_height',"600px"))
elif style_themetype=='ESSOS':
themetype=init_opts=opts.InitOpts(theme=ThemeType.ESSOS,width=style.setdefault('Initopts_width',"1000px"), height=style.setdefault('Initopts_height',"600px"))
elif style_themetype=='INFOGRAPHIC':
themetype=init_opts=opts.InitOpts(theme=ThemeType.INFOGRAPHIC,width=style.setdefault('Initopts_width',"1000px"), height=style.setdefault('Initopts_height',"600px"))
elif style_themetype=='MACARONS':
themetype=init_opts=opts.InitOpts(theme=ThemeType.MACARONS,width=style.setdefault('Initopts_width',"1000px"), height=style.setdefault('Initopts_height',"600px"))
elif style_themetype=='PURPLE_PASSION':
themetype=init_opts=opts.InitOpts(theme=ThemeType.PURPLE_PASSION,width=style.setdefault('Initopts_width',"1000px"), height=style.setdefault('Initopts_height',"600px"))
elif style_themetype=='ROMA':
themetype=init_opts=opts.InitOpts(theme=ThemeType.ROMA,width=style.setdefault('Initopts_width',"1000px"), height=style.setdefault('Initopts_height',"600px"))
elif style_themetype=='ROMANTIC':
themetype=init_opts=opts.InitOpts(theme=ThemeType.ROMANTIC,width=style.setdefault('Initopts_width',"1000px"), height=style.setdefault('Initopts_height',"600px"))
elif style_themetype=='SHINE':
themetype=init_opts=opts.InitOpts(theme=ThemeType.SHINE,width=style.setdefault('Initopts_width',"1000px"), height=style.setdefault('Initopts_height',"600px"))
elif style_themetype=='VINTAGE':
themetype=init_opts=opts.InitOpts(theme=ThemeType.VINTAGE,width=style.setdefault('Initopts_width',"1000px"), height=style.setdefault('Initopts_height',"600px"))
elif style_themetype=='WALDEN':
themetype=init_opts=opts.InitOpts(theme=ThemeType.WALDEN,width=style.setdefault('Initopts_width',"1000px"), height=style.setdefault('Initopts_height',"600px"))
elif style_themetype=='WESTEROS':
themetype=init_opts=opts.InitOpts(theme=ThemeType.WESTEROS,width=style.setdefault('Initopts_width',"1000px"), height=style.setdefault('Initopts_height',"600px"))
elif style_themetype=='WONDERLAND':
themetype=init_opts=opts.InitOpts(theme=ThemeType.WONDERLAND,width=style.setdefault('Initopts_width',"1000px"), height=style.setdefault('Initopts_height',"600px"))
if os.path.exists(logfile_directory):
filenames=os.listdir(logfile_directory)
for logfilename in filenames:
if "omv-debugonoff.log" in logfilename and ".html" not in logfilename and ".gz" not in logfilename:
logfile = os.path.join(logfile_directory,logfilename)
htmlfile = logfile + '.html'
if not os.path.exists(htmlfile):
if mutli_chart_type=='page':
page = Page()
else:
page = Tab()
xlist=[]
datalist=[]
cn_flush_bio_p = 0
srcFile = open(logfile, 'r+')
lines = srcFile.readlines()
for line in lines:
x = line[11:19]
xlist.append(x)
for index in monitor_index:
if index in line:
if index =="cn_flush_bio":
tempv = line[line.index(index)+14:]
if ":" in tempv :
cn_flush_bio_c = int(tempv[:tempv.index(":")])
if cn_flush_bio_p != 0 :
keyv = cn_flush_bio_c-cn_flush_bio_p
else:
keyv = 0
else:
cn_flush_bio_c = 0
keyv = 0
cn_flush_bio_p = cn_flush_bio_c
else:
tempv = line[line.index(index):]
keyv = int(tempv[tempv.index("MB)")+4:tempv.index("mpage/perquery")-1])
data = []
data.append(x)
data.append(index)
data.append(keyv)
datalist.append(data)
srcFile.close()
for index in monitor_index:
if mutli_chart_type=='page':
page.add(chart('line',index,xlist,monitor_index,datalist,style,themetype))
else:
page.add(chart('line',index,xlist,monitor_index,datalist,style,themetype),index)
page.render(path=htmlfile)
else:
print('Please check '+logfile_directory+' exists!')
| true | true |
f73cfb931582ea2bcc2caff3e32dd7386747f99b | 1,061 | py | Python | tests.py | carlosperate/microrepl | 65881ac592f69f02c0247536c92896a3ea45d02c | [
"Apache-2.0"
] | 32 | 2015-10-23T10:23:18.000Z | 2021-09-23T14:19:07.000Z | tests.py | carlosperate/microrepl | 65881ac592f69f02c0247536c92896a3ea45d02c | [
"Apache-2.0"
] | 17 | 2015-11-12T19:54:46.000Z | 2018-07-13T10:05:43.000Z | tests.py | carlosperate/microrepl | 65881ac592f69f02c0247536c92896a3ea45d02c | [
"Apache-2.0"
] | 21 | 2015-09-21T10:02:56.000Z | 2020-07-23T23:18:02.000Z | import pytest
import io
from unittest.mock import patch
import os
import tempfile
from microrepl import connect_miniterm
@pytest.fixture
def fake_stderr():
    """Replace sys.stderr with a StringIO so tests can inspect error output."""
    # pytest.yield_fixture is deprecated (removed in modern pytest);
    # plain pytest.fixture has supported yield fixtures since pytest 3.0.
    buf = io.StringIO()
    with patch('sys.stderr', buf):
        yield buf
@pytest.fixture
def fake_sys_exit():
    """Patch sys.exit so the code under test cannot terminate the test run."""
    # pytest.fixture replaces the deprecated pytest.yield_fixture.
    with patch('sys.exit', autospec=True) as fake_exit:
        yield fake_exit
def test_connect_miniterm_suggests_solution_to_perms_problem_on_linux(fake_stderr, fake_sys_exit):
    """An unreadable port should produce an error pointing Linux users at
    the dialout-group fix.

    A chmod-000 temp file stands in for an unreadable serial device.
    NOTE(review): assumes the test does not run as root (root bypasses file
    permission bits) -- confirm in CI.
    """
    nonaccessible_port = tempfile.NamedTemporaryFile()
    os.chmod(nonaccessible_port.name, 0o000)
    connect_miniterm(nonaccessible_port.name)
    error_message = fake_stderr.getvalue()
    assert "Found micro:bit, but could not connect." in error_message
    assert "[Errno 13] could not open port" in error_message
    assert "Permission denied: {port!r}".format(port=nonaccessible_port.name) in error_message
    assert 'On linux, try adding yourself to the "dialout" group' in error_message
    assert 'sudo usermod -a -G dialout <your-username>' in error_message
| 34.225806 | 98 | 0.769086 | import pytest
import io
from unittest.mock import patch
import os
import tempfile
from microrepl import connect_miniterm
@pytest.yield_fixture
def fake_stderr():
fake_stderr = io.StringIO()
with patch('sys.stderr', fake_stderr):
yield fake_stderr
@pytest.yield_fixture
def fake_sys_exit():
with patch('sys.exit', autospec=True) as fake_exit:
yield fake_exit
def test_connect_miniterm_suggests_solution_to_perms_problem_on_linux(fake_stderr, fake_sys_exit):
nonaccessible_port = tempfile.NamedTemporaryFile()
os.chmod(nonaccessible_port.name, 0o000)
connect_miniterm(nonaccessible_port.name)
error_message = fake_stderr.getvalue()
assert "Found micro:bit, but could not connect." in error_message
assert "[Errno 13] could not open port" in error_message
assert "Permission denied: {port!r}".format(port=nonaccessible_port.name) in error_message
assert 'On linux, try adding yourself to the "dialout" group' in error_message
assert 'sudo usermod -a -G dialout <your-username>' in error_message
| true | true |
f73cfdf0281bc7eb3b3eb6a1ed0fedc6fae30ef1 | 1,244 | py | Python | ether/qpacketlist.py | alexin-ivan/ether | ab7334d800778206d14ed5a479ef1369549d7dca | [
"MIT"
] | null | null | null | ether/qpacketlist.py | alexin-ivan/ether | ab7334d800778206d14ed5a479ef1369549d7dca | [
"MIT"
] | null | null | null | ether/qpacketlist.py | alexin-ivan/ether | ab7334d800778206d14ed5a479ef1369549d7dca | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
##############################################################################
from PyQt4.QtGui import (
QWidget,
QListWidget,
QVBoxLayout,
)
##############################################################################
class QPacketList(QWidget):
    """List widget showing a one-line summary of every packet a graph emits.

    Connects to the graph's `receivedPacket` signal on construction; each
    received packet is appended to the embedded QListWidget.
    """
    def __init__(self, graph, parent_=None):
        super(QPacketList, self).__init__(parent_)
        self.layoutV = QVBoxLayout(self)
        self.qlist = QListWidget()
        self.layoutV.addWidget(self.qlist)
        self.graph = graph
        # Dead commented-out start/exit-button wiring removed.
        graph.receivedPacket.connect(self.addPacket)
    def addPacket(self, pkt):
        """Slot for the graph's receivedPacket signal."""
        self.appendList(pkt)
    def appendList(self, s):
        """Append the packet's one-line summary to the list widget."""
        self.qlist.addItem(s.summary())
def main():
    # Standalone entry point is a stub: the widget is meant to be embedded
    # in a larger application that supplies the packet-source graph.
    pass
if __name__ == '__main__':
    main()
| 24.88 | 78 | 0.552251 | true | true | |
f73cfe82e35e2f60278615276eea55384c62a200 | 14,243 | py | Python | pkgs/ipython-1.2.1-py27_0/lib/python2.7/site-packages/IPython/kernel/tests/test_message_spec.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | 26 | 2018-02-14T23:52:58.000Z | 2021-08-16T13:50:03.000Z | pkgs/ipython-1.2.1-py27_0/lib/python2.7/site-packages/IPython/kernel/tests/test_message_spec.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | pkgs/ipython-1.2.1-py27_0/lib/python2.7/site-packages/IPython/kernel/tests/test_message_spec.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | 10 | 2018-08-13T19:38:39.000Z | 2020-04-19T03:02:00.000Z | """Test suite for our zeromq-based messaging specification.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import re
from subprocess import PIPE
from Queue import Empty
import nose.tools as nt
from IPython.kernel import KernelManager
from IPython.testing import decorators as dec
from IPython.utils.traitlets import (
HasTraits, TraitError, Bool, Unicode, Dict, Integer, List, Enum, Any,
)
#-----------------------------------------------------------------------------
# Global setup and utilities
#-----------------------------------------------------------------------------
# Generous timeout for kernel startup; shorter one for individual messages.
STARTUP_TIMEOUT = 60
TIMEOUT = 15
def setup():
    """Start a kernel and a connected client once for the whole module."""
    global KM, KC
    KM = KernelManager()
    KM.start_kernel(stdout=PIPE, stderr=PIPE)
    KC = KM.client()
    KC.start_channels()
    # wait for kernel to be ready
    try:
        msg = KC.iopub_channel.get_msg(block=True, timeout=STARTUP_TIMEOUT)
    except Empty:
        pass
    msg_id = KC.kernel_info()
    KC.get_shell_msg(block=True, timeout=STARTUP_TIMEOUT)
    flush_channels()
def teardown():
    """Tear down the module-level client and kernel started by setup()."""
    KC.stop_channels()
    KM.shutdown_kernel()
def flush_channels(kc=None):
    """flush any messages waiting on the queue"""
    if kc is None:
        kc = KC
    # Drain shell and iopub; every discarded message is still validated so a
    # malformed stray message fails the suite.
    for channel in (kc.shell_channel, kc.iopub_channel):
        while True:
            try:
                msg = channel.get_msg(block=True, timeout=0.1)
            except Empty:
                break
            else:
                list(validate_message(msg))
def execute(code='', kc=None, **kwargs):
    """wrapper for doing common steps for validating an execution request"""
    if kc is None:
        kc = KC
    msg_id = kc.execute(code=code, **kwargs)
    reply = kc.get_shell_msg(timeout=TIMEOUT)
    list(validate_message(reply, 'execute_reply', msg_id))
    # Every execute triggers a busy status broadcast on iopub.
    busy = kc.get_iopub_msg(timeout=TIMEOUT)
    list(validate_message(busy, 'status', msg_id))
    nt.assert_equal(busy['content']['execution_state'], 'busy')
    # Non-silent execution also echoes the code back as a pyin message.
    if not kwargs.get('silent'):
        pyin = kc.get_iopub_msg(timeout=TIMEOUT)
        list(validate_message(pyin, 'pyin', msg_id))
        nt.assert_equal(pyin['content']['code'], code)
    return msg_id, reply['content']
#-----------------------------------------------------------------------------
# MSG Spec References
#-----------------------------------------------------------------------------
class Reference(HasTraits):
    """
    Base class for message spec specification testing.
    This class is the core of the message specification test.  The
    idea is that child classes implement trait attributes for each
    message keys, so that message keys can be tested against these
    traits using :meth:`check` method.
    """
    def check(self, d):
        """validate a dict against our traits"""
        # Generator: each yielded assertion is one nose test case.
        for key in self.trait_names():
            yield nt.assert_true(key in d, "Missing key: %r, should be found in %s" % (key, d))
            # FIXME: always allow None, probably not a good idea
            if d[key] is None:
                continue
            # Assigning through the trait triggers traitlets type validation.
            try:
                setattr(self, key, d[key])
            except TraitError as e:
                yield nt.assert_true(False, str(e))
class RMessage(Reference):
    """Shape of a full message envelope: ids, headers, and content dicts."""
    msg_id = Unicode()
    msg_type = Unicode()
    header = Dict()
    parent_header = Dict()
    content = Dict()
class RHeader(Reference):
    """Shape of a message header."""
    msg_id = Unicode()
    msg_type = Unicode()
    session = Unicode()
    username = Unicode()
class RContent(Reference):
    """Minimal reply content: just an ok/error status."""
    status = Enum((u'ok', u'error'))
class ExecuteReply(Reference):
    """Content of an execute_reply; extra keys depend on the status."""
    execution_count = Integer()
    status = Enum((u'ok', u'error'))
    def check(self, d):
        # Run the common checks, then dispatch on status for the extras.
        for tst in Reference.check(self, d):
            yield tst
        if d['status'] == 'ok':
            for tst in ExecuteReplyOkay().check(d):
                yield tst
        elif d['status'] == 'error':
            for tst in ExecuteReplyError().check(d):
                yield tst
class ExecuteReplyOkay(Reference):
    """Extra execute_reply keys present when status == 'ok'."""
    payload = List(Dict)
    user_variables = Dict()
    user_expressions = Dict()
class ExecuteReplyError(Reference):
    """Extra execute_reply keys present when status == 'error'."""
    ename = Unicode()
    evalue = Unicode()
    traceback = List(Unicode)
class OInfoReply(Reference):
    """Content of an object_info_reply; most fields may be None."""
    name = Unicode()
    found = Bool()
    ismagic = Bool()
    isalias = Bool()
    namespace = Enum((u'builtin', u'magics', u'alias', u'Interactive'))
    type_name = Unicode()
    string_form = Unicode()
    base_class = Unicode()
    length = Integer()
    file = Unicode()
    definition = Unicode()
    argspec = Dict()
    init_definition = Unicode()
    docstring = Unicode()
    init_docstring = Unicode()
    class_docstring = Unicode()
    call_def = Unicode()
    call_docstring = Unicode()
    source = Unicode()
    def check(self, d):
        for tst in Reference.check(self, d):
            yield tst
        # argspec is itself a structured dict; validate it recursively.
        if d['argspec'] is not None:
            for tst in ArgSpec().check(d['argspec']):
                yield tst
class ArgSpec(Reference):
    """Shape of the argspec dict inside an object_info_reply."""
    args = List(Unicode)
    varargs = Unicode()
    varkw = Unicode()
    defaults = List()
class Status(Reference):
    """Content of an iopub status message."""
    execution_state = Enum((u'busy', u'idle', u'starting'))
class CompleteReply(Reference):
    """Content of a complete_reply."""
    matches = List(Unicode)
def Version(num, trait=Integer):
    """Return a List trait with exactly *num* components of the given trait."""
    return List(trait, default_value=[0] * num, minlen=num, maxlen=num)
class KernelInfoReply(Reference):
    """Content of a kernel_info_reply."""
    protocol_version = Version(2)
    ipython_version = Version(4, Any)
    language_version = Version(3)
    language = Unicode()
    def _ipython_version_changed(self, name, old, new):
        # IPython version components may include strings (e.g. 'dev').
        for v in new:
            nt.assert_true(
                isinstance(v, int) or isinstance(v, basestring),
                'expected int or string as version component, got {0!r}'
                .format(v))
# IOPub messages
class PyIn(Reference):
    """Content of a pyin broadcast: the code being executed."""
    code = Unicode()
    execution_count = Integer()
# pyerr content has the same structure as an execute_reply error.
PyErr = ExecuteReplyError
class Stream(Reference):
    """Content of a stream (stdout/stderr) message."""
    name = Enum((u'stdout', u'stderr'))
    data = Unicode()
# Loose check that mimebundle keys look like MIME types ("type/subtype").
mime_pat = re.compile(r'\w+/\w+')
class DisplayData(Reference):
    """Content of a display_data message."""
    source = Unicode()
    metadata = Dict()
    data = Dict()
    def _data_changed(self, name, old, new):
        # All mimebundle keys must be MIME types; all values must be text.
        for k,v in new.iteritems():
            nt.assert_true(mime_pat.match(k))
            nt.assert_true(isinstance(v, basestring), "expected string data, got %r" % v)
class PyOut(Reference):
    """Content of a pyout (execution result) message."""
    execution_count = Integer()
    data = Dict()
    def _data_changed(self, name, old, new):
        for k,v in new.iteritems():
            nt.assert_true(mime_pat.match(k))
            nt.assert_true(isinstance(v, basestring), "expected string data, got %r" % v)
# Map from msg_type to the Reference that validates that message's content.
references = {
    'execute_reply' : ExecuteReply(),
    'object_info_reply' : OInfoReply(),
    'status' : Status(),
    'complete_reply' : CompleteReply(),
    'kernel_info_reply': KernelInfoReply(),
    'pyin' : PyIn(),
    'pyout' : PyOut(),
    'pyerr' : PyErr(),
    'stream' : Stream(),
    'display_data' : DisplayData(),
}
"""
Specifications of `content` part of the reply messages.
"""
def validate_message(msg, msg_type=None, parent=None):
    """validate a message

    This is a generator, and must be iterated through to actually
    trigger each test.

    If msg_type and/or parent are given, the msg_type and/or parent msg_id
    are compared with the given values.
    """
    # BUG FIX: RMessage().check(msg) is itself a generator; the original code
    # discarded it without iterating, so the envelope-structure assertions
    # never ran.  Re-yield its tests here.
    for tst in RMessage().check(msg):
        yield tst
    if msg_type:
        yield nt.assert_equal(msg['msg_type'], msg_type)
    if parent:
        yield nt.assert_equal(msg['parent_header']['msg_id'], parent)
    content = msg['content']
    ref = references[msg['msg_type']]
    for tst in ref.check(content):
        yield tst
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
# Shell channel
@dec.parametric
def test_execute():
    """A well-formed execute request yields a valid execute_reply."""
    flush_channels()
    msg_id = KC.execute(code='x=1')
    reply = KC.get_shell_msg(timeout=TIMEOUT)
    for tst in validate_message(reply, 'execute_reply', msg_id):
        yield tst
@dec.parametric
def test_execute_silent():
    """Silent execution emits no output and does not advance the counter."""
    flush_channels()
    msg_id, reply = execute(code='x=1', silent=True)
    # flush status=idle
    status = KC.iopub_channel.get_msg(timeout=TIMEOUT)
    for tst in validate_message(status, 'status', msg_id):
        yield tst
    nt.assert_equal(status['content']['execution_state'], 'idle')
    # No further iopub traffic is expected after the idle status.
    yield nt.assert_raises(Empty, KC.iopub_channel.get_msg, timeout=0.1)
    count = reply['execution_count']
    msg_id, reply = execute(code='x=2', silent=True)
    # flush status=idle
    status = KC.iopub_channel.get_msg(timeout=TIMEOUT)
    for tst in validate_message(status, 'status', msg_id):
        yield tst
    yield nt.assert_equal(status['content']['execution_state'], 'idle')
    yield nt.assert_raises(Empty, KC.iopub_channel.get_msg, timeout=0.1)
    count_2 = reply['execution_count']
    # Silent executions must not increment execution_count.
    yield nt.assert_equal(count_2, count)
@dec.parametric
def test_execute_error():
    """Division by zero produces an error reply and a pyerr broadcast."""
    flush_channels()
    msg_id, reply = execute(code='1/0')
    yield nt.assert_equal(reply['status'], 'error')
    yield nt.assert_equal(reply['ename'], 'ZeroDivisionError')
    pyerr = KC.iopub_channel.get_msg(timeout=TIMEOUT)
    for tst in validate_message(pyerr, 'pyerr', msg_id):
        yield tst
def test_execute_inc():
    """execute request should increment execution_count"""
    flush_channels()
    msg_id, reply = execute(code='x=1')
    count = reply['execution_count']
    flush_channels()
    msg_id, reply = execute(code='x=2')
    count_2 = reply['execution_count']
    nt.assert_equal(count_2, count+1)
def test_user_variables():
flush_channels()
msg_id, reply = execute(code='x=1', user_variables=['x'])
user_variables = reply['user_variables']
nt.assert_equal(user_variables, {u'x': {
u'status': u'ok',
u'data': {u'text/plain': u'1'},
u'metadata': {},
}})
def test_user_variables_fail():
flush_channels()
msg_id, reply = execute(code='x=1', user_variables=['nosuchname'])
user_variables = reply['user_variables']
foo = user_variables['nosuchname']
nt.assert_equal(foo['status'], 'error')
nt.assert_equal(foo['ename'], 'KeyError')
def test_user_expressions():
flush_channels()
msg_id, reply = execute(code='x=1', user_expressions=dict(foo='x+1'))
user_expressions = reply['user_expressions']
nt.assert_equal(user_expressions, {u'foo': {
u'status': u'ok',
u'data': {u'text/plain': u'2'},
u'metadata': {},
}})
def test_user_expressions_fail():
flush_channels()
msg_id, reply = execute(code='x=0', user_expressions=dict(foo='nosuchname'))
user_expressions = reply['user_expressions']
foo = user_expressions['foo']
nt.assert_equal(foo['status'], 'error')
nt.assert_equal(foo['ename'], 'NameError')
@dec.parametric
def test_oinfo():
    """object_info on any name returns a spec-conformant reply."""
    flush_channels()
    msg_id = KC.object_info('a')
    reply = KC.get_shell_msg(timeout=TIMEOUT)
    for tst in validate_message(reply, 'object_info_reply', msg_id):
        yield tst
@dec.parametric
def test_oinfo_found():
    """object_info on a defined name reports found=True and no argspec."""
    flush_channels()
    msg_id, reply = execute(code='a=5')
    msg_id = KC.object_info('a')
    reply = KC.get_shell_msg(timeout=TIMEOUT)
    for tst in validate_message(reply, 'object_info_reply', msg_id):
        yield tst
    content = reply['content']
    yield nt.assert_true(content['found'])
    # an int has no call signature, so argspec must be absent
    argspec = content['argspec']
    yield nt.assert_true(argspec is None, "didn't expect argspec dict, got %r" % argspec)
@dec.parametric
def test_oinfo_detail():
    """detail_level=2 on a method returns a populated argspec dict."""
    flush_channels()
    msg_id, reply = execute(code='ip=get_ipython()')
    msg_id = KC.object_info('ip.object_inspect', detail_level=2)
    reply = KC.get_shell_msg(timeout=TIMEOUT)
    for tst in validate_message(reply, 'object_info_reply', msg_id):
        yield tst
    content = reply['content']
    yield nt.assert_true(content['found'])
    argspec = content['argspec']
    yield nt.assert_true(isinstance(argspec, dict), "expected non-empty argspec dict, got %r" % argspec)
    yield nt.assert_equal(argspec['defaults'], [0])
@dec.parametric
def test_oinfo_not_found():
    """object_info on an undefined name reports found=False."""
    flush_channels()
    msg_id = KC.object_info('dne')
    reply = KC.get_shell_msg(timeout=TIMEOUT)
    for tst in validate_message(reply, 'object_info_reply', msg_id):
        yield tst
    content = reply['content']
    yield nt.assert_false(content['found'])
@dec.parametric
def test_complete():
    """Both identifiers sharing the 'al' prefix must appear in the matches."""
    flush_channels()
    execute(code="alpha = albert = 5")
    request_id = KC.complete('al', 'al', 2)
    completion = KC.get_shell_msg(timeout=TIMEOUT)
    for check in validate_message(completion, 'complete_reply', request_id):
        yield check
    found = completion['content']['matches']
    for expected in ('alpha', 'albert'):
        yield nt.assert_true(expected in found, "Missing match: %r" % expected)
@dec.parametric
def test_kernel_info_request():
    """kernel_info returns a spec-conformant kernel_info_reply."""
    flush_channels()
    request_id = KC.kernel_info()
    info = KC.get_shell_msg(timeout=TIMEOUT)
    for check in validate_message(info, 'kernel_info_reply', request_id):
        yield check
# IOPub channel
@dec.parametric
def test_stream():
    """print output arrives as a stream message with name='stdout'."""
    flush_channels()
    msg_id, reply = execute("print('hi')")
    stdout = KC.iopub_channel.get_msg(timeout=TIMEOUT)
    for tst in validate_message(stdout, 'stream', msg_id):
        yield tst
    content = stdout['content']
    yield nt.assert_equal(content['name'], u'stdout')
    yield nt.assert_equal(content['data'], u'hi\n')
@dec.parametric
def test_display_data():
    """display() publishes a display_data message parented to the request."""
    flush_channels()
    msg_id, reply = execute("from IPython.core.display import display; display(1)")
    display = KC.iopub_channel.get_msg(timeout=TIMEOUT)
    for tst in validate_message(display, 'display_data', parent=msg_id):
        yield tst
    data = display['content']['data']
    yield nt.assert_equal(data['text/plain'], u'1')
| 27.549323 | 104 | 0.618269 |
import re
from subprocess import PIPE
from Queue import Empty
import nose.tools as nt
from IPython.kernel import KernelManager
from IPython.testing import decorators as dec
from IPython.utils.traitlets import (
HasTraits, TraitError, Bool, Unicode, Dict, Integer, List, Enum, Any,
)
STARTUP_TIMEOUT = 60
TIMEOUT = 15
def setup():
    """Start a kernel once for the whole module and wait until it answers."""
    global KM, KC
    KM = KernelManager()
    KM.start_kernel(stdout=PIPE, stderr=PIPE)
    KC = KM.client()
    KC.start_channels()
    # best-effort wait for the first iopub message; if none arrives within
    # the startup timeout we still proceed to the kernel_info handshake
    try:
        msg = KC.iopub_channel.get_msg(block=True, timeout=STARTUP_TIMEOUT)
    except Empty:
        pass
    # block until the kernel replies, proving the shell channel is live
    msg_id = KC.kernel_info()
    KC.get_shell_msg(block=True, timeout=STARTUP_TIMEOUT)
    flush_channels()
def teardown():
    """Shut the shared kernel and its channels down after the module."""
    KC.stop_channels()
    KM.shutdown_kernel()
def flush_channels(kc=None):
    """Drain pending messages from the shell and iopub channels.

    Every drained message is still validated against the message spec so
    that stray, malformed traffic fails the test run.
    """
    client = KC if kc is None else kc
    for channel in (client.shell_channel, client.iopub_channel):
        draining = True
        while draining:
            try:
                msg = channel.get_msg(block=True, timeout=0.1)
            except Empty:
                # queue is empty: this channel is flushed
                draining = False
            else:
                list(validate_message(msg))
def execute(code='', kc=None, **kwargs):
    """Send an execute request and validate the protocol round trip.

    Returns (msg_id, reply_content) for the shell-channel execute_reply.
    The message order below is significant: reply, then busy status, then
    (for non-silent requests) the pyin echo.
    """
    if kc is None:
        kc = KC
    msg_id = kc.execute(code=code, **kwargs)
    reply = kc.get_shell_msg(timeout=TIMEOUT)
    list(validate_message(reply, 'execute_reply', msg_id))
    busy = kc.get_iopub_msg(timeout=TIMEOUT)
    list(validate_message(busy, 'status', msg_id))
    nt.assert_equal(busy['content']['execution_state'], 'busy')
    # silent requests suppress the pyin broadcast entirely
    if not kwargs.get('silent'):
        pyin = kc.get_iopub_msg(timeout=TIMEOUT)
        list(validate_message(pyin, 'pyin', msg_id))
        nt.assert_equal(pyin['content']['code'], code)
    return msg_id, reply['content']
class Reference(HasTraits):
    """Base class for message-spec checkers.

    Subclasses declare the expected keys as traits; check() verifies each
    declared key exists in the dict and type-checks its value by assigning
    it to the trait (a TraitError becomes a failed assertion).
    """
    def check(self, d):
        # generator: one assertion result is yielded per declared trait
        for key in self.trait_names():
            yield nt.assert_true(key in d, "Missing key: %r, should be found in %s" % (key, d))
            # None is treated as "value not supplied" and skips type-checking
            if d[key] is None:
                continue
            try:
                setattr(self, key, d[key])
            except TraitError as e:
                yield nt.assert_true(False, str(e))
class RMessage(Reference):
    """Envelope common to every message."""
    msg_id = Unicode()
    msg_type = Unicode()
    header = Dict()
    parent_header = Dict()
    content = Dict()
class RHeader(Reference):
    """Header fields shared by all messages."""
    msg_id = Unicode()
    msg_type = Unicode()
    session = Unicode()
    username = Unicode()
class RContent(Reference):
    """Generic reply content: only a status field."""
    status = Enum((u'ok', u'error'))
class ExecuteReply(Reference):
    """execute_reply content; dispatches on status for the extra keys."""
    execution_count = Integer()
    status = Enum((u'ok', u'error'))
    def check(self, d):
        # first the common keys, then the status-specific ones
        for tst in Reference.check(self, d):
            yield tst
        if d['status'] == 'ok':
            for tst in ExecuteReplyOkay().check(d):
                yield tst
        elif d['status'] == 'error':
            for tst in ExecuteReplyError().check(d):
                yield tst
class ExecuteReplyOkay(Reference):
    """Keys required when an execute_reply has status='ok'."""
    payload = List(Dict)
    user_variables = Dict()
    user_expressions = Dict()
class ExecuteReplyError(Reference):
    """Keys required when an execute_reply has status='error'."""
    ename = Unicode()
    evalue = Unicode()
    traceback = List(Unicode)
class OInfoReply(Reference):
    """object_info_reply content; argspec (when present) is checked recursively."""
    name = Unicode()
    found = Bool()
    ismagic = Bool()
    isalias = Bool()
    namespace = Enum((u'builtin', u'magics', u'alias', u'Interactive'))
    type_name = Unicode()
    string_form = Unicode()
    base_class = Unicode()
    length = Integer()
    file = Unicode()
    definition = Unicode()
    argspec = Dict()
    init_definition = Unicode()
    docstring = Unicode()
    init_docstring = Unicode()
    class_docstring = Unicode()
    call_def = Unicode()
    call_docstring = Unicode()
    source = Unicode()
    def check(self, d):
        for tst in Reference.check(self, d):
            yield tst
        # argspec is optional; only non-None values get the nested check
        if d['argspec'] is not None:
            for tst in ArgSpec().check(d['argspec']):
                yield tst
class ArgSpec(Reference):
    """Shape of an inspect-style argspec dict."""
    args = List(Unicode)
    varargs = Unicode()
    varkw = Unicode()
    defaults = List()
class Status(Reference):
    """iopub status message content."""
    execution_state = Enum((u'busy', u'idle', u'starting'))
class CompleteReply(Reference):
    """complete_reply content."""
    matches = List(Unicode)
def Version(num, trait=Integer):
    # Helper building a fixed-length version-tuple trait of `num` components.
    return List(trait, default_value=[0] * num, minlen=num, maxlen=num)
class KernelInfoReply(Reference):
    """kernel_info_reply content; version fields are fixed-length lists."""
    protocol_version = Version(2)
    # ipython_version may mix ints and strings (e.g. 'dev' suffix)
    ipython_version = Version(4, Any)
    language_version = Version(3)
    language = Unicode()
    def _ipython_version_changed(self, name, old, new):
        # traitlets change handler: validates each version component's type
        for v in new:
            nt.assert_true(
                isinstance(v, int) or isinstance(v, basestring),
                'expected int or string as version component, got {0!r}'
                .format(v))
class PyIn(Reference):
    """pyin broadcast of the executed code."""
    code = Unicode()
    execution_count = Integer()
# pyerr carries the same keys as an error execute_reply
PyErr = ExecuteReplyError
class Stream(Reference):
    """stream (stdout/stderr) message content."""
    name = Enum((u'stdout', u'stderr'))
    data = Unicode()
# mimetype pattern used to validate display-data keys, e.g. 'text/plain'
mime_pat = re.compile(r'\w+/\w+')
class DisplayData(Reference):
    """display_data content: data keys must be mimetypes, values strings."""
    source = Unicode()
    metadata = Dict()
    data = Dict()
    def _data_changed(self, name, old, new):
        for k,v in new.iteritems():
            nt.assert_true(mime_pat.match(k))
            nt.assert_true(isinstance(v, basestring), "expected string data, got %r" % v)
class PyOut(Reference):
    """pyout (execution result) content, same data rules as display_data."""
    execution_count = Integer()
    data = Dict()
    def _data_changed(self, name, old, new):
        for k,v in new.iteritems():
            nt.assert_true(mime_pat.match(k))
            nt.assert_true(isinstance(v, basestring), "expected string data, got %r" % v)
# registry mapping msg_type -> checker instance used by validate_message
references = {
    'execute_reply' : ExecuteReply(),
    'object_info_reply' : OInfoReply(),
    'status' : Status(),
    'complete_reply' : CompleteReply(),
    'kernel_info_reply': KernelInfoReply(),
    'pyin' : PyIn(),
    'pyout' : PyOut(),
    'pyerr' : PyErr(),
    'stream' : Stream(),
    'display_data' : DisplayData(),
}
def validate_message(msg, msg_type=None, parent=None):
    """Validate a message against the spec, yielding one result per check.

    If msg_type and/or parent are given, additionally assert the message
    type and its parent msg_id.
    """
    # FIX: Reference.check() is a generator, so the previous bare call
    # ``RMessage().check(msg)`` built the generator and threw it away —
    # the envelope checks never executed. It must be iterated.
    for tst in RMessage().check(msg):
        yield tst
    if msg_type:
        yield nt.assert_equal(msg['msg_type'], msg_type)
    if parent:
        yield nt.assert_equal(msg['parent_header']['msg_id'], parent)
    content = msg['content']
    ref = references[msg['msg_type']]
    for tst in ref.check(content):
        yield tst
@dec.parametric
def test_execute():
    """A plain execute request yields a spec-conformant execute_reply."""
    flush_channels()
    request_id = KC.execute(code='x=1')
    shell_reply = KC.get_shell_msg(timeout=TIMEOUT)
    for check in validate_message(shell_reply, 'execute_reply', request_id):
        yield check
@dec.parametric
def test_execute_silent():
    """Silent execution must produce no iopub output beyond status messages
    and must not increment the execution counter."""
    flush_channels()
    msg_id, reply = execute(code='x=1', silent=True)
    status = KC.iopub_channel.get_msg(timeout=TIMEOUT)
    for tst in validate_message(status, 'status', msg_id):
        yield tst
    # FIX: this assertion was not yielded, so @dec.parametric never
    # registered it as a test case (the identical check below is yielded).
    yield nt.assert_equal(status['content']['execution_state'], 'idle')
    yield nt.assert_raises(Empty, KC.iopub_channel.get_msg, timeout=0.1)
    count = reply['execution_count']
    msg_id, reply = execute(code='x=2', silent=True)
    status = KC.iopub_channel.get_msg(timeout=TIMEOUT)
    for tst in validate_message(status, 'status', msg_id):
        yield tst
    yield nt.assert_equal(status['content']['execution_state'], 'idle')
    yield nt.assert_raises(Empty, KC.iopub_channel.get_msg, timeout=0.1)
    count_2 = reply['execution_count']
    # silent execution leaves the counter untouched
    yield nt.assert_equal(count_2, count)
@dec.parametric
def test_execute_error():
    """A failing execute reports the error on both shell and iopub."""
    flush_channels()
    msg_id, reply = execute(code='1/0')
    yield nt.assert_equal(reply['status'], 'error')
    yield nt.assert_equal(reply['ename'], 'ZeroDivisionError')
    # the traceback is also published as a pyerr message on iopub
    pyerr = KC.iopub_channel.get_msg(timeout=TIMEOUT)
    for tst in validate_message(pyerr, 'pyerr', msg_id):
        yield tst
def test_execute_inc():
    """Each execute request increments execution_count by one."""
    flush_channels()
    msg_id, reply = execute(code='x=1')
    count = reply['execution_count']
    flush_channels()
    msg_id, reply = execute(code='x=2')
    count_2 = reply['execution_count']
    nt.assert_equal(count_2, count+1)
def test_user_variables():
    """Requested user_variables come back as rich-display payload dicts."""
    flush_channels()
    msg_id, reply = execute(code='x=1', user_variables=['x'])
    user_variables = reply['user_variables']
    nt.assert_equal(user_variables, {u'x': {
        u'status': u'ok',
        u'data': {u'text/plain': u'1'},
        u'metadata': {},
    }})
def test_user_variables_fail():
    """An undefined user variable yields a per-variable KeyError payload."""
    flush_channels()
    msg_id, reply = execute(code='x=1', user_variables=['nosuchname'])
    user_variables = reply['user_variables']
    foo = user_variables['nosuchname']
    nt.assert_equal(foo['status'], 'error')
    nt.assert_equal(foo['ename'], 'KeyError')
def test_user_expressions():
    """user_expressions are evaluated and returned as rich-display payloads."""
    flush_channels()
    msg_id, reply = execute(code='x=1', user_expressions=dict(foo='x+1'))
    user_expressions = reply['user_expressions']
    nt.assert_equal(user_expressions, {u'foo': {
        u'status': u'ok',
        u'data': {u'text/plain': u'2'},
        u'metadata': {},
    }})
def test_user_expressions_fail():
    """An expression over an undefined name yields a NameError payload."""
    flush_channels()
    msg_id, reply = execute(code='x=0', user_expressions=dict(foo='nosuchname'))
    user_expressions = reply['user_expressions']
    foo = user_expressions['foo']
    nt.assert_equal(foo['status'], 'error')
    nt.assert_equal(foo['ename'], 'NameError')
@dec.parametric
def test_oinfo():
flush_channels()
msg_id = KC.object_info('a')
reply = KC.get_shell_msg(timeout=TIMEOUT)
for tst in validate_message(reply, 'object_info_reply', msg_id):
yield tst
@dec.parametric
def test_oinfo_found():
flush_channels()
msg_id, reply = execute(code='a=5')
msg_id = KC.object_info('a')
reply = KC.get_shell_msg(timeout=TIMEOUT)
for tst in validate_message(reply, 'object_info_reply', msg_id):
yield tst
content = reply['content']
yield nt.assert_true(content['found'])
argspec = content['argspec']
yield nt.assert_true(argspec is None, "didn't expect argspec dict, got %r" % argspec)
@dec.parametric
def test_oinfo_detail():
flush_channels()
msg_id, reply = execute(code='ip=get_ipython()')
msg_id = KC.object_info('ip.object_inspect', detail_level=2)
reply = KC.get_shell_msg(timeout=TIMEOUT)
for tst in validate_message(reply, 'object_info_reply', msg_id):
yield tst
content = reply['content']
yield nt.assert_true(content['found'])
argspec = content['argspec']
yield nt.assert_true(isinstance(argspec, dict), "expected non-empty argspec dict, got %r" % argspec)
yield nt.assert_equal(argspec['defaults'], [0])
@dec.parametric
def test_oinfo_not_found():
flush_channels()
msg_id = KC.object_info('dne')
reply = KC.get_shell_msg(timeout=TIMEOUT)
for tst in validate_message(reply, 'object_info_reply', msg_id):
yield tst
content = reply['content']
yield nt.assert_false(content['found'])
@dec.parametric
def test_complete():
flush_channels()
msg_id, reply = execute(code="alpha = albert = 5")
msg_id = KC.complete('al', 'al', 2)
reply = KC.get_shell_msg(timeout=TIMEOUT)
for tst in validate_message(reply, 'complete_reply', msg_id):
yield tst
matches = reply['content']['matches']
for name in ('alpha', 'albert'):
yield nt.assert_true(name in matches, "Missing match: %r" % name)
@dec.parametric
def test_kernel_info_request():
flush_channels()
msg_id = KC.kernel_info()
reply = KC.get_shell_msg(timeout=TIMEOUT)
for tst in validate_message(reply, 'kernel_info_reply', msg_id):
yield tst
# IOPub channel
@dec.parametric
def test_stream():
flush_channels()
msg_id, reply = execute("print('hi')")
stdout = KC.iopub_channel.get_msg(timeout=TIMEOUT)
for tst in validate_message(stdout, 'stream', msg_id):
yield tst
content = stdout['content']
yield nt.assert_equal(content['name'], u'stdout')
yield nt.assert_equal(content['data'], u'hi\n')
@dec.parametric
def test_display_data():
flush_channels()
msg_id, reply = execute("from IPython.core.display import display; display(1)")
display = KC.iopub_channel.get_msg(timeout=TIMEOUT)
for tst in validate_message(display, 'display_data', parent=msg_id):
yield tst
data = display['content']['data']
yield nt.assert_equal(data['text/plain'], u'1')
| true | true |
f73cfebdc4d158cade3ac32819dbfd4af475739a | 673 | py | Python | exercicios_python_brasil/estrutura_decisao/16_equacao_segundo_grau.py | MartinaLima/Python | 94dee598bd799cfe8de4c6369cea84e97e5ed024 | [
"MIT"
] | null | null | null | exercicios_python_brasil/estrutura_decisao/16_equacao_segundo_grau.py | MartinaLima/Python | 94dee598bd799cfe8de4c6369cea84e97e5ed024 | [
"MIT"
] | null | null | null | exercicios_python_brasil/estrutura_decisao/16_equacao_segundo_grau.py | MartinaLima/Python | 94dee598bd799cfe8de4c6369cea84e97e5ed024 | [
"MIT"
] | null | null | null | from math import pow, sqrt
# Quadratic-equation solver (Bhaskara's formula): reads integer
# coefficients a, b, c of ax^2 + bx + c = 0 and prints the real roots.
print('\033[1m>>> EQUAÇÃO DE 2º GRAU <<<\033[m')
a = int(input('> VALOR DE A: '))
if a != 0:
    b = int(input('> VALOR DE B: '))
    c = int(input('> VALOR DE C: '))
    print('-'*30)
    print('\033[1mRESULTADO...\033[m')
    # discriminant decides how many real roots exist
    delta = pow(b, 2) - (4 * a * c)
    if delta > 0:
        # two distinct real roots
        x1 = (-b + sqrt(delta)) / (2 * a)
        x2 = (-b - sqrt(delta)) / (2 * a)
        print(f'X1 = {x1:.4f}\nX2 = {x2:.4f}')
    elif delta == 0:
        # one repeated real root
        x1 = -b / (2 * a)
        print(f'X = {x1:.2f}')
    elif delta < 0:
        # negative discriminant: no real roots
        print('-> A EQUAÇÃO NÃO POSSUI RAÍZES!')
else:
    # with a == 0 the equation is not quadratic
    print('-> A EQUAÇÃO NÃO É DE SEGUNDO GRAU QUANDO A = 0!')
| 30.590909 | 62 | 0.459138 | from math import pow, sqrt
# Quadratic-equation solver (Bhaskara): reads a, b, c and prints the real roots.
print('\033[1m>>> EQUAÇÃO DE 2º GRAU <<<\033[m')
coef_a = int(input('> VALOR DE A: '))
if coef_a == 0:
    # a == 0 means the equation is not quadratic at all
    print('-> A EQUAÇÃO NÃO É DE SEGUNDO GRAU QUANDO A = 0!')
else:
    coef_b = int(input('> VALOR DE B: '))
    coef_c = int(input('> VALOR DE C: '))
    print('-' * 30)
    print('\033[1mRESULTADO...\033[m')
    delta = pow(coef_b, 2) - (4 * coef_a * coef_c)
    if delta > 0:
        # two distinct real roots
        raiz1 = (-coef_b + sqrt(delta)) / (2 * coef_a)
        raiz2 = (-coef_b - sqrt(delta)) / (2 * coef_a)
        print(f'X1 = {raiz1:.4f}\nX2 = {raiz2:.4f}')
    elif delta == 0:
        # one repeated real root
        print(f'X = {-coef_b / (2 * coef_a):.2f}')
    else:
        # negative discriminant: no real roots
        print('-> A EQUAÇÃO NÃO POSSUI RAÍZES!')
| true | true |
f73cfeff7ae8267b2343f68a5bbba178daeff131 | 3,441 | py | Python | myproject/settings.py | albertopitti/coursera_final_template | bfeb3c71562ca63d83dce9b5203cb868ba287517 | [
"Apache-2.0"
] | null | null | null | myproject/settings.py | albertopitti/coursera_final_template | bfeb3c71562ca63d83dce9b5203cb868ba287517 | [
"Apache-2.0"
] | null | null | null | myproject/settings.py | albertopitti/coursera_final_template | bfeb3c71562ca63d83dce9b5203cb868ba287517 | [
"Apache-2.0"
] | null | null | null | """
Django settings for myproject project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): secret key committed in source — fine for a course exercise,
# but for any real deployment load it from the environment instead.
SECRET_KEY = 'aay0j_9b&ky3a7(8m8il+-1ud(scw12@w5!+5-=gsk6ynzi0ls'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# <HINT> add your cloud host here
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    # NOTE(review): these two entries are templatetag modules, not apps;
    # Django auto-discovers templatetags packages inside installed apps,
    # so listing them here looks unnecessary — confirm before removing.
    'onlinecourse.templatetags.dict_key',
    'onlinecourse.templatetags.is_get_score',
    'onlinecourse.apps.OnlinecourseConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.media',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_ROOT = os.path.join(STATIC_ROOT, 'media')
MEDIA_URL = '/media/'
| 26.469231 | 91 | 0.699797 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# NOTE(review): hardcoded secret key and DEBUG=True are development-only
# settings; a production deployment must override both.
SECRET_KEY = 'aay0j_9b&ky3a7(8m8il+-1ud(scw12@w5!+5-=gsk6ynzi0ls'
DEBUG = True
# <HINT> add your cloud host here
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    # NOTE(review): the first two entries are templatetag modules, not
    # Django apps — presumably unnecessary since templatetags are
    # auto-discovered from installed apps; confirm before removing.
    'onlinecourse.templatetags.dict_key',
    'onlinecourse.templatetags.is_get_score',
    'onlinecourse.apps.OnlinecourseConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
# standard Django middleware stack, in the default recommended order
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.media',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_ROOT = os.path.join(STATIC_ROOT, 'media')
MEDIA_URL = '/media/'
| true | true |
f73cff8aae354822f72fe132ce08d075d80bc53e | 6,715 | py | Python | src/dataloader.py | pyguan88/EDSR_WDSR_PyTorch | 8212c63f0905f2126dffef7678269b83342b290b | [
"MIT"
] | null | null | null | src/dataloader.py | pyguan88/EDSR_WDSR_PyTorch | 8212c63f0905f2126dffef7678269b83342b290b | [
"MIT"
] | null | null | null | src/dataloader.py | pyguan88/EDSR_WDSR_PyTorch | 8212c63f0905f2126dffef7678269b83342b290b | [
"MIT"
] | null | null | null | import sys
import threading
import queue
import random
import collections
import torch
import torch.multiprocessing as multiprocessing
# from torch._C import _set_worker_signal_handlers, _update_worker_pids, \
# _remove_worker_pids, _error_if_any_worker_fails
# from torch.utils.data.dataloader import DataLoader
# from torch.utils.data.dataloader import _DataLoaderIter
# from torch.utils.data.dataloader import ManagerWatchdog
# from torch.utils.data.dataloader import _pin_memory_loop
# from torch.utils.data.dataloader import MP_STATUS_CHECK_INTERVAL
# from torch.utils.data.dataloader import ExceptionWrapper
# from torch.utils.data.dataloader import _use_shared_memory
# from torch.utils.data.dataloader import numpy_type_map
# from torch.utils.data.dataloader import default_collate
# from torch.utils.data.dataloader import pin_memory_batch
# from torch.utils.data.dataloader import _SIGCHLD_handler_set
# from torch.utils.data.dataloader import _set_SIGCHLD_handler
###for pytorch 1.1
from torch._C import _set_worker_signal_handlers
from torch.utils.data import _utils
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataloader import _DataLoaderIter
_use_shared_memory = False
if sys.version_info[0] == 2:
import Queue as queue
else:
import queue
def _ms_loop(dataset, index_queue, data_queue, done_event, collate_fn, scale, seed, init_fn, worker_id):
    """Worker-process loop: fetch batch indices, collate samples, return batches.

    Multi-scale variant of torch's worker loop: for training with several
    scales it picks a random scale index per batch, tells the dataset about
    it via set_scale(), and appends the chosen index to the collated batch.
    """
    try:
        global _use_shared_memory
        _use_shared_memory = True
        # restore default signal handling inside the worker process
        _set_worker_signal_handlers()
        torch.set_num_threads(1)
        # per-worker deterministic seeding (seed = base_seed + worker_id)
        random.seed(seed)
        torch.manual_seed(seed)
        data_queue.cancel_join_thread()
        if init_fn is not None:
            init_fn(worker_id)
        # watchdog = ManagerWatchdog()
        watchdog = _utils.worker.ManagerWatchdog()
        # loop until the parent dies or sends the None sentinel
        while watchdog.is_alive():
            # try:
            #     r = index_queue.get(timeout=MP_STATUS_CHECK_INTERVAL)
            try:
                r = index_queue.get(timeout=_utils.MP_STATUS_CHECK_INTERVAL)
            except queue.Empty:
                continue
            if r is None:
                # None is the shutdown sentinel; only sent after done_event
                assert done_event.is_set()
                return
            elif done_event.is_set():
                # shutting down: drain remaining requests without working
                continue
            idx, batch_indices = r
            try:
                idx_scale = 0
                # pick a random scale per batch, training mode only
                if len(scale) > 1 and dataset.train:
                    idx_scale = random.randrange(0, len(scale))
                    dataset.set_scale(idx_scale)
                samples = collate_fn([dataset[i] for i in batch_indices])
                # the chosen scale index rides along with the batch
                samples.append(idx_scale)
            # except Exception:
            #     data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
            except Exception:
                # ship the exception back to the main process instead of dying
                data_queue.put((idx, _utils.ExceptionWrapper(sys.exc_info())))
            else:
                data_queue.put((idx, samples))
    except KeyboardInterrupt:
        # main process handles SIGINT; workers exit quietly
        pass
class _MSDataLoaderIter(_DataLoaderIter):
    """Iterator spawning workers that run _ms_loop instead of torch's loop.

    NOTE(review): __init__ re-implements the base iterator's setup without
    calling super().__init__ — presumably to substitute _ms_loop as the
    worker target; confirm it stays in sync with the pinned torch version.
    """
    def __init__(self, loader):
        self.dataset = loader.dataset
        self.scale = loader.scale
        self.collate_fn = loader.collate_fn
        self.batch_sampler = loader.batch_sampler
        self.num_workers = loader.num_workers
        self.pin_memory = loader.pin_memory and torch.cuda.is_available()
        self.timeout = loader.timeout

        self.sample_iter = iter(self.batch_sampler)

        base_seed = torch.LongTensor(1).random_().item()

        if self.num_workers > 0:
            self.worker_init_fn = loader.worker_init_fn
            self.worker_queue_idx = 0
            self.worker_result_queue = multiprocessing.Queue()
            self.batches_outstanding = 0
            self.worker_pids_set = False
            self.shutdown = False
            self.send_idx = 0
            self.rcvd_idx = 0
            self.reorder_dict = {}
            self.done_event = multiprocessing.Event()

            # NOTE(review): this shadows the base_seed computed above with a
            # 0-dim tensor instead of an int — likely leftover from an older
            # torch version; confirm which one is intended.
            base_seed = torch.LongTensor(1).random_()[0]

            self.index_queues = []
            self.workers = []
            for i in range(self.num_workers):
                index_queue = multiprocessing.Queue()
                index_queue.cancel_join_thread()
                # each worker runs the multi-scale loop with seed base_seed + i
                w = multiprocessing.Process(
                    target=_ms_loop,
                    args=(
                        self.dataset,
                        index_queue,
                        self.worker_result_queue,
                        self.done_event,
                        self.collate_fn,
                        self.scale,
                        base_seed + i,
                        self.worker_init_fn,
                        i
                    )
                )
                w.start()
                self.index_queues.append(index_queue)
                self.workers.append(w)

            if self.pin_memory:
                # extra thread moves batches into pinned (page-locked) memory
                self.data_queue = queue.Queue()
                pin_memory_thread = threading.Thread(
                    # target=_pin_memory_loop,
                    target=_utils.pin_memory._pin_memory_loop,
                    args=(
                        self.worker_result_queue,
                        self.data_queue,
                        torch.cuda.current_device(),
                        self.done_event
                    )
                )
                pin_memory_thread.daemon = True
                pin_memory_thread.start()
                self.pin_memory_thread = pin_memory_thread
            else:
                self.data_queue = self.worker_result_queue

            # _update_worker_pids(id(self), tuple(w.pid for w in self.workers))
            # _set_SIGCHLD_handler()
            # register worker pids so dead workers are detected promptly
            _utils.signal_handling._set_worker_pids(id(self), tuple(w.pid for w in self.workers))
            _utils.signal_handling._set_SIGCHLD_handler()
            self.worker_pids_set = True

            # prime the pipeline: two outstanding batches per worker
            for _ in range(2 * self.num_workers):
                self._put_indices()
class MSDataLoader(DataLoader):
    """DataLoader that picks a random scale per batch for multi-scale training."""

    def __init__(
        self, args, dataset, batch_size=1, shuffle=False,
        sampler=None, batch_sampler=None,
        collate_fn=_utils.collate.default_collate, pin_memory=False, drop_last=False,
        timeout=0, worker_init_fn=None):
        # Forward everything to the stock DataLoader; the worker count
        # comes from the experiment arguments rather than a parameter.
        loader_kwargs = dict(
            batch_size=batch_size,
            shuffle=shuffle,
            sampler=sampler,
            batch_sampler=batch_sampler,
            num_workers=args.n_threads,
            collate_fn=collate_fn,
            pin_memory=pin_memory,
            drop_last=drop_last,
            timeout=timeout,
            worker_init_fn=worker_init_fn,
        )
        super(MSDataLoader, self).__init__(dataset, **loader_kwargs)
        self.scale = args.scale

    def __iter__(self):
        # substitute the multi-scale iterator for the default one
        return _MSDataLoaderIter(self)
| 35.157068 | 104 | 0.600745 | import sys
import sys
import threading
import queue
import random
import collections

import torch
import torch.multiprocessing as multiprocessing
# FIX: this line was truncated to the bare name ``_signal_handlers``,
# which raises NameError at import time; restore the private-API import
# that _ms_loop() below actually calls.
from torch._C import _set_worker_signal_handlers
from torch.utils.data import _utils
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataloader import _DataLoaderIter

# Set to True inside worker processes so collate can use shared memory.
_use_shared_memory = False

# Python 2 names the queue module differently.
if sys.version_info[0] == 2:
    import Queue as queue
else:
    import queue
def _ms_loop(dataset, index_queue, data_queue, done_event, collate_fn, scale, seed, init_fn, worker_id):
try:
global _use_shared_memory
_use_shared_memory = True
_set_worker_signal_handlers()
torch.set_num_threads(1)
random.seed(seed)
torch.manual_seed(seed)
data_queue.cancel_join_thread()
if init_fn is not None:
init_fn(worker_id)
watchdog = _utils.worker.ManagerWatchdog()
while watchdog.is_alive():
try:
r = index_queue.get(timeout=_utils.MP_STATUS_CHECK_INTERVAL)
except queue.Empty:
continue
if r is None:
assert done_event.is_set()
return
elif done_event.is_set():
continue
idx, batch_indices = r
try:
idx_scale = 0
if len(scale) > 1 and dataset.train:
idx_scale = random.randrange(0, len(scale))
dataset.set_scale(idx_scale)
samples = collate_fn([dataset[i] for i in batch_indices])
samples.append(idx_scale)
except Exception:
data_queue.put((idx, _utils.ExceptionWrapper(sys.exc_info())))
else:
data_queue.put((idx, samples))
except KeyboardInterrupt:
pass
class _MSDataLoaderIter(_DataLoaderIter):
def __init__(self, loader):
self.dataset = loader.dataset
self.scale = loader.scale
self.collate_fn = loader.collate_fn
self.batch_sampler = loader.batch_sampler
self.num_workers = loader.num_workers
self.pin_memory = loader.pin_memory and torch.cuda.is_available()
self.timeout = loader.timeout
self.sample_iter = iter(self.batch_sampler)
base_seed = torch.LongTensor(1).random_().item()
if self.num_workers > 0:
self.worker_init_fn = loader.worker_init_fn
self.worker_queue_idx = 0
self.worker_result_queue = multiprocessing.Queue()
self.batches_outstanding = 0
self.worker_pids_set = False
self.shutdown = False
self.send_idx = 0
self.rcvd_idx = 0
self.reorder_dict = {}
self.done_event = multiprocessing.Event()
base_seed = torch.LongTensor(1).random_()[0]
self.index_queues = []
self.workers = []
for i in range(self.num_workers):
index_queue = multiprocessing.Queue()
index_queue.cancel_join_thread()
w = multiprocessing.Process(
target=_ms_loop,
args=(
self.dataset,
index_queue,
self.worker_result_queue,
self.done_event,
self.collate_fn,
self.scale,
base_seed + i,
self.worker_init_fn,
i
)
)
w.start()
self.index_queues.append(index_queue)
self.workers.append(w)
if self.pin_memory:
self.data_queue = queue.Queue()
pin_memory_thread = threading.Thread(
target=_utils.pin_memory._pin_memory_loop,
args=(
self.worker_result_queue,
self.data_queue,
torch.cuda.current_device(),
self.done_event
)
)
pin_memory_thread.daemon = True
pin_memory_thread.start()
self.pin_memory_thread = pin_memory_thread
else:
self.data_queue = self.worker_result_queue
_utils.signal_handling._set_worker_pids(id(self), tuple(w.pid for w in self.workers))
_utils.signal_handling._set_SIGCHLD_handler()
self.worker_pids_set = True
for _ in range(2 * self.num_workers):
self._put_indices()
class MSDataLoader(DataLoader):
    """DataLoader wrapper wiring in the multi-scale iterator.

    The number of worker processes and the list of training scales are read
    from the experiment argument object rather than passed individually.
    """

    def __init__(
        self, args, dataset, batch_size=1, shuffle=False,
        sampler=None, batch_sampler=None,
        collate_fn=_utils.collate.default_collate, pin_memory=False, drop_last=False,
        timeout=0, worker_init_fn=None):
        base_kwargs = {
            'batch_size': batch_size,
            'shuffle': shuffle,
            'sampler': sampler,
            'batch_sampler': batch_sampler,
            'num_workers': args.n_threads,
            'collate_fn': collate_fn,
            'pin_memory': pin_memory,
            'drop_last': drop_last,
            'timeout': timeout,
            'worker_init_fn': worker_init_fn,
        }
        super(MSDataLoader, self).__init__(dataset, **base_kwargs)
        self.scale = args.scale

    def __iter__(self):
        return _MSDataLoaderIter(self)
| true | true |
f73d028c9b85aae8bc073853db7e98b634f042f5 | 15,010 | py | Python | tests/unit/test_consumer.py | mmanciop/loki-operator | 1fe5a87afeebd3b2ce167bb8636f281954bade9c | [
"Apache-2.0"
] | null | null | null | tests/unit/test_consumer.py | mmanciop/loki-operator | 1fe5a87afeebd3b2ce167bb8636f281954bade9c | [
"Apache-2.0"
] | null | null | null | tests/unit/test_consumer.py | mmanciop/loki-operator | 1fe5a87afeebd3b2ce167bb8636f281954bade9c | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Canonical Ltd.
# See LICENSE file for licensing details.
import json
import os
import textwrap
import unittest
from unittest.mock import patch
import yaml
from charms.loki_k8s.v0.loki_push_api import LokiPushApiConsumer
from fs.tempfs import TempFS
from helpers import TempFolderSandbox
from ops.charm import CharmBase
from ops.framework import StoredState
from ops.testing import Harness
# Alert rule group as it looks AFTER the consumer library injected the Juju
# topology (model / model UUID / application) into the expression and labels.
LABELED_ALERT_RULES = [
    {
        "name": "loki_20ce8299-3634-4bef-8bd8-5ace6c8816b4_promtail-k8s_alerts",
        "rules": [
            {
                "alert": "HighPercentageError",
                "expr": 'sum(rate({juju_model="loki", juju_model_uuid="20ce8299-3634-4bef-8bd8-5ace6c8816b4", juju_application="promtail-k8s"} |= "error" [5m])) by (job)\n /\nsum(rate({app="foo", env="production"}[5m])) by (job)\n > 0.05\n',
                "for": "10m",
                "labels": {
                    "severity": "page",
                    "juju_model": "loki",
                    "juju_model_uuid": "20ce8299-3634-4bef-8bd8-5ace6c8816b4",
                    "juju_application": "promtail-k8s",
                },
                "annotations": {"summary": "High request latency"},
            }
        ],
    }
]
# A single rule as a charm author would write it: the '%%juju_topology%%'
# placeholder stands in for the injected selector seen in LABELED_ALERT_RULES.
ONE_RULE = {
    "alert": "HighPercentageError",
    "expr": 'sum(rate({%%juju_topology%%} |= "error" [5m])) by (job)\n /\nsum(rate({app="foo", env="production"}[5m])) by (job)\n > 0.05\n',
    "for": "10m",
    "labels": {"severity": "page"},
    "annotations": {"summary": "High request latency"},
}
class FakeConsumerCharm(CharmBase):
    """Minimal consumer charm used to drive ``LokiPushApiConsumer`` in tests."""
    _stored = StoredState()
    metadata_yaml = textwrap.dedent(
        """
        containers:
          promtail:
            resource: promtail-image
        requires:
          logging:
            interface: loki_push_api
        """
    )
    def __init__(self, *args, **kwargs):
        super().__init__(*args)
        self._port = 3100
        self.loki_consumer = LokiPushApiConsumer(self)
    @property
    def _loki_push_api(self) -> str:
        """Return the unit's push endpoint as a JSON-encoded payload."""
        # Fix: this object *is* the charm -- ``CharmBase`` has no ``charm``
        # attribute, so the previous ``self.charm._port`` raised
        # AttributeError whenever the property was evaluated.
        loki_push_api = f"http://{self.unit_ip}:{self._port}/loki/api/v1/push"
        data = {"loki_push_api": loki_push_api}
        return json.dumps(data)
    @property
    def unit_ip(self) -> str:
        """Returns unit's IP."""
        return "10.1.2.3"
class TestLokiPushApiConsumer(unittest.TestCase):
    """Relation-data behavior of ``LokiPushApiConsumer`` under the Harness."""
    def setUp(self):
        self.harness = Harness(FakeConsumerCharm, meta=FakeConsumerCharm.metadata_yaml)
        self.addCleanup(self.harness.cleanup)
        self.harness.set_leader(True)
        self.harness.begin()
    def test__on_logging_relation_changed_no_leader(self):
        """A follower unit tolerates relation-changed without erroring."""
        self.harness.set_leader(False)
        rel_id = self.harness.add_relation("logging", "promtail")
        self.harness.add_relation_unit(rel_id, "promtail/0")
        self.assertEqual(self.harness.update_relation_data(rel_id, "promtail", {}), None)
    def test__on_logging_relation_changed_no_unit(self):
        """Provider app data with a push URL is accepted without raising."""
        self.harness.set_leader(True)
        rel_id = self.harness.add_relation("logging", "promtail")
        self.harness.add_relation_unit(rel_id, "promtail/0")
        self.assertEqual(
            self.harness.update_relation_data(
                rel_id,
                "promtail",
                {"data": '{"loki_push_api": "http://10.1.2.3:3100/loki/api/v1/push"}'},
            ),
            None,
        )
    @patch("charms.loki_k8s.v0.loki_push_api.AlertRules.add_path")
    @patch("charms.loki_k8s.v0.loki_push_api.AlertRules.as_dict", new=lambda *a, **kw: {})
    def test__on_logging_relation_changed(self, mock_as_dict):
        """Provider 'endpoints' data becomes visible via ``loki_endpoints``."""
        # NOTE(review): because the ``as_dict`` patch is given ``new=``, it
        # injects no extra argument -- the single injected mock is actually
        # the ``add_path`` mock, so this return_value is set on ``add_path``
        # (harmless here, but the parameter name is misleading).
        mock_as_dict.return_value = (LABELED_ALERT_RULES, {})
        loki_push_api = "http://10.1.2.3:3100/loki/api/v1/push"
        self.harness.set_leader(True)
        rel_id = self.harness.add_relation("logging", "promtail")
        self.harness.add_relation_unit(rel_id, "promtail/0")
        self.harness.update_relation_data(
            rel_id,
            "promtail",
            {"endpoints": '[{"url": "http://10.1.2.3:3100/loki/api/v1/push"}]'},
        )
        self.assertEqual(
            self.harness.charm.loki_consumer.loki_endpoints[0]["url"],
            loki_push_api,
        )
    @patch("charms.loki_k8s.v0.loki_push_api.LokiPushApiEvents.loki_push_api_endpoint_joined")
    def test__on_upgrade_charm_endpoint_joined_event_fired_for_leader(self, mock_events):
        """Leader units emit ``loki_push_api_endpoint_joined`` on new data."""
        self.harness.set_leader(True)
        rel_id = self.harness.add_relation("logging", "promtail")
        self.harness.add_relation_unit(rel_id, "promtail/0")
        self.harness.update_relation_data(
            rel_id,
            "promtail",
            {"data": '{"loki_push_api": "http://10.1.2.3:3100/loki/api/v1/push"}'},
        )
        mock_events.emit.assert_called_once()
    @patch("charms.loki_k8s.v0.loki_push_api.LokiPushApiEvents.loki_push_api_endpoint_joined")
    def test__on_upgrade_charm_endpoint_joined_event_fired_for_follower(self, mock_events):
        """Follower units emit the same event -- it is not leader-gated."""
        self.harness.set_leader(False)
        rel_id = self.harness.add_relation("logging", "promtail")
        self.harness.add_relation_unit(rel_id, "promtail/0")
        self.harness.update_relation_data(
            rel_id,
            "promtail",
            {"data": '{"loki_push_api": "http://10.1.2.3:3100/loki/api/v1/push"}'},
        )
        mock_events.emit.assert_called_once()
class TestReloadAlertRules(unittest.TestCase):
    """Feature: Consumer charm can manually invoke reloading of alerts.
    Background: In use cases such as cos-configuration-k8s-operator, the last hook can fire before
    the alert files show up on disk. In that case relation data would remain empty of alerts. To
    circumvent that, a public method for reloading alert rules is offered.
    """
    NO_ALERTS = json.dumps({})  # relation data representation for the case of "no alerts"
    # use a short-form free-standing alert, for brevity
    ALERT = yaml.safe_dump({"alert": "free_standing", "expr": "avg(some_vector[5m]) > 5"})
    def setUp(self):
        """Start a consumer charm whose alert dir is an (initially empty) sandbox."""
        self.sandbox = TempFolderSandbox()
        alert_rules_path = os.path.join(self.sandbox.root, "alerts")
        self.alert_rules_path = alert_rules_path
        class ConsumerCharm(CharmBase):
            # Throwaway charm wired to read alert rules from the sandbox path.
            metadata_yaml = textwrap.dedent(
                """
                requires:
                  logging:
                    interface: loki_push_api
                """
            )
            def __init__(self, *args, **kwargs):
                super().__init__(*args)
                self._port = 3100
                self.loki_consumer = LokiPushApiConsumer(
                    self, alert_rules_path=alert_rules_path, recursive=True
                )
        self.harness = Harness(ConsumerCharm, meta=ConsumerCharm.metadata_yaml)
        self.addCleanup(self.harness.cleanup)
        self.harness.begin_with_initial_hooks()
        self.harness.set_leader(True)
        self.rel_id = self.harness.add_relation("logging", "loki")
        # need to manually emit relation changed
        # https://github.com/canonical/operator/issues/682
        self.harness.charm.on.logging_relation_changed.emit(
            self.harness.charm.model.get_relation("logging")
        )
    def test_reload_when_dir_is_still_empty_changes_nothing(self):
        """Scenario: The reload method is called when the alerts dir is still empty."""
        # GIVEN relation data contains no alerts
        relation = self.harness.charm.model.get_relation("logging")
        self.assertEqual(relation.data[self.harness.charm.app].get("alert_rules"), self.NO_ALERTS)
        # WHEN no rule files are present
        # AND the reload method is called
        self.harness.charm.loki_consumer._reinitialize_alert_rules()
        # THEN relation data is unchanged
        relation = self.harness.charm.model.get_relation("logging")
        self.assertEqual(relation.data[self.harness.charm.app].get("alert_rules"), self.NO_ALERTS)
    def test_reload_after_dir_is_populated_updates_relation_data(self):
        """Scenario: The reload method is called after some alert files are added."""
        # GIVEN relation data contains no alerts
        relation = self.harness.charm.model.get_relation("logging")
        self.assertEqual(relation.data[self.harness.charm.app].get("alert_rules"), self.NO_ALERTS)
        # WHEN some rule files are added to the alerts dir
        self.sandbox.put_file(os.path.join(self.alert_rules_path, "alert.rule"), self.ALERT)
        # AND the reload method is called
        self.harness.charm.loki_consumer._reinitialize_alert_rules()
        # THEN relation data is updated
        relation = self.harness.charm.model.get_relation("logging")
        self.assertNotEqual(
            relation.data[self.harness.charm.app].get("alert_rules"), self.NO_ALERTS
        )
    def test_reload_after_dir_is_emptied_updates_relation_data(self):
        """Scenario: The reload method is called after all the loaded alert files are removed."""
        # GIVEN alert files are present and relation data contains respective alerts
        alert_filename = os.path.join(self.alert_rules_path, "alert.rule")
        self.sandbox.put_file(alert_filename, self.ALERT)
        self.harness.charm.loki_consumer._reinitialize_alert_rules()
        relation = self.harness.charm.model.get_relation("logging")
        self.assertNotEqual(
            relation.data[self.harness.charm.app].get("alert_rules"), self.NO_ALERTS
        )
        # WHEN all rule files are deleted from the alerts dir
        self.sandbox.remove(alert_filename)
        # AND the reload method is called
        self.harness.charm.loki_consumer._reinitialize_alert_rules()
        # THEN relation data is empty again
        relation = self.harness.charm.model.get_relation("logging")
        self.assertEqual(relation.data[self.harness.charm.app].get("alert_rules"), self.NO_ALERTS)
    def test_reload_after_dir_itself_removed_updates_relation_data(self):
        """Scenario: The reload method is called after the alerts dir doesn't exist anymore."""
        # GIVEN alert files are present and relation data contains respective alerts
        alert_filename = os.path.join(self.alert_rules_path, "alert.rule")
        self.sandbox.put_file(alert_filename, self.ALERT)
        self.harness.charm.loki_consumer._reinitialize_alert_rules()
        relation = self.harness.charm.model.get_relation("logging")
        self.assertNotEqual(
            relation.data[self.harness.charm.app].get("alert_rules"), self.NO_ALERTS
        )
        # WHEN the alerts dir itself is deleted
        self.sandbox.remove(alert_filename)
        self.sandbox.rmdir(self.alert_rules_path)
        # AND the reload method is called
        self.harness.charm.loki_consumer._reinitialize_alert_rules()
        # THEN relation data is empty again
        relation = self.harness.charm.model.get_relation("logging")
        self.assertEqual(relation.data[self.harness.charm.app].get("alert_rules"), self.NO_ALERTS)
class TestAlertRuleFormat(unittest.TestCase):
    """Feature: Consumer lib should warn when encountering invalid rules files.
    Background: It is not easy to determine the validity of rule files, but some cases are trivial:
    - empty files
    - files made up of only white-spaces that yaml.safe_load parses as None (space, newline)
    In those cases a warning should be emitted.
    """
    NO_ALERTS = json.dumps({})  # relation data representation for the case of "no alerts"
    def setUp(self):
        """Prepare a charm whose alert dir is a pyfilesystem temp sandbox.

        ``harness.begin_with_initial_hooks()`` is deliberately deferred to the
        tests themselves so they can write rule files *before* startup.
        """
        self.sandbox = TempFS("consumer_rule_files", auto_clean=True)
        self.addCleanup(self.sandbox.close)
        alert_rules_path = self.sandbox.getsyspath("/")
        class ConsumerCharm(CharmBase):
            # Throwaway charm wired to read alert rules from the sandbox path.
            metadata_yaml = textwrap.dedent(
                """
                requires:
                  logging:
                    interface: loki_push_api
                peers:
                  replicas:
                    interface: consumer_charm_replica
                """
            )
            def __init__(self, *args, **kwargs):
                super().__init__(*args)
                self._port = 3100
                self.loki_consumer = LokiPushApiConsumer(
                    self, alert_rules_path=alert_rules_path, recursive=True
                )
        self.harness = Harness(ConsumerCharm, meta=ConsumerCharm.metadata_yaml)
        self.addCleanup(self.harness.cleanup)
        self.peer_rel_id = self.harness.add_relation("replicas", self.harness.model.app.name)
        self.harness.set_leader(True)
        self.rel_id = self.harness.add_relation(relation_name="logging", remote_app="loki")
        self.harness.add_relation_unit(self.rel_id, "loki/0")
    def test_empty_rule_files_are_dropped_and_produce_an_error(self):
        """Scenario: Consumer charm attempts to forward an empty rule file."""
        # GIVEN a bunch of empty rule files (and ONLY empty rule files)
        self.sandbox.writetext("empty.rule", "")
        self.sandbox.writetext("whitespace1.rule", " ")
        self.sandbox.writetext("whitespace2.rule", "\n")
        self.sandbox.writetext("whitespace3.rule", "\r\n")
        # WHEN charm starts
        with self.assertLogs(level="ERROR") as logger:
            self.harness.begin_with_initial_hooks()
        # THEN relation data is empty (empty rule files do not get forwarded in any way)
        relation = self.harness.charm.model.get_relation("logging")
        self.assertEqual(relation.data[self.harness.charm.app].get("alert_rules"), self.NO_ALERTS)
        # AND an error message is recorded for every empty file
        logger_output = "\n".join(logger.output)
        self.assertIn("empty.rule", logger_output)
        self.assertIn("whitespace1.rule", logger_output)
        self.assertIn("whitespace2.rule", logger_output)
        self.assertIn("whitespace3.rule", logger_output)
    def test_rules_files_with_invalid_yaml_are_dropped_and_produce_an_error(self):
        """Scenario: Consumer charm attempts to forward a rule file which is invalid yaml."""
        # GIVEN a bunch of invalid yaml rule files (and ONLY invalid yaml rule files)
        self.sandbox.writetext("tab.rule", "\t")
        self.sandbox.writetext("multicolon.rule", "this: is: not: yaml")
        # WHEN charm starts
        with self.assertLogs(level="ERROR") as logger:
            self.harness.begin_with_initial_hooks()
        # THEN relation data is empty (invalid rule files do not get forwarded in any way)
        relation = self.harness.charm.model.get_relation("logging")
        self.assertEqual(relation.data[self.harness.charm.app].get("alert_rules"), self.NO_ALERTS)
        # AND an error message is recorded for every invalid file
        logger_output = "\n".join(logger.output)
        self.assertIn("tab.rule", logger_output)
        self.assertIn("multicolon.rule", logger_output)
| 41.578947 | 243 | 0.65523 |
import json
import os
import textwrap
import unittest
from unittest.mock import patch
import yaml
from charms.loki_k8s.v0.loki_push_api import LokiPushApiConsumer
from fs.tempfs import TempFS
from helpers import TempFolderSandbox
from ops.charm import CharmBase
from ops.framework import StoredState
from ops.testing import Harness
# Alert rule group as it looks AFTER the consumer library injected the Juju
# topology (model / model UUID / application) into the expression and labels.
LABELED_ALERT_RULES = [
    {
        "name": "loki_20ce8299-3634-4bef-8bd8-5ace6c8816b4_promtail-k8s_alerts",
        "rules": [
            {
                "alert": "HighPercentageError",
                "expr": 'sum(rate({juju_model="loki", juju_model_uuid="20ce8299-3634-4bef-8bd8-5ace6c8816b4", juju_application="promtail-k8s"} |= "error" [5m])) by (job)\n /\nsum(rate({app="foo", env="production"}[5m])) by (job)\n > 0.05\n',
                "for": "10m",
                "labels": {
                    "severity": "page",
                    "juju_model": "loki",
                    "juju_model_uuid": "20ce8299-3634-4bef-8bd8-5ace6c8816b4",
                    "juju_application": "promtail-k8s",
                },
                "annotations": {"summary": "High request latency"},
            }
        ],
    }
]
# A single rule as a charm author would write it: the '%%juju_topology%%'
# placeholder stands in for the injected selector seen in LABELED_ALERT_RULES.
ONE_RULE = {
    "alert": "HighPercentageError",
    "expr": 'sum(rate({%%juju_topology%%} |= "error" [5m])) by (job)\n /\nsum(rate({app="foo", env="production"}[5m])) by (job)\n > 0.05\n',
    "for": "10m",
    "labels": {"severity": "page"},
    "annotations": {"summary": "High request latency"},
}
class FakeConsumerCharm(CharmBase):
    """Minimal consumer charm used to drive ``LokiPushApiConsumer`` in tests."""
    _stored = StoredState()
    metadata_yaml = textwrap.dedent(
        """
        containers:
          promtail:
            resource: promtail-image
        requires:
          logging:
            interface: loki_push_api
        """
    )
    def __init__(self, *args, **kwargs):
        super().__init__(*args)
        self._port = 3100
        self.loki_consumer = LokiPushApiConsumer(self)
    @property
    def _loki_push_api(self) -> str:
        """Return the unit's push endpoint as a JSON-encoded payload."""
        # Fix: this object *is* the charm -- ``CharmBase`` has no ``charm``
        # attribute, so the previous ``self.charm._port`` raised
        # AttributeError whenever the property was evaluated.
        loki_push_api = f"http://{self.unit_ip}:{self._port}/loki/api/v1/push"
        data = {"loki_push_api": loki_push_api}
        return json.dumps(data)
    @property
    def unit_ip(self) -> str:
        """Returns unit's IP."""
        return "10.1.2.3"
class TestLokiPushApiConsumer(unittest.TestCase):
    """Relation-data behavior of ``LokiPushApiConsumer`` under the Harness."""
    def setUp(self):
        self.harness = Harness(FakeConsumerCharm, meta=FakeConsumerCharm.metadata_yaml)
        self.addCleanup(self.harness.cleanup)
        self.harness.set_leader(True)
        self.harness.begin()
    def test__on_logging_relation_changed_no_leader(self):
        """A follower unit tolerates relation-changed without erroring."""
        self.harness.set_leader(False)
        rel_id = self.harness.add_relation("logging", "promtail")
        self.harness.add_relation_unit(rel_id, "promtail/0")
        self.assertEqual(self.harness.update_relation_data(rel_id, "promtail", {}), None)
    def test__on_logging_relation_changed_no_unit(self):
        """Provider app data with a push URL is accepted without raising."""
        self.harness.set_leader(True)
        rel_id = self.harness.add_relation("logging", "promtail")
        self.harness.add_relation_unit(rel_id, "promtail/0")
        self.assertEqual(
            self.harness.update_relation_data(
                rel_id,
                "promtail",
                {"data": '{"loki_push_api": "http://10.1.2.3:3100/loki/api/v1/push"}'},
            ),
            None,
        )
    @patch("charms.loki_k8s.v0.loki_push_api.AlertRules.add_path")
    @patch("charms.loki_k8s.v0.loki_push_api.AlertRules.as_dict", new=lambda *a, **kw: {})
    def test__on_logging_relation_changed(self, mock_as_dict):
        """Provider 'endpoints' data becomes visible via ``loki_endpoints``."""
        # NOTE(review): the ``as_dict`` patch uses ``new=`` and injects no
        # argument; ``mock_as_dict`` is actually the ``add_path`` mock.
        mock_as_dict.return_value = (LABELED_ALERT_RULES, {})
        loki_push_api = "http://10.1.2.3:3100/loki/api/v1/push"
        self.harness.set_leader(True)
        rel_id = self.harness.add_relation("logging", "promtail")
        self.harness.add_relation_unit(rel_id, "promtail/0")
        self.harness.update_relation_data(
            rel_id,
            "promtail",
            {"endpoints": '[{"url": "http://10.1.2.3:3100/loki/api/v1/push"}]'},
        )
        self.assertEqual(
            self.harness.charm.loki_consumer.loki_endpoints[0]["url"],
            loki_push_api,
        )
    @patch("charms.loki_k8s.v0.loki_push_api.LokiPushApiEvents.loki_push_api_endpoint_joined")
    def test__on_upgrade_charm_endpoint_joined_event_fired_for_leader(self, mock_events):
        """Leader units emit ``loki_push_api_endpoint_joined`` on new data."""
        self.harness.set_leader(True)
        rel_id = self.harness.add_relation("logging", "promtail")
        self.harness.add_relation_unit(rel_id, "promtail/0")
        self.harness.update_relation_data(
            rel_id,
            "promtail",
            {"data": '{"loki_push_api": "http://10.1.2.3:3100/loki/api/v1/push"}'},
        )
        mock_events.emit.assert_called_once()
    @patch("charms.loki_k8s.v0.loki_push_api.LokiPushApiEvents.loki_push_api_endpoint_joined")
    def test__on_upgrade_charm_endpoint_joined_event_fired_for_follower(self, mock_events):
        """Follower units emit the same event -- it is not leader-gated."""
        self.harness.set_leader(False)
        rel_id = self.harness.add_relation("logging", "promtail")
        self.harness.add_relation_unit(rel_id, "promtail/0")
        self.harness.update_relation_data(
            rel_id,
            "promtail",
            {"data": '{"loki_push_api": "http://10.1.2.3:3100/loki/api/v1/push"}'},
        )
        mock_events.emit.assert_called_once()
class TestReloadAlertRules(unittest.TestCase):
    """Consumer charm can manually reload alert rules after late file creation."""
    NO_ALERTS = json.dumps({})  # relation-data representation of "no alerts"
    # short-form free-standing alert, for brevity
    ALERT = yaml.safe_dump({"alert": "free_standing", "expr": "avg(some_vector[5m]) > 5"})
    def setUp(self):
        """Start a consumer charm whose alert dir is an (initially empty) sandbox."""
        self.sandbox = TempFolderSandbox()
        alert_rules_path = os.path.join(self.sandbox.root, "alerts")
        self.alert_rules_path = alert_rules_path
        class ConsumerCharm(CharmBase):
            # Throwaway charm wired to read alert rules from the sandbox path.
            metadata_yaml = textwrap.dedent(
                """
                requires:
                  logging:
                    interface: loki_push_api
                """
            )
            def __init__(self, *args, **kwargs):
                super().__init__(*args)
                self._port = 3100
                self.loki_consumer = LokiPushApiConsumer(
                    self, alert_rules_path=alert_rules_path, recursive=True
                )
        self.harness = Harness(ConsumerCharm, meta=ConsumerCharm.metadata_yaml)
        self.addCleanup(self.harness.cleanup)
        self.harness.begin_with_initial_hooks()
        self.harness.set_leader(True)
        self.rel_id = self.harness.add_relation("logging", "loki")
        # manually emit relation-changed (https://github.com/canonical/operator/issues/682)
        self.harness.charm.on.logging_relation_changed.emit(
            self.harness.charm.model.get_relation("logging")
        )
    def test_reload_when_dir_is_still_empty_changes_nothing(self):
        """Reloading with an empty alerts dir leaves relation data unchanged."""
        relation = self.harness.charm.model.get_relation("logging")
        self.assertEqual(relation.data[self.harness.charm.app].get("alert_rules"), self.NO_ALERTS)
        self.harness.charm.loki_consumer._reinitialize_alert_rules()
        relation = self.harness.charm.model.get_relation("logging")
        self.assertEqual(relation.data[self.harness.charm.app].get("alert_rules"), self.NO_ALERTS)
    def test_reload_after_dir_is_populated_updates_relation_data(self):
        """Reloading after rule files appear pushes them into relation data."""
        relation = self.harness.charm.model.get_relation("logging")
        self.assertEqual(relation.data[self.harness.charm.app].get("alert_rules"), self.NO_ALERTS)
        self.sandbox.put_file(os.path.join(self.alert_rules_path, "alert.rule"), self.ALERT)
        self.harness.charm.loki_consumer._reinitialize_alert_rules()
        relation = self.harness.charm.model.get_relation("logging")
        self.assertNotEqual(
            relation.data[self.harness.charm.app].get("alert_rules"), self.NO_ALERTS
        )
    def test_reload_after_dir_is_emptied_updates_relation_data(self):
        """Reloading after all rule files are removed empties relation data."""
        alert_filename = os.path.join(self.alert_rules_path, "alert.rule")
        self.sandbox.put_file(alert_filename, self.ALERT)
        self.harness.charm.loki_consumer._reinitialize_alert_rules()
        relation = self.harness.charm.model.get_relation("logging")
        self.assertNotEqual(
            relation.data[self.harness.charm.app].get("alert_rules"), self.NO_ALERTS
        )
        self.sandbox.remove(alert_filename)
        self.harness.charm.loki_consumer._reinitialize_alert_rules()
        relation = self.harness.charm.model.get_relation("logging")
        self.assertEqual(relation.data[self.harness.charm.app].get("alert_rules"), self.NO_ALERTS)
    def test_reload_after_dir_itself_removed_updates_relation_data(self):
        """Reloading after the alerts dir itself is gone empties relation data."""
        alert_filename = os.path.join(self.alert_rules_path, "alert.rule")
        self.sandbox.put_file(alert_filename, self.ALERT)
        self.harness.charm.loki_consumer._reinitialize_alert_rules()
        relation = self.harness.charm.model.get_relation("logging")
        self.assertNotEqual(
            relation.data[self.harness.charm.app].get("alert_rules"), self.NO_ALERTS
        )
        self.sandbox.remove(alert_filename)
        self.sandbox.rmdir(self.alert_rules_path)
        self.harness.charm.loki_consumer._reinitialize_alert_rules()
        relation = self.harness.charm.model.get_relation("logging")
        self.assertEqual(relation.data[self.harness.charm.app].get("alert_rules"), self.NO_ALERTS)
class TestAlertRuleFormat(unittest.TestCase):
    """Consumer lib must drop empty/invalid rule files and log an error."""
    NO_ALERTS = json.dumps({})  # relation-data representation of "no alerts"
    def setUp(self):
        """Prepare a charm whose alert dir is a pyfilesystem temp sandbox.

        ``begin_with_initial_hooks()`` is deferred to the tests so they can
        write rule files *before* startup.
        """
        self.sandbox = TempFS("consumer_rule_files", auto_clean=True)
        self.addCleanup(self.sandbox.close)
        alert_rules_path = self.sandbox.getsyspath("/")
        class ConsumerCharm(CharmBase):
            # Throwaway charm wired to read alert rules from the sandbox path.
            metadata_yaml = textwrap.dedent(
                """
                requires:
                  logging:
                    interface: loki_push_api
                peers:
                  replicas:
                    interface: consumer_charm_replica
                """
            )
            def __init__(self, *args, **kwargs):
                super().__init__(*args)
                self._port = 3100
                self.loki_consumer = LokiPushApiConsumer(
                    self, alert_rules_path=alert_rules_path, recursive=True
                )
        self.harness = Harness(ConsumerCharm, meta=ConsumerCharm.metadata_yaml)
        self.addCleanup(self.harness.cleanup)
        self.peer_rel_id = self.harness.add_relation("replicas", self.harness.model.app.name)
        self.harness.set_leader(True)
        self.rel_id = self.harness.add_relation(relation_name="logging", remote_app="loki")
        self.harness.add_relation_unit(self.rel_id, "loki/0")
    def test_empty_rule_files_are_dropped_and_produce_an_error(self):
        """Empty/whitespace-only rule files are not forwarded and are logged."""
        self.sandbox.writetext("empty.rule", "")
        self.sandbox.writetext("whitespace1.rule", " ")
        self.sandbox.writetext("whitespace2.rule", "\n")
        self.sandbox.writetext("whitespace3.rule", "\r\n")
        with self.assertLogs(level="ERROR") as logger:
            self.harness.begin_with_initial_hooks()
        relation = self.harness.charm.model.get_relation("logging")
        self.assertEqual(relation.data[self.harness.charm.app].get("alert_rules"), self.NO_ALERTS)
        logger_output = "\n".join(logger.output)
        self.assertIn("empty.rule", logger_output)
        self.assertIn("whitespace1.rule", logger_output)
        self.assertIn("whitespace2.rule", logger_output)
        self.assertIn("whitespace3.rule", logger_output)
    def test_rules_files_with_invalid_yaml_are_dropped_and_produce_an_error(self):
        """Syntactically invalid YAML rule files are not forwarded and are logged."""
        self.sandbox.writetext("tab.rule", "\t")
        self.sandbox.writetext("multicolon.rule", "this: is: not: yaml")
        with self.assertLogs(level="ERROR") as logger:
            self.harness.begin_with_initial_hooks()
        relation = self.harness.charm.model.get_relation("logging")
        self.assertEqual(relation.data[self.harness.charm.app].get("alert_rules"), self.NO_ALERTS)
        logger_output = "\n".join(logger.output)
        self.assertIn("tab.rule", logger_output)
        self.assertIn("multicolon.rule", logger_output)
| true | true |
f73d04417507eb9ad8bcb6fbcd23dc9cd31f5d8e | 4,036 | py | Python | models/transformer_block.py | druzhkov-paul/T2T-ViT | 819c3ddc4cb6f464d4a9866d8713c7ace42ebf6c | [
"BSD-3-Clause-Clear"
] | null | null | null | models/transformer_block.py | druzhkov-paul/T2T-ViT | 819c3ddc4cb6f464d4a9866d8713c7ace42ebf6c | [
"BSD-3-Clause-Clear"
] | null | null | null | models/transformer_block.py | druzhkov-paul/T2T-ViT | 819c3ddc4cb6f464d4a9866d8713c7ace42ebf6c | [
"BSD-3-Clause-Clear"
] | null | null | null | # Copyright (c) [2012]-[2021] Shanghai Yitu Technology Co., Ltd.
#
# This source code is licensed under the Clear BSD License
# LICENSE file in the root directory of this file
# All rights reserved.
"""
Borrow from timm(https://github.com/rwightman/pytorch-image-models)
"""
import torch
import torch.nn as nn
import numpy as np
from timm.models.layers import DropPath
class Mlp(nn.Module):
    """Transformer feed-forward block: Linear -> activation -> dropout -> Linear -> dropout."""
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        # Hidden and output widths fall back to the input width when omitted.
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        # A single Dropout module is shared by both projections.
        self.drop = nn.Dropout(drop)
    def forward(self, x):
        """Apply the two-layer MLP to ``x`` (dropout after each projection)."""
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class Attention(nn.Module):
    """Multi-head self-attention with a fused QKV projection."""
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # Standard 1/sqrt(d_head) scaling unless the caller overrides it.
        self.scale = qk_scale or head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
    def forward(self, x):
        """Attend over ``x`` of shape (batch, tokens, channels)."""
        batch, tokens, channels = shape = x.shape
        channels = int(channels)
        head_dim = channels // self.num_heads
        # (B, N, 3C) -> (3, B, heads, N, head_dim)
        qkv = self.qkv(x).reshape(batch, tokens, 3, self.num_heads, head_dim)
        q, k, v = qkv.permute(2, 0, 3, 1, 4).unbind(0)
        scores = (q @ k.transpose(-2, -1)) * self.scale
        weights = self.attn_drop(scores.softmax(dim=-1))
        out = (weights @ v).transpose(1, 2).reshape(*shape)
        return self.proj_drop(self.proj(out))
class Block(nn.Module):
    """Pre-norm transformer encoder block: attention and MLP, each with a residual."""
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        # Stochastic depth on the residual branches; identity when rate is zero.
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=hidden_dim, act_layer=act_layer, drop=drop)
    def forward(self, x):
        """Run attention then MLP, both pre-normed and residual."""
        x = x + self.drop_path(self.attn(self.norm1(x)))
        return x + self.drop_path(self.mlp(self.norm2(x)))
def get_sinusoid_encoding(n_position, d_hid):
    """Build the fixed sinusoidal position table, shape (1, n_position, d_hid)."""
    # Each (2i, 2i+1) channel pair shares the frequency 10000^(-2i/d_hid).
    rates = np.power(10000, 2 * (np.arange(d_hid) // 2) / d_hid)
    table = np.arange(n_position)[:, None] / rates[None, :]
    table[:, 0::2] = np.sin(table[:, 0::2])  # even channels -> sine
    table[:, 1::2] = np.cos(table[:, 1::2])  # odd channels -> cosine
    return torch.FloatTensor(table).unsqueeze(0)
def get_sinusoid_encoding_pt(n_position, d_hid):
    """Pure-PyTorch sinusoid position encoding table, shape (1, n_position, d_hid).

    Matches ``get_sinusoid_encoding`` (even channels sine, odd channels
    cosine). ``d_hid`` must be even because channels are processed in
    (sin, cos) pairs; previously an odd value failed with an opaque reshape
    error, now it raises a clear ``ValueError``.
    """
    if d_hid % 2:
        raise ValueError("d_hid must be even, got %d" % d_hid)
    pos = torch.arange(n_position).reshape(-1, 1).float()
    rates = torch.pow(10000, 2 * (torch.arange(d_hid) // 2) / d_hid).reshape(1, -1).float()
    angles = pos / rates  # (n_position, d_hid)
    shape = angles.shape
    # Both members of a (2i, 2i+1) pair share the same angle, so pairing the
    # flattened table and stacking sin/cos reproduces the slice-assignment
    # formulation without in-place ops.
    pairs = angles.reshape(-1, 2)
    table = torch.stack([torch.sin(pairs[..., 0]), torch.cos(pairs[..., 1])], dim=1)
    return table.reshape(1, *shape)
| 38.807692 | 112 | 0.641724 |
import torch
import torch.nn as nn
import numpy as np
from timm.models.layers import DropPath
class Mlp(nn.Module):
    """Transformer feed-forward block: fc1 -> activation -> dropout -> fc2 -> dropout."""
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        # Hidden and output widths fall back to the input width when omitted.
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        # A single Dropout module is shared by both projections.
        self.drop = nn.Dropout(drop)
    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x
class Attention(nn.Module):
    """Multi-head self-attention with a fused QKV projection."""
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # Standard 1/sqrt(d_head) scaling unless the caller overrides it.
        self.scale = qk_scale or head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
    def forward(self, x):
        # x: (B, N, C); keep the original shape to restore it after the heads merge.
        B, N, C = original_shape = x.shape
        C = int(C)
        # (B, N, 3C) -> (3, B, heads, N, head_dim)
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        x = (attn @ v).transpose(1, 2).reshape(*original_shape)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class Block(nn.Module):
    """Pre-norm transformer encoder block: attention and MLP, each with a residual."""
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        # Stochastic depth on the residual branches; identity when rate is zero.
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
    def forward(self, x):
        x = x + self.drop_path(self.attn(self.norm1(x)))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
def get_sinusoid_encoding(n_position, d_hid):
    """Sinusoid position encoding table, shape (1, n_position, d_hid)."""
    def get_position_angle_vec(position):
        # Pair (2i, 2i+1) shares the frequency 10000^(-2i/d_hid).
        return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)]
    sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)])
    sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
    sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i+1
    return torch.FloatTensor(sinusoid_table).unsqueeze(0)
def get_sinusoid_encoding_pt(n_position, d_hid):
    """Pure-PyTorch sinusoid position encoding table, shape (1, n_position, d_hid).

    Even channels carry sine, odd channels cosine; channels are processed in
    (sin, cos) pairs, so ``d_hid`` is expected to be even.
    """
    pos = torch.arange(n_position).reshape(-1, 1).float()
    dh = torch.pow(10000, 2 * (torch.arange(d_hid) // 2) / d_hid).reshape(1, -1).float()
    sinusoid_table = pos / dh
    shape = sinusoid_table.shape
    sinusoid_table = sinusoid_table.reshape(-1, 2)
    sinusoid_table = torch.stack([torch.sin(sinusoid_table[..., 0]), torch.cos(sinusoid_table[..., 1])], dim=1)
    sinusoid_table = sinusoid_table.reshape(1, *shape)
    # Restored: the trailing return statement was garbled to "inusoid_table"
    # by the extraction pipeline.
    return sinusoid_table
| true | true |
f73d062989ac2a5616e28a5f89abef2a22ad7f7d | 3,379 | py | Python | sdcflows/workflows/tests/test_ancillary.py | madisoth/sdcflows | c2f01e4f9b19dbd89ac1b54e3cfb0643fc3fd4f2 | [
"Apache-2.0"
] | 16 | 2020-02-25T17:47:10.000Z | 2022-03-07T02:54:51.000Z | sdcflows/workflows/tests/test_ancillary.py | madisoth/sdcflows | c2f01e4f9b19dbd89ac1b54e3cfb0643fc3fd4f2 | [
"Apache-2.0"
] | 175 | 2020-02-15T00:52:28.000Z | 2022-03-29T21:42:31.000Z | sdcflows/workflows/tests/test_ancillary.py | madisoth/sdcflows | c2f01e4f9b19dbd89ac1b54e3cfb0643fc3fd4f2 | [
"Apache-2.0"
] | 12 | 2019-05-28T23:34:37.000Z | 2020-01-22T21:32:22.000Z | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2021 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Check the tools submodule."""
import os
import pytest
from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu
from niworkflows.interfaces.reportlets.masks import SimpleShowMaskRPT
from ..ancillary import init_brainextraction_wf
@pytest.mark.skipif(os.getenv("GITHUB_ACTIONS") == "true", reason="this is GH Actions")
@pytest.mark.parametrize("folder", ["magnitude/ds000054", "magnitude/ds000217"])
def test_brainmasker(tmpdir, datadir, workdir, outdir, folder):
    """Exercise the brain masking tool.

    Builds a nipype workflow that fans out over every ``*.nii.gz`` in the
    dataset folder (via ``iterables``), averages 4D inputs with ``_merge``,
    and feeds the result to ``init_brainextraction_wf``.  When ``outdir`` is
    set, an SVG mask reportlet is written per input.
    """
    tmpdir.chdir()
    wf = pe.Workflow(name=f"test_mask_{folder.replace('/', '_')}")
    if workdir:
        wf.base_dir = str(workdir)
    input_files = [
        str(f) for f in (datadir / "brain-extraction-tests" / folder).glob("*.nii.gz")
    ]
    inputnode = pe.Node(niu.IdentityInterface(fields=("in_file",)), name="inputnode")
    # One workflow iteration per input file.
    inputnode.iterables = ("in_file", input_files)
    merger = pe.Node(niu.Function(function=_merge), name="merger")
    brainmask_wf = init_brainextraction_wf()
    # fmt:off
    wf.connect([
        (inputnode, merger, [("in_file", "in_file")]),
        (merger, brainmask_wf, [("out", "inputnode.in_file")]),
    ])
    # fmt:on
    if outdir:
        out_path = outdir / "masks" / folder.split("/")[-1]
        out_path.mkdir(exist_ok=True, parents=True)
        report = pe.Node(SimpleShowMaskRPT(), name="report")
        report.interface._always_run = True
        def _report_name(fname, out_path):
            """Derive the reportlet SVG path from the input filename."""
            from pathlib import Path
            return str(
                out_path
                / Path(fname)
                .name.replace(".nii", "_mask.svg")
                .replace("_magnitude", "_desc-magnitude")
                .replace(".gz", "")
            )
        # fmt: off
        wf.connect([
            (inputnode, report, [(("in_file", _report_name, out_path), "out_report")]),
            (brainmask_wf, report, [("outputnode.out_mask", "mask_file"),
                                    ("outputnode.out_file", "background_file")]),
        ])
        # fmt: on
    wf.run()
def _merge(in_file):
    """Average a 4D image along its last axis; return 3D inputs unchanged."""
    import nibabel as nb
    import numpy as np

    image = nb.squeeze_image(nb.load(in_file))
    volume = np.asanyarray(image.dataobj)

    # Already a single 3D volume -- nothing to merge, hand back the path.
    if volume.ndim == 3:
        return in_file

    from pathlib import Path

    averaged = volume.mean(-1)
    merged_path = (Path() / "merged.nii.gz").absolute()
    image.__class__(averaged, image.affine, image.header).to_filename(merged_path)
    return str(merged_path)
| 32.805825 | 87 | 0.645753 |
import os
import pytest
from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu
from niworkflows.interfaces.reportlets.masks import SimpleShowMaskRPT
from ..ancillary import init_brainextraction_wf
@pytest.mark.skipif(os.getenv("GITHUB_ACTIONS") == "true", reason="this is GH Actions")
@pytest.mark.parametrize("folder", ["magnitude/ds000054", "magnitude/ds000217"])
def test_brainmasker(tmpdir, datadir, workdir, outdir, folder):
    """Exercise the brain masking tool on two magnitude test datasets."""
    tmpdir.chdir()
    wf = pe.Workflow(name=f"test_mask_{folder.replace('/', '_')}")
    if workdir:
        wf.base_dir = str(workdir)
    # One iteration per magnitude NIfTI file found in the dataset folder.
    input_files = [
        str(f) for f in (datadir / "brain-extraction-tests" / folder).glob("*.nii.gz")
    ]
    inputnode = pe.Node(niu.IdentityInterface(fields=("in_file",)), name="inputnode")
    inputnode.iterables = ("in_file", input_files)
    # ``_merge`` averages 4D series down to 3D before masking.
    merger = pe.Node(niu.Function(function=_merge), name="merger")
    brainmask_wf = init_brainextraction_wf()
    wf.connect([
        (inputnode, merger, [("in_file", "in_file")]),
        (merger, brainmask_wf, [("out", "inputnode.in_file")]),
    ])
    # SVG reportlets are produced only when an output folder was requested.
    if outdir:
        out_path = outdir / "masks" / folder.split("/")[-1]
        out_path.mkdir(exist_ok=True, parents=True)
        report = pe.Node(SimpleShowMaskRPT(), name="report")
        report.interface._always_run = True
        def _report_name(fname, out_path):
            # Map an input filename to its report SVG path.
            from pathlib import Path
            return str(
                out_path
                / Path(fname)
                .name.replace(".nii", "_mask.svg")
                .replace("_magnitude", "_desc-magnitude")
                .replace(".gz", "")
            )
        wf.connect([
            (inputnode, report, [(("in_file", _report_name, out_path), "out_report")]),
            (brainmask_wf, report, [("outputnode.out_mask", "mask_file"),
                                    ("outputnode.out_file", "background_file")]),
        ])
    wf.run()
def _merge(in_file):
    """Average a 4D image along its last axis; return 3D inputs unchanged."""
    import nibabel as nb
    import numpy as np
    img = nb.squeeze_image(nb.load(in_file))
    data = np.asanyarray(img.dataobj)
    # Already 3D: nothing to merge.
    if data.ndim == 3:
        return in_file
    from pathlib import Path
    data = data.mean(-1)
    # Written into the node's working directory (current dir at runtime).
    out_file = (Path() / "merged.nii.gz").absolute()
    img.__class__(data, img.affine, img.header).to_filename(out_file)
    return str(out_file)
f73d0655aeb3505542c77ad93a17e4f9ef9544a2 | 18,131 | py | Python | env/lib/python3.6/site-packages/sqlalchemy/orm/instrumentation.py | amogh-gulati/corona_dashboard | ce1a20ad56bdfb758d41513b4706fe3a47764c32 | [
"MIT"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | env/lib/python3.6/site-packages/sqlalchemy/orm/instrumentation.py | amogh-gulati/corona_dashboard | ce1a20ad56bdfb758d41513b4706fe3a47764c32 | [
"MIT"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | env/lib/python3.6/site-packages/sqlalchemy/orm/instrumentation.py | amogh-gulati/corona_dashboard | ce1a20ad56bdfb758d41513b4706fe3a47764c32 | [
"MIT"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | # orm/instrumentation.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines SQLAlchemy's system of class instrumentation.
This module is usually not directly visible to user applications, but
defines a large part of the ORM's interactivity.
instrumentation.py deals with registration of end-user classes
for state tracking. It interacts closely with state.py
and attributes.py which establish per-instance and per-class-attribute
instrumentation, respectively.
The class instrumentation system can be customized on a per-class
or global basis using the :mod:`sqlalchemy.ext.instrumentation`
module, which provides the means to build and specify
alternate instrumentation forms.
.. versionchanged: 0.8
The instrumentation extension system was moved out of the
ORM and into the external :mod:`sqlalchemy.ext.instrumentation`
package. When that package is imported, it installs
itself within sqlalchemy.orm so that its more comprehensive
resolution mechanics take effect.
"""
from . import base
from . import collections
from . import exc
from . import interfaces
from . import state
from .. import util
# Shared expiration group for ClassManager's memoized key collections;
# ClassManager calls ``expire_instance`` on it whenever attributes are
# instrumented or uninstrumented.
_memoized_key_collection = util.group_expirable_memoized_property()
class ClassManager(dict):
    """tracks state information at the class level.

    Maps attribute keys to their instrumented attribute objects; also
    owns the per-class event dispatch and the injected ``__init__``.
    """

    # Attribute names injected onto instrumented classes/instances.
    MANAGER_ATTR = base.DEFAULT_MANAGER_ATTR
    STATE_ATTR = base.DEFAULT_STATE_ATTR

    # Pre-built setter for the per-instance state attribute.
    _state_setter = staticmethod(util.attrsetter(STATE_ATTR))

    # Assigned externally when deferred scalar loading is configured.
    deferred_scalar_loader = None

    original_init = object.__init__

    # Factory callable that produced this manager; assigned by
    # InstrumentationFactory.create_manager_for_cls().
    factory = None

    def __init__(self, class_):
        self.class_ = class_
        self.info = {}
        self.new_init = None
        self.local_attrs = {}
        self.originals = {}

        # Managers of already-instrumented direct bases; their attribute
        # registries are merged into this dict below.
        self._bases = [
            mgr
            for mgr in [
                manager_of_class(base)
                for base in self.class_.__bases__
                if isinstance(base, type)
            ]
            if mgr is not None
        ]

        for base_ in self._bases:
            self.update(base_)

        self.dispatch._events._new_classmanager_instance(class_, self)
        # events._InstanceEventsHold.populate(class_, self)

        # Fold in event listeners from managers anywhere in the MRO.
        for basecls in class_.__mro__:
            mgr = manager_of_class(basecls)
            if mgr is not None:
                self.dispatch._update(mgr.dispatch)

        self.manage()
        self._instrument_init()

        if "__del__" in class_.__dict__:
            util.warn(
                "__del__() method on class %s will "
                "cause unreachable cycles and memory leaks, "
                "as SQLAlchemy instrumentation often creates "
                "reference cycles. Please remove this method." % class_
            )

    def __hash__(self):
        # Managers hash and compare by identity, never by dict contents.
        return id(self)

    def __eq__(self, other):
        return other is self

    @property
    def is_mapped(self):
        # True once a Mapper has been assigned under the "mapper" key
        # (overriding the raising memoized property below).
        return "mapper" in self.__dict__

    @_memoized_key_collection
    def _all_key_set(self):
        # Memoized; expired when attributes are (un)instrumented.
        return frozenset(self)

    @_memoized_key_collection
    def _collection_impl_keys(self):
        # Keys of collection-valued attributes only.
        return frozenset(
            [attr.key for attr in self.values() if attr.impl.collection]
        )

    @_memoized_key_collection
    def _scalar_loader_impls(self):
        # Impls that accept the deferred scalar loader.
        return frozenset(
            [
                attr.impl
                for attr in self.values()
                if attr.impl.accepts_scalar_loader
            ]
        )

    @util.memoized_property
    def mapper(self):
        # raises unless self.mapper has been assigned
        raise exc.UnmappedClassError(self.class_)

    def _all_sqla_attributes(self, exclude=None):
        """return an iterator of all classbound attributes that are
        implement :class:`.InspectionAttr`.

        This includes :class:`.QueryableAttribute` as well as extension
        types such as :class:`.hybrid_property` and
        :class:`.AssociationProxy`.

        """
        if exclude is None:
            exclude = set()
        for supercls in self.class_.__mro__:
            for key in set(supercls.__dict__).difference(exclude):
                exclude.add(key)
                val = supercls.__dict__[key]
                if (
                    isinstance(val, interfaces.InspectionAttr)
                    and val.is_attribute
                ):
                    yield key, val

    def _get_class_attr_mro(self, key, default=None):
        """return an attribute on the class without tripping it."""
        for supercls in self.class_.__mro__:
            if key in supercls.__dict__:
                return supercls.__dict__[key]
        else:
            # for/else: no class in the MRO defined the key.
            return default

    def _attr_has_impl(self, key):
        """Return True if the given attribute is fully initialized.

        i.e. has an impl.

        """
        return key in self and self[key].impl is not None

    def _subclass_manager(self, cls):
        """Create a new ClassManager for a subclass of this ClassManager's
        class.

        This is called automatically when attributes are instrumented so that
        the attributes can be propagated to subclasses against their own
        class-local manager, without the need for mappers etc. to have already
        pre-configured managers for the full class hierarchy. Mappers
        can post-configure the auto-generated ClassManager when needed.

        """
        manager = manager_of_class(cls)
        if manager is None:
            manager = _instrumentation_factory.create_manager_for_cls(cls)
        return manager

    def _instrument_init(self):
        # TODO: self.class_.__init__ is often the already-instrumented
        # __init__ from an instrumented superclass.  We still need to make
        # our own wrapper, but it would
        # be nice to wrap the original __init__ and not our existing wrapper
        # of such, since this adds method overhead.
        self.original_init = self.class_.__init__
        self.new_init = _generate_init(self.class_, self)
        self.install_member("__init__", self.new_init)

    def _uninstrument_init(self):
        # Restore the pre-instrumentation __init__, if one was installed.
        if self.new_init:
            self.uninstall_member("__init__")
            self.new_init = None

    @util.memoized_property
    def _state_constructor(self):
        # Fires the one-time "first_init" event before any state exists.
        self.dispatch.first_init(self, self.class_)
        return state.InstanceState

    def manage(self):
        """Mark this instance as the manager for its class."""
        setattr(self.class_, self.MANAGER_ATTR, self)

    def dispose(self):
        """Disassociate this manager from its class."""
        delattr(self.class_, self.MANAGER_ATTR)

    @util.hybridmethod
    def manager_getter(self):
        return _default_manager_getter

    @util.hybridmethod
    def state_getter(self):
        """Return a (instance) -> InstanceState callable.

        "state getter" callables should raise either KeyError or
        AttributeError if no InstanceState could be found for the
        instance.

        """
        return _default_state_getter

    @util.hybridmethod
    def dict_getter(self):
        return _default_dict_getter

    def instrument_attribute(self, key, inst, propagated=False):
        if propagated:
            if key in self.local_attrs:
                return  # don't override local attr with inherited attr
        else:
            self.local_attrs[key] = inst
            self.install_descriptor(key, inst)
        _memoized_key_collection.expire_instance(self)
        self[key] = inst

        # Propagate down to any already-instrumented subclasses.
        for cls in self.class_.__subclasses__():
            manager = self._subclass_manager(cls)
            manager.instrument_attribute(key, inst, True)

    def subclass_managers(self, recursive):
        # Yield managers of instrumented subclasses, optionally the whole
        # subtree when ``recursive`` is True.
        for cls in self.class_.__subclasses__():
            mgr = manager_of_class(cls)
            if mgr is not None and mgr is not self:
                yield mgr
                if recursive:
                    for m in mgr.subclass_managers(True):
                        yield m

    def post_configure_attribute(self, key):
        # Emit the "attribute_instrument" event once the attr is complete.
        _instrumentation_factory.dispatch.attribute_instrument(
            self.class_, key, self[key]
        )

    def uninstrument_attribute(self, key, propagated=False):
        if key not in self:
            return
        if propagated:
            if key in self.local_attrs:
                return  # don't get rid of local attr
        else:
            del self.local_attrs[key]
            self.uninstall_descriptor(key)
        _memoized_key_collection.expire_instance(self)
        del self[key]
        for cls in self.class_.__subclasses__():
            manager = manager_of_class(cls)
            if manager:
                manager.uninstrument_attribute(key, True)

    def unregister(self):
        """remove all instrumentation established by this ClassManager."""

        self._uninstrument_init()

        self.mapper = self.dispatch = None
        self.info.clear()

        for key in list(self):
            if key in self.local_attrs:
                self.uninstrument_attribute(key)

    def install_descriptor(self, key, inst):
        # Guard the two reserved instrumentation attribute names.
        if key in (self.STATE_ATTR, self.MANAGER_ATTR):
            raise KeyError(
                "%r: requested attribute name conflicts with "
                "instrumentation attribute of the same name." % key
            )
        setattr(self.class_, key, inst)

    def uninstall_descriptor(self, key):
        delattr(self.class_, key)

    def install_member(self, key, implementation):
        if key in (self.STATE_ATTR, self.MANAGER_ATTR):
            raise KeyError(
                "%r: requested attribute name conflicts with "
                "instrumentation attribute of the same name." % key
            )
        # Remember the first original so uninstall_member can restore it.
        self.originals.setdefault(key, getattr(self.class_, key, None))
        setattr(self.class_, key, implementation)

    def uninstall_member(self, key):
        original = self.originals.pop(key, None)
        if original is not None:
            setattr(self.class_, key, original)

    def instrument_collection_class(self, key, collection_class):
        return collections.prepare_instrumentation(collection_class)

    def initialize_collection(self, key, state, factory):
        # Returns (adapter, user_data): the raw collection plus its adapter.
        user_data = factory()
        adapter = collections.CollectionAdapter(
            self.get_impl(key), state, user_data
        )
        return adapter, user_data

    def is_instrumented(self, key, search=False):
        # ``search=True`` also considers attributes inherited from bases.
        if search:
            return key in self
        else:
            return key in self.local_attrs

    def get_impl(self, key):
        return self[key].impl

    @property
    def attributes(self):
        return iter(self.values())

    # InstanceState management

    def new_instance(self, state=None):
        # Bypasses __init__ entirely; used e.g. when loading from the DB.
        instance = self.class_.__new__(self.class_)
        if state is None:
            state = self._state_constructor(instance, self)
        self._state_setter(instance, state)
        return instance

    def setup_instance(self, instance, state=None):
        if state is None:
            state = self._state_constructor(instance, self)
        self._state_setter(instance, state)

    def teardown_instance(self, instance):
        delattr(instance, self.STATE_ATTR)

    def _serialize(self, state, state_dict):
        return _SerializeManager(state, state_dict)

    def _new_state_if_none(self, instance):
        """Install a default InstanceState if none is present.

        A private convenience method used by the __init__ decorator.

        """
        if hasattr(instance, self.STATE_ATTR):
            return False
        elif self.class_ is not instance.__class__ and self.is_mapped:
            # this will create a new ClassManager for the
            # subclass, without a mapper.  This is likely a
            # user error situation but allow the object
            # to be constructed, so that it is usable
            # in a non-ORM context at least.
            return self._subclass_manager(
                instance.__class__
            )._new_state_if_none(instance)
        else:
            state = self._state_constructor(instance, self)
            self._state_setter(instance, state)
            return state

    def has_state(self, instance):
        return hasattr(instance, self.STATE_ATTR)

    def has_parent(self, state, key, optimistic=False):
        """Return the 'hasparent' flag tracked by the given attribute's
        impl for *state*."""
        return self.get_impl(key).hasparent(state, optimistic=optimistic)

    def __bool__(self):
        """All ClassManagers are non-zero regardless of attribute state."""
        return True

    __nonzero__ = __bool__

    def __repr__(self):
        return "<%s of %r at %x>" % (
            self.__class__.__name__,
            self.class_,
            id(self),
        )
class _SerializeManager(object):
    """Provide serialization of a :class:`.ClassManager`.

    The :class:`.InstanceState` uses ``__init__()`` on serialize
    and ``__call__()`` on deserialize.

    """

    def __init__(self, state, d):
        # Remember only the class; the manager itself is looked up again
        # on deserialize, in the (possibly different) target process.
        self.class_ = state.class_
        manager = state.manager
        manager.dispatch.pickle(state, d)

    def __call__(self, state, inst, state_dict):
        state.manager = manager = manager_of_class(self.class_)
        if manager is None:
            raise exc.UnmappedInstanceError(
                inst,
                "Cannot deserialize object of type %r - "
                "no mapper() has "
                "been configured for this class within the current "
                "Python process!" % self.class_,
            )
        elif manager.is_mapped and not manager.mapper.configured:
            manager.mapper._configure_all()

        # setup _sa_instance_state ahead of time so that
        # unpickle events can access the object normally.
        # see [ticket:2362]
        if inst is not None:
            manager.setup_instance(inst, state)
        manager.dispatch.unpickle(state, state_dict)
class InstrumentationFactory(object):
    """Factory for new ClassManager instances."""

    def create_manager_for_cls(self, class_):
        """Instrument ``class_`` and return its newly created manager."""
        assert class_ is not None
        assert manager_of_class(class_) is None

        # A more elaborate subclass (e.g. the ext.instrumentation factory)
        # gets the first chance to supply a manager/factory pair.
        mgr, chosen_factory = self._locate_extended_factory(class_)

        if chosen_factory is None:
            chosen_factory = ClassManager
            mgr = chosen_factory(class_)

        self._check_conflicts(class_, chosen_factory)

        mgr.factory = chosen_factory

        self.dispatch.class_instrument(class_)
        return mgr

    def _locate_extended_factory(self, class_):
        """Hook for subclasses that perform an extended manager lookup."""
        return None, None

    def _check_conflicts(self, class_, factory):
        """Hook for subclasses that test for conflicting factories."""
        return

    def unregister(self, class_):
        """Remove all instrumentation from ``class_``."""
        mgr = manager_of_class(class_)
        mgr.unregister()
        mgr.dispose()
        self.dispatch.class_uninstrument(class_)
        if ClassManager.MANAGER_ATTR in class_.__dict__:
            delattr(class_, ClassManager.MANAGER_ATTR)
# this attribute is replaced by sqlalchemy.ext.instrumentation
# when imported.
_instrumentation_factory = InstrumentationFactory()

# these attributes are replaced by sqlalchemy.ext.instrumentation
# when a non-standard InstrumentationManager class is first
# used to instrument a class.
instance_state = _default_state_getter = base.instance_state
instance_dict = _default_dict_getter = base.instance_dict
manager_of_class = _default_manager_getter = base.manager_of_class
def register_class(class_):
    """Register class instrumentation.

    Returns the existing or newly created class manager.

    """
    existing = manager_of_class(class_)
    if existing is not None:
        return existing
    return _instrumentation_factory.create_manager_for_cls(class_)
def unregister_class(class_):
    """Undo :func:`register_class`, removing the class's instrumentation."""
    _instrumentation_factory.unregister(class_)
def is_instrumented(instance, key):
    """Return True if the given attribute on the given instance is
    instrumented by the attributes package.

    This function may be used regardless of instrumentation
    applied directly to the class, i.e. no descriptors are required.

    """
    manager = manager_of_class(instance.__class__)
    return manager.is_instrumented(key, search=True)
def _generate_init(class_, class_manager):
    """Build an __init__ decorator that triggers ClassManager events."""

    # TODO: we should use the ClassManager's notion of the
    # original '__init__' method, once ClassManager is fixed
    # to always reference that.
    original__init__ = class_.__init__
    assert original__init__

    # Go through some effort here and don't change the user's __init__
    # calling signature, including the unlikely case that it has
    # a return value.
    # FIXME: need to juggle local names to avoid constructor argument
    # clashes.
    func_body = """\
def __init__(%(apply_pos)s):
    new_state = class_manager._new_state_if_none(%(self_arg)s)
    if new_state:
        return new_state._initialize_instance(%(apply_kw)s)
    else:
        return original__init__(%(apply_kw)s)
"""
    func_vars = util.format_argspec_init(original__init__, grouped=False)
    func_text = func_body % func_vars

    if util.py2k:
        func = getattr(original__init__, "im_func", original__init__)
        func_defaults = getattr(func, "func_defaults", None)
    else:
        func_defaults = getattr(original__init__, "__defaults__", None)
        func_kw_defaults = getattr(original__init__, "__kwdefaults__", None)

    # NOTE: the generated wrapper closes over "class_manager" and
    # "original__init__" through this locals() snapshot handed to exec();
    # local variable names here are therefore load-bearing.
    env = locals().copy()
    exec(func_text, env)
    __init__ = env["__init__"]
    __init__.__doc__ = original__init__.__doc__
    __init__._sa_original_init = original__init__

    # Preserve the original defaults so introspection keeps working.
    if func_defaults:
        __init__.__defaults__ = func_defaults
    if not util.py2k and func_kw_defaults:
        __init__.__kwdefaults__ = func_kw_defaults

    return __init__
| 32.147163 | 78 | 0.650543 |
from . import base
from . import collections
from . import exc
from . import interfaces
from . import state
from .. import util
# Shared expiration group used by ClassManager's memoized key collections.
_memoized_key_collection = util.group_expirable_memoized_property()
class ClassManager(dict):
    """Track state information at the class level.

    Maps attribute keys to instrumented attribute objects and owns the
    per-class event dispatch and the injected ``__init__``.
    """

    # Attribute names injected onto instrumented classes/instances.
    MANAGER_ATTR = base.DEFAULT_MANAGER_ATTR
    STATE_ATTR = base.DEFAULT_STATE_ATTR

    _state_setter = staticmethod(util.attrsetter(STATE_ATTR))

    # Assigned externally when deferred scalar loading is configured.
    deferred_scalar_loader = None

    original_init = object.__init__

    # Factory callable that produced this manager.
    factory = None

    def __init__(self, class_):
        self.class_ = class_
        self.info = {}
        self.new_init = None
        self.local_attrs = {}
        self.originals = {}

        # Managers of already-instrumented direct bases, merged in below.
        self._bases = [
            mgr
            for mgr in [
                manager_of_class(base)
                for base in self.class_.__bases__
                if isinstance(base, type)
            ]
            if mgr is not None
        ]

        for base_ in self._bases:
            self.update(base_)

        self.dispatch._events._new_classmanager_instance(class_, self)

        # Fold in event listeners from managers anywhere in the MRO.
        for basecls in class_.__mro__:
            mgr = manager_of_class(basecls)
            if mgr is not None:
                self.dispatch._update(mgr.dispatch)

        self.manage()
        self._instrument_init()

        if "__del__" in class_.__dict__:
            util.warn(
                "__del__() method on class %s will "
                "cause unreachable cycles and memory leaks, "
                "as SQLAlchemy instrumentation often creates "
                "reference cycles. Please remove this method." % class_
            )

    def __hash__(self):
        # Identity hash/equality -- never compare by dict contents.
        return id(self)

    def __eq__(self, other):
        return other is self

    @property
    def is_mapped(self):
        # True once a Mapper has been assigned under the "mapper" key.
        return "mapper" in self.__dict__

    @_memoized_key_collection
    def _all_key_set(self):
        # Memoized; expired when attributes are (un)instrumented.
        return frozenset(self)

    @_memoized_key_collection
    def _collection_impl_keys(self):
        return frozenset(
            [attr.key for attr in self.values() if attr.impl.collection]
        )

    @_memoized_key_collection
    def _scalar_loader_impls(self):
        return frozenset(
            [
                attr.impl
                for attr in self.values()
                if attr.impl.accepts_scalar_loader
            ]
        )

    @util.memoized_property
    def mapper(self):
        # Raises unless a mapper has been assigned to this manager.
        raise exc.UnmappedClassError(self.class_)

    def _all_sqla_attributes(self, exclude=None):
        """Yield (key, attr) for all classbound :class:`.InspectionAttr`
        attributes found across the MRO."""
        if exclude is None:
            exclude = set()
        for supercls in self.class_.__mro__:
            for key in set(supercls.__dict__).difference(exclude):
                exclude.add(key)
                val = supercls.__dict__[key]
                if (
                    isinstance(val, interfaces.InspectionAttr)
                    and val.is_attribute
                ):
                    yield key, val

    def _get_class_attr_mro(self, key, default=None):
        """Return a class attribute via the MRO without triggering it."""
        for supercls in self.class_.__mro__:
            if key in supercls.__dict__:
                return supercls.__dict__[key]
        else:
            # for/else: no class in the MRO defined the key.
            return default

    def _attr_has_impl(self, key):
        """Return True if the given attribute is fully initialized
        (i.e. has an impl)."""
        return key in self and self[key].impl is not None

    def _subclass_manager(self, cls):
        """Return (creating if needed) the ClassManager for a subclass."""
        manager = manager_of_class(cls)
        if manager is None:
            manager = _instrumentation_factory.create_manager_for_cls(cls)
        return manager

    def _instrument_init(self):
        # Wrap the class __init__ with the event-firing wrapper.
        self.original_init = self.class_.__init__
        self.new_init = _generate_init(self.class_, self)
        self.install_member("__init__", self.new_init)

    def _uninstrument_init(self):
        if self.new_init:
            self.uninstall_member("__init__")
            self.new_init = None

    @util.memoized_property
    def _state_constructor(self):
        # Fires the one-time "first_init" event before any state exists.
        self.dispatch.first_init(self, self.class_)
        return state.InstanceState

    def manage(self):
        """Mark this instance as the manager for its class."""
        setattr(self.class_, self.MANAGER_ATTR, self)

    def dispose(self):
        """Disassociate this manager from its class."""
        delattr(self.class_, self.MANAGER_ATTR)

    @util.hybridmethod
    def manager_getter(self):
        return _default_manager_getter

    @util.hybridmethod
    def state_getter(self):
        """Return a (instance) -> InstanceState callable; it should raise
        KeyError or AttributeError when no state exists."""
        return _default_state_getter

    @util.hybridmethod
    def dict_getter(self):
        return _default_dict_getter

    def instrument_attribute(self, key, inst, propagated=False):
        if propagated:
            if key in self.local_attrs:
                return  # don't override a local attr with an inherited one
        else:
            self.local_attrs[key] = inst
            self.install_descriptor(key, inst)
        _memoized_key_collection.expire_instance(self)
        self[key] = inst
        # Propagate down to any already-instrumented subclasses.
        for cls in self.class_.__subclasses__():
            manager = self._subclass_manager(cls)
            manager.instrument_attribute(key, inst, True)

    def subclass_managers(self, recursive):
        # Yield managers of instrumented subclasses; whole subtree when
        # ``recursive`` is True.
        for cls in self.class_.__subclasses__():
            mgr = manager_of_class(cls)
            if mgr is not None and mgr is not self:
                yield mgr
                if recursive:
                    for m in mgr.subclass_managers(True):
                        yield m

    def post_configure_attribute(self, key):
        _instrumentation_factory.dispatch.attribute_instrument(
            self.class_, key, self[key]
        )

    def uninstrument_attribute(self, key, propagated=False):
        if key not in self:
            return
        if propagated:
            if key in self.local_attrs:
                return  # don't get rid of local attr
        else:
            del self.local_attrs[key]
            self.uninstall_descriptor(key)
        _memoized_key_collection.expire_instance(self)
        del self[key]
        for cls in self.class_.__subclasses__():
            manager = manager_of_class(cls)
            if manager:
                manager.uninstrument_attribute(key, True)

    def unregister(self):
        """Remove all instrumentation established by this ClassManager."""
        self._uninstrument_init()

        self.mapper = self.dispatch = None
        self.info.clear()

        for key in list(self):
            if key in self.local_attrs:
                self.uninstrument_attribute(key)

    def install_descriptor(self, key, inst):
        # Guard the two reserved instrumentation attribute names.
        if key in (self.STATE_ATTR, self.MANAGER_ATTR):
            raise KeyError(
                "%r: requested attribute name conflicts with "
                "instrumentation attribute of the same name." % key
            )
        setattr(self.class_, key, inst)

    def uninstall_descriptor(self, key):
        delattr(self.class_, key)

    def install_member(self, key, implementation):
        if key in (self.STATE_ATTR, self.MANAGER_ATTR):
            raise KeyError(
                "%r: requested attribute name conflicts with "
                "instrumentation attribute of the same name." % key
            )
        # Remember the first original so uninstall_member can restore it.
        self.originals.setdefault(key, getattr(self.class_, key, None))
        setattr(self.class_, key, implementation)

    def uninstall_member(self, key):
        original = self.originals.pop(key, None)
        if original is not None:
            setattr(self.class_, key, original)

    def instrument_collection_class(self, key, collection_class):
        return collections.prepare_instrumentation(collection_class)

    def initialize_collection(self, key, state, factory):
        # Returns (adapter, user_data): the raw collection and its adapter.
        user_data = factory()
        adapter = collections.CollectionAdapter(
            self.get_impl(key), state, user_data
        )
        return adapter, user_data

    def is_instrumented(self, key, search=False):
        # ``search=True`` also considers attributes inherited from bases.
        if search:
            return key in self
        else:
            return key in self.local_attrs

    def get_impl(self, key):
        return self[key].impl

    @property
    def attributes(self):
        return iter(self.values())

    # -- InstanceState management --

    def new_instance(self, state=None):
        # Bypasses __init__ entirely; used e.g. when loading from the DB.
        instance = self.class_.__new__(self.class_)
        if state is None:
            state = self._state_constructor(instance, self)
        self._state_setter(instance, state)
        return instance

    def setup_instance(self, instance, state=None):
        if state is None:
            state = self._state_constructor(instance, self)
        self._state_setter(instance, state)

    def teardown_instance(self, instance):
        delattr(instance, self.STATE_ATTR)

    def _serialize(self, state, state_dict):
        return _SerializeManager(state, state_dict)

    def _new_state_if_none(self, instance):
        """Install a default InstanceState if none is present; private
        convenience used by the generated __init__ wrapper."""
        if hasattr(instance, self.STATE_ATTR):
            return False
        elif self.class_ is not instance.__class__ and self.is_mapped:
            # Create a mapper-less manager for the subclass so the object
            # is at least usable in a non-ORM context.
            return self._subclass_manager(
                instance.__class__
            )._new_state_if_none(instance)
        else:
            state = self._state_constructor(instance, self)
            self._state_setter(instance, state)
            return state

    def has_state(self, instance):
        return hasattr(instance, self.STATE_ATTR)

    def has_parent(self, state, key, optimistic=False):
        """Return the 'hasparent' flag tracked by the given attribute's
        impl for *state*."""
        return self.get_impl(key).hasparent(state, optimistic=optimistic)

    def __bool__(self):
        """All ClassManagers are non-zero regardless of attribute state."""
        return True

    __nonzero__ = __bool__

    def __repr__(self):
        return "<%s of %r at %x>" % (
            self.__class__.__name__,
            self.class_,
            id(self),
        )
class _SerializeManager(object):
    """Provide serialization of a :class:`.ClassManager`; the instance
    state uses ``__init__()`` on serialize and ``__call__()`` on
    deserialize."""

    def __init__(self, state, d):
        # Keep only the class; the manager is re-looked-up on deserialize.
        self.class_ = state.class_
        manager = state.manager
        manager.dispatch.pickle(state, d)

    def __call__(self, state, inst, state_dict):
        state.manager = manager = manager_of_class(self.class_)
        if manager is None:
            raise exc.UnmappedInstanceError(
                inst,
                "Cannot deserialize object of type %r - "
                "no mapper() has "
                "been configured for this class within the current "
                "Python process!" % self.class_,
            )
        elif manager.is_mapped and not manager.mapper.configured:
            manager.mapper._configure_all()

        # Set up the instance state ahead of time so unpickle events can
        # access the object normally.
        if inst is not None:
            manager.setup_instance(inst, state)
        manager.dispatch.unpickle(state, state_dict)
class InstrumentationFactory(object):
    """Factory for new ClassManager instances."""

    def create_manager_for_cls(self, class_):
        """Instrument ``class_`` and return its newly created manager."""
        assert class_ is not None
        assert manager_of_class(class_) is None

        # A more complicated subclass gets a chance to do what it wants
        # here before the default ClassManager is used.
        manager, factory = self._locate_extended_factory(class_)

        if factory is None:
            factory = ClassManager
            manager = factory(class_)

        self._check_conflicts(class_, factory)

        manager.factory = factory

        self.dispatch.class_instrument(class_)
        return manager

    def _locate_extended_factory(self, class_):
        """Hook for subclasses performing an extended manager lookup."""
        return None, None

    def _check_conflicts(self, class_, factory):
        """Hook for subclasses that test for conflicting factories."""
        return

    def unregister(self, class_):
        """Remove all instrumentation from ``class_``."""
        manager = manager_of_class(class_)
        manager.unregister()
        manager.dispose()
        self.dispatch.class_uninstrument(class_)
        if ClassManager.MANAGER_ATTR in class_.__dict__:
            delattr(class_, ClassManager.MANAGER_ATTR)
# Replaced by sqlalchemy.ext.instrumentation when that extension is imported.
_instrumentation_factory = InstrumentationFactory()

# Replaced by sqlalchemy.ext.instrumentation when a non-standard
# InstrumentationManager class is first used to instrument a class.
instance_state = _default_state_getter = base.instance_state
instance_dict = _default_dict_getter = base.instance_dict
manager_of_class = _default_manager_getter = base.manager_of_class
def register_class(class_):
    """Register class instrumentation.

    Returns the existing or newly created class manager.

    """
    manager = manager_of_class(class_)
    if manager is None:
        manager = _instrumentation_factory.create_manager_for_cls(class_)
    return manager
def unregister_class(class_):
    """Unregister class instrumentation."""
    _instrumentation_factory.unregister(class_)
def is_instrumented(instance, key):
    """Return True if the given attribute on the given instance is
    instrumented by the attributes package; usable regardless of
    instrumentation applied directly to the class."""
    return manager_of_class(instance.__class__).is_instrumented(
        key, search=True
    )
def _generate_init(class_, class_manager):
    """Build an __init__ decorator that triggers ClassManager events."""

    # TODO: we should use the ClassManager's notion of the
    # original '__init__' method, once ClassManager is fixed
    # to always reference that.
    original__init__ = class_.__init__
    assert original__init__

    # Go through some effort here and don't change the user's __init__
    # calling signature, including the unlikely case that it has
    # a return value.
    # FIXME: need to juggle local names to avoid constructor argument
    # clashes.
    func_body = """\
def __init__(%(apply_pos)s):
    new_state = class_manager._new_state_if_none(%(self_arg)s)
    if new_state:
        return new_state._initialize_instance(%(apply_kw)s)
    else:
        return original__init__(%(apply_kw)s)
"""
    func_vars = util.format_argspec_init(original__init__, grouped=False)
    func_text = func_body % func_vars

    if util.py2k:
        func = getattr(original__init__, "im_func", original__init__)
        func_defaults = getattr(func, "func_defaults", None)
    else:
        func_defaults = getattr(original__init__, "__defaults__", None)
        func_kw_defaults = getattr(original__init__, "__kwdefaults__", None)

    # NOTE: the generated wrapper closes over "class_manager" and
    # "original__init__" through this locals() snapshot handed to exec();
    # local variable names here are therefore load-bearing.
    env = locals().copy()
    exec(func_text, env)
    __init__ = env["__init__"]
    __init__.__doc__ = original__init__.__doc__
    __init__._sa_original_init = original__init__

    # Preserve the original defaults so introspection keeps working.
    if func_defaults:
        __init__.__defaults__ = func_defaults
    if not util.py2k and func_kw_defaults:
        __init__.__kwdefaults__ = func_kw_defaults

    return __init__
| true | true |
f73d0759d039f23a64d0b7e1e12cb250c63a58e4 | 5,706 | py | Python | graphs/minimum_spanning_tree_boruvka.py | hank-chou/python | a9f729fa263bce599d2774f3f6afb5a18bcc9862 | [
"MIT"
] | 11 | 2021-02-18T04:53:44.000Z | 2022-01-16T10:57:39.000Z | graphs/minimum_spanning_tree_boruvka.py | hank-chou/python | a9f729fa263bce599d2774f3f6afb5a18bcc9862 | [
"MIT"
] | 162 | 2021-03-09T01:52:11.000Z | 2022-03-12T01:09:07.000Z | graphs/minimum_spanning_tree_boruvka.py | hank-chou/python | a9f729fa263bce599d2774f3f6afb5a18bcc9862 | [
"MIT"
] | 8 | 2021-02-18T05:12:34.000Z | 2022-03-06T19:02:14.000Z | class Graph:
"""
Data structure to store graphs (based on adjacency lists)
"""
def __init__(self):
self.num_vertices = 0
self.num_edges = 0
self.adjacency = {}
def add_vertex(self, vertex):
"""
Adds a vertex to the graph
"""
if vertex not in self.adjacency:
self.adjacency[vertex] = {}
self.num_vertices += 1
def add_edge(self, head, tail, weight):
"""
Adds an edge to the graph
"""
self.add_vertex(head)
self.add_vertex(tail)
if head == tail:
return
self.adjacency[head][tail] = weight
self.adjacency[tail][head] = weight
def distinct_weight(self):
"""
For Boruvks's algorithm the weights should be distinct
Converts the weights to be distinct
"""
edges = self.get_edges()
for edge in edges:
head, tail, weight = edge
edges.remove((tail, head, weight))
for i in range(len(edges)):
edges[i] = list(edges[i])
edges.sort(key=lambda e: e[2])
for i in range(len(edges) - 1):
if edges[i][2] >= edges[i + 1][2]:
edges[i + 1][2] = edges[i][2] + 1
for edge in edges:
head, tail, weight = edge
self.adjacency[head][tail] = weight
self.adjacency[tail][head] = weight
def __str__(self):
"""
Returns string representation of the graph
"""
string = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
weight = self.adjacency[head][tail]
string += "%d -> %d == %d\n" % (head, tail, weight)
return string.rstrip("\n")
def get_edges(self):
"""
Returna all edges in the graph
"""
output = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]))
return output
def get_vertices(self):
"""
Returns all vertices in the graph
"""
return self.adjacency.keys()
@staticmethod
def build(vertices=None, edges=None):
"""
Builds a graph from the given set of vertices and edges
"""
g = Graph()
if vertices is None:
vertices = []
if edges is None:
edge = []
for vertex in vertices:
g.add_vertex(vertex)
for edge in edges:
g.add_edge(*edge)
return g
class UnionFind:
"""
Disjoint set Union and Find for Boruvka's algorithm
"""
def __init__(self):
self.parent = {}
self.rank = {}
def __len__(self):
return len(self.parent)
def make_set(self, item):
if item in self.parent:
return self.find(item)
self.parent[item] = item
self.rank[item] = 0
return item
def find(self, item):
if item not in self.parent:
return self.make_set(item)
if item != self.parent[item]:
self.parent[item] = self.find(self.parent[item])
return self.parent[item]
def union(self, item1, item2):
root1 = self.find(item1)
root2 = self.find(item2)
if root1 == root2:
return root1
if self.rank[root1] > self.rank[root2]:
self.parent[root2] = root1
return root1
if self.rank[root1] < self.rank[root2]:
self.parent[root1] = root2
return root2
if self.rank[root1] == self.rank[root2]:
self.rank[root1] += 1
self.parent[root2] = root1
return root1
@staticmethod
def boruvka_mst(graph):
"""
Implementation of Boruvka's algorithm
>>> g = Graph()
>>> g = Graph.build([0, 1, 2, 3], [[0, 1, 1], [0, 2, 1],[2, 3, 1]])
>>> g.distinct_weight()
>>> bg = Graph.boruvka_mst(g)
>>> print(bg)
1 -> 0 == 1
2 -> 0 == 2
0 -> 1 == 1
0 -> 2 == 2
3 -> 2 == 3
2 -> 3 == 3
"""
num_components = graph.num_vertices
union_find = Graph.UnionFind()
mst_edges = []
while num_components > 1:
cheap_edge = {}
for vertex in graph.get_vertices():
cheap_edge[vertex] = -1
edges = graph.get_edges()
for edge in edges:
head, tail, weight = edge
edges.remove((tail, head, weight))
for edge in edges:
head, tail, weight = edge
set1 = union_find.find(head)
set2 = union_find.find(tail)
if set1 != set2:
if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
cheap_edge[set1] = [head, tail, weight]
if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
cheap_edge[set2] = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
head, tail, weight = cheap_edge[vertex]
if union_find.find(head) != union_find.find(tail):
union_find.union(head, tail)
mst_edges.append(cheap_edge[vertex])
num_components = num_components - 1
mst = Graph.build(edges=mst_edges)
return mst
| 28.964467 | 78 | 0.487907 | class Graph:
def __init__(self):
self.num_vertices = 0
self.num_edges = 0
self.adjacency = {}
def add_vertex(self, vertex):
if vertex not in self.adjacency:
self.adjacency[vertex] = {}
self.num_vertices += 1
def add_edge(self, head, tail, weight):
self.add_vertex(head)
self.add_vertex(tail)
if head == tail:
return
self.adjacency[head][tail] = weight
self.adjacency[tail][head] = weight
def distinct_weight(self):
edges = self.get_edges()
for edge in edges:
head, tail, weight = edge
edges.remove((tail, head, weight))
for i in range(len(edges)):
edges[i] = list(edges[i])
edges.sort(key=lambda e: e[2])
for i in range(len(edges) - 1):
if edges[i][2] >= edges[i + 1][2]:
edges[i + 1][2] = edges[i][2] + 1
for edge in edges:
head, tail, weight = edge
self.adjacency[head][tail] = weight
self.adjacency[tail][head] = weight
def __str__(self):
string = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
weight = self.adjacency[head][tail]
string += "%d -> %d == %d\n" % (head, tail, weight)
return string.rstrip("\n")
def get_edges(self):
output = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]))
return output
def get_vertices(self):
return self.adjacency.keys()
@staticmethod
def build(vertices=None, edges=None):
g = Graph()
if vertices is None:
vertices = []
if edges is None:
edge = []
for vertex in vertices:
g.add_vertex(vertex)
for edge in edges:
g.add_edge(*edge)
return g
class UnionFind:
def __init__(self):
self.parent = {}
self.rank = {}
def __len__(self):
return len(self.parent)
def make_set(self, item):
if item in self.parent:
return self.find(item)
self.parent[item] = item
self.rank[item] = 0
return item
def find(self, item):
if item not in self.parent:
return self.make_set(item)
if item != self.parent[item]:
self.parent[item] = self.find(self.parent[item])
return self.parent[item]
def union(self, item1, item2):
root1 = self.find(item1)
root2 = self.find(item2)
if root1 == root2:
return root1
if self.rank[root1] > self.rank[root2]:
self.parent[root2] = root1
return root1
if self.rank[root1] < self.rank[root2]:
self.parent[root1] = root2
return root2
if self.rank[root1] == self.rank[root2]:
self.rank[root1] += 1
self.parent[root2] = root1
return root1
@staticmethod
def boruvka_mst(graph):
num_components = graph.num_vertices
union_find = Graph.UnionFind()
mst_edges = []
while num_components > 1:
cheap_edge = {}
for vertex in graph.get_vertices():
cheap_edge[vertex] = -1
edges = graph.get_edges()
for edge in edges:
head, tail, weight = edge
edges.remove((tail, head, weight))
for edge in edges:
head, tail, weight = edge
set1 = union_find.find(head)
set2 = union_find.find(tail)
if set1 != set2:
if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
cheap_edge[set1] = [head, tail, weight]
if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
cheap_edge[set2] = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
head, tail, weight = cheap_edge[vertex]
if union_find.find(head) != union_find.find(tail):
union_find.union(head, tail)
mst_edges.append(cheap_edge[vertex])
num_components = num_components - 1
mst = Graph.build(edges=mst_edges)
return mst
| true | true |
f73d08975ca3cbf8ae8233b59ede8665af9489fd | 1,585 | py | Python | signbank/context_processors.py | Signbank/signbank | d28c843475141b8a0ede17b7977f88d93b27aa85 | [
"BSD-3-Clause"
] | null | null | null | signbank/context_processors.py | Signbank/signbank | d28c843475141b8a0ede17b7977f88d93b27aa85 | [
"BSD-3-Clause"
] | null | null | null | signbank/context_processors.py | Signbank/signbank | d28c843475141b8a0ede17b7977f88d93b27aa85 | [
"BSD-3-Clause"
] | null | null | null | from django.conf import settings
from guardian.shortcuts import get_objects_for_user
from signbank.tools import get_selected_datasets_for_user, get_datasets_with_public_glosses
from signbank.dictionary.models import Dataset
def url(request):
    """
    Context processor exposing site URLs and dataset selections to templates.
    """
    if request.user.is_authenticated():
        # authenticated users see every dataset in the header
        viewable_datasets = Dataset.objects.all()
        selected_datasets = get_selected_datasets_for_user(request.user)
    else:
        # anonymous users only see datasets that contain public glosses
        viewable_datasets = get_datasets_with_public_glosses()
        if 'selected_datasets' in request.session.keys():
            selected_datasets = Dataset.objects.filter(acronym__in=request.session['selected_datasets'])
        else:
            # fresh session: fall back to the configured default dataset
            selected_datasets = Dataset.objects.filter(acronym=settings.DEFAULT_DATASET_ACRONYM)

    dataset_choices = [(dataset, dataset in selected_datasets) for dataset in viewable_datasets]
    language_codes = [code for code, _full_name in settings.LANGUAGES]
    return {'URL': settings.URL,
            'PREFIX_URL': settings.PREFIX_URL,
            'viewable_datasets': dataset_choices,
            'selected_datasets': selected_datasets,
            'SHOW_DATASET_INTERFACE_OPTIONS': settings.SHOW_DATASET_INTERFACE_OPTIONS,
            'SEPARATE_ENGLISH_IDGLOSS_FIELD': settings.SEPARATE_ENGLISH_IDGLOSS_FIELD,
            'CROP_GLOSS_IMAGES': settings.CROP_GLOSS_IMAGES,
            'INTERFACE_LANGUAGE_CODES': language_codes,
            'INTERFACE_LANGUAGE_SHORT_NAMES': settings.INTERFACE_LANGUAGE_SHORT_NAMES
            }
| 49.53125 | 108 | 0.748896 | from django.conf import settings
from guardian.shortcuts import get_objects_for_user
from signbank.tools import get_selected_datasets_for_user, get_datasets_with_public_glosses
from signbank.dictionary.models import Dataset
def url(request):
if not request.user.is_authenticated():
viewable_datasets = get_datasets_with_public_glosses()
if 'selected_datasets' in request.session.keys():
selected_datasets = Dataset.objects.filter(acronym__in=request.session['selected_datasets'])
else:
selected_datasets = Dataset.objects.filter(acronym=settings.DEFAULT_DATASET_ACRONYM)
else:
viewable_datasets = Dataset.objects.all()
selected_datasets = get_selected_datasets_for_user(request.user)
return {'URL': settings.URL,
'PREFIX_URL': settings.PREFIX_URL,
'viewable_datasets': [(dataset, dataset in selected_datasets) for dataset in viewable_datasets],
'selected_datasets': selected_datasets,
'SHOW_DATASET_INTERFACE_OPTIONS': settings.SHOW_DATASET_INTERFACE_OPTIONS,
'SEPARATE_ENGLISH_IDGLOSS_FIELD':settings.SEPARATE_ENGLISH_IDGLOSS_FIELD,
'CROP_GLOSS_IMAGES': settings.CROP_GLOSS_IMAGES,
'INTERFACE_LANGUAGE_CODES': [language_code for language_code, full_name in settings.LANGUAGES],
'INTERFACE_LANGUAGE_SHORT_NAMES': settings.INTERFACE_LANGUAGE_SHORT_NAMES
}
| true | true |
f73d09c2da9b1f1b580b0a0903e9b18a0b4a3e83 | 524 | py | Python | ltcd/test_349.py | ValentynaGorbachenko/cd2 | ad9e05092ddca9bfef29f185b23fdf5b22099e05 | [
"MIT"
] | null | null | null | ltcd/test_349.py | ValentynaGorbachenko/cd2 | ad9e05092ddca9bfef29f185b23fdf5b22099e05 | [
"MIT"
] | null | null | null | ltcd/test_349.py | ValentynaGorbachenko/cd2 | ad9e05092ddca9bfef29f185b23fdf5b22099e05 | [
"MIT"
] | null | null | null | # test_349.py
import unittest
from intersection_349 import intersect
class intersectTest(unittest.TestCase):
    """Unit tests for the LeetCode 349 ``intersect`` solution."""

    def test_intersect_1(self):
        # Shared values are reported once even when duplicated in both inputs.
        self.assertEqual(intersect([1, 2, 2, 1], [2, 2]), [2])

    def test_intersect_2(self):
        # Result order is unspecified, so compare against the sorted output.
        result = sorted(intersect([4, 4, 9, 5], [9, 4, 9, 8, 4]))
        self.assertEqual(result, [4, 9])

    def test_intersect_5(self):
        # Two empty inputs intersect to the empty list.
        self.assertEqual(intersect([], []), [])
if __name__ == '__main__':
unittest.main() | 26.2 | 58 | 0.625954 |
import unittest
from intersection_349 import intersect
class intersectTest(unittest.TestCase):
    """Unit tests for the LeetCode 349 ``intersect`` function."""
    def test_intersect_1(self):
        # shared values are reported once even when duplicated in both inputs
        self.assertEqual(intersect([1,2,2,1], [2,2]), [2])
    def test_intersect_2(self):
        # result order is unspecified, so sort before comparing
        res = intersect([4,4,9,5],[9,4,9,8,4])
        res.sort()
        self.assertEqual(res, [4,9])
    def test_intersect_5(self):
        # two empty inputs intersect to the empty list
        self.assertEqual(intersect([],[]), [])
if __name__ == '__main__':
unittest.main() | true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.